query            stringlengths   9 – 3.4k
document         stringlengths   9 – 87.4k
metadata         dict
negatives        listlengths     4 – 101
negative_scores  listlengths     4 – 101
document_score   stringlengths   3 – 10
document_rank    stringclasses   102 values
Returns a step dictionary which is compatible with annotator.py.
def __call__(self, name, cmd, ok_ret=None, infra_step=False, wrapper=(),
             timeout=None, allow_subannotations=None, trigger_specs=None,
             stdout=None, stderr=None, stdin=None, step_test_data=None):
  # Calculate our full step name. If a step already has that name, add an
  # index to the end of it.
  #
  # Note that another step could exist with that index already added to it
  # by the user. If this happens, we'll continue appending indexes until we
  # have a unique step name.
  with self.m.context(name_prefix=name):
    base_name = self.m.context.name_prefix

  name_suffix = ''
  while True:
    full_name = base_name + name_suffix
    if full_name not in self._seen_steps:
      break
    step_count = self._step_names.setdefault(full_name, 1) + 1
    self._step_names[full_name] = step_count
    name_suffix = ' (%d)' % step_count
  self._seen_steps.add(full_name)

  assert isinstance(cmd, (types.NoneType, list))
  if cmd is not None:
    cmd = list(wrapper) + cmd
    for x in cmd:
      if not isinstance(x, (int, long, basestring, Path, Placeholder)):
        raise AssertionError('Type %s is not permitted. '
                             'cmd is %r' % (type(x), cmd))

  cwd = self.m.context.cwd
  if cwd and cwd == self.m.path['start_dir']:
    cwd = None

  with self.m.context(env_prefixes={'PATH': self._prefix_path}):
    env_prefixes = self.m.context.env_prefixes

  if ok_ret in ('any', 'all'):
    ok_ret = self.step_client.StepConfig.ALL_OK

  return self.step_client.run_step(self.step_client.StepConfig(
      name=full_name,
      base_name=full_name or name,
      cmd=cmd,
      cwd=cwd,
      env=self.m.context.env,
      env_prefixes=self.step_client.StepConfig.EnvAffix(
          mapping=env_prefixes,
          pathsep=self.m.path.pathsep,
      ),
      env_suffixes=self.step_client.StepConfig.EnvAffix(
          mapping=self.m.context.env_suffixes,
          pathsep=self.m.path.pathsep,
      ),
      allow_subannotations=bool(allow_subannotations),
      trigger_specs=[self._make_trigger_spec(trig)
                     for trig in (trigger_specs or ())],
      timeout=timeout,
      infra_step=self.m.context.infra_step or bool(infra_step),
      stdout=stdout,
      stderr=stderr,
      stdin=stdin,
      ok_ret=ok_ret,
      step_test_data=step_test_data,
      nest_level=self.m.context.nest_level,
  ))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSteps():", "def generation_step_to_dict(generation_step: GenerationStep) -> Dict[str, Any]:\n return {\n \"__type\": generation_step.__class__.__name__,\n \"model\": generation_step.model,\n \"num_trials\": generation_step.num_trials,\n \"min_trials_observed\": generation_step.min_trials_observed,\n \"completion_criteria\": generation_step.completion_criteria,\n \"max_parallelism\": generation_step.max_parallelism,\n \"use_update\": generation_step.use_update,\n \"enforce_num_trials\": generation_step.enforce_num_trials,\n \"model_kwargs\": _encode_callables_as_references(\n generation_step.model_kwargs or {}\n ),\n \"model_gen_kwargs\": _encode_callables_as_references(\n generation_step.model_gen_kwargs or {}\n ),\n \"index\": generation_step.index,\n \"should_deduplicate\": generation_step.should_deduplicate,\n }", "def get_steps(self):\n return self.steps", "def getStep():\n # TODO: can there be non-Step logs?", "def get_step(self):\n return self.step", "def get_step(self):\n return self.step", "def _collect_kwargs(step):\n dicts = {}\n for s in _expand_inputs(step):\n name = s.name if s.name is not None else s.__class__.__name__\n if name in dicts.keys():\n raise ValueError(\"Duplicate step names: %s\" % name)\n\n d = dict(s._kwargs)\n d.pop('inputs', None)\n dicts[name] = d\n\n return dicts", "def getCurrentStep():", "def _get_steps(self):\n return self.steps", "def training_step(self, batch):\n return {}", "def step(self):\n return self._step", "def step_impl(context):\n pass", "def step_impl(context):\n pass", "def inference_step(self, batch: Any, **kwargs) -> Dict[str, Any]:\n return self.model.inference_step(batch, **kwargs)", "def get_step_metadata(\n step: Any,\n step_metadata_skip_list: AbstractSet[str] = frozenset()\n) -> Dict[str, Any]:\n if not isinstance(step.custom_data, dict):\n # We ignore step metadata if step.custom_data is not a dictionary\n return {}\n return {\n k: v\n for k, v in step.custom_data.items()\n if k != 'episode_metadata' and k not in step_metadata_skip_list\n }", "def raw_steps(self):\n return self.obj_payload[\"steps\"]", "def step(self, observation):\n action, value = self(observation)\n\n return {\n 'actions': action,\n 'values': value\n }", "def getSteps( self ):\n\n return self.adb.get( 'steps' )", "def get_info(self, streetlearn):\n info = super(StepByStepInstructionGame, self).get_info(streetlearn)\n info['current_step'] = 0\n return info", "def do_steps(self):\n steps = self.get_step_conf()\n all_step_config = dict()\n for k, v in steps.items():\n tmp_list = list()\n all_step_config[k] = tmp_list\n start = v[\"Start Value\"]\n end = v[\"End Value\"]\n # special handling of edge length\n if(k == \"Edge Length\"):\n start = self.convert_to_tuple(start)\n end = self.convert_to_tuple(end)\n tmp_list.append(str(start))\n while(start != end):\n start = self.add_edge_length(\n start, self.convert_to_tuple(v[\"Step\"]))\n tmp_list.append(str(start))\n print start\n else:\n tmp_list.append(float(start))\n while float(start) < float(end):\n start = float(start) + float(v[\"Step\"])\n tmp_list.append(start)\n return all_step_config", "def step_env(self):\n raise NotImplementedError\n # Not needed for this homework", "def step ( self ) :\n return self.__step", "def get_steps(self):\n return self.steps", "def step_index(df):\n steps = {}\n for step in df.STEP:\n steps[step] = df.index[df.STEP == step]\n return steps", "def step_key(self) -> str:\n return self._step_execution_context.step.key", "def step(self, observation: dict) -> dict:\n raise 
NotImplementedError(\"step\")", "def to_dict(self, exclusions: list = []) -> dict:\n step_details = {name: step.to_dict() for name, step in self.run.items()}\n order = {str(i_step): name for i_step, name in enumerate(self.run.keys())}\n return dict(order=order, details=step_details)", "def get_step_conf(self):\n return self.step_conf", "def get_params(self):\n params = {}\n for step in self.steps:\n params[step[0]] = step[1].get_params()\n return params", "def step(self):\n return self._step", "def step(self):\n return self._step", "def step(self):\n return self._step", "def step(self):\n return self._step", "def _get_params_from_yaml_dict(self, yd):\n params = {}\n for step in yd.keys():\n params.update({step+'__'+k: v for k, v in yd[step].items()})\n return params", "def _get_step_config() -> Dict[str, Any]:\n config_str = os.environ['MAHIRU_STEP_CONFIG']\n return json.loads(config_str) # type: ignore", "def create_step(self, step):\n raise NotImplementedError", "def _generate_steps(\n episode: Sequence[Any],\n step_metadata_skip_list: AbstractSet[str]) -> Dict[str, Any]:\n step_metadata = _empty_nested_list(\n get_step_metadata(episode[0], step_metadata_skip_list))\n\n steps = {\n 'observation':\n _empty_nested_list(episode[0].timestep.observation),\n 'action':\n _empty_nested_list(episode[0].action),\n 'reward': [],\n 'discount': [],\n 'is_terminal': [],\n 'is_first': [],\n 'is_last': [],\n }\n steps.update(step_metadata)\n\n prev_step = None\n for step in episode:\n if prev_step is not None:\n steps['is_first'].append(prev_step.timestep.first())\n steps['is_terminal'].append(False)\n steps['is_last'].append(prev_step.timestep.last())\n steps['observation'] = _append_nested(\n steps['observation'], prev_step.timestep.observation)\n steps['reward'].append(step.timestep.reward)\n steps['discount'].append(step.timestep.discount)\n steps['action'] = _append_nested(steps['action'], step.action)\n step_metadata = get_step_metadata(prev_step, step_metadata_skip_list)\n for k, v in step_metadata.items():\n steps[k] = _append_nested(steps[k], v)\n prev_step = step\n if prev_step is not None:\n # We append the observation of the final step (action and reward were\n # included in the previous step.\n # The terminal flag is inferred like in termination(), truncation()\n # from dm_env/_environment.py\n is_terminal = (\n prev_step.timestep.last() and prev_step.timestep.discount == 0.0)\n steps['is_first'].append(prev_step.timestep.first())\n steps['is_terminal'].append(is_terminal)\n steps['is_last'].append(True)\n steps['observation'] = _append_nested(\n steps['observation'], prev_step.timestep.observation)\n # Discount, action and reward are meaningless in the terminal step\n steps['reward'].append(np.zeros_like(prev_step.timestep.reward))\n steps['discount'].append(\n np.zeros_like(prev_step.timestep.discount))\n steps['action'] = _append_nested(\n steps['action'],\n tf.nest.map_structure(np.zeros_like, prev_step.action))\n step_metadata = get_step_metadata(prev_step, step_metadata_skip_list)\n for k, v in step_metadata.items():\n steps[k] = _append_nested(steps[k], v)\n return steps", "def _step(self) -> int:\n return self._config[CONF_STEP]", "def get_view_steps(self):\n return self._data_dict[self.KEY_VIEW_STEPS]", "def test_stepregistry_should_create_one_step_decorator_per_keyword():\n # given\n registry = StepRegistry()\n context = {}\n\n # when\n registry.create_step_decorators(context)\n\n # then\n assert len(context) == 4\n assert \"given\" in context\n assert \"when\" in context\n assert 
\"then\" in context\n assert \"step\" in context", "def _get_step_changes(\n project: 'projects.Project',\n step: 'projects.ProjectStep',\n write_running: bool\n) -> typing.Dict[str, typing.Any]:\n step_data = writing.step_writer.serialize(step)\n\n if write_running and step.is_running:\n writing.save(project, step_data.file_writes)\n\n return dict(\n name=step.definition.name,\n action='updated',\n step=step_data._asdict(),\n timestamp=time.time(),\n written=write_running and step.is_running\n )", "def get_workflow_steps(self):\n return self._data_dict[self.KEY_WF_STEPS]", "def get_step(self) -> int:\n return self.step", "def build_step(self):\n pass", "def build_step(self):\n pass", "def steps(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['StepArgs']]]]:\n return pulumi.get(self, \"steps\")", "def parse_step_info(self, step: str) -> Tuple[str]:\n step_info = re.findall(r'\\[[^\\[\\]]+\\]', step)[0][1:-1].split('/')\n return step_info[0], step_info[1]", "def make_predict_step(self):\n return self.make_eval_step()", "def step_id(self):\n return self._step_id", "def to_dict(self):\n view_steps = [view_step.to_dict() for view_step in self.get_view_steps()]\n wf_steps = [wf_step.to_dict() for wf_step in self.get_workflow_steps()]\n dict_data = deepcopy(self._data_dict)\n dict_data[self.KEY_VIEW_STEPS] = view_steps\n dict_data[self.KEY_WF_STEPS] = wf_steps\n return dict_data", "def export_pipeline(scikit_pipeline):\n steps_obj = {'steps':[]}\n for name, md in scikit_pipeline.steps:\n steps_obj['steps'].append({\n 'name': name,\n 'class_name': fullname(md),\n 'params': md.get_params()\n })\n\n return steps_obj", "def step(self, **kwargs):\n pass", "def step(self,\n *,\n global_step: jnp.ndarray,\n rng: jnp.ndarray,\n writer: Optional[utils.Writer]) -> Dict[str, np.ndarray]:", "def step(self, uuid):\n return self.__get_object(self.get(\"steps/{}\".format(uuid)))", "def build_step(self):\n\n pass", "def _parse_steps(recipe):\n steps = []\n\n filtered_dict = {k: v for k, v in recipe.items() if \"step\" in k}\n\n for key, value in filtered_dict.items():\n if value:\n steps.append(value)\n\n return steps", "def _step(self) -> None:", "def step_name(self):\n return self._step_name", "def _step(self):\n pass", "def simulation():\n\n return {\n \"type\": \"class\",\n \"base\": \"iso.process_step\",\n \"is_abstract\": False,\n \"is_document\": True,\n \"pstr\": (\"({}/{}/{})\", (\"used\", \"ran_for_experiments\", \"ensemble_id\")),\n \"properties\": [\n (\n \"part_of_project\",\n \"linked_to(designing.project)\",\n \"1.N\",\n \"Project or projects for which simulation was run\",\n ),\n (\n \"ran_for_experiments\",\n \"linked_to(designing.numerical_experiment)\",\n \"1.N\",\n \"One or more experiments with which the simulation is \"\n \"associated\",\n ),\n (\n \"sub_experiment\",\n \"linked_to(designing.numerical_experiment)\",\n \"0.1\",\n \"For start-date ensembles, this will indicate the beginning \"\n \"year; for offline models driven by output from another \"\n \"model, this will provide the source_id and variant_label \"\n \"for the 'driving' model.\",\n ),\n (\n \"used\",\n \"linked_to(science.model)\",\n \"1.1\",\n \"The model used to run the simulation\",\n ),\n (\n \"primary_ensemble\",\n \"linked_to(activity.ensemble)\",\n \"0.1\",\n \"Primary Ensemble (ensemble for which this simulation was \"\n \"first run).\",\n ),\n (\n \"institution\",\n \"linked_to(shared.party)\",\n \"0.1\",\n \"institution which carried out the simulation\",\n ),\n (\n \"parent_of\",\n 
\"linked_to(activity.child_simulation)\",\n \"0.N\",\n \"If appropriate, links to simulations which branched from \"\n \"this one\",\n ),\n (\n \"produced\",\n \"linked_to(data.dataset)\",\n \"0.N\",\n \"Products of the simulation\",\n ),\n (\n \"had_performance\",\n \"linked_to(platform.performance)\",\n \"0.1\",\n \"Performance of the simulation.\",\n ),\n (\n \"ran_on\",\n \"linked_to(platform.machine)\",\n \"0.1\",\n \"The machine on which the simulation was run.\",\n ),\n (\n \"errata\",\n \"shared.online_resource\",\n \"0.1\",\n \"Link to errata associated with this simulation.\",\n ),\n (\n \"ensemble_id\",\n \"activity.axis_member\",\n \"0.N\",\n \"Identification within ensemble axes via axis member. \"\n \"(Multiple axis members within a simulation cannot share the \"\n \"same ensemble_axis.) (There must be an axis_member instance \"\n \"for each ensemble axis in a parent ensemble.)\",\n ),\n # Time\n (\n \"start_time\",\n \"time.date_time\",\n \"0.1\",\n \"The start date-time of the simulation. e.g. \"\n \"2012-04-01 00:00:00\",\n ),\n (\n \"end_time\",\n \"time.date_time\",\n \"0.1\",\n \"The end date-time of the simulation. e.g. \"\n \"2087-11-30 12:00:00\",\n ),\n (\n \"calendar\",\n \"time.calendar\",\n \"0.1\",\n \"The calendar used in the simulation\",\n ),\n # Further Info URL\n (\n \"documentation\",\n \"shared.online_resource\",\n \"0.1\",\n \"On-line location of additional documentation\",\n ),\n # Extra attributes\n (\n \"extra_attributes\",\n \"shared.extra_attribute\",\n \"0.N\",\n \"Additional attributes provided with simulation.\",\n ),\n ],\n \"constraints\": [\n (\"cardinality\", \"rationale\", \"0.0\"),\n ],\n }", "def get_step_info(self, seq_descr: str, step_id: int) -> Optional[Tuple[List[Union[str, int]], int, int]]:\n seq_name: str = Sequencer.get_name(seq_descr)\n seq: Optional[Sequencer] = self.get_seq_by_name(seq_name)\n if seq:\n return seq.get_step_brightness(step_id), seq.get_step_wait(step_id), seq.get_step_smooth(step_id)\n return None", "def IterBuilderStepMaps(\n self\n ) -> Generator[Tuple[str, BaseExpectation, 'BuilderStepMap'], None, None]:\n return self.IterToValueType(BuilderStepMap)", "def load_from_dict(input_dict):\n return WorkflowStepInformation(input_dict)", "def to_dict(self) -> Dict:\n data = {\"Type\": \"Pass\"}\n if not isinstance(self.ast_node, ast.Pass):\n data[\"Result\"] = self._parse_result()\n data[\"ResultPath\"] = self._parse_result_path()\n self._set_end_or_next(data)\n return data", "def to_dict(self):\n d = TemplateStep.to_dict(self)\n \n # Add parameters not in parent class\n d.update({\n 'summary_table': self.summary_table,\n 'fitted_parameters': self.fitted_parameters,\n 'model': self.model.to_dict() if self.model else None\n })\n return d", "def _transform_step(self,\n timestep: dm_env.TimeStep,\n action: Optional[Any] = None) -> step_data.StepData:\n custom_data = None\n if self._step_fn is not None:\n custom_data = self._step_fn(timestep, action, self._environment)\n return step_data.StepData(timestep, action, custom_data)", "def step(self, step=None):\n pass", "def get_pars(self, step_name):\n step_list = ['alignment', 'astrodrizzle', 'catalog generation', 'quality control']\n if step_name in step_list:\n return self.pars[step_name].outpars\n else:\n log.critical(\"'{}' is not a recognized step name.\".format(step_name))\n log.critical(\"Recognized step names: \\n{}\".format(str(step_list)[2:-2].replace(\"', '\", \"\\n\")))\n sys.exit(1)", "def _step(self, action):\n \n obs, reward, done, info = 
self.env.step(action)\n\n \n advice=self.generateAdvice()[1]\n\n obs = {\n \"image\": obs,\n \"advice\": advice\n }\n\n\n \n\n\n\n return obs, reward, done, info", "def test_stepregitry_step_decorators_for_all_keywords():\n # given\n registry = StepRegistry()\n context = {}\n registry.create_step_decorators(context)\n\n # when\n def test_step():\n ...\n\n test_step = context[\"step\"](\"pattern\")(test_step)\n\n # then\n assert registry.step_implementations(\"Given\") == [\n StepImpl(\"Step\", \"pattern\", test_step)\n ]", "def step_id(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"step_id\")", "def step(self):\n raise NotImplementedError", "def add_step(self, step):\n if not step:\n return\n temp = {Result.__STEP: step.get_name(),\n Result.__STATUS: step.get_status(),\n Result.__MESSAGE: step.get_message()}\n self.__run.append(temp)", "def do_step(self) -> None:", "def default_steps(self):\n steps = {}\n for code, color in (\n ('Client', '255,255,255'),\n ('Online', '254,92,255'),\n ('Roto', '253,254,152'),\n ('MM', '254,151,152'),\n ('Anm', '254,173,146'),\n ('FX', '255,218,137'),\n ('Light', '254,205,138'),\n ('Comp', '161,236,154'),\n ('Art', '0,230,254'),\n ('Model', '254,205,138'),\n ('Rig', '253,254,152'),\n ('Surface', '231,251,154'),\n ):\n steps[code] = self.find_or_create('Step', dict(\n code=code,\n short_name=code,\n color=color,\n ))\n return steps", "def step(\n self, actions: ActionDict\n ) -> tuple[\n ObsDict, dict[str, float], dict[str, bool], dict[str, bool], dict[str, dict]\n ]:\n raise NotImplementedError", "def step(self):\r\n raise NotImplementedError", "def get_steps_num():\n return 0", "def get_duration_steps(self):\n return {\n # acc. to ATV-A 121 chap. 5.2 (till 2012)\n ATV: (60 * 3, 60 * 48),\n # acc. to DWA-A 531 chap. 5.2.1\n DWA_adv: (60 * 3, 60 * 24),\n # acc. to DWA-A 531 chap. 
5.2.1\n DWA: (60, 60 * 12)\n }[self.worksheet]", "def construct_step(tour, start, direction, nodes, scale):\n return {'Tour': copy.deepcopy(tour),\n 'Tourlength': tsputil.get_path_length(nodes, scale, tour),\n 'Start': start,\n 'Direction': direction}", "def step(self, action: np.ndarray) -> 'EnvStep':\n ...", "def record(self, step):", "def step(self):\n\n pass", "def load_from_dict(input_dict):\n return WorkflowViewStepInformation(input_dict)", "def step_values(self):\n return self._get_values().copy()", "def step_param(self):\n if self.variable_name is None:\n return self.step_name\n elif self.step_name is None:\n return self.variable_name\n else:\n return '{step}__{var}'.format(\n step=self.step_name, var=self.variable_name)", "def steps(self) -> pulumi.Output[Sequence['outputs.StepResponse']]:\n return pulumi.get(self, \"steps\")", "def training_step(self, **kwargs):\n raise NotImplementedError", "def _CreateADictOfFailedSteps(self, build_info):\n failed_steps = dict()\n for step_name in build_info.failed_steps:\n failed_steps[step_name] = {\n 'current_failure': build_info.build_number,\n 'first_failure': build_info.build_number,\n }\n\n return failed_steps", "def steps(self):\n for step in self._get_paged(\"steps\", trailing=True):\n yield self.__get_object(step)\n\n return", "def test_steps(self, steps=1, **net_kwargs) -> dict:\n return self._eval_or_test_steps(steps=steps, is_eval=False, **net_kwargs)", "def test_steps(self, steps=1, **net_kwargs) -> dict:\n return self._eval_or_test_steps(steps=steps, is_eval=False, **net_kwargs)", "def step_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"step_id\")", "def step(self):\n raise NotImplementedError()", "def step(self):\n raise NotImplementedError()", "def step(self):\n raise NotImplementedError()", "def formal_step(self) -> str:\n return self._strategy.formal_step()", "def total_steps(self) -> global___Expression:", "def env_step(self, action):\n state, reward, done, info = self.env.step(action)\n state = self.feature_extractor.extract_features(state)\n\n return state, reward, done, info", "def list_step_functions() -> List[str]:\n return list(STEP_SCORES_MAP.keys())", "def add_step_args(cls, parser):" ]
[ "0.69314784", "0.66943085", "0.6273986", "0.61101913", "0.60612357", "0.60612357", "0.6031463", "0.6012955", "0.59872484", "0.5910651", "0.58814424", "0.58362293", "0.58362293", "0.58136266", "0.57853806", "0.5746236", "0.57229125", "0.5714291", "0.57003534", "0.56915283", "0.5690302", "0.5665139", "0.5662633", "0.5635882", "0.5614988", "0.5596812", "0.5582488", "0.5577432", "0.5570337", "0.55640143", "0.55640143", "0.55640143", "0.55640143", "0.5538346", "0.5511629", "0.5507628", "0.5497186", "0.5477385", "0.5476477", "0.5445371", "0.5431053", "0.54215896", "0.54168314", "0.540791", "0.540791", "0.5406402", "0.5388349", "0.53644437", "0.53566086", "0.5345981", "0.53275067", "0.53171474", "0.5312521", "0.5308121", "0.52974474", "0.52910125", "0.52705604", "0.5263362", "0.52581733", "0.5247832", "0.5213587", "0.52058417", "0.51992255", "0.5194165", "0.5188992", "0.518846", "0.518435", "0.5182417", "0.51749027", "0.51707435", "0.5170571", "0.51410353", "0.51309675", "0.51204354", "0.5101806", "0.50843436", "0.508078", "0.5078091", "0.50516796", "0.5047621", "0.5041499", "0.50372654", "0.5036422", "0.5031892", "0.50294894", "0.50238895", "0.5011346", "0.5000674", "0.49831375", "0.4977955", "0.49778154", "0.49778154", "0.49760586", "0.4974869", "0.4974869", "0.4974869", "0.4974282", "0.49741518", "0.49650413", "0.49647", "0.4963292" ]
0.0
-1
Delete or drop table
def _delete_table(self, db, table_name): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_table(self, name: str) -> None:", "def drop_table(cls)->None:\n database.cursor.execute(\n \"DROP TABLE IF EXISTS {}\".format(cls.table_name))\n database.connection.commit()", "def deleteTable(self):\n return self.db.table_drop(self.entity).run(self.r)", "def sqlite3_simple_delete_table(data_base, table):\n con = sqlite3.connect(data_base)\n cur = con.cursor()\n query = 'DROP TABLE IF EXISTS ' + table\n cur.execute(query)\n cur.close()\n con.close()", "def drop(self):\n c = self.cursor()\n for table in ['experiment','fact']:\n c.execute(\"drop table if exists {}\".format(table))\n self.commit()", "def drop_table(cls):\n if not connection.connected:\n raise Exception('Not connected to the database.')\n sql = u'DROP TABLE IF EXISTS %s' % cls.table()\n connection.execute(sql)", "def drop_table_if_exists(self, table): \n results = self.quick(\"DROP TABLE IF EXISTS {}\".format(table),context=\"MYDB\")", "def delete_table(self, table):\n if table in self.table_cols:\n sql = \"DROP TABLE IF EXISTS %s\" % table\n Log.debug('DB -> %s' % sql)\n self.execute(sql)\n self.table_cols.pop(table)", "def delete_table(self, table_name):\n try:\n conn = self.engine.connect()\n conn.execute(\"DROP table \" + table_name)\n print(\"-I- Deleted table \" + table_name)\n except Exception as e:\n print(\"-W- \" + str(e))", "def delete_table(self, name):\n # check read only\n if self.__read_only:\n raise IOError(\"DB is for reading only.\")\n # delete table\n self.__tables.pop(name)\n # add table name as deleted\n self.__dropped_tables.append(name)", "def delete_table(db, table_name):\n global DB_CONNECTIONS\n con = DB_CONNECTIONS.get(db, None)\n if con is not None:\n db_name = Path(db).name\n logging.info(f\">>> Deleting stale table `{table_name}` from database `{db_name}` <<<\")\n con.execute(f\"DROP TABLE IF EXISTS {table_name}\")", "def table_drop(self, table):\n\n stmt = 'DROP TABLE %s' % table\n\n curs = self.cursor()\n try:\n curs.execute(stmt)\n except sqlite3.OperationalError:\n pass\n finally:\n curs.close()", "def delete_table(conn):\n try:\n cur = conn.cursor()\n cur.execute(\"DROP TABLE users;\")\n except Error as e:\n print(e)", "def drop_tables():\n drop_table(ShoppingList)\n drop_table(User)\n drop_table(Category)", "def drop(self):\n self.__init__()\n cursor = self.connection.cursor()\n cursor.execute(drop_tables)\n queries = cursor.fetchall()\n for i in queries:\n cursor.execute(i[0])\n\n self.commit()\n self.__init__()", "def drop_tables(self):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"\"\"DROP TABLE IF EXISTS users CASCADE\"\"\")\n cursor.close()\n con.commit()\n con.close()", "def del_table():\n global data_base, table, output_on_display\n try:\n sqlite3_simple_delete_table(data_base, table)\n list_tables = update_list_tables(data_base)\n list_tables.clear()\n output_on_display.delete(1.0, END)\n output_on_display.insert(END, '')\n return\n except sqlite3.OperationalError:\n mistake_del_table()", "def drop_tables(self, table):\n drop_table = \"DROP TABLE IF EXISTS {} CASCADE;\".format(table)\n self.cursor.execute(drop_table)", "def drop_table(self, tablename):\n # print \"table dropped\"\n # return\n query = 'drop table ' + tablename\n try:\n self.__cur.execute(query)\n self.__conn.commit()\n except Exception as e:\n self.__conn.rollback()\n raise e", "def drop_created_table(opts, stats):\n print(\"--------------------------------------\")\n print(\"Dropping created table %s\" % (opts.table_name, ))\n print(\"--------------------------------------\")\n 
print(timestamp())\n sql = \"DROP TABLE %s\" % (opts.table_name, )\n cmd = 'echo \"%s\" | impala-shell -i %s -f -' % (sql, opts.impalad_address)\n run_command(opts, cmd)", "def remove_table(self, table):\n self.execute(\"DROP TABLE %s\" % table)", "def drop_tables(self):\n with sql.connect('./{}.db'.format(self.name)) as conn:\n conn.execute(\"DROP TABLE tweets\")\n conn.execute(\"DROP TABLE tweet_peaks\")", "def delete_table(cls, *args, **kwargs):\n operation = cls._meta.database.delete_table(cls._meta.table_name)\n yield cls._meta.database.runOperation(operation)", "def drop_table(self, table_name: str) -> None:\n sql = 'DROP TABLE IF EXISTS ' + table_name\n self.cursor.execute(sql)\n self.connection.commit()", "def sqlite3_simple_clear_table(data_base, table):\n con = sqlite3.connect(data_base)\n cur = con.cursor()\n query = 'DELETE FROM ' + table\n cur.execute(query)\n con.commit()\n cur.close()\n con.close()", "def drop_table(database, table):\n sql = \"\"\"DROP TABLE %s\"\"\" % table\n print \"Dropping Table %s from the Database %s\" % (table, database)\n execute_and_commit_sql(database, sql)\n return None", "def drop_table():\n\n try:\n sql = \"DROP TABLE IF EXISTS movies\"\n conn = psycopg2.connect(dsn=DB_DSN)\n cur = conn.cursor()\n cur.execute(sql)\n conn.commit()\n except psycopg2.Error as e:\n print e.message\n else:\n cur.close()\n conn.close()", "def test_table_drop(app, runner):\n result = runner.invoke(drop_tables, input=\"y\")\n\n with app.app_context():\n assert not db.engine.has_table('link')\n assert not db.engine.has_table('user')", "def tear_down():\n db.flush()\n for table in metadata.tables.values():\n db.execute(table.delete())", "def _delete (self):\n self._exec ('delete from table_name where id=%(id)s')", "def dropTable(self, table):\n\n self.__open()\n query = \"DROP TABLE {}\".format(table)\n self.__cursor.execute(query)\n self.__close()", "def delete_table(table, db_file):\n \n try:\n conn, c = connect_to_db(db_file)\n c.execute(\"DROP TABLE IF EXISTS \" + safe(table) + \";\")\n conn.close()\n except Exception as e:\n print(\"Error when trying to delete table \" + table + \" in database file \" + db_file)\n print(e)\n return False\n else:\n return True", "def empty_table(table: str):\n\n db, c = start_db()\n query = f'DELETE FROM {table}'\n\n c.execute(query)\n db.commit()\n db.close()", "def drop(self):\n cursor = self.connect.create_cursor()\n queries = (\n (\"USE dbPurBeurre\"),\n (\"SET foreign_key_checks = 0\"),\n (\"DROP TABLE IF EXISTS Asso_Prod_Cat\"),\n (\"DROP TABLE IF EXISTS Categories\"),\n (\"DROP TABLE IF EXISTS Produits\")\n )\n\n for query in queries:\n cursor.execute(query)", "def drop_table(conn, drop_table_sql):\n try:\n c = conn.cursor()\n c.execute(drop_table_sql)\n except Error as e:\n print(e)", "def drop_db(self) -> None:\n try:\n if not self._check_delete_okay():\n return\n except DatabaseWriteException as e:\n raise e\n\n existing_tables = self.list_tables()\n for table_name in existing_tables:\n self.dynamodb.Table(table_name).delete()", "def test_drop_table(pawprint_default_tracker_db_with_table):\n\n tracker = pawprint_default_tracker_db_with_table\n\n # make sure table exists\n with pytest.raises(ProgrammingError):\n tracker.create_table()\n\n tracker.drop_table()\n\n with pytest.raises(ProgrammingError):\n tracker.drop_table()", "def drop_tables():\n commands = (\n \"\"\"\n DROP TABLE utilizador_partilha CASCADE\n \"\"\",\n \"\"\" \n DROP TABLE album CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE compositores CASCADE\n \"\"\",\n 
\"\"\"\n DROP TABLE grupo CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE letras_musica CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE playlist CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE editora CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE criticas CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE genero_musical CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE utilizador_partilha_criticas CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE utilizador_partilha_playlist CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE album_genero_musical CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE letras_musica_playlist CASCADE \n \"\"\")\n\n try:\n\n conn = psycopg2.connect(host=\"localhost\",database=\"SoundBox\", user=\"postgres\", password=\"postgres\")\n cur = conn.cursor()\n # DROP table one by one\n for command in commands:\n cur.execute(command)\n # close communication with the PostgreSQL database server\n cur.close()\n # commit the changes\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()", "def drop():\n if prompt_bool(\"Are you sure you want to lose all your data\"):\n db.drop_all()\n db.engine.execute(\"drop table if exists alembic_version\")", "def drop_table(self, table_name):\n drop_command = \"DROP TABLE {}\".format(table_name)\n try:\n self.cursor.execute(drop_command)\n status = 'Table {} dropped'.format(table_name)\n except p.Error as exception:\n status = 'Exception occured in drop_table()'\n print(exception.pgerror)", "def dropTable(self,table):\n query = \"DROP TABLE \"+table\n\tcur = self.db.cursor()\n\ttry:\n\t iQuery = self.updateLog(query)\n\t cur.execute(iQuery)\n# self.cursor.execute(iQuery)\n\texcept:\n\t self.log.write(\"No %s table found\\n\"%table)\n\tcur.close()", "def drop_table(self):\n sql = 'DROP TABLE {}'.format(self.TABLE_NAME)\n yield self._pool.execute(sql)", "def delete_table(self, table_name, timeout):\n _abstract()", "def delete_table(self, table_name, timeout):\n _abstract()", "def deletePlayers():\n executeNonQuery(\"TRUNCATE TABLE players CASCADE;\")", "def dropTables(t=None):\n tablelist = tables.keys if t == None else [t]\n conn = getConnection()\n try:\n cur = conn.cursor()\n for table in tables.keys():\n query = \"DROP TABLE IF EXISTS %s;\" % table\n cur.execute(query)\n conn.commit()\n except Exception as ex:\n print(\"Failed to drop tables:\" )\n print(ex)\n sys.exit(1)", "def drop_database_tables(cls):\n cursor = Database.connect_to_db()\n # drop users table\n sql_command = \"\"\" DROP TABLE IF EXISTS users CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop parties table\n sql_command = \"\"\" DROP TABLE IF EXISTS parties CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop offices table\n sql_command = \"\"\" DROP TABLE IF EXISTS offices CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop candidates table\n sql_command = \"\"\" DROP TABLE IF EXISTS candidates CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop votes table\n sql_command = \"\"\" DROP TABLE IF EXISTS votes CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop applications table\n sql_command = \"\"\" DROP TABLE IF EXISTS applications CASCADE;\n \"\"\"\n cursor.execute(sql_command)", "def delete_table(self, table_name, validate=True, verbose=True):\n \n assert(self.connected)\n try: \n assert(self.check_table(table_name, verbose=False)) \n except AssertionError: \n raise TableNotFoundError\n \n \n DELETE_TABLE_COMMAND = \"DROP TABLE {0}\".format( table_name)\n \n if validate: \n if not self.validate_action(): return False\n \n if verbose: \n 
print(\"Deleting the table '{0}' from the database '{1}'...\".format(table_name, self.config['database']))\n print(\"\\t\" + DELETE_TABLE_COMMAND)\n \n self.cursor.execute(DELETE_TABLE_COMMAND)\n \n return True", "def drop_table(self, table_name: str):\n del self.tables[table_name]", "def delete_table_for_ETF(conn):\n cur = conn.cursor()\n statement = f\"\"\"\n DELETE FROM ETFs\n \"\"\"\n cur.execute(statement)\n conn.commit()", "def drop_table_if_exists():\n drop_users_table = \"\"\"\n DROP TABLE IF EXISTS users CASCADE\"\"\"\n drop_parties_table = \"\"\"\n DROP TABLE IF EXISTS parties CASCADE\"\"\"\n drop_offices_table = \"\"\"\n DROP TABLE IF EXISTS offices CASCADE\"\"\"\n drop_candidates_table = \"\"\"\n DROP TABLE IF EXISTS candidates CASCADE\"\"\"\n\n drop_voters_table = \"\"\"\n DROP TABLE IF EXISTS votes CASCADE\"\"\"\n return [drop_users_table, drop_parties_table, drop_offices_table,\n drop_candidates_table, drop_voters_table]", "def drop_tables() -> None:\n print(\"Dropping database tables using SQLAlchemy ORM\")\n Base.metadata.drop_all(engine)\n print(\"Done dropping tables\")", "def drop_table(self, schema, table):\n sql = f'set role {self.write_role}; ' \\\n + f'DROP TABLE IF EXISTS {schema}.{table};'\n return sql", "def clean_table(self, conn) -> None:\n cur = conn.cursor()\n cur.execute(f'DELETE FROM {self.table}')\n conn.commit()", "def drop_table(self, name):\n if not self._open:\n raise ValueError(\"Operation on closed store\")\n\n table_grp = self.grp[name]\n if isinstance(table_grp, self.GROUP_TYPE) and all(\n isinstance(k, self.ARRAY_TYPE) for k in table_grp.values()\n ):\n table_name = table_grp.name\n if table_name == \"/\":\n for colname in self.grp.keys():\n self.delcol(table_grp, colname)\n else:\n del self.grp[name]", "def clear_table(self, database, table):\n engine = sqlalchemy.create_engine(self.db_uri)\n sql = f\"truncate table {database}.{table};\"\n engine.execute(sql)", "def clean_db():\n db = get_db()\n tables = db.tables\n for table in tables:\n db[table].drop()", "def delete_tables(db, table_names):\n with tables(db.engine, *table_names) as tpl:\n for tbl in tpl[1:]:\n tbl.delete().execute()", "def delete_all_tables(self):\n\t\tif self.__dbfile is not None:\n\t\t\tfor table_name in list(LocalData.table_info.keys()):\n\t\t\t\tif self.table_exists(table_name):\n\t\t\t\t\tself._conn.execute(\"DROP TABLE %s\" % table_name)\n\t\t\tself._conn.commit()", "def deleteTable(self, name: str, line, column):\n database = SymbolTable().useDatabase\n if not database:\n desc = f\": Database not selected\"\n ErrorController().add(4, 'Execution', desc,\n line, column)\n return\n dbStatement = data_mode.mode(\n database.mode).dropTable(database.name.lower(), name.lower())\n\n if dbStatement == 0:\n table = self.searchTable(database, name)\n database.tables.remove(table)\n self.writeFile()\n DataWindow().consoleText('Query returned successfully: Table deleted')\n\n elif dbStatement == 1:\n desc = f\": Can't drop table {name}\"\n ErrorController().add(34, 'Execution', desc, line, column)\n\n elif dbStatement == 2:\n desc = f\": Database {database.name} does not exist\"\n ErrorController().add(35, 'Execution', desc, line, column)\n\n elif dbStatement == 3:\n desc = f\": Table {name} does not exist\"\n ErrorController().add(27, 'Execution', desc, line, column)", "def clear(self):\n self.pdq.cursor().execute('drop table pdq')\n self._create()", "def dropTable(self, in_table_name):\n self.cursor.execute('DROP TABLE {};'.format(in_table_name))\n self.connection.commit()", "def 
clear_db():\n for name in TABLES:\n result = execute_query('truncate table {};'.format(name)), ())", "def dropTable(self, schema, table, cascade=False):\r\n if cascade:\r\n return self.runSql('DROP TABLE IF EXISTS {} CASCADE'.format(self.encodeTableName(schema, table)))\r\n else:\r\n return self.runSql('DROP TABLE IF EXISTS {}'.format(self.encodeTableName(schema, table)))", "def test_drop(self):\n my_conn = MySQL(*self.conn_params)\n sql = \"CREATE TABLE table1 (id integer, column1 varchar(100), \" \\\n \"column2 double)\"\n my_conn.execute(sql)\n my_conn.get_table('table1')\n my_conn.drop('table1') # DROP example\n with self.assertRaises(InvalidRequestError):\n my_conn.get_table('table1')", "def refresh_tables(db):\r\n try:\r\n c = db.cursor()\r\n c.execute(\"DROP TABLE waiting\")\r\n c.execute(\"DROP TABLE help\")\r\n c.execute(\"DROP TABLE helped\")\r\n create_tables()\r\n except Error as e:\r\n print(e)", "def delete_event_table():\r\n sql = 'DROP TABLE events;'\r\n \r\n conn = sqlite3.connect(\"events.db\")\r\n c = conn.cursor()\r\n c.execute(\r\n sql\r\n )\r\n \r\n c.close()\r\n \r\n log = open('log.txt', 'a')\r\n log.write(time.strftime(\"%c\") + \" - \" + 'Deleted events table' + \"\\n\")\r\n log.close()", "def delete_table_data():\n try:\n print 'delete existing data'\n sql = 'delete from document'\n sql1 = 'delete from clean_keywords'\n sql2 = 'delete from keywords'\n util.executeSQL(conn, sql) # delete the existing data.\n util.executeSQL(conn, sql1)\n util.executeSQL(conn, sql2)\n except Exception as e:\n print e", "def deletePlayers():\n #deletes the contents of table players\n DB().execute(\"DELETE FROM players\", True)", "def test_table_drop_exist(self):\n table = \"test_drop\"\n\n if not self.dbh.table_can_query(table):\n self.dbh.table_create(table, 'c1 integer')\n\n self.dbh.table_drop(table)\n\n self.assertFalse(self.dbh.table_can_query(table))", "def drop_tables(session):\n\n for query in drop_table_queries:\n session.execute(query)", "def drop_tables (cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def test_drop_table(fs: FakeFilesystem) -> None:\n fs.create_file(\"test.csv\", contents=CONTENTS)\n\n connection = connect(\":memory:\", [\"csvfile\"])\n cursor = connection.cursor()\n\n sql = 'DROP TABLE \"test.csv\"'\n cursor.execute(sql)\n assert not Path(\"test.csv\").exists()", "def delete():\n\n from slicr.extensions import db\n\n click.echo('deleting database...')\n\n db.drop_all()", "def drop_tables(session):\n for query in drop_table_queries:\n session.execute(query)", "def drop_table(self, name):\n if not self._open:\n raise ValueError(\"Operation on closed file\")\n\n grp = self.grp[name]\n if isinstance(grp, self.GROUP_TYPE) and all(\n isinstance(k, self.ARRAY_TYPE) for k in grp.values()\n ):\n table_name = grp.name\n if table_name == \"/\":\n for colname in self.grp.keys():\n self.delcol(grp, colname)\n else:\n parent = grp.parent\n del parent[name]", "def clear(cls)->None:\n database.cursor.execute(\"DELETE FROM {}\".format(cls.table_name))\n database.connection.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in 
drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(db_config):\n tables = [\"users\", \"incidents\", \"images\", \"videos\",\n \"images\", \"location\" \"login\"]\n try:\n conn = connect(db_config)\n cursor = conn.cursor()\n for table in tables:\n query = \"DROP TABLE IF EXISTS {} CASCADE;\".format(table)\n cursor.execute(query)\n conn.commit()\n # print('Table {} deleted'.format(tables), '\\n')\n except(Exception, psycopg2.DatabaseError) as error:\n print(\"Warning: Table Deletion Error\", error)", "def drop_table(cursor, t_name): \n try:\n print(f\"Dropping table '{t_name}'...\")\n cursor.execute(f'DROP TABLE {t_name}')\n except mysql.connector.Error as err:\n if err.errno == 1051:\n print(f\"Table '{t_name}' DNE, moving on...\")\n pass\n else:\n print(str(err.errno) + \": \" + err.msg + \".\")\n exit(1)\n else:\n print(\"OK\")\n return None", "def droptables(db, cursor):\n cursor.execute('''DROP TABLE IF EXISTS worlds;''')\n cursor.execute('''DROP TABLE IF EXISTS characters''')\n cursor.execute('''DROP TABLE IF EXISTS chardata''')\n db.commit()", "def drop_tables(cur, conn) -> None:\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn): \n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def clean_table(self, a_schema, a_table):\n \n self._conn.execute(\"delete from %s.%s;\" %(a_schema, a_table))", "def drop(self, cascade=False):\n if self.db.table_exists(self.name):\n self.drop_foreign_keys()\n self.execute(self.commands.drop_table(self.name, cascade))\n self.commit()", "def clean_db():\n yield\n logging.info(\"Delete table\")\n db.delete_table(\"TestRules\")", "def drop_tables(cur, conn):\n \n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def _drop_db(keep_tables=None):\n server.db.session.remove()\n if keep_tables is None:\n keep_tables = []\n meta = server.db.metadata\n for table in reversed(meta.sorted_tables):\n if table.name in keep_tables:\n continue\n server.db.session.execute(table.delete())\n server.db.session.commit()", "def emptyTable(self, in_table_name): \n self.cursor.execute('DELETE FROM {};'.format(in_table_name))\n self.connection.commit()", "def clear_tables(cursor):\n cursor.execute(\"delete from Review_Votes\")\n cursor.execute(\"delete from Review\")", "def delete(self)->None:\n database.cursor.execute(\n \"DELETE FROM {} WHERE id={}\".format(self.table_name, self.id))\n database.connection.commit()", "def destroy_table(self):\n if self.table is None or self.table_name is None:\n raise ValueError('inner table or table name are none')\n while self.table.table_status == 'CREATING' or self.table.table_status == 'UPDATING':\n time.sleep(0.01)\n self.table = self.dynamodb.Table(self.table_name)\n if self.table.table_status == 'ACTIVE':\n response = self.table.delete()\n if self._is_error_call(response):\n raise RuntimeError('DynamoDB coul not delete the table: %s' % response)\n self.table, self.table_name = None, None\n elif self.table.table_status == 'DELETED':\n pass\n else:\n raise ValueError('Unknown table state')", "def db_delete(table: str, where: str):\n\n db, c = start_db()\n query = f'DELETE FROM {table} WHERE {where}'\n\n 
c.execute(query)\n db.commit()\n db.close()" ]
[ "0.81365436", "0.80147094", "0.8011938", "0.785006", "0.7730077", "0.7719094", "0.77065164", "0.76392746", "0.7639245", "0.7558649", "0.75291914", "0.7528163", "0.7518751", "0.75040466", "0.7492301", "0.7477169", "0.7466884", "0.7411313", "0.7407684", "0.73777705", "0.7354117", "0.73331034", "0.7329823", "0.73204106", "0.7317779", "0.7310747", "0.725772", "0.7221141", "0.7211841", "0.7174868", "0.7172589", "0.7143175", "0.7124729", "0.71190315", "0.7098738", "0.70977765", "0.70812446", "0.705508", "0.7052633", "0.7052468", "0.70337844", "0.7027801", "0.70185745", "0.70185745", "0.7011062", "0.6991816", "0.6967445", "0.6962571", "0.6961601", "0.69524026", "0.69518596", "0.6938104", "0.6934266", "0.6927161", "0.6926993", "0.6926147", "0.69081587", "0.69037443", "0.6900885", "0.68987834", "0.68962634", "0.689322", "0.68885326", "0.68705624", "0.6863598", "0.6856213", "0.6852744", "0.68345225", "0.6816257", "0.6807498", "0.68038034", "0.67982185", "0.67912555", "0.6785678", "0.6777627", "0.67732215", "0.6763044", "0.67565113", "0.67565113", "0.67565113", "0.67565113", "0.67565113", "0.67565113", "0.67565113", "0.67565113", "0.6751519", "0.6746479", "0.6741373", "0.6738457", "0.6723568", "0.67213106", "0.6708633", "0.670834", "0.6705042", "0.6696601", "0.6694901", "0.6675746", "0.6662978", "0.6654082", "0.6643452" ]
0.7943311
3
Insert single row into a table
def _insert_table_row(self, db: str, table: str, row: Dict[str, Any]): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert_row(self, table: str, row_data: dict):\r\n\r\n columns = \"\".join([f\"'{i}',\" for i in row_data]).rstrip(\",\")\r\n keys = \"\".join([f\"'{row_data[i]}',\" for i in row_data]).rstrip(\",\")\r\n sql_statement = f\"INSERT INTO {table} ({columns}) VALUES({keys});\"\r\n try:\r\n self.__cursor(sql_statement)\r\n self.__db_conn.commit()\r\n except sqlite3.Error as error:\r\n print(\"[!] Couldn't add record\")\r\n print(\"[!]\", str(error).capitalize())\r\n return\r\n print(\"[*] Record added successfully.\")", "def insert_row(self, tablename, fields):\n insert_params = \"(\" + \",\".join(['?' for x in fields]) + \")\"\n self.cursor.execute(\"insert into \" + tablename + \" values \" +\n insert_params, fields)", "def insert(db, table, name, row):\n\n # Build insert prepared statement\n columns = [name for name, _ in table.items()]\n insert = INSERT_ROW.format(table=name, columns=\", \".join(columns), values=(\"?, \" * len(columns))[:-2])\n\n try:\n db.execute(insert, values(table, row, columns))\n except Exception as ex:\n print(\"Error inserting row: {}\".format(row), ex)", "def insert(self, row):\n if not self.loaded:\n print(\"Database is not loaded\")\n return False\n\n self.rows.append(row)\n return True", "def add_row(self, row_id):", "def insertRow(self, row, data):\n newRowData = self.createRowData(data)\n self.jobRow.insertRow(row, newRowData)", "def singleInsert(self, table_name, fields, field_values, field_types=[]):\n if not self.checkTable(table_name):\n self.createTable(table_name, fields, field_types)\n self.transactionInsert(table_name, fields, field_values)\n self.transactionEnd()", "def insertRow(self, p_int, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def insert(self, table, *args, **kwargs):\n\n values = None\n query = \"INSERT INTO %s \" % table\n if kwargs:\n keys = kwargs.keys()\n values = kwargs.values()\n query += \"(\" + \",\".join([\"`%s`\"]*len(keys)) % tuple(keys) + \\\n \") VALUES(\" + \",\".join([\"%s\"]*len(values)) + \")\"\n elif args:\n values = args\n query += \" VALUES(\" + \",\".join([\"%s\"]*len(values)) + \")\"\n\n self.__open()\n self.__cursor.execute(query, values)\n self.__connection.commit()\n self.__close()\n return self.__cursor.lastrowid", "def insert(self, table_name, rows, bulk=True):\n table = self._create_table(table_name)\n return self._perform_query(table.insert(), rows, bulk)", "def insert_data(self, row, table_fields_names, table_fields_types):\n\n\t\tquery = ''\n\n\t\ttry:\t\t\t\t\n\t\t\tquery = self.form_insert_query(TABLE_NAME, row, table_fields_names, table_fields_types)\n\t\t\t# print query\n\t\t\tself.execute_query(query)\t\t\t\n\t\texcept Exception, e:\t\t\t\t\n\t\t\tprint '[e] Exeption: %s' % (str(e))\n\t\t\tprint '\\t[q] Query that caused exception \\n %s' % (query)\n\t\t\treturn False\n\n\t\treturn True", "def insert_row(self, identifier, position, datastore):\n # Get dataset. 
Raise exception if dataset is unknown.\n dataset = datastore.get_dataset(identifier)\n if dataset is None:\n raise ValueError(\"unknown dataset '{}'\".format(identifier))\n # Insert new row into dataset.\n df = vizual.insert_row(df=dataset.to_dataframe(), pos=position)\n # Store updated dataset to get new identifier.\n ds = datastore.update_dataset(\n origin=dataset,\n df=df,\n annotations=dataset.annotations\n )\n return VizualApiResult(ds)", "def insert_row(self, row_dict):\n sql = self.commands.insert_row(\n self.name,\n self._join_cols(row_dict.keys()),\n self._join_values(row_dict.values())\n )\n return self.execute(sql)[0][0]", "def insert(self):\n sql = u'INSERT INTO %s' % self.table()\n keys = []\n values = []\n format_values = []\n for field in self.fields():\n attr = object.__getattribute__(self, field)\n if attr.auto_value:\n continue\n keys.append(field)\n format_values.append(attr.format)\n values.append(attr._value)\n keys_str = u'( %s )' % u', '.join(keys)\n values_str = u'VALUES( %s )' % u', '.join(format_values)\n sql = '%s %s %s;' % (sql, keys_str, values_str)\n connection.execute(sql, values)\n primary_k = self.__class__.get_primary()\n primary = object.__getattribute__(self, primary_k)\n primary.value = connection.connection.insert_id()", "def insert_row(self, table_model, row, count):\n self.undostack.push(InsertRowCommand(table_model, row, count))", "def insert(self, sql):\n try:\n # Execute the SQL command\n self.cursor.execute(sql)\n # Commit your changes in the database\n self.db.commit()\n except:\n # Rollback in case there is any error\n self.db.rollback()", "def insert(self):\n self.getDbRecord().insert()\n\n return", "def insert_row(table_str, attribute_value_dict): #works\n sql = make_insert_row(table_str, attribute_value_dict)\n #print sql\n execute_edit_queries(sql)", "def insert(self, row: BaseTrackerRow) -> None:\n assert isinstance(row, self._tracker_row)\n row = [row.__dict__[\"_\" + col] for col in list(self._table.columns)]\n self._table.loc[len(self._table)] = row", "def insert(self, data, table, **kwargs):\n logging.info(f'Inserting into `{table}`')\n\n try:\n data.to_sql(table, self.engine, **kwargs)\n try:\n self.execute(f'ALTER TABLE `{table}` ADD PRIMARY KEY (`id`);')\n except:\n pass\n return True\n except:\n logging.exception('Something went wrong inserting. 
Check trace.')\n return False", "def insert(self,table,values):\n self.connect.execute(self.insert_disc[table],values)\n self.connect.commit()", "def single_insert(conn, insert_req):\n cursor = conn.cursor()\n try:\n cursor.execute(insert_req)\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"Error: %s\" % error)\n conn.rollback()\n cursor.close()\n return 1\n cursor.close()", "def insert(self, table, element):\n\n update = self.update(table, element)\n if update:\n return update\n\n fields = []\n values = []\n for key in element.keys():\n fields.append(key)\n values.append(element[key])\n result = self.__insert(table, fields, values)\n return result", "def insert(self, table, **kwargs):\n if 'creator' in self.get_columns(table):\n kwargs.setdefault('creator', self.user_id)\n if 'uuid' in self.get_columns(table):\n generated_uuid = (table == 'concept' and\n (str(kwargs['concept_id']) + 'A'*36)[:36] or uuid.uuid4())\n kwargs.setdefault('uuid', generated_uuid)\n\n columns = kwargs.keys()\n values = kwargs.values()\n placeholders = ['%s'] * len(values)\n if 'date_created' in self.get_columns(table):\n columns += ['date_created']\n placeholders += ['now()']\n\n self.db.execute(\n 'insert into %s (%s) values (%s)' %\n (table, ', '.join(columns), ', '.join(placeholders)), *values)\n if table + '_id' in self.get_columns(table):\n return self.db.get(table + '_id', uuid=kwargs['uuid'])", "def Insert(self):\n sql = 'INSERT INTO %s ( %s ) VALUES ( %s )' % (\n self.table_name,\n ', '.join(self.values),\n ', '.join(['?' for _ in self.values])\n )\n return Database().Execute(sql, tuple(self.values.values()))", "def insert_one(self, data):\n _client = self.client\n _db = _client[self.database]\n _col = _db[self.collection]\n\n x = _col.insert_one(data)\n\n return x", "def insertRow(self, index, *row):\n if ((len(row) == 1) and (type(row[0]) in MATRIX_VALID_COLLECTIONS)):\n row = row[0]\n if self._width:\n if not (len(row) == self._width):\n raise ValueError('Improper length for new row: %d, should be %d' % (len(row), self._width))\n else:\n self._width = len(row)\n self._height += 1\n # make a deep copy\n newrow = list()\n for item in row:\n if not (type(item) in MATRIX_VALID_TYPES):\n message = \"Values must be of type \"\n for t in range(len(MATRIX_VALID_TYPENAMES)):\n if t:\n message += ' or '\n message += \"'%s'\" % MATRIX_VALID_TYPENAMES[t]\n raise TypeError(message)\n newrow.append(item)\n self._value.insert(index, newrow)", "def add_row(self, row):\n ...", "async def insert_one(self, model):\n\n pass", "def rpc_database_insert_row(self, keys, values):\n\t\tif not isinstance(keys, (list, tuple)):\n\t\t\tkeys = (keys,)\n\t\tif not isinstance(values, (list, tuple)):\n\t\t\tvalues = (values,)\n\t\tassert len(keys) == len(values)\n\t\ttable_name = self.path.split('/')[-2]\n\t\tfor key, value in zip(keys, values):\n\t\t\tassert key in DATABASE_TABLES[table_name]\n\t\ttable = DATABASE_TABLE_OBJECTS.get(table_name)\n\t\tassert table\n\t\tsession = db_manager.Session()\n\t\trow = table()\n\t\tfor key, value in zip(keys, values):\n\t\t\tsetattr(row, key, value)\n\t\tsession.add(row)\n\t\tsession.close()\n\t\treturn", "def insert(q, *params):\n db = Database()\n db.cur.execute(q, *params)\n ret_id = db.cur.lastrowid\n db.con.close()\n return ret_id", "def insert_into(self, **kwargs):\n\n if not self.worksheet:\n raise AttributeError('Create a worksheet first!')\n\n ## TODO: Figure out how to get fields out of an empty table!\n #for key in kwargs.keys():\n # if not key in 
self.fields:\n # raise KeyError('Table does not accept key \"%s\"' % key)\n\n row = Row(self)\n\n row.create(**kwargs)\n\n return row", "def insert_row(conn, episode_info):\n\tp_key = get_p_key(episode_info)\n\t\n\tinsert_statement = f'INSERT INTO shows (p_key, show_stub, show_name, season, episode_number, episode_title watched_status, hidden_status) VALUES (\\\"{p_key}\\\", \\\"{episode_info[\"show_stub\"]}\\\", \\\"{episode_info[\"show_name\"]}\\\", {episode_info[\"season\"]}, {episode_info[\"episode_number\"]}, {episode_info[\"episode_title\"]}, {episode_info[\"watched_status\"]}, {episode_info[\"hidden_status\"]});'\n\t\n\texecute_sql(conn, insert_statement)", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(conn, table_info, table_data):\n\n sql = ''' INSERT INTO ''' + table_info \n + ''' VALUES(''' + \"?,\" * (len(table_data)-1) + \"?)\"\n cursor = conn.cursor()\n cursor.execute(sql, table_data)\n conn.commit()", "def insertQuery(self, master, row_num):\n pass", "def insert(table_name, **kwargs):\n if 'id' not in kwargs:\n kwargs['id'] = str(uuid.uuid4())\n\n kwargs['epoch'] = time()\n\n with get_connection() as conn:\n rethink.table(table_name).insert(kwargs).run(conn)\n return kwargs", "def insert_record(self):\r\n try:\r\n db.session.add(self)\r\n db.session.commit()\r\n return {\"error\": False, \"id\": self.id}\r\n except exc.SQLAlchemyError as e: # pragma: no cover\r\n # print(e)\r\n # print(sys.exc_info())\r\n db.session.rollback()\r\n return {\"error\": True}\r\n finally:\r\n db.session.close()", "def add_row(self, row):\n \n new_row = pd.DataFrame(data=[row], columns = self.table.columns) \n self.table = self.table.append(new_row, ignore_index=True)", "def insert_db(table, schema, value):\n cursor.execute(schema, value)\n db.commit()\n print(cursor.rowcount, \"record inserted into db: \" + table)", "def _insert_single(self, disc, class_num):\n self.cursor.execute(self.INSERT, (disc, class_num))\n self.conn.commit()", "def insert(self, data):\r\n pass", "def insert(self, tablename, seqname=None, _test=False, **values):\n def q(x): return \"(\" + x + \")\"\n \n if values:\n _keys = SQLQuery.join(values.keys(), ', ')\n _values = SQLQuery.join([sqlparam(v) for v in values.values()], ', ')\n sql_query = \"INSERT INTO %s \" % tablename + q(_keys) + ' VALUES ' + q(_values)\n else:\n sql_query = SQLQuery(self._get_insert_default_values_query(tablename))\n\n if _test: return sql_query\n \n db_cursor = self._db_cursor()\n if seqname is not False: \n sql_query = self._process_insert_query(sql_query, tablename, seqname)\n\n if isinstance(sql_query, tuple):\n # for some databases, a separate query has to be made to find \n # the id of the inserted row.\n q1, q2 = sql_query\n self._db_execute(db_cursor, q1)\n self._db_execute(db_cursor, q2)\n else:\n self._db_execute(db_cursor, sql_query)\n\n try: \n out = db_cursor.fetchone()[0]\n except Exception: \n out = None\n \n if not self.ctx.transactions: \n self.ctx.commit()\n return out", "def insert(self, tname, valdict, cols = None):\n icmd, vals = make_insert_command(tname, valdict, cols)\n self.write_curs.execute(icmd, vals)", "def insert(self, table_name, fields):\n LOGGER.debug(\"%r: Inserting %r\" % (table_name, fields))\n return self.db.table(table_name).insert(fields)", "def insert(self, query: str, *args, **kwargs):\n cursor = self._cursor()\n try:\n self._execute(cursor, 
query, args, kwargs)\n return cursor.lastrowid\n finally:\n cursor.close()", "def insert_row(self, row_value, index):\n row = pd.DataFrame(row_value, columns=['lat', 'long', 'alt', 'descr'])\n self.df = pd.concat([self.df.iloc[:index], row, self.df.iloc[index:]]).reset_index(drop=True)", "def insert(self, data):\n\n if not data:\n raise ValueError('invalid data')\n\n # TODO: validate and insert data into model", "def execute_insert(self,insert):\n try:\n self.cursor.execute(insert)\n self.connection.commit()\n except Exception as error:\n self.connection.rollback()\n raise error", "def add_entry(db, table, columns, values):\n mycursor = db.cursor()\n\n sql = \"INSERT INTO \" + table + \" (\" + parse_sql_param_from_array(columns) + \") VALUES (\" + parse_sql_param_from_array(values, escape=True) + \")\"\n mycursor.execute(sql)\n\n db.commit()", "def test_insert(self):\n query = \"insert into cds values(%s,%s,%s,%s)\"\n values = (109876,\"cinderella\",\"big 5\",5)\n self.a.insert(query,values)\n query1 = \"select * from cds where id=109876\"", "def insert(self, fields, values):\n sql = self.generate_insert_sql(fields, values)\n self.sqlhistory.append(sql)\n return self.sql_insert(sql)", "def insert_record(record, table):\n\n\ttups = [(key, val) for key, val in record.iteritems()]\n\tkeys = [key for key, val in tups]\n\tvals = [val for key, val in tups]\n\n\tconn = get_database_connection(port = 2001)\n\tcursor = conn.cursor()\n\n\tnum_cols = len(keys)\n\tkey_str = ','.join(keys)\n\tval_str = ','.join(['%s'] * num_cols)\n\n\tqry = \"REPLACE INTO %s (%s) VALUES (%s)\" % (table, key_str, val_str)\n\tcursor.execute(qry, vals)\n\n\tconn.commit()\n\tcursor.close()\n\tconn.close()", "def _query_insert(self, sql, data=None):\n\n conn = psycopg2.connect(self.connect_args)\n cur = conn.cursor()\n cur.execute(sql, data)\n conn.commit()\n cur.close()\n conn.close()", "def insert_values():\n pass", "def insert(self):\n pass", "def addEntryToTable(self):\n self.table_view.table_model.insertRows(0, 1)", "def insert(self, value):\n # Build insert query\n into_sql = ''\n col_sql = ''\n val_sql = []\n for key, val in value.items():\n into_sql += ', {}'.format(key)\n col_sql += ', ?'\n val_sql.append(val)\n # Execute query\n self.execute(\"insert into {} ({}) values ({})\".format(self.name, into_sql[2:], col_sql[2:]), val_sql)", "def add_record(self, table_name, **kwargs):\n\n if not self.open:\n print(\"Not currently connected to a DB.\")\n return False\n\n\n fields = \", \".join([str(f) for f in kwargs.keys()])\n values = \", \".join([str(v) for v in kwargs.values()])\n q = \"INSERT INTO {tn}({columns}) VALUES ({values})\"\n self.query = q.format(tn=table_name,\n columns=fields,\n values=values)\n\n # try:\n self.cursor.execute(self.query)\n print(\"{}\\n inserted into {} table.\".format(values, table_name))\n return True\n # except Exception as error:\n # print(\"Failed to add {} to {} table.\".format(values, table_name))\n # print(\"SQL Query: \\n{}\\n\".format(self.query))\n # print(\"Exception: \\n{}\".format(error))\n\n # return False", "def _insert_if_new(cursor, table, data, **kwargs):\n pk_only_data = _subdict(METADATA_PRIMARY_KEYS[table], data, enforce_key_presence=True)\n if not _exists(cursor, table, pk_only_data):\n log('inserting new {}...'.format(table), end='')\n result = _insert_dict(cursor, table, data, **kwargs)\n log('done.')\n return result", "def _store_entry_in_table(conn, table_name, entry):\n # Create entry insertion template.\n template = ('?, ' * len(entry)).rstrip(', ') # \"?\" for 
each value\n template = '(%s)' % template # enclose in parentheses\n # Try to insert a new row into the table.\n with conn:\n cur = conn.cursor()\n cur.execute('INSERT INTO %s VALUES%s' % (table_name, template), entry)", "def do_insert_data(self, *args):\n print(\"Provide data to insert\")\n self.connection_obj.insert_into_table(**self.__class__.populate_data())\n print(\"Data Insertion Successful\")", "def insert_to_db(self, query):\n try:\n q = self.connection.execute(query)\n except Exception:\n self.print_std_error()", "def insert_to_table(self, tableName, dataRow, colNames):\n queryString = f\"INSERT INTO {tableName} VALUES \"\n # building the Values list\n valueString = \"\"\n # looping \n for value in dataRow:", "def insert(self, table, value):\n col_name = self.table_cols[table][1:]\n sql = \"INSERT INTO %s(%s) VALUES (%s)\" % (table, str(','.join(col_name)), array_join(value, ','))\n Log.debug('DB -> %s' % sql)\n self.execute(sql)", "def InsertChartRow(self, row):\n query = ('INSERT INTO %s VALUES '\n ' (\\'%s\\', \\'%s\\', \\'%s\\', \\'%s\\', %s, %s, %s)' %\n (self.BOX_OFFICE_TABLE, row['week'], row['id'], row['title'],\n row['studio'], row['gross'], row['theaters'], row['budget']))\n self.cursor.execute(query)", "def insert_row(self, row):\n utils.insert_layout_row(self.formLayout,row)", "def insert_or_update(self, table, connection, row):\n\n # find line, if it exist\n dbrow = self.find(connection, table, row)\n\n # TODO XXX use actual database function instead of this stupid thing\n now = datetime.datetime.now()\n\n column_names = table.columns.keys()\n\n # UpdatedAt field configured ? Let's set the value in source hash\n if self.updated_at_field in column_names:\n row[self.updated_at_field] = now # XXX not pure ...\n\n # Update logic\n if dbrow:\n if not UPDATE in self.allowed_operations:\n raise ProhibitedOperationError('UPDATE operations are not allowed by this transformation.')\n\n query = table.update().values(\n **{col: row.get(col)\n for col in self.get_columns_for(column_names, row, dbrow)}\n ).where(and_(*(getattr(table.c, col) == row.get(col) for col in self.discriminant)))\n\n # INSERT\n else:\n if not INSERT in self.allowed_operations:\n raise ProhibitedOperationError('INSERT operations are not allowed by this transformation.')\n\n if self.created_at_field in column_names:\n row[self.created_at_field] = now # XXX UNPURE\n else:\n if self.created_at_field in row:\n del row[self.created_at_field] # UNPURE\n\n query = table.insert().values(**{col: row.get(col) for col in self.get_columns_for(column_names, row)})\n\n # Execute\n try:\n connection.execute(query)\n except Exception:\n connection.rollback()\n raise\n\n # Increment stats TODO\n # if dbrow:\n # self._output._special_stats[UPDATE] += 1\n # else:\n # self._output._special_stats[INSERT] += 1\n\n # If user required us to fetch some columns, let's query again to get their actual values.\n if self.fetch_columns and len(self.fetch_columns):\n if not dbrow:\n dbrow = self.find(row)\n if not dbrow:\n raise ValueError('Could not find matching row after load.')\n\n for alias, column in self.fetch_columns.items():\n row[alias] = dbrow[column]\n\n return row", "def bq_insert_rows(bq_client, table, rows):\n _batch_insert(bq_client, table, rows)", "def add_row(self, table, row_dict):\n self.open_db()\n for k, v in six.iteritems(row_dict):\n table.row[k] = v\n table.row.append()\n table.flush()", "def add_row(df, row):\n df.loc[df.shape[0]] = row", "def insert_row(self, event_id, description, date, link_url):\n with 
self.conn:\n self.c.execute(\n \"\"\"INSERT INTO {table}({event}, {desc}, {date}, {link})\n VALUES (?, ?, ?, ?)\"\"\".format(\n table=TABLE,\n event=EVENT,\n desc=DESC,\n date=DATE,\n link=LINK,\n ),\n (event_id, description, date, link_url),\n )", "def InsertRow(rowIndex,name=\"\",label=\"\",Matrix=None):\n if(Matrix == None):\n from globals import Matrix\n rowToInsertBefore = Matrix[rowIndex].Member\n\n newRow = Matrix.InsertBlankRowAfter(rowToInsertBefore,name,label)\n Matrix.SwitchRows(rowToInsertBefore.DataIndex,newRow.DataIndex)\n return Matrix[rowIndex]", "def _addObject(self, row):\n if row.has_key('id'):\n txn_id = row.get('id')\n\n # increment transaction id\n prefix = self.context.getLedger().getTransactionPrefix()\n txn_number = int(txn_id.replace(prefix, ''))\n if self.context.getLedger().getTransactionID() < txn_number:\n self.context.getLedger().setTransactionID(txn_number)\n\n else:\n txn_id = self.context.generateUniqueId(type_name='Transaction')\n\n # create transaction if it doesn't exist\n if not self.context.hasObject(txn_id):\n self.context.invokeFactory(type_name='Transaction', id=txn_id,\n title=row['title'], effectiveDate=row['effectiveDate'])\n\n txn = self.context[txn_id]\n\n # lookup account\n pc = getToolByName(self.context, 'portal_catalog')\n brains = pc(id=row['Account'])\n __traceback_info__ = str(row)\n assert len(brains) == 1\n row['Account'] = brains[0].getObject()\n\n # create transaction entry\n entry_id = txn.generateUniqueId('TransactionEntry')\n entry = TransactionEntry(entry_id)\n txn._setObject(entry_id, entry)\n entry = txn._getOb(entry_id)\n\n row['id'] = entry_id\n entry.edit(**row)", "def insert_into_table(self, conn, insert_into_table_sql):\n try:\n c = conn.cursor()\n c.execute(insert_into_table_sql)\n conn.commit()\n\n except Error as e:\n print(e)", "async def _insert_stmt(self):\n raise NotImplementedError", "def addRow(self, row_i, data_tuple, start_col, sheet):\r\n col_n = len(data_tuple)\r\n last_col = start_col + col_n - 1\r\n insert_range = self.getRangeByCells((row_i, start_col), (row_i, last_col), sheet)\r\n insert_range.Value = data_tuple", "def test_table_insert_rows(data):\n dataset_id = 'eu_cbec_bi_data'\n table_id = 'marketplaces'\n dataset = bigquery.Dataset(bigquery_client.dataset(dataset_id))\n \n table = bigquery.Table(dataset.table(table_id), schema=SCHEMA)\n\n # [START table_insert_rows]\n rows_to_insert = [data]\n\n errors = bigquery_client.insert_rows(table, rows_to_insert) # API request\n\n assert errors == []\n\n # [END table_insert_rows]", "def insert(self):\n ret = True\n\n schema = self.schema\n fields = self.depopulate(False)\n\n q = self.query\n q.set_fields(fields)\n pk = q.insert()\n if pk:\n fields = q.fields\n fields[schema.pk.name] = pk\n self._populate(fields)\n\n else:\n ret = False\n\n return ret", "def sql_insert(self, sqlstr):\n get_connection().insert_raw(sqlstr)\n return 1", "def insert_to_db(self) -> None:\n query = '''INSERT INTO ESLReceipts(Transaction_Number, Date, Description, Memo,\n Amount_Debit, Amount_Credit, Balance, Check_Number, \n Fees, Card_Type, Is_Payment, Is_Transaction, User_id)\n VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?);'''\n self.db.commit(query, values=self.to_tuple())\n\n if self.is_transaction \\\n and self.transaction is not None \\\n and not self.transaction.exists_in_db():\n self.transaction.insert_to_db()", "def add_testcase_row(conn, testcase_row_sql):\n try:\n c = conn.cursor()\n c.execute(testcase_row_sql, (os.environ['SUITE'], os.environ['LOCATION']))\n conn.commit() \n except 
Error as e:\n print(e)", "def insert(self, query):\n try:\n self.cursor.execute(query)\n self.connection.commit()\n\n except MySQLdb.Error as e:\n self.connection.rollback()\n try:\n print(\"MySQL Error {}: {}\".format(e.args[0], e.args[1]))\n except IndexError:\n print(\"MySQL Error: {}\".format(str(e)))", "def insert(self, table_name, data, ignore=False):\n fields = map((lambda s: \"`\" + str(s) + \"`\"), data.keys())\n values = map(self.quote, data.values())\n curs = self.q(\n \"INSERT \" + (\"IGNORE\" if ignore else \"\") + \" INTO `{0}` ({1}) VALUES({2})\".\n format(table_name, \", \".join(fields), \", \".join(values)),\n True\n )\n last_id = curs.lastrowid\n curs.close()\n return last_id", "def insertData(table, column, input):\n\ttry:\n\t\tcon = sqlite3.connect('PampDb.db')\n\t\tcur = con.cursor()\n\t\tcur.execute(\"INSERT INTO '\" + table + \"' (\" + column + \") VALUES ('\" + input + \"')\")\n\t\tcon.commit()\n\t\tcon.close()\n\texcept:\n\t\tprint('Could not run function insertData from DbController')", "def add_row(self, row):\r\n self.rows.append(row)", "def add_row(self, row_id):\n TODO('https://github.com/posterior/treecat/issues/27')", "def insert(self, *columns):\n schema_encoding = int('0' * self.table.num_columns)\n timestamp = int(time())\n self.index.add_record_to_index(columns,self.table.base_rid)\n return self.table.insert(self.table.key_column, schema_encoding, timestamp, *columns)", "def insert(self, df):\r\n with t.connect('{\"host\":\"tdprd\",\"logmech\":\"krb5\"}') as con:\r\n with con.cursor () as cur:\r\n# try:\r\n# cur.execute(q, df.values.tolist())\r\n# except Exception as ex:\r\n# if \"Duplicate row error\" in str(ex):\r\n# cur.execute(\"delete {0}\".format(self.tmp_table_name))\r\n# cur.execute(q, df.values.tolist())\r\n cur.execute(self.q, df.values.tolist()) \r\n cur.execute(\"insert into {0} sel * from {1}\".format(self.main_table_name, self.tmp_table_name))\r\n cur.execute(\"delete {0}\".format(self.tmp_table_name))\r\n self.num += len(df)\r\n print(\"{0} lines were added out of {1}\".format(str(self.num), str(len(self.df))))", "def insert(self, table, columns, values, execute=True):\n # TODO: Cant accept lists?\n # Concatenate statement\n cols, vals = get_col_val_str(columns)\n statement = \"INSERT INTO {0} ({1}) VALUES ({2})\".format(wrap(table), cols, vals)\n\n # Execute statement\n if execute:\n self._cursor.execute(statement, values)\n self._commit()\n self._printer('\\tMySQL row successfully inserted into {0}'.format(table))\n\n # Only return statement\n else:\n return statement", "def insert(self, row_values):\n if len(row_values) != len(self.columns):\n raise TypeError(\"wrong number of elements\")\n\n self.rows += [dict(zip(self.columns, row_values))]", "def add_entry_to_db(entry):\n db.session.add(entry)\n db.session.commit()", "def insert_unique(db, klass, **kwargs):\n try:\n value = db.query(klass).filter_by(**kwargs).one()\n except NoResultFound:\n value = klass(**kwargs)\n db.add(value)\n db.commit()\n return value", "def insert(self, i, row, default=None):\n try: row = [v for v in row] # Creates a copy of the row (fast + safe for generators and Columns).\n except:\n raise TypeError, \"Table.insert(x): x must be list\"\n list.insert(self, i, row)\n m = max((len(self) > 1 and self._m or 0, len(row)))\n if len(row) < m:\n row.extend([default] * (m-len(row)))\n if self._m < m:\n # The given row might have more columns than the rows in the table.\n # Performance takes a hit when these rows have to be expanded:\n for row in self:\n if len(row) < 
m:\n row.extend([default] * (m-len(row)))\n self._m = m", "def insert_execute(self, insert_data):\n self.execute(query=self.db_insert_schema.format(self.table_name), data=insert_data)", "def test_dummydb_add_data_to_table(self):\n db = DummyDB()\n columns = {\n \"one\": int,\n \"two\": str,\n \"three\": bool,\n }\n db.create_table(\"new_table\", columns)\n db.insert(\"new_table\", one=1, two=\"haunted\", three=True)\n result = db.select(\"new_table\", one=1)\n self.assertEqual(result[0]['two'], \"haunted\")", "def insert_data(db, metadata, data):\n with Tx(db) as c:\n lock_tables(c)\n metadata['set_id'] = _insert_metadata(c, metadata)\n\n data_iterator = iter(data)\n first_row = next(data_iterator)\n headers = list(first_row.keys())\n for table in _tables_from_headers(headers):\n _insert_data_rows(c, table, metadata, chain([first_row], data_iterator))", "def insert_record(self, record, session):\n try:\n session.add(record)\n session.commit()\n session.close()\n return True\n except:\n\n logging.exception(\"http record cannot be added to db \" \":Time: \" + str(datetime.datetime.now()))\n return False" ]
[ "0.7340923", "0.7256007", "0.7239077", "0.7177126", "0.71218693", "0.70833904", "0.7065664", "0.69191945", "0.68331724", "0.682139", "0.68028295", "0.67609316", "0.6746725", "0.6736111", "0.673245", "0.6703915", "0.6692947", "0.6680959", "0.66731584", "0.6654706", "0.6627586", "0.66269606", "0.6552719", "0.65406734", "0.64872986", "0.6486009", "0.6460466", "0.6456129", "0.64366454", "0.64338666", "0.64232826", "0.6418839", "0.6416571", "0.64134413", "0.64134413", "0.64134413", "0.641072", "0.6410717", "0.6398064", "0.639337", "0.63742447", "0.63610065", "0.6357344", "0.6351835", "0.63506156", "0.6346227", "0.63331765", "0.6323962", "0.63018996", "0.6277464", "0.62660486", "0.6255073", "0.6245613", "0.62374514", "0.62285584", "0.62246037", "0.6217716", "0.621566", "0.6202652", "0.6196879", "0.6193152", "0.6190131", "0.6180505", "0.6167517", "0.61570674", "0.6150573", "0.61493224", "0.61463416", "0.612807", "0.6120235", "0.6106472", "0.6106207", "0.6098923", "0.6088312", "0.6082332", "0.6080453", "0.6060226", "0.6058727", "0.6035854", "0.60240304", "0.60194844", "0.60034025", "0.5993613", "0.59930277", "0.5992738", "0.5992558", "0.5991993", "0.59886324", "0.59871745", "0.5986507", "0.59805804", "0.5975229", "0.5974846", "0.59724796", "0.5972323", "0.5971286", "0.59683156", "0.5966563", "0.5962687", "0.595786" ]
0.7353583
0
select data from a table (select from db.table where column_filters ...)
def _select_data( self, db: str, table: str, column_filters: Dict[str, str] ) -> List[List]: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data(db, columns, table, condition=\"\"):\n cur = db.cursor()\n cur.execute(SELECT.format(columns, table) + \" \" + condition)\n return cur.fetchall()", "def select_db(table, cond):\n query = \"SELECT * FROM \" + table + \" WHERE \" + cond\n cursor.execute(query)\n records = cursor.fetchall()\n return records", "def select_from_DB (itemToSearch, tableWhereToSearch):\n session = open_session()\n s = select([itemToSearch.tableWhereToSearch])\n result = session.execute(s)\n for row in result:\n print(row)", "def select_all(table):\n # Establish connection\n conn = psycopg2.connect(dbname='db', user='grok')\n # Establish cursor\n cursor = conn.cursor()\n try:\n # Execute query\n cursor.execute('SELECT * from '+table+';')\n records = cursor.fetchall()\n except:\n return []\n return records", "def select(self, table, field='', condition=''):\n sql = \"SELECT * FROM %s\" % table\n if field and condition:\n sql += \" WHERE %s='%s'\" % (field, condition)\n Log.debug('DB -> %s' % sql)\n return self.execute(sql)", "def sql_query(self, table, record_name, columns):\n cursorObj = self.db.cursor()\n cursorObj.execute('SELECT {0} FROM {1} WHERE name=\"{2}\"'.format(columns, table, record_name))\n records = cursorObj.fetchall()\n return records", "def select_all(cur, table) -> list:\n cur.execute(f'''SELECT * FROM {table}''')\n return cur.fetchall()", "def select(self, table, where=None, *args, **kwargs):\n result = None\n query = 'SELECT '\n keys = args\n values = tuple(kwargs.values())\n length = len(keys) - 1\n\n for i, key in enumerate(keys):\n query += \"`\" + key + \"`\"\n if i < length:\n query += \",\"\n\n query += ' FROM {}'.format(table)\n\n if where:\n query += \" WHERE {}\".format(where)\n\n print(query)\n\n self.__open()\n self.__session.execute(query, values)\n number_rows = self.__session.rowcount\n number_columns = len(self.__session.description)\n\n if number_rows >= 1 and number_columns > 1:\n result = [item for item in self.__session.fetchall()]\n else:\n result = [item[0] for item in self.__session.fetchall()]\n\n self.__close()\n\n return result", "def db_select(table: str, columns: list, where: str) -> list:\n\n db, c = start_db()\n\n cols = ', '.join(columns)\n if where:\n query = f'SELECT {cols} FROM {table} WHERE {where}'\n else:\n query = f'SELECT {cols} FROM {table}'\n\n content = list(c.execute(query))\n db.close()\n\n if len(columns) == 1 and columns[0] != '*':\n content = [el[0] for el in content if el[0]]\n\n return content", "def select(self, table, where=None, *args):\n\n result = None\n query = \"SELECT \"\n keys = args\n l = len(keys) - 1\n for i, key in enumerate(keys):\n query += \"`\"+key+\"`\"\n if i < l:\n query += \",\"\n query += \" FROM %s\" % table\n if where:\n query += \" WHERE %s\" % where\n\n self.__open()\n self.__cursor.execute(query)\n result = self.__cursor.fetchall()\n self.__close()\n return result", "def selectAll_db(table, name=\"*\"):\n query = \"SELECT \" + name + \" FROM \" + table\n cursor.execute(query)\n records = cursor.fetchall()\n return records", "def get_data(cur, table, col):\n cur.execute('SELECT {} FROM {}'.format(col, table))\n d = cur.fetchall()\n\n data = []\n for dat in d:\n data.append(dat[0])\n\n return data", "def select(self, table, columns=['*'], condition='', orderby='', limit=0, isFetchAll=True):\n return True", "def sql_select(sql):\n cur = c.cursor()\n cur.execute(sql)\n results = cur.fetchall()\n return results", "def select_columns(db, table, columns, condition=None):\n mycursor = db.cursor()\n if 
isinstance(columns, str):\n sql = \"SELECT \" + columns + \" FROM \" + table\n else: # columns is a list of columns\n sql = \"SELECT \" + parse_sql_param_from_array(columns) + \" FROM \" + table\n if condition:\n sql += \" WHERE \" + condition\n mycursor.execute(sql)\n result = mycursor.fetchall()\n return result", "def select (a_data,a_column) :\n return a_data[a_column]", "def get_data(self, table_name, condition=None):\n\t\tif (self.__dbfile is not None) and self.table_exists(table_name):\n\t\t\tquery = \"SELECT * FROM %s\" % table_name\n\t\t\tif condition is not None:\n\t\t\t\tquery = query + \" WHERE %s\" % condition\n\t\t\tquery = query + \";\"\n\t\t\tdf = pd.read_sql_query(query, self._conn)\n\n\t\t\t# Strange columns appear. Get only the actual columns\n\t\t\treturn df[[col for col in LocalData.table_info[table_name] if col in df]]\n\t\treturn pd.DataFrame(columns=LocalData.table_info[table_name])", "def select_all_data(conn, select_sql):\n cur = conn.cursor()\n cur.execute(select_sql)\n\n rows = cur.fetchall()\n\n for row in rows:\n print(row)", "def get_table_col(colnames=\"pmid\", \n condition=\"\",\n table=\"test_articles_1\", \n database=\"tumba\", \n user=\"xui\", \n password=\"mmceez\", \n host=\"localhost\", \n port=\"5433\"):\n engine = create_engine(f\"postgresql://{user}:{password}@{host}:{port}/{database}\", echo = False) \n Query = f\"select {colnames} from {table} {condition};\"\n result = pd.read_sql(sqlalchemy.text(Query), engine)\n \n return(result)", "def select(self, table_name, columns=None):\n table = self._create_table(table_name)\n if columns:\n expressions = [table.c[name] for name in columns]\n query = select(expressions)\n else:\n query = select(table.c)\n return self.cursor.execute(query).fetchall()", "def select(self, table, date=None, columns=\"*\", where=None):\n if date is not None:\n where_clause = \" WHERE date = '%s' \" % date\n else:\n where_clause = \"\"\n \n if where is not None:\n if where_clause == \"\":\n where_clause += \" WHERE 1 = 1 \"\n where_clause += \" AND %s \" % where \n select_sql = \"SELECT %s FROM %s %s \" % (columns, table, where_clause)\n return self._execute(select_sql)", "def select_all_from_db(table_name):\n table = sqlalchemy.Table(table_name, metadata, autoload=True, autoload_with=engine)\n query = sqlalchemy.select([table])\n result_proxy = connection.execute(query)\n result_set = result_proxy.fetchall()\n\n return result_set", "def select(self, table, field, condition, *parameters, **kwparameters):\n table = self.prefix + table\n query = \"SELECT \" + field + \" FROM \" + table + \" \" + condition\n\n return self.query(query, *parameters, **kwparameters)", "def get(self, field, table=None, **constraints):\n keys = constraints.keys()\n table = (table or\n (len(keys) == 1 and keys[0].endswith('_id') and keys[0][:-3]) or\n (field.endswith('_id') and field[:-3]))\n condition = ' and '.join(key + ' = %s' for key in keys)\n for row in self.iter(\n 'select %s from %s where %s' % (field, table, condition),\n *(constraints[key] for key in keys)):\n return row[0]", "def select_query(self):\n query = db.select([self.tables])\n print(query)\n ResultProxy = self.connection.execute(query)\n ResultSet = ResultProxy.fetchall()\n return ResultSet", "def fetch_query(table, id_col, thing_id):\r\n single = False\r\n\r\n if not isinstance(thing_id, iters):\r\n single = True\r\n thing_id = (thing_id,)\r\n\r\n s = sa.select([table], sa.or_(*[id_col == tid\r\n for tid in thing_id]))\r\n r = s.execute().fetchall()\r\n return (r, single)", "def 
select_two_columns_from_db(table_name):\n table = sqlalchemy.Table(table_name, metadata, autoload=True, autoload_with=engine)\n query = sqlalchemy.select([table.columns.tweet_id, table.columns.created_at])\n result_proxy = connection.execute(query)\n result_set = result_proxy.fetchall()\n\n return result_set", "def _select_table(self):\n\n return self.postgres.execute(f\"SELECT * FROM {self.table_name};\")", "def test_select(self):\n my_conn = MySQL(*self.conn_params)\n table_name = \"inf_schema\"\n inf_schema = my_conn.get_table(table_name)\n # SELECT * FROM inf_schema\n # WHERE table_name like 'INNO%' AND avg_row_length > 100\n results = my_conn.engine.execute(select('*')\n .where(inf_schema.c.table_name\n .like('INNO%'))\n .where(inf_schema.c.avg_row_length >\n 100)\n .select_from(inf_schema)).fetchall()\n table_df = pd.DataFrame(results)\n self.assertGreaterEqual(len(table_df), 6)", "def selectAll(conn, params):\n cur = conn.cursor()\n cur.execute(f\"SELECT {params} FROM criptomonedas\")\n\n # rows = cur.fetchall()\n rows = [r[0] for r in cur]\n # for row in rows:\n # print(row[0])\n return rows", "def _select(\n self, table=None, fields=(), where=None, order=None, limit=None\n ):\n\n sql = 'SELECT %s FROM `%s`' % (','.join(fields), table)\n\n if where and len(where) > 0:\n sql += ' WHERE %s' % where[0]\n\n if order:\n sql += ' ORDER BY %s' % order[0]\n\n if len(order) > 1:\n sql += ' %s' % order[1]\n\n if limit:\n sql += ' LIMIT %s' % limit[0]\n\n if len(limit) > 1:\n sql += ', %s' % limit[1]\n\n return self.query(sql, where[1] if where and len(where) > 1 else None)", "def select_recs(self,**kwargs):\n if self.sql:\n # self.sql is assumed to be a fully formed sql statement\n self.recs = self.table.query(self.sql)\n else:\n filters = self.get_list_filters()\n self.recs = self.table.select(where=filters.where,order_by=filters.order_by,**kwargs)", "def read_all_rows(condition, database, table):\n connection = sqlite3.connect(database)\n connection.row_factory = sqlite3.Row\n cursor = connection.cursor()\n cursor.execute('SELECT * FROM ' + table + ' WHERE ' + condition)\n rows = cursor.fetchall()\n cursor.close()\n connection.close()\n return rows", "def get_data_in_region(table_name, cur, path, fetch=False):\r\n if len(path)==1:\r\n cur.execute(\"SELECT * FROM \" + table_name + \" WHERE \" + path[0] + \";\")\r\n else:\r\n cur.execute(\"SELECT * FROM \" + table_name + \" WHERE \" + \" AND \".join(path) + \";\")\r\n\r\n if fetch is True:\r\n return cur.fetchall()\r\n else:\r\n return", "def fetch_data_from_db(query):\n cursor.execute(query)\n result = cursor.fetchall()\n return result", "def fetch_table(self,table_name, field_names = None,where_ind_name = None, where_ind_value = None, where_clause = None, primary_key = 'Id', return_array = False):\n\t\tif field_names is None:\n\t\t\tfield_names = ['*']\n\t\tfield_names = field_names.copy()\n\t\tif (primary_key not in field_names) and (field_names != ['*']):\n\t\t\tfield_names.append(primary_key)\n\t\tif self.connected:\n\t\t\tfetch_data = pd.read_sql_query(sql=self.get_sql_str_select(table_name,field_names,where_ind_name,where_ind_value,where_clause),con=self.__engine,index_col = primary_key)\n\t\t\tif not return_array:\n\t\t\t\treturn fetch_data\n\t\t\telse:\n\t\t\t\treturn np.asarray(fetch_data)\n\t\telse:\n\t\t\tprint ('db not connected yet. 
Do connect first')", "def select(conn, sql):\n cur = conn.cursor()\n cur.execute(sql)\n\n rows = cur.fetchall()\n\n for row in rows:\n print(row)", "def select_from_table(self, table_name):\n sql_str = \"SELECT * FROM {tb}\".format(tb=table_name)\n cur = self.conn.cursor()\n cur.execute(sql_str)\n names = [description[0] for description in cur.description]\n\n rows = cur.fetchall()\n\n df = pd.DataFrame(rows, columns =names) \n\n return df", "def sql_query_fetch_df(self,sql,primary_key = None):\n\t\tif not self.connected:\n\t\t\tprint ('db not connected yet. Do connect first')\n\t\t\treturn\n\t\tresults = pd.read_sql_query(sql,self.__engine,index_col = primary_key)\n\t\treturn results", "def filter():\n return get_filter_data(db, MyTable)", "def select(table,query_field='',query_value='',fields=''):\n query=None\n if query_field:\n query = db[table][query_field]==query_value\n if fields:\n fields=['%s.%s' % (table,f.strip()) for f in fields.split(',')]\n else:\n fields=None\n return crud.select(db[table],query=query,fields=fields,headers='fieldname:capitalize')", "def get_rows(column_to_search, value_to_match, table, db_file):\n \n try:\n conn, c = connect_to_db(db_file) \n c.execute('SELECT * FROM {t} WHERE {col}=\"{value}\"'.format(t=safe(table), \n col=safe(column_to_search), value=value_to_match))\n row = c.fetchall()\n conn.close()\n return row\n except Exception as e:\n print(\"Error when trying to get row in table\", table, \"in\", db_file)\n print(e)\n return None", "def get_data(query):\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(query)\n data = c.fetchall()\n db.close()\n return data", "def get_data(query):\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(query)\n data = c.fetchall()\n db.close()\n return data", "def get(self, table, column, limit=None):\n\n query = \"SELECT {0} from {1};\".format(column, table)\n self.cursor.execute(query)\n\n # fetch data\n rows = self.cursor.fetchall()\n\n return rows[len(rows) - limit if limit else 0:]", "def select_all_persons(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM person\")\n\n rows = cur.fetchall()\n\n return rows # return the rows", "def select(self, get_date=False, date_range=None, columns=None):\n\n\t\tdb = self.db\n\t\tif date_range:\n\t\t\tdate_snip = sql.SQL('WHERE date >= {sd} AND date <= {ed}').format(\n\t\t\t\tsd=sql.Literal(date_range[0]),\n\t\t\t\ted=sql.Literal(date_range[1])\n\t\t\t)\n\t\telse:\n\t\t\tdate_snip = sql.SQL('')\n\n\t\tif columns is None:\n\t\t\tcolumns = self.columns\n\n\t\tif get_date:\n\t\t\tcolumns = ['date'] + columns\n\n\t\tcolumns_snip = sql.SQL(',').join(sql.Identifier(c) for c in columns)\n\n\t\tquery = sql.SQL(\"\"\"\nWITH filtered_table AS (SELECT * FROM {schema}.{tbl} WHERE mask = FALSE)\nSELECT {columns} FROM filtered_table {date_cond}\n\t\t\"\"\").format(\n\t\t\tschema=sql.Identifier(db.config['schema']),\n\t\t\ttbl=sql.Identifier(self.name),\n\t\t\tcolumns=columns_snip,\n\t\t\tdate_cond=date_snip,\n\t\t)\n\t\ttry:\n\t\t\tres = db.query(query)\n\t\texcept:\n\t\t\traise InternalServerError(description=\"Failed to select data.\")\n\t\treturn res, columns", "def fetch_data(universe='all', start=None, end=None, connection=None, tablename=None, where_clause = None):\n from datetime import datetime, timedelta\n if end is None:\n end = datetime.today().strftime('%Y-%m-%d')\n if start is None:\n start = (datetime.today() - timedelta(days=30)).strftime('%Y-%m-%d')\n q = []\n select = \"SELECT * from {tablename} where 
\".format(tablename=tablename)\n if universe != 'all':\n q.append(\"symbol in {universe}\")\n q.append(\"timestamp >= '{start}'\")\n q.append(\"timestamp <= '{end}'\")\n if where_clause:\n [q.append(x)for x in where_clause]\n order_by = ' ORDER BY timestamp'\n query = ' AND '.join(q).format(universe=tuple(universe), \n start=start, end=end)\n query = select + query + order_by\n # This should be any column\n data = pd.read_sql_query(query, connection, parse_dates=['timestamp'])\n # Delete index column if any\n if 'index' in data.columns:\n del data['index']\n return data", "def find_some(self,table,field_list,**query_dict):\n start_sql = 'SELECT '\n sql = ''\n query_sql = ''\n for field in field_list: start_sql += field + ',' \n start_sql = start_sql[0:-1] + ' FROM %s WHERE ' % (table)\n try:\n if query_dict:\n for index in query_dict:\n if not isinstance(query_dict[index],dict): query_sql += \" %s = '%s' and\" % (index,query_dict[index]) \n else: query_sql += \" %s %s '%s' and\" % (index,query_dict[index]['rule'],query_dict[index]['value'])\n sql = (start_sql + query_sql)[0:-3] \n info_list = self.db.query(sql)\n except Exception,e: self.treat_except(e) \n return info_list", "def select_all_rows(self):\n with self.conn:\n self.c.execute(\"SELECT * FROM %s\" % (TABLE))\n return self.c.fetchall()", "def select(self, table_name: str, row_filter: dict) -> list:\n sql = 'SELECT * FROM ' + table_name + ' WHERE '\n for key, value in row_filter.items():\n if type(value) is tuple:\n sql += key + ' '\n sql += value[0] + ' '\n sql += \"'\" + value[1] + \"'\"\n elif type(value) == str:\n sql += key + ' = '\n sql += \"'\" + value + \"'\"\n elif value is None:\n sql += key + ' ISNULL '\n else:\n sql += key + ' = '\n sql += str(value)\n if not key == list(row_filter.keys())[-1]:\n sql += ' AND '\n return self.cursor.execute(sql).fetchall()", "def select(cur, variable, table):\n cur.execute(\"SELECT {v} FROM {t}\".format(v = variable, t = table))\n variable = cur.fetchall()\n variable = [i[0] for i in variable]\n return variable", "def get_data(self):\n\n return pd.read_sql_query(\"Select * from {table}\".format(table=self.table_name), con=self.con)", "def get_all_element_of_table(path, table):\n conn = sqlite3.connect(path)\n c = conn.cursor()\n to_return = []\n for row in c.execute('SELECT * FROM '+table).fetchall():\n for x in row:\n to_return.append(x)\n conn.close()\n return to_return", "def get(self, table, field, condition, *parameters, **kwparameters):\n data = self.select(table, field, condition, *parameters, **kwparameters)\n return data[0] if data else []", "def _from_sql(connection, table_or_query):\n cursor = connection.cursor()\n try:\n cursor.execute(table_or_query)\n except Exception:\n try:\n cursor.execute('SELECT * FROM {0}'.format(table_or_query))\n except Exception:\n cursor.close()\n raise\n\n try:\n # If iterable, use cursor directly.\n iter(cursor)\n results = cursor\n except TypeError:\n # If not iterable, build a generator.\n def result_generator(cursor):\n row = cursor.fetchone()\n while row != None:\n yield row\n row = cursor.fetchone()\n results = result_generator(cursor)\n\n header = tuple(x[0] for x in cursor.description)\n reader = chain([header], results)\n return (reader, cursor.close)", "def grasspi_query_db(table_name,query,value):\n\n query_entries = []\n conn = sqlite3.connect(grasspi_config.cfg.db_file)\n conn.text_factory = str\n c = conn.cursor()\n val = \"SELECT * FROM \" + table_name + ' WHERE '+ query +' = '+\"'\" + value +\"'\"\n for row in c.execute(val):\n 
query_entries.append(row)\n c.close()\n return query_entries", "def query_generic_table(self, table_name):\n\n query = \"select * from {}\"\n try:\n self.dbCursor.execute(query.format(table_name))\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n finally:\n return self.dbCursor.fetchall()", "def get_users_from_table():\n # Connect to database\n conn = psycopg2.connect(DATABASE_URL, sslmode='require')\n # Open a cursor to perform db operations\n cur = conn.cursor()\n # Query the table\n cur.execute(\"\"\"\n SELECT *\n FROM test \n ;\n \"\"\"\n )\n rows = cur.fetchall()\n # Commit and close connection\n conn.commit()\n cur.close()\n conn.close()\n return rows", "def select_form_table(conn , quary):\n cur = conn.cursor()\n cur.execute(quary)\n\n rows = cur.fetchall()\n\n for row in rows:\n print(row)", "def selectAs(conn):\n start = datetime.now()\n conn = sqlite3.connect('db.sqlite3')\n cursor = conn.execute(\"SELECT * from randomData where col0 like 'a%'\")\n #for row in cursor:\n # for entry in row:\n # print(entry)\n conn.close()\n end = datetime.now()\n return end - start", "def select_entities(table):\n with db.connect() as conn:\n order_field = getattr(\n table.c,\n flask.request.args.get('order_by', list(table.c)[0].name)\n )\n order_fn = db.order_directions[flask.request.args.get('order_direction', 'asc')]\n order_by = order_fn(order_field)\n offset = int(flask.request.args.get('offset', 0))\n limit = int(flask.request.args.get('limit', 10))\n return conn.execute(\n sa.select([table])\n .limit(limit)\n .offset(offset)\n .order_by(order_by)\n ).fetchall()", "def select_all_students(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM student\")\n\n rows = cur.fetchall()\n\n return rows # return the rows", "def _queryFetchOne(cols, tables, params):\n try:\n db = sqlite3.connect(DBPATH)\n cursor = db.cursor()\n\n cursor.execute('SELECT ' + cols\n + ' FROM ' + tables\n + ' WHERE ' + params\n )\n objectData = cursor.fetchone()\n\n except Exception as e:\n db.rollback()\n raise e\n\n finally:\n db.close()\n\n return objectData", "def selectOpt(self, sql): # select\n # apply connection rescource\n dbp_opt = dbPool()\n results = dbp_opt.opSelect(sql)\n # release connection rescource\n dbp_opt.dispose()\n return results", "def select_execute(self, where_condition=None):\n if where_condition:\n data_from_db = self.execute(query=self.db_select_schema.format(self.table_name), data=where_condition)\n else:\n data_from_db = self.execute(query=self.db_select_schema.format(self.table_name))\n return data_from_db", "def select_all(db, tableName):\r\n try:\r\n c = db.cursor()\r\n c.execute(\"SELECT * FROM \" + tableName)\r\n print json.dumps(c.fetchall())\r\n except Error as e:\r\n print(e)", "def get_all(self, table, discard=None, condition=None):\n logging.info(f'Getting all data from `{table}`')\n \n if discard is not None:\n logging.info(f'Discarding columns `{discard}`')\n columns = list(self.execute_default_index(f'SHOW COLUMNS FROM `{table}`',).Field)\n columns = [col for col in columns if col not in discard]\n columns_str = json.dumps(columns).replace(\"'\",'`').replace('\"','`')[1:-1]\n return self.execute(f'SELECT {columns_str} FROM `{table}`')\n\n if isinstance(condition, dict):\n where_clause = []\n where_value_list = []\n for where_column, where_value in condition.items():\n where_clause.append(f'`{where_column}`=%s')\n where_value_list.append(where_value)\n where_clause_string = ' AND '.join(where_clause)\n return self.execute(f'SELECT * FROM `{table}` WHERE 
{where_clause_string}', params=where_value_list)\n\n return self.execute(f'SELECT * FROM `{table}`')", "def query_table(cursor, t_name, query, cnx):\n try:\n df=pd.read_sql(query, cnx)\n print(df)\n return df\n except mysql.connector.Error as err:\n if err.errno == 1051:\n print(f\"Cant read '{t_name}', table D.N.E, moving on...\")\n pass\n else:\n print(err.msg + \".\")\n exit(1)\n return None", "def dbselect(cxn, query, payload):\n\tcursor = cxn.cursor()\n\tif not payload:\n\t\trows = cursor.execute(query)\n\telse:\n\t\trows = cursor.execute(query,payload)\n\tresults = []\n\tfor row in rows:\n\t\tresults.append(row)\n\tcursor.close()\n\treturn results", "def acquire_only(db,query):\n url = get_connection(db)\n df = pd.read_sql(query, url)\n return df", "def select_all_lines(conn):\n\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM ayasdi_table\")\n\n rows = cur.fetchall()\n\n for row in rows:\n print row", "def select(self):\n connection = sqlite3.connect(DB_FILE)\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM foodbank\")\n return cursor.fetchall()", "def query(sql):\n if (sql is None):\n raise Exception(\"SQL not specified\") \n try:\n database = App.instance().environment.database\n connection = psycopg2.connect(host=database.host, dbname=database.database, \n user=database.user, password=database.password)\n cursor = connection.cursor()\n cursor.execute(sql)\n fields = [ x[0] for x in cursor.description]\n return (fields, cursor.fetchall())\n except(Exception, psycopg2.DatabaseError) as error:\n print(\"Error connecting to database\", error)\n finally:\n if not connection is None:\n cursor.close()\n connection.close()", "def get_rows(self, tbl):\n statmt = \"select * from %s\" % tbl\n self.cur.execute(statmt)\n rows = list(self.cur.fetchall())\n return rows", "def get(table, field, val):\n\n # return session.query(table).filter(getattr(table, field).like(val)).all()\n return session.query(table).filter(getattr(table, field) == val).all()", "def selectData(self, sql: str) -> List:\n try:\n connection = self.connect()\n cursor = connection.cursor() \n data = cursor.execute(sql)\n result = data.fetchall() \n return result\n except Exception as e:\n logging.error(f'{self.cn} Exception: {e}', exc_info=1)\n logging.error(f'{self.cn} SQL: {sql}')", "def select_rows(self, table: str, column: str, where_like: tuple = None, or_like: tuple = None):\r\n\r\n sql_statement = f\"SELECT {column} FROM {table}\"\r\n if where_like: # if parameter passed execute this\r\n sql_statement += f\" WHERE {where_like[0]} like '%{where_like[1]}%'\"\r\n if where_like and or_like: # must have a where like to have an or like\r\n sql_statement += f\" OR {or_like[0]} like '%{or_like[1]}%'\"\r\n sql_statement += \";\" # add the statement closer thingo\r\n try:\r\n rows = self.__cursor(sql_statement, fetch=True)\r\n return rows\r\n except sqlite3.Error as error:\r\n print(\"[!]\", str(error).capitalize())", "def select_sql(command):\n logging.debug(\"Running Select sql \"+str(command))\n try:\n## host, userid, password, database instance\n con = mdb.connect(serverip, username, userpass, schema);\n cursor = con.cursor()\n \n sql = command\n cursor.execute(sql)\n return cursor.fetchall()\n \n con.close()\n\n except mdb.Error, e:\n logger.error(e)", "def get_execute_table(self, limit=None):\n query = self.select_all()\n self.cur.execute(query)\n if limit is None:\n result = self.cur.fetchall()\n else:\n result = self.cur.fetchmany(limit)\n return to_data_frame(result)", "def get_execute_table(self, 
limit=None):\n query = self.select_all()\n self.cur.execute(query)\n if limit is None:\n result = self.cur.fetchall()\n else:\n result = self.cur.fetchmany(limit)\n return to_data_frame(result)", "def read(name, db):\n \n # Make connection with the database\n\tconn = sqlite3.connect(db)\n\tdf = pd.read_sql_query(\"select * from \" + name + ';', conn)\n \n # Print loaded data table name and return DataFrame\n\tprint(name + ': loaded')\n\treturn df", "def select(self, column=None, x=None):\n result = []\n # 1. 判断查询的column在schema的索引下标\n # 2. 遍历数据,返回命中结果\n try:\n for key, record in self.items():\n # 判断select * 的情况\n # key 与 value 要拼接到一起,当作一行数据来看到\n record = record.decode().split(config.TABLE_DATA_SEG)\n record.insert(0, key)\n if not column:\n # 判断 column 相等的情况\n result.append(record)\n else:\n column_index = self.find_column_index(column)\n if x == record[column_index]:\n result.append(record)\n return result\n except Exception as e:\n pass\n return result", "def open_data(table):\n engine = create_engine(myDB, encoding='latin1') \n conn = engine.connect()\n select = conn.execute('select * from ' + table)\n\n df = pd.DataFrame(select.fetchall()) \n df.columns = select.keys()\n\n conn.close()\n return df", "def select_all(self, table_name: str) -> list:\n sql = 'SELECT * FROM ' + table_name\n return self.cursor.execute(sql).fetchall()", "def select_all(self, table):\n select_table = \"SELECT * FROM {} WHERE delete_status = FALSE;\".format(table)\n self.cursor.execute(select_table)\n rows = self.cursor.fetchall()\n return rows", "def _queryFetchMany(cols, tables, params):\n try:\n db = sqlite3.connect(DBPATH)\n cursor = db.cursor()\n\n cursor.execute('SELECT ' + cols\n + ' FROM ' + tables\n + ' WHERE ' + params\n )\n #print(params)\n while True:\n objectList = cursor.fetchmany()\n if objectList == []:\n break\n yield objectList[0]\n\n except Exception as e:\n db.rollback()\n raise e\n\n finally:\n db.close()", "def select_all():\n sql = 'SELECT * FROM dostawy.przesylki'\n rows = DBconnector.fetch_query(sql)\n return _wrap_in_parcel_list(rows)", "def find_one(self,table,field_list,**query_dict):\n start_sql = 'SELECT '\n sql = ''\n query_sql = ''\n for field in field_list: start_sql += field + ',' \n start_sql = start_sql[0:-1] + ' FROM %s WHERE ' % (table)\n try:\n if not query_dict: query_sql = '*'\n else:\n for index in query_dict:\n if not isinstance(query_dict[index],dict): query_sql += \" %s = '%s' and\" % (index,query_dict[index]) \n else: query_sql += \" %s %s '%s' and\" % (index,query_dict[index]['rule'],query_dict[index]['value'])\n sql = (start_sql + query_sql)[0:-3] \n info = self.db.get(sql)\n except Exception,e: self.treat_except(e) \n return info", "def select_table_query(self, repo, table):\n return self.user_con.select_table_query(\n repo_base=self.repo_base, repo=repo, table=table)", "def _get_df_from_db(self, tab_name: str, cols: list or str = \"*\",\n condition: str or None = None, limit: int or None = None):\n cols = ', '.join(cols) if cols != '*' else cols\n sql_query = \"\"\"SELECT {cols} FROM {tab} \"\"\".format(cols=cols, tab=tab_name)\n if condition:\n sql_query += \"\"\"WHERE {cond} \"\"\".format(cond=condition)\n if limit:\n sql_query += \"\"\"LIMIT {l}\"\"\".format(l=limit)\n df = pd.read_sql(sql_query, self.engine)\n return df", "def get_column(col_to_search, value_to_match, col_to_get, table, db_file):\n \n try:\n conn, c = connect_to_db(db_file) \n c.execute('SELECT {cg} FROM {t} WHERE {col}=\"{value}\"'.format(t=safe(table), \n cg=safe(col_to_get), 
col=safe(col_to_search), value=safe(value_to_match)))\n column = c.fetchone()\n conn.close()\n return column\n except Exception as e:\n print(\"Error when trying to fetch row in table\", table, \"in database file\", db_file)\n print(e)\n return None", "def searchAll(name, table, field, goal):\n connection, cursor = DBconnect(name)\n cursor.execute(\"SELECT * FROM \"+table+\" WHERE \"+field+\"=:Id\",{\"Id\": goal})\n result = cursor.fetchall()\n DBdisconnect(connection)\n return result", "def run_select_query(query, args = None):\n cursor = db.get_cursor()\n cursor.execute(query, args)\n return cursor.fetchall()", "def queryTable(self, in_table_name, in_field_name, in_conditions=[]):\n fields = ','.join(in_field_name if type(in_field_name)is list else [])\n query = \"SELECT {} FROM {}\".format(fields, in_table_name)\n cond_list = []\n for c,(cond_field, cond_value) in enumerate(in_conditions):\n condition_string = ' WHERE {}=?' if c == 0 else ' AND {}=?'\n query += condition_string.format(cond_field)\n cond_list.append(cond_value)\n\n result = self.cursor.execute('{};'.format(query), tuple(cond_list))\n return result.fetchall()", "def fetch_all(self, sql):\n result = []\n\n curs = self.q(sql, True)\n cols = curs.column_names\n for row in curs:\n row_result = {}\n for field in cols:\n k = cols.index(field)\n row_result[cols[k]] = row[k]\n #print cols[k], row[k]\n result.append(row_result)\n curs.close()\n return result", "def execute_and_fetch(db, query):\n rows = []\n\n db.query(query)\n res = db.use_result()\n row = res.fetch_row()\n while row != ():\n rows.append(row[0])\n row = res.fetch_row()\n return rows", "def query(statement, con=None, params=None):\n if con is None:\n con = get_connection()\n table = pd.io.sql.read_sql(statement, con, params=params)\n return table", "def get_db_data(selected_columns, app_id, y, m, d):\n # TODO edit string based on what query to run\n query_string = \"select {} from hive.dfs_prod.eo_custom_event where app_id={} and y={} and m={} and d={}\".format(\n ', '.join(selected_columns), app_id, y, m, d)\n return pd.read_sql(query_string, con=dfs)", "def sqlite_query(\n filename,\n database,\n filter=\"1\",\n select=None,\n computed_columns=None,\n column_map=None,\n sort_by=None,\n sort_desc=True,\n limit=None,\n explain=False,\n):\n if os.path.isfile(filename):\n conn = sqlite3.connect(filename)\n else:\n raise Exception(\"file not found: %r\" % filename)\n\n add_python_functions(conn)\n unify_computed_columns(computed_columns)\n\n columns = [\n row[0]\n for row in conn.execute(\"SELECT name FROM pragma_table_info(?);\", (database,))\n ]\n\n if column_map is None:\n metadata = {}\n else:\n for column_name, field_name in column_map.items():\n if field_name in columns:\n columns.remove(field_name)\n columns.append(column_name)\n metadata = {\n key: {\"field_expr\": value, \"field_name\": value}\n for key, value in column_map.items()\n }\n\n metadata.update(\n {\n row[0]: {\"field_expr\": row[0], \"field_name\": row[0]}\n for row in conn.execute(\n \"SELECT name FROM pragma_table_info(?);\", (database,)\n )\n }\n )\n\n select_expr_as = [metadata[column][\"field_name\"] for column in columns]\n databases = [database]\n\n # Side-effects: updates metadata, database, columns, select_expr_as:\n where = update_state(\n computed_columns,\n metadata,\n databases,\n columns,\n select_expr_as,\n filter,\n )\n\n select_fields = [metadata[column][\"field_name\"] for column in columns]\n if sort_by is None:\n order_by = \"\"\n else:\n sort_by_field_name = 
metadata[sort_by][\"field_name\"]\n sort_desc = \"DESC\" if sort_desc else \"ASC\"\n order_by = f\"ORDER BY {sort_by_field_name} {sort_desc}\"\n\n if limit is None:\n limit = \"\"\n else:\n limit = f\"LIMIT {limit}\"\n\n env = {\n \"limit\": limit,\n \"order_by\": order_by,\n \"where\": where,\n \"select_expr_as\": \", \".join(select_expr_as),\n \"select_fields\": \", \".join(select_fields),\n \"databases\": \", \".join(databases),\n }\n select_sql = (\n \"SELECT {select_expr_as} FROM {databases} WHERE {where} {order_by} {limit}\"\n )\n\n select_command = select_sql.format(**env)\n\n if explain:\n yield select_command\n return\n\n for row in conn.execute(select_command):\n data = {name: value for name, value in zip(columns, row)}\n if select is None:\n yield data\n else:\n yield {key: data[key] for key in select}" ]
[ "0.7451726", "0.7248107", "0.69168055", "0.69079506", "0.68438905", "0.6825986", "0.6800241", "0.6792302", "0.6681862", "0.6657107", "0.66489816", "0.6637284", "0.66219336", "0.66154295", "0.654893", "0.6454015", "0.641332", "0.6410601", "0.63980806", "0.63969445", "0.63832796", "0.6362977", "0.63387203", "0.6334186", "0.63319814", "0.6310538", "0.630782", "0.62963694", "0.6267048", "0.6256693", "0.6241022", "0.6225443", "0.62194943", "0.6210153", "0.62033653", "0.61921513", "0.6154428", "0.6124806", "0.6122894", "0.6119272", "0.6085897", "0.6082292", "0.60807663", "0.60807663", "0.6076173", "0.6050452", "0.60431874", "0.60408735", "0.60274", "0.60189295", "0.60170233", "0.601635", "0.60133904", "0.6012826", "0.60104144", "0.60048634", "0.6001343", "0.599165", "0.5987813", "0.5985805", "0.59770626", "0.59752405", "0.5973246", "0.59468746", "0.59447473", "0.59277725", "0.59192747", "0.591919", "0.59184986", "0.58762515", "0.58750093", "0.5864298", "0.58624196", "0.5861058", "0.5857722", "0.58553624", "0.5855197", "0.58542234", "0.58422834", "0.5834623", "0.5834623", "0.5833168", "0.5831818", "0.5816824", "0.580578", "0.58029836", "0.5789765", "0.5783947", "0.5782712", "0.57715017", "0.5770234", "0.5768431", "0.5768249", "0.5759848", "0.5757067", "0.57559973", "0.57515776", "0.5749308", "0.57248974", "0.57217604" ]
0.6876609
4
Return DB API Connection
def _db_connection(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_connection(self):\n\n\t\treturn dbapi.connect(credentials.SERVER,\\\n\t\t\t\t\t\t\t credentials.PORT,\\\n\t\t\t\t\t\t\t credentials.USER,\\\n\t\t\t\t\t\t\t credentials.PASSWORD)", "def get_dbapi20_connection ():\n return cherrypy.engine.pool.connect ()", "def get_connection():\n con = psycopg2.connect(**DB_CONFIG)\n return con", "async def _get_db_connection():\n return await gino.Gino(get_database_dsn())", "def getconn(self):\n #hdbport = int('3%s15' % Settings.hdbinstancenum)\n con = dbapi.connect(address = self.host, \\\n port = self.port, \\\n user = self.username, \\\n password = self.password, \\\n autocommit = True)\n if self.schema:\n cur = con.cursor()\n try:\n cur.execute('ALTER SESSION SET CURRENT_SCHEMA = %s' % self.schema)\n return con\n except dbapi.Error, err:\n cur.close()\n con.close()\n cur = None\n raise err\n finally:\n if cur:\n cur.close()\n else:\n return con", "def get_connection(db_url=None):\n return engine(db_url).connect()", "def __connect(self):\n session, metadata, connection = db(dbhost=getattr(self, \"host\"),\n dbuser=getattr(self, \"user\"),\n dbpass=getattr(self, \"password\"),\n dbname=getattr(self, \"dbname\"))\n return session, metadata, connection", "def get_db():\n if not hasattr(g, 'db_connection'):\n g.db_connection = connect_db()\n return g.db_connection", "def obtainDatabaseConnection(self):\n\t\tself.databaseConnector = DatabaseConnector()", "def getConnection(self):\n if (not self.initialized):\n logging.error(\"Module is not initialized\")\n \n conn_options = {\n 'user': self.user,\n 'password' : self.password,\n 'host' : self.host,\n 'port' : self.port,\n 'database' : self.dbname,\n 'raise_on_warnings': True\n }\n db = mysql.connector.connect(**conn_options)\n return db", "def get_connection(cls):\n return cls.database.connection", "def get_connection(self):\n if self.__connection is None:\n from pymongo import MongoClient\n from ir_config import IRConfig\n self.__connection = MongoClient(\n IRConfig.get_instance().get('db_host', self.__default_host), \n IRConfig.get_instance().get_int('db_port', self.__default_port))\n return self.__connection", "def create_connection():\r\n try:\r\n conn = sq.connect(DBClass.db_name)\r\n except sq.Error as e:\r\n raise e\r\n \r\n return conn", "def get_db(self):\n self.logger.info('in get_db()')\n try:\n return self.client[self.db_name]\n except Exception as e:\n self.logger.error(f'Error occurred while getting client {e}')", "def get_connection():\n\n return MongoClientManager().client.__getattr__(MONGODB_SETTINGS['db'])", "def __get_connection():\n # 根据配置文件创建连接池\n if not Mysql.__mysql_pool:\n Mysql.__mysql_pool = PooledDB(\n creator=MySQLdb,\n use_unicode=False,\n cursorclass=DictCursor,\n db=sqlconf.MysqlConfig['db'],\n host=sqlconf.MysqlConfig['host'],\n port=sqlconf.MysqlConfig['port'],\n user=sqlconf.MysqlConfig['user'],\n passwd=sqlconf.MysqlConfig['passwd'],\n charset=sqlconf.MysqlConfig['charset'],\n mincached=sqlconf.MysqlConfig['mincached'],\n maxcached=sqlconf.MysqlConfig['maxcached'],\n maxconnections=sqlconf.MysqlConfig['maxconnections'])\n # 返回连接池中连接对象\n return Mysql.__mysql_pool.connection()", "async def connection():\n return await r.connect(db='main_db')", "def getDbConnection(self):\n return self._oDb;", "def connect(self):\n\n self.logger.debug(\"creating DB connection\")\n conn = sql.connect(**self.connection_arguments)\n self.logger.debug(\"DB connection ready: %r\", conn.get_host_info())\n return conn", "def get_db():\n # when used with a `file` object, `with` ensures it 
gets closed\n # pylint: disable=no-member\n with file('config.json') as config_file:\n config = json.load(config_file)\n return cx_Oracle.connect(config['user'], config['pass'], config['host'])", "def get_db():\n # when used with a `file` object, `with` ensures it gets closed\n # pylint: disable=no-member\n with file('config.json') as config_file:\n config = json.load(config_file)\n return cx_Oracle.connect(config['user'], config['pass'], config['host'])", "def get_connection(self):\n import psycopg2 as dbapi\n self.get_input()\n conn = dbapi.connect(host=self.opts[\"host\"],\n port=int(self.opts[\"port\"]),\n user=self.opts[\"user\"],\n password=self.opts[\"password\"],\n database=self.opts[\"database\"])\n encoding = ENCODING.lower()\n if self.script.encoding:\n encoding = self.script.encoding.lower()\n encoding_lookup = {'iso-8859-1': 'Latin1', 'latin-1': 'Latin1', 'utf-8': 'UTF8'}\n db_encoding = encoding_lookup.get(encoding)\n conn.set_client_encoding(db_encoding)\n return conn", "def connect_to_db(self):\n\t\t# connection = psycopg2.connect(database=config.database, user=config.user,password = config.password)\n\t\tconnection = psycopg2.connect(database=config.database, user=config.user)\n\t\treturn connection", "def connect(self, dbapi_connection, connection_record):", "def _connect(self):\r\n if not self._db:\r\n import boto\r\n sdb = boto.connect_sdb()\r\n if not self.domain_name:\r\n self.domain_name = boto.config.get(\"DB\", \"sequence_db\", boto.config.get(\"DB\", \"db_name\", \"default\"))\r\n try:\r\n self._db = sdb.get_domain(self.domain_name)\r\n except SDBResponseError, e:\r\n if e.status == 400:\r\n self._db = sdb.create_domain(self.domain_name)\r\n else:\r\n raise\r\n return self._db", "def get_conn(self):\n conn = sqlite3.connect(self.uri)\n conn.row_factory = sqlite3.Row\n return conn", "def _get_db_connection(name='ace'):\n\n if name is None:\n name = 'ace'\n\n #if _cached_db_connections_enabled():\n #return _get_cached_db_connection(name)\n\n config_section = 'ace'\n if name:\n config_section = 'database_{}'.format(name)\n\n if config_section not in saq.CONFIG:\n raise ValueError(\"invalid database {}\".format(name))\n\n _section = saq.CONFIG[config_section]\n kwargs = {\n 'db': _section['database'],\n 'user': _section['username'],\n 'passwd': _section['password'],\n 'charset': 'utf8'\n }\n\n if 'hostname' in _section:\n kwargs['host'] = _section['hostname']\n\n if 'port' in _section:\n kwargs['port'] = _section.getint('port')\n \n if 'unix_socket' in _section:\n kwargs['unix_socket'] = _section['unix_socket']\n\n if 'ssl_ca' in _section or 'ssl_key' in _section or 'ssl_cert' in _section:\n kwargs['ssl'] = {}\n\n if 'ssl_ca' in _section and _section['ssl_ca']:\n path = abs_path(_section['ssl_ca'])\n if not os.path.exists(path):\n logging.error(\"ssl_ca file {} does not exist (specified in {})\".format(path, config_section))\n else:\n kwargs['ssl']['ca'] = path\n\n if 'ssl_key' in _section and _section['ssl_key']:\n path = abs_path(_section['ssl_key'])\n if not os.path.exists(path):\n logging.error(\"ssl_key file {} does not exist (specified in {})\".format(path, config_section))\n else:\n kwargs['ssl']['key'] = path\n\n if 'ssl_cert' in _section and _section['ssl_cert']:\n path = _section['ssl_cert']\n if not os.path.exists(path):\n logging.error(\"ssl_cert file {} does not exist (specified in {})\".format(path, config_section))\n else:\n kwargs['ssl']['cert'] = path\n\n logging.debug(\"opening database connection {}\".format(name))\n return 
pymysql.connect(**kwargs)\n #return pymysql.connect(host=_section['hostname'] if 'hostname' in _section else None,\n #port=3306 if 'port' not in _section else _section.getint('port'),\n #unix_socket=_section['unix_socket'] if 'unix_socket' in _section else None,\n #db=_section['database'],\n #user=_section['username'],\n #passwd=_section['password'],\n #charset='utf8')", "def api_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_URI).get_database()", "def get_database() -> Database:\n db_config = DatabaseConfig(DB_NAME)\n return connect_to_db(db_config)", "def getDbConnection(self, **kwargs):\r\n \r\n con = sql.connect(self._filename, **kwargs)\r\n con.row_factory = sql.Row\r\n return con", "def get_connection():\n\t# flask.g documentation: http://flask.pocoo.org/docs/0.12/api/#flask.g\n\ttry:\n\t\tconn = flask.g._database_connection\n\texcept AttributeError:\n\t\tconn = flask.g._database_connection = sqlite3.connect(config.PATH_DATABASE,\n\t\t\t\tdetect_types=sqlite3.PARSE_DECLTYPES) # allows storing datetime, etc.\n\t\tconn.row_factory = sqlite3.Row\n\treturn conn", "def conn(self):\n try:\n if self._db is None:\n self._db = sqlc.connect(user=self.login,\n password=self.passwd,\n host=self.host,\n database=self.database)\n\n except sqlc.Error as e:\n print (\"MySQL exception #{0} getting connection: {1}\".format(e.errno, e.msg))\n if e.errno == 2003:\n exit(-1)\n except Exception as e:\n print (\"Couldn't get connection property: {0}\".format(e.message))\n finally:\n return self._db", "def get_connection(self):\n\t\tfrom pymongo import MongoClient\n\n\t\tif self._connection is None:\n\t\t\tself._connection = MongoClient(host=self.url, max_pool_size=10)\n\n\t\treturn self._connection", "def connect_db():\n return hc_db.HCDB(app.config[\"DATABASE\"])", "def get_connection():\n connection = mdb.connect(host='localhost',\n user='root',\n passwd='password',\n database='pur_beurre')\n return connection", "def db_connection():\n global dbconnection\n try:\n conn = dbconnection\n except:\n dbconnection = psycopg2.connect(user = dbuser,\n password = dbpass,\n host = dbserver,\n port = \"5432\",\n database = dbname)\n conn = dbconnection\n return conn", "def connection_to_db():\n try:\n credentials = parse_url_db(URL)\n connection = psycopg2.connect(**credentials)\n\n return connection\n except psycopg2.OperationalError:\n raise DatabaseConnectionError('No connection!')", "def connect(self):\n if self.connection is not None:\n logger.info(\" connection: %s \" % (self.connection is not None))\n return self.connection\n try:\n self.connection = DataPostgres.connect(**self.options)\n except Exception as e:\n logger.critical(\"Unable to connect to DB: {0}\".format(e.message))\n raise\n\n return self.connection", "def get_connection(self):\n from pymongo.connection import Connection\n \n if self._connection is None:\n self._connection = Connection(self.host, self.port)\n return self._connection", "def __enter__(self) -> cx_Oracle.connect:\n self.db_connection = cx_Oracle.connect(\n self.config['oracle']['ora_user'],\n self.config['oracle']['ora_pass'],\n f\"{self.config['oracle']['ora_host']}/{self.config['oracle']['ora_sid']}\")\n return self.db_connection", "def get_db():\n if ( g.get( 'db' ) is None ):\n g.db = connect_db()\n\n return g.db.connect()", "def _connect(self):\n conn = pymongo.MongoClient(self._config.get('mongodb', 'host'), self._config.getint('mongodb', 'port'))\n db = conn[self._config.get('mongodb', 'db')]\n return db[self._config.get('mongodb', 'collection')]", "async def 
get(self):\n if self._connect_kwargs == None:\n raise IllegalAccessError(\"DB connection parameters not set yet\")\n\n if not hasattr(self._tl, \"conn\"):\n self._tl.conn = await r.connect(**self._connect_kwargs)\n\n return self._tl.conn", "def get_conn(dbfile, timeout=5000):\n \n conn = connect(dbfile, timeout=timeout) \n conn.row_factory = Row \n return conn", "def get_connection(self):\n return self.connection", "def get_connection(self):\n return self.connection", "def connect(self, host, port, db):\r\n params = self.make_connection_params(host, port, db)\r\n return self.get_connection(params)", "def connect(self):\n if self.connection is not None:\n logger.info(\" connection: %s \" % (self.connection is not None))\n if not self.connection.opened():\n logger.info(\"connection is closed\")\n return self.reconect()\n\n if self.connection.opened():\n return self.connection\n try:\n self.connection = connect(**self.options)\n except Exception as e:\n logger.critical(\"Unable to connect to DB: {0}\".format(e.message))\n raise\n\n return self.connection", "def connectToDatabase():\n\n #Connect to Database With Environment Values\n conn = pymysql.connect(\n host=os.environ.get(\"CHRONICLER_DATABASE_HOST\"),\n user=os.environ.get(\"CHRONICLER_DATABASE_USER\"),\n passwd=os.environ.get(\"CHRONICLER_DATABASE_PASSWORD\"),\n db=os.environ.get(\"CHRONICLER_DATABASE_DB\"),\n\t\t\t\tcharset=\"utf8mb4\")\n\n #Return the Connection\n return conn", "def openConnection():\n connection = nj.GraphDatabase.driver(\n uri=URI, auth=nj.basic_auth(USER, PASSWORD))\n return connection", "def OpenConnection(self):\r\n # Open connection to database. If the database is not accessible,\r\n # throw a mariadb exception.\r\n try: \r\n Connection = mariadb.connect(\r\n user = self.Name,\r\n host = self.Host,\r\n password= self.Password,\r\n port=3306)\r\n # Catch mariadb exception.\r\n except mariadb.Error as e:\r\n print('Unable open connection {}.'.format(e))\r\n\r\n return Connection", "def get_db_connection():\n db = sqlite3.connect(config.PERSISTENCE_LOCATION, check_same_thread=False)\n db.isolation_level = None\n db.row_factory = sqlite3.Row\n return db", "def get_conn(cls):\n\n if not cls.conn or not cls.conn.open:\n cls.connect()\n\n try:\n cls.conn.ping() # ping to test if the current conn is working\n except MySQLdb.OperationalError:\n cls.connect()\n\n return cls.conn", "def _getConnection():\n db = sqlite.connect(db_path)\n db.row_factory = sqlite.Row\n cursor = db.cursor()\n return cursor, db", "def sql_connection():\n return sqlite3.connect('database.db')", "def get_connection(url):\n conn = psycopg2.connect(url)\n return conn", "def _get_connection(reconnect=False):\n global _connection\n identity = get_identity()\n # Connect to the database if not already connected\n if _connection.get(identity) is None or reconnect:\n try:\n _connection[identity] = Connection(**_connection_settings)\n except Exception, e:\n raise ConnectionError(\"Cannot connect to the database:\\n%s\" % e)\n return _connection[identity]", "def _get_db(reconnect=False):\n global _db, _connection\n identity = get_identity()\n # Connect if not already connected\n if _connection.get(identity) is None or reconnect:\n _connection[identity] = _get_connection(reconnect=reconnect)\n\n if _db.get(identity) is None or reconnect:\n # _db_name will be None if the user hasn't called connect()\n if _db_name is None:\n raise ConnectionError('Not connected to the database')\n\n # Get DB from current connection and authenticate if necessary\n 
_db[identity] = _connection[identity][_db_name]\n if _db_username and _db_password:\n _db[identity].authenticate(_db_username, _db_password)\n\n return _db[identity]", "def db_connection(self):\n try:\n self.connection = connect(host=self.host, user=self.user, password = self.password, db = self.db, cursorclass = self.cursor)\n except MySQLError:\n print(\"DB Error\")", "def access_db(self):\n try:\n driver = GraphDatabase.driver(self.url, auth=(self.username, self.password))\n except Exception:\n raise ConnectionError\n return driver", "def get_db_connection(uri):\n client = pymongo.MongoClient(uri)\n return client.cryptongo", "def get_db():\n conn = g.get('sqlite_db', None)\n if conn is None:\n conn = g.sqlite_db = connect_db()\n return conn", "def connect_db():\n db = psycopg2.connect(\n dbname=app.config['DBNAME'],\n user=app.config['DBUSER'],\n password=app.config['DBPASSWORD'],\n host=app.config['DBHOST'],\n port=app.config['DBPORT']\n )\n return db", "def __get_database_connection(self, reuse=True):\n if not self.__database_connection or not reuse:\n if self.__database_connection:\n self.__database_connection.close()\n self.__database_connection = None\n\n self.__database_connection = http.client.HTTPConnection(self.__DATABASE_HOST,\n port=self.__DATABASE_PORT,\n timeout=self.__TIMEOUT)\n\n return self.__database_connection", "def createConnection(self):\r\n conn_string = \"host='{}' dbname='{}' user='{}' password='{}' port={}\".format(\r\n self.host, self.database, self.user, self.password, self.port)\r\n return psycopg2.connect(conn_string)", "def logic_db_connection():\n try:\n boto_session = boto3.Session(profile_name='loidsig')\n except:\n boto_session = boto3.Session()\n sm_client = boto_session.client(\n service_name='secretsmanager',\n region_name='us-east-1',\n endpoint_url='https://secretsmanager.us-east-1.amazonaws.com'\n )\n get_secret_value_response = sm_client.get_secret_value(SecretId='Loidsig_DB')\n cred_dict = ast.literal_eval(get_secret_value_response['SecretString'])\n db_user, db_pass = cred_dict['username'], cred_dict['password']\n db_host, db_port, db_name = cred_dict['host'], cred_dict['port'], cred_dict['dbname']\n\n try:\n conn = psycopg2.connect(\n host=db_host,\n port=db_port,\n user=db_user,\n password=db_pass,\n database=db_name,\n )\n except Exception as e:\n print(\"Unable to connect to postgres! 
Error: {}\".format(e))\n raise\n return conn", "def get_connection():\n conn = psycopg2.connect(\n host=\"ec2-174-129-229-162.compute-1.amazonaws.com\",\n database=\"d3fkgbedn66ll5\",\n user=\"vsimxlvondhgoo\",\n password=\"7402a95816c42b475ae285eb18918c56c9a012e96a85aafce983ea1618010511\",\n port=5432\n )\n return conn", "def _get_db_connection():\n conn = sqlite3.connect(str(DB_FILE_PATH))\n c = conn.cursor()\n\n return conn, c", "def db_connect():\n if 'db' not in g:\n g.db = sql.connect(current_app.config[\"DATABASE\"], detect_types=sql.PARSE_DECLTYPES)\n g.db.row_factory = sql.Row\n return g.db", "def getDB(self):\r\n return MySQLdb.connect(user=\"root\", passwd=\"asdf\", db=\"cloudchatdb\", connect_timeout=30, charset=\"utf8\")", "def get_sql_conn():\r\n\r\n # get config information\r\n config = configparser.ConfigParser()\r\n config.sections()\r\n config.read('../config.ini')\r\n dbname = config['PostgresDB']['db_name']\r\n host = config['PostgresDB']['host']\r\n port = config['PostgresDB']['port']\r\n user = config['PostgresDB']['user']\r\n pw = config['PostgresDB']['pw']\r\n\r\n # connect to the database\r\n conn = psycopg2.connect(host=host, port=port, database=dbname,\r\n user=user, password=pw)\r\n return conn", "def db_connect():\n client = pymongo.MongoClient(get_project_settings().get(\"MONGO_URI\"))\n db = client.vehicles\n\n return db", "def db_connect():\n DB_SETTINGS = app.config['DB_SETTINGS']\n engine = create_engine(URL(**DB_SETTINGS))\n connection = engine.connect()\n return connection", "def openDb() -> Union[Union[CMySQLConnection, MySQLConnection], Any]:\n connectService: Union[Union[CMySQLConnection, MySQLConnection], Any] = mysql.connector.connect(\n host=\"\",\n user=\"\",\n passwd=\"\",\n database=\"ServiceDirectoryOpenActiveAggregated\")\n return connectService", "async def database():\n db = await Database.connect_pool()\n return db", "def get_db():\n if not hasattr(g, \"sql_db\"):\n g.sql_db = connect_db()\n return g.sql_db", "def get_db():\n if not hasattr(g, 'db_conn'):\n g.db_conn = sqlite3.connect(\"pypatch.sqlite\")\n g.db_conn.row_factory = sqlite3.Row\n \n return g.db_conn", "def dictionary_conn(self):\n\n if not hasattr(self, \"_dictionary_conn\"):\n opts = dict(database=\"pdq_dictionaries\", tier=self.tier)\n self._dictionary_conn = db.connect(**opts)\n return self._dictionary_conn", "def openConn():\n\t#abstract the user,passwd,db,host fields in a config file next\n\tconnection = sql.connect(user=\"root\",passwd=\"sct\",db=\"sct\")\n\tcursor = connection.cursor()\n\treturn connection,cursor", "def get_db_connection (dbname, username,\n password=None,\n host='/var/run/postgresql'):\n\n con = psycopg2.connect(\n database=dbname, user=username, password=password,\n host='/var/run/postgresql')\n return (con)", "def create_connection(self):\n try:\n conn = sqlite3.connect(self.db_path)\n return conn\n except Error as e:\n print(e)\n raise e", "def get_connection(**kwargs):\n try:\n logging.debug(\"Connecting to mapd db...\")\n con = pymapd.connect(\n user=kwargs[\"db_user\"],\n password=kwargs[\"db_passwd\"],\n host=kwargs[\"db_server\"],\n port=kwargs[\"db_port\"],\n dbname=kwargs[\"db_name\"],\n )\n logging.info(\"Succesfully connected to mapd db\")\n return con\n except (pymapd.exceptions.OperationalError, pymapd.exceptions.Error):\n logging.exception(\"Error connecting to database.\")\n return False", "def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))", "def db_connect():\n return 
create_engine(get_project_settings().get(\"CONNECTION_STRING\"))", "def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))", "def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))", "def get_connection(self):\n return self.application.get_connection()", "def initDbConnection():\n try:\n db_user = os.environ[\"BDB_DB_USER\"]\n db_pass = os.environ[\"BDB_DB_PASS\"]\n db_name = os.environ[\"BDB_DB_NAME\"]\n db_host = os.environ[\"BDB_DB_HOST\"]\n db_port = os.environ[\"BDB_DB_PORT\"]\n except Exception as e:\n logger.critical(\"could not parse environment for DB connection parameters\")\n return ERR\n db_config = {\n \"pool_size\": 5,\n \"max_overflow\": 2,\n \"pool_timeout\": 30,\n \"pool_recycle\": 1800\n }\n logger.info(\"attempting MySQL connection to %s:%s\" % (db_host, db_port))\n logger.debug(\"connection parameters: DB name %s, DB user %s\" % (db_name, db_user))\n logger.debug(\"connection config: %s\" % db_config)\n try:\n dbConn = sqlalchemy.create_engine(\n sqlalchemy.engine.url.URL.create(\n drivername=\"mysql+pymysql\",\n username=db_user,\n password=db_pass,\n host=db_host,\n port=db_port,\n database=db_name\n ),\n **db_config\n )\n except Exception as e:\n logger.critical(\"could not connect to DB - %s\" % e)\n return ERR\n logger.info(\"connection established to MySQL server at %s:%s\" % (db_host, db_port))\n return dbConn", "def __GetConnection(self):\n\n self.conn = httplib.HTTPConnection(BLIP_API_URL)\n return self.conn", "def db_connection(conf_dict):\n log.debug(conf_dict)\n client = ArangoClient(hosts=conf_dict['hosts'])\n db = client.db(conf_dict['database'],\n username=conf_dict['username'],\n password=conf_dict['password'])\n\n return client, db", "def get_connection():\n return {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ.get('database'),\n 'USER': os.environ.get('user'),\n 'PASSWORD': os.environ.get('password'),\n 'HOST': os.environ.get('host'),\n 'PORT': '5432',\n }\n }", "def get_db_conn(db_config):\n return psycopg2.connect(\n \"dbname='{}' user='{}' host='{}'\".format(\n db_config[\"name\"],\n db_config[\"user\"],\n db_config[\"host\"]\n )\n )", "def db( self ):\n #TODO: backoff\n if self._db is None:\n self._db = self._GetNewConnection()\n try:\n self._db.isolation_level\n except (OperationalError, InterfaceError, InternalError):\n l_logger.exception(\"Looks like the db is not responding. 
Trying to recover.\")\n try:\n self._db.close()\n except ProgrammingError:\n l_logger.info(\"Database is closed, attempting to recover\")\n self._db = self._GetNewConnection()\n return self._db", "def connect_to_db(cls):\n conn = psycopg2.connect(os.environ['DATABASE_URL'])\n conn.autocommit = True\n cursor = conn.cursor()\n\n return cursor", "def _access(option=True):\n return connect(\n dbname=os.environ[\"DB_NAME\"],\n user=os.environ[\"DB_USER\"],\n password=os.environ[\"DB_PASS\"],\n host=os.environ[\"DB_HOST\"],\n port=os.environ[\"DB_PORT\"]\n )", "def connection(self):\n return self.get_connection()", "def db_init(self):\n if self.platform == 'Linux':\n print(self.db)\n conn = sqlite3.connect(self.db)\n # if debug\n conn.set_trace_callback(print)\n # converter = Converter(self.db)\n # converter.mdb2sqlite()\n # return converter.conn\n return conn, conn.cursor()\n elif self.platform == 'Windows':\n # Todo: update database config.\n user = ''\n password = ''\n odbc_conn_str = \"DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};\" \\\n \"DBQ=%s;UID=%s;PWD=%s\" % (self.db, user, password)\n import pypyodbc\n conn = pypyodbc.connect(odbc_conn_str)\n return conn, conn.cursor()\n else:\n raise OSError('Unsupported OS')", "def _get_connection(self) -> Connection:\n # TODO(101) is there a problem with having just one db connection?\n # Will this cause bugs with failed commits?\n curr_thread = threading.get_ident()\n if curr_thread not in self.conn or self.conn[curr_thread] is None:\n try:\n conn = sqlite3.connect(self.db_path)\n conn.row_factory = StringIDRow\n self.conn[curr_thread] = conn\n except sqlite3.Error as e:\n raise MephistoDBException(e)\n return self.conn[curr_thread]", "def conn(self):\n conn = self.engine.connect()\n return conn", "def _get_db(self):\n return DB(\n ClientStorage.ClientStorage((self.server, self.port))\n )" ]
[ "0.8348434", "0.82954013", "0.78892154", "0.7847629", "0.77854586", "0.7766536", "0.7627953", "0.75845844", "0.75740683", "0.7564091", "0.7547969", "0.7504719", "0.7473917", "0.7430099", "0.7425724", "0.74137056", "0.74033695", "0.7394597", "0.73863715", "0.73794466", "0.73794466", "0.73514515", "0.7304877", "0.7296706", "0.72959566", "0.7293512", "0.7288035", "0.72827417", "0.7275014", "0.7269802", "0.72678494", "0.7257425", "0.72485286", "0.72389", "0.72344476", "0.7232432", "0.72315717", "0.72105515", "0.7198567", "0.7192802", "0.7184665", "0.7180624", "0.71558034", "0.71413124", "0.71227956", "0.71227956", "0.7121261", "0.71189946", "0.7118763", "0.71144027", "0.7109832", "0.7093528", "0.7086861", "0.7084545", "0.7083301", "0.7081827", "0.70807934", "0.7071172", "0.7066649", "0.7058303", "0.7056713", "0.70500976", "0.7044123", "0.7041425", "0.70363677", "0.70350516", "0.70296127", "0.70283616", "0.70217395", "0.7000779", "0.6999308", "0.6997801", "0.69954497", "0.69929576", "0.6990373", "0.69789624", "0.6978742", "0.69756466", "0.6973216", "0.6971704", "0.696576", "0.6964465", "0.69616354", "0.69616354", "0.69616354", "0.69616354", "0.69565123", "0.6949874", "0.69493115", "0.6949051", "0.69453454", "0.69423467", "0.6941779", "0.69404817", "0.69360566", "0.6934809", "0.6932514", "0.6931995", "0.69286495", "0.6921612" ]
0.74661154
13
Converts a tuple to its string representation. Uses different separators (;, /, |) for different depths of the representation.
def tuple_to_string(tuptup): def join_deepest(tup, sep=';'): """ Recursive function to create the string representation for the deepest level of the tuptup list. Parameters ---------- tup : object Element to join if list or list of lists. sep : str, optional Separation character to join the list elements by. Returns ------- object List containing joined string in max depth. Str if input depth = 1. """ if not isinstance(tup, list): return tup if not isinstance(tup[0], list): return sep.join(tup) for idx, val in enumerate(tup): tup[idx] = join_deepest(val, sep) return tup tup = copy.deepcopy(tuptup) tup = join_deepest(tup, ';') tup = join_deepest(tup, '/') tup = join_deepest(tup, '|') return tup
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _tupstr(tuple_):\n return ', '.join(list(map(str, tuple_)))", "def _tuple_to_str(self, the_tuple):\r\n ret = \"\"\r\n for item in the_tuple:\r\n ret += (\" \" + str(item))\r\n return ret[1:]", "def tupleStrFormat(tupl):\n string = \"this is a tuple (\"\n for element in tupl:\n string += str(element) + \", \"\n string += \")\"\n return string", "def deg_tuple_to_str(tup):\n if len(tup) == 0:\n return \"()\"\n str = '('\n for x in tup:\n str += \"{0:.2f}, \".format(x)\n str = str[:-2] + ')'\n return str", "def str_tuple(item):\n return \"{}:{}\".format(item[0], item[1])", "def tuple2str(tagged_token, sep='/'):\n word, tag = tagged_token\n if tag is None:\n return word\n else:\n assert sep not in tag, 'tag may not contain sep!'\n return '%s%s%s' % (word, sep, tag)", "def strtuple(iterable): \n string = ''\n function = type(strtuple)\n for i in iterable:\n if isinstance(i , function):\n string += i.__name__ + ', '\n else:\n string += str(i) + ', '\n string = string.rstrip(', ')\n string = '(' + string + ')'\n return string", "def format_tuple(data):\n return \",\".join([str(item) for item in data])", "def tuple_to_string(letter_word_pair):\n letter, word = letter_word_pair\n return '{letter}: {word}'.format(letter=letter, word=word)", "def delimit_tuple(tuple_: tuple, delimiter=\",\"):\n if not type(tuple_) == tuple:\n raise TypeError(\n \"Expected a list or tuple, \" \"but got {}\".format(type(tuple_).__name__)\n )\n return delimiter.join(map(str, tuple_))", "def tuple_to_string(transcript_info):\n\n return \"\\t\".join(transcript_info.data_attributes())", "def tupleToString(vector):\n string = '[%d](' % len(vector)\n for x in vector[:-1]:\n string += '%f,' % x\n string += '%f)' % vector[-1]\n return string", "def serialize_tuple(self, obj):\n return '(' + ''.join([self.serialize(i) for i in obj]) + 't'", "def main():\n sampleTuple = (100, 200, 300)\n print(tupleStrFormat(sampleTuple))", "def tuple(self, arg: SeField[Any]) -> str:\n if is_bare_tuple(arg.type):\n return arg.varname\n elif is_variable_tuple(arg.type):\n earg = arg[0]\n earg.name = \"v\"\n return f\"tuple({self.render(earg)} for v in {arg.varname})\"\n else:\n rvalues = []\n for i, _ in enumerate(type_args(arg.type)):\n r = arg[i]\n r.name = f\"{arg.varname}[{i}]\"\n rvalues.append(self.render(r))\n return f\"({', '.join(rvalues)},)\" # trailing , is required for single element tuples", "def to_string_tuple(self):\n return \" \".join(self._left), \" \".join(self._right)", "def build_tuple(self, t):\n comma = self.art_type([self.string_type(', ')],\n baseline=0,\n breakpoints=[1])\n repr_elems = self.concatenate(t, comma)\n return self.build_container(\n repr_elems, self.left_parenthesis, self.right_parenthesis)", "def gen_type_tuple_string(self, name, node):\n return \"('{}', {})\".format(name, self.gen_type_string(node))", "def prettyTuple(app, tup, seq=None, _asString=False, item=RESULT, **options):\n\n display = app.display\n\n if not display.check(\"prettyTuple\", options):\n return \"\"\n\n dContext = display.distill(options)\n colorMap = dContext.colorMap\n highlights = dContext.highlights\n condenseType = dContext.condenseType\n condensed = dContext.condensed\n\n _browse = app._browse\n inNb = app.inNb\n asString = _browse or _asString\n\n if len(tup) == 0:\n if asString:\n return \"\"\n else:\n return\n\n api = app.api\n N = api.N\n sortKey = N.sortKey\n\n containers = {tup[0]} if condensed else condenseSet(api, tup, condenseType)\n highlights = getTupleHighlights(api, tup, highlights, colorMap, condenseType)\n 
seqRep = \"\" if seq is None else f\" <i>{seq}</i>\"\n\n if not asString:\n dh(f\"<p><b>{item}</b>{seqRep}\", inNb=inNb)\n if asString:\n html = []\n for t in sorted(containers, key=sortKey):\n h = app.pretty(\n t,\n highlights=highlights,\n **display.consume(options, \"highlights\"),\n )\n if asString:\n html.append(h)\n if asString:\n return \"\".join(html)", "def _convert_rgb_tuple_to_string(self, rgb_tuple):\n\n return ''.join([self._zero_pad_number(v) for v in rgb_tuple])", "def val2str(val):\n # Return the input if it's a string\n if isinstance(val,str ): valstr=val\n # Handle types where spaces are added\n elif isinstance(val,tuple): valstr=repr(val).replace(', ',',')\n elif isinstance(val,list ): valstr=repr(val).replace(', ',',')\n elif isinstance(val,dict ): valstr=repr(val).replace(', ',',').replace(': ',':')\n # Otherwise use repr()\n else: valstr=repr(val)\n # Return output\n return valstr", "def parents_to_string(parent_tuple):\n return str(parent_tuple[0])+\" \"+str(parent_tuple[1])", "def formatter(t: tuple):\n s = 'The {} numbers are: ' + '{}, '*(len(t)-1) + '{}'\n return s.format(len(t),*t)", "def ymd_tuple_to_string(t):\n return '%s_%s_%s' % t", "def formatter(in_tuple):\n in_tuple_length = len(in_tuple)\n form_string = \"the {} numbers are: \".format(in_tuple_length)\n form_string += ', '.join(['{:d}'] * in_tuple_length)\n\n return form_string.format(*in_tuple)", "def formatter(in_tuple):\n length = len(in_tuple)\n form_string = (\"the {} numbers are: \" + \", \".join([\"{}\"]*length)).format(length, *in_tuple)\n return form_string.format(in_tuple)", "def tostr (x):\n if isinstance (x, tuple):\n return tuple ( map (tostr, x))\n if isinstance(x, (float, numpy.float32,numpy.float64)):\n return float_to_str(x)\n return str(x)", "def species_tuple_to_string(species_tuple, roman_numerals=True):\n atomic_number, ion_number = species_tuple\n element_symbol = ATOMIC_NUMBER2SYMBOL[atomic_number]\n if roman_numerals:\n roman_ion_number = int_to_roman(ion_number+1)\n return '{0} {1}'.format(str(element_symbol), roman_ion_number)\n else:\n return '{0} {1:d}'.format(element_symbol, ion_number)", "def _list2str(self, data, delimiter=\",\", classify=lambda x: x):\n res = \"\"\n for i in range(len(data)):\n res += classify(data[i])\n if i != len(data) - 1:\n res += delimiter + \" \"\n return res", "def __str__(self):\n left = ''\n right = ''\n for i in range(len(self.ant)):\n left += Prop.__str__(self.ant[i]) + \", \"\n \n for i in range(len(self.con)):\n right += Prop.__str__(self.con[i]) + \", \"\n return left[:-2] + '|-- ' + right[:-2]", "def _tuple_to_cpppo_tags(cls, tags, serializer=':'):\n\n tags_string = ''\n for tag in tags:\n tags_string += str(tag[0])\n for field in tag[1:-1]:\n tags_string += serializer\n # print 'DEBUG _tuple_to_cpppo_tags field: ', field\n tags_string += str(field)\n\n tags_string += '='\n tags_string += str(tag[-1])\n tags_string += ' '\n # print('DEBUG enip server tags_string: ', tags_string)\n\n return tags_string", "def as_str(the_val):\n if hasattr(the_val, \"__iter__\"):\n return \"[{}]\".format(\", \".join([str(v) for v in the_val]))\n return str(the_val)", "def format_pair(pair):\n logger.info('format pair...')\n formatted_pair = pair[0] + ', ' + pair[1] + '\\n'\n logger.debug('formatted_pair: ' + formatted_pair)\n return formatted_pair", "def __n3_to_str(triple):\n s, p, o = triple\n s = s.n3()\n p = p.n3()\n o = o.n3()\n if s.startswith('<') and s.endswith('>'):\n s = s[1:len(s) - 1]\n if p.startswith('<') and p.endswith('>'):\n p = p[1:len(p) - 
1]\n if o.startswith('<') and o.endswith('>'):\n o = o[1:len(o) - 1]\n return (s, p, o)", "def _shape_repr(shape):\r\n\r\n \r\n if len(shape) == 0:\r\n return \"()\"\r\n \r\n joined = \", \".join(\"%d\" % e for e in shape)\r\n \r\n #------------------------------------------------------\r\n # x for x in shape 语法:\r\n # for x in shape:\r\n # return x\r\n # example [x for x in [1,2,3]] = [1,2,3]\r\n # 因此 '%d' % e for e in shape 等于把 shape 中的数字以 str 形式\r\n # 打印出来\r\n # example ['%d' %x for x in [1,2]] = ['1','2']\r\n \r\n #------------------------------------------------------\r\n # join 语法\r\n # str.join(sequence)\r\n # sequence:要连接的元素序列\r\n # str 连接字符\r\n # ','.join('%d' % e for e in shape) 即是将 shape 中的元素用‘,’连接起来\r\n # if shape = [1,2] -> '(1,2)'\r\n \r\n if len(shape) == 1:\r\n \r\n # special notation for singleton tuples\r\n joined += ','\r\n \r\n # 如果 len(shape) = 1, 比如 shape = (1,)\r\n # joined = \", \".join(\"%d\" % e for e in shape)\r\n # 的结果是 '1'\r\n # '(%s)' % joined = '(%s)' % '1,' = '(1,)'\r\n \r\n return \"(%s)\" % joined", "def get_string(tree): \n\n tree_str = tree.pformat()\n tree_str_flat = ' '.join(tree_str.split())\n\n return tree_str_flat", "def value_to_string(self, value, type_class, param_info=None):\n if isinstance(value, Entry):\n var = self.get_variable(value.code_entry)\n if isinstance(value.target, list):\n return \"tuple(%s)\" % var\n return var\n else:\n if type_class == TypeClass.STRING:\n return '\"%s\"' % value\n elif type_class == TypeClass.ENUM:\n name = value.typeName\n suffix = self.get_last_part(name)\n upper_chars = [c for c in suffix if c.isupper()]\n as_name = \"%s_%s\" % (\"\".join(upper_chars), value.value)\n self.add_import('%s.%s' % (value.typeName, value.value), as_name)\n #return value.value\n return as_name\n elif type_class == TypeClass.CHAR:\n return \"uno.Char(\\\"%s\\\")\" % value.value\n elif type_class == TypeClass.SEQUENCE:\n comp_type, n = self.parse_seq(param_info)\n _comp_type_class = comp_type.getTypeClass()\n str_val = [self.value_to_string(v, _comp_type_class) for v in value]\n return \"(%s)\" % \", \".join(str_val)\n else:\n return str(value)", "def _tuple_to_cpppo_tag_multiple(cls, what, values=None, serializer=':'):\n tag_string = ''\n\n if values == None:\n for i in range(len(what)):\n tag_string += what[i][0] + EnipProtocol._SERIALIZER + str(what[i][1]) + \" \"\n else:\n for i in range(len(what)):\n tag_string += what[i][0] + EnipProtocol._SERIALIZER + str(what[i][1]) + \"=\" + str(values[i]) + \" \"\n\n return tag_string", "def encode_tuple3(value: tuple) -> bytes:\n raise NotImplementedError()", "def __str__(self):\n return \"%s(%s)\" % (self[0], \", \".join(map(str, self[1:])))", "def format_string_3(*tmp_tuple):\n\n total_items = len(tmp_tuple)\n formated_string = \"the {} numbers are: \"\n formated_string += \", \".join([\"{}\"] * total_items)\n result = formated_string.format(total_items, *tmp_tuple)\n\n print(result)\n return result", "def display(self):\n res = \"(\"\n curr = self.head\n while curr:\n val = curr.val\n if type(val) is str:\n val = \"'\" + val + \"'\"\n else:\n val = str(val)\n res += val\n if curr.next:\n res += ', '\n curr = curr.next\n return res + ')'", "def __repr__(self):\n return \"[{0}:{1}, {2}:{3}]\".format(*self.to_tuple())", "def join_recursive(lst, sep):\n msg = ''\n for i in lst:\n if isinstance(i, tuple) or isinstance(i, list):\n msg += join_recursive(i, sep)\n else:\n msg += (i + sep)\n return msg", "def _transform_opt(opt_val):\n if isinstance(opt_val, (list, tuple)):\n return 
','.join(opt_val)\n else:\n return opt_val", "def __str__ (self):\n return f'\"{self.value[0]}|{self.value[1]}\"'", "def serialize(self, root: TreeNode) -> str:\n l = []\n def preOrder(root):\n if not root:\n l.append(\"n\")\n return\n \n l.append(str(root.val))\n preOrder(root.left)\n preOrder(root.right)\n \n \n preOrder(root)\n #print(\",\".join(l))\n return \",\".join(l)", "def serialize_to_python(cls, value):\n return '%s %s %s' % (\n serialize_to_python(value.lhs),\n value.connector,\n serialize_to_python(value.rhs),\n )", "def pack_tuple(self, values):\n assert isinstance(values, (tuple, list))\n cardinality = [struct_L.pack(len(values))]\n packed_items = [self.pack_field(v) for v in values]\n return b''.join(itertools.chain(cardinality, packed_items))", "def _tuple_to_cpppo_tag(cls, what, value=None, serializer=':'):\n\n tag_string = ''\n tag_string += str(what[0])\n\n if len(what) > 1:\n for field in what[1:]:\n tag_string += EnipProtocol._SERIALIZER\n tag_string += str(field)\n if value is not None:\n if type(value) is str:\n # TODO: add support for SSTRING tags\n # ''' enip_client -a 192.168.1.20 'README:2[0]=(SSTRING)\"string\"' '''\n pass\n tag_string += '='\n tag_string += str(value)\n # print 'DEBUG _tuple_to_cpppo_tag tag_string: ', tag_string\n\n return tag_string", "def format_node(self, node):\n if node is None:\n return \"None\"\n\n if isinstance(node, list):\n return \"; \".join(self.format_node(elem) for elem in node)\n\n s = RE_SPACE.sub(' ', astor.to_source(node)).strip()\n if len(s) > self.NODE_MAX_LENGTH - len(\"...\"):\n s = s[:self.NODE_MAX_LENGTH] + \"...\"\n return repr(s)", "def test_node_to_str(self):\n f = lws.node_to_str\n # normal\n assert f(('a', 'b')) == 'a: b'\n # exception\n assert f(('a',),) == \"('a',): \"\n assert f('a') == 'a: '", "def encode_tuple2(value: tuple) -> bytes:\n raise NotImplementedError()", "def serialize(self, root):\n\n if root is None:\n return \"\"\n curr_lvl=[root]\n next_lvl = []\n ans =[]\n\n while(curr_lvl):\n tmp_ans = \",\".join(str(node.val) if node is not None else \"*\" for node in curr_lvl)\n ans.append(tmp_ans)\n nxt_lvl = []\n for each in curr_lvl:\n if each is not None:\n nxt_lvl.append(each.left) \n nxt_lvl.append(each.right)\n\n curr_lvl=nxt_lvl\n\n return \";\".join(ans)", "def __str__(self):\n s = ''\n for v in self:\n s = ''.join([s, ',' if s else '', str(v)])\n return s", "def _rgb_to_string(rgb_tup: tuple, alpha: int = 1) -> str:\n return f\"rgba({', '.join(map(str, rgb_tup))}, {alpha})\"", "def __str__(self) -> str:\n values = []\n self._str_helper(self.root, values)\n return \"TREE in order { \" + \", \".join(values) + \" }\"", "def __str__(self) -> str:\n values = []\n self._str_helper(self.root, values)\n return \"TREE in order { \" + \", \".join(values) + \" }\"", "def _tupleListToStrings(self):\n graphColorStrings = []\n previousSelection = self.colorlist.GetSelection()\n print(repr(self.graphColors))\n if isinstance(self.graphColors, str):\n self.graphColors = eval(self.graphColors)\n for col in self.graphColors:\n col1 = '%.2f' % float(col[0])\n col2 = '%.2f' % float(col[1])\n col3 = '%.2f' % float(col[2])\n graphColorStrings.append(', '.join([col1, col2, col3]))\n self.colorlist.SetItems(graphColorStrings)\n if 0 <= previousSelection < len(graphColorStrings):\n self.colorlist.SetSelection(previousSelection)\n return graphColorStrings", "def _encode_tuple(self, time_tuple):\n\t\treturn self._encode_bit('1', time_tuple[0]) + self._encode_bit('0', time_tuple[1])", "def _to_string(self, lst, indent=''):\n 
result = []\n for elem in lst:\n if isinstance(elem, list):\n if len(elem) > 0:\n result.append('\\n')\n result.append(self._to_string(elem, indent + ' '))\n elif isinstance(elem, float):\n result.append('%.6f' % elem)\n elif isinstance(elem, basestring):\n for char in ('(', ')', ' '):\n if char in elem:\n result.append('\"%s\"' % elem)\n break\n else:\n result.append(str(elem))\n elif elem is not None:\n result.append(str(elem))\n return indent + '(' + ' '.join(result) + ')\\n' + indent", "def arr2str(arr, sep=\", \", fmt=\"{}\"):\n return sep.join([fmt.format(v) for v in arr])", "def readable_sequence(values: Sequence[str], conjunction: str = 'and') -> str:\n if len(values) == 0:\n return ''\n elif len(values) == 1:\n return values[0]\n elif len(values) == 2:\n return f'{values[0]} {conjunction} {values[1]}'\n\n before = ', '.join(values[:-1])\n after = values[-1]\n return f'{before}, {conjunction} {after}'", "def stringify(self, value):\n if isinstance(value, list):\n return \", \".join(value)\n else:\n return str(value)", "def __str__(self):\n def recurse(node, level):\n s = \"\"\n if type(node) == LeafNode:\n return (\"| \" * level) + str(node) + \"\\n\"\n if node != None:\n s += recurse(node.rightOperand, level + 1)\n s += \"| \" * level\n s += str(node.operator) + \"\\n\"\n s += recurse(node.leftOperand, level + 1)\n return s\n return recurse(self, 0)", "def __str__(self):\n res = \"<\"\n for elem in self.values[:-1]:\n res += str(elem) + \", \"\n res += str(self.values[-1]) + \">\"\n return res", "def tuple_multi_string(dictionary, sep=','):\n for key, value in dictionary.items():\n value_split = value.split(sep)\n\n if len(value_split) == 1 or len(value_split) == 0:\n pass\n else:\n dictionary[key] = tuple(value_split)\n\n return dictionary", "def encode_tuple1(value: tuple) -> bytes:\n raise NotImplementedError()", "def format_seq(seq, new_seq):\n if type(seq) == str:\n return \"\".join(new_seq)\n elif type(seq) == tuple:\n return tuple(new_seq)\n else:\n return new_seq", "def row2str(row,sep=' '):\n\ta = ''\n\tfor i in range(len(row)):\n\t\ta = a+str(row[i])\n\t\tif i<len(row)-1:\n\t\t\ta = a+sep\n\treturn a", "def row2str(row,sep=' '):\n\ta = ''\n\tfor i in range(len(row)):\n\t\ta = a+str(row[i])\n\t\tif i<len(row)-1:\n\t\t\ta = a+sep\n\treturn a", "def __str__(self):\n return \"((a = {}, b={}), (c = {}, d={}))\".format(self._a, self._b, self._c, self._d)", "def __str__(self):\n if self._leftchild is None and self._rightchild is None:\n return str(self._element)\n if self._rightchild is None and self._leftchild is not None:\n return str(self._leftchild) + \", \" + str(self._element)\n if self._leftchild is None and self._rightchild is not None:\n return str(self._element) + \", \" + str(self._rightchild)\n outstr = str(self._leftchild) + \", \" + str(self._element)\n outstr += \", \" + str(self._rightchild)\n return outstr", "def serialize_to_python(cls, value):\n if len(value) == 1:\n suffix = ','\n else:\n suffix = ''\n\n return '(%s%s)' % (\n ', '.join(\n serialize_to_python(_item)\n for _item in value\n ),\n suffix)", "def convert_raw_tuple(value_tuple, format_string):\n values = []\n for v, c in zip(value_tuple, format_string):\n if v is None:\n # append None\n values.append(v)\n elif c == u\"s\":\n # string\n values.append(v)\n elif c == u\"S\":\n # string, split using space as delimiter\n values.append([s for s in v.split(u\" \") if len(s) > 0])\n elif c == u\"i\":\n # int\n values.append(int(v))\n elif c == u\"U\":\n # Unicode\n values.append(convert_unicode_field(v))\n 
elif c == u\"A\":\n # ASCII\n values.append(convert_ascii_field(v))\n #elif c == u\"x\":\n # # ignore\n # pass\n return tuple(values)", "def __str__(self):\n cur_node = self.head\n str_list = ['{']\n while cur_node is not None:\n str_list.append(str(cur_node))\n if cur_node is not self.tail:\n str_list.append(', ')\n cur_node = cur_node.next_node\n str_list.append('}')\n return ''.join(str_list)", "def _fieldDefAsString(self, fieldTuple):\n\n fname, ftype, flen = fieldTuple\n if ftype == 'char':\n # pdb.set_trace()\n try:\n slen, flen = flen\n except TypeError as e:\n slen = flen\n flen = 1\n\n if slen > 0:\n fname = '%s[%d]' % (fname, slen)\n else:\n fname = '%s[]' % (fname)\n \n if flen > 1: \n return \" %s %s[%d];\" % (ftype, fname, flen)\n else:\n return \" %s %s;\" % (ftype, fname)", "def tokens_to_str(cls, tokens):\n\n assert np.iterable(tokens)\n result = []\n for t in tokens:\n # if type(t) in [str, int]:\n if isinstance(t, (int, long, basestring)):\n result.append('/'+str(t))\n elif type(t) == slice:\n start = str(t.start) if t.start is not None else ''\n stop = str(t.stop) if t.stop is not None else ''\n result.append('[%s:%s]' % (start, stop))\n elif type(t) in [tuple, list]:\n if not t:\n raise ValueError('invalid token')\n result.append('['+','.join(map(str, t))+']')\n else:\n raise ValueError('invalid token')\n return ''.join(result)", "def render(tree):\n if type(tree) == list:\n return \"-\"+render(tree[0])+render(tree[1])\n elif type(tree) == tuple:\n return tree[0]+render(tree[1])\n else:\n return tree", "def serialize(self, root):\n def dfs(root):\n if not root:\n res.append('None')\n return\n res.append(str(root.val))\n dfs(root.left)\n dfs(root.right)\n \n res = []\n\n dfs(root)\n \n return ','.join(res)", "def __str__(self):\n self.vals.sort()\n result = ''\n for e in self.vals:\n result = result + str(e) + ','\n return '{' + result[:-1] + '}'", "def serialize(node):\r\n serial = node.val \r\n\r\n if node.left or node.right:\r\n serial += r'('\r\n\r\n if node.left:\r\n serial += serialize(node.left)\r\n \r\n serial += r'|' \r\n \r\n if node.right:\r\n serial += serialize(node.right)\r\n \r\n serial += r')'\r\n\r\n return serial", "def serialize(self, root):\n if not root:\n return 'null,'\n left = self.serialize(root.left)\n right = self.serialize(root.right)\n return str(root.val) + ',' + left + right", "def str(self) -> List[Tuple[str, str]]:\n kl = self.keys()\n vl = self.values()\n return [str(kl[idx]) + \",\" + str(vl[idx]) for idx in range(len(kl))]", "def __repr__(self) -> str:\n return 'Pair({!r}, {!r})'.format(*self.names)", "def get_prep_value(self, value):\n if isinstance(value, tuple) and len(value) == 2:\n return '[%s,%s]' % value\n return value", "def __str__(self):\n stubs = ['' for _ in range(self.nChildren())]\n label = dist = ''\n for i in range(self.nChildren()):\n stubs[i] = str(self.children[i])\n if self.dist or self.dist == 0.0:\n dist = ':' + str(self.dist)\n if self.label != None:\n label = str(self.label)\n if self.nChildren() == 0:\n return label + dist\n else:\n stubstr = '('\n for i in range(len(stubs) - 1):\n stubstr += stubs[i] + ','\n return stubstr + stubs[-1] + ')' + label + dist\n # there is no label\n '''\n if not self.left and self.right:\n return ',' + right\n elif self.left and not self.right:\n return left + ','\n elif self.left and self.right:\n return '(' + left + ',' + right + ')' + dist\n '''", "def __str__(self):\n return str(self._key) + \", \" + str(self._value[0]) + \", \" + str(self._value[1])", "def serialize(self, 
root):\n if(not root) :\n return \"X\"\n else :\n return \",\".join([str(root.val), self.serialize(root.left), self.serialize(root.right)])", "def __repr__(self):\n result = '\"{0}\"'.format(self._filepath.unexpanded)\n if self.nonlocal is None: result += \", None\"\n else: result += ', \"%s\"' % (self._nonlocal.unexpanded)\n result += \", %f, %f, %f, %f, %f\" % (self.s, self.p, self.d, self.pnl, self.dnl)\n return result", "def serialize(self, root):\n result = []\n def dfs(root):\n if root == None:\n result.append(\"null\")\n return\n result.append(str(root.val))\n dfs(root.left)\n dfs(root.right)\n dfs(root)\n return ','.join(result)", "def __str__ (self):\n return \", \".join(str(row) for row in self.rows()).join(\"()\")", "def _str_helper(self, cur, values):\n # base case\n if cur is None:\n return\n # recursive case for left subtree\n if cur.left:\n self._str_helper(cur.left, values)\n # store value of current node\n values.append(str(cur.value))\n # recursive case for right subtree\n if cur.right:\n self._str_helper(cur.right, values)", "def space_join(*items):\n valid_items = []\n for item in items:\n if item is None:\n continue\n if isinstance(item, tuple):\n if item[0] is None:\n continue\n stripped = strip_if_not_blank(item[0])\n if not is_null(stripped):\n if len(item) == 2:\n if not is_null(item[1]):\n valid_items.append(\"%s%s\" % (item[1], stripped))\n else:\n valid_items.append(stripped)\n elif len(item) >= 3:\n if not is_null(item[1]) and not is_null(item[2]):\n valid_items.append(\"%s%s%s\" % (\n item[1], stripped, item[2]))\n elif not is_null(item[1]):\n valid_items.append(\"%s%s\" % (item[1], stripped))\n elif not is_null(item[2]):\n valid_items.append(\"%s%s\" % (stripped, item[2]))\n else:\n stripped = strip_if_not_blank(item)\n if stripped != \"\":\n valid_items.append(stripped)\n return \" \".join(valid_items)", "def tree2str(self, root: Optional[TreeNode]) -> str:\n if not root:\n return \"\"\n if not root.left and not root.right:\n return str(root.val)\n if not root.right:\n return f'{root.val}({self.tree2str(root.left)})'\n return f'{root.val}({self.tree2str(root.left)})({self.tree2str(root.right)})'", "def no_parentheses():\n weird_tuple = 1, 2, 3\n print(weird_tuple) # (1, 2, 3)\n print(type(weird_tuple)) # <type 'tuple'>", "def _str_helper(self, cur, values):\n # base case\n if cur is None:\n return\n # recursive case for left subtree\n self._str_helper(cur.left, values)\n # store value of current node\n values.append(str(cur.value))\n # recursive case for right subtree\n self._str_helper(cur.right, values)", "def __str__(self):\n if len(self.children) == 0:\n return self.val\n ret = [\n self.val]\n for child in self.children:\n ret += [ '\\t' + child_s for child_s in str(child).split('\\n') ]\n\n return ('\\n').join(ret)", "def join_float(val: Tuple[Union[float, Enum], Union[float, Enum]]) -> str:\n low, high = val\n if low == high:\n return str(low)\n else:\n return f'{low!s}, {high!s}'", "def __str__(self) -> str:\n return F\"<{self.priority}, {self.value}>\"" ]
[ "0.80097777", "0.77167994", "0.7229197", "0.72018117", "0.7117311", "0.707209", "0.70130855", "0.70075023", "0.6833385", "0.6807723", "0.67460287", "0.6742025", "0.6638861", "0.66135544", "0.6548663", "0.64345735", "0.6370722", "0.63114953", "0.6277102", "0.627624", "0.62588453", "0.62480843", "0.6009705", "0.59545505", "0.59416825", "0.5827847", "0.5823593", "0.58088034", "0.5773828", "0.5770003", "0.573831", "0.568291", "0.56553626", "0.5644268", "0.5639123", "0.56289166", "0.56220394", "0.5611772", "0.5574737", "0.55672115", "0.55653083", "0.5541116", "0.5541003", "0.55078304", "0.54916316", "0.54746175", "0.54735357", "0.54708624", "0.5456615", "0.5455954", "0.5454322", "0.54527414", "0.5440309", "0.5434708", "0.54336953", "0.5428364", "0.5417632", "0.5417632", "0.5414764", "0.5411856", "0.5408651", "0.54017943", "0.5401674", "0.53964907", "0.5392605", "0.53854084", "0.5374276", "0.5374041", "0.5372738", "0.535745", "0.535745", "0.5357448", "0.5356402", "0.5354384", "0.5344265", "0.5343312", "0.53397554", "0.5339343", "0.5336968", "0.53304136", "0.5315855", "0.5311441", "0.52886146", "0.52866685", "0.5285242", "0.52828956", "0.528123", "0.52784735", "0.52770585", "0.52735734", "0.5265663", "0.5248398", "0.52407223", "0.5236782", "0.5235229", "0.52283233", "0.52273804", "0.5224443", "0.5221361", "0.521798" ]
0.74598444
2
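The row above pairs its query with a tuple_to_string implementation whose depth-dependent separators are easiest to see on a concrete value. The sketch below is not part of the corpus: it restates the paired function with unchanged logic (plus the import copy it relies on, so it runs standalone) and adds one hypothetical call so the per-depth joins are visible.

import copy

def join_deepest(tup, sep=';'):
    # Leaf values (non-lists) pass through unchanged.
    if not isinstance(tup, list):
        return tup
    # A flat list of strings is the deepest remaining level: join it.
    if not isinstance(tup[0], list):
        return sep.join(tup)
    # Otherwise join each sub-list first and keep the results in place.
    for idx, val in enumerate(tup):
        tup[idx] = join_deepest(val, sep)
    return tup

def tuple_to_string(tuptup):
    tup = copy.deepcopy(tuptup)   # leave the caller's nested list untouched
    tup = join_deepest(tup, ';')  # deepest level
    tup = join_deepest(tup, '/')  # next level up
    tup = join_deepest(tup, '|')  # outermost level
    return tup

print(tuple_to_string([[['a', 'b'], ['c']], [['d', 'e']]]))
# pass ';': [['a;b', 'c'], ['d;e']]
# pass '/': ['a;b/c', 'd;e']
# pass '|': 'a;b/c|d;e'   (printed as a;b/c|d;e)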
Recursive function to create the string representation for the deepest level of the tuptup list.
def join_deepest(tup, sep=';'): if not isinstance(tup, list): return tup if not isinstance(tup[0], list): return sep.join(tup) for idx, val in enumerate(tup): tup[idx] = join_deepest(val, sep) return tup
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tree_str(self, depth: int = 0) -> str:\n temp = \" \" * depth + str(self.head) + \"\\n\"\n for son in self.sons:\n temp += son.get_tree_str(depth + 1)\n return temp", "def tuple_to_string(tuptup):\n\n def join_deepest(tup, sep=';'):\n \"\"\" Recursive function to create the string representation for the deepest level of the\n tuptup list.\n\n Parameters\n ----------\n tup : object\n Element to join if list or list of lists.\n\n sep : str, optional\n Separation character to join the list elements by.\n\n Returns\n -------\n object\n List containing joined string in max depth. Str if input depth = 1.\n\n \"\"\"\n\n if not isinstance(tup, list):\n return tup\n if not isinstance(tup[0], list):\n return sep.join(tup)\n\n for idx, val in enumerate(tup):\n tup[idx] = join_deepest(val, sep)\n return tup\n\n tup = copy.deepcopy(tuptup)\n tup = join_deepest(tup, ';')\n tup = join_deepest(tup, '/')\n tup = join_deepest(tup, '|')\n return tup", "def tree_string(self, indent=0): # pragma: no cover\r\n return \"\"", "def string_postorder(t: Tree) -> str:\n if t.value is None:\n return ''\n else:\n return ''.join([string_postorder(s) for s in t.children]) + str(t.value)\n\n # Version 2\n # if t.value is None:\n # return ''\n # else:\n # s = ''\n # for subtree in t.children:\n # s += string_postorder(subtree)\n # s += str(t.value)\n # return s", "def str_recursive(node):\n\n if node == None:\n return \"\"\n else:\n return str(node.item) + \" \" + LinkedList.str_recursive(node.next)", "def tree_str(self, depth_index=0, recursive_dict=None):\r\n if not hasattr(self,'iteritems'): return ''\r\n if recursive_dict is not None: self = TreeMap(recursive_dict)\r\n buff_str = ''\r\n \r\n for item in self.iteritems():\r\n # Starts working now.\r\n k = item[0]\r\n v = item[1]\r\n \r\n spacer = '\\n' + '| ' * depth_index\r\n \r\n if hasattr(v,'iteritems'):\r\n buff_str += spacer + '+--[ ' + k + ' ]'\r\n buff_str += self.tree_str(depth_index=depth_index + 1, recursive_dict=v)\r\n else:\r\n buff_str += spacer + '\\_.--[ ' + str(k) + ' = ' + str(v) + ' ]'\r\n \r\n return buff_str", "def __str__(self):\n string = ''\n\n # gets the nodes at each level and puts the values into a string\n for i in range(self.get_height()+1):\n nodes = self.get_nodes_on_level(i)\n level = [str(node.value) if node else '-' for node in nodes]\n string += '{}\\n'.format(' '.join(level))\n\n return string", "def _return_string_all_descendants_rec(self, node, string, level):\n if len(node.get_children()) == 0:\n return string\n else:\n level += 1\n for child in node.get_children():\n string += \"| \"*level\n string += \"|---\" + str(child) + \"\\n\"\n string = self._return_string_all_descendants_rec(child, string, level)\n return string", "def __str__(self) -> str:\n\n if not self.root:\n return 'Empty RB Tree'\n\n root, bfs_queue, height = self.root, queue.SimpleQueue(), self.root.subtree_height()\n track = {i: [] for i in range(height + 1)}\n bfs_queue.put((root, 0, root.parent))\n\n while bfs_queue:\n n = bfs_queue.get()\n if n[1] > height:\n break\n track[n[1]].append(n)\n if n[0] is None:\n bfs_queue.put((None, n[1] + 1, None))\n bfs_queue.put((None, n[1] + 1, None))\n continue\n bfs_queue.put((None, n[1] + 1, None) if not n[0].left else (n[0].left, n[1] + 1, n[0]))\n bfs_queue.put((None, n[1] + 1, None) if not n[0].right else (n[0].right, n[1] + 1, n[0]))\n\n spaces = 12 * (2 ** (height))\n ans = '\\n' + '\\t\\tVisual Level Order Traversal of RBtree'.center(spaces) + '\\n\\n'\n for i in range(height):\n ans += f\"Level {i + 1}: 
\"\n for n in track[i]:\n space = int(round(spaces / (2 ** i)))\n if not n[0]:\n ans += ' ' * space\n continue\n ans += \"{} ({})\".format(n[0], n[2].value if n[2] else None).center(space, \" \")\n ans += '\\n'\n return ans", "def __str__(self) -> str:\n values = []\n self._str_helper(self.root, values)\n return \"TREE in order { \" + \", \".join(values) + \" }\"", "def __str__(self) -> str:\n values = []\n self._str_helper(self.root, values)\n return \"TREE in order { \" + \", \".join(values) + \" }\"", "def get_string(tree): \n\n tree_str = tree.pformat()\n tree_str_flat = ' '.join(tree_str.split())\n\n return tree_str_flat", "def __str__(self):\n\t\tself._synchronize_attributes()\n\t\ts = \"\"\n\t\tqueue = c3.Queue()\n\t\tlevel = 0\n\t\tqueue.enqueue((1, self._root))\n\t\twhile queue.peek():\n\t\t\tnodelev, node = queue.dequeue()._data\n\t\t\tif (not node):\n\n\t\t\t\t#NODE IS NOT THERE - just a placeholder\n\t\t\t\t#print spacing and enqueue fake left and right children\n\t\t\t\t#but stops if they would be past the max depth of the tree\n\t\t\t\tif ((self._depth - nodelev + 1) <= 0):\n\t\t\t\t\tcontinue\n\n\t\t\t\tif (nodelev != level):\n\t\t\t\t\ts += \"\\n\"\n\t\t\t\t\t#PRINT THE INDENT\n\t\t\t\t\tindent = \" \"*int((self._max_chars)*(2**(self._depth - nodelev) - 1))\n\t\t\t\t\ts += indent\n\t\t\t\t\tlevel = nodelev\n\n\t\t\t\t#PRINT THE SPACING\n\t\t\t\ts += \" \"*(self._max_chars)*(2**(self._depth - nodelev + 1) - 1)\n\n\t\t\t\t#PRINT SPACES TO REPLACE DATA\n\t\t\t\ts += \" \"*self._max_chars\n\n\t\t\t\t#Enqueue fake children\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\t\tcontinue\n\n\t\t\tif (nodelev != level):\n\t\t\t\ts += \"\\n\"\n\t\t\t\t#PRINT THE INDENT\n\t\t\t\tindent = \" \"*(self._max_chars)*(2**(self._depth - nodelev) - 1)\n\t\t\t\ts += indent\n\t\t\t\tlevel = nodelev\n\n\t\t\t#adds preceding \"|\"s if the str length of the data is smaller than the max\n\t\t\tfor i in range(int(self._max_chars - len(str(node.value())))):\n\t\t\t\ts += \"|\"\n\t\t\ts += str(node.value()) \n\n\t\t\t#PRINT THE SPACING\n\t\t\tspacing = \" \"*(self._max_chars)*(2**(self._depth - nodelev + 1) - 1)\n\t\t\ts += spacing\n\n\t\t\t#Enqueues\n\t\t\tif node.lchild():\n\t\t\t\tqueue.enqueue((nodelev + 1, node.lchild()))\n\t\t\telse:\n\t\t\t\t#ENQUEUES A FAKE NODE IN ORDER TO FORMAT THE TREE FOR MISSING NODES\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\tif node.rchild():\n\t\t\t\tqueue.enqueue((nodelev + 1, node.rchild()))\n\t\t\telse:\n\t\t\t\t#ENQUEUES A FAKE NODE IN ORDER TO FORMAT THE TREE FOR MISSING NODES\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\ts += \"\\n\"\n\t\treturn s", "def _compute_repr(tree):\n if tree.height() == 2:\n return \"[.{} {} ] \".format(tree.label(), \"\".join(tree.leaves()))\n else:\n s = \"\"\n for child in tree:\n s += _compute_repr(child)\n return \"[.{} {} ] \".format(tree.label(), s)", "def __str__(self):\n current = self.root\n nodes = [self.root]\n final = str(self.root) + \"\\n\"\n count = 0\n while len(nodes) != 0:\n count += 1\n if count == 10:\n return \"\"\n temp = []\n for node in nodes:\n if node.left != None:\n temp.append(node.left)\n final += str(node.left) + \" \"\n else:\n final += \"_ \"\n if node.right != None:\n temp.append(node.right)\n final += str(node.right) + \" \"\n else:\n final += \"_ \"\n if temp == []:\n if node == nodes[len(nodes) - 1]:\n break\n final += \"\\n\"\n nodes = temp\n self.in_order_traversal()\n for item in self.traverse:\n final += str(item.key) + \" \"\n final += 
\"\\n\"\n return final", "def print_tree(self):\n return \"\"", "def print_tree(t):\r\n if (t==None):\r\n return \r\n else:\r\n print_tree(left(t))\r\n print(value(t),end=\" \")\r\n print_tree(right(t))", "def parents_to_string(parent_tuple):\n return str(parent_tuple[0])+\" \"+str(parent_tuple[1])", "def __str__(self):\n def recurse(node, level):\n s = \"\"\n if type(node) == LeafNode:\n return (\"| \" * level) + str(node) + \"\\n\"\n if node != None:\n s += recurse(node.rightOperand, level + 1)\n s += \"| \" * level\n s += str(node.operator) + \"\\n\"\n s += recurse(node.leftOperand, level + 1)\n return s\n return recurse(self, 0)", "def render(tree):\n if type(tree) == list:\n return \"-\"+render(tree[0])+render(tree[1])\n elif type(tree) == tuple:\n return tree[0]+render(tree[1])\n else:\n return tree", "def postorder_traversal(tree):\n post = '' # Handles the spaces between the postorder traversal\n # in the string\n\n # To make sure the function doesn't move on if it doesn't have\n # a left child, so it doesn't add to string if it is None\n if tree.get_left() != None:\n post += postorder_traversal(tree.get_left()) + ' '\n\n # To make sure the function doesn't move on if it doesn't have\n # a right child, so it doesn't add to string if it is None\n if tree.get_right() != None:\n post += postorder_traversal(tree.get_right()) + ' '\n\n # Prints the current value (this is all recursed in postorder)\n post += str(tree.get_val())\n\n return post", "def __str__(self):\r\n levels = tuple(self.generate_levels())\r\n self.compute_representation_positions()\r\n levels_to_strings = self.represent_tree_levels(levels)\r\n branches = self.represent_tree_branches(levels)\r\n\r\n return \"\".join(\"\".join((level, \"\\n\\n\", branch))\r\n for (level, branch) in zip(levels_to_strings, branches))", "def display(self, tree, level = 0):\n\t\tresult = \"\"\n\t\tfor name, node in tree.soon:\n\t\t\tresult += \" \"*level+repr(node)+\"\\n\"\n\t\t\tresult += self.display(tree.getSoon(name),level + 1)\n\t\treturn result", "def tree_to_string(tree):\n if type(tree) == Tree:\n return sum(list(map(tree_to_string, tree.children)), [])\n else:\n return [str(tree)]", "def get_tree_string(self, node):\n string = \"\"\n for child in sorted(node.children):\n string += node.depth * \"\\t\"\n if node.depth > 0:\n string += \"|\"\n string += node.feature + \"=\" + child\n if node.children[child].is_leaf:\n string += \":\" + node.children[child].pred + \"\\n\"\n else:\n string += \"\\n\" + self.get_tree_string(node.children[child])\n\n return string", "def print_tree(tree, pref=\"\"):\r\n leaf = \"|_____> \"\r\n top = \"|_______\"\r\n son1 = \"| \"\r\n son2 = \" \"\r\n width = len(top)\r\n\r\n a = \"\"\r\n if len(tree) == 3:\r\n if (pref == \"\"):\r\n a += pref + str(tree[0]) + \"\\n\"\r\n else:\r\n a += pref[:-width] + top + str(tree[0]) + \"\\n\"\r\n a += print_tree(tree[1], pref + son1)\r\n a += print_tree(tree[2], pref + son2)\r\n return a\r\n\r\n else:\r\n return (pref[:-width] + leaf + str(tree) + \"\\n\")", "def __repr__ (self, depth=None):\n\t\ts=[];add=s.append\n\t\t\n\t\tadd (\"%s%s\" % (myglobals.getIndent(self.level), self.name))\n\t\tif depth is None or self.level < depth:\n\t\t\tfor status in self.selected:\n\t\t\t\tobj = status.fsObj\n\t\t\t\t# if obj.level > depth:\n\t\t\t\t\t# # print 'level (%d) exceeds depth, skipping' % obj.level\n\t\t\t\t\t# continue\n\t\t\t\tif isinstance (obj, WorkingDirectory):\n\t\t\t\t\t# print \"DIRECTORY %s\" % obj.name\n\t\t\t\t\tif not obj.selected.isempty():\n\t\t\t\t\t\tadd 
(str(obj))\n\t\t\t\telif isinstance (obj, JloFile):\n\t\t\t\t\tif os.path.exists(obj.path):\n\t\t\t\t\t\tadd (\"%s (%s)!!!\" % ( str(obj), status.flag))\n\t\t\t\t\t\t# add (\"%s%s (%s)!!!\" % (myglobals.getIndent(self.level), str(obj), status.flag))\n\t\t\t\t\telse:\n\t\t\t\t\t\tadd (\"%s%s (%s)???\" % (myglobals.getIndent(self.level), str(obj), status.flag))\n\t\t\t\telse:\n\t\t\t\t\t## missing directory\n\t\t\t\t\tadd (\"%s%s (missing)##\" % (myglobals.getIndent(self.level+1), obj.name))\n\t\treturn '\\n'.join (s)", "def __str__(self):\n # Tricky to do iteratively so we do it recursively.\n return BST._str(\"\", self.root)", "def recursifTreePrinter(tree,indent):\n listOfBranches = tree.GetListOfBranches()\n if len(listOfBranches) > 0: # Width informations\n maxCharName = max([len(branch.GetName()) \\\n for branch in listOfBranches])\n maxCharTitle = max([len(branch.GetTitle()) \\\n for branch in listOfBranches])\n dic = { \\\n \"nameWidth\":maxCharName+2, \\\n \"titleWidth\":maxCharTitle+4, \\\n \"memoryWidth\":1}\n for branch in listOfBranches: # Print loop\n rec = \\\n [branch.GetName(), \\\n \"\\\"\"+branch.GetTitle()+\"\\\"\", \\\n str(branch.GetTotBytes())]\n write(TREE_TEMPLATE.format(*rec,**dic),indent,end=\"\\n\")\n recursifTreePrinter(branch,indent+2)", "def _str(indent, root):\n if root is None:\n return \"\"\n else:\n return (BST._str(indent + \"\\t\", root.right) +\n indent + repr(root.item) + \"\\n\" +\n BST._str(indent + \"\\t\", root.left))", "def print_tree(self):\n recur_print = self.recur_print(tree.root, '')[:-1]\n return recur_print", "def represent_tree_levels(self, levels):\r\n prev_node_end = 0 \r\n level_string = []\r\n for level in levels:\r\n prev_node_end = 0 \r\n level_string = []\r\n for node in level: \r\n node_to_str = str(node.keys)\r\n space_between_nodes = node.str_pos - prev_node_end \r\n level_string.extend((\" \"*space_between_nodes, node_to_str))\r\n prev_node_end = node.str_pos + len(node_to_str)\r\n\r\n yield \"\".join(level_string)", "def print_node_tree(node, level=0):\n str_builder = []\n if node:\n str_builder.append(print_node_tree(node.right, level + 1))\n str_builder.append(\"| \" * level)\n str_builder.append(\n ''.join([str(node.value), \" - \", str(node.level), \"\\n\"]))\n str_builder.append(print_node_tree(node.left, level + 1))\n return ''.join(str_builder)", "def __str__(self):\r\n T = Btree(2)\r\n T.root = Node(self.keys, [Node(child.keys, []) for child in self.children])\r\n return str(T)", "def tree_to_string(self, indent):\n\t\ts = self.indent_string(indent) + str(self)\n\t\tfor c in self.child_nodes:\n\t\t\ts += c.tree_to_string(indent + 1)\n\t\treturn s", "def __str__(self):\n stubs = ['' for _ in range(self.nChildren())]\n label = dist = ''\n for i in range(self.nChildren()):\n stubs[i] = str(self.children[i])\n if self.dist or self.dist == 0.0:\n dist = ':' + str(self.dist)\n if self.label != None:\n label = str(self.label)\n if self.nChildren() == 0:\n return label + dist\n else:\n stubstr = '('\n for i in range(len(stubs) - 1):\n stubstr += stubs[i] + ','\n return stubstr + stubs[-1] + ')' + label + dist\n # there is no label\n '''\n if not self.left and self.right:\n return ',' + right\n elif self.left and not self.right:\n return left + ','\n elif self.left and self.right:\n return '(' + left + ',' + right + ')' + dist\n '''", "def print_leaves(t):\n for depth, leaves in sorted(leaves_by_depth(t).items()):\n print(depth-1, 'bits:', leaves)", "def pprint_nodes(subtrees):\n def indent(s,type=1):\n x = s.split(\"\\n\")\n r = 
\"+-%s\\n\"%x[0]\n for a in x[1:]:\n if a==\"\": continue\n if type==1:\n r += \"| %s\\n\"%a\n else:\n r += \" %s\\n\"%a\n return r\n if len(subtrees)==0: return \"\"\n f=\"\";\n for a in subtrees[:-1]:\n f += indent(a)\n f += indent(subtrees[-1],2)\n return f", "def print_tree(t, indent=0, end='\\n'):\n if isinstance(t, Leaf):\n print(t, end='')\n else:\n s = '(' + t.tag + ' '\n indent += len(s)\n print(s, end='')\n print_tree(t.branches[0], indent, '')\n for b in t.branches[1:]:\n print('\\n' + ' '*indent, end='')\n print_tree(b, indent, '')\n print(')', end=end)", "def str_reverse_recur(node):\n\n if node == None:\n return \"\"\n else:\n return LinkedList.str_reverse_recur(node.next) + \" \" + str(node.item)", "def serialize(self, root: TreeNode) -> str:\n l = []\n def preOrder(root):\n if not root:\n l.append(\"n\")\n return\n \n l.append(str(root.val))\n preOrder(root.left)\n preOrder(root.right)\n \n \n preOrder(root)\n #print(\",\".join(l))\n return \",\".join(l)", "def __str__(self, depth=1):\n if self.isLeaf():\n return \"Predict: \\\"{:s}\\\"\".format(str(self.predict))\n else:\n s = \"if features[{:d}] != \\\"{:s}\\\" then:\\n {:s} \\n{:s}else:\\n {:s}\"\n return s.format(self.feature, \n str(self.value), \n \"\\t\" * depth+self.left.__str__(depth+1),\n \"\\t\" * (depth-1),\n \"\\t\" * depth+self.right.__str__(depth+1))", "def __str__(self):\n if len(self.children) == 0:\n return self.val\n ret = [\n self.val]\n for child in self.children:\n ret += [ '\\t' + child_s for child_s in str(child).split('\\n') ]\n\n return ('\\n').join(ret)", "def test_print_level_order(depth_one_tree):\n assert print_level_order(depth_one_tree) == ['0', '1 2 3 4']", "def show_as_tree(\n self,\n *,\n format_func: typing.Callable[[HierarchicalCategory], str] = str,\n maxdepth: typing.Union[None, int] = None,\n root: typing.Union[None, HierarchicalCategory, str] = None,\n ) -> str:\n if root is None:\n top_level_nodes = (node for node in self.values() if not node.parents)\n else:\n if not isinstance(root, HierarchicalCategory):\n root = self[root]\n top_level_nodes = [root]\n return \"\\n\".join(\n (\n self._show_subtree(\n node=top_level_node, format_func=format_func, maxdepth=maxdepth\n )\n )\n for top_level_node in top_level_nodes\n )", "def __str__(self):\n return \"{}\\n\\n{}\".format(self.puzzle,\n \"\\n\".join([str(x) for x in self.children]))", "def _generate_hierarchy_string(self, skeleton):\n hierarchy_string = \"HIERARCHY\\n\"\n hierarchy_string += self._generate_joint_string(skeleton.root, skeleton, 0)\n return hierarchy_string", "def tree2str(self, root: Optional[TreeNode]) -> str:\n if not root:\n return \"\"\n if not root.left and not root.right:\n return str(root.val)\n if not root.right:\n return f'{root.val}({self.tree2str(root.left)})'\n return f'{root.val}({self.tree2str(root.left)})({self.tree2str(root.right)})'", "def to_string(cls, hierarchical_dict: dict) -> str:\n keys = cls.get_all_keys(hierarchical_dict)\n keys = sorted(keys)\n res = \"\"\n for key in keys:\n res += f\"{key} = {FuseUtilsHierarchicalDict.get(hierarchical_dict, key)}\\n\"\n\n return res", "def print_recursive(self, indents):\n\n\t\tind = \"\\t\"\n\t\toutput = indents * ind + self.name\n\t\tprint(output)\n\t\tfor i in self.children:\n\t\t\ti.print_recursive(indents+1)", "def print_tree(self,root_key='',offset=''):\n itm = self._root\n if root_key:\n itm = self.get_data(root_key)\n tstr = os.linesep \n try: #if isinstance(itm,dict):\n for k in itm.keys():\n x_str = self.print_tree(root_key+'.'+k,offset+' ')\n tstr 
= tstr+offset+'{}: {}'.format(k,x_str)+os.linesep\n except:\n try: #elif isinstance(itm,list):\n for i,x in enumerate(itm):\n x_str = self.print_tree(root_key+'.'+str(i),offset+' ')\n tstr = tstr+offset+'{}: {}'.format(i,x_str)+os.linesep\n except:\n return '{}'.format(itm)\n return tstr", "def reverse_level_order(self):\r\n stack = []\r\n queue = [self]\r\n while queue:\r\n curr_node = queue.pop(0)\r\n stack.append(curr_node.root)\r\n if curr_node.right:\r\n queue.append(curr_node.right)\r\n if curr_node.left:\r\n queue.append(curr_node.left)\r\n while stack:\r\n print (stack.pop(), '' , end= '')", "def serialize(self, root):\n\n if root is None:\n return \"\"\n curr_lvl=[root]\n next_lvl = []\n ans =[]\n\n while(curr_lvl):\n tmp_ans = \",\".join(str(node.val) if node is not None else \"*\" for node in curr_lvl)\n ans.append(tmp_ans)\n nxt_lvl = []\n for each in curr_lvl:\n if each is not None:\n nxt_lvl.append(each.left) \n nxt_lvl.append(each.right)\n\n curr_lvl=nxt_lvl\n\n return \";\".join(ans)", "def print_tree(t, indent=0):\n print(' ' * indent + str(label(t)))\n for b in branches(t):\n print_tree(b, indent + 1)", "def __repr__(self) ->str:\n # Our __repr__ is recursive, because it can also be called\n # via repr...!\n return ('{}({}, {})'.format(self.__class__.__name__,\n repr(self.value),\n repr(self.children))\n if self.children\n else 'Tree({})'.format(repr(self.value)))", "def _build_directory_structure_string(structure):\n def _recurse_dic(dic, level, prefix, buf):\n idx = 0\n for key, value in dic.items():\n idc = \"┣━\"\n if idx == len(dic.keys()) - 1:\n idc = \"┗━\"\n if level == 0:\n idc = \"\"\n\n if isinstance(value, dict):\n buf.append(\"{0}{1}[{2}]\".format(prefix, idc, key))\n if len(dic.keys()) > 1 and idx != len(dic.keys()) - 1:\n tmp_prefix = prefix + \"┃ \"\n else:\n tmp_prefix = prefix + \" \"\n _recurse_dic(value, level + 1, tmp_prefix, buf)\n else:\n buf.append(\"{0}{1}{2}\".format(prefix, idc, key))\n\n idx += 1\n\n buf = []\n _recurse_dic(structure, 0, \"\", buf)\n return \"\\n\".join(buf)", "def print_recursive(value, indent=0):\n tabs = lambda count: '' + str(' ' * (indent + count))\n if isinstance(value, dict):\n to_print = '{}{}'.format(tabs(1), '{')\n for key, item in value.iteritems():\n to_print += '\\n{}{}:\\n{}'.format(tabs(2), key, print_recursive(item, indent + 2))\n return to_print + '{}{}'.format('\\n' + tabs(1) if len(value) > 0 else ' ', '}')\n if isinstance(value, list):\n to_print = '{}['.format(tabs(1))\n for item in value:\n to_print += '\\n' + print_recursive(item, indent + 1)\n return to_print + '{}{}'.format('\\n' + tabs(1) if len(value) > 0 else ' ', ']')\n if isinstance(value, str) or isinstance(value, unicode):\n return tabs(1) + '\\'' + value + '\\''\n if len(str(value)) > 0:\n return tabs(1) + str(value) + ''\n return ''", "def __str__(self):\n\n\t\tif not self.root:\n\t\t\treturn str([])\n\n\t\tQ = [self.root]\n\t\tvals = []\n\t\twhile Q:\n\t\t\tnode = Q.pop(0)\n\t\t\tif node:\n\t\t\t\tvals.append(node.val)\n\t\t\t\tQ.append(node.left)\n\t\t\t\tQ.append(node.right)\n\t\t\telse:\n\t\t\t\tvals.append(None)\n\t\treturn str(vals)", "def __str__(self):\n width = int(np.prod(self.no_parent_states)) #multiplies the elements in list, here tot num of states\n grid = np.meshgrid(*[range(i) for i in self.no_parent_states]) \n s = \"\"\n for (i, e) in enumerate(self.parents):\n s += '+----------+' + '----------+' * width + '\\n'\n gi = grid[i].reshape(-1)\n s += f'|{e:^10}|' + '|'.join([f'{e + \"(\"+str(j)+\")\":^10}' for j in gi])\n s += 
'|\\n'\n\n for i in range(self.no_states):\n s += '+----------+' + '----------+' * width + '\\n'\n state_name = self.name + f'({i})'\n s += f'|{state_name:^10}|' + '|'.join([f'{p:^10.4f}' for p in self.table[i]])\n s += '|\\n'\n\n s += '+----------+' + '----------+' * width + '\\n'\n\n return s", "def findHierarchy(self):\n def __recursiveHelper(key_name, output, indent):\n if key_name in self.relations:\n for employee in self.relations[key_name].employees:\n output += \" \" * indent + str(employee) +\"\\n\"\n # return __recursiveHelper(employee, output, indent+1)\n __recursiveHelper(employee, output, indent+1)\n else:\n print(output)\n return output\n\n\n #experimenting with Iter() and next() iterators/generators\n #and a while loop in the recursive function:\n\n # def __recursiveHelper(key_name, output, indent):\n # if key_name in self.relations:\n # employees = iter(self.relations[key_name].employees)\n # employee = next(employees, \"stop\")\n # while employees and employee != 'stop':\n # output += \" \" * indent + str(employee) +\"\\n\"\n # __recursiveHelper(next(employees, \"stop\"), output, indent+1)\n # else:\n # employee = next(employees, \"stop\")\n #\n # else:\n # return output\n\n\n\n\n\n output = \"\"\n indent = -1\n # self.relations is a dictionary of manager-name string keys.\n # The employees of None are the top-ranking managers.\n # only issue:\n # having trouble returning the concatenated output\n # from the recursive function:\n return __recursiveHelper(None, output, indent+1)", "def tree(self, depth_index=0):\r\n print(self.tree_str(depth_index))", "def __str__(self):\n pieces = [] # sequence of piecewise strings to compose\n self._parenthesize_recur(self.root(), pieces)\n return ''.join(pieces)", "def __str__(self):\n pieces = [] # sequence of piecewise strings to compose\n self._parenthesize_recur(self.root(), pieces)\n return ''.join(pieces)", "def __str__(self):\n if self.keys():\n childStrings = [str(child) for child in self.itervalues()]\n return \"[{} {}]\".format(self.name,\" \".join(childStrings))\n else:\n return \"[{}]\".format(self.name)", "def level_serialize(self, root):\n if not root: return ''\n res = [str(root.val)]\n q = [root]\n\n while q:\n new_q = []\n has_not_null = False\n for v in q:\n left_node = v.left if v else v\n new_q.append(left_node)\n\n right_node = v.right if v else v\n new_q.append(right_node)\n\n has_not_null |= bool(left_node)\n has_not_null |= bool(right_node)\n\n if not has_not_null: break\n q = new_q\n res.extend(str(v.val) if v else 'None' for v in q)\n\n return ','.join(res)", "def pretty_print(self,depth=0):\n\t\tfor i in range(depth):\n\t\t\tprint \"\\t\",\n\t\t\t\t\n\t\tprint self.__str__()\n\t\t\n\t\tfor c in self.tree.children:\n\t\t\tc.viz.pretty_print(depth+1)", "def __str__(self, indent: int=0) -> str:\n root_str = indent * \" \" + str(self.value)\n mid = len(self.non_none_kids()) // 2\n left_str = [c.__str__(indent + 3)\n for c in self.non_none_kids()][: mid]\n right_str = [c.__str__(indent + 3)\n for c in self.non_none_kids()][mid:]\n return '\\n'.join(right_str + [root_str] + left_str)", "def node_s(self, lvl=0):\n s = \"\"\n for n in self.kids:\n s += \" \" * (lvl + 1) + n.node_s(lvl + 1) + \"\\n\\n\"\n return s", "def pformat(self, tree):\n return str(self.to_tree_text_block(tree))", "def print_tree(tree, depth=0):\n print('+','--'*depth,tree[0])\n if isinstance(tree[1], str):\n print('|',' '*depth,'->',tree[1])\n return\n if isinstance(tree[1],Terminal):\n print('|',' '*depth,'->',repr(tree[1]))\n return\n for 
subtree in tree[1]:\n print_tree(subtree, depth+1)", "def join_recursive(lst, sep):\n msg = ''\n for i in lst:\n if isinstance(i, tuple) or isinstance(i, list):\n msg += join_recursive(i, sep)\n else:\n msg += (i + sep)\n return msg", "def __str__(self):\n string = \"\"\n cur_node = self.head\n while cur_node is not None:\n string += cur_node.data.__str__()\n cur_node = cur_node.next\n return string", "def print_tree(self):\n return self.preorder_print(self.root, \"\")[:-1]", "def print_tree(self):\n return self.preorder_print(self.root, \"\")[:-1]", "def __str__(self):\n cur_node = self.head\n str_list = ['{']\n while cur_node is not None:\n str_list.append(str(cur_node))\n if cur_node is not self.tail:\n str_list.append(', ')\n cur_node = cur_node.next_node\n str_list.append('}')\n return ''.join(str_list)", "def print_cr_tree(self, tree):\n str = ''\n try:\n if not tree: return \"None\"\n else:\n for x in tree: str += \" \" + x.name\n except TypeError: return tree.name\n return str", "def printTreeF(node, n, root):\n out = \"\"\n if isinstance(node, DecisionTree):\n out += \"..\" * n + \"[atr \" + str(node.i) + \" < %.2f\" % node.v + \"]\\n\"\n out += printTreeF(node.lt, n + 1, root)\n out += printTreeF(node.gt, n + 1, root)\n else:\n out += \"..\" * n + root.getString(node) + \"\\n\"\n return out", "def _generateNestingLevel(self, obj, **args):\n start = args.get('startOffset')\n end = args.get('endOffset')\n if start is not None and end is not None:\n return []\n\n result = []\n if not args.get('mode', None):\n args['mode'] = self._mode\n args['stringType'] = 'nestinglevel'\n nestingLevel = self._script.utilities.nestingLevel(obj)\n if nestingLevel:\n result.append(self._script.formatting.getString(**args)\\\n % nestingLevel)\n return result", "def get_repr(self, *args):\n level_representation = \"--\"\n if self.level == 0:\n node = \"| \"\n else:\n node = \"+ \"\n _tree_structure = node + level_representation * self.level + ' ' + self.name\n return _tree_structure", "def rcontainer_tree_str(obj):\n tree_task = ContainerTreePrintTask()\n the_recurser = ObjectRecursion(tasks=[tree_task])\n return the_recurser.recurse(obj)[0][0]", "def __str__(self):\n s = ''\n for node in self.nodes:\n s += '\\n\\n'+str(node)+'\\n\\t'\n edges = node.getChildren()\n keys = edges.keys()\n keys.sort()\n for key in keys:\n bounds = edges[key].getSuffix()\n s += str(edges[key])+' '\n for i in xrange(bounds[0], bounds[1]):\n s += self.target[i]\n s += '\\n\\t'\n return s", "def children_str(self, state=None):\n return \"\".join([\n \" {}\\n\".format(c.to_str(state))\n for c in sorted(self.children, key=lambda c: -c.explore_count)])", "def _print_with_depth(self, string, depth):\n print(\"{0}{1}\".format(\" \" * depth, string))", "def __str__(self):\n s = \"--\\n\"\n for node in self:\n s += node.__str__() + \"\\n\"\n return s + \"--\"", "def print_tree(root):\n queue = [(root, [\"1\"])]\n while queue:\n this, depth = queue.pop(0)\n if isinstance(this, int):\n reprr = \"L %i\" % this\n else:\n reprr = str(this.attribute)\n for key, child in this.children.items():\n queue.append((child, depth + [\"%s\" % key]))\n print \"%s: %s\" % (\".\".join(depth), reprr)", "def _print_all_descendants_rec(self, node, level):\n if level == 0:\n print(\"|---\" + str(node))\n \n if node.get_children():\n level += 1\n for child in node.get_children():\n string = \"| \"*level\n print(string + \"|---\" + str(child))\n self._print_all_descendants_rec(child, level)\n return\n else:\n if level == 0:\n string = \"\"\n else:\n string 
= \"|\" + (\" \"*level)\n return", "def _create_str(results_dict, level=0, parent=True):\n result = ''\n keys = sorted(results_dict.keys())\n if not keys:\n return result\n\n if parent:\n has_remote_entries = any(\n self._map(\n lambda lk, entry: not entry.physical_key.is_local()\n )\n )\n pkg_type = 'remote' if has_remote_entries else 'local'\n result = f'({pkg_type} Package)\\n'\n\n for key in keys:\n result += ' ' + (' ' * level) + '└─' + key + '\\n'\n result += _create_str(results_dict[key], level + 1, parent=False)\n\n return result", "def serialize(obj: TreeNode) -> str:\n operators = set(\",:_;()[]\")\n current_depth = 0\n nodes_left = [(obj, 0)]\n fh = ''\n while len(nodes_left) > 0:\n entry = nodes_left.pop()\n node, node_depth = entry\n if node.children and node_depth >= current_depth:\n fh += '('\n nodes_left.append(entry)\n nodes_left += ((child, node_depth + 1) for child in\n reversed(node.children))\n current_depth = node_depth + 1\n else:\n if node_depth < current_depth:\n fh += ')'\n current_depth -= 1\n\n # Note we don't check for None because there is no way to represent\n # an empty string as a label in Newick. Therefore, both None and ''\n # are considered to be the absence of a label.\n lblst = []\n if node.support is not None: # prevents support of NoneType\n lblst.append(str(node.support))\n if node.name: # prevents name of NoneType\n lblst.append(node.name)\n label = ':'.join(lblst)\n if label:\n escaped = \"%s\" % label.replace(\"'\", \"''\")\n if any(t in operators for t in label):\n fh += \"'\"\n fh += escaped\n fh += \"'\"\n else:\n fh += escaped.replace(\" \", \"_\")\n if nodes_left and nodes_left[-1][1] == current_depth:\n fh += ','\n\n fh += ';\\n'\n return fh", "def toString(self, recursive=True, indent=\"\"):\n s = indent + '<' + self.name\n for (n,v) in self.attrs.items():\n s = s + ' ' + n + '=\"' + v + '\"'\n c = self.content.strip()\n if c or len(self.kids) > 0:\n s = s + '>\\n'\n if c: s = s + indent + \" \" + c + '\\n'\n if recursive:\n for nd in self.kids:\n s = s + nd.toString(recursive,indent=indent+\" \")\n s = s + indent + '</' + self.name + '>\\n'\n else:\n s = s + '/>\\n'\n\n return s", "def dump_tree(self) -> str:\n return utils.dump_tree(self._tree)", "def _show_subtree(\n self,\n *,\n node: HierarchicalCategory,\n prefix=\"\",\n last=False,\n format_func: typing.Callable[[HierarchicalCategory], str] = str,\n maxdepth: typing.Union[None, int],\n ) -> str:\n\n r = self._render_node(node, last=last, prefix=prefix, format_func=format_func)\n\n if maxdepth is not None:\n maxdepth -= 1\n if maxdepth == 0: # maxdepth reached, nothing more to do\n return r\n\n child_sets = node.children\n if len(child_sets) == 1:\n children = child_sets[0]\n if children:\n r += self._show_subtree_children(\n children=children,\n format_func=format_func,\n maxdepth=maxdepth,\n prefix=prefix,\n )\n elif len(child_sets) > 1:\n prefix += \"║\"\n i = 1\n for children in child_sets:\n if children:\n if i == 1:\n r += (\n f\"{prefix[:-1]}╠╤══ ('{format_func(node)}'s children,\"\n f\" option 1)\\n\"\n )\n else:\n r += (\n f\"{prefix[:-1]}╠╕ ('{format_func(node)}'s children,\"\n f\" option {i})\\n\"\n )\n\n r += self._show_subtree_children(\n children=children,\n format_func=format_func,\n maxdepth=maxdepth,\n prefix=prefix,\n )\n i += 1\n\n r += f\"{prefix[:-1]}╚═══\\n\"\n\n return r", "def serialize1(self, root):\n if not root:\n return \"\"\n \n serial = \"\"\n stack = [root]\n while stack:\n node = stack.pop()\n if not node:\n serial += \"null,\"\n else:\n serial += 
str(node.val) + ','\n stack.append(node.right)\n stack.append(node.left)\n \n \n print(serial[:-1])\n return serial[:-1]", "def print_tree(account, level=0):\r\n \"\"\" In the example output below, \"GE\" is the root account, \"Jet Engines\"\r\n and \"Appliances\" are first-degree ChildAccounts, and \"DoD Contracts\"\r\n and \"Washing Machines\" are second-degree ChildAccounts.\r\n\r\n > print_tree(general_electric)\r\n GE (Manufacturing, R&D): Daniel Testperson\r\n Jet Engines (Manufacturing, R&D, Aerospace): Daniel Testperson\r\n DoD Contracts (Defense, R&D, Aerospace): William Testperson\r\n Appliances (Manufacturing, Consumer Goods): Janet Testperson\r\n Washing Machines (Consumer Goods): Janet Testperson\r\n \"\"\"\r\n markets_output = \"\"\r\n # work a little magic to properly format the names of the market segments\r\n # specifically strip off the leading and trailing quotes and add a\r\n # separating comma\r\n for market in account.get_market_segments():\r\n markets_output += market.name.strip(\"\\'\") + \", \"\r\n markets_output = markets_output.strip(\"\\'\")\r\n\r\n # print a row to console\r\n print(\"{arrow}> {ac_name} ({markets}): {rep}\"\r\n .format(arrow=2*level*\"-\",\r\n ac_name=account.name,\r\n markets=markets_output[:-2],\r\n rep=account.get_sales_rep()))\r\n\r\n # recursively call print on the children (if any) Base Case: no children\r\n for child in account.get_children():\r\n print_tree(child, level=level+1)", "def postorder_recursive(root):\n if root:\n postorder_recursive(root.left)\n postorder_recursive(root.right)\n print(root.data, end=\" \")", "def serialize(node, tree=\"\"):\n \n \n if (not node): #Base case\n tree += \"# \"\n return tree\n tree += (str(node.val) + \" \")\n tree = serialize(node.left, tree)\n tree = serialize(node.right, tree)\n\n return tree", "def to_string(self):\n tree_structure_str = self.node_to_string(self.root, 0, is_add_children=True).rstrip()\n return tree_structure_str", "def __str__(self):\n tree_rows = [\n [\"Index\", str(self.index)],\n [\n \"Interval\",\n f\"{self.interval.left:.8g}-{self.interval.right:.8g} ({self.span:.8g})\",\n ],\n [\"Roots\", str(self.num_roots)],\n [\"Nodes\", str(len(self.preorder()))],\n [\"Sites\", str(self.num_sites)],\n [\"Mutations\", str(self.num_mutations)],\n [\"Total Branch Length\", f\"{self.total_branch_length:.8g}\"],\n ]\n return util.unicode_table(tree_rows, title=\"Tree\")", "def __str__(self):\n rep = super().__str__()\n rep = rep[:-1] + '; '\n if self.netlist is None:\n rep += 'parent netlist undefined'\n elif self.netlist.name is None:\n rep += 'parent netlist.name undefined'\n else:\n rep += 'parent netlist.name \\'' + self.netlist.name + '\\''\n rep += '>'\n return rep", "def max_depth(self):\r\n lvl = 1\r\n has_lvl_desc = True\r\n while has_lvl_desc:\r\n num_children = len(self.level_n_descendants(lvl))\r\n if num_children==0:\r\n has_lvl_desc = False\r\n else:\r\n lvl+=1\r\n return lvl-1", "def __str__(self, level=0):\n pt1 = ' join '.join([str(video.video_metadata.file) for video in self.videos])\n pt2 = ' = '.join([str(id) for id in self._join_ids])\n join_cols_str = '{}__{}'.format(pt1, pt2)\n out_string = \"\\t\" * level + join_cols_str + \"\\n\"\n for child in self.children:\n out_string += child.__str__(level + 1)\n return out_string", "def __str__(self):\n _str = \"{} From {} depth {}\".format(\n self.__class__.__name__, self.get_name(), self.get_caravan_depth()\n )\n return _str" ]
[ "0.6911783", "0.6560992", "0.64162666", "0.64148694", "0.63143986", "0.6257376", "0.6203628", "0.6170759", "0.6084149", "0.60680145", "0.60680145", "0.6012516", "0.60102165", "0.60009885", "0.59931505", "0.59649897", "0.59439313", "0.59319067", "0.5925515", "0.5863372", "0.58621585", "0.5860999", "0.5727037", "0.5720818", "0.57133317", "0.57086796", "0.56898654", "0.5681657", "0.56807685", "0.5670235", "0.56544554", "0.56391424", "0.56292427", "0.56283087", "0.56266516", "0.5619637", "0.5601229", "0.5600983", "0.55936384", "0.55914956", "0.5586945", "0.5568089", "0.55664706", "0.5563463", "0.55541414", "0.55257946", "0.5523316", "0.55214524", "0.55041116", "0.5487997", "0.54855204", "0.54814315", "0.5463676", "0.5463599", "0.546131", "0.5458429", "0.54559934", "0.545372", "0.5448475", "0.5446323", "0.5443974", "0.54424906", "0.54406494", "0.54383147", "0.54261935", "0.54248405", "0.5423676", "0.54139173", "0.54087037", "0.54047894", "0.5403434", "0.539854", "0.53982186", "0.53982186", "0.53965855", "0.5386904", "0.53817934", "0.538095", "0.53803575", "0.5379759", "0.5369861", "0.5367301", "0.5366084", "0.5364323", "0.5358495", "0.53528076", "0.5348779", "0.5342528", "0.53388", "0.5335846", "0.5332645", "0.5313132", "0.5312532", "0.52898526", "0.52893806", "0.52868664", "0.52846116", "0.5284115", "0.52839655", "0.5283513", "0.5282625" ]
0.0
-1
Compares two response objects based on equality.
def compare(obj_a, obj_b): return tuple_to_string(obj_a) == tuple_to_string(obj_b)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n if not isinstance(other, Response):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, InlineResponse200):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, ClientDetailResponseResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, InlineResponse20023):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, BalanceResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, InlineResponse20020Result):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, GetSesameResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, HtmlAnalysisResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, ChannelReturnResponse):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, TravelRecordResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, VehicleStatsListResponseData):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, PrefetchResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, StackViewResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, VirtualMachinesResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, EscrowTransactionResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, InlineResponse2001):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, SearchArticlesOldGet200ApplicationJsonResponse):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, UserResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, InlineResponse2018):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, QuickSearchResponse):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, MigrateListingResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, InlineResponseDefault1):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, HttpResponseData):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, DepositCompleteResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, ResolvepostResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def compare_response_to_model_instance(self, response, model_instance):\n parsed_response = json_decode(response)\n headers = 
parsed_response['headers']\n data = parsed_response['data']\n self.assertEquals(len(data), len(model_instance))\n for i in range(len(data)):\n datum = self.deserialize(headers, data[i])\n self.compare_model_instance(datum, model_instance[i])", "def __eq__(self, other):\n if not isinstance(other, AttachmentResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, V1UserAppListResponse):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, AsyncIDResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, CreateProductResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, InlineResponse200MessageFondo):\n return False\n\n return self.__dict__ == other.__dict__", "def __ne__(self, other):\n if not isinstance(other, InlineResponse200):\n return True\n\n return self.to_dict() != other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, ImportFunctionResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, MetadataGetMetadataPropertyResponse200):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, SiteResultResponseSites):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, GetMessagingCountersResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def compare_original_response_with_copy(context):\n original = context.response.json()\n copy = context.response_copy\n\n def compare_top_level_values():\n # get the list of fields that are JSON values not arrays\n keys = [val for val in original.iterkeys() if not isinstance(original[val], (dict, list, set))]\n assert keys, ('Expected at least 1 field key to compare but got none!')\n logging.debug('List of top tier field keys to compare: %s', keys)\n for key in keys:\n assert original[key] == copy[key]\n logging.debug(\n 'All top level fields in the response copy have the same values as'\n ' in the original response. 
Here is a list of compared fields:\\n%s',\n ', '.join(keys))\n\n def compare_items():\n original_items = original['items']\n copy_items = copy['items']\n skip = ['title', 'last_activity_date']\n for original_item in original_items:\n # get all item field keys\n keys = [val for val in original_item.iterkeys()]\n # remove the keys that need to be skipped\n keys = [x for x in keys if x not in skip]\n for copy_item in copy_items:\n # find matching items\n if original_item['question_id'] == copy_item['question_id']:\n # compare original an copied items\n for key in keys:\n assert original_item[key] == copy_item[key]\n logging.debug(\n 'All fields in the copied item ID: %s'\n ' have the same values as in in the original items',\n copy_item['question_id'])\n\n compare_top_level_values()\n compare_items()", "def __eq__(self, other):\n if not isinstance(other, InlineResponse20021Links):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, TokenizeResponseSchema):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, AdditionalInfoResponseTimestamps):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, InlineResponse2006Billing):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, DestinyCharacterResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, PathResponseResultResponseNetworkElementsInfo):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, CreateDeploymentResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, AdResult):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, CreateMyActionTemplateResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, AddTunnelResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, OrganizationPolicyAssignmentResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, ExportResponseMetadata):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n\t\ttry:\n\t\t\t# Attributes and properties to compare\n\t\t\tattrs = (\"url\", \"status_code\", \"reason\", \"headers\", \"content\")\n\t\t\tfor attr in attrs:\n\t\t\t\tif getattr(self, attr) != getattr(other, attr):\n\t\t\t\t\treturn False\n\t\texcept AttributeError:\n\t\t\treturn NotImplemented\n\t\telse:\n\t\t\treturn True", "def __eq__(self, other):\n if not isinstance(other, UpdateApiGroupV2Response):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, ShowInstanceDetailResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, TranslationsGetTranslatorFormatsResponse200Items):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, AggregatedReturnsRequest):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, ShowDataJobResponse):\n return 
False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\r\n if not isinstance(other, MessageConsumeResp):\r\n return False\r\n\r\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, CreateCcRuleResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __ne__(self, other):\n if not isinstance(other, ChannelReturnResponse):\n return True\n\n return self.to_dict() != other.to_dict()", "def __eq__(self, other) -> bool:\n if json.dumps(self.data,sort_keys=True) == json.dumps(other.data,sort_keys=True):\n return True\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, DriversSummaryResponseSummaries):\n return False\n\n return self.__dict__ == other.__dict__", "def __ne__(self, other):\n if not isinstance(other, VehicleStatsListResponseData):\n return True\n\n return self.to_dict() != other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, PcrTestRecordResult):\n return False\n\n return self.__dict__ == other.__dict__", "def assert_equal_resource(res1, res2):\n assert isinstance(res1, FakedBaseResource)\n assert isinstance(res2, FakedBaseResource)\n assert res1.uri == res2.uri\n assert res1.oid == res2.oid\n names1 = set(res1.properties.keys())\n names2 = set(res2.properties.keys())\n if names1 != names2:\n raise AssertionError(\n \"Resources do not have the same set of properties:\\n\"\n \"- res1 names: {}\\n\"\n \"- res2 names: {}\\n\".\n format(names1, names2))\n for name in res1.properties:\n value1 = res1.properties[name]\n value2 = res2.properties[name]\n if value1 != value2:\n raise AssertionError(\n \"Resources do not have the same value for property {}:\\n\"\n \"- res1 value: {}\\n\"\n \"- res2 value: {}\\n\".\n format(name, value1, value2))", "def __eq__(self, other):\n if not isinstance(other, ListJobInfoDetailResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __ne__(self, other):\n if not isinstance(other, SearchArticlesOldGet200ApplicationJsonResponse):\n return True\n\n return self.to_dict() != other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, HTTPConnectionData):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, AddUserResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, CreatePersistentStorageResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, SurveyResponseItem):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, ListGraphsRespGraphs):\n return False\n\n return self.__dict__ == other.__dict__", "def __ne__(self, other):\n if not isinstance(other, QuickSearchResponse):\n return True\n\n return self.to_dict() != other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, ResultStatus):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, MdHistoryRequestCO):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, CreateAssetCategoryResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, GlanceCreateImageMetadataResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, 
UpdateVehicleRequest):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, EarningResult):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, CoordRequestResponseLocationProperties):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, ComplianceSummaryRuleResultRequest):\n return False\n\n return self.to_dict() == other.to_dict()", "def _matcher(r1: vcr.request.Request, r2: vcr.request.Request) -> None:\n assert r1.uri == r2.uri and r1.body == r2.body and r1.headers == r2.headers", "def __eq__(self, other):\n if not isinstance(other, ListBareMetalServersRequest):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, GatewayJson):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, MainlandTravelPermitResult):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, CrfItemRpc):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, ServerRateLimiting):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, StudySiteRpc):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, PaginatedSearchResultsDto):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, AnalyticsLicenseUpdateRequest):\n return False\n\n return self.__dict__ == other.__dict__", "def compare_json(json1, json2):\r\n return JsonType.eq(json1, json2)", "def assert_response_correct(self, response, expected_status, expected_content):\n assert response.status_code == expected_status\n parsed_content = json.loads(response.content.decode('utf-8'))\n assert parsed_content == expected_content", "def __eq__(self, other):\n if not isinstance(other, ShowAutoCreatePolicyResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, BlogAuthorCloneRequestVNext):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, GetUniverseAncestries200Ok):\n return False\n\n return self.__dict__ == other.__dict__", "def __ne__(self, other):\n if not isinstance(other, V1UserAppListResponse):\n return True\n\n return self.to_dict() != other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, DriverLicenseResult):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, APIObjectBusinessInformation):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, GetClientServicesRequest):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, ShowProjectWorkHoursResponseBodyWorkHours):\n return False\n\n return self.__dict__ == other.__dict__", "def testEquality(self) -> None:\n r = data_types.Result('test_1', ('win', 'x86'), (1, 10), 'id')\n other = data_types.Result('test_1', ('win', 'x86'), (1, 10), 'id')\n self.assertEqual(r, other)\n\n other = data_types.Result('test_2', ('win', 'x86'), (1, 10), 'id')\n self.assertNotEqual(r, other)\n\n other = 
data_types.Result('test_1', ('win', 'arm64'), (1, 10), 'id')\n self.assertNotEqual(r, other)\n\n other = data_types.Result('test_1', ('win', 'x86'), (2, 11), 'id')\n self.assertNotEqual(r, other)\n\n other = data_types.Result('test_1', ('win', 'x86'), (1, 10), 'id_2')\n self.assertNotEqual(r, other)\n\n other = None\n self.assertNotEqual(r, other)", "def __eq__(self, other):\n if not isinstance(other, PayorV1):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, CertificateIssuerConfigResponse):\n return False\n\n return self.__dict__ == other.__dict__" ]
[ "0.7743906", "0.7587754", "0.7484174", "0.7346719", "0.73336023", "0.73172593", "0.7316397", "0.7289357", "0.72549665", "0.724851", "0.72303915", "0.722987", "0.7205563", "0.7172449", "0.71693856", "0.7155242", "0.7135397", "0.7128382", "0.71163017", "0.71147203", "0.71114576", "0.7098978", "0.70904654", "0.7088183", "0.7072157", "0.7021535", "0.6996348", "0.6967296", "0.69227546", "0.691377", "0.690756", "0.68788934", "0.6868401", "0.68497956", "0.6849634", "0.68262845", "0.68099254", "0.68038434", "0.6801782", "0.67957735", "0.67915696", "0.678959", "0.67788565", "0.67698985", "0.67570233", "0.6740477", "0.6737533", "0.67284954", "0.6725435", "0.6699867", "0.6675002", "0.66635025", "0.6657241", "0.6645903", "0.6640059", "0.6639594", "0.6631747", "0.66152537", "0.6613383", "0.65815467", "0.6576572", "0.65487677", "0.65402", "0.653482", "0.6518838", "0.6516701", "0.6509902", "0.6486563", "0.6485789", "0.6483346", "0.64654565", "0.6463953", "0.64476955", "0.6426392", "0.6420709", "0.64076436", "0.6403213", "0.63903654", "0.6384541", "0.6368121", "0.6353892", "0.63359666", "0.63327426", "0.6317497", "0.6311536", "0.63036346", "0.6300251", "0.6297724", "0.6296016", "0.6293721", "0.62820756", "0.6274771", "0.6264581", "0.625869", "0.62537724", "0.625109", "0.6247272", "0.62311167", "0.6229572", "0.6225593", "0.62216896" ]
0.0
-1
Compares two response objects based on their NVCness. Only returns true if both responses are in agreement with either responding NVC or not NVC.
def compare(obj_a, obj_b): return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) == 'NVC')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_vn_in_api_server(self):\n self.api_verification_flag = True\n self.api_s_vn_obj = self.api_s_inspect.get_cs_vn(\n domain=self.domain_name, project=self.project_name,\n vn=self.vn_name, refresh=True)\n if not self.api_s_vn_obj:\n self.logger.debug(\"VN %s is not found in API-Server\" %\n (self.vn_name))\n self.api_verification_flag = self.api_verification_flag and False\n return False\n if self.api_s_vn_obj['virtual-network']['uuid'] != self.uuid:\n self.logger.warn(\n \"VN Object ID %s in API-Server is not what was created\" % (self.uuid))\n self.api_verification_flag = self.api_verification_flag and False\n return False\n\n subnets = list()\n for ipam in self.api_s_vn_obj['virtual-network']['network_ipam_refs']:\n subnets.extend(ipam['attr']['ipam_subnets'])\n for vn_subnet in self.vn_subnets:\n subnet_found = False\n vn_subnet_cidr = str(IPNetwork(vn_subnet['cidr']).ip)\n for subnet in subnets:\n if subnet['subnet']['ip_prefix'] == vn_subnet_cidr:\n subnet_found = True\n if not subnet_found:\n self.logger.warn(\n \"VN Subnet IP %s not found in API-Server for VN %s\" %\n (vn_subnet_cidr, self.vn_name))\n self.api_verification_flag = self.api_verification_flag and False\n return False\n # end for\n self.api_s_route_targets = self.api_s_inspect.get_cs_route_targets(\n vn_id=self.uuid)\n if not self.api_s_route_targets:\n errmsg = \"Route targets not yet found in API-Server for VN %s\" % self.vn_name\n self.logger.error(errmsg)\n self.api_verification_flag = self.api_verification_flag and False\n return False\n self.rt_names = self.api_s_inspect.get_cs_rt_names(\n self.api_s_route_targets)\n\n if not self.rt_names:\n self.logger.debug(\n 'RT names not yet present for VN %s', self.vn_name)\n return False\n\n if self.rt_number:\n if not any(item.endswith(self.rt_number) for item in self.rt_names):\n self.logger.debug('RT %s is not found in API Server RT list %s ' %(\n self.rt_number, self.rt_names))\n self.api_verification_flag = self.api_verification_flag and False\n return False\n\n self.api_s_routing_instance = self.api_s_inspect.get_cs_routing_instances(\n vn_id=self.uuid)\n if not self.api_s_routing_instance:\n msg = \"Routing Instances not found in API-Server for VN %s\" % self.vn_name\n self.logger.warn(msg)\n self.api_verification_flag = self.api_verification_flag and False\n return False\n self.ri_ref = self.api_s_routing_instance['routing_instances'][0]['routing-instance']\n if not self.verify_network_id():\n return False\n self.api_verification_flag = self.api_verification_flag and True\n self.logger.info(\"Verifications in API Server for VN %s passed\" %\n (self.vn_name))\n return True", "def __eq__(self, other):\n if not isinstance(other, VirtualMachinesResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def consistent(self, c, combination):\t\t\n\t\treturn (self.response(c, combination) \n\t\t\t== self.response(combination, self.code))", "def compare_cpes(lhs: ImageCpe, rhs: ImageCpe):\n vendor_cmp = compare_fields(lhs.vendor, rhs.vendor)\n if vendor_cmp != 0:\n return vendor_cmp\n\n name_cmp = compare_fields(lhs.name, rhs.name)\n if name_cmp != 0:\n return name_cmp\n\n version_cmp = compare_fields(lhs.version, rhs.version)\n if version_cmp != 0:\n return version_cmp\n\n update_cmp = compare_fields(lhs.update, rhs.update)\n if update_cmp != 0:\n return update_cmp\n\n meta_cmp = compare_fields(lhs.meta, rhs.meta)\n if meta_cmp != 0:\n return meta_cmp\n\n # all avenues of comparison have been depleted, the two cpes are same for all practical 
purposes\n return 0", "def test_equality(self):\n self.assertEqual(self._version1, self._version1)\n self.assertNotEqual(self._version2, self._version1)\n self.assertEqual(self._version1, PrcsVersion(self._version1))", "def __eq__(self, other):\n if not isinstance(other, InlineResponse20020Result):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, ComputingResourceFlavorsRsp):\n return False\n\n return self.__dict__ == other.__dict__", "def _cryptovariables_equal(x, y):\n\n return (\n _hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, x) ==\n _hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, y))", "def __eq__(self, other):\n if not isinstance(other, ClientDetailResponseResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def is_response_correct(self, response):\n for answer in self.my_osid_object.get_answers():\n if self._is_match(response, answer):\n return True\n return False", "def __eq__(self, other):\n if not isinstance(other, InlineResponse20023):\n return False\n\n return self.__dict__ == other.__dict__", "def __ne__(self, other):\n if not isinstance(other, ChannelReturnResponse):\n return True\n\n return self.to_dict() != other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, IQueryUserPartnerCouponsResultV2):\n return False\n\n return self.__dict__ == other.__dict__", "def is_correctness_available_for_response(self, response):\n return True", "def __eq__(self, other):\n if not isinstance(other, ChannelReturnResponse):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, BalanceResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def is_equivalence(self) -> bool:", "def __eq__(self, other: Any) -> bool:\n if not isinstance(other, DetectionResult):\n return False\n\n return self.to_pb2().__eq__(other.to_pb2())", "def __eq__(self, other):\n if not isinstance(other, InlineResponse2018):\n return False\n\n return self.__dict__ == other.__dict__", "def vote_result(self) -> bool:\n token_score = self.create_interface_score(self._token_score.get(), TokenInterface)\n yes = 0\n no = 0\n for address in self._voted:\n vote = self._vote[str(address)]\n if vote == 'yes':\n yes += token_score.balanceOf(address)\n else:\n no += token_score.balanceOf(address)\n self._yes_votes.set(yes)\n self._no_votes.set(no)\n if self._yes_votes.get() > (token_score.totalSupply() - token_score.balanceOf(self._rewards_score.get())) // 2:\n return True\n else:\n return False", "def __ne__(self, other):\n if not isinstance(other, InlineResponse200):\n return True\n\n return self.to_dict() != other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, GetSesameResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def compareVerOnly(v1, v2):\n return compareEVR(('', v1, ''), ('', v2, ''))", "def __eq__(self, other):\n if not isinstance(other, InlineResponse200):\n return False\n\n return self.to_dict() == other.to_dict()", "def compare_results(self, result1, result2):\n return self.compare_measurements(measurement1=result1, measurement2=result2)", "def __eq__(self, other):\n if not isinstance(other, InlineResponse2001):\n return False\n\n return self.__dict__ == other.__dict__", "def verif_response(response):\n if response.status_code >= 200 and response.status_code <= 299:\n logging.debug(\"response server OK::{}\".format(response.text))\n return True\n\n logging.error(\"response server 
KO::{}\".format(response.text))\n return False", "def __eq__(self, other):\n if not isinstance(other, DepositCompleteResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def compare_response_to_model_instance(self, response, model_instance):\n parsed_response = json_decode(response)\n headers = parsed_response['headers']\n data = parsed_response['data']\n self.assertEquals(len(data), len(model_instance))\n for i in range(len(data)):\n datum = self.deserialize(headers, data[i])\n self.compare_model_instance(datum, model_instance[i])", "def test_equal_method(self):\n sc1 = ServComs(self.serverIp, \"1\")\n sc2 = ServComs(self.serverIp, \"1\")\n sc3 = ServComs(self.serverIp, \"2\")\n\n self.assertEqual(sc1, sc2) # Same ip and id\n self.assertNotEqual(sc1, sc3) # different ip", "def test_c(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='2.2.3', name='bar')\n\n self.assertFalse(v1 == v2)\n self.assertFalse(v2 == v1)", "def test_c(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='1.2.3', name='bar')\n\n self.assertFalse(v1 != v2)\n self.assertFalse(v2 != v1)", "def compare_results(self):\n return self.guess_number == self.secret_number", "def test_1010(self, mn_client_v1_v2):\n d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)\n obj_1a_str = mn_client_v1_v2.get(\"test_pid_1\").content\n obj_2a_str = mn_client_v1_v2.get(\"test_pid_2\").content\n obj_1b_str = mn_client_v1_v2.get(\"test_pid_1\").content\n obj_2b_str = mn_client_v1_v2.get(\"test_pid_2\").content\n assert obj_1a_str == obj_1b_str\n assert obj_2a_str == obj_2b_str", "def compare(isvgAppliance1, isvgAppliance2):\n ret_obj1 = get_all(isvgAppliance1)\n ret_obj2 = get_all(isvgAppliance2)\n\n for obj in ret_obj1['data']['snmpObjects']:\n del obj['uuid']\n for obj in ret_obj2['data']['snmpObjects']:\n del obj['uuid']\n\n return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=['uuid'])", "def __ne__(self, other):\n if not isinstance(other, VehicleStatsListResponseData):\n return True\n\n return self.to_dict() != other.to_dict()", "def test_compare_different_expectations(self):\n\n pd_single = norm(0, 1)\n pd = []\n for i in range(0, 3):\n pd.append(pd_single)\n meas = [-1, 0, 1]\n meanCRIGN1, singleCRIGN1 = crign.crign(pd, meas)\n\n pd2 = []\n for i in range(0, 3):\n pd2.append(norm(i, 1))\n meas2 = [-1, 1, 3]\n\n meanCRIGN2, singleCRIGN2 = crign.crign(pd2, meas2)\n\n is_good = np.isclose(singleCRIGN1, singleCRIGN2).all()\n assert_true(is_good, msg=\"Relation of individual CRIGN values should return roughly the same value.\")", "def __eq__(self, other):\n if not isinstance(other, VehicleStatsListResponseData):\n return False\n\n return self.to_dict() == other.to_dict()", "def are_equal(self, sp1, sp2):\n return True", "def __eq__(self, other):\n if not isinstance(other, DriverLicenseResult):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, PrefetchResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def is_converged(self,a,b):\n return np.array_equal(a,b)", "def is_match(self, other_cpe):\n if not isinstance(other_cpe, CPE):\n return False\n\n if self.part == other_cpe.part and self.vendor == other_cpe.vendor:\n\n if other_cpe.product not in ['*', self.product]:\n return False\n if other_cpe.version not in ['*', self.version]:\n return False\n if other_cpe.update not in ['*', self.update]:\n return False\n 
if other_cpe.edition not in ['*', self.edition]:\n return False\n if other_cpe.language not in ['*', self.language]:\n return False\n if other_cpe.sw_edition not in ['*', self.sw_edition]:\n return False\n if other_cpe.target_sw not in ['*', self.target_sw]:\n return False\n if other_cpe.target_hw not in ['*', self.target_hw]:\n return False\n if other_cpe.other not in ['*', self.other]:\n return False\n\n return True\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, InlineResponse200MessageFondo):\n return False\n\n return self.__dict__ == other.__dict__", "def compare(vcfX, gzvcfY, innerProd):\n regions = countOverlapsVCFGZVCF(vcf = vcfX, gzvcf = gzvcfY)\n # countOverlaps(vcfX, vcfY)\n return innerProd(regions)\n # TODO: figure out a distance metric\n # return regions[\"both\"]/(regions[\"onlyX\"] + regions[\"onlyY\"] + regions[\"both\"])", "def __eq__(self, other):\n if not isinstance(other, GetUniverseAncestries200Ok):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, InlineResponseDefault1):\n return False\n\n return self.__dict__ == other.__dict__", "def are_equal(self, sp1, sp2):\n return", "def __eq__(self, other):\n if not isinstance(other, V0CommandRequest):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, DestinyCharacterResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, CreateProductResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, InlineResponse2006Billing):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, ResolvepostResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, IpamsvcDHCPUtilization):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, CreateCcRuleResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def test_equal(self):\n\n candidate1 = pyrankvote.Candidate(\"Per\")\n candidate2 = pyrankvote.Candidate(\"Per\")\n candidate3 = pyrankvote.Candidate(\"Aase\")\n\n self.assertEqual(candidate1, candidate2, \"These candidates should be equal/the same candidate.\")\n self.assertNotEqual(candidate1, candidate3, \"These candidates should NOT be equal/the same candidate.\")", "def testReponse(question, reponse):\r\n if reponse == question[5]:\r\n return True\r\n else:\r\n return False", "def nonceVerification(nonce, decryptedNonce):\n #Enter code to compare the nonce and the decryptedNonce. 
This method\n # should return a string of \"200 OK\" if the parameters match otherwise\n # it should return \"400 Error Detected\"\n if nonce == decryptedNonce:\n return \"200 OK\"\n else:\n return \"400 Error Detected\"", "def _compare_strict(self, other):\n\n if self.address_type != other.address_type:\n return MatchResult(\"NO\", 0.0)\n\n if len(self.tokens) != len(other.tokens):\n return MatchResult(\"NO\", 0.0)\n\n for t1, t2 in zip(self.tokens, other.tokens):\n if t1 != t2:\n return MatchResult(\"NO\", 0.0)\n\n if self.has_number and self.address_number == other.address_number:\n return MatchResult(\"A1\", 1.0)\n else:\n return MatchResult(\"A3\", 1.0)", "def test_equal_on_equal(self):\n a = Certificate(\n certificate_type=self.certificate_type_b,\n certificate_value=self.certificate_value_b)\n b = Certificate(\n certificate_type=self.certificate_type_b,\n certificate_value=self.certificate_value_b)\n\n self.assertTrue(a == b)\n self.assertTrue(b == a)", "def equivalent(kls, first, second):\n if first.empty() and second.empty():\n return True\n elif first.vertices.shape[0] != second.vertices.shape[0]:\n return False\n elif first.edges.shape[0] != second.edges.shape[0]:\n return False\n\n EPSILON = 1e-7\n\n vertex1, ct1 = np.unique(first.vertices, axis=0, return_counts=True)\n vertex2, ct2 = np.unique(second.vertices, axis=0, return_counts=True)\n \n vertex_match = np.all(np.abs(vertex1 - vertex2) < EPSILON)\n ct_match = np.all(ct1 == ct2)\n if not (vertex_match and ct_match):\n return False\n\n g1 = nx.Graph()\n g1.add_edges_from(first.edges)\n g2 = nx.Graph()\n g2.add_edges_from(second.edges)\n edges_match = nx.is_isomorphic(g1, g2)\n del g1 \n del g2\n\n if not edges_match:\n return False\n\n second_verts = {}\n for i, vert in enumerate(second.vertices):\n second_verts[tuple(vert)] = i\n \n attrs = [ attr['id'] for attr in first.extra_attributes ]\n for attr in attrs:\n buf1 = getattr(first, attr)\n buf2 = getattr(second, attr)\n if len(buf1) != len(buf2):\n return False\n\n for i in range(len(buf1)):\n i2 = second_verts[tuple(first.vertices[i])]\n if buf1[i] != buf2[i2]:\n return False\n\n return True", "def __eq__(self, other):\n if not isinstance(other, Response):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, EscrowTransactionResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, StackViewResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def _coincident(a,b):\n return np.array_equal(a, b) or np.array_equal(np.flipud(a),b)", "def are_equal(self, sp1, sp2):\n for s1 in sp1.keys():\n spin1 = getattr(s1, \"spin\", 0)\n oxi1 = getattr(s1, \"oxi_state\", 0)\n for s2 in sp2.keys():\n spin2 = getattr(s2, \"spin\", 0)\n oxi2 = getattr(s2, \"oxi_state\", 0)\n if (s1.symbol == s2.symbol and oxi1 == oxi2 and\n spin2 == -spin1):\n break\n else:\n return False\n return True", "def test_c(self):\n v1 = versions.Version(version='1.2', name='foo')\n v2 = versions.Version(version='1.2.1', name='bar')\n\n self.assertTrue(v1 <= v2)\n self.assertFalse(v2 <= v1)", "def __eq__(self, other):\n if not isinstance(other, VnicFcAdapterPolicyAllOf):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, CountPreoccupyIpNumRequest):\n return False\n\n return self.__dict__ == other.__dict__", "def equality_testing(self, piDD):\r\n p = False\r\n if len(self.return_values())== 
len(piDD.return_values()):\r\n if len(self.return_keys()) == len(piDD.return_keys()):\r\n p = True\r\n for i in self.return_keys():\r\n if i in piDD.return_keys():\r\n p = True\r\n if p == True:\r\n for i in self.return_values():\r\n if i in piDD.return_values():\r\n return 1\r\n return 0", "def isResp(obxDict):\n readingCode = getReadingCode(obxDict)\n return readingCode == '76270-8'", "def test_equal(scraper):\n\n assert scraper.is_compatible_with(punters_client.__version__) is True", "def __eq__(self, other):\n if not isinstance(other, GetMessagingCountersResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def compare(self, other_code_object):\n response = []\n if self.name != other_code_object.name:\n response.append('DIFF: Code object names: %s' % self.name)\n response.append('and %s' % other_code_object.name)\n if self.object_type != other_code_object.object_type:\n response.append('DIFF: Code object types: %s' % self.object_type)\n response.append('and %s' % other_code_object.object_type)\n return response", "def __eq__(self, other):\n if not isinstance(other, OrganizationPolicyAssignmentResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def nonceVerification(nonce, decryptedNonce):\n if nonce == decryptedNonce:\n return \"200 OK\"\n else:\n return \"400 Error Detected\"", "def compare(self):\n same = self.eta()[0] and self.omega()[0] and self.data()[0]\n return same", "def test_not_equal_on_equal(self):\n a = Certificate(\n certificate_type=self.certificate_type_b,\n certificate_value=self.certificate_value_b)\n b = Certificate(\n certificate_type=self.certificate_type_b,\n certificate_value=self.certificate_value_b)\n\n self.assertFalse(a != b)\n self.assertFalse(b != a)", "def eq_version(v0, v1):\n return cmp_version(v0, v1) == 0", "def __ne__(self, other):\n if not isinstance(other, SearchArticlesOldGet200ApplicationJsonResponse):\n return True\n\n return self.to_dict() != other.to_dict()", "def compare_version_objects(version1, version2):\n if version1.epoch < version2.epoch:\n return -1\n if version1.epoch > version2.epoch:\n return 1\n result = compare_strings(version1.upstream, version2.upstream)\n if result != 0:\n return result\n if version1.revision or version2.revision:\n return compare_strings(version1.revision, version2.revision)\n return 0", "def test_when_oppenent_all_Cs(self):\n self.responses_test([C, C, C, C], [C, C, C, C], [C, C, C],\n random_seed=5)", "def test_when_oppenent_all_Cs(self):\n self.responses_test([C, C, C, C], [C, C, C, C], [C, C, C],\n random_seed=5)", "def verify_response(self, system_name, expected_api_response,\n expected_response_type, comparison_mode,\n request_id=None, generate_output_diff_file=\"Yes\"):\n arguments = {'system_name': system_name,\n 'expected_api_response': expected_api_response,\n 'expected_response_type': expected_response_type,\n 'comparison_mode': comparison_mode,\n 'request_id': request_id,\n 'generate_output_diff_file': generate_output_diff_file}\n wdesc = \"Verify API response with the expected API response\"\n pNote(wdesc)\n output_file = self.logsdir+\"/difference_output.log\"\n output_file = Utils.file_Utils.addTimeDate(output_file)\n generate_output_diff_file = Utils.rest_Utils.\\\n resolve_value_of_verify(generate_output_diff_file)\n\n try:\n arguments[\"expected_api_response\"] = Utils.rest_Utils.\\\n check_ext_get_abspath(arguments[\"expected_api_response\"],\n self.tc_path)\n\n credentials = Utils.data_Utils.\\\n get_user_specified_tag_values_in_tc(self.datafile, 
**arguments)\n\n credentials[\"expected_api_response\"] = Utils.rest_Utils.\\\n check_ext_get_abspath(credentials[\"expected_api_response\"],\n os.path.dirname(self.datafile))\n\n if request_id:\n response = Utils.data_Utils.get_object_from_datarepository(\n \"{0}_{1}_api_response_object\".format(system_name,\n credentials['request_id']))\n else:\n response = Utils.data_Utils.get_object_from_datarepository(\n \"{0}_api_response_object\".format(system_name))\n except Exception as exception:\n pNote(exception, \"error\")\n return False\n if any([x in credentials[\"comparison_mode\"] for x in [\"xpath=\", \"jsonpath=\", \"regex=\"]]) \\\n or credentials[\"comparison_mode\"] == \"\":\n status = self.rest_object.cmp_content_response(self.datafile, system_name, response,\n credentials['expected_api_response'],\n credentials['expected_response_type'],\n credentials['comparison_mode'])\n else:\n status = self.rest_object.cmp_response(response,\n credentials['expected_api_response'],\n credentials['expected_response_type'],\n output_file,\n credentials['generate_output_diff_file'])\n return status", "def compare_car_ads(self):\n for car_id, car in self.new_cars.items():\n if self.car_ad_is_new(car_id):\n continue\n self._diff(car_id, car, \"price\")\n self._diff(car_id, car, \"comments\")", "def test_c(self):\n v1 = versions.Version(version='1.2', name='foo')\n v2 = versions.Version(version='1.2.1', name='bar')\n\n self.assertFalse(v1 >= v2)\n self.assertTrue(v2 >= v1)", "def check_comparison_response(self, response, city_names):\n body = json_decode(response.body)\n\n # General checks for the correct information\n self.assertIsNotNone(body.get('city_data'))\n data = body.get('city_data')\n # Ensure the results contain each city name\n self.assertEqual(len(data), len(city_names), \"Incorrect number of cities returned\")\n self.assertTrue(set([x.lower() for x in city_names]) <= {x.get('city_name').lower() for x in data},\n \"All cities not included in the results\")\n for city_result in data:\n self.assertIsInstance(city_result, dict, \"Incorrect type for the city_data entries\")\n self.assertTrue({'city_name', 'city_rank', 'city_score'} <= set(city_result.keys()),\n \"Missing entries in a city_data entry\")\n self.assertIsInstance(city_result.get('city_rank'), int, \"Incorrect type of city_rank\")\n self.assertIsInstance(city_result.get('city_score'), numbers.Number, \"Incorrect type of city_score\")\n self.assertIsInstance(city_result.get('city_name'), str, \"Incorrect type of city_name\")\n\n # Get the ranks and scores for the cities and ensure they are in the right order (low->high for ranks and\n # high->low for scores).\n results = {x.get('city_rank'): x.get('city_score') for x in data}\n ranks = sorted(results.keys())\n scores = sorted(results.values(), reverse=True)\n for entry, score in zip(ranks, scores):\n self.assertEqual(results.get(entry), score, \"The city rankings are not in the correct order\")", "def __eq__(self, other):\n if not isinstance(other, GetClientServicesRequest):\n return False\n\n return self.__dict__ == other.__dict__", "def checkSame(self, other):\n checkVector(self, other)\n futures = self.client.map(_call_checkSame, self.vecDask, other.vecDask, pure=False)\n results = self.client.gather(futures)\n return all(results)", "def _compare(self, x,y, pr=False):\n batched = self.ex.batched(x, y)\n looped = self.ex.looped(x, y)\n #print(f'batched value {batched}')\n #print(f'looped value {looped}')\n \n self.assertTrue(\n torch.equal(batched, looped)\n )", "def 
_compare_results(y_pred, y_pred_sampled, y_true):\n scores_og = _compute_scores(y_pred, y_true)\n scores_samp = _compute_scores(y_pred_sampled, y_true)\n\n # Aggreggate both results\n result_comp = pd.concat({\"Og\": scores_og, \"samp\": scores_samp}, axis = 1)\n\n return result_comp", "def __cmp__(self, other_code_object):\n # If our 'compare' method returns anything there are differences\n if self.compare(other_code_object):\n return True\n else:\n return False", "def __eq__(self, other):\n if isinstance(other, CNPJ):\n return self.cnpj == other.cnpj\n return False", "def _compare(self, actual, expected, num_vert):\n # get sparktk res in pandas form and iterate\n actual_pandas = actual.to_pandas()\n for (index, row) in actual_pandas.iterrows():\n # get the row id and deg cen result as floats\n # from the sparktk result\n row_id = float(row[\"id\"])\n row_res = float(row[\"degree_centrality\"])\n\n # now we get the expected result from our calculated edge_counts\n # if that vertex isn't in edge_counts it means we incurred no instances\n # of edges originating or ending there, therefore the edge_count is 0\n if int(row_id) in expected:\n expected_res_for_row = expected[int(row_id)]\n else:\n expected_res_for_row = 0\n\n # ensure that the expected res matches the actual res from sparktk\n self.assertAlmostEqual(row_res, expected_res_for_row / float(num_vert) - 1)", "def __eq__(self, other):\n if not isinstance(other, HtmlAnalysisResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def test_consistent(self):\n\n np.testing.assert_array_almost_equal(\n self.ocv_av(self.t), self.ocp_p_av(self.t) - self.ocp_n_av(self.t)\n )\n np.testing.assert_array_almost_equal(\n self.eta_r_av(self.t), self.eta_r_p_av(self.t) - self.eta_r_n_av(self.t)\n )\n\n np.testing.assert_array_almost_equal(\n self.voltage(self.t),\n self.ocv_av(self.t)\n + self.eta_r_av(self.t)\n + self.eta_e_av(self.t)\n + self.delta_phi_s_av(self.t)\n + self.eta_sei_av(self.t),\n decimal=2,\n )", "def cz_compare(a, b):\n ma = __unicode_to_ascii(unicode(a))\n mb = __unicode_to_ascii(unicode(b))\n return cmp(ma, mb)", "def __eq__(self, other):\n if not isinstance(other, SearchArticlesOldGet200ApplicationJsonResponse):\n return False\n\n return self.to_dict() == other.to_dict()", "def compare():\n body: t.Any = request.json\n check_error({'input': {'old': {}, 'new': {}}}, body)\n response_new = rpc_search({'input': body['input']['new']})\n response_old = rpc_search({'input': body['input']['old']})\n\n modules_new = response_new['yang-catalog:modules']['module']\n modules_old = response_old['yang-catalog:modules']['module']\n\n if len(modules_new) == 0 or len(modules_old) == 0:\n abort(404, description='No hits found either in old or new input')\n\n new_mods = []\n for mod_new in modules_new:\n new_rev = mod_new['revision']\n new_name = mod_new['name']\n found = False\n new_rev_found = False\n for mod_old in modules_old:\n old_rev = mod_old['revision']\n old_name = mod_old['name']\n if new_name == old_name and new_rev == old_rev:\n found = True\n break\n if new_name == old_name and new_rev != old_rev:\n new_rev_found = True\n if not found:\n mod_new['reason-to-show'] = 'New module'\n new_mods.append(mod_new)\n if new_rev_found:\n mod_new['reason-to-show'] = 'Different revision'\n new_mods.append(mod_new)\n if len(new_mods) == 0:\n abort(404, description='No new modules or modules with different revisions found')\n output = {'output': new_mods}\n return output", "def __ne__(self, other):\n if not isinstance(other, 
QuickSearchResponse):\n return True\n\n return self.to_dict() != other.to_dict()" ]
[ "0.57760257", "0.57723004", "0.57442796", "0.5704212", "0.5581943", "0.55500567", "0.5549865", "0.55349195", "0.55025715", "0.54922974", "0.544647", "0.5440523", "0.54150635", "0.5412424", "0.5370746", "0.5357937", "0.535282", "0.5340017", "0.53387195", "0.5306857", "0.5294818", "0.5288538", "0.52878577", "0.52720547", "0.5267108", "0.5264616", "0.5248908", "0.52382374", "0.5237619", "0.5237269", "0.52242935", "0.52161914", "0.52143437", "0.5212706", "0.5208477", "0.5206771", "0.5203347", "0.5196956", "0.5190698", "0.5185222", "0.5180216", "0.5169523", "0.51464236", "0.5144974", "0.51351875", "0.5130693", "0.512981", "0.5120553", "0.512031", "0.51193327", "0.51184314", "0.51176125", "0.5112807", "0.5111974", "0.51108027", "0.51073843", "0.50965536", "0.50955784", "0.50899553", "0.50875056", "0.5085388", "0.50763905", "0.50755125", "0.5062438", "0.5061452", "0.5058363", "0.5056945", "0.5051618", "0.5050942", "0.50501615", "0.504542", "0.50324404", "0.503058", "0.5028683", "0.5019657", "0.5010639", "0.50066435", "0.5003635", "0.5002965", "0.49977282", "0.4994278", "0.49941605", "0.49941605", "0.49930766", "0.49917832", "0.4987972", "0.49834502", "0.49806115", "0.4980354", "0.4976422", "0.4972389", "0.49714077", "0.4968499", "0.49683362", "0.4965809", "0.49653465", "0.49623778", "0.49602318", "0.49578556", "0.49570563" ]
0.6934954
0
Parse script input arguments.
def parse_arguments():
    arguments_parser = argparse.ArgumentParser()
    arguments_parser.add_argument('--libvirt', help='Using KVM-libvirt as VM provider', action="store_true")
    arguments_parser.add_argument('--virtualbox', help='Using KVM as VM provider', action="store_true")
    arguments_parser.add_argument('-i', '--img', required=True, action='store', dest='vm_image',
                                  help='Vagrant VM image')
    arguments_parser.add_argument('-m', '--memory', required=True, action='store', dest='vm_memory',
                                  help='VM box memory size')
    arguments_parser.add_argument('-c', '--cpu', required=True, action='store', dest='vm_cpu',
                                  help='VM box CPU count')
    arguments_parser.add_argument('-n', '--node', required=True, action='store', dest='vm_node',
                                  help='VM box nodes count')
    arguments_parser.add_argument('-p', '--netprefix', required=True, action='store', dest='vm_netprefix',
                                  help='VM box network prefix ex: 10.10.0')
    arguments_parser.add_argument('-e', '--exec', action='store', dest='exec_path',
                                  help='VM box init path')
    arguments = arguments_parser.parse_args()
    return arguments
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_arguments(args):", "def parse_arguments():\n # shift away script name\n scriptname=sys.argv[0]\n shift()\n ncl_cmd=list()\n quali_cmd=list()\n id_cmd=list() \n while(len(sys.argv)>0):\n carg = sys.argv[0]\n shift()\n if(carg == \"--nucleotide\"):\n ncl_cmd = mungeArgs(sys.argv)\n elif(carg == \"--quality\"):\n quali_cmd = mungeArgs(sys.argv)\n elif(carg == \"--id\" ):\n id_cmd = mungeArgs(sys.argv)\n elif(carg in [\"-h\", \"--help\"]):\n usage()\n else:\n usage(error=True)\n # Excess arguments which are not processed \n if(len(sys.argv) > 0):\n sys.stdout.write(\"Excess arguments!\\n\")\n sys.stdout.flush()\n usage(error=True)\n\n # external modules rely on non-empty argv array, \n # re-append the script name as first command line argument\n sys.argv.append(scriptname)\n return (id_cmd, ncl_cmd, quali_cmd)", "def parse_arguments(self):\n \n for arg in sys.argv[1:]:\n (key, sep, value) = arg.partition(\"=\")\n if sep != \"=\":\n raise ProcessorError(\"Illegal argument '%s'\" % arg)\n self.update_data(key, value)", "def parse_arguments(args=sys.argv[1:]):\n \n parser = argparse.ArgumentParser()\n \n parser.add_argument('-i', '--input',\n help=\"Path of input file to read. Default: {d}\".format(d=INPUT_FILE),\n default=INPUT_FILE)\n \n return parser.parse_args(args)", "def parseArguments(self):\n iterator = iter(sys.argv[1:]) # Skip file name\n for argument in iterator:\n if len(argument) < 2 or argument[:2] != '--':\n self.error('syntax error \"{}\"'.format(argument))\n else:\n def getValueOfArgument(): return next(iterator)\n self.parseArgument(argument[2:], getValueOfArgument)", "def parseInputArgs():\n parser = argparse.ArgumentParser(description=\"Unix cut analog\", usage='%(prog)s [arguments]')\n\n # pos arg\n parser.add_argument('filename', type=str, help='input file name')\n\n # req arg\n requiredNamed = parser.add_argument_group('required arguments')\n requiredNamed.add_argument('-f', '--fields', type=str, help='list of fields, separated by comma', required=True)\n # optional args\n parser.add_argument('-s', '--separator', type=str, default='\\t', help='column separator, default tab')\n\n args = parser.parse_args()\n return args", "def __parse_args(self):\n for argument in self.args:\n source_arg = re.match(\"^(--source=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n input_arg = re.match(\"^(--input=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n stats_arg = re.match(\"^(--stats=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n help_arg = re.match(\"^--help$\", argument)\n vars_arg = re.match(\"^--vars$\", argument)\n insts_arg = re.match(\"^--insts$\", argument)\n if source_arg:\n self.sourceFile = source_arg.group(2)\n self.passedArgs.append(\"source\")\n elif input_arg:\n self.inputFile = input_arg.group(2)\n self.passedArgs.append(\"input\")\n elif help_arg:\n print(\"napoveda\")\n sys.exit(0)\n elif stats_arg:\n self.statsFile = stats_arg.group(2)\n self.passedArgs.append(\"stats\")\n elif vars_arg:\n self.passedArgs.append(\"vars\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"vars\"\n elif insts_arg:\n self.passedArgs.append(\"insts\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"insts\"\n else:\n raise ArgError(\"Unknown argument or format of the argument! (\" + argument + \")\")", "def parse_args():\n parser = ArgumentParser(\n description=\"This is a script for auto apply ipex optimization.\"\n \"\\n################################# Basic usage ############################# \\n\"\n \"\\n 1. 
Apply ipex optimization with fp32 data type\\n\"\n \"\\n >>> python -m intel_extension_for_pytorch.cpu.auto_ipex python_script args \\n\"\n \"\\n 2. Apply ipex optimization with bf16 data type\\n\"\n \"\\n >>> python -m intel_extension_for_pytorch.cpu.auto_ipex --dtype bfloat16 python_script args \\n\",\n formatter_class=RawTextHelpFormatter,\n )\n\n add_auto_ipex_params(parser, auto_ipex_default_enabled=True)\n\n # positional\n parser.add_argument(\n \"program\",\n type=str,\n help=\"The full path to the proram/script to be launched. \"\n \"followed by all the arguments for the script\",\n )\n # rest from the training program\n parser.add_argument(\"program_args\", nargs=REMAINDER)\n return parser.parse_args()", "def parse():\n\n args = sys.argv\n if os.name == 'nt' and args and 'python' in os.path.basename(args[0]).lower():\n args = args[2:]\n else:\n args = args[1:]\n args = vars(parser.parse_args(args))\n \n # set the global verbosity level of the script\n script.set_verbosity(args['verbosity']) \n \n return args", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-e\", \"--events\", type=str,\n help=\"path to events CSV-file\")\n parser.add_argument(\"-d\", \"--data\", type=str,\n help=\"path to data CSV-file\")\n parser.add_argument(\"-l\", \"--limit\", nargs='?', type=int, default=None,\n help=\"max records to be processed\")\n parser.add_argument(\"-t\", \"--timezone\", nargs='?', type=int, default=5,\n help=\"date and time shift\")\n parser.add_argument(\"-o\", \"--optimized\", action='store_true',\n help=\"if specified, then data CSV will be processed\"\n \" by small chunks to escape memory issues\")\n parser.add_argument(\"-v\", \"--verbose\", action='store_true')\n parser.add_argument(\"--output-folder\", nargs='?', type=str,\n default=\"linked\")\n return vars(parser.parse_args())", "def parse_arguments():\n parser = argparse.ArgumentParser(\n description=\"script for downloading and merging log files from S3 for particular time period\")\n parser.add_argument(\"-s\", \n \"--startdate\", \n help=\"start date in format YYYYMMDD\", \n required=True, \n type=valid_date)\n parser.add_argument(\"-e\", \"--enddate\", \n help=\"end date in format YYYYMMDD\", \n required=True, \n type=valid_date)\n parser.add_argument(\"-f\", \n \"--file\", \n help=\"destination file\", \n required=True)\n parser.add_argument( \"-c\", \"--config\",\n default=\"/Users/samarius/.get_analytics_log.config.json\",\n help=\"configuration file path\")\n\n\n try:\n args = parser.parse_args()\n return args\n except Exception as e:\n print \"can't parse command line args: {}\".format(repr(e))\n raise", "def parse_cmd_arguments():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('-i', '--input', required=True, help='input JSON file')\n parser.add_argument('-o', '--output', required=True,\n help='ouput JSON file')\n parser.add_argument('-d', '--debug', required=False,\n help='log level. Can be 0-3. Defaults to 0')\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"Reads datapacket pcds, interpolates quaternions and generates scans from dataset in config file\")\n parser.add_argument(\"--visualization\", \"-v\", action=\"store_true\", help=\"if generated clouds should be visualized\")\n parser.add_argument(\"--directory\", \"-d\",\n help=\"if only specified directory should be interpolated, e.g. 
'fragments/fragment0'\")\n args = parser.parse_args()\n return args.visualization, args.directory", "def parse_args(self):\n \n # check args:\n # XXX: make them position independent\n if not os.path.isdir(self.params.R_source_folder):\n raise gc3libs.exceptions.InvalidUsage(\n \"Invalid path to R scripts folder: '%s'. Path not found\"\n % self.params.R_source_folder)\n # XXX: shall we check/validate the content ( presence of valid R scripts ) ?\n\n self.log.info(\"source dir: %s\" % self.params.R_source_folder)\n\n if not os.path.exists(self.params.command_file):\n raise gc3libs.exceptions.InvalidUsage(\n \"gc_gps command file '%s' does not exist;\"\n % self.params.command_file)\n gc3libs.utils.test_file(self.params.command_file, os.R_OK,\n gc3libs.exceptions.InvalidUsage)\n\n if self.params.input_dir and not os.path.isdir(self.params.input_dir):\n raise gc3libs.exceptions.InvalidUsage(\n \"Input folder '%s' does not exists\"\n % self.params.input_dir)\n\n self.log.info(\"Command file: %s\" % self.params.command_file)\n self.log.info(\"R source dir: %s\" % self.params.R_source_folder)\n if self.params.input_dir:\n self.log.info(\"Input data dir: '%s'\" % self.params.input_dir)", "def parse_arguments():\n parser = argparse.ArgumentParser(description=\"Parse library type information.\")\n parser.add_argument(\"input_file\", help=\"Salmon library type information file.\")\n return parser.parse_args()", "def process_command_line_arguments() -> Namespace:\n\n parser = build_parser()\n arguments = parser.parse_args()\n\n return arguments", "def parse_args(self):\n assert os.path.isfile(self.params.csv_input_file), \\\n \"Input CSV file %s not found\" % self.params.csv_input_file", "def _parse_args():\n parser = argparse.ArgumentParser(description='Run DAFI.')\n parser.add_argument('input_file', help='Name (path) of input file')\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--version',\n metavar=\"<str>\",\n help=\"Input data version number\",\n type=str,\n required=True\n )\n args = parser.parse_args()\n return args", "def parse_arguments():\n global parser\n parser = argparse.ArgumentParser(\n description='Certainly this isn\\'t how Food Network does it',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=textwrap.dedent('''\n Recipe List must appear as follows. **\n =======\n recipe_name\n serveing_size\n ingredient 0\n ingredient 1\n ingredient 2\n ...\n ...\n ...\n ingredient n\n '''))\n parser.add_argument('input_file',\n help=\"An input text file to read in recipes from. \"\n \"Must adhere certain structure.**\")\n parser.add_argument('out_file', help=\"File to write json recipe data to.\")\n parser.add_argument('-s', '--serving-size', type=str,\n help='The number of servings you\\'d like to make.',\n dest='serving_size', default=4)\n parser.add_argument('-f', '--filter-items', type=split_cmdline_filter_items,\n dest='filter_items',\n help='A comma delimited string of ingredients to filter recipes by. 
'\n 'Multi-word ingredients must be quoted.')\n global args\n args = parser.parse_args()\n\n global serving_size_override\n serving_size_override = args.serving_size\n global filter_ingredients\n filter_ingredients = args.filter_items", "def parse_arguments():\n\n parser = argparse.ArgumentParser(\n description=\"生成用户字符串识别的切分字符串\"\n )\n parser.add_argument(\n \"-o\",\n \"--output_dir\",\n type=str,\n nargs=\"?\",\n help=\"The output directory\",\n default=\"output/\"\n )\n parser.add_argument(\n \"-i\",\n \"--input_file\",\n type=str,\n nargs=\"?\",\n help=\"When set, this argument uses a specified text file as source for the text\",\n default=\"\",\n required=True\n )\n parser.add_argument(\n \"-mi\",\n \"--min_char_count\",\n type=int,\n nargs=\"?\",\n help=\"The minimum number of characters per line, Default is 3.\",\n default=3,\n\n )\n parser.add_argument(\n \"-ma\",\n \"--max_char_count\",\n type=int,\n nargs=\"?\",\n help=\"The maximum number of characters per line, Default is 20.\",\n default=20,\n )\n return parser.parse_args()", "def parse_command_line_arguments(argv):\n print(\"reading command line arguments in...\")\n\n parser = argparse.ArgumentParser(description='Description of your program')\n parser.add_argument('-i', '--input', help='Location of input csv file', required=True)\n parser.add_argument('-p', '--predicting', help='The column name containing the category to predict', required=True)\n parser.add_argument('-s', '--scoring', help='The scoring type to be used with model evaluation', required=False)\n parser.add_argument('-c', '--scale', help='List of column names to scale values for', nargs='+', required=False)\n args = parser.parse_args()\n\n return args.input, args.predicting, args.scoring, args.scale", "def parse_user_arguments():\n\n ap = argparse.ArgumentParser()\n\n ap.add_argument(\"-m\", \"--method\",\n help=\"Enter the type of baseline run, \"\n \"bm_25, tf_idf or jm_qlm\", required=True)\n\n ap.add_argument(\"-j\", \"--json_fname\", help=\"Enter the path to the json \"\n \"filename containing\"\n \"all the paths to the \"\n \"test_collection\",\n required=True)\n\n return vars(ap.parse_args())", "def parsare_argumente():\n for arg in sys.argv:\n if arg == \"-h\":\n display_usage()\n\n in_dir=\"input\"\n out_dir=\"output\"\n n=3\n timeout=10\n for arg in sys.argv[1:]:\n check = arg.split(\"=\")\n if len(check) < 2:\n print(\"invalid\")\n exit()\n if check[0] == \"if\":\n in_dir = ''.join(check[1:])\n elif check[0] == \"of\":\n out_dir = ''.join(check[1:])\n elif check[0] == 'n':\n try:\n n = int(''.join(check[1:]))\n except ValueError:\n print(\"nr invalid\")\n display_usage()\n elif check[0] == 't':\n try:\n timeout = int(''.join(check[1:]))\n except ValueError:\n print(\"nr invalid\")\n display_usage()\n\n return [in_dir, out_dir, n, timeout]", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-d\",\n \"--debug\",\n help=\"Print lots of debugging statements\",\n action=\"store_const\",\n dest=\"loglevel\",\n const=logging.DEBUG,\n default=logging.ERROR,\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"Be verbose\",\n action=\"store_const\",\n dest=\"loglevel\",\n const=logging.INFO,\n )\n parser.add_argument(\"runscript\", default=None)\n return parser.parse_args()", "def parseArgs(arguments=None):\n\tparser = generateParser(None)\n\treturn parser.parse_known_args(arguments)", "def parse_args():\n parser = argparse.ArgumentParser()\n \n parser.add_argument('--p', dest='path_in',\n action='store', type=str, 
required=True, default='',\n help=\"Path relative to the data/ directory, to the input ATL01, ANC13, and ANC27 files.\")\n parser.add_argument('--atl01', dest='atl01_file',\n action='store', type=str, required=False, default=None,\n help=\"Path + filename to directory of the ATL01.\")\n parser.add_argument('--anc13', dest='anc13_path',\n action='store', type=str, required=False, default=None,\n help=\"Path to outputs directory of the ANC13.\") \n parser.add_argument('--anc27', dest='anc27_path',\n action='store', type=str, required=False, default=None,\n help=\"Path to directory of the ANC27.\")\n\n args = parser.parse_args()\n \n return args", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"Subscription Watch CSV file packaging script\", prog=sys.argv[0])\n\n # required args\n parser.add_argument(\"-f\", \"--filepath\", required=True,\n help=\"path to files to package\")\n parser.add_argument(\n \"-s\",\n \"--max-size\",\n type=int,\n default=DEFAULT_MAX_SIZE,\n help=f\"Maximum size of packages in MiB. (Default: {DEFAULT_MAX_SIZE} MiB)\",\n )\n parser.add_argument(\n \"-o\", \"--overwrite\", action=\"store_true\", default=False, help=\"whether to overwrite existing files.\"\n )\n parser.add_argument(\"--ocp-cluster-id\", required=True,\n help=\"OCP Cluster ID\")\n parser.add_argument(\"-v\", \"--verbosity\", action=\"count\",\n default=0, help=\"increase verbosity (up to -vvv)\")\n return parser.parse_args()", "def parse_args():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input_path\", required=True)\n parser.add_argument(\"-c\", \"--config\", required=True)\n return parser.parse_args()", "def parse_args(self, argv=None):\n self.opts, self.args = self.cli_parser.parse_args(argv)\n self._begin_logging()\n if argv is None:\n argv = sys.argv\n logger.info(' '.join(argv))\n self._process_input_files()\n self._construct_links_of_interest()\n self._open_output_files()\n data = self._construct_data_struct()\n return data", "def argumentsParser(args):\n\targuments = []\n\tif args.find('\"') > -1:\n\t\tt_arguments = args.split('\"')\n\t\tfor a in t_arguments:\n\t\t\tif a == '' or a == ' ':\n\t\t\t\tpass\n\t\t\telif a[-1] == ' ':\n\t\t\t\targuments.append(a[:-1])\n\t\t\telse:\n\t\t\t\targuments.append(a)\n\telif args.find(\"'\") > -1:\n\t\tt_arguments = args.split(\"'\")\n\t\tfor a in t_arguments:\n\t\t\tif a == '' or a == ' ':\n\t\t\t\tpass\n\t\t\telif a[-1] == ' ':\n\t\t\t\targuments.append(a[:-1])\n\t\t\telse:\n\t\t\t\targuments.append(a)\n\telif args == ' ':\n\t\tpass\n\telse:\n\t\targuments = args.split(' ')\n\treturn arguments", "def parse_args():\n\n\t# Define the input parser\n\tdesc = \"computes long term temperature anomaly trend for the GHNC dataset\"\n\tepilog = \"\"\"\ndatarange input argument is of the format:\n\t\t YYYY[MM[DD]][:YYYY[MM[DD]]]\nWhere the date before the optional ':'' represents the lower bound of\nthe range and the optional date after the : represents the upper\nbound. The optional elements of the date default to the lowest possible\nvalue for the lower bound and to the maximum possible for the upper\none. 
For example,\n\t2006 is equivalent to 2006/01/01:2006/12/31\n\t2006/02 is equivalent to 2006/02/01:2006/02/28\n\"\"\"\n\n\tparser = argparse.ArgumentParser(description=desc, epilog=epilog,\n\t\t\t\t\t\tformatter_class=argparse.RawDescriptionHelpFormatter)\n\tparser.add_argument(\"daterange\",\n\t\t\t\t\t\thelp=\"range of dates to make available locally\")\n\tparser.add_argument('-t',\"--timeseries\",nargs=2,metavar=('lon','lat'),type=float,\n\t\t\t\t\t\thelp=\"plot timeseries for the lon lat pair of coordinates\")\n\tparser.add_argument('-r',\"--recompute\",default=False,action='store_true',\n\t\t\t\t\t\thelp=\"force recompute trend\")\n\n\treturn parser.parse_args()", "def __parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--force', action=\"store_true\", default=False,\n help='overwrite existing database files during import')\n parser.add_argument('-e', '--extension', action=\"store\", default='txt',\n help='specify file extension. default is \"txt\"')\n parser.add_argument('-d', '--delimiter', action=\"store\", default='\\t',\n help='specify column delimiter. default is tab (\\\\t)')\n parser.add_argument('-m', '--mark', action=\"store\", default='.',\n help='specify decimal mark for numeric data. default is'\n ' dot (.)')\n parser.add_argument('-o', '--outformat', action=\"store\", default='npz',\n help='specify output database format. default is \"npz\"'\n ' for numpy database. use \"mat\" for matlab '\n ' database format.')\n parser.add_argument('-r', '--recursive', action=\"store_true\", default=False,\n help='recursively walk through all sub-directories of'\n ' current working directory')\n parser.add_argument('-p', '--pcs', action=\"store_true\", default=True,\n help='indicate if files are pcs files.')\n parser.add_argument('-c', '--colheadlines', action=\"store\", default='1',\n help='number of lines spanned by the column headers')\n args = parser.parse_args()\n return args", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('n_iter',\n help='number of iteration',\n type=int)\n parser.add_argument('n_processes',\n help='number of processes',\n type=int)\n parser.add_argument('method',\n help='mutual exclusion method')\n parser.add_argument('duration',\n help='Duration of each process',\n type=float)\n return parser.parse_args()", "def parse_args():\n if len(sys.argv) < REQUIRED_NUM_ARGS or len(sys.argv) > MAXIMUM_NUM_ARGS:\n error_quit(\"Incorrect number of arguments!\", 400)\n # Set port to DEFAULT if not specified as an arg. 
Otherwise, port = portarg.\n port = sys.argv[PORT_ARG_NUM] if len(sys.argv) == MAXIMUM_NUM_ARGS else DEFAULT_FTP_PORT\n port = validate_port(port)\n # Get host address and logfile name from args.\n host, log_file = sys.argv[HOST_ARG_NUM], sys.argv[LOG_ARG_NUM]\n return host, log_file, port", "def Args(parser):", "def parse_args():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"-i\", \"--input\", required=True, action=\"store\", dest=\"f_in\", help=\"input file\"\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n required=True,\n action=\"store\",\n dest=\"f_out\",\n help=\"stem of output file\",\n )\n\n parser.add_argument(\n \"-d\",\n \"--dir\",\n required=True,\n action=\"store\",\n dest=\"dir\",\n help=\"directory to save output files\",\n )\n\n parser.add_argument(\n \"-t\",\n \"--threshold\",\n required=False,\n action=\"store\",\n dest=\"thres\",\n default=0.85,\n help=\"threshold for the scoring function\",\n )\n\n parser.add_argument(\n \"-l\",\n \"--language\",\n required=True,\n action=\"store\",\n dest=\"lang\",\n help=\"provide language in order to set stop words\",\n )\n\n parser.add_argument(\n \"-min\",\n \"--minimum\",\n required=False,\n action=\"store\",\n dest=\"min\",\n default=100,\n help=\"minimum number of occurrences to be considered as ngram\",\n )\n\n parser.add_argument(\n \"--trigram\",\n required=False,\n action=\"store_true\",\n dest=\"trigram\",\n help=\"extracting trigrams in addition to bigrams\",\n )\n\n return parser.parse_args()", "def parse_args():\n sentinel_dict = {}\n\n def _preprocess_sysargv(argv):\n inputs = []\n for arg in argv[1:]:\n # handles case where values contain --, otherwise they will\n # be interpreted as arguments.\n if '--,' in arg or ',--' in arg or arg == '--':\n sentinel = uuid4().hex\n key = '%s' % sentinel\n sentinel_dict[key] = arg\n inputs.append(sentinel)\n else:\n inputs.append(arg)\n return inputs\n\n def _postprocess_sysargv(v):\n if v in sentinel_dict:\n return sentinel_dict.get(v)\n else:\n return v\n\n #----- read input arguments\n for i, arg in enumerate(sys.argv):\n if (arg[0] == '-') and arg[1].isdigit(): sys.argv[i] = ' ' + arg\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-u', action='store_true', dest='helpmenu',help='extended HELP MENU with examples')\n parser.add_argument('-i','--infile',action='store', dest='infile',help='name of file with SAC or mseed file(s)')\n parser.add_argument('-g','--gain',action='store', dest='sensitivity',help='Stage 0 sensitivity')\n parser.add_argument('-N','--net', action='store', dest='network',help='network')\n parser.add_argument('-S','--sta', action='store', dest='station',help='station')\n parser.add_argument('-C','--cha', action='store', dest='chantype',help='chantype')\n parser.add_argument('-s','--start', action='store', dest='startstring',help='start time YYYY-MM-DDTHH:MM:SS')\n parser.add_argument('-e','--end', action='store', dest='endstring',help='end time YYYY-MM-DDTHH:MM:SS')\n parser.add_argument('-d','--duration', action='store', dest='durationinhours',help='duration in hours')\n parser.add_argument('-dc','--dc','--datacenter', action='store', dest='datacenter',default='IRIS',help='FDSN data center (e.g. 
IRIS, SCEDC, NCEDC)')\n parser.add_argument('-p','--plot',action='store_true',dest='iplot',help='make plots of each hourly trace (NOTE: can be slow)')\n\n helpextended = parser.parse_args(_preprocess_sysargv(sys.argv)).helpmenu\n if ( helpextended is True ):\n print ('')\n print ('portable_pip_squeak: assess a station either using local data or to be downloaded')\n print ('')\n print ('Usage: portable_pip_squeak.py [options]')\n print ('')\n print ('EXAMPLES:')\n print ('portable_pip_squeak.py --infile my_SAC_files.txt')\n print ('portable_pip_squeak.py -N UW -S TKEY -C HH -s 2018-01-01T00:00:00 -d 2 -p')\n print ('portable_pip_squeak.py -N CI -S LEO -C HN -s 2020-01-01T00:00:00 -d 24 -dc SCEDC')\n print ('')\n print ('Inputs if supplying your own data:')\n print (' -i, --infile Name of text file with SAC/mseed file(s) of 3 (Z,N,E) traces.')\n print (' -g, --gain Gain or Stage 0 sensitivity')\n print (' ')\n print ('Inputs if downloading data:')\n print (' -s, --starttime Trace start time (YYYY-MM-DD,HH:MM:SS)')\n print ('')\n print (' One of these:')\n print (' -e, --endtime Trace end time (YYYY-MM-DD,HH:MM:SS)')\n print (' -d, --duration Duration in hours from starttime')\n print (' Note: if duration is neg, starttime becomes endtime')\n print (' N, S, C and a datacenter if other than IRIS')\n print (' -N, --net Network code')\n print (' -S, --sta Station code')\n print (' -C, --cha Channel type, e.g. EN or HH')\n print (' -dc, --datacenter Name of FDSN data center if not IRIS, e.g. SCEDC, NCEDC')\n print (' ')\n print ('Optional flags:')\n print ('-P, --plot Flag to make a figure for each hour. Note: can be slow.')\n print ('-u Print this extended help menu')\n print ('')\n\n\n return parser.parse_args(_preprocess_sysargv(sys.argv))", "def parse_args(args):\n\n parser = argparse.ArgumentParser(\n description=\"\"\"Generates and runs an afni_proc.py script to preprocess resting state fMRI data\"\"\",\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n # Optional Flags\n parser.add_argument(\"-t\", \"--trs_remove\", action=\"store\", default=5, type=int, metavar='TRs',\n help=\"\"\"number of trs to remove at the beginning of the epi data\n (default = 5 trs)\"\"\")\n parser.add_argument(\"-d\", \"--dim_voxel\", action=\"store\", default=2.0, type=float, metavar='MM',\n help=\"voxel dimensions in mm that processed epi will be resampled to (default = 2.0 mm)\")\n parser.add_argument(\"-b\", \"--bandpass\", action=\"store\", default=[0.01, 0.25], nargs=2, type=float, metavar=\"F\",\n help=\"bandpass frequencies lower and upper limits (default = 0.01 0.25)\")\n parser.add_argument(\"-v\", \"--volumes\", action=\"store\", default=0, type=int, metavar=\"V\",\n help=\"\"\"truncate the epi data to the inputted number of volumes, useful if subjects have data \n with different numbers of volumes (default = no truncation)\"\"\")\n parser.add_argument(\"-f\", \"--fwhm\", action=\"store\", default=5.0, type=float, metavar=\"MM\",\n help=\"the full width half maximum that is used when blurring (default = 5.0 mm)\")\n parser.add_argument(\"-c\", \"--cores\", action=\"store\", default=cpu_count(), type=int, metavar=\"C\",\n help=\"number of cores supplied to 3dDeconvolve (default = all cores)\")\n parser.add_argument(\"-s\", \"--subj_id\", action=\"store\", default=\"sub\", metavar=\"SUB\",\n help=\"text file of subject ids (default = sub)\")\n parser.add_argument(\"-T\", \"--time_step\", action=\"store\", default=0, type=float, metavar=\"TS\",\n help=\"set the time step for bandpassing (default = 
ts in header info\")\n\n parser.add_argument(\"-g\", \"--global_signal_regression\", action=\"store_false\", default=True,\n help=\"do not perform global signal regression (default = perform gsr)\")\n\n parser.add_argument(\"-r\", \"--rerun\", action=\"store_true\", default=False,\n help=\"\"\"rerun preprocessing, override and delete previous results in \n 'Processed' folder (default = don't override)\"\"\")\n parser.add_argument(\"-m\", \"--motion_param\", action=\"store_true\", default=False,\n help=\"use 12 motion parameters for regression (default = 6 motion parameters)\")\n parser.add_argument(\"-G\", \"--gm_blur\", action=\"store_true\", default=False,\n help=\"blur only in grey matter mask (default = blur in whole brain)\")\n parser.add_argument(\"-n\", \"--nl_reg\", action=\"store_true\", default=False,\n help=\"use non-linear warp between anatomical and MNI template (default = linear warp)\")\n\n # Required Inputs\n required = parser.add_argument_group(\"required arguments\")\n required.add_argument(\"-e\", \"--epi\", action=\"store\", required=True,\n help=\"text file of paths to raw epi data\")\n required.add_argument(\"-a\", \"--anat\", action=\"store\", required=True,\n help=\"text file of paths to raw anatomical data\")\n required.add_argument(\"-o\", \"--out_dir\", action=\"store\", required=True, metavar=\"OUT\",\n help=\"text file of paths to output directory\")\n result = parser.parse_args(args)\n\n # Make sure inputted parameters are legal\n assert (os.path.isfile(result.epi)), \"{} does not exist or is not a file\".format(result.epi)\n assert (os.path.isfile(result.anat)), \"{} does not exist or is not a file\".format(result.ant)\n assert (result.trs_remove >= 0), \"Cannot remove negative trs\"\n assert (result.dim_voxel >= 0), \"Cannot have a negative voxel dimension\"\n assert (np.all(np.array(result.bandpass) > 0)), \"Cannot have a negative frequency limit for bandpassing\"\n assert (result.volumes > -1), \"Number of volumes must be greater than 0\"\n assert (result.cores > 0), \"Number of cores used must be greater than 0\"\n assert (result.time_step > -1), \"Time step must be greater than 0\"\n\n return result", "def parse_args(self, args):\n raise Exception(\"Not implemented\")", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Reads in iClicker data from the submission \"\n \"directory, and then writes it to the given \"\n \"remote file.\")\n parser.add_argument(\"submission_directory\", type=str, help=\"Directory of submissions that \"\n \"contain a 'textbox_0.txt' file that references iClicker ID\")\n parser.add_argument(\"remote_id_file\", type=str)\n return parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\", dest=\"input_file\", help=\"input file or pattern\", default=\"\")\n parser.add_argument(\"-o\", \"--output\", dest=\"output_file\", help=\"output file or pattern\", default=\"\")\n parser.add_argument(\"-d\", \"--debug\", dest=\"debug\", action='store_true')\n parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", action='store_true')\n parser.set_defaults(verbose=False)\n parser.set_defaults(debug=False)\n return parser.parse_args()", "def _parse_arguments():\n parser = argparse.ArgumentParser(\n fromfile_prefix_chars='@',\n formatter_class = argparse.ArgumentDefaultsHelpFormatter,\n description = 'Arguments for the SITL simulation.'\n )\n\n parser.add_argument('-id', type=str, default='FF', metavar='AgentID', required=True,\n help=\"AGENT_ID, must be a 
2-digit integer.\")\n parser.add_argument('-alt', type=float, default=15.0, metavar='',\n help='Takeoff altitude, within [10.0, 100.0] (m).')\n parser.add_argument('-xbee', type=str, default=None, metavar='',\n help=\"XBee module's device path. If not provided, use ZeroMQ.\")\n parser.add_argument('-pix', type=str, default='fw/ac3.5.2_port5760', metavar='',\n help=\"Pixhawk's device path. Can be SITL firmware.\")\n parser.add_argument('-algorithm', '-a', type=str, default='MPC', metavar='',\n choices=['Vicsek','MPC'],\n help=\"Algorithm used for main script.\")\n parser.add_argument('-character', '-c', type=str, default='follower', metavar='',\n choices=['square','passive','follower'],\n help=\"Whether this agent is leader or follower?\")\n parser.add_argument('-n', type=int, default=5, metavar='',\n help=\"Total agent count.\") \n parser.add_argument('-level', '-l', type=str, default='info', metavar='',\n choices=['warning','debug','info'],\n help=\"Logging level: ['warning','debug','info']\") \n \n args = parser.parse_args()\n\n # get correct parameters\n if args.alt < 10.0 or args.alt > 100.0:\n raise Exception('-alt should between [10.0, 100.0]')\n if not args.id.isdigit() or len(args.id) != 2:\n raise Exception('-id shoud be a 2-digit integer')\n \n return args", "def _parse_args(self, prepared_args):\n pass", "def parse_arguments(arguments=None):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input_file\",\n type=argparse.FileType('r'))\n parser.parse_args()\n return parser", "def _parse_args(argv):\n parser = make_parser()\n args = parser.parse_args(argv)\n LOGGER.setLevel(to_log_level(args.loglevel))\n\n if not args.inputs:\n if args.list:\n tlist = \", \".join(API.list_types())\n _exit_with_output(\"Supported config types: \" + tlist)\n elif args.env:\n cnf = os.environ.copy()\n _output_result(cnf, args.output, args.otype or \"json\", None, None)\n sys.exit(0)\n else:\n parser.print_usage()\n sys.exit(1)\n\n if args.validate and args.schema is None:\n _exit_with_output(\"--validate option requires --scheme option\", 1)\n\n return args", "def parse_args():\n global Args\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n pars_simulation(subparsers)\n pars_analyze(subparsers)\n Args = parser.parse_args()", "def _read_cmd_args():\n\n # Check if argument count is correct.\n if len(sys.argv) != 5:\n print(\"[ERR] Invalid number of command line arguments!\")\n _usage()\n sys.exit(1)\n\n # Get path to config file\n configfile = sys.argv[1]\n if not os.path.exists(configfile):\n print(f\"[ERR] Config file {configfile} does not exist!\")\n sys.exit(1)\n\n # Get top directory of LIS data\n topdatadir = sys.argv[2]\n if not os.path.exists(topdatadir):\n print(f\"[ERR] LIS data directory {topdatadir} does not exist!\")\n sys.exit(1)\n\n # Get valid year and month\n yyyymm = sys.argv[3]\n if len(yyyymm) != 6:\n print(\"[ERR] Invalid length of YYYYMM, must be 6 characters!\")\n sys.exit(1)\n year = int(yyyymm[0:4])\n month = int(yyyymm[4:6])\n try:\n startdate = datetime.datetime(year, month, day=1)\n except ValueError:\n print(\"[ERR] Invalid YYYYMM passed to script!\")\n sys.exit(1)\n\n # Get model forcing ID\n model_forcing = sys.argv[4]\n\n return configfile, topdatadir, startdate, model_forcing", "def parse_args():\n parser = argparse.ArgumentParser(description='Extract left-turn speed data CSV files from Excel')\n parser.add_argument('veh_conflict_data', type=str, help='Excel file with all veh conflicts data')\n return parser.parse_args()", "def 
parseArgs ():\n independentBaseName = None\n dependentBaseName = None\n independentTSID = None\n dependentTSID = None\n statisticsFile = None\n nEquations = None\n logFile = None\n #\n # Loop through command line arguments\n for arg in sys.argv:\n parts = arg.split('=')\n if ( (parts == None) or (len(parts) != 2) ):\n # Not an arg=value command line argument\n continue\n argName = parts[0].upper()\n argValue = parts[1]\n if ( argName == 'DEPENDENTBASENAME' ):\n dependentBaseName = argValue\n elif ( argName == 'DEPENDENTTSID' ):\n dependentTSID = argValue\n elif ( argName == 'INDEPENDENTBASENAME' ):\n independentBaseName = argValue\n elif ( argName == 'INDEPENDENTTSID' ):\n independentTSID = argValue\n elif ( argName == 'LOGFILE' ):\n logFile = argValue\n elif ( argName == 'NUMBEROFEQUATIONS' ):\n nEquations = int(argValue)\n elif ( argName == 'STATISTICSFILE' ):\n statisticsFile = argValue\n return ( independentBaseName, dependentBaseName, independentTSID, dependentTSID,\n statisticsFile, nEquations, logFile )", "def parseArgs():\n parser = argparse.ArgumentParser(description='Runs RHEAS simulation.')\n parser.add_argument('config', help='configuration file')\n parser.add_argument('-d', metavar='DB', help='name of database to connect')\n parser.add_argument('-u', help='update database', action='store_true')\n args = parser.parse_args()\n return args.config, args.d, args.u", "def parse_user_args():\n ap = argparse.ArgumentParser()\n\n ap.add_argument(\"-j\", \"--json_fname\", help=\"Enter the path to the json \"\n \"filename containing\"\n \"all the paths to the \"\n \"test_collection\", required=True)\n\n ap.add_argument(\"-m\", \"--method\", help=\"Enter the type of baseline run, \"\n \"bm_25, tf_idf or jm_qlm\",\n required=True)\n\n return vars(ap.parse_args())", "def handle_cmdline_args():\n\n parser = argparse.ArgumentParser(\n description='Generate synthetic data from a specification in a json '\n 'file using the \"synth-method\" described in the json file. ')\n\n parser.add_argument(\n '-i', dest='infile', required=True,\n help='The input json file. 
Must contain a \"synth-method\" property')\n\n parser.add_argument(\n '-o', dest='outfile_prefix', required=True, help='The prefix of the output paths (data json and csv), relative to the QUIPP-pipeline root directory')\n\n args = parser.parse_args()\n return args", "def parse_arguments():\n\n info = 'Divides pdb info files for parallelization'\n parser = argparse.ArgumentParser(description=info)\n\n # program arguments\n parser.add_argument('-f', '--in-file',\n type=str,\n required=True,\n help='PDB info file to divide')\n parser.add_argument('-n', '--num-splits',\n default=1000,\n type=int,\n help='Number of splits to perform (Default: 1000)')\n parser.add_argument('-m', '--mut-file',\n type=str,\n required=True,\n help='File containing mutation information')\n parser.add_argument('--split-dir',\n default = \"../data/split_pdbs/\",\n type=str,\n help='Output directory for split PDB info files')\n\n args = parser.parse_args()\n opts = vars(args)\n return opts", "def parse_args():\n parser = default_argument_parser()\n parser.add_argument(\"--label-map\",\n dest=\"label_map\",\n type=pathlib.Path,\n help=\"Label map in YAML format which maps from category \"\n \"ID to name.\")\n parser.add_argument(\"--train-csv\",\n dest=\"train_csv\",\n required=True,\n type=pathlib.Path,\n help=\"Path to training data CSV file.\")\n parser.add_argument(\"--valid-csv\",\n dest=\"valid_csv\",\n required=False,\n type=pathlib.Path,\n help=\"Optional path to validation data CSV file.\")\n parser.add_argument(\n \"--image-width\",\n type=int,\n help=\"Image width (optional, used to speed up dataset processing).\")\n parser.add_argument(\n \"--image-height\",\n type=int,\n help=\"Image height (optional, used to speed up dataset processing).\")\n return parser.parse_args()", "def parse_arguments():\n ## Initialize Parser Object\n parser = argparse.ArgumentParser(description=\"Preprocess raw Twitter or Reddit data\")\n ## Generic Arguments\n parser.add_argument(\"--input\",\n type=str,\n default=None,\n help=\"Path to input folder of raw *.gz files or a single raw *.gz file\")\n parser.add_argument(\"--output_folder\",\n type=str,\n default=None,\n help=\"Name of output folder for placing predictions.\")\n parser.add_argument(\"--platform\",\n type=str,\n choices=[\"twitter\",\"reddit\"],\n help=\"Platform from which the data comes\")\n parser.add_argument(\"--jobs\",\n type=int,\n default=1,\n help=\"Number of processes to spawn.\")\n parser.add_argument(\"--keep_retweets\",\n default=False,\n action=\"store_true\",\n help=\"If included, will preserve retweets in preprocessed data\")\n parser.add_argument(\"--keep_non_english\",\n default=False,\n action=\"store_true\",\n help=\"If included, will preserve non-English tweets in preprocessed data\")\n ## Parse Arguments\n args = parser.parse_args()\n ## Check Arguments\n if args.input is None:\n raise ValueError(\"Must provide --input folder or .gz file\")\n if not os.path.exists(args.input):\n raise FileNotFoundError(f\"Could not find input filepath {args.input}\")\n if args.output_folder is None:\n raise ValueError(\"Must provide an --output_folder argument\")\n if not os.path.exists(args.output_folder):\n os.makedirs(args.output_folder)\n return args", "def _parse_command_line_arguments():\n parser = ArgumentParser(\n description=(\n 'Command-line tool to generate a list of unique from a TS file from FermiFAST'\n ),\n )\n parser.add_argument(\n 'ts-file',\n type=str,\n help=(\n 'A file containing the TS sky map'\n ),\n )\n parser.add_argument('--skiprows',\n 
type=int,\n help='number of rows to skip at the top (default 0)',\n required=False)\n parser.set_defaults(skiprows=0)\n arguments = vars(parser.parse_args())\n return arguments", "def parse_args(self, argv):\n\t\tself.argv={'user': argv[1]}", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('-u', '--urls_dirpath', type=unicode)\n parser.add_argument('-r', '--resources_dir', type=unicode)\n parser.add_argument('-t', '--total_docs', type=int)\n parser.add_argument('-m', '--mapping', type=unicode,\n help='File with the yago to lkif mapping')\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=__doc__)\n # If user doesn't specify an input file, read from standard input. Since\n # encodings are the worst thing, we're explicitly expecting std\n parser.add_argument('-i', '--infile',\n type=lambda x: open(x, encoding=ENCODE_IN),\n default=io.TextIOWrapper(\n sys.stdin.buffer, encoding=ENCODE_IN)\n )\n # Same thing goes with the output file.\n parser.add_argument('-o', '--outfile',\n type=lambda x: open(x, 'w', encoding=ENCODE_OUT),\n default=io.TextIOWrapper(\n sys.stdout.buffer, encoding=ENCODE_OUT)\n )\n # Set the verbosity level for the logger. The `-v` option will set it to\n # the debug level, while the `-q` will set it to the warning level.\n # Otherwise use the info level.\n verbosity = parser.add_mutually_exclusive_group()\n verbosity.add_argument('-v', '--verbose', action='store_const',\n const=logging.DEBUG, default=logging.INFO)\n verbosity.add_argument('-q', '--quiet', dest='verbose',\n action='store_const', const=logging.WARNING)\n return parser.parse_args()", "def _parse_args(self):\n parser = argparse.ArgumentParser()\n _, args = parser.parse_known_args()\n self.args = [a for a in args if a != '']", "def parse_args():\n parser = argparse.ArgumentParser(\"Run arguments for system submitted tasks\")\n\n parser.add_argument(\"-f\", \"--funcs\", type=str, nargs=\"?\", required=True,\n help=\"path to pickle file containing a list of \"\n \"functions/methods that should be run by the \"\n \"submitted process\"\n )\n parser.add_argument(\"-k\", \"--kwargs\", type=str, nargs=\"?\", required=False,\n default=None,\n help=\"path to pickle file containing a dictionary of \"\n \"keyword argumnets that should be passed to the \"\n \"functions\")\n parser.add_argument(\"-e\", \"--environment\", type=str, nargs=\"?\",\n required=False,\n help=\"Optional comma-separated environment variables, \"\n \"which should be given as \"\n \"VARNAME1=value1,VARNAME2=value2 and so on. 
These \"\n \"will be separated and instantiated into Python's \"\n \"os.environ\")\n\n return parser.parse_args()", "def parse_user_arguments(argv):\n\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=\n argparse.RawTextHelpFormatter)\n\n parser.add_argument('-i',\n '--input-file',\n help='HMMER3 text output file',\n type=str,\n required=True)\n parser.add_argument('-o',\n '--output-file',\n help='JSON output file',\n type=str,\n required=False)\n parser.add_argument('-e',\n '--e-value',\n help='Inclusion threshold',\n type=float,\n required=False,\n default=0.005)\n\n parser.add_argument('-m',\n '--max-number',\n help='Maximal number of hits to save',\n type=int,\n required=False,\n default=5000)\n arguments = parser.parse_args(argv)\n\n return arguments", "def parse_arguments():\n parser = argparse.ArgumentParser(\n description=(\n \"SpotiQuote: An automatic ad silencer combined with spottily played quotes.\"\n \" Spotify is queried by an AppleScript to report its status and when found\"\n \" to be presenting an advertisement, automatically muted. Once the an\"\n \" advertisement concludes the volume is set back to its previous level.\"\n )\n )\n\n parser.add_argument(\n \"--volume\",\n type=int,\n default=80,\n dest=\"volume\",\n help=\"Integer value between 0 and 100 to start Spotify at.\",\n )\n\n parser.add_argument(\n \"--memos\",\n type=str,\n default=None,\n dest=\"memos\",\n help=(\n \"File path to json file containing memos to say at the beginning of a muted\"\n ),\n )\n\n parser.add_argument(\n \"--voice\",\n type=str,\n default=\"Alex\",\n dest=\"voice\",\n help=\"Default voice to read memos in\",\n )\n\n parser.add_argument(\n \"--after_num_plays\",\n type=int,\n default=None,\n dest=\"after_num_plays\",\n help=(\n \"Recite memo after an integer number of songs have completed. Completion\"\n \" of play is defined as reaching the last 5 seconds of a song. 
Must be at\"\n \" least one.\"\n ),\n )\n\n return parser.parse_args()", "def parseArguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--output_folder',\n help='Path of the folder where output files should be written.')\n parser.add_argument('--partition_id',\n help='ID of the computer partition to collect data from.')\n parser.add_argument('--collector_db',\n help='The path of slapos collect database.')\n\n return parser.parse_args()", "def parse_arguments():\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"postcode\", type=str, help=\"Postcode of current location\")\n parser.add_argument(\"-d\", \"--debug\", \n help=\"Turns on debug mode\", \n action='store_true')\n parser.add_argument(\"-fp\",\"--postcode-file\", \n default=\"postcodes_swift_sample.csv\",\n help=\"Location of Postcode CSV file (default postcodes_swift_sample.csv)\",\n type=str)\n parser.add_argument(\"-fb\",\"--pub-file\", \n default=\"pubnames_swift_sample.csv\",\n help=\"Location of Pub Postcode CSV file (default pubnames_swift_sample.csv)\",\n type=str)\n parser.add_argument(\"-l\",\"--limit\", \n default=10, \n help=\"Limit Number of Results (default 10)\",\n type=int)\n parser.add_argument(\"-m\",\"--max-distance\", \n default=50, \n help=\"Only return results less than this distance (default 50)\",\n type=int)\n return parser", "def parse_cmdline():\n\tparser = ArgumentParser(prog=\"FastP_QC.py\", description=\"\"\"Script collects stats from fastp jsons.\"\"\")\n\tparser.add_argument(\"-r1\", \"--r1_stats\", dest=\"r1_stats\", action=\"store\", required=True, help=\"Text file with r1 stats, from q30.py script.\")\n\tparser.add_argument(\"-r2\", \"--r2_stats\", dest=\"r2_stats\", action=\"store\", required=True, help=\"Text file with r2 stats, from q30.py script.\")\n\tparser.add_argument(\"-n\", \"--name\", dest=\"name\", action=\"store\", required=True, help=\"Sample name\")\n\targs = parser.parse_args()\n\treturn args", "def parse_args():\n parser = argparse.ArgumentParser(\"Plot time series figures.\")\n parser.add_argument('--log-file', type=str, nargs=\"+\", required=True,\n help=\"path to a testing log file.\")\n parser.add_argument('--trace-file', type=str, default=None,\n help=\"path to a trace file.\")\n parser.add_argument('--save-dir', type=str, default=None,\n help=\"path to save.\")\n parser.add_argument('--noise', type=float, default=0)\n\n args, unknown = parser.parse_known_args()\n return args", "def parse_arguments():\n p = argparse.ArgumentParser(description='Prepare the dataset for use by neural models.')\n p.add_argument(\"json_file\", type=argparse.FileType('r'), help=\"json file with all the data\")\n p.add_argument(\"prefix\", type=str, help=\"prefix for all the generated files\")\n p.add_argument(\"data_type\", type=str, choices=[\"names\", \"comments\", \"nc\"],\n default=\"nc\", help=\"type of the information recorded in the dataset\")\n p.add_argument(\"labels\", type=str, choices=[\"PROG\", \"ALL\", \"TOP\"],\n default=\"PROG\", help=\"method by which to choose the labels for the dataset\")\n p.add_argument(\"-other_label\", type=str, required=False, default=\"\",\n help=\"label to use instead of all infrequent labels. \"\n \"This can be left blank to ignore infrequent labels altogether\")\n p.add_argument(\"-label_num\", type=int, default=100, required=False,\n help=\"Number of most frequent labels to keep. 
Works with label_choice=TOP\")\n p.add_argument(\"-min_prog_labels\", type=int, default=5, required=False,\n help=\"Minimal number of programs a label has to appear in for it to be included \"\n \"in the dataset. Works with label_choice=PROG\")\n p.add_argument(\"-test_prog_list\", type=argparse.FileType('r'), default=None, required=False,\n help=\"file with the list of programs in the test set (optional)\")\n\n return p.parse_args(sys.argv[1:])", "def parse_arguments():\n parser = argparse.ArgumentParser(description='Arguments get parsed via --commands')\n \n parser.add_argument('-in', dest='input', type=str, \n help=\"Specify the RTStruct file read.\")\n parser.add_argument('--out', dest='output', type=str, default=\"RTSS_info.json\",\n help=\"Specify the RTStruct file read.\")\n \n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument(\n \"config_path\",\n type=str,\n help=\"Path to the JSON configuration file containing the image transformation settings.\",\n )\n parser.add_argument(\n \"img_path\",\n type=str,\n help=\"Path to the input image file to apply transformations.\",\n )\n return parser.parse_args()", "def parseArguments():\n parser = argparse.ArgumentParser(description=__doc__.split(\"\\n\")[0])\n\n input = parser.add_argument_group(\"input\",\n description=\"Supported URL list formats.\")\n input.add_argument(\"-g\", \"--gcsv\", nargs=\"?\",\n help=\"path to Google crawl error csv file(s)\")\n input.add_argument(\"-p\", \"--plain\", nargs=\"?\",\n help=\"path to newline separated file(s)\")\n\n output = parser.add_argument_group(\"output\",\n description=\"Supported output formats.\")\n formats = {\"rack\": \"rack-rewrite 301 static redirect\",\n \"csv\": \"comma separated\"}\n output.add_argument(\"-o\", \"--output\", choices=formats.keys(),\n help=\"an output format; one of: \" + pprint.saferepr(formats) +\n \" else, a machine-readable format\")\n output.add_argument(\"-e\", \"--ext\", action=\"store_true\",\n help=\"remove file extension in redirects\")\n output.add_argument(\"-s\", \"--subdomain\", action=\"store_true\",\n help=\"remove subdomains in redirects\")\n\n files = parser.add_argument_group(\"files\",\n description=\"Heuristics regarding filename format.\")\n files.add_argument(\"-u\", \"--utc\", action=\"store_true\",\n help=\"filenames start with a date in UTC format\")\n\n search = parser.add_argument_group(\"search\",\n description=\"Fuzzy search parameters.\")\n search.add_argument(\"-c\", \"--cutoff\", default=0.32, type=float,\n help=\"fuzzy search threshold (float, defaults to: 0.32)\")\n search.add_argument(\"-m\", \"--matches\", default=1, type=int,\n help=\"number of fuzzy search matches (int, defaults to: 1)\")\n\n parser.add_argument(\"-d\", \"--dirs\", nargs=\"?\", default=\".\",\n help=\"path to directory(s) of files\")\n\n return parser.parse_args()", "def parseArguments():\n # Create argument parser\n parser = argparse.ArgumentParser()\n\n # Optional arguments\n parser.add_argument(\"-t\", \"--test\", help=\"Optionally test algorithm on subsample of the data. 
Set to 1 for testing\", type=int, default=0)\n\n parser.add_argument(\"--cores\", help=\"Optimized code for a server with a lot of RAM, set to the number of available cores\", type=int, default=40)\n\n\n # Print version\n parser.add_argument(\"--version\", action=\"version\", version='%(prog)s - Version 2.0') #version 1.0 is for the observations in June 2018\n #version 1.1 contains the optimizations made after the june observations (mainly the switch to stackmags)\n #version 1.2 changed sim class to NOT include the list of failed candidates (not qsos)\n #... copied changes made to crossval version\n #version 1.5 added check for duplicate quasars and remove them\n #version 1.6 new simulated quasars (december)\n ##-------------------\n #version 2.0: combined training of classifier and regressor, streamlined input\n #version 2.1: Tryied to updates excluded area to a little more than stripe 82 but decided not to keep it, so no change\n\n # Parse arguments\n args = parser.parse_args()\n\n return args", "def parse_args():\n parser = argparse.ArgumentParser(description=_program_description)\n parser.add_argument('input_file', help=_input_file_description)\n #parser.add_argument('-v', '--verbose', action='store_true', \n # default=False, help='show progress')\n args = parser.parse_args()\n return args", "def parse_args(args=None):\n\t\treturn _get_args_parser().parse_args(args)", "def parse_parameters():\n parser = argparse.ArgumentParser(description='Program that checks for bad evaluations.')\n parser.usage = 'bettercorrectors [-h] client_id client_secret start_date [end_date] [--sql file]'\n parser.add_argument('client_id', help='the client_id of your intranet application', type=str)\n parser.add_argument('client_secret', help='the client_secret of your intra application', type=str)\n parser.add_argument('start_date', help='the latest date in iso format', type=datetime.fromisoformat)\n parser.add_argument('end_date', help='the closest date in iso format (optional)', type=datetime.fromisoformat,\n default=datetime.now(), nargs='?')\n parser.add_argument('--sql', dest='file', help='''name of the database file in case you want to save results in a \n sqlite database''', type=str)\n args = parser.parse_args()\n return args", "def parse_inputs():\n\n parser = argparse.ArgumentParser(description=\"Command line argument handler for ugaudio spectral_average program.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n # a group of args for verbosity\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-v', '--verbose', action='store_true')\n group.add_argument('-q', '--quiet', action='store_true')\n\n # nfiles\n help_nfiles = 'nfiles to preview (ONLY for testing)'\n parser.add_argument('-g', '--nfiles', default=DEFAULT_NFILES, type=nfiles_int, help=help_nfiles)\n\n # nfft\n help_nfft = 'Nfft'\n parser.add_argument('-n', '--nfft', default=DEFAULT_NFFT, type=nfft_int, help=help_nfft)\n\n # sample rate\n help_rate = 'sample rate (sa/sec)'\n parser.add_argument('-r', '--rate', default=DEFAULT_RATE, type=rate_str, help=help_rate)\n\n # cutoff\n help_cutoff = 'cutoff'\n parser.add_argument('-c', '--cutoff', default=DEFAULT_CUTOFF, type=cutoff_str, help=help_cutoff)\n\n # sensors\n help_sensors = 'sensors'\n parser.add_argument('-s', '--sensors', default=DEFAULT_SENSORS, type=sensors_list, help=help_sensors)\n\n # PAD directory\n help_paddir = 'PAD dir'\n parser.add_argument('-p', '--paddir', default=DEFAULT_PADDIR, type=folder_str, help=help_paddir)\n\n # output directory\n 
help_outdir = 'output dir'\n parser.add_argument('-o', '--outdir', default=DEFAULT_OUTDIR, type=outdir_str, help=help_outdir)\n\n # start date\n help_start = 'start date'\n parser.add_argument('-d', '--start', default=DEFAULT_START, type=dtm_date, help=help_start)\n\n # end date\n help_end = 'end date'\n parser.add_argument('-e', '--end', default=DEFAULT_END, type=dtm_date, help=help_end)\n\n # parse arguments\n module_logger.debug('calling parse_args')\n args = parser.parse_args()\n\n return args", "def parse_args():\r\n parser = argparse.ArgumentParser(description=\"Available Options\")\r\n\r\n parser.add_argument('-i'\r\n ,'--input_path'\r\n ,dest='input_path'\r\n ,type=is_valid_path\r\n ,required=True\r\n ,help = \"Enter the path of the image file to process\")\r\n\r\n args = vars(parser.parse_args())\r\n\r\n #To Display The Command Line Arguments\r\n print(\"## Command Arguments #################################################\")\r\n print(\"\\n\".join(\"{}:{}\".format(i,j) for i,j in args.items()))\r\n print(\"######################################################################\")\r\n\r\n return args", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-f\", \"--file\", help=\"file with the cohort you want to check / fix\", type=str, required=True)\n parser.add_argument(\"-o\", \"--outdir\", help=\"where should the files and the result readme be stored?\", type=str, required=True)\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\"plumes\", help=\"path to input plume file\")\n parser.add_argument(\"output\", help=\"path to output plume file\")\n parser.add_argument(\"-r\", \"--radius\", required=True,\n help=\"radius (meters) for nearest neighbor clustering\")\n parser.add_argument(\"-v\", \"--visualize\", action='store_true',\n help=\"Show plot of points/clusters (default=no plot)\")\n args = parser.parse_args()\n return args.plumes, args.output, float(args.radius), args.visualize", "def parse_arguments(self,parser):\r\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Bandits algorithms on a click-through \"\n \"rate dataset.\")\n parser.add_argument('--plot', action='store_true')\n return parser.parse_args()", "def ReadArguments():\n\n args = ParseArguments()\n\n logging.info('Command line arguments...')\n for arg in vars(args):\n logging.info(str(arg) + ': ' + str(getattr(args, arg)))\n logging.info('')\n\n IsTest(args)\n ProcessCacheSize(args)\n ProcessLineSize(args)\n ProcessMulti(args)\n ProcessMemPattern(args)\n ProcessMemFile(args)", "def parse_args():\n import argparse\n\n parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\"plumes\", help=\"path to input plume file\")\n parser.add_argument(\"output\", help=\"path to output plume file\")\n parser.add_argument(\"--max_overlap\", default = .30,\n help=\"max_overlap value, default 0.3\")\n \n args = parser.parse_args()\n return args.plumes, args.output, float(args.max_overlap)", "def parse_arguments():\n parser = argparse.ArgumentParser(description=\n \"This script is designed to take two bed files and generate a mean-difference plot of the CORE lengths\\n\\n \\\n Example: python {0} -a A.bed -b B.bed -o BED_mean_difference.pdf\".format(argv[0]),\n formatter_class = argparse.RawDescriptionHelpFormatter)\n \n requiredNamed = parser.add_argument_group('required arguments')\n\n 
requiredNamed.add_argument(\"-a\", \"--BED_A\", type=str, required=True,\n help=\"BED file A\", action=\"store\")\n\n requiredNamed.add_argument(\"-b\", \"--BED_B\", type=str, required=True,\n help=\"BED file B\", action=\"store\")\n\n parser.add_argument(\"-o\", \"--OUTPUT\", type=str, required=False,\n help=\"Output file name.\", action=\"store\")\n\n return parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser(description='Apply timbre translation to an audio file')\n parser.add_argument('model_path', type=str, help='Path to model file')\n parser.add_argument('model_config_path', type=str, help='Path to model training config file')\n parser.add_argument('audio_path', type=str, help='Path to audio file')\n parser.add_argument('output_path', type=str,\n help='Path where output audio file will be saved')\n parser.add_argument('--hop-divisor-power', '-hdp', dest='hop_divisor_power',\n type=int, default=1,\n help='Exponent of 2 used to divde the window size to get the hop_size')\n\n args = parser.parse_args()\n return vars(args)", "def parse_arguments():\n parser = argparse.ArgumentParser(description='Helps analyze articles.')\n parser.add_argument('--config', default='config.yaml',\n help='Configuration file for the options of this script')\n parser.add_argument('--search', default=None, type=str,\n help='Search for text in the articles')\n parser.add_argument('--case-sensitive', action='store_true',\n help='Makes search case-senstive (only applicatble to --search)')\n parser.add_argument('--list', default=None, type=str,\n help='List [title|authors|date|word-count|author|excerpt|content] of the articles')\n parser.add_argument('--sort', action='store_true',\n help='Sorts output (only applicable to --list).')\n parser.add_argument('--sort-by', default=None, type=str,\n help='Sorts output by another attribute [title|author|date] (only applicable to --list)')\n parser.add_argument('--statistics', action='store_true',\n help='Gives basic statistics about the articles.')\n parser.add_argument('--count-articles', action='store_true',\n help='Counts the total number of articles')\n parser.add_argument('--count-words', action='store_true',\n help='Counts the total number of words')\n parser.add_argument('--count-paragraphs', action='store_true',\n help='Counts the total number of paragraphs')\n parser.add_argument('--count-by-author', action='store_true',\n help='Counts the number of articles by each author')\n parser.add_argument('--count-by-year', action='store_true',\n help='Counts the number of articles bucketed by year')\n parser.add_argument('--count-by-months', default=None, type=int,\n help='Counts the number of articles bucketed by number of months')\n \n return parser, parser.parse_args()", "def parse_user_input():\n DISC = 'Generate dataset from input files to one csv frame.'\n parser = argparse.ArgumentParser(description=DISC)\n\n # USER ARGS\n parser.add_argument('-raw_dir',\n type=str,\n help='Path to the dir of raw data.',\n required=True\n )\n\n parser.add_argument('-csv_file',\n type=str,\n help='CSV file of the utterances to transform.',\n required=True\n )\n\n parser.add_argument('-feature_dir',\n type=str,\n help='Path to the dir of output feature representations.',\n required=True\n )\n\n parser.add_argument('-feature_type',\n type=str,\n help='Feature representation of the speech signal.',\n required=True\n )\n\n return parser.parse_args()", "def parse_args(self):\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--calendar', 
dest='calendar_name', type=str, help=\"Destination calendar name\",\n required=True)\n parser.add_argument('-t', '--credentials', dest='credentials', type=str, help=\"Google credentials file\",\n required=True)\n parser.add_argument('-s', '--start', dest='start', type=str, help=\"Starting insert date, %d-%m-%Y\",\n required=False)\n parser.add_argument('-e', '--end', dest='end', type=str, help=\"Ending insert date, %d-%m-%Y\", required=False)\n self.args = parser.parse_args()\n if self.args[\"start\"]:\n self.fromDate = datetime.datetime.strptime(self.args[\"start\"], \"%d-%m-%Y\")\n else:\n self.fromDate = datetime.today()\n if self.args[\"end\"]:\n self.toDate = datetime.datetime.strptime(self.args[\"end\"], \"%d-%m-%Y\")\n else:\n self.toDate = self.fromDate + timedelta(days=5) # By default, we'll use a timedelta of 5 days", "def arg_parse():\n p = ap.ArgumentParser()\n p.add_argument('infile',\n help='path to file containing objects')\n p.add_argument('n1',\n help='night 1')\n p.add_argument('n2',\n help='night 2')\n p.add_argument('observatory',\n help='Astropy name of observatory')\n return p.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Deep SORT\")\n parser.add_argument(\n \"--input\", help=\"Path to MOTChallenge sequence directory\",\n default=None, required=True)\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"--thoughtspot_host\", required=True,\n help=\"domain or ip. E.g. http://1.1.1.1\")\n parser.add_argument(\"-u\", \"--username\", required=True,\n help=\"username - must have administrative privileges\")\n parser.add_argument(\"-p\", \"--password\", required=True,\n help=\"password - must have administrative privileges\")\n parser.add_argument(\"-d\", \"--delimiter\", default=',',\n help=\"character to seperate values by. Default to comma\")\n parser.add_argument(\"-c\", \"--csv\", action=\"store_true\",\n help=\"create csv file called permissions.csv\")\n parser.add_argument(\"-s\", \"--share\", action=\"store_true\",\n help=\"output usable format for share api\")\n return parser.parse_args()", "def parse_args():\n parser = common_parser()\n parser.description = (\n \"Given a sequence dict, fasta index or a bed file, scatter over the \"\n \"defined contigs/regions. Each contig/region will be split into \"\n \"multiple overlapping regions, which will be written to a new bed \"\n \"file. Each contig will be placed in a new file, unless the length of \"\n \"the contigs/regions doesn't exceed a given number.\")\n\n parser.add_argument(\"-c\", \"--chunk-size\", type=int, default=1e6,\n metavar=\"SIZE\",\n help=\"The size of the chunks. The first chunk in a \"\n \"region or contig will be exactly length SIZE, \"\n \"subsequent chunks will SIZE + OVERLAP and the final \"\n \"chunk may be anywhere from 0.5 to 1.5 times SIZE \"\n \"plus overlap. If a region (or contig) is smaller \"\n \"than SIZE the original regions will be returned. \"\n \"Defaults to 1e6\")\n parser.add_argument(\"-m\", \"--minimum-bp-per-file\", type=int, default=45e6,\n help=\"The minimum number of bases represented within \"\n \"a single output bed file. If an input contig or \"\n \"region is smaller than this MINIMUM_BP_PER_FILE, \"\n \"then the next contigs/regions will be placed in the \"\n \"same file untill this minimum is met. 
Defaults to \"\n \"45e6.\")\n parser.add_argument(\"-o\", \"--overlap\", type=int, default=150,\n help=\"The number of bases which each chunk should \"\n \"overlap with the preceding one. Defaults to 150.\")\n parser.add_argument(\"-S\", \"--split-contigs\", action=\"store_true\",\n help=\"If set, contigs are allowed to be split up over \"\n \"multiple files.\")\n args = parser.parse_args()\n return args", "def parse_cmdline(argv):\n if argv is None:\n argv = sys.argv[1:]\n\n # initialize the parser object:\n parser = argparse.ArgumentParser(description='For each timestep, gather the energy information output by LAMMPS '\n 'in the log file.')\n parser.add_argument(\"-f\", \"--file\", help=\"The log file to be processed.\",\n default=None)\n parser.add_argument(\"-l\", \"--list_file\", help=\"The a file with a list of log files to be processes.\",\n default=None)\n args = None\n try:\n args = parser.parse_args(argv)\n if args.file is None:\n args.file_list = []\n else:\n if os.path.isfile(args.file):\n args.file_list = [args.file]\n args.source_name = args.file\n else:\n raise IOError(\"Could not find specified log file: {}\".format(args.file))\n if args.list_file is not None:\n args.file_list += file_rows_to_list(args.list_file)\n args.source_name = args.list_file\n if len(args.file_list) < 1:\n raise InvalidDataError(\"Found no log file names to process. Specify one or more files as specified in \"\n \"the help documentation ('-h').\")\n except IOError as e:\n warning(\"Problems reading file:\", e)\n parser.print_help()\n return args, IO_ERROR\n except (KeyError, InvalidDataError, SystemExit) as e:\n if hasattr(e, 'code') and e.code == 0:\n return args, GOOD_RET\n warning(e)\n parser.print_help()\n return args, INPUT_ERROR\n return args, GOOD_RET", "def parseArgs():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', default='fsod', help='training dataset') # use fsod dataset for default\n parser.add_argument('--cfg', dest='cfg_file', required=True, help='optional config file')\n parser.add_argument('--load_ckpt', help='path to load checkpoint')\n parser.add_argument('--load_detectron', help='path to load detectron weight pickle file')\n parser.add_argument('--output_dir', help='output directory to save the testing results.')\n parser.add_argument('--range', help='[start, end)', type=int, nargs=2)\n parser.add_argument('--visualize', dest='visualize', help='output images of detection', action='store_true')\n return parser.parse_args()", "def parse_args(args=None):\n return AP.parse_args(args=args)", "def parse_arguments():\n parser = argparse.ArgumentParser(description=\"\"\"A script to get the kmer frequency\n from csv files with kmer counts from genomes.\"\"\")\n\n parser.add_argument('-sd',\n '--sub_dir',\n type=str,\n dest='sub_dir',\n help='Subdirectory name for output files.') # kmer_count\n\n parser.add_argument('-do',\n '--dir_out',\n type=str,\n dest='dir_out',\n help='directory name for output files.') # Results/kmer_freq\n\n return parser.parse_args()", "def readArgs():\n args = sys.argv\n if len(args) != 3:\n print(\"ERROR - Wrong number of arguments! \\n\")\n print(\"Usage: plotGantt.py TYPE path/to/result/file.gantt \\n where TYPE is : MTS / SCH\")\n exit(5)\n if args[1] != \"MTS\" and args[1] != \"SCH\":\n print(\"ERROR - Wrong type specified! 
: \" + args[1])\n print(\"Usage: plotGantt.py TYPE path/to/result/file.gantt \\n where TYPE is : MTS / SCH\")\n return args", "def _parse_arguments():\n import argparse\n\n parser = argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument(\n 'list_of_files', type=str,\n help='Input ASCII file with a list of files to be downloaded')\n\n return parser.parse_args()", "def parse_arguments(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-d', '--debug',\n help=\"Activates debug mode\",\n action=\"store_const\", dest=\"loglevel\", const=logging.DEBUG,\n default=logging.WARNING,\n )\n parser.add_argument(\n '-v', '--verbose',\n help=\"Activates verbose mode\",\n action=\"store_const\", dest=\"loglevel\", const=logging.INFO,\n )\n parser.add_argument(\n '-m', '--model',\n help=\"Path to model input file (e.g. model.json)\",\n action=\"store\", dest=\"model\",\n default='/'.join([os.path.dirname(__file__), '../../data/model.json'])\n )\n parser.add_argument(\n 'text',\n help=\"Text to be translated\",\n )\n parser.add_argument(\n '-i', '--implementation',\n help=\"Chosen method (e.g. CavnarTrenkleImpl)\",\n action=\"store\", dest=\"implementation\",\n default='CavnarTrenkleImpl'\n )\n parser.add_argument(\n '-o', '--output',\n help=\"Output results file in JSON (e.g. results.json)\",\n action=\"store\", dest=\"output_file\",\n default=None\n )\n # This argument is a json object which will be mapped to dict\n parser.add_argument(\n '--predict-args',\n help=\"Arguments for the prediction method (JSON format)\",\n action=\"store\", dest=\"predict_args\",\n type=json.loads\n )\n\n return vars(parser.parse_args(args))", "def arguments():\n\tparser = argparse.ArgumentParser(description=\"Integrate all columns of a data file. Time is in column 0.\")\n\tparser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", default=False, help=\"Print debug info.\")\n\tparser.add_argument(\"-k\", \"--kwh\", action=\"store_true\", dest=\"kwh\", default=False, help=\"output in kWh (instead of Ws)\")\n\tparser.add_argument(\"-f\", \"--file\", action=\"store\", dest=\"filename\", help=\"Path to file to read. Defaults to STDIN.\")\n\tparser.add_argument(\"-s\", \"--separator\", dest=\"separator\", default=\",\", help=\"Specify the separation character. Defaults to comma (,).\")\n\n\treturn parser.parse_args()" ]
[ "0.79977995", "0.7492374", "0.74865186", "0.74785024", "0.73592305", "0.72400653", "0.71956307", "0.7175748", "0.7048367", "0.70400876", "0.70262593", "0.7005223", "0.6989726", "0.69382596", "0.6925051", "0.69050217", "0.69037956", "0.68907475", "0.6834261", "0.68291724", "0.68274677", "0.68274677", "0.6814847", "0.68122405", "0.6806022", "0.68037355", "0.68009156", "0.6781427", "0.67692953", "0.6754925", "0.67307186", "0.67294365", "0.6719926", "0.67193615", "0.67167866", "0.67105097", "0.67089075", "0.66979533", "0.66924626", "0.6687855", "0.6682652", "0.66685253", "0.66606647", "0.66583663", "0.6653355", "0.66512084", "0.66498244", "0.6647407", "0.66460377", "0.6642615", "0.6636654", "0.6636244", "0.66276246", "0.66242343", "0.66237766", "0.66163206", "0.6613998", "0.66139513", "0.66127", "0.6599973", "0.6599519", "0.65954846", "0.6594952", "0.6588188", "0.65867907", "0.65853286", "0.6584661", "0.65827453", "0.658148", "0.6579377", "0.6578583", "0.6578574", "0.6563792", "0.6556284", "0.6553471", "0.6551801", "0.6551746", "0.65515393", "0.655089", "0.6546887", "0.65304685", "0.6520623", "0.6516904", "0.65140676", "0.65137154", "0.6511231", "0.65074956", "0.65065134", "0.6503884", "0.64991724", "0.6498503", "0.64977914", "0.64973944", "0.6495232", "0.64939576", "0.6490369", "0.6489066", "0.64880395", "0.6487893", "0.6487189", "0.6484124" ]
0.0
-1
Returns the html of url or None if status code is not 200
def get_html(url): req = urllib.request.Request( url, headers={ 'User-Agent': 'Python Learning Program', 'From': 'hklee310@gmail.com' } ) resp = urllib.request.urlopen(req) if resp.code == 200: return resp.read() # returns the html document else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def simple_get(self, url):\r\n \"\"\"\r\n The simple_get function accepts a single url argument. \r\n It then makes a GET request to that url. \r\n If nothing goes wrong, you end up with the raw HTML content for the page you requested. \r\n If there were any problems with your request (like the url is bad or the remote server is down) \r\n then your functon returns None.\r\n \"\"\"\r\n try:\r\n with closing(get(url, stream=True)) as resp:\r\n if self.is_good_response(resp):\r\n return resp.content\r\n else:\r\n return None\r\n except RequestException as e:\r\n self.log_error('Error during requests to {0} : {1}'.format(url, str(e)))\r\n return None", "def _get(self, url):\n response = requests.get(url)\n if response.status_code == requests.codes.ok:\n return response\n else:\n raise HTTPError", "def get_page_html(url: str) -> Union[int, str]:\n req = requests.get(url=url)\n if req.status_code == 200:\n return req.text\n raise requests.exceptions.RequestException('')", "def simple_get(url):\n\n def is_good_response(resp):\n \"\"\"\n Checks if a response is good.\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 and content_type is not None\n and content_type.find('html') > -1)\n\n def log_error(err):\n \"\"\"\n Simple error logging wrapper\n \"\"\"\n print(err)\n\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n return None\n\n except RequestException as err:\n log_error(\"Error during requests to {0} : {1}\".format(url, str(err)))", "def simple_get(url: str) -> str:\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.text\n else:\n return \"none\"\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return \"none\"", "def simple_get(url):\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n\n except RequestException as e:\n return None", "def simple_get(url):\n try:\n with closing(requests.get(url, stream=True)) as resp:\n if(is_good_response(resp)):\n return resp.content\n\n except:\n return None", "def simple_get(url):\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n print('The following error occurred during HTTP GET request to {0} : {1}'.format(url, str(e)))\n return None", "def simple_get(url):\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n print('The following error occurred during HTTP GET request to {0} : {1}'.format(url, str(e)))\n return None", "def simple_get(url):\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n print('The following error occurred during HTTP GET request to {0} : {1}'.format(url, str(e)))\n return None", "def simple_get(url):\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n print('The following error occurred during HTTP GET request \\\n to {0} : {1}'.format(url, str(e)))\n return None", "async def http_get(self, url, ignore_errors=False):\n self.logger.debug(\"HTTP GET %s\", url)\n code, header, body = await fetch(\n url,\n 
request_timeout=config.activator.http_request_timeout,\n follow_redirects=True,\n validate_cert=config.activator.http_validate_cert,\n eof_mark=b\"</html>\",\n )\n if 200 <= code <= 299:\n return smart_text(body, errors=\"replace\")\n elif ignore_errors:\n metrics[\"error\", (\"type\", f\"http_error_{code}\")] += 1\n self.logger.debug(\"HTTP GET %s failed: %s %s\", url, code, body)\n return smart_text(header, errors=\"replace\") + smart_text(body, errors=\"replace\")\n else:\n metrics[\"error\", (\"type\", f\"http_error_{code}\")] += 1\n self.logger.debug(\"HTTP GET %s failed: %s %s\", url, code, body)\n return None", "def _extract_html(self, url):\n self.response = requests.get(url, timeout=5)\n self.html = BeautifulSoup(self.response.content, \"lxml\") if self.response.ok else None\n # return self.html", "def simple_get(url):\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n\n except RequestException as e:\n print('Error during requests to {0} : {1}'.format(url, str(e)))\n return None", "def simple_get(url):\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n\n except RequestException as e:\n print('Error during requests to {0} : {1}'.format(url, str(e)))\n return None", "def get_page_response(url):\n try:\n page_response = requests.get(url)\n except:\n print('Error loading url')\n return None\n else:\n return page_response", "def simple_get(url):\r\n try:\r\n with closing(get(url, stream=True)) as resp:\r\n if is_good_response(resp):\r\n return resp.content\r\n else:\r\n return None\r\n\r\n except RequestException as e:\r\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\r\n return None", "def simpleGet(url):\r\n try:\r\n with closing(get(url, stream=True)) as resp:\r\n if isGoodResponse(resp):\r\n return resp.content\r\n else:\r\n return None\r\n\r\n except RequestException as e:\r\n logError('Error during requests to {0} : {1}'.format(url, str(e)))\r\n return None", "def getHTML(url): \n return urlopen(url)", "def simple_get(url):\r\n try:\r\n with closing(get(url, stream=True, timeout=10)) as resp:\r\n if is_good_response(resp):\r\n return resp #.content\r\n else:\r\n return None\r\n\r\n except RequestException as e:\r\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\r\n return None", "def get_html(url):\n print('fetching', url)\n try:\n re = requests.get(url, timeout=1, stream=True)\n print('success!')\n # limit file size to 1mb\n html = re.raw.read(1000000+1, decode_content=True)\n if len(html) > 1000000:\n raise ValueError('response too large')\n return html\n except:\n raise TimeoutError('request timed out')", "def simple_get(url):\n\ttry:\n\t\twith closing(get(url, stream=True)) as resp:\n\t\t\tif is_good_response(resp):\n\t\t\t\treturn resp.content\n\t\t\telse:\n\t\t\t\treturn None\n\n\texcept RequestException as e:\n\t\tlog_error('Error during requests to {0} : {1}'.format(url, str(e)))\n\t\treturn None", "def get_html(url):\n try:\n response = requests.get(url)\n except requests.exceptions.ConnectionError as e:\n print \"Site %s isn't accessibility\" % BASE_URL\n except requests.exceptions.ReadTimeout as e:\n print \"Error: Read Timeout\"\n except requests.exceptions.HTTPError as e:\n print \"Get an HTTPError:\", e.message\n return response.text", "def simple_get(url):\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n 
return None\n\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None", "def simple_get(url):\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None", "def simple_get(url):\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None", "def simple_get(url):\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None", "def simple_get(url):\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None", "def simple_get(url):\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None", "def get_page(url):\n # todo need some error checking\n\n r = requests.get(url)\n\n if r.status_code != 200:\n log_date = datetime.now().strftime(\"%Y-%m-%d %H%M%S\")\n filename = f'{log_date} response.html'\n with open(filename, 'w+') as f:\n f.write(r.text)\n logging.critical('get_page failed with status {}. See file {}.'.format(\n r.status_code,\n filename\n ))\n r.raise_for_status()\n\n return r", "def simple_get(url):\n\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n\n except RequestException as e:\n log_error(\"Error during requests to {0} : {1}\".format(url, str(e)))\n return None", "def GET(self):\n content = requests.get(self._url)\n if content.status_code != 200:\n print(\"There was a problem with the get request. 
Error code is: %s.\" % content.status_code)\n return False\n else:\n print(\"The status code is %s.\" % content.status_code)\n return content", "def simple_get(url):\n\ttry:\n\t\twith closing(get(url, stream=True)) as resp:\n\t\t\tif is_good_response(resp):\n\t\t\t\treturn resp.content # pylint: disable=no-member\n\t\t\telse:\n\t\t\t\treturn None\n\n\texcept RequestException as e:\n\t\tlog_error('Error during requests to {0} : {1}'.format(url, str(e)))\n\t\treturn None", "def get_html(url):\n return urllib.request.urlopen(url)", "def getHtml(_url):\n try:\n logger.info('getHtml: Requesting: %s' % _url)\n\n response = urllib2.urlopen(_url)\n\n #download data\n html_ = response.read()\n logger.debug('getHtml: Retrieved data: %s' % html_)\n\n return html_\n\n except urllib2.HTTPError, e:\n logger.error('getHtml: HTTPError: ' + str(e.code))\n\n except urllib2.URLError, e:\n logger.error('getHtml: URLError: ' + str(e.reason))\n\n except httplib.HTTPException, e:\n logger.error('getHtml: HTTPException: ', str(e))\n\n except Exception:\n logger.exception('getHtml: Unhandled exception: ')", "def getHtml(url):\n return urlopen(url)", "def get_page(url):\n try:\n with closing(get(url, stream=True)) as res:\n if is_good_response(res):\n return res.content\n except RequestException as e:\n log_error(e)", "def url_check_tester(client, url, status_code):\n response = client.get(url)\n assert response.status_code == status_code, \\\n f'Unexpected status code for {url}'\n assert response.data == b''", "def check_for_get_code(self, code, url):\r\n resp = self.client.get(url)\r\n self.assertEqual(resp.status_code, code,\r\n \"got code %d for url '%s'. Expected code %d\"\r\n % (resp.status_code, url, code))\r\n return resp", "def get_html(url: str) -> str:\n headers = {\n 'User-Agent': Config.Scraper.user_agent,\n }\n logging.debug('User-Agent: ' + headers['User-Agent'])\n r = requests.get(url.strip(), headers=headers)\n r.encoding = 'utf8'\n print('[Status Code: %s]' % r.status_code)\n if r.status_code != 200:\n raise Exception('Error in get HTML!')\n return r.text", "def retrieve_html(url):\n req = urllib2.Request(url)\n req.add_header('User-Agent', 'Just-Crawling 0.1')\n request = None\n status = 0\n try:\n logger.info(\"Crawling %s\" % url)\n request = urllib2.urlopen(req)\n except urllib2.URLError as e:\n logger.error(\"Exception at url: %s\\n%s\" % (url, e))\n except urllib2.HTTPError as e:\n status = e.code\n except:\n return\n if status == 0:\n status = 200\n\n try:\n data = request.read()\n except:\n return\n\n return str(data)", "def get_check_url(self,url):\n r = requests.get(url).status_code\n if r==requests.codes.ok:\n return(True)\n else:\n print \"something wrong! 
status_code: \" + r\n return(False)", "def load_page(url):\n try:\n response = urllib2.urlopen(url)\n html = response.read()\n\n if response.code == 200:\n body_text = html\n return html\n return \"\"\n except Exception:\n return \"\"", "def get_content_from_url(link):\n # sleep time before making web request\n sleep(SCRAPING_REQUEST_STAGGER)\n response = requests.get(link)\n if response.status_code != 200:\n return False\n return response.content", "def fetch_url(url):\n try:\n soup = bs(urlopen(url).read(), 'html.parser')\n return soup\n except:\n print \"Couldnot download the content from the URL\", url\n return \"\"", "def html(self) -> str:\n if self.html_file:\n with open(self.html_file, \"r\") as f:\n return f.read()\n else:\n try:\n return get(self.url)\n except HTTPError as e:\n if e.code == 404:\n raise PageNotFoundException(\n e.code,\n f\"Object {self.id} not found. Check that the id is correct.\",\n )\n return \"\"", "async def fetch_html(url: str,\n session: aiohttp.ClientSession,\n **kwargs) -> str:\n\n resp = await session.request(method=\"GET\", url=url, **kwargs)\n resp.raise_for_status()\n logger.info(\"Got response [%s] for URL: %s\", resp.status, url)\n html = await resp.text()\n return html", "def get(url:str, session: requests.Session = None):\n if session:\n resp = session.get(url)\n else:\n resp = requests.get(url)\n \n if resp.status_code == 401:\n raise AuthenticationError('Authentication failed.')\n if resp.status_code == 404:\n raise NotFoundError('Page not found.')\n if resp.status_code >= 500 and resp.status_code < 600:\n print(resp.text)\n raise ServerError('Encountered a server error.')\n \n return resp.text", "def get_html_from_url(url):\n request = requests.get(url)\n data = request.text\n return data", "def simple_get(url):\n try:\n resp = get(url, verify=False, headers={'User-agent': 'your bot 0.1'})\n if is_good_response(resp):\n return resp.content\n else:\n print(\"Bad response\")\n return None\n\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None", "def _get(self, url: str) -> requests.Response:\n # todo: do some error checking here\n if url.startswith(API_PATH['base']):\n try:\n # logger.debug(f\"RestClient._get(): {url}\") # log in calling function\n response = requests.get(url, auth=self.auth)\n rest_code = response.json()['meta']['code']\n if rest_code not in [200, 201, 204]:\n raise RestException(f\"REST API Error: {rest_code}. 
{response.content}\")\n except RestException as e:\n logger.error(e)\n return None\n return response\n else:\n raise ValueError(f\"URL is invalid: {url}\")", "def test_get_indexhtml(self):\n url = self.baseurl + \"/main\"\n req = urllib2.urlopen(url, None, 3)\n self.assertTrue( req.getcode() == 200 , \"200 OK Not FOUND!\")", "def read_html(url: str) -> BeautifulSoup:\n try:\n response = requests.get(url, stream=True)\n status_code = response.status_code\n content_type = response.headers[\"Content-Type\"].lower()\n except requests.RequestException as e:\n raise RuntimeError(f\"Error during requests to {url} : {str(e)}\")\n else:\n if (\n status_code == 200\n and content_type is not None\n and content_type.find(\"html\") > -1\n ):\n return BeautifulSoup(response.content, \"html.parser\")", "def get(self, url):\n htmlDoc = None\n if url == None:\n return htmlDoc\n url = url.strip()\n if url == '' or not url.startswith('http'):\n return htmlDoc\n\n try:\n # sleep time\n if self.config.get_sleep_time is not None:\n time.sleep(self.config.get_sleep_time)\n htmlDoc = self.opener.open(url)\n except urllib2.HTTPError as error:\n code = error.code\n try:\n error_reason = error.read()\n except Exception as error:\n error_reason = error\n if code == 400:\n log.info(\"400 Error(请求参数出错!) reason: %r url: %r\", error_reason, url)\n elif code == 403:\n log.info(\"403 Error(资源不可使用!) reason: %r url: %r\", error_reason, url)\n elif code == 404:\n log.info(\"404 Error(无法找到指定资源地址!) reason: %r url: %r\", error_reason, url)\n elif code == 503:\n log.info(\"503 Error(服务不可使用!) reason: %r url: %r\", error_reason, url)\n elif code == 504:\n log.info(\"504 Error(网关超时!) reason: %r url: %r\", error_reason, url)\n else:\n log.info(\"%r Error reason: %r url: %r\", code, error_reason, url)\n htmlDoc = None\n except urllib2.URLError as error:\n if isinstance(error.reason, socket.timeout):\n log.info(\"TimeoutError(reason: %r) url: %r\", error.reason, url)\n else:\n log.info(\"URLError(reason: %r) url: %r\", error.reason, url)\n htmlDoc = None\n except Exception as error:\n log.info(\"Error(reason: %r) url: %r\", error, url)\n htmlDoc = None\n\n return htmlDoc", "def get_page(url):\n try:\n return urlopen(url).read()\n except:\n return None\n return None", "def get_html_data(url):\n print('Downloading HTML from - {}'.format(url))\n try:\n site = urlopen(url)\n html_data = site.read()\n except HTTPError as e:\n if e.code == 404:\n print('404 Not Found error occurred - continuing...')\n return None\n elif e.code == 403:\n print('403 Forbidden error occurred - continuing...')\n return None\n else:\n raise\n\n return html_data", "def get_html_soup(url_target, getter=1):\n if getter == 1:\n response = requests.get(url_target) # getter == 1\n status_code = response.status_code\n markup = response.text\n else:\n response = urlopen(url_target)\n status_code = response.getcode()\n markup = response\n print(f\"status_code = [{status_code}] \\n\")\n return BeautifulSoup(markup=markup, features='html.parser')", "def get_url_content(url):\n try:\n print(\"HTTP request to the URL {}\".format(url))\n page = requests.get(url, headers=http_headers, timeout=10)\n except requests.exceptions.Timeout:\n print(\"Timeout exceeded for URL {}\".format(url))\n except requests.exceptions.RequestException:\n print(\"Broken connection for URL {}\".format(url))\n finally:\n return page", "def checkStatus(url):\n def checkForIndexPage(r):\n \"\"\"Checks whether it a given url is actually an Index Of page. 
Takes in a Request object\"\"\"\n soup = BeautifulSoup(r.text, 'lxml')\n head = soup.find('h1')\n if head != None and head.string != None and (\"Index of \" in head.string):\n return \"Shows 'Index Of' page ✘\" \n else:\n return \"Displays properly ✓\"\n\n returnString = \"\"\n try:\n r = requests.get(url)\n returnString += str(r.status_code) \n if r.status_code == 200: # if the page is accessible, then check whether it displays properly\n returnString += \"\\n\\t\" + checkForIndexPage(r)\n return returnString\n except Exception as e:\n return(e)", "def load_url_content(url):\n try:\n r = requests.get(url)\n if r.ok:\n return r.text\n else:\n return None\n except Exception:\n return None", "def getHtml(self, url):\n r = requests.get(url)\n html = r.content\n return html", "def read_url(url):\n try:\n response = requests.get(url)\n except requests.ConnectionError:\n content = '{\"error\": \"Bad Connection\"}'\n except MissingSchema: # The url does not exist\n content = '{\"error\": \"Bad Url\"}'\n else:\n if response.status_code == 200:\n content = response.text\n else:\n content = '{\"error\": \"' + response.reason + '\"}'\n\n return content", "def _request(self, url):\n response = requests.get(url, headers=self.header)\n\n if str(response.status_code).startswith('2'):\n return response\n\n raise Exception(\"URI request returned an error. Error Code \" + str(response.status_code))", "def getHTMLText(url):\n try:\n r = requests.get(url, timeout = 300)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n return r.text\n except:\n return \"\"", "def get_html(url):\n # time.sleep(float(random.randint(1, 500)) / 100)\n requests.adapters.DEFAULT_RETRIES = 5\n headers = {\n 'Content-Type': \"application/json;charset=uf8\"\n }\n\n try:\n response = requests.get(url, headers=headers, stream=False, timeout=10)\n except Exception as e:\n print(e)\n print('html连接异常')\n return 'html_err'\n\n s = requests.session()\n s.keep_alive = False\n response.close()\n\n if response.status_code == 200:\n print(f'{url}\\n页面请求成功')\n response.encoding = 'utf8'\n # print(type(response)) # <class 'requests.models.Response'>\n return response.text # 输出网页文本\n # return response.json() # 输入的地址内容是json\n # return response.content # 输入的地址内容是文件,比如图片、视频\n else:\n print('请求网页源代码错误, 错误状态码:', response.status_code)\n return response.status_code", "def get_html(url):\n\n r = requests.get(url, headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'\n })\n html = r.text\n\n return html", "def get_html(website_url):\n\n website_response = requests.get(website_url, headers=headers_req)\n if website_response.status_code != requests.codes.ok:\n raise SiteUnreachableException()\n return BeautifulSoup(website_response.content, 'html.parser')", "def get_html(url):\n response = requests.get(url)\n response.encoding = 'utf-8'\n return response.text", "def url_was_found(url=\"localhost:5000/health\"):\n res = requests.get(url).json()\n\n if res['status_code'] == 200:\n return True\n elif res['status_code'] == 404:\n return False\n else:\n raise UnexpectedResponseError(\"Expected 200 OK or 404, got {}.\\n\".format(res['status']), \"Full response : {}\".format(res))", "def get_status():\n return \"OK\" # defaults to a 200 HTML status return code", "def validate_url(url):\n response, content = get_response_from_file(url)\n\n if response == None and content == None:\n response, content = get_response_and_content(url)\n\n if response == None:\n return 
url, url, 0, \"\", \"N\", \"N\", \"N\", hit(\"No Response\"), \"false\"\n else:\n #print(url, get_visible_text(content))\n return evaluate_content_for_200s(response, url, content)", "def get_html_source(url):\n # import urllib\n try:\n sock = urllib.urlopen(url)\n html_source = sock.read()\n sock.close()\n return html_source\n except IOError:\n print \"IOError: Not a valid URL\"", "def load_page(url: str) -> str:\n try:\n response = urlopen(url)\n\n if response.status == 200:\n body_text = str(response.read())\n return body_text\n return \"\"\n except URLError:\n return \"\"", "def scrape(self):\n try:\n self.result = urlfetch.fetch(self.url)\n except DownloadError:\n self.result = urlfetch.fetch(self.url) \n if ((self.result.status_code == 200) and\n (self.result.content_was_truncated == 0)):\n self.soup = BeautifulSoup(self.result.content)\n else:\n logging.critical(\"Bad Status Code: \", self.result.status_code, self.url)\n sys.exit(1)", "def get_api_result(session,url):\n try:\n res=session.get(url,timeout=TIMEOUT)\n if res.status_code == 404:\n raise NotFound(\"URL {} not found\".format(url))\n except Exception as e:\n log.error(e)\n return None\n return res.content", "def check_mitm_status_page(self, check_url):\n response = requests.get(check_url)\n if response.status_code == 200:\n return response\n else:\n sys.exit(2)", "def _get(self, url):\n ret = self.session.get(url, headers=self.headers)\n if ret.status_code != 200:\n raise ConnectionError(\n 'Status code {status} for url {url}\\n{content}'.format(\n status=ret.status_code, url=url, content=ret.text))\n return ret", "def get_response(self, url):\n\n conn = http.client.HTTPConnection('localhost:8080')\n conn.request('GET', url)\n\n response = conn.getresponse()\n self.assertEqual(200, response.getcode())\n\n conn.close()\n\n return response", "def get_response(self, url):\n # Getting response from zillow.com.\n for i in range(5):\n response = requests.get(url, headers=self.get_headers())\n if response.status_code != 200:\n continue\n else:\n return response\n return None", "def _get_url(self, url, proxies=None):\n scraper = cloudscraper.create_scraper()\n try:\n html_rsp = scraper.get(url, proxies=proxies).text\n if html_rsp is None:\n logging.info('Error in SBScraper._get_url with url %s and proxy %s.', url, proxies)\n logging.info('Web response had NoneType.')\n self.html_response = False\n return\n self.html_response = html_rsp\n return\n # General exception as there are lots of errors with cloudflare. 
Every exception is handled via return values.\n except Exception as err: # pylint: disable=broad-except\n logging.info('Error in SBScraper._get_url with url %s and proxy %s.', url, proxies)\n logging.info('Error message was: %s', err)\n self.html_response = False\n return", "def get_response(url):\n resp = requests.get(url)\n if resp.status_code == 200:\n return resp.json()\n\n raise Exception(f\"Failed to fetch: {url}\")", "def http_get_and_parse(self, url, *args, **kwargs):\n fr = FuzzableRequest(url, method='GET')\n\n http_response = self._uri_opener.send_mutant(fr, cache=True)\n\n # The 204 check is because of Plugin.handle_url_error()\n if not is_404(http_response) and not http_response.get_code() == 204:\n self.output_queue.put(fr)\n\n on_success = kwargs.get('on_success', None)\n if on_success is not None:\n on_success(http_response, url, *args)\n\n return http_response", "def check_link(url):\n try:\n\n r = requests.get(url)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n return r.text\n except:\n print('Connection Failed!!!')", "def get_response(request_url):\n return requests.get(request_url)", "def get_soup(url: str):\n\n page_response = get_page_response(url)\n if page_response is not None:\n try:\n soup = BeautifulSoup(page_response.content, 'lxml')\n except:\n print('Trouble parsing the soup for: {}'.format(url))\n return None\n else:\n return soup\n else:\n print(f'The response object was \"None\" so there is no point in trying to parse for url {url}')\n return None", "def describe_url(url):\n code, content, resp = util.get_page(url)\n if code != 200:\n print \"I tried to look at %s but it told me %s.\" % (url, str(code))\n return\n\n if resp.headers[\"content-type\"].startswith(\"text/html\"):\n return extract_article_text(url)\n else:\n msg = None\n try:\n msg = \"%s, %s bytes\" % (resp.headers[\"content-type\"],\n resp.headers[\"content-length\"])\n except KeyError:\n print \"Missing headers for %s\" % url\n return msg", "def fetch(url):\n content = requests.get(url).text\n if \"Error\" in content:\n raise ValueError(f\"Cannot read from: {url}\")\n return content", "def call_website(link: str) -> str:\n r = requests.get(link)\n\n if r.status_code != 200:\n sys.exit(1)\n\n return r.text", "def get_server_status_code(url):\n host, path = urlparse.urlparse(url)[1:3] # elems [1] and [2]\n print \"host : \", host, \" | path : \", path\n try:\n conn = httplib.HTTPConnection(host)\n conn.request('HEAD', path)\n print \" | status : \" , conn.getresponse().status\n return conn.getresponse().status\n except StandardError as e:\n print \"StandardError : \", e\n return None", "def fetch_code(url):\n status, response = http_request(url)\n\n if status != 200:\n writer(\n f\"\\nError: HTTP status {status} returned, 200 expected\\n - {url}\\n\",\n FORMAT[\"ERROR\"]\n )\n sys.exit(1)\n\n code_type = classify_response(response)\n\n return response, code_type", "def get(self, url):\n h = httplib2.Http('.tmp')\n (response, xml) = h.request(url, \"GET\")\n if int(response['status']) >= 400:\n if 'verbose' in self.args and self.args.verbose:\n print \"URL: %s\" % url\n raise ValueError(\"URL %s response: %s\" % (url, response['status']))\n self.xml = xml\n return True", "def _get_site_html(url):\n\n hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Ge\\cko)' \n 'Chrome/23.0.1271.64 Safari/537.11',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n 
'Accept-Encoding': 'none',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Connection': 'keep-alive'}\n\n try:\n req = urllib2.Request(url, headers = hdr)\n source = urllib2.urlopen(req).read()\n except:\n source = []\n\n return source", "def get_response(url: str):\n response = requests.get(url, timeout=10, allow_redirects=False, verify=False)\n response.raise_for_status()\n return response", "def get_url(url):\r\n response = requests.get(url)\r\n content = response.content.decode(\"utf8\")\r\n return content", "def test_get_publish_html(self):\n response = self.setup_get_html_test('/api/publish')\n self.assertEqual(response.status_code, 200)", "def get(self, url):\n \n content = \"\"\n if hasattr(http.client, \"HTTPSConnection\"): \n url_options = urlparse(url)\n\n conn = http.client.HTTPSConnection(url_options.netloc)\n conn.request('GET', url_options.path + '?' + url_options.query)\n content = conn.getresponse().read().decode('utf-8')\n conn.close()\n else: \n p = os.popen('curl -k \"' + url + '\"')\n content = p.read()\n p.close() \n\n return content", "def _http_request(self, url):\n try:\n opener = urllib.build_opener()\n opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n response = opener.open(url, None)\n #response = urllib.urlopen(url, timeout=5)\n return response.read()\n except (urllib.URLError, ValueError, IndexError, TimeoutException, BadStatusLine):\n return ''", "def _html(url: str) -> BeautifulSoup:\n with urllib3.PoolManager() as manager:\n res = manager.request(\"GET\", url, headers={\"User-Agent\": ua.chrome})\n if res.status != 200:\n raise Exception(res.status)\n soup = BeautifulSoup(res.data, \"html.parser\")\n return soup", "def fetchUrl(self, url):\n self.driver.get(url)\n html = self.driver.page_source\n return html", "def test_get_view_html(self):\n response = self.setup_get_html_test('/api/view/1')\n self.assertEqual(response.status_code, 200)" ]
[ "0.73372376", "0.6996886", "0.6942967", "0.69052494", "0.6894238", "0.6819237", "0.68131924", "0.68110603", "0.68110603", "0.68110603", "0.6805156", "0.6804368", "0.6774557", "0.67645854", "0.67645854", "0.67632246", "0.6751196", "0.6743401", "0.67430574", "0.6718777", "0.6717295", "0.67070925", "0.67063063", "0.66989934", "0.66989934", "0.66989934", "0.66989934", "0.66989934", "0.66989934", "0.66986704", "0.6693046", "0.6690417", "0.66828567", "0.66724974", "0.6622054", "0.6588788", "0.65866524", "0.65722144", "0.65545774", "0.6553394", "0.6537466", "0.65229934", "0.6515347", "0.6486314", "0.6474141", "0.6421707", "0.6421534", "0.6409075", "0.6404275", "0.6404196", "0.63924426", "0.63874984", "0.63790846", "0.63763964", "0.6366177", "0.6355302", "0.63497525", "0.6348355", "0.6330668", "0.63289666", "0.6314917", "0.6308786", "0.63039225", "0.6291665", "0.6284061", "0.6275683", "0.62625235", "0.6257465", "0.6236482", "0.6219173", "0.6209229", "0.6189995", "0.61758435", "0.61650246", "0.614872", "0.61216843", "0.6109461", "0.6093409", "0.6093122", "0.6091046", "0.60876775", "0.608653", "0.60842776", "0.6071615", "0.6057247", "0.60491335", "0.60411483", "0.60163057", "0.60150874", "0.6008001", "0.60038733", "0.60018086", "0.59955496", "0.59954935", "0.5992215", "0.59880924", "0.59864074", "0.59815323", "0.59795177", "0.5978568" ]
0.6987104
2
Decodes char byte array to string.
def decode_to_string(self, value): #if python3 or python 2.7 ret = bytearray(value).decode(encoding='UTF-8') #if python2.7 #ret = str(bytearray(value)) return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode_bytes(data: bytearray) -> str:\n pattern = re.compile('\\r', re.UNICODE)\n res = data.decode('utf-8', 'ignore')\n res = pattern.sub('', res)\n return res", "def _bytes_bytearray_to_str(s):\n if isinstance(s, (bytes, bytearray)):\n return s.decode()\n return s", "def bytes_to_str(b, encoding='ascii'):\n return b.decode(encoding)", "def bytes_to_text(byte_array, encoding='UTF-8'):\n return bytes(byte_array).decode(encoding)", "def _decode_octet_string(bytes_data): # type: (bytes) -> bytes\n return bytes_data", "def bytes_to_str(self, data):\n if isinstance(data, str):\n return data\n return data.decode(\"utf-8\")", "def bytes_string_to_string(bytes_string):\n return str(bytes_string).split(\"'\")[1]", "def unpack_utf8_string(data, length_byte_size=2):\n array_bytes, consumed = DecodeUtils.unpack_byte_array(\n data, length_byte_size\n )\n return array_bytes.decode('utf-8'), consumed", "def _encoded_string_to_string(encoded_blob):\n try:\n return encoded_blob.decode(\"base64\")\n except Exception:\n raise InvalidDeckDataException(\"Cannot decode deck data into anything readable.\")", "def convert_to_string(_bytes: bytes)-> str:\n # print('input bytes: ', _bytes)\n # print('string: ', binascii.hexlify(_bytes))\n # print('string2: ', _bytes.hex())\n # print('string3: ', \" \".join([\"{:02x}\".format(x) for x in _bytes]))\n return \" \".join([\"{:02x}\".format(x) for x in _bytes])", "def decodeUtf8(self, arrayBuffer):", "def decodeUtf8(self, arrayBuffer):", "def _decode_binary(data):\n try:\n data = data.decode('utf-8')\n except UnicodeDecodeError: # pragma: no cover\n # for data written an upstream java App\n data = data.decode('latin-1')\n return data", "def decode_bytes(bytes_to_decode):\n if not bytes_to_decode:\n return ''\n\n try:\n return bytes_to_decode.decode('utf-8')\n except UnicodeError:\n return bytes_to_decode.decode(locale.getpreferredencoding(), errors='replace')", "def bytes_to_str(s, encoding='utf-8'):\n if isinstance(s, bytes):\n return s.decode(encoding)\n return s", "def decryptToString(self, data, keyobj):\n return self.decryptByteArray(data, keyobj).decode().split('\\x00')[0]", "def tostring(b):\n if isinstance(b, bytes):\n return bytes2str(b)\n else:\n return b", "def to_string(data):\n if isinstance(data, bytes):\n return data.decode('utf-8')\n else:\n return data", "def _decode_str(self, buf):\n length = self._decode_vint(buf)\n result = buf.read(length)\n if len(result) != length:\n raise EndOfMessage(True)\n return result", "def _decode_bytes(data: BencodedString) -> bytes:\n # Get byte string length\n delimiter_index = data.bytes.find(COLON)\n\n if delimiter_index > 0:\n length_prefix = data.get_prefix(delimiter_index)\n string_length = int(length_prefix.decode(\"ascii\"))\n data.del_prefix(delimiter_index + 1)\n else:\n raise ValueError(\n \"Cannot decode a byte string, it doesn't contain a delimiter. \"\n \"Most likely the bencoded string is incomplete or incorrect.\"\n )\n\n # Get byte string data\n if len(data.bytes) >= string_length:\n result_bytes = data.get_prefix(string_length)\n data.del_prefix(string_length)\n else:\n raise ValueError(\n f\"Cannot decode a byte string (prefix length \"\n f\"- {string_length}, real_length - {len(data.bytes)}. 
\"\n \"Most likely the bencoded string is incomplete or incorrect.\"\n )\n\n return result_bytes", "def decode_doomstring(byte_string):\n s = list()\n import sys\n if len(byte_string) > 0:\n for b in byte_string:\n if b == 0:\n break\n try:\n b = (b).to_bytes(1, 'little').decode('ascii')\n except Exception:\n # Encountered an invalid character, just ignore it\n continue\n s.append(b)\n return ''.join(s)\n else:\n return ''", "def _as_bytes(s):\n if isinstance(s, bytes):\n return s\n # Assume it is a unicode string\n # Note ISO-8859-1 aka Latin-1 preserves first 256 chars\n return codecs.latin_1_encode(s)[0]", "def hex_decode_bytes(hex_bytes):\n out = \"\"\n hex_len = len(hex_bytes)\n i = 0\n while i < hex_len - 1:\n out += chr(int(hex_bytes[i:i + 2]), 16)\n i += 2\n return out", "def _decode_data(self, data):\r\n return data.decode('ISO-8859-1')", "def decode_string(encoded: bytes) -> str:\n if encoded.startswith(codecs.BOM_UTF8):\n return encoded.decode(\"utf-8-sig\")\n elif encoded.startswith(codecs.BOM_UTF16):\n encoded = encoded[len(codecs.BOM_UTF16) :]\n return encoded.decode(\"utf-16\")\n else:\n # No BOM to determine encoding, try utf-8\n return encoded.decode(\"utf-8\")", "def __decodeString(self,ascii):\n second = ascii%256\n first = (ascii-second)/256\n return str(chr(first))+str(chr(second))", "def decodebytes(s):\n\n decoded = decode(s)\n buf = bytearray()\n while decoded > 0:\n buf.append(decoded & 0xff)\n decoded //= 256\n buf.reverse()\n\n return bytes(buf)", "def decoding_strings(data):\n if isinstance(data, str):\n data = data.replace(\"b'\", \"\")\n return data\n elif isinstance(data, bytes):\n return data.decode()\n else:\n return False", "def bytes_to_str(s, encoding='utf-8'):\n if six.PY3 and isinstance(s, bytes):\n return s.decode(encoding)\n return s", "def bytes_to_str(s, encoding='utf-8'):\n if six.PY3 and isinstance(s, bytes):\n return s.decode(encoding)\n return s", "def bytes2decstr(bytes_buffer, sep=''):\t\n\treturn sep.join(map(lambda x: '{0}'.format(x), bytes_buffer))", "def str_to_bytes(data):\n u_type = type(b''.decode('utf8'))\n if isinstance(data, u_type):\n return data.encode('utf8')\n return data", "def b2s(b):\n return b.decode('utf-8')", "def decode_python(self, encoded_chunks: np.ndarray) -> np.ndarray:\n encoded_chunks_reshaped = encoded_chunks.reshape((-1, 1))\n encoded_chunks_tiles = np.tile(\n encoded_chunks_reshaped, [1, self._dtype_size_bytes]\n )\n encoded_chunks_bytes_shifted = np.right_shift(\n encoded_chunks_tiles, self._bit_lengths\n )\n encoded_chunks_bytes = encoded_chunks_bytes_shifted % 2 ** (\n self._utf8_size_bits\n )\n int_to_char_fn = lambda x: (dict(enumerate(self._int_to_byte_map)).get(x))\n\n # Added `otypes=(np.string_,)` as an additional arg to np.vectorize to avoid\n # numpy crashes with empty strings (not able to identify the type).\n decoded_chars = np.vectorize(int_to_char_fn, otypes=(np.string_,))(\n encoded_chunks_bytes\n )\n decoded_chars_reshaped = decoded_chars.reshape(-1, self._max_length)\n decoded_strings = np.apply_along_axis(\n lambda r: r.tobytes(), arr=decoded_chars_reshaped, axis=1\n )\n\n return decoded_strings", "def decode(self, s):", "def decode(self, s):", "def bdecode_buffer(data):\n\tif isinstance(data, str):\n\t\tdata = data.encode()\n\twith BytesIO(data) as f:\n\t\treturn bdecode(f)", "def decode_buffer(buf):\n return buf.getvalue().decode('utf-8')", "def bytes2str(data):\n # pylint: disable=multiple-statements\n\n if isinstance(data, bytes): return data.decode('utf-8')\n if isinstance(data, dict): 
return dict(map(bytes2str, data.items()))\n if isinstance(data, tuple): return map(bytes2str, data)\n return data", "def bytes_as_char_array(b):\n return \"{ \" + \", \".join(\"0x%02x\" % x for x in b) + \" }\"", "def Cbytestring2Python(bytestring):\n try:\n return bytes(bytestring).partition(b'\\0')[0].decode('utf_8').rstrip()\n except BaseException:\n pass\n try: # Codepage 1252 includes Scandinavian characters\n return bytes(bytestring).partition(b'\\0')[0].decode('cp1252').rstrip()\n except BaseException:\n pass\n try: # OK, struggling, just ignore errors\n return bytes(bytestring).partition(b'\\0')[\n 0].decode('utf_8', 'ignore').rstrip()\n except Exception as e:\n print('Trouble decoding a string')\n print(e)", "def getStr_c_decode(s):\n try:\n return unicode(s[:s.index('\\x00')].decode('gb18030'))\n except:\n return unicode(s.decode('gb18030'))", "def read_char(data):\n s_type = \"=%s\" % get_type(\"char\")\n return struct.unpack(s_type, data.read(1))[0]", "def h5_to_string(char_array):\n import numpy as np\n if type(char_array) in [bytes, np.bytes_]:\n return char_array.decode()\n if type(char_array) == str:\n return char_array\n raise TypeError(\"Char_array must be a string or byte array!\\n\"\n +\"Your type is: {}.\\n\".format(type(char_array)))", "def test_bytes(self):\n self.assertRaises(\n UnicodeDecodeError,\n lambda: bytes_to_str(\"\\N{SNOWMAN}\".encode(\"utf-8\")),\n )\n decoded = bytes_to_str(b\"hello world\")\n self.assertIsInstance(decoded, str)\n self.assertEqual(decoded, \"hello world\")", "def bytes2str(val):\n if isinstance(val, bytes):\n return str(val, \"utf8\")\n else:\n return val", "def str_to_bytes(self, data):\n if isinstance(data, bytes):\n return data\n return data.encode(\"utf-8\")", "def decode_str(self, value, pos):\n length, pos = self.decode_varint(value, pos)\n end = pos+length\n return value[pos:end].decode('utf-8', 'backslashreplace'), end", "def bytes_to_str(str1, logger=None, str_decode='default', py_version=3):\n if not ((py_version == 2 and isinstance(str1, str)) or\n (py_version == 3 and isinstance(str1, str))):\n try:\n if str_decode != 'default':\n str1 = str1.decode(str_decode.lower(), 'ignore')\n else:\n try:\n str1 = str1.decode('utf-8', 'ignore')\n except UnicodeDecodeError:\n try:\n str1 = str1.decode('gbk', 'ignore')\n except UnicodeDecodeError:\n str1 = str1.decode(sys.getfilesystemencoding(),\n 'ignore')\n except UnicodeDecodeError as err_message:\n if logger:\n logger.info('[err]bytes_to_str:decode %s to str failed', str1)\n logger.info(str(err_message))\n return str1", "def to_string(data):\n return to_bytes(data)", "def decodeBytesUtf8Safe(self, data):\n N_bytes = len(data)\n\n decoded = \"\"\n while(N_bytes>0):\n try:\n decoded = data[:N_bytes].decode(\"utf-8\")\n except UnicodeDecodeError as ex:\n N_bytes -= 1\n else:\n break\n\n return decoded, data[N_bytes:]", "def decode(self, data):\n encoding = getattr(self, 'encoding', 'ascii')\n return data.decode(encoding, 'ignore')", "def decode(b64_msg: str) -> str:\n\n b64_bytes = b64_msg.encode(\"ascii\")\n b64_bytes = base64.b64decode(b64_bytes)\n return b64_bytes.decode(\"ascii\")", "def read_string(self):\n return self.bits.read('bytes:{0}'.format(self.read_int())).decode(\"utf-8\", 'replace')", "def decode_encoded_string_value(byte_iter):\n try:\n # First try \"Value-length Char-set Text-string\"\n value_length = wsp_pdu.Decoder.decode_value_length(byte_iter)\n # TODO: add proper support for charsets...\n try:\n charset = wsp_pdu.Decoder.decode_well_known_charset(byte_iter)\n 
except wsp_pdu.DecodeError, msg:\n raise Exception('encoded_string_value decoding error - '\n 'Could not decode Charset value: %s' % msg)\n\n return wsp_pdu.Decoder.decode_text_string(byte_iter)\n except wsp_pdu.DecodeError:\n # Fall back on just \"Text-string\"\n return wsp_pdu.Decoder.decode_text_string(byte_iter)", "def toString(data):\n\tif isString(data):\n\t\treturn data\n\telse:\n\t\treturn data.decode(\"latin-1\")", "def decode(value, encoding='utf-8') -> str:\n return value.decode(encoding) if isinstance(value, bytes) else value", "def decode (self, s):\n if s == \"null\": return []\n return s.split(chr(257))", "def test_bytes_to_native_str(self):\n b = bytes(b'abc')\n s = bytes_to_native_str(b)\n if PY2:\n self.assertEqual(s, b)\n else:\n self.assertEqual(s, 'abc')\n self.assertTrue(isinstance(s, native_str))\n self.assertEqual(type(s), native_str)", "def _unicode(arr):\n try:\n return unicode(arr)\n except UnicodeEncodeError:\n dt = arr.dtype.newbyteorder('S')\n return unicode(arr.view(dt))", "def decode_string(self, value):\r\n return value", "def decode(self, s):\n i = 0\n strs = []\n while i < len(s):\n l = int(s[i:i+8], 16)\n strs.append(s[i+8:i+8+l])\n i += 8+l\n return strs", "def to_str(array, encoding='utf8'):\n\n if not isinstance(array, np.ndarray):\n raise ValueError('input should be a NumPy array.')\n\n return np.char.decode(array, encoding)", "def to_string(msg):\n if type(msg) is bytes:\n msg = str(msg)\n msg = msg[2:]\n return msg[:-1]\n else:\n return msg", "def escaped_str_to_bytes(data):\n if not isinstance(data, str):\n raise ValueError(\"data must be str, but is {}\".format(data.__class__.__name__))\n\n # This one is difficult - we use an undocumented Python API here\n # as per http://stackoverflow.com/a/23151714/934719\n return codecs.escape_decode(data)[0]", "def base32_decode(encoded_bytes: bytes) -> str:\n\n # decode the bytes from base32\n # then, decode the bytes-like object to return as a string\n return base64.b32decode(encoded_bytes).decode(\"utf-8\")", "def bit_array_to_string(array: Iterable) -> str:\n\n res = ''.join(\n [chr(int(y, 2)) for y in [''.join([str(x) for x in _bytes])\n for _bytes in Des.n_split(array, 8)]])\n return res", "def decode_xarray_bytes(xdf):\n for col in list(xdf):\n if xdf[col].dtype == 'O':\n try:\n xdf[col] = xdf[col].astype(str)\n except:\n xdf[col] = xdf[col].str.decode('cp1252').str.strip()\n return xdf", "def bytes_string_to_bytes(bytes_string):\n return bytes(bytes_string.split(\"'\")[1], 'utf-8')", "def read_c_string(fd: BinaryIO) -> bytes:\n string = bytearray()\n while True:\n byte = fd.read(1)\n if not byte or byte == b'\\0':\n return bytes(string)\n string += byte", "def get_byte_string(self):\n return \"\".join(['%02X' % i for i in self._data]).decode('hex')", "def _decode_text(self):\n\n print(f\"Hex decode; received message is {self.message}\")\n return bytes.fromhex(self.message).decode('utf-8')", "def to_bytes(data):\n if isinstance(data, str):\n return data.encode(encoding='utf-8')\n else:\n return data", "def convert_char_array_to_string(char_array):\n str = \"\"\n try:\n return str.join(char_array)\n except:\n return \"Error occured: variable non string\"", "def DEIMdecode(byte):\n\n result = ''\n if byte & 0x80:\n if byte & 0x40: result += 'B'\n else: result += 'D'\n if byte & 0x20: result += '-'\n result += '%d' % ((byte >> 3) & 0x03)\n if byte & 0x04: result += '-'\n result += '%d' % (byte & 0x03)\n else:\n if byte == 0111: result += 'N'\n elif byte == 0151: result += 'R'\n elif byte == 0171: result 
+= 'F'\n elif byte == 0200: result += 'P'\n else: result += 'A%3.3o' % byte\n return result", "def bit_array_to_string(array):\n result_string = ''.join(\n [chr(int(i, 2)) for i in\n [''.join([str(x) for x in s_bytes])\n for s_bytes in split_into_n(array, 8)]]\n )\n return result_string", "def javaByteArray2(s):\n a = array.array('B', binascii.unhexlify(s))\n return javaByteArray(a)", "def ToString(bval):\n return bval.decode('utf-8')", "def safe_decode_utf8(s):\n if isinstance(s, bytes):\n return s.decode('utf-8', 'surrogateescape')\n return s", "def asbytes(s):\n if isinstance(s, bytes):\n return s\n else:\n return s.encode('utf-8')", "def read_str(self) -> str:\n t = self.pc\n while self.data[self.pc] != 0:\n self.pc += 1\n s = str(self.data[t:self.pc], encoding=\"utf8\")\n self.pc += 1 # jump '\\0'\n return s", "def b64decode(s: str) -> str:\n return base64.b64decode(s.encode()).decode()", "def try_tag_to_string(tag_data):\n if not isinstance(tag_data, array.array):\n return tag_data\n\n if tag_data.typecode == 'H':\n try:\n tag_data = str(tag_data.tostring().decode('utf-16'))\n except UnicodeDecodeError:\n pass\n except UnicodeEncodeError:\n pass\n except:\n raise\n\n return tag_data", "def imap4_utf7_decode(data):\n\n if not isinstance(data, bytes):\n return bytearray(data, 'utf-8')\n return imap_utf7_codec.imap4_utf7_decode(data)", "def decode(self, s):\n o = self._decoder.decode(s)\n return o", "def s2b(s):\n return s.encode('utf-8')", "def decode_data ( data ) :\n cipher = get_cipher( data )\n index = 0\n firstpass = []\n datalen = len( data )\n while index < datalen :\n if index % 2 == 0 :\n firstpass.append( chr( ord( data[ index ] ) - cipher ) )\n else :\n firstpass.append( chr( ord( data[ index ] ) + cipher ) )\n index += 1\n\n firstpass[ 0 ] = data[ 0 ]\n firstpass[ -1 ] = data[ -1 ]\n firstpass[ -2 ] = data[ -2 ]\n decoded_data = ''.join( firstpass )\n return base64.b64decode( decoded_data )", "def decode_to_utf8(text) -> bytes: # pragma: no cover\n try:\n return text.decode(\"utf-8\")\n except (AttributeError, UnicodeEncodeError):\n return text", "def decode(self, encoded):", "def _decode(data: BencodedString) -> Union[bytes, dict, int, list]:\n if not data.bytes:\n raise ValueError(\"Cannot decode an empty bencoded string.\")\n\n if data.bytes[0] == START_DICT:\n return _decode_dict(data)\n\n if data.bytes[0] == START_LIST:\n return _decode_list(data)\n\n if data.bytes[0] == START_INTEGER:\n return _decode_int(data)\n\n if chr(data.bytes[0]).isdigit():\n return _decode_bytes(data)\n\n raise ValueError(\n \"Cannot decode data, expected the first byte to be one of \"\n f\"'d', 'i', 'l' or a digit, got {chr(data.bytes[0])!r} instead.\"\n )", "def decode_message(self, raw):\n return raw.decode('utf-8')", "def ToChar(byte):\n return chr(byte) if type(byte) != str else byte", "def to_str(value: bytes) -> str:\n if value is not None:\n out = value.decode(\"utf-8\")\n else:\n out = \"\"\n\n return out", "def decode(x):\n\n try:\n return str(unichr(x).encode('ascii', 'replace')) # Make sure data is encoded properly\n except ValueError as err:\n print err\n print \"** ERROR - Decoded character is unrecognized **\"", "def test_decompress_1_char(self):\n b_array = bytearray([0]) + bytearray(b'a')\n actual = LZ77.decompress(b_array)\n expected = 'a'\n self.assertEqual(actual, expected)", "def byte2str(self, obj):\n encoding = \"utf-8\"\n if isinstance(obj, list):\n if len(obj)>0 and not isinstance(obj[0], str):\n self.helper.log_debug(\n \"conversion from list of %s onto list of <class 
'str'>\" %\n type(obj[0]))\n return [ s.decode(encoding) for s in obj ]\n elif not isinstance(obj, str):\n self.helper.log_debug(\n \"conversion from %s onto <class 'str'>\" %\n type(obj))\n return obj.decode(encoding)\n return obj", "def to_native_str(s):\n if not isinstance(s, str):\n return s.decode('ascii', 'strict')\n return s", "def decode(data): #@NoSelf", "def _readString(self, rawData, offset=0):\n\n strLen, = unpack(\n self.byteFormat, rawData[\n offset:offset + self.byteFormatLen])\n\n return rawData[self.byteFormatLen:][:strLen]", "def decode(encoded_key, encoded_string, size):\n\t\n\tdecoded_string = ''\n\ti = 0\n\tfor i in range(0, size):\n\t\tdecoded_string = decoded_string + chr(ord(encoded_string[i]) ^ ord(encoded_key[i % len(encoded_key)]))\n\t\n\treturn str(decoded_string)" ]
[ "0.7620947", "0.7159157", "0.6875555", "0.6810529", "0.6642219", "0.65613323", "0.6543301", "0.6531421", "0.6475144", "0.6392671", "0.639041", "0.639041", "0.6374796", "0.6297807", "0.62701327", "0.6170347", "0.616709", "0.61501384", "0.6131751", "0.6120125", "0.60999626", "0.6094426", "0.60926473", "0.6087876", "0.60862523", "0.6077456", "0.60755473", "0.60681707", "0.60319597", "0.60319597", "0.6023885", "0.6016554", "0.60054266", "0.599477", "0.59844565", "0.59844565", "0.5976475", "0.5958793", "0.59568125", "0.59478253", "0.59350234", "0.59240395", "0.5923282", "0.59231615", "0.5918058", "0.5861409", "0.5857345", "0.5843297", "0.58415705", "0.5838293", "0.58360505", "0.5833115", "0.5829748", "0.582259", "0.58223426", "0.5810729", "0.5800828", "0.57972515", "0.5786352", "0.5757902", "0.5754658", "0.5748205", "0.5739942", "0.5719738", "0.5708487", "0.57014525", "0.5697628", "0.56911373", "0.5680874", "0.56667656", "0.5664077", "0.56638044", "0.5663683", "0.5661885", "0.5642181", "0.5633742", "0.56306756", "0.56292456", "0.56266695", "0.560428", "0.5586982", "0.5581516", "0.5577754", "0.557458", "0.55668986", "0.55606186", "0.5550153", "0.55334055", "0.5533164", "0.5529867", "0.5529229", "0.5523696", "0.5518791", "0.5514613", "0.5512429", "0.55058354", "0.5502184", "0.5499099", "0.5498025", "0.54968274" ]
0.69120693
2
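The record above retrieves a snippet that decodes a sequence of character byte values into text. A minimal runnable sketch of that pattern (Python 3; the Python 2.7 fallback from the snippet is kept as a comment, and the sample input list is a hypothetical example, not taken from the dataset):

def decode_to_string(value, encoding='UTF-8'):
    # bytearray() accepts a list of ints or any bytes-like object
    return bytearray(value).decode(encoding=encoding)
    # Python 2.7 equivalent mentioned in the snippet above:
    # return str(bytearray(value))

if __name__ == '__main__':
    print(decode_to_string([72, 101, 108, 108, 111]))  # -> 'Hello'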
Runs main gobject loop.
def run_main_loop():
    mainloop = GObject.MainLoop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loop( self ):\n import gtk\n while self.count >= 1:\n log.debug( 'GTK loop restarting' )\n while gtk.events_pending():\n gtk.main_iteration()\n log.debug( 'GTK loop exiting' )\n try:\n del self.t_loop\n except AttributeError, err:\n pass", "def run(self):\n GLib.MainLoop().run()", "def startGTK( ):\n if not INITIALIZED:\n init()\n if LOOP_TRACKER:\n LOOP_TRACKER.increment()", "def start(self):\n if self.__started:\n return\n\n self.__started = True\n GLib.timeout_add(GtkMainLoop.DEADLINE_GLIB, self.__ioloop_run)\n self.__gi_loop.run()", "def main(self):\n\t\tgtk.main()", "def run(self):\n\n while not self.done:\n\n self.event_loop()\n\n self.update()", "def main():\n global loop\n DBusGMainLoop(set_as_default=True)\n\n loop = gobject.MainLoop()\n bus = dbus.SessionBus()\n\n bus.add_signal_receiver(catchall_handler, \n dbus_interface=\"org.freedesktop.DBus.Properties\")\n\n threading.Thread(target=run_spotify).start()\n loop.run()", "def run(self):\n self.cmdloop()", "def main_loop(self):\n dt = 0\n self.clock.tick(FPS)\n while not self.done:\n self.event_loop()\n self.update(dt)\n self.render()\n dt = self.clock.tick(FPS) / 1000.0", "def run(self):\n\t\tgtk.gdk.threads_init()\t\t\t# (!) important for multi-threading to work with GTK+\n\t\tself.__update_timer = gobject.timeout_add(250, self.__update, self)\n\t\tself.statusbar1.push(0, \"Ready (for about dialog; right-click to lower right corner).\")\n\t\tgtk.main()", "def _run(self):\n while(self._loop):\n pass", "async def _main(self):\n while True:\n time.sleep(1)", "def run(self):\n self.ui['main_window'].widgets['main'].show_all()\n gtk.main()", "def loop(self):\n pass", "def loop(self):\r\n while self.__running:\r\n self.__check_events()\r\n self.__render()\r\n self.__reset_variables()", "def main():\n BouncyGUI().mainloop()", "def main():\r\n gameclass = data.game.GameClass()\r\n gameclass.main_loop()", "def main():\n dealCards().mainloop()", "def main_loop(self):\n dt = 0.3\n self.clock.tick(self.fps)\n while not self.done:\n self.event_loop()\n self.update(dt)\n self.render()\n dt = self.clock.tick(self.fps)/1000.0 # create delta time variable to multiply with movement and rotation\n self.display_fps()\n self.health_bar()\n self.enemy_health()\n self.energy_bar()", "def run():\n gui = GUI()\n gui.mainloop()", "def gameloop(self):\r\n\r\n # What you see above (\"\"\" some text \"\"\") is called a docstring.\r\n # It explains the purpose of the method/function.\r\n # There should generally be one for every function.\r\n\r\n\r\n # Below is the main loop\r\n while True: \r\n # One cycle in the loop is equivalent to one frame.\r\n\r\n self.event()\r\n\r\n self.draw_objects()\r\n self.move_objects()\r\n\r\n self.update_display()", "def run(self):\n\n observer = Observer()\n observer.schedule(self.ehandler, \"./gl\", True)\n observer.start()\n observer.join()", "def run(self):\n self.window.show()\n Gtk.main()", "def run(self):\n if self._main_loop:\n return\n self._main_loop = GObject.MainLoop()\n self._disconnect_all()\n self._register()\n logger.info(\"--- Mainloop started ---\")\n logger.info(\"Hub is ready for onboarding\")\n try:\n self._main_loop.run()\n except KeyboardInterrupt:\n # ignore exception as it is a valid way to exit the program\n # and skip to finally clause\n pass\n except Exception as e:\n logger.error(e)\n finally:\n logger.info(\"--- Mainloop finished ---\")\n self._unregister()\n self._main_loop.quit()\n self._main_loop = None", "def run(self):\n self.window.mainloop()", "def main_loop(self) -> None:\n # 
Modify signal handlers to make sure Ctrl-C is caught and handled.\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n self._impl.main_loop()", "def takeControl(self):\n mainloop()", "def takeControl(self):\n mainloop()", "def main(self,Surf):\n while True:\n if self.state == \"GAME\":\n self.event_loop()\n self.update(Surf)\n elif self.state == \"QUIT\":\n break\n pg.display.update()\n self.Clock.tick(65)", "def game_loop(self):\n self.interface.game_loop(self)", "def run(self):\n while not self.done:\n time_delta = self.clock.tick(self.fps)\n self.event_loop()\n self.update(time_delta)\n pg.display.update()\n if self.show_fps:\n fps = self.clock.get_fps()\n with_fps = \"{} - {:.2f} FPS\".format(self.caption, fps)\n pg.display.set_caption(with_fps)", "def loop_run(self):\n super(EventLoop, self).loop_run()\n self.inq = self.cothread.EventQueue()", "def run(self):\n if self.okay:\n ExtLoopWin32.run()", "def mainloop(self):\n self.root.mainloop()", "def mainloop(self):\n self.root.mainloop()", "def run(self):\n self.loop.spawn_callback(self.main)\n self.loop.start()\n if self.exc_info:\n six.reraise(*self.exc_info)", "def run(self):\n\t\t\n\t\twhile self.update():\n\t\t\tpass", "def mainloop(self):\n self.app.mainloop()", "def run(self):\n self._app.processEvents()\n try:\n while not self._stop:\n # GRobot._app.processEvents()\n while self._app.hasPendingEvents():\n self._app.processEvents()\n gevent.sleep(0.01)\n except Exception, e:\n logger.error(e)\n logger.debug('Goodbye GRobot')", "def run(self): # pragma: no cover\n while True:\n self.update()", "def postloop(self):\n print 'Bye!'", "def _bg_thread_main(self) -> None:\n while not self._done:\n self._run_server_cycle()", "def _handle_loop(self):\n pass", "def run(self):\n # for running indefinitely if 'watch' is passed\n if self._arguments.watch:\n while True:\n self.watch(self.main(), int(self._arguments.watch))\n else:\n self.main()", "def loop(self):\n while not self.should_exit:\n self._run_once()\n\n self.on_exit()", "def main(self):\n\n self.window.show_all()\n gtk.main()", "def loop_run(self):\n self.log_debug(\"Running loop\")\n import cothread\n self.cothread = cothread\n self._loop_state = LState.Running\n if self.loop_event:\n # Call unbound function with a weak reference to self so that\n # garbage collector will call __del__ when we finish\n event_loop = weak_method(self.event_loop)\n loop_event = weak_method(self.loop_event)\n self.event_loop_proc = cothread.Spawn(event_loop, loop_event)\n else:\n self.event_loop_proc = cothread.Pulse()", "def run(self):\n watcher = self._watcher(self.on_recv)\n watcher.loop()", "def loop(self):\n keys.mode = 'main'\n for line in client.readlines('/event'):\n if not self.alive:\n break\n self.dispatch(*line.split(' ', 1))\n self.alive = False", "def main_loop():\n while len(fake_threads) > 0:\n pulse(0.1)", "def _mainloop(self):\n while not self._shutdown:\n events = self._selector.select(timeout=0.01)\n for key, _ in events:\n key.data(key.fileobj)\n self.close()", "def main(self):\n self.root.mainloop()", "def _main_loop(self):\n while not rospy.is_shutdown():\n # Check for reconfiguration data\n if self._transfer_reconfigure_data is not None:\n # Copy reconfigure data from shared memory\n with self._transfer_reconfigure_data_mutex:\n reconfigure_data = deepcopy(self._transfer_reconfigure_data)\n self._transfer_reconfigure_data = None\n # Run vision reconfiguration\n self._configure_vision(*reconfigure_data)\n # Check for new image\n elif self._transfer_image_msg is not None:\n # Copy 
image from shared memory\n with self._transfer_image_msg_mutex:\n image_msg = self._transfer_image_msg\n self._transfer_image_msg = None\n # Run the vision pipeline\n self._handle_image(image_msg)\n # Now the first image has been processed\n self._first_image_callback = False\n else:\n try:\n self._rate.sleep()\n except rospy.exceptions.ROSTimeMovedBackwardsException:\n pass", "def loop(self):\n while True:\n if self.gui_updates:\n self.update_gui()\n\n event, values = self.window.read(100)\n\n if event == \"btn_con_game\":\n Thread(target=self.connect_game, daemon=True).start()\n elif event == \"btn_con_headset\":\n Thread(target=self.connect_headset, daemon=True).start()\n elif event == \"btn_train_model\":\n Thread(target=self.train_model, daemon=True).start()\n elif event == \"btn_finalize\":\n Thread(target=self.finalize, daemon=True).start()\n \n to_update = self.loading.copy()\n for update in to_update:\n self.window.Element(f'{update}_loading').UpdateAnimation('assets/loading.gif')\n\n # End program if user closes window\n if event == sg.WIN_CLOSED:\n break\n\n self.window.close()", "def _hijack_gtk(self):\n import gtk\n orig_mainloop = gtk.main\n dumb_ml = _DummyMainloop(orig_mainloop, self, GUI_GTK)\n gtk.mainloop = dumb_ml\n gtk.main = dumb_ml\n return orig_mainloop", "def run(self):\n self.thread = threading.Thread(target=self._main)\n self.thread.start()\n self.running = True", "def run(self):\n #game loop set self.playing to False to end game\n self.playing = True\n while self.playing:\n self.dt = self.clock.tick(FPS) / 1000\n self.events()\n self.update()\n self.draw()\n self.losing_sequence()", "def mainloop(self):\n\t\tself.root.after(100, self.tkloop)\n\t\tself.root.mainloop()", "def run(self) -> None:\n pg.mixer.init()\n pg.mixer.music.load(path.join(sound_dir, 'theme.wav'))\n pg.mixer.music.set_volume(0.1)\n pg.mixer.music.play(-1, fade_ms=1000)\n while True:\n if self.state == State.MENU:\n self.show_menu()\n if self.state == State.GAME_OVER:\n self.show_game_over_screen()\n if self.state == State.PLAY:\n self.run_game()", "def loop(self):\n raise NotImplementedError()", "def run(self):\n while self.mode is WorldMode.run:\n self.handle_events()\n self.update()\n self.draw()\n pygame.display.update()\n self.fps_clock.tick(FPS)", "def _start_loop(self):\n self.p = tread.Thread(target=self._loop)\n self.p.start()", "def run(self):\n while True:\n display(self.world.draw())\n self.read_and_process_input()", "def run(self):\n\n print 'Starting Event Loop'\n\n running = True\n # run until something tells us to stop\n while running:\n\n # tick pygame clock\n # you can limit the fps by passing the desired frames per seccond to tick()\n self.clock.tick(60)\n\n # handle pygame events -- if user closes game, stop running\n running = self.handleEvents()\n\n # update the title bar with our frames per second\n pygame.display.set_caption('Pygame Tutorial 4 - Breakout %d fps' % self.clock.get_fps())\n\n # render blocks\n self.blocks.clear(self.window, self.background)\n dirty = self.blocks.draw(self.window)\n\n # render everything else\n self.sprites.clear(self.window, self.background)\n dirty += self.sprites.draw(self.window)\n\n # draw a grid on our background\n self.drawGrid()\n\n # blit the dirty areas of the screen\n pygame.display.update(dirty) # updates just the 'dirty' areas\n\n print 'Quitting. 
Thanks for playing'", "def run(self):\n while self.running:\n QtCore.QCoreApplication.processEvents()", "def main():\n \n # load_and_initialize_func()\n\n loop_and_update_forever()\n\n pygame.quit()", "def mainloop(self):\r\n self.bindHotkeys()\r\n self.root.mainloop()", "def MainLoop(self):\n while 1:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()", "def run(self):\n\n while self.source.SAMPLING:\n wx.CallAfter(self.target, self)\n sleep(0.75)\n\n self.Terminate()", "def main_loop(self):\n try:\n self.state_machine.set_state('wait')\n\n while True:\n events = list(reversed(pygame.event.get())) # Take all events, most recent first\n\n if self.find_quit_event(events):\n break\n\n if self.find_fullscreen_event(events):\n self.window.toggle_fullscreen()\n\n event = self.find_resize_event(events)\n if event:\n self.window.resize(event.size)\n\n self.state_machine.process(events)\n\n finally:\n self.led_picture.quit()\n self.led_print.quit()\n GPIO.cleanup()\n self.camera.quit()\n self.printer.quit()\n pygame.quit()", "def run():\n main()", "def event_loop(self):\n if self.on_init() == False:\n self._running = False\n## for event in pg.event.get():\n## if event.type == pg.QUIT: # The user closes the game \n## self.done = True\n \n## keys = pg.key.get_pressed()\n## if (keys[K_LEFT]):\n## pass # To be filled\n## if (keys[K_RIGHT]):\n## pass\n## if (keys[K_UP]):\n## pass\n## if (keys[K_DOWN]):\n## pass\n self.draw()", "def run(self):\n if not self.running:\n self.loop.run_forever()", "def main_loop(self):\n while self.game_manager.game_state != GameState.Quit:\n\n self.handle_events()\n self.handle_ui_response()\n #in menu\n if self.game_manager.game_state == GameState.Menu: \n self.display.clear()\n\n #in game\n elif self.game_manager.game_state == GameState.Running:\n self.game_manager.move_players()\n\n #after game\n elif self.game_manager.game_state == GameState.Finished:\n if self.game_manager.winner == None:\n self.game_manager.player1.decay()\n self.game_manager.player2.decay() \n else:\n self.game_manager.loser.decay()\n self.game_manager.loser.draw()\n\n #perform game manager actions\n self.game_manager.act()\n #do all the rendering stuff\n self.render_scene()\n #control FPS\n self.clock.tick(self.FPS)", "def run(self):\n while True:\n event, values = self.window.read()\n if event == sg.WIN_CLOSED:\n break\n\n ev.fire(self.window, event, values)", "def run(self):\n try:\n while self._running:\n time.sleep(1)\n finally:\n self._exit()", "def run(self):\n self.__power_on()\n\n self.__main()", "def loop(self):\n while self.dispatch(True) is not QUIT:\n pass", "def run(self):\n while self.__running:\n enum = self.__gui_app.pollButtonEvent()\n if enum != '':\n print enum\n if int(enum, 16) == 4:\n self.__qf.tick()\n else:\n self._publish(enum)\n\n print \"Exit: %s\\n\" % self", "def run(self):\n\n self._daemon_thread.start()\n\n while True:\n time.sleep(5)", "def main():\n Canvas1Demo().mainloop()", "def run(self):\n\t\twhile True:\n\t\t\tself.clock.tick(self.settings.max_fps)\n\t\t\tself._check_events()\n\t\t\tself._update_screen()", "def run_game(self):\n while True:\n self._check_events()\n self.update_screen()", "def loop(self):\n while self.running:\n self.clock.tick(self.fps)\n self.events()\n self.update()\n self.draw()\n self.game_over()", "def run(self):\n self.run()", "def mainloop(self):\n self.master.mainloop()", "def main(self):\n self.startup()\n if self.vehicle:\n try:\n while not self._loop_should_exit:\n self.tick()\n time.sleep(1)\n except 
KeyboardInterrupt:\n self.cleanup()\n self.cleanup()", "def run(self):\n while True:\n self.sm.run()\n time.sleep(0.05)", "def run(self):\n while self._update_func():\n self.update_signal.emit(None)", "def main(dir_to_watch):\n event_handler = AudioCreatedHandler()\n observer = Observer()\n observer.schedule(event_handler, dir_to_watch, recursive=True)\n observer.start()\n try:\n while True:\n time.sleep(1) \n except KeyboardInterrupt:\n print \"Stopping...\"\n observer.stop()\n observer.join()", "def stopGTK( ):\n if LOOP_TRACKER:\n LOOP_TRACKER.decrement()", "def run(self):\n \n # Wrap the outer loop in a try block so we can do an orderly shutdown\n # should an exception occur:\n try:\n # Send out a STARTUP event:\n self.dispatchEvent(weewx.Event(weewx.STARTUP))\n \n syslog.syslog(syslog.LOG_INFO, \"engine: Starting main packet loop.\")\n\n last_gc = int(time.time())\n\n # This is the outer loop. \n while True:\n\n # See if garbage collection is scheduled:\n if int(time.time()) - last_gc > self.gc_interval:\n ngc = gc.collect()\n syslog.syslog(syslog.LOG_INFO, \"engine: garbage collected %d objects\" % ngc)\n last_gc = int(time.time())\n\n # First, let any interested services know the packet LOOP is\n # about to start\n self.dispatchEvent(weewx.Event(weewx.PRE_LOOP))\n \n # Get ready to enter the main packet loop. An exception of type\n # BreakLoop will get thrown when a service wants to break the\n # loop and interact with the console.\n try:\n \n # And this is the main packet LOOP. It will continuously\n # generate LOOP packets until some service breaks it by\n # throwing an exception (usually when an archive period\n # has passed).\n for packet in self.console.genLoopPackets():\n \n # Package the packet as an event, then dispatch it.\n self.dispatchEvent(weewx.Event(weewx.NEW_LOOP_PACKET, packet=packet))\n\n # Allow services to break the loop by throwing\n # an exception:\n self.dispatchEvent(weewx.Event(weewx.CHECK_LOOP, packet=packet))\n\n syslog.syslog(syslog.LOG_CRIT, \"engine: Internal error. Packet loop has exited.\")\n \n except BreakLoop:\n \n # Send out an event saying the packet LOOP is done:\n self.dispatchEvent(weewx.Event(weewx.POST_LOOP))\n\n finally:\n # The main loop has exited. Shut the engine down.\n syslog.syslog(syslog.LOG_DEBUG, \"engine: Main loop exiting. 
Shutting engine down.\")\n self.shutDown()", "def _event_loop(self):\n while True:\n self.scheduler.run(blocking=True)\n time.sleep(1)", "def _main_loop(scr):\n\tglobal curr_player\n\tbob = Player()\n\tbob.name = 'Bob'\n\tcurr_player = bob\n\n\twhile 1:\n\t\t# Keep player in bounds (should actually change maps)\n\t\tif bob.map_coords[0] > 79:\n\t\t\tbob.map_coords[0] = 0\n\t\tif bob.map_coords[0] < 0:\n\t\t\tbob.map_coords[0] = 79\n\t\tif bob.map_coords[1] > 19:\n\t\t\tbob.map_coords[1] = 0\n\t\tif bob.map_coords[1] < 0:\n\t\t\tbob.map_coords[1] = 19\n\t\ttime.sleep(0.01)\n\n\t\tscr.erase()\n\t\tscr.addstr(bob.map_coords[1], bob.map_coords[0], '@')\n\n\t\t_draw_info_area(scr)\n\t\t_handle_input(scr)", "def MainLoop(self):\n self.pleaseQuit=0\n\n self.logger.info(\"Starting main eventloop\")\n try:\n self.irc.process_forever(1)\n except KeyboardInterrupt:\n self.logger.warn(\"Received interrupt, disconnecting from irc\")\n #self.irc.disconnect_all(\"^C received\")\n self.irc.disconnect_all(\"even de suiker bijvullen\")\n \n self.logger.info(\"Finished disconnecting, shutting down\")", "def startMainLoop(self):\n self.thread.start()\n self.theMainLoop.emit(QtCore.SIGNAL(\"step()\"))\n self.app.exec_()", "def run_game(self):\n while True:\n self._check_event()\n self._update_screen()", "def run(self):\n self.arbiter.start()", "def run():\n\n args = parse_arguments()\n app = rummage_app.RummageApp(args)\n app.MainLoop()\n\n return 0", "def main():\n g = DemoGame(800, 600)\n g.start()" ]
[ "0.760594", "0.7460419", "0.72110164", "0.7099442", "0.7034407", "0.691706", "0.6902692", "0.68630695", "0.6785323", "0.67737657", "0.6773619", "0.6746319", "0.6662695", "0.66581476", "0.66040593", "0.6601622", "0.65761584", "0.6574493", "0.65733767", "0.6544273", "0.65289533", "0.65134174", "0.65104914", "0.6422718", "0.64116424", "0.641118", "0.64009243", "0.64009243", "0.63575095", "0.6356688", "0.63510925", "0.6334766", "0.63307065", "0.63021904", "0.63021904", "0.62975496", "0.62725866", "0.6267865", "0.62437516", "0.6243074", "0.6236803", "0.6233461", "0.6220173", "0.61887074", "0.61845136", "0.6178171", "0.61751515", "0.6172693", "0.61622536", "0.6159163", "0.614506", "0.6134585", "0.61284965", "0.612494", "0.61226535", "0.6117013", "0.60924196", "0.60912114", "0.6088257", "0.6071614", "0.6064556", "0.6055554", "0.60513556", "0.60503775", "0.6043369", "0.6041144", "0.6031919", "0.6030755", "0.60219073", "0.60157233", "0.60069406", "0.6001202", "0.5996911", "0.5993762", "0.59843886", "0.59843457", "0.5981674", "0.5976127", "0.5971768", "0.59678286", "0.5953351", "0.59509516", "0.594808", "0.5947148", "0.5945024", "0.59404904", "0.59387213", "0.5935183", "0.59342927", "0.59284556", "0.5922263", "0.59207964", "0.59172", "0.5915121", "0.59148175", "0.59063303", "0.59054655", "0.5905335", "0.5902442", "0.5899086" ]
0.8040331
0
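The document above only constructs the loop object. A minimal sketch of the full pattern, assuming PyGObject is installed (GLib.MainLoop is the current name; GObject.MainLoop used in the snippet is an older alias), showing run() and a clean stop on Ctrl+C:

from gi.repository import GLib

def run_main_loop():
    mainloop = GLib.MainLoop()
    try:
        mainloop.run()       # blocks here, dispatching GLib events
    except KeyboardInterrupt:
        mainloop.quit()      # stop the loop on Ctrl+C

if __name__ == '__main__':
    run_main_loop()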
Initialize dbus system bus, acquire adapter/interface for org.bluez.GattManager1, register application for 'org.bluez.GattService1'
def __init__(self):
    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
    self.bus = dbus.SystemBus()
    self.adapter = self._find_adapter()
    if not self.adapter:
        IFaceNotFoundException('%s interface not found' % GATT_MANAGER_IFACE)
    self.service_manager = dbus.Interface(
        self.bus.get_object(BLUEZ_SERVICE_NAME, self.adapter),
        GATT_MANAGER_IFACE)
    self.mainloop = GObject.MainLoop()
    self.ctx = GattContext(self.bus, self.mainloop)
    self.app = Application(self.ctx)
    #print('Registering GATT application...')
    self.service_manager.RegisterApplication(self.app.get_path(), {},
                                             reply_handler=register_app_cb,
                                             error_handler=register_app_error_cb)
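The __init__ above calls self._find_adapter(), which is not shown in the document. A hedged sketch of that helper, following the usual BlueZ pattern of walking the D-Bus ObjectManager tree and returning the first object path that exposes org.bluez.GattManager1; the constant names mirror those assumed by the snippet and are not defined in the dataset row itself:

import dbus

BLUEZ_SERVICE_NAME = 'org.bluez'
GATT_MANAGER_IFACE = 'org.bluez.GattManager1'
DBUS_OM_IFACE = 'org.freedesktop.DBus.ObjectManager'

def find_adapter(bus):
    # bus is expected to be dbus.SystemBus()
    remote_om = dbus.Interface(bus.get_object(BLUEZ_SERVICE_NAME, '/'),
                               DBUS_OM_IFACE)
    for path, interfaces in remote_om.GetManagedObjects().items():
        if GATT_MANAGER_IFACE in interfaces:
            return path   # e.g. '/org/bluez/hci0'
    return None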
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, alias, adapter=None):\n\n dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)\n\n self.bus = dbus.SystemBus()\n\n if not adapter:\n adapter = self._find_adapter()\n if not adapter:\n logger.error(\"Could not find any adapter implementing GattManager1 + LEAdvertisingManager1 interfaces\")\n raise BleNotSupportedException(\n \"No adapter implementing GattManager1 + LEAdvertisingManager1 found\")\n self._adapter_path = '/org/bluez/' + adapter\n self._device_properties_changed_signal = None\n self._adapter_properties_changed_signal = None\n self._main_loop = None\n self.on_remote_disconnected = None\n\n self._adapter_props = dbus.Interface(\n self.bus.get_object(BLUEZ_SERVICE_NAME, self._adapter_path), DBUS_PROP_IFACE)\n\n self._disable_br_edr()\n\n logger.info(\"Creating BLE Peripheral with alias: %s\" % alias)\n\n self.alias = alias\n self.is_powered = True\n self.discoverable_timeout = 0\n self.is_advertising = False\n\n # Prepare Managers:\n\n self._ad_manager = dbus.Interface(\n self.bus.get_object(BLUEZ_SERVICE_NAME, self._adapter_path),\n LE_ADVERTISING_MANAGER_IFACE)\n\n self._gatt_manager = dbus.Interface(\n self.bus.get_object(BLUEZ_SERVICE_NAME, self._adapter_path),\n GATT_MANAGER_IFACE)\n\n # Create Advertisement and GATT Application:\n\n self._advertisement = Advertisement(self.bus, 0, 'peripheral')\n self._app = Application(self.bus)", "def start(self):\n \r\n # Fetch the XBee Manager name from the Settings Manager:\r\n xbee_manager_name = SettingsBase.get_setting(self, \"xbee_device_manager\")\r\n dm = self.__core.get_service(\"device_driver_manager\")\r\n self.__xbee_manager = dm.instance_get(xbee_manager_name)\r\n\r\n # Register ourselves with the XBee Device Manager instance:\r\n self.__xbee_manager.xbee_device_register(self)\r\n\r\n # Get the extended address of the device:\r\n extended_address = SettingsBase.get_setting(self, \"extended_address\")\r\n\r\n # Create a callback specification for our device address, endpoint\r\n # Digi XBee profile and sample cluster id:\r\n xbdm_rx_event_spec = XBeeDeviceManagerRxEventSpec()\r\n xbdm_rx_event_spec.cb_set(self.sample_indication)\r\n xbdm_rx_event_spec.match_spec_set(\r\n (extended_address, 0xe8, 0xc105, 0x92),\r\n (True, True, True, True))\r\n self.__xbee_manager.xbee_device_event_spec_add(self,\r\n xbdm_rx_event_spec)\r\n\r\n # Create a callback specification that calls back this driver when\r\n # our device has left the configuring state and has transitioned\r\n # to the running state:\r\n xbdm_running_event_spec = XBeeDeviceManagerRunningEventSpec()\r\n xbdm_running_event_spec.cb_set(self.running_indication)\r\n self.__xbee_manager.xbee_device_event_spec_add(self,\r\n xbdm_running_event_spec)\r\n\r\n # Create a DDO configuration block for this device:\r\n xbee_ddo_cfg = XBeeConfigBlockDDO(extended_address)\r\n\r\n # Get the gateway's extended address:\r\n gw_xbee_sh, gw_xbee_sl = gw_extended_address_tuple()\r\n\r\n # Set the destination for I/O samples to be the gateway:\r\n xbee_ddo_cfg.add_parameter('DH', gw_xbee_sh)\r\n xbee_ddo_cfg.add_parameter('DL', gw_xbee_sl)\r\n\r\n # TODO: Configure the XBee pins to be Digital/Analog IO\r\n #\r\n # I.E.: Configure pins DI0 .. DI3 for digital input and \r\n # enable line monitoring on pins DIO0 .. DIO3:\r\n #for io_pin in [ 'D0', 'D1', 'D2', 'D3' ]:\r\n # xbee_ddo_cfg.add_parameter(io_pin, 3)\r\n # Enable I/O line monitoring on pins DIO0 .. DIO3:\r\n #xbee_ddo_cfg.add_parameter('IC', 0xf)\r\n #\r\n # I.E.: Configure pins DI1 .. 
DI3 for analog input:\r\n #for io_pin in [ 'D1', 'D2', 'D3' ]:\r\n # xbee_ddo_cfg.add_parameter(io_pin, 2)\r\n\r\n # Configure node sleep behavior:\r\n sleep_ms = SettingsBase.get_setting(self, \"sleep_ms\")\r\n awake_time_ms = SettingsBase.get_setting(self, \"awake_time_ms\")\r\n xbee_sleep_cfg = XBeeConfigBlockSleep(extended_address)\r\n if sleep_ms > 0:\r\n # Configure node to sleep for the specified interval:\r\n xbee_sleep_cfg.sleep_cycle_set(awake_time_ms, sleep_ms)\r\n else:\r\n # If sleep_ms is 0, disable sleeping on the node altogether:\r\n xbee_sleep_cfg.sleep_mode_set(SM_DISABLED)\r\n\r\n # Register the Sleep configuration block with the XBee Device Manager:\r\n self.__xbee_manager.xbee_device_config_block_add(self, xbee_sleep_cfg)\r\n\r\n # Register the DDO configuration block with the XBee Device Manager:\r\n self.__xbee_manager.xbee_device_config_block_add(self, xbee_ddo_cfg)\r\n\r\n # Indicate that we have no more configuration to add:\r\n self.__xbee_manager.xbee_device_configure(self)\r\n\r\n # Start the thread\r\n threading.Thread.start(self)\r\n\n return True", "def _init_dbus(self):\n self.players = [ 'amarokapp','amarok','rhythmbox','audacious','banshee',\n 'exaile','gmusicbrowser','juk','quodlibet','listen','songbird',\n 'muine','beep-media-play','mpd' ]\n try:\n self.bus=dbus.SessionBus()\n except ImportError:\n self.display_message(\"Some issues python-dbus\")", "def setup():\n global zb\n # Signal handler (Ctrl+C exit)\n signal.signal(signal.SIGINT, signal_handler) \n # DBus\n session_bus = dbus.SessionBus()\n objXBZB = session_bus.get_object(PROTOCOL_BUS_NAME, PROTOCOL_OBJ_PATH + \"/\" + XBEE_ZB + \"/\" + SOCKET0)\n zb = dbus.Interface(objXBZB, dbus_interface=PROTOCOL_BUS_NAME)", "def start(self):\n # Fetch the XBee Manager name from the Settings Manager:\n xbee_manager_name = SettingsBase.get_setting(self, \"xbee_device_manager\")\n dm = self.__core.get_service(\"device_driver_manager\")\n self.__xbee_manager = dm.instance_get(xbee_manager_name)\n\n # Register ourselves with the XBee Device Manager instance:\n self.__xbee_manager.xbee_device_register(self)\n\n # Get the extended address of the device:\n extended_address = SettingsBase.get_setting(self, \"extended_address\")\n\n # Create a callback specification for our device address, endpoint\n # Digi XBee profile and sample cluster id:\n xbdm_rx_event_spec = XBeeDeviceManagerRxEventSpec()\n xbdm_rx_event_spec.cb_set(self._sample_indication)\n xbdm_rx_event_spec.match_spec_set(\n (extended_address, 0xe8, 0xc105, 0x92),\n (True, True, True, True))\n self.__xbee_manager.xbee_device_event_spec_add(self,\n xbdm_rx_event_spec)\n\n # Create a DDO configuration block for this device:\n xbee_ddo_cfg = XBeeConfigBlockDDO(extended_address)\n\n # Get the gateway's extended address:\n gw_xbee_sh, gw_xbee_sl = gw_extended_address_tuple()\n\n # Set the destination for I/O samples to be the gateway:\n xbee_ddo_cfg.add_parameter('DH', gw_xbee_sh)\n xbee_ddo_cfg.add_parameter('DL', gw_xbee_sl)\n \n #\"\"\" IF YOUR XBEE DEVICE DON'N SLEEP AND YOU SEND DATA FROM XBEE DEVICE TO ConnectPort X manually then uncoment the start of that line.\n # Configure the IO Sample Rate:\n # Clip sample_rate_ms to the max value of IR:\n sample_rate_ms = SettingsBase.get_setting(self, \"sample_rate_ms\")\n sample_rate_ms = min(sample_rate_ms, 0xffff)\n xbee_ddo_cfg.add_parameter('IR', sample_rate_ms)\n\n # Register this configuration block with the XBee Device Manager:\n self.__xbee_manager.xbee_device_config_block_add(self, xbee_ddo_cfg)\n\n # 
Setup the sleep parameters on this device:\n will_sleep = SettingsBase.get_setting(self, \"sleep\")\n sample_predelay = SettingsBase.get_setting(self, \"sample_predelay\")\n awake_time_ms = (SettingsBase.get_setting(self, \"awake_time_ms\") +\n sample_predelay)\n \n if will_sleep:\n # Sample time pre-delay, allow the circuitry to power up and\n # settle before we allow the XBee to send us a sample: \n xbee_ddo_wh_block = XBeeConfigBlockDDO(extended_address)\n xbee_ddo_wh_block.apply_only_to_modules((MOD_XB_ZB, MOD_XB_S2C_ZB,))\n xbee_ddo_wh_block.add_parameter('WH', sample_predelay)\n self.__xbee_manager.xbee_device_config_block_add(self,\n xbee_ddo_wh_block)\n\n # The original sample rate is used as the sleep rate:\n sleep_rate_ms = SettingsBase.get_setting(self, \"sample_rate_ms\")\n xbee_sleep_cfg = XBeeConfigBlockSleep(extended_address)\n if will_sleep:\n xbee_sleep_cfg.sleep_cycle_set(awake_time_ms, sleep_rate_ms)\n else:\n xbee_sleep_cfg.sleep_mode_set(SM_DISABLED)\n self.__xbee_manager.xbee_device_config_block_add(self, xbee_sleep_cfg)\n #\"\"\"\n # Register this configuration block with the XBee Device Manager:\n self.__xbee_manager.xbee_device_config_block_add(self, xbee_ddo_cfg)\n\n # Indicate that we have no more configuration to add:\n self.__xbee_manager.xbee_device_configure(self)\n \n #threading.Thread.start(self)\n \n return True", "def __init__(self, name=None, address=None):\n self.name = name\n self.address = address\n self.dongle = adapter.Adapter(adapter.list_adapters()[0])\n if not self.dongle.powered:\n self.dongle.powered = True\n logger.debug('Adapter powered')\n logger.debug('Start discovery')\n self.dongle.nearby_discovery()\n device_path = None\n if name is not None:\n device_path = tools.get_dbus_path(\n constants.DEVICE_INTERFACE,\n 'Name',\n name)\n elif address is not None:\n device_path = tools.get_dbus_path(\n constants.DEVICE_INTERFACE,\n 'Address',\n address)\n\n self.blnkt = device.Device(device_path[0])\n\n self.blinkt_srv_path = None\n self.blinkt_chrc_path = None", "def __init__(self, dev):\n\n self.dev = dev\n\n # do pygatt communication in the background\n self.gatt = PyGattThread(dev)\n self.gatt.start()", "def connect(self):\n self.blnkt.connect()\n while not self.blnkt.services_resolved:\n sleep(0.5)\n self._get_dbus_paths()", "async def init(self):\n logger.info(\"Init device: %s\", self._serial)\n self._callback(STATUS_INIT)\n\n self._init_binaries()\n self._init_apks()\n await self._init_forwards()\n\n await adb.shell(self._serial, \"/data/local/tmp/atx-agent server --stop\")\n await adb.shell(self._serial, \"/data/local/tmp/atx-agent server --nouia -d\")", "def connect_to_dbus(self):\n if not self._connected_to_dbus:\n self._connected_to_dbus = True\n proxy_obj = self._bus.get_object(\"org.wicd.daemon\", \n '/org/wicd/daemon')\n self.proxy_obj = proxy_obj\n daemon = dbus.Interface(proxy_obj, 'org.wicd.daemon')\n interface = dbus.Interface(proxy_obj, 'org.wicd.daemon.interface')\n ui = dbus.Interface(proxy_obj, 'org.wicd.daemon.ui')\n self._dbus_ifaces = {\"daemon\" : daemon,\n \"interface\" : interface, \n \"ui\" : ui}", "def __init__(self, ifname):\n\n self._dbus_loop = gobject.MainLoop()\n self._bus = dbus.SystemBus()\n wait_bus_owner_timeout = 5 # Wait for 5s to have an owner for the bus name we are expecting\n logger.debug('Going to wait for an owner on bus name ' + RemoteDhcpClientControl.DBUS_NAME)\n while not self._bus.name_has_owner(RemoteDhcpClientControl.DBUS_NAME):\n time.sleep(0.2)\n wait_bus_owner_timeout -= 0.2\n if 
wait_bus_owner_timeout <= 0: # We timeout without having an owner for the expected bus name\n raise Exception('No owner found for bus name ' + RemoteDhcpClientControl.DBUS_NAME)\n \n logger.debug('Got an owner for bus name ' + RemoteDhcpClientControl.DBUS_NAME)\n gobject.threads_init() # Allow the mainloop to run as an independent thread\n dbus.mainloop.glib.threads_init()\n \n dbus_object_name = RemoteDhcpClientControl.DBUS_OBJECT_ROOT + '/' + str(ifname)\n logger.debug('Going to communicate with object ' + dbus_object_name)\n self._dhcp_client_proxy = self._bus.get_object(RemoteDhcpClientControl.DBUS_SERVICE_INTERFACE, dbus_object_name)\n self._dbus_iface = dbus.Interface(self._dhcp_client_proxy, RemoteDhcpClientControl.DBUS_SERVICE_INTERFACE)\n \n logger.debug(\"Connected to D-Bus\")\n self._dhcp_client_proxy.connect_to_signal(\"IpConfigApplied\",\n self._handleIpConfigApplied,\n dbus_interface = RemoteDhcpClientControl.DBUS_SERVICE_INTERFACE,\n message_keyword='dbus_message') # Handle the IpConfigApplied signal\n \n self._dhcp_client_proxy.connect_to_signal(\"LeaseLost\",\n self._handleLeaseLost,\n dbus_interface = RemoteDhcpClientControl.DBUS_SERVICE_INTERFACE,\n message_keyword='dbus_message') # Handle the IpConfigApplied signal\n \n #Lionel: the following line is used for D-Bus debugging only\n #self._bus.add_signal_receiver(catchall_signal_handler, interface_keyword='dbus_interface', member_keyword='member')\n self._dbus_loop_thread = threading.Thread(target = self._loopHandleDbus) # Start handling D-Bus messages in a background thread\n self._dbus_loop_thread.setDaemon(True) # D-Bus loop should be forced to terminate when main program exits\n self._dbus_loop_thread.start()\n \n self._bus.watch_name_owner(RemoteDhcpClientControl.DBUS_NAME, self._handleBusOwnerChanged) # Install a callback to run when the bus owner changes\n \n self._callback_new_lease_mutex = threading.Lock() # This mutex protects writes to the _callback_new_lease attribute\n self._callback_new_lease = None\n \n self._exit_unlock_event = threading.Event() # Create a new threading event that will allow the exit() method to wait for the child to terminate properly\n self._getversion_unlock_event = threading.Event() # Create a new threading event that will allow the GetVersion() D-Bus call below to execute within a timed limit \n\n self.status = DhcpLeaseStatus.DhcpLeaseStatus()\n\n self._getversion_unlock_event.clear()\n self._remote_version = ''\n self._dbus_iface.GetVersion(reply_handler = self._getVersionUnlock, error_handler = self._getVersionError)\n if not self._getversion_unlock_event.wait(10): # We give 10s for slave to answer the GetVersion() request\n logfile = tempfile.NamedTemporaryFile(prefix='TimeoutOnGetVersion-', suffix='.log', delete=False)\n if logfile:\n print('Saving TimeoutOnGetVersion environment dump to file \"' + logfile.name + '\"', file=sys.stderr)\n print('TimeoutOnGetVersion', file=logfile)\n subprocess.call('ps -ef', stdout=logfile, shell=True)\n subprocess.call('perl ./dbus-introspect.pl --system com.legrandelectric.RobotFrameworkIPC.DhcpClientLibrary /com/legrandelectric/RobotFrameworkIPC/DhcpClientLibrary/eth1', stdout=logfile, shell=True)\n subprocess.call('dbus-send --system --type=method_call --print-reply --dest=com.legrandelectric.RobotFrameworkIPC.DhcpClientLibrary /com/legrandelectric/RobotFrameworkIPC/DhcpClientLibrary/eth1 com.legrandelectric.RobotFrameworkIPC.DhcpClientLibrary.GetVersion', stdout=logfile, shell=True)\n logfile.close()\n raise 
Exception('TimeoutOnGetVersion')\n else:\n logger.debug('Slave version: ' + self._remote_version)", "def connectAdapter(self):\n self.canusb = pycanusb.CanUSB(bitrate='500')\n print('CanUSB: ',self.canusb)\n Msg = Switch_to_Operational_State_Msg()\n QTimer.singleShot(50,lambda msg = Msg : self.initialization(Msg))", "def __init__(self, interface_watcher, conn, dbus_object_path = DBUS_OBJECT_ROOT, **kwargs):\n # Note: **kwargs is here to make this contructor more generic (it will however force args to be named, but this is anyway good practice) and is a step towards efficient mutliple-inheritance with Python new-style-classes\n dbus.service.Object.__init__(self, conn=conn, object_path=dbus_object_path)\n self.interface_watcher = interface_watcher\n interface_watcher.interface_destroy_callback = self.InterfaceRemoved\t# Request interface_watcher object to call InterfaceRemoved (in order to send a D-Bus signal when secondary network interface is going down)\n interface_watcher.interface_add_callback = self.InterfaceAdded\t# Request interface_watcher object to call InterfaceAdded (in order to send a D-Bus signal when secondary network interface is going up)\n logger.debug('Registered binding with D-Bus object PATH: ' + str(dbus_object_path))", "def register_to_core(self):\n self.channel.basic_publish(exchange='', routing_key='peripheral_register', body=json.dumps({self.name: api}))", "def run(self):\n\n # self.peripheral.connect(self.address)\n\n # //-set the delegate to handle notification message process\n # self.peripheral.setDelegate(MyDelegate(self.sinOut))\n if self._type == \"BW\":\n uuid = \"0000fff0-0000-1000-8000-00805f9b34fb\" # the bought module distinguished by the name.\n # BW means the bought module's name \"BW-ECG-01\".\n svc = self.peripheral.getServiceByUUID(uuid)\n\n # //-the characteristic that data can be written to\n chr_of_writable = svc.getCharacteristics()[0]\n # //-the characteristic that receives notification from other peripheral.\n chr_of_notify = svc.getCharacteristics()[1]\n # //-enable the notify\n self.peripheral.writeCharacteristic(chr_of_notify.valHandle + 1, struct.pack('<bb', 0x01, 0x00), True)\n # //-bind user ID to BW-ECG-01, the ID could be a random ID.\n chr_of_writable.write(b'\\xE8\\x41\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00',\n True)\n # //-start the acquiring, a time(Y/M/D/H/H/S/deltaT) should be given. the time could be a random time\n # //-but the delta T should have meaning which is the acquiring time. 
0x01 means 1 minutes.\n # //-the delta T could be modified as other number, this could be done by UI.\n # //-if the number could be set by user, that will be perfection.\n chr_of_writable.write(b'\\xE8\\x23\\x15\\x03\\x0b\\x10\\x15\\x00\\x00\\x01', True)\n # //-start continually acquiring\n chr_of_writable.write(b'\\xE8\\20', True)\n\n while self.working:\n if self.peripheral.waitForNotifications(1.0):\n # print(\"notification:\")\n continue\n else:\n uuid = \"f000fff0-0451-4000-b000-000000000000\" # the module made by ourselves\n svc = self.peripheral.getServiceByUUID(uuid)\n ch = svc.getCharacteristics()[0]\n self.peripheral.writeCharacteristic(ch.valHandle + 1, struct.pack('<bb', 0x01, 0x00))\n # print(\"waiting...\")\n # self.sinOut.emit(\"waiting...\")\n\n while self.working:\n if self.peripheral.waitForNotifications(1.0):\n # print(\"notification:\")\n continue", "async def async_setup(self) -> None:\n await self.hass.async_add_executor_job(self._setup)\n\n # set already known devices to away instead of unavailable\n device_registry = dr.async_get(self.hass)\n devices = dr.async_entries_for_config_entry(device_registry, self.entry_id)\n for device_entry in devices:\n if device_entry.via_device_id is None:\n continue # do not add the router itself\n\n device_mac = dict(device_entry.connections).get(dr.CONNECTION_NETWORK_MAC)\n self.devices[device_mac] = {\n \"mac\": device_mac,\n \"name\": device_entry.name,\n \"active\": False,\n \"last_seen\": dt_util.utcnow() - timedelta(days=365),\n \"device_model\": None,\n \"device_type\": None,\n \"type\": None,\n \"link_rate\": None,\n \"signal\": None,\n \"ip\": None,\n }\n\n await self.async_update_device_trackers()\n self.entry.async_on_unload(\n async_track_time_interval(\n self.hass, self.async_update_device_trackers, SCAN_INTERVAL\n )\n )\n\n async_dispatcher_send(self.hass, self.signal_device_new)", "def onConfigureMessage(self, config):\n for adaptor in config[\"adaptors\"]:\n adtID = adaptor[\"id\"]\n if adtID not in self.devices:\n # Because configure may be re-called if devices are added\n name = adaptor[\"name\"]\n friendly_name = adaptor[\"friendly_name\"]\n logging.debug(\"%s Configure app. 
Adaptor name: %s\", ModuleName, name)\n self.idToName[adtID] = friendly_name.replace(\" \", \"_\")\n self.devices.append(adtID)\n self.dm = DataManager(self.bridge_id)\n self.setState(\"starting\")", "def registerDevice(self):\n\t\tr = req.post(\"http://localhost:9090/devices?id={}&sensors={}_{}&board={}\".format(\n\t\t\tBOARD_ID,\n\t\t\tSENSOR1,\n\t\t\tSENSOR2,\n\t\t\tBOARD\n\t\t))\n\t\tprint (\"[{}] Device Registered on Room Catalog\".format(\n\t\t\tint(time.time()),\n\t\t))", "def do_dbus_register(self, connection, object_path):\n logger.debug('::dbus_register')\n Gio.Application.do_dbus_register(self, connection, object_path)\n failure = False\n try:\n connection.connect('closed', lambda i: self.quit())\n self._dbus_id = connection.register_object(\n object_path,\n DeskChangerDaemonDBusInterface.interfaces[0],\n self._handle_dbus_call,\n self._handle_dbus_get,\n self._handle_dbus_set\n )\n except TypeError:\n # TODO - Handle this failure correctly.\n failure = True\n except GLib.Error as e:\n logger.debug(e.args)\n finally:\n if self._dbus_id is None or self._dbus_id == 0:\n logger.critical('failed to register DBus name %s', object_path)\n if failure:\n logger.error('possibly unsupported version of glib')\n return False\n\n logger.info('successfully registered DBus name %s', object_path)\n return True", "def __init__(self,device=None,port=0):\n self.device= Service.initDevice(device)\n self.adbCmd= r'adb -s %s '%self.device\n self.port = port\n if self.port == 0:\n self.port = utils.free_port()", "def init(self):\n logger.info('systime service init')\n yield self._connect_dbus()", "def device_connect(self):\n pass", "def init():\n\n global registry, fsk_router, ook_router\n\n radio.init()\n OpenThings.init(Devices.CRYPT_PID)\n\n fsk_router = Registry.Router(\"fsk\")\n\n #OOK receive not yet written\n #It will be used to be able to learn codes from Energenie legacy hand remotes\n ##ook_router = Registry.Router(\"ook\")\n\n registry = Registry.DeviceRegistry()\n registry.set_fsk_router(fsk_router)\n ##registry.set_ook_router(ook_router\n\n path = os.path.join(sys.path[0], registry.DEFAULT_FILENAME)\n if os.path.isfile(path):\n registry.load_from(path)\n print(\"loaded registry from file\")\n registry.list()\n fsk_router.list()\n\n # Default discovery mode, unless changed by app\n ##discovery_none()\n ##discovery_auto()\n ##discovery_ask(ask)\n discovery_autojoin()\n ##discovery_askjoin(ask)", "def __action_connect_system_bus_cb(self, action, parameter):\n try:\n if self.system_bus is not None:\n return\n bw = BusWatch(self.data_dir, Gio.BusType.SYSTEM)\n self.system_bus = bw.box_bus\n self.stack.add_titled(self.system_bus, 'System Bus', 'System Bus')\n self.remove_action('connect-system-bus')\n except Exception as e:\n print(e)", "def Init(self, factory_reset=True):\n # Create a new serial device every time since the serial driver\n # on chameleon board is not very stable.\n result = self.CreateSerialDevice()\n\n if factory_reset:\n # Enter command mode to issue commands.\n # This must happen first, so that other commands work\n result = self.EnterCommandMode() and result\n\n # Do a factory reset to make sure it is in a known initial state.\n # Do the factory reset before proceeding to set parameters below.\n result = self.FactoryReset() and result\n\n # Set HID as the service profile.\n result = self.SetServiceProfileHID() and result\n\n # Set the HID device type.\n result = self.SetHIDType(self.device_type) and result\n\n # Set the default class of service.\n result = 
self.SetDefaultClassOfService() and result\n\n # Set the class of device (CoD) according to the hid device type.\n result = self.SetClassOfDevice(self.device_type) and result\n\n # Set authentication to the specified mode.\n if self.authentication_mode != PeripheralKit.OPEN_MODE:\n result = self.SetAuthenticationMode(self.authentication_mode)\\\n and result\n\n # Set RN-42 to work as a slave.\n result = self.SetSlaveMode() and result\n\n # Set a temporary pin code for testing purpose.\n # Only do this when we want to use a pin code.\n if self.authentication_mode == PeripheralKit.PIN_CODE_MODE:\n result = self.SetPinCode(self.TMP_PIN_CODE) and result\n\n # Enable the connection status message so that we could get the message\n # of connection/disconnection status.\n result = self.EnableConnectionStatusMessage() and result\n\n if not isinstance(self._kit, nRF52):\n # Reboot so that the configurations above take effect.\n result = self.Reboot() and result\n\n # Enter command mode again after reboot.\n result = self.EnterCommandMode() and result\n time.sleep(self.INIT_SLEEP_SECS)\n\n logging.info('A bluetooth HID \"%s\" device is connected.', self.device_type)\n return result", "def init(self):\n try:\n yield self._connect_dbus()\n logger.info(\"Request the GSM resource\")\n yield WaitFSOResource('GSM', time_out=30)\n yield WaitDBus(self.ousage.RequestResource, 'GSM')\n yield self._turn_on()\n logger.info(\"register on the network\")\n register = yield self._register()\n #if register:\n #provider = yield tichy.Wait(self, 'provider-modified')\n \n self._keep_alive().start()\n \n ##network selection end\n \n except Exception, ex:\n logger.error(\"Error : %s\", ex)\n raise\n \n try:\n \n yield tichy.Service.get('ConfigService').wait_initialized()\n self.config_service = tichy.Service.get(\"ConfigService\")\n logger.info(\"got config service\")\n \n except Exception, ex:\n logger.error(\"Error in try retrieving config service : %s\", ex)\n \n try:\n \n ##call forwaring setting start\n self.values = self.config_service.get_items(\"call_forwarding\")\n if self.values != None: self.values = dict(self.values)\n logger.info(\"realized values is none\")\n\n except Exception, ex:\n logger.error(\"Error in try call forwarding setting : %s\", ex)\n \n \n try:\n\n self.SettingReason = tichy.settings.ListSetting('Call Forwarding','Reason',tichy.Text,value='unconditional', setter=self.ForwardingSetReason,options=[\"unconditional\",\"mobile busy\",\"no reply\",\"not reachable\",\"all\",\"allconditional\"],model=tichy.List([ ListSettingObject(\"unconditional\", self.action),ListSettingObject(\"mobile busy\",self.action),ListSettingObject(\"no reply\", self.action),ListSettingObject(\"not reachable\", self.action),ListSettingObject(\"all\", self.action),ListSettingObject(\"all conditional\", self.action)]), ListLabel =[('title','name')])\n \n self.SettingForwarding = tichy.settings.ToggleSetting('Call Forwarding', 'active', tichy.Text, value=self.GetForwardingStatus('unconditional'),setter=self.ToggleForwarding, options=['active','inactive'])\n \n \n except Exception, ex:\n logger.error(\"Error in try call forwarding setting list : %s\", ex)\n \n \n try:\n\n self.SettingChannels = tichy.settings.Setting('Call Forwarding', 'channels', tichy.Text, value=self.ForwardingGet('class'), setter=self.ForwardingSetClass, options=[\"voice\",\"data\",\"voice+data\",\"fax\",\"voice+data+fax\"])\n \n self.SettingTargetNumber = tichy.settings.NumberSetting('Call Forwarding', 'Target Number', tichy.Text, 
value=self.ForwardingGet('number'), setter=self.ForwardingSetNumber)\n \n self.SettingTargetNumber = tichy.settings.NumberSetting('Call Forwarding', 'Timeout', tichy.Text, value=self.ForwardingGet('timeout'), setter=self.ForwardingSetTimeout)\n \n ##call forwaring setting stop\n \n \n except Exception, ex:\n logger.error(\"Error in try Error in try call forwarding setting : %s\", ex)\n \n try:\n\n ##call identifaction setting start\n self.CallIdentification = tichy.settings.Setting('Network', 'Call Identification', tichy.Text, value=self.GetCallIdentification(), setter=self.SetCallIdentifaction, options=[\"on\",\"off\",\"network\"])\n ##call identifaction setting stop\n \n except Exception, ex:\n logger.error(\"Error in network identification setting: %s\", ex)\n \n try: \n ##network selection etc begin\n self.NetworkRegistration = tichy.settings.Setting('Network', 'Registration', tichy.Text, value=self.GetRegStatus(), setter=self.SetRegStatus, options=[\"registered\",\"not registered\"])\n \n \n except Exception, ex:\n logger.error(\"Error in network registration setting : %s\", ex)\n \n \n try:\n \n self.scanning = False\n self.NetworkList = tichy.List()\n self.ListLabel = [('title','name'),('subtitle','status')]\n \n self.scan_setting = tichy.settings.ListSetting('Network', 'List', tichy.Text, value=\"scan\", setter=self.run_scan, options=['scan'], model=self.NetworkList, ListLabel=self.ListLabel)\n \n except Exception, ex:\n logger.error(\"Error in network list setting : %s\", ex)\n #raise", "def create_bond(device_address=None, adapter_address=None):\n con = pexpect.spawn('sudo bluetoothctl')\n con.expect(\"bluetooth\", timeout=1)\n \n print(\"selecting adapter ...\")\n con.sendline(\"select \" + adapter_address.upper())\n\n #check to see if already paired\n print(\"checking if bond exists already ...\")\n no_bond=False\n try:\n con.sendline(\"paired-devices\")\n con.expect(device_address.upper(), timeout=1)\n except(pexpect.TIMEOUT):\n no_bond = True\n else:\n print(\"bond already exists for %s\" % (device_address.upper()))\n print(\"successfully quiting bluetoothctl since bond is already formed\")\n con.sendline(\"quit\") \n return(0) \n \n con.sendline(\"select \" + adapter_address.upper())\n \n print(\"registering agent ...\")\n try:\n con.sendline(\"agent NoInputNoOutput\")\n con.expect(['Agent registered', 'Agent is already registered'], timeout=1)\n con.sendline(\"default-agent\")\n con.expect(\"Default agent request successful\", timeout=1)\n except(pexpect.TIMEOUT):\n print(\"unable to register agent\")\n return(1)\n\n print(\"enabling pairing ...\")\n try:\n con.sendline(\"pairable on\")\n con.expect(\"Changing pairable on succeeded\", timeout=1)\n except(pexpect.TIMEOUT):\n print(\"unable to turn pairing on\")\n return(1)\n\n print(\"starting scan ...\")\n try:\n con.sendline(\"scan on\")\n devfound = con.expect(device_address.upper(), timeout=5)\n if devfound == 0:\n try:\n con.sendline(\"scan off\")\n print (\"Found device. 
connecting to %s\" % (device_address.upper()))\n con.sendline(\"connect \" + device_address.upper())\n con.expect(\"Connection successful\", timeout=10)\n #sleep(10) #need extra time here to finish pairing\n except(pexpect.TIMEOUT):\n print(\"could not connect to %s\" % (device_address.upper()))\n return(1)\n try:\n #explicitly pair with the device\n con.sendline(\"pair \" + device_address.upper())\n con.expect(\"Pairing successful\", timeout=5)\n except(pexpect.TIMEOUT):\n print(\"pairing not successful\")\n try:\n con.sendline(\"info \" + device_address.upper()) \n con.expect(\"Paired: yes\", timeout=1)\n except(pexpect.TIMEOUT):\n print(\"could not pair with %s\" % (device_address.upper()))\n return(1)\n else:\n con.sendline(\"trust \" + device_address.upper())\n print(\"Connection and pairing successful!\")\n #try:\n #con.sendline(\"list-attributes\")\n #con.expect(\"6e400003-b5a3-f393-e0a9-e50e24dcca9e\", timeout=2)\n #print(con.before)\n #for line in con.before:\n # read_characteristics = line\n #print(read_characteristics)\n #except(pexpect.TIMEOUT):\n #print(\"could not list the attributes\")\n #return(1)\n try:\n print(\"disconnecting temporarily ...\")\n con.sendline(\"disconnect \" + device_address.upper())\n con.expect(\"Connected: no\", timeout=5)\n except(pexpect.TIMEOUT):\n print(\"could not disconnect.. \")\n con.sendline(\"quit\")\n return(1)\n else:\n print(\"successfully quiting bluetoothctl after forming bond\")\n con.sendline(\"quit\")\n return(0)\n except(pexpect.TIMEOUT):\n con.sendline(\"scan off\")\n print(\"unable to find device %s\" % (device_address))\n return(1)", "def init_bluetooth(self, btport = 4):\n self.server_sock = BluetoothSocket( RFCOMM )\n self.server_sock.bind((\"\", btport))\n self.server_sock.listen(1)\n port = self.server_sock.getsockname()[1]\n uuid = \"94f39d29-7d6d-437d-973b-fba39e49d4ee\"\n advertise_service(self.server_sock, \"SampleServer\",\n service_id=uuid,\n service_classes=[uuid, SERIAL_PORT_CLASS],\n profiles=[SERIAL_PORT_PROFILE],\n )\n import atexit\n atexit.register(goodbye, None, self.server_sock)\n #print \"atexit registered 1\"\n print_msg(self.name, \"waiting for connection on RFCOMM channel %d\" % port)\n self.client_sock, client_info = self.server_sock.accept() # blocking\n atexit.register(goodbye, self.client_sock, self.server_sock)\n #print \"atexit registered 2\"\n print_msg(self.name, \"Accepted connection from \"+str(client_info))\n self.is_connected = True", "def broadcast(loopstate):\n cmdstring = 'sudo hcitool -i hci0 cmd ' # Send cmd to hci0\n cmdstring += '0x08 ' # Set group to BLE\n cmdstring += '0x0008 ' # Set command to HCI_LE_Set_Advertising_Data\n cmdstring += '0D ' # Length of entire following data, in bytes\n cmdstring += '02 ' # Length of flag info\n cmdstring += '01 ' # Use AD flags\n cmdstring += '02 ' # Flag value:\n # bit 0 (OFF) LE Limited Discoverable Mode\n # bit 1 (ON) LE General Discoverable Mode\n # bit 2 (OFF) BR/EDR Not Supported\n # bit 3 (ON) Simultaneous LE and BR/EDR to Same Device Capable (controller)\n # bit 4 (ON) Simultaneous LE and BR/EDR to Same Device Capable (Host)\n cmdstring += '09 ' # Length of following message, in bytes\n cmdstring += '07 ' # GAP value (07 = 128 Bit Complete Service UUID List)\n cmdstring += '42 69 63 79 63 6c 65 ' # Header to identify beacon message-\n # - and it's also is Bicycle in ASCII!\n if loopstate:\n cmdstring = cmdstring + LOOP_ON\n else:\n cmdstring = cmdstring + LOOP_OFF + ' >/dev/null 2>&1'\n subprocess.call(cmdstring, shell=True)\n 
subprocess.call('sudo hciconfig hci0 leadv 3 >/dev/null 2>&1', shell=True)", "def test_gwservice_createdevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n print(json.dumps(payload))\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"GET\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device verify\", body=body)\n if resp.status_code != 200:\n assert False\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device delete\", body=body)\n if resp.status_code != 200:\n assert False", "def __init__(self, name, core_services, set_in=None, prop_in=None):\r\n\r\n # DeviceBase will create:\r\n # self._name, self._core, self._tracer,\r\n # XBeeBase will create:\r\n # self._xbee_manager, self._extended_address\r\n\r\n ## Settings Table Definition:\r\n\r\n settings_list = [\r\n Setting(\r\n name='sleep_ms', type=int, required=False,\r\n default_value=self.DEF_SLEEP_MS,\r\n verify_function=lambda x: x >= 0 and \\\r\n x <= CYCLIC_SLEEP_EXT_MAX_MS),\r\n Setting(name=\"led1_source\", type=str, required=False),\r\n Setting(name=\"led2_source\", type=str, required=False),\r\n Setting(name=\"led3_source\", type=str, required=False),\r\n # This setting is provided for advanced users:\r\n Setting(\r\n name='awake_time_ms', type=int, required=False,\r\n default_value=self.DEF_AWAKE_MS,\r\n verify_function=lambda x: x >= 0 and x <= 0xffff),\r\n ]\r\n # Add our settings_list entries into the settings passed to us.\r\n set_in = self.merge_settings(set_in, settings_list)\r\n\r\n ## Channel Properties Definition:\r\n property_list = [\r\n # gettable properties\r\n ChannelSourceDeviceProperty(name=\"sw1\", type=bool,\r\n initial=Sample(timestamp=0, value=False),\r\n perms_mask=DPROP_PERM_GET, options=DPROP_OPT_AUTOTIMESTAMP),\r\n ChannelSourceDeviceProperty(name=\"sw2\", type=bool,\r\n initial=Sample(timestamp=0, value=False),\r\n perms_mask=DPROP_PERM_GET, options=DPROP_OPT_AUTOTIMESTAMP),\r\n ChannelSourceDeviceProperty(name=\"sw3\", type=bool,\r\n initial=Sample(timestamp=0, value=False),\r\n perms_mask=DPROP_PERM_GET, options=DPROP_OPT_AUTOTIMESTAMP),\r\n ChannelSourceDeviceProperty(name=\"sw4\", type=bool,\r\n initial=Sample(timestamp=0, value=False),\r\n perms_mask=DPROP_PERM_GET, options=DPROP_OPT_AUTOTIMESTAMP),\r\n # gettable and settable properties\r\n ChannelSourceDeviceProperty(name=\"led1\", type=Boolean,\r\n initial=Sample(timestamp=0,\r\n value=Boolean(False, style=STYLE_ONOFF)),\r\n perms_mask=(DPROP_PERM_GET | DPROP_PERM_SET),\r\n options=DPROP_OPT_AUTOTIMESTAMP,\r\n set_cb=lambda sample: self.prop_set_led(\"led1\", sample)),\r\n ChannelSourceDeviceProperty(name=\"led2\", type=Boolean,\r\n initial=Sample(timestamp=0,\r\n value=Boolean(False, style=STYLE_ONOFF)),\r\n perms_mask=(DPROP_PERM_GET | DPROP_PERM_SET),\r\n 
options=DPROP_OPT_AUTOTIMESTAMP,\r\n set_cb=lambda sample: self.prop_set_led(\"led2\", sample)),\r\n ChannelSourceDeviceProperty(name=\"led3\", type=Boolean,\r\n initial=Sample(timestamp=0,\r\n value=Boolean(False, style=STYLE_ONOFF)),\r\n perms_mask=(DPROP_PERM_GET | DPROP_PERM_SET),\r\n options=DPROP_OPT_AUTOTIMESTAMP,\r\n set_cb=lambda sample: self.prop_set_led(\"led3\", sample)),\r\n ]\r\n # Add our property_list entries into the properties passed to us.\r\n prop_in = self.merge_properties(prop_in, property_list)\r\n\r\n ## Initialize the XBeeBase interface:\r\n XBeeBase.__init__(self, name, core_services, set_in, prop_in)\r\n\r\n self._tracer.calls(\"XBeeXBIB.__init__()\")", "def __init__(self) -> None:\n self._found_devices = {} # type: Dict[IPv4Address, conf.BaseService]", "async def async_setup(self):\n dev_reg = await device_registry.async_get_registry(self.hass)\n model_type = self.device.settings[\"device\"][\"type\"]\n dev_reg.async_get_or_create(\n config_entry_id=self.entry.entry_id,\n name=self.name,\n connections={(device_registry.CONNECTION_NETWORK_MAC, self.mac)},\n # This is duplicate but otherwise via_device can't work\n identifiers={(DOMAIN, self.mac)},\n manufacturer=\"Shelly\",\n model=aioshelly.MODEL_NAMES.get(model_type, model_type),\n sw_version=self.device.settings[\"fw\"],\n )", "def async_register_initial_scan_callback(\n hass: HomeAssistant, callback: CALLBACK_TYPE\n) -> CALLBACK_TYPE:\n discovery: USBDiscovery = hass.data[DOMAIN]\n return discovery.async_register_initial_scan_callback(callback)", "def advertise_and_connect(self):\n self.server_sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\n self.server_sock.bind((\"\", self.PORT))\n self.server_sock.listen(1)\n bluetooth.advertise_service(\n self.server_sock,\n \"Sensor Service\",\n self.UUID\n )\n self.client_sock, client_address = self.server_sock.accept()", "def missing_dbus():\n global NM_AVAILABLE\n debug(\"Cannot import the dbus module\")\n NM_AVAILABLE = False", "def setUp(self):\n self.dbus_mock = MagicMock()\n self.mainloop_mock = MagicMock()\n self.gobject_mock = MagicMock()\n self.process_mock = MagicMock()\n\n modules = {\n 'dbus': self.dbus_mock,\n 'dbus.mainloop.glib': self.mainloop_mock,\n 'gi.repository': self.gobject_mock,\n 'subprocess': self.process_mock\n }\n self.dbus_mock.Interface.return_value.GetManagedObjects.return_value = tests.obj_data.full_ubits\n self.process_mock.check_output = self.get_bluetooth_service\n self.process_mock.run.return_value = subprocess.CompletedProcess(\n args=['bluetoothctl', '-v'], returncode=0, stdout=b'bluetoothctl: 5.53\\n', stderr=b'')\n self.module_patcher = patch.dict('sys.modules', modules)\n self.module_patcher.start()\n from bluezero import dbus_tools\n self.module_under_test = dbus_tools", "async def init_device(self):\n await Device.init_device(self)\n # PROTECTED REGION ID(AsyncTabata.init_device) ENABLED START #\n self.logger = logging.getLogger(__name__)\n self._lock = threading.Lock()\n self._dev_factory = DevFactory()\n self._prepare = 10\n self._work = 20\n self._rest = 10\n self._cycles = 8\n self._tabatas = 1\n self._running_state = RunningState.PREPARE\n self.subscribed = False\n self.set_state(DevState.OFF)\n # The below commented commands are not really needed\n # since in GreenMode.Asyncio mode the monitor\n # lock is disabled by default.\n # util = tango.Util.instance()\n # util.set_serial_model(tango.SerialModel.NO_SYNC)\n # PROTECTED REGION END # // AsyncTabata.init_device", "def setUp(self):\n self.dbus_mock = MagicMock()\n 
self.mainloop_mock = MagicMock()\n self.gobject_mock = MagicMock()\n\n modules = {\n 'dbus': self.dbus_mock,\n 'dbus.mainloop.glib': self.mainloop_mock,\n 'gi.repository': self.gobject_mock,\n }\n self.dbus_mock.Interface.return_value.GetManagedObjects.return_value = tests.obj_data.full_ubits\n self.dbus_mock.Interface.return_value.Get = mock_get\n self.dbus_mock.Interface.return_value.Set = mock_set\n self.dbus_mock.Interface.return_value.GetAll = mock_get_all\n self.module_patcher = patch.dict('sys.modules', modules)\n self.module_patcher.start()\n from bluezero import adapter\n self.module_under_test = adapter\n self.adapter_device = 'hci0'\n self.adapter_name = 'linaro-alip'\n self.path = '/org/bluez/hci0'", "def scan_bluetooth(self):\n nearby_devices = bluetooth.discover_devices(lookup_names=True)\n print(\"Found {} devices at {}\".format(len(nearby_devices), datetime.now()))\n timestamp = datetime.now().strftime('%m/%d/%Y %H:%M:%S')\n self.capture = self.MonitorCapture(timestamp=timestamp, structure=nearby_devices, ip_addr=self.ip_addr,\n location=self.location)\n for name, addr in nearby_devices:\n print(\" %s - %s\" % (addr, name))\n\n self.capture = json.dumps(self.capture.__dict__)", "def main():\n print(\"Configuring system\")\n ain_config, settling_conf, resolution_config = None, None, None\n print(\"\\tSetting globals\")\n Globals.init()\n print(\"\\tSetting signals\")\n signal.signal(signal.SIGINT, signal_handler)\n signal.signal(signal.SIGTERM, signal_handler)\n signal.signal(signal.SIGABRT, signal_handler)\n signal.signal(signal.SIGQUIT, signal_handler)\n signal.signal(signal.SIGTSTP, signal_handler)\n signal.signal(signal.SIGHUP, signal_handler)\n print(\"\\tConnecting to devices\")\n handles, information = ld_connect(T7_DEVICE, CT[0])\n if len(handles) != 0:\n print(\"\\tFound \" + str(len(handles)) + \" device(s)\")\n ain_addr = [AIN_ADDRS[0], AIN_ADDRS[2], AIN_ADDRS[1], AIN_ADDRS[3]]\n settling_addr = [SETTLING_ADDR[0], SETTLING_ADDR[2], SETTLING_ADDR[1], SETTLING_ADDR[3]]\n resolution_addr = [RES_ADDR[0], RES_ADDR[2], RES_ADDR[1], RES_ADDR[3]]\n ain_range = [1.0, 10.0, 1.0, 10.0]\n gnd_ref_range = [NEGATIVE_REF_ADDR[0], NEGATIVE_REF_ADDR[2]]\n \"\"\" 1 is for AIN1, 3 for AIN3 and 5 for AIN5 \"\"\"\n gnd_io_range = [1, 3]\n ain_config = ld_ain_config(handles, ain_addr, aio_dir=1, ain_range=ain_range)\n settling_conf = ld_settling_config(handles, settling_addr, SETTLING_LIST[6])\n resolution_config = ld_resolution_config(handles, resolution_addr, RES_LIST[12])\n gnd_config = ld_gnd_ref_conf(handles, gnd_ref_range, gnd_io_range)\n Globals.add_global_handles(handles)\n Globals.add_global_information(information)\n if ain_config == 0 and settling_conf == 0 and resolution_config == 0 and gnd_config == 0:\n \"\"\"\n sync = Sync.Devices(handles, 10, 1)\n\n sync.sync()\n \"\"\"\n print(\"\\tScanning device(s)\")\n Monitor = Devices(handles, 500, [\"AIN0\", \"AIN2\"], 10000, 1)\n \"\"\"\n Sync = sync.Devices(handles, 500, [\"AIN0\", \"AIN2\", \"AIN4\"], 3000, 1)\n \"\"\"\n Sync.sync()\n \"\"\"\n monitor_dio_ain(handles, information)\n print(\"Closing connection to devices\")\n \"\"\"\n else:\n if ain_config == 0:\n print(\"Analog configuration: Success.\")\n else:\n print(\"Analog configuration: Failure.\")\n if settling_conf == 0:\n print(\"Settling time configuration: Success.\")\n else:\n print(\"Settling time configuration: Failure.\")\n if resolution_config == 0:\n print(\"Resolution configuration: Success.\")\n else:\n print(\"Resolution configuration: Failure.\")\n if 
gnd_config == 0:\n print(\"Gnd references configuration: Success.\")\n else:\n print(\"Gnd references configuration: Failure.\")\n print(\"Configuration unsuccessful. Closing connection\")\n for handle in Globals.handles:\n ljm.close(handle)\n print(\"Connections closed\")\n\n else:\n print(\"\\tUnable to detect any devices\")\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n signal.signal(signal.SIGTERM, signal.SIG_DFL)\n signal.signal(signal.SIGQUIT, signal.SIG_DFL)\n signal.signal(signal.SIGTSTP, signal.SIG_DFL)\n signal.signal(signal.SIGHUP, signal.SIG_DFL)\n signal.signal(signal.SIGABRT, signal.SIG_DFL)\n print(\"Exiting\")\n return", "def discover(bt_addr):\n print \"performing inquiry...\"\n nearby_devices = bluetooth.discover_devices(lookup_names = True)\n print \"Found %d devices\" % len(nearby_devices)\n \n for addr, name in neaby_devices:\n print \" %s - %s\" % (addr, name)", "def test_gatt_connect(self):\n gatt_server_cb = self.per_ad.droid.gattServerCreateGattServerCallback()\n gatt_server = self.per_ad.droid.gattServerOpenGattServer(\n gatt_server_cb)\n self.gatt_server_list.append(gatt_server)\n try:\n bluetooth_gatt, gatt_callback, adv_callback = (\n orchestrate_gatt_connection(self.cen_ad, self.per_ad))\n self.bluetooth_gatt_list.append(bluetooth_gatt)\n except GattTestUtilsError as err:\n self.log.error(err)\n return False\n self.adv_instances.append(adv_callback)\n return self._orchestrate_gatt_disconnection(bluetooth_gatt,\n gatt_callback)", "async def async_init_single_device(dev: Device) -> None:\n await dev.async_added_to_hass()\n dev.async_write_ha_state()", "def connectToDevice(self, address):\n self.client = ModbusTcpClient(address)", "def device_discovery(endless):\r\n click.echo(\"start device discovery ...\")\r\n _device_discovery(endless)", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n bt_device_id: int = config[CONF_BT_DEVICE_ID]\n\n beacons: dict[str, dict[str, str]] = config[CONF_BEACONS]\n devices: list[EddystoneTemp] = []\n\n for dev_name, properties in beacons.items():\n namespace = get_from_conf(properties, CONF_NAMESPACE, 20)\n instance = get_from_conf(properties, CONF_INSTANCE, 12)\n name = properties.get(CONF_NAME, dev_name)\n\n if instance is None or namespace is None:\n _LOGGER.error(\"Skipping %s\", dev_name)\n continue\n\n devices.append(EddystoneTemp(name, namespace, instance))\n\n if devices:\n mon = Monitor(hass, devices, bt_device_id)\n\n def monitor_stop(event: Event) -> None:\n \"\"\"Stop the monitor thread.\"\"\"\n _LOGGER.info(\"Stopping scanner for Eddystone beacons\")\n mon.stop()\n\n def monitor_start(event: Event) -> None:\n \"\"\"Start the monitor thread.\"\"\"\n _LOGGER.info(\"Starting scanner for Eddystone beacons\")\n mon.start()\n\n add_entities(devices)\n mon.start()\n hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, monitor_stop)\n hass.bus.listen_once(EVENT_HOMEASSISTANT_START, monitor_start)\n else:\n _LOGGER.warning(\"No devices were added\")", "async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:\n usb = await async_get_usb(hass)\n usb_discovery = USBDiscovery(hass, usb)\n await usb_discovery.async_setup()\n hass.data[DOMAIN] = usb_discovery\n websocket_api.async_register_command(hass, websocket_usb_scan)\n\n return True", "def __init__(self):\n config.register_l2gw_opts_helper()\n self.service_type_manager = st_db.ServiceTypeManager.get_instance()\n 
add_provider_configuration(self.service_type_manager, constants.L2GW)\n self._load_drivers()\n super(L2GatewayPlugin, self).__init__()\n l2gateway_db.subscribe()", "def connect(self):\n if self._btleSubThread is not None and threading.current_thread().ident != self._btleSubThread.ident:\n return # not allow\n self._peripheral.connect(self.car_mac, btle.ADDR_TYPE_RANDOM)\n self._readChar = self._peripheral.getCharacteristics(1, 0xFFFF, \"be15bee06186407e83810bd89c4d8df4\")[0]\n self._writeChar = self._peripheral.getCharacteristics(1, 0xFFFF, \"be15bee16186407e83810bd89c4d8df4\")[0]\n self._delegate.setHandle(self._readChar.getHandle())\n self._peripheral.setDelegate(self._delegate)\n self.turnOnSdkMode()\n self.enableNotify()\n self._connected = True\n self._reconnect = False\n if self._btleSubThread is None:\n self._transferExecution()", "async def request_scan(self) -> None:\n await self.dbus.Device.Wireless.call_request_scan({})", "def async_register_services(hass, config, insteon_modem):\n\n def add_all_link(service):\n \"\"\"Add an INSTEON All-Link between two devices.\"\"\"\n group = service.data.get(SRV_ALL_LINK_GROUP)\n mode = service.data.get(SRV_ALL_LINK_MODE)\n link_mode = 1 if mode.lower() == SRV_CONTROLLER else 0\n insteon_modem.start_all_linking(link_mode, group)\n\n def del_all_link(service):\n \"\"\"Delete an INSTEON All-Link between two devices.\"\"\"\n group = service.data.get(SRV_ALL_LINK_GROUP)\n insteon_modem.start_all_linking(255, group)\n\n def load_aldb(service):\n \"\"\"Load the device All-Link database.\"\"\"\n entity_id = service.data[CONF_ENTITY_ID]\n reload = service.data[SRV_LOAD_DB_RELOAD]\n if entity_id.lower() == ENTITY_MATCH_ALL:\n for entity_id in hass.data[DOMAIN][INSTEON_ENTITIES]:\n _send_load_aldb_signal(entity_id, reload)\n else:\n _send_load_aldb_signal(entity_id, reload)\n\n def _send_load_aldb_signal(entity_id, reload):\n \"\"\"Send the load All-Link database signal to INSTEON entity.\"\"\"\n signal = f\"{entity_id}_{SIGNAL_LOAD_ALDB}\"\n dispatcher_send(hass, signal, reload)\n\n def print_aldb(service):\n \"\"\"Print the All-Link Database for a device.\"\"\"\n # For now this sends logs to the log file.\n # Future direction is to create an INSTEON control panel.\n entity_id = service.data[CONF_ENTITY_ID]\n signal = f\"{entity_id}_{SIGNAL_PRINT_ALDB}\"\n dispatcher_send(hass, signal)\n\n def print_im_aldb(service):\n \"\"\"Print the All-Link Database for a device.\"\"\"\n # For now this sends logs to the log file.\n # Future direction is to create an INSTEON control panel.\n print_aldb_to_log(insteon_modem.aldb)\n\n def x10_all_units_off(service):\n \"\"\"Send the X10 All Units Off command.\"\"\"\n housecode = service.data.get(SRV_HOUSECODE)\n insteon_modem.x10_all_units_off(housecode)\n\n def x10_all_lights_off(service):\n \"\"\"Send the X10 All Lights Off command.\"\"\"\n housecode = service.data.get(SRV_HOUSECODE)\n insteon_modem.x10_all_lights_off(housecode)\n\n def x10_all_lights_on(service):\n \"\"\"Send the X10 All Lights On command.\"\"\"\n housecode = service.data.get(SRV_HOUSECODE)\n insteon_modem.x10_all_lights_on(housecode)\n\n def scene_on(service):\n \"\"\"Trigger an INSTEON scene ON.\"\"\"\n group = service.data.get(SRV_ALL_LINK_GROUP)\n insteon_modem.trigger_group_on(group)\n\n def scene_off(service):\n \"\"\"Trigger an INSTEON scene ON.\"\"\"\n group = service.data.get(SRV_ALL_LINK_GROUP)\n insteon_modem.trigger_group_off(group)\n\n hass.services.async_register(\n DOMAIN, SRV_ADD_ALL_LINK, add_all_link, schema=ADD_ALL_LINK_SCHEMA\n 
)\n hass.services.async_register(\n DOMAIN, SRV_DEL_ALL_LINK, del_all_link, schema=DEL_ALL_LINK_SCHEMA\n )\n hass.services.async_register(\n DOMAIN, SRV_LOAD_ALDB, load_aldb, schema=LOAD_ALDB_SCHEMA\n )\n hass.services.async_register(\n DOMAIN, SRV_PRINT_ALDB, print_aldb, schema=PRINT_ALDB_SCHEMA\n )\n hass.services.async_register(DOMAIN, SRV_PRINT_IM_ALDB, print_im_aldb, schema=None)\n hass.services.async_register(\n DOMAIN, SRV_X10_ALL_UNITS_OFF, x10_all_units_off, schema=X10_HOUSECODE_SCHEMA\n )\n hass.services.async_register(\n DOMAIN, SRV_X10_ALL_LIGHTS_OFF, x10_all_lights_off, schema=X10_HOUSECODE_SCHEMA\n )\n hass.services.async_register(\n DOMAIN, SRV_X10_ALL_LIGHTS_ON, x10_all_lights_on, schema=X10_HOUSECODE_SCHEMA\n )\n hass.services.async_register(\n DOMAIN, SRV_SCENE_ON, scene_on, schema=TRIGGER_SCENE_SCHEMA\n )\n hass.services.async_register(\n DOMAIN, SRV_SCENE_OFF, scene_off, schema=TRIGGER_SCENE_SCHEMA\n )\n _LOGGER.debug(\"Insteon Services registered\")", "def __init__(self):\n super(UpnpEmbeddedDevice, self).__init__()\n return", "def initService(self):", "def _find_adapter(self):\n required_interfaces = [GATT_MANAGER_IFACE, LE_ADVERTISING_MANAGER_IFACE]\n object_manager = dbus.Interface(self.bus.get_object(BLUEZ_SERVICE_NAME, '/'), DBUS_OM_IFACE)\n objects = object_manager.GetManagedObjects()\n\n for object_path, properties in objects.items():\n missing_interfaces = [i for i in required_interfaces if i not in properties.keys()]\n if missing_interfaces:\n continue\n return object_path.rsplit('/', 1)[1]\n\n return None", "def __init__(self, uid):\n Device.__init__(self, uid)\n\n self.expected_name = 'Analog Out Bricklet'\n\n self.binding_version = [1, 0, 0]", "def start(self):\n\n if self.__bus_controller == None:\n print(\"can't start please pass me the needed dictionaries\")\n\n self.__global_messages = {\"kick reason\": \"\", \"free text\": \"\"}\n self.__lock_data = False\n self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.stop = False\n __main_loop = threading.Thread(target=self.__main_loop, args=(), name=\"bus updater\")\n __main_loop.start()", "def _get_dbus_paths(self):\n self.blinkt_srv_path = tools.uuid_dbus_path(\n constants.GATT_SERVICE_IFACE,\n BLINKT_SRV)[0]\n self.blinkt_chrc_path = tools.uuid_dbus_path(\n constants.GATT_CHRC_IFACE,\n BLINKT_CHRC)[0]\n self.blinkt_obj = tools.get_dbus_obj(constants.BLUEZ_SERVICE_NAME,\n self.blinkt_chrc_path)\n\n self.blinkt_iface = tools.get_dbus_iface(constants.GATT_CHRC_IFACE,\n self.blinkt_obj)", "def test_gatt_connect_autoconnect(self):\n gatt_server_cb = self.per_ad.droid.gattServerCreateGattServerCallback()\n gatt_server = self.per_ad.droid.gattServerOpenGattServer(\n gatt_server_cb)\n self.gatt_server_list.append(gatt_server)\n autoconnect = False\n mac_address, adv_callback = (\n get_mac_address_of_generic_advertisement(self.cen_ad, self.per_ad))\n try:\n bluetooth_gatt, gatt_callback = setup_gatt_connection(\n self.cen_ad, mac_address, autoconnect)\n self.bluetooth_gatt_list.append(bluetooth_gatt)\n except GattTestUtilsError as err:\n self.log.error(err)\n return False\n try:\n disconnect_gatt_connection(self.cen_ad, bluetooth_gatt,\n gatt_callback)\n close_gatt_client(self.cen_ad, bluetooth_gatt)\n if bluetooth_gatt in self.bluetooth_gatt_list:\n self.bluetooth_gatt_list.remove(bluetooth_gatt)\n except GattTestUtilsError as err:\n self.log.error(err)\n return False\n autoconnect = True\n bluetooth_gatt = self.cen_ad.droid.gattClientConnectGatt(\n gatt_callback, mac_address, autoconnect, 
gatt_transport['auto'],\n False, gatt_phy_mask['1m_mask'])\n self.bluetooth_gatt_list.append(bluetooth_gatt)\n expected_event = gatt_cb_strings['gatt_conn_change'].format(\n gatt_callback)\n try:\n event = self.cen_ad.ed.pop_event(expected_event,\n self.default_timeout)\n except Empty:\n self.log.error(gatt_cb_err['gatt_conn_change_err'].format(\n expected_event))\n test_result = False\n return self._orchestrate_gatt_disconnection(bluetooth_gatt,\n gatt_callback)", "def startManager(self):\n\t\tlogging.info(\"----->>>The DeviceDataManager will be started\")\n\t\tself.sysPerfManager.startManager()\n\t\tself.sensorAdapterManager.startManager()\n\t\tif self.enableRedis:\n\t\t\tself.redisClient.connectClient()\n\t\t\n\t\tif self.enableMqtt:\n\t\t\tself.mqttClient.connectClient()", "def getDevices():\n \n scannedDevices = list()\n \n proc = subprocess.Popen('bluetoothctl scan on', shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=8192, universal_newlines=True)\n \n time.sleep(10)\n \n proc.stdin.write('scan off')\n \n try:\n stdout, stderr = proc.communicate()\n except subprocess.TimeoutExpired:\n proc.kill()\n stdout, stderr = proc.communicate()\n\n ansiEscapePattern = re.compile(r'\\x1B[@-_][0-?]*[ -/]*[@-~]')\n stdout = ansiEscapePattern.sub('', stdout)\n \n #deviceNamePattern = re.compile('^\\[NEW\\] Device [A-F0-9]{2}:[A-F0-9]{2}:[A-F0-9]{2}:[A-F0-9]{2}:[A-F0-9]{2}:[A-F0-9]{2} ')\n \n for line in stdout.split('\\n'):\n if '[NEW] Device' in line:\n device = list()\n device.append(line[13:31])\n device.append(line[31:])\n scannedDevices.append(device)\n \n return scannedDevices", "def __init__(self, usb_bus, usb_dev):\n self.usb_bus = usb_bus\n self.usb_dev = usb_dev\n self.usb_target = 'usb:{bus}:{dev}'.format(bus = self.usb_bus,\n dev = self.usb_dev)\n self.hw_connected = False\n\n self.rdwr_options = {\n 'targets': ['106A'], # type2tag, nfcA\n 'on-startup': on_startup,\n 'on-connect': on_connect,\n }\n\n self.last_nfcid = \"\";", "def __init__(self, device):\n self.logger = logging.getLogger('ADB')\n self.device = device\n self.cmd_prefix = ['adb']\n\n r = subprocess.check_output(['adb', 'devices']).split('\\n')\n if not r[0].startswith(\"List of devices attached\"):\n raise ADBException()\n\n online_devices = []\n for line in r[1:]:\n if not line:\n continue\n segments = line.split(\"\\t\")\n if len(segments) != 2:\n continue\n if segments[1].strip() == \"device\":\n online_devices.append(segments[0])\n\n if not online_devices:\n raise ADBException()\n\n if device.serial:\n if device.serial not in online_devices:\n raise ADBException()\n else:\n device.serial = online_devices[0]\n\n self.cmd_prefix.append(\"-s\")\n self.cmd_prefix.append(device.serial)\n\n if self.check_connectivity():\n self.logger.info(\"adb successfully initiated, the device is %s\" % device.serial)\n else:\n raise ADBException()", "def load_devices():", "def setup_usb(self):\n global DEVICE\n global epBulkWriter\n global epBulkReader\n global VID\n global PID\n\n DEVICE = usb.core.find(idVendor=0x2AB9,idProduct=0xFFFF)\n if DEVICE is None:#If not a LVPM, look for an HVPM.\n DEVICE = usb.core.find(idVendor=0x04d8,idProduct=0x000b)\n VID = '0x4d8'\n PID = '0xb'\n if \"Linux\" == platform.system():\n try:\n DEVICE.detach_kernel_driver(0)\n except:\n pass # already unregistered\n DEVICE.set_configuration()\n\n cfg = DEVICE.get_active_configuration()\n intf = cfg[(0,0)]\n\n epBulkWriter = usb.util.find_descriptor(\n intf,\n custom_match = \\\n lambda e: \\\n 
usb.util.endpoint_direction(e.bEndpointAddress) == \\\n usb.util.ENDPOINT_OUT)\n epBulkReader = usb.util.find_descriptor(\n intf,\n custom_match = \\\n lambda e: \\\n usb.util.endpoint_direction(e.bEndpointAddress) == \\\n usb.util.ENDPOINT_IN)", "def register_device():\n payload = request.get_json()\n return _register_device(payload)", "def start_advertising(self):\n self._periph.start_advertising(self._advertisement.advertising_data_bytes,\n scan_response=self._advertisement.scan_response_bytes)", "def create_ble(device_name=None, serial_number=None,\n scan_timeout=None, loop=None):\n impl = JadeInterface.create_ble(device_name, serial_number,\n scan_timeout, loop)\n return JadeAPI(impl)", "def attach_message_bus(self):\n print(\"Connecting to Mycroft message bus\")\n self.client = MessageBusClient()\n print(\"Calling client.run_in_thread()\")\n try:\n self.client.run_in_thread()\n except Exception as e:\n print(\"ERROR: run_in_thread() failed - is Mycroft running?\")\n sys.exit(1)", "def initMana():\n run(\"chariot-me -i\")", "def scan_chip_ble_devices(devCtrl):\n devices = []\n bleMgr = BleManager(devCtrl)\n bleMgr.scan(\"-t 10\")\n\n for device in bleMgr.peripheral_list:\n devIdInfo = bleMgr.get_peripheral_devIdInfo(device)\n if devIdInfo:\n devInfo = devIdInfo.__dict__\n devInfo[\"name\"] = device.Name\n devices.append(devInfo)\n\n return devices", "def connect(self):\n\n # Send some bogus UART characters to force a sleeping device to wake\n self._writeBytes(bytes([START1, START1, START1, START1]))\n time.sleep(0.1) # wait 100ms to give device time to start running\n\n self._rxThread.start()\n if not self.noProto: # Wait for the db download if using the protocol\n self._waitConnected()", "def async_register_scan_request_callback(\n hass: HomeAssistant, callback: CALLBACK_TYPE\n) -> CALLBACK_TYPE:\n discovery: USBDiscovery = hass.data[DOMAIN]\n return discovery.async_register_scan_request_callback(callback)", "def createWIFIAccessPoint():\n ifname = config.get(\"interface\", \"wifi\")\n ipaddress = config.get(\"hotspot\", \"ip\")\n prefix = int(config.get(\"hotspot\", \"prefix\"))\n ssid = config.get(\"hotspot\", \"ssid\")\n password = config.get(\"hotspot\", \"password\")\n ################################\n s_wifi = dbus.Dictionary(\n {\n \"ssid\": dbus.ByteArray(ssid.encode(\"utf-8\")),\n \"mode\": \"ap\",\n })\n s_wsec = dbus.Dictionary(\n {\n \"key-mgmt\": \"wpa-psk\",\n \"psk\": password\n })\n s_con = dbus.Dictionary(\n {\"type\": \"802-11-wireless\",\n \"interface-name\":ifname ,\n \"uuid\": str(uuid.uuid4()),\n \"id\": ssid,\n \"autoconnect\":dbus.Boolean(True)\n })\n addr1 = dbus.Dictionary({\"address\": ipaddress, \"prefix\": dbus.UInt32(prefix)})\n dns = []\n s_ip4 = dbus.Dictionary(\n {\n \"address-data\": dbus.Array([addr1], signature=dbus.Signature(\"a{sv}\")),\n \"dns\": dbus.Array(dns, signature=dbus.Signature('u'), variant_level=1),\n \"method\": \"manual\",\n })\n s_ip6 = dbus.Dictionary({\"method\": \"ignore\"})\n con = dbus.Dictionary(\n {\n \"802-11-wireless\": s_wifi,\n \"802-11-wireless-security\":s_wsec,\n \"connection\": s_con,\n \"ipv4\": s_ip4,\n \"ipv6\": s_ip6\n })\n try:\n logging.info(\"Creating hotspot connection: {} - {}\".format(s_con[\"id\"], s_con[\"uuid\"]))\n ##########\n bus = dbus.SystemBus()\n proxy = bus.get_object(\n \"org.freedesktop.NetworkManager\", \"/org/freedesktop/NetworkManager/Settings\"\n )\n settings = dbus.Interface(proxy, \"org.freedesktop.NetworkManager.Settings\")\n connection = settings.AddConnection(con)\n 
logging.info(f\"Created access point connection {connection}\")\n except Exception as e:\n logging.error(\"Hotspot connection creation failed\")\n logging.error(e)", "def init_device(platform=\"Android\", uuid=None, **kwargs):\n cls = import_device_cls(platform)\n dev = cls(uuid, **kwargs)\n # Add device instance in G and set as current device.\n G.add_device(dev)\n return dev", "def register(self, dbus_path, uuid, codec, capabilities):\n self._media_proxy.proxy.RegisterEndpoint(\n dbus_path,\n {\n \"UUID\": uuid,\n \"Codec\": Byte(codec),\n \"Capabilities\": Array(capabilities, signature=\"y\")\n })", "async def async_setup(self):\n self._unsub_stop = self.hass.bus.async_listen(\n EVENT_HOMEASSISTANT_STOP, self._handle_ha_stop\n )\n dev_reg = await device_registry.async_get_registry(self.hass)\n model_type = self.device.settings[\"device\"][\"type\"]\n dev_reg.async_get_or_create(\n config_entry_id=self.entry.entry_id,\n name=self.name,\n connections={(device_registry.CONNECTION_NETWORK_MAC, self.mac)},\n # This is duplicate but otherwise via_device can't work\n identifiers={(DOMAIN, self.mac)},\n manufacturer=\"Shelly\",\n model=aioshelly.MODEL_NAMES.get(model_type, model_type),\n sw_version=self.device.settings[\"fw\"],\n )", "def run(self):\n cached_char = Characteristic(BLE_TEMP_UUID, BLE_TEMP_HANDLE)\n adapter = GATTToolBackend()\n while True:\n try:\n _LOGGER.debug(\"Connecting to %s\", self.name)\n # We need concurrent connect, so lets not reset the device\n adapter.start(reset_on_start=False)\n # Seems only one connection can be initiated at a time\n with CONNECT_LOCK:\n device = adapter.connect(\n self.mac, CONNECT_TIMEOUT, BLEAddressType.random\n )\n if SKIP_HANDLE_LOOKUP:\n # HACK: inject handle mapping collected offline\n # pylint: disable-next=protected-access\n device._characteristics[UUID(BLE_TEMP_UUID)] = cached_char\n # Magic: writing this makes device happy\n device.char_write_handle(0x1B, bytearray([255]), False)\n device.subscribe(BLE_TEMP_UUID, self._update)\n _LOGGER.info(\"Subscribed to %s\", self.name)\n while self.keep_going:\n # protect against stale connections, just read temperature\n device.char_read(BLE_TEMP_UUID, timeout=CONNECT_TIMEOUT)\n self.event.wait(60)\n break\n except (BLEError, NotConnectedError, NotificationTimeout) as ex:\n _LOGGER.error(\"Exception: %s \", str(ex))\n finally:\n adapter.stop()", "def __init__(self, hdw=['Soundcard'], devicename='dev1'):\n self.debugFlag = False\n self.task = None # NI Task\n self.required_hardware = hdw # Require specific hardware \n self.hardware = [] # list of hardware actually found on this system\n self.find_hardware(device_info={'devicename': devicename}) # population the self.hardware list", "def load_device():", "def __init__(self,app):\n self.app = app\n builder = gtk.Builder()\n glade_file = os.path.join(APP_DIR,'ui','device-dialog.glade')\n builder.add_from_file(glade_file)\n\n # Set defaults\n glade = etree.parse(glade_file)\n self.set_adjustment_values(builder,glade)\n\n # Populate the combo boxes\n # TODO: FIX FOR WINDOWS\n #if os.name != 'nt':\n # con = cups.Connection()\n # printers = con.getPrinters()\n # combo = builder.get_object(\"printer\")\n # self.set_model_from_list(combo,printers)\n # combo.set_active(len(printers)-1)\n\n # Scan for serial ports, should work on both linux and windows\n ports = Device.port_scan()\n combo = builder.get_object(\"port\")\n self.set_model_from_list(combo,ports)\n combo.set_active(len(ports)-1)\n\n combo = builder.get_object(\"baudrate\")\n 
self.set_model_from_list(combo,[2400,4800,9600,19200,38400,57600,115200])\n combo.set_active(2)\n\n combo = builder.get_object(\"parity\")\n self.set_model_from_list(combo,['None','Odd','Even','Mark','Space'])\n combo.set_active(0)\n\n combo = builder.get_object(\"stopbits\")\n self.set_model_from_list(combo,[1,1.5,2])\n combo.set_active(0)\n\n combo = builder.get_object(\"bytesize\")\n self.set_model_from_list(combo,[8,7,6,5])\n combo.set_active(0)\n\n # Connect the signals\n builder.connect_signals(self)\n\n self.widgets = self.keep_widgets(builder,[\n 'main',\n ])", "def setupDeviceGui(self):\n\n dropDowns = list(self.drivers[driver]['uiDriver'] for driver in self.drivers)\n for dropDown in dropDowns:\n dropDown.clear()\n dropDown.setView(PyQt5.QtWidgets.QListView())\n dropDown.addItem('No device selected')\n\n # adding special items\n self.drivers['dome']['uiDriver'].addItem('INDI')\n self.drivers['imaging']['uiDriver'].addItem('INDI')\n self.drivers['sensorWeather']['uiDriver'].addItem('INDI')\n self.drivers['directWeather']['uiDriver'].addItem('Built-In')\n self.drivers['onlineWeather']['uiDriver'].addItem('Built-In')\n self.drivers['cover']['uiDriver'].addItem('INDI')\n self.drivers['skymeter']['uiDriver'].addItem('INDI')\n self.drivers['telescope']['uiDriver'].addItem('INDI')\n self.drivers['power']['uiDriver'].addItem('INDI')\n self.drivers['relay']['uiDriver'].addItem('Built-In')\n for app in self.app.astrometry.solverAvailable:\n self.drivers['astrometry']['uiDriver'].addItem(app)\n self.drivers['remote']['uiDriver'].addItem('Built-In')\n self.drivers['measure']['uiDriver'].addItem('Built-In')\n\n return True", "def __init__(self,usb_ser_port_num,sm_gpib_addr,dmm_gpib_addr):\n self.initialize_elexol(usb_ser_port_num)\n\n #find the number of shift registers installed in test system\n print \"Counting number of shift registers connected to fixture...\"\n num_shift_reg_found = self.count_shift_reg()\n\n if num_shift_reg_found < 1:\n print \"No shift registers present... 
Exiting program.\n exit()\n else:\n print \"found %d shift registers\"%self.num_registers\n\n #initialize all of the relays to the normally closed state\n print \"\\nInitializing %d shift registers\"%self.num_registers\n self.initialize_relays()\n print \"initialization done.\"\n\n #create instances of the sourcemeter and dmm for communication\n self.dmm = Instruments.DMM_34401A(\"GPIB::22\",\"meter\")\n self.sm = Instruments.sourcemeter_2400(\"GPIB::4\",\"sourcemeter\")", "def __init__(self, mainwindow, bus):\n # register this object on the given bus\n dbus.service.Object.__init__(self, bus,\n # the dbus object path\n '/lunar/PyQt4DBusTest/MainWindow')\n self.mainwindow = mainwindow", "def __init__(self):\n self.data0 = [] # This will hold data from ADC0\n self.data1 = [] # This will hold data from ADC1\n self.dev = _configure_device()", "def create_device(self, app_name='FooBar', device_type='Raspberry Pi 2'):\n\n app = self.resin.models.application.create(app_name, device_type)\n return app, self.resin.models.device.register(app['id'], self.resin.models.device.generate_uuid())", "def register_feg_gw(c):\n subprocess.check_call(\n 'fab register-feg-gw', shell=True, cwd=FEG_FAB_PATH,\n )", "def __init__(self, bus=0, device=0):\n ...", "def setUp(self):\n self.dbus_mock = MagicMock()\n self.dbus_exception_mock = MagicMock()\n self.dbus_service_mock = MagicMock()\n self.mainloop_mock = MagicMock()\n self.gobject_mock = MagicMock()\n\n modules = {\n 'dbus': self.dbus_mock,\n 'dbus.exceptions': self.dbus_exception_mock,\n 'dbus.service': self.dbus_service_mock,\n 'dbus.mainloop.glib': self.mainloop_mock,\n 'gi.repository': self.gobject_mock,\n }\n self.dbus_mock.Interface.return_value.GetManagedObjects.return_value = tests.obj_data.full_ubits\n self.dbus_mock.Interface.return_value.Get = mock_get\n self.dbus_mock.Interface.return_value.Set = mock_set\n self.dbus_mock.return_value\n self.dbus_mock.SystemBus = MagicMock()\n self.module_patcher = patch.dict('sys.modules', modules)\n self.module_patcher.start()\n from bluezero import advertisement\n from bluezero import dbus_tools\n self.module_under_test = advertisement\n self.module_tools = dbus_tools", "def test_device_registration(self):\n sensor = self._get_dummy_sensor()\n responses = []\n sensor.set_response_callback(func=lambda response: responses.append(response))\n sensor.register()\n time.sleep(wait_seconds)\n for response in responses:\n print(json.loads(response.content.decode()))\n assert len(responses) > 0\n assert json.loads(responses[0].content.decode())['module_name'] == 'test_get_sensor'\n sensor.stopped.set()", "async def connect(self) -> None:\n # No protocols to setup + no protocols previously set up => no service\n if self._protocols_to_setup.empty() and not self._protocol_handlers:\n raise exceptions.NoServiceError(\"no service to connect to\")\n\n # Protocols set up already => we have already connected\n if self._protocol_handlers:\n raise exceptions.InvalidStateError(\"already connected\")\n\n devinfo: Dict[str, Any] = {}\n\n # Set up protocols, ignoring duplicates\n while not self._protocols_to_setup.empty():\n setup_data = self._protocols_to_setup.get()\n\n if setup_data.protocol in self._protocol_handlers:\n _LOGGER.debug(\n \"Protocol %s already set up, ignoring\", setup_data.protocol\n )\n continue\n\n _LOGGER.debug(\"Connecting to protocol: %s\", setup_data.protocol)\n if await setup_data.connect():\n self._protocol_handlers[setup_data.protocol] = setup_data\n\n for iface, instance in 
setup_data.interfaces.items():\n self._interfaces[iface].register(instance, setup_data.protocol)\n\n self._features.add_mapping(setup_data.protocol, setup_data.features)\n dict_merge(devinfo, setup_data.device_info())\n\n self._device_info = interface.DeviceInfo(devinfo)\n\n # Forward power events in case an interface exists for it\n try:\n power = cast(\n interface.Power, self._interfaces[interface.Power].main_instance\n )\n power.listener = self._interfaces[interface.Power]\n except exceptions.NotSupportedError:\n _LOGGER.debug(\"Power management not supported by any protocols\")", "def init(self):\n\n @asyncio.coroutine\n def interface_prepare_config(dts, acg, xact, xact_info, ksp, msg, scratch):\n \"\"\"Prepare for application configuration.\n \"\"\"\n self.log.debug(\"Prepare Callback\")\n # Store the interfaces\n self.interfaces[msg.name] = msg\n acg.handle.prepare_complete_ok(xact_info.handle)\n\n @asyncio.coroutine\n def routes_prepare_config(dts, acg, xact, xact_info, ksp, msg, scratch):\n \"\"\"Prepare for application configuration.\n \"\"\"\n self.log.debug(\"Prepare Callback\")\n acg.handle.prepare_complete_ok(xact_info.handle)\n\n @asyncio.coroutine\n def dns_prepare_config(dts, acg, xact, xact_info, ksp, msg, scratch):\n \"\"\"Prepare for application configuration.\n \"\"\"\n self.log.debug(\"Prepare Callback\")\n acg.handle.prepare_complete_ok(xact_info.handle)\n\n def apply_config(dts, acg, xact, action, scratch):\n \"\"\"On apply callback for AppConf registration\"\"\"\n self.log.debug(\"Apply Config\")\n return rwtypes.RwStatus.SUCCESS\n\n @asyncio.coroutine\n def interface_status(xact_info, action, ks_path, msg):\n xpath = \"D,/interfaces:interfaces/interfaces:interface[interfaces:name='eth0']\"\n interf = interfaces.Interface()\n interf.name = \"eth0\"\n interf.status.link = \"up\"\n interf.status.speed = \"hundred\"\n interf.status.duplex = \"full\"\n interf.status.mtu = 1500\n interf.status.receive.bytes = 1234567\n interf.status.receive.packets = 1234\n interf.status.receive.errors = 0\n interf.status.receive.dropped = 100\n xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, interf)\n\n @asyncio.coroutine\n def clear_interface(xact_info, action, ks_path, msg):\n xpath = \"O,/interfaces:clear-interface\"\n op=interfaces.ClearInterfaceOp()\n op.status=\"Success\"\n xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, op)\n\n #Operational data\n yield from self._dts.register(\n flags=rwdts.Flag.PUBLISHER,\n xpath=\"D,/interfaces:interfaces/interfaces:interface\",\n handler=rift.tasklets.DTS.RegistrationHandler(\n on_prepare=interface_status))\n\n #RPC\n yield from self._dts.register(\n xpath=\"I,/interfaces:clear-interface\",\n flags=rwdts.Flag.PUBLISHER,\n handler=rift.tasklets.DTS.RegistrationHandler(\n on_prepare=clear_interface))\n\n with self._dts.appconf_group_create(\n handler=rift.tasklets.AppConfGroup.Handler(\n on_apply=apply_config)) as acg:\n acg.register(\n xpath=\"C,/interfaces:interfaces/interfaces:interface\",\n flags=rwdts.Flag.SUBSCRIBER|rwdts.Flag.CACHE|0,\n on_prepare=interface_prepare_config)\n\n acg.register(\n xpath=\"C,/routes:routes\",\n flags=rwdts.Flag.SUBSCRIBER|rwdts.Flag.CACHE|0,\n on_prepare=routes_prepare_config)\n\n acg.register(\n xpath=\"C,/dns:dns\",\n flags=rwdts.Flag.SUBSCRIBER|rwdts.Flag.CACHE|0,\n on_prepare=dns_prepare_config)", "def test_add_device(self):\n\n pass", "def start_scan(self):\n try:\n out = self.get_output(\"scan on\")\n except BluetoothctlError, e:\n print(e)\n return None", "async def async_setup(hass: 
HomeAssistant, config):\n if DOMAIN not in config:\n return True\n\n conf_adapters = config[DOMAIN].get(CONF_ADAPTERS)\n if not conf_adapters:\n return False\n\n hass.data.setdefault(DOMAIN, {}).update({CONF_INDOOR_UNITS: {}})\n\n for conf_adapter in conf_adapters:\n conf_adapter_name = conf_adapter[CONF_ADAPTER_NAME]\n conf_adapter_host = conf_adapter[CONF_ADAPTER_HOST]\n conf_adapter_port = conf_adapter[CONF_ADAPTER_PORT]\n conf_adapter_slave = conf_adapter[CONF_ADAPTER_SLAVE]\n adapter : DaikinAPI = await hass.async_add_executor_job(create_adapter, conf_adapter_host, conf_adapter_port, conf_adapter_slave)\n\n for indoor_unit in adapter.indoor_units.values():\n indoor_unit_global_name = \"daikin_dta116a621_\" + conf_adapter_name + \"_\" + str(indoor_unit.indoor_unit_id).replace(\"-\",\"_\")\n hass.data[DOMAIN][CONF_INDOOR_UNITS][indoor_unit_global_name] = indoor_unit\n hass.helpers.discovery.load_platform('climate', DOMAIN, {}, config)\n #discovery.async_load_platform(hass, DOMAIN, 'climate', {}, config)\n return True", "async def async_setup(self):\n scanner = YeelightScanner.async_get(self._hass)\n self.capabilities = await scanner.async_get_capabilities(self._host) or {}\n if self.capabilities:\n self._bulb_device.set_capabilities(self.capabilities)\n if name := self._config.get(CONF_NAME):\n # Override default name when name is set in config\n self._name = name\n elif self.capabilities:\n # Generate name from model and id when capabilities is available\n self._name = _async_unique_name(self.capabilities)\n else:\n self._name = self._host # Default name is host", "def setup(hass, config):\n\n\n def handle_service(call):\n \n _get_access_token()\n \n msg_title = call.data.get(ATTR_TITLE, '')\n msg_text = call.data.get(ATTR_MESSAGE,'')\n msg_type = call.data.get(ATTR_TYPE,'unkown')\n\n msg = _build_common_message(msg_title, msg_text,msg_type)\n\n json.dumps(msg, indent=2)\n res = _send_fcm_message(msg)\n if(res):\n return True\n else:\n return False\n\n hass.services.register(DOMAIN, 'android', handle_service)\n\n # Return boolean to indicate that initialization was successfully.\n return True", "def setup(hass, config):\n\n\n def handle_service(call):\n \n _get_access_token()\n \n msg_title = call.data.get(ATTR_TITLE, '')\n msg_text = call.data.get(ATTR_MESSAGE,'')\n msg_type = call.data.get(ATTR_TYPE,'unkown')\n\n msg = _build_common_message(msg_title, msg_text,msg_type)\n\n json.dumps(msg, indent=2)\n res = _send_fcm_message(msg)\n if(res):\n return True\n else:\n return False\n\n hass.services.register(DOMAIN, 'android', handle_service)\n\n # Return boolean to indicate that initialization was successfully.\n return True", "def cmd_register(self, app_path=None):\n if app_path:\n app_path = os.path.abspath(app_path)\n else:\n app_path = os.getcwd()\n app_name = os.path.basename(app_path)\n rc = self.socket_command('register %s %s' % (app_name, app_path))\n return rc", "def acquire(self):\n clf = nfc.ContactlessFrontend()\n\n if clf.open('usb:{bus}:{dev}'.format(bus = self.usb_bus,\n dev = self.usb_dev)):\n print(\"dev {0} acquired successfully\".format(self.usb_target))\n self.hw_connected = True\n return True\n\n print(\"dev {0} not found\".format(self.usb_target))\n return False" ]
[ "0.7221227", "0.61894155", "0.6014742", "0.59280574", "0.5923623", "0.57182866", "0.57111996", "0.56961524", "0.56900334", "0.56577533", "0.5630222", "0.5583196", "0.5564716", "0.5501188", "0.54448515", "0.54448307", "0.5401305", "0.5356949", "0.5353707", "0.5304931", "0.5298031", "0.5284333", "0.5272044", "0.526911", "0.52525955", "0.525026", "0.52470005", "0.52267975", "0.5224401", "0.5193984", "0.5183212", "0.5182689", "0.51660913", "0.5158336", "0.5157952", "0.5156294", "0.5155603", "0.5147579", "0.5137761", "0.5128901", "0.5125507", "0.51204586", "0.5110374", "0.50983286", "0.5095968", "0.509029", "0.50856113", "0.5063515", "0.5062134", "0.50607216", "0.50596374", "0.50475883", "0.5042623", "0.5036278", "0.50360453", "0.50349", "0.50060874", "0.50048083", "0.49905834", "0.49903247", "0.49794626", "0.49776638", "0.4977245", "0.49722487", "0.49678695", "0.49626195", "0.4959246", "0.49583787", "0.49569735", "0.49529666", "0.4948931", "0.49444237", "0.49431604", "0.49417982", "0.49373105", "0.4928086", "0.49273032", "0.49222144", "0.4919251", "0.49126613", "0.49081337", "0.49050996", "0.48976928", "0.4885087", "0.4869759", "0.48657683", "0.4865517", "0.48652634", "0.4863577", "0.48632485", "0.48626274", "0.4856213", "0.48549628", "0.48541188", "0.48452127", "0.4844338", "0.48426807", "0.48426807", "0.4837496", "0.4834862" ]
0.77838093
0
Adds service to previously initialized app.
def add_service(self, service): self.app.add_service(service)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addService(self, service):\n\t\tself.services.append(service)\n\t\treturn self", "def add(self, service: AbstractService):\n self.services.append(service)", "def initService(self):", "def add_app(self):\n \n pass", "def add_service(self, zeroconf, service_type, name):\n self.pending.add(\n asyncio.ensure_future(self._internal_add(zeroconf, service_type, name))\n )", "def add_service(torconfig, service, port=None):\n # picks a random port until it finds one avaible.\n while not service.tcp:\n port = port or new_port()\n try:\n service.tcp = reactor.listenTCP(port, service.factory)\n except error.CannotListenError:\n pass\n\n service.hs = txtorcon.HiddenService(\n torconfig, os.path.join(config.tor_data, service.name),\n ['%d 127.0.0.1:%d' % (service.port, port)])\n apaf.hiddenservices.append(service)", "def set_service(self):\n\n if self.service:\n self.service = self.service(\n json=self.json,\n google_user=self.google_user,\n endpoint=self\n )", "def _installed_apps_add(self):\n config.add_plugin(self.module_path)", "def service(self, service):\n \n self._service = service", "def _add_services(self):\n this_service = {'name': 'swift-proxy'}\n other_services = [\n {'name': 'percona-cluster'},\n {'name': 'keystone'},\n {'name': 'glance'},\n {'name': 'swift-storage'}\n ]\n super(SwiftProxyBasicDeployment, self)._add_services(this_service,\n other_services)", "def start_services(self, app_to_start):\n\n self.service_collection = service.IServiceCollection(app_to_start)\n\n amp_factory = AmpServerFactory(self)\n\n port = settings.SERVER_AMP_PORT\n amp_server = internet.TCPServer(port, amp_factory)\n amp_server.setName('dott%s' % port)\n amp_server.setServiceParent(self.service_collection)", "def register_service(self) -> None:\n strategy = cast(Strategy, self.context.strategy)\n description = strategy.get_register_service_description()\n self._register(description, \"registering agent's service on the SOEF.\")", "def addServices(self):\r\n self.addHendrix()\r\n\r\n if not self.options.get('global_cache') and not self.options.get('nocache'):\r\n self.addLocalCacheService()\r\n\r\n if self.is_secure:\r\n self.addSSLService()\r\n\r\n self.catalogServers(self.hendrix)", "def _add_services(self):\n this_service = {'name': 'keystone'}\n other_services = [\n {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}},\n {'name': 'rabbitmq-server'}, # satisfy wrkload stat\n {'name': 'cinder'},\n ]\n super(KeystoneBasicDeployment, self)._add_services(this_service,\n other_services)", "def addServiceListener(self, listener: ghidra.framework.plugintool.util.ServiceListener) -> None:\n ...", "def _registerService(self, callerId, service, serviceApi, callerApi):\n if service not in self.FilterServices:\n # The type of the service is not included in the XMLRPC call\n self.__docWriter.addService(callerId, service, \"TODO: type\")", "def add_preload_service(acc, service, chars=None, opt_chars=None):\n from pyhap.loader import get_serv_loader, get_char_loader\n service = get_serv_loader().get(service)\n if chars:\n chars = chars if isinstance(chars, list) else [chars]\n for char_name in chars:\n char = get_char_loader().get(char_name)\n service.add_characteristic(char)\n if opt_chars:\n opt_chars = opt_chars if isinstance(opt_chars, list) else [opt_chars]\n for opt_char_name in opt_chars:\n opt_char = get_char_loader().get(opt_char_name)\n service.add_opt_characteristic(opt_char)\n acc.add_service(service)\n return service", "def _add_services(self):\n # Services and relations which are present merely 
to satisfy\n # required_interfaces and workload status are not inspected.\n # Fix me. Inspect those too.\n this_service = {'name': 'neutron-openvswitch'}\n other_services = [\n {'name': 'nova-compute'},\n {'name': 'nova-cloud-controller'},\n {'name': 'rabbitmq-server'},\n {'name': 'keystone'},\n {'name': 'glance'},\n {'name': 'neutron-api'},\n self.get_percona_service_entry(),\n ]\n if self._get_openstack_release() >= self.bionic_train:\n other_services.append({'name': 'placement'})\n super(NeutronOVSBasicDeployment, self)._add_services(this_service,\n other_services)", "def RegisterService():\n hooks.RegisterHook(SERVICE_NAME, 'file-exists', hook_class=HookForExists)\n hooks.RegisterHook(SERVICE_NAME, 'file-write',\n hook_class=HookForWriteAndTouch)\n hooks.RegisterHook(SERVICE_NAME, 'file-touch',\n hook_class=HookForWriteAndTouch)\n hooks.RegisterHook(SERVICE_NAME, 'file-get', hook_class=HookForGet)\n hooks.RegisterHook(SERVICE_NAME, 'list-files', hook_class=HookForListFiles)\n hooks.RegisterHook(SERVICE_NAME, 'list-dir', hook_class=HookForListDir)", "def _add_services(self):\n this_service = {'name': '{{ metadata.package }}'}\n other_services = [\n {'name': 'mysql',\n 'location': 'cs:percona-cluster',\n 'constraints': {'mem': '3072M'}},\n {'name': 'rabbitmq-server'},\n {'name': 'keystone'},\n {'name': 'manila'}\n ]\n super(ManilaPluginCharmDeployment, self)._add_services(\n this_service, other_services)", "def addService(self, interfaceClass: java.lang.Class, service: object) -> None:\n ...", "def register_service(service, iface, name):", "def _register_services(self, pipeline):\n\n pipeline.register_service(self._aprs_service)", "async def on_terncy_svc_add(event):\n dev_id = event.data[\"dev_id\"]\n _LOGGER.info(\"found terncy service: %s %s\", dev_id, event.data)\n host = event.data[\"ip\"]\n if dev_id == tern.dev_id and not tern.is_connected():\n tern.host = host\n _LOGGER.info(\"start connection to %s %s\", dev_id, tern.host)\n\n hass.async_create_task(setup_terncy_loop())", "def service():\n conf = template('remote/addok.service', **config)\n put(conf, '/etc/systemd/system/addok.service')\n systemctl('enable addok.service')", "def register_service(self, name, command):\n service_name = command['service_name']\n try:\n service_type = self.get_interface_type(command['interface_type'], '.srv')\n self.srv_clients[service_name] = self.AsyncServiceProxy(\n self,\n service_name,\n service_type)\n\n if service_name in self.offline_services:\n self.offline_services.remove(service_name)\n except JoyTeleopException:\n if service_name not in self.offline_services:\n self.offline_services.append(service_name)", "def _init_service(self):\n self.robot_variables.check_variables()\n # setting launch id for report portal service\n self.robot_service.init_service(endpoint=self.robot_variables.endpoint,\n project=self.robot_variables.project,\n uuid=self.robot_variables.uuid)", "def add_service(self, service):\n # type: (LoadBalancerService) -> List[BoundAction]\n return self._client.add_service(self, service=service)", "def _register_service(self, extkey, extcls):\n if extkey not in self._service_registry:\n self._service_registry[extkey] = extcls\n else:\n self._service_registry[extkey] = extcls\n return", "def appMgr( *varg , **kwarg ) :\n import GaudiPython.Bindings\n _g = GaudiPython.Bindings.AppMgr()\n if not 'LoKiSvc' in _g.ExtSvc :\n logger.debug ('appMgr: add LoKiSvc into the list of services')\n _g.ExtSvc += [ 'LoKiSvc']\n return _g", "def enable(self):\n self._installed_apps_add()", "def 
init_app(self, app):\n try:\n # Assume this is a blueprint and defer initialization\n if app._got_registered_once is True:\n raise ValueError(\"\"\"Blueprint is already registered with an app.\"\"\")\n app.record(self._deferred_blueprint_init)\n except AttributeError:\n self._init_app(app)\n else:\n self.blueprint = app", "def reset_services():\n service_init = ServiceContainerInit()\n service_init.notify_listeners()", "def start_as_service(self):\n from ..program_manager import ProgramManager\n send_action(ProgramManager.NAME, 'start', self.name)", "def service(self):\n pass", "def register_service(self, service, name):\n assert service._remote_service, \"Services should be decorated correctly.\"\n \n prepare_remote_service(service)\n self._services[name] = service", "def add_app(self, app_name):\n self.add_list_setting('applications', 'installed_apps', app_name)", "def on_startup(self) -> None:\n ...", "def add_runtime(tm_env, manifest):\n _transform_services(manifest)\n\n app_manifest.add_linux_system_services(tm_env, manifest)\n app_manifest.add_linux_services(manifest)", "def load_services(service_store):\n service_store.register_service(GetDrugStoreService)\n service_store.register_service(FuelLevelService)\n service_store.register_service(SetFuelLevelService)\n service_store.register_service(GetRobotPosition)\n service_store.register_service(SetRobotPosition)", "def init_app(self, app):\n # Avoid double initialization.\n if self._flask_app is app:\n return None\n if self._flask_app is not None:\n raise RuntimeError(\n \"This api has already been registered on a flask application.\"\n )\n\n self._flask_app = app\n\n # Add the url rule.\n app.add_url_rule(\n rule=self._uri + \"/<path:path>\",\n endpoint=\"jsonapi\",\n view_func=self.handle_request,\n methods=[\"get\", \"post\", \"patch\", \"delete\", \"head\"]\n )\n\n # Register the jsonapi extension on the flask application.\n app.extensions = getattr(app, \"extensions\", dict())\n app.extensions[\"jsonapi\"] = self\n\n # Add the api to the jinja environment\n app.jinja_env.globals[\"jsonapi\"] = current_api\n return None", "def load(self):\n\n if self.getStatus() not in (Application.MISCONFIGURED, Application.STOPPED, Application.CRASHED):\n raise AppErrLoaded()\n \n old=signal.signal(signal.SIGINT, self.sighandler )\n old=signal.signal(signal.SIGTERM, self.sighandler )\n #signal.signal(signal.SIGCHLD, self.onSigChild)\n signal.signal(signal.SIGHUP, self.onSigHUP)\n # todo: it wolud be nice if we restored theses signals when\n # the App is done.\n\n\n try:\n self.switch_user()\n self.switch_group()\n\n # Here's were we'd load up the config file, the core services,\n # any extensions and register this object as a service, fire\n # off the Initilization event to tell the components that\n # registration is complete and finally signal our controlling\n # app (if any) that we've open for business.\n\n compReg = self.componentRegistry\n\n # this add's the ability to read services from a descriptor file\n compReg.addComponent(\"ServiceHandler\", ServiceHandler())\n compReg.addComponent(\"Component\", Component)\n\n\n # these add the ability to read sessions and register them\n # globaly from descriptors.\n \n compReg.addComponent(\"SessionRegistry\", SessionRegistry())\n\n # Bind the core service to each other\n compReg.bind()\n self.eventChannel.registerEvent(\"Initializing\", self, str)\n self.eventChannel.registerEvent(\"Shutdown\", self, str)\n\n\n\n options = {}\n for extension in self.config.extensions:\n options[extension.name] = 
extension.options\n\n self.configService.addConfigSource(DictConfigSource(options))\n\n compReg.bind()\n\n try:\n # we need a transaction service before continuing, right\n # now the real txnservice is in our corba extension,\n # so.. if the corba bridge wasn't loaded we need to use an\n # in memory one\n\n compReg.lookup(\"TransactionService\")\n except KeyError:\n from Rambler.LocalTXNService import LocalTXNService\n compReg.addComponent(\"TransactionService\", LocalTXNService())\n compReg.bind()\n\n if len(self.componentRegistry.needsBinding):\n raise RuntimeError, \"Server could not start because\\n\" +\\\n self.componentRegistry.analyzeFailures()\n\n self.txn.set_timeout(0)\n self.txn.begin()\n \n self.eventChannel.publishEvent(\"Initializing\", self, self.txn.get_transaction_name())\n self.txn.commit(0)\n self.txn.set_timeout(60)\n \n self.setStatus(Application.STARTED)\n\n\n except AppMisconfigured:\n # We don't want to report this as a crash, or do we?...\n raise\n except Exception:\n \n # We got an error while loading, notify our controller\n # with it then reraise the exception to kill this\n # application\n \n msg = \"\".join(traceback.format_exception(*sys.exc_info()))\n self.setStatus(Application.CRASHED, msg)\n raise", "def services(self, services):\n\n self._services = services", "def services(self, services):\n\n self._services = services", "def service(self, service: IBMExperimentService) -> None:\n self._set_service(service)", "def startService(self):\n self.world.start()", "def init_app(self, app=None):\n app.extensions[\"orders_controller\"] = self", "def init_services(self):\n service_prefix = rospy.get_name() + \"/\"\n\n self._request_components_serv = rospy.Service(service_prefix +\n 'list_components',\n ListComponents,\n self.get_components)\n self._request_fields_serv = rospy.Service(service_prefix +\n 'list_fields',\n ListFields,\n self.get_fields)\n self._request_values_serv = rospy.Service(service_prefix +\n 'request_values',\n RequestValues,\n self.get_values)\n self._unsubscribe_values_serv = rospy.Service(service_prefix +\n 'unsubscribe_values',\n UnsubscribeValues,\n self.unsubscribe_values)", "def offerService(self, serviceName):\n service = Service(serviceName)\n self.serviceTable[serviceName] = service\n return service", "def init_app(self, app):\n self.app = app\n # Follow the Flask guidelines on usage of app.extensions\n if not hasattr(app, 'extensions'):\n app.extensions = {}\n if 'sso' in app.extensions:\n raise RuntimeError(\"Flask application already initialized\")\n app.extensions['sso'] = self\n\n # Set default configuration\n app.config.setdefault('SSO_LOGIN_URL', config.SSO_LOGIN_URL)\n app.config.setdefault('SSO_LOGIN_ENDPOINT', config.SSO_LOGIN_ENDPOINT)\n app.config.setdefault('SSO_ATTRIBUTE_MAP', config.SSO_ATTRIBUTE_MAP)\n\n app.add_url_rule(app.config.get('SSO_LOGIN_URL'),\n app.config.get('SSO_LOGIN_ENDPOINT'),\n self.login)", "def activate(self):\n if not self._env.enable_registration:\n return\n legacy_key = '{}:{}'.format(self._env.flask_host, self._env.flask_port)\n self._key = self._env.get('my_ident', legacy_key, 'microservice')\n LoopingCall(self.ping).start(5, now=False)", "def set_service(service_name, reference):\n Container.services[service_name] = reference", "def init(app):\n from sirepo import feature_config\n from sirepo import simulation_db\n\n if _uri_to_route:\n return\n global _app\n _app = app\n for n in _REQUIRED_MODULES + feature_config.cfg.api_modules:\n register_api_module(importlib.import_module('sirepo.' 
+ n))\n _init_uris(app, simulation_db)", "def startService(self):\n self.addStep(self)\n self.stepper.start()", "def register_service_agent(cm, sc, conf, rpcmgr):\n\n service_type = lb_const.SERVICE_TYPE\n cm.register_service_agent(service_type, rpcmgr)", "def _set_service(self, service: IBMExperimentService, replace: bool = None) -> None:\n if self._service and not replace:\n raise ExperimentDataError(\"An experiment service is already being used.\")\n self._service = service\n for result in self._analysis_results.values():\n result.service = service\n with contextlib.suppress(Exception):\n self.auto_save = self._service.options.get(\"auto_save\", False)\n for data in self.child_data():\n data._set_service(service)", "def add_services(self):\n # first get the names\n names = str(self.client.console_execute('services -c name {0}\\n'.format(self.ip))[b'data'])\n while not 'name' in names:\n sleep(10)\n names = self.client.console_read()\n names = names.split('\\n')\n for row in names:\n if self.ip in row:\n row = strip_whitespaces(row)\n self.services.append({'name': row.split(' ')[1]})\n\n # get the ports by service name\n ports = str(self.client.console_execute('services -c port {0}\\n'.format(self.ip))[b'data'])\n while not 'port' in ports:\n sleep(10)\n ports = self.client.console_read()\n ports = ports.split('\\n')\n for row in ports:\n for service in self.services:\n if service['name'] in row:\n row = strip_whitespaces(row)\n service['port'] = row.split(' ')[1]\n\n # get some information by service name (only useful if a report shall be generated)\n info = str(self.client.console_execute('services -c info {0}\\n'.format(self.ip))[b'data'])\n while not 'info' in info:\n sleep(10)\n info = self.client.console_read()\n info = info.split('\\n')\n for row in info:\n for service in self.services:\n if service['name'] in row:\n row = strip_whitespaces(row)\n service['info'] = row.split(' ')[1]", "def init_app(self, app):\n self.app = app\n log.info(\"Flask application has been initialized in Flask-PAM!\")", "def service_client_initialization(self) -> global___Snippet.ClientInitialization:", "def initialize_service(self,wrapper=None,message=None):\n if wrapper is not None:\n name = wrapper.name\n remap = wrapper.remap\n elif message is not None:\n name = message.get(\"service\")\n remap = message.get(\"_remap\")\n \n self.setup_service_remaps(name,remap)", "def register_service(self, service):\n for message_handler in service.iter_message_handlers():\n self.message_handlers[message_handler.name] = message_handler", "def start( self ):\n\n self.service()", "def register(default_opts, default_opts_help, ServiceClass, serviceName):\n # Set any options needed\n options = opts.parse(default_opts, default_opts_help)\n swarmService = ServiceClass()\n swarmService.password = options[\"sPass\"]\n swarmService.keys(options[\"sPublic\"], options[\"sPrivate\"])\n # Start the server\n if options[\"sStart\"]:\n thread.start_new_thread(swarmService.start, ())\n # Give it time to start the swarm server so we can register\n # This is a problem because theres only one server in this case\n time.sleep(0.1)\n swarmService.getKey(host=options[\"sHost\"], port=options[\"sPort\"])\n swarmService.registerWith(serviceName, password=options[\"sPass\"], \\\n host=options[\"sHost\"], port=options[\"sPort\"])\n return swarmService", "def init_app(self, app):\n # Avoid double initialization\n if self._tornado_app is app:\n return None\n if self._tornado_app is not None:\n raise RuntimeError(\n \"This API has already been 
registered on a tornado application.\"\n )\n\n self._tornado_app = app\n app.settings[\"jsonapi\"] = self\n\n # Add the handler.\n url_rule = tornado.web.url(\n self.uri + \"/.*\", Handler, dict(jsonapi=self), name=\"jsonapi\"\n )\n app.add_handlers(\".*\", [url_rule])\n return None", "def add(self, application):\n\n if (application.add() is None):\n self.__logger.debug(\"Adding application %s to list\" % application)\n self.__applicationList[application.application_name] = application\n return None\n else:\n return 1", "def service(self):\n self.serviceConnects()\n self.serviceQueries()", "def add_service(self, type_or_name: str | type, value: Any):\n self._add(type_or_name, value)", "def service_app_factory(global_conf, **local_conf):\n conf = global_conf.copy()\n conf.update(local_conf)\n return ServiceApi(conf)", "def init_app(app):\n request_started.connect(update_user_active_at, app)", "def perform_setup(self, services):\n pass", "def app(self, app):\n\n self._app = app", "def setupSERVICES():\n services = Services()\n services.rest = setupREST()\n\n return services", "def start_service(self, service):\n host.service(\"start\", service)\n host.service(\"enable\", service)\n return host.service_running(service)", "def on_start(self):\n App.on_start(self)\n self.root.register()", "async def set_appservice_state(\n self, service: ApplicationService, state: ApplicationServiceState\n ) -> None:\n await self.db_pool.simple_upsert(\n \"application_services_state\", {\"as_id\": service.id}, {\"state\": state.value}\n )", "def _remember_service_name(self, event):\n service_name = event[\"arguments\"][\"service_name\"]\n # We've added logging of the service_handle to the API signature in\n # the Monitor, but for backwards compatibility we'll keep it as\n # follows for now.\n service_handle = \"0x%08x\" % event[\"return_value\"]\n self.services[service_handle] = service_name", "def initialize(self, application):", "def register_service(self, service: str, cb: Callable, **kwargs: Optional[Any]) -> None:\n self._check_service(service)\n d, s = service.split(\"/\")\n self.logger.debug(\"register_service: %s/%s, %s\", d, s, kwargs)\n\n namespace = self._get_namespace(**kwargs)\n\n if \"namespace\" in kwargs:\n del kwargs[\"namespace\"]\n\n kwargs[\"__name\"] = self.name\n\n self.AD.services.register_service(namespace, d, s, cb, __async=\"auto\", **kwargs)", "def init_app(self, app):\r\n\r\n app.config.setdefault('REDIS_URLS', {\r\n 'main': 'redis://localhost:6379/0',\r\n 'admin': 'redis://localhost:6379/1',\r\n })\r\n\r\n app.before_request(self.before_request)\r\n\r\n self.app = app", "def test_add_preload_service(mock_serv, mock_char):\n acc = MockAccessory('Accessory')\n serv = add_preload_service(acc, 'TestService',\n ['TestChar', 'TestChar2'],\n ['TestOptChar', 'TestOptChar2'])\n\n assert serv.display_name == 'TestService'\n assert len(serv.characteristics) == 2\n assert len(serv.opt_characteristics) == 2\n\n acc.services = []\n serv = add_preload_service(acc, 'TestService')\n\n assert not serv.characteristics\n assert not serv.opt_characteristics\n\n acc.services = []\n serv = add_preload_service(acc, 'TestService',\n 'TestChar', 'TestOptChar')\n\n assert len(serv.characteristics) == 1\n assert len(serv.opt_characteristics) == 1\n\n assert serv.characteristics[0].display_name == 'TestChar'\n assert serv.opt_characteristics[0].display_name == 'TestOptChar'", "def add_to_server(self, server):\n \n annotation_pb2_grpc.add_NlpServiceServicer_to_server(self, server)", "def 
test_add_virtual_service(self):\n pass", "def start_services(torconfig):\n for service in config.custom['services']:\n # load service\n try:\n service_mod = imp.load_module(\n service, *imp.find_module(service, [config.services_dir]))\n except ImportError:\n return log.err('Cannot import service %s' % service)\n except Exception as e:\n traceback.print_tb(sys.exc_info()[2])\n return log.err('Error loading service %s -\\n %s' % (service, e))\n\n service = getattr(service_mod, 'ServiceDescriptor', None)\n if not service:\n log.err('Unable to find class Service in ', repr(service_mod))\n continue\n\n # create hidden service\n add_service(torconfig, service())", "def add_service(project, env_spec_name, service_type, variable_name=None):\n failed = _check_problems(project)\n if failed is not None:\n return failed\n\n known_types = project.plugin_registry.list_service_types()\n found = None\n for known in known_types:\n if known.name == service_type:\n found = known\n break\n\n if found is None:\n return SimpleStatus(success=False,\n description=\"Unable to add service.\",\n errors=[\n \"Unknown service type '%s', we know about: %s\" %\n (service_type, \", \".join(map(lambda s: s.name, known_types)))\n ])\n\n if variable_name is None:\n variable_name = found.default_variable\n\n assert len(known_types) == 1 # when this fails, see change needed in the loop below\n\n requirement_already_exists = False\n existing_requirements = project.find_requirements(project.default_env_spec_name, env_var=variable_name)\n if len(existing_requirements) > 0:\n requirement = existing_requirements[0]\n if isinstance(requirement, ServiceRequirement):\n assert requirement.service_type == service_type\n # when the above assertion fails, add the second known type besides\n # redis in test_project_ops.py::test_add_service_already_exists_with_different_type\n # and then uncomment the below code.\n # if requirement.service_type != service_type:\n # return SimpleStatus(success=False, description=\"Unable to add service.\",\n # errors=[\"Service %s already exists but with type '%s'\" %\n # (variable_name, requirement.service_type)])\n # else:\n requirement_already_exists = True\n else:\n return SimpleStatus(success=False,\n description=\"Unable to add service.\",\n errors=[\"Variable %s is already in use.\" % variable_name])\n\n if not requirement_already_exists:\n project.project_file.set_value(_path_to_service(env_spec_name, variable_name), service_type)\n\n return _commit_requirement_if_it_works(project, variable_name, env_spec_name=env_spec_name)", "def pibooth_startup(cfg, app):", "def register(self):\n if self.registered:\n return\n\n config = current_app.config.get('TERMINAL_CONFIGS', {})\n apps = config.get('apps', [])\n\n for app in apps:\n cls, mod = app.rsplit('.', maxsplit=1)\n imported = import_module(cls)\n instance = getattr(imported, mod)()\n\n if getattr(instance, 'name', None) is None:\n continue\n\n if getattr(instance, 'hidden', False):\n self.hidden[getattr(instance, 'name')] = instance\n else:\n self.apps[getattr(instance, 'name')] = instance\n\n self.__set_apps_aliases(getattr(instance, 'name'), getattr(instance, 'aliases'))\n\n self.registered = True", "def service_code(self, service_code):\n \n self._service_code = service_code", "def __context_init(self):\n self._context.data[\"services\"] = copy.deepcopy(INITIAL_SRVDATA)", "def external_controller_services(self, external_controller_services):\n\n self._external_controller_services = external_controller_services", "def init_app(self, app):\n\n 
self.app = app\n self.app.apscheduler = self\n\n self._load_config()\n if self.api_enabled:\n self._load_api()", "def _load_services(self) -> None:\n # load default services\n self.service_errors = ServiceManager.load_locals()\n # load custom services\n service_paths = self.config.get(\"custom_services_dir\")\n logger.debug(\"custom service paths: %s\", service_paths)\n if service_paths is not None:\n for service_path in service_paths.split(\",\"):\n service_path = Path(service_path.strip())\n custom_service_errors = ServiceManager.add_services(service_path)\n self.service_errors.extend(custom_service_errors)\n # load default config services\n self.service_manager.load_locals()\n # load custom config services\n custom_dir = self.config.get(\"custom_config_services_dir\")\n if custom_dir is not None:\n custom_dir = Path(custom_dir)\n self.service_manager.load(custom_dir)", "def setup_application(self):\n pass", "def _load_default_connected_app(self):\n if \"connected_app\" not in self.config[\"services\"]:\n self.config[\"services\"][\"connected_app\"] = {}\n self.config[\"services\"][\"connected_app\"][\n DEFAULT_CONNECTED_APP_NAME\n ] = DEFAULT_CONNECTED_APP", "def exposed_services(self, exposed_services):\n\n self._exposed_services = exposed_services", "def enable_service(service_name, start_type='auto'):\n run_program(['sc', 'config', service_name, 'start=', start_type])", "def __init__(self, service_name):\n self.service_name = service_name", "def turn_on_service(service):\n # We could turn on script directly here, but we only want to offer\n # one way to do it. Otherwise no easy way to call invocations.\n for script in component.extract_from_service(service):\n turn_on(hass, script.entity_id)", "def connect(self, service, handler):\n self.partyline.setdefault(service, []).append(handler)", "def get_service(self):", "def __init__(self):\n dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)\n self.bus = dbus.SystemBus()\n self.adapter = self._find_adapter()\n if not self.adapter:\n IFaceNotFoundException('%s interface not found' % GATT_MANAGER_IFACE)\n self.service_manager = dbus.Interface(\n self.bus.get_object(BLUEZ_SERVICE_NAME, self.adapter),\n GATT_MANAGER_IFACE)\n\n self.mainloop = GObject.MainLoop()\n self.ctx = GattContext(self.bus, self.mainloop)\n self.app = Application(self.ctx)\n\n #print('Registering GATT application...')\n self.service_manager.RegisterApplication(self.app.get_path(), {},\n reply_handler=register_app_cb,\n error_handler=register_app_error_cb)" ]
[ "0.70407766", "0.6967143", "0.6634693", "0.6463807", "0.6374438", "0.628673", "0.62649804", "0.6252654", "0.62386537", "0.62090975", "0.61053437", "0.6091435", "0.605257", "0.6042793", "0.60172504", "0.599833", "0.599623", "0.59924555", "0.5940163", "0.58883333", "0.5873045", "0.5859798", "0.5848243", "0.5843242", "0.5801694", "0.5795095", "0.5783658", "0.57812464", "0.57582015", "0.57482976", "0.5737236", "0.5730116", "0.57255745", "0.5723923", "0.5712023", "0.5710847", "0.5702602", "0.5693007", "0.5691783", "0.56779194", "0.565508", "0.5654882", "0.5633166", "0.5633166", "0.56222117", "0.56159115", "0.5559681", "0.55564225", "0.55518746", "0.55428606", "0.5530208", "0.55229795", "0.5521037", "0.55147016", "0.5513711", "0.5513163", "0.55016464", "0.54895675", "0.54888076", "0.5475022", "0.54524744", "0.54233927", "0.5405879", "0.5395743", "0.53920746", "0.5388692", "0.53830993", "0.537165", "0.5363986", "0.53612256", "0.535469", "0.53515005", "0.5325188", "0.5320042", "0.5312856", "0.53084934", "0.5307232", "0.5305411", "0.5304194", "0.5303845", "0.5303345", "0.5302961", "0.5299582", "0.52886605", "0.52883786", "0.52778345", "0.5277583", "0.5270143", "0.52649504", "0.52643263", "0.52633613", "0.5262742", "0.5260207", "0.52303696", "0.522595", "0.5222926", "0.52204025", "0.5208195", "0.51976067", "0.51963574" ]
0.8112667
0
Return today's date (UTC) formatted as YYYYMMDD.
def _today() -> str:
    return strftime(DATE_FORMAT, gmtime())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_date_today() -> str:\n return datetime.now().strftime(\"%Y-%m-%d\")", "def today():\n today_object = datetime.utcnow()\n today_string = today_object.strftime('%m/%d/%Y')\n return today_string", "def utc_today_str():\n return datetime.datetime.strftime(datetime.datetime.utcnow(), \"%Y-%m-%d\")", "def get_date():\n now = datetime.now()\n date = now.strftime(\"%Y%m%d\")\n return date", "def todaystr():\n today = datetime.datetime.today()\n return f\"{today.year}{today.month:02}{today.day:02}\"", "def get_date():\n dt = datetime.now()\n return dt.strftime(\"%Y-%m-%d\")", "def todayDate(self):\n return time.strftime(\"%m/%d/%Y\", time.localtime())", "def get_current_date():\n return datetime.datetime.today().strftime(constants.DATE_FORMAT)", "def formalDateToday():\n return dt.date.today().strftime(\"%B %d, %Y\")", "def date_now():\n return datetime.today().strftime('%c')", "def get_today(self):\n # using now() to get current time\n current_time = datetime.datetime.now()\n day = str(current_time.day)\n month = str(current_time.month)\n\n day = self.check_and_repair_right_format(day)\n month = self.check_and_repair_right_format(month)\n\n return str(current_time.year) + month + day", "def today(self):\n return(datetime.date.today().isoformat())", "def date() -> str:\n\n return datetime.strftime(datetime.today(), _fmt)", "def getDate():\n current_time = datetime.datetime.now()\n day = current_time.day\n month = current_time.month\n year = current_time.year\n date = \"{dd}-{mm}-{yyyy}\".format(dd=day,mm=month,yyyy=year)\n return date", "def _today() -> datetime.date:\n return datetime.today().date()", "def get_datecode():\n now = datetime.utcnow()\n return now.strftime(\"%Y%m%d\")", "def get_gds_current_date(self, remove_leading_zero='true'):\r\n time_now = datetime.datetime.now().time()\r\n today_2pm = time_now.replace(hour=14, minute=31, second=0, microsecond=0)\r\n if time_now < today_2pm:\r\n gds_date = datetime.datetime.now() - datetime.timedelta(days=int(1))\r\n else:\r\n gds_date = datetime.datetime.now()\r\n\r\n if remove_leading_zero.lower() == 'true':\r\n return str('{dt.day}{dt:%b}'.format(dt=gds_date).upper())\r\n else:\r\n return self._set_gds_date_format(gds_date)", "def get_today_date():\n return date.today()", "def today():\n today = datetime.utcnow() \n return datetime(today.year, today.month, today.day)", "def get_today() -> datetime.date:\n return datetime.date.today()", "def today(cls):\n return date()", "def get_date():\n return datetime.now().strftime(\"%c\")", "def get_current_date(fmt=\"%Y-%m-%d\"):\n return datetime.datetime.now().strftime(fmt)", "def actual_date():\n actual_date = datetime.now()\n return str(actual_date.day) + '-' + str(actual_date.month) + '-' + str(actual_date.year)", "def get_date():\n\n return datetime.datetime.utcnow().isoformat()", "def get_todays_date(self):\r\n \r\n date=str(dt.datetime.today())\r\n raw_date=date.split(\" \")[0]\r\n Day=raw_date.split(\"-\")[-1]\r\n Month=raw_date.split(\"-\")[-2]\r\n Year=raw_date.split(\"-\")[-3]\r\n todays_date=Day+\"-\"+Month+\"-\"+Year\r\n return todays_date", "def createTimeStamp_Date():\r\n\r\n return str(datetime.now().strftime(\"%Y%m%d\"))", "def get_date_str(bias=0):\n today = datetime.datetime.today() # 獲得今天的日期\n date = (today + datetime.timedelta(days=bias)).strftime(\"%m/%d\") # 格式化日期\n return ' ' + date[1:] if date[0] == '0' else date # 把0換成空白", "def least_current_date():\n # This is not the right way to do it, timezones can change\n # at the time of writing, Baker Island observes UTC-12\n 
return datetime.now(timezone(timedelta(hours=-12))).strftime(\"%Y-%m-%d\")", "def current_day():\n now = pytz.timezone('America/Los_Angeles').localize(datetime.now())\n return now.strftime('%m/%d')", "def today():\n return datetime.today()", "def get_date(self):\n return self.date.strftime(\"%a %x\")", "def today():\n return date.today()", "def calculate_date(x, now):\n\t#now = datetime.datetime.now()\n\tn = int(extract_only_number(x))\n\tif n > 0:\n\t\treturn (now - datetime.timedelta(n)).strftime(\"%d-%m-%Y\")\n\treturn now.strftime(\"%d-%m-%Y\")", "def get_today():\n return datetime.today()", "def get_date_DM(): \n \n now = date.datetime.now()\n date_DM = str(now.day)+'_'+str(now.month)+'/' \n return date_DM", "def today_string(fmt='%Y-%m-%d'):\n return brasilia_time().strftime(fmt)", "def get_date(format_of_date):\n current_date = datetime.datetime.today().strftime(format_of_date) # \"%d%m%Y\"\n return current_date", "def timestamp(): \n timestamp = datetime.today().strftime('%Y-%m-%d')\n \n return timestamp", "def _getCurrentDateString(self):\n currentDateTime = datetime.now()\n return currentDateTime.strftime(\"%Y%m%d_%H%M\")", "def get_fecha_actual():\n hoy = datetime.datetime.now()\n fecha_actual = hoy.strftime(\"%d-%m-%Y\")\n return fecha_actual", "def default_date(self):\n return datetime.datetime.now().strftime('%Y-%m-%d')", "def get_day_today() -> str:\n day = datetime.now().strftime(\"%w\")\n if day == '0': # Sunday\n return '6'\n elif day == '6': # Saturday\n return '5'\n elif day == '1': # Monday\n return '0'\n elif day == '2': # Tuesday\n return '1'\n elif day == '3': # Wednesday\n return '2'\n elif day == '4': # Thursday\n return '3'\n elif day == '5': # Friday\n return '4'", "def get_date():\n return str(datetime.now()).split(' ')[0]", "def current_date_time_stamp():\n return datetime.now().strftime('%Y.%m.%d %H:%M:%S.%f')[:-7]", "def current_valid_date(self):\r\n return datetime.datetime.now().strftime('%Y-%m-%d')", "def getdate():\r\n import datetime\r\n return datetime.datetime.now()", "def today(cls):\n timestamp = time.localtime()\n return Date(timestamp[0], timestamp[1], timestamp[3], timestamp[6], timestamp[7])", "def get_today(**kwargs: int) -> Date:\n return Date.today().replace(**kwargs)", "def get_date_time():\n now = datetime.datetime.now()\n month = str(now.month) if now.month > 9 else '0' + str(now.month)\n day = str(now.day) if now.day > 9 else '0' + str(now.day)\n date = ''.join(str(t) for t in [now.year, month, day, now.time().hour])\n return date", "def TODAY():\n return datetime.date.today()", "def date_printer():\n curr_date = datetime.date.today()\n\n print(format_date(curr_date))", "def get_date():\n now=datetime.now()\n s=\"%s%s%s\" % (now.year, str(now.month).zfill(2), str(now.day).zfill(2))\n return (now, s)", "def now():\n return datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')", "def get_date():\n return datetime.datetime.now()", "def get_current_day():\n current_day = datetime.now().strftime('%A').lower()\n return current_day", "def get_current_datetime_string ( ) :\n return get_current_datetime( ).strftime( \"%Y%m%d-%H%M%S\" )", "def dtstr():\n return dt.strftime(dt.now(),'%Y %m %d, %H:%M:%S')", "def now_datetime():\n now = datetime.datetime.now()\n return now.strftime('%Y%m%d%H%M%S')", "def __get_settlement_date():\n day_after_tomorrow = datetime.now(timezone.utc).date() + \\\n timedelta(days=2)\n settlement_date = day_after_tomorrow.strftime(\"%Y%m%d\")\n\n return settlement_date", "def get_today_utc():\n return 
pytz.utc.localize(datetime.datetime.utcnow()).replace(\n hour=0, minute=0, second=0, microsecond=0\n )", "def get_today_utc():\n return pytz.utc.localize(datetime.datetime.utcnow()).replace(\n hour=0, minute=0, second=0, microsecond=0\n )", "def get_date_display(self, context):\n return '{year}/{month}/{day}'.format(\n year=self.get_year(),\n month=self.get_month().zfill(2),\n day=self.get_day().zfill(2))", "def iso_date(self):\n return self.strftime(self.FORMAT_PRECISION_DAY)", "def get_date():\n return (datetime.now() - TIMEDELTA).isoformat()", "async def date(self) -> dt.date:\n now = await self.AD.sched.get_now()\n return now.astimezone(self.AD.tz).date()", "def current_datetime(self):\n return DateAccessor().today()", "def get_date_hour_today() -> str:\n return datetime.now().strftime(\"%Y-%m-%dT%H\")", "def date_stamp():\n return datetime.fromtimestamp(time()).strftime('%Y.%m.%d')", "def _create_date(self):\n return strftime(\"%a, %d %b %Y %H:%M:%S GMT\", gmtime())", "def currentUTC():\n return str(datetime.utcnow())", "def render_date(dt):\n return dt.strftime('%Y-%m-%d')", "def currentDay(self):\n day = datetime.datetime.today().day\n return day", "def first_month_day():\r\n return datetime.now().replace(day=1).strftime('%d-%m-%Y')", "def get_date(fmt=\"%Y-%m-%d\"):\n now = datetime.datetime.now().replace(tzinfo=IST)\n\n return now.strftime(fmt)", "def TODAY() -> func_xltypes.XlDateTime:\n date_and_time = now()\n date = date_and_time.replace(\n hour=0, minute=0, second=0, microsecond=0)\n return utils.datetime_to_number(date)", "def get_date_time():\n date_time = datetime.now()\n date_time_string = date_time.strftime(\"%b-%d-%Y (%H:%M:%S)\")\n return date_time_string", "def get_now():\r\n now = dt.datetime.now()\r\n now_str = now.strftime(\"%d/%m %H:%M\")\r\n return now_str", "def nowdt():\n from datetime import datetime\n\n now = datetime.now()\n return now.strftime(\"%d/%m/%Y %H:%M:%S\")", "def date_to_final_str(date_obj: datetime) -> str:\n return date_obj.strftime(\"%Y-%m-%d\")", "def get_now():\n\treturn datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")", "def _get_date():\n return datetime.datetime.now()", "def now():\n return time.strftime(\"%Y_%m_%d_%H_%M_%S\")", "def qToday():\n \n return _qDate.todaysDate().ISO()", "def largest_current_date():\n # This is not the right way to do it, timezones can change\n # at the time of writing, Samoa observes UTC+14 in Summer\n return datetime.now(timezone(timedelta(hours=14))).strftime(\"%Y-%m-%d\")", "def get_datetime_string():\n return datetime.now().strftime(DATETIME_FORMAT)", "def get_current_day() -> int:\n return datetime.now().day", "def first_day_of_month():\n first_object = datetime.utcnow()\n first_string = first_object.strftime('%m/01/%Y')\n return first_string", "def static_now():\n return datetime.datetime(2000, 9, 4).replace(tzinfo=timezone.utc)", "def format_date(value: int) -> str:\n\n return (datetime(1970, 1, 1) + timedelta(milliseconds=value)).strftime('%Y%m%d')", "def get_date():\n\n return tz.get_brisbane_time().date()", "def date_string(date):\n day = date.day\n month = date.month\n year = date.year\n formatted_string = str(month) + \"/\"\n formatted_string += str(day) + \"/\"\n formatted_string += str(year)\n return formatted_string", "def _create_time_stamp() -> str:\n\n return datetime.datetime.now().strftime(\"%Y%m%d\")", "def convert_datetime_to_string(dt):\n\treturn '{0}-{1}-{2}'.format(dt.year, dt.month, dt.day)", "def convert_datetime_to_string(dt):\n\treturn '{0}-{1}-{2}'.format(dt.year, dt.month, 
dt.day)", "def get_date(self):\n return datetime.date(\n int(self.kwargs['year']),\n int(self.kwargs['month']),\n int(self.kwargs['day'])\n )", "def now():\n now = datetime.datetime.now()\n return \"%04d-%02d-%02d %02d:%02d:%02d.%03d\" % ( now.year, now.month,now.day,\n now.hour,now.minute,now.second,int(now.microsecond/1e3))", "def today(cls, **kwargs: Any) -> Date:\n return cls.from_date(dt.date.today())", "def timestamp():\n my_date_object = datetime.utcnow()\n my_date_string = my_date_object.strftime('%d-%m-%Y %H:%M:%S')\n return my_date_string", "def get_template_formatted_date(date):\n return date.strftime(str(month(date.month)) + ' ' + str(date.day) + ', %Y')" ]
[ "0.8165175", "0.77760684", "0.77601326", "0.77202994", "0.76923996", "0.7595528", "0.7585238", "0.7497268", "0.7361167", "0.7340656", "0.73232466", "0.7306702", "0.72497416", "0.7205314", "0.71836877", "0.71293193", "0.7124812", "0.70916235", "0.7083198", "0.7062737", "0.70110285", "0.69925165", "0.69904625", "0.6980833", "0.69388855", "0.691354", "0.69106877", "0.68715036", "0.68599504", "0.6815337", "0.67586267", "0.67573524", "0.6752105", "0.674455", "0.6731827", "0.6730538", "0.67123765", "0.6698561", "0.66869706", "0.66248953", "0.6604416", "0.6596782", "0.6594801", "0.65899", "0.6549776", "0.6537292", "0.65366447", "0.65276784", "0.64839727", "0.64702237", "0.6469009", "0.64672846", "0.6454735", "0.64345926", "0.6431781", "0.6423794", "0.64199024", "0.64118063", "0.6392695", "0.6364726", "0.6360009", "0.6360009", "0.63424855", "0.6334312", "0.6330589", "0.6327281", "0.6325215", "0.63243496", "0.6321921", "0.6321409", "0.6309814", "0.63030523", "0.6298867", "0.628542", "0.6257071", "0.6232304", "0.62210995", "0.6218348", "0.6205837", "0.61972266", "0.6173461", "0.61666113", "0.6125845", "0.61253166", "0.61229044", "0.6106283", "0.61057913", "0.60978246", "0.60918176", "0.6088576", "0.6067302", "0.60548466", "0.60474557", "0.6039277", "0.6039277", "0.60272145", "0.601959", "0.600934", "0.600873", "0.60051197" ]
0.7295072
12
Return the date (UTC) from 10 days ago formatted as YYYYMMDD.
def _ten_days_ago() -> str:
    ten_days_ago = gmtime(mktime(gmtime()) - TEN_DAYS_SECONDS)
    return strftime(DATE_FORMAT, ten_days_ago)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ago(self):\n return human(self.timestamp/1000.0, precision=1, abbreviate=True)", "def relativeTime(date):\n diff = datetime.utcnow() - date\n\n if diff.days > 7 or diff.days < 0:\n return date.ctime()\n elif diff.days == 1:\n return '1 day ago'\n elif diff.days > 1:\n return '%d days ago' % diff.days\n elif diff.seconds <= 1:\n return 'just now'\n elif diff.seconds < 60:\n return '%d seconds ago' % diff.seconds\n elif diff.seconds < (60 * 2):\n return '1 minute ago'\n elif diff.seconds < (60 * 60):\n return '%d minutes ago' % (diff.seconds / 60)\n elif diff.seconds < (60 * 60 * 2):\n return '1 hour ago'\n else:\n return '%d hours ago' % (diff.seconds / (60 * 60))", "def pretty_date(date: datetime):\n if not isinstance(date, datetime) or date > NOW:\n raise ValueError('pretty_date() only accepts datetime objects in the past')\n diff = NOW - date\n seconds = int(diff.total_seconds())\n minutes = seconds // 60\n hours = minutes // 60\n # This doesn't _feel_ very pythonic…\n if seconds < 10:\n return 'just now'\n if seconds < 60:\n return f'{seconds} seconds ago'\n if minutes < 2:\n return 'a minute ago'\n if minutes < 60:\n return f'{minutes} minutes ago'\n if hours < 2:\n return 'an hour ago'\n if hours < 24:\n return f'{hours} hours ago'\n if hours < 48:\n return 'yesterday'\n return date.strftime('%m/%d/%y')", "def human_date(self, date):\n return timeago.format(date)", "def render_delta_from_now(date):\n return render_delta(__timedelta_millis(date - utc()))", "def future_time():\n ten_days_after = datetime.now() + timedelta(days=10)\n time_format = '%Y-%m-%d %H:%M:%S'\n return ten_days_after.strftime(time_format)", "def create_past_date(self, days):\n past_date = datetime.now() - timedelta(days=days)\n return past_date.isoformat()", "def SAgeDdt(ddt):\n if ddt.days < 0:\n return \"in the future?\"\n months = int(ddt.days*12/365)\n years = int(ddt.days/365)\n if years >= 1:\n return \"%d year%s ago\" % (years, SPlural(years))\n if months >= 3:\n return \"%d months ago\" % months \n if ddt.days == 1:\n return \"yesterday\"\n if ddt.days > 1:\n return \"%d days ago\" % ddt.days\n hrs = int(ddt.seconds/60/60)\n if hrs >= 1:\n return \"%d hour%s ago\" % (hrs, SPlural(hrs))\n minutes = round(ddt.seconds/60)\n if minutes < 1:\n return \"seconds ago\"\n return \"%d minute%s ago\" % (minutes, SPlural(minutes))", "def calculate_date(x, now):\n\t#now = datetime.datetime.now()\n\tn = int(extract_only_number(x))\n\tif n > 0:\n\t\treturn (now - datetime.timedelta(n)).strftime(\"%d-%m-%Y\")\n\treturn now.strftime(\"%d-%m-%Y\")", "def relative_datetime(self):\n now = datetime.now(timezone.utc)\n created_at = self.created_at.astimezone(timezone.utc)\n\n delta = humanize.naturaldelta(abs(created_at - now))\n tense = \"from now\" if now < created_at else \"ago\"\n\n return f\"{delta} {tense}\"", "def get_n_days_ago(self, startdate, n):\n return startdate - datetime.timedelta(days=n)", "def prevDate(y, m, d):\n dateTuple = (y, m, d, 0, 0, 0, 0, 0, 0)\n epochSecs = mktime(dateTuple)\n prevDateTuple = localtime(epochSecs-24*60*60)\n return prevDateTuple[:3]", "def pretty_date(time=False):\n now = datetime.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time, datetime):\n diff = now - time\n elif not time:\n diff = now - now\n else:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(int(round(second_diff, 
0))) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str(int(round(second_diff / 60, 0))) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str(int(round(second_diff / 3600, 0))) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(int(round(day_diff, 0))) + \" days ago\"\n if day_diff < 31:\n return str(int(round(day_diff / 7, 0))) + \" weeks ago\"\n if day_diff < 365:\n return str(int(round(day_diff / 30, 0))) + \" months ago\"\n return str(int(round(day_diff / 365, 0))) + \" years ago\"", "def thirty_days_ago():\n return date.today() - timedelta(days=30)", "def pretty_date(time=False):\r\n from datetime import datetime\r\n import dateutil.parser\r\n now = datetime.now()\r\n if type(time) is str or type(time) is unicode:\r\n time = dateutil.parser.parse(time)\r\n if type(time) is int:\r\n diff = now - datetime.fromtimestamp(time)\r\n elif isinstance(time, datetime):\r\n diff = now - time\r\n elif not time:\r\n diff = now - now\r\n second_diff = diff.seconds\r\n day_diff = diff.days\r\n\r\n if day_diff < 0:\r\n return ''\r\n\r\n if day_diff == 0:\r\n if second_diff < 10:\r\n return \"just now\"\r\n if second_diff < 60:\r\n return str(second_diff) + \" seconds ago\"\r\n if second_diff < 120:\r\n return \"a minute ago\"\r\n if second_diff < 3600:\r\n return ' '.join([str(second_diff / 60), \"minutes ago\"])\r\n if second_diff < 7200:\r\n return \"an hour ago\"\r\n if second_diff < 86400:\r\n return ' '.join([str(second_diff / 3600), \"hours ago\"])\r\n if day_diff == 1:\r\n return \"Yesterday\"\r\n if day_diff < 7:\r\n return ' '.join([str(day_diff), \"days ago\"])\r\n if day_diff < 31:\r\n return ' '.join([str(day_diff / 7), \"weeks ago\"])\r\n if day_diff < 60:\r\n return ' '.join([str(day_diff / 30), \"month ago\"])\r\n if day_diff < 365:\r\n return ' '.join([str(day_diff / 30), \"months ago\"])\r\n if day_diff < (365 * 2):\r\n return ' '.join([str(day_diff / 365), \"year ago\"])\r\n return ' '.join([str(day_diff / 365), \"years ago\"])", "def day_relative_to_absolute(relative):\n today = datetime.datetime.today()\n delta = datetime.timedelta(days=relative)\n return (today - delta).strftime(\"%Y-%m-%d\")", "def render_date_time_with_relative_into(into, date_time, add_ago):\n into.append(format(date_time, DATETIME_FORMAT_CODE))\n \n into.append(' [*')\n into.append(elapsed_time(date_time))\n if add_ago:\n into.append(' ago')\n into.append('*]')\n \n return into", "def get_preceeding_dekad(c):\n if c.day < 10:\n prec_dekad = dt.date(c.year, c.month, 1) - dt.timedelta(days=1)\n elif c.day < 20:\n prec_dekad = dt.date(c.year, c.month, 10)\n else:\n prec_dekad = dt.date(c.year, c.month, 20)\n\n return prec_dekad", "def yesterday_string(fmt='%Y-%m-%d'):\n return (brasilia_time() - pd.Timedelta(days=1)).strftime(fmt)", "def pretty_date(time=False):\n from datetime import datetime\n\n now = datetime.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time, datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return \"\"\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(second_diff) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str(second_diff / 60) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour 
ago\"\n if second_diff < 86400:\n return str(second_diff / 3600) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(day_diff / 7) + \" weeks ago\"\n if day_diff < 365:\n return str(day_diff / 30) + \" months ago\"\n return str(day_diff / 365) + \" years ago\"", "def pretty_date(time=False):\n from datetime import datetime\n now = datetime.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time,datetime):\n diff = now - time \n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(second_diff) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str( second_diff / 60 ) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str( second_diff / 3600 ) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(day_diff/7) + \" weeks ago\"\n if day_diff < 365:\n return str(day_diff/30) + \" months ago\"\n return str(day_diff/365) + \" years ago\"", "def timesince_limited(d):\n today = datetime.datetime.now()\n delta = datetime.timedelta\n interval = today - d\n if today.strftime('%Y-%m-%d') == d.strftime('%Y-%m-%d'):\n if interval < delta(days=0, hours=1):\n return timesince(d) + ' ago '\n else:\n return d.strftime('%H:%M')\n else:\n return d", "def get_date():\n return (datetime.now() - TIMEDELTA).isoformat()", "def getfuturedate(runningdate, futuredays):\n d = (runningdate + datetime.timedelta(days=(futuredays-1))).strftime('%d-%m')\n return str(d)", "def timesince(date):\n format = '%b %d, %Y'\n return date.strftime(format)", "def format_date(value: int) -> str:\n\n return (datetime(1970, 1, 1) + timedelta(milliseconds=value)).strftime('%Y%m%d')", "def timesince(dt, default=\"just now\"):\n\n now = datetime.datetime.now()\n diff = now - dt\n \n periods = (\n (diff.days / 365, \"year\", \"years\"),\n (diff.days / 30, \"month\", \"months\"),\n (diff.days / 7, \"week\", \"weeks\"),\n (diff.days, \"day\", \"days\"),\n (diff.seconds / 3600, \"hour\", \"hours\"),\n (diff.seconds / 60, \"minute\", \"minutes\"),\n (diff.seconds, \"second\", \"seconds\"),\n )\n\n for period, singular, plural in periods:\n \n if period:\n return \"%d %s ago\" % (period, singular if period == 1 else plural)\n\n return default", "def time_since(dt, default=\"just now\"):\n\t\n\tnow = datetime.utcnow()\n\tdiff = now - dt\n\t\n\tperiods = (\n\t\t(diff.days / 365, \"year\", \"years\"),\n\t\t(diff.days / 30, \"month\", \"months\"),\n\t\t(diff.days / 7, \"week\", \"weeks\"),\n\t\t(diff.days, \"day\", \"days\"),\n\t\t(diff.seconds / 3600, \"hour\", \"hours\"),\n\t\t(diff.seconds / 60, \"minute\", \"minutes\"),\n\t\t(diff.seconds, \"second\", \"seconds\"),\n\t)\n\n\tfor period, singular, plural in periods:\n\t\tif period:\n\t\t\treturn \"%d %s ago\" % (period, singular if period == 1 else plural)\n\n\treturn default", "def yesterdayDate(self):\n yesterday = time.time() - 24*3600\n return time.strftime(\"%m/%d/%Y\", time.localtime(yesterday))", "def get_days_old(days):\n days = int(days)\n current_time = datetime.datetime.today()\n days_after = datetime.timedelta(days)\n new_date = current_time - days_after\n new_date = 
new_date.strftime(\"%d-%b-%Y\")\n return new_date", "def get_pervious_date(months_in_past):\n pervious_date = date.today() + relativedelta(months=-months_in_past)\n return str(pervious_date)", "def generate_dates(self):\r\n\r\n numdays = 20\r\n\r\n base = datetime.datetime.today()\r\n\r\n date_list = [base + datetime.timedelta(days=x) for x in range(numdays)]\r\n\r\n date_str = [x.strftime(\"%d-%m-%Y\") for x in date_list]\r\n\r\n return date_str", "def get_date_str(bias=0):\n today = datetime.datetime.today() # 獲得今天的日期\n date = (today + datetime.timedelta(days=bias)).strftime(\"%m/%d\") # 格式化日期\n return ' ' + date[1:] if date[0] == '0' else date # 把0換成空白", "def least_current_date():\n # This is not the right way to do it, timezones can change\n # at the time of writing, Baker Island observes UTC-12\n return datetime.now(timezone(timedelta(hours=-12))).strftime(\"%Y-%m-%d\")", "def published_date(subtract_days=0):\n pub_date = datetime.datetime.today()\n pub_date = pub_date - datetime.timedelta(days=subtract_days)\n return datetime.datetime.strftime(pub_date, \"%Y-%m-%dT%H:%M:%SZ\")", "def last_update(blank):\n today = date.today()\n return today.strftime('%A %B %d')", "def timesince(dt, default=None, reverse=False):\n\n if not dt:\n return ''\n\n if default is None:\n default = u'刚刚'\n now = datetime.utcnow()\n diff = (dt - now) if reverse else now - dt\n\n if diff < timedelta(days=0):\n return default\n\n periods = (\n (diff.days / 365, u'年', u'年'),\n (diff.days / 30, u'月', u'月'),\n (diff.days / 7, u'周', u'周'),\n (diff.days, u'天', u'天'),\n (diff.seconds / 3600, u'小时', u'小时'),\n (diff.seconds / 60, u'分钟', u'分钟'),\n (diff.seconds, u'秒', u'秒'),\n )\n\n for period, singular, plural in periods:\n\n if not period:\n continue\n\n if reverse:\n if period == 1:\n return u'剩余 %d %s' % (period, singular)\n else:\n return u'剩余 %d %s' % (period, plural)\n\n else:\n if period == 1:\n return u'%d%s前' % (period, singular)\n else:\n return u'%d%s前' % (period, plural)\n\n return default", "def get_todays_date(self):\r\n \r\n date=str(dt.datetime.today())\r\n raw_date=date.split(\" \")[0]\r\n Day=raw_date.split(\"-\")[-1]\r\n Month=raw_date.split(\"-\")[-2]\r\n Year=raw_date.split(\"-\")[-3]\r\n todays_date=Day+\"-\"+Month+\"-\"+Year\r\n return todays_date", "def prev(self):\n return self.from_date(self.date_a - datetime.timedelta(1))", "def pretty_date_filter(dt, default=None):\n\n if default is None:\n default = 'just now'\n\n now = datetime.utcnow()\n diff = now - dt\n\n periods = (\n (diff.days / 365, 'year', 'years'),\n (diff.days / 30, 'month', 'months'),\n (diff.days / 7, 'week', 'weeks'),\n (diff.days, 'day', 'days'),\n (diff.seconds / 3600, 'hour', 'hours'),\n (diff.seconds / 60, 'minute', 'minutes'),\n (diff.seconds, 'second', 'seconds'),\n )\n\n for period, singular, plural in periods:\n\n if not period:\n continue\n\n if period == 1:\n return u'%d %s ago' % (period, singular)\n else:\n return u'%d %s ago' % (period, plural)\n\n return default", "def get_year_ago(from_date: dt.datetime = None):\n from_date = from_date or get_now()\n date = from_date.replace(year=from_date.year - 1)\n return date", "def previous_date(self):\n yesterday = pendulum.yesterday('UTC')\n last_update = self.storage.last_update(self.feed)\n if not last_update or last_update < yesterday:\n last_update = yesterday\n return last_update", "def max_drawdown_date(self) -> dt.date:\n mdd_date = (self.tsdf / self.tsdf.expanding(min_periods=1).max()).idxmin().values[0].astype(dt.datetime)\n return 
dt.datetime.fromtimestamp(mdd_date / 1e9).date()", "def _utc_date(self):\n if self.date_stamp == '0':\n return '0'\n else:\n if '.' in self.date_stamp:\n t = datetime.datetime.strptime(self.date_stamp,\n '%Y%m%d%H%M%S.%f')\n else:\n t = datetime.datetime.strptime(self.date_stamp,\n '%Y%m%d%H%M%S')\n tdelta = datetime.timedelta(hours = int(self.tzone[1:3]),\n minutes = int(self.tzone[3:5]))\n \n if self.tzone[0] == '-':\n ut = t - tdelta\n return ut.strftime('%Y%m%d%H%M%S.%f')\n else:\n ut = t + tdelta\n return ut.strftime('%Y%m%d%H%M%S.%f')", "def draw_num_to_date(draw_num):\n\n #each day 300 draws, find diff in draw num from init draw num\n draw_diff = draw_num - INIT_DRAW_NUMBER + 1\n days_diff = draw_diff / 300\n\n draw_num_date = INIT_DATE + timedelta(days=days_diff)\n\n #add date by days diff\n print(\"Draw num:\", draw_num, \"Date:\", draw_num_date)\n return draw_num_date", "def get_date_display(self, context):\n return '{year}/{month}/{day}'.format(\n year=self.get_year(),\n month=self.get_month().zfill(2),\n day=self.get_day().zfill(2))", "def howLongAgo(time=False):\n now = timezone.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time,datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"genau jetzt\"\n if second_diff < 60:\n return \"vor \" + str(second_diff) + \" Sek.\"\n if second_diff < 120:\n return \"vor einer Min.\"\n if second_diff < 3600:\n return \"vor \" + str( second_diff / 60 ) + \" Min.\"\n if second_diff < 7200:\n return \"vor einer St.\"\n if second_diff < 86400:\n return \"vor \" + str( second_diff / 3600 ) + \" St.\"\n if day_diff == 1:\n return \"Gestern\"\n if day_diff < 7:\n return \"vor \" + str(day_diff) + \" Tagen\"\n if day_diff < 31:\n return \"vor \" + str(day_diff/7) + \" Wochen\"\n if day_diff < 365:\n return \"vor \" + str(day_diff/30) + \" Monaten\"\n return \"vor \" + str(day_diff/365) + \" Jahren\"", "def now_minus(days: int):\n return NOW - datetime.timedelta(days=days)", "def convert_date(date):\n\n if len(date) > 10: date = date[:date.rfind(\"-\")]\n return convf(date)", "def largest_current_date():\n # This is not the right way to do it, timezones can change\n # at the time of writing, Samoa observes UTC+14 in Summer\n return datetime.now(timezone(timedelta(hours=14))).strftime(\"%Y-%m-%d\")", "def get_ago(dt, d_years=0, d_months=0):\n\n # Get year number, month number and day number applying offset as required\n _y, _m, _d = dt.year + d_years, dt.month + d_months, dt.day\n # Calculate actual month number taking into account EOY rollover\n _a, _m = divmod(_m - 1, 12)\n # Calculate and return date object\n _eom = calendar.monthrange(_y + _a, _m + 1)[1]\n return date(_y + _a, _m + 1, _d if _d <= _eom else _eom)", "def createTimeStamp_Date():\r\n\r\n return str(datetime.now().strftime(\"%Y%m%d\"))", "def yesterday():\n return datetime.today() - timedelta(1)", "def utc_today_str():\n return datetime.datetime.strftime(datetime.datetime.utcnow(), \"%Y-%m-%d\")", "def hydrate_date(days):\n return Date.from_ordinal(unix_epoch_date_ordinal + days)", "def get_n_days_ahead(self, startdate, n, fmt=None):\n return startdate + datetime.timedelta(days=n)", "def get_crpp_date(dtThis):\n\n # Model: yyyy-MM-dd'T'HH:mm:ss\n sDate = dtThis.strftime(\"%Y-%m-%dT%H:%M:%S\")\n return sDate", "def get_crpp_date(dtThis):\n\n # Model: yyyy-MM-dd'T'HH:mm:ss\n sDate = 
dtThis.strftime(\"%Y-%m-%dT%H:%M:%S\")\n return sDate", "def utc2localtime(date):\n return date - (datetime.utcnow() - datetime.now())", "def dateByDelta(daysInTheFuture=1):\n\ttmDT = datetime.today() + timedelta(days=daysInTheFuture)\n\tnewDate = datetime(tmDT.year,tmDT.month,tmDT.day).date()\n\treturn newDate", "def __get_date_string(self, t):\n # TODO: Consider using datetime.utcfromtimestamp(timestamp).date() --> datetime.date(2014, 8, 8)\n return time.strftime(\"%m/%d/%Y\", time.gmtime(float(t)))", "def get_gds_current_date(self, remove_leading_zero='true'):\r\n time_now = datetime.datetime.now().time()\r\n today_2pm = time_now.replace(hour=14, minute=31, second=0, microsecond=0)\r\n if time_now < today_2pm:\r\n gds_date = datetime.datetime.now() - datetime.timedelta(days=int(1))\r\n else:\r\n gds_date = datetime.datetime.now()\r\n\r\n if remove_leading_zero.lower() == 'true':\r\n return str('{dt.day}{dt:%b}'.format(dt=gds_date).upper())\r\n else:\r\n return self._set_gds_date_format(gds_date)", "def numeric_date_recover(self):\n \n sp_time_zone, current_datetime = self.setup_datetime() \n converter2sptimezone = current_datetime.astimezone(sp_time_zone)\n \n return converter2sptimezone.strftime('%d-%m-%Y')", "def humanize_ts(timestamp=False):\n now = datetime.now()\n diff = now - datetime.fromtimestamp(timestamp)\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(int(second_diff)) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str(int(second_diff / 60)) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str(int(second_diff / 3600)) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(int(day_diff / 7)) + \" weeks ago\"\n if day_diff < 365:\n return str(int(day_diff / 30)) + \" months ago\"\n return str(int(day_diff / 365)) + \" years ago\"", "def _get_creation_date(self, user):\n url = 'https://api.twitch.tv/kraken/users/{}'.format(user)\n for attempt in range(5):\n try:\n r = requests.get(url)\n creation_date = r.json()['created_at']\n cut_creation_date = creation_date[:10]\n except ValueError:\n continue\n except TypeError:\n continue\n else:\n return cut_creation_date\n else:\n self._add_to_chat_queue(\n \"Sorry, there was a problem talking to the twitch api. Maybe wait a bit and retry your command?\")", "def days_ago(n, hour=0, minute=0, second=0, microsecond=0):\n warnings.warn(\n \"Function `days_ago` is deprecated and will be removed in Airflow 3.0. 
\"\n \"You can achieve equivalent behavior with `pendulum.today('UTC').add(days=-N, ...)`\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n\n today = timezone.utcnow().replace(hour=hour, minute=minute, second=second, microsecond=microsecond)\n return today - timedelta(days=n)", "def timeago(time=False):\n\n return arrow.get(time).humanize()", "def __str__(self):\n return '{y}-{m:0>2}-{d:0>2}'.format(y=self.year, m=self.month, d=self.day)", "def get_last_seven_days_label(self):\n return gettext_lazy('Last seven days')", "def render_date(dt):\n return dt.strftime('%Y-%m-%d')", "def get_date(self):\n return self.date.strftime(\"%a %x\")", "def since(self):\n return str(datetime.datetime.now() - self._refreshed_at)", "def get_oldest_article_date():\n\n # date = datetime.datetime.strptime(date, \"%m/%d/%Y\")\n today_date = datetime.date.today()\n last_week = today_date-timedelta(days=2)\n search_date = last_week.isoformat()\n\n return search_date", "def run_date(self) -> datetime.date:\n return self.timestamp.date()", "def date_prettyfier(self, date):\n units = 'days since 1900-01-01 00:00'\n date = date * 365.25\n date = cftime.num2date(date, units)\n pretty_date = str(date.day)+'/'+str(date.month)+'/'+str(date.year-1900) \n return pretty_date", "def date2trost(datetime_date):\n # '0>2' means: add leading zero(s) if the int is less than two digits long\n return '{0}-{1:0>2}-{2:0>2}'.format(datetime_date.year, datetime_date.month, datetime_date.day)", "def date_to_final_str(date_obj: datetime) -> str:\n return date_obj.strftime(\"%Y-%m-%d\")", "def last_modified_ago(self):\n revision = self.last_revision\n if revision:\n seconds = (now() - revision.created_on).total_seconds()\n ago = time_format(seconds, 4)\n return \"{} ago\".format(ago)\n\n return \"never\"", "def human_datetime(date_time):\n current_datetime = datetime.datetime.now()\n delta = str(current_datetime - date_time)\n if delta.find(',') > 0:\n days, hours = delta.split(',')\n days = int(days.split()[0].strip())\n hours, minutes = hours.split(':')[0:2]\n else:\n hours, minutes = delta.split(':')[0:2]\n days = 0\n days, hours, minutes = int(days), int(hours), int(minutes)\n datelets = []\n years, months, xdays = None, None, None\n plural = lambda x: 's' if x != 1 else ''\n if days >= 365:\n years = int(days / 365)\n datelets.append('%d year%s' % (years, plural(years)))\n days = days % 365\n if days >= 30 and days < 365:\n months = int(days / 30)\n datelets.append('%d month%s' % (months, plural(months)))\n days = days % 30\n if not years and days > 0 and days < 30:\n xdays = days\n datelets.append('%d day%s' % (xdays, plural(xdays)))\n if not (months or years) and hours != 0:\n datelets.append('%d hour%s' % (hours, plural(hours)))\n if not (xdays or months or years):\n datelets.append('%d minute%s' % (minutes, plural(minutes)))\n return ', '.join(datelets) + ' ago.'", "def friendly_date(self):\n\n return self.created_at.strftime(\"%a %b %-d %Y, %-I:%M %p\")", "def age(self) -> str:\n tdelta = dt.now() - self.created_timestamp\n if tdelta.days >= 548: # enough to round it up to 2 years\n return f'about {tdelta.days/365:.0f} years'\n elif tdelta.days >= 345: # enough to round it up to 1 year (so it doesn't report '12 months')\n return f'about a year'\n elif tdelta.days > 45: # beyond 1 month (after rounding)\n return f'about {tdelta.days/30:.0f} months'\n elif tdelta.days > 24: # enough to round it up to 1 month (so it doesn't report '4 weeks')\n return f'about a month'\n elif tdelta.days > 7:\n # round to nearest half, dropping 
'.0' when whole\n return f'{round((tdelta.days/7)*2)/2:g} weeks'\n elif tdelta.days == 7:\n return 'a week'\n elif tdelta.days > 1:\n return f'{tdelta.days} days'\n elif tdelta.days == 1:\n return f'a day'\n # break it down into parts of a day\n hours = tdelta.seconds // 3600\n if hours > 1:\n return f'{hours:.0f} hours'\n elif hours == 1:\n return f'an hour'\n minutes = tdelta.seconds % 3600 / 60\n if minutes > 1:\n return f'{minutes:.0f} minutes'\n elif minutes == 1:\n return f'a minute'\n return 'moments'", "def day_str(day):\n return str(day) if day >= 10 else '0' + str(day)", "def day_str(day):\n return str(day) if day >= 10 else '0' + str(day)", "def day_str(day):\n return str(day) if day >= 10 else '0' + str(day)", "def todaystr():\n today = datetime.datetime.today()\n return f\"{today.year}{today.month:02}{today.day:02}\"", "def time_from(self, dt, **options):\n return self._format_delta(self - dt)", "def get_date():\n now = datetime.now()\n date = now.strftime(\"%Y%m%d\")\n return date", "def from_my_birthday (d):\n birthday = datetime(1986, 4, 23)\n return relativedelta.relativedelta(d, birthday)", "def delta_today(N: int) -> dt.datetime:\n today = dt.date.today()\n return dt.datetime.combine(today, dt.time.min) + dt.timedelta(days=N)", "def __get_settlement_date():\n day_after_tomorrow = datetime.now(timezone.utc).date() + \\\n timedelta(days=2)\n settlement_date = day_after_tomorrow.strftime(\"%Y%m%d\")\n\n return settlement_date", "def humanize_arrow_date(date):\n try:\n then = arrow.get(date)\n now = arrow.utcnow()\n now = now.replace(hour=0, minute=0, second=0)\n if then.date() == now.date():\n human = \"Today\"\n else:\n human = then.humanize(now)\n if human == \"in a day\":\n human = \"Tomorrow\"\n elif human == \"a day ago\":\n human = \"Yesterday\"\n except:\n human = date\n return human", "def friendly_date(self):\n return self.created_at.strftime(\"%a %b %#d %Y, %#I:%M %p\")", "def get_timestamp(self, days=1):\n offset = datetime.datetime.utcnow().date() - datetime.timedelta(days=days-1)\n # est = tz.gettz('Europe/Amsterdam')\n # temporary dirty fix for timezone:\n timezone = '+02:00'\n start = datetime.datetime(offset.year, offset.month, offset.day)\n return start.isoformat() + timezone", "def time_since(date):\n now = date_now()\n return now - date", "def get_date_today() -> str:\n return datetime.now().strftime(\"%Y-%m-%d\")", "def txfDate(date):\n return date.strftime('%m/%d/%Y')", "def get_hundred_days_end_date():\n days = timedelta(days=100)\n return str(start_100days + days)", "def today():\n today_object = datetime.utcnow()\n today_string = today_object.strftime('%m/%d/%Y')\n return today_string", "def render_delta(d):\n s = '' if d >= 0 else '-'\n d = abs(d)\n\n for unit in _SORTED_UNITS:\n span = __timedelta_millis(unit[1])\n if d >= span:\n count = int(d // span)\n s += '{0}{1}'.format(count, unit[0])\n d -= count * span\n\n if d or not s:\n s += str(d)\n\n return s", "def current_date_time_stamp():\n return datetime.now().strftime('%Y.%m.%d %H:%M:%S.%f')[:-7]" ]
[ "0.63436246", "0.6039876", "0.5983086", "0.5962784", "0.58728963", "0.581312", "0.58069855", "0.5791033", "0.5787348", "0.5763219", "0.56588507", "0.5579537", "0.5577654", "0.55374175", "0.5471164", "0.539841", "0.53682923", "0.5352607", "0.53389454", "0.53330094", "0.5331382", "0.5319435", "0.52447456", "0.5240271", "0.5236799", "0.5219827", "0.5197637", "0.5193551", "0.5193048", "0.5184128", "0.518164", "0.5121002", "0.51119685", "0.5072984", "0.5064115", "0.5063366", "0.5058964", "0.50552934", "0.5051387", "0.5039742", "0.5029247", "0.50143915", "0.49888507", "0.4983479", "0.49763685", "0.49742603", "0.49707583", "0.49679056", "0.49648172", "0.49433845", "0.49362367", "0.49149105", "0.4914352", "0.48874444", "0.48753664", "0.48675013", "0.48575088", "0.48575088", "0.48523816", "0.4849231", "0.48474184", "0.48399448", "0.48277318", "0.48196995", "0.48087743", "0.4805404", "0.479575", "0.47947487", "0.47938743", "0.47886974", "0.47841135", "0.47828722", "0.47724262", "0.47657442", "0.47549033", "0.47526887", "0.47504067", "0.474763", "0.47360256", "0.47309715", "0.4719895", "0.47060806", "0.47060806", "0.47060806", "0.4696691", "0.46959177", "0.46909383", "0.46866944", "0.46860924", "0.4682049", "0.4679049", "0.4677931", "0.46760035", "0.4669516", "0.46683264", "0.46659473", "0.46601415", "0.4651534", "0.46479797", "0.46444985" ]
0.7581562
0
Return the last month (UTC) formatted as YYYYMM.
def _last_month() -> str:
    time_now = gmtime()
    return (
        f"{time_now.tm_year}-{time_now.tm_mon - 1:02d}"
        if time_now.tm_mon > 1
        else f"{time_now.tm_year - 1}-12"
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def last_month_day():\r\n return (datetime.now().replace(day=1) + relativedelta(months=1) + timedelta(days=-1)).strftime(\r\n '%d-%m-%Y')", "def make_last_month_period(dt=None):\n if not dt:\n dt = datetime.utcnow()\n dt = dt.replace(day=1) - timedelta(days=1)\n return dt.strftime('%Y%m')", "def last_month():\n return datetime.now() + relativedelta(months=-1)", "def make_last_year_month_period(dt=None):\n if not dt:\n dt = datetime.utcnow()\n dt = dt.replace(year=dt.year - 1, month=dt.month, day=1)\n return int(dt.strftime('%Y%m'))", "def last_month_first_day():\r\n return (datetime.now().replace(day=1) + relativedelta(months=-1) + timedelta(days=-1)).strftime(\r\n '%d-%m-%Y')", "def floor_end_month(date):\n return datetime(date.year, date.month, 1) + timedelta(days=-1)", "def last_month(self):\r\n return RecordsLastMonth(self)", "def end_month(d):\n return date(d.year, d.month, monthrange(d.year, d.month)[1])", "def get_end_month(month):\n return datetime(2020, month, 28)", "def get_month_end(x: Optional[Date] = None) -> Date:\n return (x or get_today()).replace(day=1) + relativedelta(months=+1, days=-1)", "def _get_last_date_month(self, date_find):\n day = datetime.strptime(date_find, settings.TIME_FORMAT)\n last_day_of_month = calendar.monthrange(day.year, day.month)[1]\n date_day_of_month = '{}-{}-{} 00:00:00'.format(day.year, day.month, last_day_of_month)\n return date_day_of_month", "def last_day_of_month(date):\n last_day = calendar.monthrange(date.year, date.month)[1]\n return datetime.date(date.year, date.month, last_day)", "def last_day_of_month(date):\n last_day = calendar.monthrange(date.year, date.month)[1]\n return datetime.date(date.year, date.month, last_day)", "def to_end_of_month(self):\n days = _num_days_in_month(self._months, self._years)\n return from_year_month_day(self._years, self._months, days, validate=False)", "def get_months_to_date():\n month_sequence = [5, 4, 3, 2, 1, 12, 11, 10, 9, 8] # season is August to May\n try:\n current_month_index = month_sequence.index(dt.now().month)\n except ValueError:\n current_month_index = 0\n\n return month_sequence[current_month_index:]", "def get_current_month() -> int:\n return datetime.now().month", "def get_month(self, indate):\n return indate.strftime(\"%B\") + \"-\" + indate.strftime(\"%Y\")", "def get_default():\n today = datetime.date.today()\n if today.month == 1:\n return YearMonth(today.year - 1, 12)\n return YearMonth(today.year, today.month - 1)", "def get_last_day_of_month(today: Optional[datetime] = None) -> int:\n if today is None:\n today = datetime.utcnow()\n return monthrange(today.year, today.month)[1]", "def MONTH(date):\n return _make_datetime(date).month", "def getCurrentMonth(self):\n return math.ceil((self.wcount % 48) / 4)", "def decrement_month(self):\n month: int = int(self.month)\n month -= 1\n if month == 0:\n month == 12\n year: int = int(self.year)\n year -= 1\n self.year = str(year)\n self.month = str(month)\n if len(self.month) == 1:\n self.month = \"0\" + self.month", "def resolve_month(ym):\n if isinstance(ym, (tuple, list)):\n y, m = ym\n elif isinstance(ym, (datetime.datetime, datetime.date)):\n y, m = ym.year, ym.month-1\n elif isinstance(ym, int) or ym is None:\n today = timezone.now()\n y, m = today.year, today.month + (ym or 0)\n else:\n raise RuntimeError(\"Unsupported argument %r\" % ym)\n\n return y*12 + m", "def getMonth(self):\n return _libsbml.Date_getMonth(self)", "def mm(self):\n return '%02d' % self._month", "def month(self) -> int:\n if self.is_old_style:\n return 
int(self.split('/', 1)[1][2:4])\n return int(self[2:4])", "def pick_month():\n today = date.today()\n month = date(today.year, today.month, 1)\n if today.day < 14:\n # Use last month\n month -= timedelta(days=27)\n while month.day != 1:\n month -= timedelta(days=1)\n return month", "def month(self):\n return 0", "def month(self):\n return 0", "def last_month(today: Optional[datetime] = None, tz: Any = None) -> Tuple[datetime, datetime]:\n if today is None:\n today = datetime.utcnow()\n end = datetime(day=1, month=today.month, year=today.year)\n end_incl = end - timedelta(seconds=1)\n begin = datetime(day=1, month=end_incl.month, year=end_incl.year)\n return localize_time_range(begin, end, tz)", "def month(self) -> str:\r\n return self._month", "def monthname(self):\n return self.strftime(\"%B\")", "def date_month(date):\n return date.month", "def month(self):\n return gocept.month.Month(self.calendar_month, self.calendar_year)", "def month(self) -> int:\n return self.arxiv_id.month", "def test_date_accept_last_month(self):\n spi_search = \"find date last month\"\n inv_search = \"year:\" + datetime.datetime.strftime(datetime.datetime.today()\\\n +dateutil.relativedelta.relativedelta(months=-1), '%Y-%m')\n self._compare_searches(inv_search, spi_search)", "def month(self):\n return self.__month", "def get_month_name(month_of_year):\n return VALID_MONTHS[month_of_year - 1].title()", "def month_str(month, upper=True):\n\n months=['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug',\n 'sep', 'oct', 'nov', 'dec']\n\n mstr = months[month - 1]\n if upper:\n mstr = mstr.upper()\n return mstr", "def ceil_start_month(date):\n if date.month == 12:\n date = datetime(date.year + 1, 1, 1)\n else:\n date = datetime(date.year, date.month + 1, 1)\n return date", "def timestamp_month_ago():\n\n d = str(datetime.date.today() - datetime.timedelta(days=31)).split('-')\n y = d[0]\n m = d[1]\n d = d[2]\n a_month_ago = get_timestamp(int(y),int(m),int(d))\n return a_month_ago", "def get_month_name(self, month):\n return self.month_names[month.month - 1]", "def MonthName(num, length=99):\n if num < 1 or num > NUM_MONTHS:\n raise ValueError('Bad month number')\n return _MONTH_NAMES[num - 1][:length]", "def time_left_in_month():\n now = datetime.now()\n\n year = now.year\n month = now.month\n day = now.day\n hour = now.hour\n minute = now.minute\n second = now.second\n\n M = (month + 1) % 12\n Y = year + 1 if M == 1 else year\n return int((datetime(Y, M, 1) - datetime(year, month, day, hour = hour,\n minute = minute,\n second = second)).total_seconds() * 1000)", "def month(self):\n return self._month", "def month(self):\n return self._month", "def month(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"month\")", "def get_last_year(year=None):\n if year:\n return str(int(year)-1)\n else:\n return str(get_current_year(as_string=False) - 1)", "def get_month(self, as_str=False):\n\n # First we get the first 8 bits stored in the month register\n month_bcd = self.__read_register(_REGISTER_MONTH)\n\n # Then we extract the digits and the tens\n tens = (month_bcd & 0x10) >> 4 # 0x10 = 0b00010000\n digit = (month_bcd & 0x0F) # 0x0F = 0b00001111\n\n month = 10 * (tens) + digit\n\n if as_str is True: # if we want the month's name\n month = MONTH_STR[month - 1]\n\n return month", "def prev_month(dateobj):\n year_delta, old_month = divmod(dateobj.month - 2, 12)\n return datetime.date(dateobj.year + year_delta, old_month + 1, 1)", "def this_month(self):\n if self.time.month != datetime.datetime.now().month or 
self._this_month is None:\n self._update_time()\n return self._this_month", "def get_end_date(year, month):\n num_days = int(calendar.monthrange(year, month)[1])\n end_date = date(year, month, num_days).strftime(\"%Y-%m-%d\")\n return end_date", "def YM(year=None, month=None):\n if month is None:\n if year is None:\n year = timezone.now()\n\n month = year.month\n year = year.year\n\n return module_globals[\n '%s_%04d_%02d' % (name, year, month)\n ]", "def first_day_of_month():\n first_object = datetime.utcnow()\n first_string = first_object.strftime('%m/01/%Y')\n return first_string", "def decrement_year_month(year, month):\n month -= 1\n if month <= 0:\n year -= 1\n month = 12\n return year, month", "def first_month_day():\r\n return datetime.now().replace(day=1).strftime('%d-%m-%Y')", "def Month(self):\n return self._fmon", "def aMonth(self):\n return self._amon", "def __month(self):\n return _VirtualColumn(\n df_name=self.thisptr[\"df_name_\"],\n operator=\"month\",\n operand1=self,\n operand2=None\n )", "def last_quarter(today):\n quarter_date = today - relativedelta(months=1)\n while quarter_date.month % 3 != 0:\n quarter_date = quarter_date - relativedelta(months=1)\n return quarter_date.year, int(quarter_date.month / 3)", "def get_previous_month(self, startdate):\n first_day_current = datetime.datetime(startdate.year, startdate.month, 1)\n last_day_previous = first_day_current - datetime.timedelta(days=1)\n first_day_previous = datetime.datetime(last_day_previous.year, last_day_previous.month, 1)\n\n last_month = (first_day_previous, last_day_previous)\n return last_month", "def named_month(pMonthNumber):\n return datetime.date(1900, pMonthNumber, 1).strftime('%B')", "def named_month(month_number):\n return date(1900, month_number, 1).strftime(\"%B\")", "def get_month_as_str_col(df, date_col):\n return df[date_col].dt.to_period(\"M\").astype(str)", "def last_day(orig_date):\n day_number = calendar.monthrange(orig_date.year, orig_date.month)[1]\n return datetime.date(orig_date.year, orig_date.month, day_number)", "def pMonth(self):\n return self._pmon", "def convert_month(string): \n datetime_object = datetime.datetime.strptime(string, \"%B\")\n\n month_number = datetime_object.month\n\n return month_number", "def _to_nb_months(ym_dec):\n nb_years = int(ym_dec)\n zz = ym_dec - nb_years\n return 12 * nb_years + int(zz * 12.0 + 0.5)", "def effective_invoice_month(self) -> pulumi.Input['GoogleTypeDateArgs']:\n return pulumi.get(self, \"effective_invoice_month\")", "def date_to_month(date):\r\n return re.sub(r'(\\d{4}-\\d{2})-\\d{2}', r'\\1', date)", "def short_time(self):\n return \"%s%02d\" % (util.SHORT_MONTH[self.month_num], self.year - 2000)", "def get_month(x):\n return x[\"SALE DATE\"].month", "def wordmonth(self, month):\n monthname = [word for word in self.months if word.istitle()]\n Month = int(month) -1\n return monthname[Month]", "def set_month(self, month):\r\n\t\tmonths = ['Enero', 'Febrero', 'Marzo', 'Abril',\r\n\t\t\t\t 'Mayo', 'Junio', 'Julio', 'Agosto'\r\n\t\t\t\t 'Septiembre', 'Octubre', 'Noviembre', 'Diciembre']\r\n\t\tfor i in range(12):\r\n\t\t\tif month == i: \r\n\t\t\t\treturn months[i-1]", "def to_yearmonth(yearmonthdate_str):\n # yearmonth = int(yearmonth_str[:7].replace('-', ''))\n yearmonth = int(yearmonthdate_str[:4] + yearmonthdate_str[5:7])\n return yearmonth", "def cc_expire_months():\n months = []\n for month in range(1, 13):\n if len(str(month)) == 1:\n numeric = '0' + str(month)\n else:\n numeric = str(month)\n months.append((numeric, 
datetime.date(2009, month, 1).strftime('%B')))\n return months", "def end_of_month(today: Optional[datetime] = None, n: int = 0, tz: Any = None) -> datetime:\n if today is None:\n today = datetime.utcnow()\n last_day = monthrange(today.year, today.month)[1]\n end = today.replace(day=last_day, hour=0, minute=0, second=0, microsecond=0) + timedelta(hours=24)\n while n > 0:\n last_day = monthrange(end.year, end.month)[1]\n end = end.replace(day=last_day, hour=0, minute=0, second=0, microsecond=0) + timedelta(hours=24)\n n -= 1\n while n < 0:\n end -= timedelta(days=1)\n end = end.replace(day=1, hour=0, minute=0, second=0, microsecond=0)\n n += 1\n end_incl = end - timedelta(microseconds=1)\n if tz is None:\n tz = pytz.utc\n return tz.localize(end_incl)", "def last_day_of_month(year, month):\n last_days = [31, 30, 29, 28, 27]\n for i in last_days:\n try:\n end = datetime.datetime(year, month, i)\n except ValueError:\n continue\n else:\n return end.day\n return None", "def get_month_name(month):\n datetime_object = datetime.datetime.strptime(str(month), \"%m\")\n month_name = str(datetime_object.strftime(\"%b\"))\n return month_name", "def get_month_number(date, padded_or_unpadded):\n if padded_or_unpadded == constants.str_padded:\n return date.strftime('%m')\n elif padded_or_unpadded == constants.str_unpadded:\n return str(int(date.strftime('%m')))\n else:\n err_msg = str_possible_values('padded_or_unpadded', [\n constants.str_padded, constants.str_unpadded])\n raise ValueError(err_msg)", "def month(self, month: str):\n return get_from_list(self.months, \"month\", month)", "def month(self):\n return self._months", "def get_month_from_date(raw_format_date):\n date = None\n for pattern in NAMCS_DATASET_MONTH_PATTERNS:\n try:\n date = datetime.strptime(raw_format_date, pattern)\n if date:\n break\n except ValueError:\n continue\n\n # Numeric format for month\n month = date.strftime(\"%m\")\n return int(month)", "def last_n_months(self, month_num=1):\n return [\n timezone.now() - datetime.timedelta(days=month_num*30),\n timezone.now()\n ]", "def get_fiscal_year_start_month(self):\n return self.fiscal_year_start_month", "def MONTH(\n serial_number: func_xltypes.XlNumber\n) -> func_xltypes.XlNumber:\n\n date = utils.number_to_datetime(int(serial_number))\n return int(date.strftime(\"%m\"))", "def yymm(self) -> str:\n if self.is_old_style:\n numeric_part = self.split('/', 1)[1]\n yy = numeric_part[0:2]\n mm = numeric_part[2:4]\n else:\n yy = self[:2]\n mm = self[2:4]\n return f'{yy}{mm}'", "def fetch_months_to_download(cur_date, year_to_collect):\n year_to_collect = int(year_to_collect) # fail fast\n output_fmt = '%Y%m%d'\n\n if year_to_collect > cur_date.year:\n raise ValueError('Error: Year to collect is greater than current year')\n\n range_end = f'{cur_date.year}-{cur_date.month}-01' if cur_date.year == year_to_collect else f'{year_to_collect + 1}-01-01'\n\n return pd.date_range(\n start=f'{year_to_collect - 1}-12-01', # start at last month of previous year\n end=range_end,\n freq='MS'\n ).strftime(output_fmt)", "def _french_month(month):\n mois = \"janvfévrmarsavr-mai-juinjuilaoûtseptoct-nov-déc-\"\n mois_loc = re.search(month.lower(), mois.lower())\n if mois_loc:\n mois_no = (mois_loc.start() + 4) / 4\n return \"0{}\".format(mois_no)", "def calendar_month(year, month):\n start = datetime.datetime(year, month, 1)\n if month == 12:\n end = datetime.datetime(year+1, 1, 1)\n else:\n end = datetime.datetime(year, month+1, 1)\n print(start)\n print(end)\n return start, end", "def month(self) -> 
Index:\n return Index(self.to_series().dt.month)", "def __prev_month(self, year, month):\n year, month = (year, month - 1) if month > 1 else (year - 1, 12)\n\n return self.create(year, month)", "def slug(self):\n return self.date.strftime('%Y-%m')", "def get_date_year_month(date):\n cut_date = date.split('-')\n date_month_year = cut_date[0] + '-' + cut_date[1]\n return date_month_year", "def get_month(string): \n return int(string[15:17])", "def start_month(d):\n return date(d.year, d.month, 1)", "def __get_step1_end_month(yaml_content: dict) -> str:\n\n end_month = None\n\n try:\n end_month = yaml_content['step1.end_month']\n except KeyError as exc:\n print(ConfigurationFactory.__get_key_missing_error_message(exc))\n\n return end_month", "def year_month(cls,\n year: typing.Union[int, str],\n month: typing.Union[int, str])->str:\n yearstr: str\n if isinstance(year, int):\n yearstr = str(year)\n else:\n yearstr = year\n\n monthstr: str\n if isinstance(month, int):\n monthstr = str(month)\n else:\n monthstr = month\n if len(monthstr) == 1:\n monthstr = \"0\" + monthstr\n return cls.DATE_AND_TIMES_SIGIL + yearstr + \"-\" + monthstr + \"-01T00:00:00/10\"", "def get_workex_months(current_bucket_workex):\r\n\r\n computed_workex = re.findall(r\"\\d+\", current_bucket_workex)\r\n if len(computed_workex) > 0:\r\n return computed_workex[0]\r\n return 0", "def yy(self):\n return str(self._year)[-2:]" ]
[ "0.7890911", "0.77757496", "0.7772196", "0.74962604", "0.7192194", "0.6839517", "0.6807995", "0.6761683", "0.6746424", "0.6616606", "0.66151744", "0.6588002", "0.6588002", "0.6579385", "0.6565151", "0.65579987", "0.65383244", "0.64961576", "0.6473222", "0.64433897", "0.6393315", "0.6372145", "0.6341654", "0.63224846", "0.63065106", "0.6232501", "0.62307566", "0.61551315", "0.61551315", "0.6147469", "0.6135701", "0.61142373", "0.6095906", "0.6088894", "0.60623354", "0.6004843", "0.5999738", "0.59480035", "0.5914901", "0.5904654", "0.5879799", "0.5878165", "0.58741647", "0.5858947", "0.5851093", "0.5851093", "0.584377", "0.5826116", "0.58194846", "0.5796774", "0.57773435", "0.5772134", "0.57557833", "0.575534", "0.5726586", "0.57214415", "0.5716278", "0.5713712", "0.5658021", "0.5652917", "0.56497556", "0.56467235", "0.56427324", "0.5638807", "0.5638152", "0.56362545", "0.562454", "0.5602464", "0.56021345", "0.55876154", "0.55770177", "0.557539", "0.55732685", "0.5547818", "0.55468667", "0.5544808", "0.55437446", "0.5536862", "0.55335593", "0.5522477", "0.5515734", "0.54926896", "0.5489771", "0.5482897", "0.54636973", "0.5458983", "0.5445962", "0.5426738", "0.5421715", "0.5408783", "0.5397065", "0.53838986", "0.5381353", "0.5378484", "0.5362185", "0.5342822", "0.53425664", "0.5320191", "0.53047645", "0.5302813" ]
0.8435946
0
Retrieve the ECB exchange rate data based on the arguments provided.
def _get_ecb_data(frequency: str, start: str, end: str) -> bytes:
    content = bytearray()
    query_url = urljoin(ECB_DATA_API, ECB_EXR_GBP_EUR.format(frequency))
    query_url = urljoin(query_url, ECB_QUERY_PARAMS.format(start, end))

    with requests.get(query_url) as response:
        response.raise_for_status()
        # The data we're requesting is relatively small so we can just read it
        # one chunk; to do that we'll set the chunk size to something bigger
        # than the data we're reading. Based on some trial and error, it looks
        # like 3 KBytes is the right number.
        for chunk in response.iter_content(chunk_size=1024 * 3):  # 3 KBytes
            content.extend(chunk)

    return bytes(content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_exchange_rate_data(self, source_currency, exchanged_currency, valuation_date):\n raise NotImplementedError", "def getData(self):\n\n url = 'https://www.ecb.europa.eu/stats/eurofxref/eurofxref-hist.zip'\n try:\n file, _ = urlretrieve(url)\n zip_file_object = zipfile.ZipFile(file, 'r')\n first_file = zip_file_object.namelist()[0]\n file = zip_file_object.open(first_file)\n\n file_handler = []\n for row in file:\n file_handler.append(row.decode())\n\n # getting the currency headers into header_list\n header_list = []\n notFound = True\n x = 0\n while notFound:\n if file_handler[x].startswith('Date'):\n header = file_handler[x].split(',')\n for col in header:\n header_list.append(col.strip())\n notFound = False\n x += 1\n self.currencies = list(filter(None, header_list))\n self.currencies.append('EUR')\n self.currencies = self.currencies[1:] # Removing the \"Date\" entry\n\n data = []\n for row in file_handler[x:]:\n if row.startswith('`\\n'):\n break\n else:\n data.append(list(filter(None, [x.replace('\\n', '') for x in row.split(',')]))) # Removing any empty extra columns at the end of each rows\n\n # filling my self.rates with the currency in the format {CURR: {date: rate, ...}, ...}\n for row in data:\n for i in range(len(self.currencies)):\n try:\n if self.currencies[i] not in self.rates:\n self.rates[self.currencies[i]] = {row[0]: row[i + 1]}\n else:\n self.rates[self.currencies[i]].update({row[0]: row[i + 1]})\n except IndexError:\n # We reached the EUR section\n if self.currencies[i] not in self.rates:\n self.rates[self.currencies[i]] = {row[0]: '1.0000'}\n else:\n self.rates[self.currencies[i]].update({row[0]: '1.0000'})\n\n self.currencies.sort()\n\n except Exception as e:\n print('Failed to process the data')\n print(e)\n finally:\n file.close()", "def get_exchange_rate_data(self, source_currency, exchanged_currency, valuation_date, provider=None, *args, **kwargs):\n raise NotImplementedError", "def get_euro_exchange_rates(currency, frequency=\"D\"):\n ISO_4217_RE = re.compile(r\"[A-Z]{3}\")\n FREQUENCIES = [\"D\", \"M\", \"A\"]\n \n URL_TEMPLATE = \"http://sdw-wsrest.ecb.europa.eu/service/data/EXR/{}.{}.EUR.SP00.A?format=csvdata\"\n \n if not ISO_4217_RE.match(currency):\n raise ValueError('\"' + currency + '\" is no valid currency code!')\n if frequency not in FREQUENCIES:\n raise ValueError(\"Frequency must be one of \" + \", \".join(FREQUENCIES))\n \n url = URL_TEMPLATE.format(frequency, currency)\n req = Request(url)\n response = urlopen(req)\n lines = []\n for line in response:\n lines.append(line.decode(\"utf-8\"))\n reader = csv.DictReader(lines)\n result = {}\n for line in reader:\n date = line[\"TIME_PERIOD\"]\n value = line[\"OBS_VALUE\"]\n result[date] = value\n return result", "def get_realtime_exchange_rate(from_currency, to_currency) :\n\turl = f\"{BASE_URL}function={settings.CURRENCY_EXCHANGE_RATE}&from_currency={from_currency}&to_currency={to_currency}&apikey={API_KEY}\" \n\trequest = requests.get(url)\n\tresult = request.json()\n\treturn result[PREFIX][EXCHANGE_RATE], result[PREFIX][DATE]", "def parse_rate():\n try:\n response = requests.get(ecb_url)\n except Exception as e:\n return {\"error\": \"error occurred while accessing www.ecb.europa.eu: {}\".format(e)}, True\n else:\n currency_xml = response.content.decode()\n root = ET.fromstring(currency_xml)\n currencies_list = [currency.attrib.get('currency') for currency in root.iter(cube) if currency.attrib.get('currency')]\n rates_list = [float(currency.attrib.get('rate')) for currency in root.iter(cube) 
if currency.attrib.get('rate')]\n result = dict(zip(currencies_list, rates_list))\n result['EUR'] = float(1)\n return result, False", "def get_currency_exchange_rate(self, from_currency, to_currency):\n _FUNCTION_KEY = 'CURRENCY_EXCHANGE_RATE'\n return _FUNCTION_KEY, 'Realtime Currency Exchange Rate', None", "def fetch_and_store_latest_ecb_exrates():\n response = requests.get(DAILY_ECB_URL)\n # Raise exception if status_code != 200 or ConnectionError\n response.raise_for_status()\n info = ET.fromstring(response.content)[2][0]\n datestamp = datetime.strptime(info.attrib['time'], \"%Y-%m-%d\").date()\n rates = [x.attrib for x in info]\n\n exrates = []\n for item in rates:\n if item['currency'] in SUPPORTED_CURRENCIES:\n exrate, created = ExchangeRate.objects.update_or_create(\n datestamp=datestamp,\n currency=item['currency'],\n defaults={'rate': Decimal(item['rate'])}\n )\n exrates.append(exrate)\n print(exrate, \"NEW EXRATE!\" if created else \"<noupdate>\")\n\n return exrates", "def exchange_rate(self):\n res = r.get(self.url + self.current_rate)\n return self.execute(res)", "def get_ecb_rates_for_currency(currency):\n # UPDATE 2018-06-05 -- read directly from the database, and skip caching\n if currency not in SUPPORTED_CURRENCIES:\n raise CurrencyNotSupported(\"Currently we don't support %s\" % currency)\n exrates = get_latest_ecb_rates_from_db(currency)\n return (exrates['datestamp'], exrates[currency])", "def get_rates_for(currency: str, date: str):\n baseurl = f\"https://openexchangerates.org/api/historical/{date}.json\"\n params = {\"app_id\": OEG_APP_ID, \"symbols\": currency, \"base\": \"USD\"}\n return make_request(baseurl=baseurl, params=params)", "def _get_latest_ecb_rate(data: bytes) -> float:\n root = etree.fromstring(data)\n values = root.xpath('.//generic:ObsValue/@value', namespaces=root.nsmap)\n last_value = len(values) - 1\n\n return float(values[last_value])", "def list(self, request, *args, **kwargs):\n data = self.process_query_params()\n if data:\n self.currency_client.get_exchange_rates_by_date_range(**data)\n return super().list(request, *args, **kwargs)", "def downloadExchangeRates(_source_currency, _track_reconnections):\n try:\n logger.info('downloadExchangeRates: Retrieving exchange rates.')\n logger.debug('downloadExchangeRates: Retrieving exchange rates for: %s' % _source_currency)\n\n exchange_rate_ = 0\n\n #download exchange rate\n got_html = getHtml(URL_CALCULATOR + '1' + _source_currency + '=?' + BASE_CURRENCY)\n\n #parse\n if got_html:\n if 'error: \"\"' in got_html:\n #parse data\n re_object = re.search(\".*rhs: \\\"(\\d\\.\\d*)\", got_html)\n\n #using float since we're not interested in high precision\n exchange_rate_ = float(re_object.group(1))\n logger.debug('downloadExchangeRates: Parsed exchange rate: %s' % exchange_rate_)\n\n else:\n #reconnect if error field not empty\n if _track_reconnections['times_reconnected'] <= MAXIMUM_RECONNECTIONS:\n logger.debug('downloadExchangeRates: Times reconnected: %s' %\n _track_reconnections['times_reconnected'])\n logger.warning('downloadExchangeRates: Server signalizes an error, repeating request.')\n\n _track_reconnections['times_reconnected'] += 1\n\n #wait for the server to allow another inquiry\n time.sleep(PAUSE_BETWEEN_RECONNECTIONS)\n\n #repeat request\n downloadExchangeRates(_source_currency, _track_reconnections)\n\n else:\n logger.error('downloadExchangeRates: Could not obtain exchange rate for: %s, returning '\n 'default value.' 
% _source_currency)\n\n return exchange_rate_\n\n except:\n raise", "def getCurrencies():", "def get_rate(currency, date):\n status = 400\n while status != 200:\n url = (\"http://api.nbp.pl/api/exchangerates/rates/A/%s/%d-%02d-%02d?format=json\" %\n (currency, date.year, date.month, date.day))\n\n response = requests.get(url)\n status = response.status_code\n if status != 200:\n date = date - datetime.timedelta(1)\n\n tree = json.loads(response.content)\n assert len(tree['rates']) == 1\n print_rate_info(tree['rates'])\n return (tree['rates'][0]['mid'], date)", "def get_data_from_exchange(self, now):\n currency_options = dict(\n currency_pair='USD',\n bid={12.00 : {'guy_1' : 100.00}},\n ask={14.00 : {'guy_2' : 200.00}},\n time=datetime.datetime.now()\n )\n currency_pair_state = CurrencyPairState(**currency_options)\n return [currency_pair_state]", "def fetch_currency_rates(url=\"http://www.nbrb.by/API/ExRates/Rates?Periodicity=0\") -> dict:\n data = {}\n response = requests.get(url)\n if response.status_code == 200:\n data = get_json(response)\n return data", "def comprxbytesrate(self) :\n\t\ttry :\n\t\t\treturn self._comprxbytesrate\n\t\texcept Exception as e:\n\t\t\traise e", "def do_eurusd(self,args):\n try:\n ppdict(bitstamp.eur_usd())\n except Exception as e:\n print \"Unexpected Error: %s\" % e\n self.onecmd('help eurusd')", "def get_ether_current_prices():\n req = requests.get('https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=BTC,USD,EUR')\n data = req.json()\n\n print('{0}, {1}, {2}'.format(data['EUR'], data['USD'], data['BTC']))", "def base_exchange_rate(self):\n return self._base_exchange_rate", "def currencies(exchange: str = CRYPTO_EXCHANGE,\n rate_limit: bool = True) -> dict:\n try:\n check_exchange_existence(exchange=exchange)\n return asyncio.get_event_loop().run_until_complete(\n getCurrencies(exchange=exchange, rate_limit=rate_limit))\n except Exception as exception:\n logger.error('Oops! An error Occurred ⚠️')\n raise exception", "def GetOpsRates():\n return GetDataFromCsvFile('ops_rates.csv')", "def get_price_data(ticker, days_befoure):\r\n #config_file=raw_input('config file: ')\r\n config_file=\"d:/tmp/moex.json\" \r\n try:\r\n with open(config_file) as config_file: \r\n conn_data = json.load(config_file)\r\n except:\r\n print \"Error: Unable to read config file. \"\r\n sys.exit(1)\r\n\r\n username = conn_data['username']\r\n password = conn_data['password']\r\n my_config = Config(user=username, password=password, proxy_url='')\r\n\r\n my_auth = MicexAuth(my_config)\r\n date = datetime.datetime.now() - datetime.timedelta(days_befoure)\r\n \r\n #ticker = 'SBER' # for tesing...\r\n \r\n if my_auth.is_real_time():\r\n iss = MicexISSClient(my_config, my_auth, MyDataHandler, MyData)\r\n iss.get_history_securities('stock',\r\n 'shares',\r\n 'tqbr',\r\n ticker, \r\n date.strftime(\"%Y-%m-%d\")\r\n #here to be start end dates\r\n )\r\n #print iss.handler.data.history\r\n return iss.handler.data.as_dataframe()", "def URDBv7_to_ElectricityRates(urdb_response):\n warnings.warn(\"ResourceTools.URDBv7_to_ElectricityRates is deprecated. 
Please use UtilityRateTools.URDBv8_to_ElectricityRates instead.\", DeprecationWarning)\n\n urdb_data = dict()\n urdb_data['en_electricity_rates'] = 1\n\n def try_get_schedule(urdb_name, data_name):\n if urdb_name in urdb_response.keys():\n urdb_data[data_name] = urdb_response[urdb_name]\n for i in range(12):\n for j in range(24):\n urdb_data[data_name][i][j] += 1\n\n def try_get_rate_structure(urdb_name, data_name):\n mat = []\n supported_units = {\n \"kwh\" : 0,\n \"kwh/kw\" : 1,\n \"kwh daily\" : 2,\n \"kwh/kw daily\" : 3\n }\n if urdb_name in urdb_response.keys():\n structure = urdb_response[urdb_name]\n for i, period in enumerate(structure):\n for j, entry in enumerate(period):\n rate = entry['rate']\n if 'adj' in entry.keys():\n rate += entry['adj']\n tier_max = 1e38\n if 'max' in entry.keys():\n tier_max = entry['max']\n sell = 0\n if 'sell' in entry.keys():\n sell = entry['sell']\n units = 0\n if 'unit' in entry.keys():\n try:\n units = supported_units[entry['unit'].lower()]\n except KeyError:\n raise RuntimeError(\"UtilityRateDatabase error: unrecognized unit in rate structure\")\n mat.append((i + 1, j + 1, tier_max, units, rate, sell))\n urdb_data[data_name] = mat\n\n def try_get_demand_structure(urdb_name, data_name):\n mat = []\n if urdb_name in urdb_response.keys():\n structure = urdb_response[urdb_name]\n for i, period in enumerate(structure):\n for j, entry in enumerate(period):\n rate = entry['rate']\n if 'adj' in entry.keys():\n rate += entry['adj']\n tier_max = 1e38\n if 'max' in entry.keys():\n tier_max = entry['max']\n if 'unit' in entry.keys():\n if entry['unit'].lower() != \"kW\".lower():\n raise RuntimeError(\"UtilityRateDatabase error: unrecognized unit in rate structure\")\n mat.append((i + 1, j + 1, tier_max, rate))\n if data_name:\n urdb_data[data_name] = mat\n else:\n return mat\n\n if \"dgrules\" in urdb_response.keys():\n rules = urdb_response['dgrules'] # dgrules\n if rules == \"Net Metering\":\n urdb_data['ur_metering_option'] = 0\n elif rules == \"Net Billing Instantaneous\":\n urdb_data['ur_metering_option'] = 2\n elif rules == \"Net Billing Hourly\":\n urdb_data['ur_metering_option'] = 3\n elif rules == \"Buy All Sell All\":\n urdb_data['ur_metering_option'] = 4\n else:\n # if no metering option provided, assume Net Metering\n urdb_data['ur_metering_option'] = 0\n\n if 'fixedchargefirstmeter' in urdb_response.keys() and 'fixedchargeunits' in urdb_response.keys():\n fixed_charge = urdb_response['fixedchargefirstmeter']\n fixed_charge_units = urdb_response['fixedchargeunits']\n if fixed_charge_units == \"$/day\":\n fixed_charge *= 365 / 12\n elif fixed_charge_units == \"$/year\":\n fixed_charge /= 12\n urdb_data['ur_monthly_fixed_charge'] = fixed_charge\n\n if 'mincharge' in urdb_response.keys():\n min_charge = urdb_response['mincharge']\n min_charge_units = urdb_response['minchargeunits']\n if min_charge_units == \"$/year\":\n urdb_data['ur_annual_min_charge'] = min_charge\n else:\n if min_charge_units == \"$/day\":\n min_charge *= 365 / 12\n urdb_data['ur_monthly_min_charge'] = min_charge\n\n try_get_schedule('energyweekdayschedule', 'ur_ec_sched_weekday')\n try_get_schedule('energyweekendschedule', 'ur_ec_sched_weekend')\n try_get_rate_structure('energyratestructure', 'ur_ec_tou_mat')\n\n try_get_demand_structure('demandratestructure', 'ur_dc_tou_mat')\n try_get_schedule('demandweekdayschedule', 'ur_dc_sched_weekday')\n try_get_schedule('demandweekendschedule', 'ur_dc_sched_weekend')\n\n flat_demand_structure = 
try_get_demand_structure('flatdemandstructure', None)\n\n if 'flatdemandmonths' in urdb_response.keys():\n urdb_data['ur_dc_enable'] = 1\n flat_mat = []\n flat_demand = urdb_response['flatdemandmonths']\n for month, period in enumerate(flat_demand):\n tiers = []\n for r in flat_demand_structure:\n if r[0] == int(period + 1):\n tiers.append(r)\n \n if len(tiers) == 0:\n raise ValueError(\"flatdemandstructure missing period number \", period)\n for t in tiers:\n month_row = []\n month_row.append(month)\n month_row += [t[i] for i in (1, 2, 3)]\n flat_mat.append(month_row)\n urdb_data['ur_dc_flat_mat'] = flat_mat\n # Fill out an empty flat rate structure if the rate has TOU demand but not flat demand \n elif \"demandratestructure\" in urdb_response.keys():\n urdb_data['ur_dc_enable'] = 1\n # Enumerate a dc_flat table with $0/kW in 12 months\n flat_mat = []\n for i in range(0, 12):\n month_mat = [i, 1, 1e38, 0]\n flat_mat.append(month_mat)\n urdb_data['ur_dc_flat_mat'] = flat_mat\n else:\n urdb_data['ur_dc_enable'] = 0\n\n if urdb_data['ur_dc_enable'] == 1 and \"ur_dc_tou_mat\" not in urdb_data.keys():\n urdb_data['ur_dc_tou_mat'] = [[1, 1, 1e38, 0], ]\n urdb_data['ur_dc_sched_weekday'] = [[1] * 24 for i in range(12)]\n urdb_data['ur_dc_sched_weekend'] = urdb_data['ur_dc_sched_weekday']\n\n return urdb_data", "def api_call(cls, currency):\n headers = {\"x-accept-version\": \"2.0.0\", \"Accept\": \"application/json\"}\n r = requests.get(cls.API_URL + currency, headers=headers)\n r.raise_for_status()\n return r.json()[\"data\"][\"rate\"]", "def parse_exchange_rates(cls, exchange_rates):\n currency_exchange_rates = []\n for exchange in exchange_rates:\n source_currency_symbol = exchange['base']\n source_currency = cls.parse_currency(source_currency_symbol)\n for exchanged_currency_symbol in exchange['rates']:\n if source_currency_symbol != exchanged_currency_symbol:\n exchanged_currency = cls.parse_currency(exchanged_currency_symbol)\n cur_exchange_rate = CurrencyExchangeRate(source_currency=source_currency,\n exchanged_currency=exchanged_currency,\n valuation_date=cls.parse_date(exchange['date']),\n rate_value=exchange['rates'][exchanged_currency_symbol])\n currency_exchange_rates.append(cur_exchange_rate)\n return currency_exchange_rates", "def createChargeExchangeRates(Z, A, pressure, ionTemperature):\r\n chargeExchangeRates = [0] * (Z + 1)\r\n\r\n # Need to return a Z+1 array\r\n h2Density = pressure * __TORR__ # using IGL, determine number density of H2 from the prescribed pressure\r\n ionMassInAMU = A * __AMU__\r\n avgIonV = __C__ * sqrt(8.0 * (ionTemperature / (pi * ionMassInAMU)))\r\n\r\n avgIonVinBohr = avgIonV / __VBOHR__\r\n sigV = __CHEXCONST__ * log( 15.0 / avgIonVinBohr) * avgIonV\r\n\r\n for i in range(1, Z + 1):\r\n chargeExchangeRates[i] = i * sigV * h2Density\r\n return chargeExchangeRates", "def get_call_data(stock_name, expire_time, strike_price):\n date = time.mktime(datetime.datetime.strptime(expire_time, \"%d/%m/%Y\").timetuple())+(16*3600)\n url = 'https://finance.yahoo.com/quote/'+stock_name+'/options?date='+str(int(date))+'&p='+stock_name\n response = requests.get(url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n values = soup.findAll(\"td\" )\n\n for i in range(2,len(values),11):\n x = float(str(values[i].contents[0].contents[0]))\n if x == float(strike_price):\n option_link = 'https://finance.yahoo.com/'+str(values[i-2].contents[0])[61:109]\n bid = float(values[i+2].contents[0])\n ask = float(values[i+3].contents[0])\n return bid, ask", "def 
acquire_rates_data(self):\n prinf('%s params: %s', self.base_url, self.my_params)\n g_start()\n try:\n self.response_data = requests.get(self.base_url, params=self.my_params, timeout=self.timeout)\n except OSError:\n prinw('%s host not available', self.name)\n return False\n g_end('request responded')\n\n if not self.response_data:\n return False\n else:\n status_code = self.response_data.status_code\n prinf(status_code )\n if status_code > 400 :\n prinw('%s currency converter site response not found. %s', self.nam, status_code)\n return False\n elif status_code == 200:\n prinf('%s response ok', self.name)\n\n self.update_rates_valid_data()\n self.in_ccode = self.response_data.json()[self.strs[jpn.key_in_ccode]]\n\n self.rates = self.response_data.json()[self.strs[jpn.key_output]]\n\n # as requested ccode is not in the request respond\n # we add it => e.g 1 EUR = 1 EUR => needed for further pandas extrapolation\n self.rates.update({self.in_ccode: float(1)})\n return True", "def _get_currency_rate(currency):\n response = requests.get(f'{config(\"OPENEXCHANGERATES_URL\")}')\n if not response.ok:\n # log\n # can handle exception in better way later\n raise Exception(\n f'currency conversion api not working {response.text}')\n rates = response.json().get('rates')\n currency_rate = rates.get(currency.upper(), None)\n if not currency_rate:\n raise ValueError(f'Given currency conversion rate not found')\n return currency_rate", "def getActiveCurrencies():", "def get_price_history_lookback(access_token,ticker,periodType,period,frequencyType,frequency):\r\n \r\n price_url = 'https://api.tdameritrade.com/v1/marketdata/{}/pricehistory'.format(ticker)\r\n\r\n #The header for getting a quote needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Parameters for period of time and frequency of data to get\r\n params = {'periodType':periodType,\r\n 'period': period,\r\n 'frequencyType': frequencyType,\r\n 'frequency': frequency}\r\n \r\n #Make the get request to TD Ameritrade\r\n price_history_json = requests.get(url=price_url,headers=headers,params=params)\r\n return price_history_json.json()", "def forex_rate(**params):\n endpoint = 'calc/fx'\n return request(authenticate=False, version=2, endpoint=endpoint, method='POST', body_params=params)", "def get_latest(self):\n url = f\"{self.get_api_url()}+latest\"\n # set api parameters\n params = {}\n params.update({'base': self.base_currency})\n params.update({'symbols': ','.join(self.target_currency_codes)})\n # call the api for rates\n response = requests.get(url, params=params)\n if response.status_code == 200:\n base, rates = response.json().get('base'), response.json().get('rates')\n # remove base currency from rates if it is returned by the data source\n rates.pop(self.base_currency, None)\n return base, rates\n return None, None", "def test_exchange():\n exchange_rate = exchange(\"USD\", \"EUR\", 2.5)\n re_exchange_rate = exchange(\"EUR\", \"USD\", exchange_rate)\n assert re_exchange_rate == 2.5", "def getUserCurrency():", "def getValue(currency=None):", "def GetCurrencies():\n return GetDataFromCsvFile('currencies.csv')", "def comptcprxbytesrate(self) :\n\t\ttry :\n\t\t\treturn self._comptcprxbytesrate\n\t\texcept Exception as e:\n\t\t\traise e", "def test_currency_rate(self):\n currency_name = ['USD'] * 4\n rates = [3.67, 4.07, 3.04, 3.89]\n helper.currency_loop_helper(get_historical_currency_rate, TestHistoricalRates.dates_rate,\n rates, currency_name)", 
"def rates(self):\n raise NotImplementedError(\"Must be implemented by subclass.\")", "def getFactor(currency):", "def getDataRate(self):\n \n return self.DataRate", "def get(self):\n args = parser_degree.parse_args()\n if args['amount'] == 'all':\n return GenericGet().get_data(args,0,False)\n elif args['amount'] == 'nearest':\n return GenericGet().get_data(args,0,True)", "async def fetch_currencies(self, params={}):\n labels = [\n 'pub:list:currency',\n 'pub:map:currency:sym', # maps symbols to their API symbols, BAB > BCH\n 'pub:map:currency:label', # verbose friendly names, BNT > Bancor\n 'pub:map:currency:unit', # maps symbols to unit of measure where applicable\n 'pub:map:currency:undl', # maps derivatives symbols to their underlying currency\n 'pub:map:currency:pool', # maps symbols to underlying network/protocol they operate on\n 'pub:map:currency:explorer', # maps symbols to their recognised block explorer URLs\n 'pub:map:currency:tx:fee', # maps currencies to their withdrawal fees https://github.com/ccxt/ccxt/issues/7745,\n 'pub:map:tx:method', # maps withdrawal/deposit methods to their API symbols\n ]\n config = ','.join(labels)\n request = {\n 'config': config,\n }\n response = await self.publicGetConfConfig(self.extend(request, params))\n #\n # [\n #\n # a list of symbols\n # [\"AAA\",\"ABS\",\"ADA\"],\n #\n # # sym\n # # maps symbols to their API symbols, BAB > BCH\n # [\n # ['BAB', 'BCH'],\n # ['CNHT', 'CNHt'],\n # ['DSH', 'DASH'],\n # ['IOT', 'IOTA'],\n # ['LES', 'LEO-EOS'],\n # ['LET', 'LEO-ERC20'],\n # ['STJ', 'STORJ'],\n # ['TSD', 'TUSD'],\n # ['UDC', 'USDC'],\n # ['USK', 'USDK'],\n # ['UST', 'USDt'],\n # ['USTF0', 'USDt0'],\n # ['XCH', 'XCHF'],\n # ['YYW', 'YOYOW'],\n # # ...\n # ],\n # # label\n # # verbose friendly names, BNT > Bancor\n # [\n # ['BAB', 'Bitcoin Cash'],\n # ['BCH', 'Bitcoin Cash'],\n # ['LEO', 'Unus Sed LEO'],\n # ['LES', 'Unus Sed LEO(EOS)'],\n # ['LET', 'Unus Sed LEO(ERC20)'],\n # # ...\n # ],\n # # unit\n # # maps symbols to unit of measure where applicable\n # [\n # ['IOT', 'Mi|MegaIOTA'],\n # ],\n # # undl\n # # maps derivatives symbols to their underlying currency\n # [\n # ['USTF0', 'UST'],\n # ['BTCF0', 'BTC'],\n # ['ETHF0', 'ETH'],\n # ],\n # # pool\n # # maps symbols to underlying network/protocol they operate on\n # [\n # ['SAN', 'ETH'], ['OMG', 'ETH'], ['AVT', 'ETH'], ['EDO', 'ETH'],\n # ['ESS', 'ETH'], ['ATD', 'EOS'], ['ADD', 'EOS'], ['MTO', 'EOS'],\n # ['PNK', 'ETH'], ['BAB', 'BCH'], ['WLO', 'XLM'], ['VLD', 'ETH'],\n # ['BTT', 'TRX'], ['IMP', 'ETH'], ['SCR', 'ETH'], ['GNO', 'ETH'],\n # # ...\n # ],\n # # explorer\n # # maps symbols to their recognised block explorer URLs\n # [\n # [\n # 'AIO',\n # [\n # \"https://mainnet.aion.network\",\n # \"https://mainnet.aion.network/#/account/VAL\",\n # \"https://mainnet.aion.network/#/transaction/VAL\"\n # ]\n # ],\n # # ...\n # ],\n # # fee\n # # maps currencies to their withdrawal fees\n # [\n # [\"AAA\",[0,0]],\n # [\"ABS\",[0,131.3]],\n # [\"ADA\",[0,0.3]],\n # ],\n # ]\n #\n indexed = {\n 'sym': self.index_by(self.safe_value(response, 1, []), 0),\n 'label': self.index_by(self.safe_value(response, 2, []), 0),\n 'unit': self.index_by(self.safe_value(response, 3, []), 0),\n 'undl': self.index_by(self.safe_value(response, 4, []), 0),\n 'pool': self.index_by(self.safe_value(response, 5, []), 0),\n 'explorer': self.index_by(self.safe_value(response, 6, []), 0),\n 'fees': self.index_by(self.safe_value(response, 7, []), 0),\n }\n ids = self.safe_value(response, 0, [])\n result = {}\n for i in range(0, 
len(ids)):\n id = ids[i]\n if id.find('F0') >= 0:\n # we get a lot of F0 currencies, skip those\n continue\n code = self.safe_currency_code(id)\n label = self.safe_value(indexed['label'], id, [])\n name = self.safe_string(label, 1)\n pool = self.safe_value(indexed['pool'], id, [])\n type = self.safe_string(pool, 1)\n feeValues = self.safe_value(indexed['fees'], id, [])\n fees = self.safe_value(feeValues, 1, [])\n fee = self.safe_number(fees, 1)\n undl = self.safe_value(indexed['undl'], id, [])\n precision = '8' # default precision, todo: fix \"magic constants\"\n fid = 'f' + id\n result[code] = {\n 'id': fid,\n 'uppercaseId': id,\n 'code': code,\n 'info': [id, label, pool, feeValues, undl],\n 'type': type,\n 'name': name,\n 'active': True,\n 'deposit': None,\n 'withdraw': None,\n 'fee': fee,\n 'precision': int(precision),\n 'limits': {\n 'amount': {\n 'min': self.parse_number(self.parse_precision(precision)),\n 'max': None,\n },\n 'withdraw': {\n 'min': fee,\n 'max': None,\n },\n },\n 'networks': {},\n }\n networks = {}\n currencyNetworks = self.safe_value(response, 8, [])\n cleanId = id.replace('F0', '')\n for j in range(0, len(currencyNetworks)):\n pair = currencyNetworks[j]\n networkId = self.safe_string(pair, 0)\n currencyId = self.safe_string(self.safe_value(pair, 1, []), 0)\n if currencyId == cleanId:\n network = self.safe_network(networkId)\n networks[network] = {\n 'info': networkId,\n 'id': networkId.lower(),\n 'network': networkId,\n 'active': None,\n 'deposit': None,\n 'withdraw': None,\n 'fee': None,\n 'precision': None,\n 'limits': {\n 'withdraw': {\n 'min': None,\n 'max': None,\n },\n },\n }\n keysNetworks = list(networks.keys())\n networksLength = len(keysNetworks)\n if networksLength > 0:\n result[code]['networks'] = networks\n return result", "def convert_exchange_base(cls, exchange_values):\n bases = exchange_values['rates'].keys()\n rates = exchange_values['rates']\n exchange_rates = [{'base': base, 'date': exchange_values['date'],\n 'rates': {rate: rates[rate] / rates[base] for rate in rates}} for base in bases]\n\n return exchange_rates", "def get_exchange_rate(self, order_type, amount=None, price=None, pair=\"btc_jpy\"):\n if order_type not in [\"sell\", \"buy\"]:\n raise CoinCheckApiException(\"order_type should be \\\"sell\\\" or \\\"buy\\\"\")\n if amount is not None and price is not None:\n raise CoinCheckApiException(\"only one of \\\"amount\\\" and \\\"price\\\" can be provided \")\n\n query = \"order_type={}&pair={}\".format(order_type, pair)\n if amount is not None:\n query += \"&amount={}\".format(amount)\n else:\n query += \"&price={}\".format(price)\n\n return self.execute_http_call(\"/api/exchange/orders/rate?{}\".format(query), \"GET\", headers=None)", "def ex_curve(data):\n rv = []\n try:\n ef = autocomplete_curve_function(data[0])\n ed = autocomplete_curve_direction(data[1])\n period = 2\n try:\n period = max(int(data[2]), 2)\n except ValueError:\n pass\n data = data[3:]\n if not data:\n if consts.VERBOSE:\n print('ERROR: No data for curve')\n return []\n f = CURVE_FUNCTIONS[ef][ed]\n maxi = len(data)-1\n for i in range(period):\n v = f(float(i) / float(period-1))\n di = int(round(v*float(maxi)))\n rv.append(data[di])\n\n except Exception as e:\n if consts.VERBOSE:\n print('ERROR: Curve failed [%s]'%e)\n\n return rv", "def test_foreign_exchange_python2(self, mock_urlopen):\n fe = ForeignExchange(key=TestAlphaVantage._API_KEY_TEST)\n url = 
\"https://www.alphavantage.co/query?function=CURRENCY_EXCHANGE_RATE&from_currency=BTC&to_currency=CNY&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = fe.get_currency_exchange_rate(\n from_currency='BTC', to_currency='CNY')\n self.assertIsInstance(\n data, dict, 'Result Data must be a dictionary')", "async def getCurrencies(self, body=\"\"):\n payload = {}\n \n # Parameter validation\n schema = ConfigurationValidator.getCurrencies()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(api_url=self._urls[\"getCurrencies\"], proccessed_params=\"\"\"{\"required\":[],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + base64.b64encode(\"{}:{}\".format(self._conf.applicationID, self._conf.applicationToken).encode()).decode()\n }\n if self._conf.locationDetails:\n headers[\"x-location-detail\"] = ujson.dumps(self._conf.locationDetails)\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(urlparse(self._urls[\"getCurrencies\"]).netloc, \"get\", await create_url_without_domain(\"/service/application/configuration/v1.0/currencies\", ), query_string, headers, body, exclude_headers=exclude_headers), data=body, cookies=self._conf.cookies)", "def epfromdata(data, krdel, kexp, fp):\n balance = compute_balance(data, krdel)\n return weighted_energy(balance, fp, kexp)", "def lookup(self, invoice_code):\n return self.exchange_rate_btc_today[0]", "def get(self, as_of_date: str = None):\n if not as_of_date:\n as_of_date = (datetime.now() - timedelta(days=1)).strftime(\"%Y-%m-%d\")\n\n ExchangeRates.GET_EXCHANGE_RATES = ExchangeRates.GET_EXCHANGE_RATES.format(as_of_date, '{0}')\n return self._query_get_all('ExchangeRate', ExchangeRates.GET_EXCHANGE_RATES)", "def getActiveCurrency():", "def get_trade_data(back_days=20, candle_interval=\"H1\", bb_period=50):\n\n NY_time = datetime.datetime.utcnow() - datetime.timedelta(hours=5)\n back_days = datetime.timedelta(days=back_days)\n start_date = (NY_time - back_days).isoformat(\"T\") + \"Z\"\n cr = ot.CandleRequest()\n res = cr.get_list(start_date, end_date=None, interval=candle_interval)\n bb = dfutil.get_bb(res[\"closeBid\"], bb_period,)\n return bb", "def LoadEEGData(filename, EEGdevice):\n if EEGdevice == 7:\n x = 1\n elif EEGdevice == 8:\n # Read in the .easy file\n df = pd.read_csv(filename, delimiter='\\t', header=None)\n\n # Get metadata from the .info file\n fname = filename[:-5] + '.info'\n with open(fname) as f:\n content = f.readlines()\n content = [x.strip() for x in content]\n\n # Get the channel names\n channel_info = [x for x in content if 'Channel ' in x]\n channel_names = []\n for ch in range(len(channel_info)):\n channel_names.append(channel_info[ch].split(': ')[1])\n\n channel_names.append('X')\n channel_names.append('Y')\n channel_names.append('Z')\n channel_names.append('STI 014')\n channel_names.append('DateTime')\n\n # Get sampling rates\n sampling_rates = [x for x in content if 'sampling rate: ' in x]\n fs_all = []\n for freq in range(len(sampling_rates)):\n tmp = sampling_rates[freq].split(': ')[1].split(' ')[0]\n if tmp in ['N/A']:\n print('Skipping N/A')\n else:\n 
fs_all.append(float(sampling_rates[freq].split(': ')[1].split(' ')[0]))\n\n # Store sampling rates\n fs = fs_all[0]\n fs_accel = fs_all[1]\n\n # Assign the column names\n df.columns = channel_names\n \n # Return dataframe and sampling rates\n return df, fs, fs_accel", "def getAmount2(*args):", "def _get_eur_gbp_last_daily(self) -> None:\n data = _get_ecb_data(FREQUENCY_DAILY, _ten_days_ago(), _today())\n\n self.eur_gbp_last_day = _get_latest_ecb_rate(data)", "def _generate_currency_rates(self, parsed_data):\n\n for line in self:\n rate_info = parsed_data.get(line.move_id.currency_id.name, None)\n\n if not rate_info:\n raise UserError(_(\"Your main currency (%s) is not supported by this exchange rate provider. Please choose another one.\", company.currency_id.name))\n\n base_currency_rate = rate_info[0]\n\n for currency, (rate, date_rate) in parsed_data.items():\n rate_value = rate/base_currency_rate\n if currency == line.user_currency_id.name:\n line.exchange_rate = rate_value\n line.price_unit = rate_value * line.user_amount", "def readOneData(self):\n\n\t\tif self._mt5Client is not None:\n\t\t\tdatas = self._mt5Client.getData()\n\n\t\t\tif datas is not None:\n\t\t\t\tPERIOD = int(self._config['data']['predict'])\n\t\t\t\tHALF_PERIOD = int(PERIOD/2)\n\n\t\t\t\tdata = []\n\n\t\t\t\t#Time Got\n\t\t\t\tself._LAST_PERIOD_PREDICTED_END = datas['time']\n\n\t\t\t\t#time open high low close tick_volume spread real_\n\t\t\t\t#Switch the price type calucation\n\n\t\t\t\tw_p = self._config['data']['price']\n\t\t\t\tv = 0\n\n\t\t\t\tif(w_p == CHART_PRICES_TYPE['O']):\n\t\t\t\t\tv = float(datas['open']) \n\n\t\t\t\telif(w_p == CHART_PRICES_TYPE['C']):\n\t\t\t\t\t\n\t\t\t\t\tv = float(datas['close']) \n\n\t\t\t\telif(w_p == CHART_PRICES_TYPE['H']):\n\t\t\t\t\t\n\t\t\t\t\tv = float(datas['high'])\n\n\t\t\t\telif(w_p == CHART_PRICES_TYPE['L']):\n\n\t\t\t\t\tv = float(datas['low']) \n\n\t\t\t\telif(w_p == CHART_PRICES_TYPE['HL/2']):\n\t\t\t\t\tv = ( float(datas['low']) + float(datas['high']) ) /2\n\t\t\t\t\n\t\t\t\tself.notify(msg={\n\t\t\t\t\t\t\t\t\t'prices': {\n\t\t\t\t\t\t\t\t\t\t'values': {\n\t\t\t\t\t\t\t\t\t\t\t'RP': str(v)\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t} \n\t\t\t\t\t\t\t\t} \n\t\t\t\t)\n\n\t\t\t\tdata.append(100000 * v ) \n\n\t\t\t\tself._TEMPORARY_GLOBAL_DATA.append(data[-1])\n\n\t\t\t\tself._GLOBAL_DATA.append(data[-1])\n\n\t\t\t\treturn data", "async def test_get_rates_get(client):\n params = [('exchangeType', 'exchange_type_example')]\n headers = { \n 'Accept': 'application/json',\n 'Authorization': 'Bearer special-key',\n }\n response = await client.request(\n method='GET',\n path='/public/exchange/1/getRates',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def get_base_depos(self, start, end, currencies, tenor, cut=\"NYC\", source=\"bloomberg\",\n cache_algo=\"internet_load_return\"):\n\n market_data_generator = self.market_data_generator\n\n if isinstance(currencies, str): currencies = [currencies]\n if isinstance(tenor, str): tenor = [tenor]\n\n tickers = []\n\n for cr in currencies:\n for tn in tenor:\n tickers.append(cr + tn)\n\n market_data_request = MarketDataRequest(\n start_date=start, finish_date=end,\n data_source=source,\n category='base-depos',\n freq='daily',\n cut=cut,\n tickers=tickers,\n fields=['close'],\n cache_algo=cache_algo,\n environment='backtest'\n )\n\n data_frame = market_data_generator.fetch_market_data(market_data_request)\n data_frame.index.name = 'Date'\n\n 
return data_frame", "def get_ebbp(data):\n if data is None:\n raise EmptyDataError('[!] Invalid data value')\n\n result = TA.EBBP(data)\n if result is None:\n raise IndicatorException\n return result", "def get_exchanges():\n url = 'https://help.yahoo.com/kb/finance-for-web/SLN2310.html?impressions=true'\n dataframes = pd.read_html(url)\n return dataframes[0]", "def update_exchange_rates():\n try:\n from djmoney.contrib.exchange.models import Rate\n\n from common.settings import currency_code_default, currency_codes\n from InvenTree.exchange import InvenTreeExchange\n except AppRegistryNotReady: # pragma: no cover\n # Apps not yet loaded!\n logger.info(\"Could not perform 'update_exchange_rates' - App registry not ready\")\n return\n except Exception: # pragma: no cover\n # Other error?\n return\n\n backend = InvenTreeExchange()\n base = currency_code_default()\n logger.info(f\"Updating exchange rates using base currency '{base}'\")\n\n try:\n backend.update_rates(base_currency=base)\n\n # Remove any exchange rates which are not in the provided currencies\n Rate.objects.filter(backend=\"InvenTreeExchange\").exclude(currency__in=currency_codes()).delete()\n except OperationalError:\n logger.warning(\"Could not update exchange rates - database not ready\")\n except Exception as e: # pragma: no cover\n logger.error(f\"Error updating exchange rates: {e} ({type(e)})\")", "async def getAppCurrencies(self, body=\"\"):\n payload = {}\n \n # Parameter validation\n schema = ConfigurationValidator.getAppCurrencies()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(api_url=self._urls[\"getAppCurrencies\"], proccessed_params=\"\"\"{\"required\":[],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + base64.b64encode(\"{}:{}\".format(self._conf.applicationID, self._conf.applicationToken).encode()).decode()\n }\n if self._conf.locationDetails:\n headers[\"x-location-detail\"] = ujson.dumps(self._conf.locationDetails)\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(urlparse(self._urls[\"getAppCurrencies\"]).netloc, \"get\", await create_url_without_domain(\"/service/application/configuration/v1.0/currency\", ), query_string, headers, body, exclude_headers=exclude_headers), data=body, cookies=self._conf.cookies)", "def get_data_logic():\r\n global input_exchange\r\n global input_symbols\r\n global all_symbols\r\n global input_timeframe\r\n\r\n # create exchange connection\r\n exchange = Exchange(input_exchange)\r\n\r\n # perform check that exchange can grab price data\r\n if exchange.connection.has['fetchOHLCV']:\r\n\r\n # user ticked 'All Symbols?', so includes all symbols in\r\n # exchange_tickers.py for the particular exchange\r\n if all_symbols:\r\n symbol_list = SymbolList(symbols='auto', exchange=exchange)\r\n # user didn't tick 'All Symbols?', so create unpopulated symbol list\r\n else:\r\n symbol_list = SymbolList(exchange=exchange)\r\n # add all symbols user inputted\r\n for s in input_symbols:\r\n symbol_list.input_symbol(s)\r\n\r\n # get auto timeframe and check it is valid\r\n timeframe = Timeframe(timeframe=input_timeframe, exchange=exchange)\r\n while not timeframe.check_timeframe():\r\n 
timeframe.input_timeframe() # default to asking for input\r\n\r\n print(f\"Pulling data on the {timeframe.tf} timeframe for...\")\r\n print(symbol_list.symbols)\r\n\r\n # get current time in UTC in milliseconds\r\n now = datetime.now().astimezone(pytz.timezone('UTC'))\r\n now = int(now.timestamp()*1000)\r\n\r\n # loop over each symbol and pull new data\r\n for sym in symbol_list.symbols:\r\n # create csv filename and path\r\n file_sym = sym.replace('/', '')\r\n file_sym = file_sym.replace('-', '')\r\n filename = f\"{exchange.name}_{file_sym}_{timeframe.tf}.csv\" # generate filename from given information\r\n csv_path = f\"{exchange.name}/{timeframe.tf}/{filename}\"\r\n\r\n # get most recent price data and append it to existing data\r\n # (if it exists)\r\n price_data = PriceData(exchange=exchange, tf=timeframe.tf,\r\n sym=sym, now=now, path=csv_path)\r\n\r\n # check if price data csv already exists\r\n if price_data.exists():\r\n price_data.get_current()\r\n # get new data as far back as possible if csv does not exist\r\n else:\r\n price_data.get_new()\r\n\r\n # keep updating price_data until current time\r\n price_data.update()\r\n\r\n # write to csv\r\n price_data.write()\r\n\r\n print(\"Finished writing files!\")", "def _do_get_rate(self):\n rate = {\n 1: \"1 : Helium Probe in FAST rate\",\n 0: \"0 : Helium Probe in SLOW rate\"\n }\n result = self._execute('X')\n return rate.get(int(format(int(result[5:7]), '08b')[6]), \"Unknown\")", "def get_updated_currency(self, currency_array, main_currency,\n max_delta_days):\n url = 'http://rate.bot.com.tw/xrt/flcsv/0/day'\n\n # We do not want to update the main currency\n if main_currency in currency_array:\n currency_array.remove(main_currency)\n _logger.debug(\"BOT currency rate service : connecting...\")\n try:\n url_open = urllib.request.urlopen(url)\n csvfile = csv.reader(io.StringIO(url_open.read().decode('utf-8-sig')), delimiter=',')\n url_open.close()\n except IOError:\n raise UserError(\n _('Web Service does not exist (%s)!') % url)\n\n next(csvfile)\n exchange = {}\n for row in csvfile:\n bid = float(row[3])\n ask = float(row[13])\n\n exchange[row[0]] = {\n 'bid': bid,\n 'ask': ask\n }\n\n self.check_rate_date(datetime.today(), max_delta_days)\n self.supported_currency_array = list(exchange.keys())\n\n self.supported_currency_array.append('TWD')\n _logger.debug(\"Supported currencies = %s \" %\n self.supported_currency_array)\n self.validate_cur(main_currency)\n if main_currency != 'TWD':\n main_rate = float(exchange[main_currency]['ask'])\n if main_currency in currency_array:\n currency_array.remove(main_currency)\n for curr in currency_array:\n self.validate_cur(curr)\n if curr == 'TWD':\n rate = main_rate\n else:\n if main_currency == 'TWD':\n rate = 1 / float(exchange[curr]['ask'])\n else:\n rate = main_rate / float(exchange[curr]['ask'])\n self.updated_currency[curr] = rate\n _logger.debug(\n \"Rate retrieved : 1 %s = %s %s\" % (main_currency, rate, curr)\n )\n return self.updated_currency, self.log_info", "def latest_L2_order_book_entry(symbol: str,\n exchange: str = CRYPTO_EXCHANGE,\n rate_limit: bool = True):\n try:\n check_exchange_existence(exchange=exchange)\n response = asyncio.get_event_loop().run_until_complete(\n getOrderBookL2(symbol=symbol,\n number_of_data_points=1,\n exchange=exchange,\n rate_limit=rate_limit))\n latest_orderbook_entry_dict = {}\n latest_orderbook_entry_dict['symbol'] = symbol\n latest_orderbook_entry_dict['ask'] = response['asks'][0][0] if len(\n response['asks']) > 0 else None\n 
latest_orderbook_entry_dict['asksize'] = response['asks'][0][1] if len(\n response['asks']) > 0 else None\n latest_orderbook_entry_dict['bid'] = response['bids'][0][0] if len(\n response['bids']) > 0 else None\n latest_orderbook_entry_dict['bidsize'] = response['bids'][0][1] if len(\n response['bids']) > 0 else None\n latest_orderbook_entry_dict['datetime'] = response['datetime']\n latest_orderbook_entry_dict['nonce'] = response['nonce']\n return latest_orderbook_entry_dict\n except Exception as exception:\n logger.error('Oops! An error Occurred ⚠️')\n raise exception", "def get_data(end_date, n, local, foreign):\n URL = \"https://api.exchangeratesapi.io/history\"\n PARAMS = {'start_at': str(get_weekday_n_days_ago(end_date, n)),\n 'end_at': str(end_date),\n 'symbols': foreign,\n 'base': local}\n r = requests.get(url=URL, params=PARAMS)\n data = r.json()\n input_data = []\n for day in data['rates']:\n input_data.append([datetime.strptime(day, '%Y-%m-%d').date(),\n float(\"{:.8f}\".format(data['rates'][day][foreign]))])\n input_data.sort(key=lambda x: x[0])\n return input_data[-n:]", "def update(self):\n self.rate = self.exchange.latest()", "def get_by_source(self, source_currency_code: str):\n as_of_date = (datetime.now() - timedelta(days=1)).strftime(\"%Y-%m-%d\")\n return self._get_request(\n 'ExchangeRate', self.GET_EXCHANGE_RATES_BY_SOURCE.format(source_currency_code, as_of_date))", "def currency_rate(self, init):\r\n\r\n curr = CurrencyRates()\r\n curr_rate = curr.get_rates(init)\r\n return curr_rate", "def OnRtnDepthMarketData(self, data: dict) -> None:\n current_date = data[\"TradingDay\"]\n current_time = data[\"UpdateTime\"]\n dt = datetime.strptime(\n f'{current_date}-{current_time}', \"%Y%m%d-%H:%M:%S\"\n )\n dt = CHINA_TZ.localize(dt)\n\n tick = TickData(\n symbol=data[\"SecurityID\"],\n exchange=EXCHANGE_TORA2VT[bytes.decode(data[\"ExchangeID\"])],\n datetime=dt,\n name=data[\"SecurityName\"],\n volume=0,\n open_interest=data[\"OpenInterest\"],\n last_price=data[\"LastPrice\"],\n last_volume=data[\"Volume\"],\n limit_up=data[\"UpperLimitPrice\"],\n limit_down=data[\"LowerLimitPrice\"],\n open_price=data[\"OpenPrice\"],\n high_price=data[\"HighestPrice\"],\n low_price=data[\"LowestPrice\"],\n pre_close=data[\"PreClosePrice\"],\n bid_price_1=data[\"BidPrice1\"],\n ask_price_1=data[\"AskPrice1\"],\n bid_volume_1=data[\"BidVolume1\"],\n ask_volume_1=data[\"AskVolume1\"],\n gateway_name=self.gateway_name\n )\n\n if data[\"BidVolume2\"] or data[\"AskVolume2\"]:\n tick.bid_price_2 = data[\"BidPrice2\"]\n tick.bid_price_3 = data[\"BidPrice3\"]\n tick.bid_price_4 = data[\"BidPrice4\"]\n tick.bid_price_5 = data[\"BidPrice5\"]\n\n tick.ask_price_2 = data[\"AskPrice2\"]\n tick.ask_price_3 = data[\"AskPrice3\"]\n tick.ask_price_4 = data[\"AskPrice4\"]\n tick.ask_price_5 = data[\"AskPrice5\"]\n\n tick.bid_volume_2 = data[\"BidVolume2\"]\n tick.bid_volume_3 = data[\"BidVolume3\"]\n tick.bid_volume_4 = data[\"BidVolume4\"]\n tick.bid_volume_5 = data[\"BidVolume5\"]\n\n tick.ask_volume_2 = data[\"AskVolume2\"]\n tick.ask_volume_3 = data[\"AskVolume3\"]\n tick.ask_volume_4 = data[\"AskVolume4\"]\n tick.ask_volume_5 = data[\"AskVolume5\"]\n\n self.gateway.on_tick(tick)", "def fetch_decay_radiation(**kwargs):\n\n query = _DecayRadiationQuery(**kwargs)\n # apply elevel_range filter (hack around the web API)\n elevel = query.df[\"Energy Level (MeV)\"]\n keep = (elevel >= query.elevel_range[0]) & (elevel <= query.elevel_range[1])\n query.df = query.df[keep]\n return query.df", "def 
bitcoinaverage(site):\n url = \"https://apiv2.bitcoinaverage.com/frontend/constants/exchangerates/local\"\n try:\n session = requests.Session()\n cfscrape_requests = cfscrape.create_scraper(sess=session)\n ret = cfscrape_requests.get(url, timeout=(15, 15)).json()[\"rates\"]\n data = {\"USD:\" + key: float(val[\"rate\"]) for key, val in ret.items()}\n data = refine_data(data)\n print(site, data)\n race_write(f\"{site}_forex.txt\", json_dumps(data))\n except:\n print(f\"{site} failed to load\")", "def getBaseCurrency():", "def test_get_currency_using_get(self):\n pass", "def OnRtnDepthMarketData(self, data: dict) -> None:\n current_date = data[\"TradingDay\"]\n current_time = data[\"UpdateTime\"]\n dt = datetime.strptime(\n f'{current_date}-{current_time}', \"%Y%m%d-%H:%M:%S\"\n )\n # dt = CHINA_TZ.localize(dt)\n\n tick = TickData(\n symbol=data[\"SecurityID\"],\n exchange=EXCHANGE_TORA2VT[bytes.decode(data[\"ExchangeID\"])],\n datetime=dt,\n name=data[\"SecurityName\"],\n volume=0,\n open_interest=data[\"OpenInterest\"],\n last_price=data[\"LastPrice\"],\n last_volume=data[\"Volume\"],\n limit_up=data[\"UpperLimitPrice\"],\n limit_down=data[\"LowerLimitPrice\"],\n open_price=data[\"OpenPrice\"],\n high_price=data[\"HighestPrice\"],\n low_price=data[\"LowestPrice\"],\n pre_close=data[\"PreClosePrice\"],\n bid_price_1=data[\"BidPrice1\"],\n ask_price_1=data[\"AskPrice1\"],\n bid_volume_1=data[\"BidVolume1\"],\n ask_volume_1=data[\"AskVolume1\"],\n gateway_name=self.gateway_name\n )\n\n if data[\"BidVolume2\"] or data[\"AskVolume2\"]:\n tick.bid_price_2 = data[\"BidPrice2\"]\n tick.bid_price_3 = data[\"BidPrice3\"]\n tick.bid_price_4 = data[\"BidPrice4\"]\n tick.bid_price_5 = data[\"BidPrice5\"]\n\n tick.ask_price_2 = data[\"AskPrice2\"]\n tick.ask_price_3 = data[\"AskPrice3\"]\n tick.ask_price_4 = data[\"AskPrice4\"]\n tick.ask_price_5 = data[\"AskPrice5\"]\n\n tick.bid_volume_2 = data[\"BidVolume2\"]\n tick.bid_volume_3 = data[\"BidVolume3\"]\n tick.bid_volume_4 = data[\"BidVolume4\"]\n tick.bid_volume_5 = data[\"BidVolume5\"]\n\n tick.ask_volume_2 = data[\"AskVolume2\"]\n tick.ask_volume_3 = data[\"AskVolume3\"]\n tick.ask_volume_4 = data[\"AskVolume4\"]\n tick.ask_volume_5 = data[\"AskVolume5\"]\n\n self.gateway.on_tick(tick)", "def calculateDataRate(self):\n pass", "async def fetch_balance(self, params={}):\n # self api call does not return the 'used' amount - use the v1 version instead(which also returns zero balances)\n # there is a difference between self and the v1 api, namely trading wallet is called margin in v2\n await self.load_markets()\n accountsByType = self.safe_value(self.options, 'v2AccountsByType', {})\n requestedType = self.safe_string(params, 'type', 'exchange')\n accountType = self.safe_string(accountsByType, requestedType, requestedType)\n if accountType is None:\n keys = list(accountsByType.keys())\n raise ExchangeError(self.id + ' fetchBalance() type parameter must be one of ' + ', '.join(keys))\n isDerivative = requestedType == 'derivatives'\n query = self.omit(params, 'type')\n response = await self.privatePostAuthRWallets(query)\n result = {'info': response}\n for i in range(0, len(response)):\n balance = response[i]\n type = self.safe_string(balance, 0)\n currencyId = self.safe_string_lower(balance, 1, '')\n start = len(currencyId) - 2\n isDerivativeCode = currencyId[start:] == 'f0'\n # self will only filter the derivative codes if the requestedType is 'derivatives'\n derivativeCondition = (not isDerivative or isDerivativeCode)\n if (accountType == type) and 
derivativeCondition:\n code = self.safe_currency_code(currencyId)\n account = self.account()\n account['total'] = self.safe_string(balance, 2)\n account['free'] = self.safe_string(balance, 4)\n result[code] = account\n return self.safe_balance(result)", "def comprequestsrate(self) :\n\t\ttry :\n\t\t\treturn self._comprequestsrate\n\t\texcept Exception as e:\n\t\t\traise e", "def getAmount1(*args):", "def get_inbound_statement_details_exchange_rate(self):\n return self.get_text_from_element(self.inbound_statements_details_exchange_rate_locator, True)", "def test_reaction_rate_results_02():\n rc = chemkin.ReactionRate()\n rate = rc.read_XML('./data/rxns_hw5.xml').set_temp(1500).get_reaction_rate(np.array([2.0, 1.0, 0.5, 1.0, 1.0]))\n test1 = np.array([ -2.811803e+08, -2.856604e+08, 5.668407e+08, 4.480138e+06, -4.480138e+06])\n np.testing.assert_allclose(rate, test1, 1e-06)", "def comptxbytesrate(self) :\n\t\ttry :\n\t\t\treturn self._comptxbytesrate\n\t\texcept Exception as e:\n\t\t\traise e", "def createChargeExchangeRates_MS(Z, A, pressure, ionTemperature):\r\n\r\n chargeExchangeRates = [0] * (Z + 1)\r\n\r\n h2Density = pressure * __TORR__\r\n \r\n\r\n # Epgas is the ionization potential of the background gas. Make this adjustable?\r\n\r\n # The full cross section includes qi^alphak, but this is added in the loop below.\r\n sigV = __SALZBORNAK__*__Epgas__**__SALZBORNBETAK__\r\n for i in range(1, Z+1):\r\n chargeExchangeRates[i] = h2Density*sigV*i**__SALZBORNALPHAK__\r\n return chargeExchangeRates", "def getDecayRate(self, channel, unitCode=0):\n res = self.XAPCommand(\"DECAY\", channel, unitCode=unitCode)\n return int(res)", "def load_eeg(filename):\r\n data = np.load(filename)\r\n return data['eeg'], int(data['srate'])", "def load_eeg(filename):\r\n data = np.load(filename)\r\n return data['eeg'], int(data['srate'])", "def create_get_exchange_info_request(self) -> Request:", "def get_exchange_reward_per_euro(model):\n exchanged_euros = get_exchanged_euros(model)\n total_euros = get_total_euros(model) \n total_teos = get_total_teos(model)\n exchange_pool = (total_euros - total_teos)*model.buffer_share*model.exchange_reward_share\n if exchanged_euros == 0 or exchange_pool <= 0:\n return 0\n exchange_reward_per_euro = exchange_pool / exchanged_euros \n return round(float(exchange_reward_per_euro),4)", "def latest_order_book_entry(symbol: str,\n exchange: str = CRYPTO_EXCHANGE,\n rate_limit: bool = True):\n try:\n check_exchange_existence(exchange=exchange)\n response = asyncio.get_event_loop().run_until_complete(\n getOrderBook(symbol=symbol,\n number_of_data_points=1,\n exchange=exchange,\n rate_limit=rate_limit))\n latest_orderbook_entry_dict = {}\n latest_orderbook_entry_dict['symbol'] = symbol\n latest_orderbook_entry_dict['ask'] = response['asks'][0][0] if len(\n response['asks']) > 0 else None\n latest_orderbook_entry_dict['asksize'] = response['asks'][0][1] if len(\n response['asks']) > 0 else None\n latest_orderbook_entry_dict['bid'] = response['bids'][0][0] if len(\n response['bids']) > 0 else None\n latest_orderbook_entry_dict['bidsize'] = response['bids'][0][1] if len(\n response['bids']) > 0 else None\n latest_orderbook_entry_dict['datetime'] = response['datetime']\n latest_orderbook_entry_dict['nonce'] = response['nonce']\n return latest_orderbook_entry_dict\n except Exception as exception:\n logger.error('Oops! 
An error Occurred ⚠️')\n raise exception", "async def fetch_trading_fees(self, params={}):\n await self.load_markets()\n response = await self.privatePostAuthRSummary(params)\n #\n # Response Spec:\n # [\n # PLACEHOLDER,\n # PLACEHOLDER,\n # PLACEHOLDER,\n # PLACEHOLDER,\n # [\n # [\n # MAKER_FEE,\n # MAKER_FEE,\n # MAKER_FEE,\n # PLACEHOLDER,\n # PLACEHOLDER,\n # DERIV_REBATE\n # ],\n # [\n # TAKER_FEE_TO_CRYPTO,\n # TAKER_FEE_TO_STABLE,\n # TAKER_FEE_TO_FIAT,\n # PLACEHOLDER,\n # PLACEHOLDER,\n # DERIV_TAKER_FEE\n # ]\n # ],\n # PLACEHOLDER,\n # PLACEHOLDER,\n # PLACEHOLDER,\n # PLACEHOLDER,\n # {\n # LEO_LEV,\n # LEO_AMOUNT_AVG\n # }\n # ]\n #\n # Example response:\n #\n # [\n # null,\n # null,\n # null,\n # null,\n # [\n # [0.001, 0.001, 0.001, null, null, 0.0002],\n # [0.002, 0.002, 0.002, null, null, 0.00065]\n # ],\n # [\n # [\n # {\n # curr: 'Total(USD)',\n # vol: '0',\n # vol_safe: '0',\n # vol_maker: '0',\n # vol_BFX: '0',\n # vol_BFX_safe: '0',\n # vol_BFX_maker: '0'\n # }\n # ],\n # {},\n # 0\n # ],\n # [null, {}, 0],\n # null,\n # null,\n # {leo_lev: '0', leo_amount_avg: '0'}\n # ]\n #\n result = {}\n fiat = self.safe_value(self.options, 'fiat', {})\n feeData = self.safe_value(response, 4, [])\n makerData = self.safe_value(feeData, 0, [])\n takerData = self.safe_value(feeData, 1, [])\n makerFee = self.safe_number(makerData, 0)\n makerFeeFiat = self.safe_number(makerData, 2)\n makerFeeDeriv = self.safe_number(makerData, 5)\n takerFee = self.safe_number(takerData, 0)\n takerFeeFiat = self.safe_number(takerData, 2)\n takerFeeDeriv = self.safe_number(takerData, 5)\n for i in range(0, len(self.symbols)):\n symbol = self.symbols[i]\n market = self.market(symbol)\n fee = {\n 'info': response,\n 'symbol': symbol,\n 'percentage': True,\n 'tierBased': True,\n }\n if market['quote'] in fiat:\n fee['maker'] = makerFeeFiat\n fee['taker'] = takerFeeFiat\n elif market['contract']:\n fee['maker'] = makerFeeDeriv\n fee['taker'] = takerFeeDeriv\n else: # TODO check if stable coin\n fee['maker'] = makerFee\n fee['taker'] = takerFee\n result[symbol] = fee\n return result", "def test_foreign_exchange_python3(self, mock_urlopen):\n fe = ForeignExchange(key=TestAlphaVantage._API_KEY_TEST)\n url = \"https://www.alphavantage.co/query?function=CURRENCY_EXCHANGE_RATE&from_currency=BTC&to_currency=CNY&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = fe.get_currency_exchange_rate(\n from_currency='BTC', to_currency='CNY')\n self.assertIsInstance(\n data, dict, 'Result Data must be a dictionary')", "def new_get_historical_price(base, target, date):\n if base == \"BTC\" and target == \"EUR\":\n return {\"BTC\": {\"EUR\": 10000}}\n elif base == \"EUR\" and target == \"BTC\":\n return {\"EUR\": {\"BTC\": 0.00012}}\n elif base == \"LTC\" and target == \"BTC\":\n return {\"LTC\": {\"BTC\": 0.02}}\n elif base == \"LTC\" and target == \"EUR\":\n return {\"LTC\": {\"EUR\": 250}}", "def fetch_balance(self, params={}):\n self.load_markets()\n response = self.privateGetAccountBalanceV2(params)\n #\n # {\n # \"AVAILABLE_NIS\": 0.0,\n # \"NIS\": 0.0,\n # \"LOCKED_NIS\": 0.0,\n # \"AVAILABLE_BTC\": 0.0,\n # \"BTC\": 0.0,\n # \"LOCKED_BTC\": 0.0,\n # \"AVAILABLE_ETH\": 0.0,\n # \"ETH\": 0.0,\n # \"LOCKED_ETH\": 0.0,\n # \"AVAILABLE_BCHSV\": 0.0,\n # \"BCHSV\": 0.0,\n # \"LOCKED_BCHSV\": 0.0,\n # \"AVAILABLE_BCHABC\": 0.0,\n # \"BCHABC\": 0.0,\n # \"LOCKED_BCHABC\": 0.0,\n # \"AVAILABLE_LTC\": 0.0,\n # \"LTC\": 0.0,\n # \"LOCKED_LTC\": 0.0,\n 
# \"AVAILABLE_ETC\": 0.0,\n # \"ETC\": 0.0,\n # \"LOCKED_ETC\": 0.0,\n # \"AVAILABLE_BTG\": 0.0,\n # \"BTG\": 0.0,\n # \"LOCKED_BTG\": 0.0,\n # \"AVAILABLE_GRIN\": 0.0,\n # \"GRIN\": 0.0,\n # \"LOCKED_GRIN\": 0.0,\n # \"Fees\": {\n # \"BtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"EthNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"BchabcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"LtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"EtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"BtgNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"LtcBtc\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"BchsvNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"GrinNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0}\n # }\n # }\n #\n return self.parse_balance(response)" ]
[ "0.6843077", "0.67887974", "0.6766991", "0.67545015", "0.6378838", "0.6367499", "0.62257475", "0.619634", "0.6012515", "0.5951624", "0.57854265", "0.5767857", "0.57583064", "0.5754834", "0.57475054", "0.57261825", "0.5651264", "0.5597155", "0.557165", "0.5563183", "0.5555224", "0.5547397", "0.55405486", "0.55147886", "0.55141634", "0.54966974", "0.5473914", "0.54547876", "0.544138", "0.5413844", "0.5387589", "0.53733367", "0.5360719", "0.5344227", "0.53143775", "0.5304933", "0.5301821", "0.53009087", "0.5294292", "0.5277829", "0.5277542", "0.52743304", "0.52631986", "0.5256713", "0.52474785", "0.52462256", "0.52428925", "0.5236115", "0.5229821", "0.522645", "0.5221471", "0.52203894", "0.5215562", "0.5210063", "0.5202802", "0.5197084", "0.5192767", "0.51861024", "0.51833814", "0.518235", "0.5169247", "0.5153288", "0.5146648", "0.513882", "0.51341474", "0.5133511", "0.5120379", "0.5112029", "0.5101965", "0.5095068", "0.5092221", "0.50852877", "0.50827956", "0.50826794", "0.507129", "0.50678164", "0.5058472", "0.50512993", "0.5049861", "0.5045773", "0.50455475", "0.50428635", "0.504188", "0.5028102", "0.50232005", "0.5013789", "0.4996742", "0.49937594", "0.49918073", "0.49900293", "0.4988759", "0.4987538", "0.4987538", "0.4987009", "0.49819145", "0.49817517", "0.49745336", "0.49735373", "0.49706712", "0.49657905" ]
0.58530164
10
Retrieve the latest exchange rate from the given ECB data.
def _get_latest_ecb_rate(data: bytes) -> float:
    root = etree.fromstring(data)
    values = root.xpath('.//generic:ObsValue/@value', namespaces=root.nsmap)
    last_value = len(values) - 1
    return float(values[last_value])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch_and_store_latest_ecb_exrates():\n response = requests.get(DAILY_ECB_URL)\n # Raise exception if status_code != 200 or ConnectionError\n response.raise_for_status()\n info = ET.fromstring(response.content)[2][0]\n datestamp = datetime.strptime(info.attrib['time'], \"%Y-%m-%d\").date()\n rates = [x.attrib for x in info]\n\n exrates = []\n for item in rates:\n if item['currency'] in SUPPORTED_CURRENCIES:\n exrate, created = ExchangeRate.objects.update_or_create(\n datestamp=datestamp,\n currency=item['currency'],\n defaults={'rate': Decimal(item['rate'])}\n )\n exrates.append(exrate)\n print(exrate, \"NEW EXRATE!\" if created else \"<noupdate>\")\n\n return exrates", "def exchange_rate(self):\n res = r.get(self.url + self.current_rate)\n return self.execute(res)", "def get_realtime_exchange_rate(from_currency, to_currency) :\n\turl = f\"{BASE_URL}function={settings.CURRENCY_EXCHANGE_RATE}&from_currency={from_currency}&to_currency={to_currency}&apikey={API_KEY}\" \n\trequest = requests.get(url)\n\tresult = request.json()\n\treturn result[PREFIX][EXCHANGE_RATE], result[PREFIX][DATE]", "def getData(self):\n\n url = 'https://www.ecb.europa.eu/stats/eurofxref/eurofxref-hist.zip'\n try:\n file, _ = urlretrieve(url)\n zip_file_object = zipfile.ZipFile(file, 'r')\n first_file = zip_file_object.namelist()[0]\n file = zip_file_object.open(first_file)\n\n file_handler = []\n for row in file:\n file_handler.append(row.decode())\n\n # getting the currency headers into header_list\n header_list = []\n notFound = True\n x = 0\n while notFound:\n if file_handler[x].startswith('Date'):\n header = file_handler[x].split(',')\n for col in header:\n header_list.append(col.strip())\n notFound = False\n x += 1\n self.currencies = list(filter(None, header_list))\n self.currencies.append('EUR')\n self.currencies = self.currencies[1:] # Removing the \"Date\" entry\n\n data = []\n for row in file_handler[x:]:\n if row.startswith('`\\n'):\n break\n else:\n data.append(list(filter(None, [x.replace('\\n', '') for x in row.split(',')]))) # Removing any empty extra columns at the end of each rows\n\n # filling my self.rates with the currency in the format {CURR: {date: rate, ...}, ...}\n for row in data:\n for i in range(len(self.currencies)):\n try:\n if self.currencies[i] not in self.rates:\n self.rates[self.currencies[i]] = {row[0]: row[i + 1]}\n else:\n self.rates[self.currencies[i]].update({row[0]: row[i + 1]})\n except IndexError:\n # We reached the EUR section\n if self.currencies[i] not in self.rates:\n self.rates[self.currencies[i]] = {row[0]: '1.0000'}\n else:\n self.rates[self.currencies[i]].update({row[0]: '1.0000'})\n\n self.currencies.sort()\n\n except Exception as e:\n print('Failed to process the data')\n print(e)\n finally:\n file.close()", "def get_currency_exchange_rate(self, from_currency, to_currency):\n _FUNCTION_KEY = 'CURRENCY_EXCHANGE_RATE'\n return _FUNCTION_KEY, 'Realtime Currency Exchange Rate', None", "def get_rate(currency, date):\n status = 400\n while status != 200:\n url = (\"http://api.nbp.pl/api/exchangerates/rates/A/%s/%d-%02d-%02d?format=json\" %\n (currency, date.year, date.month, date.day))\n\n response = requests.get(url)\n status = response.status_code\n if status != 200:\n date = date - datetime.timedelta(1)\n\n tree = json.loads(response.content)\n assert len(tree['rates']) == 1\n print_rate_info(tree['rates'])\n return (tree['rates'][0]['mid'], date)", "def get_latest(self):\n url = f\"{self.get_api_url()}+latest\"\n # set api parameters\n params = {}\n 
params.update({'base': self.base_currency})\n params.update({'symbols': ','.join(self.target_currency_codes)})\n # call the api for rates\n response = requests.get(url, params=params)\n if response.status_code == 200:\n base, rates = response.json().get('base'), response.json().get('rates')\n # remove base currency from rates if it is returned by the data source\n rates.pop(self.base_currency, None)\n return base, rates\n return None, None", "def get_exchange_rate_data(self, source_currency, exchanged_currency, valuation_date):\n raise NotImplementedError", "def get_euro_exchange_rates(currency, frequency=\"D\"):\n ISO_4217_RE = re.compile(r\"[A-Z]{3}\")\n FREQUENCIES = [\"D\", \"M\", \"A\"]\n \n URL_TEMPLATE = \"http://sdw-wsrest.ecb.europa.eu/service/data/EXR/{}.{}.EUR.SP00.A?format=csvdata\"\n \n if not ISO_4217_RE.match(currency):\n raise ValueError('\"' + currency + '\" is no valid currency code!')\n if frequency not in FREQUENCIES:\n raise ValueError(\"Frequency must be one of \" + \", \".join(FREQUENCIES))\n \n url = URL_TEMPLATE.format(frequency, currency)\n req = Request(url)\n response = urlopen(req)\n lines = []\n for line in response:\n lines.append(line.decode(\"utf-8\"))\n reader = csv.DictReader(lines)\n result = {}\n for line in reader:\n date = line[\"TIME_PERIOD\"]\n value = line[\"OBS_VALUE\"]\n result[date] = value\n return result", "def parse_rate():\n try:\n response = requests.get(ecb_url)\n except Exception as e:\n return {\"error\": \"error occurred while accessing www.ecb.europa.eu: {}\".format(e)}, True\n else:\n currency_xml = response.content.decode()\n root = ET.fromstring(currency_xml)\n currencies_list = [currency.attrib.get('currency') for currency in root.iter(cube) if currency.attrib.get('currency')]\n rates_list = [float(currency.attrib.get('rate')) for currency in root.iter(cube) if currency.attrib.get('rate')]\n result = dict(zip(currencies_list, rates_list))\n result['EUR'] = float(1)\n return result, False", "def update(self):\n self.rate = self.exchange.latest()", "def get_exchange_rate_data(self, source_currency, exchanged_currency, valuation_date, provider=None, *args, **kwargs):\n raise NotImplementedError", "def lookup(self, invoice_code):\n return self.exchange_rate_btc_today[0]", "def get_ecb_rates_for_currency(currency):\n # UPDATE 2018-06-05 -- read directly from the database, and skip caching\n if currency not in SUPPORTED_CURRENCIES:\n raise CurrencyNotSupported(\"Currently we don't support %s\" % currency)\n exrates = get_latest_ecb_rates_from_db(currency)\n return (exrates['datestamp'], exrates[currency])", "def get_rate(self, t):\n return self.rates[bisect.bisect(self.change_times, t) - 1]", "def _get_currency_rate(currency):\n response = requests.get(f'{config(\"OPENEXCHANGERATES_URL\")}')\n if not response.ok:\n # log\n # can handle exception in better way later\n raise Exception(\n f'currency conversion api not working {response.text}')\n rates = response.json().get('rates')\n currency_rate = rates.get(currency.upper(), None)\n if not currency_rate:\n raise ValueError(f'Given currency conversion rate not found')\n return currency_rate", "def latest(self, base='USD'):\n try:\n resp = self.client.get(self.ENDPOINT_LATEST, params={'base': base})\n resp.raise_for_status()\n except requests.exceptions.RequestException as e:\n raise OpenExchangeRatesClientException(e)\n return resp.json(parse_int=decimal.Decimal,\n parse_float=decimal.Decimal)", "def _get_eur_gbp_last_daily(self) -> None:\n data = _get_ecb_data(FREQUENCY_DAILY, 
_ten_days_ago(), _today())\n\n self.eur_gbp_last_day = _get_latest_ecb_rate(data)", "def base_exchange_rate(self):\n return self._base_exchange_rate", "def api_call(cls, currency):\n headers = {\"x-accept-version\": \"2.0.0\", \"Accept\": \"application/json\"}\n r = requests.get(cls.API_URL + currency, headers=headers)\n r.raise_for_status()\n return r.json()[\"data\"][\"rate\"]", "def fetch_currency_rates(url=\"http://www.nbrb.by/API/ExRates/Rates?Periodicity=0\") -> dict:\n data = {}\n response = requests.get(url)\n if response.status_code == 200:\n data = get_json(response)\n return data", "def downloadExchangeRates(_source_currency, _track_reconnections):\n try:\n logger.info('downloadExchangeRates: Retrieving exchange rates.')\n logger.debug('downloadExchangeRates: Retrieving exchange rates for: %s' % _source_currency)\n\n exchange_rate_ = 0\n\n #download exchange rate\n got_html = getHtml(URL_CALCULATOR + '1' + _source_currency + '=?' + BASE_CURRENCY)\n\n #parse\n if got_html:\n if 'error: \"\"' in got_html:\n #parse data\n re_object = re.search(\".*rhs: \\\"(\\d\\.\\d*)\", got_html)\n\n #using float since we're not interested in high precision\n exchange_rate_ = float(re_object.group(1))\n logger.debug('downloadExchangeRates: Parsed exchange rate: %s' % exchange_rate_)\n\n else:\n #reconnect if error field not empty\n if _track_reconnections['times_reconnected'] <= MAXIMUM_RECONNECTIONS:\n logger.debug('downloadExchangeRates: Times reconnected: %s' %\n _track_reconnections['times_reconnected'])\n logger.warning('downloadExchangeRates: Server signalizes an error, repeating request.')\n\n _track_reconnections['times_reconnected'] += 1\n\n #wait for the server to allow another inquiry\n time.sleep(PAUSE_BETWEEN_RECONNECTIONS)\n\n #repeat request\n downloadExchangeRates(_source_currency, _track_reconnections)\n\n else:\n logger.error('downloadExchangeRates: Could not obtain exchange rate for: %s, returning '\n 'default value.' % _source_currency)\n\n return exchange_rate_\n\n except:\n raise", "def comprxbytesrate(self) :\n\t\ttry :\n\t\t\treturn self._comprxbytesrate\n\t\texcept Exception as e:\n\t\t\traise e", "def get_rates_for(currency: str, date: str):\n baseurl = f\"https://openexchangerates.org/api/historical/{date}.json\"\n params = {\"app_id\": OEG_APP_ID, \"symbols\": currency, \"base\": \"USD\"}\n return make_request(baseurl=baseurl, params=params)", "def latest_L2_order_book_entry(symbol: str,\n exchange: str = CRYPTO_EXCHANGE,\n rate_limit: bool = True):\n try:\n check_exchange_existence(exchange=exchange)\n response = asyncio.get_event_loop().run_until_complete(\n getOrderBookL2(symbol=symbol,\n number_of_data_points=1,\n exchange=exchange,\n rate_limit=rate_limit))\n latest_orderbook_entry_dict = {}\n latest_orderbook_entry_dict['symbol'] = symbol\n latest_orderbook_entry_dict['ask'] = response['asks'][0][0] if len(\n response['asks']) > 0 else None\n latest_orderbook_entry_dict['asksize'] = response['asks'][0][1] if len(\n response['asks']) > 0 else None\n latest_orderbook_entry_dict['bid'] = response['bids'][0][0] if len(\n response['bids']) > 0 else None\n latest_orderbook_entry_dict['bidsize'] = response['bids'][0][1] if len(\n response['bids']) > 0 else None\n latest_orderbook_entry_dict['datetime'] = response['datetime']\n latest_orderbook_entry_dict['nonce'] = response['nonce']\n return latest_orderbook_entry_dict\n except Exception as exception:\n logger.error('Oops! 
An error Occurred ⚠️')\n raise exception", "def get_updated_currency(self, currency_array, main_currency,\n max_delta_days):\n url = 'http://rate.bot.com.tw/xrt/flcsv/0/day'\n\n # We do not want to update the main currency\n if main_currency in currency_array:\n currency_array.remove(main_currency)\n _logger.debug(\"BOT currency rate service : connecting...\")\n try:\n url_open = urllib.request.urlopen(url)\n csvfile = csv.reader(io.StringIO(url_open.read().decode('utf-8-sig')), delimiter=',')\n url_open.close()\n except IOError:\n raise UserError(\n _('Web Service does not exist (%s)!') % url)\n\n next(csvfile)\n exchange = {}\n for row in csvfile:\n bid = float(row[3])\n ask = float(row[13])\n\n exchange[row[0]] = {\n 'bid': bid,\n 'ask': ask\n }\n\n self.check_rate_date(datetime.today(), max_delta_days)\n self.supported_currency_array = list(exchange.keys())\n\n self.supported_currency_array.append('TWD')\n _logger.debug(\"Supported currencies = %s \" %\n self.supported_currency_array)\n self.validate_cur(main_currency)\n if main_currency != 'TWD':\n main_rate = float(exchange[main_currency]['ask'])\n if main_currency in currency_array:\n currency_array.remove(main_currency)\n for curr in currency_array:\n self.validate_cur(curr)\n if curr == 'TWD':\n rate = main_rate\n else:\n if main_currency == 'TWD':\n rate = 1 / float(exchange[curr]['ask'])\n else:\n rate = main_rate / float(exchange[curr]['ask'])\n self.updated_currency[curr] = rate\n _logger.debug(\n \"Rate retrieved : 1 %s = %s %s\" % (main_currency, rate, curr)\n )\n return self.updated_currency, self.log_info", "def get_rate(parent=None):\n dialog = RateDialog(parent)\n dialog.exec_()\n rate = dialog.rate\n return rate", "def getDataRate(self):\n \n return self.DataRate", "def latest_order_book_entry(symbol: str,\n exchange: str = CRYPTO_EXCHANGE,\n rate_limit: bool = True):\n try:\n check_exchange_existence(exchange=exchange)\n response = asyncio.get_event_loop().run_until_complete(\n getOrderBook(symbol=symbol,\n number_of_data_points=1,\n exchange=exchange,\n rate_limit=rate_limit))\n latest_orderbook_entry_dict = {}\n latest_orderbook_entry_dict['symbol'] = symbol\n latest_orderbook_entry_dict['ask'] = response['asks'][0][0] if len(\n response['asks']) > 0 else None\n latest_orderbook_entry_dict['asksize'] = response['asks'][0][1] if len(\n response['asks']) > 0 else None\n latest_orderbook_entry_dict['bid'] = response['bids'][0][0] if len(\n response['bids']) > 0 else None\n latest_orderbook_entry_dict['bidsize'] = response['bids'][0][1] if len(\n response['bids']) > 0 else None\n latest_orderbook_entry_dict['datetime'] = response['datetime']\n latest_orderbook_entry_dict['nonce'] = response['nonce']\n return latest_orderbook_entry_dict\n except Exception as exception:\n logger.error('Oops! 
An error Occurred ⚠️')\n raise exception", "def currency_rate(self, init):\r\n\r\n curr = CurrencyRates()\r\n curr_rate = curr.get_rates(init)\r\n return curr_rate", "def get_current_rate(self):\n pass", "def test_currency_rate(self):\n currency_name = ['USD'] * 4\n rates = [3.67, 4.07, 3.04, 3.89]\n helper.currency_loop_helper(get_historical_currency_rate, TestHistoricalRates.dates_rate,\n rates, currency_name)", "def data_rate(self):\n return self._data_rate", "def poll_price_data():\n resp = requests.get(COINDESK_ENDPOINT) # Powered by CoinDesk\n if resp.status_code == 200:\n logging.info(\"GET request succeeded\")\n data = resp.json()\n data_dict = {\n \"id\": str(uuid.uuid1()),\n \"time\": data['time']['updated'],\n \"currency\": data['bpi']['USD']['code'],\n \"price\": data['bpi']['USD']['rate']\n }\n return data_dict\n else:\n logging.error(\"GET request failed\")", "def getMostRecentRelevantRate(self, currency_rates, reference_date=QDate.currentDate().toPyDate()):\n\n try:\n for date in currency_rates:\n if QDate.fromString(date, \"yyyy-MM-dd\").toPyDate() <= reference_date and currency_rates[date] != 'N/A':\n return currency_rates[date]\n except Exception as e:\n print('Could not retrieve any relevant rate')\n print(e)", "def update_currency_data_from_rss(currency: str, commit: bool = True) -> Rate:\n currency_data = get_data_for_currency(currency)\n\n try:\n current_rate = Rate.objects.get(currency=currency)\n except ObjectDoesNotExist:\n current_rate = Rate(**currency_data)\n else:\n new_update_date = currency_data[\"parser_update_date\"]\n if current_rate.parser_update_date < new_update_date:\n current_rate.parser_update_date = new_update_date\n current_rate.exchange_rate = currency_data[\"exchange_rate\"]\n current_rate.description = currency_data[\"description\"]\n\n if commit:\n current_rate.save()\n return current_rate", "def parse_exchange_rates(cls, exchange_rates):\n currency_exchange_rates = []\n for exchange in exchange_rates:\n source_currency_symbol = exchange['base']\n source_currency = cls.parse_currency(source_currency_symbol)\n for exchanged_currency_symbol in exchange['rates']:\n if source_currency_symbol != exchanged_currency_symbol:\n exchanged_currency = cls.parse_currency(exchanged_currency_symbol)\n cur_exchange_rate = CurrencyExchangeRate(source_currency=source_currency,\n exchanged_currency=exchanged_currency,\n valuation_date=cls.parse_date(exchange['date']),\n rate_value=exchange['rates'][exchanged_currency_symbol])\n currency_exchange_rates.append(cur_exchange_rate)\n return currency_exchange_rates", "def get_inbound_statement_details_exchange_rate(self):\n return self.get_text_from_element(self.inbound_statements_details_exchange_rate_locator, True)", "def getprice():\n\n print(\"Get price\")\n latest_price = get_latest_price(item_code)\n return latest_price", "def _do_get_rate(self):\n rate = {\n 1: \"1 : Helium Probe in FAST rate\",\n 0: \"0 : Helium Probe in SLOW rate\"\n }\n result = self._execute('X')\n return rate.get(int(format(int(result[5:7]), '08b')[6]), \"Unknown\")", "def test_foreign_exchange_python2(self, mock_urlopen):\n fe = ForeignExchange(key=TestAlphaVantage._API_KEY_TEST)\n url = \"https://www.alphavantage.co/query?function=CURRENCY_EXCHANGE_RATE&from_currency=BTC&to_currency=CNY&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = fe.get_currency_exchange_rate(\n from_currency='BTC', to_currency='CNY')\n self.assertIsInstance(\n data, dict, 'Result Data 
must be a dictionary')", "def get_ether_current_prices():\n req = requests.get('https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=BTC,USD,EUR')\n data = req.json()\n\n print('{0}, {1}, {2}'.format(data['EUR'], data['USD'], data['BTC']))", "def get(self, as_of_date: str = None):\n if not as_of_date:\n as_of_date = (datetime.now() - timedelta(days=1)).strftime(\"%Y-%m-%d\")\n\n ExchangeRates.GET_EXCHANGE_RATES = ExchangeRates.GET_EXCHANGE_RATES.format(as_of_date, '{0}')\n return self._query_get_all('ExchangeRate', ExchangeRates.GET_EXCHANGE_RATES)", "def get_by_source(self, source_currency_code: str):\n as_of_date = (datetime.now() - timedelta(days=1)).strftime(\"%Y-%m-%d\")\n return self._get_request(\n 'ExchangeRate', self.GET_EXCHANGE_RATES_BY_SOURCE.format(source_currency_code, as_of_date))", "def update_exchange_rates():\n try:\n from djmoney.contrib.exchange.models import Rate\n\n from common.settings import currency_code_default, currency_codes\n from InvenTree.exchange import InvenTreeExchange\n except AppRegistryNotReady: # pragma: no cover\n # Apps not yet loaded!\n logger.info(\"Could not perform 'update_exchange_rates' - App registry not ready\")\n return\n except Exception: # pragma: no cover\n # Other error?\n return\n\n backend = InvenTreeExchange()\n base = currency_code_default()\n logger.info(f\"Updating exchange rates using base currency '{base}'\")\n\n try:\n backend.update_rates(base_currency=base)\n\n # Remove any exchange rates which are not in the provided currencies\n Rate.objects.filter(backend=\"InvenTreeExchange\").exclude(currency__in=currency_codes()).delete()\n except OperationalError:\n logger.warning(\"Could not update exchange rates - database not ready\")\n except Exception as e: # pragma: no cover\n logger.error(f\"Error updating exchange rates: {e} ({type(e)})\")", "def comptcprxbytesrate(self) :\n\t\ttry :\n\t\t\treturn self._comptcprxbytesrate\n\t\texcept Exception as e:\n\t\t\traise e", "def get_price(horizon_host, pair):\n print \"fetching latest price for:\" + pair[\"name\"]\n params = make_trade_params(pair)\n res = requests.get(horizon_host + \"/trades\", params).json()\n try:\n trade_record = res[\"_embedded\"][\"records\"][0]\n except IndexError:\n return DatedPrice(date=datetime.utcfromtimestamp(0), price=0)\n price = float(trade_record[\"price\"][\"n\"]) / float(trade_record[\"price\"][\"d\"])\n timestamp = parser.parse(trade_record[\"ledger_close_time\"])\n return DatedPrice(date=timestamp, price=price)", "def getDecayRate(self, channel, unitCode=0):\n res = self.XAPCommand(\"DECAY\", channel, unitCode=unitCode)\n return int(res)", "def highest_bid(self):\n (price_eur, volume, _) = self._order_book['bids'][0]\n price_usd = Decimal(price_eur) * self._eurusd_rate\n return {\n 'price': Decimal(price_usd),\n 'volume': Decimal(volume),\n }", "def read(self):\n beats, interval_ms = self.read_raw()\n if 0 < interval_ms < 2500:\n rate = 60000.0 / interval_ms\n else:\n raise RuntimeError(\"Value out of range or device not connected.\")\n return rate", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://www.bitstamp.net/api/v2/ticker/\"\n requestUrl = uri + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[\"last\"]\n return currentPrice", "def comprequestsrate(self) :\n\t\ttry :\n\t\t\treturn self._comprequestsrate\n\t\texcept Exception as e:\n\t\t\traise e", "def get_last_rates(limit: int):\n conn = sqlite3.connect(CONF.database, 
detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)\n curs = conn.cursor()\n try:\n return curs.execute(\"SELECT price FROM rates ORDER BY date_time DESC LIMIT {}\".format(limit)).fetchall()\n finally:\n curs.close()\n conn.close()", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://bittrex.com/api/v1.1/public/getticker?market=\"+pair\n jsonResponse = self.getJson(uri)\n currentPrice = jsonResponse[\"result\"][\"Last\"]\n return currentPrice", "def _generate_currency_rates(self, parsed_data):\n\n for line in self:\n rate_info = parsed_data.get(line.move_id.currency_id.name, None)\n\n if not rate_info:\n raise UserError(_(\"Your main currency (%s) is not supported by this exchange rate provider. Please choose another one.\", company.currency_id.name))\n\n base_currency_rate = rate_info[0]\n\n for currency, (rate, date_rate) in parsed_data.items():\n rate_value = rate/base_currency_rate\n if currency == line.user_currency_id.name:\n line.exchange_rate = rate_value\n line.price_unit = rate_value * line.user_amount", "def _get_eur_gbp_last_month(self) -> None:\n last_month = _last_month()\n data = _get_ecb_data(FREQUENCY_MONTHLY, last_month, last_month)\n\n self.eur_gbp_last_month = _get_latest_ecb_rate(data)", "def getRate(self, context):\n try:\n return VTypeHelper.toDouble(context.getDevice(\"rate\").read())\n except:\n return 60.0", "def historical(self, date, base='USD'):\n try:\n resp = self.client.get(self.ENDPOINT_HISTORICAL %\n date.strftime(\"%Y-%m-%d\"),\n params={'base': base})\n resp.raise_for_status()\n except requests.exceptions.RequestException as e:\n raise OpenExchangeRatesClientException(e)\n return resp.json(parse_int=decimal.Decimal,\n parse_float=decimal.Decimal)", "async def fetch_funding_rate(self, symbol: str, params={}):\n return self.fetch_funding_rates([symbol], params)", "def currencies(self):\n try:\n resp = self.client.get(self.ENDPOINT_CURRENCIES)\n except requests.exceptions.RequestException as e:\n raise OpenExchangeRatesClientException(e)\n\n return resp.json()", "def rate(self):\n return self._rate", "def get_exchange_rate(self, order_type, amount=None, price=None, pair=\"btc_jpy\"):\n if order_type not in [\"sell\", \"buy\"]:\n raise CoinCheckApiException(\"order_type should be \\\"sell\\\" or \\\"buy\\\"\")\n if amount is not None and price is not None:\n raise CoinCheckApiException(\"only one of \\\"amount\\\" and \\\"price\\\" can be provided \")\n\n query = \"order_type={}&pair={}\".format(order_type, pair)\n if amount is not None:\n query += \"&amount={}\".format(amount)\n else:\n query += \"&price={}\".format(price)\n\n return self.execute_http_call(\"/api/exchange/orders/rate?{}\".format(query), \"GET\", headers=None)", "def URDBv7_to_ElectricityRates(urdb_response):\n warnings.warn(\"ResourceTools.URDBv7_to_ElectricityRates is deprecated. 
Please use UtilityRateTools.URDBv8_to_ElectricityRates instead.\", DeprecationWarning)\n\n urdb_data = dict()\n urdb_data['en_electricity_rates'] = 1\n\n def try_get_schedule(urdb_name, data_name):\n if urdb_name in urdb_response.keys():\n urdb_data[data_name] = urdb_response[urdb_name]\n for i in range(12):\n for j in range(24):\n urdb_data[data_name][i][j] += 1\n\n def try_get_rate_structure(urdb_name, data_name):\n mat = []\n supported_units = {\n \"kwh\" : 0,\n \"kwh/kw\" : 1,\n \"kwh daily\" : 2,\n \"kwh/kw daily\" : 3\n }\n if urdb_name in urdb_response.keys():\n structure = urdb_response[urdb_name]\n for i, period in enumerate(structure):\n for j, entry in enumerate(period):\n rate = entry['rate']\n if 'adj' in entry.keys():\n rate += entry['adj']\n tier_max = 1e38\n if 'max' in entry.keys():\n tier_max = entry['max']\n sell = 0\n if 'sell' in entry.keys():\n sell = entry['sell']\n units = 0\n if 'unit' in entry.keys():\n try:\n units = supported_units[entry['unit'].lower()]\n except KeyError:\n raise RuntimeError(\"UtilityRateDatabase error: unrecognized unit in rate structure\")\n mat.append((i + 1, j + 1, tier_max, units, rate, sell))\n urdb_data[data_name] = mat\n\n def try_get_demand_structure(urdb_name, data_name):\n mat = []\n if urdb_name in urdb_response.keys():\n structure = urdb_response[urdb_name]\n for i, period in enumerate(structure):\n for j, entry in enumerate(period):\n rate = entry['rate']\n if 'adj' in entry.keys():\n rate += entry['adj']\n tier_max = 1e38\n if 'max' in entry.keys():\n tier_max = entry['max']\n if 'unit' in entry.keys():\n if entry['unit'].lower() != \"kW\".lower():\n raise RuntimeError(\"UtilityRateDatabase error: unrecognized unit in rate structure\")\n mat.append((i + 1, j + 1, tier_max, rate))\n if data_name:\n urdb_data[data_name] = mat\n else:\n return mat\n\n if \"dgrules\" in urdb_response.keys():\n rules = urdb_response['dgrules'] # dgrules\n if rules == \"Net Metering\":\n urdb_data['ur_metering_option'] = 0\n elif rules == \"Net Billing Instantaneous\":\n urdb_data['ur_metering_option'] = 2\n elif rules == \"Net Billing Hourly\":\n urdb_data['ur_metering_option'] = 3\n elif rules == \"Buy All Sell All\":\n urdb_data['ur_metering_option'] = 4\n else:\n # if no metering option provided, assume Net Metering\n urdb_data['ur_metering_option'] = 0\n\n if 'fixedchargefirstmeter' in urdb_response.keys() and 'fixedchargeunits' in urdb_response.keys():\n fixed_charge = urdb_response['fixedchargefirstmeter']\n fixed_charge_units = urdb_response['fixedchargeunits']\n if fixed_charge_units == \"$/day\":\n fixed_charge *= 365 / 12\n elif fixed_charge_units == \"$/year\":\n fixed_charge /= 12\n urdb_data['ur_monthly_fixed_charge'] = fixed_charge\n\n if 'mincharge' in urdb_response.keys():\n min_charge = urdb_response['mincharge']\n min_charge_units = urdb_response['minchargeunits']\n if min_charge_units == \"$/year\":\n urdb_data['ur_annual_min_charge'] = min_charge\n else:\n if min_charge_units == \"$/day\":\n min_charge *= 365 / 12\n urdb_data['ur_monthly_min_charge'] = min_charge\n\n try_get_schedule('energyweekdayschedule', 'ur_ec_sched_weekday')\n try_get_schedule('energyweekendschedule', 'ur_ec_sched_weekend')\n try_get_rate_structure('energyratestructure', 'ur_ec_tou_mat')\n\n try_get_demand_structure('demandratestructure', 'ur_dc_tou_mat')\n try_get_schedule('demandweekdayschedule', 'ur_dc_sched_weekday')\n try_get_schedule('demandweekendschedule', 'ur_dc_sched_weekend')\n\n flat_demand_structure = 
try_get_demand_structure('flatdemandstructure', None)\n\n if 'flatdemandmonths' in urdb_response.keys():\n urdb_data['ur_dc_enable'] = 1\n flat_mat = []\n flat_demand = urdb_response['flatdemandmonths']\n for month, period in enumerate(flat_demand):\n tiers = []\n for r in flat_demand_structure:\n if r[0] == int(period + 1):\n tiers.append(r)\n \n if len(tiers) == 0:\n raise ValueError(\"flatdemandstructure missing period number \", period)\n for t in tiers:\n month_row = []\n month_row.append(month)\n month_row += [t[i] for i in (1, 2, 3)]\n flat_mat.append(month_row)\n urdb_data['ur_dc_flat_mat'] = flat_mat\n # Fill out an empty flat rate structure if the rate has TOU demand but not flat demand \n elif \"demandratestructure\" in urdb_response.keys():\n urdb_data['ur_dc_enable'] = 1\n # Enumerate a dc_flat table with $0/kW in 12 months\n flat_mat = []\n for i in range(0, 12):\n month_mat = [i, 1, 1e38, 0]\n flat_mat.append(month_mat)\n urdb_data['ur_dc_flat_mat'] = flat_mat\n else:\n urdb_data['ur_dc_enable'] = 0\n\n if urdb_data['ur_dc_enable'] == 1 and \"ur_dc_tou_mat\" not in urdb_data.keys():\n urdb_data['ur_dc_tou_mat'] = [[1, 1, 1e38, 0], ]\n urdb_data['ur_dc_sched_weekday'] = [[1] * 24 for i in range(12)]\n urdb_data['ur_dc_sched_weekend'] = urdb_data['ur_dc_sched_weekday']\n\n return urdb_data", "def LoadRateValue(self):\n\t\treturn self._get_attribute('loadRateValue')", "def currency_prices_last_year(currency = 'ETH', to = 'EUR'):\n\n currencies = 'fsym={0}&tsym={1}'.format(currency, to)\n\n try:\n req = requests.get( 'https://min-api.cryptocompare.com/data/histoday?'\n + currencies\n + '&limit=365&aggregate=1&e=CCCAGG')\n\n result = req.json()\n\n list = [float(day['close']) for day in result['Data']]\n except ConnectionError:\n print('Could not connect to \"min-api.cryptocompare.com\"')\n list = []\n\n return list", "def GetOpsRates():\n return GetDataFromCsvFile('ops_rates.csv')", "def get_price(data):\n return data[\"summaryDetail\"][\"regularMarketPreviousClose\"][\"raw\"]", "def _get_ecb_data(frequency: str, start: str, end: str) -> bytes:\n content = bytearray()\n\n query_url = urljoin(ECB_DATA_API, ECB_EXR_GBP_EUR.format(frequency))\n query_url = urljoin(query_url, ECB_QUERY_PARAMS.format(start, end))\n\n with requests.get(query_url) as response:\n response.raise_for_status()\n # The data we're requesting is relatively small so we can just read it\n # one chunk; to do that we'll set the chunk size to something bigger\n # than the data we're reading. 
Based on some trial and error, it looks\n # like 3 KBytes is the right number.\n for chunk in response.iter_content(chunk_size=1024 * 3): # 3 KBytes\n content.extend(chunk)\n\n return bytes(content)", "def get_price_history_lookback(access_token,ticker,periodType,period,frequencyType,frequency):\r\n \r\n price_url = 'https://api.tdameritrade.com/v1/marketdata/{}/pricehistory'.format(ticker)\r\n\r\n #The header for getting a quote needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Parameters for period of time and frequency of data to get\r\n params = {'periodType':periodType,\r\n 'period': period,\r\n 'frequencyType': frequencyType,\r\n 'frequency': frequency}\r\n \r\n #Make the get request to TD Ameritrade\r\n price_history_json = requests.get(url=price_url,headers=headers,params=params)\r\n return price_history_json.json()", "def get_coin_price(asset, time=None):\n url = 'https://rest.coinapi.io/v1/exchangerate/{}/USD'.format(asset)\n if time is not None:\n url = url + '?time={}'.format(time)\n headers = {'X-CoinAPI-Key': os.environ.get('COIN_API_KEY', '')}\n r = requests.get(url, headers=headers)\n if r.status_code / 100 == 2:\n price = {\"price\": r.json()['rate']}\n return price\n else:\n return {\"error\": r.content.decode('utf-8')}", "def latest(\n self,\n curve,\n date=None):\n # Build URL\n safe_curve = self._urlencode_curve_name(curve, curve_types=CURVE_TYPES)\n url = f\"/ohlc/{safe_curve}/latest/\"\n # Parameters\n params = {}\n self._add_date(params, \"date\", date)\n # HTTP request\n response = self._get(url, params=params)\n return parse_ohlc_response(response.json())", "def list(self, request, *args, **kwargs):\n data = self.process_query_params()\n if data:\n self.currency_client.get_exchange_rates_by_date_range(**data)\n return super().list(request, *args, **kwargs)", "def getChange(coin,interval):\n change = 'https://api.coinmarketcap.com/v1/ticker/' + coin\n json = requests.get(change).json()\n value = json[0]['percent_change_' + str(interval)]\n return value", "def test_get_historical_gold_rate(self):\n rates = [153.50, 162.49, 123.86, 155.10]\n helper.gold_loop_helper(get_historical_gold_rate, TestHistoricalRates.dates_rate, rates)", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://www.bitfinex.com/v2/ticker/t\"\n requestUrl = uri + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[0]\n return currentPrice", "def comptxbytesrate(self) :\n\t\ttry :\n\t\t\treturn self._comptxbytesrate\n\t\texcept Exception as e:\n\t\t\traise e", "def test_foreign_exchange_python3(self, mock_urlopen):\n fe = ForeignExchange(key=TestAlphaVantage._API_KEY_TEST)\n url = \"https://www.alphavantage.co/query?function=CURRENCY_EXCHANGE_RATE&from_currency=BTC&to_currency=CNY&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = fe.get_currency_exchange_rate(\n from_currency='BTC', to_currency='CNY')\n self.assertIsInstance(\n data, dict, 'Result Data must be a dictionary')", "def acquire_rates_data(self):\n prinf('%s params: %s', self.base_url, self.my_params)\n g_start()\n try:\n self.response_data = requests.get(self.base_url, params=self.my_params, timeout=self.timeout)\n except OSError:\n prinw('%s host not available', self.name)\n return False\n g_end('request responded')\n\n if not self.response_data:\n return False\n else:\n 
status_code = self.response_data.status_code\n prinf(status_code )\n if status_code > 400 :\n prinw('%s currency converter site response not found. %s', self.nam, status_code)\n return False\n elif status_code == 200:\n prinf('%s response ok', self.name)\n\n self.update_rates_valid_data()\n self.in_ccode = self.response_data.json()[self.strs[jpn.key_in_ccode]]\n\n self.rates = self.response_data.json()[self.strs[jpn.key_output]]\n\n # as requested ccode is not in the request respond\n # we add it => e.g 1 EUR = 1 EUR => needed for further pandas extrapolation\n self.rates.update({self.in_ccode: float(1)})\n return True", "def past_record(self):\r\n data = pd.read_csv(\"MonthlyRate.csv\")\r\n code = data[\"CurrencyCode\"]\r\n position = 0\r\n for x in code:\r\n # Get the row of the currency choice\r\n if x == self.choice:\r\n value = data.iloc[position]\r\n else:\r\n position += 1\r\n # Get the record from column 2 onward and reversed the order\r\n record = list(value[2:])[::-1]\r\n return record", "def conversion_rate(self, init, new_currency):\r\n\r\n curr = CurrencyRates()\r\n curr_conv_rate = curr.get_rate(init, new_currency)\r\n return curr_conv_rate", "def Get_Vital_Heart_Rate(raw_data,\n heart_rate_startpos,\n heart_rate_endpos):\n heart_rate_ = raw_data[heart_rate_startpos:heart_rate_endpos]\n print(f'| raw_heart_rate = {heart_rate_}')\n return Convert_Hex_To_Decimal(heart_rate_)", "def get_current_price(self):\n URL = config.coin['price_hist_url'] + self.ticker.lower()\n try:\n r = requests.get(URL)\n data = json.loads(r.text)\n value = data['last']\n timestamp = data['timestamp']\n self.current_price = value\n self.current_datetime = timestamp\n except Exception as err:\n logger.error(err)", "def get_data_from_exchange(self, now):\n currency_options = dict(\n currency_pair='USD',\n bid={12.00 : {'guy_1' : 100.00}},\n ask={14.00 : {'guy_2' : 200.00}},\n time=datetime.datetime.now()\n )\n currency_pair_state = CurrencyPairState(**currency_options)\n return [currency_pair_state]", "def bitcoinaverage(site):\n url = \"https://apiv2.bitcoinaverage.com/frontend/constants/exchangerates/local\"\n try:\n session = requests.Session()\n cfscrape_requests = cfscrape.create_scraper(sess=session)\n ret = cfscrape_requests.get(url, timeout=(15, 15)).json()[\"rates\"]\n data = {\"USD:\" + key: float(val[\"rate\"]) for key, val in ret.items()}\n data = refine_data(data)\n print(site, data)\n race_write(f\"{site}_forex.txt\", json_dumps(data))\n except:\n print(f\"{site} failed to load\")", "def get_rates(data, course):\n\n return sorted([(item['name'], item['rate']) for item in data if item['course'] == course],\n key=lambda x: x[1], reverse=True)", "def get_last_price(args):\n\tmarket = get_market(args)\n\trequest = api.get_ticker(market)\n\tif not request['message']:\n\t\tlast = str(request['result']['Last'])\n\t\treturn (last)\n\telse:\n\t\tprint(request['message'])\n\t\tsys.exit(0)", "def rate(self):\n return self.brate / FAC", "def flyer_rate(self, obj):\n return currency(calculate_current_price(1, obj,\n obj.get_or_set_consumer_count()))", "def latest_price_info(symbol: str,\n exchange: str = CRYPTO_EXCHANGE,\n rate_limit: bool = True) -> dict:\n try:\n check_exchange_existence(exchange=exchange)\n return asyncio.get_event_loop().run_until_complete(\n latestPriceInfo(symbol=symbol,\n exchange=exchange,\n rate_limit=rate_limit))\n except Exception as exception:\n logger.error('Oops! 
An error Occurred ⚠️')\n raise exception", "def new_get_historical_price(base, target, date):\n if base == \"BTC\" and target == \"EUR\":\n return {\"BTC\": {\"EUR\": 10000}}\n elif base == \"EUR\" and target == \"BTC\":\n return {\"EUR\": {\"BTC\": 0.00012}}\n elif base == \"LTC\" and target == \"BTC\":\n return {\"LTC\": {\"BTC\": 0.02}}\n elif base == \"LTC\" and target == \"EUR\":\n return {\"LTC\": {\"EUR\": 250}}", "def get_exchange_reward_per_euro(model):\n exchanged_euros = get_exchanged_euros(model)\n total_euros = get_total_euros(model) \n total_teos = get_total_teos(model)\n exchange_pool = (total_euros - total_teos)*model.buffer_share*model.exchange_reward_share\n if exchanged_euros == 0 or exchange_pool <= 0:\n return 0\n exchange_reward_per_euro = exchange_pool / exchanged_euros \n return round(float(exchange_reward_per_euro),4)", "def rate(self):\n return self.__rate", "def getActiveCurrency():", "def getActiveCurrencies():", "def getRatesInRange(self, currency_rates):\n\n rates = list()\n try:\n date = self.from_date\n while date <= self.to_date:\n rates.append(float(self.getMostRecentRelevantRate(currency_rates, date)))\n date += timedelta(days=1)\n except Exception as e:\n print('Could not retrieve rates')\n print(e)\n\n rates.reverse()\n return rates", "def getCurrencies():", "def get_sale_rate(self, pair):\n response = self.execute_http_call(\"/api/rate/{}\".format(pair), \"GET\")\n return float(response[\"rate\"])", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n jsonResponse = self.getJson(\"https://poloniex.com/public?command=returnTicker\")\n currentPrice = jsonResponse[pair][\"last\"]\n return currentPrice", "def fetch_price():\n\n url = \"https://www.bitstamp.net/api/ticker/\"\n\n response = json.load(urllib2.urlopen(url))\n\n return {\"buy\": response['ask'], \"sell\": response['bid']}", "def decomptcprxbytesrate(self) :\n\t\ttry :\n\t\t\treturn self._decomptcprxbytesrate\n\t\texcept Exception as e:\n\t\t\traise e" ]
[ "0.7017314", "0.6967795", "0.6684886", "0.6548335", "0.65449774", "0.65055674", "0.6461167", "0.6431432", "0.64266616", "0.6394351", "0.62385744", "0.62312907", "0.62306887", "0.62064004", "0.61375725", "0.6130839", "0.6130635", "0.6115328", "0.60870564", "0.6060977", "0.59144956", "0.58851856", "0.58674395", "0.583746", "0.58213174", "0.5775519", "0.5773551", "0.5763365", "0.57522315", "0.5652959", "0.56494427", "0.56166506", "0.55552846", "0.55544466", "0.55512977", "0.5546707", "0.5509468", "0.549989", "0.5493659", "0.5492984", "0.547166", "0.5470837", "0.546572", "0.54571164", "0.5457052", "0.54542446", "0.5442896", "0.5430026", "0.5428383", "0.54167753", "0.5398226", "0.5388148", "0.53409404", "0.5334869", "0.53313214", "0.5312802", "0.5309239", "0.53064656", "0.5305144", "0.5292491", "0.52886957", "0.52828366", "0.52728987", "0.5269789", "0.52693856", "0.5268895", "0.5263712", "0.52630293", "0.52592796", "0.52570397", "0.5255318", "0.52508223", "0.5239751", "0.52388054", "0.52380925", "0.5236658", "0.5235358", "0.52348554", "0.52343667", "0.52323884", "0.52314055", "0.5220501", "0.52171326", "0.5211181", "0.52072495", "0.520669", "0.5206128", "0.5199351", "0.51680595", "0.5164669", "0.51637304", "0.5157489", "0.51364005", "0.5134887", "0.5111754", "0.51108587", "0.5106131", "0.51057684", "0.5105238", "0.5100861" ]
0.759938
0
Retrieve and store the 15min delayed BTC market price in EUR.
def _get_btc_eur_15min(self) -> None:
    with requests.get(BITCOIN_TICKER) as response:
        response.raise_for_status()
        json_data = response.json()
        self.btc_eur_15min = json_data["EUR"]["15m"]
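The method above stores the result on an instance attribute and relies on a module-level BITCOIN_TICKER constant that is not shown in this record. As a rough standalone sketch (the endpoint URL below is an assumption, not taken from the record), the same 15-minute delayed lookup can be exercised like this:

import requests

# Assumption: BITCOIN_TICKER points at the blockchain.info ticker, which returns
# a JSON object keyed by currency code, each entry carrying a "15m"
# (15-minute delayed) market price field.
BITCOIN_TICKER = "https://blockchain.info/ticker"


def get_btc_eur_15min() -> float:
    """Standalone sketch of the 15-minute delayed BTC price lookup in EUR."""
    with requests.get(BITCOIN_TICKER, timeout=10) as response:
        response.raise_for_status()
        json_data = response.json()
    return json_data["EUR"]["15m"]


if __name__ == "__main__":
    print(f"1 BTC = {get_btc_eur_15min()} EUR (15min delayed market price)")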
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getprice():\n\n print(\"Get price\")\n latest_price = get_latest_price(item_code)\n return latest_price", "def do_work(self) -> None:\n self._get_btc_eur_15min()\n print(\n f\"1 BTC = {self.btc_eur_15min} EUR\"\n f\"\\t\\t(15min delayed market price)\"\n )\n\n self._get_eur_gbp_last_month()\n print(\n f\"1 EUR = {self.eur_gbp_last_month} GBP\"\n f\"\\t(last month average rate)\"\n )\n\n self._get_btc_gbp_15min()\n print(\n f\"1 BTC = {self.btc_gbp_15min:.6f} GBP\"\n f\"\\t(BTC 15min delayed market price; GBP latest daily average rate)\"\n )", "def BuyingPrice(self):\n return self.buying_rice", "def _get_btc_gbp_15min(self) -> None:\n self._get_eur_gbp_last_daily()\n\n self.btc_gbp_15min = self.btc_eur_15min * self.eur_gbp_last_day", "def track_price():\n r = requests.get('https://finance.yahoo.com/quote/EURPLN=X?p=EURPLN%3DX&.tsrc=fin-srch&guce_referrer'\n '=aHR0cHM6Ly9maW5hbmNlLnlhaG9vLmNvbS8_Z3VjZV9yZWZlcnJlcj1hSFIwY0hNNkx5OTNkM2N1WjI5d'\n 'loyeGxMbU52YlM4Jmd1Y2VfcmVmZXJyZXJfc2lnPUFRQUFBRG1vS3ROMkF5bzFpTDRpd29Td0Z4Z0NDTVN'\n 'XU3M0UkNoa2pBcGl2NmxobmxJcWRab0JIWUF6NVJuNHlZdkN1WTRBNEdwVTRfWjBZQ3JNM1RwX2ZMd05rej'\n 'g0TkVWdksyUzA3LVNmNXdndUJCUjhieG5sZEN4dGRCRmV6eEZfMnNQdEpQeXJ6UzREeV9WRUF4ZXNUMXNLYz'\n 'lnTm1pSlFCV3R6LVpLX0hvc2p5Jl9ndWNfY29uc2Vud'\n 'F9za2lwPTE1OTcwODc3MTg&guce_referrer_sig=AQAAAKzjjM2--Diw1M3gykrGHjIn9NdqSch_odxmo6xqtgD4pNo'\n 'anrEQBgPoZ9xkh8HPYFN1_9mpio4Fg2tEGa4GrsK69bHe4yN9LactTwdKEuBxazZPO751TNSeFH_lltkNoN1k7D6I978v'\n '1eXB9WaCp0NUgbRZRmbYEdoZmkmQvUq7&_guc_consent_skip=1597087949')\n if r.status_code != 200:\n raise ConnectionError\n else:\n soup = BeautifulSoup(r.text, 'html.parser')\n price_elem = soup.find('span', {\"class\": \"Trsdu(0.3s) Fw(b) Fz(36px) Mb(-4px) D(ib)\"})\n return float(price_elem.text)", "def price(temp):\n now = datetime.datetime.now()\n r = requests.get(\"https://bitcoin.co.th/\")\n soup = BeautifulSoup(r.content, \"html.parser\")\n data = soup.find_all(\"div\", {\"class\": \"price\"})\n print(\"[%02i:%02i:%02i] Now BTC Price : \" % (now.hour, now.minute, now.second), end=\"\")\n for i in range(len(data)):\n price = (data[i].text)\n print(price)\n if price != temp: # Price Change\n line_sent(price)\n temp = price\n time.sleep(30) # Delay 30 second\n main(temp) # call function main for loop", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://bittrex.com/api/v1.1/public/getticker?market=\"+pair\n jsonResponse = self.getJson(uri)\n currentPrice = jsonResponse[\"result\"][\"Last\"]\n return currentPrice", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://www.bitfinex.com/v2/ticker/t\"\n requestUrl = uri + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[0]\n return currentPrice", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n jsonResponse = self.getJson(\"https://poloniex.com/public?command=returnTicker\")\n currentPrice = jsonResponse[pair][\"last\"]\n return currentPrice", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://www.bitstamp.net/api/v2/ticker/\"\n requestUrl = uri + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[\"last\"]\n return currentPrice", "def get_current_price(self):\n URL = config.coin['price_hist_url'] + self.ticker.lower()\n try:\n r = requests.get(URL)\n data = json.loads(r.text)\n value = data['last']\n timestamp = data['timestamp']\n 
self.current_price = value\n self.current_datetime = timestamp\n except Exception as err:\n logger.error(err)", "def updateLastPrice(self):\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(\n pytz.timezone('US/Central')).strftime(\"%H:%M\")\n\n # UPDATE POSITION LAST PRICE AND UPDATE HIGH PRICE\n open_positions = self.open_positions.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id})\n\n open_positions_list = []\n\n for position in open_positions:\n\n symbol = position[\"Symbol\"]\n\n if symbol not in open_positions_list:\n\n open_positions_list.append(symbol)\n\n if len(open_positions_list) > 0:\n\n resp = self.tdameritrade.getQuotes(open_positions_list)\n\n if resp:\n\n for key, value in resp.items():\n\n symbol = key\n\n last_price = value[\"lastPrice\"]\n\n self.open_positions.update_many({\"Trader\": self.user[\"Name\"], \"Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Last_Price\": last_price}})\n\n if dt_central == \"15:00\":\n\n self.open_positions.update_many({\"Trader\": self.user[\"Name\"], \"Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Opening_Price\": last_price}})\n\n # UPDATE QUEUE LAST PRICE\n queues = self.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type})\n\n queues_list = []\n\n for queue in queues:\n\n if self.asset_type == \"EQUITY\":\n\n symbol = queue[\"Symbol\"]\n\n elif self.asset_type == \"OPTION\":\n\n symbol = queue[\"Pre_Symbol\"]\n\n if symbol not in queues_list:\n\n queues_list.append(symbol)\n\n if len(queues_list) > 0:\n\n resp = self.tdameritrade.getQuotes(queues_list)\n\n for key, value in resp.items():\n\n symbol = key\n\n last_price = value[\"lastPrice\"]\n\n if self.asset_type == \"EQUITY\":\n\n self.queue.update_many({\"Trader\": self.user[\"Name\"], \"Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Last_Price\": last_price}})\n\n elif self.asset_type == \"OPTION\":\n\n self.queue.update_many({\"Trader\": self.user[\"Name\"], \"Pre_Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Last_Price\": last_price}})", "def getPrice(coin,cur):\n price = 'https://api.coinmarketcap.com/v1/ticker/' + coin\n json = requests.get(price).json()\n value = json[0]['price_' + str(cur)]\n return value", "def price(self) -> float:\n return self.close", "def get_price():\n return uniform(1.0, 350.0)", "def buy_and_pay(self):\n return self.price", "def buying_price(self):\n buy_price = self.standard_init_price()\n # Special status and resources price adaptation\n if self.planet.status in [self.tradeitem.dps]:\n buy_price = (buy_price * 5) / 3\n\n elif self.planet.special in [self.tradeitem.cr]:\n buy_price = (buy_price * 3) / 4\n\n elif self.planet.special in [self.tradeitem.er]:\n buy_price = (buy_price * 4) / 3\n\n # randomize a bit\n moins = random.randrange(self.tradeitem.var)\n plus = random.randrange(self.tradeitem.var)\n buy_price = buy_price - moins + plus\n\n # price can't be negative\n if buy_price < 0:\n buy_price = 0\n\n return int(buy_price)", "def purchase_price(self):\n if self.sold_on is None:\n return 0.0 # Not yet sold\n return 10000 - (.10 * self.miles)", "def compute_time_price(supplier_with_transaction):\n supplier_item = supplier_with_transaction.get('supplier_detail')\n transaction_item = 
supplier_with_transaction.get('supplier_transaction')\n # Check if there is time prices or not\n if supplier_with_transaction.get('time_price'):\n # Check if we will compute in complex or simple\n if not supplier_item.get('has_complex_minute_price'):\n # start to calculate the simple version for time price\n charging_start = transaction_item.get('charging_start')\n charging_end = transaction_item.get('charging_end')\n if charging_start and charging_end:\n charging_start_obj = datetime.strptime(charging_start, '%Y-%m-%dT%H:%M:%S')\n charging_end_obj = datetime.strptime(charging_end, '%Y-%m-%dT%H:%M:%S')\n duration_in_minutes = (charging_end_obj - charging_start_obj).total_seconds() / 60\n # Check for min duration\n if supplier_item.get('min_duration') and duration_in_minutes < supplier_item.get('min_duration'):\n duration_in_minutes = supplier_item.get('min_duration')\n price = supplier_item.get('simple_minute_price')\n total_price = price * duration_in_minutes\n return total_price\n else:\n # start calculate the complex version for time price\n total_price = 0\n if supplier_item.get('interval') == 'start':\n for start_rec in supplier_item.get('time_price'):\n timeframe = start_rec.get('billing_each_timeframe') * 60\n if start_rec.get('hour_from', 0) > start_rec.get('hour_to', 0):\n duration = (start_rec.get('hour_to') - start_rec.get('hour_from')) * 60\n else:\n duration = (start_rec.get('hour_to') - (24 - start_rec.get('hour_from'))) * 60\n duration_after_timeframe = duration % timeframe\n total_duration = duration + duration_after_timeframe\n total_price += total_duration * start_rec.get('minute_price')\n else:\n for end_rec in supplier_item.get('time_price'):\n timeframe = end_rec.get('billing_each_timeframe') * 60\n if end_rec.get('hour_from', 0) > end_rec.get('hour_to', 0):\n duration = (end_rec.get('hour_to') - end_rec.get('hour_from')) * 60\n else:\n duration = (end_rec.get('hour_to') - (24 - end_rec.get('hour_from'))) * 60\n duration_after_timeframe = duration % timeframe\n total_duration = duration - (timeframe - duration_after_timeframe)\n total_price += total_duration * end_rec.get('minute_price')\n\n return total_price\n else:\n total_price = 0\n return total_price", "def poll_price_data():\n resp = requests.get(COINDESK_ENDPOINT) # Powered by CoinDesk\n if resp.status_code == 200:\n logging.info(\"GET request succeeded\")\n data = resp.json()\n data_dict = {\n \"id\": str(uuid.uuid1()),\n \"time\": data['time']['updated'],\n \"currency\": data['bpi']['USD']['code'],\n \"price\": data['bpi']['USD']['rate']\n }\n return data_dict\n else:\n logging.error(\"GET request failed\")", "def target_buy_price(self):\n if self.period_tick == 0:\n return random.randint(1, 10)\n elif self.period_tick % self.perseverance == 0:\n # Player runs out of patience and decides to change target price.\n (avg_price,\n max_price,\n min_price) = self.market.get_stock_price_last_period()\n\n power = self.period_tick / self.perseverance\n target_price = min(min_price + power, self.money_balance * 0.5)\n return target_price\n else:\n return None", "def get_product_price(self, url):\n self.driver.get(url)\n\n try:\n price = self.driver.find_element_by_id(\"priceblock_ourprice\").text\n except:\n pass\n\n try:\n price = self.driver.find_element_by_id(\"priceblock_dealprice\").text\n except:\n pass\n\n if price is None:\n price = \"Not available\"\n\n else:\n non_decimal = re.compile(r'[^\\d.]+')\n price = non_decimal.sub('', price)\n\n return price", "def fetch_price():\n\n url = 
\"https://www.bitstamp.net/api/ticker/\"\n\n response = json.load(urllib2.urlopen(url))\n\n return {\"buy\": response['ask'], \"sell\": response['bid']}", "def get_market_price(self, exchange, pair, type):\n return self.ccxt.get_market_price(exchange, pair, type)", "def get_price(data):\n return data[\"summaryDetail\"][\"regularMarketPreviousClose\"][\"raw\"]", "def block5_price(self):\n return self._safe_value(VAR_BLOCK5PRICE, float)", "def get_price(self):\r\n return self.price", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://api.kraken.com/0/public/Ticker\"\n requestUrl = uri + \"?pair=\" + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[\"result\"][pair][\"c\"]\n return currentPrice", "def field_buy(self, symbol):\r\n\r\n end_percent = 150\r\n current_price = 15#self.get_price()\r\n self.log(current_price)\r\n buys = {}\r\n new_price = current_price * 1.05\r\n while (new_price / current_price) > 150:\r\n self.log(\"New sell at: {}\".format(new_price))\r\n new_price *= 1.05\r\n\r\n self.log(buys)\r\n\r\n return buys", "def calculate_buy_price(price: float):\n return round(price / (1 + CONF.trade_advantage_in_percent / 100), 1)", "def selling_price(self):\n # If a system can't produce something, its price is zero.\n _good = self.tradeitem\n if self.planet.tech_level < _good.tp and _good.name not in 'fuel':\n sell_price = 0\n else:\n sell_price = self.standard_init_price()\n # raise a bit, randomized\n sell_price = sell_price + random.randrange(self.tradeitem.var)\n\n return int(sell_price)", "async def on_symbol_price_updated(self, price: MetatraderSymbolPrice):\n self._pricesBySymbol[price['symbol']] = price\n positions = list(filter(lambda p: p['symbol'] == price['symbol'], self._positions))\n orders = list(filter(lambda o: o['symbol'] == price['symbol'], self._orders))\n specification = self.specification(price['symbol'])\n if specification:\n for position in positions:\n if 'unrealizedProfit' not in position or 'realizedProfit' not in position:\n position['unrealizedProfit'] = (1 if (position['type'] == 'POSITION_TYPE_BUY') else -1) * \\\n (position['currentPrice'] - position['openPrice']) * \\\n position['currentTickValue'] * position['volume'] / specification['tickSize']\n position['realizedProfit'] = position['profit'] - position['unrealizedProfit']\n new_position_price = price['bid'] if (position['type'] == 'POSITION_TYPE_BUY') else price['ask']\n is_profitable = (1 if (position['type'] == 'POSITION_TYPE_BUY') else -1) * (new_position_price -\n position['openPrice'])\n current_tick_value = price['profitTickValue'] if (is_profitable > 0) else price['lossTickValue']\n unrealized_profit = (1 if (position['type'] == 'POSITION_TYPE_BUY') else -1) * \\\n (new_position_price - position['openPrice']) * current_tick_value * position['volume'] / \\\n specification['tickSize']\n position['unrealizedProfit'] = unrealized_profit\n position['profit'] = position['unrealizedProfit'] + position['realizedProfit']\n position['currentPrice'] = new_position_price\n position['currentTickValue'] = current_tick_value\n for order in orders:\n order['currentPrice'] = price['ask'] if (order['type'] == 'ORDER_TYPE_BUY_LIMIT' or\n order['type'] == 'ORDER_TYPE_BUY_STOP' or\n order['type'] == 'ORDER_TYPE_BUY_STOP_LIMIT') else price['bid']\n if self._accountInformation:\n self._accountInformation['equity'] = self._accountInformation['balance'] + \\\n functools.reduce(lambda a, b: a + b['profit'], self._positions, 0)", 
"async def price(self, ctx, name):\n\t\tname = name.upper()\n\t\ttry:\n\t\t\tstock_data = await self._get_stock_data([name])\n\t\texcept ValueError as e:\n\t\t\treturn await ctx.send(e)\n\t\tif name not in stock_data:\n\t\t\tawait ctx.send(f'I couldn\\'t find any data for the stock {name}. Please try another stock.')\n\t\t\treturn\n\t\tprice = stock_data[name]['price']\n\t\treal = str(price)\n\t\treal = ('0' * (3 - max(len(real), 0))) + real\n\t\treal = '$' + real[:-2] + '.' + real[-2:]\n\t\tcurrency = await bank.get_currency_name(ctx.guild)\n\t\tawait ctx.send(f'**{name}:** {price} {currency} per share ({real}).')", "def get_price(curr: str):\r\n\tif curr in COIN_VALUES:\r\n\t\treturn\r\n\r\n\tapi_delay('prices')\r\n\tresp = requests.get(\"https://api.coinstats.app/public/v1/coins?skip=0&limit=20&currency=USD\")\r\n\tif resp.status_code == 200:\r\n\t\tinfo = json.loads(resp.text)['coins']\r\n\t\tfor x in info:\r\n\t\t\tif x['name'] == curr:\r\n\t\t\t\tCOIN_VALUES[curr] = x['price']\r\n\telse:\r\n\t\tprint(f'Failed to get price of {curr}')", "def calculate_sell_price(price: float):\n return round(price * (1 + CONF.trade_advantage_in_percent / 100), 1)", "def get_sp500():\n sp500 = si.get_live_price(\"^GSPC\")\n sp500_trim = \"%.2f\" % sp500\n\n _time = datetime.datetime.now().timetuple()\n _time = time.mktime(tuple(_time))\n _time_label = f\"test\"\n\n return float(sp500_trim), int(_time)", "def buy_fixed_price(self, buying_price):\n\n print(f\"Ingresando orden a ${buying_price:,.2f}\".replace('.', ','))\n pyRofex.send_order(\n ticker=self.symbol,\n side=pyRofex.Side.BUY,\n price=buying_price,\n size=1,\n order_type=pyRofex.OrderType.LIMIT\n )\n return buying_price", "def exchange_rate(self):\n res = r.get(self.url + self.current_rate)\n return self.execute(res)", "def get_futbin_price_lastupdated(self, ID):\n r = requests.get(\n 'https://www.futbin.com/22/playerPrices?player={0}'.format(ID))\n # r = requests.get('https://www.futbin.com/20/playerGraph?type=daily_graph&year=20&player={0}'.format(ID))\n data = r.json()\n\n price = data[str(ID)][\"prices\"][\"xbox\"][\"LCPrice\"]\n lastupdated = data[str(ID)][\"prices\"][\"xbox\"][\"updated\"]\n\n if (lastupdated == \"Never\"):\n return 0, 100\n elif (\"mins ago\" in lastupdated):\n lastupdated = lastupdated[:-9]\n lastupdated = int(lastupdated)\n elif(\"hour ago\" in lastupdated):\n lastupdated = lastupdated[:-9]\n lastupdated = int(lastupdated) * 60\n elif(\"hours ago\" in lastupdated):\n lastupdated = lastupdated[:-10]\n lastupdated = int(lastupdated) * 60\n elif(\"seconds\" in lastupdated):\n lastupdated = 1\n elif(\"second\" in lastupdated):\n lastupdated = 1\n else:\n return 0, 100\n\n price = price.replace(\",\", \"\")\n price = int(price)\n\n # MINUTES\n lastupdated = int(lastupdated)\n return price, lastupdated", "def get_price(self):\n return self.price", "def get_price(self):\n return self.price", "def get_price(self):\n return self.price", "def tick(price, tick_size=0.05):\n return round(price / tick_size)*tick_size", "def get_last_price_tmp(market):\n\trequest = api.get_ticker(market)\n\tif not request['message']:\n\t\tlast = str(request['result']['Last'])\n\t\treturn (last)\n\telse:\n\t\tprint(request['message'])\n\t\tsys.exit(0)", "def get_order_price(self):\r\n if self.price is not None:\r\n return self.price #typical limit price order\r\n else:\r\n #Check the orderbook\r\n logger.info(\"floating price\")\r\n self.get_orderbook()\r\n logger.info(self.orderbook_snapshot)\r\n\r\n pass", "def fetchPrice(self, token):\n i = 0\n 
cache = self.cache\n cacheLen = len(self.cache)\n stamp = time.time()\n minStamp = stamp - self.maxCacheAge\n data = None\n while True:\n if i >= cacheLen:\n break\n cacheToken, cacheStamp, cacheData = cache[i]\n if cacheStamp < minStamp:\n print(\"CMClient: expired cache data for %s\" % cacheToken)\n cache.pop(i)\n cacheLen -= 1\n continue\n if token == cacheToken:\n data = cacheData\n i += 1\n if data:\n print(\"CMClient: returning cached data for %s\" % token)\n return data\n data = helpers.getUriAsJson(self.tickerTemplate % token)\n cache.insert(0, (token, stamp, data))\n self.saveSettings()\n print(\"CMClient: returning new data for %s\" % token)\n return data", "def check_price(self):\n return self.day*self.price", "def fetch_current():\n\n data = json.load(urllib2.urlopen(TICKER_URL))\n\n buy = float(data[\"ask\"])\n sell = float(data[\"bid\"])\n\n now = int(time.time()) # Get current unix time\n\n return now, buy, sell", "def priceGetSome(soup):\n price = soup.find('span', id='priceblock_ourprice', class_='a-size-medium a-color-price')\n price = price.text\n price = price.strip()\n price = price.lstrip('$')\n price = float(price)\n\n return price", "def getPrice(self):\n priceElem = self.driver.find_element_by_xpath(self.priceXPath)\n price = priceElem.text.replace(\"€\", \"\").replace(\" \", \"\").replace(\",\", \".\")\n return float(price)", "def price(self):\n return self._safe_value(VAR_PRICE, float)", "def __get_deal_price(self):\n return self.create_random_decimal(min=1, max=100000)", "def get_quote(self, ticker):\r\n key = 'GLC0GTVKR51SY1V'\r\n quote_url = 'https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol=' + ticker.upper() + '&apikey=' + key\r\n key_metrics_url = 'https://www.alphavantage.co/query?function=OVERVIEW&symbol=' + ticker.upper() + '&apikey=' + key\r\n\r\n quote_response = requests.get(quote_url)\r\n string = quote_response.json()\r\n\r\n key_metrics_response= requests.get(key_metrics_url)\r\n metrics_str = key_metrics_response.json()\r\n color_tag = None\r\n\r\n if quote_response and 'Global Quote' in string:\r\n\r\n current_price = round(float(string['Global Quote']['05. price']), 2)\r\n change = round(float(string['Global Quote']['09. change']), 2)\r\n change_pct = string['Global Quote']['10. change percent'][:5] + \"%\"\r\n previous_price = round(float(string['Global Quote']['08. 
previous close']), 2)\r\n\r\n yearly_high = metrics_str['52WeekHigh']\r\n mark_cap = round(int(metrics_str['MarketCapitalization'])/10E8, 2)\r\n mark_cap_str = str(mark_cap) + \"B\"\r\n\r\n if ticker not in self.holdings:\r\n self.holdings[ticker] = current_price\r\n tuples = [ticker, current_price, change, change_pct, yearly_high, mark_cap_str]\r\n\r\n if current_price > previous_price:\r\n color_tag = 'green'\r\n else:\r\n color_tag = 'red'\r\n self.treeview.insert(parent='', index='end', values=tuples, tags=(color_tag,))\r\n return current_price\r\n else:\r\n return None", "def get_stock_price(stock):\n pass", "def get_price(self):\n return self._price", "def price(self, irc, msg, args, optlist, typeName):\n\n try:\n typeID = self._get_typeID(typeName)\n itemType = self._get_type(typeID)\n except:\n irc.error('Unknown type')\n return\n\n if len(optlist) == 1:\n location = optlist[0][1]\n else:\n location = 'Jita'\n\n try:\n locationID = self._get_locationID(location)\n location = self._get_location(locationID)\n except:\n irc.error('Unknown location')\n return\n\n market = self._sql(\"\"\"\n SELECT * FROM evecentral_market\n WHERE \"locationID\"=%s\"\"\", [locationID])\n if not market:\n irc.reply('No data for that market location')\n return\n\n marketitem = self._sql(\"\"\"\n SELECT * FROM evecentral_marketitem\n WHERE \"locationID\"=%s AND \"typeID\"=%s\"\"\", [locationID, typeID])\n if marketitem:\n irc.reply('{0} in {1}: buy max: {2} (volume: {3:,d}). sell min: {4} (volume: {5:,d}).'.format(\n ircutils.bold(itemType['typeName']),\n self._colorize_system(location),\n ircutils.mircColor(\n '{:,.2f}'.format(marketitem['buy_max']),\n fg='green'),\n int(marketitem['buy_volume']),\n ircutils.mircColor(\n '{:,.2f}'.format(marketitem['sell_min']),\n fg='green'),\n int(marketitem['sell_volume']),\n ), prefixNick=False)\n else:\n irc.reply(\"Prices for {0} in {1} isn't updated yet.\".format(\n itemType['typeName'],\n location['itemName']\n ))", "def get_coin_price(asset, time=None):\n url = 'https://rest.coinapi.io/v1/exchangerate/{}/USD'.format(asset)\n if time is not None:\n url = url + '?time={}'.format(time)\n headers = {'X-CoinAPI-Key': os.environ.get('COIN_API_KEY', '')}\n r = requests.get(url, headers=headers)\n if r.status_code / 100 == 2:\n price = {\"price\": r.json()['rate']}\n return price\n else:\n return {\"error\": r.content.decode('utf-8')}", "def block6_price(self):\n return self._safe_value(VAR_BLOCK6PRICE, float)", "def block8_price(self):\n return self._safe_value(VAR_BLOCK8PRICE, float)", "def showCurrentPrice():\n\n page = requests.get(\n \"https://markets.businessinsider.com/commodities/oil-price?type=wti\"\n )\n soup = BeautifulSoup(page.text, \"html.parser\")\n currentPrices = soup.find(class_=\"push-data\")\n price = str(currentPrices.next)\n\n return price", "def price(self):\n return self._price", "def price(self):\n return self._price", "def get_price(horizon_host, pair):\n print \"fetching latest price for:\" + pair[\"name\"]\n params = make_trade_params(pair)\n res = requests.get(horizon_host + \"/trades\", params).json()\n try:\n trade_record = res[\"_embedded\"][\"records\"][0]\n except IndexError:\n return DatedPrice(date=datetime.utcfromtimestamp(0), price=0)\n price = float(trade_record[\"price\"][\"n\"]) / float(trade_record[\"price\"][\"d\"])\n timestamp = parser.parse(trade_record[\"ledger_close_time\"])\n return DatedPrice(date=timestamp, price=price)", "def buy_limit_order(self, price=0, volume=0):\n auth = CoinbaseExchangeAuth(self.api_key, 
self.secret_key, self.passphrase)\n data = {\n \"size\": volume,\n \"price\": price,\n \"side\": \"buy\",\n \"product_id\": self.rate,\n \"type\": \"limit\"\n # \"time_in_force\": 'GTC',\n # \"cancel_after\": (datetime.now() + timedelta(minutes=10)).strftime('%M,%H,%d')\n }\n\n buy = requests.post(self.url + 'orders',\n data=json.dumps(data),\n auth=auth).json()\n buy['txid'] = buy['id']\n\n logging.debug(buy)\n return buy", "def price(self) -> float:\n return self._price", "def desired_price(self):\n return self._desired_price", "def getMarketPrices(market, interval):\n try:\n tf = {3600: '3600Min',\n 60: '60Min',\n 30: '30Min',\n 5: '5Min',\n 1: '1Min'}\n history = API.getmarkethistory(MARKET)\n df = pd.DataFrame(history)\n df.index = pd.to_datetime(df['TimeStamp'])\n prices = df['Price'].resample(tf[interval]).ohlc()\n if BACKTESTFILE != \"\":\n return prices.dropna()\n return prices.dropna().tail(MEAN)\n except Exception as e:\n logging.error(\"failed at getMarketPrices\")\n logging.error(str(e))", "def _get_eur_gbp_last_daily(self) -> None:\n data = _get_ecb_data(FREQUENCY_DAILY, _ten_days_ago(), _today())\n\n self.eur_gbp_last_day = _get_latest_ecb_rate(data)", "def get_product_price(self):\n\n price = \"0.0000\"\n\n try:\n price = self.trees.get_element_by_id(\"priceblock_ourprice\").text\n except:\n try:\n price = self.trees.get_element_by_id(\n \"price_inside_buybox\").text\n except:\n try:\n price = self.trees.get_element_by_id(\n \"priceblock_dealprice\").text\n except:\n try:\n price = self.trees.xpath(\n \"//span[@class='a-color-price']/text()\")[0]\n except:\n try:\n price = self.trees.xpath(\n \"//span[@class='a-size-base a-color-price']/text()\")[0]\n except:\n pass\n\n non_decimal = re.compile(r'[^\\d.]+')\n price = non_decimal.sub('', price)\n\n return round(float(price[0:5]), 2)", "def price(self):\n return self.price_", "def buy_cost(self):\n return self._manager.get_buy_price(self.name)", "def block7_price(self):\n return self._safe_value(VAR_BLOCK7PRICE, float)", "def sell_cost(self):\n return self._manager.get_sell_price(self.name)", "async def get_quote_price(self, trading_pair: str, is_buy: bool, amount: Decimal) -> Optional[Decimal]:\n\n try:\n\n base, quote = trading_pair.split(\"-\")\n side = \"buy\" if is_buy else \"sell\"\n resp = await self._api_request(\"post\", \"terra/price\", {\"base\": base, \"quote\": quote, \"trade_type\": side,\n \"amount\": str(amount)})\n txFee = resp[\"txFee\"] / float(amount)\n price_with_txfee = resp[\"price\"] + txFee if is_buy else resp[\"price\"] - txFee\n return Decimal(str(price_with_txfee))\n # if resp[\"price\"] is not None:\n # return Decimal(str(resp[\"price\"]))\n except asyncio.CancelledError:\n raise\n except Exception as e:\n self.logger().network(\n f\"Error getting quote price for {trading_pair} {side} order for {amount} amount.\",\n exc_info=True,\n app_warning_msg=str(e)\n )", "def priceGetDeal(soup):\n price = soup.find('td', id='priceblock_dealprice', class_='a-color-price a-size-medium')\n price = price.text\n priceList = price.split()\n price = priceList[0]\n price = price.strip()\n price = price.lstrip('$')\n price = float(price)\n\n return price", "def get_price(self, spot, t = 0, k = 1):\n if k == 0:\n return self.fv * np.exp(- spot * (self.maturity - t))\n else:\n return self.fv / np.power(1 + spot / k, (self.maturity - t) * k)", "def getPrice(self):\n return self.price", "def get_price(self):\n return f'{self.soup.find(attrs={\"class\": \"woocommerce-Price-amount\"}).text}'", "def 
get_base_price(self):\n\n price = randint(5, 9)\n\n now = datetime.now()\n weekday = now.weekday()\n hour = now.hour\n\n if weekday < 5 and 7 < hour < 12:\n price = price + 4\n\n return price", "def market_value(self, ref_prices, suspensions=None):\n # TODO some securities could not be able to be traded\n if suspensions is None:\n suspensions = []\n \n market_value_float = 0.0\n market_value_frozen = 0.0 # suspended or high/low limit\n for sec in self.holding_securities:\n size = self.get_position(sec).current_size\n # TODO PortfolioManager object should not access price\n price = ref_prices[sec]\n mv_sec = price * size\n if sec in suspensions:\n market_value_frozen += mv_sec\n else:\n market_value_float += mv_sec\n \n return market_value_float, market_value_frozen", "def sale_price(self):\n if self.sold_on is not None:\n return 0.0 # Already sold\n return 5000.0 * self.wheels", "def sale_price(self):\n if self.sold_on is not None:\n return 0.0 # Already sold\n return 5000.0 * self.wheels", "def sale_price(self):\n if self.sold_on is not None:\n return 0.0 # Already sold\n return 5000.0 * self.wheels", "def purchase_price(self):\n if self.sold_on is None:\n return 0.0 # Not yet sold\n return self.base_sale_price - (.10 * self.miles)", "def purchase_price(self):\n if self.sold_on is None:\n return 0.0 # Not yet sold\n return self.base_sale_price - (.10 * self.miles)", "async def btc( ctx):\r\n await ctx.message.delete()\r\n r = requests.get(\r\n \"https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD,EUR,GBP\"\r\n )\r\n r = r.json()\r\n usd = r[\"USD\"]\r\n eur = r[\"EUR\"]\r\n gbp = r[\"GBP\"]\r\n em = discord.Embed(\r\n description=f\"USD: `{str(usd)}$`\\n\\nEUR: `{str(eur)}€`\\n\\nGBP: `{str(gbp)}£`\"\r\n )\r\n em.set_author(\r\n name=\"Bitcoin\",\r\n icon_url=\"https://cdn.pixabay.com/photo/2013/12/08/12/12/bitcoin-225079_960_720.png\",\r\n )\r\n await ctx.send(embed=em)\r\n ### I hope this code is so horrible I'm never allowed to code embeds again\r", "def block1_price(self):\n return self._safe_value(VAR_BLOCK1PRICE, float)", "def getChange(coin,interval):\n change = 'https://api.coinmarketcap.com/v1/ticker/' + coin\n json = requests.get(change).json()\n value = json[0]['percent_change_' + str(interval)]\n return value", "def set_treasury(self) -> None:\n if self.msg.value < 10 ** 22:\n revert(f\"{TAG}: set_treasury method doesnt accept ICX less than 10000 ICX\")\n if self.msg.value % 10 ** 22 != 0:\n revert(f\"{TAG}: Set treasury error, Please send amount in multiples of 10,000 ICX\")\n self._treasury_min.set(self._treasury_min.get() + self.msg.value)\n Logger.debug(f'Increasing treasury minimum by {self.msg.value} to {self._treasury_min.get()}.')\n self._set_bet_limit()\n self._open_treasury.set(False)\n self.FundReceived(self.msg.sender, self.msg.value, f\"Treasury minimum increased by {self.msg.value}\")\n Logger.debug(f'{self.msg.value} was added to the treasury from address {self.msg.sender}', TAG)", "def query_last_price(market_data):\n print(\"Consultando el último precio\")\n if market_data[\"marketData\"][\"LA\"]:\n last_price = market_data[\"marketData\"][\"LA\"][\"price\"]\n print(\n f\"Último precio operado: ${last_price:,.2f}\".replace('.', ','))\n return last_price\n print(\"Último precio operado: No hay datos disponibles\")\n return None", "def get_used_balance():\n try:\n if CONF.exchange == 'bitmex':\n position = EXCHANGE.private_get_position()\n if not position:\n return None\n return position[0]['currentQty']\n if CONF.exchange == 'kraken':\n result = 
EXCHANGE.private_post_tradebalance()['result']\n return round(float(result['e']) - float(result['mf']))\n if CONF.exchange == 'liquid':\n return round(get_crypto_balance()['used'] * get_current_price())\n\n except (ccxt.ExchangeError, ccxt.NetworkError) as error:\n LOG.error(RETRY_MESSAGE, type(error).__name__, str(error.args))\n sleep_for(4, 6)\n get_used_balance()", "def standard_init_price(self):\n # If a system can't use something, its price is zero.\n _good = self.tradeitem\n if self.planet.tech_level < _good.tu and _good.name not in 'fuel':\n base_price = 0\n else:\n base_price = _good.plt + (self.planet.tech_level * _good.pi)\n # if good is highly requested, increase the price\n if self.planet.status in [_good.dps]:\n base_price = base_price + (base_price * 0.5)\n # large system: high production decreases prices\n base_price = (base_price * (100 - self.planet.system_size)) / 100\n\n # price can't be negative\n if base_price < 0:\n base_price = 0\n\n return int(base_price)", "def trade(self) -> float:\n return self._trade", "def btc_scraping():\n\n url_btc_web = requests.get('https://awebanalysis.com/es/coin-details/bitcoin/')\n soup = BeautifulSoup(url_btc_web.content, 'html.parser')\n\n # we need the class html\n result = soup.find('td', {'class': 'wbreak_word align-middle coin_price'})\n btc_price = result.text\n\n return btc_price", "def usdToBtc(dollar, bitcoin):\n global btc\n global usd\n if usd>dollar:\n usd-=dollar\n btc+=bitcoin\n return True\n return False", "def get_current_price(self, crypto, fiat, confirmations=1):\n raise NotImplementedError(\n \"This service does not support getting the current fiat exchange rate. \"\n \"Or rather it has no defined 'get_current_price' method.\"\n )", "def price(self, tf=None):\n if self._ticks:\n return (self._ticks[-1][1] + self._ticks[-1][2]) * 0.5\n else:\n candles = None\n if tf and self._candles.get(tf):\n candles = self._candles[tf]\n elif self._candles.get(Instrument.TF_SEC):\n candles = self._candles[Instrument.TF_SEC]\n elif self._candles.get(Instrument.TF_MIN):\n candles = self._candles[Instrument.TF_MIN]\n\n if candles:\n return candles[-1].close\n\n return None", "def get_price():\n \n #Teacher's code. Could not get it working.\n #price = db(db.product.name == productName).select(db.product.price)[0].price\n \n \n return (200)", "def get_price(self, pair='XBTZAR'):\n data = {'pair': pair}\n query_string = build_query_string(data)\n\n r = requests.get(build_api_call(self.base_url, None, 'ticker', query_string))\n if r.status_code == 200:\n return r.json()", "def update(self, price, dt):\n reached = self.get('reached')\n price_diff = self.get('price_diff')\n price_offset = self.get('price_offset')\n #log.info(\"Update bo feature '%s' at price change with price=%s dt=%s\" % (self.name, price, dt))\n #log.info(self.bo)\n if self.bo.price_diff_d is not None and not reached:\n if self.bo.price_diff_d >= price_diff:\n self.set('reached', True)\n new_stop = self.bo.price_open + self.bo.direction * price_offset\n log.info(\"<BOFeature:%s BE reached: price stop set to %s\" % (self.name, new_stop))\n self.modify_stop(new_stop)\n return(self.bo.ticket)\n else:\n return" ]
[ "0.65415394", "0.6299414", "0.62688947", "0.6254417", "0.62187046", "0.6205762", "0.620122", "0.61773175", "0.61718124", "0.6161998", "0.6105141", "0.60968745", "0.6074585", "0.6072953", "0.601132", "0.5992859", "0.5969653", "0.5964009", "0.5900362", "0.5885169", "0.586665", "0.5839722", "0.58291465", "0.5826046", "0.576981", "0.57643616", "0.57634306", "0.5751886", "0.5744245", "0.5734768", "0.5728995", "0.57212096", "0.57183003", "0.57072085", "0.5706146", "0.57031304", "0.56722564", "0.56719285", "0.56581014", "0.56575215", "0.56575215", "0.56575215", "0.56456345", "0.56397194", "0.5637578", "0.5628475", "0.56220657", "0.56196856", "0.5614508", "0.5613672", "0.5613131", "0.56110495", "0.56039345", "0.5603744", "0.55909246", "0.55845195", "0.55833036", "0.55799377", "0.5556274", "0.555535", "0.55547", "0.55547", "0.5536032", "0.55071336", "0.55042785", "0.55038285", "0.55020267", "0.5498454", "0.5497147", "0.5495868", "0.5493749", "0.5475944", "0.54739946", "0.5471494", "0.54694164", "0.54609483", "0.5456954", "0.5452496", "0.5435963", "0.54292506", "0.5424204", "0.5424204", "0.5424204", "0.5415886", "0.5415886", "0.54153347", "0.541493", "0.54097897", "0.54063433", "0.54036707", "0.53996855", "0.53921235", "0.53882474", "0.53867507", "0.53773266", "0.53760517", "0.5374007", "0.53682244", "0.53675413", "0.5359777" ]
0.70017016
0
Retrieve and store last month's EUR to GBP average rate.
def _get_eur_gbp_last_month(self) -> None:
    last_month = _last_month()
    data = _get_ecb_data(FREQUENCY_MONTHLY, last_month, last_month)
    self.eur_gbp_last_month = _get_latest_ecb_rate(data)
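Here the period construction (_last_month()), the SDMX request helper (_get_ecb_data) and the parser (_get_latest_ecb_rate) live elsewhere in the codebase and are not part of this record. A self-contained sketch of the same monthly lookup, with an assumed ECB endpoint, series key and response format (none of them taken from the record), might look like:

from datetime import date

import requests
from lxml import etree

# Assumption: the ECB SDMX REST API, monthly GBP-per-EUR reference rate series.
ECB_EXR_MONTHLY_GBP = (
    "https://sdw-wsrest.ecb.europa.eu/service/data/EXR/M.GBP.EUR.SP00.A"
)


def get_eur_gbp_last_month() -> float:
    """Standalone sketch: last month's average EUR->GBP reference rate."""
    today = date.today()
    # Previous calendar month as an SDMX monthly period string, e.g. "2024-05".
    year, month = (today.year, today.month - 1) if today.month > 1 else (today.year - 1, 12)
    period = f"{year}-{month:02d}"

    response = requests.get(
        ECB_EXR_MONTHLY_GBP,
        params={"startPeriod": period, "endPeriod": period},
        # Assumed media type requesting the generic SDMX-ML representation.
        headers={"Accept": "application/vnd.sdmx.genericdata+xml;version=2.1"},
        timeout=10,
    )
    response.raise_for_status()

    root = etree.fromstring(response.content)
    # local-name() avoids depending on the exact namespace prefixes in the reply.
    values = root.xpath("//*[local-name()='ObsValue']/@value")
    return float(values[-1])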
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_eur_gbp_last_daily(self) -> None:\n data = _get_ecb_data(FREQUENCY_DAILY, _ten_days_ago(), _today())\n\n self.eur_gbp_last_day = _get_latest_ecb_rate(data)", "def get_avg(self):\r\n df = pd.read_csv(\"MonthlyRate.csv\")\r\n df = df[df.CurrencyCode == self.choice]\r\n mean = df.mean(axis=1).values[0]\r\n # Round the value to 4 d.p.\r\n mean = round(float(mean), 4)\r\n return mean", "def bitcoinaverage(site):\n url = \"https://apiv2.bitcoinaverage.com/frontend/constants/exchangerates/local\"\n try:\n session = requests.Session()\n cfscrape_requests = cfscrape.create_scraper(sess=session)\n ret = cfscrape_requests.get(url, timeout=(15, 15)).json()[\"rates\"]\n data = {\"USD:\" + key: float(val[\"rate\"]) for key, val in ret.items()}\n data = refine_data(data)\n print(site, data)\n race_write(f\"{site}_forex.txt\", json_dumps(data))\n except:\n print(f\"{site} failed to load\")", "def test_get_historical_gold_rate(self):\n rates = [153.50, 162.49, 123.86, 155.10]\n helper.gold_loop_helper(get_historical_gold_rate, TestHistoricalRates.dates_rate, rates)", "def get_average_for_month(self, month, weekend):\n\t\tif weekend:\n\t\t\treturn self.averages_weekend[month]\n\t\telse:\n\t\t\treturn self.averages_weekday[month]", "def _prorata_rate(self, days_used, days_in_month):\n return (100 * days_used // days_in_month) / 100.0", "def rate(self):\n return self.brate / FAC", "def year_average_price_rule(_m, y):\r\n\r\n # Total revenue\r\n return sum(m.SCENARIO_REVENUE[y, s] for s in m.S) / sum(m.SCENARIO_DEMAND[y, s] for s in m.S)", "def calculate_revenue(self, period: int, currency_rate: float) -> float:\n revenue_in_currency = self.capital * ((1.0 + self.interest) ** period - 1.0)\n revenue = revenue_in_currency * currency_rate\n return revenue", "def get_current_rate(self):\n pass", "def market_avg_price(**params):\n endpoint = 'calc/trade/avg'\n return request(authenticate=False, version=2, endpoint=endpoint, method='POST', query_params=params)", "def test_forecast_precision_mase1y_avg():\n joined_data = pd.DataFrame({'temp': [1], 'dt': [1575082800], 'today': ['2019-11-30'],\n 't5': [4.0],\n 't4': [3],\n 't3': [2.0],\n 't2': [1],\n 't1': [1.0]})\n historical_data = rutil.read_historical_data(\"tests/csv_files/historical_data_year_avg.csv\")\n years_back = 2\n\n result = rmet.mean_absolute_scaled_error_year_avg(joined_data, historical_data, 'temp', years_back)\n assert result == [1, 2 / 3, 1 / 3, 0, 0]", "def get_meanrates(self):\n return np.asarray([ n.meanrate for n in self.alln.values() ])", "def growthrate(cur, pre, y):\n return (cur-pre)/y", "def calculate_today_last(region, db, local_forecast):\n\t\n\t# get all temps from today table\n\ttoday_historical_params = {'table': 'today',\n\t\t\t\t\t\t 'conditions': ['region_id'],\n\t\t\t\t\t\t 'condition_data': (region.id,),\n\t\t\t\t\t\t 'filters': ['hour_{}'.format(x) for x in range(0, 24, 3)]}\n\n\t# returns a list of tuples\n\ttoday_historical = db.select(**today_historical_params)\n\n\tif None in today_historical[0]:\n\t\treturn 0.0\n\n\t# calculate mean\n\ttoday_avg = round(statistics.mean(today_historical[0]), 2)\n\n\t# save to history\n\tcurrent_date = dt_to_string(list(local_forecast)[0].date(), time=False)\n\tsave_to_history_params = {'table': 'history',\n\t\t\t\t\t\t\t 'destinations': ['region_id', 'temp', 'date', 'datetime'],\n\t\t\t\t\t\t\t 'data': (region.id, today_avg, current_date,\n\t\t\t\t\t\t\t\t\t dt_to_string(datetime.datetime.now()))}\n\tdb.insert(**save_to_history_params)\n\n\t# set today table to 
null\n\tset_today_to_null_params = {'table': 'today',\n\t\t\t\t\t\t\t\t'destinations': ['hour_{}'.format(x) for x in range(0, 24, 3)] + ['datetime'],\n\t\t\t\t\t\t\t\t'data': tuple([None] * 9),\n\t\t\t\t\t\t\t\t'conditions': ['region_id'],\n\t\t\t\t\t\t\t\t'condition_data': (region.id,)}\n\tdb.update(**set_today_to_null_params)\n\n\treturn today_avg", "def get_value(\n self\n ) -> float:\n\n return self.average", "def value_ret_calendar_period(self, year: int, month: int = None) -> float:\n if month is None:\n period = str(year)\n else:\n period = '-'.join([str(year), str(month).zfill(2)])\n rtn = self.tsdf.copy().pct_change()\n rtn = rtn.loc[period] + 1\n return float(rtn.apply(np.cumprod, axis='index').iloc[-1] - 1)", "def get_latest_average(fsym, tsym, markets='all', try_conversion=True, \n format='raw'):\n\t\n\t# build url \n\turl = build_url('generateAvg', fsym=fsym, tsym=tsym, markets=markets,\n\t try_conversion=try_conversion)\n\n\t# http request\n\tr = requests.get(url)\n\n\t# decode to json\n\tdata = r.json()\n\n\tif format == 'raw':\n\t\tdata = data['RAW']\n\telif format == 'display':\n\t\tdata = data['DISPLAY']\n\n\treturn data", "def annualized_gains(self, day='today'):\n assert day == 'today' or isinstance(day, date), 'Error! You have to pass a datetime.date istance to the day parameter.'\n if day == 'today':\n day = self.data.index[-1]\n if self.data.index[-1] >= day >= self.data.index[0]:\n day = self._first_good_date(day)\n initialValue = self.invested_amount(day)\n finalValue = self.value(day)\n numberOfDays = (day - self.data.index[0]).days\n return round(((finalValue / initialValue)**(365/numberOfDays) - 1) * 100, 2) \n else:\n return 0", "def api_asset_calculate_revenue():\n periods = request.args.getlist(\"period\")\n\n daily_response = requests.get(CBR_DAILY_URL)\n key_indicators_response = requests.get(CBR_INDICATORS_URL)\n currency_rates = parse_cbr_currency_base_daily(daily_response.text)\n currency_rates.update(parse_cbr_key_indicators(key_indicators_response.text))\n\n result = {}\n for period in periods:\n result[period] = app.bank.calculate_revenue(int(period), currency_rates)\n return result, 200", "def getAvg(self):\r\n\t\treturn self.data['avg']", "def GrowthAPR(self, years=10):\n pastPrice = self.history[-1].price\n import datetime\n today = datetime.datetime.now()\n pastDate = self.history[-1].date\n # TODO: This is an inefficient way to look up a specific date\n for index in range(0, len(self.history) - 1):\n if today - self.history[index].date < datetime.timedelta(days=365.25*years):\n # Assuming the stock data is in chronological order, the first result more recent than X years\n # is a good enough approximation\n pastPrice = self.history[index].price\n pastDate = self.history[index].date\n break\n if pastPrice == 0.:\n return 0.\n n_years = (self.history[-1].date - pastDate).days / 365.25\n if n_years == 0.:\n return 0.\n return 100. * (self.history[-1].price / pastPrice) ** (1. 
/ n_years) - 100.", "def calc_av_daily_return(self):\n av_return = 0.0\n total_ret = sum(self._returns)\n num_averages = len(self._returns)\n \n if num_averages > 0:\n av_return = total_ret/float(num_averages)\n \n self._av_daily_return = av_return\n return av_return", "def genMarketStat(self):\n myMarketStat = marketstat.MarketStat({'id':str(self.currentRound)})\n self.marketStats[str(self.currentRound)] = myMarketStat\n # set avg price to last rounds market avg price\n if self.currentRound > 1:\n lastMarketStat = self.marketStats[str(self.currentRound-1)]\n myMarketStat.avgSoldAL = lastMarketStat.avgSoldAL\n myMarketStat.avgSoldEC = lastMarketStat.avgSoldEC\n myMarketStat.avgSoldIA = lastMarketStat.avgSoldIA", "def stock_average(stock):\n closing_price=stock['Close']\n average=stats.mean(closing_price)\n return average", "def rate_last(self):\n diff = (self.time - self.lasts[0][0]).total_seconds()\n try:\n return (self.pos - self.lasts[0][1]) / FAC / diff\n except ZeroDivisionError:\n return 0.0", "def compute_rate(self):\n bg_rate = self.counts.data / self.livetime.data\n\n bg_rate /= self.counts.bin_volume\n\n bg_rate = bg_rate.to('MeV-1 sr-1 s-1')\n\n self.bg_rate.data = bg_rate\n self.bg_rate.data_err = (np.sqrt(self.counts.data) / (self.counts.bin_volume * self.livetime.data)).to(\n 'MeV-1 sr-1 s-1')", "def arithmetic_ret(self) -> float:\n return float(np.log(self.tsdf).diff().mean() * self.periods_in_a_year)", "def get_monthly_avg(all_stock_data):\n try:\n monthly_data = {}\n for data in all_stock_data:\n month = data[0][0:7]\n if month not in monthly_data:\n monthly_data[month] = []\n monthly_data[month].append(data)\n monthly_avg_list = []\n for month, stock_data in monthly_data.items():\n monthly_avg_list.append((month, get_avg(stock_data)))\n return monthly_avg_list\n\n except Exception as e:\n print(e)\n exit()", "def em_mean(self) -> float:\n if self.__total_pulls == 0:\n raise Exception('Number of pulls is 0. 
No empirical mean.')\n return self.__total_rewards / self.__total_pulls", "def monthly_benefit(self):\n \"\"\"Calculate weekly benefit of this company from this day\"\"\"\n total_purchase_price = 0\n total_selling_price = 0\n last_thirty_days = timezone.now() - timedelta(days=30)\n items = self.item_set.filter(status=\"sold\", updated_at__gte=last_thirty_days)\n for item in items:\n total_purchase_price += item.price\n total_selling_price += item.selling_price\n benefit = total_selling_price - total_purchase_price\n return benefit", "def get_current(self):\r\n with open('MonthlyRate.csv', newline='') as csvfile:\r\n reader = csv.DictReader(csvfile)\r\n for row in reader:\r\n if self.choice == row['CurrencyCode']:\r\n current = row[\"Current Rate\"]\r\n csvfile.close()\r\n # Round the value to 4 d.p.\r\n current = round(float(current), 4)\r\n return current", "def get_average_survival(self):\n return np.mean(self.survival_rates)", "def ADK_Rate_Avg(Uion,Z,E):\n\treturn Cycle_Averaging_Factor(Uion,E)*ADK_Rate(Uion,Z,E)", "def get_avg(all_stock_data):\n try:\n sum_close_vol = 0.0\n sum_vol = 0.0\n for item in all_stock_data:\n adj_close = item[1]\n volume = item[2]\n sum_close_vol += adj_close * volume\n sum_vol += item[2]\n return sum_close_vol / sum_vol\n\n except Exception as e:\n print(e)\n exit()", "def obtain_monthly_mean(data=pd.DataFrame()):\n return data.resample(\"MS\").mean()", "def average(self, returns):\r\n return returns.mean() * self.day", "def get_avg_price(cls, instrument: Instrument) -> float:\n try:\n avg_price = cls.objects.filter(\n instrument=instrument,\n # status=OrderStatus.COMPLETED.value\n ).annotate(price_t_volume=models.F('price') *\n models.F('total_sum')).aggregate(\n avg_price=models.Sum('price_t_volume') /\n models.Sum('total_sum'))\n except DataError: # handle division by zero\n return 0\n return float(avg_price.get('avg_price', 0) or 0)", "def calc_meanrates(self):\n TRACKNEURONPERIOD = get_ipython().user_ns['TRACKNEURONPERIOD']\n if TRACKNEURONPERIOD == 'track':\n # calc tn.meanrate using entire track duration:\n for tn in self.alln.values():\n tn.meanrate = tn.nspikes / self.dtsec\n elif TRACKNEURONPERIOD == 'trange':\n # calc tn.meanrate using duration between its first and last spike:\n for tn in self.alln.values():\n if tn.dtsec == 0:\n tn.meanrate = 0.0\n else:\n tn.meanrate = tn.nspikes / tn.dtsec\n else:\n raise ValueError(\"invalid value for TRACKNEURONPERIOD: %r\" % TRACKNEURONPERIOD)", "def get_rates(self):\n rates = np.empty(len(self.periods))\n for index, element in enumerate(self.periods):\n rates[index] = self.periods[element]['price']\n return(pd.Series(rates, self.periods.keys()))", "def get_gae(rewards, value_estimates, value_next=0.0, gamma=0.99, lambd=0.95):\n value_estimates = np.append(value_estimates, value_next)\n delta_t = rewards + gamma * value_estimates[1:] - value_estimates[:-1]\n advantage = discount_rewards(r=delta_t, gamma=gamma * lambd)\n return advantage", "def get_placed_assets_rate(cls, instrument: Instrument) -> float:\n bank_balance = InstrumentBalance.objects.filter(\n user__email__contains='bank',\n instrument=instrument).order_by('user__created_at_dt').last()\n if bank_balance:\n return float(bank_balance.amount)\n return 0", "def get_cpi_rates():\n\n df = pd.read_csv('cpi_usa.csv', index_col=0)\n df.index = pd.to_datetime(df.index)\n\n df = df.resample('BAS').mean() # change sampling to business year start\n df.index = df.index.year # datetime to year\n df.columns = ['cpi_rate']\n\n return df", "def 
averagePrice(self, onlyUnconsumed):\n\n\t\tif onlyUnconsumed:\n\t\t\treturn self.unconsumedValue / (len(self.bottles) - self.numberConsumed)\n\n\t\treturn self.totalValue / len(self.bottles)", "def aveVolumeCalc(ins, date):\n cal = ins.Currency().Calendar()\n enddate = cal.AdjustBankingDays(date, 0)\n startdate = cal.AdjustBankingDays(date, AVERAGING_PERIOD)\n\n prices=[]\n histprices = acm.FPrice.Select(\"instrument = %s and market = '%s' \\\n and day > '%s' and day <='%s'\" % \n (ins.Oid(), DAILY_MARKET, startdate, enddate))\n \n for price in histprices:\n settle = price.Settle()\n if settle >= 0:\n prices.append(settle)\n \n #upgrade 2013 fix for failure during run - acm.Math().AverageOf seems buggy\n try:\n avgprice = (sum(prices)/len(prices))\n except ZeroDivisionError:\n avgprice = 0\n \n #avgprice = acm.Math().AverageOf(prices, None)\n \n #Overwrite today's price if you find it \n newPrice = acm.FPrice.Select01(\"instrument = %s and market = '%s' and day = %s\" % \n (ins.Oid(), THREE_MONTH_MARKET, enddate),\n 'NaN')\n if not newPrice:\n newPrice = acm.FPrice()\n newPrice.Instrument(ins)\n newPrice.Day(enddate)\n newPrice.Market(THREE_MONTH_MARKET)\n newPrice.Currency(ins.Currency())\n\n newPrice.Settle(avgprice)\n try:\n newPrice.Commit()\n print 'INFO: %s price for %s was created on %s' %(THREE_MONTH_MARKET, ins.Name(), date)\n except Exception, err:\n print 'ERROR: %s price for %s did not commit: %s' %(THREE_MONTH_MARKET, ins.Name(), str(err))\n \n return newPrice", "def calculateLatestThreeDayMA(self, closingPrices):\n return ((closingPrices[0]+closingPrices[1]+closingPrices[2])/3)", "def acc_rate(L_acc, R, M):\n mdot = 1.25*L_acc*u.W*R*u.R_sun/(G*M*u.M_sun)\n mdot = mdot.decompose().to(u.M_sun/u.yr)\n return(mdot/(u.M_sun/u.yr))", "def average(self):\n return (self.current + self.last) / 2.0", "def mean_rate(df):\n mean_of_rate = dict(df.mean())\n return mean_of_rate", "def geo_average(self, returns):\r\n return (1 + returns).prod() ** (self.day / len(returns)) - 1", "def avg_ttm_2y(df):\n return 0.5 * (df + df.shift(4))", "def annualized_volatility(self):\n return self.daily_std() * math.sqrt(252)", "def avg_annual_returns(end_of_year_returns, mstat):\n\n # imports mean stats\n from scipy.stats import mstats\n\n # converts returns dict to an array (in decimal fmt)\n returns_arr = np.array(list(end_of_year_returns.values()))/100\n\n if mstat == 'geometric':\n\n # calculates the geometric mean\n gmean_returns = (mstats.gmean(1 + returns_arr) - 1)*100\n\n return round(gmean_returns, 2)\n\n if mstat == 'arithmetic':\n\n # calculates the arithmetic mean\n mean_returns = np.mean(returns_arr)\n\n return round(mean_returns, 2)", "def get_gae(rewards, value_estimates, value_next=0.0, gamma=0.99, lambd=0.95):\n value_estimates = np.asarray(value_estimates.tolist() + [value_next])\n delta_t = rewards + gamma * value_estimates[1:] - value_estimates[:-1]\n advantage = discount_rewards(r=delta_t, gamma=gamma * lambd)\n return advantage", "def get_rate(currency, date):\n status = 400\n while status != 200:\n url = (\"http://api.nbp.pl/api/exchangerates/rates/A/%s/%d-%02d-%02d?format=json\" %\n (currency, date.year, date.month, date.day))\n\n response = requests.get(url)\n status = response.status_code\n if status != 200:\n date = date - datetime.timedelta(1)\n\n tree = json.loads(response.content)\n assert len(tree['rates']) == 1\n print_rate_info(tree['rates'])\n return (tree['rates'][0]['mid'], date)", "def calculateDataRate(self):\n pass", "def get_pressure(self):\n\n\t\tvoltage_pressure = 
self.pressure_sensor.getVoltage()\n\t\tnew_value = (250 * voltage_pressure / 5) - 25\n\n\t\tself._pressure_samples.append(new_value)\n\n\t\tif not self.pressure_timer.hasPeriodPassed(self.pressure_timer_delay):\n\t\t\treturn self._last_pressure_value\n\n\t\t# Calculate new running average\n\t\tnew_avg = sum(self._pressure_samples) / len(self._pressure_samples)\n\n\t\tself._pressure_samples = [ ]\n\t\tself._last_pressure_value = new_avg\n\n\t\treturn new_avg", "def dbl_to_rate(days):\n\n if days == np.inf:\n return 0\n else:\n return np.expm1(np.log(2) / days)", "def interval_average():\r\n import statistics as st\r\n from tach_detect import tach_detect\r\n r = request.get_json()\r\n try:\r\n email = r[\"user_email\"]\r\n except KeyError:\r\n return jsonify(\"no email input\"), 400\r\n raise LookupError(\"no email input\")\r\n check_email = Check_For_User(email)\r\n if check_email.user_exists is False:\r\n return jsonify(str(email) + \" was not found. Please re-enter\"), 400\r\n raise LookupError(str(user_email) + \" was not found. Please re-enter\")\r\n try:\r\n input_date_time = r[\"date_time\"]\r\n except KeyError:\r\n return jsonify(\"no date entered\"), 400\r\n raise LookupError(\"no date entered\")\r\n try:\r\n validate_date_time(input_date_time)\r\n except (ValueError, TypeError) as error:\r\n return jsonify(\"date entered is invalid. Please re-type.\"), 400\r\n date_time = datetime.datetime(input_date_time[0], input_date_time[1],\r\n input_date_time[2], input_date_time[3],\r\n input_date_time[4], input_date_time[5],\r\n input_date_time[6])\r\n time_list = get_all_times(email)\r\n heart_rate_list = get_all_rates(email)\r\n interval_list = find_first_date(date_time, time_list, heart_rate_list)\r\n try:\r\n interval_average_post = st.mean(interval_list)\r\n user = models.User.objects.raw({\"_id\": email}).first()\r\n curr_age = user.age\r\n tach_test = tach_detect(curr_age, interval_average_post)\r\n return_dict = {\r\n \"user_email\": email,\r\n \"heart_rate_average_since\": str(date_time),\r\n \"heart_rate_average\": interval_average_post,\r\n \"is_heart rate_tachycardic\": str(tach_test)\r\n }\r\n except st.StatisticsError:\r\n interval_average_post = heart_rate_list[len(heart_rate_list)-1]\r\n user = models.User.objects.raw({\"_id\": email}).first()\r\n curr_age = user.age\r\n tach_test = tach_detect(curr_age, interval_average_post)\r\n return_dict = {\r\n \"user_email\": email,\r\n \"heart_rate_average_since\": str(date_time),\r\n \"heart_rate_average\": interval_average_post,\r\n \"is_heart rate_tachycardic\": str(tach_test)\r\n }\r\n return jsonify(return_dict), 200", "def rate(self):\n return self.__rate", "def getClosingPrice(self):\t\n\t\treturn self.dataPoints[-1].getDate(), self.dataPoints[-1].getAdjustedValue()", "def get_average_for_month_at_time(self, month, hour, minute, weekend):\n\t\tif weekend:\n\t\t\treturn self.averages_weekend[month][self.get_list_id(hour, minute)]\n\t\telse:\n\t\t\treturn self.averages_weekday[month][self.get_list_id(hour, minute)]", "def price_to_seven_year_earnings_ratio_less_than_25(self):\n\n note = ''\n # check if 'EPS' exists\n if 'EPS' not in self.stock.main_df.columns:\n note = note + 'Could not find EPS on MacroTrends. '\n\n # check if Current price is not 0\n if self.stock.stats_dict['Current Price'] == 0:\n note = note + 'Could not find current price on MacroTrends. 
'\n\n if note != '':\n self.stock.append_calc_result('7 year P/E ratio < 25 ?', 'N/A', 'N/A', note)\n return\n\n curr_price = self.stock.stats_dict['Current Price']\n df = self.stock.main_df\n\n average = 0\n # i want to use previous year if current year is empty\n if not np.isnan(df.iloc[0]['EPS']):\n # present year is there\n past_7_years_df = df.iloc[0: 7]['EPS']\n average = past_7_years_df.mean()\n elif np.isnan(df.iloc[0]['EPS']):\n # present year is not there\n past_7_years_df = df.iloc[1: 8]['EPS']\n average = past_7_years_df.mean()\n if np.isnan(df.iloc[1]['EPS']):\n # past year is not there either\n past_7_years_df = df.iloc[2: 9]['EPS']\n average = past_7_years_df.mean()\n if np.isnan(df.iloc[2]['EPS']):\n self.stock.append_calc_result('7 year P/E ratio < 25 ?', 'N/A', 'N/A',\n 'Must not have filed their annual report for {}'.format(\n self.current_year - 2))\n return\n\n if average == 0:\n self.stock.append_calc_result('7 year P/E ratio < 25 ?', 'N/A', 'N/A',\n 'No average found')\n return\n elif (curr_price / average) <= 25:\n criteria_passed = 'Yes'\n else:\n criteria_passed = 'No'\n\n self.stock.append_calc_result('7 year P/E ratio < 25 ?', round((curr_price / average), 2),\n criteria_passed, '7 Year Average EPS = {}'.format(round(average, 2)))", "def exchange_rate(self):\n res = r.get(self.url + self.current_rate)\n return self.execute(res)", "def _get_latest_ecb_rate(data: bytes) -> float:\n root = etree.fromstring(data)\n values = root.xpath('.//generic:ObsValue/@value', namespaces=root.nsmap)\n last_value = len(values) - 1\n\n return float(values[last_value])", "def get_latest(self):\n url = f\"{self.get_api_url()}+latest\"\n # set api parameters\n params = {}\n params.update({'base': self.base_currency})\n params.update({'symbols': ','.join(self.target_currency_codes)})\n # call the api for rates\n response = requests.get(url, params=params)\n if response.status_code == 200:\n base, rates = response.json().get('base'), response.json().get('rates')\n # remove base currency from rates if it is returned by the data source\n rates.pop(self.base_currency, None)\n return base, rates\n return None, None", "def save(self, *args, **kwargs):\n self.item.rates_total += 1\n self.item.average_rate += (self.item.average_rate + self.rate) / self.item.rates_total\n self.item.save()\n super(Rate, self).save(*args, **kwargs)", "def get_rate(self, t):\n return self.rates[bisect.bisect(self.change_times, t) - 1]", "def asset_calculate_revenue():\n search_period = request.args.getlist(\"period\")\n try:\n daily_response = requests.get(\"https://www.cbr.ru/eng/currency_base/daily/\")\n key_indicators_response = requests.get(\"https://www.cbr.ru/eng/key-indicators/\")\n except:\n return redirect(url_for(\"cbr_not_avalible\"))\n if daily_response.status_code >= 400:\n return redirect(url_for(\"cbr_not_avalible\"))\n daily_dict = parse_cbr_currency_base_daily(daily_response.text)\n if key_indicators_response.status_code >= 400:\n return redirect(url_for(\"cbr_not_avalible\"))\n key_indicators_dict = parse_cbr_key_indicators(key_indicators_response.text)\n for key in daily_dict:\n if key not in key_indicators_dict:\n key_indicators_dict[key] = daily_dict[key]\n key_indicators_dict['RUB'] = 1.0\n\n total_revenue = dict()\n for period in search_period:\n total_revenue[period] = 0\n for asset_name in app.bank:\n revenue = app.bank[asset_name].calculate_revenue(int(period))\n char_code = app.bank[asset_name].char_code\n if char_code not in key_indicators_dict:\n continue\n revenue *= 
key_indicators_dict[char_code]\n total_revenue[period] += revenue\n\n return jsonify(total_revenue)", "def monthly_gross_income(annual_salary):\n gross_income = annual_salary / 12\n return round_nearest_whole_dollar(gross_income)", "def compute_avg_weekly_price_to_csv(self):\n c = self.connection.cursor()\n self.print_datetime_output('Group time series by week and compute mean price')\n query = \"SELECT min((strftime('%Y%m%d', timestamp)/7*7 - 19000101) + 19000106) AS start_day, timestamp, \" \\\n \"avg(close_USD) FROM \" + self.db_table + \" GROUP BY (strftime('%Y%m%d', timestamp) - 19000106)/7\"\n c.execute(query)\n avg_close_by_week_df = pd.DataFrame({x[0]: x[1:] for x in c.fetchall()})\n self.print_datetime_output('Store data frame to file \\'%s\\'' % self.avg_price_file_name)\n avg_close_by_week_df.to_csv('%s/%s' % (self.dir_path, self.avg_price_file_name))", "def get_NextMonthsBalance(self):\n balance = (self.principal * math.exp(self.interestRate * (1/12))) - self.actualMonthlyPayment\n if balance <= 0:\n return 0\n return balance", "def get_yearly_avg(all_stock_data):\n try:\n yearly_stock_data = {}\n for data in all_stock_data:\n year = data[0][0:4]\n if year not in yearly_stock_data:\n yearly_stock_data[year] = []\n yearly_stock_data[year].append(data)\n yearly_avg_list = []\n for year, stock_data in yearly_stock_data.items():\n yearly_avg_list.append((year, get_avg(stock_data)))\n return yearly_avg_list\n\n except Exception as e:\n print(e)\n exit()", "def getAvgMarketCosts(self):\n try:\n avgAL = self.frame.mode.game.marketStats[str(self.frame.mode.game.currentRound-1)]['avgSoldAL']\n avgEC = self.frame.mode.game.marketStats[str(self.frame.mode.game.currentRound-1)]['avgSoldEC']\n avgIA = self.frame.mode.game.marketStats[str(self.frame.mode.game.currentRound-1)]['avgSoldIA']\n except:\n avgAL = 0.0\n avgEC = 0.0\n avgIA = 0.0\n \n return (avgAL, avgEC, avgIA)", "def rate(self):\n return self._rate", "def PercentMaxRate(self):\n\t\treturn self._get_attribute('percentMaxRate')", "def currency_rate(self, init):\r\n\r\n curr = CurrencyRates()\r\n curr_rate = curr.get_rates(init)\r\n return curr_rate", "def update_attendance_rate(self):\n session_avg_rate = self.session_set\\\n .filter(attendance_rate__isnull=False)\\\n .aggregate(Avg('attendance_rate'))\n self.attendance_rate = session_avg_rate['attendance_rate__avg']\n self.save()", "def average_revenue():\n graph = pygal.SolidGauge(inner_radius=0.70)\n usd_formatter = lambda x: '{:.10g}‎M$'.format(x)\n graph.value_formatter = usd_formatter\n graph.title = \"Average Revenue of Movies per year\"\n\n for year in range(2000, 2017):\n print(\">> Year : %i\" % year)\n\n # Start display\n print(\">> [status] Create Graph Starting!\")\n\n dataset = pd.read_csv(\"Top-100_Export/Top-100_%i.csv\" % (year))\n revenue = dataset[\"revenue\"].tolist() #Revenue\n temp = []\n for i in revenue:\n if i != 0:\n temp.append(i)\n average = ((((sum(temp)/len(temp)))/1000000//0.01)/100)\n graph.add(str(year), [{'value': average, 'max_value': 250}])\n\n # End display\n print(\">> [status] Created Graph Successful!\")\n\n graph.render_to_file(\"Graph_Export/Average_Revenue_of_Movies.svg\")\n\n # Used time\n print(\">> [status] Completed : Used time = %s seconds\" % (time.time() - start_time))", "def _get_btc_gbp_15min(self) -> None:\n self._get_eur_gbp_last_daily()\n\n self.btc_gbp_15min = self.btc_eur_15min * self.eur_gbp_last_day", "def Close_Average(db):\r\n \r\n average = 0\r\n n = 0\r\n \r\n for document in db.Transaction.find():\r\n average = 
average + float(document.get(\"Close\"))\r\n n = n + 1\r\n \r\n return average / n", "def test_read_cloud_fraction_monthly_avg(self, meteo):\n meteo.config.climate.meteo.cloud_fraction_mapping = {\n 'Fog': [\n 9.6210045662100452, 9.3069767441860467, 9.5945945945945947,\n 9.5, 9.931034482758621, 10.0, 9.7777777777777786,\n 9.6999999999999993, 7.8518518518518521, 8.9701492537313428,\n 9.2686980609418281, 9.0742358078602621]\n }\n record = Mock(name='record')\n record.find().text = 'Fog'\n\n def mock_timestamp_data(part):\n parts = {'year': 2012, 'month': 4, 'day': 1, 'hour': 12}\n return parts[part]\n record.get = mock_timestamp_data\n cloud_faction = meteo.read_cloud_fraction(record)\n assert cloud_faction == 9.5", "def mean_variance_analysis(df):\n rets = np.log(df['close']/df['close'].shift(1))\n\n std = rets.std()* 252\n\n annualized_returns = rets.mean() * 252\n\n print(f'The annualized returns of the stock is {annualized_returns}, and the standard deviation of the stock is {std}')", "def calculate_revenue(self, period: int, currency_rates: dict):\n total_revenue = sum(\n asset.calculate_revenue(period, currency_rates[asset.char_code])\n for asset in self.asset_collection\n )\n return total_revenue", "def get_max(self):\r\n df = pd.read_csv(\"MonthlyRate.csv\")\r\n df = df[df.CurrencyCode == self.choice]\r\n maximum = df.max(axis=1).values[0]\r\n # Round the value to 4 d.p.\r\n maximum = round(float(maximum), 4)\r\n return maximum", "def query_last_price(market_data):\n print(\"Consultando el último precio\")\n if market_data[\"marketData\"][\"LA\"]:\n last_price = market_data[\"marketData\"][\"LA\"][\"price\"]\n print(\n f\"Último precio operado: ${last_price:,.2f}\".replace('.', ','))\n return last_price\n print(\"Último precio operado: No hay datos disponibles\")\n return None", "def _set_rate(self):\r\n interval = self.data.iloc[2, 0] - self.data.iloc[1, 0]\r\n self.rate = int(1 / interval)", "def five_years_avg_dividend(self) -> float:\n return self._five_years_avg_dividend", "def price_to_3_year_earnings_less_than_15(self):\n\n note = ''\n # check if 'EPS' exists\n if 'EPS' not in self.stock.main_df.columns:\n note = note + 'Could not find EPS on MacroTrends. '\n\n # check if Current price is not 0\n if self.stock.stats_dict['Current Price'] == 0:\n note = note + 'Could not find current price on MacroTrends. 
'\n\n if note != '':\n self.stock.append_calc_result('3 year P/E ratio < 15 ?', 'N/A', 'N/A', note)\n return\n\n curr_price = self.stock.stats_dict['Current Price']\n df = self.stock.main_df\n\n average = 0\n # i want to use 2020 if not empty and 2019 if 2020 is empty\n if not np.isnan(df.iloc[0]['EPS']):\n # current year is there\n past_3_years_df = df.iloc[0: 3]['EPS']\n average = past_3_years_df.mean()\n elif np.isnan(df.iloc[0]['EPS']):\n # current year is not there\n past_3_years_df = df.iloc[1: 4]['EPS']\n average = past_3_years_df.mean()\n if np.isnan(df.iloc[1]['EPS']):\n # past year is not there either\n past_7_years_df = df.iloc[2: 5]['EPS']\n average = past_7_years_df.mean()\n if np.isnan(df.iloc[2]['EPS']):\n self.stock.append_calc_result('7 year P/E ratio < 25 ?', 'N/A', 'N/A',\n 'Must not have filed their annual report for {}'.format(\n self.current_year - 2))\n return\n\n if average == 0:\n self.stock.append_calc_result('3 year P/E ratio < 15 ?', 'N/A', 'N/A',\n 'No average found')\n return\n elif (curr_price / average) <= 15:\n criteria_passed = 'Yes'\n else:\n criteria_passed = 'No'\n\n self.stock.append_calc_result('3 year P/E ratio < 15 ?', round((curr_price / average), 2),\n criteria_passed, '3 Year Average EPS = {}'.format(round(average, 2)))", "def read_stock_data(stock_name, stock_file_name):\n daily_stock = read_json_from_file(stock_file_name)\n monthly_averages.sort(key=operator.itemgetter(0),reverse=False) # sorts list in ascending order by formatted_date\n average_price_numerator = 0 # resets numerator value to 0 when previous month value has been calculated\n average_price_denominator = 0 # resets denominator value to 0 when previous month value has been calculated\n del monthly_averages[:] # monthly_averages list will be cleared to allow testing of different files\n comparison_month = \"\"\n for ele in daily_stock:\n current_month = ele.get(\"Date\")[0:7]\n if comparison_month == \"\": # determines whether the next dictionary is of the current month or previous\n comparison_month = current_month\n if current_month == comparison_month:\n average_price_numerator += (ele.get(\"Volume\") * ele.get(\"Close\"))\n average_price_denominator += (ele.get(\"Volume\"))\n else:\n monthly_average_price = average_price_numerator / average_price_denominator\n formatted_date = comparison_month.replace(\"-\",\"/\") # formats date to match format in tests\n monthly_averages.append((formatted_date, round(monthly_average_price,2)))\n comparison_month = current_month\n average_price_numerator = (ele.get(\"Volume\") * ele.get(\"Close\"))\n average_price_denominator = (ele.get(\"Volume\"))\n\n # final month calculation\n monthly_average_price = average_price_numerator / average_price_denominator\n formatted_date = comparison_month.replace(\"-\", \"/\")\n monthly_averages.append((formatted_date, round(monthly_average_price, 2)))\n\n return monthly_averages", "def total_scheme_revenue_rule(_m):\r\n\r\n return sum(m.YEAR_SCHEME_REVENUE[y] for y in m.Y)", "def get_final_return(self):\n values = self.stats['return_stats']['episode_totals']\n _, y, (_, _) = self._moving_average(values, window=_ROLLING_WINDOW,\n p=_CONFIDENCE_LEVEL)\n return y[-1]", "def get_sale_rate(self, pair):\n response = self.execute_http_call(\"/api/rate/{}\".format(pair), \"GET\")\n return float(response[\"rate\"])", "def data_rate(self):\n return self._data_rate", "def getLastAverage(self):\n lastAve=dict()\n lastAve['identifier']=self.lastWaveIdentifier\n lastAve['averageCalculated']=self.lastAverageCalculated \n 
lastAve['lastAverageArray']=self.lastAverageArray\n return lastAve", "def get_sma(self,period):\n #df=pandas.DataFrame()\n sma=self.close.rolling(period).mean()\n return sma", "def _get_user_avg(self, user):\n return calculate_user_numbers_avg(self._users_numbers, user)", "def avg(year):\r\n df = ouvrir_fichier()\r\n df = df.loc[df[\"year\"].isin([year])]\r\n df = df[(\r\n df[\r\n \"emissions\"\r\n ] == 'Emissions (thousand metric tons of carbon dioxide)'\r\n )]\r\n print(df)\r\n mean_value = df.mean()['value']\r\n resultat = {}\r\n resultat[\"year\"] = year\r\n resultat['total'] = float(mean_value)\r\n print(mean_value)\r\n return resultat", "def annual_dividend_rate(self) -> float:\n return self._annual_dividend_rate", "def fetch_and_store_latest_ecb_exrates():\n response = requests.get(DAILY_ECB_URL)\n # Raise exception if status_code != 200 or ConnectionError\n response.raise_for_status()\n info = ET.fromstring(response.content)[2][0]\n datestamp = datetime.strptime(info.attrib['time'], \"%Y-%m-%d\").date()\n rates = [x.attrib for x in info]\n\n exrates = []\n for item in rates:\n if item['currency'] in SUPPORTED_CURRENCIES:\n exrate, created = ExchangeRate.objects.update_or_create(\n datestamp=datestamp,\n currency=item['currency'],\n defaults={'rate': Decimal(item['rate'])}\n )\n exrates.append(exrate)\n print(exrate, \"NEW EXRATE!\" if created else \"<noupdate>\")\n\n return exrates" ]
[ "0.65970474", "0.6344092", "0.5897189", "0.5880332", "0.58152103", "0.5769175", "0.57214457", "0.5711045", "0.55768675", "0.5523535", "0.5501505", "0.5479372", "0.5457628", "0.54402447", "0.542044", "0.5388946", "0.5384516", "0.5383942", "0.53801686", "0.53687716", "0.53634995", "0.5359037", "0.5356137", "0.535052", "0.5333389", "0.53300786", "0.53068876", "0.5300441", "0.5293764", "0.52898085", "0.52797735", "0.5269131", "0.52651525", "0.52630436", "0.52607447", "0.52435833", "0.52403814", "0.523527", "0.5228901", "0.52263886", "0.52195865", "0.52176934", "0.5206023", "0.5204706", "0.5198164", "0.51962876", "0.5178598", "0.5176161", "0.51733315", "0.51697975", "0.5167214", "0.5166552", "0.5156727", "0.51562387", "0.51539296", "0.51381594", "0.5137363", "0.5124213", "0.5122692", "0.5116142", "0.5115336", "0.5112449", "0.5104633", "0.51033753", "0.50975764", "0.50908685", "0.50874895", "0.50870174", "0.5086933", "0.50807315", "0.50640506", "0.506157", "0.5057074", "0.5048382", "0.50431013", "0.50392485", "0.5030057", "0.50285393", "0.5020998", "0.501074", "0.5010118", "0.50095505", "0.50051695", "0.49978662", "0.49967664", "0.49924254", "0.4991226", "0.49901503", "0.4983116", "0.4982973", "0.49794984", "0.49756595", "0.49620488", "0.49473104", "0.49441707", "0.49410006", "0.49378267", "0.49369958", "0.49356464", "0.49343234" ]
0.7096632
0
Retrieve and store the latest daily EUR to GBP average rate.
def _get_eur_gbp_last_daily(self) -> None:
    data = _get_ecb_data(FREQUENCY_DAILY, _ten_days_ago(), _today())
    self.eur_gbp_last_day = _get_latest_ecb_rate(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bitcoinaverage(site):\n url = \"https://apiv2.bitcoinaverage.com/frontend/constants/exchangerates/local\"\n try:\n session = requests.Session()\n cfscrape_requests = cfscrape.create_scraper(sess=session)\n ret = cfscrape_requests.get(url, timeout=(15, 15)).json()[\"rates\"]\n data = {\"USD:\" + key: float(val[\"rate\"]) for key, val in ret.items()}\n data = refine_data(data)\n print(site, data)\n race_write(f\"{site}_forex.txt\", json_dumps(data))\n except:\n print(f\"{site} failed to load\")", "def fetch_and_store_latest_ecb_exrates():\n response = requests.get(DAILY_ECB_URL)\n # Raise exception if status_code != 200 or ConnectionError\n response.raise_for_status()\n info = ET.fromstring(response.content)[2][0]\n datestamp = datetime.strptime(info.attrib['time'], \"%Y-%m-%d\").date()\n rates = [x.attrib for x in info]\n\n exrates = []\n for item in rates:\n if item['currency'] in SUPPORTED_CURRENCIES:\n exrate, created = ExchangeRate.objects.update_or_create(\n datestamp=datestamp,\n currency=item['currency'],\n defaults={'rate': Decimal(item['rate'])}\n )\n exrates.append(exrate)\n print(exrate, \"NEW EXRATE!\" if created else \"<noupdate>\")\n\n return exrates", "def get_avg(self):\r\n df = pd.read_csv(\"MonthlyRate.csv\")\r\n df = df[df.CurrencyCode == self.choice]\r\n mean = df.mean(axis=1).values[0]\r\n # Round the value to 4 d.p.\r\n mean = round(float(mean), 4)\r\n return mean", "def test_get_historical_gold_rate(self):\n rates = [153.50, 162.49, 123.86, 155.10]\n helper.gold_loop_helper(get_historical_gold_rate, TestHistoricalRates.dates_rate, rates)", "def exchange_rate(self):\n res = r.get(self.url + self.current_rate)\n return self.execute(res)", "def _get_eur_gbp_last_month(self) -> None:\n last_month = _last_month()\n data = _get_ecb_data(FREQUENCY_MONTHLY, last_month, last_month)\n\n self.eur_gbp_last_month = _get_latest_ecb_rate(data)", "def market_avg_price(**params):\n endpoint = 'calc/trade/avg'\n return request(authenticate=False, version=2, endpoint=endpoint, method='POST', query_params=params)", "def get_current_rate(self):\n pass", "def interval_average():\r\n import statistics as st\r\n from tach_detect import tach_detect\r\n r = request.get_json()\r\n try:\r\n email = r[\"user_email\"]\r\n except KeyError:\r\n return jsonify(\"no email input\"), 400\r\n raise LookupError(\"no email input\")\r\n check_email = Check_For_User(email)\r\n if check_email.user_exists is False:\r\n return jsonify(str(email) + \" was not found. Please re-enter\"), 400\r\n raise LookupError(str(user_email) + \" was not found. Please re-enter\")\r\n try:\r\n input_date_time = r[\"date_time\"]\r\n except KeyError:\r\n return jsonify(\"no date entered\"), 400\r\n raise LookupError(\"no date entered\")\r\n try:\r\n validate_date_time(input_date_time)\r\n except (ValueError, TypeError) as error:\r\n return jsonify(\"date entered is invalid. 
Please re-type.\"), 400\r\n date_time = datetime.datetime(input_date_time[0], input_date_time[1],\r\n input_date_time[2], input_date_time[3],\r\n input_date_time[4], input_date_time[5],\r\n input_date_time[6])\r\n time_list = get_all_times(email)\r\n heart_rate_list = get_all_rates(email)\r\n interval_list = find_first_date(date_time, time_list, heart_rate_list)\r\n try:\r\n interval_average_post = st.mean(interval_list)\r\n user = models.User.objects.raw({\"_id\": email}).first()\r\n curr_age = user.age\r\n tach_test = tach_detect(curr_age, interval_average_post)\r\n return_dict = {\r\n \"user_email\": email,\r\n \"heart_rate_average_since\": str(date_time),\r\n \"heart_rate_average\": interval_average_post,\r\n \"is_heart rate_tachycardic\": str(tach_test)\r\n }\r\n except st.StatisticsError:\r\n interval_average_post = heart_rate_list[len(heart_rate_list)-1]\r\n user = models.User.objects.raw({\"_id\": email}).first()\r\n curr_age = user.age\r\n tach_test = tach_detect(curr_age, interval_average_post)\r\n return_dict = {\r\n \"user_email\": email,\r\n \"heart_rate_average_since\": str(date_time),\r\n \"heart_rate_average\": interval_average_post,\r\n \"is_heart rate_tachycardic\": str(tach_test)\r\n }\r\n return jsonify(return_dict), 200", "def getAvg(self):\r\n\t\treturn self.data['avg']", "def rate(self):\n return self.brate / FAC", "def get_updated_currency(self, currency_array, main_currency,\n max_delta_days):\n url = 'http://rate.bot.com.tw/xrt/flcsv/0/day'\n\n # We do not want to update the main currency\n if main_currency in currency_array:\n currency_array.remove(main_currency)\n _logger.debug(\"BOT currency rate service : connecting...\")\n try:\n url_open = urllib.request.urlopen(url)\n csvfile = csv.reader(io.StringIO(url_open.read().decode('utf-8-sig')), delimiter=',')\n url_open.close()\n except IOError:\n raise UserError(\n _('Web Service does not exist (%s)!') % url)\n\n next(csvfile)\n exchange = {}\n for row in csvfile:\n bid = float(row[3])\n ask = float(row[13])\n\n exchange[row[0]] = {\n 'bid': bid,\n 'ask': ask\n }\n\n self.check_rate_date(datetime.today(), max_delta_days)\n self.supported_currency_array = list(exchange.keys())\n\n self.supported_currency_array.append('TWD')\n _logger.debug(\"Supported currencies = %s \" %\n self.supported_currency_array)\n self.validate_cur(main_currency)\n if main_currency != 'TWD':\n main_rate = float(exchange[main_currency]['ask'])\n if main_currency in currency_array:\n currency_array.remove(main_currency)\n for curr in currency_array:\n self.validate_cur(curr)\n if curr == 'TWD':\n rate = main_rate\n else:\n if main_currency == 'TWD':\n rate = 1 / float(exchange[curr]['ask'])\n else:\n rate = main_rate / float(exchange[curr]['ask'])\n self.updated_currency[curr] = rate\n _logger.debug(\n \"Rate retrieved : 1 %s = %s %s\" % (main_currency, rate, curr)\n )\n return self.updated_currency, self.log_info", "def get_latest(self):\n url = f\"{self.get_api_url()}+latest\"\n # set api parameters\n params = {}\n params.update({'base': self.base_currency})\n params.update({'symbols': ','.join(self.target_currency_codes)})\n # call the api for rates\n response = requests.get(url, params=params)\n if response.status_code == 200:\n base, rates = response.json().get('base'), response.json().get('rates')\n # remove base currency from rates if it is returned by the data source\n rates.pop(self.base_currency, None)\n return base, rates\n return None, None", "def get_avg(all_stock_data):\n try:\n sum_close_vol = 0.0\n sum_vol = 0.0\n for item 
in all_stock_data:\n adj_close = item[1]\n volume = item[2]\n sum_close_vol += adj_close * volume\n sum_vol += item[2]\n return sum_close_vol / sum_vol\n\n except Exception as e:\n print(e)\n exit()", "def averagePrice(self, onlyUnconsumed):\n\n\t\tif onlyUnconsumed:\n\t\t\treturn self.unconsumedValue / (len(self.bottles) - self.numberConsumed)\n\n\t\treturn self.totalValue / len(self.bottles)", "def update_attendance_rate(self):\n session_avg_rate = self.session_set\\\n .filter(attendance_rate__isnull=False)\\\n .aggregate(Avg('attendance_rate'))\n self.attendance_rate = session_avg_rate['attendance_rate__avg']\n self.save()", "def get_latest_average(fsym, tsym, markets='all', try_conversion=True, \n format='raw'):\n\t\n\t# build url \n\turl = build_url('generateAvg', fsym=fsym, tsym=tsym, markets=markets,\n\t try_conversion=try_conversion)\n\n\t# http request\n\tr = requests.get(url)\n\n\t# decode to json\n\tdata = r.json()\n\n\tif format == 'raw':\n\t\tdata = data['RAW']\n\telif format == 'display':\n\t\tdata = data['DISPLAY']\n\n\treturn data", "def ADK_Rate_Avg(Uion,Z,E):\n\treturn Cycle_Averaging_Factor(Uion,E)*ADK_Rate(Uion,Z,E)", "def get_days_rate():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n query_days_rate = \"\"\"\n SELECT * FROM (SELECT TO_CHAR(time::date,'Mon DD, YYYY') AS date,\n ROUND((COUNT(status) FILTER (\n WHERE status='404 NOT FOUND'))*100/COUNT(status)::decimal, 2)::text\n ||'% errors' AS rate\n FROM log\n GROUP BY time::date) AS error_rate\n WHERE rate::text > 1::text;\"\"\"\n c.execute(query_days_rate)\n rates = from_db_cursor(c)\n db.close()\n return rates", "def update(self):\n self.rate = self.exchange.latest()", "def get_patient_average():\n r = requests.get(\"http://vcm-7474.vm.duke.edu:5000/api/heart_rate/average/2\")\n print(r.text)", "def getData(self):\n\n url = 'https://www.ecb.europa.eu/stats/eurofxref/eurofxref-hist.zip'\n try:\n file, _ = urlretrieve(url)\n zip_file_object = zipfile.ZipFile(file, 'r')\n first_file = zip_file_object.namelist()[0]\n file = zip_file_object.open(first_file)\n\n file_handler = []\n for row in file:\n file_handler.append(row.decode())\n\n # getting the currency headers into header_list\n header_list = []\n notFound = True\n x = 0\n while notFound:\n if file_handler[x].startswith('Date'):\n header = file_handler[x].split(',')\n for col in header:\n header_list.append(col.strip())\n notFound = False\n x += 1\n self.currencies = list(filter(None, header_list))\n self.currencies.append('EUR')\n self.currencies = self.currencies[1:] # Removing the \"Date\" entry\n\n data = []\n for row in file_handler[x:]:\n if row.startswith('`\\n'):\n break\n else:\n data.append(list(filter(None, [x.replace('\\n', '') for x in row.split(',')]))) # Removing any empty extra columns at the end of each rows\n\n # filling my self.rates with the currency in the format {CURR: {date: rate, ...}, ...}\n for row in data:\n for i in range(len(self.currencies)):\n try:\n if self.currencies[i] not in self.rates:\n self.rates[self.currencies[i]] = {row[0]: row[i + 1]}\n else:\n self.rates[self.currencies[i]].update({row[0]: row[i + 1]})\n except IndexError:\n # We reached the EUR section\n if self.currencies[i] not in self.rates:\n self.rates[self.currencies[i]] = {row[0]: '1.0000'}\n else:\n self.rates[self.currencies[i]].update({row[0]: '1.0000'})\n\n self.currencies.sort()\n\n except Exception as e:\n print('Failed to process the data')\n print(e)\n finally:\n file.close()", "def save(self, *args, **kwargs):\n self.item.rates_total += 1\n 
self.item.average_rate += (self.item.average_rate + self.rate) / self.item.rates_total\n self.item.save()\n super(Rate, self).save(*args, **kwargs)", "def _get_latest_ecb_rate(data: bytes) -> float:\n root = etree.fromstring(data)\n values = root.xpath('.//generic:ObsValue/@value', namespaces=root.nsmap)\n last_value = len(values) - 1\n\n return float(values[last_value])", "def calculateDataRate(self):\n pass", "def compute_rate(self):\n bg_rate = self.counts.data / self.livetime.data\n\n bg_rate /= self.counts.bin_volume\n\n bg_rate = bg_rate.to('MeV-1 sr-1 s-1')\n\n self.bg_rate.data = bg_rate\n self.bg_rate.data_err = (np.sqrt(self.counts.data) / (self.counts.bin_volume * self.livetime.data)).to(\n 'MeV-1 sr-1 s-1')", "def data_rate(self):\n return self._data_rate", "def rate(self):\n return self._rate", "def get_average_survival(self):\n return np.mean(self.survival_rates)", "def rate(self):\n return self.__rate", "def mean_rate(df):\n mean_of_rate = dict(df.mean())\n return mean_of_rate", "def api_call(cls, currency):\n headers = {\"x-accept-version\": \"2.0.0\", \"Accept\": \"application/json\"}\n r = requests.get(cls.API_URL + currency, headers=headers)\n r.raise_for_status()\n return r.json()[\"data\"][\"rate\"]", "def get_rate(currency, date):\n status = 400\n while status != 200:\n url = (\"http://api.nbp.pl/api/exchangerates/rates/A/%s/%d-%02d-%02d?format=json\" %\n (currency, date.year, date.month, date.day))\n\n response = requests.get(url)\n status = response.status_code\n if status != 200:\n date = date - datetime.timedelta(1)\n\n tree = json.loads(response.content)\n assert len(tree['rates']) == 1\n print_rate_info(tree['rates'])\n return (tree['rates'][0]['mid'], date)", "def get_realtime_exchange_rate(from_currency, to_currency) :\n\turl = f\"{BASE_URL}function={settings.CURRENCY_EXCHANGE_RATE}&from_currency={from_currency}&to_currency={to_currency}&apikey={API_KEY}\" \n\trequest = requests.get(url)\n\tresult = request.json()\n\treturn result[PREFIX][EXCHANGE_RATE], result[PREFIX][DATE]", "def get_rate(self, t):\n return self.rates[bisect.bisect(self.change_times, t) - 1]", "def get_value(\n self\n ) -> float:\n\n return self.average", "def getDataRate(self):\n \n return self.DataRate", "def get_day_average(fsym, tsym, e='all', try_conversion=True, \n avgType='HourVWAP', UTCHourDiff=0):\n\n\t# build url\n\turl = build_url('dayAvg', fsym=fsym, tsym=tsym, e=e, \n\t try_conversion=try_conversion, avgType=avgType, \n\t UTCHourDiff=UTCHourDiff)\n\n\t# http request\n\tr = requests.get(url)\n\n\t# decode to json\n\tdata = r.json()\n\n\t# remove 'ConversionType' information\n\t#del data['ConversionType']\n\t\n\treturn {fsym: data}", "def Close_Average(db):\r\n \r\n average = 0\r\n n = 0\r\n \r\n for document in db.Transaction.find():\r\n average = average + float(document.get(\"Close\"))\r\n n = n + 1\r\n \r\n return average / n", "def genMarketStat(self):\n myMarketStat = marketstat.MarketStat({'id':str(self.currentRound)})\n self.marketStats[str(self.currentRound)] = myMarketStat\n # set avg price to last rounds market avg price\n if self.currentRound > 1:\n lastMarketStat = self.marketStats[str(self.currentRound-1)]\n myMarketStat.avgSoldAL = lastMarketStat.avgSoldAL\n myMarketStat.avgSoldEC = lastMarketStat.avgSoldEC\n myMarketStat.avgSoldIA = lastMarketStat.avgSoldIA", "def stock_average(stock):\n closing_price=stock['Close']\n average=stats.mean(closing_price)\n return average", "def LoadRateValue(self):\n\t\treturn self._get_attribute('loadRateValue')", "def 
get_meanrates(self):\n return np.asarray([ n.meanrate for n in self.alln.values() ])", "def calc_av_daily_return(self):\n av_return = 0.0\n total_ret = sum(self._returns)\n num_averages = len(self._returns)\n \n if num_averages > 0:\n av_return = total_ret/float(num_averages)\n \n self._av_daily_return = av_return\n return av_return", "def get_average_by_email_since():\n r = request.get_json() # parses the POST request body as JSON\n user_email = r[\"user_email\"]\n heart_rate_average_since = r[\"heart_rate_average_since\"] #\"2018-03-09 11:00:36.372339\" // date string\n since = datetime.strptime(heart_rate_average_since, \"%Y-%m-%d %H:%M:%S.%f\")\n\n # TODO determine tachycardia for user_age\n\n user = heart_rate_databases_starter.models.User.objects.raw({\"user_email\":user_email, \"time\": {'$gte': since}}).first()\n avg = helper_get_users_average_heart_rate(user)\n if avg is not none:\n return jsonify({\"average\": avg}), 200\n else:\n return jsonify({\"error\": \"no entries\"}), 400", "def calc_meanrates(self):\n TRACKNEURONPERIOD = get_ipython().user_ns['TRACKNEURONPERIOD']\n if TRACKNEURONPERIOD == 'track':\n # calc tn.meanrate using entire track duration:\n for tn in self.alln.values():\n tn.meanrate = tn.nspikes / self.dtsec\n elif TRACKNEURONPERIOD == 'trange':\n # calc tn.meanrate using duration between its first and last spike:\n for tn in self.alln.values():\n if tn.dtsec == 0:\n tn.meanrate = 0.0\n else:\n tn.meanrate = tn.nspikes / tn.dtsec\n else:\n raise ValueError(\"invalid value for TRACKNEURONPERIOD: %r\" % TRACKNEURONPERIOD)", "def ingest_rates():\n api_key = app.config[\"FIXER_KEY\"]\n symbols = app.config[\"CURRENCIES\"]\n\n query_string = f\"?access_key={api_key}&symbols={symbols}\"\n\n date_today = datetime.utcnow().date()\n\n rates = []\n for i in range(1, 11):\n # We need to run through the daily endpoints one by one\n try:\n date = date_today - timedelta(days=i)\n endpoint = f\"{app.config['FIXER_ENDPOINT']}/{date.isoformat()}{query_string}\"\n res = requests.get(endpoint)\n res.raise_for_status()\n data = res.json()\n\n # Only write new rates if they don't already exist\n for currency, rate in data[\"rates\"].items():\n if (\n db.session.query(Rate)\n .filter(Rate.currency == currency)\n .filter(Rate.date == date)\n .first()\n is None\n ):\n db.session.add(Rate(currency=currency, date=date, rate=rate))\n\n # If the request for a single day fails, continue with the rest of the days\n except requests.exceptions.HTTPError:\n print(f\"Request for date {date} failed, moving on\")\n continue\n\n db.session.commit()", "def get_pressure(self):\n\n\t\tvoltage_pressure = self.pressure_sensor.getVoltage()\n\t\tnew_value = (250 * voltage_pressure / 5) - 25\n\n\t\tself._pressure_samples.append(new_value)\n\n\t\tif not self.pressure_timer.hasPeriodPassed(self.pressure_timer_delay):\n\t\t\treturn self._last_pressure_value\n\n\t\t# Calculate new running average\n\t\tnew_avg = sum(self._pressure_samples) / len(self._pressure_samples)\n\n\t\tself._pressure_samples = [ ]\n\t\tself._last_pressure_value = new_avg\n\n\t\treturn new_avg", "def URDBv7_to_ElectricityRates(urdb_response):\n warnings.warn(\"ResourceTools.URDBv7_to_ElectricityRates is deprecated. 
Please use UtilityRateTools.URDBv8_to_ElectricityRates instead.\", DeprecationWarning)\n\n urdb_data = dict()\n urdb_data['en_electricity_rates'] = 1\n\n def try_get_schedule(urdb_name, data_name):\n if urdb_name in urdb_response.keys():\n urdb_data[data_name] = urdb_response[urdb_name]\n for i in range(12):\n for j in range(24):\n urdb_data[data_name][i][j] += 1\n\n def try_get_rate_structure(urdb_name, data_name):\n mat = []\n supported_units = {\n \"kwh\" : 0,\n \"kwh/kw\" : 1,\n \"kwh daily\" : 2,\n \"kwh/kw daily\" : 3\n }\n if urdb_name in urdb_response.keys():\n structure = urdb_response[urdb_name]\n for i, period in enumerate(structure):\n for j, entry in enumerate(period):\n rate = entry['rate']\n if 'adj' in entry.keys():\n rate += entry['adj']\n tier_max = 1e38\n if 'max' in entry.keys():\n tier_max = entry['max']\n sell = 0\n if 'sell' in entry.keys():\n sell = entry['sell']\n units = 0\n if 'unit' in entry.keys():\n try:\n units = supported_units[entry['unit'].lower()]\n except KeyError:\n raise RuntimeError(\"UtilityRateDatabase error: unrecognized unit in rate structure\")\n mat.append((i + 1, j + 1, tier_max, units, rate, sell))\n urdb_data[data_name] = mat\n\n def try_get_demand_structure(urdb_name, data_name):\n mat = []\n if urdb_name in urdb_response.keys():\n structure = urdb_response[urdb_name]\n for i, period in enumerate(structure):\n for j, entry in enumerate(period):\n rate = entry['rate']\n if 'adj' in entry.keys():\n rate += entry['adj']\n tier_max = 1e38\n if 'max' in entry.keys():\n tier_max = entry['max']\n if 'unit' in entry.keys():\n if entry['unit'].lower() != \"kW\".lower():\n raise RuntimeError(\"UtilityRateDatabase error: unrecognized unit in rate structure\")\n mat.append((i + 1, j + 1, tier_max, rate))\n if data_name:\n urdb_data[data_name] = mat\n else:\n return mat\n\n if \"dgrules\" in urdb_response.keys():\n rules = urdb_response['dgrules'] # dgrules\n if rules == \"Net Metering\":\n urdb_data['ur_metering_option'] = 0\n elif rules == \"Net Billing Instantaneous\":\n urdb_data['ur_metering_option'] = 2\n elif rules == \"Net Billing Hourly\":\n urdb_data['ur_metering_option'] = 3\n elif rules == \"Buy All Sell All\":\n urdb_data['ur_metering_option'] = 4\n else:\n # if no metering option provided, assume Net Metering\n urdb_data['ur_metering_option'] = 0\n\n if 'fixedchargefirstmeter' in urdb_response.keys() and 'fixedchargeunits' in urdb_response.keys():\n fixed_charge = urdb_response['fixedchargefirstmeter']\n fixed_charge_units = urdb_response['fixedchargeunits']\n if fixed_charge_units == \"$/day\":\n fixed_charge *= 365 / 12\n elif fixed_charge_units == \"$/year\":\n fixed_charge /= 12\n urdb_data['ur_monthly_fixed_charge'] = fixed_charge\n\n if 'mincharge' in urdb_response.keys():\n min_charge = urdb_response['mincharge']\n min_charge_units = urdb_response['minchargeunits']\n if min_charge_units == \"$/year\":\n urdb_data['ur_annual_min_charge'] = min_charge\n else:\n if min_charge_units == \"$/day\":\n min_charge *= 365 / 12\n urdb_data['ur_monthly_min_charge'] = min_charge\n\n try_get_schedule('energyweekdayschedule', 'ur_ec_sched_weekday')\n try_get_schedule('energyweekendschedule', 'ur_ec_sched_weekend')\n try_get_rate_structure('energyratestructure', 'ur_ec_tou_mat')\n\n try_get_demand_structure('demandratestructure', 'ur_dc_tou_mat')\n try_get_schedule('demandweekdayschedule', 'ur_dc_sched_weekday')\n try_get_schedule('demandweekendschedule', 'ur_dc_sched_weekend')\n\n flat_demand_structure = 
try_get_demand_structure('flatdemandstructure', None)\n\n if 'flatdemandmonths' in urdb_response.keys():\n urdb_data['ur_dc_enable'] = 1\n flat_mat = []\n flat_demand = urdb_response['flatdemandmonths']\n for month, period in enumerate(flat_demand):\n tiers = []\n for r in flat_demand_structure:\n if r[0] == int(period + 1):\n tiers.append(r)\n \n if len(tiers) == 0:\n raise ValueError(\"flatdemandstructure missing period number \", period)\n for t in tiers:\n month_row = []\n month_row.append(month)\n month_row += [t[i] for i in (1, 2, 3)]\n flat_mat.append(month_row)\n urdb_data['ur_dc_flat_mat'] = flat_mat\n # Fill out an empty flat rate structure if the rate has TOU demand but not flat demand \n elif \"demandratestructure\" in urdb_response.keys():\n urdb_data['ur_dc_enable'] = 1\n # Enumerate a dc_flat table with $0/kW in 12 months\n flat_mat = []\n for i in range(0, 12):\n month_mat = [i, 1, 1e38, 0]\n flat_mat.append(month_mat)\n urdb_data['ur_dc_flat_mat'] = flat_mat\n else:\n urdb_data['ur_dc_enable'] = 0\n\n if urdb_data['ur_dc_enable'] == 1 and \"ur_dc_tou_mat\" not in urdb_data.keys():\n urdb_data['ur_dc_tou_mat'] = [[1, 1, 1e38, 0], ]\n urdb_data['ur_dc_sched_weekday'] = [[1] * 24 for i in range(12)]\n urdb_data['ur_dc_sched_weekend'] = urdb_data['ur_dc_sched_weekday']\n\n return urdb_data", "def avg_tx_value_USD(df):\n\n tx_vol_USD = df['Tx Volume (USD)']\n daily_txs = df['Txs']\n result = tx_vol_USD.div(daily_txs)\n result.name = 'Avg Tx Value (USD)'\n return out(SETTINGS, df, result)", "def get_sale_rate(self, pair):\n response = self.execute_http_call(\"/api/rate/{}\".format(pair), \"GET\")\n return float(response[\"rate\"])", "def all_average(user_email):\r\n import statistics as st\r\n import json\r\n check_email = Check_For_User(user_email)\r\n if check_email.user_exists is False:\r\n return jsonify(str(user_email) + \" not found\"), 400\r\n raise LookupError(str(user_email) + \" was not found. 
Please re-enter\")\r\n heart_rate_list = get_all_rates(user_email)\r\n all_average = st.mean(heart_rate_list)\r\n return_dict = {\r\n \"user\": user_email,\r\n \"average\": all_average\r\n }\r\n return jsonify(return_dict), 200", "def get_average_rating(self):\n queryset = ArticleRating.objects.filter(article_id=self.get_object())\n return queryset.aggregate(Avg('rate')).get(\"rate__avg\")", "def getprice():\n\n print(\"Get price\")\n latest_price = get_latest_price(item_code)\n return latest_price", "def get_rates(self):\n rates = np.empty(len(self.periods))\n for index, element in enumerate(self.periods):\n rates[index] = self.periods[element]['price']\n return(pd.Series(rates, self.periods.keys()))", "def compute_avg_weekly_price_to_csv(self):\n c = self.connection.cursor()\n self.print_datetime_output('Group time series by week and compute mean price')\n query = \"SELECT min((strftime('%Y%m%d', timestamp)/7*7 - 19000101) + 19000106) AS start_day, timestamp, \" \\\n \"avg(close_USD) FROM \" + self.db_table + \" GROUP BY (strftime('%Y%m%d', timestamp) - 19000106)/7\"\n c.execute(query)\n avg_close_by_week_df = pd.DataFrame({x[0]: x[1:] for x in c.fetchall()})\n self.print_datetime_output('Store data frame to file \\'%s\\'' % self.avg_price_file_name)\n avg_close_by_week_df.to_csv('%s/%s' % (self.dir_path, self.avg_price_file_name))", "def get_avg_price(cls, instrument: Instrument) -> float:\n try:\n avg_price = cls.objects.filter(\n instrument=instrument,\n # status=OrderStatus.COMPLETED.value\n ).annotate(price_t_volume=models.F('price') *\n models.F('total_sum')).aggregate(\n avg_price=models.Sum('price_t_volume') /\n models.Sum('total_sum'))\n except DataError: # handle division by zero\n return 0\n return float(avg_price.get('avg_price', 0) or 0)", "def getRate(self, context):\n try:\n return VTypeHelper.toDouble(context.getDevice(\"rate\").read())\n except:\n return 60.0", "def aveVolumeCalc(ins, date):\n cal = ins.Currency().Calendar()\n enddate = cal.AdjustBankingDays(date, 0)\n startdate = cal.AdjustBankingDays(date, AVERAGING_PERIOD)\n\n prices=[]\n histprices = acm.FPrice.Select(\"instrument = %s and market = '%s' \\\n and day > '%s' and day <='%s'\" % \n (ins.Oid(), DAILY_MARKET, startdate, enddate))\n \n for price in histprices:\n settle = price.Settle()\n if settle >= 0:\n prices.append(settle)\n \n #upgrade 2013 fix for failure during run - acm.Math().AverageOf seems buggy\n try:\n avgprice = (sum(prices)/len(prices))\n except ZeroDivisionError:\n avgprice = 0\n \n #avgprice = acm.Math().AverageOf(prices, None)\n \n #Overwrite today's price if you find it \n newPrice = acm.FPrice.Select01(\"instrument = %s and market = '%s' and day = %s\" % \n (ins.Oid(), THREE_MONTH_MARKET, enddate),\n 'NaN')\n if not newPrice:\n newPrice = acm.FPrice()\n newPrice.Instrument(ins)\n newPrice.Day(enddate)\n newPrice.Market(THREE_MONTH_MARKET)\n newPrice.Currency(ins.Currency())\n\n newPrice.Settle(avgprice)\n try:\n newPrice.Commit()\n print 'INFO: %s price for %s was created on %s' %(THREE_MONTH_MARKET, ins.Name(), date)\n except Exception, err:\n print 'ERROR: %s price for %s did not commit: %s' %(THREE_MONTH_MARKET, ins.Name(), str(err))\n \n return newPrice", "def getClosingPrice(self):\t\n\t\treturn self.dataPoints[-1].getDate(), self.dataPoints[-1].getAdjustedValue()", "def get_exchange_rate_data(self, source_currency, exchanged_currency, valuation_date):\n raise NotImplementedError", "def get_placed_assets_rate(cls, instrument: Instrument) -> float:\n bank_balance = 
InstrumentBalance.objects.filter(\n user__email__contains='bank',\n instrument=instrument).order_by('user__created_at_dt').last()\n if bank_balance:\n return float(bank_balance.amount)\n return 0", "def daily_avg(self, run_id):\n time_series = self.get_data(run_id=run_id,\n metric_ids=['00003', '00060', '00001'])\n if len(time_series) == 0:\n return None\n\n precip = time_series[time_series.metric_id == '00003']\n precip['date_time'] = pd.to_datetime(precip['date_time'], utc=True)\n precip.index = precip['date_time']\n precip_daily = precip.resample('D').sum()\n\n flow = time_series[time_series.metric_id == '00060']\n flow['date_time'] = pd.to_datetime(flow['date_time'], utc=True)\n flow.index = flow['date_time']\n flow_daily = flow.resample('D').mean()\n\n temp = time_series[time_series.metric_id == '00001']\n temp['date_time'] = pd.to_datetime(temp['date_time'], utc=True)\n temp.index = temp['date_time']\n temp_daily = temp.resample('D').mean()\n\n time_series_daily = temp_daily\\\n .merge(flow_daily,\n how='inner',\n left_index=True,\n right_index=True) \\\n .merge(precip_daily,\n how='inner',\n left_index=True,\n right_index=True)\n time_series_daily.columns = ['temp', 'flow', 'precip']\n time_series_daily = time_series_daily.dropna()\n return time_series_daily", "def sample_rate(self):\n\n properties_file = open(self.scenario_path + \"/conf/sandag_abm.properties\", \"r\")\n rate = None\n\n for line in properties_file:\n # strip all white space from the line\n line = line.replace(\" \", \"\")\n\n # find line containing \"sample_rates=\"\n m = re.compile(\"sample_rates=\").match(line)\n if m:\n # take the portion of the line after the matching string\n # and split by the comma character\n line = line[m.end():].split(\",\")\n\n # if the split line contains a single element return that element\n # otherwise return the final element\n if len(line) == 0:\n rate = float(line[0])\n else:\n rate = float(line[-1])\n break\n\n properties_file.close()\n\n return rate", "def get_rates(src, dst):\n if not dst:\n dst = ''\n\n request = '{}{}'.format(api_url, rates_query.replace(':?', '\"{}{}\"'.format(src, dst)))\n\n response = web.get(urllib.quote(request, ':/?&=*'))\n\n response.raise_for_status()\n\n rates = response.json()\n\n rate_resp = rates['query']['results']['rate']\n\n if rate_resp['Rate'] == 'N/A':\n return -1\n\n return Decimal(rate_resp['Rate'])", "def latest(self, base='USD'):\n try:\n resp = self.client.get(self.ENDPOINT_LATEST, params={'base': base})\n resp.raise_for_status()\n except requests.exceptions.RequestException as e:\n raise OpenExchangeRatesClientException(e)\n return resp.json(parse_int=decimal.Decimal,\n parse_float=decimal.Decimal)", "def test_currency_rate(self):\n currency_name = ['USD'] * 4\n rates = [3.67, 4.07, 3.04, 3.89]\n helper.currency_loop_helper(get_historical_currency_rate, TestHistoricalRates.dates_rate,\n rates, currency_name)", "def get_current_price(self):\n URL = config.coin['price_hist_url'] + self.ticker.lower()\n try:\n r = requests.get(URL)\n data = json.loads(r.text)\n value = data['last']\n timestamp = data['timestamp']\n self.current_price = value\n self.current_datetime = timestamp\n except Exception as err:\n logger.error(err)", "def get_cpi_rates():\n\n df = pd.read_csv('cpi_usa.csv', index_col=0)\n df.index = pd.to_datetime(df.index)\n\n df = df.resample('BAS').mean() # change sampling to business year start\n df.index = df.index.year # datetime to year\n df.columns = ['cpi_rate']\n\n return df", "def 
fillHistoricalPricesAndRating(self):\r\n time_start = time.time()\r\n self.buildPriceHistory()\r\n savepath = TEMPPATH + 'bondhistoryrating.csv'\r\n #If bondhistoryratingUAT.csv doesn't exist, download data and write file.\r\n cols = ['SNP', 'MDY', 'FTC', 'P1D', 'P1W', 'P1M', 'Y1D', 'Y1W', 'Y1M', 'ACCRUED', 'D2CPN', 'SAVG', 'ISP1D', 'ISP1W', 'ISP1M', 'RISK_MID', 'PRINCIPAL_FACTOR', 'SIZE']\r\n if not (os.path.exists(savepath)) or datetime.datetime.fromtimestamp(\r\n os.path.getmtime(savepath)).date() < datetime.datetime.today().date():\r\n isins = self.df['ISIN'] + BBGHand + ' Corp'\r\n isins = list(isins.astype(str))\r\n\r\n ##\r\n flds = ['RTG_SP', 'RTG_MOODY', 'RTG_FITCH', 'INT_ACC', 'DAYS_TO_NEXT_COUPON', 'YRS_TO_SHORTEST_AVG_LIFE', 'RISK_MID', 'PRINCIPAL_FACTOR', 'AMT_OUTSTANDING']\r\n out = blpapiwrapper.simpleReferenceDataRequest(pandas.Series((self.df['ISIN'] + ' Corp').values, index=self.df.index).to_dict(),flds)[flds]\r\n #loop\r\n for f in flds:\r\n self.df[bbgToBdmDic[f]] = out[f]\r\n self.df['RISK_MID'].fillna(0, inplace=True)\r\n ##\r\n self.df.drop(['P1D', 'P1W', 'P1M', 'Y1D', 'Y1W', 'Y1M', 'ISP1D', 'ISP1W', 'ISP1M'], axis=1, inplace=True)\r\n dbPriceHistory = pandas.read_csv(PHPATH + 'dbPriceHistory.csv', index_col=0)\r\n dbYieldHistory = pandas.read_csv(PHPATH + 'dbYieldHistory.csv', index_col=0)\r\n dbSpreadHistory = pandas.read_csv(PHPATH + 'dbSpreadHistory.csv', index_col=0)\r\n hdt = []\r\n if self.dtYesterday.strftime('%Y%m%d') in dbPriceHistory.columns:\r\n hdt.append(self.dtYesterday.strftime('%Y%m%d'))\r\n else:\r\n self.df['P1D'] = pandas.np.nan\r\n self.df['Y1D'] = pandas.np.nan\r\n self.df['ISP1D'] = pandas.np.nan\r\n if self.dtLastWeek.strftime('%Y%m%d') in dbPriceHistory.columns:\r\n hdt.append(self.dtLastWeek.strftime('%Y%m%d'))\r\n else:\r\n self.df['P1W'] = pandas.np.nan\r\n self.df['Y1W'] = pandas.np.nan\r\n self.df['ISP1W'] = pandas.np.nan\r\n if self.dtLastMonth.strftime('%Y%m%d') in dbPriceHistory.columns:\r\n hdt.append(self.dtLastMonth.strftime('%Y%m%d'))\r\n else:\r\n self.df['P1M'] = pandas.np.nan\r\n self.df['Y1M'] = pandas.np.nan\r\n self.df['ISP1M'] = pandas.np.nan\r\n ohdt = [self.dtYesterday.strftime('%Y%m%d'), self.dtLastWeek.strftime('%Y%m%d'), self.dtLastMonth.strftime('%Y%m%d')]\r\n self.df = self.df.join(dbPriceHistory[hdt], on='ISIN')\r\n self.df.rename(columns={ohdt[0]:'P1D', ohdt[1]:'P1W', ohdt[2]:'P1M'}, inplace=True)\r\n self.df = self.df.join(dbYieldHistory[hdt], on='ISIN')\r\n self.df.rename(columns={ohdt[0]:'Y1D', ohdt[1]:'Y1W', ohdt[2]:'Y1M'}, inplace=True)\r\n self.df = self.df.join(dbSpreadHistory[hdt], on='ISIN')\r\n self.df.rename(columns={ohdt[0]:'ISP1D', ohdt[1]:'ISP1W', ohdt[2]:'ISP1M'}, inplace=True)\r\n\r\n self.df[cols].to_csv(savepath)\r\n self.df['ACCRUED'] = self.df['ACCRUED'].apply(lambda x: '{:,.2f}'.format(float(x)))\r\n self.df['D2CPN'].fillna(-1, inplace=True)\r\n self.df['D2CPN'] = self.df['D2CPN'].astype(int)\r\n self.df[['RISK_MID','PRINCIPAL_FACTOR','SIZE']] = self.df[['RISK_MID','PRINCIPAL_FACTOR','SIZE']].astype(float)\r\n self.df[['SNP', 'MDY', 'FTC']] = self.df[['SNP', 'MDY', 'FTC']].fillna('NA') # ,'ACCRUED','D2CPN'\r\n self.df[['SNP', 'MDY', 'FTC', 'ACCRUED']] = self.df[['SNP', 'MDY', 'FTC', 'ACCRUED']].astype(str)\r\n\r\n #Otherwise, load and read from file.\r\n else:\r\n print 'Found existing file from today'\r\n df = pandas.read_csv(savepath, index_col=0)\r\n self.df[cols] = df[cols]\r\n self.df[['RISK_MID','PRINCIPAL_FACTOR','SIZE','SAVG', 'ISP1D','ISP1W','ISP1M']] = 
self.df[['RISK_MID','PRINCIPAL_FACTOR','SIZE','SAVG', 'ISP1D','ISP1W','ISP1M']].astype(float)\r\n self.df[['SNP', 'MDY', 'FTC']] = self.df[['SNP', 'MDY', 'FTC']].astype(str)\r\n self.df['ACCRUED'].fillna(-1,inplace=True)#HACK SO NEXT LINE DOESN'T BLOW UP - WE DON'T WANT TO PUT 0 THERE!\r\n self.df['ACCRUED'] = self.df['ACCRUED'].astype(float)\r\n self.df['ACCRUED'] = self.df['ACCRUED'].apply(lambda x: '{:,.2f}'.format(float(x)))\r\n self.df['D2CPN'].fillna(-1, inplace=True)#HACK SO NEXT LINE DOESN'T BLOW UP - WE DON'T WANT TO PUT 0 THERE!\r\n self.df['D2CPN'] = self.df['D2CPN'].astype(int) \r\n\r\n print 'History fetched in: ' + str(int(time.time() - time_start)) + ' seconds.'", "def get_average_by_email(user_email):\n user = helper_get_by_email(user_email)\n avg = helper_get_users_average_heart_rate(user)\n if avg is not None:\n return jsonify({\"average\": avg}), 200\n else:\n return jsonify({\"error\": \"no entries\"}), 400", "def test_list_average_prices(self):\n from grand_exchanger.resources.graph import Graph\n\n price_history = Graph(\n daily={},\n average={\n datetime(2020, 7, 26, 0, 0): 100,\n datetime(2020, 7, 27, 0, 0): 104,\n datetime(2020, 7, 25, 0, 0): 110,\n },\n )\n\n assert list(price_history.list_average_prices()) == [\n (datetime(2020, 7, 27, 0, 0), 104),\n (datetime(2020, 7, 26, 0, 0), 100),\n (datetime(2020, 7, 25, 0, 0), 110),\n ]", "def get_position_avg_price(self):\n self.__init_client()\n return float(self.get_position()['entryPrice'])", "def sample_rate(self):\n return self.query_float('ENTER Current Sample Rate (Sa/s)')", "def getAvgMarketCosts(self):\n try:\n avgAL = self.frame.mode.game.marketStats[str(self.frame.mode.game.currentRound-1)]['avgSoldAL']\n avgEC = self.frame.mode.game.marketStats[str(self.frame.mode.game.currentRound-1)]['avgSoldEC']\n avgIA = self.frame.mode.game.marketStats[str(self.frame.mode.game.currentRound-1)]['avgSoldIA']\n except:\n avgAL = 0.0\n avgEC = 0.0\n avgIA = 0.0\n \n return (avgAL, avgEC, avgIA)", "def readAvg(self):\n self.flushInput()\n\n if (self.model == 'TDS'):\n self.write('ACQuire:NUMAVg?\\n')\n return int(self.readline())\n #elif (self.model == 'GDS'):\n # FIXME: I'll implement this later. 
I need to do some\n # testing, re: whether GDS returns the actual average\n # number, or log-base-2 of the average number.", "def parse_rate():\n try:\n response = requests.get(ecb_url)\n except Exception as e:\n return {\"error\": \"error occurred while accessing www.ecb.europa.eu: {}\".format(e)}, True\n else:\n currency_xml = response.content.decode()\n root = ET.fromstring(currency_xml)\n currencies_list = [currency.attrib.get('currency') for currency in root.iter(cube) if currency.attrib.get('currency')]\n rates_list = [float(currency.attrib.get('rate')) for currency in root.iter(cube) if currency.attrib.get('rate')]\n result = dict(zip(currencies_list, rates_list))\n result['EUR'] = float(1)\n return result, False", "def getRate(self) -> int:\n if (self._total_stake.get() + self._daily_reward.get()) == 0:\n rate = DENOMINATOR\n else:\n rate = (self._total_stake.get() + self._daily_reward.get()) * DENOMINATOR // self.sICX_score.totalSupply()\n return rate", "def get_gae(rewards, value_estimates, value_next=0.0, gamma=0.99, lambd=0.95):\n value_estimates = np.append(value_estimates, value_next)\n delta_t = rewards + gamma * value_estimates[1:] - value_estimates[:-1]\n advantage = discount_rewards(r=delta_t, gamma=gamma * lambd)\n return advantage", "def annualized_gains(self, day='today'):\n assert day == 'today' or isinstance(day, date), 'Error! You have to pass a datetime.date istance to the day parameter.'\n if day == 'today':\n day = self.data.index[-1]\n if self.data.index[-1] >= day >= self.data.index[0]:\n day = self._first_good_date(day)\n initialValue = self.invested_amount(day)\n finalValue = self.value(day)\n numberOfDays = (day - self.data.index[0]).days\n return round(((finalValue / initialValue)**(365/numberOfDays) - 1) * 100, 2) \n else:\n return 0", "def daily_bond_yc(request, template_name=\"bonds/daily_bond_yc.html\"):\n date = request.GET.get(\"d\",\"20120823\")\n country = request.GET.get(\"ct\",\"US\")\n currency = request.GET.get(\"cu\",\"USD\")\n currency = currency.upper()\n rating = request.GET.get(\"rt\",\"AAA\")\n #print \"query\"+query\n picname = \"\" \n \n list_bond = [] \n #r = redis.StrictRedis(host='10.1.248.202',port=6379,db=0)\n #reader = csv.reader(open(\"static/webdata/BOND_YC_USA_USD_yyyymmdd_AAA.csv\")) \n #add head\n bonds = (\" \",\"CURRENT YLD\",\"PREV YLD\",\"CHANGE\",\"1 WK YLD\",\"1 MO YLD\",\"6 MO YLD\")\n list_bond.append(bonds)\n durations = ('1M','3M','6M','1Y','2Y','3Y','5Y','7Y','10Y','20Y','30Y')\n \n def formatstr(fstr): \n \tif(str!=None):\n \t return '%.3f' % round(string.atof(fstr),3) if(fstr != None) else None\n \t #return '%.3f' % round(string.atof(fstr),3) +\"%\" if(fstr != None) else None\n \t \n for dur in durations:\n #duration,current,last,change,weekAVG,days30AVG,days60AVG\n current = r.hget('YLD:USTB:'+currency+':GOV:DLY:'+dur, date)\n last = r.hget('YLD:USTB:'+currency+':GOV:LAST:'+dur, date)\n change = string.atof(current)- string.atof(last) if(current != None and last != None) else None \n change = formatstr(change)\n last = formatstr(last)\n current = formatstr(current)\n weekAVG = r.hget('YLD:USTB:'+currency+':GOV:AV1W:'+dur, date)\n weekAVG = formatstr(weekAVG)\n days30AVG = r.hget('YLD:USTB:'+currency+':GOV:AV1M:'+dur, date)\n days30AVG = formatstr(days30AVG)\n days60AVG = r.hget('YLD:USTB:'+currency+':GOV:AV6M:'+dur, date)\n days60AVG = formatstr(days60AVG) \n \n bonds = (dur,current,last,change,weekAVG,days30AVG,days60AVG)\n list_bond.append(bonds)\n #for bonds in list_bond:\n # print bonds[0] \n if(date != 
\"\"):\n picname = \"BOND_YC_USA_USD_AAA_\"+date+\".png\"\n from datetime import datetime\n date = datetime.strptime(date, \"%Y%m%d\").strftime(\"%Y-%m-%d\")\n \n return render_to_response(template_name, {\n \"list_bond\": list_bond,\n \"len_list_bond\": len(list_bond),\n \"picname\": picname,\n \"date\": date,\n }, context_instance=RequestContext(request))", "def get_data(self):\n data = list(IgnitionRow.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = data[::-1] # The most recent two hours of data\n# print([elem['avg_pot_5'] for elem in two_hours])\n# avg_pot_data = [[float(elem['avg_pot_{}'.format(key)]) / (int(key) / 100) for elem in two_hours]\n avg_pot_data = [[float(elem['avg_pot_{}'.format(key)]) for elem in two_hours] \n \tfor key in self.keys]\n# print(avg_pot_data[0][-5:])\n avg_pot_data = [[max(min(elem, 100),0) for elem in arr] for arr in avg_pot_data] # Assume a max pot size of 2000 BBs\n avg_pot_data = [[elem if elem != 100 else 0 for elem in arr] for arr in avg_pot_data] # Assume a max pot size of 2000 BBs\n# print(avg_pot_data[0][-5:])\n return avg_pot_data", "def update_by_day(self, date):\n print 'UPDATE EXCHANGE RATE for day: %s' % date\n currencies = self.get_currencies()\n for code, name in currencies:\n if code in self.base_curr:\n _, created = Currency.objects.get_or_create(\n code=code, defaults={'name': name})\n if created:\n print('currency: %s created', code)\n\n for source in Currency.objects.filter(code__in=self.base_curr).all():\n exchange_rates = self.get_exchangerates_by_day(source.code, date)\n if exchange_rates:\n exchange_rates.pop(source.code)\n for code, rate in exchange_rates.iteritems():\n try:\n target = Currency.objects.get(code=code)\n exchange_rate = ExchangeRate.objects.get(date=date, source=source, target=target)\n exchange_rate.rate = rate\n exchange_rate.save()\n print('exchange rate updated %s, %s/%s=%s' % (date, source, target, rate))\n except ExchangeRate.DoesNotExist:\n exchange_rate = ExchangeRate.objects.create(date=date, source=source, target=target, rate=rate)\n print('exchange rate created %s, %s/%s=%s' % (date, source, target, rate))\n else:\n print('There is no rate for the current day')\n mail_admins('Exchange Rates Warning', 'There is no today exchange rate')\n break", "def getrating(self):\n\n\t\tquery = \"\"\"select C.user_id, C.venue_id , R.Rating\n\t\t\t\t\tfrom \n\t\t\t\t\t(\n\t\t\t\t\tselect user_id, venue_id, max(created_at)\n\t\t\t\t\tfrom checkins\n\t\t\t\t\tgroup by user_id, venue_id) C\n\t\t\t\t\tleft join\n\t\t\t\t\t(\n\t\t\t\t\tselect user_id, venue_id, avg(rating) as Rating from ratings \n\t\t\t\t\tgroup by user_id, venue_id\n\t\t\t\t\t) R\n\t\t\t\t\twhere C.user_id = R.user_id and C.venue_id = R.venue_id\"\"\"\n\n\t\tdf = pd.read_sql_query(query, self.conn)\n\n\t\treturn df", "def get_current(self):\r\n with open('MonthlyRate.csv', newline='') as csvfile:\r\n reader = csv.DictReader(csvfile)\r\n for row in reader:\r\n if self.choice == row['CurrencyCode']:\r\n current = row[\"Current Rate\"]\r\n csvfile.close()\r\n # Round the value to 4 d.p.\r\n current = round(float(current), 4)\r\n return current", "def currency_rate(self, init):\r\n\r\n curr = CurrencyRates()\r\n curr_rate = curr.get_rates(init)\r\n return curr_rate", "def storeLatestPercentageChange(self, stockSymbol, stockExchange, currentDate, latestPercentageChange):\n self.db.insert_stock_latestPercentageChange(\n stockSymbol, stockExchange, currentDate, latestPercentageChange)", "def new_get_historical_price(base, target, 
date):\n if base == \"BTC\" and target == \"EUR\":\n return {\"BTC\": {\"EUR\": 10000}}\n elif base == \"EUR\" and target == \"BTC\":\n return {\"EUR\": {\"BTC\": 0.00012}}\n elif base == \"LTC\" and target == \"BTC\":\n return {\"LTC\": {\"BTC\": 0.02}}\n elif base == \"LTC\" and target == \"EUR\":\n return {\"LTC\": {\"EUR\": 250}}", "def earnings():\n db = app.get_db()\n\n gain = list(db.trades.aggregate([\n {'$match': {'status':'closed', 'pct_net_gain':{'$gte':0}}},\n {'$group': {\n '_id': {'algo':'$algo', 'day': {'$dayOfYear':'$end_time'}},\n 'total': {'$sum':'$pct_net_gain'},\n 'count': {'$sum': 1}\n }}\n ]))\n loss = list(db.trades.aggregate([\n {'$match': {'status':'closed', 'pct_net_gain':{'$lt':0}}},\n {'$group': {\n '_id': {'algo':'$algo', 'day': {'$dayOfYear':'$end_time'}},\n 'total': {'$sum':'$pct_net_gain'},\n 'count': {'$sum': 1}\n }}\n ]))\n assets = list(db.trades.aggregate([\n { '$match': {'status':'closed', 'pct_net_gain':{'$gte':0}}},\n { '$group': {\n '_id': {\n 'asset':'$quote_asset',\n 'day': {'$dayOfYear':'$end_time'}},\n 'total': {'$sum':'$pct_net_gain'},\n 'count': {'$sum': 1}\n }}\n ]))\n\n today = int(datetime.utcnow().strftime('%j'))\n gain = [ n for n in gain if n['_id']['day'] == today]\n loss = [ n for n in loss if n['_id']['day'] == today]\n\n tradelog('-'*TRADELOG_WIDTH)\n for n in gain:\n tradelog(\"{:} today: {:} wins ({:+.2f}%).\"\\\n .format(\n n['_id']['algo'],\n n['count'],\n n['total']\n ))\n for n in loss:\n tradelog(\"{:} today: {:} losses ({:+.2f}%).\"\\\n .format(\n n['_id']['algo'],\n n['count'],\n n['total']\n ))\n\n\n return (gain, loss, assets)", "def fetch_currency_rates(url=\"http://www.nbrb.by/API/ExRates/Rates?Periodicity=0\") -> dict:\n data = {}\n response = requests.get(url)\n if response.status_code == 200:\n data = get_json(response)\n return data", "def get_gae(rewards, value_estimates, value_next=0.0, gamma=0.99, lambd=0.95):\n value_estimates = np.asarray(value_estimates.tolist() + [value_next])\n delta_t = rewards + gamma * value_estimates[1:] - value_estimates[:-1]\n advantage = discount_rewards(r=delta_t, gamma=gamma * lambd)\n return advantage", "def rates(self):\n return self._rates", "def average_speed(self): # pylint: disable=no-self-use\n query = read_sql(\"ave_speed.sql\")\n ave_speed_df = get_dataframe_from_bigquery(query, multipart=True)\n ave_speed_df.pipe(save_to_gcs, settings.ASSETS.FILES.AVESPEED)", "def get_btcoin_day_data(self):\n cursor = self.__connect().cursor()\n limit = (str(int(time.time() - 24*60*60)),)\n hashdata = []\n rewarddata = []\n summ = 0\n for row in cursor.execute('SELECT * from btcoin where key > ? 
ORDER BY key ASC', limit):\n date = int(row[0])\n hashrate = str(row[1])\n hashrate = self.convert_hashrate_to_float(hashrate)\n summ = summ + hashrate\n reward = float(row[2])\n hashdata.append([date, hashrate])\n rewarddata.append([date, reward])\n cursor.close()\n self.__disconnect()\n if len(hashdata) != 0:\n hashaverage = summ / len(hashdata)\n return (hashaverage, hashdata, rewarddata)\n else:\n return (-1, hashdata, rewarddata)", "def rate_bucket(dataset, rate_low, rate_high):\r\n rated_movies=[]\r\n for row in dataset.values():\r\n rate = float(row[11]) \r\n if((rate >= rate_low and rate <= rate_high) and row[3] == \"en\"):\r\n rated_movies.append(row) \r\n explore_data(rated_movies,0,5,False)\r\n #del rated_movies[0]\r\n return rated_movies", "def average_revenue():\n graph = pygal.SolidGauge(inner_radius=0.70)\n usd_formatter = lambda x: '{:.10g}‎M$'.format(x)\n graph.value_formatter = usd_formatter\n graph.title = \"Average Revenue of Movies per year\"\n\n for year in range(2000, 2017):\n print(\">> Year : %i\" % year)\n\n # Start display\n print(\">> [status] Create Graph Starting!\")\n\n dataset = pd.read_csv(\"Top-100_Export/Top-100_%i.csv\" % (year))\n revenue = dataset[\"revenue\"].tolist() #Revenue\n temp = []\n for i in revenue:\n if i != 0:\n temp.append(i)\n average = ((((sum(temp)/len(temp)))/1000000//0.01)/100)\n graph.add(str(year), [{'value': average, 'max_value': 250}])\n\n # End display\n print(\">> [status] Created Graph Successful!\")\n\n graph.render_to_file(\"Graph_Export/Average_Revenue_of_Movies.svg\")\n\n # Used time\n print(\">> [status] Completed : Used time = %s seconds\" % (time.time() - start_time))", "def get_daily():\n cbr_response = requests.get(CBR_DAILY_URL)\n if not cbr_response.ok:\n abort(503)\n\n result = parse_cbr_currency_base_daily(cbr_response.text)\n\n return result, 200", "def avg_tx_fees_USD(df):\n result = df['Tx fees (USD)'].div(df['Txs'])\n result.name = 'Avg Tx Fees (USD)'\n return out(SETTINGS, df, result)", "def avg_rate(self, instance, avg=0):\n\n try:\n # user_count = self.filter_by_model(\n # instance=instance).annotate(Count('user')).count()\n # avg = sum(x.rating for x in self.filter_by_model(\n # instance=instance)) / int(user_count)\n my_avg = self.filter_by_model(\n instance).aggregate(Avg('rating'))\n except ZeroDivisionError:\n logging.error(error_handling())\n\n # f = ''\n # if avg <= 1.0:\n # f = \"خیلی بد\"\n # if 1.0 <= avg < 3.0:\n # f = \"بد\"\n # if 3.0 <= avg < 4.0:\n # f = \"متوسط\"\n # if 4.0 <= avg < 5.0:\n # f = \"خوب\"\n # if avg >= 5.0:\n # f = \"خیلی خوب\"\n # if avg == 0:\n # f = 'نظری داده نشده'\n\n # return float(\"%.1f\" % round(my_avg, 2))\n if my_avg['rating__avg'] is None:\n return 0.0\n return my_avg['rating__avg']", "def api_asset_calculate_revenue():\n periods = request.args.getlist(\"period\")\n\n daily_response = requests.get(CBR_DAILY_URL)\n key_indicators_response = requests.get(CBR_INDICATORS_URL)\n currency_rates = parse_cbr_currency_base_daily(daily_response.text)\n currency_rates.update(parse_cbr_key_indicators(key_indicators_response.text))\n\n result = {}\n for period in periods:\n result[period] = app.bank.calculate_revenue(int(period), currency_rates)\n return result, 200" ]
[ "0.65365905", "0.62230974", "0.61657596", "0.6133238", "0.60252744", "0.5859477", "0.58187735", "0.58073163", "0.5803112", "0.5690306", "0.56884164", "0.56819123", "0.56722516", "0.5655974", "0.56529254", "0.56445503", "0.5622465", "0.5596036", "0.559005", "0.5584927", "0.55840766", "0.55819106", "0.5573295", "0.5554427", "0.5548469", "0.5541163", "0.55384475", "0.5531847", "0.5527902", "0.5507146", "0.55064046", "0.55038303", "0.5495335", "0.5493657", "0.5487691", "0.547995", "0.54736143", "0.5470093", "0.5464318", "0.5459266", "0.54359126", "0.5427273", "0.54264903", "0.5419065", "0.54184437", "0.5415809", "0.53995275", "0.53894615", "0.5388561", "0.53833646", "0.5381225", "0.5356071", "0.5355881", "0.5347769", "0.5343813", "0.5343246", "0.5336738", "0.533045", "0.5329497", "0.53277683", "0.53203297", "0.53124696", "0.53102005", "0.5308376", "0.53080523", "0.5296295", "0.52855146", "0.5284089", "0.5278885", "0.5274482", "0.52648896", "0.52621067", "0.5257617", "0.52545893", "0.52482533", "0.5247647", "0.52339476", "0.52332574", "0.5226413", "0.5216845", "0.52117443", "0.52109534", "0.51968557", "0.5196567", "0.51866233", "0.5185989", "0.5182993", "0.5179161", "0.51788723", "0.5174236", "0.5171915", "0.5166097", "0.5156488", "0.51559377", "0.51482475", "0.51448417", "0.5135652", "0.51314765", "0.51308995", "0.51284057" ]
0.714428
0
Calculate the 15min delayed BTC market price in GBP.
def _get_btc_gbp_15min(self) -> None:
    self._get_eur_gbp_last_daily()
    self.btc_gbp_15min = self.btc_eur_15min * self.eur_gbp_last_day
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_work(self) -> None:\n self._get_btc_eur_15min()\n print(\n f\"1 BTC = {self.btc_eur_15min} EUR\"\n f\"\\t\\t(15min delayed market price)\"\n )\n\n self._get_eur_gbp_last_month()\n print(\n f\"1 EUR = {self.eur_gbp_last_month} GBP\"\n f\"\\t(last month average rate)\"\n )\n\n self._get_btc_gbp_15min()\n print(\n f\"1 BTC = {self.btc_gbp_15min:.6f} GBP\"\n f\"\\t(BTC 15min delayed market price; GBP latest daily average rate)\"\n )", "def calculate_buy_price(price: float):\n return round(price / (1 + CONF.trade_advantage_in_percent / 100), 1)", "def _get_btc_eur_15min(self) -> None:\n with requests.get(BITCOIN_TICKER) as response:\n response.raise_for_status()\n json_data = response.json()\n\n self.btc_eur_15min = json_data[\"EUR\"][\"15m\"]", "def buying_price(self):\n buy_price = self.standard_init_price()\n # Special status and resources price adaptation\n if self.planet.status in [self.tradeitem.dps]:\n buy_price = (buy_price * 5) / 3\n\n elif self.planet.special in [self.tradeitem.cr]:\n buy_price = (buy_price * 3) / 4\n\n elif self.planet.special in [self.tradeitem.er]:\n buy_price = (buy_price * 4) / 3\n\n # randomize a bit\n moins = random.randrange(self.tradeitem.var)\n plus = random.randrange(self.tradeitem.var)\n buy_price = buy_price - moins + plus\n\n # price can't be negative\n if buy_price < 0:\n buy_price = 0\n\n return int(buy_price)", "def BuyingPrice(self):\n return self.buying_rice", "def target_buy_price(self):\n if self.period_tick == 0:\n return random.randint(1, 10)\n elif self.period_tick % self.perseverance == 0:\n # Player runs out of patience and decides to change target price.\n (avg_price,\n max_price,\n min_price) = self.market.get_stock_price_last_period()\n\n power = self.period_tick / self.perseverance\n target_price = min(min_price + power, self.money_balance * 0.5)\n return target_price\n else:\n return None", "def get_price():\n return uniform(1.0, 350.0)", "def CalculateTimeFrameGasEneregyCost(self, dth:float, dollarsPerDTH = 6.53535):\n\t\treturn dth * dollarsPerDTH", "def buy_cost(self):\n return self._manager.get_buy_price(self.name)", "def price(temp):\n now = datetime.datetime.now()\n r = requests.get(\"https://bitcoin.co.th/\")\n soup = BeautifulSoup(r.content, \"html.parser\")\n data = soup.find_all(\"div\", {\"class\": \"price\"})\n print(\"[%02i:%02i:%02i] Now BTC Price : \" % (now.hour, now.minute, now.second), end=\"\")\n for i in range(len(data)):\n price = (data[i].text)\n print(price)\n if price != temp: # Price Change\n line_sent(price)\n temp = price\n time.sleep(30) # Delay 30 second\n main(temp) # call function main for loop", "def field_buy(self, symbol):\r\n\r\n end_percent = 150\r\n current_price = 15#self.get_price()\r\n self.log(current_price)\r\n buys = {}\r\n new_price = current_price * 1.05\r\n while (new_price / current_price) > 150:\r\n self.log(\"New sell at: {}\".format(new_price))\r\n new_price *= 1.05\r\n\r\n self.log(buys)\r\n\r\n return buys", "def buy_one_cent_less_than_bid_or_50(self, bid_price):\n if bid_price:\n buying_price = self.buy_fixed_quantity_less_than_bid_price(\n bid_price=bid_price,\n fixed_quantity=0.01)\n else:\n buying_price = self.buy_fixed_price(50)\n return buying_price", "def get_sp500():\n sp500 = si.get_live_price(\"^GSPC\")\n sp500_trim = \"%.2f\" % sp500\n\n _time = datetime.datetime.now().timetuple()\n _time = time.mktime(tuple(_time))\n _time_label = f\"test\"\n\n return float(sp500_trim), int(_time)", "def usdToBtc(dollar, bitcoin):\n global btc\n global usd\n if usd>dollar:\n 
usd-=dollar\n btc+=bitcoin\n return True\n return False", "def compute_time_price(supplier_with_transaction):\n supplier_item = supplier_with_transaction.get('supplier_detail')\n transaction_item = supplier_with_transaction.get('supplier_transaction')\n # Check if there is time prices or not\n if supplier_with_transaction.get('time_price'):\n # Check if we will compute in complex or simple\n if not supplier_item.get('has_complex_minute_price'):\n # start to calculate the simple version for time price\n charging_start = transaction_item.get('charging_start')\n charging_end = transaction_item.get('charging_end')\n if charging_start and charging_end:\n charging_start_obj = datetime.strptime(charging_start, '%Y-%m-%dT%H:%M:%S')\n charging_end_obj = datetime.strptime(charging_end, '%Y-%m-%dT%H:%M:%S')\n duration_in_minutes = (charging_end_obj - charging_start_obj).total_seconds() / 60\n # Check for min duration\n if supplier_item.get('min_duration') and duration_in_minutes < supplier_item.get('min_duration'):\n duration_in_minutes = supplier_item.get('min_duration')\n price = supplier_item.get('simple_minute_price')\n total_price = price * duration_in_minutes\n return total_price\n else:\n # start calculate the complex version for time price\n total_price = 0\n if supplier_item.get('interval') == 'start':\n for start_rec in supplier_item.get('time_price'):\n timeframe = start_rec.get('billing_each_timeframe') * 60\n if start_rec.get('hour_from', 0) > start_rec.get('hour_to', 0):\n duration = (start_rec.get('hour_to') - start_rec.get('hour_from')) * 60\n else:\n duration = (start_rec.get('hour_to') - (24 - start_rec.get('hour_from'))) * 60\n duration_after_timeframe = duration % timeframe\n total_duration = duration + duration_after_timeframe\n total_price += total_duration * start_rec.get('minute_price')\n else:\n for end_rec in supplier_item.get('time_price'):\n timeframe = end_rec.get('billing_each_timeframe') * 60\n if end_rec.get('hour_from', 0) > end_rec.get('hour_to', 0):\n duration = (end_rec.get('hour_to') - end_rec.get('hour_from')) * 60\n else:\n duration = (end_rec.get('hour_to') - (24 - end_rec.get('hour_from'))) * 60\n duration_after_timeframe = duration % timeframe\n total_duration = duration - (timeframe - duration_after_timeframe)\n total_price += total_duration * end_rec.get('minute_price')\n\n return total_price\n else:\n total_price = 0\n return total_price", "def get_base_price(self):\n base_price = random.randint(5,9)\n print(base_price)\n\n # see if the order was placed during rush hour\n now = datetime.datetime.now()\n\n dow = now.weekday() # Mon is 0, Sun is 6\n hour = now.hour\n\n if hour >= 8 and hour < 11 and dow >= 0 and dow < 5:\n base_price += 4\n\n return base_price", "def purchase_price(self):\n if self.sold_on is None:\n return 0.0 # Not yet sold\n return 10000 - (.10 * self.miles)", "def gbm(price: float,\n mu: float,\n sigma: float,\n dt: float,\n n: int) -> np.array:\n y = np.exp((mu - sigma ** 2 / 2) * dt + sigma * np.random.normal(0, np.sqrt(dt), size=n).T)\n y = price * y.cumprod(axis=0)\n return y", "def standard_init_price(self):\n # If a system can't use something, its price is zero.\n _good = self.tradeitem\n if self.planet.tech_level < _good.tu and _good.name not in 'fuel':\n base_price = 0\n else:\n base_price = _good.plt + (self.planet.tech_level * _good.pi)\n # if good is highly requested, increase the price\n if self.planet.status in [_good.dps]:\n base_price = base_price + (base_price * 0.5)\n # large system: high production decreases prices\n 
base_price = (base_price * (100 - self.planet.system_size)) / 100\n\n # price can't be negative\n if base_price < 0:\n base_price = 0\n\n return int(base_price)", "def getprice():\n\n print(\"Get price\")\n latest_price = get_latest_price(item_code)\n return latest_price", "def price(self) -> float:\n return self.close", "def tick(price, tick_size=0.05):\n return round(price / tick_size)*tick_size", "def INVITE_COST(sent, isNonProfit=False):\n cost = 0\n if sent > 100:\n cost = 500 # $5\n if sent > 500:\n cost = 1000 # $10\n if sent > 1000:\n cost = 1500 # $15\n if sent > 2000:\n cost = 2000 # $20\n if sent > 10000:\n cost = 2500 # $25\n if isNonProfit:\n cost = cost * .75\n return int(round(cost))", "def get_base_price(self):\n\n price = randint(5, 9)\n\n now = datetime.now()\n weekday = now.weekday()\n hour = now.hour\n\n if weekday < 5 and 7 < hour < 12:\n price = price + 4\n\n return price", "def buy_and_pay(self):\n return self.price", "def buy_fixed_price(self, buying_price):\n\n print(f\"Ingresando orden a ${buying_price:,.2f}\".replace('.', ','))\n pyRofex.send_order(\n ticker=self.symbol,\n side=pyRofex.Side.BUY,\n price=buying_price,\n size=1,\n order_type=pyRofex.OrderType.LIMIT\n )\n return buying_price", "def _get_eur_gbp_last_daily(self) -> None:\n data = _get_ecb_data(FREQUENCY_DAILY, _ten_days_ago(), _today())\n\n self.eur_gbp_last_day = _get_latest_ecb_rate(data)", "def trading_cost(self) -> float:\n return self.__trading_cost", "def get_used_balance():\n try:\n if CONF.exchange == 'bitmex':\n position = EXCHANGE.private_get_position()\n if not position:\n return None\n return position[0]['currentQty']\n if CONF.exchange == 'kraken':\n result = EXCHANGE.private_post_tradebalance()['result']\n return round(float(result['e']) - float(result['mf']))\n if CONF.exchange == 'liquid':\n return round(get_crypto_balance()['used'] * get_current_price())\n\n except (ccxt.ExchangeError, ccxt.NetworkError) as error:\n LOG.error(RETRY_MESSAGE, type(error).__name__, str(error.args))\n sleep_for(4, 6)\n get_used_balance()", "def calculate_sell_price(price: float):\n return round(price * (1 + CONF.trade_advantage_in_percent / 100), 1)", "def unused_daily_balance(self):\n current_date = timezone.now()\n if self.date_billing_next > current_date:\n days_left = (self.date_billing_next - current_date).days\n return round(days_left * self.plan_cost.daily_cost, 2)\n return 0", "def schmeckle2usd(schmeckle):\n return schmeckle * 148.0", "def used_daily_balance(self):\n current_date = timezone.now()\n if current_date > self.date_billing_start:\n days_used = (current_date - self.date_billing_start).days\n return round(days_used * self.plan_cost.daily_cost, 2)\n return 0", "def __get_deal_price(self):\n return self.create_random_decimal(min=1, max=100000)", "def amount_to_pay_in_period(self):\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n period_start, period_end = self.get_current_period()\n price_per_day = (\n self.get_price_for_full_period() / (period_end - period_start).days\n )\n days_not_used = 30 * self.frequency - (date.today() - period_start).days\n return int(price_per_day * days_not_used)", "def calc_total_btc():\n total_btc_val = 0\n for holding in val[\"accHoldings\"]:\n free = val[\"accHoldings\"][holding][\"free\"]\n locked = val[\"accHoldings\"][holding][\"locked\"]\n total = float(free) + float(locked)\n\n if holding + \"BTC\" in val[\"coins\"]:\n if holding != \"BTC\" and total * float(val[\"tickers\"][holding + \"BTC\"][\"lastPrice\"]) > 
0.001:\n\n coin_total = total * float(val[\"tickers\"][holding + \"BTC\"][\"lastPrice\"])\n total_btc_val += coin_total\n\n elif holding == \"BTC\":\n total_btc_val += total\n\n total_formatted = '{number:.{digits}f}'.format(number=float(total_btc_val), digits=8) + \" BTC\"\n # print(\"total: \" + total_formatted)\n return total_formatted", "def block6_price(self):\n return self._safe_value(VAR_BLOCK6PRICE, float)", "def bitcoin():\r\n # get the bitcoin price, until 2 decimal number, based on US dollar\r\n bitcoin_info = requests.get(\"https://api.binance.com/api/v3/avgPrice?symbol=BTCUSDT\")\r\n btcusdt = format(float(bitcoin_info.json()[\"price\"]), '.2f')\r\n\r\n if users_language[update.effective_chat.id] == \"english\":\r\n return \"bitcoin : $ \" + btcusdt\r\n elif users_language[update.effective_chat.id] == \"persian\":\r\n return \" دلار\" + btcusdt + \"بیت کوین : \"", "def buy_cost(self, buy_price, count):\n fee = 20 if math.floor(count*buy_price*1000*self.fee_count*self.handling_fee) <= 20 else math.ceil(count*buy_price*1000*self.fee_count*self.handling_fee)\n return int(buy_price*1000*count+fee)", "def conversion_rate(self, price):\n\n price = ( price - 20 ) / 2\n\n a = self.a_conversion_rate\n b = self.b_conversion_rate\n c = self.c_conversion_rate\n d = self.d_conversion_rate\n e = self.e_conversion_rate\n # price_min = self.price_min\n # Probabilities of conversion given a price\n return c * np.exp ( a * ( price - e) ** (1/ (2 * b) ) ) * (d - 2*price) ** (3/2)", "def CalculateGasEneregyCost(self, dollarsPerDTH = 6.53535):\n\t\tdthUsed = self.building_hvac.GetGasDTH()\n\t\treturn dthUsed * dollarsPerDTH", "def abbott_steam():\n per_klb = 20 # dollars per klb of steam\n kwh_eq = to_kwh(1) # kwh equivalent of steam\n per_kwh = per_klb / kwh_eq\n return per_kwh", "def detectar_constantes_btc():\n\n ultimos_precios = persistence.traer_ultimos_precios_btc()\n prev = int(ultimos_precios[0])\n porcentaje = 0\n counter = 0\n for i in range(1,60):\n if prev < int(ultimos_precios[i]):\n counter = counter + 1\n elif prev > int(ultimos_precios[i]):\n counter = counter - 1\n prev = int(ultimos_precios[i])\n porcentaje = calcular_porcentaje(int(ultimos_precios[0]), int(ultimos_precios[i]))\n porcentaje = round(porcentaje, 2)\n if counter > 10 and porcentaje > 1:\n return porcentaje\n elif counter < -10 and porcentaje < -1:\n return porcentaje\n else:\n return 0", "def get_base_price(self):\n\n base_price = random.randint(5, 9)\n week_day = datetime.datetime.weekday(self.time_of_order)\n hour = self.time_of_order.hour\n\n if week_day in range(0, 5) and hour in range(8, 11):\n base_price = base_price + 4\n\n return base_price", "def servicing_cost_long(self) -> float:\n return self.__servicing_cost_long", "def cost(self):\n assert(self._calculated)\n settings = config_get_group('shipping.modules.ups')\n if settings.HANDLING_FEE and Decimal(str(settings.HANDLING_FEE)) > Decimal(0):\n self.charges = Decimal(self.charges) + Decimal(str(settings.HANDLING_FEE))\n\n return(Decimal(self.charges))", "async def btc(self, ctx):\n try:\n btc_bitstamp_json = await self.bot.aiojson(\"https://www.bitstamp.net/api/ticker\")\n\n btc_currentprice_rate = Decimal(btc_bitstamp_json[\"last\"])\n btc_currentprice_string = self.format_currency(btc_currentprice_rate)\n\n btc_lastopen_rate = Decimal(btc_bitstamp_json[\"open\"])\n btc_lastopen_string = self.format_currency(btc_lastopen_rate)\n\n btc_high_string = self.format_currency(btc_bitstamp_json[\"high\"])\n btc_low_string = 
self.format_currency(btc_bitstamp_json[\"low\"])\n btc_bid_string = self.format_currency(btc_bitstamp_json[\"bid\"])\n btc_ask_string = self.format_currency(btc_bitstamp_json[\"ask\"])\n btc_volume_string = str(btc_bitstamp_json[\"volume\"]) + \" BTC\"\n\n btc_diff = btc_currentprice_rate - btc_lastopen_rate\n btc_change_percentage = (\n 100 * Decimal(btc_diff) / Decimal(btc_currentprice_rate))\n btc_change_percentage_string = f\"{str(btc_change_percentage)[:6]}%\"\n\n btc_change_color = self.get_change_color(btc_change_percentage, 10)\n\n btc_data_timestamp = datetime.datetime.utcfromtimestamp(\n int(btc_bitstamp_json[\"timestamp\"]))\n\n link = \"https://bitcoincharts.com/charts/chart.png?width=600&m=bitstampUSD&r=30\"\\\n f\"&t=S&v=1&cacheinval={int(time.time())}\"\n embed = discord.Embed(color=btc_change_color,\n timestamp=btc_data_timestamp)\n\n embed.set_author(name=\"30 Day BTC Chart and Info\",\n icon_url=\"https://bitcoin.org/img/icons/opengraph.png\")\n embed.set_image(url=link)\n embed.set_footer(text=\"Chart supplied by bitcoincharts.com under CC-BY-SA 3.0, \"\\\n \"price info supplied by BitStamp. \" + self.legal_notice)\n\n embed.add_field(name=\"Current Price\", value=btc_currentprice_string)\n embed.add_field(name=\"Opening Price\", value=btc_lastopen_string)\n\n embed.add_field(name=\"Change\", value=btc_change_percentage_string)\n embed.add_field(name=\"Volume\", value=btc_volume_string)\n\n embed.add_field(name=\"High\", value=btc_high_string)\n embed.add_field(name=\"Low\", value=btc_low_string)\n\n embed.add_field(name=\"Bid\", value=btc_bid_string)\n embed.add_field(name=\"Ask\", value=btc_ask_string)\n\n await ctx.send(embed=embed)\n except:\n await ctx.send(\"Error while fetching BTC data.\")\n self.bot.log.error(traceback.format_exc())", "async def btc(message, currency):\n\n prices = await utils.get_btc_prices()\n if not prices:\n return \"failed to download & parse data from blockchain api.\"\n\n price = prices.get(currency.upper())\n if price is None:\n return \"unrecognized currency code.\"\n\n return \"The price of BTC in {} is {}{}\".format(currency.upper(), price[\"symbol\"], price[\"15m\"])", "async def btc( ctx):\r\n await ctx.message.delete()\r\n r = requests.get(\r\n \"https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD,EUR,GBP\"\r\n )\r\n r = r.json()\r\n usd = r[\"USD\"]\r\n eur = r[\"EUR\"]\r\n gbp = r[\"GBP\"]\r\n em = discord.Embed(\r\n description=f\"USD: `{str(usd)}$`\\n\\nEUR: `{str(eur)}€`\\n\\nGBP: `{str(gbp)}£`\"\r\n )\r\n em.set_author(\r\n name=\"Bitcoin\",\r\n icon_url=\"https://cdn.pixabay.com/photo/2013/12/08/12/12/bitcoin-225079_960_720.png\",\r\n )\r\n await ctx.send(embed=em)\r\n ### I hope this code is so horrible I'm never allowed to code embeds again\r", "def getGemPrice():\n return Gw2Spidy._request('gem-price')", "def transport_cost_per_t(self):\n return safe_divide(self.reseller.operating_expenses(), self.quantity_fieldside)", "def get_transfer_fee(value: float) -> float:\n return (value * (0.99 / 100)) + 4.9", "def porcentaje_btc_24():\n\n viejo = 1\n nuevo = 1\n viejo = persistence.traer_masviejo_precio_btc()\n nuevo = persistence.traer_ultimo_precio_btc()\n viejo = int(viejo)\n nuevo = int(nuevo)\n porcentaje = 100 * (nuevo - viejo) / viejo\n porcentaje = round(porcentaje, 2)\n return porcentaje", "def market_buy_limit(self, market_symbol, quantity, rate, time_in_force='GOOD_TIL_CANCELLED'):\n return self.post('orders', {\n 'marketSymbol': market_symbol,\n 'direction': 'BUY',\n 'type': 'LIMIT',\n 'quantity': 
quantity,\n 'limit': rate,\n 'timeInForce': time_in_force\n }, auth=True)", "def calc_market_order_buy(self, time, order_volume):\n rec = self.select_order_book_price_with_retry(time)\n if rec is None:\n return None\n\n sell_min, sell_volume, buy_max, buy_volume = rec\n\n if order_volume * 1.5 < sell_volume: # 1.5 means enough margin\n return sell_min\n else:\n return sell_min + PRICE_UNIT", "def get_billed_amount(self):\n return Leg.objects.filter(transaction__recurred_cost__recurring_cost=self, amount__gt=0).sum_to_balance()", "def initial_cash_balance(self) -> float:\n return self.buy_budget * len(self.stocks)", "def priceit(self):\n paytree = np.zeros((self.steps+1,self.steps+1))\n paytree[-1,:] = np.array( list( map(lambda x:max(x-self.s,0.0),self.pricetree[-1,:]) ) )\n discount = math.exp( self.r*self.deltatime )\n for i in range(self.steps,0,-1):\n for j in range(i):\n paytree[i-1][j] = (paytree[i][j]*self.upprob +paytree[i][j+1]*(1-self.upprob))/discount\n return paytree[0][0]", "def calc_new_bid_price_after_failure( self, cheapest_price ):\n\n new_bid_price = cheapest_price * 1.1\n return str(new_bid_price)", "def selling_price(self):\n # If a system can't produce something, its price is zero.\n _good = self.tradeitem\n if self.planet.tech_level < _good.tp and _good.name not in 'fuel':\n sell_price = 0\n else:\n sell_price = self.standard_init_price()\n # raise a bit, randomized\n sell_price = sell_price + random.randrange(self.tradeitem.var)\n\n return int(sell_price)", "def block_period_consumption(self):\n return self._safe_value(VAR_BLOCKPERIODCONSUMPTION, float)", "def track_price():\n r = requests.get('https://finance.yahoo.com/quote/EURPLN=X?p=EURPLN%3DX&.tsrc=fin-srch&guce_referrer'\n '=aHR0cHM6Ly9maW5hbmNlLnlhaG9vLmNvbS8_Z3VjZV9yZWZlcnJlcj1hSFIwY0hNNkx5OTNkM2N1WjI5d'\n 'loyeGxMbU52YlM4Jmd1Y2VfcmVmZXJyZXJfc2lnPUFRQUFBRG1vS3ROMkF5bzFpTDRpd29Td0Z4Z0NDTVN'\n 'XU3M0UkNoa2pBcGl2NmxobmxJcWRab0JIWUF6NVJuNHlZdkN1WTRBNEdwVTRfWjBZQ3JNM1RwX2ZMd05rej'\n 'g0TkVWdksyUzA3LVNmNXdndUJCUjhieG5sZEN4dGRCRmV6eEZfMnNQdEpQeXJ6UzREeV9WRUF4ZXNUMXNLYz'\n 'lnTm1pSlFCV3R6LVpLX0hvc2p5Jl9ndWNfY29uc2Vud'\n 'F9za2lwPTE1OTcwODc3MTg&guce_referrer_sig=AQAAAKzjjM2--Diw1M3gykrGHjIn9NdqSch_odxmo6xqtgD4pNo'\n 'anrEQBgPoZ9xkh8HPYFN1_9mpio4Fg2tEGa4GrsK69bHe4yN9LactTwdKEuBxazZPO751TNSeFH_lltkNoN1k7D6I978v'\n '1eXB9WaCp0NUgbRZRmbYEdoZmkmQvUq7&_guc_consent_skip=1597087949')\n if r.status_code != 200:\n raise ConnectionError\n else:\n soup = BeautifulSoup(r.text, 'html.parser')\n price_elem = soup.find('span', {\"class\": \"Trsdu(0.3s) Fw(b) Fz(36px) Mb(-4px) D(ib)\"})\n return float(price_elem.text)", "def get_gas_price_and_wait_time(price_key='average', wait_key='avgWait'):\n if current_app.config.get('MAINNET') == True:\n try:\n payload = fetch_gas_pricing()\n except Exception:\n raise Exception('Error fetching JSON from EthGasStation API')\n # our json will include an avg price and an avg wait time. 
we'll 2x the wait just in case...\n price = payload.get(price_key)\n wait = payload.get(wait_key)\n if price and wait:\n # assure these are ints...\n if not isinstance(price, int):\n price = ceil(price)\n if not isinstance(wait, int):\n wait = ceil(wait)\n # return (price_in_gwei, doubled_wait_time_seconds) NOTE that we only use the wait as a max timeout\n return (ceil(price / 10), (wait * 2) * 60)\n else:\n raise Exception('Error fetching values from EthGasStation API')\n else:\n return(C.POA_GAS_PRICE, C.EVM_TIMEOUT)", "def compute_kwh_price(supplier_with_transaction):\n\n supplier_item = supplier_with_transaction.get('supplier_detail')\n total_kwh_price = 0\n if supplier_item.get('has_time_based_kwh') and supplier_item.get('time_price'):\n # start to compute as complex\n for rec in supplier_item.get('time_price'):\n if rec.get('hour_from') and rec.get('hour_to'):\n if rec.get('hour_from') > rec.get('hour_to'):\n duration = (rec.get('hour_to') - rec.get('hour_from')) * 60\n else:\n duration = (rec.get('hour_to') - (24 - rec.get('hour_from'))) * 60\n else:\n duration = 0\n total_kwh_price += duration * rec.get('kwh_price', 0)\n else:\n # start to calculate the simple version for kwh price\n total_kwh_price = 24 * supplier_item.get('kwh_price', 0)\n return total_kwh_price", "def __call__(self, auctioneer):\n curr_bid = auctioneer.current_bid\n bid_price = curr_bid * self._bid_increase_perc\n if bid_price <= self._budget and self.get_bid_probability() > 0.3:\n self._highest_bid = bid_price\n return bid_price\n return 0", "def get_btc_supply(normalize=False, at_block_index=None):\n block_count = config.CURRENT_BLOCK_INDEX if at_block_index is None else at_block_index\n blocks_remaining = block_count\n total_supply = 0 \n reward = 50.0\n while blocks_remaining > 0:\n if blocks_remaining >= 210000:\n blocks_remaining -= 210000\n total_supply += 210000 * reward\n reward /= 2\n else:\n total_supply += (blocks_remaining * reward)\n blocks_remaining = 0\n \n return total_supply if normalize else int(total_supply * config.UNIT)", "def base_price(self):\n return self._base_price", "def btc_scraping():\n\n url_btc_web = requests.get('https://awebanalysis.com/es/coin-details/bitcoin/')\n soup = BeautifulSoup(url_btc_web.content, 'html.parser')\n\n # we need the class html\n result = soup.find('td', {'class': 'wbreak_word align-middle coin_price'})\n btc_price = result.text\n\n return btc_price", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://bittrex.com/api/v1.1/public/getticker?market=\"+pair\n jsonResponse = self.getJson(uri)\n currentPrice = jsonResponse[\"result\"][\"Last\"]\n return currentPrice", "def calculate_cost(self):\n booking_days, booking_hours = self.calculate_daily_hourly_billable_counts()\n day_cost = booking_days * Decimal(self.vehicle.type.daily_rate)\n hour_cost = booking_hours * Decimal(self.vehicle.type.hourly_rate)\n if hour_cost > self.vehicle.type.daily_rate:\n hour_cost = self.vehicle.type.daily_rate\n return float(day_cost + hour_cost)", "async def money(ctx):\n pass", "def get_sell_cost(self):\n return round(0.75 * self.sell_price[self.level - 1])", "def getPrice(coin,cur):\n price = 'https://api.coinmarketcap.com/v1/ticker/' + coin\n json = requests.get(price).json()\n value = json[0]['price_' + str(cur)]\n return value", "def desired_price(self):\n return self._desired_price", "def ebay_fee(sell_price):\r\n\r\n p50 = 0.13 # for amount $50 and lower\r\n p50_to_1000 = 0.05 # for $50.01-$1000\r\n p1000 = 0.02 # 
for $1000.01 and higher\r\n fee = 0.50 # fee to list item\r\n\r\n if sell_price <= 50:\r\n fee = fee + (sell_price*p50)\r\n elif sell_price <= 1000:\r\n fee = fee + (50*p50) + ((sell_price-50)*p50_to_1000)\r\n else:\r\n fee = fee + (50*p50) + ((1000-50)*p50_to_1000) \\\r\n + ((sell_price-1000)*p1000)\r\n return fee", "def calc_price_for_period(prev_price):\n result = []\n for i in range(1, N+1):\n price = prev_price + calc_price_delta(prev_price, i)\n prev_price = price\n result.append(price)\n return result", "def budget(self):\n\n budget = (_House.closing_cost*self.vars['after_repair_value']) - self.vars['purchase_price'] - self.vars['profit'] - _House.broker_fee\n return float(round(budget, 2))", "def block_reward_USD(df):\n\n miners_revenue_USD = df['Miners Revenue (USD)']\n tx_fees_USD = df['Tx fees (USD)']\n result = miners_revenue_USD - tx_fees_USD\n result.name = 'Block Reward (USD)'\n return out(SETTINGS, df, result)", "def buy_limit_order(self, price=0, volume=0):\n auth = CoinbaseExchangeAuth(self.api_key, self.secret_key, self.passphrase)\n data = {\n \"size\": volume,\n \"price\": price,\n \"side\": \"buy\",\n \"product_id\": self.rate,\n \"type\": \"limit\"\n # \"time_in_force\": 'GTC',\n # \"cancel_after\": (datetime.now() + timedelta(minutes=10)).strftime('%M,%H,%d')\n }\n\n buy = requests.post(self.url + 'orders',\n data=json.dumps(data),\n auth=auth).json()\n buy['txid'] = buy['id']\n\n logging.debug(buy)\n return buy", "def get_quote(symbol):\n\t\n\t# For Step C: Replace CODE HERE to get the stock\n\t# prices from the Yahoo Finance website using\n\t# requests and Beautiful Soup\n\tprices = ['20', '25', '30', '30', '30', '20']\n\tprice = prices[0]\n\tprev_price = '10'\n\n\ttext = \"Start watching \" + symbol + \": Price: \" + price\n\tprint(text)\n\tlogging.info(text)\n\n\ti = 0 # not needed with Step C (remove)\n\n\t# Start watching and continue until CTRL-Break\n\twhile True:\n\t\n\t\t# Get Price with Steps A and B only\n\t\t# Step C use requests and Beautiful Soup\n\t\tprice = prices[i%6]\n\n\t\t# Send price for symbol to log\n\t\tlogging.info(symbol + \"\\t\" + price)\n\n\t\ti = i + 1 # not needed with Step C (remove)\n\n\t\t# Check for price difference and send email,\n\t\t# if different\n\t\tif price != prev_price:\n\t\t\ttext = symbol + \" now at \" + price + \\\n\t\t\t\t \"; was \" + prev_price\n\t\t\tprint(text)\n\t\t\tsend_email(text)\n\t\t\tprev_price = price\n\n\t\ttime.sleep(WAIT_INTERVAL)", "def one_second_update(self):\n\n val[\"timeRunning\"] += 1\n total_btc_value = self.calc_total_btc()\n\n self.mw.total_btc_label.setText(\"<span style='font-size: 14px; color: #f3ba2e; font-family: Arial Black;'>\" + total_btc_value + \"</span>\")\n\n total_usd_value = '{number:,.{digits}f}'.format(number=float(total_btc_value.replace(\" BTC\", \"\")) * float(val[\"tickers\"][\"BTCUSDT\"][\"lastPrice\"]), digits=2) + \"$\"\n\n self.mw.total_usd_label.setText(\"<span style='font-size: 14px; color: white; font-family: Arial Black;'>\" + total_usd_value + \"</span>\")\n\n last_btc_price = float(val[\"tickers\"][\"BTCUSDT\"][\"lastPrice\"])\n last_btc_price_formatted = '{number:,.{digits}f}'.format(number=last_btc_price, digits=2) + \"$\"\n \n\n if last_btc_price > self.last_btc_price:\n last_color = Colors.color_green\n elif last_btc_price == self.last_btc_price:\n last_color = Colors.color_lightgrey\n else:\n last_color = Colors.color_pink\n\n self.mw.btc_price_label.setText(\"<span style='color: \" + last_color + \"'>\" + last_btc_price_formatted + \"</span>\")\n 
self.last_btc_price = last_btc_price\n\n operator = \"\"\n percent_change = float(val[\"tickers\"][\"BTCUSDT\"][\"priceChangePercent\"])\n if percent_change > 0:\n operator = \"+\"\n percent_color = Colors.color_green\n else:\n percent_color = Colors.color_pink\n\n btc_percent = operator + '{number:,.{digits}f}'.format(number=percent_change, digits=2) + \"%\"\n self.mw.btc_percent_label.setText(\"<span style='color: \" + percent_color + \"'>\" + btc_percent + \"</span>\")\n\n high = float(val[\"tickers\"][\"BTCUSDT\"][\"highPrice\"])\n low = float(val[\"tickers\"][\"BTCUSDT\"][\"lowPrice\"])\n vol = float(val[\"tickers\"][\"BTCUSDT\"][\"volume\"])\n\n high_formatted = '{number:,.{digits}f}'.format(number=high, digits=2) + \"$\"\n low_formatted = '{number:,.{digits}f}'.format(number=low, digits=2) + \"$\"\n vol_formatted = '{number:,.{digits}f}'.format(number=vol, digits=2) + \" BTC\"\n\n self.mw.btc_high_label.setText(\"<span style='color: \" + Colors.color_green + \"'>\" + high_formatted + \"</span>\")\n self.mw.btc_low_label.setText(\"<span style='color: \" + Colors.color_pink + \"'>\" + low_formatted + \"</span>\")\n self.mw.btc_vol_label.setText(\"<span style='color: \" + Colors.color_lightgrey + \"'>\" + vol_formatted + \"</span>\")\n\n\n self.mw.debug.setText(str(val[\"volDirection\"]))\n\n self.mw.debug.setText('{number:.{digits}f}'.format(number=float(val[\"volDirection\"]), digits=4) + \"BTC\")\n\n self.percent_changes()\n self.volume_values()\n\n self.check_websocket()\n\n self.update_stats()\n # only update the currently active table\n tab_index_botLeft = self.mw.tabsBotLeft.currentIndex()\n\n if tab_index_botLeft == 3:\n self.mw.holdings_table.update_holding_prices()\n val[\"indexTabOpen\"] = False\n elif tab_index_botLeft == 0:\n self.mw.coin_index.update_coin_index_prices()\n\n # decouple eventually\n val[\"indexTabOpen\"] = True\n # self.start_kline_iterator()\n else:\n val[\"indexTabOpen\"] = False\n self.mw.coin_index.start_kline_iterator()", "def get_current_price(self):\n highest_bid = sorted([bid.amount for bid in self.bids])[-1] if self.bids else 0\n return max(self.starting_price, highest_bid)", "def Gtilde(x):\n gt1 = 1.808 * cbrt(x) / np.sqrt(1 + 3.4 * cbrt(x) ** 2.)\n gt2 = 1 + 2.210 * cbrt(x) ** 2. + 0.347 * cbrt(x) ** 4.\n gt3 = 1 + 1.353 * cbrt(x) ** 2. 
+ 0.217 * cbrt(x) ** 4.\n return gt1 * (gt2 / gt3) * np.exp(-x)", "def main(price, service, vat):\n service = (price * 10)/100\n if service < 50:\n service = 50\n elif service > 1000:\n service = 1000\n price += service\n vat = (price * 7)/100\n price += vat\n print(\"%.2f\" % (price))", "def block5_price(self):\n return self._safe_value(VAR_BLOCK5PRICE, float)", "def trend_price_up(self):\n raise NotImplementedError()", "def block8_price(self):\n return self._safe_value(VAR_BLOCK8PRICE, float)", "def money_in_bank_given(initial, time_weeks):\n return initial * 2 ** time_weeks", "def block7_price(self):\n return self._safe_value(VAR_BLOCK7PRICE, float)", "def best_price(self):\n # TODO rename this to \"display_lowest_price\" or something...\n price = self.lowest_price\n if price:\n return Decimal.quantize(price.per_kg, settings.DECIMAL_CENTS) # round to cents", "def send_btc_price(message):\n\n bot_token = TOKEN\n chat_id = ID\n sendText = 'https://api.telegram.org/bot' + bot_token + '/sendMessage?chat_id=' + chat_id + '&parse_mode=Markdown&text=' + message\n\n response = requests.get(sendText)\n\n return response", "def find_balanced_budget_tax(c):\n def steady_state_budget(t):\n e, u, w = compute_steady_state_quantities(c, t)\n return t - u * c\n\n tau = brentq(steady_state_budget, 0.0, 0.9 * c)\n return tau", "def buy_limit(self, market, quantity, rate):\n\n result = self.api_query('Trade', {'type':'buy', 'pair': market, 'amount': quantity, 'rate':'%.8f'%rate})\n return result", "def time_for_travel(self):\n return great_circle(self.pickupcoords, self.dropoffcoords).miles * 3600 / 25", "def get_price(curr: str):\r\n\tif curr in COIN_VALUES:\r\n\t\treturn\r\n\r\n\tapi_delay('prices')\r\n\tresp = requests.get(\"https://api.coinstats.app/public/v1/coins?skip=0&limit=20&currency=USD\")\r\n\tif resp.status_code == 200:\r\n\t\tinfo = json.loads(resp.text)['coins']\r\n\t\tfor x in info:\r\n\t\t\tif x['name'] == curr:\r\n\t\t\t\tCOIN_VALUES[curr] = x['price']\r\n\telse:\r\n\t\tprint(f'Failed to get price of {curr}')", "def current_close_price(self) -> float:\n opening = self._prices.open[self._offset]\n closing = self._prices.close[self._offset]\n real_price = opening * (1.0 + closing)\n return real_price", "def get_complete_hourly_prices(coin_symbol: str, base: str):\n all_prices = []\n delta = 0\n while True:\n answer = get_90d_of_hourly_prices(coin_symbol, base, datetime.now() - timedelta(days=delta))\n if not answer:\n break\n delta += 90\n all_prices.extend(answer)\n return tidy_up_prices(all_prices)", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://www.bitstamp.net/api/v2/ticker/\"\n requestUrl = uri + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[\"last\"]\n return currentPrice", "def to_btc(value):\n value = float(value) / 100000000\n value = format(value, \".9f\")\n return value", "def initial_price(self) -> float:\n return self.__initial_price" ]
[ "0.6735869", "0.6266725", "0.61613244", "0.6135455", "0.6012119", "0.5998848", "0.5924012", "0.5846881", "0.58299065", "0.58050394", "0.5770253", "0.5769695", "0.576529", "0.5724111", "0.57105625", "0.5661389", "0.563068", "0.56113887", "0.560933", "0.5585725", "0.5583436", "0.5555005", "0.5533116", "0.55289125", "0.55273867", "0.5526039", "0.5513316", "0.5493882", "0.54899186", "0.5470674", "0.5462603", "0.54351956", "0.5413897", "0.54123336", "0.5411547", "0.5399551", "0.5353007", "0.5352095", "0.53342706", "0.5330697", "0.5329621", "0.5327512", "0.5325002", "0.5313644", "0.5308436", "0.53055567", "0.53054106", "0.52981555", "0.5297642", "0.52775544", "0.52670395", "0.52654076", "0.52543074", "0.5252367", "0.5251391", "0.5246713", "0.52455", "0.52436525", "0.5243587", "0.5228405", "0.52186376", "0.52177954", "0.5204247", "0.51756185", "0.51709527", "0.51534677", "0.5151128", "0.51502705", "0.51441747", "0.51402587", "0.51359737", "0.51320446", "0.5131005", "0.5123266", "0.5115605", "0.51148486", "0.5112212", "0.5097241", "0.5092283", "0.5088224", "0.50854754", "0.5085457", "0.5079436", "0.50730157", "0.5071131", "0.5071128", "0.506981", "0.5069533", "0.50677115", "0.50650793", "0.50634366", "0.50631785", "0.50583494", "0.50571644", "0.5057013", "0.5052384", "0.5051786", "0.5047619", "0.5045889", "0.504437" ]
0.7985306
0
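The positive document for the query above only performs the final multiplication; the 15-minute BTC/EUR price and the EUR/GBP daily rate come from helper methods, several of which appear among the negatives. A minimal stand-alone sketch of the same flow follows. The blockchain.info /ticker URL is an assumption (only the BITCOIN_TICKER constant name is visible in the data), and the EUR-to-GBP rate is passed in as a plain float instead of being fetched from the ECB API as the original helpers do.

import requests

BITCOIN_TICKER = "https://blockchain.info/ticker"  # assumed endpoint; the data only shows the constant name


def btc_gbp_15min(eur_gbp_rate: float) -> float:
    """Return the 15-minute delayed BTC market price converted to GBP."""
    # Fetch the 15-minute delayed BTC price quoted in EUR.
    with requests.get(BITCOIN_TICKER, timeout=10) as response:
        response.raise_for_status()
        btc_eur_15min = response.json()["EUR"]["15m"]
    # Convert using the supplied EUR->GBP reference rate.
    return btc_eur_15min * eur_gbp_rate


if __name__ == "__main__":
    # 0.85 is a placeholder EUR->GBP rate, not a value taken from the dataset.
    print(f"1 BTC = {btc_gbp_15min(0.85):.6f} GBP")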
Retrieve and display the data requested in the requirements.
def do_work(self) -> None:
    self._get_btc_eur_15min()
    print(
        f"1 BTC = {self.btc_eur_15min} EUR"
        f"\t\t(15min delayed market price)"
    )

    self._get_eur_gbp_last_month()
    print(
        f"1 EUR = {self.eur_gbp_last_month} GBP"
        f"\t(last month average rate)"
    )

    self._get_btc_gbp_15min()
    print(
        f"1 BTC = {self.btc_gbp_15min:.6f} GBP"
        f"\t(BTC 15min delayed market price; GBP latest daily average rate)"
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_required():\n return display_required()", "def _get_information(self):\n pass", "def get_details(self):", "def get_data():\n pass", "def get_entry(name, req, form):\n entry = {'requirement': name, 'form': form}\n form.initial['credits_needed'] = 0\n if req:\n form.instance = req\n form.initial['credits_needed'] = req.credits_needed\n return entry", "def req():\n\n if not current.auth.s3_logged_in():\n return None\n\n ADMIN = current.session.s3.system_roles.ADMIN\n settings = current.deployment_settings\n types = settings.get_req_req_type()\n\n get_vars = {}\n if len(types) == 1:\n t = types[0]\n if t == \"Stock\":\n get_vars = {\"type\": \"1\"}\n elif t == \"People\":\n get_vars = {\"type\": \"2\"}\n create_menu = M(\"Create\", m=\"create\", vars=get_vars)\n\n recurring = lambda i: settings.get_req_recurring()\n use_commit = lambda i: settings.get_req_use_commit()\n req_items = lambda i: \"Stock\" in types\n req_skills = lambda i: \"People\" in types\n\n return M(c=\"req\")(\n M(\"Current Needs\", f=\"organisation_needs\")(\n M(\"Create\", m=\"create\"),\n M(\"Import\", m=\"import\", restrict=[ADMIN]),\n ),\n M(\"Needs at Facilities\", f=\"site_needs\", m=\"summary\")(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Requests\", f=\"req\", vars=get_vars)(\n create_menu,\n M(\"List Recurring Requests\", f=\"req_template\", check=recurring),\n M(\"Map\", m=\"map\"),\n M(\"Report\", m=\"report\"),\n M(\"Search All Requested Items\", f=\"req_item\",\n check=req_items),\n M(\"Search All Requested Skills\", f=\"req_skill\",\n check=req_skills),\n ),\n M(\"Commitments\", f=\"commit\", check=use_commit)(\n ),\n M(\"Items\", c=\"supply\", f=\"item\")(\n M(\"Create\", m=\"create\"),\n M(\"Report\", m=\"report\"),\n M(\"Import\", m=\"import\", p=\"create\"),\n ),\n # Catalog Items moved to be next to the Item Categories\n #M(\"Catalog Items\", c=\"supply\", f=\"catalog_item\")(\n #M(\"Create\", m=\"create\"),\n #),\n M(\"Catalogs\", c=\"supply\", f=\"catalog\")(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Item Categories\", c=\"supply\", f=\"item_category\",\n restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n )", "def requires(self):\n return [GetListings()]", "def get_data_reqs(self):\n reqs = {\n \"requires_partial_lc\": True,\n \"metric\": self.metric,\n \"requires_hyperparameters\": False,\n \"hyperparams\": None,\n \"unlabeled\": False,\n \"unlabeled_factor\": 0,\n }\n return reqs", "def get_data(self):", "def get_requirement_info():\n links, requirements = [], []\n info = {'dependency_links': links, 'install_requires': requirements}\n requirements_path = 'requirements.txt'\n\n if not os.path.isfile(requirements_path):\n print('requirements.txt not found. 
Did you forget it?')\n return info\n\n reqs = filter(None, map(str.strip, open(requirements_path)))\n for line in reqs:\n if is_http(line):\n i = line.find('#egg=')\n if i == -1:\n raise SetupError('Missing \\'#egg=\\' in requirement link.')\n links.append(line[:i])\n requirements.append(line[i+5:])\n else:\n requirements.append(line)\n return info", "def get_data(self):\n pass", "def get_data(self):\n pass", "def get_data(self):\r\n pass", "def __verify_requirements(self):\n if self.major[1] not in self.data[self.root] or self.data[self.root][self.major[1]] is None:\n self.data[self.root][self.major[1]] = {\"Requirement\": []}\n elif \"Requirement\" not in self.data[self.root][self.major[1]] or self.data[self.root][self.major[1]][\"Requirement\"] is None:\n self.data[self.root][self.major[1]][\"Requirement\"] = []\n elif not isinstance(self.data[self.root][self.major[1]][\"Requirement\"], list):\n self.data[self.root][self.major[1]][\"Requirement\"] = [self.data[self.root][self.major[1]][\"Requirement\"]]", "def _fetch_data(self):\n pass", "def info():\n # -------- Task 1 -------------------------\n # Please complete the following information\n\n return {\"agent name\": \"?\", # COMPLETE HERE\n \"student name\": [\"?\"], # COMPLETE HERE\n \"student number\": [\"?\"]} # COMPLETE HERE", "def request_data(self):\n pass", "def requirements(self):\n requirements = []\n return requirements", "def getInfo():", "def get_info(self):\n self.exists = self.check_subscr()\n return self.attrs", "def get_data():\n return", "def fetch_data(self):", "def get_data(self):\n\n raise NotImplementedError('''\n Must Implement get_data. Call help() for details.\n ''')", "def get_com_data(self):\n self.form_url_str()\n if self.__print_url: print self.com_data_full_url\n self.download_json()\n self.get_datalist_fr_json()", "def get_info(self):\n pass", "def get_info(self):\n pass", "def get_data(self):\n\n self.read_expression()\n self.read_tfs()\n self.read_metadata()\n self.set_gold_standard_and_priors()", "def requirements():\n raise NotImplementedError()", "def test_render_data(self):\n url = '{}?is_bigcourse=0'.format(reverse(\n 'completion_data_view', kwargs={\n 'course_id': self.course.id}))\n self.response = self.staff_client.get(url)\n data = json.loads(self.response.content.decode())\n self.assertEqual(data['data'],[[False]])\n\n self.response = self.staff_client.get(url)\n self.assertEqual(self.response.status_code, 200)\n data = json.loads(self.response.content.decode())\n self.assertEqual(len(data['data']), 12)\n self.assertEqual(\n data['data'][-1], ['student@edx.org', 'student', '', '', '0/1', '0/1', 'No'])", "def get_requirement_strings(self):\n opts = self.get_options()\n return (\n opts.requirements,\n opts.timeout_requirements,\n opts.cov_requirements,\n opts.unittest2_requirements,\n )", "def show_data():", "def gather_project_entries(self):\n\n user_inputs = [\n self.customer_name.get(), self.proj_date.get(),\n self.proj_descrpt.get(), self.proj_estdatest.get(),\n self.proj_estdateend.get(), self.proj_estbudget.get(),\n self.proj_actdatest.get(), self.proj_actdateend.get(),\n self.proj_actcost.get()\n ]\n\n return self.check_input_empty(user_inputs)", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def details(self):\n pass", "def validate_requirements(self, key: str, value: str) -> str:\n if value or self.requirement_items:\n logger.warn('Requirements will not be set when using requirement items.')\n\n if self.requirement_items:\n value = ''\n for item in 
self.requirement_items:\n category = item.get('category')\n description = item.get('description')\n min_number_assets = item.get('min_number_assets')\n value += f'Category: {category}: {min_number_assets}\\n' \\\n f'Description: {description}\\n\\n'\n\n return value", "def requiresData():\n return True", "def data_requirements(self) -> List[DataRequirement]:\n return [\n self.bmi_cfg_data_requirement,\n self.forcing_data_requirement,\n self.hydrofabric_data_requirement,\n self.partition_cfg_data_requirement,\n self.realization_cfg_data_requirement,\n ]", "def data():\n return render_template(\n 'data.html',\n title='World Happiness Report',\n year=datetime.now().year,\n message='Main Data Model'\n )", "def test_get_recipe_information(self):\n pass", "def _get_data(self):\n raise NotImplementedError()", "def _get_spec_info(self):\n raise NotImplementedError()", "def test_get_node_requirements(self):\n pass", "def get(self):\n\n def export(q_dict):\n p_dict = copy.deepcopy(q_dict)\n # InputEx does not correctly roundtrip booleans, so pass strings\n p_dict['multiple_selections'] = (\n 'true' if q_dict.get('multiple_selections') else 'false')\n return p_dict\n\n key = self.request.get('key')\n\n if not CourseOutlineRights.can_view(self):\n transforms.send_json_response(\n self, 401, 'Access denied.', {'key': key})\n return\n\n if key:\n question = QuestionDAO.load(key)\n payload_dict = export(question.dict)\n else:\n payload_dict = {\n 'version': self.SCHEMA_VERSION,\n 'question': '',\n 'description': '',\n 'multiple_selections': 'false',\n 'choices': [\n {'score': '1', 'text': '', 'feedback': ''},\n {'score': '0', 'text': '', 'feedback': ''},\n {'score': '0', 'text': '', 'feedback': ''},\n {'score': '0', 'text': '', 'feedback': ''}\n ]}\n\n transforms.send_json_response(\n self, 200, 'Success',\n payload_dict=payload_dict,\n xsrf_token=XsrfTokenManager.create_xsrf_token(self.XSRF_TOKEN))", "def get_raw_requisites_data(url):\n response = requests.get(url)\n if response.status_code != 200:\n raise Exception('Error while making GET request: %s' % response.status_code)\n\n soup = BeautifulSoup(response.text)\n\n course_name = soup.find('span', course_name_class).text\n course_name = course_name.split('.')[0]\n course_name = ' '.join(course_name.split())\n course_data = course_name.split(' ')\n course_num = course_data[:-1]\n course_dept = ' '.join(course_data[0:len(course_data)-1])\n course_dept = dept_map.get(course_dept, course_dept)\n\n course_requisites = soup.find('span', {'id': enforced_requisites_id}).text\n return course_requisites, course_dept", "def detail(self, req):\n return self.index(req)", "def get():\n\n # \\todo List of available data, fetched and processed\n\n return jsonify({'valid_resources': ['balance', 'balance_usd', 'trade_history', 'balance_norm_price_history', 'open_orders']})", "def calculations(self):\n self.prerequisite_set = get_prerequisites(self.major, self.major2)\n self.check_prerequisites()\n self.get_requirements_for_majors()", "def display_record_purchase_form_return_data(trx_type: str) -> Dict:\n artist = st.text_input(\"Artist (separate multiple artists with ';')\")\n artist_country = st.text_input(\n \"(Artist) Country (one for each artist, separate with ';')\"\n )\n title = st.text_input(\"Title\")\n genre = st.selectbox(\"Genre\", app_utils.genre_list, 3)\n label = st.text_input(\"Label (separate multiple labels with ';')\")\n year = st.number_input(\"Year\", value=dt.date.today().year, format=\"%d\")\n record_format = st.selectbox(\"Format\", 
app_utils.record_format_list, 4)\n vinyl_color = st.text_input(\"Vinyl Color\", value=\"black\").lower()\n lim_edition = st.text_input(\"Lim Edition\")\n number = st.text_input(\"Number\")\n remarks = st.text_input(\"Remarks\")\n purchase_date = st.date_input(\"Purchase Date\", value=dt.date.today())\n price = st.number_input(\n \"Price\", value=20.00, min_value=0.00, step=5.00, format=\"%f\"\n )\n rating = st.text_input(\"Rating\")\n is_digitized = st.number_input(\n \"is digitized\", value=0, min_value=0, max_value=1, step=1, format=\"%i\",\n )\n is_active = st.number_input(\n \"is active\", value=1, min_value=0, max_value=1, step=1, format=\"%d\"\n )\n credit_value = st.number_input(\n \"Credits\", value=1, min_value=0, max_value=1, step=1, format=\"%d\"\n )\n\n record_data = {\n \"trx_type\": trx_type,\n \"artist\": artist if artist != \"\" else \"NA\",\n \"artist_country\": artist_country if artist_country != \"\" else \"NA\",\n \"title\": title if title != \"\" else None,\n \"genre\": genre,\n \"label\": label if label != \"\" else \"NA\",\n \"year\": year,\n \"record_format\": record_format,\n \"vinyl_color\": vinyl_color if vinyl_color != \"\" else None,\n \"lim_edition\": lim_edition if lim_edition != \"\" else None,\n \"number\": number if number != \"\" else None,\n \"remarks\": remarks if remarks != \"\" else None,\n \"purchase_date\": purchase_date,\n \"price\": price,\n \"rating\": rating\n if rating not in [\"\", \"None\"]\n else None, # TODO Check if that has solved the None problem\n \"is_digitized\": is_digitized,\n \"is_active\": is_active,\n \"credit_value\": credit_value,\n }\n return record_data", "def process_requirements(self):\r\n # Use local dicts and sets so that if there are exceptions, we don't\r\n # end up in a partially-initialized state.\r\n loaded = {}\r\n to_render = set()\r\n for attribute in self.get_attributes():\r\n loaded[attribute.name] = attribute.parse_from_xml(self.xml)\r\n if attribute.render:\r\n to_render.add(attribute.name)\r\n\r\n self.loaded_attributes = loaded\r\n self.to_render = to_render", "def _read_requirements():\n LOG.info(\"Reading rally requirements...\")\n for file_name in RALLY_REQUIREMENTS_FILES:\n LOG.debug(\"Try to read '%s'.\", file_name)\n with open(file_name) as f:\n data = f.read()\n LOG.info(\"Parsing requirements from %s.\" % file_name)\n yield file_name, parse_data(data)", "def get_info(self) -> Optional[Dict[str, Any]]:", "def show_proposal(self, req):\n z = 1\n y = z\n x = 1\n self.dico_product = {}\n for prod in req:\n if z <= 5:\n List_store = orm_imp.find_store(prod.id)\n print(\"Choix numéro\", z, \":\", prod.name, \"| score : \",\n prod.nutriscore, \"| Magasins : \", List_store,\n \"| Lien :\",\n prod.url, \"| \\n ==> description :\",\n prod.ingredient, \"\\n==================================\")\n self.dico_product.update({z: prod.id})\n x += 1\n z += 1\n return self.dico_product, y, x - 1", "def description():\n\n # Captures the global requests variable\n global _requests\n\n if request.method == 'POST':\n\n global _drivers\n \n IPAddr = g.IPAddr\n\n # All sensitive data in the session must be encrypted\n \n password = None\n if g.password:\n AESKey = [ord(elem) for elem in current_app.config['SECRET_KEY']]\n myAES = AES.AESEncryptor(key=AESKey)\n password = myAES.decrypt(g.password)\n \n user = g.user\n ontap_version = g.version\n \n _drivers = [oval.OVALDriver( ovalrequest, IPAddr=IPAddr, user=user, password=password, verbose=False, version=ontap_version ) for ovalrequest in _requests]\n 
current_app.logger.info(time.ctime() + \"\\tOVAL drivers initialized\")\n \n # we have handled the requests so we no longer need them\n _remove_persist_storage('filenames')\n _remove_persist_storage('processType')\n _remove_persist_storage('coreFactor')\n del _requests[:] \n \n return redirect(url_for('checks.results_overview'))\n\n\n # GET\n # Calls all the backend code\n _create_descriptions()\n return render_template('checks/description.html', requests=_requests)", "def getInfo(self):\n self.name, self.description = achievements[self.id]", "def get_info(self):\n return \"TODO !\"", "def get(self):\n key = self.request.get('key')\n\n if not CourseOutlineRights.can_view(self):\n transforms.send_json_response(\n self, 401, 'Access denied.', {'key': key})\n return\n\n if key:\n question = QuestionDAO.load(key)\n payload_dict = question.dict\n else:\n payload_dict = {\n 'version': self.SCHEMA_VERSION,\n 'question': '',\n 'description': '',\n 'graders': [\n {\n 'score': '1.0',\n 'matcher': 'case_insensitive',\n 'response': '',\n 'feedback': ''}]}\n\n transforms.send_json_response(\n self, 200, 'Success',\n payload_dict=payload_dict,\n xsrf_token=XsrfTokenManager.create_xsrf_token(self.XSRF_TOKEN))", "def details(self, identifier_type, identifier):\n self.require_librarian(flask.request.library)\n\n work = self.load_work(flask.request.library,\n identifier_type, identifier)\n if isinstance(work, ProblemDetail):\n return work\n\n annotator = AdminAnnotator(self.circulation, flask.request.library)\n\n # single_entry returns an OPDSEntryResponse that will not be\n # cached, which is perfect. We want the admin interface\n # to update immediately when an admin makes a change.\n return AcquisitionFeed.single_entry(self._db, work, annotator)", "def describe(self):\n if not self.name:\n raise ValueError(\"Sorry! 
id_type must be 'name'\")\n r = requests.get(f\"https://api.fda.gov/drug/ndc.json?search=brand_name:{self.drug_id}\")\n response = r.json()\n data = response['results'][0]\n self.brand_name = data['brand_name']\n self.generic_name = data['generic_name']\n self.active_ingredients = [i['name'] for i in data['active_ingredients']]\n self.pharm_class = get_pharm_class(self.drug_id, as_df=False)\n self.route = data['route']\n self.ndc = data['product_ndc']\n self.product_type = data['product_type']\n\n print(f\"Generic name: {self.generic_name}\")\n print(f\"Brand name: {self.brand_name}\")\n print(f\"Active ingredients: {self.active_ingredients}\")\n print(f\"Routes of administration: {self.route}\")\n print(f\"Pharmacologic Classes: {self.pharm_class}\")\n print(f\"NDC: {self.ndc}\")\n print(f\"Product type: {self.product_type}\")", "def test_render_data_researcher_user(self):\n url = '{}?is_bigcourse=0'.format(reverse(\n 'completion_data_view', kwargs={\n 'course_id': self.course.id}))\n self.response = self.client_data_researcher.get(url)\n data = json.loads(self.response.content.decode())\n self.assertEqual(data['data'],[[False]])\n\n self.response = self.client_data_researcher.get(url)\n self.assertEqual(self.response.status_code, 200)\n data = json.loads(self.response.content.decode())\n self.assertEqual(len(data['data']), 12)\n self.assertEqual(\n data['data'][-1], ['student@edx.org', 'student', '', '', '0/1', '0/1', 'No'])", "def info(self):", "def info(self):", "def required_tool_results():\n raise NotImplementedError()", "def quick_info_retrieve_view(request):\n kind_of_ballot_item = request.GET.get('kind_of_ballot_item', \"\")\n ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', \"\")\n return quick_info_retrieve_for_api(kind_of_ballot_item=kind_of_ballot_item,\n ballot_item_we_vote_id=ballot_item_we_vote_id)", "def detail(self, req):\n return self._get_models(req, is_detail=True)", "def info(self) -> DesignSpecs:\n return self._info", "def details(self):\n raise NotImplementedError()", "def get(self, request):\n LOGGER.info(\"Retrieving career planning data\")\n request_type = request.GET.get(\"request_type\")\n\n if request_type == \"SEARCH\":\n name = request.GET.get(\"menu\")\n result = CareerPlanning.objects.filter(manu__icontains=name)\n else:\n result = CareerPlanning.objects.all()\n career_planning_list = []\n\n for career_planning in result:\n career_planning_dict = model_to_dict(career_planning)\n career_planning_dict.pop(\"content\")\n career_planning_list.append(career_planning_dict)\n return Response({\"status\": \"SUCCESS\", \"data\": career_planning_list})", "def needs_by_status(cls):\n\n db = current.db\n\n # Extract the data\n table = current.s3db.req_need_line\n status = table.status\n number = table.id.count()\n query = (table.deleted == False)\n rows = db(query).select(status, number, groupby = status)\n\n # Build data structure for chart renderer\n rows = dict((row[status], row[number]) for row in rows)\n data = []\n for code, label, color in cls.REQ_STATUS:\n value = rows.get(code)\n data.append({\"label\": s3_str(label),\n \"value\": value if value else 0,\n \"color\": color,\n \"filterKey\": code,\n })\n\n return data", "def require_data(self, typename):\r\n self.required_data_products.add(typename)", "def receiveData():\r\n preference = request.get_json()\r\n program = preference.pop('program')\r\n enroll_yr = preference.pop('enroll_yr')\r\n enroll_sem = preference.pop('enroll_sem')\r\n spec = 0\r\n if 'spec' in preference:\r\n spec = 
int(preference['spec'])\r\n preference.pop('spec')\r\n\r\n program_link = 'https://programsandcourses.anu.edu.au/2019/program/'\r\n\r\n program_link = str(program_link) + str(program)\r\n # calculate which type of semester does the enrolled semester fall in\r\n # S1 in odd year, S2 in odd year, S1 in even year or S2 in even year \r\n if int(enroll_yr)%2 == 1:\r\n if int(enroll_sem)%2 == 1:\r\n sem = 1\r\n else:\r\n sem = 2\r\n else:\r\n if int(enroll_sem)%2 == 1:\r\n sem = 3\r\n else:\r\n sem = 4\r\n \r\n # call the pre-processing program which put the model in file test1.mzn & test1.dzn\r\n scraper = dp.DegreeRuleScraper(str(program_link))\r\n orders = scraper.build_program_order_struct()\r\n orders.buildAModel(preference, sem, spec)\r\n \r\n # call MiniZinc to solve for the model\r\n cmd = 'minizinc --solver OSICBC test1.mzn test1.dzn > plan.txt'\r\n os.system(cmd)\r\n jsondata = readmyJson('plan')\r\n \r\n return jsonify(jsondata)", "def completed_requirements(self, requirements):\n to_return = []\n for req in requirements:\n hrs = self.get_hours_by_req_type(req.fulfills)\n total = sum([float(le.num_hrs) for le in hrs])\n\n if total >= req.req_hrs:\n to_return.append(req)\n\n return to_return", "def require_data(self, typename):\n self.required_data_products.add(typename)", "def get_infos(self):\n infos = dict()\n infos['dataset'] = self._dataset_name()\n infos['task'] = self.task\n if self.task == 'sep_clean':\n data_license = [librispeech_license]\n else:\n data_license = [librispeech_license, wham_noise_license]\n infos['licenses'] = data_license\n return infos", "def get_data(self):\n raise NotImplementedError(\"Not implemented!\")", "def data():\n return render_template(\n 'data.html',\n title='Data',\n year=datetime.now().year,\n message='Your application description page.'\n )", "def data(self):", "def getResults():", "def test_get_proposal_demand(self):\n pass", "def present_data(self, data=None):\n print('--------------------------------------------------------------------------')\n print('{:<10}{:<10}{:<15}{:<17}{:<17}'.\n format(\n 'index',\n 'name',\n 'surname',\n 'email',\n 'phone'\n )\n )\n print('--------------------------------------------------------------------------')\n\n data = data if data else self.contacts\n for contact in data:\n print('{:<10}{:<10}{:<15}{:<17}{:<17}'.\n format(\n contact[0],\n contact[1],\n contact[2],\n contact[3],\n contact[4]\n )\n )", "def get_flights(request_data):\n flight_data = []\n flights = re.findall('Provided\\s\\w+.+<', request_data)\n\n [flight_data.append(i.split(\",\")) for i in flights]\n\n console = Console()\n user_render = [Panel(get_data(user), expand=False, title=f\"FLIGHT\",box=box.HEAVY_HEAD, border_style=\"pale_turquoise1\") for user in flight_data]\n console.print(Columns(user_render))", "def get_items(data, requisites, formatted):\n returndata = \"\"\n traits = requisites['trait']\n allergens = requisites['allergens']\n\n if formatted:\n prefix = '\\t'\n suffix = '\\n'\n else:\n prefix = ''\n suffix = ', '\n\n for course in data['menu']['meal']['course']:\n item_data = []\n datatype = type(course['menuitem'])\n\n if datatype is list:\n item_data += course['menuitem']\n else:\n item_data.append(course['menuitem'])\n\n for item in item_data:\n if check_item_specifications(item, traits, allergens) and 'No Service at this Time' not in item['name']:\n returndata += (prefix + (item['name']).rstrip(', ') + suffix)\n\n return returndata", "def get_requirements_for_majors(self):\n self.requirements_dict = {}\n 
self.major1_requirements = open_requirements(self.degree, self.major2)\n major1_requirement_set = interpret_point_requirements(self.major1_requirements)\n self.requirements_dict[self.major] = major1_requirement_set\n if self.major2 is not None:\n self.major2_requirements = open_requirements(self.degree, self.major2)\n major2_requirement_set = interpret_point_requirements(self.major2_requirements)\n self.requirements_dict[self.major2] = major2_requirement_set\n else:\n self.requirements_dict[self.major2] = set()\n \n \n #all_requirements = self.major1_requirements | self.major2_requirements\n #self.requirements_dict = interpret_point_requirements(all_requirements)", "def get_info(self):\n return None", "def show_requirements(request, course_id):\r\n course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n if CourseEnrollment.enrollment_mode_for_user(request.user, course_id) == 'verified':\r\n return redirect(reverse('dashboard'))\r\n\r\n upgrade = request.GET.get('upgrade', False)\r\n course = course_from_id(course_id)\r\n context = {\r\n \"course_id\": course_id.to_deprecated_string(),\r\n \"course_modes_choose_url\": reverse(\"course_modes_choose\", kwargs={'course_id': course_id.to_deprecated_string()}),\r\n \"verify_student_url\": reverse('verify_student_verify', kwargs={'course_id': course_id.to_deprecated_string()}),\r\n \"course_name\": course.display_name_with_default,\r\n \"course_org\": course.display_org_with_default,\r\n \"course_num\": course.display_number_with_default,\r\n \"is_not_active\": not request.user.is_active,\r\n \"upgrade\": upgrade,\r\n }\r\n return render_to_response(\"verify_student/show_requirements.html\", context)", "def test_installments_get(self):\n pass", "def get_perfect_information(self):\n raise NotImplementedError", "def view_requests(self):\n requests = self.caller.db.scene_requests or {}\n table = EvTable(\"{wName{n\", \"{wSummary{n\", width=78, border=\"cells\")\n for tup in requests.values():\n table.add_row(tup[0], tup[1])\n self.msg(str(table))", "def test_context_data_with_valid_search_and_some_results(self):\n dataset = factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum')\n factories.SourceDatasetFactory.create(i_dbgap_description='other')\n response = self.client.get(self.get_url(), {'description': 'lorem'})\n context = response.context\n self.assertIn('form', context)\n self.assertTrue(context['has_results'])\n self.assertIsInstance(context['results_table'], tables.SourceDatasetTableFull)\n self.assertQuerysetEqual(context['results_table'].data, [repr(dataset)])", "def get(self):\n\n return {\n 'product': self.request.matchdict['product_name'],\n 'version': self.request.matchdict['product_version'],\n }", "def required_fields():\n module_logger.debug(\"In required_fields.\")\n return (\"comment\", \"lib_layout\", \"lib_selection\",\n \"ncbi_taxon_id\", \"prep_id\", \"sequencing_center\",\n \"sequencing_contact\", \"storage_duration\", \"tags\")", "def get_applicant_info():\n\n credit_score = questionary.text(\"Enter a credit score between 300 and 850: \").ask()\n credit_score = number_checker(credit_score)\n if credit_score == False or credit_score < 300 and credit_score > 850:\n print(\"\\u001b[31m\", \"\\n\")\n print(\"Credit score must be a number between 300 and 850.\", \"\\n\")\n print(\"Exiting app...\", \"\\u001b[0m\", \"\\n\")\n exit()\n \n debt = questionary.text(\"What's your current monthly debt? 
\").ask()\n debt = number_checker(debt)\n if debt == False or debt < 0:\n print(\"\\u001b[31m\", \"\\n\")\n print(\"Monthly debt must be greater than or equal to 0 to use this app.\", \"\\n\")\n print(\"Exiting system...\", \"\\u001b[0m\", \"\\n\")\n exit()\n\n income = questionary.text(\"What's your total monthly income?\").ask()\n income = number_checker(income)\n if income == False or income < 0:\n print(\"\\u001b[31m\", \"\\n\")\n print(\"Your Monthly INCOME must be greater than 0 to sue this app.\", \"\\n\")\n print(\"Exiting system...\", \"\\u001b[0m\", \"\\n\")\n exit()\n else:\n True\n\n loan_amount = questionary.text(\"What's your desired loan amount?\").ask()\n loan_amount = number_checker(loan_amount)\n if loan_amount == False or loan_amount < 0:\n print(\"\\u001b[31m\", \"\\n\")\n print(\"Loan amount must be greater than 0.\", \"\\n\")\n print(\"Exiting system...\", \"\\u001b[0m\", \"\\n\")\n exit()\n\n home_value = questionary.text(\"What's your home value?\").ask()\n home_value = number_checker(home_value)\n if home_value == False or home_value < 0:\n print(\"\\u001b[31m\", \"\\n\")\n print(\"Your home value must be greater than or equal to 0.\", \"\\n\")\n print(\"Exiting system...\", \"\\u001b[0m\", \"\\n\")\n exit()\n\n return credit_score, debt, income, loan_amount, home_value", "def on_get(self, _req, resp, permit_type):\n msg = False\n if permit_type in ('retail', 'retail_legacy'):\n permit_list = self.get_permit_list(permit_type)\n permit_list.sort(key=lambda v:\n ((v.get('dba_name') if v.get('dba_name')\n else v.get('business_name', ''))\n +' '+v.get('application_id', '')).upper())\n if isinstance(permit_list, list):\n if permit_type == 'retail_legacy':\n data = self.get_legacy_list_transform(permit_list)\n else:\n data = {'list': permit_list}\n data_json = jsend.success(data)\n msg = 'success ('+str(len(permit_list))+')'\n else:\n pass\n\n if msg is not False:\n sentry_sdk.capture_message(msg, 'info')\n resp.body = json.dumps(data_json)\n resp.status = falcon.HTTP_200\n else:\n msg = 'ERROR'\n sentry_sdk.capture_message(msg, 'error')\n resp.body = json.dumps(jsend.error(msg))\n resp.status = falcon.HTTP_400", "def get(self):\n dev = self.request.get('device')\n reg = self.request.get('registry')\n states = self.request.get('states')\n\n self.response.headers['Content-Type'] = 'text/plain'\n if (not dev) or len(dev)==0:\n self.response.write('parameter dev not found')\n elif (not reg) or len(reg)==0:\n self.response.write('parameter reg not found')\n elif (not states) or states <= 0:\n self.response.write('invalid or no states found')\n else:\n # Get user account\n ds = Datastore()\n user = ds.get_registry(reg)\n if len(user) == 0:\n self.response.write(\"Registry does not exist\")\n else:\n region = get_region_from_user(user)\n\n # Add Device on IOT Core\n iot = IOT()\n success, message = iot.get_data(dev, reg, states, region)\n if success:\n try:\n for msg in message['deviceStates']:\n msg['binaryData'] = decode64(msg['binaryData'])\n self.response.write(message)\n except:\n self.response.write([])\n else:\n self.response.write(message)", "def cal_desc(self):\n desc = \"\"\n desc += \"Requested by \"\n orgs = self.org.all()\n if len(orgs) > 0:\n for org in orgs:\n desc += org.name + \", \"\n desc = desc[:-2] + \".\\n\" # removes trailing comma\n ccs = self.ccinstances.all()\n if len(ccs) > 0:\n desc += \"Crew Chiefs: \"\n for cc in ccs:\n desc += cc.crew_chief.get_full_name() + \" [\" + (cc.service.shortname if cc.service else cc.category.name) + \"], \"\n desc = 
desc[:-2] + \".\\n\" # removes trailing comma\n if self.description:\n desc += self.description + \"\\n\"\n return desc", "def test_context_data_only_finds_results_in_requested_study(self):\n dataset = factories.SourceDatasetFactory.create(\n i_dbgap_description='lorem ipsum',\n source_study_version__study=self.study)\n factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum')\n get = {'description': 'lorem'}\n response = self.client.get(self.get_url(self.study.pk), get)\n context = response.context\n self.assertIn('form', context)\n self.assertTrue(context['has_results'])\n self.assertIsInstance(context['results_table'], tables.SourceDatasetTableFull)\n self.assertQuerysetEqual(context['results_table'].data, [repr(dataset)])", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"product\", \"applicant_name\", \"applicant_dob\", \"applicant_phoneno\", \"applicant_address\"]", "def test_data_object_get_details(self):\n pass", "def get_presence_examode_concepts(request):\n\n json_resp = {}\n json_resp['concepts'] = get_presence_exa_concepts()\n json_resp['labels'] = get_presence_exa_labels()\n # print(json_resp)\n return JsonResponse(json_resp)", "def required(project):\n required = [{'short_name': 'co2'}, {'short_name': 'ps'}]\n return required" ]
[ "0.6163567", "0.60463357", "0.6041842", "0.59421384", "0.59399956", "0.58859646", "0.58683854", "0.5867265", "0.5864334", "0.5829533", "0.57291526", "0.57291526", "0.57073796", "0.5669223", "0.5638486", "0.5616218", "0.560131", "0.55974495", "0.5584069", "0.5578408", "0.5574402", "0.55666906", "0.55586463", "0.5535691", "0.5507841", "0.5507841", "0.5507283", "0.5482169", "0.5474474", "0.5468583", "0.54313725", "0.5427157", "0.5424445", "0.5424445", "0.5424445", "0.54218817", "0.54078305", "0.53946155", "0.5369984", "0.5365525", "0.5363782", "0.5346809", "0.53414434", "0.533648", "0.529532", "0.52944267", "0.52910626", "0.52872807", "0.5270773", "0.5263068", "0.52629673", "0.52509546", "0.5225971", "0.5222738", "0.5222222", "0.52158785", "0.52106905", "0.51898724", "0.5182496", "0.5171854", "0.5167729", "0.51537484", "0.51537484", "0.5152153", "0.51478255", "0.51444286", "0.51328063", "0.51318455", "0.5124229", "0.5121691", "0.51203644", "0.51202106", "0.51136416", "0.51121026", "0.51119715", "0.51109517", "0.51103234", "0.51070595", "0.51028335", "0.509819", "0.50934464", "0.50823087", "0.5080692", "0.508064", "0.5077232", "0.50753117", "0.50719416", "0.50695986", "0.50667673", "0.50622964", "0.5058144", "0.5046283", "0.5045306", "0.5041047", "0.50385827", "0.5037123", "0.5035678", "0.5034649", "0.5032066", "0.5028405", "0.502735" ]
0.0
-1
Instantiate and run the worker.
def main() -> None:
    worker = Worker()
    worker.do_work()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_and_run_worker(self):\n\n # Run processing on QThread worker - prevents GUI lock up\n # Create processing object, map control data\n processing_hub = ProcessingHub(control=self.control)\n\n # Create worker thread, connect signals to methods in this class and start, which calls worker.run()\n self.worker = ProcessingWorker(processing_hub, parent=self)\n self.worker.signal_screening_output_to_gui.connect(self.set_screening_output_to_gui)\n self.worker.signal_error.connect(self.error)\n self.worker.start()", "def start(self):\n if not self._worker:\n # the worker might be already created in case of deserialization\n self._worker = APIWorker(self.queue)\n self._worker.start()", "def worker(ctx_obj):\n execute(start_worker_command(settings=ctx_obj['settings']))", "def run_worker(self):\n # TODO(xiejw): To allow execution framework to add train hooks.\n return self._start_distributed_training()", "def run(self, worker, evaluator=None):\n pass", "def worker(self, **options):\n pass", "def run(self):\n self.loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.loop)\n tasks = []\n self.threads[0] = Worker(self, 0, self.options, self.logger,\n self.queue, self.storage, self.parser, self.addToQueue, role=1)\n tasks.append(self.threads[0].begin())\n for thread in range(1, self.max_threads):\n # Spawn and start the threads\n self.threads[thread] = Worker(self, thread, self.options, self.logger,\n self.queue, self.storage, self.parser, self.addToQueue)\n tasks.append(self.threads[thread].begin())\n self.loop.run_until_complete(asyncio.gather(*tasks))", "def run(self):\n self.submit()\n self.start()", "def _worker(self, args):\n pass", "def create(self):\n return self.start()", "def __init__(self, run, expname):\n logger.debug('Initializing worker {}.'.format(rank))\n self.run = int(run)\n self.expname = expname\n bcast_var = None\n dsname = comm.bcast(bcast_var, root=0)\n print(dsname)\n \n print('********** Start setup.')\n t0 = time.time()\n self.dsIdx = psana.DataSource(str(dsname))\n logger.info('********** Datasource on rank {}: {}s'.format(rank, time.time()-t0))\n self.dsIdxRun = next(self.dsIdx.runs())\n self.parse_detectors()\n logger.info('Rank {} has datasource and detectors.'.format(rank))\n print('********** Setup on rank {}: {}s'.format(rank, time.time()-t0))\n return", "def _run(self):\n self._worker = _stash.runtime.run(\n input_=self.cmd,\n final_ins=self._sp_stdin,\n final_outs=self._sp_stdout,\n final_errs=self._sp_stderr,\n add_to_history=None,\n persistent_level=2,\n is_background=False,\n cwd=self._cwd,\n environ=self._environ\n )\n self.pid = self._worker.job_id", "def run(self):\n self.run()", "def start(self):\r\n thread = threading.Thread(target=self.run)\r\n try:\r\n thread.start()\r\n except RuntimeError as e:\r\n raise SchedulerError(f\"Failed to start worker '{self.WORKER_ID}': \" + str(e))", "def run(self):\n run_simple(self.hostname, self.port, self.dispatch,\n use_reloader=self.debug)", "def create_worker(context=None):\n return BasicWorker(context)", "def run(self):\n self.log.info(\"Starting thread: \" + self.name)\n self.object__ = self.run_process(self.object__, self.args)", "def run(self):\n self.initialize()\n\n self.engine = setup_db_connection(driver=\"Fake\")\n self.logger = multiprocessing.get_logger()\n self.logger.handlers[0] = setup_logging()\n\n self.logger.debug(\"\\n\\n\")\n self.logger.debug(f'Spawning Worker')\n self.logger.debug(\"\\n\\n\")\n\n self.time_start_process = time.time()\n self.time_start_cycle = time.time()\n\n # 
-------------------------------\n # Start Processing Data\n\n\n data_unprocessed = self.get_data_from_queue()\n\n df = pd.DataFrame()\n\n df = self.process_data(data_unprocessed)\n\n if not df.empty:\n self.insert_data_into_database(df)\n\n # -------------------------------\n\n self.check_status(\"COMPLETED\")\n return", "def __init__(self, worker):\n self._worker = worker\n self._jobs = Queue()\n self._results, self._errors = [], []\n self._jobfinished = Condition()", "def getWorker(self):\n pass", "def run_worker(self):\n\n # exec(open('restarter.py').read())\n # sys.exit()\n self.update_session_state()\n currentTime = QTime().currentTime()\n fromTime = QTime(int(self.settings.TECHFROMHOUR), int(self.settings.TECHFROMMIN))\n toTime = QTime(int(self.settings.TECHTOHOUR), int(self.settings.TECHTOMIN))\n sessionState = self.lblMarket.text()\n\n if fromTime < currentTime < toTime:\n print(\"Worker skept-Technical break : \", fromTime.toString(\"hh:mm\"), \" to \", toTime.toString(\"hh:mm\"))\n self.update_console(\"Technical break untill \" + toTime.toString(\"hh:mm\"))\n\n else:\n self.update_console(\"Starting Worker- UI Paused\")\n self.uiTimer.stop() # to not cause an errors when lists will be resetted\n worker = Worker(\n self.ibkrworker.process_positions_candidates) # Any other args, kwargs are passed to the run function\n worker.signals.result.connect(self.update_ui)\n worker.signals.status.connect(self.update_status)\n worker.signals.notification.connect(self.update_console)\n # Execute\n self.threadpool.start(worker)", "def run (self):\n t = threading.Thread(target=self.runController)\n t.start()", "def run(self):\n self.connect()\n self.run_forever()", "def run(self):\n self.thread = threading.Thread(target=self._main)\n self.thread.start()\n self.running = True", "def run_worker():\n listen = ['default']\n conn = Redis(host=app.config['RQ_DEFAULT_HOST'],\n port=app.config['RQ_DEFAULT_PORT'],\n db=0,\n password=app.config['RQ_DEFAULT_PASSWORD'])\n\n with Connection(conn):\n worker = Worker(map(Queue, listen))\n worker.work()", "def run(self):\n self.class_inst_obj.processor(self.msg)", "def run(self):\n self.started()", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n\n self.make_connection()\n self.channel()\n self.declare_queue()\n self.publish_message()\n self.close_connection()", "def run(self):\n self.arbiter.start()", "def worker(self, worker):\n\n self._worker = worker", "def run(self):\n\t\t\n\t\tpass", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\r\n pass", "def exec(cls, *args, **kwargs):\n task = cls(*args, **kwargs)\n task.run()\n return task", "def run(self):\n try:\n self._run_internal()\n finally:\n self._cleanup()", "def start_work(self):\n self.worker_thread = WorkerThread(self.feedback_log, self.job_list) # only created when processing begins. 
May be recreated\n self.worker_thread.daemon = True\n self.worker_thread.start()", "def start(self):\n \n rpc = self.smartstarter.rpcsystem\n \n process = yield self.smartstarter.start()\n \n try:\n \n make_worker_url = yield process.get_function_url(make_worker)\n make_worker_stub = rpc.create_function_stub(make_worker_url)\n \n worker = yield make_worker_stub(\"local\") # TODO remove network\n \n worker.get_function_url = process.get_function_url_stub\n \n worker.reset = rpc.create_local_function_stub(process.reset)\n worker.stop = rpc.create_local_function_stub(process.stop)\n worker.kill = rpc.create_local_function_stub(process.kill)\n worker.stdout = process.stdout.make_stub(rpc)\n worker.stderr = process.stderr.make_stub(rpc)\n worker.exited = process.exited.make_stub(rpc)\n\n except:\n process.kill()\n raise \n \n\n \n # worker.stdout.add_callback(stdout)\n # worker.stderr.add_callback(stderr)\n \n# receiver_stub = rpc.create_local_function_stub(hook.receiver)\n# hookinstall_url = yield process.get_function_url(hook.install_hook)\n# hookinstall_url_stub = rpc.create_function_stub(hookinstall_url)\n# yield hookinstall_url_stub(receiver_stub)\n \n defer.returnValue(worker)", "def __init__(self, queue):\n super(Worker, self).__init__()\n self.queue = queue\n logger.debug(\"Worker thread started.\")\n self.start()", "def run(self):\n self.process.start()", "def _spawn_workers(self):\n self._event.set()\n self._workers = [ClassifierWorker(self._event, self._queue, self._results) for x in range(self._NUM_WORKERS)]\n [worker.start() for worker in self._workers]", "def run(self):\n raise NotImplementedError()", "def run(self):\n raise NotImplementedError()", "def run(self):\n ioloop.IOLoop.current().start()", "def register_worker(self):\n raise Exception('not implemented')", "def run(self) -> None:\n raise NotImplementedError()", "def run(self):\n try:\n self._run()\n except Exception as err:\n # TODO: Do Task Failure to run exception handling\n pass", "def run(self):\n self.loop.spawn_callback(self.main)\n self.loop.start()\n if self.exc_info:\n six.reraise(*self.exc_info)", "def run(self):\n receiver = threading.Thread(target=self.receive_data)\n # Setting daemon to True means that this Thread will be terminated when the main program ends.\n receiver.daemon = True\n receiver.start()", "def run(self):\n \n pass", "def run(self):\n self._server = self._get_server()\n self._server.serve_forever()", "def exec_worker(self, endpoint, args, request):\n raise NotImplementedError", "def run(self):\n # Daemonize and continue the work.\n if not self.daemonize():\n return\n \n # run the work.\n syslog.syslog('This is worker daemon and we will now begin the work.')\n self._do_work()\n\n # shall never reach.\n return", "async def run(self):\n # Create dictionaries to hold configured sources and models\n await self.setup()\n await self.start()\n # Load\n if self.mc_config is not None:\n # Restore atomic after config is set, allow setting for now\n atomic = self.mc_atomic\n self.mc_atomic = False\n await self.register_directory(self.mc_config)\n self.mc_atomic = atomic\n # Write out port to file\n if self.portfile is not None:\n pathlib.Path(self.portfile).write_text(str(self.port))\n try:\n # If we are testing then RUN_YIELD will be an asyncio.Event\n if self.RUN_YIELD_START is not False:\n await self.RUN_YIELD_START.put(self)\n await self.RUN_YIELD_FINISH.wait()\n else: # pragma: no cov\n # Wait for ctrl-c\n while True:\n await asyncio.sleep(60)\n finally:\n await self.app.cleanup()\n await 
self.site.stop()", "def _new_worker(\n self,\n work: WorkType,\n node: DOMNode,\n *,\n name: str | None = \"\",\n group: str = \"default\",\n description: str = \"\",\n exit_on_error: bool = True,\n start: bool = True,\n exclusive: bool = False,\n thread: bool = False,\n ) -> Worker:\n worker: Worker[Any] = Worker(\n node,\n work,\n name=name or getattr(work, \"__name__\", \"\") or \"\",\n group=group,\n description=description or repr(work),\n exit_on_error=exit_on_error,\n thread=thread,\n )\n self.add_worker(worker, start=start, exclusive=exclusive)\n return worker", "def do_work(self):", "def run(self):\n raise NotImplementedError('Run method not implemented in %s' % type(self).__name__)", "def run(self):\n # Ready to load modules\n self._module_loader.load_all_async()\n \n self._setup_keybinder()\n self._threadpool.start()", "def run(self):\n self.__rpc_server.run()", "def _run(self):\n raise NotImplementedError", "def _run(self):\n raise NotImplementedError", "def start(self):\n self._do_work.set()\n self._worker_thread.start()", "def spawn(cls, *args, **kwargs):\n g = cls(*args, **kwargs)\n g.start()\n return g", "def run(self) -> None:\n loop = switch_to_uvloop()\n\n with ThreadPoolExecutor(max_workers=1) as pipe_awaiter:\n async def _run():\n node = await DHTNode.create(\n initial_peers=list(self.initial_peers), listen_on=self.listen_on, parallel_rpc=self.parallel_rpc,\n num_workers=self.max_workers or 1, record_validator=self._record_validator,\n **self.kwargs)\n if node.port is not None:\n self._port.value = node.port\n self.ready.set()\n\n while True:\n method, args, kwargs = await loop.run_in_executor(pipe_awaiter, self._pipe.recv)\n asyncio.create_task(getattr(self, method)(node, *args, **kwargs))\n\n coro = _run()\n loop.run_until_complete(coro)", "def run(self):\r\n self._initialize()\r\n\r\n # FIXME: do better exception handling here: what if both will raise exception?\r\n try:\r\n self._run()\r\n finally:\r\n self._finalize()", "def run(self):\n client = ProcessorClient()\n try:\n client.connect(self.address)\n except Exception as e:\n self.error = e\n logging.error(e)\n else:\n self.clients[self.name] = client", "def run(self):\n for worker in self.simulation_workers:\n worker.start()", "def start(self) -> None:\n JavaGate().exec_process_instance(\n self._user,\n self._project,\n self.name,\n \"\",\n self.worker_group,\n self.warning_type,\n self.warning_group_id,\n 24 * 3600,\n )", "def run(self):\n server = TCPServer((self.host, self.port), TCPHandler)\n server.lymphocytes_getter = self.lymphocytes_getter\n\n #runs forever - so make this thread daemon\n server.serve_forever()", "async def _setup(self):\n\n Reporter.info('Setting up workers...')\n self.workers = [asyncio.Task(self._work(), loop=self.loop)\n for _ in range(self.MAX_WORKERS)]\n Reporter.info('Starting scan...')\n await self.q.join()", "def _worker(self, robot_id):\n robot = Robot(self, rid=robot_id, scroll_times=3)\n self.robots.update({robot_id: robot})\n d('Starting ROBO_%s' % str(robot_id))\n robot.start()\n d('End of robot_thread %s ' % str(robot_id))\n return", "def run(self):\n # Get the UUID so we can heartbeat to Ironic. Raises LookupNodeError\n # if there is an issue (uncaught, restart agent)\n self.started_at = _time()\n\n # Cached hw managers at runtime, not load time. 
See bug 1490008.\n hardware.load_managers()\n\n if not self.standalone:\n # Inspection should be started before call to lookup, otherwise\n # lookup will fail due to unknown MAC.\n uuid = inspector.inspect()\n\n content = self.api_client.lookup_node(\n hardware_info=hardware.dispatch_to_managers(\n 'list_hardware_info'),\n timeout=self.lookup_timeout,\n starting_interval=self.lookup_interval,\n node_uuid=uuid)\n\n self.node = content['node']\n self.heartbeat_timeout = content['heartbeat_timeout']\n\n wsgi = simple_server.make_server(\n self.listen_address[0],\n self.listen_address[1],\n self.api,\n server_class=simple_server.WSGIServer)\n\n if not self.standalone:\n # Don't start heartbeating until the server is listening\n self.heartbeater.start()\n\n try:\n wsgi.serve_forever()\n except BaseException:\n self.log.exception('shutting down')\n\n if not self.standalone:\n self.heartbeater.stop()", "def train_main(cls):\n launcher = cls()\n launcher.launch()", "def _make_thread(self):\r\n pass", "def run(self):\n print('starting up on {} port {}'.format(*self.listener_address))\n self.selector.register(self.listener, selectors.EVENT_READ)\n\n # Serialize our listener's host and port\n serializedAdd = fxp_bytes_subscriber.serialize_address(\n self.listener_address[0], self.listener_address[1])\n\n # Contact with Publisher\n self.listener.sendto(serializedAdd, self.gcd_address)\n\n while True:\n events = self.selector.select(CHECK_INTERVAL)\n for key, mask in events:\n data = self.receive_message()\n self.removeOldQuote()\n self.createGraph(data)\n self.arbitrage()\n self.checkTimeout()", "def main(self):\n\n self._setup_task_manager()\n self._setup_source_and_destination()\n self.task_manager.blocking_start(waiting_func=self.waiting_func)\n self._cleanup()", "def run(self, worker_num=1):\n worker_proc = Process(target=self._start_worker, args=(worker_num,))\n worker_proc.start()\n\n beat_proc = Process(target=self._start_beat, args=())\n beat_proc.start()\n\n beat_proc.join()\n worker_proc.join()", "def run(self):\n if self.is_running:\n print('Broadcaster is already running')\n return\n\n if not self._thread:\n logger.debug('Creating server for given database ...')\n #Create server\n self.server = SimpleServer()\n self.server.createPV(prefix, self.db)\n\n #Create driver\n self.driver = LightDriver(self.cmds)\n\n #Create thread\n self._thread = ServerThread(self.server)\n\n #Start the server\n logger.info('Starting pcaspy server ...')\n self._thread.start()\n\n #Update all the callbacks\n logger.debug(\"Running stored callbacks to update broadcast PVs\")\n [cb() for cb in self.subs.keys()]" ]
[ "0.7279886", "0.7101314", "0.70331264", "0.7012368", "0.70091134", "0.69631004", "0.6924996", "0.68743694", "0.6871539", "0.6871292", "0.68372667", "0.6789902", "0.67830294", "0.6669269", "0.6668853", "0.6661168", "0.6640133", "0.6624162", "0.6593175", "0.6540589", "0.6489322", "0.6478791", "0.64734346", "0.64396524", "0.6408806", "0.6343388", "0.6324077", "0.63200325", "0.63200325", "0.63200325", "0.63200325", "0.63200325", "0.63200325", "0.63200325", "0.63200325", "0.63200325", "0.63200325", "0.63200325", "0.63200325", "0.63200325", "0.63200325", "0.63200325", "0.6310664", "0.62938035", "0.626895", "0.6268784", "0.62626606", "0.62626606", "0.62626606", "0.62626606", "0.62626606", "0.62626606", "0.62626606", "0.62626606", "0.62626606", "0.62579906", "0.6256374", "0.6250547", "0.6244802", "0.62446725", "0.6239922", "0.62188005", "0.6203112", "0.6200478", "0.6200478", "0.6187026", "0.61781895", "0.61732423", "0.6170444", "0.61684364", "0.61658645", "0.61626637", "0.6161693", "0.61610377", "0.61533135", "0.6150677", "0.6147901", "0.6137788", "0.6135913", "0.61332303", "0.6116216", "0.61099833", "0.61099833", "0.6101963", "0.6101154", "0.6099798", "0.6094901", "0.60915715", "0.60907423", "0.6088881", "0.60879534", "0.6083836", "0.60771465", "0.6066905", "0.6056482", "0.6043242", "0.60404915", "0.6039018", "0.60376143", "0.6026629" ]
0.7226993
1
Return the cosmology that is being used
def get_cosmology(cosmology=conf.cosmology):
    if cosmology.lower() not in available_cosmologies:
        raise ValueError(
            "Unrecognised cosmology {}. Available cosmologies are {}".format(
                cosmology, ", ".join(available_cosmologies)
            )
        )
    elif cosmology.lower() in _astropy_cosmologies:
        ind = [
            num for num, name in enumerate(_astropy_cosmologies)
            if name == cosmology.lower()
        ][0]
        return getattr(cosmo, list(parameters.available)[ind])
    elif cosmology.lower() == "planck15_lal":
        return Planck15_lal_cosmology()
    elif "_with_riess2019_h0" in cosmology.lower():
        base_cosmology = cosmology.lower().split("_with_riess2019_h0")[0]
        return Riess2019_H0_cosmology(base_cosmology)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_cos_dscp(self):\n return self.__cos_dscp", "def set_cosmology(self, cosmo):\n self.cosmo = cosmo\n self.h70 = cosmo['h'] # Hubble parameter, H0 = 100h km/s/Mpc\n self.Om = cosmo['omega_M_0'] # Omega_matter\n self.Ol = cosmo['omega_lambda_0'] # Omega_Lambda", "def _get_cosmo(self, *args):\n with open(self.filename) as f:\n for l in f:\n if l.startswith(\"#a\"):\n self.cosmo_a = float(l.split('=')[-1])\n if l.startswith(\"#O\"):\n self.cosmo_h = float(l.split(';')[-1].split('=')[-1])", "def get_coml_s(hyplo):\r\n\tres=\"\"\r\n\tfor x in hyplo:\r\n\t\tif x==\"1\":\r\n\t\t\tres+=\"0\"\r\n\t\telse:\r\n\t\t \tres+=\"1\"\r\n\treturn res", "def _get_dscp_cos(self):\n return self.__dscp_cos", "def get_cosmology_from_name(cosmology):\n\n # This list should be updated when astropy releases the Planck18 cosmology\n available_cosmologies = {\n \"WMAP5\": acosmo.WMAP5,\n \"WMAP7\": acosmo.WMAP7,\n \"WMAP9\": acosmo.WMAP9,\n \"Planck13\": acosmo.Planck13,\n \"Planck15\": acosmo.Planck15,\n }\n\n # If the user uses a string for the cosmology look it up in the dict.\n # If they specify a cosmology class, use that instead.\n if isinstance(cosmology, str):\n if cosmology in available_cosmologies.keys():\n cosmo = available_cosmologies[cosmology]\n else:\n msg = (f\"\"\"The cosmology '{cosmology}' is not in the list of\n available cosmologies with string keywords. The list\n if available cosmologies accessable via keyword are:\n {available_cosmologies.keys()}\"\"\")\n raise ValueError(msg)\n\n elif isinstance(cosmology, acosmo.core.FLRW):\n cosmo = cosmology\n\n return cosmo", "def Seljak04_Cosmo(self,dc,nu):\n mass_non_linear = (np.argmin((self.sigmaM-dc)**2.).to(self.Msunh)).value\n Mh = (self.M.to(self.Msunh)).value\n x = Mh/self.mass_non_linear\n if len(self.bias_par.keys()) == 0:\n a = 0.53\n b = 0.39\n c = 0.45\n d = 0.13\n e = 40.\n f = 5e-4\n g = 1.5\n a1 = 0.4\n a2 = 0.3\n a3 = 0.8\n else:\n a = self.bias_par['a']\n b = self.bias_par['b']\n c = self.bias_par['c']\n d = self.bias_par['d']\n e = self.bias_par['e']\n f = self.bias_par['f']\n g = self.bias_par['g']\n a1 = self.bias_par['a1']\n a2 = self.bias_par['a2']\n a3 = self.bias_par['a3']\n if self.cosmo_code == 'camb':\n Om0m = self.camb_pars.omegam\n ns = self.cosmo_input_camb['ns']\n s8 = self.cosmo.get_sigma8_0()\n nrun = self.cosmo_input_camb['nrun']\n else:\n Om0m = self.cosmo.Omega0_m()\n ns = self.cosmo.n_s()\n s8 = self.cosmo.sigma8()\n try:\n nrun = self.cosmo_input_class['alpha_s']\n except:\n nrun = 0.\n return a + b*x**c + d/(e*x+1.) + f*x**g + np.log10(x)* \\\n (a1*(Om0m - 0.3 + ns - 1.) 
+ \\\n a2*(self.s8-0.9 + self.hubble - 0.7) + a4*nrun)", "def covariates(self):\n return None", "def _get_cofm(self, num, base):\n try:\n #Use saved sightlines if we have them.\n return (self.cofm, self.axis)\n except AttributeError:\n #Otherwise get sightlines at random positions\n #Re-seed for repeatability\n np.random.seed(23)\n box = _get_header_attr_from_snap(\"BoxSize\", num, base)\n #All through y axis\n axis = np.ones(self.NumLos)\n cofm = box*np.random.random_sample((self.NumLos,3))\n return cofm, axis", "def get_cosmo(fname):\n grep_cosmo = Popen(['grep', '^#Omega_', str(fname)], stdout=PIPE)\n grep_box = Popen(['grep', '^#Full box', str(fname)], stdout=PIPE)\n cosmo_str = (grep_cosmo.communicate()[0]\n .decode(\"utf-8\")\n .strip(\"#\\n\")\n .split(\"; \"))\n box_str = (grep_box.communicate()[0]\n .decode(\"utf-8\")\n .strip(\"#\\n\")\n .split(\" = \"))\n cosmo = {i.split(' = ')[0]: float(i.split(' = ')[1]) for i in cosmo_str}\n cosmo['Box_size_Mpc/h'] = float(box_str[1].split()[0])\n return cosmo", "def get_coulomb_info(self):\n return", "def Planck15_lal_cosmology():\n return cosmo.LambdaCDM(H0=67.90, Om0=0.3065, Ode0=0.6935)", "def nonflatcosmo(self):\n return LambdaCDM(70, 0.4, 0.8)", "def CIS(self):\n return self.get_class_average(self.CIS_class_level)", "def set_cosmo(self,astropycosmo):\n if \"astropy\" not in astropycosmo.__module__:\n raise ValueError(\"'astropycosmo' must be an astropy cosmology object\")\n \n self._side_properties[\"cosmology\"] = astropycosmo\n self._update_distance_()", "def getStoichiometryMath(self, *args):\n return _libsbml.SpeciesReference_getStoichiometryMath(self, *args)", "def init_physical(\n ombh2=0.022161, omch2=0.11889, H0=67.77, omkh2=0.0, t0=2.726, nnu=3.046\n ):\n h = H0 / 100.0\n\n c = Cosmology()\n\n c.omega_b = ombh2 / h ** 2\n c.omega_c = omch2 / h ** 2\n c.H0 = H0\n\n rhoc = 3.0 * c.H() ** 2 * c_sl ** 2 / (8.0 * math.pi * G_n)\n rhorad = a_rad * t0 ** 4\n c.omega_g = rhorad / rhoc\n\n rhonu = nnu * rhorad * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0)\n c.omega_n = rhonu / rhoc\n\n c.omega_l = 1.0 - (omkh2 + ombh2 + omch2) / h ** 2 - (c.omega_g + c.omega_n)\n\n return c", "def createStoichiometryMath(self):\n return _libsbml.SpeciesReference_createStoichiometryMath(self)", "def cole_coeff(self):\n return self.diseq_coeff(standardize=True)", "def get_cosin_sim(question, contexts):\r\n cos_sim_for_question = []\r\n for context in contexts :\r\n cv = CountVectorizer(stop_words=MY_STOPWORDS, lowercase=False)\r\n matrix = cv.fit_transform(pd.DataFrame([question, context])[0]).toarray()\r\n cos_sim = dot(matrix[0], matrix[1])/(norm(matrix[0])*norm(matrix[1]))\r\n cos_sim_for_question.append(cos_sim)\r\n return pd.Series(cos_sim_for_question)", "def matthewscc(self):\n if not self.total_examples:\n return 0.\n\n true_pos = float(self.true_positives)\n false_pos = float(self.false_positives)\n false_neg = float(self.false_negatives)\n true_neg = float(self.true_negatives)\n terms = [(true_pos + false_pos),\n (true_pos + false_neg),\n (true_neg + false_pos),\n (true_neg + false_neg)]\n denom = 1.\n for t in filter(lambda t: t != 0., terms):\n denom *= t\n return ((true_pos * true_neg) - (false_pos * false_neg)) / math.sqrt(denom)", "def _get_traffic_class_cos(self):\n return self.__traffic_class_cos", "def mychem_info(self):\n return self._mychem_info", "def calc_coherence(model, corpus):\n cm = CoherenceModel(model=model, corpus=corpus, coherence='u_mass')\n coherence = cm.get_coherence()\n print(timestamp(),\"Topic coherence:\", coherence)", 
"def concentrations(self):\n return self.quantities/self.volume", "def cosmo(self):\n return self.cls(*self.cls_args, **self.cls_kwargs)", "def coherence(self):\r\n\r\n #XXX Calculate this from the standard output, instead of recalculating\r\n #the coherence:\r\n\r\n tseries_length = self.input.data.shape[0]\r\n spectrum_length = self.spectrum.shape[-1]\r\n coherence = np.zeros((tseries_length,\r\n tseries_length,\r\n spectrum_length))\r\n\r\n for i in range(tseries_length):\r\n for j in range(i, tseries_length):\r\n coherence[i][j] = tsa.coherence_spec(self.spectrum[i][j],\r\n self.spectrum[i][i],\r\n self.spectrum[j][j])\r\n\r\n idx = tril_indices(tseries_length, -1)\r\n coherence[idx[0], idx[1], ...] = coherence[idx[1], idx[0], ...].conj()\r\n\r\n return coherence", "def getStoichiometry(self):\n return _libsbml.SpeciesReference_getStoichiometry(self)", "def from_mypackage(mycosmo):\n # Cosmology provides a nice method \"mapping\", so all that needs to\n # be done here is create a dictionary of the parameters\n mapping = {}\n mapping[\"H0\"] = mycosmo.hubble_parameter\n mapping[\"Om0\"] = mycosmo.Omega_matter_initial\n ... # keep building mapping\n\n return Cosmology.from_format(\n mapping, format=\"mapping\", move_to_meta=True\n ) # extra info -> meta", "def getMath(self):\n return _libsbml.StoichiometryMath_getMath(self)", "def _get_concentration(self, state):\n return self.fc(state.float_features).exp() + self.EPSILON", "def evaluation_cc(self, property='clustering-coeff'):\n\n if property == 'clustering-coeff':\n rw_cc = [np.mean(clustering_coef_wu(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(clustering_coef_wu(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'transitivity':\n rw_cc = [np.mean(transitivity_wu(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(transitivity_wu(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'coreness':\n rw_cc = [np.mean(core.core_periphery_dir(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(core.core_periphery_dir(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'assortativity':\n rw_cc = [np.mean(core.assortativity_wei(self.rw_data[t], 0)) for t in range(0, self.T)]\n smth_cc = [np.mean(core.assortativity_wei(self.smth_data[t], 0)) for t in range(0, self.T)]\n elif property == 'modularity':\n rw_cc, _ = get_number_of_components(self.rw_data)\n smth_cc, _ = get_number_of_components(self.smth_data)\n elif property == 'path_length':\n rw_cc = [charpath(rw)[0] for rw in self.rw_data]\n smth_cc = [charpath(sm)[0] for sm in self.smth_data]\n\n # rw_cc_ent = get_entropy_list(rw_cc)\n # smth_cc_ent = get_entropy_list(smth_cc)\n\n return rw_cc, smth_cc", "def cos_sim(com_feat,ref_feat):\n # Fill this in\n a = numpy.squeeze(com_feat)\n b = numpy.squeeze(ref_feat)\n return numpy.dot(a, b) / (numpy.linalg.norm(a) * numpy.linalg.norm(b))", "def get_clarifications_mctaco(ex, nlp, comet_model):\n context = ex['context']\n personx, _ = get_personx(nlp, context)\n \n if len(personx) == 0:\n return []\n \n outputs = {category: comet_model.predict(context, category, num_beams=5) for category in comet_model.categories}\n\n curr_events = []\n for category, prefix in CATEGORY_TO_PREFIX.items():\n for out_event in outputs[category]:\n if out_event != \"none\" and out_event != \"\":\n if not out_event.lower().startswith(\"person\") and not out_event.lower().startswith(\"other\"):\n out_event = \" \".join((prefix, out_event))\n\n out_event = re.sub(\"personx\", personx, 
out_event, flags=re.I)\n out_event = re.sub(\"person x\", personx, out_event, flags=re.I)\n out_event = re.sub(\"persony\", \"others\", out_event, flags=re.I)\n out_event = re.sub(\"person y\", \"others\", out_event, flags=re.I)\n\n question = CATEGORY_TO_QUESTION[category].replace(\"PersonX\", personx)\n curr_events.append((question, out_event))\n\n return curr_events", "def getMatricula(self):\n return self._l[0]", "def _setup_from_cosmology(self,cosmo):\n\n self.linP_params=fit_linP.parameterize_cosmology_kms(cosmo,\n self.z_star,self.kp_kms)", "def csi(self):\n return self.table[0, 0] / (self.table[0, 0] + self.table[0, 1] + self.table[1, 0])", "def chelsea():\n from skimage import data\n\n return data.chelsea()", "def testCosmologyCatalog(self):\n dbObj = myTestGals(database=self.dbName)\n cat = cosmologicalGalaxyCatalog(dbObj)\n cat.write_catalog(self.catName)", "def find_cosmics_in_cut(x, cut_wave, cut_brightest_line, line_wavelength = 0.,\n kernel_median_cosmics = 5, cosmic_higher_than = 100, extra_factor = 1., plot=False, verbose=False):\n \n gc_bl=signal.medfilt(cut_brightest_line,kernel_size=kernel_median_cosmics)\n max_val = np.abs(cut_brightest_line-gc_bl)\n\n gc=signal.medfilt(cut_wave,kernel_size=kernel_median_cosmics)\n verde=np.abs(cut_wave-gc)-extra_factor*max_val\n \n cosmics_list = [i for i, x in enumerate(verde) if x > cosmic_higher_than]\n \n if plot:\n ptitle=\"Cosmic identification in cut\"\n if line_wavelength != 0 : ptitle=\"Cosmic identification in cut at \"+np.str(line_wavelength)+\" $\\mathrm{\\AA}$\" \n plot_plot(x,verde, ymin=0,ymax=200, hlines=[cosmic_higher_than], ptitle=ptitle, ylabel=\"abs (cut - medfilt(cut)) - extra_factor * max_val\")\n \n if verbose:\n if line_wavelength == 0:\n print(\"\\n> Identified\", len(cosmics_list),\"cosmics in fibres\",cosmics_list)\n else:\n print(\"\\n> Identified\", len(cosmics_list),\"cosmics at\",np.str(line_wavelength),\"A in fibres\",cosmics_list)\n return cosmics_list", "def covariates(self) -> List[str]:\n return self._obj._names[\"covariates\"]", "def coherence(self):\r\n return np.abs(self.coherency) ** 2", "def sc(self) -> float:\n a = np.dot(self.true - np.mean(self.true), self.predicted - np.mean(self.predicted))\n b = np.linalg.norm(self.true - np.mean(self.true))\n c = np.linalg.norm(self.predicted - np.mean(self.predicted))\n e = b * c\n return float(np.arccos(a / e))", "def getCrystal(self):\n\t\treturn self.crystal,self.dspace,self.offset7()", "def get_center_of_mass_allies(self,obs):", "def recommend_cosim():\n pass", "def getViscosityLaw(self):\n return self.viscosityLaw", "def get_covid_term() -> pd.DataFrame:\n return NOTICE_GETTER.term", "def Cima(self):\n if(self.Pila_Vacia()=='true'):\n return \"Pila Vacia\"\n else:\n return self.pila[self.puntero]", "def _morphophonemics_of(entry: _LexiconEntry) -> str:\n return entry[\"morphophonemics\"]", "def cosinewave(self): # YYY\n x_label = str(app.option_x_selected())\n y_label = str(app.option_y_selected())\n\n xdata = app.retrieve_x_data()\n ydata = app.retrieve_y_data()\n xdata = list(map(float, xdata))\n\n # x = np.arange(0, 20, 0.2) # allows us to get x values for the data plot\n # y = np.cos(x) # allows the amplitude/height (the peak deviation of the function from zero)\n # of the cosine wave to be cosine of a variable like time\n\n x = np.array(xdata) # allows us to get x values for the data plot\n print(x, type(x))\n y = x*np.cos(x) # allows the amplitude/height (the peak deviation of the function from zero)\n # of the cosine wave to be cosine of a 
variable like time\n print('self:', self)\n\n\n #self.axes.stem(mean_y, (xdata == 0), color='red')\n #self.axeshlines(y=0, color='r')\n\n self.axes.plot(x, y)\n\n self.axes.set_title('Group E Project_Cosine_wave_plot')\n self.axes.set_xlabel(x_label)\n self.axes.set_ylabel(y_label)\n\n self.c1.draw()", "def show_crisscross(self, mole_object):\r\n if mole_object.plugin_type == \"PyMOL\":\r\n obj = [\r\n LINEWIDTH, 3,\r\n\r\n BEGIN, LINE_STRIP,\r\n VERTEX, float(float(self.point_x.get()) - 0.5), float(self.point_y.get()), float(self.point_z.get()),\r\n VERTEX, float(float(self.point_x.get()) + 0.5), float(self.point_y.get()), float(self.point_z.get()),\r\n END,\r\n\r\n BEGIN, LINE_STRIP,\r\n VERTEX, float(self.point_x.get()), float(float(self.point_y.get()) - 0.5), float(self.point_z.get()),\r\n VERTEX, float(self.point_x.get()), float(float(self.point_y.get()) + 0.5), float(self.point_z.get()),\r\n END,\r\n\r\n BEGIN, LINE_STRIP,\r\n VERTEX, float(self.point_x.get()), float(self.point_y.get()), float(float(self.point_z.get()) - 0.5),\r\n VERTEX, float(self.point_x.get()), float(self.point_y.get()), float(float(self.point_z.get()) + 0.5),\r\n END\r\n\r\n ]\r\n\r\n PymolPlugin.PymolPlugin().delete(self.point_name)\r\n view = PymolPlugin.PymolPlugin().get_view()\r\n PymolPlugin.PymolPlugin().load_CGO(obj, self.point_name)\r\n PymolPlugin.PymolPlugin().set_view(view)\r\n\r\n else:\r\n chimera_model_number = int(mole_object.input_structure_box.index('active')) - 1\r\n ChimeraPlugin.ChimeraPlugin().make_icosahedron(str(chimera_model_number), float(self.point_x.get()),\r\n float(self.point_y.get()), float(self.point_z.get()))", "def sound_horizon_Class(self):\n if 'classy' not in sys.modules:\n warnings.warn(\"Class not installed, using a custom function to compute sound horizon (not precise)\")\n return self.r_s_drag()\n else:\n params = {\n 'A_s': self.As,\n 'n_s': self.ns, \n 'h': self.h,\n 'omega_b': self.Omega_b*self.h**2.,\n 'omega_cdm': self.Omega_cdm*self.h**2.,\n 'Omega_k': self.Omega_K,\n 'Omega_fld': self.Omega_lambda,\n 'w0_fld': self.w0,\n 'wa_fld': self.wa,\n 'N_ur': self.massless_nu,\n 'N_ncdm': self.massive_nu}\n if self.massive_nu != 0:\n params['m_ncdm'] = ''\n params['T_ncdm'] = ''\n for im, m in enumerate(self.M_nu):\n params['m_ncdm'] += '%.8f, ' %(m)\n params['T_ncdm'] += '%.8f, ' %(self.Gamma_nu)\n params['m_ncdm'] = params['m_ncdm'][:-2]\n params['T_ncdm'] = params['T_ncdm'][:-2]\n\n cosmo = Class()\n cosmo.set(params)\n cosmo.compute()\n\n rs = cosmo.rs_drag()*cosmo.h()\n\n cosmo.struct_cleanup()\n cosmo.empty()\n\n return rs", "def getPrimaryMood():\n now = datetime.datetime.now()\n\n # sin curve, best mood during noon 2pm, worst mood during midnight 2am.\n time_offset = max(0.0, min(((now.hour*60 + now.minute - 120) % 1440)/(1440), 1.0))\n time_moodadj = math.sin(time_offset*math.pi)\n\n # approximately monthly cos curve where middle of the month is moody\n date_offset = max(0.0, min(((now.month-1)*30 + (now.day-1))/360, 1.0))\n date_moodadj = math.cos(date_offset*(12*math.pi))\n\n # recompute exposed positivity as exponential of degree 3, adjustable by how stable the mood is\n exp_pos = Sentience.getExposedPositivity()\n if exp_pos < 0:\n exp_pos = (exp_pos**3) * (1-Sentience.getMoodStability())\n else:\n exp_pos = exp_pos**3 * Sentience.getMoodStability()\n\n mood = 0.3 + date_moodadj*0.2 + time_moodadj*0.5 + exp_pos*0.25\n return max(-1.0, min(mood, 1.0))", "def isSetStoichiometryMath(self):\n return _libsbml.SpeciesReference_isSetStoichiometryMath(self)", "def 
give_compo_exp(self):\n return self._exps[self.compo_type]", "def Riess2019_H0_cosmology(base_cosmology):\n _base_cosmology = get_cosmology(base_cosmology)\n return cosmo.LambdaCDM(\n H0=74.03, Om0=_base_cosmology.Om0, Ode0=_base_cosmology.Ode0\n )", "def get_covariate_names(self):\n if self._population_model is None:\n return []\n\n return self._population_model.get_covariate_names()", "def wypisz_info(self):\n print(f\"Samochód: {self.producent} {self.model}\")", "def _get_cu(self):\n c_undrained=0\n #group_index = self._data['GI']\n if self.is_clayey():\n c_undrained = self.qu(self._data[SoilProperty.N60])/2\n #c_undrained=_clamp(c_undrained, 10, 103)\n # Plasix calculation needs very small c_undrained\n #if c_undrained<0.21:\n # c_undrained = 0.21\n #use 0.2 as per plasix recommendation\n return c_undrained#the cu is always 103 check with small value of n_60, some mistake maybe", "def name(self):\n return s.ECOS", "def get_input(self):\n system, configuration = self.get_system_configuration(None)\n references = self.parent.references\n\n P = self.parameters.current_values_to_dict(\n context=seamm.flowchart_variables._data\n )\n\n # The model chemistry, for labeling properties.\n self.model = P[\"hamiltonian\"]\n\n # Have to fix formatting for printing...\n PP = dict(P)\n for key in PP:\n if isinstance(PP[key], units_class):\n PP[key] = \"{:~P}\".format(PP[key])\n\n # Save the description for later printing\n self.description = []\n self.description.append(__(self.description_text(PP), **PP, indent=self.indent))\n\n # Start gathering the keywords\n keywords = copy.deepcopy(P[\"extra keywords\"])\n keywords.append(\"1SCF\")\n keywords.append(P[\"hamiltonian\"])\n\n if P[\"hamiltonian\"] == \"AM1\":\n elements = configuration.atoms.symbols\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1985c\"],\n alias=\"Dewar_1985c\",\n module=\"mopac_step\",\n level=1,\n note=\"Main reference for AM1 + C, H, N, O.\",\n )\n for element in (\"F\", \"Cl\", \"Br\", \"I\"):\n if element in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1988\"],\n alias=\"Dewar_1988\",\n module=\"mopac_step\",\n level=1,\n note=\"AM1 parameters for F, Cl, Br, I.\",\n )\n break\n if \"Al\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1990\"],\n alias=\"Dewar_1990\",\n module=\"mopac_step\",\n level=1,\n note=\"AM1 parameters for Al.\",\n )\n if \"Si\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1987b\"],\n alias=\"Dewar_1987b\",\n module=\"mopac_step\",\n level=1,\n note=\"AM1 parameters for Si.\",\n )\n if \"P\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1989\"],\n alias=\"Dewar_1989\",\n module=\"mopac_step\",\n level=1,\n note=\"AM1 parameters for P.\",\n )\n if \"S\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1990b\"],\n alias=\"Dewar_1990b\",\n module=\"mopac_step\",\n level=1,\n note=\"AM1 parameters for S.\",\n )\n if \"Zn\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1988b\"],\n alias=\"Dewar_1988b\",\n module=\"mopac_step\",\n level=1,\n note=\"AM1 parameters for Zn.\",\n )\n if \"Ge\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1989b\"],\n alias=\"Dewar_1989b\",\n module=\"mopac_step\",\n level=1,\n note=\"AM1 parameters for Ge.\",\n )\n if \"Mo\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Voityuk_2000\"],\n alias=\"Voityuk_2000\",\n module=\"mopac_step\",\n level=1,\n note=\"AM1 parameters for 
Mo.\",\n )\n if \"Hg\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1989c\"],\n alias=\"Dewar_1989c\",\n module=\"mopac_step\",\n level=1,\n note=\"AM1 parameters for Hg.\",\n )\n for element in (\n \"Li\",\n \"Be\",\n \"Na\",\n \"Mg\",\n \"K\",\n \"Ca\",\n \"Ga\",\n \"As\",\n \"Se\",\n \"Rb\",\n \"Sr\",\n \"In\",\n \"Sn\",\n \"Sb\",\n \"Te\",\n \"Cs\",\n \"Ba\",\n \"Pb\",\n \"Bi\",\n ):\n if element in elements:\n references.cite(\n raw=self.parent._bibliography[\"Stewart_2004\"],\n alias=\"Stewart_2004\",\n module=\"mopac_step\",\n level=1,\n note=\"AM1 parameterization for main-group elements.\",\n )\n break\n elif P[\"hamiltonian\"] == \"MNDO\" or P[\"hamiltonian\"] == \"MNDOD\":\n elements = configuration.atoms.symbols\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1977\"],\n alias=\"Dewar_1977\",\n module=\"mopac_step\",\n level=1,\n note=\"Main reference for MNDO + C, H, N, O.\",\n )\n if \"Be\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1978\"],\n alias=\"Dewar_1978\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for Be.\",\n )\n if \"B\" in elements or \"Al\" in elements:\n if \"B\" in elements or P[\"hamiltonian\"] == \"MNDO\":\n references.cite(\n raw=self.parent._bibliography[\"Davis_1981\"],\n alias=\"Davis_1981\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for B and Al.\",\n )\n if \"F\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1978b\"],\n alias=\"Dewar_1978b\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for F.\",\n )\n if \"Si\" in elements and P[\"hamiltonian\"] == \"MNDO\":\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1986\"],\n alias=\"Dewar_1986\",\n module=\"mopac_step\",\n level=1,\n note=\"Revised MNDO parameters for Si.\",\n )\n if \"P\" in elements and P[\"hamiltonian\"] == \"MNDO\":\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1978b\"],\n alias=\"Dewar_1978b\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for P.\",\n )\n if \"S\" in elements and P[\"hamiltonian\"] == \"MNDO\":\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1986b\"],\n alias=\"Dewar_1986b\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for S.\",\n )\n if \"Cl\" in elements and P[\"hamiltonian\"] == \"MNDO\":\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1983\"],\n alias=\"Dewar_1983\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for Cl.\",\n )\n if \"Zn\" in elements and P[\"hamiltonian\"] == \"MNDO\":\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1986c\"],\n alias=\"Dewar_1986c\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for Zn.\",\n )\n if \"Ge\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1987\"],\n alias=\"Dewar_1987\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for Ge.\",\n )\n if \"Br\" in elements and P[\"hamiltonian\"] == \"MNDO\":\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1983b\"],\n alias=\"Dewar_1983b\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for Br.\",\n )\n if \"Sn\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1984\"],\n alias=\"Dewar_1984\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for Sn.\",\n )\n if \"I\" in elements and P[\"hamiltonian\"] == \"MNDO\":\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1984b\"],\n alias=\"Dewar_1984b\",\n module=\"mopac_step\",\n 
level=1,\n note=\"MNDO parameters for I.\",\n )\n if \"Hg\" in elements and P[\"hamiltonian\"] == \"MNDO\":\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1985\"],\n alias=\"Dewar_1985\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for Hg.\",\n )\n if \"Pb\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Dewar_1985b\"],\n alias=\"Dewar_1985b\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameters for Pb.\",\n )\n for element in (\n \"Na\",\n \"Mg\",\n \"K\",\n \"Ca\",\n \"Ga\",\n \"As\",\n \"Se\",\n \"Rb\",\n \"Sr\",\n \"In\",\n \"Sb\",\n \"Te\",\n \"Cs\",\n \"Ba\",\n \"Tl\",\n \"Bi\",\n ):\n if element in elements:\n references.cite(\n raw=self.parent._bibliography[\"Stewart_2004\"],\n alias=\"Stewart_2004\",\n module=\"mopac_step\",\n level=1,\n note=\"MNDO parameterization for main-group elements.\",\n )\n break\n if P[\"hamiltonian\"] == \"MNDOD\":\n for element in (\n \"Al\",\n \"Si\",\n \"P\",\n \"S\",\n \"Cl\",\n \"Br\",\n \"I\",\n \"Zn\",\n \"Cd\",\n \"Hg\",\n ):\n if element in elements:\n references.cite(\n raw=self.parent._bibliography[\"Thiel_1992\"],\n alias=\"Thiel_1992\",\n module=\"mopac_step\",\n level=1,\n note=(\"MNDO-D formalism for d-orbitals.\"),\n )\n references.cite(\n raw=self.parent._bibliography[\"Thiel_1996\"],\n alias=\"Thiel_1996\",\n module=\"mopac_step\",\n level=1,\n note=(\n \"MNDO-D, parameters for Al, Si, P, S, Cl, Br, \"\n \"I, Zn, Cd, and Hg.\"\n ),\n )\n break\n elif P[\"hamiltonian\"] == \"PM3\":\n elements = configuration.atoms.symbols\n references.cite(\n raw=self.parent._bibliography[\"Stewart_1989\"],\n alias=\"Stewart_1989\",\n module=\"mopac_step\",\n level=1,\n note=\"The citation for the MOPAC parameterization.\",\n )\n for element in (\n \"Be\",\n \"Mg\",\n \"Zn\",\n \"Ga\",\n \"Ge\",\n \"As\",\n \"Se\",\n \"Cd\",\n \"In\",\n \"Sn\",\n \"Sb\",\n \"Te\",\n \"Hg\",\n \"Tl\",\n \"Pb\",\n \"Bi\",\n ):\n if element in elements:\n references.cite(\n raw=self.parent._bibliography[\"Stewart_1991\"],\n alias=\"Stewart_1991\",\n module=\"mopac_step\",\n level=1,\n note=\"The citation for the MOPAC parameterization.\",\n )\n break\n if \"Li\" in elements:\n references.cite(\n raw=self.parent._bibliography[\"Anders_1993\"],\n alias=\"Anders_1993\",\n module=\"mopac_step\",\n level=1,\n note=\"The citation for the MOPAC parameterization.\",\n )\n for element in (\"B\", \"Na\", \"K\", \"Ca\", \"Rb\", \"Sr\", \"Cs\", \"Ba\"):\n if element in elements:\n references.cite(\n raw=self.parent._bibliography[\"Stewart_2004\"],\n alias=\"Stewart_2004\",\n module=\"mopac_step\",\n level=1,\n note=\"The citation for the MOPAC parameterization.\",\n )\n break\n elif \"PM6\" in P[\"hamiltonian\"]:\n references.cite(\n raw=self.parent._bibliography[\"Stewart_2007\"],\n alias=\"Stewart_2007\",\n module=\"mopac_step\",\n level=1,\n note=\"The PM6 parameterization in MOPAC.\",\n )\n if P[\"hamiltonian\"] == \"PM6-D3\":\n references.cite(\n raw=self.parent._bibliography[\"Grimme_2010\"],\n alias=\"Grimme_2010\",\n module=\"mopac_step\",\n level=1,\n note=\"Dispersion correction by Grimme, et al.\",\n )\n if P[\"hamiltonian\"] == \"PM6-DH+\":\n references.cite(\n raw=self.parent._bibliography[\"Korth_2010\"],\n alias=\"Korth_2010\",\n module=\"mopac_step\",\n level=1,\n note=\"Hydrogen-bonding correction by Korth.\",\n )\n if \"PM6-DH2\" in P[\"hamiltonian\"]:\n references.cite(\n raw=self.parent._bibliography[\"Korth_2009\"],\n alias=\"Korth_2009\",\n module=\"mopac_step\",\n level=1,\n note=\"Hydrogen-bonding and 
dispersion correction.\",\n )\n references.cite(\n raw=self.parent._bibliography[\"Rezac_2009\"],\n alias=\"Rezac_2009\",\n module=\"mopac_step\",\n level=1,\n note=\"Hydrogen-bonding and dispersion correction.\",\n )\n if P[\"hamiltonian\"] == \"PM6-DH2x\":\n references.cite(\n raw=self.parent._bibliography[\"Rezac_2011\"],\n alias=\"Rezac_2011\",\n module=\"mopac_step\",\n level=1,\n note=\"Halogen-bonding correction.\",\n )\n if \"PM6-D3H4\" in P[\"hamiltonian\"]:\n references.cite(\n raw=self.parent._bibliography[\"Rezac_2011\"],\n alias=\"Rezac_2011\",\n module=\"mopac_step\",\n level=1,\n note=\"Hydrogen-bonding and dispersion correction.\",\n )\n references.cite(\n raw=self.parent._bibliography[\"Vorlova_2015\"],\n alias=\"Vorlova_2015\",\n module=\"mopac_step\",\n level=1,\n note=\"Hydrogen-hydrogen repulsion correction.\",\n )\n if P[\"hamiltonian\"] == \"PM6-D3H4x\":\n references.cite(\n raw=self.parent._bibliography[\"Brahmkshatriya_2013\"],\n alias=\"Brahmkshatriya_2013\",\n module=\"mopac_step\",\n level=1,\n note=\"Halogen-oxygen and halogen-nitrogen correction.\",\n )\n elif \"PM7\" in P[\"hamiltonian\"]:\n references.cite(\n raw=self.parent._bibliography[\"Stewart_2012\"],\n alias=\"Stewart_2012\",\n module=\"mopac_step\",\n level=1,\n note=\"The PM7 parameterization in MOPAC.\",\n )\n elif P[\"hamiltonian\"] == \"RM1\":\n references.cite(\n raw=self.parent._bibliography[\"Rocha_2006\"],\n alias=\"Rocha_2006\",\n module=\"mopac_step\",\n level=1,\n note=\"RM1 parameterization.\",\n )\n\n # which structure? may need to set default first...\n if P[\"structure\"] == \"default\":\n if self._id[-1] == \"1\":\n structure = \"initial\"\n else:\n structure = \"current\"\n elif self._id[-1] == \"1\":\n structure = \"initial\"\n elif P[\"structure\"] == \"current\":\n structure = \"current\"\n\n if structure == \"current\":\n keywords.append(\"OLDGEO\")\n\n if P[\"convergence\"] == \"normal\":\n pass\n elif P[\"convergence\"] == \"precise\":\n keywords.append(\"PRECISE\")\n elif P[\"convergence\"] == \"relative\":\n keywords.append(\"RELSCF=\" + P[\"relative\"])\n elif P[\"convergence\"] == \"absolute\":\n keywords.append(\"SCFSCRT=\" + P[\"absolute\"])\n else:\n raise RuntimeError(\n \"Don't recognize convergence '{}'\".format(P[\"convergence\"])\n )\n\n if P[\"uhf\"]:\n keywords.append(\"UHF\")\n\n if P[\"MOZYME\"] == \"always\":\n keywords.append(\"MOZYME\")\n elif (\n P[\"MOZYME\"] == \"for larger systems\"\n and configuration.n_atoms >= P[\"nMOZYME\"]\n ):\n keywords.append(\"MOZYME\")\n\n if P[\"COSMO\"]:\n keywords.append(f\"EPS={P['eps']}\")\n rsolve = P[\"rsolve\"].to(\"Å\").magnitude\n keywords.append(f\"RSOLVE={rsolve}\")\n keywords.append(f\"NSPA={P['nspa']}\")\n keywords.append(f\"DISEX={P['disex']}\")\n\n if P[\"calculate gradients\"]:\n keywords.append(\"GRADIENTS\")\n\n if \"yes\" in P[\"bond orders\"]:\n keywords.append(\"BONDS\")\n\n # Add any extra keywords so that they appear at the end\n metadata = self.metadata[\"keywords\"]\n for keyword in P[\"extra keywords\"]:\n if \"=\" in keyword:\n keyword, value = keyword.split(\"=\")\n if keyword not in metadata or \"format\" not in metadata[keyword]:\n keywords.append(keyword + \"=\" + value)\n else:\n keywords.append(metadata[keyword][\"format\"].format(keyword, value))\n\n result = []\n result.append([[*keywords], None, None])\n\n # Handle MOZYME follow-up calculations\n if \"MOZYME\" in keywords:\n follow_up = P[\"MOZYME follow-up\"]\n if \"exact\" in follow_up:\n keywords.remove(\"MOZYME\")\n if \"1SCF\" not in 
keywords:\n keywords.append(\"1SCF\")\n keywords.append(\"OLDGEO\")\n result.append([[*keywords], None, \"MOZYME follow-up using MOPAC\"])\n elif \"new\" in follow_up:\n if \"1SCF\" not in keywords:\n keywords.append(\"1SCF\")\n keywords.append(\"OLDGEO\")\n result.append([[*keywords], None, \"MOZYME follow-up, reinitializing\"])\n elif follow_up == \"none\":\n pass\n else:\n logger.error(f\"Don't recognize the MOZYME follow-up: '{follow_up}'\")\n\n return result", "def getMath(self):\n return _libsbml.KineticLaw_getMath(self)", "def get_co_occ_mat(s_hc_ml, n_s_real, n_s_hc):\n\n co_occs = np.zeros((n_s_hc, n_s_real))\n for idx, n in s_hc_ml.items():\n co_occs[idx] = n\n\n return co_occs", "def getX_cortex(atlas='tessels0042',sub = 's02'):\n Xdata = Dataset('sc1','glm7',atlas,sub)\n Xdata.load_mat() # Load from Matlab\n X1, INFO1 = Xdata.get_data(averaging=\"sess\") # Get numpy\n # Get the test data set\n XTdata = Dataset('sc2','glm7',atlas,sub)\n XTdata.load_mat() # Load from Matlab\n X2, INFO2 = XTdata.get_data(averaging=\"sess\") # Get numpy\n # z-standardize cortical regressors\n X1 = X1 / np.sqrt(np.sum(X1 ** 2, 0) / X1.shape[0])\n X2 = X2 / np.sqrt(np.sum(X2 ** 2, 0) / X1.shape[0])\n X1 = np.nan_to_num(X1)\n X2 = np.nan_to_num(X2)\n # i1 = np.where(INFO1.sess==1)\n # i2 = np.where(INFO1.sess==2)\n # rel = np.sum(X1[i1,:]*X1[i2,:])/np.sqrt(np.sum(X1[i1,:]**2) * np.sum(X1[i2,:]**2))\n return X1,X2,INFO1,INFO2", "def get_corporal_comp(imc: float):\n if imc < 18.5:\n return \"Peso inferior al normal\"\n if imc >= 18.5 and imc < 25:\n return \"Normal\"\n if imc >= 25 and imc < 30:\n return \"Peso superior al normal\"\n if imc >= 30:\n return \"Obesidad\"", "def Sphericity(self):\n s = self.sphericity\n assert s in range(1,6), \"Sphericity score out of bounds.\"\n if s == 1: return 'Linear'\n elif s == 2: return 'Ovoid Linear'\n elif s == 3: return 'Ovoid'\n elif s == 4: return 'Ovoid Round'\n elif s == 5: return 'Round'", "def _repr_(self):\n return \"Category of hyperbolic models of {}\".format(self.base())", "def cm(self,cm):\n tp = cm[(cm.y_true ==1) & ( cm.y_pred ==1 )].shape[0]\n fn = cm[(cm.y_true == 1) & (cm.y_pred == 0)].shape[0]\n fp = cm[(cm.y_true ==0) & (cm.y_pred == 1)].shape[0]\n tn = cm[(cm.y_true == 0) &( cm.y_pred == 0)].shape[0]\n sens = format(tp/float(tp+fn),'.2f')\n spec = format(tn/float(tn+fp),'.2f')\n return sens, spec", "def cmm(self) -> Optional[np.ndarray]:\n if self.sensorsz is None:\n return None\n return self.c * self.sensorsz / self.imgsz", "def coherence(self):\r\n coherence = np.abs(self.coherency ** 2)\r\n\r\n return coherence", "def getConstantSentenceForms(self):", "def sim(self):\n return self.mujoco_simulation.sim", "def getFactura(self): \n return self.caja", "def getFactura(self): \n return self.caja", "def piece_coor(self):\n return self.piece_type[self.rotation]", "def mostraCotxe(self):\n return str(self._l[0])+\" \"+str(self._l[1])+\" \"+str(self._l[2])+\" \"+str(self._l[3])", "def get_n2o_soil_organic_content(self):\n attribute_temp = 'more than 3.0'\n if float(self.soil_organic_c) < 1.0:\n attribute_temp = 'less than 1.0'\n elif (float(self.soil_organic_c) >= 1.0) and (float(self.soil_organic_c) < 3.0):\n attribute_temp = '1.0-3.0'\n\n soc_n2o_content = ef.n2o_soc_options.loc[ef.n2o_soc_options.iloc[:, 1] == attribute_temp].iloc[:, 2].values[0]\n return soc_n2o_content", "def get_C(self, observation, area):\n if observation:\n C = []\n for Theta in area.Thetas:\n C.append(Theta @ self.Omega)\n else:\n C = [self.Omega]\n return C", "def 
coherence_from_spectral(Sw):\r\n\r\n Sxx = Sw[0, 0].real\r\n Syy = Sw[1, 1].real\r\n\r\n Sxy_mod_sq = (Sw[0, 1] * Sw[1, 0]).real\r\n Sxy_mod_sq /= Sxx\r\n Sxy_mod_sq /= Syy\r\n return Sxy_mod_sq", "def modelCS(self,ht=None,hf=None):\n if ht is not None:\n hf = time2freq(ht)\n cs,a,b,c = make_model_cs(hf, self.s0, self.bw, self.ref_freq)\n \n return cs", "def test_coherence_matlab():\r\n\r\n ts = np.loadtxt(os.path.join(test_dir_path, 'tseries12.txt'))\r\n\r\n ts0 = ts[1]\r\n ts1 = ts[0]\r\n\r\n method = {}\r\n method['this_method'] = 'welch'\r\n method['NFFT'] = 64\r\n method['Fs'] = 1.0\r\n method['noverlap'] = method['NFFT'] / 2\r\n\r\n ttt = np.vstack([ts0, ts1])\r\n f, cxy_mlab = tsa.coherence(ttt, csd_method=method)\r\n cxy_matlab = np.loadtxt(os.path.join(test_dir_path, 'cxy_matlab.txt'))\r\n\r\n npt.assert_almost_equal(cxy_mlab[0][1], cxy_matlab, decimal=5)", "def getMainChemCompSysName(self):\n dataDict = self.__dict__\n result = self.findFirstChemCompSysName(specificChemCompVars=frozenset())\n return result", "def get_power_spectrum(self, cosmo, nz1, nz2):\n t1 = ccl.WeakLensingTracer(cosmo, nz1)\n t2 = ccl.WeakLensingTracer(cosmo, nz2)\n return ccl.angular_cl(cosmo, t1, t2, self.ells)", "def getCF(self):\r\n return self.lEq;", "def cov(self):\n return self.cond_proba.cov", "def cosine_sim(im, s):\n return im.mm(s.t()) #image.mm(sentence.t()) & mm() Performs a matrix multiplication of the matrices ", "def omega(self):\n self.cosineSequences()", "def system_capex(self):\n\n topside = self.config[\"offshore_substation_topside\"][\"unit_cost\"]\n substructure = self.config[\"offshore_substation_substructure\"][\"unit_cost\"]\n mooring = self.config[\"offshore_substation_substructure\"][\"mooring_cost\"]\n\n return self.num_substations * (topside + substructure + mooring)", "def get_coordinated_car_name(self):\n return self.coordinated_car_name", "def Problem11():\n return 'Ductile Coulomb-Mohr'", "def phosphate(self):\n index = self.var_index(4)\n return self.var_data(index)", "def obtem_ciclo_in(self):\n\n return self.ciclo_in", "def csr(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"csr\")", "def plotOfCos1(self):\n#\t\tp1=_plot.plot(yLabel='',xLabel='time [ms]',title=self.title,\n#\t\t\t\t\t subtitle='Cos1 Rogowski',shotno=[self.shotno])\n#\t\tp1.addTrace(xData=self.time*1000,yData=self.cos1)\n#\t\t\n#\t\treturn p1\n\t\tfig,p1=_plt.subplots()\n\t\tp1.plot(self.time*1e3,self.cos1)\n\t\t_plot.finalizeSubplot(p1,xlabel='Time (ms)',ylabel='')\n\t\t_plot.finalizeFigure(fig,title=self.title)\n\t\t\n\t\treturn p1", "def carbon_monoxide(self) -> float | None:\n return round_state(self._get_sensor_value(API_CO))", "def get_cosine_sim(self):\r\n return CosineSimilarity().calculate_similarity(self.tweets)", "def get_mutual_information(c_wic, c_wioc, c_owic, c_owioc):\n # total word count\n c_total = c_wic + c_wioc + c_owic + c_owioc\n\n mi_1 = (c_wic / float(c_total)) * log10((c_total * c_wic) /\n float((c_wic + c_wioc) * (c_wic + c_owic)))\n mi_2 = (c_owic / float(c_total)) * log10((c_total * c_owic) /\n float((c_owic + c_owioc) * (c_wic + c_owic)))\n mi_3 = (c_wioc / float(c_total)) * log10((c_total * c_wioc) /\n float((c_wic + c_wioc) * (c_wioc + c_owioc)))\n mi_4 = (c_owioc / float(c_total)) * log10((c_total * c_owioc) /\n float((c_owic + c_owioc) * (c_wioc + c_owioc)))\n\n return mi_1 + mi_2 + mi_3 + mi_4", "def getChemicalFormula(self):\n return _libsbml.FbcSpeciesPlugin_getChemicalFormula(self)", "def _corresponding_simu(self):\n return SimuHawkes()" ]
[ "0.6441433", "0.64167005", "0.6164821", "0.6161581", "0.6125467", "0.60074854", "0.59982276", "0.5963294", "0.5915752", "0.58894485", "0.5884741", "0.57696545", "0.5693378", "0.5673406", "0.5641914", "0.56051314", "0.56012136", "0.55621797", "0.55592734", "0.55408865", "0.5471717", "0.544486", "0.54382503", "0.5423644", "0.5420825", "0.5414656", "0.54142207", "0.5402984", "0.53972864", "0.5392122", "0.5366626", "0.5360309", "0.5359027", "0.5355137", "0.534328", "0.533012", "0.53255564", "0.53124845", "0.5303632", "0.5295869", "0.5290216", "0.5286819", "0.5276189", "0.52747446", "0.5274518", "0.5274372", "0.5272481", "0.5264002", "0.52484757", "0.5229603", "0.52275634", "0.52253556", "0.5215547", "0.52075785", "0.5204223", "0.5200855", "0.51974314", "0.5192172", "0.5184162", "0.5171037", "0.5169714", "0.51694745", "0.51683575", "0.5166829", "0.5162185", "0.5156492", "0.5155333", "0.5147407", "0.5143097", "0.5132625", "0.51317793", "0.51300055", "0.51225835", "0.5116686", "0.5116686", "0.5115459", "0.51134473", "0.51061463", "0.5103999", "0.50999093", "0.50917083", "0.50888693", "0.50875324", "0.50839055", "0.50834125", "0.5082863", "0.5076603", "0.5075593", "0.5075119", "0.506961", "0.5061887", "0.50616664", "0.5058349", "0.50580674", "0.5057003", "0.505166", "0.504443", "0.5044224", "0.50426704", "0.5036062" ]
0.7251539
0
Return the Planck15 cosmology coded up in lalsuite
def Planck15_lal_cosmology(): return cosmo.LambdaCDM(H0=67.90, Om0=0.3065, Ode0=0.6935)
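A minimal usage sketch of the snippet above, assuming `cosmo` refers to `astropy.cosmology` (as in the neighbouring snippets); the parameter values are the ones quoted in the function, and the variable names are illustrative only:

from astropy import cosmology as cosmo

# Build the same flat-less LambdaCDM object described above and query it.
planck15_lal = cosmo.LambdaCDM(H0=67.90, Om0=0.3065, Ode0=0.6935)
print(planck15_lal.luminosity_distance(0.1))  # luminosity distance at z = 0.1 (astropy Quantity, Mpc)
print(planck15_lal.H(0.1))                    # Hubble parameter at z = 0.1 (km / (Mpc s))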
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cie_lab(self):\n K = Fraction(1, 3) * Fraction(29, 6) ** 2\n e = Fraction(6, 29) ** 3\n x, y, z = (n / m for n, m in zip(self.cie_xyz, D65))\n fx, fy, fz = (\n n ** Fraction(1, 3) if n > e else K * n + Fraction(4, 29)\n for n in (x, y, z)\n )\n return (116 * fy - 16, 500 * (fx - fy), 200 * (fy - fz))", "def get_cosmology(cosmology=conf.cosmology):\n if cosmology.lower() not in available_cosmologies:\n raise ValueError(\n \"Unrecognised cosmology {}. Available cosmologies are {}\".format(\n cosmology, \", \".join(available_cosmologies)\n )\n )\n elif cosmology.lower() in _astropy_cosmologies:\n ind = [\n num for num, name in enumerate(_astropy_cosmologies) if\n name == cosmology.lower()\n ][0]\n return getattr(cosmo, list(parameters.available)[ind])\n elif cosmology.lower() == \"planck15_lal\":\n return Planck15_lal_cosmology()\n elif \"_with_riess2019_h0\" in cosmology.lower():\n base_cosmology = cosmology.lower().split(\"_with_riess2019_h0\")[0]\n return Riess2019_H0_cosmology(base_cosmology)", "def Problem11():\n return 'Ductile Coulomb-Mohr'", "def get_coml_s(hyplo):\r\n\tres=\"\"\r\n\tfor x in hyplo:\r\n\t\tif x==\"1\":\r\n\t\t\tres+=\"0\"\r\n\t\telse:\r\n\t\t \tres+=\"1\"\r\n\treturn res", "def printPolyCoeffs(lam) :\n ell = len(lam)\n useFormat = \"2.6e\"\n count = 0\n def printLine(s, count) :\n if lam[count] < 0 :\n s = s + 3 * \" \"\n else :\n s = s + 4 * \" \"\n s = s + \"{0:\" + useFormat + \"}\"\n print(s . format(lam[count]))\n count = count + 1\n return count\n if ell >= 1 :\n count = printLine(\"x0y0\", count)\n if ell >= 3 :\n count = printLine(\"x1y0\", count)\n count = printLine(\"x0y1\", count)\n if ell >= 6 :\n count = printLine(\"x2y0\", count)\n count = printLine(\"x1y1\", count)\n count = printLine(\"x0y2\", count)\n if ell >= 10 :\n count = printLine(\"x3y0\", count)\n count = printLine(\"x2y1\", count)\n count = printLine(\"x1y2\", count)\n count = printLine(\"x0y3\", count)\n if ell >= 15 :\n count = printLine(\"x4y0\", count)\n count = printLine(\"x3y1\", count)\n count = printLine(\"x2y2\", count)\n count = printLine(\"x1y3\", count)\n count = printLine(\"x0y4\", count)\n if ell >= 21 :\n count = printLine(\"x5y0\", count)\n count = printLine(\"x4y1\", count)\n count = printLine(\"x3y2\", count)\n count = printLine(\"x2y3\", count)\n count = printLine(\"x1y4\", count)\n count = printLine(\"x0y5\", count)\n if ell >= 28 :\n count = printLine(\"x6y0\", count)\n count = printLine(\"x5y1\", count)\n count = printLine(\"x4y2\", count)\n count = printLine(\"x3y3\", count)\n count = printLine(\"x2y4\", count)\n count = printLine(\"x1y5\", count)\n count = printLine(\"x0y6\", count)\n if ell >= 36 :\n count = printLine(\"x7y0\", count)\n count = printLine(\"x6y1\", count)\n count = printLine(\"x5y2\", count)\n count = printLine(\"x4y3\", count)\n count = printLine(\"x3y4\", count)\n count = printLine(\"x2y5\", count)\n count = printLine(\"x1y6\", count)\n count = printLine(\"x0y7\", count)\n if (ell > 36) or (ell < 1) :\n raise ValueError(\"Polynomial degree less than or equal to 7, please.\")", "def bmad_linac_phasing_lines(epics):\n lines = [\n '! Linac overall phasing',\n 'O_L1[phase_deg] = 0 ! K21_1 sets this directly. 
This is a delta on top of that.', \n 'O_L2[phase_deg] = '+str(epics.caget('SIOC:SYS0:ML00:CALC204')),\n 'O_L3[phase_deg] = '+str(epics.caget('SIOC:SYS0:ML00:AO499'))\n ]\n return lines", "def get_answer(self, syl):\n \n if (self.figure_counter[syl[2]] != 0) and (np.random.random() < self.figure_nvc[syl[2]] / self.figure_counter[syl[2]]):\n answer = \"NVC\"\n else:\n rep = self.get_representation(syl)\n concl_quant = self.determine_quantifier(rep)\n quant = list(self.representation.keys())[list(self.representation.values()).index(concl_quant)]\n if self.figure_concl[syl[2]] > 0:\n order = \"ac\"\n elif self.figure_concl[syl[2]] < 0:\n order = \"ca\"\n else:\n order = np.random.choice([\"ac\", \"ca\"])\n answer = quant + order\n \n return answer", "def license_plate(self) -> str:\n temp = re.sub(\n r\"\\?\",\n lambda x: self.random_element(self.ascii_uppercase_azerbaijan),\n self.random_element(self.license_formats),\n )\n temp = temp.replace(\"##\", self.random_element(self.license_plate_initial_numbers), 1)\n # temp = temp.format(self.random_element(range(1, 999)))\n return self.numerify(temp)", "def css(ax, col, legend):\n d = Planck15.luminosity_distance(z=0.034).cgs.value\n\n # low frequency\n nu = 6E9\n\n # add the points from Deanne's paper\n x = np.array([69, 99, 162, 357])\n y = np.array([4.5, 6.1, 2.3, 0.07])*nu\n lum = plot_line(\n ax, d, x, y,\n 'AT2018cow', None, col, legend, zorder=10)\n print(lum)\n ax.text(x[0], lum[0]*1.1, 'CSS161010', fontsize=11,\n verticalalignment='bottom',\n horizontalalignment='right')", "def calico_kitty():\n return __cat_whisperer(colors=True, coat='calico_colorz', logo_colorz='logo_colorz')[Cat.ASS]", "def chelsea():\n from skimage import data\n\n return data.chelsea()", "def showCl(ell,temps,title='CAMB ISWout power spectrum'):\n plt.plot(ell,temps*ell*(ell+1)/(2*np.pi) *1e12) #1e12 to convert to microK**2\n plt.xlabel('multipole moment l')\n plt.ylabel('l(l+1)C_l/(2pi) [microK**2]')\n plt.title(title)\n plt.show()", "def lookup_Pk(cosmology='planck',nonlinear=0):\n\n # k in h/Mpc\n k = N.logspace(-4., 3., 3*1024)\n\n if nonlinear==1:\n hf = 'halofit'\n saveto = 'data_itam/'+cosmology+'_pk.txt'\n\n else:\n hf = ''\n saveto = 'data_itam/'+cosmology+'_pk_linear.txt'\n\n if cosmology == 'planck':\n class_params = {\n 'non linear': hf,\n 'output': ['mPk','vTk'],\n 'P_k_max_1/Mpc': 1000.,\n 'z_pk': 0.,\n 'A_s': 2.3e-9,\n 'n_s': 0.96,\n 'h': 0.7,\n 'omega_b': 0.0225,\n 'Omega_cdm': 0.25,\n }\n sig8_0 = 0.8\n\n\n elif cosmology == 'wmap':\n class_params = {\n 'non linear': hf,\n 'output': ['mPk','vTk'],\n 'P_k_max_1/Mpc': 1000.,\n 'z_pk': 0.,\n 'A_s': 2.3e-9,\n 'n_s': 0.967,\n 'h': 0.704,\n 'omega_b': 0.02253,\n 'Omega_cdm': 0.226,\n }\n sig8_0 = 0.81\n\n\n elif cosmology == 'ML':\n class_params = {\n 'non linear': hf,\n 'output': ['mPk','vTk'],\n 'P_k_max_1/Mpc': 1000.,\n 'z_pk': 0.,\n 'A_s': 2.3e-9,\n 'n_s': 1.,\n 'h': 0.73,\n 'omega_b': 0.045*0.73**2,\n 'Omega_cdm': 0.25-0.045,\n }\n sig8_0 = 0.9\n\n else:\n raise ValueError(\"the cosmology you chose does not exist\")\n\n cosmoClass_nl = Class()\n cosmoClass_nl.set(class_params)\n cosmoClass_nl.compute()\n\n # rescale the normalization of matter power spectrum to have sig8=0.8 today\n sig8 = cosmoClass_nl.sigma8()\n A_s = cosmoClass_nl.pars['A_s']\n cosmoClass_nl.struct_cleanup() # does not clean the input class_params, cosmo.empty() does that\n cosmoClass_nl.set(A_s=A_s*(sig8_0*1./sig8)**2)\n cosmoClass_nl.compute()\n\n h = cosmoClass_nl.pars['h']\n pk_nl = N.asarray([ cosmoClass_nl.pk(x*h, 0.,)*h**3 
for x in k ])\n\n kpk = N.vstack((k,pk_nl))\n \n N.savetxt(saveto,kpk)\n print('saving', saveto )\n return", "def at2018cow(ax, col, legend):\n d = Planck15.luminosity_distance(z=0.014).cgs.value\n\n # high frequency\n a, b, c = sma_lc()\n dt, f, ef = b\n ef_comb = np.sqrt(ef**2 + (0.15*f)**2)\n nu = 231.5E9\n\n # low frequency\n nu = 9E9\n data_dir = \"/Users/annaho/Dropbox/Projects/Research/AT2018cow/data\"\n dat = Table.read(\n \"%s/radio_lc.dat\" %data_dir, delimiter=\"&\",\n format='ascii.no_header')\n tel = np.array(dat['col2'])\n choose = np.logical_or(tel == 'SMA', tel == 'ATCA')\n\n days = np.array(dat['col1'][choose])\n freq = np.array(dat['col3'][choose]).astype(float)\n flux_raw = np.array(dat['col4'][choose])\n flux = np.array(\n [float(val.split(\"pm\")[0][1:]) for val in flux_raw])\n eflux_sys = np.array([0.1*f for f in flux])\n eflux_form = np.array(\n [float(val.split(\"pm\")[1][0:-1]) for val in flux_raw])\n eflux = np.sqrt(eflux_sys**2 + eflux_form**2)\n choose = freq == 9\n\n # add the Margutti point and the Bietenholz point\n margutti_x = np.array([84,287])\n margutti_y = np.array([6E28, 3.2E26])/(4*np.pi*d**2)/1E-23/1E-3\n x = np.hstack((days[choose], margutti_x))\n y = np.hstack((flux[choose], margutti_y)) * nu\n lum = plot_line(\n ax, d, x, y,\n 'AT2018cow', None, col, legend, zorder=10)\n ax.text(x[0], lum[0]/1.4, 'AT2018cow', fontsize=11,\n verticalalignment='top',\n horizontalalignment='center')", "def generate_l1ca_codes(self, prn):\n output_taps = self.l1_code_phase_assignments.loc[prn, 'CA_Phase_Select']\n g1 = self.generate_mls(10, self.g1_feedback_taps, [10])\n g2 = self.generate_mls(10, self.g2_feedback_taps, output_taps)\n ca_code = []\n for index, bit in enumerate(g1):\n ca_code.append(int((bit + g2[index]) % 2))\n return ca_code", "def cvpr2018_labels():\n\n return {\n 0: 'others',\n 33: 'car',\n 34: 'motorcycle',\n 35: 'bicycle',\n 36: 'pedestrian',\n 38: 'truck',\n 39: 'bus',\n 40: 'tricycle'\n }", "def generate_symbole(figure_name = \"canon\"):\n if figure_name == \"planeur\": #PLANNEUR\n planneur = np.zeros((3, 3))\n planneur[1, 0] = 1\n planneur[0, 1] = 1\n planneur[0, 2] = 1\n planneur[1, 2] = 1\n planneur[2, 2] = 1\n return planneur\n\n elif figure_name == \"canon\": #CANON\n canon = np.zeros((36,9))\n canon[0:2,5:7] = 1\n canon[11,4:7] = 1\n canon[15:17,4:7] = 1\n canon[12,3] = 1\n canon[14,3] = 1\n canon[13,2] = 1\n canon[12,7] = 1\n canon[14,7] = 1\n canon[13,8] = 1\n canon[25,0:2] = 1\n canon[22:25,1:3] = 1\n canon[21,2:5] = 1\n canon[24,3] = 1\n canon[22:25,4:6] = 1\n canon[25,5:7] = 1\n canon[30,1:3] = 1\n canon[34:36,3:5] = 1\n return canon\n\n elif figure_name == \"blinker\": #BLINKER\n blinker = np.ones((3,1))\n return blinker\n\n elif figure_name == \"oscillator_alone\":\n osc = np.zeros((11,11))\n osc[2,2:9] = 1\n osc[8,2:9] = 1\n osc[2:9,2] = 1\n osc[2:9,8] = 1\n osc[5,2] = 0\n osc[5,8] = 0\n osc[2,5] = 0\n osc[8,5] = 0\n osc[0,5] = 1\n osc[10,5] = 1\n osc[5,0] = 1\n osc[5,10] = 1\n osc[1,4:7] = 1\n osc[9,4:7] = 1\n osc[4:7,1] = 1\n osc[4:7,9] = 1\n return osc\n\n elif figure_name == \"oscillator_one_block\":\n osc = generate_symbole(\"oscillator_alone\")\n osc[0:2,-2:] = 1\n return osc\n\n elif figure_name == \"oscillator_four_blocks\":\n osc = generate_symbole(\"oscillator_alone\")\n osc[0:2, -2:] = 1\n osc[0:2,0:2] = 1\n osc[-2:,0:2] = 1\n osc[-2:,-2:] = 1\n return osc\n\n elif figure_name == \"croix\":\n return osc\n\n elif figure_name == \"diag\":\n return osc\n\n elif figure_name == \"octogone\":\n return osc\n\n else:\n return 
0", "def get_info(self):\n return \"Malayalam Stemmer(Experimental)\"", "def grb030329(ax, col, legend):\n z = 0.1686\n d = Planck15.luminosity_distance(z=z).cgs.value\n\n # LOW FREQUENCY\n\n # Berger: this is the best frequency to pick from this paper\n t = np.array(\n [0.58, 1.05, 2.65, 3.57, 4.76, 6.89, 7.68, 9.49, 11.90, \n 12.69, 14.87, 16.66, 18.72, 20.58, 25.70, 28.44, 31.51, \n 33.58, 36.52, 42.55, 44.55, 59.55, 66.53]) / (1+z)\n f = np.array(\n [3.50, 1.98, 8.50, 6.11, 9.68, 15.56, 12.55, 13.58, 17.70, \n 17.28, 19.15, 17.77, 15.92, 16.08, 15.34, 12.67, 13.55, \n 13.10, 10.64, 8.04, 8.68, 4.48, 4.92])\n nu = np.array([8.5E9]*len(f))\n\n # Van der Horst: best frequency is 2.3 GHz\n t = np.append(t, np.array([268.577, 306.753, 365.524, 420.168, 462.078, \n 583.683, 743.892, 984.163]) / (1+z))\n f = np.append(\n f, np.array([1613, 1389, 871, 933, 707, 543, 504, 318]) * 1E-3)\n nu = np.append(nu, np.array([2.3E9]*8))\n lum = plot_line(ax, d, t, nu*f, 'GRB030329', 'GRB', col, legend)\n ax.text(t[6]*1.05, lum[10]*1.05, 'GRB030329', fontsize=11,\n verticalalignment='bottom',\n horizontalalignment='left')", "def test_lightcurve_seismology_plot():\n KeplerLightCurveFile(TABBY_Q8).PDCSAP_FLUX.periodogram().plot()", "def main():\n station = \"Merikannontie\"\n coefs, score = cycling_weather_linregr(station)\n print(f\"Measuring station: {station}\")\n print(\n f\"Regression coefficient for variable 'precipitation': {coefs[0]:.1f}\")\n print(f\"Regression coefficient for variable 'snow depth': {coefs[1]:.1f}\")\n print(f\"Regression coefficient for variable 'temperature': {coefs[2]:.1f}\")\n print(f\"Score: {score:.2f}\")\n return", "def grb111209a(ax, col, legend):\n z = 0.677\n d = Planck15.luminosity_distance(z=z).cgs.value\n\n t = np.array([5.1])/(1+z)\n f = np.array([0.97])\n nu = np.array([9E9]*len(f))\n\n lum = plot_line(ax, d, t, nu*f, 'GRB111209A', 'GRB', col, legend)\n ax.text(t[0]*1.5, lum[0]*1.3, 'GRB111209A/SN2011kl', fontsize=11,\n verticalalignment='bottom',\n horizontalalignment='center')", "def info():\n return r\"\"\"Lin-Yu Tseng and Chun Chen, \"Multiple trajectory search for Large Scale Global Optimization,\" 2008 IEEE Congress on Evolutionary Computation (IEEE World Congress on Computational Intelligence), Hong Kong, 2008, pp. 3052-3059. doi: 10.1109/CEC.2008.4631210\"\"\"", "def info(self):\n txt = \"\"\"Lick Index {s.name}\n wavelength units: {s.wavelength_unit}\n Index Band: {s.band}\n Blue continuum band: {s.blue}\n Red continuum band: {s.red}\n Measurement unit: {s.index_unit}\"\"\".format(s=self)\n print(txt)", "def info(self):\n txt = \"\"\"Lick Index {s.name}\n wavelength units: {s.wavelength_unit}\n Index Band: {s.band}\n Blue continuum band: {s.blue}\n Red continuum band: {s.red}\n Measurement unit: {s.index_unit}\"\"\".format(s=self)\n print(txt)", "def cie1931cmf(wavelength):\n if wavelength < 380 or wavelength > 780:\n return [0, 0, 0]\n index=int(round((wavelength-380)/5.0))*3\n return [_CIE1931[index+i] for i in range(3)]", "def test_get_tone_from_IBM():\n comments = [\"This was a really sucky movie. I will probably never go see this movie ever again. I am going to \"\n \"tell my whole family never to watch this movie. I very much enjoyed the special cameo in it \"\n \"though. 
I loved the plot line.\"]\n tone_info_dictionary = get_tone_from_IBM(comments[0])\n\n tones = get_columns_from_IBM_tone(tone_info_dictionary)\n print(tones)", "def lae_nccd(year):\n html = load_campdoc_html(year)\n table = extract_main_table_from_html(html)\n data = process_main_table(table)\n return(data)", "def getcolorcodeALA15(ramapath, N, ssize=5):\n\n from analyse_ala_15 import AngleCategorizer\n\n nResidues = 15\n #angles = np.loadtxt('rama_dataset_ala_15.xvg', skiprows=32, usecols=range(0, 2), delimiter=' ')\n angles = np.loadtxt(os.path.join(ramapath, 'rama_dataset_ala_15_1500.xvg'), skiprows=32, usecols=range(0, 2), delimiter=' ')\n nSamples = angles.shape[0]/15\n angles.resize(nSamples, nResidues, 2)\n angCat = AngleCategorizer(angles)\n angCat.categorize()\n angCat.countConfigurations()\n colInd = angCat.getColorMatrix()\n alphaInd = angCat.getAlphaVals()\n\n marker = list()\n patchlist = list()\n\n marker.append('o')\n marker.append('o')\n marker.append('o')\n\n import matplotlib.patches as mpatches\n patchlist.append(mpatches.Patch(color='black', label=r'$\\alpha$'))\n patchlist.append(mpatches.Patch(color='blue', label=r'$\\beta$-1'))\n patchlist.append(mpatches.Patch(color='red', label=r'$\\beta$-2'))\n\n alpha = plt.scatter(0, 1, c='k', marker=marker[0], s=ssize, label=r'$\\alpha$')\n beta1 = plt.scatter(0, 1, c='b', marker=marker[1], s=ssize, label=r'$\\beta\\textnormal{-}1$')\n beta2 = plt.scatter(0, 1, c='r', marker=marker[2], s=ssize, label=r'$\\beta\\textnormal{-}2$')\n plt.close()\n\n patchlist = [alpha, beta1, beta2]\n\n return colInd, marker, patchlist, alphaInd", "def _repr_(self):\n return (\"%d-d CPR-Fano toric variety covered by %d affine patches\"\n % (self.dimension_relative(), self.fan().ngenerating_cones()))", "def getCode1Letter(self):\n dataDict = self.__dict__\n cc = self.stdChemComp\n if cc is None:\n result = None\n else:\n result = cc.code1Letter\n return result", "def _get_cu(self):\n c_undrained=0\n #group_index = self._data['GI']\n if self.is_clayey():\n c_undrained = self.qu(self._data[SoilProperty.N60])/2\n #c_undrained=_clamp(c_undrained, 10, 103)\n # Plasix calculation needs very small c_undrained\n #if c_undrained<0.21:\n # c_undrained = 0.21\n #use 0.2 as per plasix recommendation\n return c_undrained#the cu is always 103 check with small value of n_60, some mistake maybe", "def get_cosmology_from_name(cosmology):\n\n # This list should be updated when astropy releases the Planck18 cosmology\n available_cosmologies = {\n \"WMAP5\": acosmo.WMAP5,\n \"WMAP7\": acosmo.WMAP7,\n \"WMAP9\": acosmo.WMAP9,\n \"Planck13\": acosmo.Planck13,\n \"Planck15\": acosmo.Planck15,\n }\n\n # If the user uses a string for the cosmology look it up in the dict.\n # If they specify a cosmology class, use that instead.\n if isinstance(cosmology, str):\n if cosmology in available_cosmologies.keys():\n cosmo = available_cosmologies[cosmology]\n else:\n msg = (f\"\"\"The cosmology '{cosmology}' is not in the list of\n available cosmologies with string keywords. 
The list\n if available cosmologies accessable via keyword are:\n {available_cosmologies.keys()}\"\"\")\n raise ValueError(msg)\n\n elif isinstance(cosmology, acosmo.core.FLRW):\n cosmo = cosmology\n\n return cosmo", "def fig16():\n # fmt: off\n tpm = np.array([\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [1, 0, 0, 0, 1, 0, 0],\n [1, 0, 0, 0, 0, 0, 0],\n [1, 0, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [1, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 0, 0],\n [1, 0, 1, 1, 1, 0, 0],\n [1, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [1, 0, 0, 0, 0, 1, 0],\n [1, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [1, 0, 0, 0, 0, 1, 0],\n [1, 0, 1, 0, 1, 1, 0],\n [1, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 0, 0, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 1, 1],\n [0, 0, 1, 0, 1, 1, 1],\n [1, 0, 1, 0, 0, 1, 1],\n [1, 0, 0, 0, 1, 1, 1],\n [1, 0, 0, 0, 0, 1, 1],\n [1, 0, 1, 0, 1, 1, 1],\n [1, 0, 1, 0, 0, 1, 1],\n [1, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 1, 0, 1, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 0, 0, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1],\n [1, 1, 0, 1, 1, 1, 1],\n ])\n cm = np.array([\n [0, 1, 1, 0, 1, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 
0, 0],\n [0, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 1],\n [0, 0, 0, 0, 0, 1, 1],\n ])\n # fmt: on\n return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])", "def test_name_of_cube(self):\n result = calculate_sleet_probability(self.snow_prob_cube, self.rain_prob_cube)\n name = \"probability_of_sleet\"\n self.assertEqual(result.long_name, name)", "def derive_Fritz11(wavelength):\n # Extinction law definition\n wave = np.array([1.282, 1.736, 2.166, 2.625, 2.758, 2.873, 3.039, 3.297, 3.74, 3.819, 3.907, 4.052,\n 4.376, 5.128, 5.908, 6.772, 7.459, 7.502, 8.76, 12.371, 19.062])\n A_AKs = np.array([7.91, 4.30, 2.49, 1.83, 1.51, 1.84, 2.07, 1.66, 1.19, 1.19, 1.09, 1.01, 1.09, 0.99,\n 1.04, 0.84, 0.81, 0.79, 2.04, 1.34, 1.34])\n\n\n # Interpolate over the curve\n spline_interp = interpolate.splrep(wave, A_AKs, k=3, s=0)\n A_at_wave = interpolate.splev(wavelength, spline_interp)\n\n # We'll call 2.14 microns the K-band\n idx = np.where( abs(wavelength - 2.14) == min(abs(wavelength - 2.14)) )\n A_AKs_at_wave = A_at_wave / A_at_wave[idx] \n\n return A_AKs_at_wave", "def logic_program_form(self):\r\n return '% -------------------------------------\\n' +\\\r\n '% Theory ' + self.name + '\\n' +\\\r\n '% -------------------------------------\\n\\n' +\\\r\n GENERAL_AXIOMS", "def test_kyc_get_legal(self):\n pass", "def _get_pretty_campus(self, raw_campus):\n campus_dict = {\n 'cascadescampus': 'Cascades',\n 'osucorvallis': 'Corvallis',\n 'hmsc': 'HMSC'\n }\n campus = None\n raw_campus = raw_campus.lower()\n if raw_campus in campus_dict:\n campus = campus_dict[raw_campus]\n elif raw_campus:\n campus = 'Other'\n\n return campus", "def func_spon_318(n, series):\n if series == \"3S1\":\n return 1/state_3S1(n)\n if series == \"3D3\":\n return 1/state_3D3(n)", "def ckn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ckn\")", "def get_corporal_comp(imc: float):\n if imc < 18.5:\n return \"Peso inferior al normal\"\n if imc >= 18.5 and imc < 25:\n return \"Normal\"\n if imc >= 25 and imc < 30:\n return \"Peso superior al normal\"\n if imc >= 30:\n return \"Obesidad\"", "def name(self):\n return 'VL53L1X'", "def main():\n tl = TwoLocus(in_path='/csbiodata/public/www.csbio.unc.edu/htdocs/sgreens/pairwise_origins/')\n # tl = TwoLocus()\n # tl.preprocess(glob.glob('OR_ss_origins/*.hap'))\n print len(tl.list_available_strains())\n exit()\n # print len(tl.list_available_strains())\n # tl.preprocess(['cc_origins.csv'])\n # tl.preprocess(['ccv_origins.csv'])\n classical = [s for s in\n [\"129P1/ReJ\", # \"129P3/J\", \"129S1SvlmJ\", \"129S6\", \"129T2/SvEmsJ\", \"129X1/SvJ\", \"A/J\", \"A/WySnJ\",\n \"AEJ/GnLeJ\", \"AEJ/GnRk\", \"AKR/J\", \"ALR/LtJ\", \"ALS/LtJ\", \"BALB/cByJ\", \"BALB/cJ\", \"BDP/J\", \"BPH/2J\",\n # \"BPL/1J\", \"BPN/3J\", \"BTBR T<+>tf/J\", \"BUB/BnJ\", \"BXSB/MpJ\", \"C3H/HeJ\", \"C3HeB/FeJ\", \"C57BL/10J\",\n # \"C57BL/10ScNJ\", \"C57BL/10SAAAJ\", \"C57BL/6CR\", \"C57BL/6J\", \"C57BL/6NCI\", \"C57BL/6Tc\", \"C57BLKS/J\",\n # \"C57BR/cdJ\", \"C57L/J\", \"C58/J\", \"CBA/CaJ\", \"CBA/J\", \"CE/J\", \"CHMU/LeJ\", \"DBA/1J\", \"DBA/1LacJ\",\n # \"DBA/2DeJ\", \"DBA/2HaSmnJ\", \"DBA/2J\", \"DDK/Pas\", \"DDY/JclSidSeyFrkJ\", \"DLS/LeJ\", \"EL/SuzSeyFrkJ\",\n # \"FVB/NJ\", \"HPG/BmJ\", \"I/LnJ\", \"IBWSP2\", \"IBWSR2\", \"ICOLD2\", \"IHOT1\", \"IHOT2\", \"ILS\", \"ISS\", \"JE/LeJ\",\n # \"KK/HlJ\", \"LG/J\", \"LP/J\", \"LT/SvEiJ\", \"MRL/MpJ\", \"NOD/ShiLtJ\", \"NON/ShiLtJ\", \"NONcNZO10/LtJ\",\n # \"NONcNZO5/LtJ\", \"NOR/LtJ\", \"NU/J\", \"NZB/BlNJ\", \"NZL/LtJ\", 
\"NZM2410/J\", \"NZO/HlLtJ\", \"NZW/LacJ\", \"P/J\",\n # \"PL/J\", \"PN/nBSwUmabJ\", \"RF/J\", \"RHJ/LeJ\", \"RIIIS/J\", \"RSV/LeJ\", \"SB/LeJ\", \"SEA/GnJ\", \"SEC/1GnLeJ\",\n # \"SEC/1ReJ\", \"SH1/LeJ\", \"SI/Col Tyrp1 Dnahc11/J\", \"SJL/Bm\", \"SJL/J\", \"SM/J\", \"SSL/LeJ\", \"ST/bJ\",\n \"STX/Le\", ] # \"SWR/J\", \"TALLYHO/JngJ\", \"TKDU/DnJ\", \"TSJ/LeJ\", \"YBR/EiJ\", \"ZRDCT Rax<+>ChUmdJ\"]\n if tl.is_available(s)]\n wild_derived = [s for s in\n ['22MO',\n # 'BIK/g', 'BULS', 'BUSNA', 'BZO', 'CALB/RkJ', 'CASA/RkJ', 'CAST/EiJ', 'CIM', 'CKN', 'CKS',\n 'CZECHI/EiJ', 'CZECHII/EiJ', 'DCA', 'DCP', 'DDO', 'DEB', 'DGA', 'DIK', 'DJO', 'DKN', 'DMZ', 'DOT',\n # 'IS/CamRkJ', 'JF1/Ms', 'LEWES/EiJ', 'MBK', 'MBS', 'MCZ', 'MDG', 'MDGI', 'MDH', 'MGA', 'MH',\n # 'MOLD/RkJ', 'MOLF/EiJ', 'MOLG/DnJ', 'MOR/RkJ', 'MPB', 'MSM/Ms', 'PERA/EiJ', 'PERC/EiJ', 'POHN/Deh',\n # 'PWD/PhJ', 'PWK/PhJ', 'RBA/DnJ', 'RBB/DnJ', 'RBF/DnJ', 'SF/CamEiJ', 'SKIVE/EiJ', 'SOD1/EiJ',\n # 'STLT', 'STRA', 'STRB', 'STUF', 'STUP', 'STUS', 'TIRANO/EiJ', 'WLA', 'WMP', 'WSB/EiJ',\n 'ZALENDE/EiJ'] if tl.is_available(s)]\n tl.contingency_table(classical, wild_derived, '/csbiohome01/sgreens/Projects/intervals/contingency.csv')\n exit()\n x = TwoLocus(chrom_sizes=[20e6, 20e6])\n x.preprocess([\"test2.csv\"])\n x.unique_combos(['A', 'B', 'D'], ['C', 'E'])\n x.sources_at_point_pair('1', 1, '1', 10000000, ['A'])\n # x.interlocus_dependence([chr(c) for c in xrange(ord('A'), ord('J')+1)])\n # exit()\n\n x = TwoLocus(chrom_sizes=[20 * 10 ** 6, 20 * 10 ** 6])\n x.preprocess([\"test.csv\"])\n rez = x.pairwise_frequencies([\"A\"])\n\n areas = x.calculate_genomic_area(rez[0], rez[1])\n total = 0.0\n\n for combo in subspecies.iter_combos():\n print \"\\t{:15s}({:4d}):{:1.5f}\".format(subspecies.to_string(combo), combo,\n areas[str(subspecies.to_string(combo))])\n total += areas[str(subspecies.to_string(combo))]\n print \"\\t{:21s}:{:1.5f}\".format(\"Total\", total)\n\n sys.exit(1)\n # for code, combo in combos.iteritems():\n # print \"\\n\", rez[1]\n # print \"\\t{} ({}):\\n{}\".format(combo, code, rez[0][code])", "def hispaniola_models():\n return [\n ('GFS (6-hr steps, 7 days)', 'gfs'),\n ('WRF-PR (1-hr steps, 2 days)', 'wrfpr'),\n ]", "def create_weka_mfcc_13():\n global ARGS\n\n ## ten thu muc can trich chon vector dac trung (RLS, LMS, NLMS, Kalman, Non)\n name = '';\n fout = open('weka/MFCC78_TUNNING_{}_dataset.arff'.format(name), 'w')\n fout.write('@RELATION {}_dataset\\n\\n'.format(name))\n\n fout.write('@ATTRIBUTE MEAN_MFCC1\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC2\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC3\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC4\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC5\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC6\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC7\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC8\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC9\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC10\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC11\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC12\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC13\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD1\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD2\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD3\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD4\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD5\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD6\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD7\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD8\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD9\tREAL\\n')\n fout.write('@ATTRIBUTE 
MEAN_MFCCD10\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD11\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD12\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD13\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD1\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD2\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD3\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD4\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD5\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD6\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD7\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD8\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD9\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD10\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD11\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD12\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD13\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC1\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC2\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC3\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC4\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC5\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC6\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC7\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC8\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC9\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC10\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC11\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC12\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC13\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD1\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD2\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD3\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD4\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD5\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD6\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD7\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD8\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD9\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD10\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD11\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD12\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD13\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD1\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD2\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD3\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD4\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD5\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD6\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD7\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD8\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD9\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD10\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD11\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD12\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD13\tREAL\\n')\n fout.write('@ATTRIBUTE class \t{'+ARGS.labels+'}\\n\\n')\n \n fout.write('@DATA\\n')\n\n ## cua so\n windowing = Windowing(type='hamming',\n size=1104,\n zeroPhase=False)\n \n ## quang pho\n spectrum = Spectrum(size=1104)\n\n ##khoi tao MFCC\n mfcc = MFCC(highFrequencyBound=4000, ## gioi han tren cua tan so\n inputSize=201, \t\t\t ## kich thuoc pho dau vao\n lowFrequencyBound=0,\t ## gioi han duoi cua tan so\n numberBands=40,\t\t\t ## so luong cac dai Mels trong bo loc\n numberCoefficients=13, ## so luong dau ra cac he so Mel\n sampleRate=16000)\t\t ## tan so lay mau\n\n for label in ARGS.labels.split(','): ## duyet cac thu muc giong voi ten nhan\n\n ## dia chi thu muc\n dir = os.path.join(ARGS.dir, label)\n\n logging.info('Access folder <{}>'.format(dir))\n\n for file in sorted(os.listdir(dir)):\n\n \t## duyet cac file .wav\n if file.endswith('.wav'):\n logging.info('Process 
<{}>'.format(file))\n path = os.path.join(dir, file)\n \n ## doc file am thanh\n loader = MonoLoader(filename=path, sampleRate=ARGS.sampleRate)\n audio = loader()\n cnt = 0\n\n for window in FrameGenerator(audio, \n frameSize=ARGS.window_length*ARGS.sampleRate/1000, \n hopSize=ARGS.window_stride*ARGS.sampleRate/1000, \n startFromZero=True):\n mfccs = []\n for frame in FrameGenerator(window, \n frameSize=ARGS.frame_length*ARGS.sampleRate/1000, \n hopSize=ARGS.frame_stride*ARGS.sampleRate/1000, \n startFromZero=True):\n s = spectrum(windowing(frame))\n\n _, m = mfcc(s)\n\n m_delta = librosa.feature.delta(m, order=1) ## dao ham bac 1\n m_delta_delta = librosa.feature.delta(m, order=2) ## dao ham bac 2\n\n m_all = np.concatenate((m, m_delta, m_delta_delta), axis=0) ## them vao chuoi\n mfccs.append(m_all)\n mfccs = np.array(mfccs)\n mfccs_mean = np.mean(mfccs, axis=0)\n mfccs_std = np.std(mfccs, axis=0)\n feat = np.concatenate((mfccs_mean, mfccs_std), axis=0).tolist()\n str_feat = [str(x) for x in feat]\n line = ','.join(str_feat)+','+label\n fout.write(line+'\\n')\n cnt = cnt+1\n logging.info('{} samples'.format(cnt))", "def getCriticStation(analyzer):\n mayIn = model.getRankMay(analyzer,\"in\")\n mayOut=model.getRankMay(analyzer,\"out\")\n less=model.getRankMen(analyzer,\"LessPopular\")\n return (mayIn,mayOut,less)", "def csi_to_conky(match: re.Match) -> str:\n # Convert the string of code;code;code to a list of ints\n try:\n codes= [0 if _=='' else int(_) for _ in match.group(1).split(';')]\n except IndexError: \n print('csi_to_conky called with no group match', file=sys.stderr)\n return match.group(0) # if no group has matched return the string as is\n except ValueError as err: # problem converting to int\n print(f'csi_to_conky: {err}', file=sys.stderr)\n return match.group(0)\n \n # Initialize the string to be returned\n result=''\n\n # consume the list one code at a time, first to last\n while len(codes)>0:\n code=codes.pop(0)\n if code==0: # Reset\n # Clear the string and init it with default color and font\n result = '${color}${font}'\n continue\n \n elif code==1: # Bold\n result += '${font DejaVu Sans Mono:style=bold}'\n continue\n \n elif code>29 and code<38: # Set foreground color (0 to 7)\n result += conky_set_fg(code -30)\n continue\n \n elif code==38: # Advanced ANSI\n try:\n type=codes.pop(0)\n if type==2: # ESC[38;2;R;G;Bm => TODO\n # for now just consume the next 3 values in the list\n del codes[0:3]\n continue\n elif type==5: # ESC[38;5;xxm \n result += eightbit_to_conky(codes.pop(0))\n continue\n else:\n raise ValueError(f'Improper value {type} after code 38')\n except (IndexError, ValueError) as err:\n print(f'csi_to_conky: {err} while parsing advanced ANSI sequence {code};{type}', file=sys.stderr)\n continue\n \n elif code==39: # default fg\n result +='${color}'\n continue\n \n else:\n print(f'code {code} not implemented', file=sys.stderr)\n continue \n\n return result", "def sn2003L(ax, col, legend):\n d = 2.8432575937224894e+26\n nu_plt = 8.5E9\n nu, dt, f, ef = read_2003L()\n choose = nu == nu_plt\n lum = plot_line(\n ax, d, dt[choose], 1E-3*f[choose]*nu_plt, \n 'SN2003L', 'SN', col, legend)\n ax.text(dt[choose][-1]/1.05, lum[-1], 'SN2003L', fontsize=11,\n verticalalignment='center',\n horizontalalignment='left')", "def license_plate(self) -> str:\n return self.numerify(self.generator.parse(self.random_element(self.license_formats)))", "def schmidt_number(self):\n a = self.alpha\n w = self.omega\n return 5 * (2 + a) / (3 * a * (7 - 2 * w))", "def 
get_lcalcpol_incar(custom_parameters_dictionary=None):\n\n\t\tincar = IncarMaker.get_static_incar(custom_parameters_dictionary)\n\t\tdel incar['ibrion']\n\t\tdel incar['nsw']\n\t\tdel incar['npar'] #lcalcpol runs are not parallelizable when symmetry is on\n\n\t\tincar['lcalcpol'] = True\n\t\tincar['dipol'] = '0.125 0.125 0.125' #this should be a point of minimum polarization in the cell ########################\n\t\tincar['isif'] = 2\n\n\t\treturn incar", "def Frischknecht16_net(self):\n import numpy.lib.recfunctions as rcfuncs\n import os\n \n # Define metallicites \n self.metallicities = [0.0134,1e-3,1e-5] # First is solar value\n \n # Define masses\n self.masses= np.array((15,20,25,40))\n \n # Define isotope indexing. For radioactive isotopes with half-lives << Chempy time_step they are assigned to their daughter element\n # NB: we only use elements up to Ge here, as in the paper\n indexing={}\n indexing['H']=['p','d']\n indexing['He'] = ['he3','he4']\n indexing['Li'] = ['li6','li7']\n indexing['Be'] = ['be9']\n indexing['B'] = ['b10','b11']\n indexing['C'] = ['c12','c13']\n indexing['N'] = ['n14','n15']\n indexing['O'] = ['o16','o17','o18']\n indexing['F'] = ['f19']\n indexing['Ne'] = ['ne20','ne21','ne22']\n indexing['Na'] = ['na23']\n indexing['Mg'] = ['mg24','mg25','mg26','al26']\n indexing['Al'] = ['al27']\n indexing['Si'] = ['si28','si29','si30']\n indexing['P'] = ['p31']\n indexing['S'] = ['s32','s33','s34','s36']\n indexing['Cl'] = ['cl35','cl37']\n indexing['Ar'] = ['ar36','ar38','ar40']\n indexing['K'] = ['k39','k41']\n indexing['Ca'] = ['ca40','ca42','ca43','ca44','ca46','ca48']\n indexing['Sc'] = ['sc45']\n indexing['Ti'] = ['ti46','ti47','ti48','ti49','ti50']\n indexing['V'] = ['v50','v51']\n indexing['Cr'] = ['cr50','cr52','cr53','cr54']\n indexing['Mn'] = ['mn55']\n indexing['Fe'] = ['fe54', 'fe56','fe57','fe58']\n indexing['Co'] = ['fe60', 'co59']\n indexing['Ni'] = ['ni58','ni60','ni61','ni62','ni64']\n indexing['Cu'] = ['cu63','cu65']\n indexing['Zn'] = ['zn64','zn66','zn67','zn68','zn70']\n indexing['Ga'] = ['ga69','ga71']\n indexing['Ge'] = ['ge70','ge72','ge73','ge74','ge76']\n\n # Define indexed elements \n self.elements = list(indexing.keys())\n \n \n # Define data types\n dt = np.dtype('U8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8')\n \n # Initialise yield table\n yield_table = {}\n \n \n # Import full table with correct rows and data-types\n z = np.genfromtxt(localpath+'input/yields/Frischknecht16/yields_total.txt',skip_header=62,dtype=dt)\n \n \n \n # Create model dictionary indexed by metallicity, giving relevant model number for each choice of mass\n # See Frischknecht info_yields.txt file for model information\n model_dict = {}\n model_dict[0.0134] = [2,8,14,27]\n model_dict[1e-3]=[4,10,16,28]\n model_dict[1e-5]=[6,12,18,29]\n \n # Import list of remnant masses for each model (from row 32-60, column 6 of .txt file) \n # NB: these are in solar masses\n rem_mass_table = np.loadtxt(localpath+'input/yields/Frischknecht16/yields_total.txt',skiprows=31,usecols=6)[:29]\n\n # Create one subtable for each metallicity \n for metallicity in self.metallicities:\n additional_keys = ['Mass', 'mass_in_remnants','unprocessed_mass_in_winds'] # List of keys for table\n names = additional_keys + self.elements\n \n # Initialise table and arrays \n base = np.zeros(len(self.masses))\n list_of_arrays = []\n for i in range(len(names)):\n list_of_arrays.append(base)\n yield_subtable = 
np.core.records.fromarrays(list_of_arrays,names=names)\n mass_in_remnants = np.zeros(len(self.masses))\n total_mass_fraction = np.zeros(len(self.masses))\n element_mass = np.zeros(len(self.masses))\n \n # Add masses to table\n yield_subtable['Mass'] = self.masses\n \n \n # Extract remnant masses (in solar masses) for each model:\t\t\t\n for mass_index,model_index in enumerate(model_dict[metallicity]):\n mass_in_remnants[mass_index] = rem_mass_table[model_index-1] \n \n # Iterate over all elements \n for element in self.elements:\n element_mass = np.zeros(len(self.masses))\n for isotope in indexing[element]: # Iterate over isotopes of each element\n for mass_index,model_index in enumerate(model_dict[metallicity]): # Iterate over masses \n for row in z: # Find required row in table \n if row[0] == isotope:\n element_mass[mass_index]+=row[model_index] # Compute cumulative mass for all isotopes\n yield_subtable[element]=np.divide(element_mass,self.masses) # Add entry to subtable\n \n all_fractions = [row[model_index] for row in z] # This lists all elements (not just up to Ge)\n total_mass_fraction[mass_index] = np.sum(all_fractions) # Compute total net mass fraction (sums to approximately 0)\n \n # Add fields for remnant mass (now as a mass fraction) and unprocessed mass fraction\t\t\t\n yield_subtable['mass_in_remnants']=np.divide(mass_in_remnants,self.masses) \n yield_subtable['unprocessed_mass_in_winds'] = 1.-(yield_subtable['mass_in_remnants']+total_mass_fraction) # This is all mass not from yields/remnants\n \n # Add subtable to full table\n yield_table[metallicity]=yield_subtable\n\n # Define final yield table for output\n self.table = yield_table", "def Sphericity(self):\n s = self.sphericity\n assert s in range(1,6), \"Sphericity score out of bounds.\"\n if s == 1: return 'Linear'\n elif s == 2: return 'Ovoid Linear'\n elif s == 3: return 'Ovoid'\n elif s == 4: return 'Ovoid Round'\n elif s == 5: return 'Round'", "def refCylNoise():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/' \\\n 'NoiseStudy/RefCylinderMeasurements/'\n d1,dx1 = met.read4DFits(wdir+'161205_RefCylinder_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161205_RefCylinder_Avg8_Meas2.fits')\n d3,dx3 = met.read4DFits(wdir+'161205_RefCylinder_Avg8_Meas3.fits')\n\n p1,px1 = met.read4DFits(wdir+'161205_RefCylinder_'\n 'ParrotingTestPitch_Meas1.fits')\n p2,px2 = met.read4DFits(wdir+'161205_RefCylinder_'\n 'ParrotingTestPitch_Meas2.fits')\n p3,px3 = met.read4DFits(wdir+'161205_RefCylinder_'\n 'ParrotingTestRoll_Meas1.fits')\n p4,px4 = met.read4DFits(wdir+'161205_RefCylinder_'\n 'ParrotingTestRoll_Meas2.fits')\n\n #Construct baseline power spectra\n f1,pow1 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1)\n f2,pow2 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1,axis=1)\n \n #Construct parroted power spectra\n f3,pow3 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx1)\n f4,pow4 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx2,axis=1)\n f5,pow5 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx1)\n f6,pow6 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx2,axis=1)\n\n #Plot\n plt.loglog(f1,pow1/f1[0],label='Axial Baseline')\n plt.loglog(f2,pow2/f2[0],label='Azimuthal Baseline')\n plt.loglog(f3,pow3/f3[0],label='Pitch Axial')\n plt.loglog(f4,pow4/f4[0],label='Pitch Azimuthal')\n plt.loglog(f5,pow5/f5[0],label='Roll Axial')\n plt.loglog(f6,pow6/f6[0],label='Roll Azimuthal')\n\n return f1,pow1", "def wypisz_info(self):\n print(f\"Samochód: {self.producent} {self.model}\")", "def name(self):\n return 'D07 Indice de calidad de vida por 
manzanas'", "def wordsByDegreeCentrality(G):\n\n\tc=nx.degree_centrality(G)\n\n\thola=[]\n\tfor w in c:\n\t\thola.append((c[w],w))\n\thola.sort()\n\n\tprint(\"\\n\\nDEGREE CENTRALITY :\")\n\tprint(\"-------------------\")\n\n\tprint(\"Lowest 15 :\")\n\tfor i in range(0,15):\n\t\tprint(hola[i][1],end=\" \")\n\n\tprint(\"\\n\\nHighest 15 :\")\n\tfor i in range(len(hola)-1,len(hola)-1-15,-1):\n\t\tprint(hola[i][1],end=\" \")\n\n\tprint()", "def _get_lspci_name(line):\n hush = line.split('[')\n return '['.join(hush[0:-1]).strip()", "def lowerPen(gcode):\r\n gcode.append(\"M300 S43\")\r\n #gcode.append(\"G0 Z0\")\r", "def code() -> str:\n return \"\"\"\n G91 G17\n G0 Y10 X-10\n G0 Y0 X-5\n G0 Y5 X0\n G0 Y0 X5\n G0 Y0 X-5\n G0 Y-5 X0\n G3 Y-5 X5 J0 I5\n G0 Y0 X5\n G0 Y5 X0\n G3 Y5 X-5 J0 I-5\n G0 Y-5 X0\n G0 Y-10 X10\n G0 Y0 X-5\n G0 Y-15 X-15\n G0 Y0 X5\n G0 Y5 X0\n G0 Y0 X-5\n G0 Y-5 X0\n G0 Y5 X0\n G2 Y5 X5 J0 I5\n G0 Y0 X5\n G0 Y-5 X0\n G2 Y-5 X-5 J0 I-5\n G0 Y5 X0\n G0 Y10 X10\n G0 Y0 X-30\n G3 Y0 X-10 J0 I-5\n G3 Y0 X10 J0 I5\n\n G0 Y0 X5\n G3 Y5 X5 J5 I0\n G3 Y10 X-10 J0 I-10\n G3 Y-5 X-5 J-5 I0\n G0 Y-5 X0\n\n G0 Y5 X0\n G3 Y5 X-5 J0 I-5\n G3 Y-10 X-10 J-10 I0\n G3 Y-5 X5 J0 I5\n G0 Y0 X5\n\n G0 Y0 X-5\n G3 Y-5 X-5 J-5 I0\n G3 Y-10 X10 J0 I10\n G3 Y5 X5 J5 I0\n G0 Y5 X0\n\n G0 Y-5 X0\n G3 Y-5 X5 J0 I5\n G3 Y10 X10 J10 I0\n G3 Y5 X-5 J0 I-5\n G0 Y0 X-5\n \"\"\"", "def make_spondaic(self, scansion: str) -> str:\n mark_list = string_utils.mark_list(scansion)\n vals = list(scansion.replace(\" \", \"\"))\n new_vals = self.SPONDAIC_PENTAMETER[:-1] + vals[-1]\n corrected = \"\".join(new_vals)\n new_line = list(\" \" * len(scansion))\n for idx, car in enumerate(corrected):\n new_line[mark_list[idx]] = car\n return \"\".join(new_line)", "def GA341(atmsel):\n mdl = atmsel.get_model()\n return ('GA341 score', mdl.assess_ga341())", "def getX_cortex(atlas='tessels0042',sub = 's02'):\n Xdata = Dataset('sc1','glm7',atlas,sub)\n Xdata.load_mat() # Load from Matlab\n X1, INFO1 = Xdata.get_data(averaging=\"sess\") # Get numpy\n # Get the test data set\n XTdata = Dataset('sc2','glm7',atlas,sub)\n XTdata.load_mat() # Load from Matlab\n X2, INFO2 = XTdata.get_data(averaging=\"sess\") # Get numpy\n # z-standardize cortical regressors\n X1 = X1 / np.sqrt(np.sum(X1 ** 2, 0) / X1.shape[0])\n X2 = X2 / np.sqrt(np.sum(X2 ** 2, 0) / X1.shape[0])\n X1 = np.nan_to_num(X1)\n X2 = np.nan_to_num(X2)\n # i1 = np.where(INFO1.sess==1)\n # i2 = np.where(INFO1.sess==2)\n # rel = np.sum(X1[i1,:]*X1[i2,:])/np.sqrt(np.sum(X1[i1,:]**2) * np.sum(X1[i2,:]**2))\n return X1,X2,INFO1,INFO2", "def get_sn2018kzr(colorplt = False):\n ebv = 0.113/3.1\n z = 0.053\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n t_max = 58480.422+0.1\n \n f = open('../data/otherSN/Mcbrien2019/table1.tex')\n lines = f.readlines()\n f.close()\n lines = lines[:-4]\n \n dates = [x.split(\"&\")[0] for x in lines]\n mjds = [float(x.split(\"&\")[1]) for x in lines]\n phases = [float(x.split(\"&\")[2].replace('$', '').replace('\\t', '')) for x in lines]\n gs = [x.split(\"&\")[3].replace('$', '') for x in lines]\n rs = [x.split(\"&\")[4].replace('$', '') for x in lines]\n iis = [x.split(\"&\")[5].replace('$', '') for x in lines]\n zs = [x.split(\"&\")[6].replace('$', '') for x in lines]\n insts = [x.split(\"&\")[7] for x in lines]\n \n dtg = digital_latex(mjds, phases, gs, insts)\n dtr = digital_latex(mjds, phases, rs, insts)\n dti = digital_latex(mjds, phases, iis, insts)\n \n filt = np.hstack([np.repeat(\"g\", 
len(dtg[0])),\n np.repeat(\"r\", len(dtr[0])),\n np.repeat(\"i\", len(dti[0]))])\n phase = np.hstack([dtg[1], dtr[1], dti[1]])\n mag = np.hstack([dtg[2], dtr[2], dti[2]])\n emag = np.hstack([dtg[3], dtr[3], dti[3]])\n mjd = np.hstack([dtg[0], dtr[0], dti[0]])\n \n tb = Table(data = [(mjd - t_max) / (1+z), mag, emag, filt],\n names = ['tmax_rf', 'mag', 'emag', 'filter'])\n \n ixr = tb['filter'] == \"r\"\n ixg = tb['filter'] == \"g\"\n ixi = tb['filter'] == \"i\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'][ixg] = 4814\n tb['wave'][ixr] = 6422\n tb['wave'][ixi] = 7883\n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'], 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb = tb.to_pandas()\n return tb", "def Calcification(self):\n s = self.calcification\n assert s in range(1,7), \"Calcification score out of bounds.\"\n if s == 1: return 'Popcorn'\n elif s == 2: return 'Laminated'\n elif s == 3: return 'Solid'\n elif s == 4: return 'Non-central'\n elif s == 5: return 'Central'\n elif s == 6: return 'Absent'", "def POSCAR_title(doc):\n com_for=doc['snl']\n formu=com_for['formula']\n return formu", "def lcfig( z=7 ):\n\n for modelname in ['z15G','z25G','z40G'] :\n\n # initialize a supernova model :\n snmodel = sncosmo.Model(source=modelname)\n\n # Fix the redshift for this instantiation of the model\n # (NOTE: this does not apply any cosmological dimming. It only\n # shifts the wavelengths)\n snmodel.set( z = z )\n\n # generate the H band light curve\n tobs = np.arange( 0, 1000, 10 )\n M160 = snmodel.bandmag( 'f160w', 'ab', tobs ) # Absolute Magnitude\n m160 = M160 + cosmo.distmod( z ).value # apparent magnitude\n\n pl.plot( tobs, m160 )\n ax = pl.gca()\n ax.invert_yaxis()\n ax.set_xlabel('Time (observer-frame days)')\n ax.set_ylabel('Apparent Magnitude in F160W')\n ax.set_xlim( 0, 1000 )\n ax.set_ylim( 36, 28 )\n\n pl.draw()", "def convert_physician_diagnoses_code(diagnoses_code):\n if diagnoses_code in ICD_9_DEFAULT_CODES_FOR_DIAGNOSIS:\n diagnoses_icd_9_code = \\\n ICD_9_DEFAULT_CODES_FOR_DIAGNOSIS.get(diagnoses_code)\n if diagnoses_icd_9_code in \\\n (\"Blank\", \"Blank diagnosis\", \"Diagnosis of 'none'\",\n \"Noncodable diagnosis\", \"Noncodable\", \"Illegible diagnosis\"):\n return \"\"\n return diagnoses_icd_9_code\n\n # 1975-76 - Instead of a \"Y\" to prefix codes in the supplementary\n # classification, an ampersand (&) was used\n # 1977 - 78 - Same as above, except that the prefix character is a dash(-)\n # For year 1973 till 1978 `diagnoses_code` is 4 length character\n if len(diagnoses_code) < 5 and (\n diagnoses_code.startswith(\"&\") or diagnoses_code.startswith(\"-\")\n or diagnoses_code.startswith(\"Y\")\n ):\n diagnoses_code = \"V{}\".format(diagnoses_code[1:])\n\n # Character format\n # For inapplicable fourth or fifth digits, a dash is inserted.\n # 0010[-] - V829[-] = 001.0[0]-V82.9[0]\n elif \"-\" in diagnoses_code[3:]:\n diagnoses_code = diagnoses_code.replace(\"-\", \"0\")\n # Reference from documentation:\n # -9 = Blank\n elif \"-00009\" in diagnoses_code:\n return \"\"\n\n # The prefix “1” preceding the 3-digit diagnostic codes represents\n # diagnoses 001-999, e.g. ‘1381’=’381’=otitis media. 
And “138100”=”381.00”\n if diagnoses_code.startswith(\"1\"):\n diagnoses_code = diagnoses_code.lstrip(\"1\")\n\n # The prefix “2” preceding the 3 - digit diagnostic codes represents \"V\"\n # code diagnoses VO1 - V82, e.g., ‘2010’=’V10’ and “201081” = “V10.81”\n elif diagnoses_code.startswith(\"2\"):\n if diagnoses_code.startswith(\"20\"):\n diagnoses_code = \"V{}\".format(diagnoses_code[2:])\n else:\n diagnoses_code = \"V{}\".format(diagnoses_code[1:])\n\n # There is an implied decimal between the third and fourth digits\n diagnoses_icd_9_code = \"{}.{}\".format(\n diagnoses_code[:3], diagnoses_code[3:]\n )\n\n return diagnoses_icd_9_code", "def West17_net(self):\n \n # Index elements\n indexing = {}\n indexing['H'] = ['H1', 'H2']\n indexing['He'] = ['He3', 'He4']\n indexing['Li'] = ['Li6', 'Li7']\n indexing['Be'] = ['Be9']\n indexing['B'] = ['B10', 'B11']\n indexing['C'] = ['C12', 'C13']\n indexing['N'] = ['N14', 'N15']\n indexing['O'] = ['O16', 'O17', 'O18']\n indexing['F'] = ['F19']\n indexing['Ne'] = ['Ne20', 'Ne21', 'Ne22']\n indexing['Na'] = ['Na23']\n indexing['Mg'] = ['Mg24', 'Mg25', 'Mg26']\n indexing['Al'] = ['Al27']\n indexing['Si'] = ['Si28', 'Si29', 'Si30']\n indexing['P'] = ['P31']\n indexing['S'] = ['S32','S33','S34','S36']\n indexing['Cl'] = ['Cl35', 'Cl37']\n indexing['Ar'] = ['Ar36', 'Ar38', 'Ar40']\n indexing['K'] = ['K39', 'K41']\n indexing['Ca'] = ['K40','Ca40', 'Ca42', 'Ca43', 'Ca44', 'Ca46', 'Ca48']\n indexing['Sc'] = ['Sc45']\n indexing['Ti'] = ['Ti46', 'Ti47', 'Ti48', 'Ti49', 'Ti50']\n indexing['V'] = ['V50', 'V51']\n indexing['Cr'] = ['Cr50', 'Cr52', 'Cr53', 'Cr54']\n indexing['Mn'] = ['Mn55']\n indexing['Fe'] = ['Fe54', 'Fe56', 'Fe57', 'Fe58']\n indexing['Co'] = ['Co59']\n indexing['Ni'] = ['Ni58', 'Ni60', 'Ni61', 'Ni62', 'Ni64']\n indexing['Cu'] = ['Cu63', 'Cu65']\n indexing['Zn'] = ['Zn64', 'Zn66', 'Zn67', 'Zn68', 'Zn70']\n indexing['Ga'] = ['Ga69', 'Ga71']\n indexing['Ge'] = ['Ge70', 'Ge72', 'Ge73', 'Ge74', 'Ge76']\n \n # Load data\n data = np.genfromtxt('Chempy/input/yields/West17/ertl.txt',skip_header=102,names=True)\n \n # Load model parameters\n z_solar = 0.0153032\n self.masses = np.unique(data['mass'])\n scaled_z = np.unique(data['metallicity']) # scaled to solar\n self.metallicities = scaled_z*z_solar # actual metallicities\n \n self.elements = [key for key in indexing.keys()] # list of elements\n \n # Output table\n self.table = {}\n \n # Create initial abundances\n init_abun = {}\n \n import os\n \n if os.path.exists('Chempy/input/yields/West17/init_abun.npz'):\n init_file = np.load('Chempy/input/yields/West17/init_abun.npz')\n for z_in,sc_z in enumerate(scaled_z):\n init_abun[sc_z] = {}\n for k,key in enumerate(init_file['keys']):\n init_abun[sc_z][key] = init_file['datfile'][z_in][k]\n else: # If not already saved\n \n # Import initial abundance package\n os.chdir('Chempy/input/yields/West17')\n import gch_wh13\n os.chdir('../../../../')\n \n init_dat = []\n from matplotlib.cbook import flatten\n all_isotopes=list(flatten(list(indexing.values())))\t\n for sc_z in scaled_z:\n init_abun[sc_z] = gch_wh13.GCHWH13(sc_z)\n init_dat.append(init_abun[sc_z].abu)\n np.savez('Chempy/input/yields/West17/init_abun.npz',datfile=init_dat,keys=all_isotopes)\n \n for z_index,z in enumerate(self.metallicities): # Define table for each metallicity\n \n # Initialise subtables\n yield_subtable = {}\n yield_subtable['mass_in_remnants'] = []\n yield_subtable['Mass'] = self.masses\n for el in self.elements:\n yield_subtable[el]=[]\n \n # Find correct row in table\n for 
mass in self.masses:\n for r,row in enumerate(data):\n if row['mass'] == mass and row['metallicity']==scaled_z[z_index]:\n row_index = r\n break\n \n # Add remnant mass fraction\n remnant = data['remnant'][row_index]\n yield_subtable['mass_in_remnants'].append(remnant/mass)\n \n # Add each isotope into table\n for element in self.elements:\n el_net_yield = 0\n for isotope in indexing[element]: # Sum contributions from each element\n isotope_net_yield = data[isotope][r]/mass-init_abun[scaled_z[z_index]][isotope]*(mass-remnant)/mass\n el_net_yield +=isotope_net_yield # combine for total isotope yield\n yield_subtable[element].append(el_net_yield)\n \n \n summed_yields = np.zeros(len(self.masses)) # Total net yield - should be approx 1\n for element in self.elements:\n yield_subtable[element] = np.asarray(yield_subtable[element])\n summed_yields+=yield_subtable[element]\n \n # Write into yield table\n yield_subtable['mass_in_remnants'] = np.asarray(yield_subtable['mass_in_remnants'])\n yield_subtable['unprocessed_mass_in_winds'] = 1.0-yield_subtable['mass_in_remnants']-summed_yields\n \n # Restructure table\n all_keys = ['Mass','mass_in_remnants','unprocessed_mass_in_winds']+self.elements\n \n list_of_arrays = [yield_subtable[key] for key in all_keys]\n restructure_subtable = np.core.records.fromarrays(list_of_arrays,names=all_keys)\n \n self.table[z] = restructure_subtable", "def lyft_labels():\n\n return {\n 0: 'None',\n 7: 'Roads',\n 10: 'Vehicles'\n }", "def cntd_phi34_class_coeff( L, m ):\n\n s = m + 2*(L - 1)\n\n if s<0:\n return 0\n\n A = [ [ phi34_cc( sp, mp ) for mp in range(m+1) ] for sp in range(s+1) ]\n\n Alog = lLog( A )\n return Alog[s][m]", "def openMANGASpectrum(self, path_to_logcube, path_to_dapall, bin_number, plate_number, ifu_number, emlines,mpl='mpl-9'):\n\t\t\n\t\t# Read in MAPS file as this contains part of the information.\n\t\tmaps_header = pyfits.open(self.path_to_spectrum)\n\t\tbin_identification = maps_header['BINID'].data\n\t\twhere = np.where(bin_number == bin_identification[0,:,:]) #use 1st channel of bin_identification\n\t\tx_position, y_position = where[0][0], where[1][0]\n\t\t\n\t\t# Get S/N, right ascension and declination.\n\t\tsignal, ra, dec = maps_header['BIN_SNR'].data[x_position,y_position], maps_header[0].header['OBJRA'],maps_header[0].header['OBJDEC']\n\t\tvelocity_dispersion = maps_header['STELLAR_SIGMA'].data \t\t\t\t\n\t\tvelocity_dispersion_correction = maps_header['STELLAR_SIGMACORR'].data[0,:,:]\n\t\t\n\t\tif velocity_dispersion[x_position,y_position] > velocity_dispersion_correction[x_position,y_position]:\n\t\t\tcorrection = np.sqrt((velocity_dispersion[x_position,y_position])**2-(velocity_dispersion_correction[x_position,y_position])**2)\n\t\t\tvdisp = correction\n\t\telse:\n\t\t\tvdisp = 0\n\n\t\t\n\t\t# Open LOGCUBE to get the flux, wavelength, and error\n\t\theader = pyfits.open(path_to_logcube)\n\t\twavelength, flux, emline, bit_mask, inverse_variance = header['WAVE'].data, header['FLUX'].data, header['EMLINE'].data, header['MASK'].data, header['IVAR'].data\n\t\tself.wavelength = wavelength\n\t\tcorrect_flux = flux[:,x_position,y_position]\n\t\tcorrect_flux_emline = emline[:, x_position, y_position]\n\t\toutput_flux = correct_flux - correct_flux_emline\n\t\tcorrect_inverse_variance = inverse_variance[:, x_position, y_position]\n\t\t\n\t\tLSF = header['LSF'].data[:,x_position,y_position]\t\t# LSF given as sigma of Gaussian in Angstrom\n\t\tsig2fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0))\n\t\tLSF_FWHM = LSF*sig2fwhm\n\t\tRES = 
wavelength/LSF_FWHM\n\t\t\n\t\tself.r_instrument = RES\n\t\tself.error = np.sqrt(1.0/(correct_inverse_variance))\n\t\tself.bad_flags = np.ones(len(output_flux))\n\t\tself.flux = output_flux\n\t\tself.vdisp = vdisp\n\n\t\tif (mpl=='mpl-10') or (mpl=='mpl-11'):\n\t\t\text=2\n\t\telse:\n\t\t\text=1\n\t\t\n\t\tdap_all = pyfits.open(path_to_dapall)\n\t\tget = np.where(dap_all[ext].data['PLATEIFU']==str(plate_number)+'-'+str(ifu_number))\n\t\tc = const.c.value/1000\n\t\t# Use redshift as measured from the stellar kinematics by the DAP.\n\t\tredshift = dap_all[ext].data['STELLAR_Z'][get][0]\n\t\t# If redshift measurement failed, use redshift estimate from NSA or ancillary programs.\n\t\tif redshift<0:\n\t\t\tredshift = dap_all[ext].data['Z'][get][0]\n\t\t\t\n\t\tsys_vel = maps_header[0].header['SCINPVEL']\n\t\tbin_vel = maps_header['STELLAR_VEL'].data[x_position,y_position]\t\n\t\t\t\n\t\tif redshift<0:\n\t\t\tprint('WARNING: The redshift of this object is negative.')\n\t\t\tprint('z = {}'.format(redshift))\n\t\t\n\t\tredshift_corr = (sys_vel+bin_vel)/c\n\t\tself.redshift = redshift\n\t\tself.restframe_wavelength = self.wavelength / (1.0+redshift_corr)\n\n\t\tbitmask = bit_mask[:,x_position,y_position]&2**0+2**1+2**2+2**3+2**4\n\t\tself.mask_emissionlines(emlines)\n\t\tself.final_mask = (bitmask | self.lines_mask)\n\n\t\tself.wavelength = self.wavelength[(self.final_mask==False)] \n\t\tself.restframe_wavelength = self.restframe_wavelength[(self.final_mask==False)] \n\t\tself.flux = self.flux[(self.final_mask==False)] \n\t\tself.error = self.error[(self.final_mask==False)]\n\t\tself.bad_flags = self.bad_flags[(self.final_mask==False)]\n\t\t\t\t\t\n\t\t# Get Trust flag, object_id, xpos, ypos and instrumental resolution.\n# \t\tself.trust_flag, self.objid, self.r_instrument = True, 0, np.loadtxt(os.path.join(os.environ['FF_DIR'],'data/MaNGA_spectral_resolution.txt'))\n\t\tself.trust_flag, self.objid= True, 0\n# \t\tself.r_instrument = self.r_instrument[0:self.r_instrument.shape[0]//2]\n\t\tself.r_instrument = self.r_instrument[(self.final_mask==False)]\n\t\tself.xpos, self.ypos = ra, dec\n\t\t\n\t\t# gets the amount of MW reddening on the models\n\t\tif self.milky_way_reddening :\n\t\t\tself.ebv_mw = get_dust_radec(ra, dec, 'ebv')\n\t\telse:\n\t\t\tself.ebv_mw = 0.0", "def out(lam, eng, mat): # {{{1\n print(\"\\\\begin{table}[!htbp]\")\n print(\" \\\\renewcommand{\\\\arraystretch}{1.2}\")\n txt = \" \\\\caption{{\\\\label{{tab:{0}}}properties of {0}}}\"\n # Raw underscores in LaTeX text mode produce “Missing $” errors.\n texlname = lam.name.replace('_', '\\_')\n print(txt.format(texlname))\n print(\" \\\\centering\\\\footnotesize{\\\\rule{0pt}{10pt}\")\n print(\" \\\\tiny calculated by lamprop {}\\\\\\\\[3pt]}}\".format(__version__))\n if eng:\n _engprop(lam)\n if mat:\n _matrices(lam)\n print(\"\\\\end{table}\\n\") # 1}}}", "def get_calculable_constant_names_latex():\n return r\"t_0\", r\"S_{rr}\", r\"S_{r\\theta}\", r\"S_{rz}\", r\"S_{zz}\" \\\n r\"\\alpha\", r\"\\beta\", r\"\\gamma\", r\"C_{13}\", r\"C_{33}\", \\\n r\"\\hat{E}\", r\"g_1\"", "def SFR_Custom(self, z):\n a = self.customSFR[0] \n b = self.customSFR[1] \n c = self.customSFR[2] \n d = self.customSFR[3] \n SFR = a * ((1+z)**b) / (1 + ((1+z)/c)**d) * 1e9 #[1e9 for GPc-3]\n return SFR # [Msun yr-1 Gpc-3] in comoving volume", "def execQ11():\n frame = pan.DataFrame(data, columns=['Product', 'Price', 'Period'])\n carrot = frame[(dFrame.Series_title_1 == \"Carrots, 1kg\") & (dFrame.Period == 2012.03)]\n return carrot", "def 
ActiveHlt1Lines(self) :\n lines = ['Hlt1IncPhi','Hlt1CalibTracking']\n\n return lines", "def short_training_symbol() -> np.ndarray:\n carriers = [0 + 0j] * 64\n carriers[-32] = 0\n carriers[-31] = 0\n carriers[-30] = 0\n carriers[-29] = 0\n carriers[-28] = 0\n carriers[-27] = 0\n carriers[-26] = 0\n carriers[-25] = 0\n carriers[-24] = 1 + 1j\n carriers[-23] = 0\n carriers[-22] = 0\n carriers[-21] = 0\n carriers[-20] = -1 - 1j\n carriers[-19] = 0\n carriers[-18] = 0\n carriers[-17] = 0\n carriers[-16] = 1 + 1j\n carriers[-15] = 0\n carriers[-14] = 0\n carriers[-13] = 0\n carriers[-12] = -1 - 1j\n carriers[-11] = 0\n carriers[-10] = 0\n carriers[-9] = 0\n carriers[-8] = -1 - 1j\n carriers[-7] = 0\n carriers[-6] = 0\n carriers[-5] = 0\n carriers[-4] = 1 + 1j\n carriers[-3] = 0\n carriers[-2] = 0\n carriers[-1] = 0\n carriers[0] = 0\n carriers[1] = 0\n carriers[2] = 0\n carriers[3] = 0\n carriers[4] = -1 - 1j\n carriers[5] = 0\n carriers[6] = 0\n carriers[7] = 0\n carriers[8] = -1 - 1j\n carriers[9] = 0\n carriers[10] = 0\n carriers[11] = 0\n carriers[12] = 1 + 1j\n carriers[13] = 0\n carriers[14] = 0\n carriers[15] = 0\n carriers[16] = 1 + 1j\n carriers[17] = 0\n carriers[18] = 0\n carriers[19] = 0\n carriers[20] = 1 + 1j\n carriers[21] = 0\n carriers[22] = 0\n carriers[23] = 0\n carriers[24] = 1 + 1j\n carriers[25] = 0\n carriers[26] = 0\n carriers[27] = 0\n carriers[28] = 0\n carriers[29] = 0\n carriers[30] = 0\n carriers[31] = 0\n return np.array(carriers) * np.sqrt(13 / 6)", "def label(self):\n G = self.__f.group()\n if is_Gamma0(G):\n group = ''\n elif is_Gamma1(G):\n group = 'G1'\n elif is_GammaH(G):\n group = 'GH[' + ','.join([str(z) for z in G._generators_for_H()]) + ']'\n return '%s%s%s'%(self.level(), cremona_letter_code(self.factor_number()), group)", "def lwcf(flntoa, flntoac):\n var = flntoa - flntoac\n var.long_name = \"TOA longwave cloud forcing\"\n return var", "def get_kit_string(currentStep):\n kit_string = currentStep.udf.get(\"ONT prep kit\")\n\n if currentStep.udf.get(\"ONT expansion kit\") != \"None\":\n kit_string += f\" {currentStep.udf.get('ONT expansion kit')}\"\n\n return kit_string", "def test_CLINTOX_2_erroneous_data_INFO_structural(self, mock_print):\n Plotter.from_smiles(self.data_CLINTOX_2_erroneous_smiles[\"smiles\"], target=self.data_CLINTOX_2_erroneous_smiles[\"target\"], target_type=\"C\", sim_type=\"structural\")\n mock_print.assert_called_once_with('The following erroneous SMILES have been found in the data:\\n[NH4][Pt]([NH4])(Cl)Cl\\nc1ccc(cc1)n2c(=O)c(c(=O)n2c3ccccc3)CCS(=O)c4ccccc4\\nCc1cc2c(cc1C)N3C=N2[Co+]456(N7=C8[C@H](C(C7=CC9=N4C(=C(C1=N5[C@@]([C@@H]2N6C(=C8C)[C@@]([C@H]2CC(=O)N)(CCC(=O)NC[C@H](OP(=O)(O[C@@H]2[C@H](O[C@H]3[C@@H]2O)CO)[O-])C)C)([C@@]([C@@H]1CCC(=O)N)(C)CC(=O)N)C)C)[C@@]([C@@H]9CCC(=O)N)(C)CC(=O)N)(C)C)CCC(=O)N)O\\n' + \n 'Cc1cc2c(cc1C)N3C=N2[Co]456(N7=C8[C@H](C(C7=CC9=N4C(=C(C1=N5[C@@]([C@@H]2N6C(=C8C)[C@@]([C@H]2CC(=O)N)(CCC(=O)NC[C@H](OP(=O)(O[C@@H]2[C@H](O[C@H]3[C@@H]2O)CO)O)C)C)([C@@]([C@@H]1CCC(=O)N)(C)CC(=O)N)C)C)[C@@]([C@@H]9CCC(=O)N)(C)CC(=O)N)(C)C)CCC(=O)N)C#N\\n'+\n 'CCCCc1c(=O)n(n(c1=O)c2ccc(cc2)O)c3ccccc3\\nCCCCc1c(=O)n(n(c1=O)c2ccccc2)c3ccccc3.\\n' +\n 'The erroneous SMILES will be removed from the data.')", "def derive_Hosek18(wavelength):\n # Extinction law definition\n wave = np.array([0.8059, 0.962, 1.25, 1.53, 2.14, 3.545])\n A_AKs = np.array([9.66, 6.29, 3.56, 2.33, 1.0, 0.50])\n \n\n # Following Hosek+18, Interpolate over the curve with cubic spline interpolation\n spline_interp = interpolate.splrep(wave, A_AKs, 
k=3, s=0)\n A_AKs_at_wave = interpolate.splev(wavelength, spline_interp)\n\n # This curve already assumes A_Ks = 1.0, so we can go straight to\n # output \n return A_AKs_at_wave", "def getDetector(self):\n\t\t#if self.offset5() == -18.5:\n\t\tif self.offset5() == -17.5:\n\t\t\tself.countername='Vortex'\n\t\tif self.offset5() == -2.5:\n\t\t\tself.countername='apd'\n\t\telse:\n\t\t\treturn \"No valid detector selected\"\n\t\treturn self.countername", "def ActiveHlt1Lines(self) :\n lines = [ 'Hlt1TrackAllL0', 'Hlt1TrackMuon', 'Hlt1TrackAllL0Tight', 'Hlt1TrackPhoton'\n , 'Hlt1VertexDisplVertex'\n , 'Hlt1SingleMuonNoIP', 'Hlt1SingleMuonHighPT'\n , 'Hlt1SingleElectronNoIP'\n , 'Hlt1DiMuonLowMass', 'Hlt1DiMuonHighMass'\n , 'Hlt1DiProtonLowMult', 'Hlt1DiProton'\n , 'Hlt1L0HighSumETJet','Hlt1HighPtJetsSinglePV']\n \n \n lines += ['Hlt1CharmCalibrationNoBias']\n lines += ['Hlt1CharmCalibrationNoBias']\n return lines", "def calculate_cci(hunterlab):\n return 1000 * (hunterlab[1]) / (hunterlab[0] * hunterlab[2])", "def showCrystals(self,tthval=90., lowlimit=80,highlimit=100,wl=None):\n\t\tcrystallist=sorted(self.analyser.items(),key=itemgetter(1))\n\t\tif wl==None:\n\t\t\twl = BLi.getWavelength()\n\t\tcrystallistb=[];\n\t\tcrystal_list_new=[];\n\t\tfor indice in range(0,len(crystallist)):\n\t\t\ttry:\n\t\t\t\tthbragg = 180/pi*asin(wl/(2*crystallist[indice][1]))\n\t\t\t\tcrystallistb.append([crystallist[indice][0], crystallist[indice][1], 2*thbragg])\n\t\t\texcept:\n\t\t\t\tpass\n\t\tprint crystallistb\n\t\tfor indice in range(0,len(crystallistb)):\n\t\t\ttry:\n\t\t\t\tordine1=int(round(tthval/crystallistb[indice][2]))\n\t\t\t\tfor ordine in [ordine1-1,ordine1,ordine1+1]:\n\t\t\t\t\tif ordine>0:\n\t\t\t\t\t\tthbragg = 180/pi*asin(ordine*wl/(2*crystallistb[indice][1]))\n#\t\t\t\tprint 2*thbragg, ordine, [crystallistb[indice][0] crystallistb[indice][1]/ordine 2*thbragg ordine]\n\t\t\t\t\t\tcrystal_list_new.append([crystallistb[indice][0], round(crystallistb[indice][1]/ordine,4), round(2*thbragg,4), ordine, abs(tthval-2*thbragg)])\n\t\t\texcept:\n\t\t\t\tpass\n\t\tcrystal_list_new=sorted(crystal_list_new,key=itemgetter(4))\n\t\tprint crystal_list_new\n\t\tprint \"Crystal, d-spacing, 2theta, refl. order\"\n\t\tfor indice in range(len(crystal_list_new)):\n\t\t\tif crystal_list_new[indice][2]>lowlimit and crystal_list_new[indice][2]<highlimit and mod(crystal_list_new[indice][-2],2)==self.analyserorder[crystal_list_new[indice][0]]:\n\t\t\t\tprint crystal_list_new[indice][0:-1]\n\t\treturn #crystal_list_new", "def _extract_kiss_source(self):\n self.source = aprs.Callsign(self.frame[7:])", "def wlsoln_coeff_from_header (pyfits_header, apply_WCS_rv=False, preferred=None, output='sel'):\n # coefficient choices\n cc = {}\n #========================================================================#\n # linear dispersion\n cc['linear'] = coeff_basic_linear(pyfits_header)\n\n #========================================================================#\n # using keywords ctype, crval, crpix, cdelt\n cc['ctype1'] = coeff_from_ctype1(pyfits_header)\n\n #========================================================================#\n # linear dispersion using keywords linintrp, crvl1_?, cdlt1_?\n # from IRAF, order by order !! 
do I need to look up what the 1_ means?\n # some of these are doubled by WAT0_001 stuff\n cc['crvl'] = coeff_from_crvl(pyfits_header)\n # if preferred_disp == 'any' or preferred_disp == 'linear' or preferred_disp == 'crvl' or preferred_disp == 'makee linear':\n \n #========================================================================#\n # IRAF WCS keywords WAT?_001 \n #if preferred_disp == 'any' or preferred_disp == 'IRAF_WCS':\n cc['wcs'] = coeff_from_wcs(pyfits_header,apply_WCS_rv)\n\n #========================================================================#\n # linear dispersion for keywords w0 and wpc\n cc['w0'] = coeff_from_w0(pyfits_header)\n #if preferred_disp == 'any' or preferred_disp == 'linear' or preferred_disp == 'w0':\n\n #========================================================================#\n # MAKEE type dispersion using keywords co_0_? and co_4_?\n # I'm not sure what type of coefficients these are !!\n #cc['co_0'] = coeff_from_makee_c0(pyfits_header)\n# if preferred_disp == 'any' or preferred_disp == 'makee' or preferred_disp == 'co_0':\n\n #========================================================================#\n # MAKEE coeffificients using keywords wv_0_? and wv_4_?\n cc['wv_0'] = coeff_from_makee_wv(pyfits_header)\n #if preferred_disp == 'any' or preferred_disp == 'makee' or preferred_disp == 'wv_0':\n\n #========================================================================#\n # spectre type dispersion\n cc['spectre'] = coeff_from_SPECTRE(pyfits_header)\n #if preferred_disp == 'any' or preferred_disp == 'spectre':\n\n #========================================================================#\n #========================================================================#\n \n if output == 'all': return cc\n return resolve_wlsoln_coeffs(cc,preferred)", "def getCrystal(self):\n\t\treturn self.crystal,self.dspace,self.offset7()", "def get_power_spectrum(self, cosmo, nz1, nz2):\n t1 = ccl.WeakLensingTracer(cosmo, nz1)\n t2 = ccl.WeakLensingTracer(cosmo, nz2)\n return ccl.angular_cl(cosmo, t1, t2, self.ells)", "def __get_final_data(self,mpoly,gpoly):\t\n\t\tsteps = len(mpoly)\n\t\tecwords = self.__longdivision(steps,mpoly,gpoly)\n\t\tmessage = mpoly + ecwords\n\t\tif self.version == 1:\n\t\t\trem = 0\n\t\telse:\n\t\t\trem = 7\n\t\tmessage = ['{0:08b}'.format(i) for i in message]+['0']*rem\n\t\tmessage = \"\".join(message)\n\t\treturn message", "def info():\n return r\"\"\"Tseng, Lin-Yu, and Chun Chen. \"Multiple trajectory search for unconstrained/constrained multi-objective optimization.\" Evolutionary Computation, 2009. CEC'09. IEEE Congress on. IEEE, 2009.\"\"\"", "def printLHCb ( LR = \"L\" ,\n prelim = \"Final\" ,\n text = \"\" ) :\n \n if not LR in ( 'L' , 'R' ) :\n raise TypeError( \"Unknown LR-option: %s\" % LR )\n \n global lhcbName\n if 'R' == LR : \n lhcbName = ROOT.TPaveText ( 0.70 - lhcbStyle . GetPadRightMargin (),\n 0.85 - lhcbStyle . GetPadTopMargin (),\n 0.95 - lhcbStyle . GetPadRightMargin (),\n 0.95 - lhcbStyle . GetPadTopMargin (),\n \"BRNDC\" )\n \n else : ## LR=\"L\"\n lhcbName = ROOT.TPaveText ( lhcbStyle.GetPadLeftMargin() + 0.08 ,\n 0.87 - lhcbStyle.GetPadTopMargin() ,\n lhcbStyle.GetPadLeftMargin() + 0.32 ,\n 0.95 - lhcbStyle.GetPadTopMargin() ,\n \"BRNDC\" )\n\n if \"Final\" == prelim : lhcbName.AddText ( \"LHCb\" )\n elif \"Prelim\" == prelim : lhcbName.AddText ( \"#splitline{LHCb}{#scale[1.0]{Preliminary}}\") \n else : lhcbName.AddText ( text )\n \n lhcbName . SetFillColor(0);\n lhcbName . SetFillColor(3001);\n lhcbName . 
SetTextAlign(12);\n lhcbName . SetBorderSize(0);\n lhcbName . Draw() \n \n return lhcbName", "def main():\n try:\n session = Api()\n plate = session.active_document()\n print(\"Part: {:^30s}\\n\".format(plate.name))\n\n # Check if part is sheetmetal.\n assert plate.name.endswith(\n \".psm\"\n ), \"This macro only works on .psm not {:^30s}\".format(plate.name[-4:])\n\n # Get a reference to the variables collection.\n holes = HoleCollection(plate)\n\n # Display the quantites of different types of holes.\n quantites(\n holes.count,\n holes.count_threaded,\n holes.count_imperial_threaded,\n holes.count_metric_threaded,\n )\n\n # Prototyping table of holes. (helper for drafter)\n ## qty_size = dict(len(holes.all_holes())) # >>> 'M6x1':3\n ## print_table(qty_size)\n\n # Prompt user selection\n units = prompt_units_selection()\n\n if units == \"metric\": # if metric\n for hole in holes.threaded():\n o = Hole(hole)\n if o.is_metric():\n continue\n imperial = o.size\n holedata = Hole.get_equivalence(o, mapping=mappingToMetric)\n o.conversion_to_metric(holedata)\n metric = o.size\n header()\n print(\" {:<30s} {:<30s}\".format(imperial, metric))\n footer()\n\n elif units == \"imperial\": # if imperial\n for hole in holes.threaded():\n o = Hole(hole)\n if o.is_imperial():\n continue\n metric = o.size\n holedata = Hole.get_equivalence(o, mapping=mappingToImp) # correction\n o.conversion_to_metric(holedata) # correction\n imperial = o.size\n header()\n print(\" {:<30s} {:<30s}\".format(metric, imperial))\n footer()\n\n elif units == \"debug\":\n for hole in holes.threaded():\n o = Hole(hole)\n print(o.__repr__())\n\n else:\n sys.exit()\n\n # Display a second time the quantites of different types of holes.\n quantites(\n holes.count,\n holes.count_threaded,\n holes.count_imperial_threaded,\n holes.count_metric_threaded,\n state=\"(Changed state)\",\n )\n\n except AssertionError as err:\n print(err.args)\n\n except Exception as ex:\n print(ex.args)\n\n else:\n pass\n\n finally:\n raw_input(\"\\nPress any key to exit...\")\n sys.exit()", "def _scfconv_from_ccdata(self):\n\n lines = [f\"scf-first 1 THROUGH {len(self.ccdata.scfenergies)}\"]\n\n for scfenergy in self.ccdata.scfenergies:\n lines.append(f\"{scfenergy:15.6f}\")\n\n return lines", "def func_kc_318(n, series):\n if series == \"3D3\":\n try:\n return 2*np.pi/(wl_3D3[str(n)]*1e-9)\n except:\n return 0", "def k_c(self, tl):\n\t\treturn self.KC0*exp(self.HKC/(R*self.TO)*(1. - self.TO/tl))", "def _getPOSCAR():\r\n file = open(\"POSCAR\", \"r\")\r\n lines = file.readlines()\r\n line5 = str(lines[5]).strip().split()\r\n line6 = str(lines[6]).strip().split()\r\n cellname = ''\r\n for i in range(len(line5)):\r\n cellname += (line5[i] + line6[i])\r\n return cellname" ]
[ "0.55920184", "0.549476", "0.54696536", "0.54056984", "0.53960073", "0.5380349", "0.53594655", "0.53576845", "0.53418833", "0.53027004", "0.5290028", "0.5287675", "0.5284653", "0.52330744", "0.51322997", "0.50801104", "0.5078625", "0.50296295", "0.50066954", "0.5004581", "0.49985564", "0.49900883", "0.498733", "0.49734536", "0.49734536", "0.49690753", "0.49663875", "0.49662814", "0.4957113", "0.4949705", "0.49300337", "0.4924287", "0.4910201", "0.48991886", "0.489692", "0.489141", "0.48878312", "0.48870856", "0.48744547", "0.48714852", "0.48571515", "0.48490903", "0.48458752", "0.48405975", "0.48391202", "0.48285025", "0.48271805", "0.4818124", "0.48170274", "0.4814193", "0.4812188", "0.48001504", "0.47986785", "0.47981593", "0.47965062", "0.47889578", "0.47876403", "0.47867444", "0.47831059", "0.47809315", "0.47784767", "0.4773623", "0.47714275", "0.4769978", "0.4766662", "0.47663572", "0.47613356", "0.47580242", "0.47523403", "0.4749896", "0.47473463", "0.47473347", "0.47455567", "0.47385982", "0.47343156", "0.47328815", "0.4732704", "0.47301483", "0.47249445", "0.47235262", "0.47206983", "0.47156313", "0.47128755", "0.4712777", "0.4712257", "0.47114435", "0.47072062", "0.4703066", "0.4702676", "0.47008675", "0.46954224", "0.4695356", "0.46929306", "0.46927533", "0.4691919", "0.46918717", "0.46908137", "0.4690111", "0.46893957", "0.46885237" ]
0.70417845
0
Return the base cosmology but with the Riess2019 H0 value. For details
def Riess2019_H0_cosmology(base_cosmology):
    _base_cosmology = get_cosmology(base_cosmology)
    return cosmo.LambdaCDM(
        H0=74.03, Om0=_base_cosmology.Om0, Ode0=_base_cosmology.Ode0
    )
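# Minimal usage sketch, assuming `cosmo` is astropy.cosmology and that
# `get_cosmology` resolves a named base cosmology such as "planck15"; the
# returned LambdaCDM keeps the base Om0/Ode0 but carries the Riess et al.
# (2019) value H0 = 74.03 km/s/Mpc:
#
#     lcdm = Riess2019_H0_cosmology("planck15")
#     lcdm.H0   # ~74.03 km / (Mpc s)
#     lcdm.Om0  # unchanged from the base cosmology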
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sound_horizon_EH(self):\n om_m = self.omega_cb\n om_b = self.omega_b\n om_n = np.sum(self.omega_nu)\n h = self.h \n if self.M_nu_tot == 0.: rs = 44.5*np.log(9.83/om_m)/np.sqrt(1+10*om_b**0.75)*h\n else: rs = 55.154*np.exp(-72.3*(om_n+0.0006)**2.)/(om_m**0.25351*om_b**0.12807)*h\n return rs", "def init_physical(\n ombh2=0.022161, omch2=0.11889, H0=67.77, omkh2=0.0, t0=2.726, nnu=3.046\n ):\n h = H0 / 100.0\n\n c = Cosmology()\n\n c.omega_b = ombh2 / h ** 2\n c.omega_c = omch2 / h ** 2\n c.H0 = H0\n\n rhoc = 3.0 * c.H() ** 2 * c_sl ** 2 / (8.0 * math.pi * G_n)\n rhorad = a_rad * t0 ** 4\n c.omega_g = rhorad / rhoc\n\n rhonu = nnu * rhorad * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0)\n c.omega_n = rhonu / rhoc\n\n c.omega_l = 1.0 - (omkh2 + ombh2 + omch2) / h ** 2 - (c.omega_g + c.omega_n)\n\n return c", "def getP0(self):\n\t\tmyhmag.initializehelmholtz()\n\t\tabar = 13.714285714285715\n\t\tzbar = abar/2.0\n\t\tself.data[\"P0\"] = np.zeros(len(self.data[\"rho\"]))\n\t\tfor i in range(len(self.data[\"rho\"])):\n\t\t\tself.data[\"P0\"][i],energ,sound,gammaout,entropy,dummyfail = myhmag.gethelmholtzeos(1000.,self.data[\"rho\"][i],abar,zbar,True)", "def get_h0(self, t):\n return self.h0", "def get_rzero(self):\n return self.get_resistance() * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def get_rzero(self):\n return self.get_resistance() * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def h2o_from_rh_and_temp(RH, TEMP):\n TC = TEMP - 273.15\n frh = RH / 100.\n svp_millibar = 6.11 * 10**((7.5 * TC)/(TC+237.3))\n svp_pa = svp_millibar * 100\n vp_pa = svp_pa * frh\n molecule_per_cubic_m = vp_pa * Avogadro / R / TEMP\n molecule_per_cubic_cm = molecule_per_cubic_m * centi**3\n #print RH, TEMP, molecule_per_cubic_cm\n return molecule_per_cubic_cm", "def rhe(m):\n \n m = m*u.kg.to(u.M_sun)\n \n logr = np.full(m.shape,0)\n \n iless = np.where(m<=2.5)\n igreater = np.where(m>2.5)\n \n logr[iless] = 3.0965 - 2.013*np.log10(m[iless])\n logr[igreater] = 0.0557*(np.log10(m[igreater])-0.172)**-2.5\n return (10**logr)*u.Rsun.to(u.m)", "def ST_zero_flux(self):\n return 10 ** (-0.4 * self.ST_zero_mag) * Unit('erg*s**-1*cm**-2*AA**-1')", "def calculate_rh(self):\n # Check for existence of relative humidity and mixing ratio\n if self.data.get('Relative_Humidity') is None:\n if self.data.get('Mixing_Ratio') is None:\n raise KeyError('Calculate mixing ratio first!')\n else:\n # Convert mixing ratio to relative humidity\n sat_vapor = 6.11 * (10.0**((7.5 * self.data['Temperature_C']) /\n (237.7 + self.data['Temperature_C'])))\n\n sat_w = 621.97 * (sat_vapor / (self.data['Pressure'] -\n sat_vapor))\n\n self.data['Relative_Humidity'] = ((self.data['Mixing_Ratio'] /\n sat_w) * 100.0)", "def _calculate_strehl(self):\n\n self.strehl = np.exp(-1*((2*np.pi/self.science_wavelength)*self.high_order_wfe)**2)", "def MH(self):\n\n #return math.log10(self.glb[user_params_index[\"Zs\"]]*constants.solar_x/(self.glb[user_params_index[\"Xs\"]]*constants.solar_z))\n return math.log10(self.glb[iz0]*constants.solar_x/(self.glb[ix0]*constants.solar_z))", "def get_h0(self, t):\n return self.h0 * np.sin(2 * np.pi * t / self.Pmod + self.Pmod_phi)", "def get_hcore1(mol, atom, coord):\n\n mf = scf.RHF(mol)\n g = grad.rhf.Gradients(mf)\n\n hcore1 = g.hcore_generator(mol)(atom)[coord]\n\n omega = np.identity(2)\n hcore1 = np.kron(omega, hcore1)\n\n return hcore1", "def rho0_c(self, c):\n return 200./3*self.rhoc*c**3/(np.log(1+c)-c/(1+c))", "def h_P(self, z0):\n # Get the governing variables\n (B, N, u_slip, u_inf) = 
self.get_variables(z0, 0.)\n \n # Compute U_N\n U_N = u_slip / (B * N)**(1./4.)\n \n # Compute the correlation equation\n return 5.2 * np.exp(-(U_N - 1.8)**2 / 10.24) * (B / N**3)**(1./4.)", "def ST_zero_flux(self):\n return 10 ** (-0.4 * self.ST_zero_mag) * Unit('erg*s-1*cm-2*AA-1')", "def OLSV2_HA(code):\n if len(code)==6:\n if (code[0]=='6')|(code[0]=='9'):\n code='SS_'+code\n _get_index_data('000001')\n index1='SS_000001'\n else:\n code='SZ_'+code\n _get_index_data('399001')\n index1='SZ_399001'\n elif len(code)==4:\n code='HK_'+code\n index1='HK_HSI'\n else:\n print('Input Wrong code.')\n\n pre_code='YAHOO/'\n ticker=pre_code+code\n index1=pre_code+index1\n\n fn='./Quandl/'+ticker+'.csv'\n ind='./Quandl/'+index1+'.csv'\n\n df=pd.read_csv(fn,parse_dates=True,index_col=0)\n dff1=df[['Open','High','Low','Close','Volume','Adjusted Close']].copy()\n print ('Caculating the analysing 1/4 statistics:')\n print (dff1.describe(),'\\n')\n dfi=pd.read_csv(ind,parse_dates=True,index_col=0)\n df['cpct']=df['Close'].pct_change()\n df['vpct']=df['Volume'].pct_change()\n dfi['indpct']=dfi['Close'].pct_change()\n\n rets=pd.concat([df['cpct'],dfi['indpct'],df['vpct']],axis=1)\n rets=rets.dropna(how='any')\n #print (rets)\n\n X=np.array(rets.iloc[:,1:3])\n X=sm.add_constant(X)\n #print(X)\n\n Y=np.array(rets.iloc[:,0])\n\n\n #y=np.dot(X,beta)+e\n model=sm.OLS(Y,X)\n results=model.fit()\n print (results.summary())\n \n print (\"The params for the model:\",results.params)\n print (\"The std for the model:\",results.bse)\n return results", "def noyes84_rossby_activity(logRpHK):\n y = 5 + logRpHK\n logRo = 0.324 - 0.400*y - 0.283 * y**2 - 1.325 * y**3\n return 10**logRo", "def ISA_trop(h):\n\tT = 288.15 - 0.0065*h;\n\tp = 101325*(T/288.15)**(-g/(-0.0065*287));\n\trho = 1.225*(T/288.15)**(-g/(-0.0065*287) - 1);\n\ta = np.sqrt(1.4*287*T);\n\treturn T, p, rho, a;", "def MAH_Hearin_2021(halo_mass_t0, cosmic_t):\r\n\r\n #U_a_early = 2.5\r\n #U_a_early_late = 0.3\r\n #log10tau_c = 1.25\r\n\r\n k = 3.5\r\n\r\n a_late_early = 2.5-0.3 #np.log( np.power(np.e, U_a_early_late) + 1. )\r\n a_early = 2.5 #np.log( np.power(np.e, U_a_early) + 1. )\r\n tau_c = 1.25 #np.power(10., log10tau_c)\r\n alpha = a_early + a_late_early / (1. + np.exp(-k*(cosmic_t - tau_c)) )\r\n\r\n MAH = np.log10( 10.**halo_mass_t0 * np.power(cosmic_t / Cosmo.age(0), alpha) )\r\n\r\n return MAH", "def rh(self, h):\n sez=self.getSect(h)\n area=self.area(sez)\n wetborder = self.wetBorder(sez)\n return area/wetborder", "def convert_H_kcalmol(en_H):\n return en_H/kcalmol_H", "def sidm_halo_model_default(r, N0, v0, ns0, sigma0, w0, log10M200, c200):\n\n op_sigma_model = \"velocity-dependent\"\n\n Msun_in_cgs = 1.98848e33\n kpc_in_cgs = 3.08567758e21\n\n t_age = 7.5 # Gyr - assuming constant halo age\n rho0 = find_rho0(N0, t_age, v0, ns0, sigma0, w0, op_sigma_model)\n t_age_cgs = t_age * 1e9 * 365.24 * 24 * 3600 # sec\n rho0_cgs = rho0 * Msun_in_cgs / kpc_in_cgs ** 3 # g/cm^3\n\n G = 4.3e-6 # kpc km^2 Msun^-1 s^-2\n r0 = v0**2 / (4. 
* np.pi * G * rho0)\n r0 = np.sqrt(r0) # kpc\n\n sol = fsolve(find_r1, 20, args=(rho0_cgs, v0, ns0, t_age_cgs, sigma0, w0, op_sigma_model))\n r1 = sol[0] * r0 # kpc\n\n rho = rho_joint_profiles(r, r1, r0, rho0, ns0, log10M200, c200)\n\n log10rho = np.log10(rho)\n\n return log10rho", "def haurwitz(zenith):\n\n # GHI = 1098 * cos(z) * exp(-0.057 / cos(z))\n clearsky_ghi = 1098.0 * np.cos(np.radians(zenith)) * np.exp(-0.057 / np.cos(np.radians(zenith)))\n\n # remove negative values\n clearsky_ghi[clearsky_ghi < 0] = 0\n\n return clearsky_ghi", "def _calc_Hc(self, signal):\n\n return 2.8 * np.nanstd(signal)", "def _calculate_r0(self):\n\n self.r0 = self.coherence_cell_size * (np.cos(np.deg2rad(self.zenith_angle)))**(3/5)", "def clinopyroxene_92():\n\n rho = 3327.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 257.3; C[0,1] = 85.9; C[0,2] = 76.2; C[0,3] = 0.; C[0,4] = 7.1; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 216.2; C[1,2] = 71.8; C[1,3] = 0.; C[1,4] = 13.3; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 260.2; C[2,3] = 0.; C[2,4] = 33.7; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 80.2; C[3,4] = 0.; C[3,5] = 10.2\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 70.6; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 85.8\n\n return C, rho", "def _convert_rh2w(self):\n sat_vapor = 6.11 * (10.0 ** ((7.5 * self.data['Temperature_C']) /\n (237.7 + self.data['Temperature_C'])))\n\n sat_w = 621.97 * (sat_vapor / (self.data['Pressure'] - sat_vapor))\n\n self.data['Mixing_Ratio'] = (\n self.data['Relative_Humidity'] / 100.0) * sat_w", "def _interpolate(self, omch2, h0):\n omch2_index = (\n 1.0\n * (self.CAMBGenerator.om_resolution - 1)\n * (omch2 - self.CAMBGenerator.omch2s[0])\n / (self.CAMBGenerator.omch2s[-1] - self.CAMBGenerator.omch2s[0])\n )\n\n if self.CAMBGenerator.h0_resolution == 1:\n h0_index = 0\n else:\n h0_index = (\n 1.0 * (self.CAMBGenerator.h0_resolution - 1) * (h0 - self.CAMBGenerator.h0s[0]) / (self.CAMBGenerator.h0s[-1] - self.CAMBGenerator.h0s[0])\n )\n\n x = omch2_index - np.floor(omch2_index)\n y = h0_index - np.floor(h0_index)\n\n data = self.data\n result = {}\n for key in data.keys():\n\n v1 = data[key][int(np.floor(omch2_index)), int(np.floor(h0_index))] # 00\n v2 = data[key][int(np.ceil(omch2_index)), int(np.floor(h0_index))] # 01\n\n if self.CAMBGenerator.h0_resolution == 1:\n result[key] = v1 * (1 - x) * (1 - y) + v2 * x * (1 - y)\n else:\n v3 = data[key][int(np.floor(omch2_index)), int(np.ceil(h0_index))] # 10\n v4 = data[key][int(np.ceil(omch2_index)), int(np.ceil(h0_index))] # 11\n result[key] = v1 * (1 - x) * (1 - y) + v2 * x * (1 - y) + v3 * y * (1 - x) + v4 * x * y\n return result", "def clinopyroxene_98():\n\n rho = 3190.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 237.8; C[0,1] = 83.5; C[0,2] = 80.; C[0,3] = 0.; C[0,4] = 9.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 183.6; C[1,2] = 59.9; C[1,3] = 0.; C[1,4] = 9.5; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 229.5; C[2,3] = 0.; C[2,4] = 48.1; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 76.5; C[3,4] = 0.; C[3,5] = 8.4\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 73.; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 81.6\n\n return C, rho", "def get_alpha_c_hex_h(v_hs: float) -> float:\n\n # effective area for heat exchange of front projected area of 
heat exchanger of the internal unit, m2\n a_f_hex = get_a_f_hex()\n\n v = v_hs / 3600 / a_f_hex\n\n return (- 0.0017 * v ** 2 + 0.044 * v + 0.0271) * 1000", "def cdd_Hrepresentation(self):\n return cdd_Hrepresentation(self._cdd_type, \n [i for i in self.inequalities()],\n [e for e in self.equation_generator()] )", "def H(self, z=0.0):\n\n H = (\n self.H0\n * (\n self.omega_r * (1 + z) ** 4\n + self.omega_m * (1 + z) ** 3\n + self.omega_k * (1 + z) ** 2\n + self.omega_l\n * (1 + z) ** (3 * (1 + self.w_0 + self.w_a))\n * np.exp(-3 * self.w_a * z / (1 + z))\n )\n ** 0.5\n )\n\n # Convert to SI\n return H * 1000.0 / mega_parsec", "def get_corrected_rzero(self, temperature, humidity):\n return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def get_corrected_rzero(self, temperature, humidity):\n return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def example():\n ldata = 200\n degrees = np.arange(ldata+1, dtype=np.float64)\n degrees[0] = np.inf\n power = degrees**(-1)\n\n clm1 = pysh.SHCoeffs.from_random(power, exact_power=False)\n clm2 = pysh.SHCoeffs.from_random(power, exact_power=True)\n\n fig, ax = plt.subplots()\n ax.plot(clm1.spectrum(unit='per_l'), label='Normal distributed power')\n ax.plot(clm2.spectrum(unit='per_l'), label='Exact power')\n ax.set(xscale='log', yscale='log', xlabel='degree l',\n ylabel='power per degree l')\n ax.grid(which='both')\n ax.legend()\n\n plt.show()", "def noyes84_logRpHK_to_S(logRpHK, BmV):\n logRpHK = np.asarray(logRpHK)\n BmV = np.asarray(BmV)\n logC_cf = 1.13 * BmV**3 - 3.91 * BmV**2 + 2.84 * BmV - 0.47\n DlogC_cf = np.zeros_like(BmV) # for correction of \"nonphysical maximum\"\n x = (0.63 - BmV)\n sel = BmV < 0.63\n DlogC_cf[sel] = 0.135*x[sel] - 0.814*x[sel]**2 + 6.03*x[sel]**3\n logC_cf += DlogC_cf\n logR_phot = -4.898 + 1.918 * BmV**2 - 2.893 * BmV**3\n Rp_HK = 10**logRpHK\n R_HK = Rp_HK + 10**logR_phot\n S = R_HK/(1.340E-4 * 10**logC_cf)\n return S", "def AB_zero_flux(self):\n return 10 ** (-0.4 * self.AB_zero_mag) * Unit('erg*s**-1*cm**-2*AA**-1')", "def convert_H_kJmol(en_H):\n return en_H/kJmol_H", "def baseline_co2_equivilant(self):\n return self.indoor_air_quality_baseline[0]", "def set_cosmology(self, cosmo):\n self.cosmo = cosmo\n self.h70 = cosmo['h'] # Hubble parameter, H0 = 100h km/s/Mpc\n self.Om = cosmo['omega_M_0'] # Omega_matter\n self.Ol = cosmo['omega_lambda_0'] # Omega_Lambda", "def h_S(self, z0, u_inf):\n # Get the governing variables\n (B, N, u_slip, u_inf) = self.get_variables(z0, u_inf)\n \n # Compute the correlation equation\n return 5.1 * B / (u_inf * u_slip**2.4)**(0.88)", "def q_2_rh(temp, pressure, qair):\n mr = qair / (1-qair)\n e = mr * pressure / (0.62197 + mr)\n # convert temperature to saturated vapor pressure\n es = 611.2 * np.exp(17.67 * (temp - 273.15) / (temp - 29.65))\n rh = e / es\n rh[rh > 1] = 1\n rh[rh < 0] = 0\n return rh", "def g0_hz(sp,cal_Veff = 5e-3, V_pi = None, askFit = True,T = 293,PM_calib_file = \"M:\\\\phaseModulationCalibrations\\\\V_pi1550.spe\"):#V_pi = 7.1\n \n cal_Veff = addDefaultUnit(cal_Veff,V)\n T = addDefaultUnit(T,K)\n f = sp.fitter\n if not isinstance(sp.fitter,fit.FitterLorentzAndGauss):\n if askFit:\n yn = \"dummy\"\n while yn is not \"y\":\n yn = raw_input(\"spectrum %s was fitted with model %s. 
refit it with model \\\"LorentzGauss\\\"(y/n)?\"%(sp.name,f.ID_STR))\n if yn ==\"n\":\n raise ValueError(\"spectrum should be fitted with LorentzGauss for determining effective mass\")\n sp.fit(model = \"LorentzGauss\")\n\n\n if V_pi == None:\n f = load(PM_calib_file)\n V_pi = utils.misc.interpFromPlottable(f,sp[\"x0_2_hz\"])\n else:\n V_pi = 7.1\n V_pi = addDefaultUnit(V_pi,V)\n print \"value of V_pi used is \" + str(V_pi)\n\n ratio = sp[\"area_2\"]/sp[\"area\"]\n \n phi0 = np.pi*cal_Veff*sqrt(2)/V_pi\n \n# omega = 2.0*np.pi*cst.c/(lambda_nm*1e-9)\n\n Omega = 2.0*np.pi*sp[\"x0_2_hz\"]*Hz\n\n nbar = k*T/(hbar*Omega)\n g0 = ((Omega**2*phi0**2)/(4*nbar*ratio))**.5/(2*pi)\n yn = raw_input(\"would you like results to be pasted in Origin to be copied in clipboard? [y]/n\")\n if yn is not \"n\":\n #import utils\n utils.misc.copyToClipboard(str(sp[\"x0_hz\"]*1e-6) +\"\\t\"+str(sp[\"gamma_hz\"]) + \"\\t\"+\"0\" +\"\\t\"+ str(g0.asNumber()))\n return g0", "def get_cosmology(cosmology=conf.cosmology):\n if cosmology.lower() not in available_cosmologies:\n raise ValueError(\n \"Unrecognised cosmology {}. Available cosmologies are {}\".format(\n cosmology, \", \".join(available_cosmologies)\n )\n )\n elif cosmology.lower() in _astropy_cosmologies:\n ind = [\n num for num, name in enumerate(_astropy_cosmologies) if\n name == cosmology.lower()\n ][0]\n return getattr(cosmo, list(parameters.available)[ind])\n elif cosmology.lower() == \"planck15_lal\":\n return Planck15_lal_cosmology()\n elif \"_with_riess2019_h0\" in cosmology.lower():\n base_cosmology = cosmology.lower().split(\"_with_riess2019_h0\")[0]\n return Riess2019_H0_cosmology(base_cosmology)", "def RHO(p,tv): \n _rd=287.053 # Gas constant for dry air\n _tv=tv*1.\n if np.nanmax(_tv)<100: _tv +=273.15# NB: C-->K\n if np.nanmax(p)<2000: p*=100 # hPa to Pa\n rho=np.divide(p,np.multiply(_rd,_tv))\n\n return rho", "def get_power_spectrum(self, cosmo, nz1, nz2):\n t1 = ccl.WeakLensingTracer(cosmo, nz1)\n t2 = ccl.WeakLensingTracer(cosmo, nz2)\n return ccl.angular_cl(cosmo, t1, t2, self.ells)", "def fig_coh_ph(coh, ph, direc):\n\n colors = plt.cm.cividis(np.linspace(0, 1, coh.shape[0]))\n\n if coh.ndim > 1:\n f, (ax1, ax2) = plt.subplots(1, 2)\n for i, (co, p) in enumerate(zip(coh, ph)):\n ax1.plot(direc, co, c=colors[i])\n ax2.plot(direc, p*180./np.pi, c=colors[i])\n ax1.set_ylabel('Coherence')\n ax1.set_ylim((0, 1.))\n ax2.set_ylabel('Phase')\n ax1.set_xlabel('Angle from H1')\n ax2.set_xlabel('Angle from H1')\n plt.tight_layout()\n\n else:\n plt.figure()\n plt.subplot(121)\n plt.plot(direc, coh, c=colors[0])\n plt.ylim((0, 1.))\n plt.subplot(122)\n plt.plot(direc, ph*180./np.pi, c=colors[0])\n plt.tight_layout()\n\n return plt", "def x0(self):\n return self.params['x0']", "def H1(self,kx,ky):\n return -2.*self.t2*np.cos(self.phi)*(np.cos(3.*kx/2.+np.sqrt(3.)*ky/2.)+np.cos(-3.*kx/2.+np.sqrt(3.)*ky/2.)+np.cos(-np.sqrt(3.)*ky))", "def calc_h_sen(dry_bulb_C):\n\n h_kJ_kg = dry_bulb_C * CPA_kJ_kgC\n\n return h_kJ_kg", "def hsic(x,y,sigma):\n # m is the number of observations here\n m = len(x)\n gamma = 1.0/(2*sigma**2)\n\n k = rbf_kernel(x,x,gamma)\n l = rbf_kernel(y,y,gamma)\n for i in range(m):\n k[i,i] = 0\n l[i,i] = 0\n h = np.eye(m)-1.0/m\n hsic_value = (1.0/(m-1)**2)*np.trace(np.dot(np.dot(np.dot(k,h),l),h))\n return hsic_value", "def full_spectrum_wcsheader(center_wave=1.4e4, dlam=40, NX=100, spatial_scale=1, NY=10):\n \n h = pyfits.ImageHDU(data=np.zeros((2*NY, 2*NX), dtype=np.float32))\n \n refh = h.header\n refh['CRPIX1'] = NX+1\n 
refh['CRPIX2'] = NY+1\n refh['CRVAL1'] = center_wave/1.e4\n refh['CD1_1'] = dlam/1.e4\n refh['CD1_2'] = 0.\n refh['CRVAL2'] = 0.\n refh['CD2_2'] = spatial_scale\n refh['CD2_1'] = 0.\n refh['RADESYS'] = ''\n \n refh['CTYPE1'] = 'RA---TAN-SIP'\n refh['CUNIT1'] = 'mas'\n refh['CTYPE2'] = 'DEC--TAN-SIP'\n refh['CUNIT2'] = 'mas'\n \n ref_wcs = pywcs.WCS(refh) \n ref_wcs.pscale = get_wcs_pscale(ref_wcs)\n \n return refh, ref_wcs", "def h_T(self, z0):\n # Get the governing variables\n (B, N, u_slip, u_inf) = self.get_variables(z0, 0.)\n \n # Compute U_N\n U_N = u_slip / (B * N)**(1./4.)\n \n # Compute the correlation equation\n return 2.9 * np.exp(-(U_N - 1.0)**2 / 28.09) * (B / N**3)**(1./4.)", "def H(self, z):\n prefactor = 15./np.pi**4.*self.Omega_gamma*(1.+z)**4.\n # Dark energy contribution\n Xde = self.X_DE(z)\n # Neutrino contribution\n yn = np.outer(self.M_nu/(const.kB*self.T_nu), 1./(1.+z))\n Fn = self.FermiDirac_integral(np.array(yn))\n nu_contribution = prefactor*self.Gamma_nu**4.*Fn\n # UR contribution\n Fu = self.FermiDirac_integral(0.)\n ur_contribution = prefactor*self.Gamma_nu_inst**4.*Fu*self.massless_nu\n # WDM contribution\n yw = np.outer(self.M_wdm/(const.kB*self.T_wdm), 1./(1.+z))\n Fw = self.FermiDirac_integral(np.array(yw))\n wdm_contribution = prefactor*np.expand_dims(self.Gamma_wdm**4.,1)*Fw\n # H(z)\n return self.H0*(self.Omega_cdm *(1+z)**3 +\n self.Omega_b *(1+z)**3 +\n self.Omega_gamma *(1+z)**4 + \n self.Omega_K *(1+z)**2 +\n self.Omega_lambda*Xde +\n ur_contribution +\n np.sum(wdm_contribution,axis=0) + \n np.sum(nu_contribution ,axis=0))**0.5", "def get_hershey():\n hershey_path = pkg_resources.resource_filename('pymicrofluidics', 'data/hershey.txt')\n hershey_table = {}\n first = True\n with open(hershey_path) as openfileobject:\n for tline in openfileobject:\n if re.search('Ascii',tline):\n if first == False:\n newline = hershey_table[asci]['coord'].split('-1,-1,')\n newline = [list(filter(None, x.split(','))) for x in newline if len(x)>0]\n hershey_table[asci]['coord'] = [np.array([[float(y[x]),float(y[x+1])] for x in range(0,len(y)-1,2)])/21 for y in newline]\n if len(hershey_table[asci]['coord'])>0:\n middle = 0.5*(np.max(np.concatenate(hershey_table[asci]['coord'])[:,0])+np.min(np.concatenate(hershey_table[asci]['coord'])[:,0]))\n #middle = float(middle)\n hershey_table[asci]['coord'] = [np.array([[x[0]-middle,x[1]] for x in y]) \n for y in hershey_table[asci]['coord']]\n hershey_table[asci]['width'] = np.max(np.concatenate(hershey_table[asci]['coord'])[:,0])-np.min(np.concatenate(hershey_table[asci]['coord'])[:,0])\n else:\n hershey_table[asci]['width'] = 0.5\n asci = int(re.findall('.*Ascii (\\d+).*',tline)[0])\n width = float(re.findall('\\d+,\\s*(\\d+),.*',tline)[0])\n hershey_table[asci] = {'coord': '', 'width': width}\n first = False\n else:\n newline = tline.rstrip('\\n')\n hershey_table[asci]['coord'] = hershey_table[asci]['coord']+newline\n return hershey_table", "def alpha2rho0(self, phi_E, Rs):\n return phi_E/(4*Rs**2*(1+np.log(1./2.)))", "def hexToCIE1931(self, h):\n rgb = self.color.hexToRGB(h)\n return self.rgbToCIE1931(rgb[0], rgb[1], rgb[2])", "def harzburgite():\n\n rho = 3200.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 226.5; C[0,1] = 75.34; C[0,2] = 74.73; C[0,3] = -0.27; C[0,4] = -2.00; C[0,5] = 1.85\n C[1,0] = C[0,1]; C[1,1] = 242.8; C[1,2] = 73.68; C[1,3] = -3.6; C[1,4] = -1.91; C[1,5] = 4.14\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 230.; C[2,3] = -4.36; C[2,4] = -4.27; C[2,5] = -0.27\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = 
C[2,3]; C[3,3] = 80.75; C[3,4] = 1.81; C[3,5] = -2.19\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 76.94; C[4,5] = -1.88\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 79.15\n\n return C, rho", "def statee(h):\n # Convert height to SI\n hsi = h*0.3048\n\n # Get data\n zsi, tsi, psi, dsi = statsi(hsi)\n\n # Convert back to English\n z = zsi/0.3048\n t = tsi*1.8\n p = psi*0.02088543\n d = dsi*0.001940320\n\n return z, t, p, d", "def convert_kcalmol_H(en_kcalmol):\n return en_kcalmol*kcalmol_H", "def test_wet_psychrometric_rh():\n p = 1013.25 * units.mbar\n dry_bulb_temperature = 20. * units.degC\n wet_bulb_temperature = 18. * units.degC\n psychrometric_rh = relative_humidity_wet_psychrometric(p, dry_bulb_temperature,\n wet_bulb_temperature)\n assert_almost_equal(psychrometric_rh, 82.8747 * units.percent, 3)", "def h(self, X):\n if isinstance(X, int) or isinstance(X, float):\n if X < 1:\n x = max(0.001, X)\n a = np.log(x/2.)**2 - np.arccosh(1./x)**2\n elif X >= 1:\n a = np.log(X/2.)**2 + np.arccos(1./X)**2\n else:\n a=np.empty_like(X)\n X[X==0] = 0.001\n x = X[(X<1) & (X>0)]\n a[(X<1) & (X>0)] = np.log(x/2.)**2 - np.arccosh(1./x)**2\n x = X[X >= 1]\n a[X >= 1] = np.log(x/2.)**2 + np.arccos(1./x)**2\n return a", "def species_irreplaceability(x):\n x = x*100\n\n def h(x):\n # h(x) as specified\n miu = 39\n s = 9.5\n tmp = -(x - miu)/ s\n denominator = 1 + np.exp(tmp)\n return 1/denominator\n \n\n return (h(x) - h(0))/(h(100) - h(0))", "def ST_zero_mag(self):\n return 21.1", "def ST_zero_mag(self):\n return 21.1", "def computeX0(linsys_setup):\n datamaps, ninvs, beams, freqs, power_2d, precond_2d, clumaps, g_nu, \\\n map_prop = linsys_setup\n x = 0*datamaps[0].data\n \n # Monopole amplitude\n x = numpy.append(x, 0)\n \n # TSZ amplitudes\n for ic in range(len(clumaps[0])):\n x = numpy.append(x, 0.)\n \n # KSZ amplitudes\n if len(clumaps) == 2:\n for ic in range(len(clumaps[1])):\n x = numpy.append(x, 0.)\n return x", "def F_rxth(mx, rhoc, Temp):\n return 64. 
* m.pow(Temp/1.e+5,0.5) * m.pow(1.e+14/rhoc,0.5) * m.pow(100./mx,0.5) # cm", "def stockdon2006(H,L,B):\n \n # Make sure parameters are double\n H = np.double(H)\n L = np.double(L)\n B = np.double(B)\n \n # Compute incident swash (equation 11) \n incSwash = 1.1 / 2 * 0.75 * B * (H*L)**0.5\n \n # Infragravity swash (Equation 12)\n igSwash = 1.1 / 2 * 0.06 * (H*L)**0.5\n \n # Compute R2% (Equation 19)\n setup = 1.1 * 0.35 * B * ((H * L)**0.5)\n swash = 1.1 / 2.0 * (H*L * (0.563 * B**2 + 0.004))**0.5 \n r2 = setup + swash\n \n return r2,setup,incSwash,igSwash", "def noyes84_logRpHK(S, BmV):\n S = np.asarray(S)\n BmV = np.asarray(BmV)\n logC_cf = 1.13 * BmV**3 - 3.91 * BmV**2 + 2.84 * BmV - 0.47\n DlogC_cf = np.zeros_like(BmV) # for correction of \"nonphysical maximum\"\n x = (0.63 - BmV)\n sel = BmV < 0.63\n DlogC_cf[sel] = 0.135*x[sel] - 0.814*x[sel]**2 + 6.03*x[sel]**3\n logC_cf += DlogC_cf\n R_HK = 1.340E-4 * 10**logC_cf * S\n logR_phot = -4.898 + 1.918 * BmV**2 - 2.893 * BmV**3\n Rp_HK = R_HK - 10**logR_phot\n logRp_HK = np.log10(Rp_HK)\n return logRp_HK", "def octa_cox_data_to_ss(data):\n t = pandas.Series((\n data['TIME_StartTime'] -\n data['TIME_StartTime'].values[0]) / 1.0e6, name='t, sec')\n xh = pandas.DataFrame(\n data[[\n 'LPOS_X', 'LPOS_Y', 'LPOS_Z',\n 'LPOS_VX', 'LPOS_VY', 'LPOS_VZ',\n 'ATT_Roll', 'ATT_Pitch', 'ATT_Yaw',\n 'ATT_RollRate', 'ATT_PitchRate', 'ATT_YawRate']].values,\n columns=[\n 'X', 'Y', 'Z', 'V_X', 'V_Y', 'V_Z',\n 'Phi', 'Theta', 'Psi',\n 'P', 'Q', 'R'], index=t)\n y = pandas.DataFrame(\n data[[\n 'GPS_Lat', 'GPS_Lon', 'GPS_Alt',\n 'SENS_BaroAlt',\n 'IMU1_AccX', 'IMU1_AccY', 'IMU1_AccZ',\n 'IMU1_GyroX', 'IMU1_GyroY', 'IMU1_GyroZ',\n 'IMU1_MagX', 'IMU1_MagY', 'IMU1_MagZ']].values,\n columns=[\n 'GPS_Lat', 'GPS_Lon', 'GPS_Alt',\n 'Baro_Alt',\n 'Acc_X', 'Acc_Y', 'Acc_Z',\n 'Gyro_X', 'Gyro_Y', 'Gyro_Z',\n 'Mag_X', 'Mag_Y', 'Mag_Z'], index=t)\n u_raw = pandas.DataFrame(\n ((data[[\n 'OUT0_Out0', 'OUT0_Out1', 'OUT0_Out2',\n 'OUT0_Out3', 'OUT0_Out4', 'OUT0_Out5', 'OUT0_Out6',\n 'OUT0_Out7']] - 1000.0) / 1000.0).values,\n columns=['1', '2', '3', '4', '5', '6', '7', '8'], index=t)\n c_mix_octo = np.array([\n [1, 1, 1, 1, 1, 1, 1, 1], # thrust\n [-1, 1, 1, -1, -1, 1, 1, -1], # roll\n [-1, -1, 1, 1, -1, -1, 1, 1], # pitch\n [1, -1, 1, -1, 1, -1, 1, -1], # yaw\n ]) / 8.0\n u = pandas.DataFrame(\n c_mix_octo.dot(u_raw.T).T,\n columns=['thrust', 'roll', 'pitch', 'yaw'],\n index=t)\n return t, xh, u, y, u_raw", "def set_H0(self):\n self.slot.H0 = self.lf_H0.value()\n self.w_out.comp_output()\n # Notify the machine GUI that the machine has changed\n self.saveNeeded.emit()", "def prada(self):\n scale_factor = 1.0 / (1.0 + self.snapshot.header.redshift)\n r200c_physical = self.r200c * scale_factor / 1000.0 # units Mpc\n\n v200 = (\n (self.snapshot.const.G * self.m200c)\n / r200c_physical\n * self.snapshot.const.Mpc ** 2\n / 1000.0 ** 2\n ) ** 0.5 # units km/s\n\n def y(x, vmax, v200):\n func = np.log(1 + x) - (x / (1 + x))\n return ((0.216 * x) / func) ** 0.5 - (vmax / v200)\n\n concentration = np.zeros((len(self.vmax)))\n for halo in range(self.N_halos):\n if v200[halo] > self.vmax[halo]:\n concentration[halo] = -9999.0\n else:\n try:\n concentration[halo] = newton(\n y, x0=5.0, args=(self.vmax[halo], v200[halo])\n )\n except:\n concentration[halo] = -9999.0\n\n return concentration", "def __init__(self):\n # Set constants\n self.fromHztoeV = 6.58e-16\n self.gramstoeV = 1 / ( 1.78 * 1e-33)\n self.mtoev = 1/(1.97 * 1e-7) \n self.H0 = cosmo.H(0).value * 1e3 / (1e3 * const.kpc.value) 
#expressed in 1/s\n self.rhocritical = cosmo.critical_density(0).value * self.gramstoeV /(1e-2)**3 # eV/m**3\n self.Om0 = cosmo.Om0 #total matter \n self.OLambda0 = cosmo.Ode0 # cosmological constant\n self.DM0 = self.Om0 - cosmo.Ob0 # dark matter\n self.evtonJoule = 1.60218 * 1e-10 # from eV to nJ\n self.evtoJoule = 1.60218 * 1e-19 # from eV to J\n PSgal1h = np.loadtxt(\"/Users/andreacaputo/Desktop/Phd/AxionDecayCrossCorr/Codes/NIRB_PS/PS_GALl_1h.dat\")\n PSgal2h = np.loadtxt(\"/Users/andreacaputo/Desktop/Phd/AxionDecayCrossCorr/Codes/NIRB_PS/PS_GALl_2h.dat\")\n self.Mpc = 1e3 * const.kpc.value\n self.zmin = 0.001\n self.zmax = 30.001\n self.zbins = 301\n self.h = cosmo.h\n self.z_vect = np.linspace(self.zmin, self.zmax, self.zbins)\n self.k_vect = PSgal1h[:,0]* self.h\n self.Power1h = PSgal1h[:,1:]/(self.h**3)\n self.Power2h = PSgal2h[:,1:]/(self.h**3)\n self.Power = self.Power1h + self.Power2h\n self.Praw_prova1h = interp2d(self.k_vect, self.z_vect, np.transpose(self.Power1h))\n self.Praw_prova2h = interp2d(self.k_vect, self.z_vect, np.transpose(self.Power2h))\n self.Praw_prova = interp2d(self.k_vect, self.z_vect, np.transpose(self.Power))", "def __init__(self, w0=6):\n self.w0 = w0\n if w0 == 6:\n # value of C_d from TC98\n self.C_d = 0.776", "def sound_horizon_Class(self):\n if 'classy' not in sys.modules:\n warnings.warn(\"Class not installed, using a custom function to compute sound horizon (not precise)\")\n return self.r_s_drag()\n else:\n params = {\n 'A_s': self.As,\n 'n_s': self.ns, \n 'h': self.h,\n 'omega_b': self.Omega_b*self.h**2.,\n 'omega_cdm': self.Omega_cdm*self.h**2.,\n 'Omega_k': self.Omega_K,\n 'Omega_fld': self.Omega_lambda,\n 'w0_fld': self.w0,\n 'wa_fld': self.wa,\n 'N_ur': self.massless_nu,\n 'N_ncdm': self.massive_nu}\n if self.massive_nu != 0:\n params['m_ncdm'] = ''\n params['T_ncdm'] = ''\n for im, m in enumerate(self.M_nu):\n params['m_ncdm'] += '%.8f, ' %(m)\n params['T_ncdm'] += '%.8f, ' %(self.Gamma_nu)\n params['m_ncdm'] = params['m_ncdm'][:-2]\n params['T_ncdm'] = params['T_ncdm'][:-2]\n\n cosmo = Class()\n cosmo.set(params)\n cosmo.compute()\n\n rs = cosmo.rs_drag()*cosmo.h()\n\n cosmo.struct_cleanup()\n cosmo.empty()\n\n return rs", "def get_x0(self, x0):\n pass", "def x0(self):\n return self._x0", "def x0(self):\n return self._x0", "def get_maximum_heating_output(region: int, q_rtd_h: float) -> np.ndarray:\n\n # outdoor temperature, degree C, (8760 times)\n theta_ex = read_conditions.read_temperature(region)\n\n # absolute humidity, kg/kgDA, (8760 times)\n x_ex = read_conditions.read_absolute_humidity(region)\n\n # relative humidity, %, (8760 times)\n h_ex = read_conditions.get_relative_humidity(theta_ex, x_ex)\n\n # coefficient for defrosting, (8760 times)\n c_df_h = np.where((theta_ex < 5.0) & (h_ex >= 80.0), 0.77, 1.0)\n\n alpha_max_h = 1.0\n\n return q_rtd_h * c_df_h * 3600 * 10**(-6) * alpha_max_h", "def get_archeological_sfh(self, at_birth=True, zero=True):\n from .histograms import cumulative_histogram\n\n xprop = 'form.scalefactor'\n\n aform = self.star_particle_prop('form.scalefactor')\n tform = self.star_particle_prop('form.time')\n if at_birth:\n mass = self.star_particle_prop('form.mass')\n else:\n mass = self.star_particle_prop('mass')\n\n sfh, scale = cumulative_histogram(aform, weights=mass, zero=True)\n cosmic_time = np.unique(tform)\n if zero:\n cosmic_time = np.concatenate(([cosmic_time[0]-1e-10], cosmic_time))\n\n redshift = (1./scale) - 1\n lookback_time = self.snapshot['time'] - cosmic_time\n\n normalized_sfh = sfh / sfh[-1]\n\n 
res = {'scalefactor': scale, 'scale': scale,\n 'cosmic.time': cosmic_time, 'time': cosmic_time,\n 'lookback.time': lookback_time, 'time.lookback': lookback_time,\n 'cumulative.sfh': sfh, 'normalized.cumulative.sfh': normalized_sfh}\n\n if self.sfh is None:\n self.sfh = res\n\n return res", "def relative_to_absolute_hum(rel_h, temp):\n A = 8.07131\n B = 1730.63\n C = 233.426\n Ph20_star = 10 ** (A - B / (C + temp))\n P = rel_h / 100. * Ph20_star\n return P.reshape(-1,1)", "def h_spec(k, He, h):\r\n return np.array(((k**3/np.pi**2) * h))", "def zero(self):\n q = pinocchio.neutral(self.model)\n v = np.zeros(self.model.nv)\n return np.concatenate([q.flat, v])", "def H(self):\n if self._H is None:\n fp = numpy.arange(self.H_min, self.H_max, 0.1)\n xp = H_cfd(fp)\n xp = xp/xp[-1]\n x = self.distributions.rnd_gen.uniform(xp[0], xp[-1], self.distributions.size)\n self._H = numpy.interp(x, xp, fp)\n return self._H", "def aluminum_hexathiohypodiphosphate():\n\n positions = [[0.000000, 0.000000, 0.000000],\n [0.500000, 0.000000, 0.500000],\n [0.000000, 0.500000, 0.000000],\n [0.000000, 0.000000, 0.500000],\n [0.197847, 0.276435, 0.101916],\n [0.197847, 0.723565, 0.898084],\n [0.802153, 0.276435, 0.898084],\n [0.802153, 0.723565, 0.101916],\n [0.776404, 0.800507, 0.601208],\n [0.776404, 0.199493, 0.398792],\n [0.223596, 0.800507, 0.398792],\n [0.223596, 0.199493, 0.601208]]\n\n species = ['Al','Al','P','P','S','S','S','S','S','S','S','S']\n\n bravais = 'orthorhombic'\n\n space_group = 16\n lattice_parameters = {'a': Set(5.71230345, 'angstrom'),\n 'b': Set(5.71644625, 'angstrom'),\n 'c': Set(11.46678755,'angstrom')}\n data = {'fractional': positions,\n 'species': species,\n 'lattice_parameters': lattice_parameters,\n 'space_group': ('', space_group),\n 'n_atoms': len(species)}\n\n return data", "def H3H2_He4n(T):\n T9 = constants.to_norm_tempreture(T, units=\"T9\")\n ro_b = univ_func.rat_scale(T)\n base_rate = 8.29 * 10**10 * T9**(-2./3) * math.exp(-4.524*T9**(-1./3) - (T9/0.08)**2) * (\n + 1.0\n + 17.2 * T9 \n + 175 * T9**2\n ) + 8.12 * 10**8 * T9**(-0.712) * math.exp(-0.506/T9)\n\n return base_rate * ro_b * (1./(constants.less_time(1)))", "def r(self) -> float:\n return self._ohms.real", "def calibH(self):\n # in case of errors\n self.flushInput()\n if (self.model == 'GDS'):\n # GDS includes the sampling rate data with the waveform\n # data. hstep obtained later.\n self.write(':TIM:DEL?\\n')\n # minus sign necessary to make hoff on two scopes congruous\n hoff = -float(self.readline())\n elif (self.model == 'TDS'):\n self.write('WFMPre:XZEro?\\n')\n hoff = float(self.readline())\n self.write('WFMPre:XINcr?\\n')\n hstep = float(self.readline())\n # in case of errors\n self.flushInput()\n return (hstep, hoff)", "def M200(self, Rs, rho0, c):\n return 4*np.pi*rho0*Rs**3*(np.log(1+c)-c/(1+c))", "def odh_class(self):\n if self.phi < 1e-7/ureg.hr:\n return 0\n elif self.phi < 1e-5/ureg.hr:\n return 1\n elif self.phi < 1e-3/ureg.hr:\n return 2\n else:\n # TODO add a custom exception for ODH > 2\n print('ODH fatality rate is too high. 
Please, check calculations')\n return None", "def get_coml_s(hyplo):\r\n\tres=\"\"\r\n\tfor x in hyplo:\r\n\t\tif x==\"1\":\r\n\t\t\tres+=\"0\"\r\n\t\telse:\r\n\t\t \tres+=\"1\"\r\n\treturn res", "def H(self, z):\n return self.H0 * self.E(z)", "def hz2mel(hz):\r\n return 2595 * np.log10(1+hz/700.0)", "def hz2mel(hz):\n return 1127 * np.log(1 + hz / 700)", "def FeH(self):\n\n #return math.log10(self.glb[user_params_index[\"Zs\"]]*constants.solar_x/(self.glb[user_params_index[\"Xs\"]]*constants.solar_z))/constants.A_FeH\n return math.log10(self.glb[iz0]*constants.solar_x/(self.glb[ix0]*constants.solar_z))/constants.A_FeH", "def init_zero(cls, h):\n shapes = QuantizedCheckpoint.make_shaped_arrays(h)\n return jax.tree_util.tree_map(lambda s: np.zeros(s.shape, s.dtype), shapes)", "def mhm_to_fsl(self, m_hm):\n\n rhoc = self.rhoc * self.cosmo.h ** 2\n\n l_hm = 2 * (3 * m_hm / (4 * numpy.pi * rhoc)) ** (1. / 3)\n\n l_fs = l_hm / 13.93\n\n return l_fs", "def get_specific_heat() -> float:\n return 1006.0" ]
[ "0.6274564", "0.6107101", "0.60727805", "0.58837694", "0.5814193", "0.5814193", "0.56165946", "0.55797195", "0.5553944", "0.5534817", "0.553009", "0.55183256", "0.5515757", "0.5511963", "0.5507393", "0.54924595", "0.5484746", "0.5456038", "0.5441649", "0.54290783", "0.5415065", "0.5408925", "0.5395768", "0.5391213", "0.53902936", "0.5379662", "0.53723", "0.53493166", "0.5347208", "0.5334433", "0.5330138", "0.5298103", "0.52967155", "0.52933955", "0.5289566", "0.5289566", "0.5267455", "0.52482593", "0.5246108", "0.5241217", "0.5229748", "0.5211816", "0.5210424", "0.52016294", "0.51989716", "0.5197866", "0.518719", "0.5182321", "0.51724154", "0.5171863", "0.51694566", "0.51627594", "0.5150963", "0.5149169", "0.51490194", "0.51438737", "0.5140061", "0.51355696", "0.51278245", "0.5118449", "0.5110932", "0.51042116", "0.5098224", "0.50896215", "0.508194", "0.507988", "0.507988", "0.5076197", "0.50711673", "0.50701654", "0.50680083", "0.50662494", "0.50539", "0.50428987", "0.5039524", "0.5033672", "0.5023641", "0.50138044", "0.50112873", "0.50112873", "0.5010137", "0.50027084", "0.5002615", "0.4999154", "0.49755123", "0.49724358", "0.49651778", "0.49547166", "0.49494612", "0.49489093", "0.4945062", "0.49425587", "0.49423102", "0.4941024", "0.4936178", "0.49343306", "0.49229804", "0.49228188", "0.49201488", "0.49167377" ]
0.70496
0
Returns the supported components e.g. set(['mmic_autodock_vina',...]). Returns Set[str]
def tactic_comps(cls) -> Set[str]: return set(["mmic_autodock_vina"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_supported_components(self):\n props = [cdav.SupportedCalendarComponentSet()]\n response = self.get_properties(props, parse_response_xml=False)\n response_list = response.find_objects_and_props()\n prop = response_list[unquote(self.url.path)][\n cdav.SupportedCalendarComponentSet().tag\n ]\n return [supported.get(\"name\") for supported in prop]", "def get_supported_sets(self):\n return _SUPPORTED_SETS", "def supported_modes(self) -> Set[str]:\n raise NotImplementedError", "def test_get_all_components(self):\n self._ucr({\n 'repository/online/component/a': 'yes',\n 'repository/online/component/b': 'no',\n })\n c = self.u.get_all_components()\n self.assertEqual(c, set(('a', 'b')))", "def supported_constructs(self) -> Set[Construct]:\n config: Dict[str, bool] = self.options.get(\"constructs\", {})\n result = set()\n for construct, supported in config.items():\n if supported:\n result.add(Construct[construct.upper()])\n return result", "def vendor_list():\n return ['nxos', 'eos', 'cumulus']", "def list_supported_models() -> Sequence[str]:\r\n return list(_MODELS)", "def get_supported_models(self):\n # type: () -> list\n return [model for model in self.__MODELS]", "def list_uses(self):\n return list(set(self._prop_typology['USE'].values))", "def get_supported_browsers_suggestions():\n supported_browsers = [\n 'chrome',\n 'chrome-remote',\n 'chrome-headless',\n 'chrome-remote-headless',\n 'firefox',\n 'firefox-remote',\n 'ie',\n 'ie-remote'\n ]\n return supported_browsers", "def _available_algorithms(**_: str) -> Set[str]:\n avail = set()\n pass2 = set()\n for algo in hashlib.algorithms_available:\n lalgo = algo.lower()\n if \"with\" in lalgo:\n continue # skip apparently redundant ones\n if lalgo != algo:\n pass2.add(algo)\n else:\n avail.add(lalgo)\n for algo in pass2:\n if algo.lower() not in avail:\n avail.add(algo)\n return avail", "def required_components(cls) -> List[Type[Component]]:\n\n return []", "def required_components(cls) -> List[Type[Component]]:\n\n return []", "def detect_supported_caps():\n result = []\n # generate list of supported capabilities\n\n # Intel RDT L3 CAT\n if common.PQOS_API.is_l3_cat_supported():\n result.append(common.CAT_L3_CAP)\n\n # Intel RDT L2 CAT\n if common.PQOS_API.is_l2_cat_supported():\n result.append(common.CAT_L2_CAP)\n\n # Intel RDT MBA\n if common.PQOS_API.is_mba_supported():\n result.append(common.MBA_CAP)\n\n if sstbf.is_sstbf_enabled():\n result.append(common.SSTBF_CAP)\n\n if power.is_sstcp_enabled():\n result.append(common.POWER_CAP)\n\n return result", "def _allowed_components():\n pass", "def chipset_driver_modules(self):\n\t\treturn self.__info_dict['info']['chipset_driver_modules']['value']", "def get_available_entities_models():\n return ['concat', 'bahdanau', 'luong']", "def get_supported_feature_sets(flags) -> List[str]:\n\n # find all supported feature sets\n supported = []\n for one_feature_set in sorted(REQUIRED_FEATURES.keys()):\n if supports_feature_set(flags, one_feature_set):\n supported.append(one_feature_set)\n return supported", "def get_platforms(self):\n if self.platform == 'All':\n return PLATFORMS\n else:\n return self.platform.split(':')", "def getChemCompSysNames(self):\n dataDict = self.__dict__\n result = frozenset(y for x in self.chemComp.namingSystems for y in x.chemCompSysNames if not y.specificChemCompVars).union(self.specificSysNames)\n return result", "def test__get_component_version_short(self):\n self._ucr({'repository/online/component/a/version': '%d.%d' % (MAJOR, MINOR)})\n ver = 
self.u._get_component_versions('a', None, None)\n self.assertEqual(set((U.UCS_Version((MAJOR, MINOR, 0)),)), ver)", "def get_graded_components(self):\r\n return self.components.keys()", "def manufacturers(self):\n return self._manufacturers", "def class_exts(cls):\n return set()", "def get_platform_combinations():\n mapped_osname = platform_map(g_osname)\n mapped_osarch = g_osarch\n ret = [mapped_osname]\n while True:\n ret += [mapped_osarch, mapped_osname + \"-\" + mapped_osarch]\n mapped_osarch = platform_map_iterate(mapped_osarch)\n if not mapped_osarch:\n break\n return sorted(ret, reverse=True) + [\"default\"]", "def get_supported_games(self):\n sg = []\n for game in c.supported_games.keys():\n sg.append(c.supported_games[game].game_name)\n return sg", "def used_features(self) -> List[str]:\n mapped = map_pipeline_names(self.input_features, self.output_features)\n return list(set(mapped))", "def get_component_name_list(self):\n return self._component_name_list", "def preset_modes(self) -> List[str]:\n return self._support_presets", "def list_components(self) -> Dict[str, Any]:\n return self._manager.list_components()", "def get_prior_technologies(self, allowed_packs) -> Set[Technology]:\n technologies = set()\n for ingredient in self.ingredients:\n if ingredient in allowed_packs:\n technologies |= required_technologies[ingredient] # technologies that unlock the recipes\n return technologies", "def _identify_combos(model, combo_tags):\n \n # Identify which load combinations to evaluate\n if combo_tags is None:\n combo_list = model.LoadCombos.values()\n else:\n combo_list = []\n for combo in model.LoadCombos.values():\n if any(tag in combo.combo_tags for tag in combo_tags):\n combo_list.append(combo)\n \n return combo_list", "def test_get_components_MIRRORED(self):\n self._ucr({\n 'repository/online/component/a': 'yes',\n 'repository/online/component/b': 'no',\n 'repository/online/component/c': 'yes',\n 'repository/online/component/c/localmirror': 'yes',\n 'repository/online/component/d': 'yes',\n 'repository/online/component/d/localmirror': 'no',\n 'repository/online/component/e': 'no',\n 'repository/online/component/e/localmirror': 'yes',\n 'repository/online/component/f': 'no',\n 'repository/online/component/f/localmirror': 'no',\n })\n c = self.u.get_components(only_localmirror_enabled=True)\n self.assertEqual(c, set(('a', 'c', 'e')))", "def part_types(self):\n return set(self.parts_by_type)", "def get_applicable_components(device, components, component_bitmap_bit_length):\n applicable_components_list = device[\"ApplicableComponents\"]\n applicable_components = bitarray(\n component_bitmap_bit_length, endian=\"little\"\n )\n applicable_components.setall(0)\n for component_index in applicable_components_list:\n if 0 <= component_index < len(components):\n applicable_components[component_index] = 1\n else:\n sys.exit(\"ERROR: Applicable Component index not found.\")\n return applicable_components", "def components(self):\r\n return list(self._components)", "def test_get_software_set(self):\n pass", "def test__get_component_version_full(self):\n self._ucr({'repository/online/component/a/version': '%d.%d-%d' % (MAJOR, MINOR, PATCH)})\n ver = self.u._get_component_versions('a', None, None)\n self.assertEqual(set((U.UCS_Version((MAJOR, MINOR, PATCH)),)), ver)", "def get_components_list(self):\n\n components_list = self.driver.find_elements(*BasePageLocators.LIST_COMPONENS)\n return components_list", "def supported_color_modes(self) -> set[str] | None:\n color_modes = [COLOR_MODE_ONOFF]\n 
if self.dp_code_bright in self.tuya_device.status:\n color_modes.append(COLOR_MODE_BRIGHTNESS)\n\n if self.dp_code_temp in self.tuya_device.status:\n color_modes.append(COLOR_MODE_COLOR_TEMP)\n\n if (\n self.dp_code_colour in self.tuya_device.status\n and len(self.tuya_device.status[self.dp_code_colour]) > 0\n ):\n color_modes.append(COLOR_MODE_HS)\n return set(color_modes)", "def open_requirements(degree, major):\n return set()", "def get_supported_property_keys():\n return impl.get_supported_property_keys(**locals())", "def get_available_plugin_names():\n mgr = stevedore.EnabledExtensionManager(namespace=PLUGIN_NAMESPACE,\n check_func=_auth_plugin_available,\n invoke_on_load=True,\n propagate_map_exceptions=True)\n return frozenset(mgr.names())", "def _get_supported_plugins(self, unfiltered_plugins):\r\n plugins = []\r\n\r\n for plugin in unfiltered_plugins:\r\n if self._os_supported(plugin):\r\n plugins.append(plugin)\r\n\r\n return plugins", "def __components__():\n # Get the component registry of the active application.\n registry = context.app.component_registry\n # A shortcut: return cached components.\n if registry.components is not None:\n return registry.components\n # A list of `Component` subclasses defined in modules exported by addons.\n components = [Component]\n idx = 0\n while idx < len(components):\n for subclass in components[idx].__subclasses__():\n # Skip realizations.\n if issubclass(subclass, Realization):\n continue\n # Check if the component belongs to the current application.\n if subclass.__enabled__():\n components.append(subclass)\n idx += 1\n # Cache and return the components.\n registry.components = components\n return components", "def _GetKnownTags(self) -> Set[str]:\n raise NotImplementedError()", "def support_opset(self) -> Collection[OpsetVersion]:\n return list(self._functions)", "def allowed_mods():\n mods = [Mod.EZ, Mod.HD, Mod.HR, Mod.DT, Mod.HT, Mod.FL]\n mod_powerset = chain.from_iterable(combinations(mods, r) for r in range(len(mods) + 1))\n combos = []\n for p in mod_powerset:\n combined_mod = Mod(0)\n for m in p:\n combined_mod |= m\n combos.append(combined_mod)\n allowed = tuple(c for c in combos if valid_mod(c))\n return allowed", "def interpret_point_requirements(requirements):\n requirements_for_major_set = set()\n for string in sorted(requirements):\n requirement_object = interpret_requirement(string)\n requirements_for_major_set.add(requirement_object)\n return requirements_for_major_set", "def components(self):\n return self.__components", "def components(self):\n return self.__components", "def enabled_services(self):\n services = set()\n if self.ce_collector_required_rpms_installed and self.htcondor_gateway_enabled:\n services.add('condor-ce')\n return services", "def components(self):\n # The '_components' attribute is defined according to the\n # subclass of Dyadic the instance belongs to.\n return self._components", "def get_labels(self) -> Set[str]:", "def list_components(self) -> Dict[str, Any]:\n return {c.name: c for c in self._components}", "def test_get_software_set_expanded(self):\n pass", "def available_unary_choices() -> Iterable[str]:\n for name, _ in inspect.getmembers(sys.modules[__name__], inspect.isclass):\n if name.startswith('Unary'):\n yield name", "def get_mdf_parsers() -> Set[str]:\n return set([name for name, info in get_available_adapters().items()\n if info['class'].startswith('mdf_matio')])", "def get_platforms(one_class):\n platforms = []\n\n platform = one_class.split(' ')[-1]\n if platform == 'win':\n 
platforms.append('Windows')\n if platform == 'mac':\n platforms.append('Mac os')\n if platform == 'linux':\n platforms.append('Linux')\n if platform == 'vr_supported':\n platforms.append('VR Supported')\n\n return platforms", "def test_get_all_components():\n\n components = ['api.component',\n 'api.router.component',\n 'configuration.component',\n 'database.component',\n 'database.migration.component',\n 'globalization.locale.component',\n 'globalization.datetime.component',\n 'logging.component',\n 'converters.deserializer.component',\n 'security.component',\n 'security.authentication.component',\n 'security.authorization.component',\n 'security.encryption.component',\n 'security.hashing.component',\n 'security.permission.component',\n 'security.session.component',\n 'security.token.component',\n 'packaging.component',\n 'caching.component']\n\n assert all(application_services.get_component(component) is not None\n for component in components)", "def get_names(self):\n selected_masks = self._component_obj.get_support()\n return [feature_name for (selected, feature_name) in zip(selected_masks, self.input_feature_names) if selected]", "def available_binary_choices() -> Iterable[str]:\n for name, _ in inspect.getmembers(sys.modules[__name__], inspect.isclass):\n if name.startswith('Binary'):\n yield name", "def get_verbs(self) -> Set[str]:", "def get_components(self, norm=False):\n return self._var_names", "def irreducible_components( self ):\n return self._info['irreducible_components']", "def supported_operation_modes(\n self,\n ) -> list[HVACModeT]:", "def list_compute_packages(self):\n return set(self.compute_packages.keys())", "def Platforms():\n return platforms", "def get_install_requires() -> List[str]:\n return [\n \n ]", "def supported_optimization_types(self) -> Sequence[str]:\n return pulumi.get(self, \"supported_optimization_types\")", "def supported_features(self):\n return self._supported_features", "def components(self):\n return self._components", "def components(self):\n return self._components", "def combo_suites(self):\n return [c for c in self.suites.all() if TestSuite.TS_COMBO == c.xtype]", "def combo_suites(self):\n return [c for c in self.suites.all() if TestSuite.TS_COMBO == c.xtype]", "def _check_emotion_set_is_supported(self):\n supported_emotion_subsets = [\n set(['calm', 'anger', 'happiness', 'surprise', 'disgust', 'fear', 'sadness']),\n set(['anger', 'fear', 'surprise', 'calm']),\n set(['happiness', 'disgust', 'surprise']),\n set(['anger', 'fear', 'surprise']),\n set(['anger', 'fear', 'calm']),\n set(['anger', 'happiness', 'calm']),\n set(['anger', 'fear', 'disgust']),\n set(['calm', 'disgust', 'surprise']),\n set(['sadness', 'disgust', 'surprise']),\n set(['anger', 'happiness'])\n ]\n if not set(self.target_emotions) in supported_emotion_subsets:\n error_string = 'Target emotions must be a supported subset. 
'\n error_string += 'Choose from one of the following emotion subset: \\n'\n possible_subset_string = ''\n for emotion_set in supported_emotion_subsets:\n possible_subset_string += ', '.join(emotion_set)\n possible_subset_string += '\\n'\n error_string += possible_subset_string\n raise ValueError(error_string)", "def components(self):\n return self._components", "def known_organisms():\n return [\"rat\"]", "def test_get_component_descriptors_by_types_using_get(self):\n pass", "def get_charm_names(self):\n\n charms = {}\n\n # Check if the VDUs in this VNF have a charm\n for config in self.get_config():\n juju = config['juju']\n\n name = juju['charm']\n if name not in charms:\n charms[name] = 1\n\n return charms.keys()", "def clsnames_affecting_onsets(self):\n # type: () -> Set[str]\n output = set()\n output.update(self.NONGRACE_NOTEHEAD_CLSNAMES)\n output.update(self.REST_CLSNAMES)\n output.update(self.MEASURE_SEPARATOR_CLSNAMES)\n output.update(self.TIME_SIGNATURES)\n output.add('repeat_measure')\n return output", "def supported_models(cls):\n \n models = []\n \n for subclass in cls.__subclasses__():\n models+=subclass.supported_models()\n return models", "def _gather_components(self):\n comps = set()\n for data in self._collection:\n for c in data.components:\n if c in comps:\n continue\n label = \"%s (%s)\" % (c, data.label)\n label = disambiguate(label, self._labels)\n self._labels[label] = c\n comps.add(c)", "def get_required_module_descriptors(self):\r\n return []", "def machine_specs(self) -> Optional[Sequence['outputs.AiEndpointDeployedModelDedicatedResourceMachineSpec']]:\n return pulumi.get(self, \"machine_specs\")", "def supported_vendor_interfaces(self):\n return [\n fake.FakeVendorB, fake.FakeVendorA\n ] + super().supported_vendor_interfaces", "def _get_components_list():\n # Order the services to install by service installation order\n ordered_services = sorted(\n config[SERVICES_TO_INSTALL],\n key=SERVICE_INSTALLATION_ORDER.index\n )\n # Can't easily use list comprehension here because this is a list of lists\n ordered_components = []\n for service in ordered_services:\n ordered_components.extend(SERVICE_COMPONENTS[service])\n return ordered_components", "def connex_components(self):\n unchecked = set(self.v.values())\n groups = []\n while len(unchecked):\n vcon = self.member_family(unchecked.pop())\n unchecked -= set(vcon)\n groups.append(set(vcon))\n return groups", "def get_required_extensions(self):\n return []", "def components(self) -> Iterable[Mapping[T, Set[T]]]:", "def __catalogue__(interface):\n names = []\n seen = set()\n for component in interface.__implementations__():\n for name in component.__names__:\n if name not in seen:\n names.append(name)\n seen.add(name)\n names.sort(key=(lambda n: str(n)))\n return names", "def get_components(self, req):\n request_name = req.request\n\n names = []\n if(request_name == \"\"):\n comps = self.rt_proxy.get_available_components() # get all\n else:\n comps = self.rt_proxy.get_available_components(request_name)\n\n for c in comps:\n names.append(str(c))\n\n resp = ListComponentsResponse(names)\n\n return resp", "async def test_api_get_components(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n resp = await mock_api_client.get(const.URL_API_COMPONENTS)\n result = await resp.json()\n assert set(result) == hass.config.components", "def modifiers(m) -> Set[str]:\n return set(m[\"modifier_list\"])", "def lv_devices(self):\n devs = set()\n return devs", "def commonSetElementPredicate(field_set: Sequence[Any]) -> 
FrozenSet[str]:\n\n return frozenset(str(item) for item in field_set)", "def available_services(self) -> list[str]:\r\n return self.services", "def get_specification_kinds(specifications):\n specifications.setdefault(\"manual event models\", {\"tags\": [\"manual event models\"]})\n return [\"manual event models\"]", "def hvac_modes(self) -> List[str]:\n return self._support_modes", "def selected_components(self):\n return self._selected_components" ]
[ "0.7520092", "0.6942378", "0.6502353", "0.64853776", "0.64341474", "0.6051912", "0.603774", "0.58496153", "0.58443105", "0.58355975", "0.5832998", "0.5783114", "0.5783114", "0.577528", "0.57419336", "0.57056123", "0.56868654", "0.5655444", "0.5626463", "0.5625516", "0.56065476", "0.558589", "0.5582539", "0.5577305", "0.5568958", "0.556739", "0.5559511", "0.55418396", "0.55068254", "0.5482981", "0.547313", "0.5471368", "0.546757", "0.54658467", "0.546399", "0.54590636", "0.5458134", "0.5441381", "0.54261833", "0.5424694", "0.54235744", "0.54235125", "0.5415994", "0.54154927", "0.5407892", "0.5340728", "0.5339937", "0.5326858", "0.53266203", "0.53185844", "0.53185844", "0.52990735", "0.52804077", "0.5275381", "0.5274763", "0.5260384", "0.5257345", "0.5256568", "0.52553535", "0.52420735", "0.52412486", "0.522747", "0.5223963", "0.5223473", "0.5218496", "0.5216542", "0.5213312", "0.52042276", "0.520317", "0.5195343", "0.5183875", "0.5183266", "0.5183266", "0.5174846", "0.5174846", "0.5170375", "0.51693827", "0.51619077", "0.51610136", "0.5150074", "0.51480854", "0.51360005", "0.51327324", "0.51215774", "0.51100236", "0.5108459", "0.5103755", "0.5100039", "0.50967914", "0.50964046", "0.50947565", "0.50881946", "0.50873756", "0.5081953", "0.5079554", "0.50795484", "0.50791156", "0.50790167", "0.50783914", "0.5076588" ]
0.7170247
1
Define test variables and initialize app.
def setUp(self): self.app = create_app() self.client = self.app.test_client self.database_name = os.environ.get( "TEST_DATABASE_NAME", "abc123abc1234" ) self.database_path = "postgres://postgres:postgres@{}/{}".format( "localhost:5432", self.database_name ) setup_db(self.app, self.database_path) # drop db, create and populate with test data setup_db_for_test() self.casting_assistant_auth_header = { "Authorization": "Bearer " + CASTING_ASSISTANT_TOKEN } self.casting_director_auth_header = { "Authorization": "Bearer " + CASTING_DIRECTOR_TOKEN } self.executive_producer_auth_header = { "Authorization": "Bearer " + EXECUTIVE_PRODUCER_TOKEN } self.create_actor_success = { "name": "Chris Hemsworth", "age": 37, "gender": "Male", } self.create_actor_fail = { "name": "Chris Evans", "age": 39, } self.create_movie_success = { "title": "Captain America: Civil War", "release_date": "12/04/2016", "actors_ids": [1, 2, 3], } self.create_movie_fail_1 = { "title": "Avenger: Infinity War", } self.create_movie_fail_2 = { "title": "Avenger: Infinity War", "release_date": "27/04/2018", "actors_ids": [], } self.create_movie_fail_3 = { "title": "Avenger: Infinity War", "release_date": "27/04/2018", "actors_ids": [100], } # binds the app to the current context with self.app.app_context(): self.db = SQLAlchemy() self.db.init_app(self.app) # create all tables self.db.create_all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUp(self):\n self.app = init_api()", "def setUp(self):\r\n self.app = app.test_client()\r\n self.app.testing = True", "def setUp(self):\n self.app = app.test_client()\n self.app.testing = True", "def setUp(self) -> None:\n self.app = app.app.test_client()\n self.app.testing = True", "def setUp(self) -> None:\n self._app = WebTestApp(application)", "def setUp(self):\n #app['TESTING'] = True\n self.test_app = app.test_client()", "def setUp(self):\n self.api = \"http://localhost:4031/\"\n self.version = \"0.2\"\n self.app = init_api()", "def setUp(self):\n set_environment_vars()\n\n # Careful! We must set the environment variables to test values\n # _before_ importing the espresso app, otherwise, those env vars will\n # still have their normal values when instantiating the app.\n from espresso import app\n from espresso import db\n from espresso import RESTAURANTS_API_BASE\n from espresso import DEF_MAX_STR_LEN\n\n self.app = app\n self.test_client = app.test_client()\n # DEBUGGING: print(f\"Using database uri: {self.app.config['SQLALCHEMY_DATABASE_URI']}\")\n\n self.API_BASE = RESTAURANTS_API_BASE\n self.DEFAULT_MAX_STRING = DEF_MAX_STR_LEN\n\n db.drop_all()\n db.create_all()", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client", "def setUp(self):\n self.app = app.test_client()", "def setUp(self):\n self.app = app.test_client()", "def setUp(self):\n self.app = create_app('testing')\n self.client = self.app.test_client()", "def setUp(self):\n _, instance_path, shared_inputs = sys.argv\n app = lnt.server.ui.app.App.create_standalone(instance_path)\n app.testing = True\n self.client = app.test_client()\n self.shared_inputs = shared_inputs", "def setUp(self):\n\n from . import main\n\n from .models import (\n get_engine,\n get_session_factory,\n get_tm_session,\n )\n\n self.config={\n 'admin_password':self.admin_login['password'],\n 'sqlalchemy.url':'sqlite://',\n 'auth.secret':'secret'\n }\n\n self.app = main({}, **self.config)\n self.init_database()\n self.testapp=webtest.TestApp(self.app)", "def setUp(self):\n self.app = Flask(__name__)\n self.app_context = self.app.app_context()\n self.app_context.push()\n self.client = self.app.test_client()", "def app():\n print('creating app with test vars')\n\n app = create_app('test')\n app.testing = True\n\n ctx = app.app_context()\n ctx.push()\n yield app\n\n ctx.pop()", "def setUp(self):\n self.app = create_app(\"testing\")\n self.client = self.app.test_client()\n\n self.answer = {\n \"text\":\"\".join(choice(\n string.ascii_letters) for x in range (randint(16,20)))\n }\n\n with self.app.app_context():\n self.db = _init_db()", "def setUp(self):\n\n # Get the Flask test client. 
Client is the browser.\n self.client = app.test_client()\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True", "def setUp(self):\n # Get the Flask test client\n self.client = app.test_client()\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True", "def setUp(self):\n self.app = app\n self.client = self.app.test_client", "def testapp():\n from space_rocks import main\n app = main({})\n from webtest import TestApp\n return TestApp(app)", "def setUp(self):\n config.ENV = \"testing\"\n self.app = init_app()\n self.client = self.app.test_client\n with self.app.app_context():\n db.drop_all()\n db.create_all()\n for actor in test.populate_db.sample_actors:\n Actor(**actor).insert()\n\n for movie in test.populate_db.sample_movies:\n Movie(**movie).insert()\n return self.app\n # binds the app to the current context", "def setUp(self):\r\n\r\n app.config['TESTING'] = True\r\n self.client = app.test_client()", "def setUp(self):\n\n # Get the Flask test client\n self.client = app.test_client()\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True", "def setUp(self):\n\n # Get the Flask test client\n self.client = app.test_client()\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True", "def setUp(self):\r\n\r\n # Get the Flask test client\r\n self.client = app.test_client()\r\n\r\n # Show Flask errors that happen during tests\r\n app.config['TESTING'] = True", "def setUp(self):\n\n app.testing = True\n self.app = app.test_client()\n\n self.valid_question = {\n \"title\" : \"tests\",\n \"question\": \"How do I refactor tests with database?\"\n }\n\n self.invalid_question = {\n \"title\" : \"\",\n \"question\": \"How do I refactor tests with database?\"\n }\n\n self.valid_question2 = {\n \"title\" : \"heroku\",\n \"question\": \"How do I refactor tests?\"\n }", "def setUp(self):\n # Get the Flask test client\n self.client = app.test_client()\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True", "def __init__(self, app, environ):\n # NOTE: The TestApp class that we're wrapping takes a richer set of initialization parameters\n # (including relative_to, use_unicode, cookiejar, parser_features, json_encoder, and lint),\n # but we'll add them conservatively here. If there is a need for any of them, we should add\n # them explicitly here one-by-one as the need is shown so we have tight control of what\n # we're depending on and what we're not. 
-kmp 27-Apr-2020\n self.wrapped_app = self.HELPER_CLASS(app, environ)", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def _setUp(self):\n # self.basedir = '/'.join(['var', self.id()])\n # os.mkdir(self.basedir)\n\n # datadir = '/'.join([self.basedir, 'data'])\n # os.mkdir(datadir)\n if not hasattr(self, 'APP'):\n return\n\n self.pyramid_config = Configurator()\n self.pyramid_config.add_notfound_view(Integration(self.APP))\n self.app = TestApp(self.pyramid_config.make_wsgi_app())", "def test_app():\n pass", "def setup_application(self):\n pass", "def setUp(self):\n self.user = {\n \"Email\": \"user@example.com\",\n \"Password\": \"pass1234\",\n \"Confirm Password\": \"pass1234\"\n }\n self.app = create_app('testing')\n self.client = self.app.test_client", "def setUp(self):\n self.app = app.test_client()\n db.init_db()", "def setup_class(self):\n self.db_fd, app.config['DATABASE'] = tempfile.mkstemp()\n app.config['TESTING'] = True\n self.app = app.test_client()\n init_db()", "def init_app(state):\n app = state.app\n\n app.config.setdefault('SPLIT_ALLOW_MULTIPLE_EXPERIMENTS', False)\n app.config.setdefault('SPLIT_DB_FAILOVER', False)\n app.config.setdefault('SPLIT_IGNORE_IP_ADDRESSES', [])\n app.config.setdefault('SPLIT_ROBOT_REGEX', r\"\"\"\n (?i)\\b(\n Baidu|\n Gigabot|\n Googlebot|\n libwww-perl|\n lwp-trivial|\n msnbot|\n SiteUptime|\n Slurp|\n WordPress|\n ZIBB|\n ZyBorg\n )\\b\n \"\"\")\n\n app.jinja_env.globals.update({\n 'ab_test': ab_test,\n 'finished': finished\n })\n\n @app.template_filter()\n def percentage(number):\n number *= 100\n if abs(number) < 10:\n return \"%.1f%%\" % round(number, 1)\n else:\n return \"%d%%\" % round(number)", "def setUp(self):\n self.client = app.test_client()\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True", "def setUp(self):\n self.db_fd, app.app.config['DATABASE'] = tempfile.mkstemp()\n app.app.config['TESTING'] = True\n self.app = app.app.test_client()\n app.init_db()", "def setUp(self):\n self.app = create_app(config_name=\"testing\")\n self.client = self.app.test_client\n self.category = {'category_name': 'Stews'}\n\n # binds the app to the current context\n with self.app.app_context():\n # create all tables\n db.create_all()", "def setUp(self):\n self.app = create_app(config_name=\"testing\")\n self.client = self.app.test_client()\n self.answers = {'id':1,'ask':'What is the difference between django and flask','language':'python', 'date_posted': '7th May 2017'}", "def test_app():\n # setup\n app = main.create_application()\n app.dependency_overrides[get_settings] = get_settings_override\n with TestClient(app) as test_client:\n yield test_client\n # teardown", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True\n app.config['SECRET_KEY'] = 'key'", "def setUp(self):\n self.app = create_app(config_name=\"testing\")\n # initialize the test client\n self.client = self.app.test_client\n # This is the user test json data with a predefined username, email and password\n \n self.user_data = {\n 'user_email': 'looky@example.com',\n 'password': 'testexample'\n }\n self.user_data_2 = {\n 'user_email': 
'example@example.com',\n 'password': 'test_123'\n }", "def setUp(self):\n self.db_fd, mainPyUnit.app.config['DATABASE'] = tempfile.mkstemp()\n mainPyUnit.app.config['TESTING'] = True\n self.app = mainPyUnit.app.test_client()\n #mainPyUnit.init_db()", "def setUp(self):\n self.app = create_app(config_name=\"testing\")\n self.app_context = self.app.app_context()\n self.app_context.push()\n self.client = self.app.test_client()\n\n\n self.user = {\n\t \"firstname\": \"Michael\",\n\t \"lastname\": \"Mbugua\",\n \"othername\": \"Mike\",\n \"email\": \"mike@gmail.com\",\n \"phoneNumber\": \"0708453901\",\n \"username\": \"Thomas\",\n \"password\": \"Aw3someSauce\"\n \n }", "def setUp(self):\n self.app = webtest.TestApp(main.app) \n self.batch_id = \"R1HIA55JB5DOQZM8R53OKMCWZ5BEQKUJ\"", "def setUp(self):\n self.driver = {\n \"Email\": \"p@gmail.com\",\n \"Type\": \"driver\",\n \"Password\": \"pass123\",\n \"Confirm Password\": \"pass123\"\n }\n self.ride = {\n \"Destination\": \"Meru\",\n \"Origin\": \"Kutus\",\n \"Time\": \"9:00\",\n \"Date\": \"23/7/2018\",\n \"Ride Name\": \"Toyota\",\n \"Capacity\": \"7\"\n }\n self.request = {\n \"Email\": \"Njobu\",\n \"Tel\": \"+254716272376\"\n }\n self.app = create_app('testing')\n self.client = self.app.test_client\n self.app_context = self.app.app_context()\n self.app_context.push()", "def testInit(self):\n self.globalInit()\n self.test.start()", "def test_config():\n assert not sample.create_app().testing\n assert sample.create_app({\"TESTING\": True}).testing", "def setUp(self):\n\n #Get Flask test client\n self.client = app.test_client\n #Show errors from Flask than happen\n app.config['TESTING'] = True\n #Connect to test database\n connect_to_db(app, \"postgresql:///testdb\")\n #Create tables and add sample data to them\n db.create_all()\n example_data()", "def setUp(self):\n self.app = create_app(config_name=\"testing\")\n self.client = self.app.test_client\n self.activity = {'name': 'Shop in Dubai'}\n # test bucket\n self.bucketlist = {'name': 'Go to Egypt for trip'}\n # test user\n self.user_details = {\n 'email': 'test@gmail.com',\n 'password': 'password123'\n }\n\n # binds the app to the current context\n with self.app.app_context():\n # create all tables\n db.session.close()\n db.drop_all()\n db.create_all()", "def setUp(self):\n\n self.client = app.test_client()\n app.config['Testing'] = True\n app.config['SECRET_KEY'] = 'test'\n connect_to_db(app, db_uri='postgresql:///testdb', echo=False)\n db.create_all()\n\n example_data() # Need to expand!", "def setUp(self):\n self.app = create_app(TestingConfig)\n self.client = self.app.test_client\n self.user = {\n \"email\": \"yeku@gmail.com\",\n \"firstname\": \"Yeku Wilfred\",\n \"lastname\": \"chetat\",\n \"phone\": \"671357962\",\n \"password\": \"weezybaby\"\n }\n\n with self.app.app_context():\n # create all tables\n db.create_all()\n initialize_db()", "def setUp(cls):\n app = Flask(__name__)\n esrs.ESRSView.register(app)\n app.config['TESTING'] = True\n cls.app = app.test_client()\n # Mock Celery\n app.celery_app = MagicMock()\n cls.fake_task = MagicMock()\n cls.fake_task.id = 'asdf-asdf-asdf'\n app.celery_app.send_task.return_value = cls.fake_task", "def setUp(self):\n\n # Get the Flask test client\n self.client = app.test_client()\n app.config['TESTING'] = True\n\n # Connect to test database\n connect_to_db(app, \"postgresql:///testdb\")\n\n # Create tables and add sample data\n db.create_all()\n example_data()", "def setUp(self):\n self.app = create_app(config_name=\"testing\")\n # 
initialize the test client\n self.client = self.app.test_client\n # This is the user test json data with a predefined email and password\n self.user_data = {\n 'user_email': 'best@example.com',\n 'password': 'test123'\n }\n self.user_data_2 = {\n \"user_email\": \"example@example.com\",\n \"password\": \"testexample\"\n }\n self.user_data_3 = {\n \"user_email\": \"example.com\",\n \"password\": \"testexample\"\n }\n\n self.user_data_4 = {\n \"user_email\": \"kelly@example.com\",\n \"password\": \"tes\"\n }", "def test_config():\n assert not create_app().testing\n assert create_app(TestConfig).testing", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n setup_db(self.app, TEST_DB_PATH)\n\n # binds the app to the current context\n with self.app.app_context():\n # create all tables\n db.create_all()\n self._populate_db()", "def _setup_app_context_for_test():\n ctx = application.app_context()\n ctx.push()\n yield # tests will run here\n ctx.pop()", "def setUp(self):\n self.app = api.app\n self.client = self.app.test_client\n \n setup_db(self.app)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)", "def setUp(self):\n self.app = create_app(config_name=\"testing\")\n self.client = self.app.test_client\n self.expense = {'name': 'snacks', 'amount': 12.23, 'date_of_expense': '01-01-2021'}\n\n\n # binds the app to the current context\n with self.app.app_context():\n # create all tables\n db.create_all()", "def setUp(self):\n\n # Get the Flask test client\n self.client = app.test_client()\n\n #Shows Flask errors that happen during tests\n app.config['TESTING'] = True\n\n #To test sessions we need to set Secret key \n app.config['SECRET_KEY'] = 'key'\n\n\n # Connect to test database\n connect_to_db(app, \"postgresql:///testdb\")\n\n # Create tables and add sample data\n db.create_all()\n users()\n reviews()", "def application(self):\n if not ApplicationFixture._test_app:\n app = self.APP_CLASS()\n app.run_tests()\n ApplicationFixture._test_app = app\n return ApplicationFixture._test_app", "def initialize(self, application):", "def setUp(self):\n\n # Get Flask test client\n app.config[\"TESTING\"] = True\n app.config[\"SECRET_KEY\"] = 'oh-so-secret-key'\n self.client = app.test_client()\n\n # Connnect to test db\n connect_to_db(app, \"postgresql:///testdb\")\n\n # Create tables and sample data\n db.create_all()\n example_data()", "def test_app():\n param = {\n 'DEBUG': True,\n 'TESTING': True\n }\n _app = create_app(settings_override=param)\n\n ctx = _app.app_context()\n ctx.push()\n yield _app\n ctx.pop()", "def app():\n # create the app with common test config\n app = create_app({\"DB_CONNECT\": TEST_DB_CONNECT})\n\n # create the database and load test data\n with app.app_context():\n get_db()\n aa = g.db.execute(_data_sql).fetchone() # 用来测试, 实际使用的时候应该是清理或初始化需要的数据\n\n yield app\n\n # 可以在这里做一些清理工作\n print(\"end ever test doing sonething\")", "def setUp(self):\n pyauto.PyUITest.setUp(self)\n\n webapp = self.InstallExtension(self.GetWebappPath())\n self.host.LaunchApp(webapp)\n self.account = self.GetPrivateInfo()['test_chromoting_account']", "def application():\n yield create_test_application()", "def setUp(self):\n self.app = app\n self.testing = True\n self.client = self.app.test_client\n self.casting_assistant = os.getenv('CASTING_ASSISTANT')\n self.casting_director = os.getenv('CASTING_DIRECTOR')\n self.executive_producer = os.getenv('EXECUTIVE_PRODUCER')\n self.new_actor = {\n 
\"first_name\": \"Mark\",\n \"last_name\": \"Webb\",\n \"gender\": \"Male\",\n \"image_link\":\"adsads\",\n \"age\": 52\n }\n self.movies = {\n \"title\": \"Avengers\",\n \"release_date\": \"2019-01-01\",\n \"image_link\": \"ffdasfa\"\n }\n #db.drop_all()\n #db.create_all()\n # binds the app to the current context\n # with self.app.app_context():\n # self.db = SQLAlchemy()\n # self.db.init_app(self.app)\n # # create all tables\n # self.db.drop_all()\n # self.db.create_all()", "def main_tester():\n create_tester_paths()\n _logger.info(' -- tester init done setting up paths and db file.')", "def test_config():\n\n # assert create_app().testing\n assert create_app(\"testing\", settings={\n \"TESTING\": True,\n \"SQLALCHEMY_TRACK_MODIFICATIONS\": False\n }).testing", "def setUp(self):\n\n # Get the Flask test client\n self.client = app.test_client()\n app.config['TESTING'] = True\n app.config['SECRET_KEY'] = 'key'\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess['user'] = 25\n\n # Connect to test database\n connect_to_db(app, \"postgresql:///testdb\")\n\n # Create tables and add sample data\n db.create_all()\n example_data()", "def _setup(app_obj):", "def setUp(self):\n self.app = create_app(\"configmodule.TestingConfig\")\n self.app.testing = True\n\n self.client = self.app.test_client()\n\n with self.app.app_context():\n db.drop_all()\n db.create_all()", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"databasename\"\n self.database_path = \"postgresql://postgres:usman@{}/{}\".format('localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()", "def setUp(self):\n api.app.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite://\"\n api.app.config['TESTING'] = True\n self.app = api.app.test_client()\n db.create_all()", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n self.database_path = \"postgresql:///{}\".format(self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()", "def setUp(self) -> None:\n self.workdir = tempfile.mkdtemp()\n self.server_name = 'fooserver.localdomain'\n self.app = create_web_app()\n self.app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:'\n self.app.config['SERVER_NAME'] = self.server_name\n self.app.config['STORAGE_BASE_PATH'] = self.workdir\n\n # There is a bug in arxiv.base where it doesn't pick up app config\n # parameters. 
Until then, we pass it to os.environ.\n os.environ['JWT_SECRET'] = self.app.config.get('JWT_SECRET')\n self.client = self.app.test_client()\n # self.app.app_context().push()\n with self.app.app_context():\n database.db.create_all()\n\n with open('schema/resources/Workspace.json') as f:\n self.schema = json.load(f)", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n self.database_path = \"postgresql://postgres@{}/{}\".format('localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n self.database_path = \"postgres://{}/{}\".format(\n 'localhost:5432',\n self.database_name\n )\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n self.database_path = \"postgres://{}/{}\".format('localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"Casting_Agency_test\"\n self.database_path = \"postgres://hala@{}/{}\".format('localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n self.database_path = \"postgres://{}/{}\".format(\n 'localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()", "def setUp(self):\n self.app = create_app(config_name=\"testing\")\n self.client = self.app.test_client\n self.bucketlist = {'name': 'Go to Grand canyon for camping'}\n\n # binds the app to the current context\n with self.app.app_context():\n # create all tables\n db.session.close()\n db.drop_all()\n db.create_all()", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n self.database_path = \"postgresql://postgres:1998@{}/{}\".format(\n 'localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()", "def setUp(self):\n app.config[\"TESTING\"] = True\n app.config[\"WTF_CSRF_ENABLED\"] = False\n app.config[\"DEBUG\"] = False\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///:memory:\"\n self.app = app.test_client()\n db.drop_all()\n db.create_all()", "def setUpClass(cls):\n app.debug = False", "def 
setup():\n load_app()\n setup_db()", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n self.database_path = \"postgres://{}:{}@{}/{}\".format('student',\n 'student', 'localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()", "def test_config(app):\n assert app.testing", "def setUp(self):\n self.app = create_app(\"testing\")\n self.app = self.app.test_client()\n\n self.question_details = {\n \"title\": \"How to exit Vim on Ubuntu 16.04\",\n \"description\": \"How does one get the exit Vim from terminal?\"}", "def test_build(self):\n self.app.build()", "def setUp(self):\n self.db_fd, closet.app.config['DATABASE'] = tempfile.mkstemp()\n closet.app.config['TESTING'] = True\n self.app = closet.app.test_client()\n closet.init_db()", "def setUp(self):\n\n # Get the Flask test client.\n self.client = app.test_client()\n app.config[\"TESTING\"] = True\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\n # Connect to the test database.\n connect_to_db(app, db_uri=\"postgresql:///testnourish\") \n\n # Create the tables and add the sample data.\n db.create_all()\n load_test_data()", "def setUp(self):\n self.app = app\n self.client = self.app.test_client\n self.database_name = \"volunteer_test\"\n self.database_path = \"postgres://{}/{}\".format('localhost:5432', self.database_name)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.app.config[\"SQLALCHEMY_DATABASE_URI\"] = self.database_path\n self.app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = True\n self.db.app = self.app\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()" ]
[ "0.8074992", "0.799192", "0.79715896", "0.7904655", "0.7897552", "0.7884764", "0.77985597", "0.7775213", "0.77417946", "0.77252287", "0.77252287", "0.7690111", "0.7678627", "0.7651094", "0.7564304", "0.7556179", "0.74967074", "0.74653935", "0.7387983", "0.73835796", "0.7377442", "0.73745763", "0.7366795", "0.7363622", "0.7363622", "0.73555404", "0.7347752", "0.7330745", "0.73254645", "0.7322105", "0.7322105", "0.7322105", "0.7262732", "0.7262732", "0.7260795", "0.7260075", "0.7259106", "0.7254375", "0.7229697", "0.7208638", "0.72067404", "0.72044975", "0.72038823", "0.719491", "0.7191244", "0.7179292", "0.7174789", "0.716133", "0.71342236", "0.7106748", "0.71065223", "0.71028143", "0.70987046", "0.7086434", "0.7086336", "0.7077584", "0.7071516", "0.706785", "0.70344794", "0.7004236", "0.69888484", "0.69770116", "0.6976879", "0.6973712", "0.6966386", "0.69542557", "0.69535947", "0.69462585", "0.6930442", "0.6928271", "0.69159204", "0.6901296", "0.6901166", "0.68987995", "0.689458", "0.6890022", "0.6882946", "0.6873787", "0.687111", "0.6868514", "0.68597823", "0.6858701", "0.6849498", "0.68448025", "0.684326", "0.6831578", "0.6829608", "0.68296057", "0.68280864", "0.68211895", "0.6818643", "0.68058413", "0.6804566", "0.680346", "0.680296", "0.6798396", "0.6798177", "0.67958885", "0.67922586", "0.67852235", "0.67827547" ]
0.0
-1
Load the specified mojofile, and return its model id.
def load_model(self, mojofile: str) -> str: return self._request("GET /loadmojo", params={"file": mojofile})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model(self, filename):\r\n pass", "def load_model(self, file=None):\n return None", "def load_model(self, file_name=None):\n try:\n if file_name:\n self.agent.load_model(file_name)\n else:\n self.agent.load_model()\n print('Model loaded successfully')\n return 1\n except:\n print('Failed to load model')\n return 0", "def load(model_file):\n return pickle.load(open(model_file))", "def load_model(filename):\n return Model.load_savefile(filename)", "def load_model(self, model_path: str):", "def load(path_to_model):\n pass", "def load_model(self, path):\n pass", "def load(self, file_id):\n pass", "def load_model(file_name):\n with open(file_name, 'rb') as file:\n return pickle.load(file)", "def load_model(self) -> Any:", "def load_model():\n\n # find location of model\n\n file_path = '/Users/davidodwyer/Desktop' # to the directory\n file_name = 'original_mlr.joblib' \n the_file = os.path.join(file_path, file_name)\n\n # load model\n\n model = load(the_file)\n\n return model", "def load_model():\n logger.info('load_model called')\n return 1", "def load_model(fname: os.PathLike) -> Model:\n return Model.load(fname)", "def load_model(self, **params):\n \t# file_name = params['name']\n # return pickle.load(gzip.open(file_name, 'rb'))", "def load_model_custom(file, object):\n return getattr(load_module(file), object)", "def load(self, path, model_id):\n self.load_state_dict(torch.load(os.path.join(path, '{}-retriever'.format(model_id))))", "def load_model(task_id):\n # get model file name\n task_chain_id = task_id.split('-')[0]\n\n root_dir = os.path.split(os.path.realpath(__file__))[0]\n model_path = os.path.join(root_dir, '..', 'common', 'model', task_chain_id)\n model_file_name = os.path.join(model_path, task_id + '.model')\n if not os.path.exists(model_file_name):\n raise Exception(\"Algorithm load_model not find model {}\".format(model_file_name))\n # load mode from disk\n model = load(model_file_name)\n\n return model", "def load(identifier, path):\r\n\tloader = importlib.machinery.SourceFileLoader(identifier, path)\r\n\thandle = loader.load_module(identifier)\r\n\treturn handle", "def load_model(language_id, model_type):\n\n # getting the language code from it's id\n language_code = get_language_code(language_id)\n\n # getting the model name from it's type\n model_name = get_model_name(model_type)\n\n # building the model's full path\n model_full_path = \"%s/%s/%s.txt\" % (models_base_path, language_code, model_name)\n\n # returning the model loaded directly from file\n return load_model_from_file(model_full_path)", "def import_model(path=None):\n path = get_model_path() if path is None else path\n return torch.jit.load(path)", "def load_model_by_name(model, global_step, device=None, path=\"/scratch/users/zucks626/ADNI/IPMI/checkpoints/\"):\r\n # path = \"/scratch/users/zucks626/ADNI/ae_cls/checkpoints/\"\r\n file_path = path + model.name + \"/\" + 'model-{:05d}.pt'.format(global_step)\r\n state = torch.load(file_path, map_location=device)\r\n model.load_state_dict(state)\r\n print(\"Loaded from {}\".format(file_path))", "def loadModel(file_name):\n with open(SAVE_PATH + file_name, \"rb\") as in_file:\n model = pickle.load(in_file, encoding = \"uft-8\")\n print(\"{} loaded\".format(file_name))\n return model", "def import_model(file):\n file = os.path.expanduser(file)\n obj = IsolationForest()\n metadata = obj._cpp_obj.deserialize_obj(file)\n metadata = json.loads(metadata)\n obj._take_metadata(metadata)\n return obj", "def read_model(filename):\n return joblib.load(filename)", 
"def load_model(self, filename):\n filename = path.join(self.root_path, f'models/{filename}.pkl')\n self.model = pickle.load(open(filename, \"rb\"))\n print('Successfully loaded model from '+filename)", "def load_model():\n return \"None\"", "def load_model(filename):\n checkpoint = torch.load(filename)\n model = QNetwork(checkpoint['input_size'], checkpoint['output_size'], checkpoint['hidden_layers'])\n model.load_state_dict(checkpoint['state_dict'])\n return model", "def get_model_id(model_name, workspace, header, user):\n uri = \"https://api.anaplan.com/1/3/workspaces/{}/models/\".format(workspace)\n response = requests.get(uri, headers = header)\n response_json = json.loads(models.text.encode(\"utf-8\"))\n for model in response_json:\n if model[u\"name\"] == unicode(model_name):\n return model[u\"id\"]", "def load(\n self,\n modelLoadPath\n ):\n pass", "def _load(path):\n status = KerasOpenVINOModel._load_status(path)\n if status.get('xml_path', None):\n xml_path = Path(status['xml_path'])\n invalidInputError(xml_path.suffix == '.xml',\n \"Path of openvino model must be with '.xml' suffix.\")\n else:\n invalidInputError(False, \"nano_model_meta.yml must specify 'xml_path' for loading.\")\n xml_path = Path(path) / status['xml_path']\n return KerasOpenVINOModel(xml_path)", "def read_id_from_file(file):\n input = open(file, 'r')\n id = input.readline().strip()\n input.close()\n return id", "def get_api_id_from(file):\n with open(file, \"r\") as f:\n api_info = pickle.load(f)\n return api_info['id']", "def loadmodel(fname):\n if not fname.endswith('.pickle.gz'):\n fname = fname + '.pickle.gz'\n with gzip.open(fname, 'r') as fin:\n D = load(fin)\n print 'Load model from file: {}'.format(fname)\n return D", "def load(self, filename):\n pass", "def load(self, file_path):\n self.model = load_model(file_path)", "def load(self, file_path):\n self.model = load_model(file_path)", "def load(self, file_path):\n self.model = load_model(file_path)", "def load_model(self, filename):\n model_object = self.s3_resource.Object(self.bucket_name, self.models_path + str(filename)).get()['Body'].read()\n model = pickle.loads(model_object)\n return model", "def read_model(file_name):\n\n with open(file_name, \"rb\") as model_file:\n return pickle.load(model_file)", "def __init__(self, mojo_path=None):\n assert_is_type(mojo_path, str)\n\n self.pipeline_id = h2o.lazy_import(mojo_path)", "def read_id_from_file(path):\n\n with open(path) as id_file:\n return id_file.readline().strip()", "def load_model(gateway_name=None):\n if gateway_name and len(gateway_name) > 0:\n model = pk.load(open(\"models/\" + gateway_name + \"_model.pk\", \"r\"))\n else:\n model = pk.load(open(\"models/all_model.pk\", \"r\"))\n return model", "def get_file_id(file_name, model, workspace, header, user):\n uri = (\"https://api.anaplan.com/1/3/workspaces/{}/models/{}/\"\n \"files/\").format(workspace, model)\n response = requests.get(uri, headers = header)\n response_json = json.loads(response.text.encode(\"utf-8\"))\n for file in response_json:\n if file[u\"name\"] == unicode(file_name):\n return file[u\"id\"]", "def load_model():\n with open(MODEL_FILENAME, \"rb\") as file:\n model = pickle.load(file)\n return model", "def get_model_pipeline_from_file(self, oc):\r\n # Load model related files\r\n model_path = self.config['DATA_PATH'] + self.config['CUSTOMER_NAME'] + '/models/'\r\n\r\n model_file = model_path + self.task + '_' + str(oc) + '_pipeline.joblib'\r\n\r\n if os.path.isfile(model_file):\r\n model = joblib.load(model_file)\r\n 
return model\r\n return None", "def load_model(self, fname):\n cxnlib.CXNNetLoadModel(self.handle, fname)", "def model_load(file_name=None):\n if file_name is None :\n file_name = \"./data/_oP5_SegmentClassifier.dump\"\n else:\n pass\n\n return p5_util.object_load(file_name)", "def load_model(model_name):\r\n model = joblib.load(model_name)\r\n return model", "def load_model(self):\n pass", "def from_file(cls, file): \n try:\n import dill as pickle\n except ImportError:\n logger.error(\"Cannot import from file, dill not installed\")\n return None\n model = pickle.load(open(file,'rb'))\n if type(model) == GeologicalModel:\n logger.info('GeologicalModel initialised from file')\n return model\n else:\n logger.error('{} does not contain a geological model'.format(file))\n return None", "def load_model(self, filename, force=False):\n logging.info('Loading model from {}...'.format(filename))\n self._download_from_bucket(filename, filename, force=force)\n return load(filename)", "def __load_model(self):\n loaded = load(self.__file_name)\n self.__model = loaded['model']\n self.__meta_data = loaded['metadata']\n self.__is_ready = True", "def load(cls, file_id):\n if not isinstance(file_id, file):\n handle = open(\n \"{:s}{:s}-{:d}.pckl\".format(\n DUMP_PATH,\n cls.__name__,\n file_id\n ),\n \"rb\")\n else:\n handle = file_id\n return pickle.load(handle)", "def load_model(self, fname):\n if self._Booster is None:\n self._Booster = Booster({'nthread': self.n_jobs})\n self._Booster.load_model(fname)", "def load_model_from_file(model: torch.nn.Module, model_file_path: Path) -> None:\n\n if model_file_path.is_file():\n try:\n model.load_state_dict(torch.load(model_file_path))\n except Exception as e:\n logging.warning(\"Couldn't load model. Attempting to map CUDA tensors to CPU to solve error.\")\n else:\n logging.warning(\"Could not find model: {}\".format(model_file_path))\n raise FileExistsError(f\"Cannot load model file {model_file_path} into {model}...\")", "def _get_model():\n with open('models/catapp_gp_model.pickle', 'rb') as modelfile:\n model = pickle.load(modelfile)\n return model", "def get_import_id(import_name, model, workspace, header, user):\n uri = (\"https://api.anaplan.com/1/3/workspaces/{}/models/{}/\"\n \"imports/\").format(workspace, model)\n response = requests.get(uri, headers = header)\n response_json = json.loads(response.text.encode(\"utf-8\"))\n for imp in response_json:\n if imp[u\"name\"] == unicode(import_name):\n return imp[u\"id\"]", "def load_model(self, model, id=LAST):\n model.load_state_dict(self.data_dict[id])\n return model", "def load(self, path):\n load_model(path, self)", "def import_scene(file_path):\n\n pass", "def get_object(fname, fmethod='rb'):\n with open(model_dir/fname, fmethod) as f:\n return pickle.load(f) if '.pkl' in fname else f.read()", "def read_model( path ):\n path = os.path.join(models_folder,path + '.pck' )\n with open( path , 'r') as f:\n model = pickle.load(f)\n return model", "def open_model(fname=\"models/model.pickle\"):\n pickle_data = pkgutil.get_data(__name__, fname)\n time_to_model = pickle.loads(pickle_data)\n return time_to_model", "def load_model(self):\n with open(self.args.trained_model, 'rb') as handle:\n self.model_hash = hashlib.sha224(handle.read()).hexdigest()\n\n self.model.load(self.args.trained_model)\n self.logger.debug('Loaded model from %s', self.args.trained_model)\n return", "def load_from(filename):\n from .io import load\n return load(filename)", "def load_model():\n with open(paths.model('model.pkl'), 'rb') as 
stream:\n return pickle.load(stream)", "def insert_file_via_perl(filename, comment=\"Added by Python Job\"):\n for line in lines(['ImportSingleFileIntoPosdaAndReturnId.pl', filename, comment]):\n if line.startswith(\"File id:\"):\n return int(line[8:])\n\n # TODO: pass on the error if there was one\n raise RuntimeError(\"Failed to insert file into posda!\")", "def load_model(file_name: str, full_path: bool = False):\n if not full_path:\n file_name = path.join(MODEL_DIR, file_name)\n\n if not path.isfile(file_name):\n raise OSError(\"{0} does not exist!\".format(file_name))\n\n return pickle.load(open(file_name, 'rb'))", "def load_model(model_file):\n # Load TFLite model and allocate tensors.\n interpreter = tflite.Interpreter(model_path=model_file)\n interpreter.allocate_tensors()\n return interpreter", "def load_model_from_file(model_full_path):\n\n # trying to load the model from file\n try:\n # opening the file that has the model data\n with codecs.open(model_full_path, 'r') as f:\n # reading the model data\n model_data = u\"%s\" % f.read()\n\n # escaping unicode characters (\\u00fb, etc.)\n # model_data = model_data.decode('unicode_escape')\n\n # building the model features\n model_features = eval(model_data)\n\n # returning the model features\n return model_features\n\n # in case of an exception\n except Exception as e:\n # printing exception message\n print(str(e))\n\n # retuning None\n return None", "def get_local_model(filename):\n\n with open(filename) as fid: # 'aca/aca_spec.json', 'rb') as fid:\n f = fid.read()\n\n return json.loads(f), md5(f.encode('utf-8')).hexdigest()", "def load_model(model_path: str) -> object:\n model = torch.load(model_path)\n model.eval()\n return model", "def test__load_model(filename):\n file_storage = FileStorage(stream=open(filename, mode=\"rb\"),\n filename=filename, name=\"model\")\n model = Submit()._load_model(file_storage)\n assert len(model.reactions) == 95\n assert len(model.metabolites) == 72\n assert file_storage.closed", "def _load_model_from_file(path, handle):\n logger.debug('Reading file from %s assuming pickled model.' % path)\n try:\n model = pickle.load(handle)\n except (TypeError, pickle.UnpicklingError):\n logger.debug('Cannot unpickle %s. Assuming json model next.' % path)\n try:\n model = load_json_model(path)\n except ValueError:\n logger.debug(\"Cannot import %s as json model. 
Assuming sbml model next.\" % path)\n try:\n model = read_sbml_model(path)\n except AttributeError as e:\n logger.error(\"cobrapy doesn't raise a proper exception if a file does not contain an SBML model\")\n raise e\n except Exception as e:\n logger.error(\n \"Looks like something blow up while trying to import {} as a SBML model.\"\n \"Try validating the model at http://sbml.org/Facilities/Validator/ to get more information.\".format(\n path))\n raise e\n return model", "def load_scene():\n module_path = dirname(__file__)\n return _safe_unpickle(join(module_path, 'scene.pickle'))", "def load_seq_model():\n model = joblib.load(os.path.join(os.path.dirname(__file__), 'RuleSet3.pkl'))\n return model", "def read_data_model(filename='data/data_model.pkl'):\n\n with open(filename, 'r') as pklfile:\n root = pkl.load(pklfile)\n\n return root", "def load_model() -> None:\n global model\n\n if app.testing:\n current_dir = os.path.dirname(__file__)\n model_path = os.path.join(current_dir, \"models/model.pkl\")\n else:\n model_path = os.getenv(\"PATH_TO_MODEL\")\n\n if model_path is None:\n err = f\"PATH_TO_MODEL {model_path} is None\"\n raise RuntimeError(err)\n\n with open(model_path, \"rb\") as model_file:\n model = pickle.load(model_file)", "def _load_model_conf(path, run_id=None):\n if run_id:\n path = tracking.utils._get_model_log_dir(path, run_id)\n conf_path = os.path.join(path, \"MLmodel\")\n model = Model.load(conf_path)\n if FLAVOR_NAME not in model.flavors:\n raise Exception(\"Format '{format}' not found not in {path}.\".format(format=FLAVOR_NAME,\n path=conf_path))\n return model.flavors[FLAVOR_NAME]", "def load(self, filepath):\n try:\n ckpt = torch.load(filepath, map_location=self.device)\n except Exception as e:\n print('Could not load file: {}'.format(e))\n sys.exit()\n try:\n self.load_state_dict(ckpt['ae'])\n except Exception as e:\n print('Could not load model state dict: {}'.format(e))\n try:\n self.optimizer.load_state_dict(ckpt['optimizer'])\n except Exception as e:\n print('Could not load optimizer state dict: {}'.format(e))", "def get_model_name_from_raw_file(yaml_file: str) -> str:\n pattern = re.compile(r'^model:\\s*(?P<model>\\w+)')\n entries = find_all_entries(\n yaml_file=yaml_file, pattern=pattern, pattern_keyword='model')\n\n if not entries:\n logging.error(f\"Unable to find the model name in {yaml_file}\")\n entries.append('')\n\n return entries[0]", "def load_model(self, file_name):\n with open(file_name, 'rb') as file:\n self.lin_reg = pickle.load(file)", "def get_model():\n model_folder = os.path.join(os.environ['CovidTools'], 'mod_split_model')\n model_path = os.path.join(model_folder, 'model.pt')\n if not os.path.exists(model_path):\n fs = Filesplit()\n fs.merge(input_dir=os.path.join(model_folder, 'parts'),\n output_file=os.path.join(model_path),\n cleanup=False)\n return torch.load(model_path)", "def _load_model(self):\n with open(self.filepath, 'rb') as file:\n self.cmodel = pickle.load(file)", "def load_meta(fname, data_id=''):\n # TODO: expand functionality?\n with open(fname+data_id+'_meta.pkl', 'rb') as f:\n meta = pickle.load(f)\n return meta", "def load_model(PATH):\n model = torch.load(PATH)\n model.eval()\n return model", "def load_model(path_or_handle, solver_interface=optlang, sanitize=True):\n solver_interface = solvers.get(solver_interface, solver_interface)\n\n if isinstance(path_or_handle, str) and not os.path.isfile(path_or_handle):\n from cameo.models.webmodels import load_webmodel\n logger.debug(\"Given path is not a file. 
Trying to load from webmodels\")\n model = load_webmodel(path_or_handle, solver_interface)\n else:\n if isinstance(path_or_handle, str):\n # Open the given file\n path = path_or_handle\n handle = open(path_or_handle, 'rb')\n elif hasattr(path_or_handle, 'read'):\n # Argument is already an open file\n path = path_or_handle.name\n handle = path_or_handle\n else:\n raise ValueError('Provided argument %s has to be either a string or a file handle' % path_or_handle)\n model = _load_model_from_file(path, handle) # Parse model from the file\n\n if sanitize:\n sanitize_ids(model)\n\n if solver_interface is not None and not isinstance(model.solver, solver_interface.Model):\n logger.debug(\"Changing solver interface to %s\" % solver_interface)\n model.solver = solver_interface\n\n return model", "def load_model(self, fname: str) -> None:\n checkpoint_data = torch.load(fname)\n\n # Load the models\n # P-Net\n model_import_path = checkpoint_data['p_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['p_net']['model_name'])\n self.p_net = mod()\n self.p_net.set_params(checkpoint_data['p_net'])\n # Q-Net\n model_import_path = checkpoint_data['q_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['q_net']['model_name'])\n self.q_net = mod()\n self.q_net.set_params(checkpoint_data['q_net'])", "def load_model(path):\n # for example -> f\"{os.getcwd()}/trained_models\"\n return spacy.load(path)", "def\tget_id(args):\n\tpath = args.config['book-path']\n\tdata = json.loads(open(path).read())\n\tif len(data) == 0:\n\t\treturn 0\n\torder_id = data[len(data) - 1]['id']\n\torder_id += 1\n\treturn order_id", "def load_model(model, path):\n\tmodel.load_state_dict(torch.load(path))\n\tprint(\"pre-trained model loaded from {}\".format(path))", "def load(model_path: str):\n model = torch.load(model_path)\n model.eval()\n return model", "def model_1_0(*filename):\n return os.path.join(check.MODELS_1_0_DIR, *filename)", "def import_object(self, filename, pose=np.eye(4), size=None, oid=1):\n\n # extract name and extension of the model file\n name, ext = os.path.basename(filename).split(\".\")\n\n # load model according to file extension\n if ext == \"ply\":\n bpy.ops.import_mesh.ply(filepath=filename)\n else:\n raise NotImplementedError()\n\n # the name of the file is assigned\n # to the mesh object in blender engine\n model = bpy.data.objects[name]\n model.name = name + str(oid)\n\n # set object reference point (origin) and pose\n bpy.ops.object.origin_set(type=\"ORIGIN_CENTER_OF_MASS\", center=\"BOUNDS\")\n self.set_model_pose(model, pose)\n\n # normalize and scale model dimensions\n if size is not None:\n model.dimensions = size * model.dimensions / max(model.dimensions)\n\n # add material\n # FIXME: adjust properties\n material = bpy.data.materials.new(name=\"Material\")\n material.specular_intensity = 0.25\n model.data.materials.append(material)\n # enable vertex color rendering\n # this is necessary to render the vertex color\n # in the rgb branch of the rendering node tree\n model.active_material.use_vertex_color_paint = True\n\n # if rendering is not photorealistic, render only\n # the vertex color information of the model\n if not self.photorealism:\n model.active_material.use_shadeless = True\n\n # set object id\n model.pass_index = oid\n\n return model", "def load_model(filepath=None, config=None, item=None):\n\n if filepath is None:\n raise ValueError(\"The filepath is None, please 
check the filepath is in the config file\")\n if '.h5' in filepath:\n keras_model = lm(filepath)\n reader = FeatureReader(config)\n features = reader.get_feature(dt.now())\n f = features[item]\n # for keras bug\n f = f.values.reshape(1,4,12)\n v = keras_model.predict(f)\n return keras_model\n else:\n return joblib.load(filepath)", "def get_object_id(path):\n return str.split(os.path.basename(path), \"_\")[1][0]", "def load_model(\n model_path=filepath + \"/trained_models/hi2en/\", model_file_name=\"model.h5\"\n):\n model_path = (\n filepath + \"/trained_models/{}/\".format(model_path)\n if model_path in [\"en2hi\", \"hi2en\"]\n else model_path\n )\n config = SConfig(configuration_file=model_path + \"config.pkl\")\n s2s = Seq2Seq(config)\n s2s.load_model(path_to_model=model_path, model_file_name=model_file_name)\n return s2s", "def _get_backbone_model_from_file(filepath, in_chans, num_classes):\n sys.path.append('{}'.format(dirname(filepath)))\n class_name = basename(filepath).split('.')[0]\n exec('from {} import {}'.format(*[class_name]*2))\n return eval('{}(in_chans={}, num_classes={})'.format(class_name, in_chans, num_classes))", "def load(path):\n pass" ]
[ "0.6718768", "0.654268", "0.61337996", "0.6125505", "0.6113581", "0.6078083", "0.60401374", "0.60291064", "0.5945967", "0.5840561", "0.5827812", "0.5739245", "0.5689958", "0.5678766", "0.5677562", "0.5670736", "0.56696165", "0.5610047", "0.5605511", "0.55876744", "0.5587407", "0.55653065", "0.55591935", "0.5557846", "0.5543136", "0.55376154", "0.55330473", "0.5532693", "0.55221325", "0.5519036", "0.5514299", "0.55094075", "0.5501058", "0.54968715", "0.548497", "0.5478586", "0.5478586", "0.5478586", "0.5475653", "0.5456786", "0.54495925", "0.544821", "0.544698", "0.5423261", "0.5421974", "0.5414508", "0.5409573", "0.53994435", "0.5399004", "0.5396693", "0.53944874", "0.5379808", "0.5377807", "0.53699654", "0.53530085", "0.53510815", "0.5349027", "0.5312297", "0.5301968", "0.52916056", "0.5283538", "0.5281932", "0.52786434", "0.52699274", "0.52690077", "0.5248074", "0.52403694", "0.5233384", "0.5232123", "0.5230888", "0.5221369", "0.52188104", "0.52158576", "0.5211145", "0.5210009", "0.52001005", "0.5197723", "0.51890653", "0.51873296", "0.5186384", "0.51847905", "0.5182695", "0.51818407", "0.51791114", "0.5165977", "0.5165037", "0.5156094", "0.5151541", "0.51429886", "0.51400065", "0.5136776", "0.5135752", "0.51317424", "0.51306146", "0.51286286", "0.5127448", "0.51181215", "0.5117078", "0.51156086", "0.51057875" ]
0.78735054
0
Shutdown / kill the server. Sometimes the ``POST /shutdown`` request may fail. In any case we attempt to terminate the process with the SIGKILL signal if it still seems to be running.
def shutdown(self): try: self._request("POST /shutdown") time.sleep(0.300) except requests.exceptions.ConnectionError: pass if self._process and self._process.poll() is None: self._process.kill() if self._session: self._session.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shutdown():\n os.kill(os.getpid(), signal.SIGTERM)", "def shutdown():\n self_pid = os.getpid()\n logging.info('Forcibly terminating program (PID=%s)', self_pid)\n os.kill(self_pid, signal.SIGKILL)", "def stop(self):\n self.shutdown_ = True\n if self.running():\n os.kill(self.server_pid_, signal.SIGTERM)", "def _shutdown(self, *args):\n self.server.shutdown()", "def shutdown():\n shutdown_func = request.environ.get(\n 'werkzeug.server.shutdown') # default web server with flask\n if shutdown_func is None:\n return 'unable to shutdown server!', 501\n shutdown_func()\n return \"server shutting down...\"", "def force_stop(self):\n self.shutdown_ = True\n if self.running():\n os.kill(self.server_pid_, signal.SIGINT)", "def server_shutdown():\n if not current_app.testing:\n abort(404)\n shutdown = request.environ.get('werkzeug.server.shutdown')\n if not shutdown:\n abort(500)\n shutdown()\n return 'Shutting down...'", "def shutdown_server():\n func = request.environ.get('werkzeug.server.shutdown')\n if func is None:\n raise RuntimeError('Not running with the Werkzeug Server')\n func()", "def shutdown(self):\n self._send_command('shutdown')\n self.sock.close()\n self.disconnected = True", "def shutdown(self):\n # TODO: Build a certificate chain so we can verify our localhost and remove the verify=False workaround.\n requests.get('{local_server_address}/shutdown'.format(local_server_address=self.local_server_address),\n verify=False)", "def shutdown(self) -> None:\n prefix = f\"In {ThreadedServer.__name__}.{ThreadedServer.shutdown.__name__}\"\n\n print(f\"{prefix}: Instructing the server to shut down...\", file=self.stdout)\n with self._server_exception_lock:\n if self._server_exception is not None:\n raise self._server_exception\n\n print(f\"{prefix}: Waiting for server to shut down...\", file=self.stdout)\n self._httpd.shutdown()", "def shutdown(self):\n self.logger.info(\"Received graceful shutdown request\")\n self.stop()", "def _HandleShutdown(self):\n self.send_response(httplib.OK)\n self.send_header('Content-Type', 'text/plain')\n self.end_headers()\n self.wfile.write('API Server Quitting')\n self.server.shutdown()", "def Quit(self):\n t = threading.Thread(target=self.server.shutdown)\n t.start()", "def shutdown(self):\n self.broadcast(self.server_socket, '[server shutdown]', 'server')\n self.selector.unregister(self.server_socket)\n self.server_socket.close()", "def shutdown(self):\n self.req_shutdown = True", "def shutdown():\n\n cmd = dict()\n cmd[\"type_\"] = \"shutdown\"\n cmd[\"name_\"] = \"all\"\n\n ## In case of the shutdown there will be no returned message to\n ## check the success.\n s = comm.send_and_receive_socket(cmd)\n\n s.close()", "def shutdown(self, signum, frame):\n self.serverSocket.close()\n sys.exit(0)", "def shutdown():\n shutdown_server()\n return \"Shutting down server\"", "def stop(self):\n self.logger.info('Shutting down SimpleHTTPServer')\n stop_cmd = \"pkill -9 -f '{0}'\".format(self.server_cmd)\n self._execute_command(stop_cmd)", "def shutdown_server():\n func = flask.request.environ.get('werkzeug.server.shutdown')\n if func is None:\n raise RuntimeError('Not running with the Werkzeug Server')\n func()", "async def shutdown(self):\n\n if self.log_output:\n logging.info('Shutting down ...')\n else:\n print('Shutting down ...')\n\n await self.send_reset()\n\n try:\n self.loop.stop()\n except:\n pass\n try:\n self.loop.close()\n except:\n pass\n sys.exit(0)", "def shutdown(self):\n self._shutdown_requested_event.set()\n 
SimpleJSONRPCServer.SimpleJSONRPCServer.shutdown(self)\n logging.info('Server shutdown complete')", "def shutdown():\n func = flask.request.environ.get('werkzeug.server.shutdown')\n if func is None:\n raise RuntimeError('Not running with the Werkzeug Server')\n func()\n return 'Server shutting down...'", "def rpc_shutdown(self):\n\t\tshutdown_thread = threading.Thread(target=self.server.shutdown)\n\t\tshutdown_thread.start()\n\t\treturn", "def shutdown(self):\n self.exit_app()", "async def kill_server(self):\n if await self._kill():\n await self.send('Server killed')", "def shutdown(self):\n # First call superclass shutdown()\n HTTPServer.shutdown(self)\n\n # We also need to manually close the socket\n self.socket.close()", "def stop() -> None:\n global _server\n if _server:\n try:\n _server.shutdown()\n except Exception:\n pass", "def shutdown(self):\n # shutdown all known sessions\n for session in self.sessions.values():\n session.shutdown()\n\n # if we are a daemon remove pid file\n if self.config[\"daemonize\"]:\n pid_file = self.config[\"pidfile\"]\n try:\n os.unlink(pid_file)\n except OSError:\n logger.exception(\"error daemon pid file: %s\", pid_file)\n\n # remove server from server list\n CoreServer.remove_server(self)", "def shutdown(self):\r\n # First call superclass shutdown()\r\n HTTPServer.shutdown(self)\r\n\r\n # We also need to manually close the socket\r\n self.socket.close()", "def shutdown_server(self):\n try:\n ans = self.xmlproxy.shutdown()\n except socket_error as err:\n self.class_logger.info(\"xmlrpc shutdown complete. (DEBUG: {0})\".format(err))\n except XmlrpcProtocolError as err:\n self.class_logger.info(\"xmlrpc shutdown complete. (DEBUG: {0})\".format(err))\n except Exception as err:\n self.class_logger.info(\"xmlrpc shutdown expected error: {0} - {1}\".format(type(err), err))\n else:\n self.class_logger.info(\"xmlrpc shutdown query answer: %s\" % (ans, ))\n # except socket.error, err:\n # if err[0] == 111:\n # print \"!\"*100\n # print \"ERR '{0}' handled\".format(err)\n # else:\n # raise", "def _shut_down_wsgi_server():\n print 'Stopping %s %s' % (server_class.__module__, server_name)\n\n if wsgi_server:\n wsgi_server.stop()", "def start_shutdown(self):\n self._socket.sendall(process_manager.ProcessManager.SHUTDOWN)", "def shutdown():\n os.system(\"sudo shutdown now\")", "async def quit(self):\n await self.kill_server()\n await self.logout()", "def shutdown():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><shutdown><system></system></shutdown></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def shutdown(self):\n self.action('shutdown')", "def shutdown():\n\n # Earlier versions of traffic_ctl do not support\n # \"server stop\", so we prefer traffic_line here.\n if _TRAFFICLINE:\n cmd = _traffic_line(\"-S\")\n else:\n cmd = _traffic_ctl(\"server\", \"stop\")\n\n _subprocess(cmd)\n return _statuscmd()", "def shutdown(self):\n self.sock.close()", "async def shutdown_gracefully(self) -> None:", "async def shutdown_gracefully(self) -> None:", "def stop(self):\n shutdown_url = self._env[\"DATASTORE_HOST\"] + \"/shutdown\"\n req = urllib.request.Request(shutdown_url, method=\"POST\")\n urllib.request.urlopen(req)", "def close(self):\n self._server.shutdown()\n self._server = None", "def shutdown(self):\n TCPServer.shutdown(self)\n self.server_close()\n self.ae._servers.remove(self)", "def shutdown(self):\n self.socket_thread.stop()", "def shutdown(self) -> None:\n if self.is_alive():\n self.terminate()\n else:\n logger.warning(\"DHT shutdown 
has no effect: dht process is already not alive\")", "def on_exit(self, event):\n # Close server\n if hasattr(self, 'webapp'):\n requests.get(ROOT_URL + '/shutdown')\n self.webapp = None\n\n # Close app\n sys.exit()", "def stop(self):\n LOG.info(_(\"Stopping WSGI server.\"))\n\n if self._server is not None:\n # Resize pool to stop new requests from being processed\n self._pool.resize(0)\n self._server.kill()", "def shutdown(self):\n self.thread.server.shutdown()\n self.thread.join()", "def shutdown(self):\n debug_print('Stopping JSONRPCTCPServer thread...')\n self.__rpc_server.shutdown()", "def shutdown(self, signum, frame):\n self.log('WARNING', -1, 'Shutting down normally ...')\n main_thread = threading.current_thread()\n\n for t in threading.enumerate():\n if t is main_thread:\n continue\n t.join()\n self.server_socket.close()\n sys.exit(0)", "def shutdown(self) -> None:", "def shutdown(self) -> None:", "def terminate(self):\n self.send_signal(signal.SIGTERM)", "async def shutdown(self) -> int:", "def shutdown(self):\n\n pass", "def _shutdown(self):\n self.control_socket.send(zmqmessage.IPC_END)\n self.end_threads = True\n self.timeout = 1", "def shutdown(self):\n self._shutdown(None, None)\n self._running = False", "def shutdown(self):\n self._state = State.SHUTDOWN\n\n self._send_fin()\n self._cancel_ack_timeout()\n self._attempt_disabling_looping_send(force=True)\n self._attempt_disabling_looping_receive()\n self._clear_sending_window()\n\n self.handler.handle_shutdown()", "def stop_server(request):\n def stop_callback():\n global process\n process.terminate()\n request.addfinalizer(stop_callback)", "def kill(self):\r\n\r\n endpoint = self._get_nailgun_endpoint()\r\n if endpoint:\r\n self._log_kill(endpoint.pid, endpoint.port)\r\n try:\r\n os.kill(endpoint.pid, 9)\r\n except OSError:\r\n pass", "def request_shutdown(self, kernel_id, restart=False):", "def webserver_stop():\n run(\"kill $(cat %s)\" % GUNICORN_PIDFILE)\n run(\"rm %s\" % GUNICORN_PIDFILE)", "def sigint_handler(sig, frame):\n print(\"[i] Caught SIGINT, cleaning up...\")\n server.close()\n exit(0)", "def shutdown(self):\t\r\n\t\tself.is_running = False\r\n\t\tfor connection in self.established_connection_list:\r\n\t\t\tconnection.send('The server has been shutdown adruptly by the server owner.\\n')\r\n\t\t\tconnection.socket_send()", "def stop(self) -> None:\n # Call the server shutdown functions and wait for them to finish. 
These\n # must be called on the server thread's event loop.\n future = asyncio.run_coroutine_threadsafe(self._stop(), self._server_loop)\n future.result(5)\n\n # Stop the server thread's event loop\n self._server_loop.call_soon_threadsafe(self._server_loop.stop)", "def terminate(self):\n self._stop_proc(signal.SIGTERM)", "def shutdown(self) -> None:\n pass", "def shutdown(self):\n pass", "def shutdown(self):\n pass", "def shutdown(self):\n pass", "def shutdown(self):\n self.shutdown_requested = True", "def stop():\n global server_handle\n server_handle.kill()\n server_handle = None", "def terminate(self):\n if self.proc:\n logging.info(\"Terminating Proxy Server...\")\n self.proc.terminate()\n self.proc = None", "def request_shutdown(self, restart=False):", "def kill(self):\n if self.server:\n try:\n self.server.exit = True\n self.server.p.kill()\n self.server.p.wait()\n except OSError:\n pass\n self.dead = True", "def sighandler(self, signum, frame):\n print('Shutting down server...')\n\n # Close existing client sockets\n for output in self.outputs:\n output.close()\n\n self.server.close()", "def force_stop(self):\n\n # Stopping thread\n self.quit()\n\n # Killing all running processes\n ProcessManager(self.cf_process).close_all_child()\n ProcessManager(self.server_process).close_all_child()", "def shutdown(self):\n self._msg_disp.abort()\n self._conn_mgr.shutdown_connections()", "def exit(self):\n self.tcp_server_exit_event.set()\n for _, process in self.name_to_process.items():\n process.terminate()", "def kill(self):\n self._stop_proc(signal.SIGKILL)", "def shutdown():\n return subprocess.run([\"powershell.exe\", \"-Command\", \"Stop-Computer\", \"-ComputerName\", \"localhost\"], shell=True, universal_newlines=True, check=False).returncode", "def Shutdown(self):\n self.conn.send(False)\n self.process.join()", "def stop():\n\n tidyUp()\n shutdown_server()\n return \"Stopping server\"", "def stop():\n\n tidyUp()\n shutdown_server()\n return \"Stopping server\"", "def shutdown_imagenode(self):\n multiprocessing.Process(daemon=True,\n args=((self.pid,)),\n target=self.shutdown_process_by_pid).start()\n sys.exit()", "def shutdown(self):\r\n self._update('shutdown')\r\n self.supervisord.options.mood = SupervisorStates.SHUTDOWN\r\n return True", "def shutdown(self):\n try:\n if self.working and self.exiting.acquire():\n self.bot('shutting down...')\n self.working = False\n self._handleEvent(self.getEvent('EVT_STOP'))\n if self._cron:\n self._cron.stop()\n self.bot('shutting down database connections...')\n self.storage.shutdown()\n except Exception, e:\n self.error(e)", "def shutdown(self):\n ...", "def shutdown(self):\n self.connected = False\n self.protocol.send_message(self.sock, '__!shutdown__')\n data = self.protocol.recover_message(self.sock)\n self.sock.close()\n self.sock = None", "def shutdown(self):\n\t\tCORE.info('The session is shutting down. 
Sending UMC modules an EXIT request (%d processes)' % len(self.__processes))\n\t\tfor module_name, process in self.__processes.items():\n\t\t\tCORE.info('Ask module %s to shutdown gracefully' % (module_name,))\n\t\t\treq = Request('EXIT', arguments=[module_name, 'internal'])\n\t\t\tprocess.request(req)", "def shutdown(signum, frame): # pragma: no cover\n logging.info(\"Shutting down\")\n sys.exit(0)", "def shutdown(self):\n\n if not self.proc:\n return\n try:\n if self.proc.poll() is None:\n kill(self.proc)\n for attempt in range(5):\n if self.proc.poll() is not None:\n return\n LOG.info('Waiting %dth for PID %d to exit...'\n % (5 - attempt, self.proc.pid))\n time.sleep(1)\n kill(self.proc, signal.SIGKILL)\n self.proc.wait()\n except:\n LOG.exception('ignoring uncaught exception while shutting down')", "def kill(self):\n if self.transport.pid is not None:\n self.transport.signalProcess('KILL')", "def do_exit(self, args):\n logger.debug(\"do_exit() was called.\")\n \n raise Exception(\"Shutting server down.\")", "def shutdown(self, *args):\r\n return self._fd.shutdown(*args)", "def shutdown(self):\n\n raise NotImplementedError", "def shutdown (self, sig=None):\n pass\n #TODO: implement more realistic closing semantics", "def sigint_handler(*dummy):\n print \"Received SIGINT. Stopping everything.\"\n executor.Stop()\n server.Stop()" ]
[ "0.8043204", "0.7408491", "0.74051774", "0.73047185", "0.7299", "0.7294056", "0.72432417", "0.7183416", "0.7163862", "0.7163367", "0.7136405", "0.71052444", "0.70983547", "0.70959175", "0.70620507", "0.70323277", "0.70266837", "0.7002895", "0.6961565", "0.696054", "0.69446033", "0.69380605", "0.6931379", "0.68896073", "0.68697613", "0.686606", "0.6856233", "0.6852677", "0.6819714", "0.68126875", "0.6807071", "0.67933303", "0.6784231", "0.6783966", "0.6705279", "0.6702388", "0.6695432", "0.6691311", "0.668685", "0.66676617", "0.6658877", "0.6658877", "0.66436565", "0.66262937", "0.6605676", "0.659731", "0.6593623", "0.6574441", "0.6573272", "0.65687007", "0.65591687", "0.6540617", "0.65366423", "0.65366423", "0.6528906", "0.6528362", "0.6527647", "0.6524645", "0.6502932", "0.65028715", "0.6493768", "0.64910144", "0.6490553", "0.6483961", "0.64826953", "0.64794254", "0.6474942", "0.6473323", "0.6460823", "0.64569926", "0.64569926", "0.64569926", "0.6454476", "0.6447454", "0.6426823", "0.6422582", "0.6406753", "0.63992065", "0.63954586", "0.6393237", "0.6364188", "0.63627183", "0.6360996", "0.6353049", "0.6351411", "0.6351411", "0.633649", "0.6333527", "0.63242316", "0.63233626", "0.6312151", "0.6296625", "0.629613", "0.62886506", "0.62819475", "0.6272243", "0.6269025", "0.6263719", "0.6263698", "0.6262364" ]
0.81470776
0
Update the kernelspecs table.
def refresh_kernelspecs() -> None: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh_kernels() -> None:\n ...", "def modify_devices(self):\n\n for i in self._nodes.items():\n node = i[1]\n devices = node[\"devices\"]\n other_devices = devices[\"other_devices\"]\n kernel_devices = devices[\"kernel_devices\"]\n dpdk_devices = devices[\"dpdk_devices\"]\n\n if other_devices:\n self._modify_other_devices(\n node, other_devices, kernel_devices, dpdk_devices\n )\n\n # Get the devices again for this node\n self._get_device(node)\n devices = node[\"devices\"]\n kernel_devices = devices[\"kernel_devices\"]\n dpdk_devices = devices[\"dpdk_devices\"]\n\n klen = len(kernel_devices)\n if klen > 0:\n print(\"\\nThese devices are safe to be used with VPP.\\n\")\n VppPCIUtil.show_vpp_devices(kernel_devices)\n question = (\n \"\\nWould you like to use any of these \" \"device(s) for VPP [y/N]? \"\n )\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppd = {}\n for dit in kernel_devices.items():\n dvid = dit[0]\n device = dit[1]\n question = \"Would you like to use device {} \".format(dvid)\n question += \"for VPP [y/N]? \"\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppd[dvid] = device\n for dit in vppd.items():\n dvid = dit[0]\n device = dit[1]\n if (\n \"unused\" in device\n and len(device[\"unused\"]) != 0\n and device[\"unused\"][0] != \"\"\n ):\n driver = device[\"unused\"][0]\n question = \"Would you like to bind the driver {} for {} [y/N]? \".format(\n driver, dvid\n )\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n logging.debug(\n \"Binding device {} to driver {}\".format(\n dvid, driver\n )\n )\n ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)\n if ret:\n logging.debug(\n \"Could not bind device {}\".format(dvid)\n )\n dpdk_devices[dvid] = device\n del kernel_devices[dvid]\n\n dlen = len(dpdk_devices)\n if dlen > 0:\n print(\"\\nThese device(s) are already using DPDK.\\n\")\n VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)\n question = \"\\nWould you like to remove any of \"\n question += \"these device(s) [y/N]? \"\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppdl = {}\n for dit in dpdk_devices.items():\n dvid = dit[0]\n device = dit[1]\n question = \"Would you like to remove {} [y/N]? 
\".format(dvid)\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppdl[dvid] = device\n for dit in vppdl.items():\n dvid = dit[0]\n device = dit[1]\n if (\n \"unused\" in device\n and len(device[\"unused\"]) != 0\n and device[\"unused\"][0] != \"\"\n ):\n driver = device[\"unused\"][0]\n logging.debug(\n \"Binding device {} to driver {}\".format(dvid, driver)\n )\n ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)\n if ret:\n logging.debug(\"Could not bind device {}\".format(dvid))\n else:\n kernel_devices[dvid] = device\n del dpdk_devices[dvid]\n\n interfaces = {}\n for dit in dpdk_devices.items():\n dvid = dit[0]\n device = dit[1]\n VppPCIUtil.vpp_create_interface(interfaces, dvid, device)\n node[\"interfaces\"] = interfaces\n\n self._update_auto_config()\n self.updateconfig()", "def update_system_versions(self):\n #system_versions = [SystemVersion(id=-1 ,type=u'QX100',desc=u'Unknown Hardware version'),\n # SystemVersion(id=0 ,type=u'QX100',desc=u'QX100 - HW Rev A/B'),\n system_versions = [SystemVersion(id=1 ,type=u'QX100', desc=u'QX100 - HW Rev A/B bigger detector cap differences'),\n SystemVersion(id=2 ,type=u'QX100', desc=u'QX100 - HW Rev C'),\n SystemVersion(id=3 ,type=u'QX150', desc=u'QX150 - HW Rev Z Upgrade'),\n SystemVersion(id=4 ,type=u'QX200', desc=u'QX200 - HW Rev Z'),\n SystemVersion(id=5 ,type=u'QX201', desc=u'QX200 - HW with BR built Detector'),\n\t\t\t SystemVersion(id=6 ,type=u'QX150L', desc=u'QX150 - HW Rev Z Upgrade with LED'),\n SystemVersion(id=7 ,type=u'QX201L', desc=u'QX201 - HW with BR built LED Detector'),\n SystemVersion(id=200,type=u'QX200', desc=u'QX200 - Pre-Beta HW')]\n for sv in system_versions:\n dbsv = Session.query(SystemVersion).filter_by(id=sv.id).first()\n if not dbsv:\n Session.add(sv)\n else:\n if (dbsv.type != sv.type):\n dbsv.type = sv.type\n if( dbsv.desc != sv.desc):\n dbsv.desc = sv.desc\n\n Session.commit()", "def test_update_software_components_for_system_module(self):\n pass", "def update_device(self, dev_dict):\n # Note(jprabh1x): added bus,slot,function into fields dict as \n # seperate fields.\n no_changes = ('status', 'instance_uuid', 'id', 'extra_info', 'workload')\n map(lambda x: dev_dict.pop(x, None),\n [key for key in no_changes])\n\n # Note(jprabh1x): populating values for bus,slot,function from address in dev_dict.\n if dev_dict.has_key(\"address\"):\n \t\taddress = pci_utils.parse_address(dev_dict[\"address\"])\n \t\tdev_dict.update({'bus':str(address[1]), 'slot':str(address[2]), 'function':str(address[3])})\n for k, v in dev_dict.items():\n if k in self.fields.keys():\n self[k] = v\n else:\n extra_info = self.extra_info\n extra_info.update({k: str(v)})\n self.extra_info = extra_info", "def test_list_drives_drive_firmware_update(self):\n pass", "def command_update_hw(self, cmd):\n # TODO\n pass", "def test_update_pci_device(self):\n pass", "def test_update_software_component_for_system_module(self):\n pass", "def update_many(self, isystem_uuid, patch):\n\n if self._from_isystems and not isystem_uuid:\n raise exception.InvalidParameterValue(_(\n \"System id not specified.\"))\n\n # Validate if there are pending updates on the controllers lvg\n controller_hosts = pecan.request.dbapi.ihost_get_by_personality(\n constants.CONTROLLER\n )\n\n controllers_lvg_updated = True\n for host in controller_hosts:\n host_fs_list = pecan.request.dbapi.host_fs_get_by_ihost(host.uuid)\n host_lvg_list = pecan.request.dbapi.ilvg_get_by_ihost(host.uuid)\n controllers_lvg_updated = controllers_lvg_updated and \\\n 
utils.is_host_lvg_updated(host_fs_list, host_lvg_list)\n\n # Validate input filesystem names\n controller_fs_list = pecan.request.dbapi.controller_fs_get_list()\n valid_fs_list = []\n if controller_fs_list:\n valid_fs_list = {fs.name: fs.size for fs in controller_fs_list}\n\n reinstall_required = False\n reboot_required = False\n modified_fs = []\n update_fs_list = []\n for p_list in patch:\n p_obj_list = jsonpatch.JsonPatch(p_list)\n for p_obj in p_obj_list:\n if p_obj['path'] == '/name':\n fs_name = p_obj['value']\n if fs_name in update_fs_list:\n msg = _(\"Duplicate fs_name \"\n \"'%s' in parameter list\" % fs_name)\n raise wsme.exc.ClientSideError(msg)\n else:\n update_fs_list.append(fs_name)\n elif p_obj['path'] == '/size':\n size = p_obj['value']\n\n if fs_name not in valid_fs_list.keys():\n msg = _(\"ControllerFs update failed: invalid filesystem \"\n \"'%s' \" % fs_name)\n raise wsme.exc.ClientSideError(msg)\n elif not cutils.is_int_like(size):\n msg = _(\"ControllerFs update failed: filesystem '%s' \"\n \"size must be an integer \" % fs_name)\n raise wsme.exc.ClientSideError(msg)\n elif int(size) <= int(valid_fs_list[fs_name]):\n msg = _(\"ControllerFs update failed: size for filesystem '%s' \"\n \"should be bigger than %s \" % (fs_name, valid_fs_list[fs_name]))\n raise wsme.exc.ClientSideError(msg)\n elif not controllers_lvg_updated:\n msg = _(\"ControllerFs update failed: controllers have pending LVG \"\n \"updates, please retry again later.\")\n raise wsme.exc.ClientSideError(msg)\n\n if fs_name in constants.SUPPORTED_REPLICATED_FILEYSTEM_LIST:\n if utils.is_drbd_fs_resizing():\n raise wsme.exc.ClientSideError(\n _(\"A drbd sync operation is currently in progress. \"\n \"Retry again later.\")\n )\n\n modified_fs += [fs_name]\n\n controller_fs_list_new = []\n for fs in controller_fs_list:\n replaced = False\n for p_list in patch:\n p_obj_list = jsonpatch.JsonPatch(p_list)\n for p_obj in p_obj_list:\n if p_obj['value'] == fs['name']:\n try:\n controller_fs_list_new += [ControllerFs(\n **jsonpatch.apply_patch(fs.as_dict(), p_obj_list))]\n replaced = True\n break\n except utils.JSONPATCH_EXCEPTIONS as e:\n raise exception.PatchError(patch=p_list, reason=e)\n if replaced:\n break\n if not replaced:\n controller_fs_list_new += [fs]\n\n cgtsvg_growth_gib = _check_controller_multi_fs_data(\n pecan.request.context,\n controller_fs_list_new)\n\n if _check_controller_state():\n _check_controller_multi_fs(controller_fs_list_new,\n cgtsvg_growth_gib=cgtsvg_growth_gib)\n for fs in controller_fs_list_new:\n if fs.name in modified_fs:\n value = {'size': fs.size}\n if fs.replicated:\n value.update({'state': constants.CONTROLLER_FS_RESIZING_IN_PROGRESS})\n pecan.request.dbapi.controller_fs_update(fs.uuid, value)\n\n try:\n # perform rpc to conductor to perform config apply\n pecan.request.rpcapi.update_storage_config(\n pecan.request.context,\n update_storage=False,\n reinstall_required=reinstall_required,\n reboot_required=reboot_required,\n filesystem_list=modified_fs\n )\n\n except Exception as e:\n msg = _(\"Failed to update filesystem size \")\n LOG.error(\"%s with patch %s with exception %s\" % (msg, patch, e))\n raise wsme.exc.ClientSideError(msg)", "def test_update_device(self):\n pass", "def test_update_device(self):\n pass", "def update(self):\n for component in self.components.values():\n try:\n component.update()\n except Exception as e:\n if self.ds.isFMSAttached():\n log.error(\"In subsystem %s: %s\" % (component, e))\n else:\n raise e", "def _update_device_types(self):\n 
device_types = self.adapter.device_types()\n for device_type in device_types.items:\n key = device_type.id\n self._make_up_to_date('/device_types', key, device_type)", "def test_update_software_configuration_for_system_module(self):\n pass", "def test_update_hyperflex_server_firmware_version(self):\n pass", "def test_patch_hyperflex_server_firmware_version(self):\n pass", "def update_firmware(self) -> str:", "def test_patch_pci_device(self):\n pass", "def update_firmware(self):\n self.execute_command(CMD_UPDATE_FIRMWARE)", "def test_update_hyperflex_capability_info(self):\n pass", "def visit_table(self, sytable):\n self.current.update(sytable)", "def visit_table(self, sytable):\n self.current.update(sytable)", "def update(self):\n self.device = self._api.device_query(self._hardware_address, {})", "def test_update_bios_unit(self):\n pass", "def update(self):\n self.platform_list.update()\n self.enemy_list.update()", "def test_update_bios_boot_mode(self):\n pass", "async def _async_udev_events(self, kernel: pyudev.Device):\n # Update device List\n if not kernel.device_node or self.sys_hardware.helper.hide_virtual_device(\n kernel\n ):\n return\n\n hw_action: HardwareAction | None = None\n device: Device | None = None\n\n ##\n # Remove\n if kernel.action == UdevKernelAction.REMOVE:\n try:\n device = self.sys_hardware.get_by_path(Path(kernel.sys_path))\n except HardwareNotFound:\n return\n else:\n self.sys_hardware.delete_device(device)\n hw_action = HardwareAction.REMOVE\n\n ##\n # Add\n if kernel.action in (UdevKernelAction.ADD, UdevKernelAction.CHANGE):\n # We get pure Kernel events only inside container.\n # But udev itself need also time to initialize the device\n # before we can use it correctly\n udev = None\n for _ in range(3):\n await asyncio.sleep(2)\n try:\n udev = pyudev.Devices.from_sys_path(self.context, kernel.sys_path)\n except pyudev.DeviceNotFoundAtPathError:\n continue\n if udev.is_initialized:\n break\n\n # Is not ready\n if not udev:\n _LOGGER.warning(\n \"Ignore device %s / failes to initialize by udev\", kernel.sys_path\n )\n return\n\n device = Device.import_udev(udev)\n self.sys_hardware.update_device(device)\n\n # If it's a new device - process actions\n if kernel.action == UdevKernelAction.ADD:\n hw_action = HardwareAction.ADD\n\n # Ignore event for future processing\n if device is None or hw_action is None:\n return\n _LOGGER.info(\n \"Detecting %s hardware %s - %s\", hw_action, device.path, device.by_id\n )\n\n # Fire Hardware event to bus\n if hw_action == HardwareAction.ADD:\n self.sys_bus.fire_event(BusEvent.HARDWARE_NEW_DEVICE, device)\n elif hw_action == HardwareAction.REMOVE:\n self.sys_bus.fire_event(BusEvent.HARDWARE_REMOVE_DEVICE, device)", "def test_update_device_template(self):\n pass", "async def container_specs(self, event):\n await self.send(text_data=event['specs'])", "def __update_table(self):\n\n headlines = [\"\", ]\n headlines += range(1, + 1)\n headlines = [\" \"] + [str(x) for x in range(1, self.find_table_length() + 1)]\n self.__main_display_table.config(columns=headlines)\n\n for headline in headlines:\n self.__main_display_table.heading(headline, text=headline)\n self.__main_display_table.column(headline, anchor=\"center\", width=35)\n\n data = self.__display_buses_location()\n\n for i in self.__main_display_table.get_children():\n # deletes all the data in the chart\n self.__main_display_table.delete(i)\n for line in data:\n # inserts new data into the chart, goes line by line\n self.__main_display_table.insert(\"\", END, values=line)", 
"def update_spec(self, spec):\n if not isinstance(spec, dict):\n return spec\n\n for spec_name, spec_value in spec.iteritems():\n # Ansible returns numbers as unicode\n try:\n spec[spec_name] = int(spec_value)\n except (ValueError,TypeError):\n try:\n spec[spec_name] = float(spec_value)\n except (ValueError,TypeError):\n # Ansible returns bool as unicode\n try:\n if (spec_value == \"True\") or (spec_value == \"False\"):\n spec[spec_name] = ast.literal_eval(spec_value)\n except (ValueError,TypeError):\n pass\n\n # recursively update the values\n if spec_name == 'ManagedObjectReference':\n try:\n return self.get_container_view([getattr(vim, spec_value['type'])],\n spec_value['name'],\n limit = spec_value.get('limit', None))\n except IndexError:\n self.module.fail_json(msg = 'Failed to find %s within %s'\n %(spec_value['name'], spec_value.get('limit', 'root')))\n if isinstance(spec_value, dict):\n spec[spec_name] = self.update_spec(spec_value)\n if isinstance(spec_value, list):\n spec[spec_name] = [ self.update_spec(v) for v in spec_value ]\n if hasattr(vim, spec_name) and isinstance(spec_value, dict):\n try:\n return getattr(vim, spec_name)(**spec_value)\n except AttributeError:\n pass\n return spec", "def firmware_pack_modify(handle, org_name, name, rack_bundle_version=None,\n blade_bundle_version=None, descr=None, mode=None,\n org_parent=\"org-root\"):\n\n org_dn = org_parent + \"/org-\" + org_name\n fw_dn= org_dn + \"/fw-host-pack-\" + name\n mo = handle.query_dn(fw_dn)\n if mo is not None:\n if rack_bundle_version is not None:\n mo.rack_bundle_version = rack_bundle_version\n if blade_bundle_version is not None:\n mo.blade_bundle_version = blade_bundle_version\n if mode is not None:\n mo.mode=mode\n if descr is not None:\n mo.descr = descr\n\n handle.set_mo(mo)\n handle.commit()\n else:\n log.info(\"Firmware host pack <%s> not found.\" % name)", "def _update_volume_stats(self):\n\n LOG.debug('SPDK Updating volume stats')\n status = {'volume_backend_name': 'SPDK',\n 'vendor_name': 'Open Source',\n 'driver_version': self.VERSION,\n 'storage_protocol': constants.NVMEOF}\n pools_status = []\n self.lvs = []\n\n output = self._rpc_call('bdev_lvol_get_lvstores')\n if output:\n for lvs in output:\n pool = {}\n lvs_entry = {}\n free_size = (lvs['free_clusters']\n * lvs['cluster_size']\n / units.Gi)\n total_size = (lvs['total_data_clusters']\n * lvs['cluster_size']\n / units.Gi)\n pool[\"volume_backend_name\"] = 'SPDK'\n pool[\"vendor_name\"] = 'Open Source'\n pool[\"driver_version\"] = self.VERSION\n pool[\"storage_protocol\"] = constants.NVMEOF\n pool[\"total_capacity_gb\"] = total_size\n pool[\"free_capacity_gb\"] = free_size\n pool[\"pool_name\"] = lvs['name']\n pools_status.append(pool)\n\n lvs_entry['name'] = lvs['name']\n lvs_entry['uuid'] = lvs['uuid']\n lvs_entry['free_size'] = free_size\n lvs_entry['total_size'] = total_size\n self.lvs.append(lvs_entry)\n\n status['pools'] = pools_status\n self._stats = status\n\n for lvs in self.lvs:\n LOG.debug('SPDK lvs name: %s, total space: %s, free space: %s',\n lvs['name'],\n lvs['total_size'],\n lvs['free_size'])", "def test_partially_update_device_by_id1(self):\n pass", "def update(self):\n self.platform_list.update()\n self.exit_sprite.update()\n self.bagGroup.update()\n self.enemy_list.update()", "def callUpdateTable(self):\r\n self.updateTable()", "def update_drivers(self, driver, where=None):\n rowcount = 0\n if driver is not None:\n self.update_generic_data(driver, TABLE_NAME_DRIVERS, where)\n # done\n return rowcount", "async def 
on_symbol_specification_updated(self, specification: MetatraderSymbolSpecification):\n for i in range(len(self._specifications)):\n if self._specifications[i]['symbol'] == specification['symbol']:\n self._specifications[i] = specification\n break\n else:\n self._specifications.append(specification)\n self._specificationsBySymbol[specification['symbol']] = specification", "def upgrade_kernel():\n execute(\"upgrade_kernel_node\", env.host_string)", "def test_update_pci_switch(self):\n pass", "def refresh_table(self):\n self._table['bounty_column'] = Driver.instance.find_elements(*self._selectors['bounty_column'])\n self._table['first_name_column'] = Driver.instance.find_elements(*self._selectors['first_name_column'])\n self._table['last_name_column'] = Driver.instance.find_elements(*self._selectors['last_name_column'])\n self._table['edit_column'] = Driver.instance.find_elements(*self._selectors['edit_column'])\n self._table['details_column'] = Driver.instance.find_elements(*self._selectors['details_column'])\n self._table['delete_column'] = Driver.instance.find_elements(*self._selectors['delete_column'])", "def update_device_pool(arn=None, name=None, description=None, rules=None):\n pass", "def kernel_version(self, kernel_version):\n\n self._kernel_version = kernel_version", "def updateGrid(self) -> None:\n emu = self.emulator\n arch = self.root.arch\n registers = arch.registers\n self.__values.setRowCount(len(registers))\n for i, reg in enumerate(registers):\n self.__values.setRowHeight(i, self.__row_size)\n name = QTableWidgetItem(reg)\n name.setFlags(Qt.NoItemFlags)\n val = emu.get_register_value(reg) if emu.vm else 0\n old_val = self.__old_register_values.get(reg, 0)\n if type(val) in (int, int):\n value = format_address(val, arch)\n else:\n value = str(val)\n value = QTableWidgetItem( value )\n if old_val != val:\n self.__old_register_values[reg] = val\n value.setForeground(QColor(Qt.red))\n value.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable)\n self.__values.setItem(i, 0, name)\n self.__values.setItem(i, 1, value)\n return", "def test_patch_bios_boot_mode(self):\n pass", "def _update_device_registry(self):\n try:\n if not self._flag_updating_deviceregistry:\n _log.debug(\"Updating device registry\")\n self._flag_updating_deviceregistry = True\n self._sync_connected_platforms()\n unreachable = []\n # Loop over the connections to the registered agent platforms.\n for k, v in self._platform_connections.items():\n _log.debug('updating for {}'.format(k))\n # Only attempt update if we have a connection to the\n # agent instance.\n if v is not None:\n try:\n devices = v.agent.vip.rpc.call(\n VOLTTRON_CENTRAL_PLATFORM,\n 'get_devices').get(timeout=30)\n\n anon_devices = defaultdict(dict)\n\n # for each device returned from the query to\n # get_devices we need to anonymize the k1 in the\n # anon_devices dictionary.\n for k1, v1 in devices.items():\n _log.debug(\n \"before anon: {}, {}\".format(k1, v1))\n # now we need to do a search/replace on the\n # self._topic_list so that the devices are\n # known as the correct itme nin the tree.\n anon_topic = self._topic_replace_map[k1]\n\n # if replaced has not already been replaced\n if not anon_topic:\n anon_topic = k1\n for sr in self._topic_replace_list:\n anon_topic = anon_topic.replace(\n sr['from'], sr['to'])\n\n self._topic_replace_map[k1] = anon_topic\n\n anon_devices[anon_topic] = v1\n\n _log.debug('Anon devices are: {}'.format(\n anon_devices))\n\n self._registry.update_devices(k, anon_devices)\n except (gevent.Timeout, 
Unreachable) as e:\n _log.error(\n 'Error getting devices from platform {}'\n .format(k))\n unreachable.append(k)\n for k in unreachable:\n if self._platform_connections[k]:\n self._platform_connections[k].disconnect()\n del self._platform_connections[k]\n\n finally:\n self._flag_updating_deviceregistry = False", "async def sandbox_specs(self, event):\n await self.send(text_data=event['specs'])", "def update_vluln_table():", "def _modify_other_devices(self, node, other_devices, kernel_devices, dpdk_devices):\n\n odevices_len = len(other_devices)\n if odevices_len > 0:\n print(\n \"\\nThese device(s) are currently NOT being used \" \"by VPP or the OS.\\n\"\n )\n VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)\n question = \"\\nWould you like to give any of these devices\"\n question += \" back to the OS [Y/n]? \"\n answer = self._ask_user_yn(question, \"Y\")\n if answer == \"y\":\n vppd = {}\n for dit in other_devices.items():\n dvid = dit[0]\n device = dit[1]\n question = \"Would you like to use device {} for\".format(dvid)\n question += \" the OS [y/N]? \"\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n if (\n \"unused\" in device\n and len(device[\"unused\"]) != 0\n and device[\"unused\"][0] != \"\"\n ):\n driver = device[\"unused\"][0]\n ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)\n if ret:\n logging.debug(\"Could not bind device {}\".format(dvid))\n else:\n vppd[dvid] = device\n for dit in vppd.items():\n dvid = dit[0]\n device = dit[1]\n kernel_devices[dvid] = device\n del other_devices[dvid]\n\n odevices_len = len(other_devices)\n if odevices_len > 0:\n print(\"\\nThese device(s) are still NOT being used \" \"by VPP or the OS.\\n\")\n VppPCIUtil.show_vpp_devices(other_devices, show_interfaces=False)\n question = \"\\nWould you like use any of these for VPP [y/N]? \"\n answer = self._ask_user_yn(question, \"N\")\n if answer == \"y\":\n vppd = {}\n for dit in other_devices.items():\n dvid = dit[0]\n device = dit[1]\n question = \"Would you like to use device {} \".format(dvid)\n question += \"for VPP [y/N]? 
\"\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppd[dvid] = device\n for dit in vppd.items():\n dvid = dit[0]\n device = dit[1]\n if (\n \"unused\" in device\n and len(device[\"unused\"]) != 0\n and device[\"unused\"][0] != \"\"\n ):\n driver = device[\"unused\"][0]\n logging.debug(\n \"Binding device {} to driver {}\".format(dvid, driver)\n )\n ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)\n if ret:\n logging.debug(\"Could not bind device {}\".format(dvid))\n else:\n dpdk_devices[dvid] = device\n del other_devices[dvid]", "def test_partially_update_device_by_id(self):\n pass", "def test_create_drives_drive_firmware_update_item(self):\n pass", "def update_dev(self, *args):\r\n try:\r\n self.localSDK.get_version()\r\n except IOError:\r\n kT.debug_log('IO Error', sys.exc_info()[2])\r\n try:\r\n self.newProj.name = self.widgetList[3].get()\r\n except IndexError:\r\n kT.debug_log('Index Error', sys.exc_info()[2])\r\n self.newProj.setKsdkPath(self.localSDK.path)\r\n self.newProj.sdkVer = self.localSDK.version\r\n\r\n if self.advancedDevType.get():\r\n\r\n self.widgetList[34].state([\"!disabled\"])\r\n\r\n ### Widget 7 is the label for the device drop down menu\r\n self.widgetList[7].config(text='Board:')\r\n\r\n try:\r\n self.widgetList[31].config(command=lambda: self.begin_advanced_gen(self.master, None))\r\n except TclError:\r\n kT.debug_log('Tcl Error', sys.exc_info()[2])\r\n\r\n ### Widget 8 is te drop down menu for the devices\r\n self.widgetList[8].config(textvariable=self.advBrdSelect)\r\n self.widgetList[8]['values'] = self.localSDK.brdList\r\n try:\r\n self.widgetList[8].current(int(self.currBoard) - 1)\r\n except IOError: ## Catch the case where the user hasn't selected anything\r\n self.widgetList[8].current(0)\r\n except ValueError: ## Catch the case where there is no device given in manifest\r\n self.widgetList[8].current(0)\r\n else:\r\n try:\r\n self.widgetList[34].state([\"disabled\"])\r\n\r\n ### Widget 7 is the label for the device drop down menu\r\n self.widgetList[7].config(text='Device:')\r\n\r\n self.widgetList[31].config(command=lambda: self.package_select(self.master))\r\n\r\n ### Widget 8 is te drop down menu for the devices\r\n self.widgetList[8].config(textvariable=self.advDevSelect)\r\n self.widgetList[8]['values'] = self.localSDK.devList\r\n except IndexError:\r\n kT.debug_log('IndexError', sys.exc_info()[2])\r\n\r\n try:\r\n self.newProj.add_board(self.currBoard, self.localSDK.brdList)\r\n self.widgetList[8].current(self.localSDK.devList.index(self.newProj.device[0]))\r\n except IndexError:\r\n kT.debug_log('IndexError', sys.exc_info()[2])\r\n except IOError: ## Catch the case where the user hasn't selected anything\r\n try:\r\n self.widgetList[8].current(0)\r\n except IndexError:\r\n kT.debug_log('IndexError', sys.exc_info()[2])\r\n except ValueError: ## Catch the case where there is no device given in manifest\r\n try:\r\n self.widgetList[8].current(0)\r\n except IndexError:\r\n kT.debug_log('Index Error', sys.exc_info()[2])\r\n \r\n #FIXME Radka add special method for updating path \r\n self._update_project_path()", "def update(self):\n try:\n if not self._sysinfo:\n self._sysinfo = self.smartplug.sys_info\n self._mac = self.smartplug.mac\n self._model = self.smartplug.model\n if self.smartplug.context is None:\n self._alias = self.smartplug.alias\n self._device_id = self._mac\n else:\n self._alias = self._plug_from_context[\"alias\"]\n self._device_id = self.smartplug.context\n\n if self.smartplug.context is None:\n self._state 
= self.smartplug.state == self.smartplug.SWITCH_STATE_ON\n else:\n self._state = self._plug_from_context[\"state\"] == 1\n\n if self.smartplug.has_emeter:\n emeter_readings = self.smartplug.get_emeter_realtime()\n\n self._emeter_params[ATTR_CURRENT_POWER_W] = \"{:.2f}\".format(\n emeter_readings[\"power\"]\n )\n self._emeter_params[ATTR_TOTAL_ENERGY_KWH] = \"{:.3f}\".format(\n emeter_readings[\"total\"]\n )\n self._emeter_params[ATTR_VOLTAGE] = \"{:.1f}\".format(\n emeter_readings[\"voltage\"]\n )\n self._emeter_params[ATTR_CURRENT_A] = \"{:.2f}\".format(\n emeter_readings[\"current\"]\n )\n\n emeter_statics = self.smartplug.get_emeter_daily()\n try:\n self._emeter_params[ATTR_TODAY_ENERGY_KWH] = \"{:.3f}\".format(\n emeter_statics[int(time.strftime(\"%e\"))]\n )\n except KeyError:\n # Device returned no daily history\n pass\n\n self._available = True\n\n except (SmartDeviceException, OSError) as ex:\n if self._available:\n _LOGGER.warning(\n \"Could not read state for %s: %s\", self.smartplug.host, ex\n )\n self._available = False", "def update_host(hostname, cpu_mhz, cpu_cores, ram):\n return update_host(hostname, cpu_mhz, cpu_cores, ram)", "def _update_spec_headers(self, which_spec: str):\n # This function is meant for internal use only, so I won't check that the passed-in file paths\n # actually exist. This will have been checked already\n if which_spec == \"main\" and self.usable:\n # Currently having to use astropy's fits interface, I don't really want to because of risk of segfaults\n with fits.open(self._path, mode='update') as spec_fits:\n spec_fits[\"SPECTRUM\"].header[\"RESPFILE\"] = self._rmf\n spec_fits[\"SPECTRUM\"].header[\"ANCRFILE\"] = self._arf\n spec_fits[\"SPECTRUM\"].header[\"BACKFILE\"] = self._back_spec\n\n elif which_spec == \"back\" and self.usable:\n with fits.open(self._back_spec, mode='update') as spec_fits:\n if self._back_rmf is not None:\n spec_fits[\"SPECTRUM\"].header[\"RESPFILE\"] = self._back_rmf\n if self._back_arf is not None:\n spec_fits[\"SPECTRUM\"].header[\"ANCRFILE\"] = self._back_arf", "def update(self):\n _LOGGER.debug(\"Updating Warmup devices\")\n self._warmup.update_all_devices()", "def _update_deploy_specs(self):\n for cluster in self.CLUSTERS:\n deployspec_name = PushUtil.get_deployspec_name(cluster)\n QueueClusterConfigUpdates.update_deployspec(\n deployspec_name, cluster, self._release_name)", "def update(self, bsd):\n raise NotImplementedError()", "def test_patch_bios_unit(self):\n pass", "def testOldKernelMatchesNewKernelSpeciesList(self):\n species_list1 = self.tree1.get_species_list()\n species_list2 = self.tree2.get_species_list()\n for i, each in enumerate(species_list1):\n self.assertListEqual(list(each), list(species_list2[i]))", "def testOldKernelMatchesNewKernelSpeciesList(self):\n species_list1 = self.tree1.get_species_list()\n species_list2 = self.tree2.get_species_list()\n for i, each in enumerate(species_list1):\n self.assertListEqual(list(each), list(species_list2[i]))", "def test_update_system(self):\n pass", "def refresh(self):\n f = open(self._filepath, 'r')\n self._raw_sysfs_data = f.read()\n f.close()\n self._process_raw_data()", "def upgrade_kernel_node(*args):\n for host_string in args:\n with settings(host_string=host_string):\n dist, version, extra = get_linux_distro()\n print \"upgrading apparmor before upgrading kernel\"\n if version == '12.04':\n apt_install([\"apparmor\"])\n print \"Installing 3.13.0-34 kernel headers\"\n apt_install([\"linux-headers-3.13.0-34\"])\n 
apt_install([\"linux-headers-3.13.0-34-generic\"])\n print \"Upgrading the kernel to 3.13.0-34\"\n apt_install([\"linux-image-3.13.0-34-generic\"])\n elif version == '14.04':\n print \"Installing 3.13.0-40 kernel headers\"\n apt_install([\"linux-headers-3.13.0-40\",\n \"linux-headers-3.13.0-40-generic\"])\n print \"Upgrading the kernel to 3.13.0-40\"\n apt_install([\"linux-image-3.13.0-40-generic\",\n \"linux-image-extra-3.13.0-40-generic\"])", "def test_update_hyperflex_server_model(self):\n pass", "def fusion_api_edit_server_hardware(self, body, uri, api=None, headers=None):\n return self.sh.update(body, uri, api, headers)", "def _finalize_kernel_json(self, location):\n subs = self._get_substitutions(location)\n kernel_json_str = ''\n with open(os.path.join(location, KERNEL_JSON)) as f:\n for line in f:\n line = line.split('#', 1)[0]\n kernel_json_str = kernel_json_str + line\n f.close()\n post_subs = Template(kernel_json_str).safe_substitute(subs)\n kernel_json = json.loads(post_subs)\n\n # Instantiate default KernelSpec, then update with the substitutions. This allows for new fields\n # to be added that we might not yet know about.\n kernel_spec = KernelSpec().to_dict()\n kernel_spec.update(kernel_json)\n\n kernel_json_file = os.path.join(location, KERNEL_JSON)\n self.log.debug(\"Finalizing kernel json file for kernel: '{}'\".format(self.display_name))\n with open(kernel_json_file, 'w+') as f:\n json.dump(kernel_spec, f, indent=2)", "def update(self):\n current = LazyRegister(self.db)\n current.render()\n cur = self.db.cursor()\n for table in self.tables:\n if table in current.tables:\n additions, removals = current.tables[table].migrate(self.tables[table])\n for addition in additions:\n cur.execute(\"\"\"ALTER TABLE %s ADD COLUMN %s\"\"\" % (\n table, addition[1].get_sql()\n ))\n print('Added column: ', addition[0])\n for removal in removals:\n #cur.execute(\"\"\"ALTER TABLE %s DROP COLUMN %s\"\"\" % (\n # table, removal[0]\n #))\n #print('Removed column: ', removal[0])\n print('Did not removed column: ', removal[0])\n else:\n schema = self.tables[table].get_create_table_sql()\n cur.execute(schema)\n print('Added table %s' % table)", "def update(self):\n self.platform_list.update()\n #self.enemy_list.update()\n self.enemy_list.update()\n self.bullet_list.update()\n self.active_sprite_list.update()", "def test_update_hyperflex_sys_config_policy(self):\n pass", "def update_data(self):\n for sai_id_key in self.if_id_map:\n namespace, sai_id = mibs.split_sai_id_key(sai_id_key)\n if_idx = mibs.get_index_from_str(self.if_id_map[sai_id_key])\n counter_table = self.namespace_db_map[namespace].get_all(mibs.COUNTERS_DB, \\\n mibs.counter_table(sai_id))\n if counter_table is None:\n counter_table = {}\n self.if_counters[if_idx] = counter_table\n\n\n self.lag_name_if_name_map, \\\n self.if_name_lag_name_map, \\\n self.oid_lag_name_map, _, _ = Namespace.get_sync_d_from_all_namespace(mibs.init_sync_d_lag_tables, self.db_conn)\n\n self.if_range = sorted(list(self.oid_name_map.keys()) + list(self.oid_lag_name_map.keys()))\n self.if_range = [(i,) for i in self.if_range]", "def update(self):\n self.device.update()", "def update(self):\n self.device.update()", "def update_sections_table_data(self, new_data):\n\n self.update_sections_table.emit([], True)\n for entry in new_data:\n self.update_sections_table.emit(entry, False)", "def test_patch_hyperflex_capability_info(self):\n pass", "def update(self, price, dt):\n for name, feature in self._d_features.items():\n feature.update(price, dt)", "def 
disk_update(context, disk_id, values):\n return NotImplemented", "def _processSpec(self, spec):\n if isinstance(spec, list):\n for k in spec:\n if isinstance(k, Specifier):\n self._spec.append(k)\n else:\n raise NotAValidSpecifierError(str(type(k)))\n elif isinstance(spec, Specifier):\n self._spec.append(spec)\n else:\n # This point we need to go to the symboltable\n # and look for structs and unions.\n raise NotAValidSpecifierError(str(type(spec)))", "def refresh(self):\n # lattice\n specieList = self.rendererWindow.getCurrentInputState().specieList\n\n # store current so can try to reselect\n spec1CurrentText = str(self.spec1Combo.currentText())\n spec2CurrentText = str(self.spec2Combo.currentText())\n\n # clear and rebuild combo box\n self.spec1Combo.clear()\n self.spec2Combo.clear()\n\n self.spec1Combo.addItem(\"ALL\")\n self.spec2Combo.addItem(\"ALL\")\n\n count = 1\n match1 = False\n match2 = False\n for sym in specieList:\n self.spec1Combo.addItem(sym)\n self.spec2Combo.addItem(sym)\n\n if sym == spec1CurrentText:\n self.spec1Combo.setCurrentIndex(count)\n match1 = True\n\n if sym == spec2CurrentText:\n self.spec2Combo.setCurrentIndex(count)\n match2 = True\n\n count += 1\n\n if not match1:\n self.spec1Combo.setCurrentIndex(0)\n\n if not match2:\n self.spec2Combo.setCurrentIndex(0)", "def patch_table(cls, mount_table: ty.List[ty.Tuple[str, str]]):\n orig_table = cls._mount_table\n cls._mount_table = list(mount_table)\n try:\n yield\n finally:\n cls._mount_table = orig_table", "def test_update_device_by_id1(self):\n pass", "def update_sdcard_boot_commands(device):\n mount_dir = mkdtemp()\n\n boot_partition = device.partitions(full_paths=True)[0]\n\n mount_command = ['sudo', 'mount', boot_partition, mount_dir]\n\n print(f'Mounting SD Card partition {boot_partition} to temp directory {mount_dir}')\n interactive_console(mount_command)\n\n # Note- this sed command is what the target mounts will look like\n # I'm not messing with the blk_ids of our devices as we know them\n # here.\n\n sed_command = [\n 'sudo',\n 'sed',\n '-i',\n '-E',\n 's#root=[^ ]+#root=/dev/sda2#',\n os.path.join(mount_dir, 'cmdline.txt')]\n console(sed_command)\n sed_command = [\n 'sudo',\n 'sed',\n '-i',\n 's# init=/usr/lib/raspi-config/init_resize.sh##',\n os.path.join(mount_dir, 'cmdline.txt')]\n\n print('Modifying init command line')\n console(sed_command)\n\n print('Successfully modified! 
Unmounting.')\n umount_command = ['sudo', 'umount', mount_dir]\n interactive_console(umount_command)\n\n print('Cleaning up mounted dir')\n os.rmdir(mount_dir)", "def test_device_states_device_name_put(self):\n pass", "def update(self):\n ckresult(_dll.FMOD_System_Update(self._ptr))", "def patch_qemu(node):\n\n print('\\nWe are patching the node \"{}\":\\n'.format(node[\"host\"]))\n QemuUtils.build_qemu(node, force_install=True, apply_patch=True)", "def fillQuickList():\n global quickList\n cmd = \"/sbin/blkid\"\n proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)\n for line in proc.stdout:\n line = line.replace(':', '').strip()\n propList = line.split()\n devName = label = uuid = fsType = ''\n devName = propList[0]\n for property in propList:\n if property.startswith('UUID'):\n uuid = property.replace('UUID=', '').replace('\"', '')\n quickList[devName] = uuid", "def test_update_hyperflex_cluster(self):\n pass", "def _populate_table(self):\n self._table.setSortingEnabled(False)\n self._table.setRowCount(len(self._module_names))\n for i, module_name in enumerate(self._module_names, 0):\n self._table.setItem(i, 0, QtWidgets.QTableWidgetItem(module_name))\n self._table.resizeRowsToContents()\n self._table.setSortingEnabled(True)", "def update_sys_resource():\n\n cpu_cores = get_cpu_cores()\n logger.debug(\"starting top module\")\n cpu_usage = get_cpu_usage()\n mem_usage = get_mem_usage()\n df_usage = get_df_usage()\n logger.debug(\"round instrument data ready, next is top 5data\")\n fields = [\n 'check_time', 'cpu_usage', 'cpu_all', 'cpu_using', 'mem_usage',\n 'mem_all', 'mem_using', 'disk_usage', 'disk_all', 'disk_using',\n 'cpu_topN', 'mem_topN', 'disk_topN', 'net_in_topN', 'net_out_topN'\n ]\n # result = {}\n # result.fromkeys(field, None)\n result = {i: None for i in fields}\n result['check_time'] = int(time.time())\n result['cpu_all'] = cpu_cores\n result['cpu_usage'] = cpu_usage\n result['mem_all'], result['mem_using'] = mem_usage\n result['disk_all'], result['disk_using'] = df_usage\n try:\n result['mem_usage'] = result['mem_using'] / result['mem_all']\n except ZeroDivisionError:\n result['mem_usage'] = 0.0\n try:\n result['disk_usage'] = result['disk_using'] / result['disk_all']\n except ZeroDivisionError:\n result['disk_usage'] = 0.0\n result['cpu_topN'] = get_topN_cpu()\n net_topn_data = get_topN_netIO()\n mnd_topn_data = get_topN_mnd()\n result[\"mem_topN\"] = mnd_topn_data[\"mem.bytes.memavailable\"]\n result[\"disk_topN\"] = mnd_topn_data[\"df.bytes.used\"]\n result[\"net_in_topN\"] = net_topn_data[\"cluster.net.dev.receive\"]\n result[\"net_out_topN\"] = net_topn_data[\"cluster.net.dev.transmit\"]\n # print(result)\n send_to_db('argus-statistics', 'sys_resource', result)\n logger.debug(\"update is already success\")", "def step7(self):\n for indx, mr in enumerate(self.mrs):\n self.log.info(\"Set boot drive on controller:%d\"\n % (mr.ctrl_id))\n for vd in self.mr_vds[indx]:\n if (int(mr.cli.bootdrive_vd_get()) != vd):\n mr.cli.bootdrive_vd_set(vd_id=self.mr_vds[indx][indx],\n setting=\"On\")\n break", "def update_records(self):\n impl.get_runtime().sync()\n self.clear_frontend()\n self._traced_records = impl.get_runtime(\n ).prog.get_kernel_profiler_records()", "def test_partially_update_device_group_by_id1(self):\n pass", "def _update(self, device=None):\n self._attr_available = True\n self.schedule_update_ha_state(True)", "def update(self):\n self._device.update()", "def configure_dd_spec_list(mods, apps):\n config = DDConfig(width=0.6, height=0.6, x=0, y=0)\n\n 
dropdowns = [\n DDSpec('terminal',\n Keybind(mods.base, 'apostrophe'),\n [apps.term],\n config.defaults),\n DDSpec('tasks',\n Keybind(mods.app, 'Delete'),\n [apps.term, '-t', 'btm', '-e', 'btm'],\n config.modify(width=0.7, height=0.7)),\n DDSpec('mixer',\n Keybind(mods.app, 'F4'),\n [apps.term, '-t', 'PulseMixer', '-e', 'pulsemixer'],\n config.defaults),\n DDSpec('files',\n Keybind(mods.app, 'f'),\n [apps.term, '-t', 'files', '-e', 'lf'],\n config.defaults),\n DDSpec('calculator',\n Keybind(mods.app, 'minus'),\n [apps.term, '-t', 'Calculator', '-e', 'qalc'],\n config.defaults),\n # DDSpec('python shell',\n # Keybind(mods.app, 'p'),\n # [ apps.term, '-t', 'PyShell', '-e bpython' ],\n # config.defaults),\n DDSpec('signal',\n Keybind(mods.app, 's'),\n ['signal-desktop'],\n config.modify(opacity=1.0,\n height=0.8,\n width=0.4)),\n DDSpec('emacs scratch',\n Keybind(mods.app, 'e'),\n [*apps.editor, '-n', '-F', '((name . \\\"emacs-scratch\\\"))'],\n config.modify(\n match=Match(title=['emacs-scratch']),\n opacity=1.0,\n height=0.7,\n )),\n DDSpec('psensor', Keybind(mods.alternate_app, 'Delete'),\n ['psensor'],\n config.modify(\n match=Match(wm_class=['psensor', 'Psensor']),\n opacity=1.0,\n height=0.275, width=0.6,\n x=0.396, y=0.7205\n )),\n DDSpec('manpage',\n Keybind(mods.app, 'slash'),\n [apps.term],\n config.modify(\n height=0.99,\n width=0.5\n )),\n ]\n return dropdowns", "def update_system(self, system):\n try:\n rc, storage_system = self.request(\"storage-systems/%s\" % system[\"ssid\"], method=\"POST\", data=system[\"changes\"])\n except Exception as error:\n self.module.warn(\"Failed to update storage system. Array [%s]. Error [%s]\" % (system[\"ssid\"], to_native(error)))", "def __http_update_device_list(self):\n\n # Make sure we are (still) logged in\n self.__login_if_required()\n\n # Fetch all devices from Govee\n req = {\n 'key': '',\n 'transaction': self.__current_milli_time(),\n 'view': 0\n }\n res = self.__http_post(req, '/device/rest/devices/v1/list')\n\n # Response:\n \"\"\"\n {\n \"devices\": [\n {\n \"device\": \"AA:BB:CC:DD:EE:FF:11:22\",\n \"deviceExt\": {\n \"deviceSettings\": \"{\\\"wifiName\\\":\\\"MyWifi\\\",\\\"address\\\":\\\"CC:DD:EE:FF:11:22\\\",\\\"bleName\\\":\\\"ihoment_H6159_XXXX\\\",\\\"topic\\\":\\\"GD/123467890123467890123467890\\\",\\\"sku\\\":\\\"H6159\\\",\\\"device\\\":\\\"AA:BB:CC:DD:EE:FF:11:22\\\",\\\"deviceName\\\":\\\"Kitchen light\\\",\\\"versionHard\\\":\\\"1.00.01\\\",\\\"versionSoft\\\":\\\"1.02.14\\\"}\",\n \"extResources\": \"{\\\"skuUrl\\\":\\\"\\\",\\\"headOnImg\\\":\\\"\\\",\\\"headOffImg\\\":\\\"\\\",\\\"ext\\\":\\\"\\\"}\",\n \"lastDeviceData\": \"{\\\"online\\\":false}\"\n },\n \"deviceName\": \"Kitchen light\",\n \"goodsType\": 0,\n \"sku\": \"H6159\",\n \"versionHard\": \"1.00.01\",\n \"versionSoft\": \"1.02.14\"\n },\n {\n \"device\": \"A2:B2:C3:D4:E5:F6:77:88\",\n \"deviceExt\": {\n \"deviceSettings\": \"{\\\"wifiName\\\":\\\"MyWifi\\\",\\\"address\\\":\\\"C3:D4:E5:F6:77:88\\\",\\\"bleName\\\":\\\"ihoment_H6163_YYYY\\\",\\\"topic\\\":\\\"GD/123467890123467890123467890\\\",\\\"sku\\\":\\\"H6163\\\",\\\"device\\\":\\\"A2:B2:C3:D4:E5:F6:77:88\\\",\\\"deviceName\\\":\\\"Living room\\\",\\\"versionHard\\\":\\\"1.00.01\\\",\\\"versionSoft\\\":\\\"1.02.14\\\"}\",\n \"extResources\": \"{\\\"skuUrl\\\":\\\"\\\",\\\"headOnImg\\\":\\\"\\\",\\\"headOffImg\\\":\\\"\\\",\\\"ext\\\":\\\"\\\"}\",\n \"lastDeviceData\": \"{\\\"online\\\":false}\"\n },\n \"deviceName\": \"Living room\",\n \"goodsType\": 0,\n \"sku\": \"H6163\",\n \"versionHard\": 
\"1.00.01\",\n \"versionSoft\": \"1.02.14\"\n }\n ],\n \"message\": \"\",\n \"status\": 200\n }\n \"\"\"\n\n # Check response status\n if res['status'] != 200:\n raise GoveeException('Govee answered with device list status {}'.format(res['status'])) \n\n for raw_device in res['devices']:\n identifier = raw_device['device']\n sku = raw_device['sku']\n if not identifier or not sku:\n continue\n name = raw_device['deviceName']\n device_settings = json.loads(raw_device['deviceExt']['deviceSettings'])\n device_settings_keys = device_settings.keys()\n if not 'address' in device_settings_keys and not 'topic' in device_settings_keys:\n continue\n topic = device_settings['topic']\n\n if identifier in self.__devices.keys():\n device = self.__devices[identifier]\n device._name = name\n else:\n device_factory = self.__get_device_factory(sku)\n if not device_factory:\n continue\n last_device_data = json.loads(raw_device['deviceExt']['lastDeviceData'])\n if 'online' in last_device_data.keys():\n if last_device_data['online']:\n iot_connected = dev.IotConnectionStatus.ONLINE\n else:\n iot_connected = dev.IotConnectionStatus.OFFLINE\n elif not 'wifiName' in device_settings:\n iot_connected = dev.IotConnectionStatus.NO_IOT\n else:\n iot_connected = dev.IotConnectionStatus.UNKNOWN\n device = device_factory.build(self, identifier, topic, sku, name, iot_connected)\n if device:\n self.__devices[identifier] = device\n self.on_new_device(self, device, raw_device)", "def kernel(self, kernel):\n self._context[\"kernel\"] = kernel", "def update_knobs(self):\n self.previous_knobs = self.current_knobs\n self.current_knobs = {'Modulation' : 'fsk',\n 'Rs' : 0,\n 'EIRP' : 0,\n 'Speed' : 0}" ]
[ "0.5808753", "0.5629541", "0.5591042", "0.5496405", "0.54948765", "0.5418482", "0.52984756", "0.5270303", "0.5197712", "0.5154047", "0.5119544", "0.5119544", "0.51178193", "0.5101636", "0.50797075", "0.5073883", "0.5045194", "0.50378954", "0.50309587", "0.5014471", "0.5006939", "0.49790138", "0.49790138", "0.49558872", "0.4944775", "0.49345356", "0.4933418", "0.49157098", "0.49117544", "0.48883072", "0.487255", "0.4870881", "0.4838457", "0.48186287", "0.47928903", "0.4790472", "0.47855544", "0.4784225", "0.47728404", "0.47724557", "0.47722146", "0.47662774", "0.47515303", "0.47475302", "0.47366276", "0.47311965", "0.47084892", "0.46925533", "0.46844152", "0.46836463", "0.46796194", "0.46786395", "0.46757758", "0.4674159", "0.46594977", "0.4654707", "0.46454662", "0.46401823", "0.46396968", "0.46394607", "0.46333247", "0.46333247", "0.46300775", "0.46294063", "0.4622376", "0.46191347", "0.46182328", "0.46177244", "0.46123892", "0.46101418", "0.4605694", "0.4602598", "0.45930377", "0.45930377", "0.45910034", "0.45862654", "0.45795217", "0.45791483", "0.4579081", "0.45706734", "0.45693976", "0.45688397", "0.45688033", "0.45611158", "0.45599464", "0.45504114", "0.4550293", "0.45322782", "0.45303348", "0.45233905", "0.45231745", "0.4520297", "0.4509014", "0.45082575", "0.4500885", "0.44966468", "0.44948196", "0.4490787", "0.44905216", "0.44880718" ]
0.7550118
0
Update the status table.
def refresh_status() -> None: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_status(self):\n self._db_update({'status': self.status})", "def updateStatus(self, status):\n pass", "def UpdateStatus(self, status):\r\n self.status.update(status)", "def update_status(self, id, status):\n sql = f\"UPDATE incidences SET status = \\'{status}\\'\\\n WHERE incidences.id = {id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()", "def update_status(request_id, status):\n pass", "def update(self, new_status: Status) -> None:\n self._status = new_status", "def _update_status(self, status: dict):\n with generate_retry_session() as session:\n session.headers.update({\n 'Authorization': 'Bearer {}'.format(self.platform_auth_token)\n })\n url = '{}/training/definitions/{}/jobs/{}/status'.format(\n ORGANIZATION_ENDPOINT, self.job_definition_name, self.training_job_id)\n res = session.put(url, json=status)\n res.raise_for_status()", "def updatestatus(self):\n self.status = self.query()\n if self.status['success']:\n return True\n else:\n return False", "def callUpdateTable(self):\r\n self.updateTable()", "def set_table_status(table_id: int, status: int) -> Table:\n table = Table.query.filter_by(id=table_id).first()\n table.status = status\n\n db.session.commit()\n\n return table", "def update(self, **kwargs):\n self.status = status.parse(status.get(host=self._host, port=self._port))", "def update_status(self, id, status):\n\n records = self.db.get_table()\n index = -1\n\n for i in range(0, len(records)):\n if str(records[i][\"id\"]) == str(id):\n index = i\n\n if index == -1:\n return False\n\n records[index][\"status\"] = status\n self.db.update_cell(index, 'status', status)\n\n return records[index]", "def update_status(self) -> None:\n try:\n (rc, mid) = self.mqttc.publish(\n self.config.status_topic, json.dumps(self.status), qos=0, retain=False\n )\n if rc == mqtt.MQTT_ERR_SUCCESS:\n logging.info(\n f\"The request for a status update has been successfully accepted: mid={mid}\"\n )\n else:\n logging.warning(\"The request for a status update has been rejected\")\n except ValueError as e:\n logging.warning(f\"Cannot send status update: {e}\")", "def _set_status(self):\n result = self._get_status()\n if result and result[0]['state'] == 'aborted':\n raise Exception(\"Aborted because the status flag is set to 'aborted' in dynamodb\")\n\n # record the status\n self.status['timestamp'] = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n self.db_handler.update_item({'api_version': TsV2CatalogHandler.api_version}, self.status)", "def refresh_status(self):\n\n pass", "def _status_btn_clicked(root, item):\n sql_status_update = 'UPDATE job SET Job_Status = \"Complete\" WHERE Job_ID = '+str(item[0])+';'\n print (sql_status_update)\n conn = pymysql.connect(host='localhost', user='root', password='#######', db='######')\n a = conn.cursor()\n a.execute(sql_status_update)\n conn.commit()\n a.close()\n conn.close()", "def load_status_table():", "def change_status(self, status, application_id):", "def _populate_table_status():\n [db_insert_or_get(Status, name=name) for name in app.config['STATUS_DICT'][1:]]\n db.session.commit()", "async def async_update(self):\n\n await self.status_request()", "def update_status(request=None, key=None):\n status_keys = ('global_stats', 'language_pair_stats', 'group_stats',\n 'user_stats', 'clusters')\n \n # If a key is given, we only update the requested sub status.\n if key:\n status_keys = (key,)\n \n for status_key in status_keys:\n if status_key == 'global_stats':\n STATUS_CACHE[status_key] = _compute_global_stats()\n \n elif 
status_key == 'language_pair_stats':\n STATUS_CACHE[status_key] = _compute_language_pair_stats()\n \n elif status_key == 'group_stats':\n STATUS_CACHE[status_key] = _compute_group_stats()\n \n elif status_key == 'user_stats':\n # Only show top 25 contributors.\n user_stats = _compute_user_stats()\n STATUS_CACHE[status_key] = user_stats[:25]\n \n if request is not None:\n return HttpResponse('Status updated successfully')", "def updatestatus(id, status):\n username = os.getlogin()\n res = requests.put('{}update/{}/'.format(base_url, id),\n data={\"keyword_fetching_status\": status, \"user_fetched\": username})\n res = res.json()\n return res", "def update(self):\n _LOGGER.debug(\"Updating status using the client AC instance...\")\n self.ac.update_status()\n _LOGGER.debug(\"Status updated using the client AC instance\")", "def update_status(status):\n global _current_line\n if _current_line is not None:\n _current_line.update(status)", "async def update_status_message(self):\n embed, components = self.get_status_embed_and_components()\n await self.client.message_edit(self.status_message, embed = embed, components = components)", "def set_status(self, scenario_id, status):\n self.cur.execute(\n \"UPDATE execute_list SET status = %s WHERE id = %s\",\n (status, scenario_id),\n )", "def _write_status(self, status, cls=MySQLStatus):", "def updateTable(self):\r\n\r\n problemHistory = save.getProblemHistory()\r\n\r\n inProgress = QColor(247, 222, 148)\r\n completed = QColor(143, 224, 162)\r\n\r\n self.recentsTable.clearContents()\r\n self.recentsTable.setRowCount(len(problemHistory))\r\n\r\n for i in range(len(problemHistory)):\r\n log = problemHistory[i].split(':')\r\n problem = QTableWidgetItem(log[0])\r\n date = QTableWidgetItem(log[2])\r\n\r\n if log[1] == 'True':\r\n problem.setBackground(completed)\r\n date.setBackground(completed)\r\n else:\r\n problem.setBackground(inProgress)\r\n date.setBackground(inProgress)\r\n\r\n self.recentsTable.setItem(i, 0, problem)\r\n self.recentsTable.setItem(i, 1, date)", "def update_status(conn, episode_info, status=\"watched_status\"):\n\tp_key = get_p_key(episode_info)\n\t\n\tstatus_update = f'UPDATE shows SET watched_status = {episode_info[status]} WHERE p_key = \"{p_key}\";'\n\t\n\texecute_sql(conn, status_update)", "def update_status(cls):\n for job in cls.query.filter(cls.finished == False):\n num_hits_left = session.query(BoxHit).filter_by(training_job_id = job.id, outstanding=True).count()\n urls_left = session.query(VideoTrainingURL).filter_by(training_job_id=job.id, processed = False)\n dynamo = DynamoIngestionStatusClient()\n num_urls_left = 0\n for url in urls_left:\n dynamo_url = dynamo.get(url.url)\n if dynamo_url is None or dynamo_url['status'] == 'Failed':\n # will never be processed, so ignore for our purposes\n url.processed = True\n else:\n num_urls_left += 1\n if num_hits_left+num_urls_left == 0:\n job.finished = True\n print '*** Job ID: %s is complete ***' % str(job.id)\n\n print '------------- Stats for Job ID: %s -------------' % str(job.id)\n print 'Total URLs : %i' % VideoTrainingURL.query.filter_by(training_job_id = job.id).count()\n print 'Total HITs : %i' % BoxHit.query.filter_by(training_job_id = job.id).count()\n if not job.finished:\n print 'unprocessed URLs: %i' % num_urls_left\n print 'outstanding HITs: %i\\n' % num_hits_left\n session.flush()", "def update_execute_list(self, status, scenario_info):\n print(\"--> Updating status in execute table on server\")\n options = \"-F, -v OFS=',' -v INPLACE_SUFFIX=.bak -i inplace\"\n # 
AWK parses the file line-by-line. When the entry of the first column is equal\n # to the scenario identification number, the second column is replaced by the\n # status parameter.\n program = \"'{if($1==%s) $2=\\\"%s\\\"};1'\" % (scenario_info[\"id\"], status)\n command = \"awk %s %s %s\" % (options, program, self._server_path)\n err_message = \"Failed to update %s on server\" % self._EXECUTE_LIST\n _ = self._execute_and_check_err(command, err_message)", "def UpdateStatus(self,pid):\n\t\tb1=Rents.objects.filter(paymentid_id=pid).first()\n\t\tamount=Payment.objects.filter(paymentid=pid).values('amount')\n\t\tb=b1.__dict__\n\t\tquant=b['quantity']\n\t\tbookid=b['bookid_id']\n\t\tprice=amount[0]['amount']/quant\n\t\t#price=float(\"{.2f}\".format(amount[0]['amount']))/float(\"{0:.2f}\".format(quant))\n\t\tRents.objects.filter(paymentid_id=pid).update(status='r')\n\t\tBook.objects.filter(bookid=bookid).update(quantity=F('quantity')+quant)\n\t\tStatus.objects.filter(ISBN=b['ISBN'],rentprice=price).update(quantity=F('quantity')+quant)\n\t\tUpload.objects.filter(owner_id_id=b['owner_id_id'],sellprice=price).update(qtyavailable=F('qtyavailable')+quant)\n\t\tself.notifyBuyer(b['ISBN'])", "def statusupdate(filepath):\n pass", "def update(self) -> None:\n self.previous_status = self.status\n\n jobs = self._client.describe_jobs(jobs = [ self.id ])[\"jobs\"]\n\n try:\n self.state = jobs[0]\n except IndexError:\n raise ValueError(\"Invalid or unknown job id %s\" % self.id) from None", "def update_execute_list(self, status, scenario_info):\n self.cur.execute(\n \"UPDATE execute_list SET status = %s WHERE id = %s\",\n (status, scenario_info[\"id\"]),\n )", "def update_job_status(jid, new_status):\n rd.hset(_generate_job_key(jid), 'status', new_status)", "async def async_update(self) -> None:\n await self._table.refresh()", "def updateStatus(self, newStatus = False):\n\t\theight, width = self.screen.getmaxyx()\n\t\tif newStatus:\n\t\t\tself.status = str(newStatus)\n\t\tspaces = width - len(self.status) - 2\n\t\tself.wts(height - 1, 1, self.status + ' ' * spaces , 1)\n\t\tself.screen.refresh()", "def update_status(self):\n\n # Memory information can be found in status and statm /proc/PID files\n # status file VmRSS equivalent to top's RES column\n # statm disagrees with status VmRSS, I think it may not include\n # sub-processes\n # From: man proc\n # * VmPeak: Peak virtual memory size.\n # * VmSize: Virtual memory size.\n # * VmHWM: Peak resident set size (\"high water mark\").\n # * VmRSS: Resident set size.\n\n # status_fields should be ordered as in the status file\n fields = iter(self.status_fields)\n field = next(fields)\n with open(self.status_path) as f:\n for line in f:\n if line.startswith(field):\n # separated by white-space, 2nd element is value\n # 3rd is units e.g. 
kB\n # At the moment all fields are ints\n self.status[field] = int(line.split()[1])\n\n try:\n field = next(fields)\n except StopIteration:\n # Just found the last field in status_fields\n break", "def update_status(request):\n return 0", "def set_status(self, status):\n # TODO log to db\n self.status = status", "def on_status_update(self, data):\n # TODO: Update User/Client object with this info\n print ('Status Update: %s' % data)", "def lock_table(self):\n\n self.status = 'Locked'", "def _update(self, data):\n self.status = data['status']\n self.progress = data['progress']", "def set_status(self, p_id, status):\n try:\n cursor = self.conn.cursor()\n command = '''\n UPDATE Player\n SET Status = ?\n WHERE P_ID = ?\n '''\n cursor.execute(command, (status, p_id))\n self.conn.commit()\n except BaseException as e:\n self.log.log_error('Fehler beim setzen des Status', e)\n raise e", "def _update_config_db_flex_counter_table(status, filename):\n with open(filename) as config_db_file:\n config_db = json.load(config_db_file)\n\n write_config_db = False\n if \"FLEX_COUNTER_TABLE\" in config_db:\n if status != \"delay\":\n for counter, counter_config in config_db[\"FLEX_COUNTER_TABLE\"].items():\n if \"FLEX_COUNTER_STATUS\" in counter_config and \\\n counter_config[\"FLEX_COUNTER_STATUS\"] is not status:\n counter_config[\"FLEX_COUNTER_STATUS\"] = status\n write_config_db = True\n\n elif status == \"delay\":\n write_config_db = True\n for key in config_db[\"FLEX_COUNTER_TABLE\"].keys():\n config_db[\"FLEX_COUNTER_TABLE\"][key].update({\"FLEX_COUNTER_DELAY_STATUS\":\"true\"})\n\n if write_config_db:\n with open(filename, 'w') as config_db_file:\n json.dump(config_db, config_db_file, indent=4)", "def update_status(self, context, status):\n plugin = self.driver.service_plugin\n plugin.update_status_by_agent(\n context, status, self.driver.service_type)", "def _update_status(self, new_status):\r\n old_status = self._status\r\n self._status = new_status\r\n for listener in self._listeners:\r\n # Calling user-defined callback.\r\n self._thread_pool.submit(\r\n listener.on_status_change(\r\n self, new_status.value, old_status.value))", "def updateAccountStatus(login:str, status:int=0)->bool:\n\n query = f\"UPDATE {Account.tablename} SET {Account.statusCol} = ? WHERE {Account.loginCol} = ?\"\n\n try:\n db = DataBaseConnection()\n db.cursor.execute(query, status, login)\n\n if status == 1: # activation\n newActivationDate = datetime.now().date()\n newExpirationDate = (newActivationDate + datetimePack.timedelta(days=155)).date() # warning : + 5 mois\n\n newActivationDate = str(newActivationDate)\n newExpirationDate = str(newExpirationDate)\n\n query = f\"UPDATE {Account.tablename} SET {Account.activationDateCol} = ?, {Account.expirationDateCol} = ? 
WHERE {Account.loginCol} = ?\"\n\n db.cursor.execute(query, newActivationDate, newExpirationDate, login)\n\n except Exception as error:\n return {\"flag\": \"queryError\", \"message\": f\"{error}\"}\n else:\n db.conn.commit()\n return True", "def change_status(id):\n query = \"\"\"UPDATE parcels SET status = %s WHERE id = %s\"\"\"\n tuple =('delivered' , id)\n db.insert(query, tuple)", "def reset_status(self):\n logging.debug(f\"\"\"reset_status\"\"\")\n conn = self.connect(cxRepo)\n sql = f\"\"\"update {self.schemaRepo}.tablediff set server1_status = null,\n server2_status = null where server1_status = 'running'\"\"\"\n with conn:\n with conn.cursor() as curs:\n try:\n curs.execute(sql)\n except conn.DatabaseError as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing {sql} : {error}\"\"\")", "def update_record_status(self, context, payload):\n access_token = util.get_access_token(context[\"headers\"])\n record = ZohorecruitRecord(**payload)\n endpoint = f\"{record.module}/status\"\n record_data = {\n \"data\": [\n {\n \"ids\": [record.record_id],\n \"Candidate_Status\": record.status,\n \"comments\": record.comments\n }\n ],\n \"trigger\":[record.trigger]\n }\n response = util.rest(\"PUT\",endpoint,access_token,record_data)\n return json.loads(response.text)", "def backfill_notification_statuses(self):\n LIMIT = 250000\n subq = \"SELECT id FROM notification_history WHERE notification_status is NULL LIMIT {}\".format(LIMIT)\n update = \"UPDATE notification_history SET notification_status = status WHERE id in ({})\".format(subq)\n result = db.session.execute(subq).fetchall()\n\n while len(result) > 0:\n db.session.execute(update)\n print('commit {} updates at {}'.format(LIMIT, datetime.utcnow()))\n db.session.commit()\n result = db.session.execute(subq).fetchall()", "def update_status(\n self, departure, duration, state, connections, description, detail, delay\n ):\n self._departure = departure\n self._duration = duration\n self._state = state\n self._connections = connections\n self._description = description\n self._detail = detail\n self._delay = delay", "def state_update():\n if self.variables.table:\n if (\n self.variables.default_values_dict[\"settings\"][\"Table_state\"]\n and not table_indicator.text() == \"UP\"\n ):\n table_indicator.setStyleSheet(\n \"background : rgb(0,255,0); border-radius: 25px\"\n )\n table_indicator.setText(\"UP\")\n\n elif (\n not self.variables.default_values_dict[\"settings\"][\"Table_state\"]\n and not table_indicator.text() == \"DOWN\"\n ):\n table_indicator.setStyleSheet(\n \"background : rgb(255,0,0); border-radius: 25px\"\n )\n table_indicator.setText(\"DOWN\")", "def status(self, id):", "def update_status(self) -> str:\n return pulumi.get(self, \"update_status\")", "def sync_status_to_vc(status, context):\n conn = self._vc_connection\n conn.vip.health.set_status(status, context)", "def update_status_info (cls, nffg, status,\n log=logging.getLogger(\"UPDATE-STATUS\")):\n log.debug(\"Add %s status for NFs and Flowrules...\" % status)\n for nf in nffg.nfs:\n nf.status = status\n for infra in nffg.infras:\n for flowrule in infra.flowrules():\n flowrule.status = status\n return nffg", "def status(self, status: dict):\n pass", "def update_status(self, server):\r\n\r\n\t\tcards = self.ice.getCardsAlive()\r\n\t\talarm_list = self.ice.getAlarmStatus()\r\n\t\tstatus_list = self.ice.getStatus()\r\n\t\twarning_list = self.ice.getWarnings()\r\n\r\n\t\tdateTimeObj = datetime.now()\r\n\t\ttimestampStr = dateTimeObj.strftime(\"%d-%b-%Y 
(%H:%M:%S)\")\r\n\t\tfor i in range(len(cards)):\r\n\t\t\tjson_body = {'alarm':alarm_list[i], 'status':status_list[i], 'warning':warning_list[i], 'update':timestampStr, 'hostname':self.ip}\r\n\t\t\tserver.update(index='icepap_info', id=self.ip + '_' + str(cards[i]), body={\"doc\":json_body})", "def update_job_status(jid, new_status):\n jrd.hset(_generate_job_key(jid), 'status', new_status)", "def update_status(request):\n tasklist = request.GET.get(\"tasklist\")\n pk = request.GET.get(\"pk\")\n status = request.GET.get(\"status\")\n qs = Todo.objects.get(pk=pk)\n qs.status = status\n if status == \"Done\":\n qs.close()\n elif status == \"Undone\":\n qs.reopen()\n elif status == \"In-Progress\":\n qs.in_progress()\n qs.save()\n return redirect(\"tasks\", tasklist=tasklist)", "def update_trunk_status(self, context, trunk_id, status):\n with db_api.CONTEXT_WRITER.using(context):\n trunk = trunk_objects.Trunk.get_object(context, id=trunk_id)\n if trunk:\n self._safe_update_trunk(trunk, status=status)", "def SetStatus(self, status):\r\n self.status = status", "def update(self, status):\n\n for name, c in self.children.items():\n c.update(status.child(name))", "def patch(self, table_number):\n table = TableDetails.query.get_or_404(table_number)\n if 'table_status' in request.json:\n table.table_status = request.json['table_status']\n if 'current_session' in request.json:\n table.current_session = request.json['current_session']\n else:\n table.current_session = None\n db.session.commit()\n\n return table, 200", "def update_report_status(reportid, status):\n reportCar = Reportcar.query.get(reportid)\n reportCar.status = status\n\n if(status==\"fixed\"):\n car = reportCar.car\n car.isavailable = True\n\n db.session.commit()\n return jsonify({\"message\": \"Status changed to \" + status})", "def updateServersStatus(servers):\n session = Queries.createSession()\n try:\n for server in servers:\n ip = server[0]\n port = server[1]\n status = server[2]\n dict_status = {\"status\": unicode(status)}\n if status == 'ONLINE':\n dict_status[\"last_online\"] = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n session.query(FileServer).filter_by(ip=ip, port=port).update(dict_status)\n session.commit()\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()", "def update(self):\n db.session.commit()", "def update(self):\n db.session.commit()", "async def change_status(self, status: str) -> int:\n data = {'status': str(status)}\n r = await self.request.request(url='https://www.roblox.com/home/updatestatus', method='POST', data=j.dumps(data))\n return r.status_code", "def set(self, key, value):\n try:\n self.status[key] = value\n log.info('updated %s to %s' %(key, value))\n ret = 0\n except KeyError as err:\n log.error('could not update %s to %s: %s' %(key, value, err))\n ret = 1\n \n return ret", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def update(self):\n self.__execute(self.pkgin_bin, \"update\")", "def edit_status(self,id,type,status):\n\n current_user = get_jwt_identity()\n try:\n con = init_db()\n 
cur = con.cursor()\n cur.execute(\"SELECT is_admin FROM users WHERE email = %s\",(current_user,))\n user = cur.fetchall() \n user_role = user[0][0] \n \n if user_role != True:\n return{\n \"Status\": 403,\n \"Message\":\"Unauthorized user\" \n },403 \n cur.execute(\"SELECT user_id FROM incidents WHERE \\\n incident_id = %s AND type = %s\",(id,type))\n record = cur.fetchall()\n if not record:\n return{\n \"Status\": 404,\n \"Message\": \"Record does not exist\"\n },404 \n\n user =record[0][0] \n cur.execute(\"SELECT email FROM users WHERE user_id = %s\",(user,))\n user_email = cur.fetchall()\n email = user_email[0][0]\n\n cur.execute(\"UPDATE incidents SET status = %s WHERE \\\n incident_id = %s and type = %s \\\n RETURNING incident_id,type,location,status,comment,user_id\",\n (status,id,type))\n updated_record = cur.fetchone()\n close_connection(con)\n new_record = {\n \"Created by\":updated_record[5],\n \"Incident Id\":updated_record[0],\n \"Type\":updated_record[1],\n \"Location\":updated_record[2],\n \"Status\":updated_record[3],\n \"Comment\":updated_record[4]\n }\n #send mail\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login(\n \"projectemail2303@gmail.com\", \"Projectemail_2303\")\n msg = \"Your {} record is now {}\".format(type, status)\n server.sendmail(\n \"projectemail2303@gmail.com\", email, msg)\n server.quit() \n #update admin after status change \n return{\n \"Status\": 200,\n \"Message\":\"Updated \" + type + \" record status\",\n \"Data\": new_record\n }\n except (Exception,psycopg2.DatabaseError) as error:\n print(error)\n return{\n \"message\":\"Record has not been edited please try again\"\n }", "def updateJobsTable(self):\n self.checkJobsDict()\n jobdict = self.DB.meta.peatsa_jobs \n M = TableModel()\n #open job log from file\n f=open('jobstates.log','r')\n jl = pickle.load(f) \n for j in jobdict: \n jobid = jobdict[j] \n try:\n M.addRecord(j,state=jl[jobid]['State'],date=jl[jobid]['Date'])\n except:\n M.addRecord(j,state='Not in DB')\n self.jobstable = TableCanvas(self.tf, model=M, height=100, editable=False)\n self.jobstable.createTableFrame() \n self.log.yview('moveto', 1)\n f.close()\n return", "def update_vluln_table():", "def update_status(self, status, publish=False):\n #assert( isinstance(status, self.STATES) )\n if self.get_state() is not None:\n self.previous_states.append( self.get_state() )\n enum = self._state_enums(status)\n self.current_state = status\n self.pub_status.publish(self.STATES(status = status, text = enum.name))\n rospy.logdebug(\"Transitioned to state: {}\".format(enum))", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status" ]
[ "0.8145079", "0.7746542", "0.7465801", "0.69591504", "0.69093305", "0.6833517", "0.6802735", "0.6739377", "0.6701322", "0.6697481", "0.6673145", "0.6655026", "0.6641906", "0.6636951", "0.64802206", "0.64509493", "0.6405129", "0.64037913", "0.64026374", "0.6361408", "0.63308764", "0.632276", "0.63193786", "0.631147", "0.62840706", "0.6280456", "0.6271296", "0.6266761", "0.6266506", "0.62648124", "0.62624425", "0.62346464", "0.62129015", "0.6200155", "0.6199598", "0.6198515", "0.61909646", "0.61895597", "0.61735815", "0.6170438", "0.61649054", "0.6157093", "0.61537445", "0.6150488", "0.61468047", "0.6139052", "0.61157477", "0.61120856", "0.6102331", "0.60947144", "0.6094006", "0.609216", "0.60793966", "0.60764354", "0.6072034", "0.6067296", "0.60584927", "0.60561234", "0.6055632", "0.60447204", "0.6035396", "0.6017293", "0.59948367", "0.5988307", "0.59803915", "0.5976079", "0.59743136", "0.5971375", "0.5954582", "0.59509254", "0.59509254", "0.59470975", "0.59417564", "0.591401", "0.591401", "0.591401", "0.591401", "0.591401", "0.591401", "0.591401", "0.5896301", "0.5890872", "0.58771145", "0.58737475", "0.5865809", "0.58640885", "0.58640885", "0.58640885", "0.58640885", "0.58640885", "0.58640885", "0.58640885", "0.58640885", "0.58640885", "0.58640885", "0.58640885", "0.58640885", "0.58640885", "0.58640885", "0.58640885" ]
0.62391394
31
Creates a new terminal and returns the name.
def create_terminal() -> str: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __repr__(self):\n return \"Terminal('{}')\".format(self.name)", "def addTerminal(self, name, **opts):\n opts.update(renamable=True, removable=True)\n name = self.nextTerminalName(name)\n term = NetTerminal(self, name, **opts)\n self.terminals[name] = term\n if term.isInput():\n self._inputs[name] = term\n elif term.isOutput():\n self._outputs[name] = term\n self.graphicsItem().updateTerminals()\n self.sigTerminalAdded.emit(self, term)\n return term", "def console_create(self):\n return self.call('console.create')", "def _spawn_turtle(self, trt_x, trt_y, name=None):\n\n\t\tif name is None or name == \"\":\n\t\t\tname = self._create_unique_turtle_name()\n\t\telif self._has_turtle(name):\n\t\t\treturn \"\"\n\n\t\tturtle = Turtle(name, Point(trt_x, trt_y))\n\t\tself._turtles[name] = turtle\n\n\t\trospy.loginfo(\"New turtle [%s] at x=[%d], y=[%d]\", name, trt_x, trt_y)\n\n\t\treturn name", "def get_custom_terminal_cmd():\n return lnp.userconfig.get_string('terminal')", "def create_namespace(self):\n print(\"\\nCreating namespace...\")\n\n name = input(\" - name (default = commands): \") or \"commands\"\n path = \"./{}\".format(name.replace(\".\", \"/\")).lower()\n\n os.makedirs(path, exist_ok=True)\n\n init_path = os.path.join(path, \"__init__.py\")\n if not os.path.isfile(init_path):\n open(init_path, 'w+').close()\n\n return name, path", "def _create_unique_turtle_name(self):\n\n\t\tself._id_counter += 1\n\t\tnew_name = \"turtle{}\".format(self._id_counter)\n\n\t\tif self._has_turtle(new_name):\n\t\t\treturn self._create_unique_turtle_name()\n\n\t\treturn new_name", "def new_session(self):\n self.command(\"new\")", "def CreateConsole(self):\n lc = launcher.TextFrame('title')\n return lc", "def create_name (self):\n return self.create_topic().create_name('Name')", "def create_device(name, device_type, runtime):\n command = 'create \"%s\" \"%s\" \"%s\"' % (\n name, device_type.identifier, runtime.identifier)\n device_id = _run_command(command)\n\n # The device ID has a new line at the end. 
Strip it when returning.\n return device_id[:-1]", "def generate_unique_name():\n return 'titanic-' + str(get_mac())", "def name_create(self, name):\n values = {\n 'name': name,\n }\n return self.create(values).name_get()[0]", "def create_name() -> str:\r\n user_input = str(input(\"What is your name?\\n\"))\r\n return user_input", "def terminal_name(arg_terminal):\n if not arg_terminal:\n return 'до двери'\n curs_dict = APP.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n terminal_sql = curs_dict.mogrify(TERMINAL_SQL_TEMPL, (arg_terminal,))\n logging.info('terminal_sql=%s', terminal_sql)\n curs_dict.execute(terminal_sql)\n res = curs_dict.fetchone()\n curs_dict.close()\n return '{}, {}'.format(res.get('name', 'Название терминала по id не найдено'),\n res.get('address', 'Адрес терминала по id не найден'))", "def createname(cls):\n name = config.get(\"pyzombie_filesystem\", \"execbase\")\n name = \"{0}_{1}\".format(name, datetime.utcnow().strftime(\"%Y%jT%H%M%SZ\"))\n if os.path.isdir(Executable.execdirpath(name)):\n #Need to handle the rare case of duplicate resource names---this\n #will happen all the time in testing, but rarely in production.\n index = 0\n altname = \"{0}_{1:03}\".format(name, index)\n while os.path.isdir(Executable.execdirpath(altname)):\n index = index + 1\n altname = \"{0}_{1:03}\".format(name, index)\n name = altname\n return name", "def get_configured_terminal():\n s = lnp.userconfig.get_string('terminal_type')\n terminals = get_valid_terminals()\n for t in terminals:\n if s == t.name:\n return t\n return CustomTerminal", "def create_session(\n path: str,\n type: str,\n name: Optional[str] = None,\n kernel_name: Optional[str] = None,\n kernel_id: Optional[str] = None,\n) -> str:\n ...", "def name(self):\n if not self._name:\n prefix = self.random.choice(['Desktop'] * 4 + ['Laptop'])\n self._name = '{}-{}'.format(prefix, ''.join(\n self.random.choice(string.ascii_uppercase + string.digits) for _ in range(7)))\n return self._name", "def terminal(self):\n return self._term", "async def osname(self):\n\n await self.bot.say(box(system(), 'Bash'))", "def terminal_init(self):\n pass", "def create_table(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"create_table\")", "def create_table(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"create_table\")", "def CreateCharacter(self):\n attrs = Dictionary[str, object]()\n for k in self._attrs.keys():\n attrs[k] = self._attrs[k]\n return ClientAPI.Network.CreateCharacter(attrs)", "def parse_terminal(node, ty=None):\n if ty == \"METHOD_CALL\":\n return Terminal(\"var\", None)\n elif ty:\n return Terminal(\"var\", ty)\n else:\n return Terminal(\"var\", get_type(node))", "def terminalRenamed(self, term, oldName):\n newName = term._name\n for d in [self.terminals, self._inputs, self._outputs]:\n if oldName not in d:\n continue\n d[newName] = d[oldName]\n del d[oldName]\n\n self.graphicsItem().updateTerminals()\n self.sigTerminalRenamed.emit(term, oldName)", "def create(self) -> dict:\n\n questions = [\n Text(name=\"name\", message=\"Enter category name\"),\n ]\n\n return prompt(questions)", "def create_kernel(name: str) -> str:\n ...", "def configure_custom_terminal(new_path):\n lnp.userconfig['terminal'] = new_path\n lnp.userconfig.save_data()", "def generate_username(size=10, chars=string.ascii_lowercase + string.digits):\n suffix = gen_random_string(size, chars)\n return 'k8s-console-temp-user-' + suffix", "def create_name(length):\n if length <= 0:\n return None\n else:\n return create_random_name(length)", 
"def createNode(self, name):\n return Node(name)", "def ship_new(name):\n click.echo('Created ship %s' % name)", "def configure_terminal(termname):\n lnp.userconfig['terminal_type'] = termname\n lnp.userconfig.save_data()", "def createMachine():\n cd('/')\n machine = create(machineName, 'UnixMachine')\n cd('Machines/'+machineName+'/NodeManager/'+machineName)\n cmo.setName(machineName)\n cmo.setListenAddress(hostname)", "def create(args):\n print('Creates an HPC fleet with given name \"{}\"'.format(args.fleet_name))", "def new(name=None):", "def create_new_client(main: MainApplication) -> str:\n client = main.create_window(\"client\", \"IPLMS\", main.client_ui.get_layout())\n client[\"_CLIENT_ID_\"].Update(getUUID())\n client[\"_CP_NAME_IP_\"].Update(\"\")\n client[\"_CP_PHONE_IP_\"].Update(\"\")\n client[\"_CP_ADDRESS_IP_\"].Update(\"\")\n client.un_hide()\n event, values = client.read()\n client_logic = Client(main, event, values)\n name = client_logic.run(main)\n client.hide()\n return name", "def terminal(self):\n\t\treturn self._terminal", "def get_terminal():\n terminal = Gtk.ScrolledWindow()\n terminal.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.ALWAYS)\n textview = Gtk.TextView()\n textview.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)\n terminal.add(textview)\n return terminal, textview.get_buffer()", "def createSymbol(self, address: ghidra.program.model.address.Address, name: unicode, makePrimary: bool) -> ghidra.program.model.symbol.Symbol:\n ...", "def getName():\n\n tcflush(sys.stdin, TCIFLUSH)\n name = input(\" You say:\\n \")\n updateNameDatabase(name)\n return name", "def spawn_terminal_emulator(pty_term, gdb_pty):\n argv = ['xterm', '-e', 'python']\n argv.extend(sys.argv)\n debug('spawning: \\'%s\\'', argv)\n pid = os.fork()\n if pid == 0:\n os.close(gdb_pty.slave_fd)\n os.close(gdb_pty.master_fd)\n try:\n os.environ['PTY_TERM'] = pty_term\n os.execvp(argv[0], argv)\n except OSError, err:\n msg = 'argv: \\'%s\\'\\n' % argv\n msg += 'Spawing \\'%s\\' failed: %s' % (argv[0], err)\n abort(msg)\n return pid", "def create_screen(self, name):\n\n State.screen = Screen(name)\n State.save(name)", "def _create_name(self) -> str:\n return self.stream.__class__.__name__", "def get_xterm(name, geom=\"80x30+0+0\"):\n if name in _terminals:\n p, (files, stdin, stdout, stderr) = _terminals[name]\n else:\n p, files = start_display_xterm(name, geom)\n stdin = open(files[0])\n stdout = open(files[1], \"w\")\n stderr = open(files[2], \"w\")\n _terminals[name] = (p, (files, stdin, stdout, stderr))\n\n return p, (files, stdin, stdout, stderr)", "def object_creator(object_name):\n obj = TemplateClass()\n print(f\"Name of object:{obj.get_object_name()}\")\n obj.set_object_name(\"NewObjectName\")\n print(f\"This is the new object name: {obj.get_object_name()}\")", "def createLabel(self, address: ghidra.program.model.address.Address, name: unicode, makePrimary: bool) -> ghidra.program.model.symbol.Symbol:\n ...", "def rename_cmd(args):\n cmd = commands.Rename([args.args[0], 'NEW'])\n return cmd", "def main():\n print_title()\n run_terminal()", "def remember_create(self, name):\n\n self.__history.append(History.new_node(name))", "def create_new(self, name):\n validate_name(name, self.__class__.__name__)\n self.data = {\n \"author_name\": \"\",\n \"author_email\": \"\",\n \"git_profile_url\": \"\",\n \"starting_version\": \"0.1.0\",\n \"default_description\": \"My project, created using nusex\",\n \"preferred_license\": \"unlicense\",\n }", "def get_printer(name, color=None, ansi_code=None, 
force_color=False):\n\n if force_color or supports_color():\n if color is None and ansi_code is None:\n cpre_1, csuf_1 = hash_coloured_escapes(name)\n cpre_2, csuf_2 = hash_coloured_escapes(name + \"salt\")\n name = cpre_1 + \"+\" + cpre_2 + \"+\" + csuf_1 + \" \" + name\n else:\n name = colored(name, color=color, ansi_code=ansi_code)\n\n prefix = name + \": \"\n\n def printer(text):\n print(prefix + str(text))\n\n return printer", "def func(self):\n caller = self.caller\n\n if not self.args:\n caller.msg(\"Usage: +createNPC <name>\")\n return\n if not caller.location:\n # May not create an NPC when OOC\n caller.msg(\"You must have a location to create an NPC.\")\n return\n # Make the name always start with capital letter\n name = self.args.strip().capitalize()\n # Create an NPC in caller's location\n npc = create_object(\"characters.Character\",\n key=name,\n location=caller.location,\n locks=f\"edit:id({caller.id}) and perm(Builders);call:false()\")\n # Announce\n message = \"%s created the NPC '%s'.\"\n caller.msg(message % (\"You\", name))\n caller.location.msg_contents(message % (caller.key, name),\n exclude=caller)", "def __init__(self, numero_telefono, tarifa):\n self.tarifa = tarifa\n \"\"\"\n Llamamos al constructor de la clase Terminal\n \"\"\"\n Terminal.__init__(self, numero_telefono)", "def help_create(self):\n print(CREATE)", "def create_new_text(self, *args, **kw):\n shape_id = self._create('text', args, kw)\n self.variables.shape_ids.append(shape_id)\n canvas_coords = args[0]\n self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.TEXT, None)\n self.variables.shape_ids.append(shape_id)\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, canvas_coords)\n self.variables.current_shape_id = shape_id\n return shape_id", "def new(name, template, version):\n NewCommandExecutor().new(name, template, version)", "def create_new_board():\n\n board = Board()\n board.print_board()", "def make_window(color, title):\n w = turtle.Screen()\n w.bgcolor(color)\n w.title(title)\n return w", "def create(**cmd_opts):\n command_name = cmd_opts.get(CLI_CMDOPT.CMD_NAME, '')\n\n return CMDCONF_TYPES[command_name](**cmd_opts)", "def widget_terminal_title(\n widget: 'pygame_menu.widgets.Widget',\n widget_index: int = -1,\n current_index: int = -1\n) -> str:\n w_class_id = TerminalColors.BOLD + widget.get_class_id() + TerminalColors.ENDC\n if isinstance(widget, pygame_menu.widgets.Frame):\n w_title = TerminalColors.BRIGHT_WHITE + '┌━' + TerminalColors.ENDC\n w_title += f'{0} - {3}[{1},{2},'.format(w_class_id, *widget.get_indices(), TerminalColors.LGREEN)\n if widget.horizontal:\n w_title += 'H] '\n else:\n w_title += 'V] '\n if widget.is_scrollable:\n wsz = widget.get_inner_size()\n wsm = widget.get_max_size()\n wsh = wsm[0] if wsm[0] == wsz[0] else f'{wsm[0]}→{wsz[0]}'\n wsv = wsm[1] if wsm[1] == wsz[1] else f'{wsm[1]}→{wsz[1]}'\n w_title += f'∑ [{wsh},{wsv}] '\n w_title += TerminalColors.ENDC\n else:\n if widget.get_title() != '':\n title_f = TerminalColors.UNDERLINE + widget.get_title() + TerminalColors.ENDC\n w_title = f'{w_class_id} - {title_f} - '\n else:\n w_title = w_class_id + ' - '\n\n # Column/Row position\n w_title += TerminalColors.INDIGO\n cr = widget.get_col_row_index()\n w_title += '{' + str(cr[0]) + ',' + str(cr[1]) + '}'\n w_title += TerminalColors.ENDC\n\n # Add position\n w_title += TerminalColors.MAGENTA\n w_title += ' ({0},{1})'.format(*widget.get_position())\n w_title += TerminalColors.ENDC\n\n # Add size\n w_title += TerminalColors.BLUE\n w_title += ' 
({0},{1})'.format(*widget.get_size())\n w_title += TerminalColors.ENDC\n\n # Add mods\n w_title += TerminalColors.CYAN\n if widget.is_floating():\n w_title += ' Φ'\n if not widget.is_visible():\n w_title += ' ╳'\n if not widget.is_selectable:\n w_title += ' β'\n if widget.is_selected():\n w_title += TerminalColors.BOLD + ' ⟵'\n if current_index != -1 and current_index != widget_index:\n w_title += f'! [{widget_index}->{current_index}]'\n if widget.get_menu() is None:\n w_title += ' !▲'\n w_title += TerminalColors.ENDC\n\n return w_title", "def createDisk(self , name):\n return", "def create_character(c: Character) -> Character:\n c.create_character(c.dna_generator)\n return c", "def createProfile():\n \n checkRoot()\n \n print('Creating new bluetooth profile\\n')\n \n # Enable bluetooth service if not enabled\n changeBluetoothService(enable=True)\n \n # Choose bluetooth controller\n try:\n cntMAC = confirmOption(getControllers, '***Available bluetooth controllers***', (1,))\n except Return:\n return\n \n # Select bluetooth controller\n blueSelectStdout = execCommand('bluetoothctl select {}'.format(cntMAC))\n \n # Power on bluetooth controller, choose pairing agent\n bluePoweronStdout = execCommand('bluetoothctl power on')\n blueAgentonStdout = execCommand('bluetoothctl agent on')\n blueDefagentStdout = execCommand('bluetoothctl default-agent')\n \n \n # Scan for bluetooth devices and choose one \n try:\n deviceMAC = confirmOption(getDevices, '***Available bluetooth devices***', (1,0), message='Scanning for bluetooth devices...')\n except Return:\n return\n \n # Pair device\n # TO DO: Implement pairing with pin/confirmation\n print('\\nPairing...\\n')\n pairStdout = execCommand('bluetoothctl pair {}'.format(deviceMAC))\n while not 'Pairing successful' in pairStdout:\n print(pairStdout[:-1])\n pairNextOpt = input('\\33[97m(press r for retry or q to quit): ')\n \n while pairNextOpt not in ('r', 'q'):\n pairNextOpt = input('\\33[97m(press r for retry or q to quit): ')\n \n if pairNextOpt == 'q':\n return\n \n elif pairNextOpt == 'r':\n print('\\nPairing...\\n')\n pairStdout = execCommand('bluetoothctl pair {}'.format(deviceMAC))\n \n print('Pairing successful')\n \n # Create new profile file\n print('\\n***Create name of new profile***')\n profileName = input('Profile name: ')\n \n with open('/etc/bluectl/'+profileName, 'wt') as profileFile:\n os.chmod('/etc/bluectl/'+profileName, 0o600)\n profileFile.write('Controller={}\\n'.format(cntMAC))\n profileFile.write('Device={}\\n'.format(deviceMAC))\n profileFile.write('Name={}\\n'.format(profileName))\n \n print('\\nProfile was successfully created\\n')\n \n return", "def create_filegdb(name, path):\n arcpy.CreateFileGDB_management(path, name, \"CURRENT\")", "def create(dlg):\n page = PrinterPage()\n return page", "def shell(console):\n return create_shell(\n MANAGE_DICT.get(\"shell\", {}).get(\"console\", console), MANAGE_DICT\n )", "def openTerminal():\n\n nodes = nuke.selectedNodes()\n if nodes:\n for node in nodes:\n if node.Class() in ['Read', 'Write']:\n if 'views' in node.knobs().keys():\n path = os.path.dirname(node['file'].evaluate())\n if os.path.exists(path):\n view = node['views'].value().split(' ')[0]\n command = ['gnome-terminal', '--working-directory=%s/' % path]\n print runCommand(command)\n #subprocess.Popen(['gnome-terminal', '--working-directory=%s/' % path])\n else:\n raise UserWarning(\"No such file or directory\")\n else:\n path = os.path.dirname(node['file'].evaluate())\n if os.path.exists(path):\n command = 
['gnome-terminal', '--working-directory=%s/' % path]\n print runCommand(command)\n #subprocess.Popen(['gnome-terminal', '--working-directory=%s/' % path])\n else:\n raise UserWarning(\"No such file or directory\")\n else:\n raise UserWarning(\"No node to explore\")\n else:\n raise UserWarning(\"No node to explore\")", "def new_node(self, offset):\n # First we get the name of the node\n nameidx = self.string[offset:].find(b'\\0')\n name = self.string[offset: offset + nameidx]\n string_offset = offset + calc_length_word_align(nameidx + 1)\n node = FDTNode(name)\n return string_offset, node", "def create_script(game_title):\n script = '{}{}\\\"{}\\\"'.format(BashHeader, StreamString, game_title)\n print('\\nCreating a script for {}:'.format(game_title))\n print(script)\n return script", "def create_new_user():\n username = input('Vad är ditt användarID?: ')\n filename = 'user_db.json'\n \n with open(f'txt_files/{filename}', 'w') as f:\n json.dump(username, f)\n return username", "def create_Name(var_name, right_hand_side=True, line=0, column=0):\n name = ast.Name()\n name.id = var_name\n name.lineno = line\n name.col_offset = column\n\n if right_hand_side:\n name.ctx = ast.Load()\n else:\n name.ctx = ast.Store()\n\n return name", "def new_node(name):\n\n return name, []", "def creator(self) -> str:\n return pulumi.get(self, \"creator\")", "def creator(self) -> str:\n return pulumi.get(self, \"creator\")", "def newChemTorsionSysName(self, **attrlinks):\n return ChemTorsionSysName(self, **attrlinks)", "def new(cli):\n __check_in_autonotes_dir()\n\n # Filename\n filename = cli.config.new.filename\n file_date = datetime.now().strftime(\"%Y%m%d\")\n today = datetime.now().strftime(\"%b %d\")\n if not cli.config.new.no_date:\n filename += f'-{file_date}'\n filename += '.tex'\n if os.path.exists(filename):\n cli.log.error(f'File {emph(filename)} already exists.')\n exit(1)\n\n # Note title\n title = cli.config.new.title\n if title == '':\n title = f'Untitled {cli.config.new.filename}'\n\n # Open file for writing\n file = open(filename, 'w')\n cli.log.info(f'Created {emph(filename)}')\n new_note_str = f'% {filename}\\n\\n' \\\n '\\\\section{' \\\n f'{title.replace(\"_\", \" \")}' \\\n '}\\n' \\\n '\\\\marginnote{' \\\n f'{today}' \\\n '\\\\index{' \\\n f'{today}' \\\n '}}[.2cm]\\n'\n file.write(new_note_str)\n file.close()\n\n # Add the new file to master.tex\n __add([filename])", "def get_terminal_command(cmd, force_custom=False):\n log.d(\"Preparing terminal command for command line %s\", cmd)\n if not isinstance(cmd, list):\n cmd = [cmd, ]\n if sys.platform == 'darwin':\n return ['open', '-a', 'Terminal.app'] + cmd\n elif sys.platform.startswith('linux'):\n if force_custom:\n term = CustomTerminal.get_command_line()\n log.d(\"Using custom terminal: %s\", term)\n else:\n term = get_configured_terminal().get_command_line()\n log.d(\n \"Using configured terminal: %s, command line %s\", term,\n get_configured_terminal().name)\n if not term:\n raise Exception(\n 'No terminal configured! 
Use File > Configure Terminal.')\n if \"$\" in term:\n c = []\n for s in term:\n if s == '$':\n c += cmd\n else:\n c.append(s)\n return c\n else:\n return term + cmd\n raise Exception('No terminal launcher for platform: ' + sys.platform)", "def create_new_profile():\n client_nickname = input('Enter client profile name: ')\n client_username = input('Enter client username: ')\n client_hostname = input('Enter client hostname: ')\n client_port = '-p' + input('Enter client port: ')\n new_profile = SshUsers(client_nickname, client_username, client_hostname, client_port)\n return add_user_to_db(new_profile)", "def terminal_node(\n self,\n expr: Any = None,\n ) -> None:\n self.data.append(\n {\n \"type\": \"TERMINAL\",\n \"expr\": expr,\n \"id\": len(\n self.data,\n ),\n }\n )", "def create_label(self, name: str):\n return create_label(self.api_key, name)", "def test_create(self):\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"create\")\n self.assertEqual(f.getvalue().strip(),\n \"** class name missing **\")\n\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"create Manga\")\n self.assertEqual(f.getvalue().strip(),\n \"** class doesn't exist **\")\n\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"create BaseModel\")\n id = f.getvalue().strip()\n self.assertTrue(type(f), str)\n self.assertEqual(len(id), 36)", "def create(name):\n\t\treturn \"CREATE DATABASE {0};\".format(name)", "def new(self):\r\n gen_name = lambda nb: self.tr(\"untitled\") + (\"%d.py\" % nb)\r\n nb = 0\r\n while osp.isfile(gen_name(nb)):\r\n nb += 1\r\n fname = gen_name(nb)\r\n self.emit(SIGNAL('redirect_stdio(bool)'), False)\r\n fname = QFileDialog.getSaveFileName(self, self.tr(\"New Python script\"),\r\n fname, self.tr(\"Python scripts\")+\" (*.py ; *.pyw)\")\r\n self.emit(SIGNAL('redirect_stdio(bool)'), True)\r\n if not fname.isEmpty():\r\n fname = unicode(fname)\r\n default = ['# -*- coding: utf-8 -*-',\r\n '\"\"\"', osp.basename(fname), '\"\"\"', '', '']\r\n text = os.linesep.join(default)\r\n encoding.write(unicode(text), fname, 'utf-8')\r\n self.load(fname)", "def __prompt_name(self):\n self.clear_screen()\n self.__print_logo()\n\n name = input(\"[!] 
Enter new player name and press ENTER:\\n\\n \")\n if not (2 < len(name) < 16):\n self.clear_screen()\n self.__print_logo()\n print(\"Username must be between 3 and 15 characters.\")\n input(\"Press ENTER to return to player menu.\")\n elif name in self._roster.get_roster():\n self.clear_screen()\n self.__print_logo()\n print(\"Player already exists.\")\n input(\"Press ENTER to return to player menu.\")\n else:\n return name", "def command_create(self):\n command = []\n for macro in self.my_xml.tool_data[self.shell_dict['short_name']]['pre_tmpls']:\n command.append(self.my_xml.chth_tmpl.substitute(macro=macro))\n command.extend(self.pre_chth)\n command.append(Template('@CMD_BEGIN@ $short_name').substitute(self.shell_dict))\n command.extend(self.tool_chth)\n for macro in self.my_xml.tool_data[self.shell_dict['short_name']]['post_tmpls']:\n command.append(self.my_xml.chth_tmpl.substitute(macro=macro))\n\n return '\\n'.join(command)", "def make_window(colr, ttle):\n w = turtle.Screen()\n w.bgcolor(colr)\n w.title(ttle)\n return w", "def name():\n\treturn input('Masukkan Nama : ')", "def _get_terminal_exec(self):\n\n terminal = None\n\n try:\n with open(CONFIG_FILE_PATH) as conffile:\n config = yaml.load(conffile, yaml.SafeLoader)\n terminal = config.get('terminal', None)\n except yaml.YAMLError:\n print(\"Nautiterm: invalid configuration file at {path}, falling back\" +\n \" to {d}\".format(path=CONFIG_FILE_PATH, d=DEFAULT_TERMINAL_EXEC),\n file=sys.stderr)\n except IOError as ioe:\n # catch-all for permission errors and file not founds to be compatible\n # with Python 2 which doesn't have FileNotFoundError or PermissionError\n pass\n\n if not terminal:\n terminal = DEFAULT_TERMINAL_EXEC\n\n return terminal", "def create_random_username() -> str:\n return str(uuid.uuid4())", "def newSubsystemHandler(self, event):\n\n self.add_new_tab(new_subsystem_name=event.text())", "def get_name(self):\n return \"make\"", "def newTestTxt(self):\n self.newTab( extension = TestTxt.TYPE, repoDest=UCI.REPO_UNDEFINED )", "def create(self, messages):\n output = self.create_output(messages)\n if output:\n output.name = self.name\n\n return output", "def vte_new_tab_cwd():\n env = builtins.__xonsh_env__\n t = '\\033]7;file://{}{}\\007'\n s = t.format(env.get('HOSTNAME'), env.get('PWD'))\n print(s, end='', flush=True)", "def test_create(self):\n _help = \"[Usage: create <class name>]\\n\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"help create\")\n self.assertEqual(f.getvalue(), _help)", "def instantiateNewCmd(self):\n return QadSTRETCHCommandClass(self.plugIn)", "def create_launcher():\n data = textwrap.dedent('''\\\n #!/usr/bin/env python3\n\n \"\"\"QasteTray launcher.\"\"\"\n\n\n from qastetray.__main__ import main\n\n if __name__ == '__main__':\n main()\n ''')\n with open('build/usr/bin/qastetray', 'w') as f:\n f.write(data)" ]
[ "0.6440198", "0.6267893", "0.62144405", "0.60047346", "0.5891908", "0.5890155", "0.5865237", "0.57142264", "0.5690836", "0.5674329", "0.56701756", "0.56039375", "0.5576599", "0.5536455", "0.5528613", "0.5440345", "0.5381108", "0.5373771", "0.52840245", "0.52517766", "0.52179325", "0.51868945", "0.51438683", "0.51438683", "0.5118812", "0.51126826", "0.50778836", "0.50727916", "0.5062687", "0.50386935", "0.5027542", "0.50185555", "0.50129354", "0.49857807", "0.49829158", "0.4982182", "0.4975858", "0.49678332", "0.4958791", "0.49492577", "0.4948432", "0.49091557", "0.4900146", "0.4899951", "0.4899175", "0.48952878", "0.484088", "0.48359168", "0.48068273", "0.48014942", "0.47983855", "0.47974047", "0.47835502", "0.47801697", "0.4770722", "0.4757991", "0.47557142", "0.4753039", "0.47510827", "0.47482657", "0.473404", "0.47337312", "0.47322175", "0.47247356", "0.472266", "0.4713769", "0.47081098", "0.47023532", "0.47019017", "0.4701669", "0.4698574", "0.46938995", "0.46895495", "0.46867886", "0.4673053", "0.46676856", "0.46676856", "0.46603763", "0.46602184", "0.4650736", "0.4646617", "0.46363658", "0.46293843", "0.46175355", "0.46155825", "0.46108672", "0.4609363", "0.46056393", "0.46013087", "0.45993692", "0.45971686", "0.45962018", "0.4590384", "0.45900688", "0.45760858", "0.45758072", "0.45724896", "0.45714375", "0.4571144", "0.4569921" ]
document_score: 0.80201596
document_rank: 0
query: Update the kernels table.
def refresh_kernels() -> None: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh_kernelspecs() -> None:\n ...", "def callUpdateTable(self):\r\n self.updateTable()", "def gpu_kernels(self, node, name):\r\n raise MethodNotDefined, 'gpu_kernels'", "def run(self):\n\n self.sess.run(self.update_operations)", "def update(self, x_train_single, updated_h):\n # x_row = cp.array(x_train_single.toarray())\n # cp.cuda.Stream.null.synchronize()\n updater(x_train_single,updated_h,self.weights,self.num_features,self.num_models,self.learning_rate)\n # self.biases += updated_h * self.learning_rate", "def _modify_updates(self, updates):\n\n if self.max_kernel_norm is not None:\n W, = self.transformer.get_params()\n if W in updates:\n updated_W = updates[W]\n row_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=(0, 1, 2)))\n desired_norms = T.clip(row_norms, 0, self.max_kernel_norm)\n scales = desired_norms / (1e-7 + row_norms)\n updates[W] = (updated_W * scales.dimshuffle('x', 'x', 'x', 0))", "def __update_table(self):\n\n headlines = [\"\", ]\n headlines += range(1, + 1)\n headlines = [\" \"] + [str(x) for x in range(1, self.find_table_length() + 1)]\n self.__main_display_table.config(columns=headlines)\n\n for headline in headlines:\n self.__main_display_table.heading(headline, text=headline)\n self.__main_display_table.column(headline, anchor=\"center\", width=35)\n\n data = self.__display_buses_location()\n\n for i in self.__main_display_table.get_children():\n # deletes all the data in the chart\n self.__main_display_table.delete(i)\n for line in data:\n # inserts new data into the chart, goes line by line\n self.__main_display_table.insert(\"\", END, values=line)", "def update(self):\n self.cursor.execute(\"\"\"SELECT * FROM sensors_powersensor\"\"\")\n list = self.cursor.fetchall()\n for sensor in list:\n self.add(sensor[2], sensor[1])", "def add(self, kernels):\n if not isinstance(kernels, list):\n kernels = [kernels]\n self.kernels += kernels\n # update `_active_indices` from scratch: inactive kernels might be added\n self._active_indices = [idx for idx in range(len(self)) if \\\n not self.kernels[idx].stop()]\n self._ratio_nondom_offspring_incumbent = len(self) * [0] # len(self) changed", "def updateGrid(self) -> None:\n emu = self.emulator\n arch = self.root.arch\n registers = arch.registers\n self.__values.setRowCount(len(registers))\n for i, reg in enumerate(registers):\n self.__values.setRowHeight(i, self.__row_size)\n name = QTableWidgetItem(reg)\n name.setFlags(Qt.NoItemFlags)\n val = emu.get_register_value(reg) if emu.vm else 0\n old_val = self.__old_register_values.get(reg, 0)\n if type(val) in (int, int):\n value = format_address(val, arch)\n else:\n value = str(val)\n value = QTableWidgetItem( value )\n if old_val != val:\n self.__old_register_values[reg] = val\n value.setForeground(QColor(Qt.red))\n value.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable)\n self.__values.setItem(i, 0, name)\n self.__values.setItem(i, 1, value)\n return", "def update_vluln_table():", "def release(self):\n # type: () -> None\n for k in self.kernels:\n k.release()", "def update(self, a, b, c, d):\n self.table.ravel()[:] = [a, b, c, d]\n self.N = self.table.sum()", "def update_data(self):\n for sai_id_key in self.if_id_map:\n namespace, sai_id = mibs.split_sai_id_key(sai_id_key)\n if_idx = mibs.get_index_from_str(self.if_id_map[sai_id_key])\n counter_table = self.namespace_db_map[namespace].get_all(mibs.COUNTERS_DB, \\\n mibs.counter_table(sai_id))\n if counter_table is None:\n counter_table = {}\n self.if_counters[if_idx] = counter_table\n\n\n 
self.lag_name_if_name_map, \\\n self.if_name_lag_name_map, \\\n self.oid_lag_name_map, _, _ = Namespace.get_sync_d_from_all_namespace(mibs.init_sync_d_lag_tables, self.db_conn)\n\n self.if_range = sorted(list(self.oid_name_map.keys()) + list(self.oid_lag_name_map.keys()))\n self.if_range = [(i,) for i in self.if_range]", "def visit_table(self, sytable):\n self.current.update(sytable)", "def visit_table(self, sytable):\n self.current.update(sytable)", "def update(self):\n self.platform_list.update()\n self.exit_sprite.update()\n self.bagGroup.update()\n self.enemy_list.update()", "def update_knobs(self):\n self.previous_knobs = self.current_knobs\n self.current_knobs = {'Modulation' : 'fsk',\n 'Rs' : 0,\n 'EIRP' : 0,\n 'Speed' : 0}", "def update(self):\n current = LazyRegister(self.db)\n current.render()\n cur = self.db.cursor()\n for table in self.tables:\n if table in current.tables:\n additions, removals = current.tables[table].migrate(self.tables[table])\n for addition in additions:\n cur.execute(\"\"\"ALTER TABLE %s ADD COLUMN %s\"\"\" % (\n table, addition[1].get_sql()\n ))\n print('Added column: ', addition[0])\n for removal in removals:\n #cur.execute(\"\"\"ALTER TABLE %s DROP COLUMN %s\"\"\" % (\n # table, removal[0]\n #))\n #print('Removed column: ', removal[0])\n print('Did not removed column: ', removal[0])\n else:\n schema = self.tables[table].get_create_table_sql()\n cur.execute(schema)\n print('Added table %s' % table)", "def __initializeKernels(self):\n # FFT plans:\n self.__initializeDopplerIfftPlan() # for Doppler Ifft\n self.__initializeDemodIfftPlan() # for demod \n self.__initializeSNRFftPlan() # for findSNR\n \n # GPU kernels\n kernel = self.CudaKernels\n ## kernels for initialization\n self.GPU_multInputVectorWithMasks = kernel.get_function('multInputVectorWithMasks').prepare('PPP')\n \n self.GPU_complexConj = kernel.get_function('complexConj').prepare('P')\n self.GPU_scaleComplexByScalar = kernel.get_function('scaleComplexByScalar').prepare('Pf')\n self.GPU_setComplexArrayToZeros = kernel.get_function('setComplexArrayToZeros').prepare('P')\n \n ## kernels for doppler search\n self.GPU_filterMasks = kernel.get_function('multInputVectorWithShiftedMasksDopp').prepare('PPPPii')\n # for multInputVectorWithShiftedMasks\n self.numBlocks = self.Nfft/self.numThreads\n self.bShapeVecMasks = (int(self.numThreads),1,1)\n self.gShapeVecMasks = (int(self.numBlocks),1)\n assert self.bShapeVecMasks[0]*self.gShapeVecMasks[0]==self.Nfft,'Dimension mismatch'\n\n self.GPU_absSumDoppler = kernel.get_function('blockAbsSumAtomic').prepare('PPi')\n # for the absSumKernel to sum the rows together\n self.bShapeAbsSum = (128,1,1) # 128 and 2 in next line is just picked TODO: should be config val\n self.gShapeAbsSum = (2,int(self.doppIdxArrayLen)) # tweak these\n\n assert self.Nfft % self.bShapeAbsSum[0]*self.gShapeAbsSum[0] == 0,'Nfft has to be dividable by block and grid dimensions'\n\n self.GPU_estDoppler = kernel.get_function('findDopplerEst').prepare('PPPii')\n # for the small kernel that finds the doppler\n self.bShapeDopp = (self.num_masks,1,1)\n self.gShapeDopp = (1,1)\n\n self.GPU_setArrayToZeros = kernel.get_function('setArrayToZeros').prepare('P')\n # for the set to zero kernel for the sum\n self.bShapeZero = (int(self.num_masks),1,1)\n self.gShapeZero = (int(self.doppIdxArrayLen),1)\n\n ## for demodulation\n self.bShapeVecMasks2 = (int(256),1,1) ## 256 is just picked, TODO: should be config val\n self.gShapeVecMasks2 = (int(self.Nfft/self.bShapeVecMasks2[0]),1)\n 
self.complexShiftMulMasks = kernel.get_function('multInputVectorWithShiftedMask').prepare('PPPi')\n self.complexHeterodyne = kernel.get_function('complexHeterodyne').prepare('PPfffi')\n self.findcentres = kernel.get_function('findCentres').prepare('PPPPffii')\n self.bShapeCentres = (256,1,1) ## 256 is just picked, TODO: should be config val", "def command_update_hw(self, cmd):\n # TODO\n pass", "def update():", "def update():", "def update(self):\n # GPS data\n self.model.GPS_latitude.set(self._kernel.data.lat)\n self.model.GPS_longitude.set(self._kernel.data.lon)\n \n self.model.GPS_heading.set(self._kernel.data.gps_heading)\n self.model.GPS_speed.set(self._kernel.data.speed)\n self.model.GPS_altitude.set(self._kernel.data.altitude)\n \n self.model.GPS_fix.set(self._kernel.data.fix)\n self.model.GPS_satellite_count.set(self._kernel.data.num_sat)\n \n # compass data\n self.model.compass_heading.set(self._kernel.data.compass_heading)\n \n # time data\n self.model.time.set(self._kernel.data.timestamp.isoformat())\n self.model.date.set(self._kernel.data.datestamp.isoformat())\n \n # other data\n self.model.temperature.set(self._kernel.data.temperature)", "def _modify_updates(self, updates):\n wxf = self.wxf\n wyf = self.wyf\n wxf_updated = updates[wxf]\n wyf_updated = updates[wyf]\n nwxf = (wxf_updated.std(0) + SMALL)[numpy.newaxis, :]\n nwyf = (wyf_updated.std(0) + SMALL)[numpy.newaxis, :]\n meannxf = nwxf.mean()\n meannyf = nwyf.mean()\n # Center filters\n centered_wxf = wxf_updated - wxf_updated.mean(0)\n centered_wyf = wyf_updated - wyf_updated.mean(0)\n # Fix standard deviation\n wxf_updated = centered_wxf * (meannxf / nwxf)\n wyf_updated = centered_wyf * (meannyf / nwyf)\n updates[wxf] = wxf_updated\n updates[wyf] = wyf_updated", "def update(self):\n self.platform_list.update()\n self.enemy_list.update()", "def update(self):\n self.device.update()", "def update(self):\n self.device.update()", "def updateH(self,k_vec,it):\n self.k_vec = k_vec\n self.it = it\n self.H_kc = fl.H_k(k_vec, self.it, self.delta)", "def refresh_table(self):\n self._table['bounty_column'] = Driver.instance.find_elements(*self._selectors['bounty_column'])\n self._table['first_name_column'] = Driver.instance.find_elements(*self._selectors['first_name_column'])\n self._table['last_name_column'] = Driver.instance.find_elements(*self._selectors['last_name_column'])\n self._table['edit_column'] = Driver.instance.find_elements(*self._selectors['edit_column'])\n self._table['details_column'] = Driver.instance.find_elements(*self._selectors['details_column'])\n self._table['delete_column'] = Driver.instance.find_elements(*self._selectors['delete_column'])", "def kernel(self, kernel):\n self._context[\"kernel\"] = kernel", "def update(self):\n sess = u.get_default_session()\n # sess.run(self.update_op)\n u.run(self.update_op)", "def _refresh_table(self):\n self._column_selected()\n self._table_selected()\n self._column_selection_change()\n self.refresh_column_list()\n self.refresh_table_list()\n self.refresh_table()", "def update_modules_table_data(self, new_data):\n\n self.update_modules_table.emit([], True)\n for entry in new_data:\n self.update_modules_table.emit(entry, False)", "def __set_kernels(self):\n self.clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))", "def update(self):\n\n if not self.db: self.validate()\n\n self.logging.debug( \"update(%s)\" % (self.db) )\n\n for name in self.tables:\n self.dbs_tables[name]['md5'] = get_md5( self.dbs_tables[name]['path'] )\n\n self._get_magnitudes()\n 
self._get_events()", "def update(self, updates, predicate):\n for row in self.rows:\n if predicate(row):\n for column, new_value in updates.items():\n row[column] = new_value", "def defineUpdateOperations(self):\n self.updated_value = tf.placeholder(shape=[1, self.network.action_size], dtype=tf.float32)\n self.loss = tf.reduce_sum(tf.square(self.updated_value - self.network.policyLayer))\n self.trainer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)\n\n self.updateModel = self.trainer.minimize(self.loss)", "def update_weights(self):\n\t\tpass", "def update(self, probs: torch.Tensor):\n tree, capacity = self._create_tree(probs, self.tree)\n self.tree = tree\n self.capacity = capacity", "def update_sections_table_data(self, new_data):\n\n self.update_sections_table.emit([], True)\n for entry in new_data:\n self.update_sections_table.emit(entry, False)", "def update(self, y_preds, labels):\r\n predicted_labels = torch.argmax(y_preds, dim=1)\r\n batch_confusion_matrix = self._fast_hist(labels.numpy().flatten(), predicted_labels.numpy().flatten())\r\n self.confusion_matrix += batch_confusion_matrix", "def update(self):\n self.send_tf_msg()\n super(Map).update()", "def _resize(self, c):\n old = list(self.items())\n self._table = [None] * c\n self._n = 0\n for (k, v) in old:\n self[k] = v", "def update_all(self):\n self.update_head_node_ip()\n self.get_database_info()\n self.update_users()", "def update(self):\n self._device.update()", "def update(self):\n self.platform_list.update()\n #self.enemy_list.update()\n self.enemy_list.update()\n self.bullet_list.update()\n self.active_sprite_list.update()", "def modify_devices(self):\n\n for i in self._nodes.items():\n node = i[1]\n devices = node[\"devices\"]\n other_devices = devices[\"other_devices\"]\n kernel_devices = devices[\"kernel_devices\"]\n dpdk_devices = devices[\"dpdk_devices\"]\n\n if other_devices:\n self._modify_other_devices(\n node, other_devices, kernel_devices, dpdk_devices\n )\n\n # Get the devices again for this node\n self._get_device(node)\n devices = node[\"devices\"]\n kernel_devices = devices[\"kernel_devices\"]\n dpdk_devices = devices[\"dpdk_devices\"]\n\n klen = len(kernel_devices)\n if klen > 0:\n print(\"\\nThese devices are safe to be used with VPP.\\n\")\n VppPCIUtil.show_vpp_devices(kernel_devices)\n question = (\n \"\\nWould you like to use any of these \" \"device(s) for VPP [y/N]? \"\n )\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppd = {}\n for dit in kernel_devices.items():\n dvid = dit[0]\n device = dit[1]\n question = \"Would you like to use device {} \".format(dvid)\n question += \"for VPP [y/N]? \"\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppd[dvid] = device\n for dit in vppd.items():\n dvid = dit[0]\n device = dit[1]\n if (\n \"unused\" in device\n and len(device[\"unused\"]) != 0\n and device[\"unused\"][0] != \"\"\n ):\n driver = device[\"unused\"][0]\n question = \"Would you like to bind the driver {} for {} [y/N]? 
\".format(\n driver, dvid\n )\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n logging.debug(\n \"Binding device {} to driver {}\".format(\n dvid, driver\n )\n )\n ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)\n if ret:\n logging.debug(\n \"Could not bind device {}\".format(dvid)\n )\n dpdk_devices[dvid] = device\n del kernel_devices[dvid]\n\n dlen = len(dpdk_devices)\n if dlen > 0:\n print(\"\\nThese device(s) are already using DPDK.\\n\")\n VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)\n question = \"\\nWould you like to remove any of \"\n question += \"these device(s) [y/N]? \"\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppdl = {}\n for dit in dpdk_devices.items():\n dvid = dit[0]\n device = dit[1]\n question = \"Would you like to remove {} [y/N]? \".format(dvid)\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppdl[dvid] = device\n for dit in vppdl.items():\n dvid = dit[0]\n device = dit[1]\n if (\n \"unused\" in device\n and len(device[\"unused\"]) != 0\n and device[\"unused\"][0] != \"\"\n ):\n driver = device[\"unused\"][0]\n logging.debug(\n \"Binding device {} to driver {}\".format(dvid, driver)\n )\n ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)\n if ret:\n logging.debug(\"Could not bind device {}\".format(dvid))\n else:\n kernel_devices[dvid] = device\n del dpdk_devices[dvid]\n\n interfaces = {}\n for dit in dpdk_devices.items():\n dvid = dit[0]\n device = dit[1]\n VppPCIUtil.vpp_create_interface(interfaces, dvid, device)\n node[\"interfaces\"] = interfaces\n\n self._update_auto_config()\n self.updateconfig()", "def upgrade_kernel():\n execute(\"upgrade_kernel_node\", env.host_string)", "def list_kernels(self):\n kernels = []\n kernel_ids = super(MappingKernelManager, self).list_kernel_ids()\n for kernel_id in kernel_ids:\n model = self.kernel_model(kernel_id)\n kernels.append(model)\n return kernels", "def update(self, labels, preds):\n labels, preds = check_label_shapes(labels, preds, True)\n\n for label, pred in zip(labels, preds):\n self.metrics.update_binary_stats(label, pred)\n\n if self.average == \"macro\":\n self.sum_metric += self.metrics.fscore\n self.num_inst += 1\n self.metrics.reset_stats()\n else:\n self.sum_metric = self.metrics.fscore * self.metrics.total_examples\n self.num_inst = self.metrics.total_examples", "def refresh(self):\n node, ans = self.list_head.next.next, 0\n # first update key_nodes in even positions\n while node:\n ans += 1\n node = node.next.next\n # then update tree_nodes's current_btree_node in odd positions\n node = self.list_head.next\n while node:\n node.current_btree_node = self\n if node.next:\n node = node.next.next\n else:\n break\n self.size = ans", "def init_kernel(cls, m):\n pass", "def update(self):\n self.device = self._api.device_query(self._hardware_address, {})", "def update( ):\r\n pass", "def update(self, labels, preds):\n raise NotImplementedError()", "def _update_cells(self):\n for row_number in range(self.number_cells_y):\n for col_number in range(self.number_cells_x):\n if self.to_be_updated[row_number][col_number]:\n self.cells[row_number][col_number].update()", "def SetKernel(self, _arg: 'itkFlatStructuringElement2') -> \"void\":\n return _itkClosingByReconstructionImageFilterPython.itkClosingByReconstructionImageFilterIUC2IUC2SE2_SetKernel(self, _arg)", "def remove_kernel(self, kernel_id):", "def update_carried(self, data):\n self.use()\n gpu_data = np.array(data, dtype=np.float32)\n 
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbos[3])\n gl.glBufferData(gl.GL_ARRAY_BUFFER, gpu_data.nbytes, gpu_data, gl.GL_DYNAMIC_DRAW)", "def update_observation(self, g, inf_nodes, node, label, c):\n # rigorously speaking,\n # root sampler should be updated, for example\n # earliet node might be updated, or uninfected nodes get removed\n # print('update observation, self.root_sampler', self.root_sampler)\n self._update_root_sampler(inf_nodes, c)\n\n new_samples = self.sampler.update_samples(\n inf_nodes,\n {node: label},\n root_sampler=self.root_sampler\n )\n\n if not self.sampler.with_resampling:\n # should be deprecated because trees are re-sampled\n self.error_estimator.update_trees(new_samples, {node: label})\n else:\n # re-build the matrix because trees are re-sampled\n self.error_estimator.build_matrix(self.sampler._samples)", "def reset(self): # we can have stateful kernels now\n raise NotImplementedError()", "def kernel(self):\n\n # Create a blank kernel the appropriate size\n kernel = np.zeros((self.n_rows, self.n_cols), dtype=np.int)\n\n # Iterate through the offsets, turning on the correct pixels\n for offset in self.offsets:\n row, col = offset\n if np.all(offset == self.index):\n kernel[row, col] = 2\n else:\n kernel[row, col] = 1\n\n # Ensure that the index pixel is not zero for footprints where the\n # index pixel is not part of the footprint\n if kernel[self.index[0], self.index[1]] == 0:\n kernel[self.index[0], self.index[1]] = 3\n return kernel", "def _update_window(self, inst):\n inst[self.pid_cols] = self.pid.digitize(inst[self.pid_cols])\n label = inst[self.target_col_name]\n\n self.lab_counts[label] += 1\n self._calculate_probs_and_entropy_y()\n \n #categorical columns\n for col in self.cat_cols:\n if not self.isnan(inst[col]):\n val = inst[col]\n self.cat_counts[col][label][val] += 1\n self._calculate_probs_and_entropy_x([col])", "def update_state(self, logits, labels):\n labels = labels.unsqueeze(dim=1) #.cuda() # N,1,H,W\n predictions = logits.max(axis=1)[1].unsqueeze(dim=1) #.cuda() # N,1,H,W\n preds_1hot = torch.zeros_like(logits, dtype=bool).scatter_(1, predictions, True)#.cuda()\n labels_1hot = torch.zeros_like(logits, dtype=bool).scatter_(1, labels, True)#.cuda()\n\n if self.debug:\n if torch.cuda.is_available():\n for t in ['labels','preditions','preds_1hot','labels_1hot']:\n assert t.is_cuda\n assert logits.shape[0] == labels.shape[0]\n assert logits.shape == preds_1hot.shape\n assert logits.shape[1] >= labels.max()\n assert preds_1hot.shape == labels_1hot.shape\n assert preds_1hot.max() == labels_1hot.max() == 1\n assert preds_1hot.min() == labels_1hot.min() == 0\n assert preds_1hot.sum() == labels_1hot.sum() == logits.shape[0]*logits.shape[2]* logits.shape[3]\n\n self.tensors['TP'] += torch.sum(preds_1hot & labels_1hot , dim=(0, 2, 3)).double() # casting to double required for Cuda 10.0, not required for Cuda 10.1\n self.tensors['TN'] += torch.sum(~ preds_1hot & ~ labels_1hot , dim=(0, 2, 3)).double()\n self.tensors['FP'] += torch.sum(preds_1hot & ~ labels_1hot, dim=(0, 2, 3)).double()\n self.tensors['FN'] += torch.sum(~ preds_1hot & labels_1hot, dim=(0, 2, 3)).double()\n self.tensors['PX_CNT'] += torch.sum(labels_1hot , dim=(0, 2, 3)).double()\n self.tensors['samples_evaluated'] += logits.shape[0]\n \n if self.debug:\n if torch.cuda.is_available():\n for t in self.tensors: assert self.tensors[t].is_cuda", "def updateUserTable(self):\r\n\r\n self.view.userTable.clear()\r\n self.view.userTable.setRowCount(0)\r\n allUsersData = 
DBController().getAllUsers()\r\n\r\n #Dont do anything if there are no users\r\n if (len(allUsersData) == 0):\r\n return\r\n\r\n #Create user table columns based on DB fields\r\n self.view.userTable.setColumnCount(len(allUsersData[0].keys()))\r\n self.view.userTable.setHorizontalHeaderLabels(allUsersData[0].keys())\r\n self.view.userTable.verticalHeader().setVisible(False) #Hide rownums\r\n self.view.userTable.setEditTriggers(QAbstractItemView.NoEditTriggers) #Disable editable cells\r\n self.view.userTable.setSelectionBehavior(QAbstractItemView.SelectRows) #Dont select individual cells\r\n self.view.userTable.setSelectionMode(QAbstractItemView.SingleSelection) #Select only 1 row\r\n self.view.userTable.itemSelectionChanged.connect(self.handleUserButtonEnable)\r\n\r\n #Fill table\r\n for r in range(len(allUsersData)):\r\n self.view.userTable.insertRow(r)\r\n for c, field in enumerate(allUsersData[0].keys()):\r\n self.view.userTable.setItem(r , c, QTableWidgetItem(str(allUsersData[r][field])))", "def on_update_signal(self):\n self._table_model.dataChanged.emit(\n self._table_model.index(0, 0),\n self._table_model.index(self._config['row_count'] - 1, 5),\n [QtCore.Qt.DisplayRole])", "def update_controller(self):", "def kernel_version(self, kernel_version):\n\n self._kernel_version = kernel_version", "def update_device(self, dev_dict):\n # Note(jprabh1x): added bus,slot,function into fields dict as \n # seperate fields.\n no_changes = ('status', 'instance_uuid', 'id', 'extra_info', 'workload')\n map(lambda x: dev_dict.pop(x, None),\n [key for key in no_changes])\n\n # Note(jprabh1x): populating values for bus,slot,function from address in dev_dict.\n if dev_dict.has_key(\"address\"):\n \t\taddress = pci_utils.parse_address(dev_dict[\"address\"])\n \t\tdev_dict.update({'bus':str(address[1]), 'slot':str(address[2]), 'function':str(address[3])})\n for k, v in dev_dict.items():\n if k in self.fields.keys():\n self[k] = v\n else:\n extra_info = self.extra_info\n extra_info.update({k: str(v)})\n self.extra_info = extra_info", "def __put_kernels_on_grid(self, kernel, grid, pad=1):\n grid_Y, grid_X = grid\n # pad X and Y\n x1 = tf.pad(kernel, tf.constant([[pad, 0], [pad, 0], [0, 0], [0, 0]]))\n\n # X and Y dimensions, w.r.t. 
padding\n Y = kernel.get_shape()[0] + pad\n X = kernel.get_shape()[1] + pad\n\n # put NumKernels to the 1st dimension\n x2 = tf.transpose(x1, (3, 0, 1, 2))\n # organize grid on Y axis\n x3 = tf.reshape(x2, tf.stack([grid_X, Y * grid_Y, X, 3]))\n\n # switch X and Y axes\n x4 = tf.transpose(x3, (0, 2, 1, 3))\n # organize grid on X axis\n x5 = tf.reshape(x4, tf.stack([1, X * grid_X, Y * grid_Y, 3]))\n\n # back to normal order (not combining with the next step for clarity)\n x6 = tf.transpose(x5, (2, 1, 3, 0))\n\n # to tf.image_summary order [batch_size, height, width, channels],\n # where in this case batch_size == 1\n x7 = tf.transpose(x6, (3, 0, 1, 2))\n\n # scale to [0, 1]\n # x_min = tf.reduce_min(x7)\n # x_max = tf.reduce_max(x7)\n # x8 = (x7 - x_min) / (x_max - x_min)\n\n return x7", "def init():\n\tN = np.int32(DIM) #prepare for stitching\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN)/DIM\n\tHII_scale = np.float32(BOX_LEN)/HII_DIM\n\tshape = (N,N,N)\n\t\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\n\tkernel_source = open(cmd_folder+\"/initialize.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'VOLUME': VOLUME,\n\t\t'DIM': DIM\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_kernel = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tadj_complex_conj = main_module.get_function(\"adj_complex_conj\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d, np.int32(DIM), block=block_size, grid=grid_size)\n\n\t#import IPython; IPython.embed()\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d_imag, np.int32(DIM), block=block_size, grid=grid_size)\n\n\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\n\t#adj_complex_conj(largebox_d, DIM, block=block_size, grid=grid_size)\n\tlargebox = largebox_d.get()\n\t#np.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox)\n\n\t#save real space box before smoothing\n\tplan = Plan(shape, dtype=np.complex64)\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox_d.real.get_async())\n\n\t#save real space box after smoothing and subsampling\n\t# host largebox is still in k space, no need to reload from disk\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tHII_filter(largebox_d, N, ZERO, smoothR, block=block_size, grid=grid_size);\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tsubsample_kernel(largebox_d.real, smallbox_d, N, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_grid_size) #subsample in real 
space\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), smallbox_d.get_async())\n\n\t# reload the k-space box for velocity boxes\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\t\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,DIM), dtype=np.complex64)\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(num), block=block_size, grid=grid_size)\n\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=grid_size)\n\t\tplan.execute(largevbox_d, inverse=True)\n\t\tlargevbox_d /= scale**3\n\t\t#import IPython; IPython.embed()\n\t\tsubsample_kernel(largevbox_d.real, smallbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_grid_size)\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallbox_d.get())\n\n\treturn", "def update_H(self):", "def kernel_filter(self):\n self.config.logger.info(\"Creating and processing kernel density...\")\n\n # reset start time\n t0 = time.time()\n\n # instantiate kernel density class\n self.kd = KernelDensity(self.config.spatial_resolution, self.spat_coords, self.final_landclasses,\n self.config.kernel_distance, self.ngrids, self.order_rules)\n\n # preprocess year-independent kernel density data\n self.lat, self.lon, self.cellindexresin, self.pft_maps, self.kernel_maps, self.kernel_vector, self.weights = self.kd.preprocess_kernel_density()\n\n # log processing time\n self.config.logger.info('PERFORMANCE: Kernel density filter prepared in {0} seconds'.format(time.time() - t0))", "def get_kernel(self, kernel_id):", "def get_kernels(self, window_lmax=None):\n\n if window_lmax is None:\n window_lmax = self.lmax\n\n save_name = \"kernels\"\n save_attrs = [\"kern\", \"pkern\", \"mkern\", \"xkern\", \"window_lmax\"]\n ret = self.load_data(\n save_name,\n \"kernels\",\n fields=save_attrs,\n to_attrs=True,\n shape=self.kern_shape,\n shape_ref=\"kern\",\n value_ref={\"window_lmax\": window_lmax},\n )\n if ret is not None:\n return ret\n\n kern = OrderedDict()\n if self.pol:\n pkern = OrderedDict()\n mkern = OrderedDict()\n xkern = OrderedDict()\n else:\n pkern = None\n mkern = None\n xkern = None\n\n lmax = self.lmax\n pol = self.pol\n wls = self.wls\n\n all_ells = np.arange(2 * lmax + 1)\n for xname in self.map_pairs:\n kern[xname] = np.zeros((lmax + 1, 2 * lmax + 1))\n if pol:\n pkern[xname] = np.zeros((lmax + 1, 2 * lmax + 1))\n mkern[xname] = np.zeros((lmax + 1, 2 * lmax + 1))\n xkern[xname] = np.zeros((lmax + 1, 2 * lmax + 1))\n\n for l in all_ells[2 : lmax + 1]:\n if np.mod(l, 50) == 0:\n self.log(\"Computing kernels for ell {}/{}\".format(l, lmax), \"debug\")\n l2 = np.min([2 * lmax + 1, l + lmax + 1])\n # populate upper triangle\n for ll in all_ells[l:l2]:\n j0, j0_lmin, j0_lmax = xft.wigner3j(l, 0, ll, 0)\n if pol:\n j2, j2_lmin, j2_lmax = xft.wigner3j(l, 2, ll, -2)\n\n # only go up to window lmax\n j0_lmax = np.minimum(j0_lmax, window_lmax)\n\n # computed as in https://arxiv.org/abs/1909.09375\n # equations 128 - 136\n l3 = np.arange(j0_lmin, j0_lmax + 1)\n dl3 = 2.0 * l3 + 1.0\n vk = j0[l3] ** 2 * dl3\n if pol:\n sign = ((-1.0) ** (l + ll + l3)).astype(int)\n v = j2[l3] ** 2 * dl3\n vp = v * (1.0 + sign) / 2.0\n vm = v * (1.0 - sign) / 2.0\n vx = j2[l3] * j0[l3] * dl3\n for xname in self.map_pairs:\n wls1 = wls[xname][:, l3]\n 
kern[xname][l, ll] += (vk * wls1[0]).sum(axis=-1)\n if pol:\n pkern[xname][l, ll] += (vp * wls1[1]).sum(axis=-1)\n mkern[xname][l, ll] += (vm * wls1[1]).sum(axis=-1)\n xkern[xname][l, ll] += (vx * wls1[2]).sum(axis=-1)\n\n # apply symmetry relation\n for l in all_ells[2 : lmax + 1]:\n ll = np.arange(2 * lmax + 1)\n dll = (2.0 * ll + 1.0) / 4.0 / np.pi\n sll = slice(l, lmax + 1)\n for xname in self.map_pairs:\n # populate lower triangle (wigners are symmetric in l and ll)\n kern[xname][sll, l] = kern[xname][l, sll]\n if pol:\n pkern[xname][sll, l] = pkern[xname][l, sll]\n mkern[xname][sll, l] = mkern[xname][l, sll]\n xkern[xname][sll, l] = xkern[xname][l, sll]\n # apply ell scaling along the axis that we bin over\n kern[xname][l, :] *= dll\n if pol:\n pkern[xname][l, :] *= dll\n mkern[xname][l, :] *= dll\n xkern[xname][l, :] *= dll\n\n # save and return\n self.kern = kern\n self.pkern = pkern\n self.mkern = mkern\n self.xkern = xkern\n self.window_lmax = window_lmax\n\n return self.save_data(save_name, from_attrs=save_attrs)", "def _update(self, device=None):\n self._attr_available = True\n self.schedule_update_ha_state(True)", "def _update(self, nbrs, nbrs_y, query, query_y):\n\n # Set up the graph for our shared memory variables\n new_K, new_A, new_V = self.K, self.A, self.V\n\n # Condition (1): First returned neighbour shares the same query label\n correct_query = T.eq(nbrs_y[:, 0], query_y).nonzero()[0]\n correct_mem = nbrs[correct_query, 0] # Idx to memory keys\n\n normed_keys = tensor_norm(query[correct_query] + new_K[correct_mem])\n new_K = T.set_subtensor(new_K[correct_mem], normed_keys)\n new_A = T.set_subtensor(new_A[correct_mem], 0.)\n\n # Condition (2): First returned neighbour does not share query label.\n # Add the key and label from query to memory\n incorrect_mask = T.neq(nbrs_y[:, 0], query_y)\n incorrect_query = incorrect_mask.nonzero()[0]\n\n # We need to find len(incorrect_query) locations in memory to write to.\n # Noise is added to randomize selection.\n age_mask = T.ge(new_A, T.max(new_A) - self.C) #1d\n oldest_idx = tensor_choose_k(age_mask, self.rng,\n k=T.sum(incorrect_mask),\n random=True).flatten()\n\n new_K = T.set_subtensor(new_K[oldest_idx], query[incorrect_query])\n new_V = T.set_subtensor(new_V[oldest_idx], query_y[incorrect_query])\n new_A = T.set_subtensor(new_A[oldest_idx], 0.)\n\n # Increment the age of all non-updated indices by 1\n new_A = new_A + 1.\n new_A = T.inc_subtensor(new_A[correct_mem], -1.)\n new_A = T.inc_subtensor(new_A[oldest_idx], -1.)\n\n return OrderedDict({(self.K, new_K), (self.V, new_V), (self.A, new_A)})", "def update_obstacles(self, new_obs):\n self.obstacles = new_obs", "def update(self):\n self.wall_list.update()\n self.enemy_list.update()\n self.sludge.update()\n self.consumeable.update()\n self.can_climb.update()", "def Update(self, controller):\n pass", "def update_features(df, config):\n coefs = np.ones(shape=df.shape[1])\n df_coefs = pd.DataFrame(np.expand_dims(coefs, 0), columns=df.columns)\n for key in config[\"clustering\"]:\n if key in (\"r\", \"g\", \"b\", \"z\"):\n df_coefs[key] = float(config[\"clustering\"][key])\n else:\n key_columns = df.columns.str.startswith(key)\n coefs[key_columns] = float(config[\"clustering\"][key])\n coefs = np.squeeze(np.array(df_coefs))\n for idx, column in enumerate(df.columns):\n if coefs[idx] != 1:\n logger.info(\"Multiply %s feature by %s\", column, coefs[idx])\n df[column] = coefs[idx] * df[column]", "def update(self) -> None:\n self.all_sprites.update()", "def update(self, key: T, 
value: T) -> None:\n\n if self.load_factor >= self.resize_threshold:\n self.resize() # increase table size once threshold is reached\n\n idx: int = self.hash_fn(key) # get an index location for 'key'\n if self.table[idx] is None: # idx location not occupied\n self.table[idx] = (key, value)\n self.filled_count += 1\n else: # idx location occupied\n if self.table[idx][0] == key: # trying to insert to the same key\n self.table[idx] = (self.table[idx][0], value) # update 'value' at 'key'\n else:\n # probe for next free position using double hashing\n idx2: int = self.h2(key)\n i: int = 1\n while self.table[(idx + i * idx2) % self.table_size] is not None:\n i += 1\n self.table[(idx + i * idx2) % self.table_size] = (key, value) # insert at an unoccupied location\n self.filled_count += 1", "def k(self):\n self.kTable()", "def update_model(self):\n self.model = [[self.cubes[i][j].value for j in range(self.cols)] for i in range(self.rows)]", "def __init__(self, columns): #, length_scale, length_scale_bounds=()):\n# assert isinstance(column, (list, tuple, int)), \"must be int or list of ints\"\n# self.column = [column] if isinstance(column, int) else column\n# assert all(isinstance(i, int) for i in self.column), \"must be integers\"\n self.columns = columns \n\n kernels = [Projection([c]) for c in columns]\n #factor_name(c)) for c in columns]\n \n # collect all the kernels to be combined into a single product kernel\n super(SimpleFactorKernel, self).__init__(kernels)", "def update_all(self,delta_t):\n self.update_thrust()\n self.update_climb_rate()\n self.update_height(delta_t)", "def update(self):\n\n self._pre_calc_mb()", "def update(self):\n self._g, self._B = self._constraint_assembler.preallocate_g_and_B(self._no_of_dofs_unconstrained,\n self._dofidxs(),\n self._no_of_constraints_by_object())", "def buildKernel(self):\n\t\tkernel = list()\n\n\t\tif not self.spreadsheet_transposed:\t\t\t# If the spreadsheet is NOT transposed, i.e., the spreadsheet contains rows, transpose it so it contains columns\n\t\t\tself.transpose(1)\n\n\t\t# CALL THESE JUST ONCE BEFORE LOOP(S)\n\t\tappend = kernel.append\n\t\tlower = str.lower\n\t\tformat = str.format\n\t\t# - - - - - - - - - - - - - - - - - -\n\n\t\tfor c in xrange(len(self.spreadsheet)):\t\t# Iterate through the spreadsheet's columns to search for the \"kernel\"\n\t\t\tcolumn = self.spreadsheet[c]\n\n\t\t\tif lower(column[0]) == \"kernel\":\t\t# Examines to see if the column contains the kernel\n\t\t\t\tleftColumn = self.spreadsheet[c-1]\t# Assigns the left side of the kernel to this variable\n\t\t\t\trightColumn = self.spreadsheet[c+1]\t# Assigns the right side of the kernel to this variable\n\n\t\t\t\tfor r in xrange(len(column)):\t# Iterate through the column to create a string from the left kernel, center kernel, and right kernel columns\n\t\t\t\t\tif r != 0:\n\t\t\t\t\t\tappend(format(\"{0}\", lower(leftColumn[r] + column[r] + rightColumn[r])))\t# Append string to kernel list\n\n\t\treturn kernel", "def restart_kernel(self, kernel_id, now=False):", "def switch_update(event):\n if (\n not isinstance(event.device, rfxtrxmod.LightingDevice)\n or event.device.known_to_be_dimmable\n or event.device.known_to_be_rollershutter\n ):\n return\n\n new_device = get_new_device(event, config, RfxtrxSwitch)\n if new_device:\n new_device.apply_event(event)\n add_entities_callback([new_device])", "def update(self):\n self.platform_quicksand.update(self.platform_quicksand)\n self.behind_boss_man.update()\n self.platform_fallthrough.update()\n self.decor.update()\n 
self.decorLayer.update()\n self.platform_slime.update()\n self.platform_list.update()\n self.platform_choose.update()\n self.enemy_list.update()\n self.boss_man.update()\n self.end_blocks.update()\n self.attacks.update()\n self.kill_blocks.update()", "def update_firmware(self):\n self.execute_command(CMD_UPDATE_FIRMWARE)", "def update(self):\n self.brain.update()", "def upgrade_kernel(**kwargs):\n execute(\"upgrade_kernel_node\", env.host_string, **kwargs)", "def normalize():\n\n for k in allkernels:\n beta = 0.0\n for key,value in grammar.items():\n if key.startswith(k):\n beta += value\n print()\n for key,value in grammar.items():\n if key.startswith(k):\n value = float(value)/beta\n grammar[key] = value\n for key,value in grammar.items():\n print()\n print(key.ljust(30),str(value))", "def __update_feature_table_columns(self):\n self.__init_table()\n\n feature_dict_sorted_keys = feature_extractor_definition.keys()\n feature_dict_sorted_keys.sort()\n for key in feature_dict_sorted_keys:\n if not self.__has_feature_column(key):\n self.__add_feature_column(key, feature_extractor_definition[key])", "def update(self):\n _LOGGER.debug(\"Updating Warmup devices\")\n self._warmup.update_all_devices()" ]
[ "0.6155734", "0.57529676", "0.5730305", "0.56955045", "0.56193566", "0.5611958", "0.5498789", "0.5324433", "0.5293424", "0.5292726", "0.5291626", "0.52879345", "0.5268349", "0.5242812", "0.52361995", "0.52361995", "0.51978123", "0.5197042", "0.51949096", "0.5192662", "0.519087", "0.5156624", "0.5156624", "0.51537174", "0.51536757", "0.51419264", "0.51350445", "0.51350445", "0.51272947", "0.5105007", "0.5098571", "0.50976825", "0.5093456", "0.5067747", "0.5062818", "0.50513893", "0.50448346", "0.5041281", "0.50362", "0.5009601", "0.50071055", "0.5006079", "0.4974994", "0.49747187", "0.4953713", "0.4953619", "0.49491018", "0.4946362", "0.49449944", "0.4943915", "0.4927463", "0.49226418", "0.49182203", "0.49137846", "0.4913751", "0.4898439", "0.48930496", "0.4888927", "0.48730612", "0.4873024", "0.4865849", "0.48592144", "0.4853896", "0.48502997", "0.48483932", "0.48479676", "0.4840746", "0.4840091", "0.4837817", "0.4835514", "0.48288855", "0.48244953", "0.48224726", "0.48181003", "0.4807559", "0.48042122", "0.47974324", "0.4792005", "0.47808203", "0.47765958", "0.4775577", "0.4774321", "0.4771874", "0.47716534", "0.47713345", "0.4771073", "0.47596395", "0.4749478", "0.4747726", "0.47395194", "0.4738985", "0.47318134", "0.47250223", "0.47233608", "0.47228265", "0.4719097", "0.47148398", "0.4713597", "0.47107464", "0.47105584" ]
document_score: 0.7096408
document_rank: 0
query: Creates a new kernel and returns the ID
def create_kernel(name: str) -> str: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_kernel(self, kernel_id):", "def testCreateKernel(self):\n try:\n contextID, retErr = PyOpenCLInterface.CreateContext(self.testResources.listDevicesIDs, self.testResources.dictProperties)\n self.assertEqual(retErr, 0)\n # create program\n programID, retErr = PyOpenCLInterface.CreateProgram(contextID, self.testResources.programCodeStrings)\n self.assertEqual(retErr, 0)\n buildOptions = \"\"\n retErr = PyOpenCLInterface.BuildProgram(programID, self.testResources.listDevicesIDs, buildOptions)\n self.assertEqual(retErr, 0)\n #create kernel\n kernelName = self.testResources.KernelFunctionName\n kernelID, retErr = PyOpenCLInterface.CreateKernel(programID, kernelName)\n self.assertEqual(retErr, 0)\n kernelProperties, retErr = PyOpenCLInterface.GetKernelProperties(kernelID)\n self.assertEqual(kernelProperties['Program'], programID)\n self.assertEqual(kernelProperties['id'], kernelID)\n self.assertEqual(kernelProperties['Context'], contextID)\n self.assertEqual(kernelProperties['KernelFunctionName'], kernelName)\n retErr = PyOpenCLInterface.ReleaseKernel(kernelID)\n self.assertEqual(retErr, 0)\n retErr = PyOpenCLInterface.ReleaseProgram(programID)\n self.assertEqual(retErr, 0)\n listPrograms = PyOpenCLInterface.ListPrograms()\n self.assertEqual(listPrograms, [])\n retErr = PyOpenCLInterface.ReleaseContext(contextID)\n self.assertEqual(retErr, 0)\n except:\n print \"Exception caught:\", sys.exc_info()[0]", "def create_device(name, device_type, runtime):\n command = 'create \"%s\" \"%s\" \"%s\"' % (\n name, device_type.identifier, runtime.identifier)\n device_id = _run_command(command)\n\n # The device ID has a new line at the end. Strip it when returning.\n return device_id[:-1]", "def create_session(\n path: str,\n type: str,\n name: Optional[str] = None,\n kernel_name: Optional[str] = None,\n kernel_id: Optional[str] = None,\n) -> str:\n ...", "def LUCID_create(lucid_kernel=None, blur_kernel=None): # real signature unknown; restored from __doc__\n pass", "def start_kernel(self, kernel_name=None, **kwargs):", "def kernel_model(self, kernel_id):\n self._check_kernel_id(kernel_id)\n kernel = self._kernels[kernel_id]\n\n model = {\n \"id\":kernel_id,\n \"name\": kernel.kernel_name,\n \"last_activity\": isoformat(kernel.last_activity),\n \"execution_state\": kernel.execution_state,\n \"connections\": self._kernel_connections[kernel_id],\n }\n return model", "def create_kernel(ktype='sph-anarchy'):\n \n kernel = get_kernel(ktype)\n header = np.array([{'kernel': ktype, 'bins': kernsize}])\n np.savez('kernel_{}.npz'.format(ktype), header=header, kernel=kernel)\n \n print (header)\n \n return kernel", "async def _initialize(self, kernel_name, kernel_id_future):\n kernel_id = await kernel_id_future\n extension = None\n language = None\n\n kernel = self.get_kernel(kernel_id)\n\n try:\n language_to_extensions = {\"python\": \"py\"}\n language = kernel.kernel_spec_manager.get_all_specs()[kernel_name][\"spec\"][\"language\"]\n extension = language_to_extensions[language]\n except Exception:\n pass\n\n py_imports = language == \"python\" and self.python_imports\n\n config_code = self.initialization_code.get(kernel_name)\n\n if not extension and not py_imports and not config_code:\n # Save some effort\n return kernel_id\n\n self.log.info(\"Initializing kernel: %s\", kernel_id)\n\n client = ExecClient(kernel)\n\n from jupyter_core.paths import jupyter_config_path\n from pathlib import Path\n\n async with client.setup_kernel():\n if py_imports:\n code = 
python_init_import_code.format(modules=self.python_imports)\n await client.execute(code)\n if config_code:\n await client.execute(config_code)\n if extension:\n for base_path in map(Path, jupyter_config_path()):\n path = base_path / f\"kernel_pool_init_{kernel_name}.{extension}\"\n if path.exists():\n with open(path) as f:\n self.log.debug(\"Running %s for initializing kernel\", path)\n code = f.read()\n await client.execute(code)\n self.log.debug(\"Initialized kernel: %s\", kernel_id)\n return kernel_id", "def _launch_kernel(self, kernel_cmd, **kw):", "def _setup_kernel(self, program, kernel_name, *argv):\n kernel = cl.Kernel(program, kernel_name)\n for idx, value in enumerate(argv):\n kernel.set_arg(idx, value)\n\n return kernel", "def define_kernel(self, *args, **kwargs):\n k = getattr(kernels, self.kernel_name)\n k_base = getattr(kernels, self.base_kernel)\n\n kernel = k(base_graph_kernel=k_base, *args, **kwargs)\n\n return kernel", "def cuda_kernel_factory(nvrtc_kernel_str, dtypes, kernel_name=None):\n\n dtype_strs = get_dtype_strs(dtypes)\n\n for idx, dtype in enumerate(dtypes):\n nvrtc_kernel_str = nvrtc_kernel_str.replace(\n \"{%d}\" % idx, dtype_strs[idx]\n )\n\n kernel_name = f\"\"\"{uuid1()\n if kernel_name is None\n else kernel_name}_{\n \"\".join(dtype_strs).replace(\" \", \"_\")\n }\"\"\"\n\n nvrtc_kernel_str = \"%s\\nvoid %s%s\" % (\n extern_prefix,\n kernel_name,\n nvrtc_kernel_str,\n )\n\n if logger.should_log_for(logger.level_debug):\n logger.debug(str(nvrtc_kernel_str))\n\n return cp.RawKernel(nvrtc_kernel_str, kernel_name)", "def _newClusterId(self):\n return self.guidGenerator.new_id()", "def remove_kernel(self, kernel_id):", "def get_new_oid(cls):\n return OidGenerator.allocate()", "def create_program(template, func, loc=None):\n\n k_args = []\n\n func.set_cl_kernel_args()\n k_args.extend(func.cl_args_name)\n\n # Build the kernel args string.\n kernel_args = ',\\n '.join(k_args)\n \n # Get the kernel workgroup code\n workgroup_code = func.get_cl_workgroup_code()\n \n # Construct the neighbor loop code.\n neighbor_loop_code = \"for (int src_id=0; src_id<nbrs; ++src_id)\"\n\n return template%(locals())", "def allocate_osd_id(\n cluster,\n fsid,\n keyring,\n ):\n\n LOG.debug('Allocating OSD id...')\n try:\n osd_id = _check_output(\n args=[\n 'ceph',\n '--cluster', cluster,\n '--name', 'client.bootstrap-osd',\n '--keyring', keyring,\n 'osd', 'create', '--concise',\n fsid,\n ],\n )\n except subprocess.CalledProcessError as e:\n raise Error('ceph osd create failed', e, e.output)\n osd_id = must_be_one_line(osd_id)\n check_osd_id(osd_id)\n return osd_id", "def mol_kern_factory(kernel_type: str, *args, **kwargs):\n kernel_to_kernel_type = {\n MolGraphKernel: MOL_GRAPH_CONT_KERNEL_TYPES + MOL_GRAPH_INT_KERNEL_TYPES,\n MolFingerprintKernel: MOL_FINGERPRINT_KERNEL_TYPES,\n MolDistanceKernel: MOL_DISTANCE_KERNEL_TYPES,\n MolSimilarityKernel: MOL_SIMILARITY_KERNEL_TYPES\n }\n kernel_type_to_kernel = {\n kernel_type: kernel\n for kernel, kernel_type_list in kernel_to_kernel_type.items()\n for kernel_type in kernel_type_list\n }\n if kernel_type not in kernel_type_to_kernel:\n raise ValueError(\"Not recognized kernel type: {}\".format(kernel_type))\n kernel = kernel_type_to_kernel[kernel_type]\n return kernel(kernel_type, *args, **kwargs)", "async def init_new_kernel_configuration(self, request, image_id):\n\n try:\n self._image = Image(image_id=image_id)\n except ImageDoesNotExist:\n request.ret_error(IMAGE_MISSING)\n\n request.ret(READY)", "def ker_class():\n ker = Kernel()\n 
return ker", "def kernel_metadata(self, cfg):\n # Pushing a kernel through the API fails if len(title) > 50. (b/120288024)\n title = self.title\n dev = cfg.get('development', False)\n return dict(\n id=self.slug,\n language='python',\n is_private=not cfg.get('public', not dev),\n # Path is relative to where kernel-metadata.json file will be written, which is\n # notebooks/<track>/<cfg-tag>/kernels_api_metadata/<notebook-identifier>/kernel-metadata.json\n code_file=\"../../rendered/{}\".format(self.filename),\n enable_gpu=self.enable_gpu,\n # Enable internet in development mode so we can pip install learntools\n # TODO: Actually, probably only needs to be turned on if we're in\n # development mode AND this is an exercise kernel.\n enable_internet=dev if self.enable_internet is None else self.enable_internet,\n kernel_type='notebook',\n title=title,\n dataset_sources=sorted(self.dataset_sources),\n competition_sources=sorted(self.competition_sources),\n kernel_sources=sorted(self.kernel_sources),\n keywords=sorted(self.keywords),\n docker_image_pinning_type=\"latest\",\n )", "def create(self):\n print('Creating container: {}'.format(self.cfg['name']))\n create = self.docker_client.create(**self.env)\n return create['id']", "def kernel(self, kernel):\n self._context[\"kernel\"] = kernel", "def restart_kernel(self, kernel_id, now=False):", "def intel_run(kernel_call, kernel_def, kernel='autosa.tmp/output/src/kernel_autosa_opencl.cpp'):\n\n # Load kernel call file\n module_calls = []\n fifo_decls = []\n with open(kernel_call, 'r') as f:\n add = False\n while True:\n line = f.readline()\n if not line:\n break\n # Extract the fifo declaration and add to the list\n if add:\n line = line.strip()\n fifo_decls.append(line)\n if line.find('/* FIFO Declaration */') != -1:\n if add:\n fifo_decls.pop(len(fifo_decls) - 1)\n add = not add\n\n with open(kernel_call, 'r') as f:\n add = False\n module_call = []\n while True:\n line = f.readline()\n if not line:\n break\n # Extract the module call and add to the list\n if add:\n line = line.strip()\n module_call.append(line)\n if line.find('/* Module Call */') != -1:\n if add:\n module_call.pop(len(module_call) - 1)\n module_calls.append(module_call.copy())\n module_call.clear()\n add = not add\n\n module_defs = {}\n headers = []\n with open(kernel_def, 'r') as f:\n while True:\n line = f.readline()\n if not line:\n break\n if line.find('#include') != -1:\n line = line.strip()\n headers.append(line)\n\n with open(kernel_def, 'r') as f:\n add = False\n module_def = []\n while True:\n line = f.readline()\n if not line:\n break\n # Extract the module definition and add to the dict\n if add:\n module_def.append(line)\n # Extract the module name\n if (line.find('__kernel')) != -1:\n m = re.search('void (.+?)\\(', line)\n if m:\n module_name = m.group(1)\n if line.find('/* Module Definition */') != -1:\n if add:\n module_def.pop(len(module_def) - 1)\n module_defs[module_name] = module_def.copy()\n module_def.clear()\n add = not add\n\n # compose the kernel file\n kernel = str(kernel)\n generate_intel_kernel(kernel, headers, module_defs, module_calls, fifo_decls)", "def run_kernel(\n environment='emulator',\n timeout=60):\n block_file = OBJ_DIR + 'fsimage.bin'\n subprocess.check_output([BIN_DIR + 'mkfs', block_file, ELF_FILE],\n stderr=subprocess.STDOUT)\n\n return run_program(environment=environment, block_device=block_file,\n timeout=timeout, executable=PROJECT_TOP + '/software/kernel/kernel.hex')", "def _initialize_kernel(input_dim: int,\n kernel: str = 'RBF',\n 
use_single_gp: bool = False) -> GenericKernel:\n if kernel == 'RBF':\n return RBFKernel(input_dim, use_single_gp)\n elif kernel == 'Matern52':\n return Matern52Kernel(input_dim, use_single_gp)\n elif kernel == 'Matern32':\n return Matern32Kernel(input_dim, use_single_gp)\n elif kernel == 'RationalQuadratic':\n return RationalQuadraticKernel(\n input_dim=input_dim, use_single_gp=use_single_gp)\n elif kernel == 'Sigmoid':\n return SigmoidKernel(input_dim, use_single_gp)\n else:\n sys.exit(\"Error: specified Gaussian Process kernel not valid\")", "def create_tag_id():\n return uuid.uuid1().int", "def define_kernel(self, *args, **kwargs):\n base_kernel = self.base_kernel\n if base_kernel is None:\n base_kernel = 'VertexHistogram'\n k = getattr(kernels, self.kernel_name)\n k_base = getattr(kernels, base_kernel)\n kernel = k(base_graph_kernel=k_base, *args, **kwargs)\n return kernel", "def _CreateNewDisk(self, idx, params, _):\n # add a new disk\n disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)\n disk = self._GenerateDiskTemplateWrapper(idx, disk_template,\n params)\n new_disks = CreateDisks(self, self.instance, disks=[disk])\n self.cfg.AddInstanceDisk(self.instance.uuid, disk, idx)\n\n # re-read the instance from the configuration\n self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)\n\n if self.cluster.prealloc_wipe_disks:\n # Wipe new disk\n WipeOrCleanupDisks(self, self.instance,\n disks=[(idx, disk, 0)],\n cleanup=new_disks)\n\n changes = [\n (\"disk/%d\" % idx,\n \"add:size=%s,mode=%s\" % (disk.size, disk.mode)),\n ]\n if self.op.hotplug:\n result = self.rpc.call_blockdev_assemble(self.instance.primary_node,\n (disk, self.instance),\n self.instance, True, idx)\n if result.fail_msg:\n changes.append((\"disk/%d\" % idx, \"assemble:failed\"))\n self.LogWarning(\"Can't assemble newly created disk %d: %s\",\n idx, result.fail_msg)\n else:\n _, link_name, uri = result.payload\n msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,\n constants.HOTPLUG_TARGET_DISK,\n disk, (link_name, uri), idx)\n changes.append((\"disk/%d\" % idx, msg))\n\n return (disk, changes)", "def kernel_name():\n return \"python3\"", "def testRetainAndReleaseKernels(self):\n try:\n contextID, retErr = PyOpenCLInterface.CreateContext(self.testResources.listDevicesIDs, self.testResources.dictProperties)\n self.assertEqual(retErr, 0)\n # create program\n programID, retErr = PyOpenCLInterface.CreateProgram(contextID, self.testResources.programCodeStrings)\n self.assertEqual(retErr, 0)\n buildOptions = \"\"\n retErr = PyOpenCLInterface.BuildProgram(programID, self.testResources.listDevicesIDs, buildOptions)\n self.assertEqual(retErr, 0)\n #create kernel\n kernelName = self.testResources.KernelFunctionName\n kernelID, retErr = PyOpenCLInterface.CreateKernel(programID, kernelName)\n self.assertEqual(retErr, 0)\n listKernels = PyOpenCLInterface.ListKernels()\n self.assertEqual(listKernels, [kernelID])\n retErr = PyOpenCLInterface.RetainKernel(kernelID)\n self.assertEqual(retErr, 0)\n retErr = PyOpenCLInterface.ReleaseKernel(kernelID)\n self.assertEqual(retErr, 0)\n retErr = PyOpenCLInterface.ReleaseKernel(kernelID)\n self.assertEqual(retErr, 0)\n listKernels = PyOpenCLInterface.ListKernels()\n self.assertEqual(listKernels, [])\n except:\n print \"Exception caught:\", sys.exc_info()[0]\n # try to release again\n self.assertRaises(PyOpenCLInterface.error, PyOpenCLInterface.ReleaseKernel, kernelID)\n self.assertRaises(PyOpenCLInterface.error, PyOpenCLInterface.RetainKernel, kernelID)\n try:\n retErr = 
PyOpenCLInterface.ReleaseProgram(programID)\n self.assertEqual(retErr, 0)\n listPrograms = PyOpenCLInterface.ListPrograms()\n self.assertEqual(listPrograms, [])\n retErr = PyOpenCLInterface.ReleaseContext(contextID)\n self.assertEqual(retErr, 0)\n except:\n print \"Exception caught:\", sys.exc_info()[0]", "def create(self, nDeviceType):\n\t\treturn handle_to_object(call_sdk_function('PrlVmDev_Create', nDeviceType))", "def initialize(self, create_new=True, sysid=\"\"):", "def createDisk(self , name):\n return", "def start_kernel(self, kernel_id=None, path=None, **kwargs):\n if kernel_id is None:\n if path is not None:\n kwargs['cwd'] = self.cwd_for_path(path)\n kernel_id = yield gen.maybe_future(\n super(MappingKernelManager, self).start_kernel(**kwargs)\n )\n self._kernel_connections[kernel_id] = 0\n self.start_watching_activity(kernel_id)\n self.log.info(\"Kernel started: %s\" % kernel_id)\n self.log.debug(\"Kernel args: %r\" % kwargs)\n # register callback for failed auto-restart\n self.add_restart_callback(kernel_id,\n lambda : self._handle_kernel_died(kernel_id),\n 'dead',\n )\n else:\n self._check_kernel_id(kernel_id)\n self.log.info(\"Using existing kernel: %s\" % kernel_id)\n\n # Initialize culling if not already\n if not self._initialized_culler:\n self.initialize_culler()\n\n # py2-compat\n raise gen.Return(kernel_id)", "def kernel_for(notebook):\n with open(notebook, \"r\") as f:\n nb = json.load(f)\n\n md = nb.get(\"metadata\")\n if md:\n ks = md.get(\"kernelspec\")\n if ks:\n return ks[\"display_name\"]\n return None", "def create_key ():", "def get_kernel(self, name, signature):\n hdl = CudaKernelHandle()\n is_ndarray = []\n is_const = []\n dtypes = []\n pattern = re.compile(r\"\"\"^\\s*(const)?\\s*([\\w_]+)\\s*(\\*)?\\s*([\\w_]+)?\\s*$\"\"\")\n args = re.sub(r\"\\s+\", \" \", signature).split(\",\")\n for arg in args:\n match = pattern.match(arg)\n if not match or match.groups()[1] == 'const':\n raise ValueError(\n 'Invalid function prototype \"%s\". Must be in the '\n 'form of \"(const) type (*) (name)\"'%arg)\n is_const.append(bool(match.groups()[0]))\n dtype = match.groups()[1]\n is_ndarray.append(bool(match.groups()[2]))\n if dtype not in _DTYPE_CPP_TO_NP:\n raise TypeError(\n \"Unsupported kernel argument type %s. 
Supported types are: %s.\"%(\n arg, ','.join(_DTYPE_CPP_TO_NP.keys())))\n dtypes.append(_DTYPE_NP_TO_MX[_DTYPE_CPP_TO_NP[dtype]])\n\n check_call(_LIB.MXRtcCudaKernelCreate(\n self.handle,\n c_str(name),\n len(dtypes),\n c_array_buf(ctypes.c_int, array('i', is_ndarray)),\n c_array_buf(ctypes.c_int, array('i', is_const)),\n c_array_buf(ctypes.c_int, array('i', dtypes)),\n ctypes.byref(hdl)))\n\n return CudaKernel(hdl, name, is_ndarray, dtypes)", "def get_booted_kernel():\n try:\n return run(['/usr/bin/uname', '-r'])['stdout'].strip()\n except CalledProcessError as e:\n raise StopActorExecutionError(\n message='Unable to obtain release of the booted kernel.',\n details={'details': str(e), 'stderr': e.stderr}\n )", "def get_kernel(kernel_name, kernel_param):\n if kernel_name not in ['ExpSquared', 'ExpSquared+ExpSine2']:\n raise AttributeError(\"The only available kernels are 'ExpSquared' and \"\n \"'ExpSquared+ExpSine2'.\")\n kExpSquared = kernel_param[0] * george.kernels.ExpSquaredKernel(\n metric=kernel_param[1])\n if kernel_name == 'ExpSquared':\n kernel = kExpSquared\n elif kernel_name == 'ExpSquared+ExpSine2':\n kExpSine2 = kernel_param[4] * george.kernels.ExpSine2Kernel(\n gamma=kernel_param[5], log_period=kernel_param[6])\n kernel = kExpSquared + kExpSine2\n return kernel", "def create_gpu_device():\n d1 = dpctl.SyclDevice(\"gpu\")\n d2 = dpctl.select_gpu_device()\n assert d1 == d2\n print_device(d1)\n return d1", "def start_kernel(self, **kw):", "def GetCreateDevice(identifier, namespace, klass=Device, initfunction=None, **parameters):\n assert(issubclass(klass, Device))\n return pynt.xmlns.GetCreateRDFObject(identifier=identifier, namespace=namespace, klass=klass, \n verifyAttributes=False, initfunction=initfunction, **parameters)", "def create():", "def create():", "def CreateDevices(cfg,\n build_target=None,\n build_id=None,\n branch=None,\n kernel_build_id=None,\n kernel_branch=None,\n kernel_build_target=None,\n system_branch=None,\n system_build_id=None,\n system_build_target=None,\n bootloader_branch=None,\n bootloader_build_id=None,\n bootloader_build_target=None,\n gpu=None,\n num=1,\n serial_log_file=None,\n autoconnect=False,\n report_internal_ip=False,\n boot_timeout_secs=None,\n ins_timeout_secs=None):\n client_adb_port = None\n unlock_screen = False\n wait_for_boot = True\n logger.info(\n \"Creating a cuttlefish device in project %s, \"\n \"build_target: %s, \"\n \"build_id: %s, \"\n \"branch: %s, \"\n \"kernel_build_id: %s, \"\n \"kernel_branch: %s, \"\n \"kernel_build_target: %s, \"\n \"system_branch: %s, \"\n \"system_build_id: %s, \"\n \"system_build_target: %s, \"\n \"bootloader_branch: %s, \"\n \"bootloader_build_id: %s, \"\n \"bootloader_build_target: %s, \"\n \"gpu: %s\"\n \"num: %s, \"\n \"serial_log_file: %s, \"\n \"autoconnect: %s, \"\n \"report_internal_ip: %s\", cfg.project, build_target,\n build_id, branch, kernel_build_id, kernel_branch, kernel_build_target,\n system_branch, system_build_id, system_build_target, bootloader_branch,\n bootloader_build_id, bootloader_build_target, gpu, num, serial_log_file,\n autoconnect, report_internal_ip)\n # If multi_stage enable, launch_cvd don't write serial log to instance. 
So\n # it doesn't go WaitForBoot function.\n if cfg.enable_multi_stage:\n wait_for_boot = False\n device_factory = CuttlefishDeviceFactory(\n cfg, build_target, build_id, branch=branch,\n kernel_build_id=kernel_build_id, kernel_branch=kernel_branch,\n kernel_build_target=kernel_build_target, system_branch=system_branch,\n system_build_id=system_build_id,\n system_build_target=system_build_target,\n bootloader_branch=bootloader_branch,\n bootloader_build_id=bootloader_build_id,\n bootloader_build_target=bootloader_build_target,\n boot_timeout_secs=boot_timeout_secs,\n ins_timeout_secs=ins_timeout_secs,\n report_internal_ip=report_internal_ip,\n gpu=gpu)\n return common_operations.CreateDevices(\"create_cf\", cfg, device_factory,\n num, constants.TYPE_CF,\n report_internal_ip, autoconnect,\n serial_log_file, client_adb_port,\n boot_timeout_secs, unlock_screen,\n wait_for_boot)", "def allocate(cls):\n if OidGenerator.node_id is None:\n OidGenerator.node_id = OidGenerator._determine_node_id()\n OidGenerator._counter = 0\n OidGenerator._clock = 0\n OidGenerator._clock = OidGenerator._check_clock()\n\n OidGenerator._counter += 1\n if OidGenerator._counter > 4095:\n OidGenerator._counter = 0\n OidGenerator._clock = OidGenerator._check_clock()\n\n # A Platform OID looks like this in binary:\n # - 52 bits in total.\n # - Top 12 Bits: Node ID\n # - Middle 27 Bits: Clock State\n # - Last 13 Bits: Counter State\n\n mask = 0x000FFFFFFFFFFFFF\n result = mask & (\n (OidGenerator.node_id << 52)\n | (OidGenerator._clock << 13)\n | OidGenerator._counter\n )\n return result", "def create_entity(self):\n \n if self.ORION_CB.get_entity(self.params['name']) is None:\n \n print('[INFO]: Create new PID entity')\n \n entity_dict = {\"id\":self.params['name'], \"type\":'PID_controller'}\n for attr in ['Kp', 'Ti', 'Td', 'lim_low', 'lim_high', 'setpoint']:\n entity_dict.update({attr:{'value':self.params[attr],'type':'Number'}})\n\n entity_dict.update({'reverse_act':{'value':self.params['reverse_act'],'type':'Text'}})\n \n entity = filip.orion.Entity(entity_dict)#, attrs)\n\n self.ORION_CB.post_entity(entity)\n \n else:\n print('Entity name already assigned')", "def create_node(self, name, img, size):\n if self._create_needed(name):\n node = self._driver.create_node(\n name=name,\n image=self.get_ami_by_id(img),\n size=self.get_size_by_id(size))\n self._driver.wait_until_running([node])\n return node\n else:\n logger.debug(\"node %s already created, no action\" % name)\n return None", "def create_kernel(\n self,\n ) -> pyabc.StochasticKernel:\n def kernel_fun(x, x_0, t, par) -> float:\n \"\"\"The kernel function.\"\"\"\n # the kernel value is computed by amici already\n return x['llh']\n\n # create a kernel from function, returning log-scaled values\n kernel = pyabc.distance.SimpleFunctionKernel(\n kernel_fun, ret_scale=pyabc.distance.SCALE_LOG)\n\n return kernel", "def get_kernel_version():\r\n try:\r\n return utils.run('uname -r').stdout.strip()\r\n except:\r\n logging.info(\"Not Found\")\r\n return -1", "def create():\n pass", "def bdev_daos_create(client, num_blocks, block_size, pool, cont, name, oclass=None, uuid=None):\n params = {'num_blocks': num_blocks, 'block_size': block_size, 'pool': pool, 'cont': cont, 'name': name}\n if uuid:\n params['uuid'] = uuid\n if oclass:\n params['oclass'] = oclass\n return client.call('bdev_daos_create', params)", "def creator():\n return SeamlessFkIk()", "def make_knode(self,i,path_len=0):\n return Knode(path_len=path_len,\\\n ident=self.nodes[i].ident,\\\n lindex=i)", "def 
kernels_initialize(self, folder):\r\n if not os.path.isdir(folder):\r\n raise ValueError('Invalid folder: ' + folder)\r\n\r\n resources = []\r\n resource = {'path': 'INSERT_SCRIPT_PATH_HERE'}\r\n resources.append(resource)\r\n\r\n username = self.get_config_value(self.CONFIG_NAME_USER)\r\n meta_data = {\r\n 'id':\r\n username + '/INSERT_KERNEL_SLUG_HERE',\r\n 'title':\r\n 'INSERT_TITLE_HERE',\r\n 'code_file':\r\n 'INSERT_CODE_FILE_PATH_HERE',\r\n 'language':\r\n 'Pick one of: {' +\r\n ','.join(x for x in self.valid_push_language_types) + '}',\r\n 'kernel_type':\r\n 'Pick one of: {' +\r\n ','.join(x for x in self.valid_push_kernel_types) + '}',\r\n 'is_private':\r\n 'true',\r\n 'enable_gpu':\r\n 'false',\r\n 'enable_internet':\r\n 'true',\r\n 'dataset_sources': [],\r\n 'competition_sources': [],\r\n 'kernel_sources': [],\r\n 'model_sources': [],\r\n }\r\n meta_file = os.path.join(folder, self.KERNEL_METADATA_FILE)\r\n with open(meta_file, 'w') as f:\r\n json.dump(meta_data, f, indent=2)\r\n\r\n return meta_file", "def _new_node(self):\n self._size += 1\n return self._node_factory()", "def create_boot_dev(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmCfg_CreateBootDev', self.handle))", "def do_create(self, class_name):\n\n if not class_name:\n print(\"** class name missing **\")\n elif class_name not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n else:\n new_object = eval(class_name + \"()\")\n new_object.save()\n print(new_object.id)", "def xilinx_run(kernel_call, kernel_def, kernel='autosa.tmp/output/src/kernel_kernel.cpp', host='opencl'):\n\n # Load kernel definition file\n lines = []\n with open(kernel_def, 'r') as f:\n lines = f.readlines()\n\n # Simplify the expressions\n lines = simplify_expressions(lines)\n\n # Change the loop iterator type\n lines = shrink_bit_width(lines)\n\n # Insert the HLS pragmas\n lines = insert_xlnx_pragmas(lines)\n\n # Lift the split_buffers\n lines = lify_split_buffers(lines)\n\n kernel = str(kernel)\n print(\"Please find the generated file: \" + kernel)\n\n with open(kernel, 'w') as f:\n if host == 'opencl':\n # Merge kernel header file\n kernel_header = kernel.split('.')\n kernel_header[-1] = 'h'\n kernel_header = \".\".join(kernel_header)\n with open(kernel_header, 'r') as f2:\n header_lines = f2.readlines()\n f.writelines(header_lines)\n f.write('\\n')\n\n f.writelines(lines)\n\n # Load kernel call file\n with open(kernel_call, 'r') as f2:\n lines = f2.readlines()\n # Reorder module calls\n lines = reorder_module_calls(lines)\n f.writelines(lines)", "def upgrade_kernel():\n execute(\"upgrade_kernel_node\", env.host_string)", "def kernel(self):\n return self._context.get(\"kernel\", None)", "async def get_kernel(self):\n # Ensure that the kernel is launched\n await self.kernel_launched.wait()\n if self.child_kernel is None:\n self.log.error(\"the child kernel was not available\")\n\n return self.child_kernel", "def bdev_opal_create(client, nvme_ctrlr_name, nsid, locking_range_id, range_start, range_length, password):\n params = {\n 'nvme_ctrlr_name': nvme_ctrlr_name,\n 'nsid': nsid,\n 'locking_range_id': locking_range_id,\n 'range_start': range_start,\n 'range_length': range_length,\n 'password': password,\n }\n\n return client.call('bdev_opal_create', params)", "def auto_create_filesystem(self):\n\n key = self.km.gpg_key['fingerprint']\n root = yield BuddyNode.get_node(self.start_port, self.known_ip,\n self.known_port).get_root(key)\n\n if root:\n self.tree.register_root_inode(root)\n else:\n logger.info('Did not find 
existing root inode pointer.'\n ' Generating new root inode pointer.')\n self.tree.generate_root_inode()", "def new_vm():\n\tcfg_path = input(\"\\n\\nInsert the ClickOS .cfg file absolute path:\\n\")\n\n\tbridge_name = get_bridge_name(cfg_path)\n\tif len(bridge_name) == 0:\n\t\tprint(\"Couldnt find the bridge name.\")\n\t\treturn 0\n\n\tcreate_bridge(bridge_name)\n\n\tboot_vm(cfg_path)\n\n\treturn 1", "def create(name, **params):\n log.debug(\"Server Density params: %s\", params)\n params = _clean_salt_variables(params)\n\n params[\"name\"] = name\n api_response = requests.post(\n \"https://api.serverdensity.io/inventory/devices/\",\n params={\"token\": get_sd_auth(\"api_token\")},\n data=params,\n )\n log.debug(\"Server Density API Response: %s\", api_response)\n log.debug(\"Server Density API Response content: %s\", api_response.content)\n if api_response.status_code == 200:\n try:\n return salt.utils.json.loads(api_response.content)\n except ValueError:\n log.error(\"Could not parse API Response content: %s\", api_response.content)\n raise CommandExecutionError(\n \"Failed to create, API Response: {}\".format(api_response)\n )\n else:\n return None", "def create_identity(msg: CreateIdentity_request):\n \n # Check if we have received some data in the POST\n if len(msg.DID) == 0:\n log.error(\"No data received\")\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=\"No data received\")\n\n # Create the identity using the library\n try:\n error, didDoc = tf.create_identity_subnode(\n msg.DID, msg.domain_name, msg.website, msg.commercial_name, msg.new_privatekey, msg.parent_privatekey)\n except Exception as e:\n detail=str(e)\n log.error(detail)\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=detail)\n\n if error is not None:\n log.error(error)\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=error)\n\n return {\"didDoc\": didDoc.to_dict()}", "def prepare_kernel_string(kernel_name, kernel_string, params, grid, threads, block_size_names):\n logging.debug('prepare_kernel_string called for %s', kernel_name)\n\n grid_dim_names = [\"grid_size_x\", \"grid_size_y\", \"grid_size_z\"]\n for i, g in enumerate(grid):\n kernel_string = \"#define \" + grid_dim_names[i] + \" \" + str(g) + \"\\n\" + kernel_string\n for i, g in enumerate(threads):\n kernel_string = \"#define \" + block_size_names[i] + \" \" + str(g) + \"\\n\" + kernel_string\n for k, v in params.items():\n if k not in block_size_names:\n kernel_string = \"#define \" + k + \" \" + str(v) + \"\\n\" + kernel_string\n name = kernel_name\n #name = kernel_name + \"_\" + get_instance_string(params)\n #kernel_string = kernel_string.replace(kernel_name, name)\n return name, kernel_string", "def _load_program(self, kernel):\n return cl.Program(\n self.context, open('kernels/{0}'.format(kernel)).read()\n ).build()", "def gpu(device_id=0):\n return Context('gpu', device_id)", "def gpu(device_id=0):\n return Context('gpu', device_id)", "def kernel(self, verbose=False):\n\n return self._action(FP_ModuleMorphism.kernel, verbose)", "def machine_new(node=\"dev\", driver='virtualbox'):\n machine = Dockerizing(driver)\n\n # Check that the requested node does not already exist\n if node in machine.list():\n print(colors.warn | \"Failed:\", colors.bold |\n \"Machine '%s' Already exists\" % node)\n return\n machine.create(node)\n\n # Create the machine\n _logger.info(\"Preparing machine\", node)\n print(machine.create(node))\n _logger.info(colors.green | \"Created!\\n\\n\")", "def 
make_new_session():\n session = Session.objects.create(uuid=str(uuid4()), container_id=None)\n return session.id", "def CreateInstance(self):\n\n # Create host instances for cuttlefish device. Currently one host instance\n # has one cuttlefish device. In the future, these logics should be modified\n # to support multiple cuttlefish devices per host instance.\n instance = self._compute_client.GenerateInstanceName(\n build_id=self.build_info.build_id, build_target=self._build_target)\n\n if self._cfg.enable_multi_stage:\n remote_build_id = self.build_info.build_id\n else:\n remote_build_id = self._GetGcsBucketBuildId(\n self.build_info.build_id, self.build_info.release_build_id)\n\n if self._cfg.enable_multi_stage:\n remote_system_build_id = self.system_build_info.build_id\n else:\n remote_system_build_id = self._GetGcsBucketBuildId(\n self.system_build_info.build_id, self.system_build_info.release_build_id)\n\n host_image_name = self._compute_client.GetHostImageName(\n self._cfg.stable_host_image_name,\n self._cfg.stable_host_image_family,\n self._cfg.stable_host_image_project)\n # Create an instance from Stable Host Image\n self._compute_client.CreateInstance(\n instance=instance,\n image_name=host_image_name,\n image_project=self._cfg.stable_host_image_project,\n build_target=self.build_info.build_target,\n branch=self.build_info.branch,\n build_id=remote_build_id,\n kernel_branch=self.kernel_build_info.branch,\n kernel_build_id=self.kernel_build_info.build_id,\n kernel_build_target=self.kernel_build_info.build_target,\n blank_data_disk_size_gb=self._blank_data_disk_size_gb,\n extra_scopes=self._extra_scopes,\n system_build_target=self.system_build_info.build_target,\n system_branch=self.system_build_info.branch,\n system_build_id=remote_system_build_id,\n bootloader_build_target=self.bootloader_build_info.build_target,\n bootloader_branch=self.bootloader_build_info.branch,\n bootloader_build_id=self.bootloader_build_info.build_id)\n\n return instance", "def upgrade_kernel(**kwargs):\n execute(\"upgrade_kernel_node\", env.host_string, **kwargs)", "def test_create_device(self):\n pass", "def test_create_device(self):\n pass", "def get_kernel_version(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def create(cls,configuration,data_handler):\n ID = configuration[config.ID] \n d = configuration.get(config.DESCRIPTION,cls.description)\n n = configuration.get(config.NAME,cls.name)\n path = configuration['path'] \n \n #hardware._file.debug = True\n return NIPCI6602(path,ID,n,d)", "def create_new_volume(self, volumeInfo, change_name=True):\n size = volumeInfo.get(SVC_KEY_VDISK_CAPACITY)\n if (change_name):\n new_volume_name = self._get_new_volume_name(\n volumeInfo.get(SVC_KEY_VDISK_NAME))\n else:\n new_volume_name = volumeInfo.get(SVC_KEY_VDISK_NAME)\n if SVC_KEY_VOLUME_GROUP in volumeInfo:\n volumeGroup = volumeInfo.get(SVC_KEY_VOLUME_GROUP)\n elif self.dft_stg_pool:\n volumeGroup = self.dft_stg_pool\n else:\n volumeGroup = self.get_mdisk_grp_by_size(size)\n\n if volumeGroup is None:\n raise SVCNoSANStoragePoolException\n\n # iogrp parameter should not use name since it could be\n # customized. 
It is always safe to use iogrp 0.\n cmd = \"svctask mkvdisk -name %s -iogrp 0 -mdiskgrp %s \" \\\n \"-size %s -unit b\" % (new_volume_name, volumeGroup, size)\n\n output, err_output = self._svc_command(cmd)\n\n volume_uid = self.get_uid(new_volume_name)\n\n # Check if it got created\n if not volume_uid:\n # The SVC message of out of space is not really user friendly.\n # So, we will manully check whether the pool ran out of space\n free_capacity = self.get_mdisk_grp_size(volumeGroup)\n\n if float(size) > float(free_capacity):\n ex_args = {'pool_name': volumeGroup,\n 'size': size,\n 'free_capacity': free_capacity}\n raise SVCVolumeGroupOutOfSpace(**ex_args)\n if err_output:\n ex_args = {'new_volume_name': new_volume_name,\n 'err_output': err_output}\n raise SVCVolumeCreationFailed(**ex_args)\n else:\n # failed to create volume but with no error msg\n # really shouldn't hit this condition\n ex_args = {'cmd': cmd,\n 'e': _(\"No error available\")}\n raise SVCCommandException(**ex_args)\n\n return new_volume_name, volume_uid", "def kernel(self):\n return self._kernel", "def create_knx_device(\n hass: HomeAssistant,\n platform: SupportedPlatforms,\n knx_module: XKNX,\n config: ConfigType,\n) -> XknxDevice:\n if platform is SupportedPlatforms.light:\n return _create_light(knx_module, config)\n\n if platform is SupportedPlatforms.cover:\n return _create_cover(knx_module, config)\n\n if platform is SupportedPlatforms.climate:\n return _create_climate(knx_module, config)\n\n if platform is SupportedPlatforms.switch:\n return _create_switch(knx_module, config)\n\n if platform is SupportedPlatforms.sensor:\n return _create_sensor(knx_module, config)\n\n if platform is SupportedPlatforms.notify:\n return _create_notify(knx_module, config)\n\n if platform is SupportedPlatforms.scene:\n return _create_scene(knx_module, config)\n\n if platform is SupportedPlatforms.binary_sensor:\n return _create_binary_sensor(hass, knx_module, config)\n\n if platform is SupportedPlatforms.weather:\n return _create_weather(knx_module, config)", "def create_system(sys_structure):\n pass", "def create_instance(driver,\n user_id, sig_server_addr, sig_server_port, zone='us-central1-b',\n tags=[], branch='aosp-master', target='aosp_cf_x86_phone-userdebug'):\n\n target = target.replace('_','-')\n instance_name = f'halyard-{user_id}'\n image_family = f'halyard-{branch}-{target}'\n\n try:\n driver.ex_get_image_from_family(image_family)\n except:\n utils.fatal_error(f'Image family {image_family} does not exist.\\n \\\n New base images can be created using the `create_base_image` endpoint.')\n\n # Stops execution if instance already exists\n instance = utils.find_instance(driver, instance_name, zone)\n if instance:\n utils.fatal_error(f'Instance {instance_name} already exists.')\n\n build_node = driver.create_node(\n instance_name,\n 'n1-standard-4',\n None,\n location=zone,\n ex_image_family=image_family,\n ex_service_accounts=[{'scopes': ['storage-ro']}],\n ex_disk_size=30,\n ex_tags=tags)\n\n utils.wait_for_instance(instance_name, zone)\n\n print('successfully created new instance', instance_name)\n\n launch_cvd(instance_name, zone, sig_server_addr, sig_server_port, False)\n\n return {\"name\": instance_name}", "def do_insert(self, **kwargs):\n _allocation = RAMSTKAllocation()\n _allocation.revision_id = kwargs['revision_id']\n _allocation.hardware_id = kwargs['hardware_id']\n _allocation.parent_id = kwargs['parent_id']\n _error_code, _msg = RAMSTKDataModel.do_insert(\n self, entities=[\n _allocation,\n ])\n\n if _error_code == 
0:\n try:\n self.tree.create_node(\n 'Allocation ID: {0:d}'.format(_allocation.hardware_id),\n _allocation.hardware_id,\n parent=_allocation.parent_id,\n data=_allocation)\n self.last_id = max(self.last_id, _allocation.hardware_id)\n except DuplicatedNodeIdError:\n _error_code = 1\n _msg = ('RAMSTK ERROR: Node ID {0:s} already exists in the '\n 'Allocation tree for Hardware ID {1:s}').format(\n str(_allocation.hardware_id),\n str(_allocation.parent_id))\n\n return _error_code, _msg", "def get_create_named_node(self, node_id_name):\n n = node_id_name.split(\"_\", 1)\n node_id = int(n[0], 16)\n if node_id in self.nodes_dict:\n node = self.nodes_dict[node_id]\n else:\n node = self.get_create_node(node_id)\n\n if len(n) == 2 and node.node_name != n[1]:\n node.node_name = n[1]\n\n return node", "def flask_create_device():\n try:\n # retrieve the authorization token\n token = retrieve_auth_token(request)\n\n params = {\n 'count': request.args.get('count', '1'),\n 'verbose': request.args.get('verbose', 'false'),\n 'content_type': request.headers.get('Content-Type'),\n 'data': request.data\n }\n\n result = DeviceHandler.create_device(params, token)\n devices = result.get('devices')\n deviceId = devices[0].get('id')\n LOGGER.info(f' Creating a new device with id {deviceId}.')\n return make_response(jsonify(result), 200)\n except HTTPRequestError as e:\n LOGGER.error(f' {e.message} - {e.error_code}.')\n if isinstance(e.message, dict):\n return make_response(jsonify(e.message), e.error_code)\n\n return format_response(e.error_code, e.message)", "def new(self):\n uuid = uuid4().hex\n cur = self.conn.cursor()\n cur.execute(\n \"\"\"\n INSERT INTO experiments (uuid)\n VALUES(?)\n \"\"\", [uuid])\n cur.close()\n self.conn.commit()\n return uuid", "def bdev_xnvme_create(client, filename, name, io_mechanism, conserve_cpu=None):\n params = {\n 'name': name,\n 'filename': filename,\n 'io_mechanism': io_mechanism,\n }\n if conserve_cpu:\n params['conserve_cpu'] = conserve_cpu\n\n return client.call('bdev_xnvme_create', params)", "def CreateInstance(self, instance, image_name, image_project,\n build_target=None, branch=None, build_id=None,\n kernel_branch=None, kernel_build_id=None,\n kernel_build_target=None, blank_data_disk_size_gb=None,\n avd_spec=None, extra_scopes=None,\n system_build_target=None, system_branch=None,\n system_build_id=None):\n self._CheckMachineSize()\n\n # A blank data disk would be created on the host. Make sure the size of\n # the boot disk is large enough to hold it.\n boot_disk_size_gb = (\n int(self.GetImage(image_name, image_project)[\"diskSizeGb\"]) +\n blank_data_disk_size_gb)\n disk_args = self._GetDiskArgs(\n instance, image_name, image_project, boot_disk_size_gb)\n\n # Transitional metadata variable as outlined in go/cuttlefish-deployment\n # These metadata tell the host instance to fetch and launch one\n # cuttlefish device (cvd-01). 
Ideally we should use a separate tool to\n # manage CVD devices on the host instance and not through metadata.\n # TODO(b/77626419): Remove these metadata once the\n # cuttlefish-google.service is turned off on the host instance.\n metadata = self._metadata.copy()\n metadata[\"cvd_01_fetch_android_build_target\"] = build_target\n metadata[\"cvd_01_fetch_android_bid\"] = \"{branch}/{build_id}\".format(\n branch=branch, build_id=build_id)\n if kernel_branch and kernel_build_id:\n metadata[\"cvd_01_fetch_kernel_bid\"] = \"{branch}/{build_id}\".format(\n branch=kernel_branch, build_id=kernel_build_id)\n if kernel_build_target:\n metadata[\"cvd_01_fetch_kernel_build_target\"] = kernel_build_target\n if system_build_target:\n metadata[\"cvd_01_fetch_system_build_target\"] = system_build_target\n if system_branch and system_build_id:\n metadata[\"cvd_01_fetch_system_bid\"] = \"{branch}/{build_id}\".format(\n branch=system_branch, build_id=system_build_id)\n metadata[\"cvd_01_launch\"] = self._GetLaunchCvdArgs(avd_spec)\n\n # For the local image, we unset the _METADATA_TO_UNSET from\n # metadata to tell server not to launch cvd and not to fetch image\n # while instance is booted up.\n if avd_spec and avd_spec.image_source == constants.IMAGE_SRC_LOCAL:\n for meta in _METADATA_TO_UNSET:\n metadata.pop(meta, None)\n\n if blank_data_disk_size_gb > 0:\n # Policy 'create_if_missing' would create a blank userdata disk if\n # missing. If already exist, reuse the disk.\n metadata[\"cvd_01_data_policy\"] = self.DATA_POLICY_CREATE_IF_MISSING\n metadata[\"cvd_01_blank_data_disk_size\"] = str(\n blank_data_disk_size_gb * 1024)\n metadata[\"user\"] = getpass.getuser()\n # Update metadata by avd_spec\n # for legacy create_cf cmd, we will keep using resolution.\n # And always use avd_spec for acloud create cmd.\n # TODO(b/118406018): deprecate resolution config and use hw_proprty for\n # all create cmds.\n if avd_spec:\n metadata[constants.INS_KEY_AVD_TYPE] = avd_spec.avd_type\n metadata[constants.INS_KEY_AVD_FLAVOR] = avd_spec.flavor\n metadata[\"cvd_01_x_res\"] = avd_spec.hw_property[constants.HW_X_RES]\n metadata[\"cvd_01_y_res\"] = avd_spec.hw_property[constants.HW_Y_RES]\n metadata[\"cvd_01_dpi\"] = avd_spec.hw_property[constants.HW_ALIAS_DPI]\n if constants.HW_ALIAS_DISK in avd_spec.hw_property:\n metadata[\"cvd_01_blank_data_disk_size\"] = avd_spec.hw_property[\n constants.HW_ALIAS_DISK]\n # Use another METADATA_DISPLAY to record resolution which will be\n # retrieved in acloud list cmd. 
We try not to use cvd_01_x_res\n # since cvd_01_xxx metadata is going to deprecated by cuttlefish.\n metadata[constants.INS_KEY_DISPLAY] = (\"%sx%s (%s)\" % (\n avd_spec.hw_property[constants.HW_X_RES],\n avd_spec.hw_property[constants.HW_Y_RES],\n avd_spec.hw_property[constants.HW_ALIAS_DPI]))\n else:\n resolution = self._resolution.split(\"x\")\n metadata[\"cvd_01_dpi\"] = resolution[3]\n metadata[\"cvd_01_x_res\"] = resolution[0]\n metadata[\"cvd_01_y_res\"] = resolution[1]\n\n gcompute_client.ComputeClient.CreateInstance(\n self,\n instance=instance,\n image_name=image_name,\n image_project=image_project,\n disk_args=disk_args,\n metadata=metadata,\n machine_type=self._machine_type,\n network=self._network,\n zone=self._zone,\n extra_scopes=extra_scopes)", "def svc_kernel(name, kernel, random_state=None, probability=False, **kwargs):\n def _name(msg):\n return '%s.%s_%s' % (name, kernel, msg)\n\n hp_space = _svm_hp_space(_name, kernel=kernel, **kwargs)\n hp_space.update(_svc_hp_space(_name, random_state, probability))\n return scope.sklearn_SVC(**hp_space)", "def _mkfs (self,blockDevice,timer):\n # build command string\n fsTypeString = None\n if (self._activeFileSystemConfig.fileSystemType == blinky_generated_enums.FileSystemTypeType.kExt3):\n fsTypeString = \"ext3\"\n if (self._activeFileSystemConfig.fileSystemType == blinky_generated_enums.FileSystemTypeType.kExt4):\n fsTypeString = \"ext4\"\n else:\n self._log(\"unsupported-fs-type\").error(\"file system %s doesn't support type %s\",self._activeFileSystemConfig.fileSystemType)\n return ReturnCodes.kGeneralError\n \n mkfsCmd = self._activeCommandsConfig.mkfs\n mkfsCmdExtras = self._activeCommandsConfig.mkfsExtras\n cmdString = mkfsCmd%{self.BLOCK_DEVICE_COMMAND_ELEMENT:blockDevice,self.TYPE_COMMAND_ELEMENT:fsTypeString}\n\n # update with extra parameters\n cmdString = self.__joinCmdStringWithExtras(cmdString,mkfsCmdExtras)\n\n # run\n stdout,stderr,rc = self._runCommand(cmdString,timer)\n \n if (rc == 0):\n self._log(\"fs-created\").debug2(\"file system was successfully created on block device '%s'\",blockDevice)\n return ReturnCodes.kOk\n else:\n self._log(\"fs-creation-failed\").error(\"file system creation on block device '%s' failed! 
stderr=%s\",blockDevice,stderr)\n return ReturnCodes.kGeneralError", "def create_id():\n unique_id = UniqueId()\n unique_id.put()\n return unique_id.key().id()", "def create_id():\n unique_id = UniqueId()\n unique_id.put()\n return unique_id.key().id()", "def create_disk(self, disk):\n spec = {\n 'new_vmdk': {\n # Convert from mebibytes to bytes because VMDK is specified in bytes\n 'capacity': 1024\n * 1024\n * disk.size,\n }\n }\n\n try:\n backend_id = self.client.create_disk(disk.vm.backend_id, spec)\n except VMwareError as e:\n raise VMwareBackendError(e)\n else:\n disk.backend_id = backend_id\n disk.save(update_fields=['backend_id'])\n signals.vm_updated.send(self.__class__, vm=disk.vm)\n return disk", "def create_node(self, **kwargs):\n default = \"ubuntu.precise\"\n template = {\"name\":\"ubuntu\", \"args\":[]}\n if 'image' not in kwargs:\n kwargs['image'] = default\n \n for image in self.list_images():\n if image.name == kwargs['image']:\n template = {\"name\":image.extra[\"template_name\"],\n \"args\":image.extra[\"template_args\"]\n }\n \n name = kwargs['name']\n container = {\n \"cgroups\": [],\n \"name\": name,\n \"conf\": [],\n \"template\": template\n }\n \n self.connection.request(action=\"/v1/containers\", method=\"POST\", data=json.dumps(container))\n self.connection.request(action=\"/v1/containers/%s/actions/start\" % name, method=\"POST\")\n return self.get_node(name)" ]
[ "0.6998178", "0.6430162", "0.59596896", "0.59022087", "0.58619636", "0.58563536", "0.5824651", "0.5798457", "0.5699474", "0.56958765", "0.5624578", "0.5619856", "0.560809", "0.5569131", "0.5542638", "0.5518145", "0.5472115", "0.5447868", "0.5417481", "0.54118687", "0.5337183", "0.5333342", "0.5293815", "0.5230644", "0.5209176", "0.5197264", "0.5192696", "0.51854235", "0.5174386", "0.5173899", "0.5150559", "0.5106124", "0.5079882", "0.5064176", "0.5055109", "0.5041265", "0.5026886", "0.5025331", "0.50234854", "0.5018179", "0.50114495", "0.50094837", "0.499425", "0.49868897", "0.4986254", "0.4979094", "0.4979094", "0.4977264", "0.49659047", "0.49640283", "0.4952431", "0.49485838", "0.4943344", "0.49327806", "0.49288356", "0.49280727", "0.49243063", "0.4922968", "0.4920074", "0.4919124", "0.49125698", "0.49097937", "0.49080977", "0.49060652", "0.48975122", "0.4895568", "0.4895414", "0.4893703", "0.48849195", "0.4883174", "0.48817122", "0.4879096", "0.4875259", "0.4875259", "0.4874955", "0.4867729", "0.48506024", "0.48481405", "0.48403537", "0.4836269", "0.4836269", "0.48362678", "0.48351654", "0.48344287", "0.4826323", "0.482492", "0.48196456", "0.48188275", "0.48180106", "0.4814829", "0.4813563", "0.47969478", "0.47954452", "0.47953385", "0.47943124", "0.4785036", "0.47784764", "0.47784764", "0.47739857", "0.47638267" ]
0.7782273
0
Creates a new session or returns existing one if path exists
def create_session( path: str, type: str, name: Optional[str] = None, kernel_name: Optional[str] = None, kernel_id: Optional[str] = None, ) -> str: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_new_session():\n session = Session.objects.create(uuid=str(uuid4()), container_id=None)\n return session.id", "def _insert_new_session():\n request = self._make_request()\n session_existing = self._set_up_session_in_Redis_and_makeOne( # noqa: F841\n request, session_id, session_dict={\"visited\": True}, **session_args\n )\n return request", "def get_or_create_session(db):", "def create_new_session(self) -> None:\n try:\n session = self.client.create_session()\n logger.info(\"created session: %s\", session.id)\n self.join_session(session.id)\n location_config = self.app.guiconfig.location\n self.session.location = SessionLocation(\n x=location_config.x,\n y=location_config.y,\n z=location_config.z,\n lat=location_config.lat,\n lon=location_config.lon,\n alt=location_config.alt,\n scale=location_config.scale,\n )\n except grpc.RpcError as e:\n self.app.show_grpc_exception(\"New Session Error\", e)", "def create_session(\n self,\n environ: str,\n session_request_to_use: typing.Optional[SessionRequest] = None,\n ) -> Session:\n self.poll_sessions() # make sure there is an up to date picture of Sessions before proceeding\n self.check_session_can_start(session_request_to_use)\n return self.perform_session_create(\n environ, self.project.session_parameters.serialize()\n )", "def perform_session_create(self, environ: str, session_parameters: dict) -> Session:\n session_parameters[\"mounts\"] = []\n attach_context = self.client.start_session(environ, session_parameters)\n\n # TODO should we record some of the request\n # headers e.g. `REMOTE_ADDR`, `HTTP_USER_AGENT`, `HTTP_REFERER` for analytics?\n\n return Session.objects.create(\n project=self.project,\n url=attach_context.url,\n execution_id=attach_context.execution_id,\n client_class_id=self.client.class_id,\n )", "def session(self):\n if not self._session: #Create new session if none exists\n return self._new_session()\n return self._session", "def test_create_session(self):\n study_id = self.storage.create_study(sample_study_spec())\n\n session = sample_session(study_id=study_id)\n self.storage.create_session(session)\n\n self.assertEqual(self.storage.get_session(study_id, session.id), session)", "async def create_session(session: SessionModel, mongo: MongoDB = mongodb) -> SessionOutModel:\n if not await mongo.session_coll.find_one({\"id\": session.id}):\n await mongo.session_coll.insert_one(session.dict())\n else:\n await mongo.session_coll.update_one({\"id\": session.id}, {'$set': {'status': session.status}})\n return SessionOutModel(**session.dict())", "def create_new_session(self, username):\n return self.session_mgr.create_new_session(username)", "def get_session():\n if not hasattr(get_session, \"session\"):\n get_session.session = requests_cache.CachedSession(\n cache_name=CACHE_PATH.rstrip(\".sqlite\"),\n expire_after=518400, # 6 days\n )\n adapter = HTTPAdapter(max_retries=3)\n get_session.session.mount(\"http://\", adapter)\n get_session.session.mount(\"https://\", adapter)\n return get_session.session", "def _get_session():\n api_version = \"1.0\"\n originator = \"salt_cloud_{}_driver\".format(__virtualname__)\n url = config.get_cloud_config_value(\n \"url\", get_configured_provider(), __opts__, search_global=False\n )\n user = config.get_cloud_config_value(\n \"user\", get_configured_provider(), __opts__, search_global=False\n )\n password = config.get_cloud_config_value(\n \"password\", get_configured_provider(), __opts__, search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n \"ignore_ssl\",\n 
get_configured_provider(),\n __opts__,\n default=False,\n search_global=False,\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n \"url: %s user: %s password: %s, originator: %s\",\n url,\n user,\n \"XXX-pw-redacted-XXX\",\n originator,\n )\n session.xenapi.login_with_password(user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = str(ex.__dict__[\"details\"][1])\n slash_parts = url.split(\"/\")\n new_url = \"/\".join(slash_parts[:2]) + \"/\" + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n \"session is -> url: %s user: %s password: %s, originator:%s\",\n new_url,\n user,\n \"XXX-pw-redacted-XXX\",\n originator,\n )\n session.xenapi.login_with_password(user, password, api_version, originator)\n return session", "def new_session(self):\n return self._SessionLocal()", "def session(get_session):\n return get_session()", "def create_session(self):\n self._session = self.create_scoped_session()\n self.session = self._session()", "def create_session(self, **params):\n raise NotImplementedError('Should be implemented by a sub-class.')", "def _session(self):\n if self.session is None:\n self.session = create_session(self.config, self.auth)\n return self.session", "def get_session(self, renew: Optional[bool] = False) -> neo4j.work.simple.Session:\n if self.session is None or renew:\n sess = self.driver.session()\n self.session = sess\n return self.session", "def session():\n def session():\n return BaseUrlSession()\n return session", "def new_session(self):\n return self.Session()", "async def create(self, session, *, dc=None):\n response = await self._api.put(\n \"/v1/session/create\",\n data=session,\n params={\"dc\": dc})\n return response.body", "def init_session(\n session_path=None\n , session_path_header=None\n , session_domain=None\n , session_secure=False\n , session_httponly=True\n , session_persistent=True\n , **kwargs\n ):\n \n # Guard against running twice\n if hasattr(cherrypy.serving, \"session\"):\n return\n \n request = cherrypy.serving.request\n session_cookie = kwargs.get('session_cookie', Session.session_cookie)\n cookie_timeout = kwargs.get('session_timeout', Session.timeout)\n \n # Check if request came with a session ID\n id = None\n if session_cookie in request.cookie:\n id = request.cookie[session_cookie].value\n log('ID obtained from request.cookie: %r' % id)\n else:\n log('New session (no cookie)')\n \n # Create and attach a new Session instance to cherrypy.serving.\n # It will possess a reference to (and lock, and lazily load)\n # the requested session data.\n cherrypy.serving.session = sess = Session(id, **kwargs)\n # Save a copy of our session in case we get overwritten by a user slate.\n cherrypy.serving.sessionActual = sess", "def test_create_session(self):\n finder = FinderInsidePro(self.test_key)\n session_id = finder.create_session(2811)\n assert isinstance(session_id, str)\n assert session_id == finder.session_id\n assert len(session_id)", "def get_or_create_sessions(self):\n\t\tpath = f'{self.BIKE_ENDPOINT}user/current/session?{self.secret_key}'\n\t\tresponse = requests.get(path).json()\n\t\tself.check_api_key(response)\n\n\t\treturn response", "def create_session(self, session_id=None):\n\n # create random id when necessary, seems to be 1 case wanted, based on legacy code\n # creating a value so high, typical client side generation schemes hopefully wont collide\n if not session_id:\n session_id = next(\n session_id for session_id in xrange(60000, 65000)\n if session_id not 
in self.sessions\n )\n\n # create and add session to local manager\n session = Session(session_id, config=self.config)\n self.add_session(session)\n\n # add shutdown handler to remove session from manager\n session.shutdown_handlers.append(self.session_shutdown)\n\n return session", "def insert_item(self, token_object,\n new_session, session_time=timedelta(0)):\n if self.file_type == settings.APACHE_COMMON:\n url_obj = get_or_create(\n self.session, Uurl, url=token_object.resource_requested)\n elif self.file_type == settings.APACHE_COMBINED:\n url_obj = get_or_create(\n self.session, Uurl, url=token_object.resource_requested)\n elif self.file_type == settings.SQUID:\n url_obj = get_or_create(\n self.session, Uurl, url=token_object.url)\n\n # If this is a new session\n if new_session:\n # Create session object\n session_obj = Session(\n ip=token_object.ip_address, session_time=session_time)\n # Set start and end time\n session_obj.start_time = token_object.date_time\n session_obj.end_time = token_object.date_time\n # If new_session is False, new session may or may not be created\n # (depending upon the session_time)\n else:\n # Try to get session object\n session_obj = get_or_create(\n self.session, Session, ip=token_object.ip_address)\n # If the object is a new session\n if session_obj.session_time is timedelta(0):\n session_obj.start_time = token_object.date_time\n\n session_obj.session_time = session_time\n session_obj.end_time = token_object.date_time\n\n # Add url to session\n session_obj.session_urls.append(url_obj)\n self.session.add(session_obj)", "def test_new_session(self):\r\n cookie = Cookie()\r\n req = Mock(incookie=Cookie(), outcookie=cookie, authname='anonymous',\r\n base_path='/')\r\n session = Session(self.env, req)\r\n self.assertEqual(session.sid, cookie['trac_session'].value)\r\n cursor = self.db.cursor()\r\n cursor.execute(\"SELECT COUNT(*) FROM session\")\r\n self.assertEqual(0, cursor.fetchone()[0])", "def _create_session_data(self, abs_path, sess_root):\n sess_path = os.path.join(abs_path, sess_root)\n if not os.path.exists(sess_path):\n os.makedirs(sess_path)\n sess_id = len(os.listdir(sess_path))\n sess_path = os.path.join(sess_path, str(sess_id))\n print(\"SESSION PATH:\", sess_path)\n print(\"SESSION ID:\", sess_id) \n return sess_id, sess_path", "def create(self):\n\t\tif self._session:\n\t\t\tself.close()\n\n\t\tif not self._session:\n\t\t\tself._session = requests.Session()\n\t\t\tself._session.mount('http://', ra.HTTPAdapter(max_retries=self._max_retries))\n\t\t\tself._session.mount('https://', ra.HTTPAdapter(max_retries=self._max_retries))\n\n\t\t\tmsg = u'Created internal requests Session instance {0:#0x}'\n\t\t\tlog_with_debug_info(logging.DEBUG, msg.format(id(self._session)))", "def get_write_session() -> Session:\n return _write_session()", "def create(self):\n if self._session:\n self.close()\n\n if not self._session:\n self._session = requests.Session()\n self._session.mount('http://', ra.HTTPAdapter(max_retries=self._max_retries))\n self._session.mount('https://', ra.HTTPAdapter(max_retries=self._max_retries))\n\n msg = u'Created internal requests Session instance {0:#0x}'\n utils.log_with_debug_info(logging.DEBUG, msg.format(id(self._session)))", "def create_session():\n with open(CONFIG_PATH) as config_file:\n config_json = json.load(config_file)\n return boto3.Session(\n aws_access_key_id=config_json['awsAccessKeyId'],\n aws_secret_access_key= config_json['awsSecretAccessKey'],\n region_name=config_json['awsRegionName']\n )", "def 
test_newSession(self):\n session = self.mdk.session()\n session2 = self.mdk.session()\n self.assertSessionHas(session, session._context.traceId, [0])\n self.assertSessionHas(session2, session2._context.traceId, [0])\n self.assertNotEqual(session._context.traceId,\n session2._context.traceId)", "def request_session(self):\n if not hasattr(self, \"_request_session\"):\n rqsid = self.shared_vars.pop(\"rqsid\", \"\")\n rqses = self.request_session_manager.pop_request_session(rqsid)\n\n if not rqses:\n if self.is_action():\n del session['VDOM_API_SESSIONS']\n raise RequestSessionDoesntExist\n\n rqses = self.request_session_manager.create_request_session()\n\n else:\n uuid = rqses[\"rqsid_uuid\"]\n if not self.verify_request_session_key(rqsid, uuid):\n del session['VDOM_API_SESSIONS']\n raise RequestSessionInvalidKey\n\n self._request_session = rqses\n\n return self._request_session", "def create_session(self, session_expiration_datetime=None):\n session_expiration_datetime = session_expiration_datetime or datetime.now() + timedelta(seconds=5)\n session = JOHN | dict(session_id=\"5\", session_expiration_datetime=session_expiration_datetime)\n self.database.sessions.find_one.return_value = session", "def create(id = None, expires=None):\n\n\t# Init the data\n\tdData = {}\n\n\t# If we have an expires time\n\tif expires:\n\t\tdData['__expire'] = expires\n\n\t# Create a new Session using a UUID as the id\n\treturn _Session(id and id or uuid.uuid4().hex, dData)", "def start_session(db_path):\n engine = create_engine('sqlite:///' + db_path)\n Session = sessionmaker(bind=engine)\n return Session()", "def new_session(self):\n body = yield from self._fetch_json(URL_LOGIN, self._new_session_data)\n self.sma_sid = jmespath.search('result.sid', body)\n if self.sma_sid:\n return True\n\n msg = 'Could not start session, %s, got {}'.format(body)\n\n if body.get('err'):\n if body.get('err') == 503:\n _LOGGER.error(\"Max amount of sesions reached\")\n else:\n _LOGGER.error(msg, body.get('err'))\n else:\n _LOGGER.error(msg, \"Session ID expected [result.sid]\")\n return False", "def test_new_session_promotion(self):\r\n cursor = self.db.cursor()\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n outcookie = Cookie()\r\n req = Mock(authname='john', base_path='/', incookie=incookie,\r\n outcookie=outcookie)\r\n session = Session(self.env, req)\r\n self.assertEqual('john', session.sid)\r\n session.save()\r\n\r\n cursor.execute(\"SELECT sid,authenticated FROM session\")\r\n self.assertEqual(('john', 1), cursor.fetchone())\r\n self.assertEqual(None, cursor.fetchone())", "def getSession():\n return call(\"getSession\")", "def create_session(self,session_id,host_id,host_name,spotify_token):\n self.sessions[session_id] = {\n \"HOST\" : {\n \"ID\" : host_id,\n \"NAME\" : host_name,\n \"spotify_token\" : spotify_token,\n \"spotify_player\": None,\n },\n \"queue\" : [],\n \"queue_lock\" : False,\n \"current_track\" : \"\",\n \"previous_track\" : \"\",\n \"USERS\" : {}\n }", "def get_session():\n session = scoped_session(sessionmaker(bind=engine))\n return session", "def new_session(self):\n self.current_session = Session(self, history_channel=self.config.history_channel)\n return self.current_session", "def _new_session(self, username_key=None, **attributes):\n for key in ['username', 'token', 'tenant_id']:\n if attributes.get(key, None) is None:\n attributes[key] = key + \"_\" + text_type(uuid4())\n if 'expires' not in attributes:\n attributes['expires'] = (\n 
datetime.utcfromtimestamp(self._clock.seconds())\n + timedelta(days=1)\n )\n session = Session(**attributes)\n if username_key is None:\n username_key = session.username\n self._username_to_token[username_key] = session.token\n self._token_to_session[session.token] = session\n self._tenant_to_token[session.tenant_id] = session.token\n return session", "def get_session(self):\n session = Session(self.settings)\n self.sessions.append(session)\n return session", "def get_session(echo=False):\n db_type = _config['DEFAULT']['DatabaseType']\n if db_type == 'sqlite':\n engine_string = ('sqlite:///{db_file}'\n .format(db_file=_config['sqlite']['DatabaseFile']))\n else:\n engine_string = ('{type}://{user}:{passwd}@{host}/grasp'\n .format(type=db_type,\n user=_config['other']['DatabaseUser'],\n passwd=_config['other']['DatabasePass'],\n host=_config['other']['DatabaseHost']))\n engine = _create_engine(engine_string, echo=echo)\n Session = _sessionmaker(bind=engine)\n return Session(), engine", "def save_session(self, session):\n db = self.open()\n db[session.id] = session", "async def create_session() -> aiohttp.ClientSession:\n\n headers = generate_header()\n\n client_session = aiohttp.ClientSession(headers=headers)\n return client_session", "def get_session(self):\r\n if self._config.has_key('database'):\r\n return self._builder.session(self._config['database'], self.get_threads())\r\n if not self._config.has_key('host'):\r\n raise Exception(\"Database engine host configuration is not found\")\r\n elif not self._config.has_key('dbpath'):\r\n raise Exception(\"Database path configuration is not found\")\r\n else:\r\n return self._builder.session(None, self.get_threads(), self._config['host'], self._config['dbpath'])", "def create_session(self, _id: int = None, _cls: type[Session] = Session) -> Session:\n if not _id:\n _id = 1\n while _id in self.sessions:\n _id += 1\n session = _cls(_id, config=self.config)\n session.service_manager = self.service_manager\n logger.info(\"created session: %s\", _id)\n self.sessions[_id] = session\n return session", "def new_session(self):\n self.command(\"new\")", "def create_new_session(sessions, segmeta):\n # Find an available session id\n new_sid = 0\n while new_sid in [s[0].meta.sessionid for s in sessions.values()]:\n new_sid += 1\n # Create meta and fill in information of the file\n meta = MetaInfo(segmeta.filename, segmeta.segmentid, new_sid)\n sp = snc_parameters(meta.segsize, 0.01, 16, 64, 1280, BAND_SNC, 1, 1, 0, -1)\n meta.set_snc_params(sp)\n # Fork a child process and build pipe between parent and child\n session = Session(meta)\n (fdp, fdc) = mp.Pipe()\n session.fdp = fdp\n session.fdc = fdc\n logging.info(\"New session created, ID: %d \" % (new_sid,))\n print(session.meta)\n # Fork a process to serve the clients of the session\n child = mp.Process(target=session.main)\n child.start()\n session.fdc.close() # Close parent's fdc\n sessions[(segmeta.filename, segmeta.segmentid)] = (session, child)\n return session", "def create_session(self, user_id, **kwargs):\n defaults = {\n 'created_at': time.time()\n }\n defaults.update(kwargs)\n self.save_session(user_id, defaults)\n if self.max_session_length:\n self.schedule_session_expiry(user_id, self.max_session_length)\n return self.load_session(user_id)", "def create_session(hostname, username, password):\n return slycat.web.server.remote.create_session(hostname, username, password, None)", "def get_session():\n if MYSQL['username'] is None:\n raise ValueError(\"User name is mandatory\")\n\n if 
MYSQL['password'] is None:\n raise ValueError(\"Password is mandatory\")\n\n if MYSQL['host'] is None:\n raise ValueError(\"Host is mandatory\")\n\n if MYSQL['db_name'] is None:\n raise ValueError(\"Database Name is mandatory\")\n\n try:\n engine = create_engine(\n '{engine}://{username}:{password}@{host}/{db_name}'.format(**MYSQL),\n pool_size=MYSQL[\"pool_size\"],\n echo=MYSQL[\"debug\"]\n )\n\n session_factory = sessionmaker(bind=engine)\n sess = scoped_session(session_factory)\n return sess\n\n except Exception as err:\n print(err)\n exit()", "def create_session(self, transport):\n session_id = self.session_id_allocator.allocate()\n session = self.SESSION_CLS(self, transport, session_id)\n self.sessions[session.id] = session\n return session", "def get_session():\n assert config.AUTH_URL, \"Environment variable OS_AUTH_URL is not defined\"\n\n def _get_session(auth_url=None,\n username=None,\n password=None,\n project_name=None,\n user_domain_name=None,\n project_domain_name=None):\n auth_url = auth_url or config.AUTH_URL\n username = username or config.USERNAME\n password = password or config.PASSWORD\n project_name = project_name or config.PROJECT_NAME\n user_domain_name = user_domain_name or config.USER_DOMAIN_NAME\n project_domain_name = project_domain_name or config.PROJECT_DOMAIN_NAME\n\n if config.KEYSTONE_API_VERSION == 3:\n\n auth = identity.v3.Password(\n auth_url=auth_url,\n username=username,\n user_domain_name=user_domain_name,\n password=password,\n project_name=project_name,\n project_domain_name=project_domain_name)\n\n elif config.KEYSTONE_API_VERSION == 2:\n\n auth = identity.v2.Password(\n auth_url=auth_url,\n username=username,\n password=password,\n tenant_name=project_name)\n\n else:\n raise ValueError(\"Unexpected keystone API version: {}\".format(\n config.KEYSTONE_API_VERSION))\n\n return _session.Session(auth=auth)\n\n return _get_session", "def make_session(uri=None, echo=None, session_kwargs=None, **kwargs):\n if session_kwargs is None:\n session_kwargs = {}\n engine = create_engine(uri, echo=echo, **kwargs)\n log.debug(\"Created engine for session context\")\n return sqlalchemy.create_session(bind_to=engine, **session_kwargs)", "def create_session():\n app = Application.query.filter_by(id=request.json['app']).first()\n questionnaire = Questionnaire.query.filter_by(id=request.json['questionnaire']).first()\n expected_emotions = request.json['expected_emotions']\n\n # validate application type\n if not app:\n return {'status': 'error', 'message': 'Invalid application.'}, 400\n\n new_session = Session(app=app, expected_emotions=expected_emotions, questionnaire=questionnaire)\n\n db.session.add(new_session)\n db.session.commit()\n\n result = session_schema.dump(new_session).data\n\n return jsonify({'status': 'success', 'message': 'Created new session for application with id of {}.'.format(request.json['app']), 'data': result}), 201", "def create_session():\n session = Session()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()", "def _new_session(self):\n try:\n self._session.close()\n except (AttributeError,TypeError):\n pass\n self._session = requests.Session()\n return self._session", "def session(self, sid):\n s = self.list\n if sid not in s:\n for k in s:\n if s[k]['uuid'] == sid:\n if s[k]['type'] == 'meterpreter':\n return MeterpreterSession(k, self.rpc, s)\n elif s[k]['type'] == 'shell':\n return ShellSession(k, self.rpc, s)\n raise KeyError('Session ID (%s) does not exist' % sid)\n if 
s[sid]['type'] == 'meterpreter':\n return MeterpreterSession(sid, self.rpc, s)\n elif s[sid]['type'] == 'shell':\n return ShellSession(sid, self.rpc, s)\n raise NotImplementedError('Could not determine session type: %s' % s[sid]['type'])", "def get_session(config=None):\n sess = tf.get_default_session()\n if sess is None:\n sess = make_session(config=config, make_default=True)\n return sess", "def init_session(self) -> Tuple[str, str]:\n study_id = self.storage.create_study(sample_study_spec())\n session = sample_session(study_id=study_id)\n self.storage.create_session(session)\n return study_id, session.id", "def session(request):\n session = get_test_db_session()\n request.cls.session = session\n return session", "async def create_session(self):\n # Creating a session under an async function is recommended\n self.session = aiohttp.ClientSession()", "def build_session():\n return requests.Session()", "def _get_by_sid(self, sid):\n if self._is_valid_sid(sid):\n data = self.session_model.get_by_sid(sid)\n if data is not None:\n self.sid = sid\n logging.info(sid)\n logging.info(sessions.SessionDict(self, data=data))\n return sessions.SessionDict(self, data=data)\n logging.info('new')\n self.sid = self._get_new_sid()\n return sessions.SessionDict(self, new=True)", "def get_session():\n return scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine))", "def get_session():\n request_session = requests.Session()\n\n # Try to use what was passed in for username/password...\n username = CMD.username\n password = CMD.password\n \n # ...if there was nothing passed in then try to read it from config file\n if ((username is None or username == \"\") and (password is None or password == \"\")):\n # Try to read username and password from config file, if it exists\n # Otherwise default to DEFAULT_USERNAME/DEFAULT_PASSWORD\n try:\n with open(\"config.json\") as config_file:\n config_data = json.load(config_file)\n if (config_data):\n username = config_data[\"username\"]\n password = config_data[\"password\"]\n except:\n LOG.exception(\"Unable to open \\\"/collector/config.json\\\" file\")\n username = DEFAULT_USERNAME\n password = DEFAULT_PASSWORD\n\n request_session.auth = (username, password)\n request_session.headers = {\"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"netapp-client-type\": \"grafana-\" + __version__}\n # Ignore the self-signed certificate issues for https\n request_session.verify = False\n return request_session", "def get_session(*args, **kwargs):\n session = requests.session(*args, **kwargs)\n\n return session", "def loadSession():\n metadata = BaseClass.metadata\n Session = sessionmaker(bind=engine)\n session = Session()\n return session", "def test_get_session_missing(self):\n study_id = self.storage.create_study(sample_study_spec())\n self.assertIsNone(self.storage.get_session(study_id, 'missing'))\n\n session = sample_session(study_id)\n self.storage.create_session(session)\n self.assertIsNone(self.storage.get_session('missing', session.id))", "def create_session(self, loop):\n session = ClientSession(loop=loop, json_serialize=json_dumps)\n # Setting directly on `session` will raise deprecation warning\n object.__setattr__(session, \"_request\", self.match_request)\n return session", "def create_session(self):\n\t\ttry:\n\t\t\tself.session = session.DropboxSession(APP_KEY, APP_SECRET, ACCESS_TYPE)\n\t\texcept Exception, e:\n\t\t\tlogger.error('Exception at create_session')\n\t\t\tlogger.debug('*' + sys.exc_info()[0])", "def 
session(self):\n if not hasattr(self, '_session'):\n self._session = FakeSession(self.version)\n self._session.auth = (self.key, 'ignore')\n return self._session", "def auth_create_session(self) -> str:\n self.__logger.debug('Eva.auth_create_session called')\n return self.__http_client.auth_create_session()", "def create_session(self, keyspace, cf_name, **cf_args):\n return self._Create_Session(keyspace, cf_name, **cf_args)", "def create_session(self, transport):\n session_id = self.session_id_allocator.allocate()\n session = self.SESSION_CLS(self, transport, session_id, self.message_mgr)\n self.sessions[session.id] = session\n return session", "def _createSessionObject(self, request):\n # Preload necessary data items\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n user_id = user.email()\n # Get the conference entity\n conf = _getEntityByWebsafeKey(request.websafeConferenceKey,\n 'Conference')\n # Ensure that the current user is the conference organizer\n if user_id != conf.organizerUserId:\n raise endpoints.UnauthorizedException(\n 'Only the conference organizer can create a new session')\n # Verify that the speaker exists\n speaker = _getEntityByWebsafeKey(request.websafeSpeakerKey, 'Speaker')\n # Ensure that the user submitted the required name property\n if not request.name:\n raise endpoints.BadRequestException(\n \"Session 'name' field required\")\n # Copy SessionForm/ProtoRPC Message into dict\n data = {\n field.name: getattr(request, field.name) for field in\n request.all_fields()\n }\n # Remove data that isn't destined for the Session entity\n del data['websafeConferenceKey']\n del data['websafeSpeakerKey']\n del data['websafeKey']\n # Add default values for those missing in the data model\n for df in SESSION_DEFAULTS:\n if data[df] in (None, []):\n data[df] = SESSION_DEFAULTS[df]\n # Ensure the string version of typeOfSession is what is stored\n # in the NDB model\n data['typeOfSession'] = str(data['typeOfSession'])\n # Convert date from string to Date object\n if data['date'] is not None:\n try:\n data['date'] = datetime.strptime(\n data['date'][:10], '%Y-%m-%d').date()\n except:\n raise endpoints.BadRequestException(\n \"Invalid 'date' value\")\n # Convert startTime from string to Time object\n if data['startTime'] is not None:\n try:\n data['startTime'] = datetime.strptime(\n data['startTime'], '%H:%M').time()\n except:\n raise endpoints.BadRequestException(\n \"Invalid 'startTime' value\")\n # Create Session\n session = Session(**data)\n session.conference = conf.key\n session.speaker = speaker.key\n session.put()\n # Add the session key to the speaker's sessions list\n speaker.sessions.append(session.key)\n speaker.put()\n # Add a task to task queue which checks if the speaker of this session\n # should be the new featured speaker\n taskqueue.add(params={'websafeSpeakerKey': request.websafeSpeakerKey,\n 'websafeConferenceKey': request.websafeConferenceKey},\n url='/tasks/update_featured_speaker'\n )\n # Return SessionForm object\n return self._copySessionToForm(session)", "async def session(self, request):\n body = await api_validate(SCHEMA_SESSION, request)\n self._check_password(body)\n\n # check TOTP\n if self.config.security_totp:\n totp = pyotp.TOTP(self.config.security_totp)\n if body[ATTR_TOTP] != totp.now():\n raise RuntimeError(\"Invalid TOTP token!\")\n\n # create session\n valid_until = datetime.now() + timedelta(days=1)\n session = hashlib.sha256(os.urandom(54)).hexdigest()\n\n # store 
session\n self.config.add_security_session(session, valid_until)\n return {ATTR_SESSION: session}", "def addsession(cls, session, username, passwd):\n sessionkey = cls.sessionkey(session)\n tmpdict = dict({'username': username, 'password': passwd})\n sessionmgr.update(dict({sessionkey: tmpdict}))", "def session(self, request: HttpRequest) -> Job:\n job = Job.objects.create(\n project=self,\n creator=request.user if request.user.is_authenticated else None,\n method=JobMethod.session.name,\n params=dict(container_image=self.container_image),\n description=f\"Session for project '{self.name}'\",\n )\n job.add_user(request)\n return job", "def new_session(zap_helper):\n zap_helper.new_session()", "def get_session(session_id):\n response_dict = {}\n if request.method == 'POST' and request.json:\n # First Time creation\n # with or without json data\n # session_id = request.json.get('session_id')\n if not session_id:\n return return_response({\"message\": \"Something is missing, \"\n \"read the API docs for \"\n \"more information.\"}, 403)\n if is_active_session(session_id):\n return return_response({\"message\": \"Conflict, ID already exists. Use PUT instead of POST.\"}, 409)\n if request.json:\n update_or_create_session(session_id=session_id, data=request.json.get('data'))\n response_dict['ok'] = True\n elif request.method == 'PUT' and request.json:\n # Updating information in session\n if not session_id:\n return return_response({\"message\": \"Something is missing, \"\n \"read the API docs for \"\n \"more information.\"}, 403)\n if request.json:\n update_or_create_session(session_id=session_id, data=request.json.get('data'))\n response_dict['ok'] = True\n elif request.method == 'GET':\n # Getting information for a session_id or get new random session_id\n if session_id is None:\n response_dict['session_id'] = generate_random_session_id()\n else:\n data = get_session_data(session_id=session_id)\n if data is not None:\n response_dict = {'data': data, 'ok': True}\n else:\n return return_response({\"message\": \"ID does not exists\"}, 404)\n else:\n pass\n\n return return_response(response_dict)", "def add_session(self, session):\n with self._sessions_lock:\n if session.session_id in self.sessions:\n raise KeyError(\"non-unique session id %s for %s\" % (session.session_id, session))\n self.sessions[session.session_id] = session\n\n return session", "def session(self):\n return self.session_store.get_session()", "def reinitsession(cls, arg, session):\n arg = None\n print(\"Dup Session start\")\n cls.log(1, \"Dup Session start\")\n ret, username = cls.getsessionuser(session)\n if ret is False:\n print(\"Unable to reinit the session\", session, arg)\n cls.log(3, \"Unable to reinit the session\",\n session, arg)\n return False\n ret, passwd = cls.getsessionpasswd(session)\n if ret is False:\n print(\"Unable to reinit the session\", session, arg)\n cls.log(3, \"Unable to reinit the session\",\n session, arg)\n return False\n\n IP = session[\"ip_addr\"]\n # vfid = session[\"vfid\"]\n https = session[\"ishttps\"]\n # debug = session[\"debug\"]\n # throttle_delay = session[\"throttle_delay\"]\n newsession = None\n retry = 0\n for i in range(10):\n retry = i\n newsession = auth.login(username, passwd, IP, https)\n if auth.is_failed_login(newsession):\n cls.sleep(20, session)\n continue\n else:\n break\n if not auth.is_failed_login(newsession):\n # print('old', cls.sessionkey(session), 'New',\n # cls.sessionkey(newsession))\n session['credential'] = newsession['credential']\n session[\"version\"] = 
newsession[\"version\"]\n print(\"Dup Session Completed after Iterations:\", retry)\n cls.log(1, \"Dup Session Completed after Iterations:\",\n retry)\n return True\n print(\"Dup Session Failed.\")\n cls.log(2, \"Dup Session Failed.\")\n sys.exit('Exiting as session dup didn\\'t work')\n return False", "def load_session(self, id, default=None):\n \n db = self.open()\n return db.get(id, default)", "def start_session(\n self, environ: str, session_parameters: dict\n ) -> SessionAttachContext:\n result = self.make_request(\n HttpMethod.PUT,\n self.get_full_url(SESSION_CREATE_PATH_FORMAT),\n body_data=self.transform_session_parameters(session_parameters, environ),\n )\n\n urls = result.get(\"urls\")\n\n if urls:\n return SessionAttachContext(urls[0], result.get(\"executionId\", \"\"))\n\n path = result.get(\"path\")\n assert path is not None\n return SessionAttachContext(self.get_full_url(path))", "def session(self):\n\n if not hasattr(self, \"_session\"):\n self._session = Session(\"guest\")\n return self._session", "def update_session(\n id: str,\n path: Optional[str] = None,\n name: Optional[str] = None,\n type: Optional[str] = None,\n kernel_name: Optional[str] = None,\n kernel_id: Optional[str] = None,\n) -> None:\n ...", "def _get_session_from_cache(thread_ident: int) -> requests.Session:\n return _GLOBAL_BACKEND_FACTORY()", "def post(self, args):\n\n response = openvidu().post_session(args)\n\n if response.status_code == 200:\n session = response.json()\n current_app.logger.info(f'Created new session `{session[\"id\"]}`')\n\n # Store session parameters in database to recreate it if necessary\n db = current_app.session\n db.add(Session(id=session[\"id\"], parameters=args))\n db.commit()\n return session\n elif response.status_code == 400:\n abort(UnprocessableEntity, json=response.json().get(\"message\"))\n abort(response)", "def get_session(engine=None, engine_string=None):\n\n # error checking that engine string was passed into function \n if engine is None and engine_string is None:\n return ValueError(\"`engine` or `engine_string` must be provided\")\n elif engine is None:\n engine = create_connection(engine_string=engine_string)\n Session = sessionmaker(bind=engine)\n session = Session()\n return session", "def session_from_path(cls, db_uri: str) -> Session:\n return sessionmaker(\n bind=cls.get_sql_engine(db_uri), autocommit=False, autoflush=False\n )()", "def get_session(self, db_config=\"Database\"):\n # If the DB is not in the sessionmaker_dict, make it\n if db_config not in self.sessionmaker_dict:\n self.make_sessionmaker(db_config)\n\n # Get the sessionmaker object from the sessionmaker dict\n sessionmaker_obj = self.sessionmaker_dict[db_config]\n # Get a session from the sessionmaker\n session = sessionmaker_obj()\n\n return session", "def open_session(self):\n return self.Session()", "def session_store(self):\n # Setup http sessions\n session_db = odoo.tools.config.get('session_db')\n if session_db:\n _logger.debug(\"Sessions in db %s\" % session_db)\n return PGSessionStore(session_db, session_class=http.OpenERPSession)\n path = odoo.tools.config.session_dir\n _logger.debug('HTTP sessions stored in: %s', path)\n return werkzeug.contrib.sessions.FilesystemSessionStore(path, session_class=http.OpenERPSession)", "def new_session(self, information, current_player):\n game_name = information.split(protocol._MSG_FIELD_SEP)[1]\n\n # if not self.__is_name_valid(game_name):\n # return None # TODO: be more informative on reasons to client\n\n max_num_of_players = 
information.split(protocol._MSG_FIELD_SEP)[2]\n # if max_num_of_players < 1 or max_num_of_players > 100:\n # return None # TODO: be more informative to client\n\n s_id = len(self.current_sessions ) + 1\n current_player.current_session_id = s_id\n session = Session(protocol._PENDING, s_id, game_name,\n self.sudoku_name,\n self.sudoku_sol,\n # 'sudoku/puzzles/sudoku_easy_1.csv',\n # 'sudoku/puzzles/sudoku_easy_1_solution.csv',\n max_num_of_players,\n [current_player])\n session.game_start()\n\n self.__lock.acquire()\n self.current_sessions.append(session)\n self.__lock.release()\n return session" ]
[ "0.7212364", "0.6900169", "0.6895351", "0.6759798", "0.6726369", "0.6653804", "0.6640182", "0.65958303", "0.65842575", "0.65202713", "0.6479733", "0.64455444", "0.6434389", "0.64199865", "0.63682336", "0.6362771", "0.63557774", "0.63264036", "0.6294936", "0.6289158", "0.62681067", "0.6267423", "0.62555367", "0.624714", "0.6244107", "0.6200586", "0.61884403", "0.6152993", "0.61482996", "0.61311203", "0.61139786", "0.60822004", "0.6073137", "0.60716736", "0.6070833", "0.60695904", "0.60660326", "0.6062069", "0.6055005", "0.6050709", "0.60462254", "0.603296", "0.6031483", "0.60188", "0.6011447", "0.59760135", "0.59700865", "0.5961642", "0.59477216", "0.591213", "0.5910566", "0.5904338", "0.5901157", "0.58964914", "0.5890031", "0.5880529", "0.5869889", "0.5863902", "0.58617425", "0.5846331", "0.5834756", "0.58205605", "0.58186054", "0.58170277", "0.5815579", "0.58075356", "0.5800497", "0.5794028", "0.57893306", "0.578524", "0.57822037", "0.5768278", "0.5747179", "0.5733971", "0.57201564", "0.5710845", "0.57095456", "0.57093513", "0.5705138", "0.5700652", "0.56967616", "0.5689508", "0.56887037", "0.56841654", "0.56726176", "0.56712276", "0.5669349", "0.56631225", "0.56627667", "0.5662678", "0.5662466", "0.56590796", "0.56499654", "0.56472117", "0.56447095", "0.5643916", "0.5641919", "0.5639138", "0.56345284", "0.5617665" ]
0.70272946
1
Updates an existing session.
def refresh_content(
    path: str,
    content: Optional[bool] = False,
    type: Optional[str] = None,
    format: Optional[str] = None,
) -> str:
    ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upsert_session(session_data):\n g_db['sessions'].update(\n get_session_id(session_data),\n {\n \"$set\": session_data,\n },\n upsert=True\n )", "def test_update_session(self):\r\n now = time.time()\r\n\r\n # Make sure the session has data so that it doesn't get dropped\r\n cursor = self.db.cursor()\r\n cursor.execute(\"INSERT INTO session VALUES ('123456', 0, 1)\")\r\n cursor.execute(\"INSERT INTO session_attribute VALUES \"\r\n \"('123456', 0, 'foo', 'bar')\")\r\n\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n outcookie = Cookie()\r\n req = Mock(authname='anonymous', base_path='/', incookie=incookie,\r\n outcookie=outcookie)\r\n session = Session(self.env, req)\r\n session.save() # updating should not require modifications\r\n\r\n self.assertEqual(PURGE_AGE, outcookie['trac_session']['expires'])\r\n\r\n cursor.execute(\"SELECT last_visit FROM session WHERE sid='123456' AND \"\r\n \"authenticated=0\")\r\n self.assertAlmostEqual(now, int(cursor.fetchone()[0]), -1)", "def update(self):\n sess = u.get_default_session()\n # sess.run(self.update_op)\n u.run(self.update_op)", "def update_session(self, session):\n self.session = session\n print(self.session.active)\n self.curvePlot.session = session\n self.saveAs.session = session\n self.actionRun.setEnabled(True)\n self.actionPass.setEnabled(True)\n try:\n if self.port and self.brate:\n self.menuConnect.setEnabled(True)\n self.actionStop.setEnabled(False)\n if self.depthCal and self.tensionCal:\n self.actionPlot.setEnabled(True)\n self.actionSpeed.setEnabled(True)\n self.actionDepth.setEnabled(True)\n self.actionSaveAs.setEnabled(True)\n except:\n pass\n # Status Bar message\n msg = \"Well: {} Run: {} Pass: {}\".format(\n session.active['well'],\n str(session.active['run']),\n session.active['pass'][5:])\n self.dbStatus.showMessage(msg)", "def test_update_session(self):\n study_id = self.storage.create_study(sample_study_spec())\n\n session = sample_session(study_id=study_id)\n self.storage.create_session(session)\n self.assertEqual(session.state, study_pb2.Session.STATE_VALID)\n\n session.state = study_pb2.Session.STATE_INVALID\n self.storage.update_session(session)\n\n self.assertEqual(self.storage.get_session(study_id, session.id), session)\n self.assertEqual(session.state, study_pb2.Session.STATE_INVALID)", "def update_session(id):\n session = Session.query.get(id)\n\n # calculate all the final scores of the contributors\n latency_score = cssi.latency.generate_final_score(scores=session.latency_scores)\n sentiment_score = cssi.sentiment.generate_final_score(all_emotions=session.sentiment_scores, expected_emotions=session.expected_emotions)\n questionnaire_score = cssi.questionnaire.generate_final_score(pre=session.questionnaire.pre, post=session.questionnaire.post)\n\n # calculate the final scores of the plugins\n plugin_scores = cssi.generate_plugin_final_scores(scores=session.plugin_scores)\n\n # calculate the final CSSI Score\n cssi_score = cssi.generate_cssi_score(tl=latency_score, ts=sentiment_score, tq=questionnaire_score, ps=plugin_scores)\n\n # set the scores in the session\n session.total_latency_score = latency_score\n session.total_sentiment_score = sentiment_score\n session.total_questionnaire_score = questionnaire_score\n session.total_plugin_scores = plugin_scores\n session.cssi_score = cssi_score\n\n # get a breakdown of the questionnaire scores and set it in the session\n [pre_n, pre_o, pre_d, pre_ts], [post_n, post_o, post_d, post_ts] = 
cssi.questionnaire.generate_score_breakdown(pre=session.questionnaire.pre, post=session.questionnaire.post)\n q_score_breakdown = {\n \"pre\": {\n \"N\": pre_n,\n \"O\": pre_o,\n \"D\": pre_d,\n \"TS\": pre_ts\n },\n \"post\": {\n \"N\": post_n,\n \"O\": post_o,\n \"D\": post_d,\n \"TS\": post_ts\n }\n }\n session.questionnaire_scores = q_score_breakdown\n\n session.status = \"completed\"\n db.session.commit()\n\n result = session_schema.dump(session).data\n\n return jsonify({'status': 'success', 'message': 'Successfully updated the session data', 'data': result}), 200", "def save_session(self, session):\n db = self.open()\n db[session.id] = session", "def update_session(request):\n if request.method == \"POST\":\n req_data = request.POST.get(\"session_data\", None)\n if req_data:\n if req_data == \"sidebar\":\n if \"sidebar\" in request.session.keys():\n request.session[\"sidebar\"][\"sticky\"] ^= True\n else:\n request.session[\"sidebar\"] = {}\n request.session[\"sidebar\"][\"sticky\"] = True\n request.session.save()\n data = {\n \"result\": \"success\",\n \"message\": \"Session updated\",\n }\n return JsonResponse(data)\n\n return HttpResponseNotAllowed([\"POST\"])", "def set(self, session):\n raise InvalidSessionException('Need to be implemented')", "def put(self, session: Session = None) -> Response:\n token = generate_token(username=current_user.name, session=session)\n return jsonify({'token': token})", "def update_self(self, existing_session=None):\n if (not existing_session):\n session = get_database_session()\n else:\n session = existing_session\n\n session.add(self)\n session.commit()\n\n if (not existing_session):\n session.expunge(self)", "def set_login_session(self, session_id=None):\r\n meta = self.get_meta()\r\n old_login = meta.get('session_id', None)\r\n if old_login:\r\n SessionStore(session_key=old_login).delete()\r\n meta['session_id'] = session_id\r\n self.set_meta(meta)\r\n self.save()", "def session(self, value: ClientSession):\r\n self._session = value", "def _update_token(token):\n session.token = token", "def update_from_naucse(self, report_progress=print, session=None):\n if self.naucse_slug == None:\n raise ValueError(f'No naucse slug for course {self.course_name}')\n if session is None:\n session = requests.Session()\n url = NAUCSE_API_URL_TEMPLATE.format(self.naucse_slug)\n response = session.get(url)\n if response.status_code != 200:\n raise ValueError(f'Could not update course: {url} returned {response.status_code}')\n response.raise_for_status()\n course_info = response.json()['course']\n if 'subtitle' in course_info:\n self.course_name = f\"{course_info['title']} – {course_info['subtitle']}\"\n else:\n self.course_name = course_info['title']\n\n report_progress(f'Updating {self!r}')\n\n self.save()\n\n for session_info in course_info['sessions']:\n if 'time' not in session_info:\n report_progress(\n f'Skipping session without time: {session_info[\"title\"]}')\n else:\n session, created = Session.objects.get_or_create(\n course=self,\n slug=session_info['slug'],\n )\n if 'serial' in session_info:\n session.title = f'Lekce {session_info[\"serial\"]}'\n else:\n session.title = None\n session.text = session_info['title']\n published_date = parse_datetime(session_info['time']['start'])\n session.published_date = published_date\n\n if created:\n report_progress(f'Added {session!r}')\n else:\n report_progress(f'Updating {session!r}')\n\n session.save()", "def upsert(database: Database, user: User, session_id: SessionId, session_expiration_datetime: datetime) -> 
None:\n database.sessions.replace_one(\n {\"user\": user.username},\n {\n \"user\": user.username,\n \"email\": user.email,\n \"common_name\": user.common_name,\n \"session_id\": session_id,\n \"session_expiration_datetime\": session_expiration_datetime,\n },\n upsert=True,\n )", "async def renew(self, session, *, dc=None):\n session_id = extract_attr(session, keys=[\"ID\"])\n response = await self._api.put(\"/v1/session/renew\", session_id,\n params={\"dc\": dc})\n try:\n result = response.body[0]\n except IndexError:\n meta = extract_meta(response.headers)\n raise NotFound(\"No session for %r\" % session_id, meta=meta)\n return consul(result, meta=extract_meta(response.headers))", "def set_session(session):\n\n global session_\n session_ = session\n import observatory.api.server.api as api\n\n api.session_ = session", "def set_session(context, key, value):\n session_manager = getToolByName(context, 'session_data_manager')\n session = session_manager.getSessionData()\n session[key] = value", "def use_session(cls, session):\r\n cls._session = session", "def fusion_api_set_active_session(self, sessionId):\n return self.loginsession.set_active_session(sessionId)", "def refresh_session():\n\n hruntime.response.headers['Cache-Control'] = 'must-revalidate, no-cache, no-store'\n\n hruntime.user = hruntime.dbroot.users[hruntime.session.name]\n hruntime.i18n = hruntime.dbroot.localization.languages['cz']", "def update(self):\n db.session.commit()", "def update(self):\n db.session.commit()", "def set_session_property(self, key, value):\n\n self.session[key] = value", "def add2session(key, value):\n cherrypy.session.acquire_lock()\n cherrypy.session[key] = value\n cherrypy.session.release_lock()", "def update(self):\n with managed_session() as session:\n session.merge(self)", "def addsession(cls, session, username, passwd):\n sessionkey = cls.sessionkey(session)\n tmpdict = dict({'username': username, 'password': passwd})\n sessionmgr.update(dict({sessionkey: tmpdict}))", "def save(self):\n self.session.modified = True", "async def save_session(request, response):\n try:\n await request.app.session_interface.save(request, response)\n except Exception as e:\n if isinstance(e, Exception):\n pass", "def setSession( self, name, value, REQUEST=None, cookie=None ):\n SetSessionValue( self, name, value, REQUEST, cookie )", "async def put(url, session=None, **kwargs):\n\n method = 'PUT'\n resp = await _request(method, url, session=session, **kwargs)\n return resp", "def test_modify_detached_session(self):\r\n cursor = self.db.cursor()\r\n cursor.execute(\"INSERT INTO session VALUES ('john', 1, 0)\")\r\n cursor.execute(\"INSERT INTO session_attribute VALUES \"\r\n \"('john', 1, 'foo', 'bar')\")\r\n\r\n session = DetachedSession(self.env, 'john')\r\n self.assertEqual('bar', session['foo'])\r\n session['foo'] = 'baz'\r\n session.save()\r\n cursor.execute(\"SELECT value FROM session_attribute \"\r\n \"WHERE sid='john' AND name='foo'\")\r\n self.assertEqual('baz', cursor.fetchone()[0])", "def put(self):\n form = UpdateForm.from_json(request.get_json())\n\n if not g.user:\n return jsonify({\"login_staus\": False, \"message\": \"Please login\"})\n\n if form.validate_on_submit():\n current_user = User.query.filter_by(id=g.user.id).first()\n if form.email.data:\n current_user.email = form.email.data\n if form.new_password.data:\n current_user.password = form.new_password.data\n db.session.commit()\n return jsonify({\"update_status\": True})\n return jsonify({\"update_status\": False, \"message\": form.errors})", 
"def write(self, session: Session = None):\n session.merge(self)", "def current_session(self, session):\n if self._session is None:\n self._session = session\n else:\n if session is None or self._session.session_id != session.session_id:\n self._session.active = False\n self._session = session", "def reinitsession(cls, arg, session):\n arg = None\n print(\"Dup Session start\")\n cls.log(1, \"Dup Session start\")\n ret, username = cls.getsessionuser(session)\n if ret is False:\n print(\"Unable to reinit the session\", session, arg)\n cls.log(3, \"Unable to reinit the session\",\n session, arg)\n return False\n ret, passwd = cls.getsessionpasswd(session)\n if ret is False:\n print(\"Unable to reinit the session\", session, arg)\n cls.log(3, \"Unable to reinit the session\",\n session, arg)\n return False\n\n IP = session[\"ip_addr\"]\n # vfid = session[\"vfid\"]\n https = session[\"ishttps\"]\n # debug = session[\"debug\"]\n # throttle_delay = session[\"throttle_delay\"]\n newsession = None\n retry = 0\n for i in range(10):\n retry = i\n newsession = auth.login(username, passwd, IP, https)\n if auth.is_failed_login(newsession):\n cls.sleep(20, session)\n continue\n else:\n break\n if not auth.is_failed_login(newsession):\n # print('old', cls.sessionkey(session), 'New',\n # cls.sessionkey(newsession))\n session['credential'] = newsession['credential']\n session[\"version\"] = newsession[\"version\"]\n print(\"Dup Session Completed after Iterations:\", retry)\n cls.log(1, \"Dup Session Completed after Iterations:\",\n retry)\n return True\n print(\"Dup Session Failed.\")\n cls.log(2, \"Dup Session Failed.\")\n sys.exit('Exiting as session dup didn\\'t work')\n return False", "def switch_session_data(request, current_session_key,\n stored_session_key):\n # getting previous session data\n stored_session_data = Session.objects.get(\n session_key=stored_session_key).session_data\n # remove not used anymore session\n Session.objects.get(session_key=stored_session_key).delete()\n\n expire_date = request.session.get_expiry_date()\n\n # update current session\n session_object = Session.objects.get(session_key=current_session_key)\n session_object.session_data = stored_session_data\n session_object.expire_date = expire_date\n session_object.save()", "def session_id(self, session_id):\n\n self._session_id = session_id", "def update(self):\n try:\n self._device.update()\n except requests.exceptions.HTTPError as ex:\n _LOGGER.warning(\"Fritzhome connection error: %s\", ex)\n self._fritz.login()", "def save_session_data(self, session_id, data):\n raise NotImplementedError()", "def persist(self):\n if self._invalidated:\n logger.debug('Not saving invalidated session')\n return\n\n # Serialize namespace dataclasses to see if their content changed\n self._serialize_namespaces()\n\n # TODO: Remove self.new below at a later stage\n # Only save a session if it is modified\n # Don't save it just because it is new, this is to not\n # save empty sessions for every call to the backend\n if self.new or self.modified:\n logger.debug(f'Saving session {self}')\n self._session.commit()\n self.new = False\n self.modified = False\n if self.app.debug:\n _saved_data = json.dumps(self._session.to_dict(), indent=4, sort_keys=True)\n logger.debug(f'Saved session {self}:\\n{_saved_data}')", "def refresh_session(self):\n if self.session:\n try:\n yield from self.session.close()\n except:\n # we don't care if closing the session does nothing\n pass \n\n self.session = aiohttp.ClientSession()\n self._session_start = time.time()", 
"def post(self, args):\n\n response = openvidu().post_session(args)\n\n if response.status_code == 200:\n session = response.json()\n current_app.logger.info(f'Created new session `{session[\"id\"]}`')\n\n # Store session parameters in database to recreate it if necessary\n db = current_app.session\n db.add(Session(id=session[\"id\"], parameters=args))\n db.commit()\n return session\n elif response.status_code == 400:\n abort(UnprocessableEntity, json=response.json().get(\"message\"))\n abort(response)", "def test_modify_authenticated_session_var(self):\r\n cursor = self.db.cursor()\r\n cursor.execute(\"INSERT INTO session VALUES ('john', 1, 0)\")\r\n cursor.execute(\"INSERT INTO session_attribute VALUES \"\r\n \"('john', 1, 'foo', 'bar')\")\r\n\r\n req = Mock(authname='john', base_path='/', incookie=Cookie())\r\n session = Session(self.env, req)\r\n self.assertEqual('bar', session['foo'])\r\n session['foo'] = 'baz'\r\n session.save()\r\n cursor.execute(\"SELECT value FROM session_attribute \"\r\n \"WHERE sid='john' AND name='foo'\") \r\n self.assertEqual('baz', cursor.fetchone()[0])", "def session(self):", "def login(self):\n backend = self.backend\n self.session[backend.session_id_key] = self[\"id\"]\n self.session[backend.session_backend_key] = backend.session_backend_val\n self.session[backend.session_hash_key] = self._get_session_hash(\n self[\"password\"]\n )", "def update(self, other):\n\n fields = None\n if isinstance(other, dict):\n fields = other\n elif isinstance(other, Session):\n fields = other.fields\n else:\n raise ValueError('Cannot update with supplied data')\n\n for k, v in fields.iteritems():\n self.fields[k.replace('-', '_')] = v", "def test_set_session():", "def test_update_missing_session(self):\n study_id = self.storage.create_study(sample_study_spec())\n session = sample_session(study_id=study_id)\n with self.assertRaises(ValueError):\n self.storage.update_session(session)", "def _set_session_value(self, req, section_name, option_name, option_value):\n name = 'inieditor|%s|%s' % (section_name, option_name)\n req.session[name] = option_value", "def user_update(self, session, data):\n\n endpoint_response = None\n\n if self.check_if_row_exists(session, data):\n\n try:\n\n user_row = self.get_user_by_id(session, data)\n\n if user_row is not None:\n user_id = user_row.user_id\n else:\n user_id = 0\n\n self.last_update_date = get_current_date(session)\n\n data['last_update_date'] = self.last_update_date\n\n # update row to database\n session.query(UsersAuthModel).filter(UsersAuthModel.user_id == user_id). 
\\\n update({\"user_name\": data.get('username'),\n \"password\": data.get('password'),\n \"is_active\": data.get('is_active'),\n \"is_staff\": data.get('is_staff'),\n \"is_superuser\": data.get('is_superuser'),\n \"last_update_date\": data.get('last_update_date')},\n synchronize_session='fetch')\n\n session.flush()\n\n # check update correct\n row_updated = self.get_one_user(session, data)\n\n logger.info('Data Updated: %s', str(row_updated))\n\n if row_updated:\n logger.info('Data User updated')\n\n endpoint_response = json.dumps({\n \"Username\": row_updated.user_name,\n \"Password\": row_updated.password,\n \"IsActive\": row_updated.is_active,\n \"IsStaff\": row_updated.is_staff,\n \"IsSuperUser\": row_updated.is_superuser,\n \"CreationDate\": row_updated.creation_date,\n \"UpdatedDate\": row_updated.last_update_date\n })\n\n except SQLAlchemyError as exc:\n session.rollback()\n endpoint_response = None\n\n logger.exception('An exception was occurred while execute transactions: %s',\n str(str(exc.args) + ':' +\n str(exc.code)))\n raise mvc_exc.IntegrityError(\n 'Row not stored in \"{}\". IntegrityError: {}'.format(data.get('username'),\n str(str(exc.args) + ':' + str(exc.code)))\n )\n finally:\n session.close()\n\n return endpoint_response", "def relogin(self):\n spotify.Error.maybe_raise(lib.sp_session_relogin(self._sp_session))", "def save_session(self, session, response):\n if not isinstance(session, PyMongoSession):\n raise TypeError('session (%r) is not a PyMongoSession' % session)\n\n try:\n cx, db = app.extensions['pymongo'][self.config_prefix]\n except KeyError:\n raise Exception('could not find PyMongo with config prefix %r in app' %\n self.config_prefix)\n\n db[self.collection].save(session)", "def save(self):\n path = self.user.get_session_path()\n with open(path, 'a', encoding='utf8') as file:\n self.write(file=file)", "def sessionAlter(self, session, *args, **kwargs):\n # Get the field to be altered and the new value from the keywords\n field = kwargs.get('field', '')\n value = kwargs.get('value', '')\n\n # Validates the field being changed\n if field in ('line', 'direction', 'stop'):\n # For change in line, make the current stop and direction null since\n # they're now undefined. String 'null' is used as a signalling value\n # for javascript here (as opposed to None). We need to check that the\n # values here are roughly what we expect (to guard against XSS attacks,\n # for instance). 
The value should be an integer ~16000.\n if field == 'line':\n\tif value.isdigit():\n session['tpg.line'] = value\n session['tpg.direction'] = 'null'\n session['tpg.stop'] = 'null'\n else:\n return None\n\n # Likewise for direction; value should be a char 'a' or 'r'\n elif field == 'direction':\n if value in ('a', 'r'):\n session['tpg.direction'] = value\n session['tpg.stop'] = 'null'\n else:\n return None\n\n # And stop; value should be a small positive integer.\n elif field == 'stop':\n if value.isdigit():\n session['tpg.stop'] = value\n else:\n return None\n\n self.gui._saveSession(session)\n return self._state(session)", "def for_session(self, session_id):\n if not isinstance(session_id, str):\n raise TypeError('Session Id must be a string')\n\n self.token['sessionId'] = session_id\n\n return self", "async def create(self, session, *, dc=None):\n response = await self._api.put(\n \"/v1/session/create\",\n data=session,\n params={\"dc\": dc})\n return response.body", "def Session_sign_in(self, day, session):\n if self.Sessions[day] is \"\":\n self.Sessions[day] = session\n else:\n raise DoubleWriteException(self.EmailAddress, day, session)", "async def session(self, request):\n body = await api_validate(SCHEMA_SESSION, request)\n self._check_password(body)\n\n # check TOTP\n if self.config.security_totp:\n totp = pyotp.TOTP(self.config.security_totp)\n if body[ATTR_TOTP] != totp.now():\n raise RuntimeError(\"Invalid TOTP token!\")\n\n # create session\n valid_until = datetime.now() + timedelta(days=1)\n session = hashlib.sha256(os.urandom(54)).hexdigest()\n\n # store session\n self.config.add_security_session(session, valid_until)\n return {ATTR_SESSION: session}", "def _udpate_session(token):\n user_session = UserSession.query.get(token)\n if user_session is None:\n return False\n if user_session.expiration_date < datetime.utcnow():\n return False\n user_session.expiration_date = datetime.utcnow() + SESSION_DURATION\n db.session.commit()\n return True", "async def session(self, ctx):\n\n await ctx.send(\"Session function called\")", "def _set_session(self):\n self.__session = sessionmaker(bind=self.__engine)()", "def put_request_session(self, key, inst):\n with self.GLOB_LOCK:\n inst.touch()\n self._request_sessions[key] = inst", "def _check_session(self, request):\n if request.user.is_authenticated:\n current_session_key = request.session.session_key\n stored_session_key = request.user.logged_in_user.session_key\n\n if stored_session_key and stored_session_key != current_session_key:\n self.switch_session_data(request, current_session_key,\n stored_session_key)\n\n # update LoggedInUser table with relevant session key\n request.user.logged_in_user.session_key = current_session_key\n request.user.logged_in_user.save()", "def reload_sessions(self):\n import glob \n sessions = glob.glob('*.session')\n for x in sessions:\n self._db['accounts'][x.split('.')[0]] = { 'session': x.split('.')[0] }", "def set_session(aws_access_key_id=None,\n aws_secret_access_key=None,\n aws__session_token=None,\n region_name=None,\n profile_name=None,\n boto_session=None):\n global __session, client\n __session = boto_session if boto_session is not None else boto3.session.Session(**larry.core.copy_non_null_keys(locals()))\n client = __session.client('sts')", "def update_session(user):\n\n # Setup/update cookie\n user.cookie = token_urlsafe(64)\n user.cookie_expiration = datetime.now() + timedelta(hours=2)\n\n # Commit\n db.session.add(user)\n db.session.commit()\n\n cookie = user.cookie\n return cookie", 
"async def set_session(self,ctx,stype,*,text): \n if stype == \"main\":\n await ctx.send(\"```\" + \"The session ID for \" + stype + \" is now:\" + \" \" + text + \"```\")\n await self.config.sessions.main.set(text)\n elif stype == \"red\":\n await ctx.send(\"```\" + \"The session ID for \" + stype + \" is now:\" + \" \" + text + \"```\")\n await self.config.sessions.red.set(text)\n elif stype == \"amber\":\n await ctx.send(\"```\" + \"The session ID for \" + stype + \" is now:\" + \" \" + text + \"```\")\n await self.config.sessions.amber.set(text)\n elif stype == \"green\":\n await ctx.send(\"```\" + \"The session ID for \" + stype + \" is now:\" + \" \" + text + \"```\")\n await self.config.sessions.green.set(text)\n else:\n await ctx.send(\"invalid team\")", "async def update_user(self, cookie, remote_addr, user_agent):\n # SELECT uuid_in(md5(random()::text || now()::text)::cstring);\n # CREATE UNIQUE INDEX session_uuid_idx ON sessions(((data->>0)::uuid));\n if not cookie:\n sql = ('SELECT '\n 'uuid_in(md5(random()::text || now()::text)'\n '::cstring)::text')\n result = await self.db.fetchval(sql)\n # result = await self.md5.fetchrow()\n uuid = result\n await self.update_stats(1, 0, 0)\n else:\n uuid = cookie\n current_time = int(time.time())\n # -- UPSERT is PAIN\n # INSERT INTO sessions (data) VALUES\n # ('[\"983f2816-6ed2-4c4c-a13a-5432b67b6125\", 1489337990,\n # 0, \"00.00.00.00\", \"Not detected\"]')\n # on conflict (((data ->> 0)::uuid))\n # do update set data = jsonb_set(SESSIONS.data, '{2}','1');\n sql = (\n \" INSERT INTO sessions (data) VALUES\"\n \" ('[\\\"{0}\\\", {1}, {2}, \\\"{3}\\\", \\\"{4}\\\"]') \"\n \" on conflict (((data ->> 0)::uuid)) \"\n \" do update set data = \"\n \" jsonb_set(\"\n \" jsonb_set(\"\n \" jsonb_set(\"\n \" jsonb_set(SESSIONS.data, '{{1}}','{1}'),\"\n \" '{{2}}',(select (((data->>2)::int+1)::text)::jsonb\"\n \" from sessions where data->>0 = '{0}')),\"\n \" '{{3}}', '\\\"{3}\\\"'),\"\n \" '{{4}}', '\\\"{4}\\\"');\")\n sql = sql.format(\n uuid, current_time, 0, remote_addr, user_agent)\n await self.db.execute(sql)\n return uuid", "def configure(self, data: dict) -> Session:\n return self.request(uri=self.uri, method=\"PUT\", data=data)", "def startSession(self):\n self.storage.insert(self.__json__())", "def synchronize_session(self, session: str) -> None:\n temp_dir = Path(\n tempfile.mkdtemp(prefix=f\"chime6_{session}_\", dir=self.output_dir)\n )\n\n if session not in self.audio_edits:\n logging.warning(f\"No audio edits found for session {session}\")\n return\n\n session_audio_edits = self.audio_edits[session]\n\n print(f\"Correcting {session} for frame drops...\")\n self.correct_frame_drops(temp_dir, session, frame_drops=session_audio_edits)\n\n print(f\"Correcting {session} for clock drift...\")\n self.correct_clock_drift(\n temp_dir,\n session,\n linear_fit=session_audio_edits,\n num_threads=self.num_workers,\n )\n\n print(f\"Adjusting timestamps in {session} JSON files...\")\n self.adjust_json_timestamps(session, linear_fit=session_audio_edits)\n\n # clean up\n shutil.rmtree(temp_dir)\n\n return", "def sessions(connection, verbose=False):\n response = requests.put(url=connection.base_url + '/sessions',\n headers={'X-MSTR-AuthToken': connection.auth_token},\n cookies=connection.cookies,\n verify=connection.ssl_verify)\n if verbose:\n print(response.url)\n return response", "def save_to_session(self, serializer: URLSafeSerializer, session):\n session[SESSION_STORE_KEY] = self.store(serializer)\n return session", "def 
test_modify_anonymous_session_var(self):\r\n cursor = self.db.cursor()\r\n cursor.execute(\"INSERT INTO session VALUES ('123456', 0, 0)\")\r\n cursor.execute(\"INSERT INTO session_attribute VALUES \"\r\n \"('123456', 0, 'foo', 'bar')\")\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n req = Mock(authname='anonymous', base_path='/', incookie=incookie,\r\n outcookie=Cookie())\r\n session = Session(self.env, req)\r\n self.assertEqual('bar', session['foo'])\r\n session['foo'] = 'baz'\r\n session.save()\r\n cursor.execute(\"SELECT value FROM session_attribute WHERE sid='123456'\")\r\n self.assertEqual('baz', cursor.fetchone()[0])", "def _insert_new_session():\n request = self._make_request()\n session_existing = self._set_up_session_in_Redis_and_makeOne( # noqa: F841\n request, session_id, session_dict={\"visited\": True}, **session_args\n )\n return request", "def add_session(self, session):\n with self._sessions_lock:\n if session.session_id in self.sessions:\n raise KeyError(\"non-unique session id %s for %s\" % (session.session_id, session))\n self.sessions[session.session_id] = session\n\n return session", "def update(self):\n self.__execute(self.pkgin_bin, \"update\")", "def test_existing_session_invalidate_nodupe(self):\n # existing session -> invalidate()\n request = self._make_request()\n session_id = self._get_session_id(request)\n self._set_session_cookie(request=request, session_id=session_id)\n request.session = self._makeOne(request)\n self._register_callback(request, request.session)\n persisted = request.session.redis.get(session_id)\n self.assertIsNotNone(persisted)\n\n # invalidate\n request.session.invalidate()\n response = webob.Response()\n request.response_callbacks[0](request, response)\n set_cookie_headers = response.headers.getall(\"Set-Cookie\")\n self.assertEqual(len(set_cookie_headers), 1)\n self.assertIn(\"Max-Age=0\", set_cookie_headers[0])\n\n # manually execute the callbacks\n request._process_finished_callbacks()\n\n # make sure this isn't in redis\n persisted = request.session.redis.get(session_id)\n self.assertIsNone(persisted)\n\n # make sure we don't have any keys in redis\n keys = request.session.redis.keys()\n self.assertEqual(len(keys), 0)", "def session(get_session):\n return get_session()", "def saveSession(self,overwriteOnConflict=False):\n\t\t################################################################\n\t\t#\tLog the parameters.\n\t\t################################################################\n\t\tself.debug( __name__ + \".saveSession(): called.\\n\" )\n\t\tself.debug( __name__ + \".saveSession(): overwriteOnConflict=\" + str( overwriteOnConflict ) + \".\\n\" )\n\t\tself.logIt( __name__ + \".saveSession(): Saving session=\" + str( self.configService.session.toString() ) + \".\\n\" )\n\t\tself.configService.save( self.configService.session, overwriteOnConflict )", "def user_update_password(self, session, data):\n\n endpoint_response = None\n\n if self.check_if_row_exists(session, data):\n\n try:\n\n user_row = self.get_user_by_id(session, data)\n\n if user_row is not None:\n user_id = user_row.user_id\n else:\n user_id = 0\n\n self.last_update_date = get_current_date(session)\n\n data['last_update_date'] = self.last_update_date\n\n # update row to database\n session.query(UsersAuthModel).filter(UsersAuthModel.user_id == user_id). 
\\\n update({\"password\": data.get('password'),\n \"last_update_date\": data.get('last_update_date')},\n synchronize_session='fetch')\n\n session.flush()\n\n # check update correct\n row_updated = self.get_one_user(session, data)\n\n logger.info('Data Updated: %s', str(row_updated))\n\n if row_updated:\n logger.info('Data User updated')\n\n endpoint_response = json.dumps({\n \"Username\": row_updated.user_name,\n \"Password\": row_updated.password,\n \"IsActive\": row_updated.is_active,\n \"IsStaff\": row_updated.is_staff,\n \"IsSuperUser\": row_updated.is_superuser,\n \"CreationDate\": row_updated.creation_date,\n \"UpdatedDate\": row_updated.last_update_date\n })\n\n except SQLAlchemyError as exc:\n session.rollback()\n endpoint_response = None\n\n logger.exception('An exception was occurred while execute transactions: %s',\n str(str(exc.args) + ':' +\n str(exc.code)))\n raise mvc_exc.IntegrityError(\n 'Row not stored in \"{}\". IntegrityError: {}'.format(data.get('username'),\n str(str(exc.args) + ':' + str(exc.code)))\n )\n finally:\n session.close()\n\n return endpoint_response", "def create_session(self,session_id,host_id,host_name,spotify_token):\n self.sessions[session_id] = {\n \"HOST\" : {\n \"ID\" : host_id,\n \"NAME\" : host_name,\n \"spotify_token\" : spotify_token,\n \"spotify_player\": None,\n },\n \"queue\" : [],\n \"queue_lock\" : False,\n \"current_track\" : \"\",\n \"previous_track\" : \"\",\n \"USERS\" : {}\n }", "async def create_session(session: SessionModel, mongo: MongoDB = mongodb) -> SessionOutModel:\n if not await mongo.session_coll.find_one({\"id\": session.id}):\n await mongo.session_coll.insert_one(session.dict())\n else:\n await mongo.session_coll.update_one({\"id\": session.id}, {'$set': {'status': session.status}})\n return SessionOutModel(**session.dict())", "def merge_session_with_post(session, post):\n for key, value in post:\n setattr(session, key, value)\n return session", "def post(self):\n DA = DataAccessor()\n session = getSessionByRequest(self)\n user = getSessionUser(session)\n \n old = self.request.get('old')\n new = self.request.get('new')\n new2 = self.request.get('new2')\n\n if old != user.password:\n setSessionMessage(session, \"Invalid Password\")\n self.redirect('/admin')\n\n if (new != new2) :\n setSessionMessage(session, \"Your new passwords did not match. 
Please try again.\", True)\n else:\n setSessionMessage(session, \"You have successfully changed your password.\", False)\n \n #Reset the password\n DA.update(user, password=new)\n\n #Reset the session.\n session.generated = False\n session.put()\n self.redirect('/admin')", "async def patch(url, session=None, **kwargs):\n\n method = 'PATCH'\n resp = await _request(method, url, session=session, **kwargs)\n return resp", "def new_session_loaded(self):\n session = self.parent.session\n if session is None: return None\n #logger.debug(\"LOADING NEW SESSION\")\n self.figure.new_session(session)\n self.refresh_table()\n self.summarize_current_table()\n self.refresh_plots()\n self.update_fitting_options()\n return None", "def resume_session(self, session):\n return True, {\n \"question\": self.question,\n \"choices\": self.choices\n }", "def session(self):\n if not self._session: #Create new session if none exists\n return self._new_session()\n return self._session", "def update(self):\n self.attributes = self.call('UPDATE', expect=error.OK, body=self.attributes)", "def update(self, path, **kwargs):\n client = self.connect(VAULT_TOKEN)\n\n existing = client.read(path)\n if existing is None:\n existing = {}\n else:\n existing = existing[\"data\"]\n\n existing.update(kwargs)\n\n client.write(path, **existing)", "def do_save(self, arg):\n smores.save_session(self.__version__)", "def current_session_view(request):\n if request.method == 'POST':\n form = CurrentSessionForm(request.POST)\n if form.is_valid():\n session = form.cleaned_data['current_session']\n term = form.cleaned_data['current_term']\n AcademicSession.objects.filter(name=session).update(current=True)\n AcademicSession.objects.exclude(name=session).update(current=False)\n AcademicTerm.objects.filter(name=term).update(current=True)\n AcademicTerm.objects.exclude(name=term).update(current=False)\n\n else:\n form = CurrentSessionForm(initial={\n \"current_session\": AcademicSession.objects.get(current=True),\n \"current_term\": AcademicTerm.objects.get(current=True)\n })\n\n\n return render(request, 'corecode/current_session.html', {\"form\":form})", "def update(self, request, pk=None):\n return Response({'http_method': 'PUT'})", "def get_session_info(self, session: Session) -> SessionInformation:\n session_info = self.make_request(\n HttpMethod.PUT,\n self.get_full_url(SESSION_STATUS_PATH_FORMAT),\n body_data={\"id\": session.execution_id},\n )\n status = KubernetesPodStatus(session_info[\"status\"].lower())\n return SessionInformation(SESSION_STATUS_LOOKUP[status])", "def update_user_login_data():\n if not 'user' in session:\n raise InvalidUsage(\"Access denied\", 401)\n\n data = request.json\n if 'email' not in data or not data['email']:\n raise InvalidUsage(\"Email must not be empty\", 422)\n if 'currentPassword' not in data or len(data['currentPassword']) < 6:\n raise InvalidUsage(\"Current password must have more then 5 characters\", 422)\n if 'newPassword' not in data or len(data['newPassword']) < 6:\n raise InvalidUsage(\"New password must have more then 5 characters\", 422)\n if 'newPasswordAgain' not in data or data['newPassword'] != data['newPasswordAgain']:\n raise InvalidUsage(\"New password does not match\", 422)\n\n database = mysql.get_db()\n cursor = database.cursor()\n activeUser = session.get('user')\n\n query = '''SELECT password\n FROM users\n WHERE users.id = %s'''\n\n cursor.execute(query, (activeUser['id']))\n user = cursor.fetchone()\n\n if not bcrypt.check_password_hash(user['password'], data['currentPassword']):\n 
raise InvalidUsage(\"Wrong current password\", 401)\n\n query = '''SELECT id\n FROM users\n WHERE users.email = %s AND users.id != %s'''\n\n cursor.execute(query, (data['email'], activeUser['id']))\n cursor.fetchone()\n\n if cursor.rowcount != 0:\n raise InvalidUsage(\"User with this email already exists\", 422)\n\n query = '''UPDATE users\n SET email = %s, password = %s\n WHERE id = %s'''\n\n hashed_password = bcrypt.generate_password_hash(data['newPassword'])\n cursor.execute(query, (data['email'], hashed_password, session.get('user')['id']))\n database.commit()\n\n activeUser['email'] = data['email']\n session['user'] = activeUser\n\n return jsonify({'message': 'Successfully updated'}), 200", "def create_new_session(self) -> None:\n try:\n session = self.client.create_session()\n logger.info(\"created session: %s\", session.id)\n self.join_session(session.id)\n location_config = self.app.guiconfig.location\n self.session.location = SessionLocation(\n x=location_config.x,\n y=location_config.y,\n z=location_config.z,\n lat=location_config.lat,\n lon=location_config.lon,\n alt=location_config.alt,\n scale=location_config.scale,\n )\n except grpc.RpcError as e:\n self.app.show_grpc_exception(\"New Session Error\", e)", "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})" ]
[ "0.7159977", "0.686218", "0.68574136", "0.66107583", "0.6506551", "0.6489274", "0.6447803", "0.64465", "0.63691455", "0.63294715", "0.62878484", "0.6127597", "0.6104474", "0.6089136", "0.6085308", "0.6046978", "0.6046949", "0.60441613", "0.5959553", "0.59453976", "0.5927981", "0.59049016", "0.5862541", "0.5862541", "0.58554435", "0.58338207", "0.58177024", "0.5811466", "0.5779311", "0.57515186", "0.5724029", "0.5648431", "0.5610777", "0.5597431", "0.55908245", "0.557528", "0.55672926", "0.55512494", "0.55407995", "0.55332327", "0.55324167", "0.5525755", "0.5519687", "0.5511034", "0.5507628", "0.5479739", "0.5477566", "0.5462862", "0.5437726", "0.54251343", "0.53681225", "0.5353068", "0.53507036", "0.53475165", "0.5346348", "0.53396875", "0.53232205", "0.5322058", "0.5310085", "0.5306853", "0.5306161", "0.53053856", "0.52872866", "0.52635753", "0.5261738", "0.5248095", "0.5244156", "0.5234297", "0.5233117", "0.5231631", "0.5216951", "0.5213744", "0.5200432", "0.51969665", "0.51907486", "0.5160786", "0.51502156", "0.514029", "0.5137119", "0.5134884", "0.5123938", "0.5117789", "0.5117205", "0.5112598", "0.51082414", "0.50970167", "0.50897366", "0.5089652", "0.5088398", "0.5084446", "0.5076196", "0.50697833", "0.50660145", "0.5065538", "0.506203", "0.5053303", "0.5050072", "0.50391227", "0.5037833", "0.50367963" ]
0.726062
0
Updates the content and returns the ID
def refresh_content( path: str, content: Optional[bool] = False, type: Optional[str] = None, format: Optional[str] = None, ) -> str: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateContent(content, **kwargs):", "def update_content(self):\n raise NotImplementedError", "def on_update(self):\n self.contentnode.make_content_id_unique()", "def put(self,id):\r\n data = request.json\r\n return update(id=id,data=data)", "def update(self, data):\n self.content = data", "def patch(self):\n\n if session.get(\"login\",False) is not True:\n return {\n \"errno\": 699,\n \"describe\": \"需要登录\"\n }\n\n id = request.form.get(\"id\")\n content = request.form.get(\"content\")\n hashtag = request.form.get(\"hashtag\")\n\n hashtag = [] if hashtag == None or hashtag == \"\" else hashtag.split( \",\" )\n if isinstance(hashtag, str):\n hashtag = json.loads(hashtag)\n\n edit_doc(id, content, hashtag)\n\n return {\"errno\":0}", "def update(request):\n paste = Paste.get(request.matchdict['idContent'])\n\n password = _buildPassword(paste.username, paste.created, request.POST['password'])\n\n if password == paste.password:\n paste.title = request.POST['title']\n paste.content = request.POST['content']\n\n paste.save()\n\n request.session.flash(u\"Updated\") # TODO translatoion\n\n return HTTPFound(request.route_path('oneContent', idContent=paste._id))\n\n request.session.flash(u\"Wrong password\") # TODO translatoion\n\n return HTTPFound(request.route_path('edit', idContent=paste._id))", "def update_contentnode_content_id(self):\n if self.contentnode and self.preset.thumbnail is False:\n self.contentnode.make_content_id_unique()", "def handle_content_edit(content_id):\n\n # instance of ContentForm is available to both GET and POST requests\n form = ContentForm()\n\n # content will be None if it cannot be found\n content = Content.find_by_id(content_id)\n\n # POST - for handling the edit content form\n if form.validate_on_submit():\n\n # validation - owner email must exist\n owner_email = form.owner_email.data\n owner_obj = Owner.find_by_email(owner_email)\n if not owner_obj:\n flash(f'Owner with the email {owner_email} does not exist!',\n 'danger')\n # if owner not exist, edit page is reloaded with same content id\n return redirect(url_for('content_edit', content_id=content.id))\n\n # content type choice is extracted from the form\n choice = form.content_type.data # user choice\n choices = dict(ContentForm.SELECT_CHOICES) # all possible choices\n\n # content is updated with form values and saved to the database\n content.content_name = form.content_name.data.title()\n content.content_type = choices.get(choice)\n content.valid_months = form.valid_months.data\n content.updated_at = date.today() # today's date becomes last updated\n content.owner_id = owner_obj.id\n\n # saving content errors handled\n try:\n content.save_content()\n except HTTPException:\n return \"Server cannot update the content at this time\", 500\n\n # user is redirected to the main content page with success msg\n flash(f'{content.content_name} has been updated!', 'success')\n return redirect(url_for('content'))\n\n # GET - display the form\n # form is pre-populated with existing content data\n form.content_name.data = content.content_name\n form.owner_email.data = Owner.find_by_id(content.owner_id).owner_email\n form.valid_months.data = content.valid_months\n form.submit.data = \"Update Content\"\n\n # content type stored in this content is looked up against all types\n # each choice is a tuple pair - (stored choice, displayed choice)\n for form_type in ContentForm.SELECT_CHOICES:\n # choice becomes default value on form if it matches the stored value\n if form_type[1] == content.content_type:\n 
form.content_type.data = form_type[0]\n\n return render_template('content_edit.html',\n content_name=content.content_name,\n form=form)", "def update(id):\r\n post = get_post(id)\r\n db = get_db()\r\n cur = db.cursor()\r\n\r\n if request.method == 'POST':\r\n title = request.form['title']\r\n body = request.form['body']\r\n error = None\r\n\r\n cur.execute('SELECT id FROM novel.post WHERE title = %s', title)\r\n newId = cur.fetchone()\r\n\r\n\r\n\r\n if not title:\r\n error = 'Title is required.'\r\n\r\n if newId and newId['id'] != id:\r\n error = 'Title is repeated.'\r\n\r\n if error is not None:\r\n flash(error)\r\n else:\r\n\r\n cur.execute(\r\n 'UPDATE novel.post SET title = \"{0}\", body = \"{1}\" WHERE id = {2}'\r\n .format(title, body, id)\r\n )\r\n db.commit()\r\n return redirect(url_for('novel.index'))\r\n\r\n return render_template('novel/update.html', post=post)", "def content_id(self, value):\n self._content_id = value", "def update(self,request,pk = None):\n return Response({'http_method':'PUT'})", "def update_content(self, group, md, html, toc):\n self.html = html\n self.toc = toc\n diff = unified_diff.make_patch(self.md, md)\n if diff:\n pv = WikiPageVersion(diff, self.current_version, self.modified_on,\n self.modified_by).switch_db(group).save()\n self.versions.append(pv)\n self.md = md\n self.modified_on = datetime.now()\n self.modified_by = current_user.name\n self.current_version += 1\n \n with switch_db(WikiCache, group) as _WikiCache:\n _cache = _WikiCache.objects.only('changes_id_title').first()\n _cache.add_changed_page(self.id, self.title, self.modified_on)\n self.save()", "def update(cls, id, xml):\n raise Exception('Not Implemented Yet')", "def save_changes(id,content):\n\tgeneric_query(\"UPDATE docs SET content=? WHERE id=?\",(content,id))\n\tinvalidate_doc_by_id(id)", "def edit(id):\n r = requests.get(API_ROUTE + '/' + str(id), headers={'Auth': _auth()})\n if r.status_code != requests.codes.ok:\n return r.text, r.status_code\n\n return render_template('editor.html', article=r.json())", "def put(self, id):\n return update_msg(request.json, id)", "def update_item_md(dataobj_id, new_content):\n\n from archivy.models import DataObj\n\n filename = get_by_id(dataobj_id)\n dataobj = frontmatter.load(filename)\n dataobj[\"modified_at\"] = datetime.now().strftime(\"%x %H:%M\")\n dataobj.content = new_content\n md = frontmatter.dumps(dataobj)\n with open(filename, \"w\", encoding=\"utf-8\") as f:\n f.write(md)\n\n converted_dataobj = DataObj.from_md(md)\n converted_dataobj.fullpath = str(\n filename.relative_to(current_app.config[\"USER_DIR\"])\n )\n converted_dataobj.index()\n current_app.config[\"HOOKS\"].on_edit(converted_dataobj)", "def update(_id): \n pages_object = Pages(_id)\n page = pages_object.page\n \n language_name = languages_object.get_languages(3)\n \n # Update page\n if request.method == 'POST':\n if pages_object.update():\n return redirect(url_for('pages.overview'))\n \n len_of_label = len(page['label'])\n \n # Come back a message when there is an error\t\n if not pages_object.message is None:\n message = pages_object.message\n status = pages_object.status\n \n return render_template('{}/update.html'.format(MODULE_DIR), **locals())", "def update(id):\n if request.method == \"POST\":\n result = update_post(\n id,\n request.form[\"title\"],\n request.form[\"body\"]\n )\n flash(result)\n return redirect(url_for(\"show\"))\n else:\n post = get_post(id)\n return render_template(\"edit.html\", **post)", "def content_id(self):\n return self._content_id", "def 
edit(ctx, docid, password):\n coll = db.get_document_collection(ctx)\n config = ctx.obj[\"config\"]\n\n doc, docid = db.get_document_by_id(ctx, docid)\n title = doc[\"title\"]\n\n template, c = db.get_content(ctx, doc, password=password)\n\n content, tmpfile = utils.get_content_from_editor(config[\"editor\"], template=template)\n d = datetime.datetime.now()\n\n if doc[\"encrypted\"] is True:\n title = utils.get_title_from_content(content)\n content = c.encrypt_content(content.decode(\"utf-8\").encode(\"utf-8\"))\n else:\n if not \"links\" in doc[\"categories\"]:\n title = utils.get_title_from_content(content)\n\n if isinstance(template, unicode):\n content = content.decode(\"utf-8\")\n\n if content != template:\n doc[\"content\"] = content\n doc[\"title\"] = title\n doc[\"updated\"] = d\n if validate(doc):\n coll.save(doc)\n else:\n utils.log_error(\"Validation of the updated object did not succeed\")\n\n transaction.log(ctx, docid, \"edit\", title)\n utils.log_info(\"Document \\\"%s\\\" updated.\" % title)\n else:\n utils.log_info(\"No changes detected for \\\"%s\\\"\" % title)\n\n utils.clean_tmpfile(tmpfile)\n\n return True", "def refresh(self, new_content):\n pass", "def get(self):\n return self.content_id", "def update_content(self, address, owner, content):\n page = self.get(address=address)\n page.check_address()\n revision = page.revision_set.create(page=page, content=content, owner=owner)\n page.update_html(content)\n return page", "def edit(self, new_content: str) -> None:\n\n # YOUR CODE HERE\n self.content = new_content", "def setID(id, content):\n return content.replace(\"$ID\", str(id))", "def edit_task(id):\n\n if not id:\n raise InvalidAPIUsage(\"id is required\")\n\n content = get_content_or_400(request)\n\n collection = get_db_collection()\n\n task = get_task_or_404(collection, id)\n\n collection.update_one({\"_id\": task[\"_id\"]}, {\"$set\": {\"content\": content}})\n\n response = jsonify()\n response.status_code = 200\n return response", "def put(self, request, pk):\n return self.update(request, pk)", "def put(self, id):\n data = request.json\n update_entry(id, data)\n return None, 204", "def put(self, data):\n if 'content' in data:\n self.context.content = data['content']\n self.context.mtime = datetime.datetime.now()\n self.db.flush()\n return self.context.as_dict(self.user)", "def save_edit(request, post_id):\n if request.method == \"PUT\":\n data = json.loads(request.body)\n user = request.user\n post = Post.objects.get(id=post_id)\n content = data.get(\"content\", \"\")\n # Check to make sure user attempting edit is author\n if user == post.author:\n post.content = content\n post.save()\n return JsonResponse({\"content\": post.content})\n else:\n return JsonResponse({\"message\": \"Not authorized to edit\"})", "def put(self, request, pk=None): #pk of id of objects to be updated (DB)\n return Response({'method':'PUT'})", "def put(self, request, pk):\n return self.post(request, pk)", "def content_id(self) -> Optional[str]:\n return pulumi.get(self, \"content_id\")", "def PUT(self):\n # Check upload size\n body = self.request.get('BODYFILE')\n checkUploadSize(self.context, body)\n\n # If the module is published, do a transparent checkout\n if self.context.state == 'published':\n self.context.checkout(self.context.objectId)\n\n filename = self.request.get_header(\n 'Content-Disposition', self.context.title)\n content_type = self.request.get_header('Content-Type')\n\n parent = self.context.aq_inner.aq_parent\n adapter = getMultiAdapter(\n (parent, self.request), 
IRhaptosWorkspaceSwordAdapter)\n\n cksum = self.request.get_header('Content-MD5')\n merge = self.request.get_header('Update-Semantics')\n\n body.seek(0)\n adapter.updateContent(self.context, body, content_type, cksum,\n merge == 'http://purl.org/oerpub/semantics/Merge')\n self.context.logAction(adapter.action)\n\n view = self.__of__(self.context)\n pt = self.depositreceipt.__of__(view)\n return pt()", "def update(self, store, uuid, contents):\n\n stored_file = self._retrieve(store.object_type, uuid)\n\n stored_file.contents = contents\n\n if store.versioned:\n version = self._get_latest_version(store, stored_file.name) + 1\n return self._create(\n store, stored_file.name, stored_file.contents, version)\n\n return self._upsert(store, stored_file)", "def edit_entry(self, id, body=None, link=None, **args):\n args.update(id=id)\n if body: args.update(body=body)\n if link: args.update(link=link)\n return self.fetch(\"/entry\", post_args=args)", "def update_document(self, portal_name, content_id, document):\n if isinstance(document, dict):\n document = json.dumps(document)\n r = requests.put('/'.join([self.base_url,\n self.DOCUMENTS_ENDPOINT,\n portal_name,\n str(content_id)]),\n data=document,\n headers={'Content-Type': 'application/json'})\n return r.json()", "def update(request):\n return 0", "def put(self, id):\n return None, 204", "def update(self, request, pk=None):\n return Response({'http_method': 'PUT'})", "def update(self, request, pk=None):\n\n return Response({'http_method':'PUT'})", "def update(self, data):\n return data", "def _update_content(self, content, siteurl):\n if not content:\n return content\n\n hrefs = self._get_intrasite_link_regex()\n return hrefs.sub(lambda m: self._link_replacer(siteurl, m), content)", "def partial_update(self,request,pk = None):\r\n\r\n return Response({'HTTP method':'PATCH'})", "def update(self, request, pk=None): #update a specific object\n return Response({'http_method': 'PUT'})", "def _update_from_rest_data(self) -> None:", "def patch(self, request, pk=None): #pk of id of objects to be updated (DB)\n return Response({'method':'PATCH'})", "def update(self) -> requests.request:\n # Check if id is set\n if self.args.id is None:\n raise Exception('Provide id of asset you want to update')\n\n # Check URL validity\n if self.args.url is not None and self.check_url_invalidity():\n raise Exception('Provided URL is not valid')\n\n # Send PUT request\n return requests.put(\n self.REQUEST_URL + str(self.args.id),\n {'title': self.args.title, 'label': self.args.label, 'url': self.args.url}\n )", "def content_put(self, request, pk=None):\n container = self.get_object()\n content = request.data\n status_code = HttpResponseBadRequest.status_code\n new_tag = content.get('new_tag')\n new_description = content.get('new_description')\n if container.is_singularity():\n response_data = dict(message=NO_SINGULARITY_PUT)\n elif 'pipeline' not in content:\n response_data = dict(pipeline=['This field is required.'])\n elif new_tag and Container.objects.filter(tag=new_tag).exists():\n response_data = dict(new_tag=['Tag already exists.'])\n else:\n if not new_tag:\n permissions_copy = None\n else:\n permissions_copy = (list(container.users_allowed.all()),\n list(container.groups_allowed.all()))\n container.pk = None # Saves a copy.\n container.tag = new_tag\n if new_description:\n container.description = new_description\n with use_field_file(container.file):\n container.file.save(os.path.basename(container.file.name),\n File(container.file))\n try:\n 
container.write_archive_content(content)\n container.save()\n if permissions_copy:\n container.grant_from_permissions_list(permissions_copy)\n response_data = container.get_content()\n status_code = Response.status_code\n except ExistingRunsError as ex:\n response_data = dict(pipeline=[ex.args[0]])\n return Response(response_data, status_code)", "def updateDoc(self, path):\n self.db.setDb(self.db_file)\n \n if not self.authd:\n self._authorize()\n \n db_row = self.db.getRowFromPath(path)\n if not db_row:\n return False\n \n resource_id = db_row[0]\n etag = db_row[1]\n title = db_row[2]\n \n ms = gdata.data.MediaSource(file_path=path, content_type=MIMETYPES['ODT'])\n doc = self.client.GetDoc(resource_id.replace(':', '%3A'))\n new_version = self.client.Update(doc, media_source=ms)\n print 'Document pushed:', new_version.GetAlternateLink().href\n \n self.db.resetEtag(new_version)", "def edit_comment(self, id, body, **args):\n args.update(id=id, body=body)\n return self.fetch(\"/comment\", post_args=args)", "def putfunc(self, liste, conn):\n\t\tput_tree = ET.parse('test.xml')\n\t\tput_root = put_tree.getroot()\n\t\t\n\t\told_id= liste[-2]\n\t\told_id = old_id.strip('id=\"')\n\t\t\n\t\told_val = liste[-1]\n\t\told_val = old_val.split('\"')\n\t\t\n\t\t#update the given id's message\n\t\tfor node in put_root:\n\t\t\tif (node.attrib['id'] == old_id):\n\t\t\t\tnode.attrib['value'] = old_val[1]\n\t\t\t\n\t\tput_tree.write('./test.xml')\n\t\t\n\t\tsend = self.gen_msg(200, 0, 'PUT')\n\t\tconn.send(send)", "def upload_content(newContent):\n # Get current content from Wiki\n currentWiki = requests.get(f\"{config['rootURI']}/api/v4/projects/{config['projectID']}/wikis/{config['wikiSlug']}\",\n headers={\"PRIVATE-TOKEN\": config['PAT']})\n\n # Append load_content to current content\n oldContent = getContent(currentWiki)\n # Get old ID - Increment\n id = getLastId(oldContent)\n id += 1\n # Set the ID value on the new content\n newContent = setID(id, newContent)\n content = oldContent + newContent\n\n # Push changes to Gitlab\n requests.put(f\"{config['rootURI']}/api/v4/projects/{config['projectID']}/wikis/{config['wikiSlug']}\",\n headers={\"PRIVATE-TOKEN\": config['PAT']},\n params={\"content\": content})\n return", "def edit(self, new_content: object, reason: str = \"\") -> None:\n raise NotImplementedError", "def edit_document():", "def update_item(self, id: str, user: User, **kwargs) -> None:", "def contentId(groupId, assignmentId, channelId):\n group = db.Group.find_one_or_404({\"_id\": ObjectId(groupId)})\n if group is None:\n return \"Group Not Found\", 404\n assignment = db.Assignment.find_one_or_404(\n {\"_id\": ObjectId(assignmentId)}\n )\n if assignment is None:\n return jsonify({\"msg\": \"Assignment Not Found\"}), 404\n if request.method == \"GET\":\n data = {\n \"assignmentId\": assignmentId,\n \"name\": assignment[\"name\"],\n \"dis\": assignment[\"dis\"],\n \"maxGrade\": assignment[\"maxGrade\"],\n \"startDate\": assignment[\"startDate\"],\n \"dueDate\": assignment[\"dueDate\"],\n \"url\": assignment[\"url\"],\n }\n return dumps(data), 200\n if request.method == \"DELETE\":\n db.Assignment.deleteOne({\"_id\": ObjectId(assignment[\"_id\"])})\n return jsonify({\"msg\": \"Assignment Deleted\"}), 204\n # elif request.method == \"PATCH\":\n # patchData = request.json\n # newName = request.json.get('name')\n # newDis = request.json.get(\"dis\")\n # newMaxGrade = request.json.get(\"maxGrade\")\n # newStartDate = request.json.get(\"startDate\")\n # newDueDate = request.json.get(\"dueDate\")\n # 
newUrl = request.json.get(\"url\")\n # db.Assignment.update_one({'_id': ObjectId(assignment['_id'])}, {\n # \"$set\": {\n #\n # }\n # })", "def do_PUT(self):\n content_len = int(self.headers.get('content-length', 0))\n post_body = self.rfile.read(content_len)\n post_body = json.loads(post_body)\n (resource, id) = self.parse_url(self.path)\n\n success = False\n\n if resource == \"categories\":\n success = update_category(id, post_body)\n if resource == \"comments\":\n success = edit_comment(id, post_body)\n if resource == \"posts\":\n success = update_post(id, post_body)\n\n if success:\n self._set_headers(204)\n else:\n self._set_headers(404)\n\n self.wfile.write(\"\".encode())", "def post(self):\n modified_content = self.request.get('comment_edit')\n comment_id = self.request.get('comment_id')\n comment = Comments.get_by_id(int(comment_id))\n user = self.get_active_user()\n\n if user.key().id() == comment.submitter_id:\n comment.content = modified_content\n comment.put()\n self.redirect('/%s' % str(comment.post_id))\n else:\n self.error(403)", "def submission_update_description(request, submission_pk):\n try:\n submission = models.CompetitionSubmission.objects.get(pk=submission_pk)\n if submission.participant.user != request.user:\n raise Http404()\n submission.description = request.POST.get('updated_description')\n submission.save()\n return HttpResponse()\n except models.CompetitionSubmission.DoesNotExist:\n raise Http404()", "def put(self, id):\n return add_comment(request.json, id)", "def write(self, id, data):\n return self._call('%s.update' % self._shopware_model,\n [int(id), data])", "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})", "def update_article():\n\n article = {}\n article[\"title\"] = getattr(request.forms, \"title\")\n article[\"text\"] = getattr(request.forms, \"article\")\n article[\"id\"] = request.forms.get(\"id\")\n\n articles = get_articles()\n\n if article[\"title\"] == \"\" and article[\"text\"] == \"\":\n return template(\"fel\")\n\n elif article[\"id\"] == \"\":\n highest_id = 0\n for a in articles:\n if int(a[\"id\"]) > highest_id:\n highest_id = int(a[\"id\"])\n \n article[\"id\"] = highest_id + 1\n articles.append(article)\n my_file = open(\"artiklar.json\", \"w\")\n my_file.write(json.dumps(articles, indent=4))\n my_file.close()\n\n redirect (\"/wiki/\" + article[\"title\"])\n\n else:\n article_id = int(article[\"id\"])\n for i in articles:\n if article_id == i[\"id\"]:\n articles.remove(i)\n article[\"id\"] = article_id\n\n articles.append(article)\n my_file = open(\"artiklar.json\", \"w\")\n my_file.write(json.dumps(articles, indent=4))\n my_file.close()\n\n redirect (\"/wiki/\" + article[\"title\"])", "def updateOne(self,ident):\n \tLOGGER.info(\"lazily updating {}\".format(ident))\n \tself.idToUpdate=ident\n \tself.newState=''\n \tself.save()", "def save(self)->None:\n item = database.cursor.fetchone()\n if item:\n self.id = item['id']\n database.connection.commit()", "async def update(self, pk, payload):\n\n self.log.info(payload)\n await self.db.update(pk, payload)\n return await self.get_one(pk)", "def update_document(self):\n pass", "async def update_hacks_content(self, attacker_id: int) -> None:\n\n mycursor, db = await the_database()\n await mycursor.execute(\"UPDATE SlothSkills SET content = 'virus' WHERE user_id = %s\", (attacker_id,))\n await db.commit()\n await mycursor.close()", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n 
pass", "def edit_entry(id):\n if not session.get('logged_in'):\n abort(401)\n\n if request.method == 'POST':\n db = get_db()\n db.execute('update entries set title = ?, ingredients = ?, \\\n steps = ?, tags = ?, url = ? where id = ?',\n [request.form['title'], request.form['ingredients'],\n request.form['steps'], request.form['tags'],\n request.form['url'], request.form['id']])\n db.commit()\n flash('Entry ' + id + ' has been modified.', 'success')\n return view_entry(str(id))\n else:\n db = get_db()\n cur = db.execute('select id, title, ingredients, steps, tags, \\\n url from entries where id = ? order by id desc',\n [id.strip()])\n entries = cur.fetchall()\n return render_template('edit_entry.html', entries=entries)", "def update(id):\n post = get_post(id)\n\n if request.method == 'POST':\n title = request.form['title']\n body = request.form['body']\n error = None\n\n if not title:\n error = 'Title is required.'\n\n if error is not None:\n flash(error)\n else:\n db = get_db()\n db.execute(\n 'UPDATE post SET title = ?, body = ? WHERE id = ?',\n (title, body, id)\n )\n db.commit()\n return redirect(url_for('blog.thread', id=id))\n\n return render_template('blog/update.html', post=post)", "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})", "def update(self, file_id: str, data: bytes, offset: int) -> int:\n if not self.has(file_id):\n raise KeyError('No file {}'.format(file_id))\n\n self._touch_file(file_id)\n file_path = self._path_to_file(file_id)\n\n with open(file_path, 'r+b') as f:\n f.seek(offset)\n return f.write(data)", "def update(self, request, pk=None):\n\n job = Job.objects.get(pk=pk)\n job.title = request.data[\"title\"]\n job.description = request.data[\"description\"]\n job.city = request.data[\"city\"]\n job.state = request.data[\"state\"]\n job.application = request.data[\"application\"]\n user = request.auth.user\n job.user = user\n job.save()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "def test_update_content_no_file(self):\n url = reverse('content-list')\n with tempfile.NamedTemporaryFile(suffix='.txt') as content_file:\n content_file.write(b\"The contents of the temporary file.\\n\")\n content_file.seek(0)\n data = {\n 'name': 'Content File',\n 'description': 'File 1',\n 'content_file': content_file,\n 'updated_time': date.today(),\n 'creators': [],\n 'coverage': '',\n 'subjects': [],\n 'keywords': [],\n 'workareas': [],\n 'language': '',\n 'cataloger': ''\n }\n response = self.client.post(url, data, format='multipart')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n self.assertEqual(Content.objects.count(), 1)\n content = Content.objects.first()\n last_uploaded_time = content.last_uploaded_time\n updated_data = {\n 'name': 'Updated Content Name',\n 'description': 'New description'\n }\n url = reverse('content-detail', args=[content.pk])\n response = self.client.patch(url, updated_data, format='json')\n content = Content.objects.first()\n self.assertEqual(last_uploaded_time, content.last_uploaded_time)", "def boxUpdate(client, file_id, path):\n\tbox_file = client.file(file_id).update_contents(path)\n\treturn box_file", "def partial_update(self,request,pk= None):\n return Response({'http_method':'PATCH'})", "def update(table, id_):\n\n # your code\n\n return table", "def update():", "def update():", "def update(self) -> None:\n ...", "def update_html(site_name, doc_name, contents, directory='', dry_run=True):\n if not doc_name:\n raise AttributeError('no_name')\n if not contents:\n raise 
AttributeError('no_contents')\n if not directory:\n directory = '/'\n doc_name = pathlib.Path(doc_name).stem\n\n siteid = _get_site_id(site_name)\n if siteid is None:\n raise FileNotFoundError('no_site')\n dirid = _get_dir_id(siteid, directory)\n if dirid is None:\n raise FileNotFoundError('no_subdir')\n doc_id, _, html_id = _get_doc_ids(dirid, doc_name)\n if doc_id is None:\n raise FileNotFoundError(\"no_document\")\n if dry_run:\n return\n\n if html_id is None:\n new_html_id = _add_doc()\n oldtext = ''\n else:\n oldtext = _get_doc_text(html_id)\n ## if contents == oldtext:\n ## return 'text has not changed'\n\n dts = datetime.datetime.utcnow()\n if html_id is None:\n querystring = 'update {} set currtext = %s where id = %s;'\n result = execute_query(querystring.format(TABLES[4]), (contents, new_html_id))\n querystring = 'update {} set target_docid = %s, target_updated = %s where id = %s;'\n result = execute_query(querystring.format(TABLES[3]), (new_html_id, dts, doc_id))\n else:\n querystring = 'update {} set previous = %s, currtext = %s where id = %s;'\n result = execute_query(querystring.format(TABLES[4]), (oldtext, contents, html_id))\n querystring = 'update {} set target_updated = %s where id = %s;'\n result = execute_query(querystring.format(TABLES[3]), (dts, doc_id))", "def update(entry_id):\n entry = models.Journal.select().where(\n models.Journal.id == entry_id).get()\n form = forms.JournalForm() # if the form validates\n if form.validate_on_submit(): # if click update button\n entry.title = form.title.data\n entry.date = form.date.data\n entry.time_spent = form.time_spent.data\n entry.learnt = form.learnt.data\n entry.resources = form.resources.data\n entry.save() # commit the changes\n flash('Entry has been updated', 'success')\n return redirect(url_for('detail', entry_id=entry.id))\n elif request.method == 'GET': # fill the form with current data\n form.title.data = entry.title\n form.date.data = entry.date\n form.time_spent.data = entry.time_spent\n form.learnt.data = entry.learnt\n form.resources.data = entry.resources\n return render_template('update.html', form=form)", "def put(self, request, pk=None):\n return Response({'method': 'patch'})", "def update(self, message):\n return self.request().update(message)", "def note_update(request, pk):\n try:\n note = Note.objects.get(id=pk)\n serializer = NoteSerializer(instance=note, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n except Exception:\n return Response(\"Something terrible went wrong. Can't update this note.\")", "def update(id):\n\tpost = get_post(id)\n\n\tif request.method == 'POST':\n\t\ttitle = request.form['title']\n\t\tbody = request.form['body']\n\t\terror = None\n\n\t\tif not title:\n\t\t\terror = 'Title is required.'\n\n\t\tif error is not None:\n\t\t\tflash(error)\n\t\telse:\n\t\t\tdb = get_db()\n\t\t\tdb.execute(\n\t\t\t\t'UPDATE post SET title = ?, body = ? 
WHERE id = ?',\n\t\t\t\t(title, body, id)\n\t\t\t)\n\t\t\tdb.commit()\n\t\t\treturn redirect(url_for('blog.index'))\n\n\treturn render_template('blog/update.html', post=post)", "def update(self, identifier, data):\n self.client.request_with_method(Methods.UPDATE % (self.name, identifier,),\n data=data)", "def update(self, example_id):\n\n download_file(self._remote.get_url(example_id), self._cache.get_local_cache_folder())", "def update():\n return 'update api in put'", "def put(self,request, pk =None):\n return Response({'method': 'PUT'})", "def put(self ,request, pk = None):\r\n\r\n return Response({'method ': 'put'})", "def update(self, request, pk=None):\n user = RareUser.objects.get(user=request.auth.user)\n\n # Do mostly the same thing as POST, but instead of\n # creating a new instance of Post, get the Post record\n # from the database whose primary key is `pk`\n post = Post.objects.get(pk=pk)\n post.user = user\n post.title = request.data[\"title\"]\n post.publication_date = request.data[\"publicationDate\"]\n post.image_url = request.data[\"imageUrl\"]\n post.content = request.data[\"content\"]\n post.approved = request.data[\"approved\"]\n\n # Use the Django ORM to get the record from the database\n # whose `id` is what the client passed as the\n # `categoryId` in the body of the request.\n category = Category.objects.get(pk=request.data[\"categoryId\"])\n post.category = category\n\n post.save()\n\n # 204 status code means everything worked but the\n # server is not sending back any data in the response\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "def edit_success(self, id_, data):\n rv = self.post((id_, self.edit_url), data)\n assert not in_response(rv, 'Edit {}'.format(data[self.name_field]))\n assert self.verify_object(data)\n return rv", "def update(self):\n return self._process('update')" ]
[ "0.7222523", "0.6997383", "0.62771076", "0.6275982", "0.62165546", "0.6215696", "0.61253357", "0.6119364", "0.6075601", "0.603036", "0.6030269", "0.59868115", "0.5979657", "0.59795225", "0.59764755", "0.596385", "0.59549665", "0.59367526", "0.589859", "0.5896499", "0.5894518", "0.58762616", "0.58733106", "0.5854021", "0.5829007", "0.5821175", "0.58112663", "0.5782423", "0.56720066", "0.5643166", "0.5638791", "0.5637306", "0.5623109", "0.5601439", "0.5599548", "0.55944496", "0.5587744", "0.5573115", "0.55601895", "0.5546829", "0.55305326", "0.5518294", "0.5517784", "0.55126715", "0.5493368", "0.5486406", "0.5485608", "0.5471398", "0.5470731", "0.54606915", "0.54569024", "0.54537916", "0.5453522", "0.54465246", "0.54458565", "0.54399306", "0.54095006", "0.54050523", "0.53957784", "0.5392947", "0.53802115", "0.53800654", "0.5376203", "0.5373678", "0.5372083", "0.5364967", "0.53643334", "0.5362988", "0.535276", "0.53503144", "0.53483415", "0.5345241", "0.5345241", "0.5345241", "0.5345241", "0.5344501", "0.534238", "0.5342176", "0.5333454", "0.53271896", "0.5320964", "0.53161204", "0.5309409", "0.5300467", "0.529739", "0.529739", "0.5296081", "0.5293285", "0.5291318", "0.5289418", "0.5286296", "0.5270461", "0.52685726", "0.5256995", "0.5256491", "0.52547026", "0.5254558", "0.5234473", "0.5231798", "0.5231271", "0.5214771" ]
0.0
-1
Creates new content and returns the ID.
def create_content( copy_from: Optional[str] = None, ext: Optional[str] = None, type: Optional[str] = None, path: str = "", ) -> str: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_create(self):\n self.contentnode.make_content_id_unique()", "def _create(self, title=''):\n return ContentObject(title)", "def create_with_content_and_title(db, content, title):\n sequence_list = SequenceList(str(uuid.uuid1()))\n sequence_list.content = content\n sequence_list.title = title\n sequence_list.insert(db)\n return sequence_list.seq_uuid", "def insert(self):\n item = self.create()\n return item.id", "def _create_content_and_test(self, name, workspace, *args, **kwargs) -> Content:\n content = Content(*args, **kwargs)\n content.label = name\n content.workspace = workspace\n DBSession.add(content)\n DBSession.flush()\n\n eq_(1, ContentApi.get_canonical_query().filter(Content.label == name).count())\n return ContentApi.get_canonical_query().filter(Content.label == name).one()", "def _add(self, parent, data):\n # Search for an addable and a factory\n addable = extensionRegistry.get_addable(self._content_type)\n if not addable:\n raise ValueError(u\"Content factory cannot be found. \")\n\n factory = getattr(\n resolve(addable['instance'].__module__),\n getFactoryName(addable['instance']))\n\n # Build the content\n identifier = str(data.getWithDefault('id'))\n factory(parent, identifier, data.getWithDefault('title'))\n content = getattr(parent, identifier)\n\n self._edit(parent, content, data)\n return content", "def on_update(self):\n self.contentnode.make_content_id_unique()", "def new_post(self, content):\n return self.proxy.wp.newPost(self.blog_id, self.username, self.password,\n content)", "def add_task(id):\n\n content = get_content_or_400(request)\n\n collection = get_db_collection()\n\n object_id = None\n if id:\n object_id = ObjectId(id)\n object = collection.find({\"_id\": object_id})\n if object:\n response = jsonify(errormsg=\"id already exists\")\n response.status_code = 400\n return response\n\n new_object = {\"content\": content}\n if id:\n new_object[\"_id\"] = id\n new_object_id = collection.insert_one(new_object).inserted_id\n\n response = jsonify(id=str(new_object_id))\n response.status_code = 201\n response.headers[\"Location\"] = url_for('get_task', id=new_object_id)\n return response", "def update_contentnode_content_id(self):\n if self.contentnode and self.preset.thumbnail is False:\n self.contentnode.make_content_id_unique()", "def _post(self, data):\n new_comment_id = DB_COMMENT_TABLE.insert(data)\n return new_comment_id", "def content_id(self):\n return self._content_id", "def test_create_content(self):\n url = reverse('content-list')\n with tempfile.NamedTemporaryFile(suffix='.txt') as content_file:\n content_file.write(b\"The contents of the temporary file.\\n\")\n content_file.seek(0)\n data = {\n 'name': 'Content File',\n 'description': 'File 1',\n 'content_file': content_file,\n 'updated_time': date.today(),\n 'creators': [],\n 'coverage': '',\n 'subjects': [],\n 'keywords': [],\n 'workareas': [],\n 'language': '',\n 'cataloger': ''\n }\n response = self.client.post(url, data, format='multipart')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def create_article(self, html):\n kav_api = getattr(self.api, settings.SALESFORCE_ARTICLE_TYPE)\n data = html.create_article_data()\n result = kav_api.create(data=data)\n kav_id = result['id']\n return kav_id", "def get_new_id(self) -> str:\n user = self.get_template(list_entries=False)\n return user.id", "async def create(self, payload):\n\n return await self.creator.write(payload)", "def generate(self):\n payload = {\n \"entity_id\": self.entity_id,\n \"document_type\": 
self.document_type,\n \"output_format\": self.output_format,\n \"locale\": self.locale,\n \"template_id\": self.template_id,\n }\n payload = {key: val for key, val in payload.items() if val is not None}\n\n response = self._client.post(self._path, data=payload)\n location = wait_for_async_resolution(self._client, response.headers[\"Location\"])\n self.id = get_id_from_location(location)\n\n return response", "def _createElement(self, identifier, request):\n try:\n decoder, contentType = self._getDecoder(request)\n state = decoder(request.body)\n\n element = self._collection.createElementFromState(state)\n\n actualIdentifier = getattr(element, element.identifyingAttribute)\n if actualIdentifier != identifier:\n raise errors.IdentifierError(identifier, actualIdentifier)\n\n self._collection.add(element)\n return Created()\n except errors.SerializableError, e:\n contentType = self.defaultContentType\n encoder = self.encoders[contentType]\n errorResource = RESTErrorPage(e, encoder, contentType)\n return errorResource", "def do_create_element(self):\n id_ = self.dialog.id_entry.get_text()\n title_ = self.dialog.title_entry.get_text()\n # Check validity of id.\n if not self.is_valid_id(id_):\n dialog.message_dialog(\n _(\"The identifier %s is not valid.\\nIt must be composed of non-accentuated alphabetic characters\\nUnderscore is allowed.\") % id_)\n return None\n\n if self.controller.package._idgenerator.exists(id_):\n dialog.message_dialog(\n _(\"The identifier %s is already defined.\") % id_)\n return None\n else:\n self.controller.package._idgenerator.add(id_)\n\n if self.dialog.type_combo:\n t = self.dialog.type_combo.get_current_element()\n\n if self.type_ == Annotation:\n if isinstance(self.parent, AnnotationType):\n parent=self.controller.package\n else:\n parent=self.parent\n el=parent.create_annotation(\n id=id_,\n type=t,\n media=self.controller.current_media,\n begin=0,\n end=self.controller.player.stream_duration)\n\n if el.type._fieldnames:\n el.content.data=\"\\n\".join( \"%s=\" % f for f in sorted(el.type._fieldnames) )\n self.controller.notify('AnnotationCreate', annotation=el)\n elif self.type_ == Query:\n el=self.parent.create_query(id=id_, mimetype=t.id)\n el.title=title_\n if t.id == 'application/x-advene-simplequery':\n # Create a basic query\n q=SimpleQuery(source=\"here\")\n el.content.data=q.xml_repr()\n self.controller.notify('QueryCreate', query=el)\n elif self.type_ == View:\n el=self.parent.create_view(id=id_, mimetype=t.id)\n el.title=title_\n if t.id == 'application/x-advene-ruleset':\n # Create an empty ruleset to begin with\n r=RuleSet()\n\n # Create a new default Rule\n rule=SubviewList(name=_(\"Subviews\"),\n elements=[])\n r.add_rule(rule)\n\n event=Event(\"AnnotationBegin\")\n catalog=self.controller.event_handler.catalog\n ra=catalog.get_action(\"Message\")\n action=Action(registeredaction=ra, catalog=catalog)\n for p in ra.parameters:\n action.add_parameter(p, ra.defaults.get(p, ''))\n rule=Rule(name=_(\"Rule\") + '1',\n event=event,\n action=action)\n r.add_rule(rule)\n\n el.content.data=r.xml_repr()\n self.controller.notify('ViewCreate', view=el)\n elif self.type_ == Schema:\n el=self.parent.create_schema(id=id_)\n el.title=title_\n self.controller.notify('SchemaCreate', schema=el)\n elif self.type_ == AnnotationType:\n if not isinstance(self.parent, Schema):\n print \"Error: bad invocation of CreateElementPopup\"\n el=None\n else:\n el=self.controller.package.create_annotation_type(id=id_)\n el.title=title_\n el.mimetype=t.id\n 
el.color=self.controller.package._color_palette.next()\n el.element_color='here/tag_color'\n self.controller.notify('AnnotationTypeCreate', annotationtype=el)\n elif self.type_ == RelationType:\n if not isinstance(self.parent, Schema):\n print \"Error: bad invocation of CreateElementPopup\"\n el=None\n else:\n el=self.controller.package.create_relation_type(id=id_)\n el.title=title_\n el.mimetype=t.id\n el.color=self.controller.package._color_palette.next()\n el.element_color='here/tag_color'\n self.controller.notify('RelationTypeCreate', relationtype=el)\n elif self.type_ == Resource:\n # Create a new resource file\n # FIXME: to implement\n self.parent[id_]=_(\"New resource data\")\n el=self.parent[id_]\n self.controller.notify('ResourceCreate',\n resource=el)\n\n else:\n el=None\n print \"Not implemented yet.\"\n return el", "def create():", "def create():", "def content_id(self, value):\n self._content_id = value", "def create(self):\n if self.id:\n raise ResourceAlreadyCreatedError\n data_dict = {k: v for k, v in self.to_dict().items() if v is not None}\n result = self._client.raw_post(self.managing_endpoint, data_dict, 201)\n # TODO: update object from result\n self._id = result.get(\"id\", None)", "def create(self):", "def new_entry(title, content):\n\n title.strip # Remove the spaces from both sides.\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n return False\n default_storage.save(filename, ContentFile(content))\n return True", "def create(self):\n ...", "def create(self, message: Message) -> int:\n curDate = datetime.strptime(message.date, '%d/%m/%Y %H:%M:%S') if message.date else None\n\n if type(message.isMine) is str:\n isMine = True if message.isMine == 'true' else False\n else:\n isMine = message.isMine\n\n intValues = {\n 'text' : message.text,\n 'date' : curDate.toordinal() if curDate else None,\n 'isMine' : int(isMine),\n 'characterId' : message.characterId,\n 'partyCharacterId': message.partyCharacterId\n }\n\n id = self.database.insert(self.DATABASE_TABLE, intValues)\n message.id = id\n\n return id", "def _create_instance(**kwargs):\n ctxt = context.get_admin_context()\n return db.instance_create(ctxt, _create_instance_dict(**kwargs))['id']", "def create_generic_empty_content(\n self,\n context,\n request: TracimRequest,\n hapic_data=None,\n ) -> ContentInContext:\n app_config = request.registry.settings['CFG']\n creation_data = hapic_data.body\n api = ContentApi(\n current_user=request.current_user,\n session=request.dbsession,\n config=app_config\n )\n parent = None\n if creation_data.parent_id:\n try:\n parent = api.get_one(content_id=creation_data.parent_id, content_type=ContentType.Any) # nopep8\n except ContentNotFound as exc:\n raise ParentNotFound(\n 'Parent with content_id {} not found'.format(creation_data.parent_id)\n ) from exc\n content = api.create(\n label=creation_data.label,\n content_type=creation_data.content_type,\n workspace=request.current_workspace,\n parent=parent,\n )\n api.save(content, ActionDescription.CREATION)\n content = api.get_content_in_context(content)\n return content", "def content_id(self) -> Optional[str]:\n return pulumi.get(self, \"content_id\")", "def getID():", "def add(request):\n username = request.POST['username']\n password = ''\n\n\n now = datetime.datetime.now()\n expire = request.POST['expire']\n\n expireDate = None\n\n if expire:\n delta = expireChoice[expire]\n\n if delta:\n expireDate = now + delta\n\n if username:\n password = _buildPassword(username, now, request.POST['password'])\n\n 
paste = Paste(title=request.POST['title'],\n content=request.POST['content'],\n created=now,\n typeContent=request.POST['type'],\n username=username,\n password=password,\n expire=expireDate)\n paste.save()\n\n request.session.flash(u\"Add ok\") # TODO translatoion\n\n return HTTPFound(request.route_path('oneContent', idContent=paste._id))", "def noteCreate(ownerId, title):\n query = QUERY_CREATE_NOTE_ENTRY\n query = query.format(**{'owner_id':ownerId, 'title':title})\n data = None\n try:\n cursor.execute(query)\n connection.commit()\n data = cursor.fetchone()\n except Exception as e:\n return False, ERROR_CREATION_NOTE, 'Error creating note', -1\n noteId = -1\n if data != None:\n noteId, = data\n return True, NO_ERROR, 'Note created', noteId", "def insert(self, parent, name):\n pid = self.db.insert_returning_id('simple', dict(parent=parent, name=name))\n return pid", "def create(self):\n pass", "def create(self):\n pass", "def create(self):\n pass", "def post(self, request, pk: int = None):\n if not pk:\n template_id = request.POST.get('template_id')\n template = Template.objects.get(pk=int(template_id))\n _pk = TemplateVersion.objects.create(template=template, test_data={}).pk\n else:\n _pk = TemplateVersion.objects.duplicate(pk)\n template = TemplateVersion.objects.get(pk=pk).template\n return JsonResponse({'id': _pk, 'template_id': template.pk})", "def createPost(content):\n\n cur, user_id, con = initialise(3, True)\n cur.execute(\"INSERT INTO posts (name, content) VALUES ((SELECT username FROM users WHERE id = ?), ?)\", (user_id, content))\n finish(con)", "def create_for_object(self, content_object, **kwargs):\r\n return self.create(**self._generate_object_kwarg_dict(content_object, **kwargs))", "def create_id():\n unique_id = UniqueId()\n unique_id.put()\n return unique_id.key().id()", "def create_id():\n unique_id = UniqueId()\n unique_id.put()\n return unique_id.key().id()", "def create(self, space, title, content, parent_page=None, **pageoptions):\r\n\r\n pagedef = dict(\r\n space = space,\r\n title = title,\r\n url = Confluence.get_url(self._server_url, space, title),\r\n content = content,\r\n contentStatus = 'current',\r\n current = True\r\n )\r\n pagedef.update(**pageoptions)\r\n\r\n if parent_page:\r\n # Get the parent page id.\r\n parent_page_obj = self.getpage(space, parent_page)\r\n if parent_page_obj is None:\r\n raise ConfluenceError('Failed to find parent page %s in space %s' % (parent_page, space))\r\n pagedef['parentId'] = parent_page_obj['id']\r\n\r\n # Now create the page\r\n return self.storepage(pagedef)", "def create(self):\n\n pass", "def _post(self, data):\n new_ticket_id = DB_TICKET_TABLE.insert(data)\n return new_ticket_id", "def create_ontology(name, content):\n try:\n query_ontology = QueryOntology(\n name=name,\n content=content,\n last_modif=datetime.now()\n )\n\n query_ontology.save()\n return query_ontology.pk\n except Exception as exc:\n return -1", "def save(self, *args, **kwargs):\n if not self.id:\n self.created = timezone.now()\n\n self.modified = timezone.now()\n\n return super(CodeSnippet, self).save(*args, **kwargs)", "def create_story():\n client = RequestManager()\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects/{0}/stories\".format(STORED_ID['project_id']))\n name = \"\".join(choices(string.ascii_letters, k=6))\n body = {\"name\": name}\n client.set_body(json.dumps(body))\n response = client.execute_request()\n try:\n STORED_ID['story_id'] = response.json()['id']\n except KeyError:\n LOGGER.info(response.json())", "def 
get(self):\n return self.content_id", "def getID(self) -> int:\n ...", "def new(self):\n uuid = uuid4().hex\n cur = self.conn.cursor()\n cur.execute(\n \"\"\"\n INSERT INTO experiments (uuid)\n VALUES(?)\n \"\"\", [uuid])\n cur.close()\n self.conn.commit()\n return uuid", "def create():\n pass", "def create(self, store, name, contents):\n\n if store.versioned:\n version = 1\n else:\n version = None\n\n if name is not None:\n try:\n self.retrieve_by_name(store, name)\n msg = \"A {0} with the name '{1}' already exists\".format(\n store.object_type,\n name\n )\n raise NameAlreadyUsed(msg)\n except UnknownName:\n pass\n\n return self._create(store, name, contents, version)", "def new(request, pk=\"\"):\n\n if request.path != request.session[constants.ACTUAL_TEMPLATE]:\n clear_session(request)\n request.session[constants.REM_LINKS] = []\n request.session[constants.REM_TAGS] = []\n request.session[constants.REM_DOCS] = []\n request.session[constants.REM_CONTACTS] = []\n request.session[constants.ADD_CONTACTS] = []\n request.session[constants.ADD_LINKS] = []\n request.session[constants.ADD_TAGS] = []\n request.session[constants.ADD_DOCS] = []\n\n request.session[constants.MAINTAIN_STATE] = True\n\n if pk != \"\":\n request.session[constants.ACTUAL_PROJECT] = get_object_or_404(ProjectContainer, id=pk)\n\n # User must have permission to add new CodeStand\n if not is_user_allowed(request.user, \"canaddmatch\"):\n raise Http404\n\n return save_code(request, constants.TEMPLATE_MATCHES_NEW, pk)", "def create_document(content: Union[str, bytes]) -> Document:\n r = requests.post(\"https://pastecord.com/documents\", data=content)\n r.raise_for_status()\n \n return Document(r.json()['key'])", "def upload_content(newContent):\n # Get current content from Wiki\n currentWiki = requests.get(f\"{config['rootURI']}/api/v4/projects/{config['projectID']}/wikis/{config['wikiSlug']}\",\n headers={\"PRIVATE-TOKEN\": config['PAT']})\n\n # Append load_content to current content\n oldContent = getContent(currentWiki)\n # Get old ID - Increment\n id = getLastId(oldContent)\n id += 1\n # Set the ID value on the new content\n newContent = setID(id, newContent)\n content = oldContent + newContent\n\n # Push changes to Gitlab\n requests.put(f\"{config['rootURI']}/api/v4/projects/{config['projectID']}/wikis/{config['wikiSlug']}\",\n headers={\"PRIVATE-TOKEN\": config['PAT']},\n params={\"content\": content})\n return", "def create_content(self, address, owner, content):\n page = self.create(address=address, author=owner)\n page.check_address()\n revision = page.revision_set.create(page=page, content=content, owner=owner)\n page.update_html(content)\n return page", "def post(self, body):\n return self.objects.create(body)", "def delete(self, *args, **kwargs):\n self.contentnode.make_content_id_unique()\n return super(AssessmentItem, self).delete(*args, **kwargs)", "def create_post(request):\n if request.method == 'POST':\n title = request.POST['title']\n content = request.POST['content']\n user_id = request.POST['author_id']\n category = request.POST['category']\n\n slug = \"-\".join(list(map(lambda word: word.lower(), title.split())))\n author = User.objects.get(id=int(user_id))\n\n # save info in models\n post = Post()\n post.author = author\n post.category = category\n post.title = title\n post.content = content\n post.slug = slug\n post.save()\n return redirect('post')\n\n return render(request, 'posts/create_post.html')", "def _put_assume_new(self, _id=None, **data):\n if _id is None:\n _id = str(uuid4())\n doc = 
dict(_id=_id, **data)\n try:\n current_doc = self._db.create_document(doc, throw_on_exists=True)\n except couchdb.http.ResourceConflict:\n # TODO: _rev is in header, don't need to get entire doc\n # Don't use self.get, don't want to actually download an attachment\n current_doc = self._db.get(_id)\n current_doc.update(doc)\n current_doc.save()\n return current_doc", "def create():\r\n if request.method == 'POST':\r\n title = request.form['title']\r\n body = request.form['body']\r\n error = None\r\n db = get_db()\r\n cur = db.cursor()\r\n\r\n cur.execute('SELECT title FROM novel.post WHERE title = %s', title)\r\n newTitle = cur.fetchone()\r\n\r\n if not title:\r\n error = 'Title is required.'\r\n\r\n if newTitle and newTitle['title'] == title:\r\n error = 'Title is repeated.'\r\n\r\n if error is not None:\r\n flash(error)\r\n else:\r\n db = get_db()\r\n db.cursor().execute(\r\n 'INSERT INTO novel.post (title, body, author_id) VALUES (\"{0}\", \"{1}\", \"{2}\")'\r\n .format(title, body, g.user['id'])\r\n )\r\n db.commit()\r\n return redirect(url_for('novel.index'))\r\n\r\n return render_template('novel/create.html')", "def newItem():\n if request.method == 'POST':\n db.createItem(\n title=request.form['title'],\n description=request.form['description'],\n category_id=request.form['category'],\n user_id=login_session['user_id'])\n flash(\"New catalog item created!\", 'success')\n return redirect(url_for('showCatalog'))\n return render_template('new_item.html', categories=db.getAllCategories())", "def createItem(self, parentFolderId, name, description) :\n path = 'item'\n params = { 'folderId': parentFolderId,\n 'name': name,\n 'description': description }\n obj = self.sendRestRequest('POST', path, params)\n if '_id' in obj :\n return obj['_id']\n else :\n raise Exception('Error, expected the returned item object to have an \"_id\" field')", "def new_from_post():\n # If you make a post request with a question_id we will assume you want a new question editor\n # we will prepopulate the question new page with data from that question (if it is a valid question id)\n question_id = request.form['question_id'] if request.form['question_id'] else ''\n\n return render_template('questionNew.html', question_id=question_id)", "def create_content(self, node):\n text = node.xpath(self.pattern.xpath_content)\n\n if self.pattern.xpath_content_header:\n header = node.xpath(self.pattern.xpath_content_header)\n else:\n header = text[:50]\n\n content, _ = Content.objects.get_or_create(\n content_descriptor=header,\n defaults={\n 'text': text\n })\n\n return content", "def create_node(self, drip_campaign_id, title, start_time, template_id, subject, from_email, from_name, initial,\n description=None):\n new_content = Content(template_id=template_id, subject=subject, from_email=from_email, from_name=from_name)\n new_node = Node(\n drip_campaign_id=drip_campaign_id,\n title=title,\n start_time=start_time,\n description=description,\n content=new_content,\n initial=initial,\n done=False,\n )\n new_node.save()\n return new_node.id", "def _save(self, name, content):\n cloud_obj = self.container.create_object(name)\n mimetype, _ = mimetypes.guess_type(name)\n cloud_obj.content_type = mimetype\n cloud_obj.send(content)\n return name", "def create(\n self,\n __template_id,\n __payload,\n *,\n workflow_id=None,\n command_id=None,\n read_as=None,\n act_as=None,\n ):\n raise NotImplementedError", "def create(self):\n\n raise NotImplementedError", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n 
pass", "def get_id(self):\n pass", "def create_task():\n client = RequestManager()\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects/{0}/stories/{1}/tasks\".format(STORED_ID['project_id'], STORED_ID['story_id']))\n name = \"\".join(choices(string.ascii_letters, k=6))\n body = {\"description\": name}\n client.set_body(json.dumps(body))\n response = client.execute_request()\n try:\n STORED_ID['task_id'] = response.json()['id']\n except KeyError:\n LOGGER.info(response.json())", "def create(self, data):\n\n\t\tmessage = data\n\t\tmessage[\"id\"] = self.count = self.count + 1\n\t\tself.messages.append(message)\n\n\t\treturn(message)", "def EventContentMissionExcelAddEventContentId(builder, EventContentId):\n return AddEventContentId(builder, EventContentId)", "def create(self, *args, **kwargs):\n pass", "def admincreate(object):\n if request.method == \"POST\":\n\n db = get_db()\n execute_string = 'INSERT INTO ' + object.title()\n\n if object == 'post':\n execute_string += '(title, content, authorId, categoryId) VALUES (\"' + request.form['title'] + '\", \"' + request.form[\"content\"] + '\", \"' + request.form[\"authorid\"] + '\", \"' + request.form[\"categoryid\"] + '\")'\n elif object == 'author':\n execute_string += '(name) VALUES (\"' + request.form['name'] + '\")'\n elif object == 'category':\n execute_string += '(name, description) VALUES (\"' + request.form['name'] + '\", \"' + request.form[\"description\"] + '\")'\n\n db.execute(execute_string)\n db.commit()\n return redirect(url_for(\"adminview\", object=object))\n\n return render_template(\"new.html\", object=object, item={})", "def save(self):\n if self.id:\n self.update()\n else:\n self.create()", "def newId():\n global lastId\n lastId += 1\n return 'id%d' % lastId", "def create(self, **kwargs):\n return self.save(self.new(**kwargs))", "def save_to_db(self):\n result = self.db.newsdb.insert_one({\"name\": self.name})\n self.id = str(result.inserted_id)", "def insert_meeting(self, title, start_date, end_date):\n db_connection = DbConnection()\n\n try:\n connection = db_connection.get_connection()\n\n cursor = connection.cursor()\n cursor.execute(self.insert_sql, (title, start_date, end_date))\n meeting_id = cursor.fetchone()['id']\n connection.commit()\n\n cursor.close()\n db_connection.close_connection()\n except psycopg2.DatabaseError as e:\n raise\n\n else:\n\n return meeting_id", "def _create_template( service):\n\n return DOCS.documents().create(body=template_page_setup).execute().get('documentId')", "def add_view(self, request):\r\n instance_form = self.get_minimal_add_form()\r\n form = instance_form(request.POST, request.FILES, prefix=self.base_url())\r\n\r\n new_instance = None\r\n if form.is_valid():\r\n new_instance = form.save()\r\n template = select_template(self.item_add_template)\r\n context = RequestContext(request)\r\n context.update({\r\n \"insert\": self,\r\n \"form\": form,\r\n \"object\": new_instance\r\n })\r\n response = HttpResponse(template.render(context))\r\n response.status_code = 201\r\n return response\r\n response = HttpResponse(form.errors)\r\n response.status_code = 400\r\n return response", "def postdados(new_item_id):\n\n item_id = cursos_collections.insert_one(new_item_id).inserted_id\n\n mensage = 'o item do id {} foi inserido'.format(item_id)\n print(item_id)\n\n return mensage", "def post(self):\n\n\t\treturn MessageStore.create(api.payload), 201", "def create(self):\n self.id = None # id must be none to generate next primary key\n db.session.add(self)\n db.session.commit()\n 
db.session.refresh(self)", "def create(self, data):\n raise NotImplementedError", "def create(self, validated_data):\r\n return Snippet.objects.create(**validated_data)", "def comments_new():\n comment = {\n \"title\": request.form.get(\"title\"),\n \"content\": request.form.get(\"content\"),\n \"playlist_id\": ObjectId(request.form.get(\"playlist._id\")),\n }\n print(comment)\n comment_id = db.comments.insert_one(comment).inserted_id\n return redirect(\n url_for(\"playlists.playlists_show\", playlist_id=request.form.get(\"playlist._id\"))\n )", "def contentsCreated(self, *args, **kwargs): # real signature unknown\n pass", "def post(self):\n data = request.json\n create_entry(data)\n return None, 201", "def create():\n if request.method == 'POST':\n if request.form.get('title') and request.form.get('content'):\n entry = Entry.create(\n title = request.form.get('title'),\n content = request.form.get('content'),\n published = request.form.get('published') or False)\n flash('Entry created successfully!', 'success')\n if entry.published:\n return redirect(url_for('detail', slug=entry.slug))\n else:\n return redirect(url_for('edit', slug=entry.slug))\n else:\n flash('Title and Content are required!', 'danger')\n return render_template('create.html')", "def createchore():\n return render_template(\"newchore.html\")", "def create_child(self, form_name, request={}, applyhidewhen=True, **kwargs):\n parent = self.doc\n form = self.db.getForm(form_name)\n doc = self.db.createDocument()\n doc.setItem('Form', form_name)\n form.readInputs(doc, request, applyhidewhen=applyhidewhen)\n self.setParenthood(parent.id, doc.id, **kwargs)\n self.setChildhood(parent.id, doc.id)\n doc.save()\n return doc.getId()", "def content_put(self, request, pk=None):\n container = self.get_object()\n content = request.data\n status_code = HttpResponseBadRequest.status_code\n new_tag = content.get('new_tag')\n new_description = content.get('new_description')\n if container.is_singularity():\n response_data = dict(message=NO_SINGULARITY_PUT)\n elif 'pipeline' not in content:\n response_data = dict(pipeline=['This field is required.'])\n elif new_tag and Container.objects.filter(tag=new_tag).exists():\n response_data = dict(new_tag=['Tag already exists.'])\n else:\n if not new_tag:\n permissions_copy = None\n else:\n permissions_copy = (list(container.users_allowed.all()),\n list(container.groups_allowed.all()))\n container.pk = None # Saves a copy.\n container.tag = new_tag\n if new_description:\n container.description = new_description\n with use_field_file(container.file):\n container.file.save(os.path.basename(container.file.name),\n File(container.file))\n try:\n container.write_archive_content(content)\n container.save()\n if permissions_copy:\n container.grant_from_permissions_list(permissions_copy)\n response_data = container.get_content()\n status_code = Response.status_code\n except ExistingRunsError as ex:\n response_data = dict(pipeline=[ex.args[0]])\n return Response(response_data, status_code)", "def test_create(self):\n\t\tself.obj.save()\n\t\tself.assertEqual(1, self.obj.id)", "def create(self):\n return (True == self.client.put(self.name).getBodyData(\"ok\"))" ]
[ "0.72641987", "0.6442757", "0.64032644", "0.6294966", "0.6188546", "0.61151767", "0.6097857", "0.60493034", "0.5894949", "0.58636135", "0.58631575", "0.5804538", "0.5788969", "0.5767498", "0.57668734", "0.57604814", "0.5739654", "0.57271385", "0.57177323", "0.5717616", "0.5717616", "0.5708025", "0.5690597", "0.56783223", "0.5663084", "0.56430346", "0.56358826", "0.5609926", "0.56003517", "0.5589646", "0.5583825", "0.5582431", "0.5575106", "0.55582774", "0.55569434", "0.55569434", "0.55569434", "0.55524844", "0.55430144", "0.5533084", "0.5521362", "0.5521362", "0.55204517", "0.55192", "0.54870117", "0.547191", "0.5463723", "0.5462814", "0.5445743", "0.5442723", "0.5435383", "0.5429239", "0.5418511", "0.5414429", "0.5410873", "0.54101413", "0.5408326", "0.54076993", "0.53985983", "0.5385345", "0.5380436", "0.5379633", "0.53779894", "0.535815", "0.5356144", "0.5342531", "0.53415304", "0.5334688", "0.5334674", "0.5326745", "0.5318867", "0.5318867", "0.5318867", "0.5318867", "0.53137684", "0.53070015", "0.5300244", "0.52973264", "0.5296241", "0.52883947", "0.5284213", "0.5278511", "0.5266618", "0.5261074", "0.5253094", "0.5251169", "0.52505153", "0.5249598", "0.5245331", "0.52422017", "0.523677", "0.52265054", "0.5222905", "0.52176225", "0.52140015", "0.5206412", "0.520349", "0.5203039", "0.5202524", "0.5198876" ]
0.5430152
51
Returns policy and value estimates for given observations.
def step(self, obs):
    obs = torch.from_numpy(obs)
    _, pi, v = self.forward(obs)
    return pi.detach().numpy(), v.detach().numpy()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_observations(\n self, obs: Dict[str, Any], *args: Any, **kwargs: Any\n ) -> Dict[str, Any]:\n\n for uuid in self.compute_order:\n if uuid not in obs:\n obs[uuid] = self.preprocessors[uuid].process(obs)\n\n return {uuid: obs[uuid] for uuid in self.observation_spaces}", "def estimates(self):\n return self._est", "def policy_eval(env, policy, V, discount_factor):\n policy_value = np.zeros(env.nS)\n for state, action in enumerate(policy):\n for probablity, next_state, reward, info in env.P[state][action]:\n policy_value[state] += probablity * (reward + (discount_factor * V[next_state]))\n\n return policy_value", "def __call__(self, observations):\n observations = numpy.asarray(observations)\n if self.continuous:\n state_probabilities = [kde(observations) for kde in self.state_distributions]\n else:\n state_probabilities = [hist[observations] for hist in self.state_distributions]\n return numpy.transpose(state_probabilities)", "def decision_function(self, obs=None, env=None):\n if self.n_exec_decision % 10000 == 0:\n print(f\"Predicting observation number {self.n_exec_decision}\")\n self.n_exec_decision += 1\n \n # Periscope bounding box\n x, y, z = ((80, 124), (12, 160), (None))\n \n # Predictions and actions\n prediction = self.graph.predict(\n np.array([obs[x[0]:x[1], y[0]:y[1], :]])\n )\n \n # index of the highest scored action by our graph\n action = np.argmax(prediction)\n \n return action, prediction", "def get_post_exploration_prediction(self, observations: List[List[float]], deterministic: bool) -> List[List[float]]:\n\n actions = []\n\n for i, o in enumerate(observations):\n o = self.get_encoded_observations(i, o)\n o = self.get_normalized_observations(i, o)\n o = torch.FloatTensor(o).unsqueeze(0).to(self.device)\n result = self.policy_net[i].sample(o)\n a = result[2] if deterministic else result[0]\n actions.append(a.detach().cpu().numpy()[0])\n\n return actions", "def calc_obs(self, states, covs=None):\n\n if covs is None:\n return states @ self.hx[0].T + self.hx[1]\n\n var = np.diagonal(covs, axis1=1, axis2=2)\n std = np.sqrt(var)\n iv95 = np.stack((states - 1.96*std, states, states + 1.96*std))\n\n obs = (self.hx[0] @ states.T).T + self.hx[1]\n std_obs = (self.hx[0] @ std.T).T\n iv95_obs = np.stack((obs - 1.96*std_obs, obs, obs + 1.96*std_obs))\n\n return iv95_obs, iv95", "def get_policies():\r\n policy = policies.values()\r\n return policy", "def _evaluate(x_input, model):\n x_input = to_tensor(x_input)\n\n # Compute model output value and policy\n yhat_value, yhat_log_policy = model(x_input)\n return yhat_value, yhat_log_policy", "def obtain_observation(self, model):\n return {\n name: obs_func.obtain_observation(model)\n for name, obs_func in self.observation_functions.items()\n }", "def calculate_obs_and_exp(self):\n\n # 2x2 matrix of (antecedants satisfied, not satisfied)x(consequent satisfied, not satisfied) flattened to 1D array of length 4\n obsVals = [self.support, self.sup_a - self.support, self.sup_c - self.support, self.n - self.sup_a - self.sup_c + self.support]\n\n # calculating expected counts assuming that each satisfied/not satified chance is 50/50 i.e. 
no correlation\n sup_not_a = self.n - self.sup_a\n sup_not_c = self.n - self.sup_c\n expVals = [self.sup_a * self.sup_c / self.n, self.sup_a * sup_not_c / self.n, sup_not_a * self.sup_c / self.n, sup_not_a * sup_not_c / self.n]\n \n return obsVals, expVals", "def eval_policy(env, policy, episodes=100):\n scores = [run_episode(env, policy, T=episodes)\n for _ in range(episodes)]\n return np.mean(scores)", "def get_action(self, obs):\n obs = torch.FloatTensor(obs).to(self.device)\n value_int, value_ext = self.ppo.critic(obs)\n action, policy = self.ppo.explore(obs)\n return action, policy, value_ext.data.cpu().numpy(), value_int.data.cpu().numpy()", "def policyImprv(P,R,gamma,policy,v):\n def one_step_lookahead(s, V):\n \"\"\"\n :param state: current state\n :param v: current value estimator\n :return: A, list of optimal action values under current value estimator\n \"\"\"\n num_a = policy.shape[1]\n A = np.zeros(num_a)\n for a in range(num_a):\n for s_prime in range(num_S):\n A[a] += P[s, a, s_prime] * (R[s, a, s_prime] + gamma * V[s_prime])\n return A\n\n # initialization \n num_S, num_a = policy.shape\n policy_stable = True\n\n for s in range(num_S):\n\n chosen_a = np.argmax(policy[s])\n\n action_values = one_step_lookahead(s, v)\n best_a = np.argmax(action_values)\n\n if chosen_a != best_a:\n policy_stable = False\n\n for i in range(num_a):\n if i != best_a:\n policy[s][i] = 0\n if i == best_a:\n policy[s][best_a] = 1\n return policy, policy_stable", "def get_estimates(model):\n from itertools import product\n from pymc3 import summary\n\n subjects = model.data['subject'].unique().astype(np.int)\n parameters = ['v', 'gamma', 's', 'tau', 't0']\n estimates = pd.DataFrame()\n MAP = extract_modes(model.trace)\n combinations = list(product(*[model.design['factor_conditions'][factor]\n for factor in model.design['factors']]))\n subject_template = pd.DataFrame({factor: [combination[f]\n for combination in combinations]\n for f, factor\n in enumerate(model.design['factors'])},\n index=np.zeros(1))\n if model.type == 'hierarchical':\n summary_table = summary(model.trace[0])\n elif model.type == 'individual':\n summary_tables = [summary(trace)\n for trace in model.trace]\n else:\n raise ValueError(\n 'Model type not understood. 
Make sure \"make_model\" has already been called.')\n for subject in subjects:\n subject_estimates = subject_template.copy()\n subject_estimates.loc[:, 'subject'] = np.array([subject])\n for parameter in parameters:\n subject_template[parameter] = np.nan\n subject_template[parameter + '_hpd_2.5'] = np.nan\n subject_template[parameter + '_hpd_97.5'] = np.nan\n subject_template[parameter] = np.nan\n\n dependence = model.design[parameter]['dependence']\n if dependence is None:\n # Parameter is fixed\n if model.type == 'hierarchical':\n # add participant paramaters\n subject_estimates[parameter] = MAP[0][parameter][subject][0]\n subject_estimates[parameter + '_hpd_2.5'] = summary_table.loc[parameter +\n '__{}_0'.format(subject), 'hpd_2.5']\n subject_estimates[parameter + '_hpd_97.5'] = summary_table.loc[parameter +\n '__{}_0'.format(subject), 'hpd_97.5']\n # add population parameters\n if (parameter + '_mu') in summary_table.index:\n subject_estimates[parameter +\n '_mu'] = summary_table.loc[parameter + '_mu', 'mean']\n subject_estimates[parameter +\n '_mu_hpd_2.5'] = summary_table.loc[parameter + '_mu', 'hpd_2.5']\n subject_estimates[parameter +\n '_mu_hpd_97.5'] = summary_table.loc[parameter + '_mu', 'hpd_97.5']\n\n elif model.type == 'individual':\n # add participant paramaters\n subject_estimates[parameter] = MAP[subject][parameter][0][0]\n subject_estimates[parameter +\n '_hpd_2.5'] = summary_tables[subject].loc[parameter + '__0_0', 'hpd_2.5']\n subject_estimates[parameter +\n '_hpd_97.5'] = summary_tables[subject].loc[parameter + '__0_0', 'hpd_97.5']\n else:\n # Parameter has dependence\n conditions = model.design[parameter]['conditions']\n for condition in conditions:\n if condition not in model.data.loc[model.data['subject'] == subject, dependence].values:\n subject_estimates = subject_estimates.drop(subject_estimates[subject_estimates[dependence] == condition].index,\n axis=0)\n else:\n # Check if subject is in condition\n if subject in model.design[parameter][condition]['subjects']:\n parameter_condition = parameter + '_' + condition\n if model.type == 'hierarchical':\n index = model.design[parameter][condition]['subject_mapping'][subject]\n # extract participant parameters\n estimate = MAP[parameter_condition][index]\n hpd25 = summary_table.loc[parameter_condition +\n '__{}'.format(index), 'hpd_2.5']\n hpd975 = summary_table.loc[parameter_condition +\n '__{}'.format(index), 'hpd_97.5']\n # extract population parameters\n if (parameter_condition + '_mu') in summary_table.index:\n pop_estimate = summary_table.loc[parameter_condition + '_mu', 'mean']\n pop_hpd25 = summary_table.loc[parameter_condition +\n '_mu', 'hpd_2.5']\n pop_hpd975 = summary_table.loc[parameter_condition +\n '_mu', 'hpd_97.5']\n\n elif model.type == 'individual':\n if model.design[parameter]['type'] == 'between':\n estimate = MAP[subject][parameter]\n hpd25 = summary_tables[subject].loc[parameter +\n '__0_0', 'hpd_2.5']\n hpd975 = summary_tables[subject].loc[parameter +\n '__0_0', 'hpd_97.5']\n elif model.design[parameter]['type'] == 'within':\n estimate = MAP[subject][parameter_condition]\n hpd25 = summary_tables[subject].loc[parameter_condition +\n '__0_0', 'hpd_2.5']\n hpd975 = summary_tables[subject].loc[parameter_condition +\n '__0_0', 'hpd_97.5']\n else:\n raise ValueError('Parameter dependence not understood for {}: {} ({}).'.format(\n parameter, dependence, condition))\n else:\n raise ValueError(\n 'Model type not understood. 
Make sure \"make_model\" has already been called.')\n # add participant parameters\n subject_estimates.loc[subject_estimates[dependence]\n == condition, parameter] = estimate\n subject_estimates.loc[subject_estimates[dependence]\n == condition, parameter + '_hpd_2.5'] = hpd25\n subject_estimates.loc[subject_estimates[dependence]\n == condition, parameter + '_hpd_97.5'] = hpd975\n # add population parameters\n if model.type == 'hierarchical':\n subject_estimates.loc[subject_estimates[dependence]\n == condition, parameter + '_mu'] = pop_estimate\n subject_estimates.loc[subject_estimates[dependence]\n == condition, parameter + '_mu_hpd_2.5'] = pop_hpd25\n subject_estimates.loc[subject_estimates[dependence] ==\n condition, parameter + '_mu_hpd_97.5'] = pop_hpd975\n\n estimates = pd.concat([estimates, subject_estimates])\n\n estimates.reset_index(inplace=True, drop=True)\n return estimates", "def get_observations(self):\n # Check hyper_params and criterion\n if len(self.hyper_params) == 0:\n raise AssertionError('!! Hyper-Parameters has not been set.')\n # Check criterion\n if self.criterion is None:\n raise AssertionError(\n '!! Criterion for hyper-parameter searching has not been set.')\n # Fetch notes\n notes = self.summary_fetcher()\n if len(notes) == 0: return []\n # Peel of Note wrapper\n observations = []\n for note in notes:\n # Every note in the note list must contain the criterion\n if self.criterion not in note.criteria: raise AssertionError(\n '!! Every note must contain the criterion `{}`'.format(self.criterion))\n # This note will be ignored if it does not contain all the information\n # in self.hyper_params or the config value is not within the range\n if not all([hp.name in note.configs and hp.within(note.configs[hp.name])\n for hp in self.hyper_params]): continue\n # Gather observation\n od = OrderedDict()\n # self.scroll.hyper_params.values() may have been found themselves\n for hp in self.scroll.hyper_params.values():\n assert isinstance(hp, HyperParameter)\n od[hp] = note.configs[hp.name]\n # Organize the observation list as a list of tuples\n observations.append((od, note.criteria[self.criterion]))\n return observations", "def predict(self, obs):\n pred_q = self.model(obs)\n return pred_q", "def run_policy(env, policy, scaler, logger, plotter, episodes, plot=True):\n\n total_steps = 0\n trajectories = []\n episode_experiences = []\n success_rates = []\n for e in range(episodes):\n observes, actions, rewards, unscaled_obs, episode_experience, success_rate = run_episode(env, policy, scaler)\n total_steps += observes.shape[0]\n trajectory = {'observes': observes,\n 'actions': actions,\n 'rewards': rewards,\n 'unscaled_obs': unscaled_obs}\n trajectories.append(trajectory)\n episode_experiences.append(episode_experience)\n success_rates.append(success_rate)\n unscaled = np.concatenate([t['unscaled_obs'] for t in trajectories])\n scaler.update(unscaled) # update running statistics for scaling observations\n\n logger.log({'_MeanReward': np.mean([t['rewards'].sum() for t in trajectories]),\n 'Steps': total_steps})\n\n if plot:\n plotter.updateMeanR(np.mean([t['rewards'].sum() for t in trajectories]))\n plotter.updateSuccessR(np.mean(success_rates))\n\n return trajectories, episode_experiences", "def evaluate_actions(self, obs: torch.Tensor, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n latent_pi, latent_vf = self._get_latent(obs, pi=True, vf=True)\n distribution = self._get_action_dist_from_latent(latent_pi)\n log_prob = 
distribution.log_prob(actions)\n values = self.value_net(latent_vf)\n return values, log_prob, distribution.entropy()", "def action(self, observations):\n observations = observations.float()\n policy_hidden = self.policy_backbone(observations)\n action = self.action_head(policy_hidden)\n return action", "def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n # So we need to determine for every input state-action pair, what the resulting policy distribution is\n # This means that the input will be a single state and a single action per index. \n # We then need to determine if, according to our policy, the action should be taken (prob=1) \n # or not (prob=0)\n \n # state is a tuple of (player's current sum, dealer's single showing card, boolean for usable ace)\n probs = []\n for index, (state, action) in enumerate(zip(states, actions)):\n chosen_action = self.sample_action(state)\n if action == chosen_action:\n probs.append(1)\n else:\n probs.append(0)\n \n \n return np.array(probs)", "def policy_iter_v(env, policy_eval_v=policy_eval_v, discount_factor=1.0):\n # Start with a random policy\n policy = np.ones([env.nS, env.nA]) / env.nA\n\n policy_stable = False\n while not policy_stable:\n V = policy_eval_v(policy, env, discount_factor=discount_factor)\n\n policy_stable = True\n for state in range(env.nS):\n old_best_action = np.argmax(policy[state])\n best_action = -1\n best_value = -float('inf')\n for action in range(env.nA):\n value = V[env.P[state][action][0][1]]\n if value > best_value:\n best_value = value\n best_action = action\n \n for action in range(env.nA):\n if action == best_action:\n policy[state][action] = 1\n else:\n policy[state][action] = 0\n \n if best_action != old_best_action:\n policy_stable = False\n\n return policy, V", "def obtain_observation(self, model):\n return tuple(obs_func.obtain_observation(model) for obs_func in self.observation_functions)", "def obs_assimilation_statistics(prior, obs): #post, obs):\n # Make sure these states are the right kind of object\n assert isinstance(prior, EnsembleState)\n #assert isinstance(post, EnsembleState)\n\n # Build a list of dictionaries\n oblist = []\n for ob in obs:\n obd = {}\n obd['validtime'] = ob.time\n obd['flead'] = (ob.time - pd.to_datetime(prior['validtime'].values[0])).total_seconds()/3600\n obd['lat'] = ob.lat\n obd['lon'] = ob.lon\n obd['obtype'] = ob.obtype\n obd['description'] = ob.description\n obd['ob error'] = ob.error\n obd['value'] = ob.value\n obd['assimilated'] = ob.assimilated\n prior_ye = ob.estimate(prior)\n #post_ye = ob.estimate(post)\n obd['prior mean'] = prior_ye.mean()\n #obd['post mean'] = post_ye.mean()\n obd['prior variance'] = prior_ye.var()\n #obd['post variance'] = post_ye.var()\n oblist.append(obd)\n print(len(oblist))\n # Build a dataframe from this list of objects\n df = pd.DataFrame(oblist)\n return df", "def __get_metrics_adapted(self, policies):\n percent_min = 1 - policies['percent']\n percent_max = 1 + policies['percent']\n metrics = {'cpu_min':percent_min*policies['cpu'], 'cpu_max':percent_max*policies['cpu'],\n 'memory_min':percent_min*policies['ram'], 'memory_max':percent_max*policies['ram'],\n 'disk_min':percent_min*policies['disk'], 'disk_max':percent_max*policies['disk']}\n return metrics", "def compute_pg_vars(trajs, policy, baseline, discount, gae_lambda):\n for traj in trajs:\n # Include the last observation here, in case the trajectory is not finished\n baselines = baseline.predict(np.concatenate(\n [traj[\"observations\"], [traj[\"last_observation\"]]]))\n if 
traj['finished']:\n # If already finished, the future cumulative rewards starting from the final state is 0\n baselines[-1] = 0.\n # This is useful when fitting baselines. It uses the baseline prediction of the last state value to perform\n # Bellman backup if the trajectory is not finished.\n traj['returns'] = compute_cumulative_returns(\n traj['rewards'], baselines, discount)\n traj['advantages'] = compute_advantages(\n traj['rewards'], baselines, discount, gae_lambda)\n traj['baselines'] = baselines[:-1]\n\n # First, we compute a flattened list of observations, actions, and advantages\n all_obs = np.concatenate([traj['observations'] for traj in trajs], axis=0)\n all_acts = np.concatenate([traj['actions'] for traj in trajs], axis=0)\n all_advs = np.concatenate([traj['advantages'] for traj in trajs], axis=0)\n all_dists = {\n k: np.concatenate([traj['distributions'][k] for traj in trajs], axis=0)\n for k in trajs[0]['distributions'].keys()\n }\n\n # Normalizing the advantage values can make the algorithm more robust to reward scaling\n all_advs = (all_advs - np.mean(all_advs)) / (np.std(all_advs) + 1e-8)\n\n # Form chainer variables\n all_obs = Variable(all_obs)\n all_acts = Variable(all_acts)\n all_advs = Variable(all_advs.astype(np.float32, copy=False))\n all_dists = policy.distribution.from_dict(\n {k: Variable(v) for k, v in all_dists.items()})\n\n return all_obs, all_acts, all_advs, all_dists", "def sample_observations(self):\n sample_dict={}\n for obs in self.observations:\n domain = obs.properties['domain']\n #If probabilistic, sample from the true distribution\n if obs.type=='probabilistic':\n probabilities = obs.properties['probability']\n #If uncontrollable, sample uniformly from the domain\n else:\n probabilities = [1.0/len(domain)]*len(domain)\n\n #Samples a value from the discrete probability distribution\n u_sample =random.random()\n acc_prob=0.0\n for val,prob in zip(domain,probabilities):\n acc_prob+=prob\n if u_sample<= acc_prob:\n sample_dict[obs]=val\n break\n\n return sample_dict", "def estimate(self, states):\n scores = [state.get_score() for state in states]\n return np.array([score[0] - score[1] for score in scores])", "def extract_policy(env, v, gamma):\n \n policy = np.zeros(env.nS, dtype=int)\n\n ############################\n # YOUR CODE #\n ############################\n for i in range (env.nS):\n policy[i] = np.argmax([env.P[i][j][0][2] + gamma * v[env.P[i][j][0][1]] for j in range (6)])\n\n return policy", "def all_summary_stats(price_sim, price_obs):\n\n # count, mean, std, min, 25%, 50%, 75%, max\n s1 = price_sim[0].describe()\n\n # skew, kurt, hurst\n s2 = summary_stats_extra(price_sim[0])\n\n # Kolmogorov Smirnov 2 sample test statistic (if 0 - identical)\n ks_stat = {\"KS\": stats.ks_2samp(np.ravel(price_sim), np.ravel(price_obs))[0]}\n\n # acf\n acf = sm.tsa.stattools.acf(price_sim[0], unbiased=False, nlags=5, qstat=False, fft=None, alpha=None, missing='none')\n\n return {\"mean\": s1.loc[\"mean\"],\n \"std\": s1.loc[\"std\"],\n **s2,\n **ks_stat,\n \"acf1\": acf[1],\n \"acf2\": acf[2],\n \"acf3\": acf[3],\n \"acf4\": acf[4],\n \"acf5\": acf[5]\n }", "def extract_value(env, policy, n_states, **kwargs):\n # keyword arguments\n eps = kwargs.get('eps', 1e-10)\n gamma = kwargs.get('gamma', 0.99)\n max_iter = kwargs.get('max_iter', 1000)\n V = np.zeros(shape=[n_states])\n for t in range(max_iter):\n v = np.copy(V)\n # go through all states\n for s in range(n_states):\n a = policy[s]\n for trans in env.env.P[s][a]:\n p, s_, r, _ = trans\n V[s] += p * (r + 
gamma * v[s_])\n # convergence\n if np.sum(np.fabs(v - V)) <= eps:\n sys.stdout.write(f'\\rValue extraction converged @ {t+1} iter\\n')\n sys.stdout.flush()\n break\n return V", "def extractPolicy(self, V):\n\n policy = np.zeros(self.nStates)\n for i in range(self.nStates):\n A = self.helper(i, V)\n best_action = np.argmax(A)\n policy[i] = best_action\n\n return policy", "def policy_evaluation(P, nS, nA, policy, gamma=0.9, tol=1e-8):\n value_function = np.zeros(nS)\n ############################\n # YOUR IMPLEMENTATION HERE #\n def next_state_reward(P,state,action,gamma,value_function):\n sum_reward=0\n for p,nextS,r,boolean_v in P[state][action]:\n sum_reward+=p*( r + gamma* value_function[nextS])\n #print(sum_reward) \n return sum_reward\n\n while True:\n delta=0;\n for state in range(nS):\n new_value=0;\n for action in range(nA):\n sum_reward=next_state_reward(P,state,action,gamma,value_function)\n new_value+=policy[state][action]*sum_reward\n delta= max(delta, abs(new_value-value_function[state]))\n value_function[state] = new_value\n #print(value_function)\n if(delta < tol):\n break\n\n ############################\n return value_function", "def Policy_Inputs(UI):\n \n PolicyDicts = UI.PolicyDicts\n PolicyDictsInv = UI.PolicyDictsInv\n Map = UI.SD_Map\n location = UI.location\n \n policy_input = dict()\n \n \n for policy in PolicyDicts.keys():\n policy_input[policy] = PolicyDictsInv[policy][Map.retrieve_ob(policy).value()]\n \n return policy_input", "def evaluate(self, Estimator, Generator):\n assert hasattr(Estimator, 'estimate'),\\\n \"Estimator must implement the estimate method\"\n assert hasattr(Generator, 'generate'),\\\n \"Generator must implement the generate method\"\n for param in self.params:\n for sigma in self.sigmas:\n self.mse[param][sigma] = self._evaluate(\n Estimator(param), Generator(self.n, self.k, sigma))\n return self.mse", "def _policy_values(self, p_map, v_note):\n pitches = None\n for p in self.v_policy_map[v_note]:\n p_values = p.values(p_map, v_note)\n if p_values is None:\n returned_pitches = OrderedSet() # None means p cannot be satisfied!\n else:\n returned_pitches = OrderedSet()\n for n in p_values:\n returned_pitches.add(n.diatonic_pitch)\n pitches = returned_pitches if pitches is None else pitches.intersection(returned_pitches)\n\n retset = OrderedSet()\n for p in pitches:\n retset.add(Note(p, v_note.base_duration, v_note.num_dots))\n return retset", "def policy_value_fn(board):\n # return uniform probabilities and 0 score for pure MCTS", "def policy_improvement(P, nS, nA, value_from_policy, policy, gamma=0.9):\n\n\tnew_policy = np.zeros(nS, dtype='int')\n\n\t############################\n\t# YOUR IMPLEMENTATION HERE #\n\tfor s in range(nS):\n\t\tq_values = np.zeros(nA)\n\t\tfor action in range(nA):\n\t\t\tcurrent_q_value = 0\n\t\t\tfor transition in P[s][action]:\t# for every possible transition\n\t\t\t\t# print(len(P[s][action]))\n\t\t\t\tprobability = transition[0]\n\t\t\t\treward = transition[2]\n\t\t\t\tnext_state = transition[1]\n\t\t\t\tvalue_next_state = value_from_policy[next_state]\n\n\t\t\t\tcurrent_q_value += probability * (reward + gamma * value_next_state)\n\n\t\t\tq_values[action] = current_q_value\n\n\t\tnew_policy[s] = np.argmax(q_values)\n\n\n\t# print(new_policy)\n\t############################\n\treturn new_policy", "def predictive_values(self,mu,var, full_cov):\n mean = mu*self._std + self._mean\n if full_cov:\n if self.D >1:\n raise NotImplementedError, \"TODO\"\n #Note. 
for D>1, we need to re-normalise all the outputs independently.\n # This will mess up computations of diag(true_var), below.\n #note that the upper, lower quantiles should be the same shape as mean\n true_var = (var + np.eye(var.shape[0])*self._variance)*self._std**2\n _5pc = mean + - 2.*np.sqrt(np.diag(true_var))\n _95pc = mean + 2.*np.sqrt(np.diag(true_var))\n else:\n true_var = (var + self._variance)*self._std**2\n _5pc = mean + - 2.*np.sqrt(true_var)\n _95pc = mean + 2.*np.sqrt(true_var)\n return mean, true_var, _5pc, _95pc", "def de_moor_perishable_waste_conscious_S_policy(\n policy_params: chex.Array, obs: chex.Array, rng: chex.PRNGKey, mean_demand: float\n) -> chex.Array:\n # policy_params = [[S]]\n S = policy_params[0, 0]\n total_stock = obs.sum()\n stock_expiring_next_period = obs[-1]\n order = base_waste_conscious_S_policy(\n S, total_stock, stock_expiring_next_period, mean_demand\n )\n return jnp.array(order)", "def get_observations():\n hhm.samples = SAMPLES\n X = hhm.init_sampparams = pd.DataFrame([(r.scout_prob, r.survival_prob) for r in results],\n columns=('scout prob', 'survival prob'))\n X = [(round(x[0], ROUNDING), round(x[1], ROUNDING)) for x in X]\n results = np.empty(SAMPLES, dtype=object)\n # Get 10 observations so we have some observation uncertainty\n obs = pyrun.run_ensembles(X, 10)\n index = 0\n for x in X:\n scout, survival = x\n # The filename indicates the parameters used to get the observations.\n dir = 'results/run_%d_%d' % (int(scout*10**ROUNDING), int(survival*10**ROUNDING))\n r = result(params(scout, survival), obs[index], dir)\n results[index] = r\n index += 1\n print(results) # In case saving goes wrong.\n with open('observations.pkl', 'wb') as pfile:\n pickle.dump(results, pfile)", "def compute_objective(Instance: dict):\r\n\r\n print(\"Computing objectives values...\")\r\n # Retrieve usefull infos\r\n T_max = Instance[T_STR]\r\n scenario_numbers = Instance[SCENARIO_NUMBER]\r\n Interventions = Instance[INTERVENTIONS_STR]\r\n quantile = Instance[QUANTILE_STR]\r\n # Retrieve risk final distribution\r\n risk = compute_risk_distribution(Interventions, T_max, scenario_numbers)\r\n # Compute mean risk\r\n mean_risk = compute_mean_risk(risk, T_max, scenario_numbers)\r\n # Compute quantile\r\n q = compute_quantile(risk, T_max, scenario_numbers, quantile)\r\n print(\"Done\")\r\n\r\n return mean_risk, q", "def estimates_conf(self):\n return self._est_L, self._est_R", "def get_model_profits(model, cost_benefit, X_test, y_test):\n predicted_probs = model.predict_proba(X_test)[:, 1]\n profits, thresholds = profit_curve(cost_benefit, predicted_probs, y_test)\n\n return profits, thresholds", "def evaluate_actions(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:\n obs = self.bodynet(obs)\n latent_pi, latent_vf, latent_sde = self._get_latent(obs)\n distribution = self._get_action_dist_from_latent(latent_pi, latent_sde)\n log_prob = distribution.log_prob(actions)\n values = self.value_net(latent_vf)\n return values, log_prob, distribution.entropy()", "def policy_value_fn(board):\n # return uniform probabilities and 0 score for pure MCTS\n action_probs = np.ones(len(board.availables))/len(board.availables)\n return zip(board.availables, action_probs), 0", "def __optimize_bob(self, dim, alice_povms) -> tuple[dict, float]:\n # Get number of inputs and outputs.\n (\n num_outputs_alice,\n num_outputs_bob,\n num_inputs_alice,\n num_inputs_bob,\n ) = self.pred_mat.shape\n\n # Now, optimize over Bob's measurement operators and fix Alice's\n # 
operators as those are coming from the previous SDP.\n bob_povms = defaultdict(cvxpy.Variable)\n for y_ques in range(num_inputs_bob):\n for b_ans in range(num_outputs_bob):\n bob_povms[y_ques, b_ans] = cvxpy.Variable((dim, dim), PSD=True)\n\n win = 0\n for x_ques in range(num_inputs_alice):\n for y_ques in range(num_inputs_bob):\n for a_ans in range(num_outputs_alice):\n for b_ans in range(num_outputs_bob):\n win += (\n self.prob_mat[x_ques, y_ques]\n * self.pred_mat[a_ans, b_ans, x_ques, y_ques]\n * cvxpy.trace(\n bob_povms[y_ques, b_ans].H @ alice_povms[x_ques, a_ans].value\n )\n )\n\n objective = cvxpy.Maximize(win)\n constraints = []\n\n # Sum over \"b\" for all \"y\" for Bob's measurements.\n for y_ques in range(num_inputs_bob):\n bob_sum_b = 0\n for b_ans in range(num_outputs_bob):\n bob_sum_b += bob_povms[y_ques, b_ans]\n constraints.append(bob_sum_b == np.identity(dim))\n\n problem = cvxpy.Problem(objective, constraints)\n\n lower_bound = problem.solve()\n return bob_povms, lower_bound", "def predict(self, obs):\n return self.model(obs)", "def _policy_eval(self, policy: np.ndarray) -> np.ndarray:\n V = np.zeros(self.state_dim)\n diff = 1.0\n dr = 0.9\n while (diff >= self.theta):\n diff = 0.0\n for s in self.mdp._state_dict:\n old = V[self.mdp._state_dict[s]]\n temp = 0.0\n for opt in range(self.action_dim):\n if policy[self.mdp._state_dict[s],opt] == 1.0: \n for next_s in self.mdp._state_dict:\n p = self.mdp.P[self.mdp._state_dict[s],opt,self.mdp._state_dict[next_s]]\n r = self.mdp.R[self.mdp._state_dict[s],opt,self.mdp._state_dict[next_s]]\n Vs = V[self.mdp._state_dict[next_s]]\n temp = temp + p * (r + dr * Vs)\n V[self.mdp._state_dict[s]] = temp\n diff = max(diff,abs(old - V[self.mdp._state_dict[s]]))\n return V", "def generate_observation(args):\n model = models[args.model_name](args)\n\n pyprob.set_random_seed(args.seed)\n\n trace = model.get_trace(generate_samples=True, verbose=True)\n\n number_of_vehicles, image_obs = trace.result\n\n posns = posns_from_trace(trace)\n\n return posns, model.x, model.y, image_obs", "def get_reward(self, observations, actions):\n\n #initialize and reshape as needed, for batch mode\n self.reward_dict = {}\n if(len(observations.shape)==1):\n observations = np.expand_dims(observations, axis = 0)\n actions = np.expand_dims(actions, axis = 0)\n batch_mode = False\n else:\n batch_mode = True\n\n #get vars\n xvel = observations[:, 9].copy()\n body_angle = observations[:, 2].copy()\n front_leg = observations[:, 6].copy()\n front_shin = observations[:, 7].copy()\n front_foot = observations[:, 8].copy()\n zeros = np.zeros((observations.shape[0],)).copy()\n\n # ranges\n leg_range = 0.2\n shin_range = 0\n foot_range = 0\n penalty_factor = 10\n\n #calc rew\n self.reward_dict['run'] = xvel\n\n front_leg_rew = zeros.copy()\n front_leg_rew[front_leg>leg_range] = -penalty_factor\n self.reward_dict['leg'] = front_leg_rew\n\n front_shin_rew = zeros.copy()\n front_shin_rew[front_shin>shin_range] = -penalty_factor\n self.reward_dict['shin'] = front_shin_rew\n\n front_foot_rew = zeros.copy()\n front_foot_rew[front_foot>foot_range] = -penalty_factor\n self.reward_dict['foot'] = front_foot_rew\n\n # total reward\n self.reward_dict['r_total'] = self.reward_dict['run'] + self.reward_dict['leg'] + self.reward_dict['shin'] + self.reward_dict['foot']\n\n #return\n dones = zeros.copy()\n if(not batch_mode):\n return self.reward_dict['r_total'][0], dones[0]\n return self.reward_dict['r_total'], dones", "def policy_improvement(P, nS, nA, value_from_policy, policy, 
gamma=0.9):\n\n\tnew_policy = np.zeros(nS, dtype='int')\n\tfor state in range(nS): # for each state\n\t\tbest_Q_value = -float(\"inf\") # we seek the action that gives the best Q value from this state\n\t\tfor action in range(nA): # for each action\n\t\t\tp = P[state][action] # {(probability, nextstate, reward, terminal),...}[state,action]\n\t\t\treward = sum([p_i[0]*p_i[2] for p_i in p]) # expected reward from state,action\n\t\t\tQ_value = reward + gamma*(sum([p_i[0]*value_from_policy[p_i[1]] for p_i in p])) # expected reward + gamma * expected next value\n\t\t\tif Q_value > best_Q_value: # if this is the best action from this state so far\n\t\t\t\tbest_Q_value = Q_value\n\t\t\t\tnew_policy[state] = action # update policy\n\treturn new_policy", "def __estimatedata(data, payout=500, randomsample=0):\n if randomsample > 0:\n data = data.sort('prob')\n data = data.head(randomsample)\n\n data['premium'] = 0.0\n data['payout'] = payout\n\n # get the model parameters\n \n # estimate\n estimator = EtheriscEstimator(data)\n estimator.estimate()\n return estimator", "def evaluate(self, X, integrate=True):\n m, n = self.n_models, X.shape[0]\n gammas, means, sds = self.score(X)\n eis = np.zeros((m, n))\n for i in range(m):\n eis[i] = (\n (means[i] - self.y_best) * norm.cdf(gammas[i]) +\n sds[i] * norm.pdf(gammas[i])\n )\n if integrate:\n return eis.mean(axis=0)\n else:\n return eis", "def evaluate_function_by_objective(self, trajectory):\n objective_values_by_tag = []\n reachability_cost = False\n\n # judge the cost to use, if there is ReachAvoid4d, there must be reachability cost\n for objective in self.objectives:\n if isinstance(objective, ReachAvoid4d):\n reachability_cost = True\n\n if reachability_cost:\n for objective in self.objectives:\n if isinstance(objective, ReachAvoid4d) or isinstance(objective, Avoid4d) or isinstance(objective, GoalDistance):\n objective_values_by_tag.append([objective.tag, objective.evaluate_objective(trajectory)])\n else:\n for objective in self.objectives:\n objective_values_by_tag.append([objective.tag, objective.evaluate_objective(trajectory)])\n\n\n # for objective in self.objectives:\n # # if isinstance(objective, ReachAvoid4d) or isinstance(objective, Avoid4d):\n # # if isinstance(objective, ReachAvoid4d):\n # objective_values_by_tag.append([objective.tag, objective.evaluate_objective(trajectory)])\n\n return objective_values_by_tag", "def model_likelihoods(\n self, obs: Tensor, actions: Tensor, next_obs: Tensor\n ) -> List[Tensor]:\n return [m.log_prob(obs, actions, next_obs).mean() for m in self.model]", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n 
self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def extract_policy(env, v, gamma = 1.0):\n policy = np.zeros(env.nS)\n for s in range(env.nS):\n q_sa = np.zeros(env.action_space.n)\n for a in range(env.action_space.n):\n for next_sr in env.P[s][a]:\n # next_sr is a tuple of (probability, next state, reward, done)\n p, s_, r, _ = next_sr\n q_sa[a] += (p * (r + gamma * v[s_]))\n policy[s] = np.argmax(q_sa)\n return policy", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def _compute_observations(self):\n observations = {}\n for ts in self.ts_ids:\n if self.traffic_signals[ts].time_to_act() or self.traffic_signals[ts].regular_obs() :\n observations[ts] = self.traffic_signals[ts]._compute_observation()\n return observations", "def get_selected_policies(actor):\n dike_model, _ = get_model_for_problem_formulation(actor)\n levers = [lever.name for lever in dike_model.levers]\n policies_df = pd.read_csv('simulation/selected/selected_policies_' + actor + '.csv')\n policies_df = policies_df.loc[:, levers]\n policies = []\n\n for i, row in policies_df.iterrows():\n policy = Policy(f'Policy {i}', **row.to_dict())\n policies.append(policy)\n\n return policies", "def policy_evaluation(P, nS, nA, policy, gamma=0.9, tol=1e-8):\n value_function = np.zeros(nS)\n\n while True:\n change = 0\n for state_idx in range(nS):\n v = 0\n for action_idx, action_prob in enumerate(policy[state_idx]): # for each state in nA\n for probability, nextstate, reward, terminal in P[state_idx][action_idx]:\n v += action_prob * probability * (reward + gamma * value_function[nextstate])\n change = max(change, abs(v - value_function[state_idx]))\n value_function[state_idx] = v\n if change < tol:\n break\n return value_function", "def o_func(self, state, covs=None, pars=None):\n\n if pars is not None:\n\n obs = []\n for sti, par in zip(state, pars):\n self.set_par(par, get_hx_only=True)\n ob = sti[:, :self.dimp] @ self.hx[0].T + \\\n sti[:, self.dimp:] @ self.hx[1].T + self.hx[2]\n obs.append(ob)\n\n return np.array(obs)\n\n try:\n obs = state[..., :self.dimp] @ self.hx[0].T + \\\n state[..., self.dimp:] @ self.hx[1].T + self.hx[2]\n except ValueError as e:\n raise ValueError(\n str(e) + ' you probably want to use the filter with `reduced_form=False`.')\n\n if np.ndim(state) <= 1:\n 
data = self.data.index if hasattr(self, 'data') else None\n obs = pd.DataFrame(obs, index=data, columns=self.observables)\n\n if covs is None:\n return obs\n\n var = np.diagonal(covs, axis1=1, axis2=2)\n std = np.sqrt(var)\n iv95 = np.stack((state - 1.96*std, state, state + 1.96*std))\n\n std_obs = (np.hstack((self.hx[0], self.hx[1])) @ std.T).T\n iv95_obs = np.stack((obs - 1.96*std_obs, obs, obs + 1.96*std_obs))\n\n return iv95_obs, iv95", "def _staged_decision_function(self, X):\n X = check_array(X, dtype=DTYPE, order=\"C\", accept_sparse='csr')\n score = self._init_decision_function(X)\n for i in range(self.estimators_.shape[0]):\n predict_stage(self.estimators_, i, X, self.learning_rate, score)\n yield score.copy()", "def get_parameter_values(self):\n obsPars = numpy.zeros(self.get_num_parameters())\n i = 0\n for p in self.parameters:\n obsPars[i] = p.read_value_in_fmu(self.fmu)\n i += 1\n return obsPars", "def hendrix_perishable_one_product_waste_conscious_S_policy(\n policy_params: chex.Array, obs: chex.Array, rng: chex.PRNGKey, mean_demand: float\n) -> chex.Array:\n # policy_params = [[S]]\n S = policy_params[0, 0]\n total_stock = obs.sum()\n stock_expiring_next_period = obs[-1]\n order = base_waste_conscious_S_policy(\n S, total_stock, stock_expiring_next_period, mean_demand\n )\n return jnp.array(order)", "def _estimate_gp(self, list_x):\n\n assert isinstance(list_x, list)\n\n mean = self.mean(list_x)\n sigma = np.squeeze(\n np.array([self.sigma([x]) for x in list_x])\n )\n\n return mean, sigma", "def w_estimates(self):\n return np.copy(self._w_values)", "def w_estimates(self):\n return np.copy(self._w_values)", "def _calc_policy(self, V: np.ndarray) -> np.ndarray:\n policy = np.zeros([self.state_dim, self.action_dim])\n \n for s in self.mdp._state_dict:\n action_dict = {}\n compare = 0.0\n for a in self.mdp._action_dict:\n temp = 0.0\n for next_s in self.mdp._state_dict:\n p = self.mdp.P[self.mdp._state_dict[s],self.mdp._action_dict[a],self.mdp._state_dict[next_s]]\n r = self.mdp.R[self.mdp._state_dict[s],self.mdp._action_dict[a],self.mdp._state_dict[next_s]]\n Vs = V[self.mdp._state_dict[next_s]]\n temp = temp + p * (r + self.gamma * Vs)\n compare = max(compare, temp)\n action_dict[a]= temp\n res = [t for t,v in action_dict.items() if v == compare][0]\n policy[self.mdp._state_dict[s],self.mdp._action_dict[res]] = 1.0\n\n return policy", "def policy_eval(policy, env, discount_factor=1.0, theta=0.00001):\n\n # Start with a random (all 0) value function\n V = np.zeros(env.nS)\n \n while True: #any(Vdiff > theta):\n \n delta_V = 0\n\n for i in range(env.nS):\n \n # need to calculate the value of taking each of the available actions\n\n action_val = np.zeros(env.nA)\n\n for a in range(env.nA):\n \n # get transition tuple for this state and action\n tup = env.P[i][a][0]\n \n # calculate the value of this action/state? 
\n # value = reward + gamma * (prob * V[next_state])\n # error here I think, probability missing\n action_val[a] = tup[0] * (tup[2] + discount_factor * V[tup[1]])\n \n \n Vold = V[i]\n Vnew = np.dot(policy[i],action_val)\n delta_V = max(delta_V,np.abs(Vnew - Vold))\n # get state value by multiplying probability of taking action (policy) by action value\n V[i] = Vnew\n \n #print(action_val)\n #print(policy[i])\n #print(V[i])\n #print(delta_V)\n\n # function only works if I use this delta rule to terminate\n if delta_V < theta:\n break\n return np.array(V)", "def eval_obs_pre(self, X, y):\n \n X, y = check_X_y(\n X, y, accept_sparse=False, dtype=np.float64, order=\"C\",\n ensure_min_features=2, estimator=self)\n\n n_dimensions = X.shape[1]\n self.classes_, n_samples = np.unique(y, return_counts=True)\n n_times = self.classes_.size\n\n # n_samples = np.array([x.shape[0] for x in X])\n if self.assume_centered:\n self.location_ = np.zeros((n_times, n_dimensions))\n else:\n self.location_ = np.array(\n [X[y == cl].mean(0) for cl in self.classes_])\n\n precisions = self.get_observed_precision()\n\n emp_pre_score = []\n sam_pre_score = []\n slack = []\n\n for i in range(precisions.shape[0]):\n emp_cov = empirical_covariance(X[y == i] - self.location_[i], assume_centered=True)\n precision = precisions[i, :, :]\n # slack.append(-self.constrained_to[i] + logl(emp_cov, precision))\n _, log_det = np.linalg.slogdet(precision)\n emp_pre_score.append(log_det - np.trace(emp_cov @ precision))\n sam_pre_score.append(log_det - np.array([np.trace((X[y == i][[j], :].T @ X[y == i][[j], :]) @ precision) for j in range(int(n_samples))]))\n\n return self.emp_inv_score - np.array(emp_pre_score), self.sam_inv_score - np.array(sam_pre_score), precision", "def df_obs(x, *args):\n sslm, word_counts, totals, mean_deriv_mtx, word, deriv = args\n\n sslm.obs[word] = x\n sslm.mean[word], sslm.fwd_mean[word] = sslm.compute_post_mean(word, sslm.chain_variance)\n\n model = \"DTM\"\n if model == \"DTM\":\n deriv = sslm.compute_obs_deriv(word, word_counts, totals, mean_deriv_mtx, deriv)\n elif model == \"DIM\":\n deriv = sslm.compute_obs_deriv_fixed(\n p.word, p.word_counts, p.totals, p.sslm, p.mean_deriv_mtx, deriv) # noqa:F821\n\n return np.negative(deriv)", "def probabilidadObservable(obs,ket):\n valP,vectP = propiosObservable(obs)\n probs=[]\n calA=CalculadoraAvanzada()\n for v in vectP:\n p=calA.transitarVector(v,ket)\n probs.append(p)\n return probs", "def policy_to_action(policy, obs):\n value = np.dot(policy[0], obs) + policy[1]\n return 1 if value > 0 else 0", "def extract_policy(env, v, gamma = 1.0):\n number_of_states = env.unwrapped.nS\n\n policy = np.zeros(number_of_states)\n for s in range(number_of_states):\n q_sa = np.zeros(env.action_space.n)\n for a in range(env.action_space.n):\n for next_sr in env.unwrapped.P[s][a]:\n # next_sr is a tuple of (probability, next state, reward, done)\n p, s_, r, _ = next_sr\n q_sa[a] += (p * (r + gamma * v[s_]))\n policy[s] = np.argmax(q_sa)\n return policy", "def metrics(self):\n \n if self.mse.shape[0]>1:\n raise ValueError('Metrics can only handle single observations.')\n \n if self.N==1:\n pred = float('nan')\n err = float('nan')\n y_true = float('nan')\n else:\n pred = int(self._predictions[-1])\n err = self._mse[-1]\n y_true = int(self.label[0])\n \n is_outlier = {\"type\":\"GAUGE\",\"key\":\"is_outlier\",\"value\":pred}\n mse = {\"type\":\"GAUGE\",\"key\":\"mse\",\"value\":err}\n obs = {\"type\":\"GAUGE\",\"key\":\"observation\",\"value\":self.N - 1}\n threshold = 
{\"type\":\"GAUGE\",\"key\":\"threshold\",\"value\":self.threshold}\n \n label = {\"type\":\"GAUGE\",\"key\":\"label\",\"value\":y_true}\n \n accuracy_tot = {\"type\":\"GAUGE\",\"key\":\"accuracy_tot\",\"value\":self.metric[4]}\n precision_tot = {\"type\":\"GAUGE\",\"key\":\"precision_tot\",\"value\":self.metric[5]}\n recall_tot = {\"type\":\"GAUGE\",\"key\":\"recall_tot\",\"value\":self.metric[6]}\n f1_score_tot = {\"type\":\"GAUGE\",\"key\":\"f1_tot\",\"value\":self.metric[7]}\n f2_score_tot = {\"type\":\"GAUGE\",\"key\":\"f2_tot\",\"value\":self.metric[8]}\n \n accuracy_roll = {\"type\":\"GAUGE\",\"key\":\"accuracy_roll\",\"value\":self.metric[9]}\n precision_roll = {\"type\":\"GAUGE\",\"key\":\"precision_roll\",\"value\":self.metric[10]}\n recall_roll = {\"type\":\"GAUGE\",\"key\":\"recall_roll\",\"value\":self.metric[11]}\n f1_score_roll = {\"type\":\"GAUGE\",\"key\":\"f1_roll\",\"value\":self.metric[12]}\n f2_score_roll = {\"type\":\"GAUGE\",\"key\":\"f2_roll\",\"value\":self.metric[13]}\n \n true_negative = {\"type\":\"GAUGE\",\"key\":\"true_negative\",\"value\":self.metric[0]}\n false_positive = {\"type\":\"GAUGE\",\"key\":\"false_positive\",\"value\":self.metric[1]}\n false_negative = {\"type\":\"GAUGE\",\"key\":\"false_negative\",\"value\":self.metric[2]}\n true_positive = {\"type\":\"GAUGE\",\"key\":\"true_positive\",\"value\":self.metric[3]}\n \n nb_outliers_roll = {\"type\":\"GAUGE\",\"key\":\"nb_outliers_roll\",\"value\":self.metric[14]}\n nb_labels_roll = {\"type\":\"GAUGE\",\"key\":\"nb_labels_roll\",\"value\":self.metric[15]}\n nb_outliers_tot = {\"type\":\"GAUGE\",\"key\":\"nb_outliers_tot\",\"value\":self.metric[16]}\n nb_labels_tot = {\"type\":\"GAUGE\",\"key\":\"nb_labels_tot\",\"value\":self.metric[17]}\n \n return [is_outlier,mse,obs,threshold,label,\n accuracy_tot,precision_tot,recall_tot,f1_score_tot,f2_score_tot,\n accuracy_roll,precision_roll,recall_roll,f1_score_roll,f2_score_roll,\n true_negative,false_positive,false_negative,true_positive,\n nb_outliers_roll,nb_labels_roll,nb_outliers_tot,nb_labels_tot]", "def update_posterior_policies(\n qs,\n A,\n B,\n C,\n policies,\n use_utility=True,\n use_states_info_gain=True,\n use_param_info_gain=False,\n pA=None,\n pB=None,\n gamma=16.0\n):\n n_policies = len(policies)\n efe = np.zeros(n_policies)\n q_pi = np.zeros((n_policies, 1))\n\n for idx, policy in enumerate(policies):\n qs_pi = get_expected_states(qs, B, policy)\n qo_pi = get_expected_obs(qs_pi, A)\n\n if use_utility:\n efe[idx] += calc_expected_utility(qo_pi, C)\n\n if use_states_info_gain:\n efe[idx] += calc_states_info_gain(A, qs_pi)\n\n if use_param_info_gain:\n if pA is not None:\n efe[idx] += calc_pA_info_gain(pA, qo_pi, qs_pi)\n if pB is not None:\n efe[idx] += calc_pB_info_gain(pB, qs_pi, qs, policy)\n\n q_pi = softmax(efe * gamma) \n\n return q_pi, efe", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"mAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"mA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"eAfb,mAfb\")\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n 
self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def propiosObservable(obs):\n for i in range(len(obs)):\n for j in range(len(obs[0])):\n obs[i][j]=complex(obs[i][j][0],obs[i][j][1])\n a=np.array(obs)\n x,v = np.linalg.eig(a)\n valPropios = [(c.real,c.imag) for c in x]\n vectPropios = [[(c.real,c.imag) for c in y]for y in v]\n return valPropios,vectPropios", "def policy_evaluation(P, nS, nA, policy, gamma=0.9, tol=1e-3):\n\n\tvalue_function = np.zeros(nS)\n\n\t############################\n\t# YOUR IMPLEMENTATION HERE #\n\n\twhile True:\n\t\tprevious_value_function = np.copy(value_function)\n\t\tfor s in range(nS):\n\t\t\taction = policy[s];\t# action specified by the policy\n\t\t\tcurrent_value = 0\n\t\t\tfor transition in P[s][action]:\t# for every possible transition\n\t\t\t\t# print(len(P[s][action]))\n\t\t\t\tprobability = transition[0]\n\t\t\t\treward = transition[2]\n\t\t\t\tnext_state = transition[1]\n\t\t\t\tvalue_next_state = previous_value_function[next_state]\n\n\t\t\t\t# print(\"probability: \" + str(probability) + \"reward: \" + str(reward) + \"value next state: \" + str(value_next_state))\n\n\t\t\t\tcurrent_value += probability * (reward + gamma * value_next_state)\n\t\t\t\n\t\t\tvalue_function[s] = current_value\n\n\t\t# find the maximum difference between the previous value and the current value\n\t\tdifArray = np.subtract(value_function, previous_value_function)\n\t\tfor i in range(nS):\n\t\t\tdifArray[i] = abs(difArray[i])\n\t\tmaxDif = np.amax(difArray)\n\n\t\tif (maxDif < tol):\n\t\t\tbreak\n\n\t############################\n\treturn value_function", "def _policy_improvement(self) -> Tuple[np.ndarray, np.ndarray]:\n # Start with a (random) policy\n policy = np.zeros([self.state_dim, self.action_dim])\n V = np.zeros([self.state_dim])\n #random init the policy\n for s in range(self.state_dim):\n policy[s,0] = 0.0\n policy[s,1] = 0.0\n policy[s,2] = 1.0\n\n V = self._policy_eval(policy)\n\n policy_stable = False\n dr = 0.9\n\n while (policy_stable != True):\n policy_stable = True\n for s in self.mdp._state_dict:\n old_action = (policy[self.mdp._state_dict[s]]).tolist()\n action_dict = {}\n for a in self.mdp._action_dict:\n temp = 0.0\n for next_s in self.mdp._state_dict:\n p = self.mdp.P[self.mdp._state_dict[s],self.mdp._action_dict[a],self.mdp._state_dict[next_s]]\n r = self.mdp.R[self.mdp._state_dict[s],self.mdp._action_dict[a],self.mdp._state_dict[next_s]]\n Vs = V[self.mdp._state_dict[next_s]]\n temp = temp + p * (r + dr * Vs)\n action_dict[self.mdp._action_dict[a]]= temp \n max_act = max(action_dict.values())\n V[self.mdp._state_dict[s]] = max_act\n res = [t for t,v in action_dict.items() if v == max_act][0]\n for opt in range(self.action_dim):\n if opt == res:\n policy[self.mdp._state_dict[s],opt] = 1.0\n else:\n policy[self.mdp._state_dict[s],opt] = 0.0\n if (old_action - policy[self.mdp._state_dict[s]]).any() == True:\n \n policy_stable = False\n if policy_stable == False:\n V = self._policy_eval(policy)\n \n return policy, V", "def _estimate_gp(self, list_x, list_y):\n\n assert isinstance(list_x, list)\n assert isinstance(list_y, list)\n assert len(list_x) == len(list_y)\n\n # Dictionary with key the name of the estimation, and value a list with\n # 
[{function_used_to_estimate}, {derivative}, {i},\n # {sigma_derivation_function}], where derivative and i are parameters\n # of function_used_to_estimate, and sigma_derivation_function is the\n # function to apply to get the standard deviation\n def sqrt(x, x_der):\n \"\"\"Derivative of sqrt(x)\"\"\"\n return np.sqrt(x)\n\n def sqrt_der(x, x_der):\n \"\"\"Derivative of sqrt(x)\"\"\"\n return x_der/(2*x + FUZZ)\n\n params = {\n 'mean': [self.mean, False, None, None],\n 'mean_x': [self.mean, True, 0, None],\n 'mean_y': [self.mean, True, 1, None],\n 'std': [self.sigma, False, None, sqrt],\n 'std_x': [self.sigma, True, 0, sqrt_der],\n 'std_y': [self.sigma, True, 1, sqrt_der],\n }\n result = {}\n for param_key in np.sort(list(params.keys())):\n param = params[param_key]\n current_estimation = self._get_estimation(\n param[0], param[1], param[2], list_x, list_y\n )\n result[param_key] = current_estimation\n if params[param_key][3]:\n result[param_key] = params[param_key][3](\n result['std'], current_estimation\n )\n\n return result", "def get_metrics(model, test_data):\n feats = test_data[:, :-1]\n gold_labels = test_data[:, -1]\n preds = model.predict_y(feats)\n preds_mean = preds[0].flatten()\n preds_var = preds[1]\n #print preds_mean[:10]\n #print gold_labels[:10]\n mae = MAE(preds_mean, gold_labels)\n rmse = np.sqrt(MSE(preds_mean, gold_labels))\n prs = pearson(preds_mean, gold_labels)\n nlpd = - np.mean(model.predict_density(feats, gold_labels[:, None]))\n return mae, rmse, prs, nlpd", "def filter(self, observations):\n\n (_, _, _, x_filtered, P_filtered) = filter(self.F, self.Q, self.H, self.R, self.x_0, self.P_0, observations)\n return x_filtered, P_filtered", "def test_getEstimates(self):\r\n with self.assertRaises(ValueError):\r\n self.res1.getEstimates('S1')\r\n\r\n self.res1.addSample('S1', 42)\r\n self.res1.addSampleEstimate('S1', 15, 30, 4.75, 2.5, 3.5)\r\n self.res1.addSampleEstimate('S1', 10, 20, 2.5, 2.5, 3.5)\r\n assert_almost_equal(self.res1.getEstimates('S1'),\r\n [(10, 20, 2.5, 2.5, 3.5), (15, 30, 4.75, 2.5, 3.5)])", "def policy_eval_v(policy, env, discount_factor=1.0, theta=0.00001):\n # Start with an all 0 value function\n V = np.zeros(env.nS)\n \n # loop door alle states heen \n # sla de oude state value op \n # Bereken de nieuwe state value door de SOM (kans omhoog * loop over waar je terrecht kunt komen * reward) kans omlaag..\n # kijk of je nog door moet gaan of stoppen\n delta = 1000 \n while delta > theta:\n # for x in range(2):\n delta = 0\n \n# loop throw possible states\n for state in range(env.nS):\n old_state_value = V[state]\n new_state_value = 0\n\n # loop shrow possible actions in state\n for action in range(env.nA):\n\n # print(\"kans omhoog\", policy[state][action])\n # print(\"kans omhoog uitkomen\", env.P[state][action][0][0])\n # print(\"direct reward\",env.P[state][action][0][2] )\n # print(\"value of that new state\", discount_factor * V[env.P[state][action][0][1]] )\n\n current_state_value = policy[state][action] * env.P[state][action][0][0] * ( env.P[state][action][0][2] + ( discount_factor * V[env.P[state][action][0][1]] ) ) \n# print(\"current state value\", current_state_value)\n new_state_value += current_state_value\n \n delta = max(delta, abs(old_state_value - new_state_value))\n V[state] = new_state_value\n# print(V[state])\n# print(\"delta\", delta)\n return np.array(V)", "def evaluate_model(points, function):\n function_values = np.zeros((len(points), 1))\n for i in range(0, len(points)):\n function_values[i,0] = function(points[i,:])\n 
return function_values", "def extract_policy(env, v, GAMMA=1.0):\r\n policy = np.zeros(env.nS)\r\n for s in range(env.nS):\r\n q_sa = np.zeros(env.nA)\r\n for a in range(env.nA):\r\n q_sa[a] = sum([p*(r+GAMMA*v[s_]) for p, s_, r, _ in env.P[s][a]])\r\n policy[s] = np.argmax(q_sa)\r\n return policy", "def policy_variables(self):\n\n return self.pi_model.variables", "def get_payoff(model, attack_policy, defense_policy):\n ave_discount_reward = get_payoff_mixed(model, [attack_policy], [defense_policy], [1.0], [1.0])\t\n return ave_discount_reward", "def ModelEstimation(data, S, A):\n\n counts_sas = np.zeros((S,A,S))\n counts_sa = np.zeros((S,A))\n R_est = np.zeros((S,A))\n P_est = np.zeros((S,A,S))\n for traj in data:\n for sample in traj:\n (s,a,r,s_next) = sample\n counts_sa[s,a] += 1\n counts_sas[s,a,s_next] += 1\n R_est[s,a] += r\n\n for s in range(S):\n for a in range(A):\n if counts_sa[s,a] == 0:\n # if this state-action doesn't exist in data\n # Use default values:\n R_est[s,a] = 0.5\n P_est[s,a,:] = 1/S\n else:\n R_est[s,a] /= counts_sa[s,a]\n P_est[s, a, :] = counts_sas[s,a,:] / counts_sa[s,a]\n if np.any(np.abs(P_est.sum(axis=2) - 1) > 1e-5):\n raise RuntimeError('Probabilty matrix not normalized!!')\n return P_est, R_est", "def update_policy(env, policy, V, discount_factor):\n\n for state in range(env.nS):\n # for a given state compute state-action value.\n action_values = one_step_lookahead(env, state, V, discount_factor)\n\n # choose the action which maximizes the state-action value.\n policy[state] = np.argmax(action_values)\n\n return policy", "def iterative_policy_evaluation(policy,\n states,\n actions,\n transition,\n rewards,\n discount=0.99,\n threshold=0.01):\n update = sys.maxint\n\n # Initialize values to zero\n values = {state: 0 for state in states}\n\n # Iterate until convergence\n while update > threshold:\n update = 0\n for state in states:\n initial_value = values[state]\n\n # Use Bellman equation update\n values[state] = sum([\n policy(state, action) * \\\n sum([transition(s, a, n_s, r) * (r + discount * values[n_s])\n for (s, a, n_s, r) in itertools.product(states, actions, states, rewards)\n if s == state and a == action])\n for action in actions])\n update = max(update, abs( initial_value - values[state]))\n return values", "def _get_observation(self):\n ret = {\n 'observation': self.state,\n 'achieved_goal': self._get_achieved_goal(),\n 'desired_goal': self._get_desired_goal(),\n }\n ret.update({'reward': self.compute_reward(ret['achieved_goal'], ret['desired_goal'])})\n return ret", "def evaluate(self, dataset, *args, **kwargs):\n\n losses = []\n for sample in dataset:\n output = self.predict(sample, *args, **kwargs)\n losses.append(self.metric_loss(output, sample, *args, **kwargs))\n\n return losses", "def get_estimation(self):\n self.calculate_variables()\n if self.validate_preconditions():\n return self.estimate()\n else:\n return None", "def test_policy_loss(self):\n PolicyEstimator = self.notebook_locals[\"PolicyEstimator\"]\n student_policy_loss = self.notebook_locals[\"util_compute_policy_loss\"]\n obs = torch.ones(3, 2) * 2.0\n actions = torch.ones(3, 1) * 3.0\n returns = torch.arange(3, 6).float()\n policy_f = PolicyEstimator(\n num_hidden=1, hidden_dim=2, obs_dim=2, action_dim=1\n )\n # initialize to constant values\n for p in policy_f.parameters():\n torch.nn.init.ones_(p)\n loss_student = student_policy_loss(policy_f, obs, actions, returns)\n self.assertTrue(\n torch.abs(loss_student - 7.6758) < 0.1,\n \"The policy loss computation is 
incorrect\",\n )", "def elbo_with_policy(self, rng, params, x, policy, train, context=None):\n d = np.prod(x.shape[1:])\n batch_size = x.shape[0]\n\n rng_perm, rng_t, rng_drop = jax.random.split(rng, 3)\n\n # Get random sigma ~ Unif(S_n_steps)\n sigmas = ardm_utils.get_batch_permutations(rng_perm, x.shape[0],\n self.num_steps)\n\n # Sample t from policy.\n t, _, weight_policy = self.sample_policy_t(rng_t, batch_size, policy)\n\n prev_selection, _ = ardm_utils.get_selection_for_sigma_and_t(\n sigmas, t, self.config.mask_shape)\n future_selection = (1. - prev_selection)\n\n corrupted = self.corrupt(x, prev_selection)\n\n net_out = self.apply_fn(\n {'params': params}, corrupted, t, prev_selection, train,\n rngs={'dropout': rng_drop} if train else None, context=context)\n\n log_px_sigma_geq_t = self.logprob_fn(x, net_out)\n\n log_px_sigma_geq_t = future_selection.reshape(\n log_px_sigma_geq_t.shape) * log_px_sigma_geq_t\n log_px_sigma_geq_t = util_fns.sum_except_batch(log_px_sigma_geq_t)\n\n ce = log_px_sigma_geq_t / d / np.log(2)\n\n # Reweigh for expectation over i.\n reweighting_factor_expectation_i = 1. / (self.num_steps - t)\n elbo_per_t = reweighting_factor_expectation_i * log_px_sigma_geq_t\n\n # Reweigh for expectation over policy.\n elbo = elbo_per_t * weight_policy\n\n elbo = elbo / d / np.log(2)\n elbo_per_t = elbo_per_t / d / np.log(2)\n\n return elbo, elbo_per_t, ce, t", "def get_variables():\n policer_data = {\n \"policer_data\": {\n \"name\": \"policy1\",\n \"cir\": 450,\n \"cb\": 50000,\n \"rate-type\": \"kbps\",\n \"round-type\": \"closest\",\n \"type\": \"1r2c\",\n \"conform-action\": {\n \"meter-action-type\": \"meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"meter-action-drop\"\n }\n },\n \"policer_data_oper\": {\n \"name\": \"policy1\",\n \"cir\": 450,\n \"cb\": 50000,\n \"rate-type\": \"kbps\",\n \"round-type\": \"closest\",\n \"type\": \"1r2c\",\n \"conform-action\": {\n \"meter-action-type\": \"policer:meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"policer:meter-action-drop\"\n }\n },\n \"policer_data_2\": {\n \"name\": \"policy1\",\n \"cir\": 900,\n \"cb\": 50000,\n \"rate-type\": \"kbps\",\n \"round-type\": \"closest\",\n \"type\": \"1r2c\",\n \"conform-action\": {\n \"meter-action-type\": \"meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"meter-action-drop\"\n }\n },\n \"policer_data_oper_2\": {\n \"name\": \"policy1\",\n \"cir\": 900,\n \"cb\": 50000,\n \"rate-type\": \"kbps\",\n \"round-type\": \"closest\",\n \"type\": \"1r2c\",\n \"conform-action\": {\n \"meter-action-type\": \"policer:meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"policer:meter-action-drop\"\n }\n },\n \"policer_data_3\": {\n \"name\": \"policy1\",\n \"cir\": 100,\n \"eir\": 150,\n \"cb\": 200,\n \"eb\": 300,\n \"rate-type\": \"pps\",\n \"round-type\": \"closest\",\n \"type\": \"2r3c-2698\",\n \"conform-action\": {\n \"meter-action-type\": \"meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"meter-action-mark-dscp\",\n \"dscp\": \"AF22\"\n },\n \"violate-action\": {\n \"meter-action-type\": \"meter-action-drop\"\n },\n \"color-aware\": True\n },\n \"policer_data_oper_3\": {\n \"name\": \"policy1\",\n \"cir\": 100,\n \"eir\": 150,\n \"cb\": 200,\n \"eb\": 300,\n \"rate-type\": \"pps\",\n \"round-type\": \"closest\",\n \"type\": \"2r3c-2698\",\n \"conform-action\": {\n \"meter-action-type\": \"policer:meter-action-transmit\"\n },\n 
\"exceed-action\": {\n \"meter-action-type\": \"policer:meter-action-mark-dscp\",\n },\n \"violate-action\": {\n \"meter-action-type\": \"policer:meter-action-drop\"\n },\n \"color-aware\": True\n },\n\n \"acl_tables\": {\n # settings for policer tables\n \"hc_acl_table\": {\n \"name\": \"table0\",\n \"nbuckets\": 2,\n \"memory_size\": 1048576,\n \"skip_n_vectors\": 12,\n \"miss_next\": \"permit\",\n \"mask\": \"00:00:00:00:00:00:00:00:00:00:00:00:ff:ff:ff:ff\"\n },\n # setting for acl sessions\n \"hc_acl_session\": {\n \"match\": \"00:00:00:00:00:00:00:00:00:00:00:00:C0:A8:7A:01\",\n \"policer_hit_next\": \"policy1\",\n \"color_classfier\": \"exceed-color\",\n },\n \"hc_acl_session2\": {\n \"match\": \"00:00:00:00:00:00:00:00:00:00:00:00:C0:A8:7A:02\",\n \"policer_hit_next\": \"policy1\",\n \"color_classfier\": \"exceed-color\",\n },\n },\n }\n return policer_data", "def evaluate(\n policies: list[bandit.base.Policy],\n env: gym.Env,\n reward_stat: stats.base.Univariate | None = None,\n n_episodes: int = 20,\n seed: int | None = None,\n):\n\n reward_stat = reward_stat or stats.Sum()\n rng = random.Random(seed)\n\n for episode in range(n_episodes):\n episode_policies = [policy.clone() for policy in policies]\n episode_env = copy.deepcopy(env)\n episode_env.reset(seed=rng.randint(0, 2**32))\n episode_env.action_space.seed(rng.randint(0, 2**32 - 1))\n episode_envs = [copy.deepcopy(episode_env) for _ in episode_policies]\n episode_reward_stats = [reward_stat.clone() for _ in policies]\n\n step = 0\n done = [False] * len(policies)\n\n while not all(done):\n for policy_idx, (policy_, env_, reward_stat_) in enumerate(\n zip(episode_policies, episode_envs, episode_reward_stats)\n ):\n if done[policy_idx]:\n continue\n\n arm = policy_.pull(range(env_.action_space.n)) # type: ignore[attr-defined]\n observation, reward, terminated, truncated, info = env_.step(arm)\n policy_.update(arm, reward)\n reward_stat_.update(reward)\n\n yield {\n \"episode\": episode,\n \"step\": step,\n \"policy_idx\": policy_idx,\n \"arm\": arm,\n \"reward\": reward,\n \"reward_stat\": reward_stat_.get(),\n }\n\n done[policy_idx] = terminated or truncated\n step += 1" ]
[ "0.5557971", "0.548167", "0.54308265", "0.5420844", "0.5312977", "0.53012514", "0.52951914", "0.5293624", "0.52801895", "0.52599216", "0.5253742", "0.52483994", "0.5174865", "0.517392", "0.51706284", "0.5163968", "0.5154146", "0.51273954", "0.5124976", "0.5118999", "0.51168025", "0.51120555", "0.51011777", "0.5081445", "0.50588846", "0.5035132", "0.5034332", "0.499786", "0.498526", "0.49734855", "0.49671248", "0.49630225", "0.49531302", "0.4938473", "0.49337703", "0.49329427", "0.4927292", "0.49272108", "0.49212787", "0.49169692", "0.49158478", "0.491397", "0.49037308", "0.49018508", "0.4900334", "0.4896278", "0.48882094", "0.48760206", "0.4870039", "0.4859608", "0.48508564", "0.4848685", "0.4848592", "0.48454827", "0.48399773", "0.48341757", "0.48296255", "0.4829248", "0.4827025", "0.48209876", "0.4811552", "0.48114222", "0.48111543", "0.48064312", "0.48013955", "0.4796073", "0.47951013", "0.47904333", "0.47904333", "0.47890905", "0.47876605", "0.47840837", "0.47764832", "0.47763163", "0.47759697", "0.477222", "0.47687173", "0.47682002", "0.47661647", "0.47569624", "0.47465137", "0.4744439", "0.47395593", "0.47358388", "0.47259298", "0.47226885", "0.47223592", "0.47210118", "0.4720173", "0.4720058", "0.4719937", "0.47126028", "0.4704149", "0.47031963", "0.4702593", "0.470063", "0.4698115", "0.4697033", "0.46892065", "0.4688396", "0.4688358" ]
0.0
-1
Modified from DASHAgent to replace activation with SocogSys1 variable binding function.
def update_beliefs(self, result, action): if self.traceUpdate: print("Updating beliefs based on action", action, "with result", result) if result == 'TryAgain': return None elif not result and not self.isTransient(action): if self.traceUpdate: print("Adding known false", action) self.knownFalseTuple(action) if isinstance(result, list): for bindings in result: concrete_result = substitute(action, bindings) if not self.isTransient(concrete_result): if self.traceUpdate: print("Adding known true and performed", concrete_result) self.knownTuple(concrete_result) self.knownTuple(('performed', concrete_result)) self.update_variable_binding(concrete_result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _activation(self,components,activation):\r\n \r\n if activation == \"ReLU\":\r\n components.append(nn.ReLU())\r\n elif activation == \"Sigmoid\":\r\n components.append(nn.Sigmoid())\r\n else:\r\n raise Exception(\"Invalid activation fn: \"+activation)", "def activate(self, x):\n return x", "def activate(x):\n raise NotImplementedError()", "def __init__(self):\r\n self.activation = Activation(u'signup')\r\n self.activated = False", "def _return_activation(x, nl):\n if nl == 'HS':\n x = Activation(_hard_swish)(x)\n if nl == 'RE':\n x = Activation(_relu6)(x)\n return x", "def bind(self, ens, beads, nm, cell, bforce, prng):\n\n super(InstantonMotion, self).bind(ens, beads, nm, cell, bforce, prng)\n # Binds optimizer\n\n self.optimizer.bind(self)", "def _activate(self, x):\n self._activation_map = self._activation_distance(x, self._weights)", "def build_activation(activation: str) -> nn.Module:\n if hasattr(nn, activation):\n return getattr(nn, activation)()\n elif activation == \"Swish\":\n return Swish()\n else:\n raise Exception(\"{} invalid activation function.\".format(activation))", "def variabilize(self):\n if self.nvars>=0:\n pass #already done\n else:\n varTab = syt.SymbolTable()\n def convertArgs(args):\n return map(lambda a: -varTab.getId(a) if isVariableAtom(a) else a, args)\n def convertGoal(g):\n return Goal(g.functor, convertArgs(g.args))\n if self.lhs: self.lhs = convertGoal(self.lhs)\n self.rhs = map(convertGoal, self.rhs)\n if self.features:\n self.features = map(convertGoal, self.features)\n if self.findall:\n self.findall = map(convertGoal, self.findall) \n self.variableList = varTab.getSymbolList()\n self.nvars = len(self.variableList)", "def _return_activation(x, nl):\n if nl == 'HS':\n x = KL.Activation(_hard_swish)(x)\n if nl == 'RE':\n x = KL.ReLU(6.)(x)\n\n return x", "def translateToBindingName(self, action):\n if type(action) == str:\n return eval(\"self.accel_%s\" % action)\n else:\n return action", "def activation_func(activation:str):\n return nn.ModuleDict([\n ['relu', nn.ReLU(inplace=True)],\n ['leaky_relu', nn.LeakyReLU(negative_slope=0.01, inplace=True)],\n ['selu', nn.SELU(inplace=True)],\n ['none', nn.Identity()]\n ])[activation]", "def __call__(self, x) -> mx.sym.Symbol:\n h = mx.sym.FullyConnected(data=x, num_hidden=self.num_hidden, weight=self.w_i2h, bias=self.b_i2h, flatten=False)\n h = layers.activation(h, act_type=self.act_type)\n if self.dropout > 0.0:\n h = mx.sym.Dropout(h, p=self.dropout)\n y = mx.sym.FullyConnected(data=h, num_hidden=self.num_model, weight=self.w_h2o, bias=self.b_h2o, flatten=False)\n return y", "def __init__(self,\n model,\n weights,\n singa_ops,\n keep_initializers_as_inputs=True):\n super(SingaRep, self).__init__()\n self.model = model\n self.tensor_map = weights\n self.keep_initializers_as_inputs = keep_initializers_as_inputs\n # this each item of singa_ops is: ('name', 'op', 'handle', 'forward')\n # the name is a string, op is OnnxNode,\n # handle is Singa handle to store the tensor into singa operator\n # the forward is singa autograd operator\n self.singa_ops = singa_ops", "def activate(self):\n pass", "def activate_2(self, act_1):\n x = act_1\n e = math.e\n result = 1/(1 + e **-x)\n #print (result)\n return result", "def __init__ ( self , syms , defn ):\n\n self.logic = cognitiveDefiner.convertDefinition(syms,defn)", "def get_activation_function(actfn):\n if actfn is None or actfn == 'leakyrelu':\n def create_actfn(): return nn.LeakyReLU(0.1, inplace=True)\n elif actfn == 'gelu':\n def create_actfn(): return 
nn.GELU()\n elif actfn == 'relu':\n def create_actfn(): return nn.ReLU()\n elif actfn == 'swish' or actfn == 'silu':\n def create_actfn(): return nn.SiLU()\n else:\n raise Exception('Unknown activation function ' + str(actfn))\n return create_actfn", "def __init__(self, obs_dim, hiddens, activation=\"Tanh\", **kwargs):\n assert activation in [\"Tanh\", \"Relu\", \"Softplus\"], (\n \"probably can't handle activation '%s'\" % activation\n )\n self._hiddens = hiddens\n self._activation = activation\n super().__init__(obs_dim, **kwargs)", "def __init__(self, kern, f, f_scope):\n super().__init__(kern.input_dim,active_dims=kern.active_dims)\n self.kern = kern\n self._f = lambda x: tf.cast(f(x), gp.settings.float_type) #function to call on input\n self._f_scope = f_scope #learnable variables that f depends on", "def define_operational_variables(n, sns, c, attr):\n if n.df(c).empty:\n return\n\n active = get_activity_mask(n, c, sns) if n._multi_invest else None\n coords = [sns, n.df(c).index.rename(c)]\n n.model.add_variables(coords=coords, name=f\"{c}-{attr}\", mask=active)", "def activation_function(self, x: np.array) -> np.array:\r\n\t\treturn self._activation_function(x)", "def bind(motion, character):\n bindings[motion] = character", "def apply_action(self, physics, action, random_state):\n del random_state\n physics.bind(self.actuators).ctrl = action", "def activation_func(activation, inplace=False):\n return nn.ModuleDict([\n ['relu', nn.ReLU(inplace=inplace)],\n ['leaky_relu', nn.LeakyReLU(negative_slope=0.2, inplace=inplace)],\n ['selu', nn.SELU(inplace=inplace)],\n ['none', nn.Identity()]\n ])[activation]", "def start_func_default(self, activation):\n activation.prepare()\n activation.done()\n return activation", "def _get_activate(self):\n return self.__activate", "def __init__(self, owner=None, variable=None, name=None, prefs=None):\n if not name:\n self.name = owner.name + \"_\" + SYSTEM_TARGET_INPUT_STATE\n else:\n self.name = owner.name + \"_\" + name\n self.prefs = prefs\n self.sendsToProjections = []\n self.owner = owner\n self.value = variable", "def __init__(self, \n ns: str, \n reward_fnc: str, \n is_action_space_discrete, \n safe_dist: float = None, \n goal_radius: float = 0.1, \n max_steps_per_episode=100, \n train_mode: bool = True, \n debug: bool = False,\n task_mode: str = \"staged\",\n PATHS: dict = dict(),\n extended_eval: bool = False,\n *args, **kwargs):\n super(FlatlandEnv, self).__init__()\n\n self.ns = ns\n try:\n # given every environment enough time to initialize, if we dont put sleep,\n # the training script may crash.\n ns_int = int(ns.split(\"_\")[1])\n time.sleep(ns_int*2)\n except Exception:\n rospy.logwarn(f\"Can't not determinate the number of the environment, training script may crash!\")\n pass\n\n\n # process specific namespace in ros system\n self.ns_prefix = '' if (ns == '' or ns is None) else '/'+ns+'/'\n \n if not debug:\n if train_mode:\n rospy.init_node(f'train_env_{self.ns}', disable_signals=False)\n else:\n rospy.init_node(f'eval_env_{self.ns}', disable_signals=False)\n\n self._extended_eval = extended_eval\n self._is_train_mode = rospy.get_param(\"/train_mode\")\n self._is_action_space_discrete = is_action_space_discrete\n \n self.setup_by_configuration(PATHS['robot_setting'], PATHS['robot_as'])\n\n # set rosparam\n rospy.set_param(\"/laser_num_beams\", self._laser_num_beams)\n \n # observation collector\n self.observation_collector = ObservationCollector(\n self.ns, self._laser_num_beams, self._laser_max_range)\n self.observation_space = 
self.observation_collector.get_observation_space()\n\n # reward calculator\n if safe_dist is None:\n safe_dist = 1.6*self._robot_radius\n\n self.reward_calculator = RewardCalculator(\n robot_radius=self._robot_radius, safe_dist=1.6*self._robot_radius, goal_radius=goal_radius, \n rule=reward_fnc, extended_eval=self._extended_eval)\n\n # action agent publisher\n if self._is_train_mode:\n self.agent_action_pub = rospy.Publisher(f'{self.ns_prefix}cmd_vel', Twist, queue_size=1)\n else:\n self.agent_action_pub = rospy.Publisher(f'{self.ns_prefix}cmd_vel_pub', Twist, queue_size=1)\n\n # service clients\n if self._is_train_mode:\n self._service_name_step = f'{self.ns_prefix}step_world'\n self._sim_step_client = rospy.ServiceProxy(\n self._service_name_step, StepWorld)\n \n # instantiate task manager\n self.task = get_predefined_task(\n ns, mode=task_mode, start_stage=kwargs['curr_stage'], PATHS=PATHS)\n\n self._steps_curr_episode = 0\n self._max_steps_per_episode = max_steps_per_episode\n\n # for extended eval\n self._action_frequency = 1/rospy.get_param(\"/robot_action_rate\")\n self._last_robot_pose = None\n self._distance_travelled = 0\n self._safe_dist_counter = 0\n self._collisions = 0\n self._in_crash = False", "def __init__(self, incoming, a=tf.identity, name='ActivationLayer'):\n super(ActivationLayer, self).__init__()\n \n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n \n self.out = None\n self.a = a\n self.name = name", "def activate(self, x):\n self._activate(x)\n return self._activation_map", "def __init__(self, name, state_shape, n_actions, reuse=False):\n\n with tf.variable_scope(name, reuse=reuse):\n # Prepare neural network architecture\n ### Your code here: prepare any necessary layers, variables, etc.\n if os.path.exists('model.h5'):\n self.model = load_model('model.h5')\n else:\n inp = Input(state_shape)\n dense0 = Dense(64, activation='tanh', kernel_initializer='ones', bias_initializer='ones')(inp)\n dense1 = Dense(256, activation='tanh', kernel_initializer='ones', bias_initializer='ones')(dense0)\n dense2 = Dense(128, activation='relu', kernel_initializer='ones', bias_initializer='ones')(dense1)\n dense3 = Dense(64, activation='relu', kernel_initializer='ones', bias_initializer='ones')(dense2)\n dense4 = Dense(32, activation='tanh', kernel_initializer='ones', bias_initializer='ones')(dense3)\n\n logits = Dense(n_actions, activation='linear', kernel_initializer='ones', bias_initializer='ones')(dense4)\n # probs = Activation('softmax')(logits)\n state_value = Dense(1, activation='linear', kernel_initializer='ones', bias_initializer='ones')(dense4)\n\n self.model = Model(inputs=inp, outputs=[logits, state_value])\n\n # prepare a graph for agent step\n self.state_t = tf.placeholder('float32', [None, ] + list(state_shape))\n self.agent_outputs = self.symbolic_step(self.state_t)", "def output_layer_activation(x):\n return x", "def unify(self,term,fact,bindings):\n\n n = len(term.split('(')[1][:-1].split(','))\n term_args = term.split('(')[1][:-1].split(',')\n fact_args = fact.split('(')[1][:-1].split(',')\n for i in range(n):\n if (not Prover.is_var(term_args[i])) and (not Prover.is_var(fact_args[i])):\n if term_args[i] != fact_args[i]:\n return False\n elif (Prover.is_var(term_args[i])) and (not Prover.is_var(fact_args[i])):\n bindings[term_args[i]] = fact_args[i]\n elif (not Prover.is_var(term_args[i])) and (Prover.is_var(fact_args[i])):\n bindings[fact_args[i]] = term_args[i]\n return bindings", "def 
uf_activate(self, output_reg):\n if len(self.inputs) is 2:\n self.two_activation(output_reg)\n elif len(self.inputs) is 3:\n self.three_activation(output_reg)\n else:\n self.large_activation(output_reg)", "def update_activation(self) -> None:\n if self.forced:\n return\n self.units.update_activation()", "def productactivate():\n pass", "def variable(self, val):", "def OnSim42ChangeInterpreter(self, event):\n self.UseCommandInterface(event.IsChecked())", "def activate(self, net):\r\n\t\tif self.layer != 0:\r\n\t\t\tself.net = net\r\n\t\t\tself.value = 1 / (1 + np.exp(net))\r\n\t\t# else:\r\n\t\t\t# should I throw an error?", "def activate(connection, args):\n\n iface = sap.adt.Interface(connection, args.name)\n iface.activate()", "def activate(self,x):\n return 1 / (1 + np.exp(-x))", "def agent_start(self,thisObs): \n action={'vol':0,'price':0}\n \n \"\"\"Changes for Boltzman Exploration\"\"\"\n #choice=self.pick_action_from_dist()\n #action_bin=self.prob_dist_action[choice]\n #action=self.unbin_action(action_bin,thisObs)\n \n \"\"\"Changes for epsilon greedy method\"\"\"\n action= self.return_random_action(thisObs)\n \n self.lastAction=action\n self.lastObs=thisObs\n return action", "def bound(name):", "def binding(model, binding):\n var = model.binding(binding)\n if var is None:\n raise myokit.IncompatibleModelError(\n model.name(),\n 'No variable found with binding \"' + str(binding) + '\".')\n return var", "def activation_function(self, X):\n return self.net_input(X)", "def activation_function(self, X):\n return self.net_input(X)", "def UpdateLocalsFromCmd(self):\n self.origInterp.locals[\"parentFlowsh\"] = self.sim42interp.cmd.root\n self.origInterp.locals[\"thAdmin\"] = self.sim42interp.cmd.thermoAdmin", "def activate(self):\n pass", "def _hidden_activation(self, inputs):\n if self.act_enc is None:\n act_enc = lambda x: x\n else:\n act_enc = self.act_enc\n return act_enc(self._mappings(inputs))", "def __init__(self, env):\n super().__init__(env) \n # beta of entropy used in A2C\n self.beta = 0.9\n # loss function of A2C value_model is mse\n self.loss = 'mse'", "def __init__(self, in_features):\n super(AddBias, self).__init__()\n self.in_features =in_features\n self.bias = Parameter(torch.Tensor(in_features))", "def activation(activation_fun=None):\n activation_fun = (activation_fun or cfg.MODEL.ACTIVATION_FUN).lower()\n if activation_fun == \"relu\":\n return nn.ReLU(inplace=cfg.MODEL.ACTIVATION_INPLACE)\n elif activation_fun == \"silu\" or activation_fun == \"swish\":\n try:\n return torch.nn.SiLU()\n except AttributeError:\n return SiLU()\n elif activation_fun == \"gelu\":\n return torch.nn.GELU()\n else:\n raise AssertionError(\"Unknown MODEL.ACTIVATION_FUN: \" + activation_fun)", "def __init__(self,obsinst,):\n self.__observable = obsinst.getobservable()\n self.__modelDV = obs.getmodel(modeltype)\n self.__modelBkg= obsinstances[2]", "def _get_activation_fn(activation):\n if activation == \"relu\":\n return ReLU()\n if activation == \"gelu\":\n return GELU()\n # if activation == \"glu\":\n # return F.glu\n raise RuntimeError(F\"activation should be relu/gelu, not {activation}.\")", "def _get_activation_fn(activation):\n if activation == \"relu\":\n return nn.ReLU()\n # if activation == \"gelu\":\n # return F.gelu\n # if activation == \"glu\":\n # return F.glu\n raise RuntimeError(F\"activation should be relu/gelu, not {activation}.\")", "def updateVariables(self) -> None:\n ...", "def get_activation_function(activation: str) -> nn.Module:\n if activation == \"ReLU\":\n return 
nn.ReLU()\n elif activation == \"LeakyReLU\":\n return nn.LeakyReLU(0.1)\n elif activation == \"PReLU\":\n return nn.PReLU()\n elif activation == \"tanh\":\n return nn.Tanh()\n elif activation == \"SELU\":\n return nn.SELU()\n elif activation == \"ELU\":\n return nn.ELU()\n elif activation == 'Swish':\n return Swish()\n elif activation == 'Mish':\n return Mish()\n else:\n raise ValueError(f'Activation \"{activation}\" not supported.')", "def activation(self):\n return self.__activation", "def act(self, state, epsilon, env):\n if random.random() > epsilon:\n state = Variable(torch.FloatTensor(state)).unsqueeze(0) # adds extra dim when single input\n state = self.vari_gpu(state)\n _, u_opt = self.forward(state)\n action = (u_opt.cpu().detach().numpy()) # compute the u*[0] \n #print('act:q_value ',q_value)\n #print('act:model action ',action)\n else:\n rand = np.random.rand(int(np.array(env.action_space.shape)))\n high = env.action_space.high\n low = env.action_space.low\n action = low + rand*(high-low)\n #print('act: ',action)\n return action", "def indirect_activation(trial_id, caller, called, _model):\n called_id, caller_id = var(\"_called_id _caller_id\")\n return (\n _name(trial_id, _model, called_id, called) &\n _name(trial_id, \"activation\", caller_id, caller) &\n indirect_activation_id(trial_id, caller_id, called_id)\n )", "def generate_activation(act_par):\n\n if type(act_par) == list:\n if len(act_par) == 2:\n atype, par = act_par\n if atype == 'elu':\n return ELU(alpha=par)\n elif atype == 'leaky':\n return LeakyReLU(alpha=par)\n elif atype == 'prelu':\n return PReLU()\n else:\n raise NameError(\"No such Activation layer\")\n elif len(act_par) == 1:\n if act_par[0] == 'snake':\n return Activation(snake)\n elif act_par[0] == 'snakeh2':\n return Activation(snakeh2)\n elif act_par[0] == 'snake2':\n return Activation(snake2)\n elif act_par[0] == 'xsin':\n return Activation(xsin)\n elif act_par[0] == 'swish':\n return Activation(swish)\n else:\n return Activation(act_par[0])\n else:\n raise NameError(\"No such Activation layer\")\n elif type(act_par) == str:\n return Activation(act_par)\n else:\n raise NameError(\"Wrong parameters for activation layer\")", "def __call__(self, inputs):\n return self._hidden_activation(inputs)", "def create_state_symbols(nVars,nParams):\n\n nSensitivityEqs = nVars * nParams\n\n #state variables\n x_sp = np.array(sp.symbols('x0:' + str(nVars)))\n\n #sensitivity variables\n sensitivity_sp = np.array(list(sp.symbols('s0:' + str(nSensitivityEqs))))\n\n return [x_sp,sensitivity_sp]", "def __init__(self, layers=[2, 2, 1], activation_function=\"bentidentity\"):\n self.layers = layers\n self.activation_function = th.activation_functions[activation_function]\n self.activation_derivative = th.activation_derivatives[\n activation_function]\n self.weights = self._generate_weights()", "def construct_activation_function(self):\n # Add the activation function\n if not self.activation_function is None:\n # Check if it is a string\n if isinstance(self.activation_function, str):\n activation_function = get_activation_function_by_name(\n self.activation_function\n )()\n else:\n assert isinstance(self.activation_function, ActivationFunction)\n activation_function = self.activation_function\n # Plot the function above the rest of the layer\n self.activation_function = activation_function\n self.add(self.activation_function)", "def set_activation(self, method, sig=None, d_sig=None, sig_0=None, d_sig_0=None):\n\t\tmethod = method.lower()\n\n\t\tif method == 
'logistic':\n\t\t\tself.sig = lambda z: twod(1 / (1 + np.exp(-z)))\n\t\t\tself.d_sig = lambda z: twod(np.multiply(self.sig(z), (1 - self.sig(z))))\n\t\t\tself.sig_0 = self.sig\n\t\t\tself.d_sig_0 = self.d_sig\n\t\telif method == 'htangent':\n\t\t\tself.sig = lambda z: twod(np.tanh(z))\n\t\t\tself.d_sig = lambda z: twod(1 - np.power(np.tanh(z), 2))\n\t\t\tself.sig_0 = self.sig\n\t\t\tself.d_sig_0 = self.d_sig\n\t\telif method == 'custom':\n\t\t\tself.sig = sig\n\t\t\tself.d_sig = d_sig\n\t\t\tself.sig_0 = sig_0\n\t\t\tself.d_sig_0 = d_sig_0\n\t\telse:\n\t\t\traise ValueError('NNetClassify.set_activation: ' + str(method) + ' is not a valid option for method')\n\n\t\tself.activation = method", "def forward(self, x):\n return self.activation_function(self.backbone_model(x))", "def set_activation(self, values: Context) -> 'Evaluator':\n self.activation = self.base_activation.clone()\n self.activation.identifiers.load_values(values)\n self.logger.info(f\"Activation: {self.activation!r}\")\n return self", "def swd_activation_code(self):\n self._probe.swj_sequence(12, 0x01a0)", "def __init__(self, dualgan:nn.Module, l_adv:float=1., l_rec:float=1., l_idt:float=0.):\n super().__init__()\n store_attr()", "def updateBindPose():\n\n dag = pmc.dagPose(q=True, bindPose=True)\n objects = pmc.dagPose(dag, q=True, members=True)\n for obj in objects:\n pmc.dagPose(obj, reset=True, name=dag[0])", "def __setattr__(self, name, value):\n if isinstance(value, torch.jit.ScriptModule):\n object.__setattr__(self, name, value)\n elif isinstance(value, FrameworkTensor):\n self.role.register_state_tensor(value)\n self.state_attributes[name] = value\n elif isinstance(value, FrameworkLayerModule):\n for param in value.parameters():\n self.role.register_state_tensor(param)\n self.state_attributes[name] = value\n else:\n object.__setattr__(self, name, value)", "def __init__(self,ls,activations = [tf.nn.tanh, tf.nn.tanh, None], sess = None, RL = False, lr = 1e-2, reg_scale = 0.1):\n self.ls = ls\n if sess == None:\n self.sess = self.tf_reset()\n else:\n self.sess = sess\n self.activations = activations\n self.input_ph = tf.placeholder(dtype=tf.float32, shape=[None, ls[0]], name = 'msh_input_placeholder') # batch-size by state size\n self.output_ph = tf.placeholder(dtype=tf.float32, shape=[None, ls[-1]]) # action space size\n\n self.fc1 = tf.contrib.layers.fully_connected(self.input_ph, ls[1],\n weights_regularizer=tf.contrib.layers.l2_regularizer(reg_scale),\n activation_fn=activations[0])\n\n self.fc2 = tf.contrib.layers.fully_connected(self.fc1, ls[2],\n weights_regularizer=tf.contrib.layers.l2_regularizer(reg_scale),\n activation_fn=activations[1])\n\n self.output_pred = tf.contrib.layers.fully_connected(self.fc2, ls[-1],\n weights_regularizer=tf.contrib.layers.l2_regularizer(reg_scale),\n activation_fn=activations[-1])\n\n\n # self.W_dict = {}\n # self.b_dict = {}\n # for i in range(len(ls)-1):\n # self.W_dict[i] = tf.get_variable(name='W'+str(i), shape=[ls[i], ls[i+1]], initializer=tf.contrib.layers.xavier_initializer())\n # self.b_dict[i] = tf.get_variable(name='b'+str(i), shape=[ls[i+1]], initializer=tf.constant_initializer(0.))\n\n\n # self.layer = self.input_ph\n # print(tf.shape(self.layer))\n\n\n # for i in range(len(self.activations)):\n # self.layer = tf.matmul(self.layer, self.W_dict[i]) + self.b_dict[i]\n # print(tf.shape(self.layer))\n # if self.activations[i] is not None:\n # self.layer = self.activations[i](self.layer)\n # self.output_pred = self.layer\n\n if RL == True: \n with 
tf.name_scope('reward_holder'):\n self.reward_holder = tf.placeholder(shape=[None],dtype=tf.float32)\n \n with tf.name_scope('get_resp_outs'):\n self.action_holder = tf.placeholder(shape=[None],dtype=tf.int32, name = 'action_holder')\n \n self.indexes = tf.range(0, tf.shape(self.output_pred)[0]) * tf.shape(self.output_pred)[1] + self.action_holder\n\n self.responsible_outputs = tf.gather(tf.reshape(self.output_pred, [-1]), self.indexes, name = 'responsible_outputs')\n # out of the output vector, this will pull out the indexes\n # But i still don't understand indexes.\n\n # i feel like instead of going thru all of this, you could have just saved the actual outputs. I think I'll try that.\n # then for responsible outputs, you'd do tf.gather(outputs, action_holder) oh maybe it's not different than this. \n # Maybe that's exactly what they're doing, bc action_holder is a scaler number. IDK.\n with tf.name_scope('loss'):\n self.loss = -tf.reduce_mean(tf.log(self.responsible_outputs)*self.reward_holder) #becuase reward_holder value \n # doesn't directly change as you change the Weights, this is equivalent to multiplying the gradient by the reward.\n # when you take the gradient, you're solving for d(log*A)/dW = d(log_p)/dW * d(log_p*A)/d(log_p) = A*d(log_p)/dW. so it's equivalent to mult gradient\n # by the reward function\n tvars = tf.trainable_variables()\n\n with tf.name_scope('update'):\n # self.train_step = tf.train.RMSPropOptimizer(learning_rate = lr, decay = 0.99).minimize(self.loss)\n self.train_step = tf.train.AdamOptimizer().minimize(self.loss)\n self.init = tf.global_variables_initializer()", "def bodynet(self, obs):\n (raw_obs,bodyinfo) = th.split(obs, [28,7], dim=1) # this number is for Ant\n x = self.bodyinfo_linear1(bodyinfo)\n x = self.bodyinfo_activation(x)\n x = self.bodyinfo_linear2(x)\n x = self.bodyinfo_softmax(x)\n obs = th.cat([raw_obs, x], dim=1)\n return obs", "def __init__ ( self , func , deriv = None , name = '' ) :\n self.__func = func\n if deriv and callable ( deriv ) : self.__derivative = deriv\n else : self.__derivative = Derivative(func)\n \n if name : self.__name__ = name \n elif hasattr ( func , '__name__' ) and '<lambda>' != func.__name__ :\n self.__name__ = func.__name__\n else : self.__name__ = 'Eval2VE'", "def __init__(self):\r\n ScriptedLoadableModuleLogic.__init__(self)\r\n self.rgbport = 18944\r\n self.depthPort = 18945", "def __init__(self):\n self.topology = None\n self.learningRate = None\n self.momentum = None\n self.name = None\n self.size = None\n #self._hiddenActiv_fun_key = None\n #self._outActiv_fun_key = None\n #self.output_activation = None\n #self.hidden_activation = None", "def activate_predicate(self):\n pass", "def set_variable(self, name, value):\n if self._scalamagic and (not name.startswith(\"_i\")):\n self.scala_interpreter.bind(name, value)\n else:\n self.log.debug('Not setting variable %s', name)", "def forward_activate(self, a_prev, w, b, func_type):\n\n\t\tz = np.dot(w, a_prev) + b\n\t\tif 'sigmod' == func_type.lower(): \n\t\t\ta = 1 / (1 + np.exp(-z))\n\t\telif 'relu' == func_type.lower():\n\t\t\ta = np.where(z >= 0, z, 0)\n\t\telif 'leaky relu' == func_type.lower():\n\t\t\ta = np.where(z >= 0, z, 0.01 * z)\n\t\telif 'tanh' == func_type.lower():\n\t\t\ta = (np.exp(z) - np.exp(-z)) / (np.exp(z) + np.exp(-z))\n\n\t\tcache = (a_prev, w, b, z)\n\t\treturn a, cache", "def init():\n global instance, swig\n global visionC, pythonC, ledsC, localizationC, opponentsC, behaviorC, planningC\n global BehaviorModuleLog\n global text_logger\n 
global sensor_values, joint_values, joint_stiffness\n global RANDOM_SEED\n global Timer\n global TOOL\n global CONFIG_ID\n global OPTIMIZE, DEBUG\n\n swig = pythonswig_module\n Timer = swig.Timer\n instance = pythonswig_module.PythonInterface().CORE_INSTANCE\n # BehaviorModuleLog = swig.BehaviorModuleLog\n visionC = instance.vision_\n pythonC = instance.interpreter_\n behaviorC = instance.behavior_\n ledsC = instance.leds_\n localizationC = instance.localization_\n planningC = instance.planning_\n opponentsC = instance.opponents_\n text_logger = instance.textlog()\n TOOL = (instance.type_ == swig.CORE_TOOL)\n OPTIMIZE = instance.EnableOptimizations()\n DEBUG = not OPTIMIZE\n\n joint_values = pythonC.joint_values_\n sensor_values = pythonC.sensor_values_\n joint_stiffness = pythonC.joint_stiffness_\n\n this = sys.modules[__name__]\n for item in dir(swig):\n if item.startswith(\"__\"):\n continue\n setattr(this, item, getattr(swig, item))\n RANDOM_SEED = swig.Random.SEED\n CONFIG_ID = -1", "def _update_target_net(self):\n self.target_net.load_state_dict(self.policy_net.state_dict())\n self.target_net.eval()", "def initialiseActivationFunctions(self):\n\n\t\t###uniform for output units\n\t\tif self._outputActivationFunctions == None or self._outputActivationDerivatives == None:\t\n\t\n\t\t\tself._outputActivationFunctions = []\n\t\t\tself._outputActivationDerivatives = []\n\n\t\t\tactFunc = lambda x : x\n\t\t\tdActFunc = lambda x : 1.0\n\t\n\t\t\tfor i in range(self.nOutputs):\n\t\t\t\t\n\t\t\t\tself._outputActivationFunctions.append(actFunc)\n\t\t\t\tself._outputActivationDerivatives.append(dActFunc)\n\n\t\t\tself._outputActivationFunctions = np.array(self._outputActivationFunctions)\n\t\t\tself._outputActivationDerivatives = np.array(self._outputActivationDerivatives)\n\t\t\t\n\n\t\tif self._hiddenActivationFunctions == None or self._hiddenActivationDerivatives == None:\n\n\t\t\tself._hiddenActivationFunctions = []\n\t\t\tself._hiddenActivationDerivatives = []\n\n\t\t\tfor i in range(self.nHiddenLayers):\n\n\t\t\t\tfTemp = []\n\t\t\t\tdTemp = []\n\t\t\t\t\n\t\t\t\t#Make the default sigmoid the one suggested in LeCun et al 1998\n\t\t\t\ttwist = 0.01\n\t\t\t\ta = 1.7159\n\t\t\t\tc = 2.0/3.0\n\n\t\t\t\tactFunc = lambda x : a*np.tanh(c*x) + twist*x\n\t\t\t\tdActFunc = lambda x : twist + a*c*(1.0 - (np.tanh(c*x)**2.0))\n\n#\t\t\t\tactFunc = lambda x : np.tanh(x)\n#\t\t\t\tdActFunc = lambda x : 1.0 - np.tanh(x)**2.0\n\n\t\t\t\t#plus all of the bias\n\t\t\t\tfor j in range(self.nUnitsPerLayer+1):\n\t\t\t\t\t\n\t\t\t\t\tfTemp.append(actFunc)\n\t\t\t\t\tdTemp.append(dActFunc)\n\t\t\t\t\n\t\t\t\tself._hiddenActivationFunctions.append(fTemp)\n\t\t\t\tself._hiddenActivationDerivatives.append(dTemp)\n\t\t\t\n\t\t\tself._hiddenActivationFunctions = np.array(self._hiddenActivationFunctions)\n\t\t\tself._hiddenActivationDerivatives = np.array(self._hiddenActivationDerivatives)", "def test_swish(self):\n activation_name = 'Swish'\n args = {}\n\n activation = activation_factory.create(activation_name, **args)\n self.assertEqual(activation._get_name(), activation_name)\n\n x = torch.empty(10)\n y = activation(x)\n assert_array_equal(y, x * torch.sigmoid(x))", "def _localSetState(self,pdict):\n self.lambdaVar = pdict.pop('lambda')\n self.low = pdict.pop('low' )", "def init_weapon(self, weapon):\n if weapon: \n self.weapon = weapon(self)\n #self.weapon.activate() #TEMP", "def _localSetState(self,pdict):\n self.mu = pdict.pop('mu')", "def activated(self):", "def server_activate(self):\n\t\tpass", "def 
__init__(self):\n super(SiLU, self).__init__()", "def apply_activation(self, tens):\n if(self.activation == \"ReLU\"): # pylint: disable=no-else-return\n return tf.nn.relu(tens)\n elif(self.activation == \"Leaky_ReLU\"):\n return tf.nn.leaky_relu(tens)\n elif(self.activation == \"Tanh\"):\n return tf.nn.tanh(tens)\n elif(self.activation == \"Sigmoid\"):\n return tf.nn.sigmoid(tens)\n elif(self.activation == \"Linear\"):\n return tens\n else:\n raise InvalidActivationError(self.activation)", "def update(self, sess, s, a, y):\n feed_dict = { self.X_pl: s, self.y_pl: y, self.actions_pl: a }\n global_step, _, loss = sess.run(\n [tf.train.get_global_step(), self.train_op, self.loss],\n feed_dict)\n return loss", "def _get_activation_fn(activation):\n if activation == \"relu\":\n return F.relu\n if activation == \"gelu\":\n return F.gelu\n if activation == \"glu\":\n return F.glu\n raise RuntimeError(F\"activation should be relu/gelu, not {activation}.\")", "def __init__(self, state, cval=True, conditional=\"eq\"):\n self.variable = state\n self.cval = cval\n self.conditional = conditional\n self.sending = bridgectl.sending\n self.type = bridgectl.type\n self.target = bridgectl.target", "def activation(x):\n # return np.tanh(x)\n return np.maximum(0,x)", "def _bin_sensor_activation(self, entity, attribute, old, new, kwargs):\n location = self._friendly_names[entity]\n pretty_date_now = self.datetime().strftime(self._date_format)\n\n # Add to history\n self._history.append(f\"{location}: {pretty_date_now}\")\n\n # Publish new state\n self._set_new_sensor_state(location)", "def _bind(self):\n\n pass", "def _update_activation(self, signal):\n # Desactivate the selected switch boxes and activate only the selected\n # path associated boxes\n if hasattr(signal, \"value\") and hasattr(signal, \"switch_name\"):\n switch_paths = self._switches[signal.switch_name]\n for path_name, box_names in switch_paths.items():\n for box_name in box_names:\n self._boxes[box_name].active = (path_name == signal.value)\n else:\n raise ValueError(\n \"Updating pipeline activation '{0}'. Activation error: \"\n \"observer signal has no attribute 'value'.\".format(self.name))", "def forward_activationfunction(self, x):\n if self.forward_activation == 'tanh':\n return torch.tanh(x)\n elif self.forward_activation == 'relu':\n return F.relu(x)\n elif self.forward_activation == 'linear':\n return x\n elif self.forward_activation == 'leakyrelu':\n return F.leaky_relu(x, 0.2)\n elif self.forward_activation == 'sigmoid':\n return torch.sigmoid(x)\n else:\n raise ValueError('The provided forward activation {} is not '\n 'supported'.format(self.forward_activation))", "def __init__(self,name,exp_base, random_seed=None,version=None):\n self.exp_base = exp_base\n self.log_fun = lambda x: np.log(x) / np.log(self.exp_base)\n self.exp_fun = lambda x: np.power(self.exp_base,x)\n\n super(LogNormalBehaviorModel, self).__init__(name, random_seed, version)" ]
[ "0.58555764", "0.57150984", "0.54634446", "0.5329766", "0.5275407", "0.5234855", "0.5234301", "0.5216041", "0.51913345", "0.5126263", "0.5109353", "0.5107513", "0.50998616", "0.5097742", "0.5060914", "0.5060419", "0.5039985", "0.5010341", "0.50005376", "0.4995609", "0.4991565", "0.49793637", "0.49411544", "0.49289382", "0.4928022", "0.49123108", "0.4905465", "0.48961765", "0.48769683", "0.48734716", "0.4863715", "0.48560503", "0.48496616", "0.4847354", "0.4847066", "0.48434934", "0.48431844", "0.48246866", "0.48204163", "0.48181543", "0.48176062", "0.48173955", "0.48154274", "0.480867", "0.480662", "0.4803795", "0.4803795", "0.48007268", "0.47987965", "0.47876814", "0.4782854", "0.4770436", "0.47673532", "0.47603613", "0.47586158", "0.47463748", "0.47390962", "0.4738836", "0.47329214", "0.47319525", "0.4729145", "0.47254676", "0.47231042", "0.4722914", "0.47192952", "0.4717894", "0.471735", "0.47145024", "0.47130713", "0.47123095", "0.47045517", "0.4701188", "0.46972784", "0.4693292", "0.4688013", "0.46870553", "0.46847814", "0.46816894", "0.4681662", "0.4674061", "0.46700564", "0.46697637", "0.4664943", "0.46606228", "0.46577612", "0.46573707", "0.46553943", "0.4655349", "0.46520817", "0.4649314", "0.46459758", "0.46451297", "0.46381757", "0.46341208", "0.4632252", "0.46303928", "0.4630345", "0.46283305", "0.4626125", "0.46208075", "0.46201468" ]
0.0
-1
Takes a two element tuple. The second element must be a Beliefs object which system1 will use to update the belief module. Once updated, the action queue will be emptied and the rules will be checked for satisfied conditions. The action queue will be refilled with new active actions from the rule list.
def process_belief(self, args): goal, belief = args if isinstance(belief, Beliefs): self.belief_module.process_belief(belief) self.initialize_action_queue() return [{}]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_beliefs(self, result, action):\n if self.traceUpdate:\n print(\"Updating beliefs based on action\", action, \"with result\", result)\n\n if result == 'TryAgain':\n return None\n\n elif not result and not self.isTransient(action):\n if self.traceUpdate:\n print(\"Adding known false\", action)\n self.knownFalseTuple(action)\n\n if isinstance(result, list):\n for bindings in result:\n concrete_result = substitute(action, bindings)\n if not self.isTransient(concrete_result):\n if self.traceUpdate:\n print(\"Adding known true and performed\", concrete_result)\n self.knownTuple(concrete_result)\n self.knownTuple(('performed', concrete_result))\n self.update_variable_binding(concrete_result)", "def _update_beliefs(self, features,\n beliefs):\n raise NotImplementedError", "def update1(self, state, action, nextState, reward):\n #print \"update1 in ApproximateQAgent\"\n \"*** YOUR CODE HERE ***\"\n ##################################################################################################################################Eric Did Stuff\n actionList = nextState.getLegalActions(self.index)\n\n\n #print \"Action List\", actionList\n\n\n\n\n weights = self.getWeights()\n\n features = self.featExtractor.getFeatures(state, action, self)\n #self.myFeats = features\n if self.index == 0:\n print \"FEATURES: \",features\n value = self.computeValueFromQValues(nextState)\n qValue = self.getQValue(state,action)\n #print \"value\", value, \"qValue\", qValue\n for feature in features:\n if len(actionList) != 0:\n weights[feature] = weights[feature] + self.alpha * (reward + self.discount * value - qValue) * features[feature]\n else:\n weights[feature] = weights[feature] + self.alpha * (reward - qValue) * features[feature]\n #print \"feature\", feature, \"weights\", weights[feature]\n #print \"weights\", weights\n\n #util.raiseNotDefined()", "def update(self):\n #self.consider_deactivation() if self.active_flag else self.consider_activation()\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.match_degree*self.priority", "def update(self):\n #self.consider_deactivation() if self.active_flag else self.consider_activation()\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.match_degree*self.priority", "def update(self):\n #self.consider_deactivation() if self.active_flag else self.consider_activation()\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.match_degree*self.priority", "def update(self):\n #self.consider_deactivation() if self.active_flag else self.consider_activation()\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.match_degree*self.priority", "def update(self):\n startstate = self.state\n goalstates =self.env.getGoalStates()\n inputs = self.env.sense(self)\n self.action_sequence = self.drive(goalstates,inputs)\n action = self.choose_action() # Choose an action\n self.state = self.env.act(self,action) \n return", "def _action(self, wloops: Any, beta: Any) -> Any:\n pass", "def HELPER_update_belief(self, old_belief, observation, gamma):\n observation = int(observation)\n #print \"old_belief:\", old_belief, type(old_belief)\n #print \"observation:\", observation, 
type(observation)\n #print \"gamma:\", gamma, type(gamma)\n\n diffs = [0.1*i for i in range(self.num_difficulty_bins)]\n new_belief = util.updateBelief(old_belief, None, observation, diffs, gamma)\n #print \"new_belief\", new_belief, type(new_belief)\n return new_belief", "def update_beliefs(self, corpus_id):\n logger.info('Updating beliefs for corpus \"%s\"' % corpus_id)\n # TODO check which options are appropriate for get_corpus\n corpus = self.get_corpus(corpus_id)\n be = BeliefEngine(self.scorer)\n stmts = list(corpus.statements.values())\n be.set_prior_probs(stmts)\n # Here we set beliefs based on actual curation\n for uuid, correct in corpus.curations.items():\n stmt = corpus.statements.get(uuid)\n if stmt is None:\n logger.warning('%s is not in the corpus.' % uuid)\n continue\n stmt.belief = correct\n belief_dict = {st.uuid: st.belief for st in stmts}\n return belief_dict", "def update_q(self,action,reward):\n #print('')\n #print('Action index is: ' + str(action))\n #print('Provided reward is: ' + str(reward))\n \n # Read from disk before updating\n try:\n pickle_in = open(\"static/data/values.pickle\",\"rb\")\n values = pickle.load(pickle_in)\n #print(values)\n self.values = values\n pickle_in = open(\"static/data/counts.pickle\",\"rb\")\n self.counts = pickle.load(pickle_in)\n pickle_in = open(\"static/data/actions_taken.pickle\",\"rb\")\n actions_taken = pickle.load(pickle_in)\n pickle_in = open(\"static/data/reward_list.pickle\",\"rb\")\n reward_list = pickle.load(pickle_in)\n except:\n actions_taken = []\n reward_list = []\n pass\n \n self.counts[action] += 1\n n = self.counts[action]\n value = self.values[action]\n actions_taken.append(action)\n reward_list.append(reward)\n \n # Running product\n new_value = value + (1/n) * (reward - value)\n self.values[action] = new_value\n \n \n # Save to disk before exiting\n pickle_out = open('static/data/values.pickle','wb')\n pickle.dump(self.values, pickle_out)\n pickle_out.close()\n pickle_out = open('static/data/counts.pickle','wb')\n pickle.dump(self.counts, pickle_out)\n pickle_out.close()\n pickle_out = open('static/data/actions_taken.pickle','wb')\n pickle.dump(actions_taken, pickle_out)\n pickle_out.close()\n pickle_out = open('static/data/reward_list.pickle','wb')\n pickle.dump(reward_list, pickle_out)\n pickle_out.close()", "def update1(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n ############################################################################################################ Eric Changed nextState to other stuff\n \n actionList = nextState.getLegalActions(self.index)\n\n if (not (nextState == None)) and len(actionList) > 0 :\n expectedRewardList = []\n #print \"state \",nextState,\" has legal actions \", state.getLegalActions(nextState)\n for a in actionList:\n #print \"next state: \",nextState,\" action: \",a, \"Value: \", self.Q[(nextState, a)]\n expectedRewardList.append(self.Q[(nextState, a)])\n #print \"expected reward list: \", expectedRewardList\n self.Q[(state, action)] = self.Q[(state, action)] + self.alpha * (reward + self.discount * max(expectedRewardList) - self.Q[(state, action)])\n #print self.Q\n return\n else:\n self.Q[(state, action)] = self.Q[(state, action)] + self.alpha * (reward - self.Q[(state, action)])\n return\n\n #print \"I should never be here\"\n #util.raiseNotDefined()", "def update_belief(self, state, action, reward):\n self.add_to_state_history(state)\n state = self.get_modified_state()\n self.belief.update(state, action, reward, self.alpha)\n self.alpha *= 
self.a_rate", "def _update_beliefs(self, features,\n beliefs):\n if (len(features) != len(beliefs) or features.ndim != 1):\n raise core.BadFeatureFnError()\n\n assert len(features) == len(beliefs)\n decay = self.rng.binomial(beliefs, self.params.decay_prob)\n updated_beliefs = [\n beliefs[i] + features[i] - decay[i] for i in range(len(beliefs))\n ]\n return updated_beliefs", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n features = self.featExtractor.getFeatures(state,action)\n\n learning_rate = self.alpha #gives us the learning rate\n\n temporary_QValue = self.getQValue(state,action) #to get the Q value of the state,action pair\n\n nextState_QValue = self.getValue(nextState) #to get the Q value of the landing state when taken action a and state s\n\n discount_factor = self.discount #to get the gamma/ discount factor\n\n weight = self.weights\n\n Q_Value = 0\n\n difference = (reward + discount_factor * nextState_QValue ) - (temporary_QValue) #refer to README_Reinforcement.txt for the formula\n\n for each_feature in features:\n\n #refer to README_Reinforcement.txt for the formula at line 20\n weight[each_feature] = weight[each_feature] + learning_rate * difference * features[each_feature]\n\n #util.raiseNotDefined()", "def update(self, arm, reward, context):", "def update(self, action, reward):\n # like puseudo count\n a, b = self.ActionValue[action]\n #print(f\"UPDATE {action}: ({a}, {b})\")\n a = a + self.huber(reward) # The larger the reward, the easier it is to select\n b = b + 1 - self.huber(reward) # It becomes easy to be selected as the reward becomes larger, and it becomes difficult to be selected as the reward becomes smaller\n a = 0.001 if a <= 0 else a\n b = 0.001 if b <= 0 else b\n \n self.ActionValue[action] = (a, b)\n\n #print(f\"=> ({a}, {b})\")\n\n # Update nearby action candidates\n around_update_rate = 0.3 # Parameter to adjust the degree of change according to the distance; [0, 1]\n radius = np.sqrt(self.action_resolution**2 + self.action_resolution**2 + 1e-9) # 1e-9 is for safety to caluculate the small number \n for action_around in self.actions:\n if action_around == action:\n continue\n x = action_around[0] - action[0]\n y = action_around[1] - action[1]\n distance = np.sqrt(x**2 + y**2)\n if distance <= radius:\n a, b = self.ActionValue[action_around]\n #print(f\"UPDATE {action_around}: ({a}, {b})\")\n a = a + self.huber(reward) * around_update_rate * (1 - distance)\n b = b + (1 - self.huber(reward)) * around_update_rate * (1 - distance) # To adjust the update, weight 1-r. 
If normal update is 1, it will be the update of around_update_rate * (1-distance) for adjacent actions.\n a = 0.001 if a <= 0 else a\n b = 0.001 if b <= 0 else b\n\n #print(f\"=> ({a}, {b})\")\n\n self.ActionValue[action_around] = (a, b)", "def update_based_on_topology(self, *args, **kwargs):\n for bfr in Configuration.get(\"switches\"):\n switch = bfr[\"name\"]\n\n self.update_bier_decap_rule(switch=switch)", "def __init__(self, sn, beliefs):\n assert all([type(x) is Belief for x in beliefs])\n self._sn = sn\n self._beliefs = beliefs", "def kb_retract(self, fact_or_rule):\n printv(\"Retracting {!r}\", 0, verbose, [fact_or_rule])\n ####################################################\n # Student code goes here\n\n if isinstance(fact_or_rule, Fact):\n if fact_or_rule not in self.facts:\n #print(\"fact not in bk!\")\n return\n else:\n #find the corresponding fact in kb\n index = self.facts.index(fact_or_rule)\n fact_or_rule = self.facts[index]\n #if the fact is not supported, remove it\n if len(fact_or_rule.supported_by) == 0:\n self.facts.remove(fact_or_rule)\n else:\n #print(\"can't retract!\")\n return\n elif isinstance(fact_or_rule, Rule):\n if fact_or_rule not in self.rules:\n #print(\"rule not in bk!\")\n return\n else:\n #find the corresponding rule in kb\n index = self.rules.index(fact_or_rule)\n fact_or_rule = self.rules[index]\n #if rule is not supported and not asserted, then remove it\n if len(fact_or_rule.supported_by) == 0 and fact_or_rule.asserted != True:\n self.rules.remove(fact_or_rule)\n else:\n #print(\"can't retract!\")\n return\n #remove the supported pairs of the facts that it supports\n for facts in fact_or_rule.supports_facts:\n for i in facts.supported_by:\n if fact_or_rule in i:\n facts.supported_by.remove(i)\n if facts.asserted != True:\n self.kb_retract(facts)\n #remove the supported pairs of the rules that it supports\n for rules in fact_or_rule.supports_rules:\n for i in rules.supported_by:\n if fact_or_rule in i:\n rules.supported_by.remove(i)\n if rules.asserted != True:\n self.kb_retract(rules)", "def updateFCFS_queue(self, junc):\n for tl_combination in junc.tl_combinations:\n for lane in tl_combination.corresponding_lanes:\n for vehicle in traci.lane.getLastStepVehicleIDs(lane.ID):\n junc.FCFS_queue[vehicle] = tl_combination.ryg_state", "def updateWeapons(self):\n self.readyWeapons = []\n self.setWeaponStatus()\n\n for myWeapon in self.activeWeapons:\n if myWeapon.preFireCheck() == 1:\n self.readyWeapons.append(myWeapon)\n self.alternateTargets = []\n\n if self.amsTargets != []:\n for myWeapon in self.amsWeapons:\n if myWeapon.preFireCheck() == 1:\n self.readyWeapons.append(myWeapon)\n self.amsTargets = []", "def update_belief_once(self, current_observation, last_observation, avg_vel, dt, current_belief):\n # type: (np.ndarray, np.ndarray, float, float, list) -> (list, list)\n\n\n\n new_belief = []\n likelihoods = []\n estimated_positions = []\n normalization_factor = 0.0\n\n # Compute the likelihoods\n for goal_idx in range(self._num_goals):\n obs_likelihood, calculated_position = self.compute_observation_likelihood(current_observation,\n last_observation,\n self._goals[goal_idx],\n avg_vel, dt)\n estimated_positions.append(calculated_position)\n obs_likelihood += 1\n likelihoods.append(obs_likelihood)\n normalization_factor += obs_likelihood * current_belief[goal_idx]\n\n\n\n\n #for i in range(self.importance_of_prior_in_belief_update):\n #normalization_factor = 0.0\n #tmp_belief = []\n # Compute new belief\n for goal_idx in range(self._num_goals):\n 
prob = (likelihoods[goal_idx] * current_belief[goal_idx])/normalization_factor\n\n new_belief.append(prob)\n\n #tmp_belief = np.array(tmp_belief) / normalization_factor\n\n\n #new_belief = tmp_belief\n return [new_belief, estimated_positions]", "def get_action(self, state):\n\n \"\"\"\n XXX: DO NOT MODIFY THAT FUNCTION !!!\n Doing so will result in a 0 grade.\n \"\"\"\n\n # XXX : You shouldn't care on what is going on below.\n # Variables are specified in constructor.\n if self.beliefGhostStates is None:\n self.beliefGhostStates = state.getGhostBeliefStates()\n if self.walls is None:\n self.walls = state.getWalls()\n return self.updateAndGetBeliefStates(\n self._computeNoisyPositions(state))", "def beam_update(self, beams, extra):\n return extra", "def update(self, *args, **kwargs):\n\n print(\"\\nIn MOCK ALGO OBSERVER....\")\n\n if 'remaining_tasks' in kwargs:\n\n remaining_tasks = len(kwargs['remaining_tasks'])\n\n print(\"\\tThere are {} remaining tasks\".format(remaining_tasks))\n print(\"\\tIs {} less than {}? {}\".format(remaining_tasks, min_tasks, (remaining_tasks < min_tasks)))\n\n # If we don't have the minimum number of hits out...\n if remaining_tasks < min_tasks:\n print(\"\\tRefilling queue with {} new task(s)\".format(min_tasks - remaining_tasks))\n # Fill up the tasks again\n for t in range(min_tasks - remaining_tasks):\n new_task = make_rand_task()\n tasks.append(new_task)\n\n actAMT.init_tasks(tasks, hit_type_init_file)\n del tasks[:]\n\n if 'completed_task' in kwargs:\n add_to_db(kwargs['completed_task'])", "def update_action(self):\n self.action = self.automata > self.states\n self.inv_action = self.inv_automata > self.states", "def belief_conflict(self, args):\n goal, belief = args\n if isinstance(belief, Beliefs):\n if self.belief_module.is_conflicting_belief(belief):\n return [{}]\n\n return []", "def _update_goals(self):\n print\"updating goals\"\n response = self.goal_tracker_call() # type: GoalsResponse\n self._goals = []\n for goal in response.goals: # type: Point\n self._goals.append([goal.x, goal.y, goal.z])\n self._num_goals = len(self._goals)\n\n self._current_belief = self._init_belief()", "def update_chains(self):\r\n _, black_positions, white_positions = self.get_positions()\r\n\r\n self.bfs(black_positions, 1)\r\n self.bfs(white_positions, 2)", "def update(self, upper_action, lower_action):\n # validate the actions:\n for action, c in [(upper_action, \"upper\"), (lower_action, \"lower\")]:\n actions = list(self._available_actions(c))\n if action not in actions:\n self.logger.info(f\"error: {c}: illegal action {action!r}\")\n available_actions_list_str = \"\\n* \".join(\n [f\"{a!r} - {_FORMAT_ACTION(a)}\" for a in actions]\n )\n # NOTE: The game instance _could_ potentially be recovered\n # but pursue a simpler implementation that just exits now\n raise IllegalActionException(\n f\"{c} player's action, {action!r}, is not well-\"\n \"formed or not available. 
See specification and \"\n \"game rules for details, or consider currently \"\n \"available actions:\\n\"\n f\"* {available_actions_list_str}\"\n )\n # otherwise, apply the actions:\n battles = []\n atype, *aargs = upper_action\n if atype == \"THROW\":\n s, x = aargs\n self.board[x].append(s.upper())\n self.throws[\"upper\"] += 1\n battles.append(x)\n else:\n x, y = aargs\n # remove ONE UPPER-CASE SYMBOL from self.board[x] (all the same)\n s = self.board[x][0].upper()\n self.board[x].remove(s)\n self.board[y].append(s)\n # add it to self.board[y]\n battles.append(y)\n atype, *aargs = lower_action\n if atype == \"THROW\":\n s, x = aargs\n self.board[x].append(s.lower())\n self.throws[\"lower\"] += 1\n battles.append(x)\n else:\n x, y = aargs\n # remove ONE LOWER-CASE SYMBOL from self.board[x] (all the same)\n s = self.board[x][0].lower()\n self.board[x].remove(s)\n self.board[y].append(s)\n # add it to self.board[y]\n battles.append(y)\n # resolve hexes with new tokens:\n for x in battles:\n # TODO: include summary of battles in output?\n self.board[x] = _BATTLE(self.board[x])\n\n self._turn_detect_end()\n # TODO:\n # return a sanitised version of the action to avoid action injection?\n\n # Log the action (if logging is enabled)\n self.logger.info(\n f\"turn {self.nturns}: upper: {_FORMAT_ACTION(upper_action)}\"\n )\n self.logger.info(\n f\"turn {self.nturns}: lower: {_FORMAT_ACTION(lower_action)}\"\n )", "def trigger(self, state, updated_vars):\n for evidence_var in state.get_evidence().get_variables():\n if evidence_var.startswith('R(') and evidence_var.endswith(')'):\n actual_action = Assignment.create_from_string(evidence_var[2:])\n actual_utility = state.get_evidence.get_value(evidence_var).get_double()\n\n if actual_action.get_variables() in self.previous_states.keys():\n previous_state = self.previous_states[actual_action.get_variables()]\n self.learn_from_feedback(previous_state, actual_action, actual_utility)\n\n state.clear_evidence(evidence_var)\n\n if len(state.get_action_node_ids()) != 0:\n try:\n self.previous_states[state.get_action_node_ids()] = copy(state)\n except Exception as e:\n self.log.warning(\"cannot copy state: \" + str(e))", "def update_on_demand_queue(cfg):\n\n # temp storage of all sprites to update\n update_list = list()\n\n while len(update_queue.update_queue) > 0:\n next_sprite = update_queue.update_queue.pop()\n update_list.append(next_sprite)\n #print(\"[update_on_demand_queue] Found in on demand queue:\", next_sprite.name)\n\n #print(\"[update_on_demand_queue] Updating on demand queue with contents:\", update_list)\n\n for s in update_list:\n s.update()", "def calculateBeliefs(self):\n\n belief = {}\n\n for question in self.getQuestions():\n q = str(question.id)\n belief[q] = self.HELPER_init_belief()\n\n #print belief[q]\n for answer in self.getQuestionCompletedAnswers(question):\n #print q\n #print str(answer.question.id)\n assert str(answer.question.id) == q\n w_skill = answer.worker.inference_results['EM']['skill']\n # answer.value must be \"0\" or \"1\"\n assert answer.value == \"0\" or answer.value == \"1\"\n #print answer.value, w_skill\n belief[q] = self.HELPER_update_belief(belief[q], answer.value, w_skill)\n #print belief[q]\n\n #print \"Question beliefs:\", belief\n #print \"##################\"\n return belief", "def update(self, upper_action, lower_action):\n # validate the actions:\n for action, c in [(upper_action, \"upper\"), (lower_action, \"lower\")]:\n actions = list(self.available_actions(c))\n if action not in actions:\n 
self.logger.info(f\"error: {c}: illegal action {action!r}\")\n available_actions_list_str = \"\\n* \".join(\n [f\"{a!r} - {_FORMAT_ACTION(a)}\" for a in actions]\n )\n # NOTE: The game instance _could_ potentially be recovered\n # but pursue a simpler implementation that just exits now\n raise Exception(\n f\"{c} player's action, {action!r}, is not well-\"\n \"formed or not available. See specification and \"\n \"game rules for details, or consider currently \"\n \"available actions:\\n\"\n f\"* {available_actions_list_str}\"\n )\n # otherwise, apply the actions:\n battles = []\n atype, *aargs = upper_action\n if atype == \"THROW\":\n s, x = aargs\n self.board[x].append(s.upper())\n self.throws[\"upper\"] += 1\n battles.append(x)\n else:\n x, y = aargs\n # remove ONE UPPER-CASE SYMBOL from self.board[x] (all the same)\n s = self.board[x][0].upper()\n self.board[x].remove(s)\n self.board[y].append(s)\n # add it to self.board[y]\n battles.append(y)\n atype, *aargs = lower_action\n if atype == \"THROW\":\n s, x = aargs\n self.board[x].append(s.lower())\n self.throws[\"lower\"] += 1\n battles.append(x)\n else:\n x, y = aargs\n # remove ONE LOWER-CASE SYMBOL from self.board[x] (all the same)\n s = self.board[x][0].lower()\n self.board[x].remove(s)\n self.board[y].append(s)\n # add it to self.board[y]\n battles.append(y)\n # resolve hexes with new tokens:\n upper_defeated_cnt = 0\n lower_defeated_cnt = 0\n for x in battles:\n # TODO: include summary of battles in output?\n self.board[x], upper_defeated, lower_defeated = self._BATTLE(self.board[x])\n upper_defeated_cnt += upper_defeated\n lower_defeated_cnt += lower_defeated\n\n self.nturns += 1\n return upper_defeated_cnt, lower_defeated_cnt", "def update(*args):", "def trigger(self):\n if self.queue and self.age(self.queue[0][0]) >= 0:\n # enable next effect\n self.set_lights(self.queue[0][1])\n # normally remove the first element\n self.queue.pop(0)\n\n while self.queue and self.age(self.queue[0][0]) > 0:\n print(\"Skipping front of queue\", self.queue[0], \"because age is \",\n self.age(self.queue[0][0]))\n self.queue.pop(0)\n self.queue = []\n\n if not self.queue:\n self.set_lights(self.config.config('idle', 'lights'))", "def backpropagating(self): \n\n ######################### Configure the sensor inputs given the movement of the agent ######################### \n sensors_result_N = self.agent.sensors(self, direction=3) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(3)+[int(self.agent.get_previous_collision())]\n sensors_result_O = self.agent.sensors(self, direction=2) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(2) + [int(self.agent.get_previous_collision())]\n sensors_result_S = self.agent.sensors(self, direction=1) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(1) + [int(self.agent.get_previous_collision())]\n sensors_result_E = self.agent.sensors(self, direction=0) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(0) + [int(self.agent.get_previous_collision())]\n\n input_nn_N = np.asarray(sensors_result_N).astype(int) # input when the Nord action is performed \n input_nn_O = np.asarray(sensors_result_O).astype(int) # input when the West action is performed\n input_nn_S = np.asarray(sensors_result_S).astype(int) # input when the South action is performed\n input_nn_E = np.asarray(sensors_result_E).astype(int) # input when the West action is performed\n\n l_input = 
[input_nn_E.reshape(1,145),input_nn_S.reshape(1,145),input_nn_O.reshape(1,145),input_nn_N.reshape(1,145)]\n ######################### Configure the sensor inputs given the movement of the agent #########################\n\n print(\"The reward in baskpropagating is %f\" %(self.agent.reward) ) \n parameters = [self.gamma, self.agent.reward]\n Ui = self.U_list[self.agent.get_previousAction().index(1)]\n\n if not self.end:\n U_list_y = [self.nn.predict(input_nn_E.reshape(1,145)),\\\n self.nn.predict(input_nn_S.reshape(1,145)),\\\n self.nn.predict(input_nn_O.reshape(1,145)),\\\n self.nn.predict(input_nn_N.reshape(1,145))] \n #print(U_list_y)\n maxU = np.max(U_list_y)\n #print(np.max(U_list_y))\n index_input_maxU = np.argmax(U_list_y) # the input given for the backprogating is the one with the maximum utility\n input_target = l_input[index_input_maxU] # The input target with the max utility, add to the tuple given during the experience replay\n uprime = self.agent.reward + self.gamma * maxU # input of the utility with the best value\n \n else:\n uprime = self.agent.reward\n input_target = np.array(None)\n \n action = self.agent.get_previousAction().index(1)\n input_nn = self.input_list[action]\n ##### Add to the lesson the action chose in order to go the next state, \n ##### the next state after to have performed the action, and the reward given\n if(self.action_proba[action] > 0.01): # the Pl minimum to choose the action corresponding to the action policy, cf to the paper part experience replay\n #next_states = [copy.deepcopy(input_nn_E).reshape(1,145), copy.deepcopy(input_nn_S).reshape(1,145), copy.deepcopy(input_nn_O).reshape(1,145), copy.deepcopy(input_nn_N).reshape(1,145)]\n self.memory.append((input_nn,action,np.asarray(copy.deepcopy(l_input)),self.agent.reward)) # We add the experiment to the memory of the agent \n \n ############################\n self.nn.train_one_step_other(input_nn,uprime)\n #self.nn.train(input_nn,tf.convert_to_tensor([[uprime]])) # use the method fit to train the neural network", "def _update_beliefs(self, features, beliefs):\n self.n_steps += 1\n if self.last_allocation is None:\n return beliefs\n for i_bin in range(self._n_bins):\n self.data[i_bin].append((features[i_bin], self.last_allocation[i_bin]))\n if self.params.burn_steps <= self.n_steps and self.n_steps % self.params.interval == 0:\n ll_model = _CensoredPoisson(\n np.array(self.data[i_bin][-self.params.window:]))\n results = ll_model.fit(disp=0)\n beliefs[i_bin] = results.params[0]\n return beliefs", "def run_heuristic_1(self):\n\n # store original upper bounds\n for arc in self.arc_info.keys():\n self.arc_info[arc][\"original_ub\"] =\\\n self.arc_info[arc][\"upper_bound\"]\n\n q = self.create_queue()\n updates = 0\n\n while (not q.empty()):\n # for every edge that has flow 0, set upper bound to 0\n # this ensures that we don't add an edge to remove this one\n for arc in self.arc_info.keys():\n if self.arc_info[arc][\"weight\"] == 0:\n self.arc_info[arc][\"upper_bound\"] = 0\n\n arc_id = q.get()[1]\n # print(\"Trying to adjust flow using arc {}\".format(arc_id))\n # set upper bound of this edge to 0\n self.arc_info[arc_id][\"upper_bound\"] = 0\n flow_found = self.update_flow()\n if flow_found:\n # start = self.arc_info[arc_id][\"start\"]\n # destin = self.arc_info[arc_id][\"destin\"]\n # print(\"Found flow without arc {}, ({},{}).\".format(arc_id,\n # start, destin))\n # create new queue from new flow\n q = self.create_queue()\n updates += 1\n\n # return bounds to original\n for arc in 
self.arc_info.keys():\n self.arc_info[arc][\"upper_bound\"] =\\\n self.arc_info[arc][\"original_ub\"]\n return(updates)", "def ModelBasedReflexAgentProgram(rules, update_state):\n\n def program(percept):\n program.state = update_state(program.state, program.action, percept)\n rule = rule_match(program.state, rules)\n action = rule.action\n return action\n\n program.state = program.action = None\n return program", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n Q_Value = self.Q #calling constructor\n\n learning_rate = self.alpha #gives us the learning rate\n\n temporary_QValue = self.getQValue(state,action) #to get the Q value of the state\n\n nextState_QValue = self.getValue(nextState) #to get the Q value of the landing state when taken action a and state s\n\n discount_factor = self.discount #to get the gamma/ discount factor\n\n\n Q_Value[(state,action)] = ((1-learning_rate) * temporary_QValue) + (learning_rate * (reward + discount_factor * nextState_QValue)) #for formula go to README_Reinforcement.txt at line 8\n\n #util.raiseNotDefined()", "def update_goal(self):\n pass", "def get_action(self, state):\n\n \"\"\"\n XXX: DO NOT MODIFY THAT FUNCTION !!!\n Doing so will result in a 0 grade.\n \"\"\"\n\n # XXX : You shouldn't care on what is going on below.\n # Variables are specified in constructor.\n if self.beliefGhostStates is None:\n self.beliefGhostStates = state.getGhostBeliefStates()\n if self.walls is None:\n self.walls = state.getWalls()\n\n # @TODO Put this back to normal\n ret = self.updateAndGetBeliefStates(\n self._computeNoisyPositions(state))\n\n if self.i < 25:\n debug = ret[0]\n self.l.append(np.max(debug))\n self.i += 1\n #if debug == 1: # To Stop as soon as convergence happens\n #self.i = 25\n\n prefix = 'data/' # To indicate path\n\n if self.i == 25:\n\n if os.path.exists(os.path.join(prefix, str(self.w) + \"-\" + str(self.p) + \".txt\")):\n os.remove(os.path.join(prefix, str(self.w) + \"-\" + str(self.p) + \".txt\"))\n\n f = open(os.path.join(prefix, str(self.w) + \"-\" + str(self.p) + \".txt\"), \"a\")\n first = True\n for data in self.l:\n if first:\n first = False\n f.write(str(data))\n else:\n f.write(\",\" + str(data))\n self.i += 1\n f.close()\n print(\"Done\")\n plt.plot(range(1, len(self.l)+1), self.l)\n plt.xlabel('Time step')\n plt.ylabel('Maximum probability')\n plt.title('Bayes Filter')\n plt.axis([0, self.i, 0, 1])\n plt.savefig(os.path.join(prefix, str(self.w) + \"-\" + str(self.p) + \".pdf\"), bbox_inches='tight')\n plt.show()\n\n return ret", "def update(self, state, action, nextState, reward):\n candidateQ = reward + self.discount * \\\n self.computeValueFromQValues(nextState)\n candidateQ_vec = np.array([[candidateQ]])\n features = self.featExtractor.getFeatures(state, action)\n features_as_list = [features[feat] for feat in self.allFeats]\n features_vec = np.array([features_as_list])\n self.train_step.run(feed_dict={self.x: features_vec,\n self.y_: candidateQ_vec}, session=self.sess)", "def cb_update(val):\n alpha_update = [sAlpha0.val, sAlpha1.val, sAlpha2.val]\n\n # update Dirichlet's parameters alpha\n dirichlet.set_param(alpha_update)\n draw_pdf_contours(axDirichlet, dirichlet, True) # Draw Dirichlet\n\n # MAP\n lambda_MAP = CatMAP.MAPinfer(x_cat, dirichlet)\n axMAP.cla()\n drawBarGraph( axMAP, \"MAP\", lambda_MAP, bar_y_max, col_MAP ) # Draw Bar graph\n\n # Bayes\n posteriorDirichlet.set_param(alpha_update)\n posteriorDirichlet.calcPosterior(x_cat)\n draw_pdf_contours(axPosteriorDirichlet, posteriorDirichlet) 
# Draw Posterior Dirichlet\n lambda_Bayes = np.zeros(3)\n for k in range(3):\n lambda_Bayes[k] = posteriorDirichlet.BayesInfer(k)\n\n axBayes.cla()\n drawBarGraph( axBayes, \"Bayes\", lambda_Bayes, bar_y_max, col_Bayes ) # Draw Bar graph\n\n print('Update')\n print('lambda_ML =', lambda_ML)\n print('lambda_MAP =', lambda_MAP)\n print('lambda_Bayes=', lambda_Bayes)\n draw_point(axDirichlet, lambda_ML, col_ML)\n draw_point(axDirichlet, lambda_MAP, col_MAP)\n draw_point(axDirichlet, lambda_Bayes, col_Bayes)\n draw_point(axPosteriorDirichlet, lambda_MAP, col_MAP)\n draw_point(axPosteriorDirichlet, lambda_Bayes, col_Bayes)\n\n fig.canvas.draw_idle()", "def action_tof(obj: TOF, thread: QtCore.QThread):\n w_actions = []\n # f_meas = obj.is_attribute(\"pd_meas\")\n # f_chi2 = obj.is_attribute(\"chi2\")\n # f_phase = obj.is_attribute(\"phase\")\n\n # l_pd_peak = []\n # if f_phase:\n # phase = obj.phase\n # for item in phase.items:\n # try:\n # pd_peak = getattr(obj, f\"pd_peak_{item.label.lower():}\")\n # l_pd_peak.append(pd_peak)\n # except AttributeError:\n # pass\n\n # f_setup = obj.is_attribute(\"setup\")\n # f_pd_instr_resolution = obj.is_attribute(\"pd_instr_resolution\")\n # f_pd_background = obj.is_attribute(\"pd_background\")\n # f_range = obj.is_attribute(\"range\")\n\n # if not(f_chi2 & f_meas & f_setup & f_pd_instr_resolution & f_phase &\n # f_pd_background & f_range):\n # if not f_chi2:\n # qtb_1 = QtWidgets.QToolButton()\n # qtb_1.setText(\"Add chi2\")\n # qtb_1.clicked.connect(lambda: add_items(obj, [Chi2()], thread))\n # w_actions.append(qtb_1)\n\n return w_actions", "def update(self, state, action, nextState, reward):\n # print \"Update\"\n difference = (reward + self.discount*self.compValFromState(nextState)) - self.getQValue(state, action)\n features = self.featExtractor.getFeatures(state, self.index)\n #print \"features\", features, \"difference\", difference, \"weights\", self.weights\n for key in self.weights:\n self.weights[key] = self.alpha * difference * features[key]", "def update1(self, state, action, nextState, reward):\n util.raiseNotDefined()", "def update(self, args):\n pass", "def _inform_F1_goal(self, goal, sys_history, domains=None):\n if domains is None:\n domains = self.belief_domains\n inform_slot = {}\n for domain in domains:\n inform_slot[domain] = set()\n TP, FP, FN = 0, 0, 0\n for da in sys_history:\n domain, intent, slot, value = da.split('~', 3)\n if intent in ['inform', 'recommend', 'offerbook', 'offerbooked'] and \\\n domain in domains and value.strip() not in NUL_VALUE:\n inform_slot[domain].add(slot)\n for domain in domains:\n for k, v in goal[domain].items():\n if v == '?':\n if k in inform_slot[domain]:\n TP += 1\n else:\n FN += 1\n for k in inform_slot[domain]:\n # exclude slots that are informed by users\n if k not in goal[domain] \\\n and (k in requestable[domain] or k == 'ref'):\n FP += 1\n return TP, FP, FN", "def update(self, new):\n if not new:\n # Empty list, clear the full list.\n self.queue = []\n return\n\n print(\"Age of first element in new cue: \", self.age(new[0][0]))\n while self.queue and self.queue[0][0] > new[0][0]:\n self.queue.pop(0)\n\n self.queue = self.queue + new\n \n while self.queue and self.age(self.queue[0][0]) > 0:\n self.queue.pop(0)\n\n max_age = self.config.config('mqtt', 'max_cue_future_seconds', 0)\n if max_age != 0:\n i = 0\n while i < len(self.queue):\n if self.age(self.queue[i][0]) < -max_age:\n # the element is too far in the future, purge it!\n self.queue.pop(i)\n i -= 1\n i += 1", "def __call__(self):\n\n 
self.initialise()\n\n # //\n # // ToDo: Add exception wrappers for plugin calls\n #//\n subJobs = TrackerDB.getJobsByState(\"submitted\", self.cooloff)\n self.updateSubmitted(*subJobs.keys())\n runningJobs = TrackerDB.getJobsByState(\"running\")\n self.updateRunning(*runningJobs.keys())\n completeJobs = TrackerDB.getJobsByState(\"complete\")\n self.updateComplete(*completeJobs.keys())\n failedJobs = TrackerDB.getJobsByState(\"failed\")\n self.updateFailed(*failedJobs.keys())\n self.cleanup()\n\n return", "def update(self):\n self.current_events = set()\n\n # Disambiguate deaths\n if self.joe.dead:\n while self.joe.size > 50:\n image, reward, done, info = self.env.step(0)\n self.joe.update(image)\n\n # Detect falls\n if self.joe.dead and self.joe.size > 20:\n self.current_events.add(Event(\"falls\", self.joe))\n\n # Detect collisions\n elif self.joe.dead:\n self.current_events.add(Event(\"collides\", self.joe, self.skull))\n self.skull.dead = True\n\n # Detect key acquisition\n elif self.current_reward == 100:\n self.current_events.add(Event(\"arrives\", self.joe, self.key))\n self.door = Object(\"Door+Key\", self.door.location, self.door.region)\n self.key = None\n\n # Detect success\n elif self.current_reward == 300:\n self.current_events.add(Event(\"arrives\", self.joe, self.door))\n self.door = None\n\n TaskInterface.update(self)", "def _inform_F1_goal(self, goal, sys_history, domains=None):\n if domains is None:\n domains = self.belief_domains\n inform_slot = {}\n for domain in domains:\n inform_slot[domain] = set()\n TP, FP, FN = 0, 0, 0\n for da in sys_history:\n domain, intent, slot, value = da.split('-', 3)\n if intent in ['inform', 'recommend', 'offerbook', 'offerbooked'] and \\\n domain in domains and value.strip() not in NUL_VALUE:\n inform_slot[domain].add(slot)\n for domain in domains:\n for k, v in goal[domain].items():\n if v == '?':\n if k in inform_slot[domain]:\n TP += 1\n else:\n FN += 1\n for k in inform_slot[domain]:\n # exclude slots that are informed by users\n if k not in goal[domain] \\\n and (k in requestable[domain] or k == 'ref'):\n FP += 1\n return TP, FP, FN", "def update(self, old_state, action, new_state, reward):\r\n \r\n old = self.get_q_value(old_state, action)\r\n best_future = self.best_future_reward(new_state)\r\n self.update_q_value(old_state,action, old, reward, best_future)", "def add_points_to_actions_update_queue(self, focus_points: {Point} = None) -> {Point}:\n self.new_points_since_last_actions_update.update(focus_points)\n return self.new_points_since_last_actions_update", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n self.qValues[(state, action)] = ((1 - self.alpha) * self.getQValue(state, action)) + self.alpha \\\n * (reward + self.discount * self.computeValueFromQValues(nextState))", "def act(self, action):\n action_name = action.op\n args = action.args\n list_action = first(a for a in self.actions if a.name == action_name)\n if list_action is None:\n raise Exception(\"Action '{}' not found\".format(action_name))\n if not list_action.check_precond(self.kb, args):\n raise Exception(\"Action '{}' pre-conditions not satisfied\".format(action))\n list_action(self.kb, args)", "def test_f1_activate_rules(self):\n config.NR_ROWS = 5\n config.NR_COLS = 5\n blinker = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]\n tick_changes = [(1, 2, 4), (2, 1, 1), (2, 3, 1), (3, 2, 4)]\n new_gamefield = logic.activate_rules(blinker, tick_changes)\n\n self.assertEqual(new_gamefield, [\n 
[0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0],\n ])", "def update_state(self, act):\n\n # check the checkbox logic\n if act in ['follow', 'not_follow', 'locate', 'not_locate']:\n self.check_locate_follow_logic(act)\n # test/record logic\n print(\"update function not implemented\")", "def apply_all(self):\n\n print(\"Are you sure? Enter 'y' if so\")\n\n if input() == 'y':\n\n for job in self.old_jobs:\n if job.is_relevant:\n job.reject('a') # 0 for apply\n self.jobs_save(self.old_jobs, 'overwrite')\n print('All relevant jobs have been marked as applied')\n\n else:\n print('returning to main menu')", "def actions(self, state):\n MovementList = []\n #Check if the agent is able to move a box (Left, Down, Right, Up) \n #without moving it into a taboo cell or pushing two blocks (Invalid move)\n #then move the box in the given direction.\n \n \n moves = [\"Up\", \"Down\", \"Left\", \"Right\"]\n opposite_moves = [\"Down\", \"Up\", \"Right\", \"Left\"]\n worker = state[0]\n boxes = state[1]\n temp_warehouse = self.warehouse.copy(worker, boxes)\n no_go = self.taboo.copy()\n walls = self.walls.copy()\n for wall in walls:\n no_go.append(wall)\n \n accessible = []\n \n for box in boxes:\n for i in range(len(moves)):\n surrounding_space = move_coords(box, opposite_moves[i])\n if can_go_there(temp_warehouse, move_coords(box, opposite_moves[i])):\n accessible.append((surrounding_space, moves[i]))\n \n for space_move in accessible:\n space = space_move[0]\n move = space_move[1]\n box_push_space = move_coords(move_coords(space, move), move)\n if (box_push_space in no_go) or (box_push_space in boxes):\n continue\n else:\n MovementList.append((move_coords(space, move), move)) \n print(\"Movement List: \", MovementList)\n \n if len(accessible) < 0: \n # Iterate throguh the moves and make sure they satify constraints\n for move in moves:\n if (move_coords(worker, move) not in no_go):\n if (move_coords(worker, move) in boxes):\n if move_coords(move_coords(worker, move), move) not in boxes:\n MovementList.append((move_coords(worker, move), move)) \n else:\n MovementList.append((move_coords(worker, move), move))\n \n \n \n \n \n return MovementList", "def applyOperators(self):\n sendList = [self.sendTwoM, self.sendTwoC, self.sendMC, self.sendM, self.sendC]\n bringList = [self.bringTwoM, self.bringTwoC, self.bringMC, self.bringM, self.bringC]\n result = []\n if self.boatLocation() == 1: # now boat is on destination side\n for operation in bringList:\n toAdd = operation()\n if toAdd is not None and toAdd.isValidState():\n result.append(toAdd)\n elif self.boatLocation() == 0: #now boat is on start side\n for operation in sendList:\n toAdd = operation()\n if toAdd is not None and toAdd.isValidState():\n result.append(toAdd)\n else:\n raise Exception\n return result", "def update_from_tuple(self, the_tuple):\n if not the_tuple.rowcount:\n raise Deque.ZeroTupleException(\"Error updating task\")\n\n row = the_tuple[0]\n\n if self.task_id != row[0]:\n raise Deque.BadTupleException(\"Wrong task: id's are not match\")\n\n self.state = row[1]\n self.next_event = row[2]\n self.msg_type = row[3]\n self.obj_type = row[4]\n self.obj_id = row[5]\n self.channel = row[6]\n self._to_send_at = row[7]\n self._valid_until = row[8]\n self._created_at = row[9]\n self.data = row[10]", "def match_rule(name, lhs, rhs, wm):\n print(\" ------------ Matching Rule '\", name, \"' --------------\")\n print(\" lhs = \", lhs)\n print(\" rhs = \", rhs)\n print(\" wm = \", wm)\n print()\n def 
mr_helper(queue, new_wm):\n # Each state in queue is\n # (anteceds-left, subs)\n # print(\" ----- matching rule helper ------\")\n # print(\" queue = \", queue)\n # print(\" new_wm = \", new_wm)\n # print()\n if queue == []: # if the queue is empty, return new_wm\n return new_wm\n else: # else examine the first item in the queue (call it state1)\n state1 = queue[0]\n if state1[0] == []: # If state1 has no antecedents, state1 is a goal state (the rule is matched);\n # call \"execute\" on rhs using the substitution in state1\n derived = execute(state1[1], rhs, new_wm)\n # But don't stop here (this is exhaustive):\n # return mr_helper applied to the rest of the queue, appending\n # whatever new WM assertions \"execute\" returned.\n new_wm = update_wm(new_wm, derived)\n return mr_helper(queue[1:], new_wm)\n elif state1[0] != []: # Else if state1 has antecedents, apply \"match_antecedent\" to them along with wm and the substitutions in state1.\n matched = match_antecedent(state1[0], wm, state1[1])\n if matched == []: # If \"match_antecedent\" returns no new states, return mr_helper on rest of the queue without changing states.\n return mr_helper(queue[1:], new_wm)\n else:\n # Else return mr_helper on the updated queue,\n # i.e., the old one with the new states found\n # by \"match_antecedent\" replacing state1\n queue = matched + queue[1:]\n return mr_helper(queue, new_wm)\n return mr_helper(match_antecedent(lhs, wm ,[]), [])", "def __call__(self, stack: Sequence[Dep], queue: Sequence[Dep]) -> Action:\n\n features={}\n\n #Extract the features\n features=feature_extraction(stack,queue)\n\n #Make the prediction\n predicted_val= [self.le.classes_[self.log_reg.predict(self.dictvec.transform(features))][0]]\n\n #Choose which action based on predicted value\n if predicted_val == [1]:\n return Action.SHIFT\n elif predicted_val == [2]:\n return Action.LEFT_ARC\n else:\n return Action.RIGHT_ARC", "def Run(self):\n # print \"len of tostartFlows \", len(self.sched.toStartFlows)\n # start all the flows along with updating related flow transfer time\n while self.sched.toStartFlows:\n # the first flow is with earliest startTime\n curStartFlow = self.sched.toStartFlows[0]\n # update flows if there are flows has already finished\n while self.sched.runningFlows:\n # the first flow is with earliest finishTime\n toFinishFlow = self.sched.runningFlows[0]\n if toFinishFlow.finishTime <= curStartFlow.startTime:\n # remove this flow from running flows\n self.sched.runningFlows.remove(toFinishFlow)\n # add this flow to finished flows\n self.sched.finishedFlows.append(toFinishFlow)\n # Update related flow's transfer time in removing a flow\n self.sched.UpdateFlow(toFinishFlow, \"remove\")\n # Resort runningFlows by endTime\n self.sched.runningFlows.sort(key=lambda x: x.finishTime)\n \n if self.Qlearning_enable == 1:\n self.action = toFinishFlow.pathNodeIds\n if len(self.action) == 5:\n self.pre_state = self.state[:]\n self.Update(toFinishFlow)\n self.printQlearningLog()\n #self.Update(self.pre_state, self.action, self.state, self.reward)\n\n else:\n break\n # insert current start flow to running list\n if self.Qlearning_enable == 1:\n self.routing.BuildPath(curStartFlow.startId, curStartFlow.endId, curStartFlow, self.state)\n else:\n self.routing.BuildPath(curStartFlow.startId, curStartFlow.endId, curStartFlow)\n pathNodeIds = self.routing.GetPath(curStartFlow.startId, curStartFlow.endId)\n curStartFlow.BuildPath(pathNodeIds)\n self.sched.runningFlows.append(curStartFlow)\n # Update related flow's transfer time in 
removing a flow\n # self.lb(curStartFlow)\n # self.topo.GetLinkOfLeastFlow()\n # Step 1 find out which spine is less loaded\n\n # Hedera load balancing for spine leaf\n # print self.topo.GetCoreLeastFlow()\n # if self.topo.name == \"spineleaf\":\n # if self.topo.GetCoreLeastFlow() not in curStartFlow.pathNodeIds:\n # if len(curStartFlow.pathNodeIds) == 5:\n # if curStartFlow.coflowId == 0:\n # self.changeSpine(curStartFlow, self.topo.GetCoreLeastFlow())\n # # print \"general flow reroute to spine {}\".format(self.topo.GetCoreLeastFlow().nodeId)\n # else:\n # self.changeSpine(curStartFlow,\n # self.topo.GetCoreNode((curStartFlow.coflowId % self.topo.numOfCores)+1))\n # print \"coflow reroute to spine {}\".format(self.topo.GetCoreNode((curStartFlow.coflowId % self.topo.numOfCores)+1).nodeId)\n #print curStartFlow.pathNodeIds\n # Less loaded in terms of more flows\n\n # update state and reward for Qlearning algorithm\n self.sched.UpdateFlow(curStartFlow, \"insert\")\n #self.updatenum += 1\n #print \"updatenum= \",self.updatenum\n if self.Qlearning_enable == 1:\n self.action = curStartFlow.pathNodeIds\n if len(self.action) == 5:\n self.pre_state = self.state[:]\n self.Update(curStartFlow)\n self.printQlearningLog()\n #self.Update(self.pre_state, self.action, self.state, self.reward)\n\n # Resort runningFlows by endTime\n self.sched.runningFlows.sort(key=lambda x: x.finishTime)\n # remove this flow from start list\n self.sched.toStartFlows.remove(curStartFlow)\n #print \"finished\"\n #print len(self.sched.toStartFlows)\n\n # Now, all the flows are started\n # Iteratively update flow's transfer time in running list until all the flows are finished\n while self.sched.runningFlows:\n # the first flow is always with earliest finish Time\n curFinishFlow = self.sched.runningFlows[0]\n # remove it from running list\n self.sched.runningFlows.remove(curFinishFlow)\n # insert it to finished flows\n self.sched.finishedFlows.append(curFinishFlow)\n # Update related flow's transfer time in removing a flow\n self.sched.UpdateFlow(curFinishFlow, \"remove\")\n # Resort runningFlows by endTime\n self.sched.runningFlows.sort(key=lambda x: x.finishTime)\n\n # update state and reward for Qlearning algorithm\n if self.Qlearning_enable == 1:\n self.action = curFinishFlow.pathNodeIds\n if len(self.action) == 5:\n self.pre_state = self.state[:]\n self.Update(curFinishFlow)\n self.printQlearningLog()\n #self.Update(self.pre_state, self.action, self.state, self.reward)\n\n\n # Finally, all the flows are finished\n self.sched.PrintFlows()\n if self.Qlearning_enable == 1:\n self.logf.close()\n # print \"final stateId= \", self.stateId", "def setBeliefs(self, position, idx):\n self.beliefs[idx] = util.Counter()\n self.beliefs[idx][position] = 1.0", "def __init__(self, good, elements, action):\n self.good = good\n self.elements = elements\n self.action = action\n self.name = None", "def do_bay_update(cs, args):\n if args.rollback and args.magnum_api_version and \\\n args.magnum_api_version in ('1.0', '1.1', '1.2'):\n raise exceptions.CommandError(\n \"Rollback is not supported in API v%s. 
\"\n \"Please use API v1.3+.\" % args.magnum_api_version)\n patch = magnum_utils.args_array_to_patch(args.op, args.attributes[0])\n bay = cs.bays.update(args.bay, patch, args.rollback)\n if args.magnum_api_version and args.magnum_api_version == '1.1':\n _show_bay(bay)\n else:\n print(\"Request to update bay %s has been accepted.\" % args.bay)", "def update(self, state, action, nextState, reward):\n \"\"\"Description:\n Use second equation in slide 71 of MDP\n Adjest weight of active features depend on tranistion \n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n feat = self.featExtractor.getFeatures(state, action)\n\n # if weight is empty, then weight will need to initial to 1 for all features\n # According to which Extractor user choose, weight counter will have equal number of keys.\n if len(self.weight) == 0:\n feat = self.featExtractor.getFeatures(state, action)\n self.weight.incrementAll(feat.keys(), 1)\n \n maxQns = self.getValue(nextState)\n if maxQns == None:\n maxQns = 0\n Qsa = self.getQValue(state, action)\n difference = ( reward + self.discountRate * maxQns ) - Qsa\n \n for key in self.weight.keys():\n self.weight[key] += (self.alpha * difference * feat[key])\n \n \n \"\"\" END CODE \"\"\"", "def chooseAction(self, gameState):\n if self.getPreviousObservation() is not None:\n lastCapsule = self.getCapsules(self.getPreviousObservation())\n else:\n lastCapsule = None\n x, y = self.getCurrentObservation().getAgentPosition(self.index)\n self.dangerousFood = [i for i in self.deadWidth.keys() if i in self.getFood(gameState).asList()]\n self.safeFood = [i for i in self.getFood(gameState).asList() if i not in self.dangerousFood]\n # this list stores all the actions can be taken according to different goals\n actions = {}\n\n # this list stores the scores of each goal, which indicates the tendency of choose that goal.\n scores = {}\n # find actions to go to boarder\n boarder_goal, disToBoarder = self.getBoarderGoal(gameState)\n action = self.aStarSearch(gameState, boarder_goal, self.simpleHeuristic)\n actions[\"go_to_boarder\"] = action\n\n # # find the actions to another boarder\n # another_boarder_goal, disToBoarder = self.getAnotherBoarderGoal(gameState, boarder_goal)\n # action = self.aStarSearch(gameState, another_boarder_goal, self.nullHeuristic)\n # actions[\"another_boarder\"] = action\n\n # find actions to return border\n boarder_goal, disToBoarder = self.getBoarderGoal(gameState)\n action = self.aStarSearch(gameState, boarder_goal, self.GeneralHeuristic)\n actions[\"return_boarder\"] = action\n\n # actions to eat capsules\n capsule_goal, disToCapsule = self.getCapsuleGoal(gameState)\n currentCapsule = self.getCapsules(gameState)\n if currentCapsule is None and lastCapsule is not None:\n self.timer = 20\n elif lastCapsule is not None and currentCapsule is not None:\n if len(lastCapsule) - len(currentCapsule) == 1:\n self.timer = 20\n action = self.aStarSearch(gameState, capsule_goal, self.GeneralHeuristic)\n actions[\"capsule\"] = action\n\n # actions to eat safe food\n safe_food_goal, disToSafeFood = self.getSafeFoodGoal(gameState)\n # if (x, y) == (22, 8):\n # print(1)\n capsule_goal, disToCapsule = self.getCapsuleGoal(gameState)\n self.walls.append(capsule_goal)\n action = self.aStarSearch(gameState, safe_food_goal, self.GeneralHeuristic)\n actions[\"safeFood\"] = action\n\n # actions to eat dangerous food\n dangerous_food_goal, disToDangerousFood = self.getDangerousFoodGoal(gameState)\n action = self.aStarSearch(gameState, dangerous_food_goal, self.GeneralHeuristic)\n 
self.walls.remove(capsule_goal)\n actions[\"dangerousFood\"] = action\n\n # calculate the scores for each action\n\n ghost_goal, ghostDis = self.getGhostGoal(gameState)\n foodNum = self.numOfFood(gameState)\n safeDis = disToSafeFood\n safeDis = self.isZero(safeDis)\n dangerousDis = disToDangerousFood\n dangerousDis = self.isZero(dangerousDis)\n carried = self.getCurrentObservation().getAgentState(self.index).numCarrying\n disToBoarder = self.isZero(disToBoarder)\n\n # choose actions\n if not self.isRed:\n x = self.Width - x\n\n if x < self.midWidth - 1:\n if self.blocked:\n action = actions[\"another_boarder\"]\n if self.previousActions.full():\n self.previousActions.get()\n self.previousActions.put(action)\n return action\n else:\n if foodNum < 3:\n return actions[\"return_boarder\"]\n else:\n if safe_food_goal is not None:\n scores[\"return_boarder\"] = carried / math.sqrt(disToBoarder)\n scores[\"safeFood\"] = 100 / ((carried + 1) * safeDis)\n if disToCapsule == None:\n scores[\"capsule\"] = 0\n else:\n capsuleDis = self.isZero(disToCapsule)\n if self.timer > 0:\n self.timer -= 1\n scores[\"capsule\"] = 0\n else:\n scores[\"capsule\"] = 30 / capsuleDis\n max = -99999\n key = 0\n for k, v in scores.items():\n if v != 0:\n if v > max:\n max = v\n key = k\n return actions[key]\n else:\n scores[\"return_boarder\"] = carried / math.sqrt(disToBoarder)\n\n if disToCapsule == None:\n scores[\"capsule\"] = 0\n else:\n capsuleDis = self.isZero(disToCapsule)\n if self.timer > 0:\n self.timer -= 1\n scores[\"capsule\"] = 0\n\n else:\n scores[\"capsule\"] = 500 / capsuleDis\n if dangerousDis is not None:\n if ghostDis is not None:\n scaredTimer = self.opponentscaredTime(gameState)\n if scaredTimer is None:\n if ghostDis > 5:\n ghostDis = 5\n if dangerousDis + self.deadWidth[dangerous_food_goal] + 1 < ghostDis:\n scores[\"dangerousFood\"] = 30 / ((carried + 1) * dangerousDis)\n else:\n scores[\"dangerousFood\"] = 3 / ((carried + 1) * dangerousDis)\n else:\n if dangerousDis + self.deadWidth[dangerous_food_goal] + 1 < scaredTimer:\n scores[\"dangerousFood\"] = 30 / ((carried + 1) * dangerousDis)\n else:\n scores[\"dangerousFood\"] = 8 / ((carried + 1) * dangerousDis)\n else:\n scores[\"dangerousFood\"] = 20 / ((carried + 1) * dangerousDis)\n max = -99999\n key = 0\n for k, v in scores.items():\n if v != 0:\n if v > max:\n max = v\n key = k\n return actions[key]\n else:\n if foodNum < 3:\n return actions[\"return_boarder\"]\n else:\n if safe_food_goal is not None:\n scores[\"return_boarder\"] = carried / math.sqrt(disToBoarder)\n scores[\"safeFood\"] = 30 / ((carried + 1) * safeDis)\n if disToCapsule == None:\n scores[\"capsule\"] = 0\n else:\n capsuleDis = self.isZero(disToCapsule)\n if self.timer > 0:\n self.timer -= 1\n scores[\"capsule\"] = 0\n scores[\"return_boarder\"] = carried / disToBoarder\n else:\n scores[\"capsule\"] = 10 / capsuleDis\n max = -99999\n key = 0\n for k, v in scores.items():\n if v != 0:\n if v > max:\n max = v\n key = k\n return actions[key]\n else:\n scores[\"return_boarder\"] = carried / math.sqrt(disToBoarder)\n if disToCapsule == None:\n scores[\"capsule\"] = 0\n else:\n capsuleDis = self.isZero(disToCapsule)\n if self.timer > 0:\n self.timer -= 1\n scores[\"capsule\"] = 0\n scores[\"return_boarder\"] = carried / disToBoarder\n else:\n scores[\"capsule\"] = 500 / capsuleDis\n if dangerousDis is not None:\n if ghostDis is not None:\n scaredTimer = self.opponentscaredTime(gameState)\n if scaredTimer is None:\n if ghostDis > 5:\n ghostDis = 5\n if dangerousDis + 
self.deadWidth[dangerous_food_goal] + 1 < ghostDis:\n scores[\"dangerousFood\"] = 30 / ((carried + 1) * dangerousDis)\n else:\n scores[\"dangerousFood\"] = 3 / ((carried + 1) * dangerousDis)\n else:\n if dangerousDis + self.deadWidth[dangerous_food_goal] + 1 < scaredTimer:\n scores[\"dangerousFood\"] = 30 / ((carried + 1) * dangerousDis)\n else:\n scores[\"dangerousFood\"] = 8 / ((carried + 1) * dangerousDis)\n else:\n scores[\"dangerousFood\"] = 20 / ((carried + 1) * dangerousDis)\n max = -99999\n key = 0\n for k, v in scores.items():\n if v != 0:\n if v > max:\n max = v\n key = k\n return actions[key]", "def updateBolts(self,input):\r\n check = False\r\n i = 0\r\n while i < len(self._bolts):\r\n if self._bolts[i].bottom >= GAME_HEIGHT or self._bolts[i].top <= 0:\r\n del self._bolts[i]\r\n else:\r\n i += 1\r\n self._aliensBolt()\r\n for bolt in self._bolts:\r\n bolt.y += bolt.getVelocity()\r\n check = (check or bolt.isPlayerBolt())\r\n self._shipBolt(input,check)", "def update(self, obs, actions, rewards, new_obs):\n pass", "def perTickActions(self, timeNow):\n if self.newRegList:\n facAddrL = [tpl[1] for tpl in self.patch.serviceLookup('RegistryUpdateQueue')\n if not self.patch.group.isLocal(tpl[1])]\n payload = self.newRegList[:]\n for fA in facAddrL:\n msg = RegistryGroupUpdateMsg(self.name + '_groupUpdateMsg',\n self.patch, payload, fA, debug=True)\n self.patch.launch(msg, timeNow)\n self.newRegList = []", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n diff = reward + self.discount * self.computeValueFromQValues(nextState) - self.getQValue(state, action)\n for feature_name, feature_value in self.featExtractor.getFeatures(state, action).iteritems():\n self.weights[feature_name] += self.alpha * diff * feature_value", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n diff = reward + self.discount * self.computeValueFromQValues(nextState) - self.getQValue(state, action)\n for feature_name, feature_value in self.featExtractor.getFeatures(state, action).iteritems():\n self.weights[feature_name] += self.alpha * diff * feature_value", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n # print \"update\"\n oldValue = self.getQValue(state, action)\n sample = reward + self.discount*self.computeValueFromQValues(nextState)\n self.qValues[(state, action)] = (1-self.alpha)*oldValue + self.alpha*(sample)", "def __init__(self, factions, items):\n self.factions = factions\n self.items = items", "def computeActionFromValues(self, state):\n\n ##util.raiseNotDefined()\n #\"*** YOUR CODE STARTS HERE ***\"\n \n # Code to remove --- from here\n resultingAction = None\n if self.mdp.isTerminal(state):\n return resultingAction\n else:\n bestq = float(\"-inf\")\n actions = self.mdp.getPossibleActions(state)\n for action in actions:\n qvalue = self.computeQValueFromValues(state, action)\n if qvalue > bestq:\n bestq = qvalue\n resultingAction = action\n return resultingAction\n\n # Code to remove --- to here\n #\"*** YOUR CODE FINISHES HERE ***\"", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n temporal_difference = reward + self.discount * self.computeValueFromQValues(nextState) - self.getQValue(state, action)\n\n val = self.getQValue(state, action) + self.alpha * temporal_difference\n\n self.values[(str(position), action)] = val\n\n with open(\"dataFakeGoal.txt\", \"a+\") as f:\n f.write(str(position) + \"|\" + action + \"|\" + 
str(val) + \"\\n\")", "def update(self):\n self.wall_list.update()\n self.enemy_list.update()\n self.sludge.update()\n self.consumeable.update()\n self.can_climb.update()", "def updateWorkStatus(self, args, pooltable = 'tp_threadpool'):\n\n # differentiate between onequeu and multi queue\n if pooltable in ['tp_threadpool', 'tp_threadpool_buffer_in', \\\n 'tp_threadpool_buffer_out']:\n sqlStr = \"\"\"\nUPDATE %s SET state='queued' WHERE component = :componentName\nAND thread_pool_id = :thread_pool_id\n \"\"\" % (pooltable)\n self.execute(sqlStr, args)\n else:\n sqlStr = \"\"\"\nUPDATE %s SET state=\"queued\"\n \"\"\" % (pooltable)\n self.execute(sqlStr, {})\n\n return", "def solve(self):\n\n self.queue.add(*self.moved.items)\n self.solving = True\n self.moved.items = []", "def do_action(actions, state):\n seed(time.time())\n uid = state[\"uid\"]\n is_banker = state[\"banker\"]\n hole_cards = state[\"pocket\"]\n board_cards = state[\"community\"]\n player_bet_to = state[\"player_bet_to\"]\n oppo_bet_to = state[\"oppo_bet_to\"]\n num_players = state[\"players\"]\n pot_size = player_bet_to + oppo_bet_to\n allowed_actions = [k for k in actions if actions[k] is not False]\n logging.info(\n \"req: uid={}, banker={}, pocket={}, community={}, pot={}, player={}, oppo={}, allowed actions={}\"\n .format(\n uid,\n is_banker,\n hole_cards,\n board_cards,\n pot_size,\n player_bet_to,\n oppo_bet_to,\n allowed_actions\n )\n )\n\n if len(board_cards) < 3:\n # odds = min(EPS + pre_flop_strength(hole_cards) * 0.25, 1 - EPS)\n odds = PRE_FLOP_ODDS[pre_flop_strength(hole_cards)]\n else:\n odds = calculate_odds(hole_cards, board_cards, num_players)\n\n evs = []\n # check, call, raise, allin\n bet_actions = []\n bet_action_sizes = []\n bet_ev = odds * pot_size\n\n # ------------------------------\n # expected value for each action\n # ------------------------------\n def expected_value(odds, bet_size):\n return bet_ev - (1 - odds) * bet_size\n\n # fold\n # if actions[\"fold\"]:\n # ev = expected_value(odds, -actions[\"call\"])\n # if ev >= EV_THRESHOLD:\n # bet_actions.append(FOLD)\n # bet_action_sizes.append(0)\n # evs.append(ev)\n # check\n if actions[\"check\"]:\n bet_actions.append(CHECK)\n bet_action_sizes.append(0)\n evs.append(expected_value(odds, 0))\n # call\n if not isinstance(actions[\"call\"], bool):\n ev = expected_value(odds, actions[\"call\"])\n if ev >= EV_THRESHOLD:\n bet_actions.append(CALL)\n bet_action_sizes.append(actions[\"call\"])\n evs.append(ev)\n # raise\n if not isinstance(actions[\"raise\"], bool):\n # bet to allin\n for bet_size in range(*actions[\"raise\"]):\n ev = expected_value(odds, bet_size)\n if ev >= EV_THRESHOLD:\n bet_actions.append(RAISE)\n bet_action_sizes.append(bet_size)\n evs.append(ev)\n # print(\n # \"odds: %f, pot: %d, bet_size: %d, ev: %f\"\n # % (odds, pot_size, bet_size, ev)\n # )\n # allin\n if not isinstance(actions[\"allin\"], bool):\n # allin\n ev = expected_value(odds, actions[\"allin\"])\n if ev >= EV_THRESHOLD:\n bet_actions.append(ALLIN)\n bet_action_sizes.append(actions[\"allin\"])\n evs.append(ev)\n # ----------------\n # sample an action\n # ----------------\n\n def bet_or_fold(odds, bet_size):\n bet_prob = bet_ev / (bet_ev + (1 - odds) * bet_size)\n return (bet_prob, 1 - bet_prob)\n if evs:\n # 1. a bet action\n # normalization\n evs = [v + EPS for v in evs]\n sum_evs = sum(evs)\n probs = [v / sum_evs for v in evs]\n idx = sample_action(probs)\n action = bet_actions[idx]\n action_size = bet_action_sizes[idx]\n # 2. 
bet or fold\n # fold\n if actions[\"fold\"]:\n bet_or_fold_prob = bet_or_fold(odds, action_size)\n idx = sample_action(bet_or_fold_prob)\n if idx == 1:\n action = FOLD\n action_size = 0\n else:\n action = FOLD\n action_size = 0\n logging.info(\n \"rsp: uid={}, action={}, bet_size={}, odds={}\"\n .format(\n uid, action, action_size, odds\n )\n )\n return action, action_size", "def apply(self, context):\n\n state = _get_state_data(self._battleship_addr, context)\n LOGGER.debug('Applying changes to state\\nCURRENT STATE:\\n%s', state)\n\n self.check_valid(state)\n\n if self._action == 'CREATE':\n state[self._name] = {'State': 'NEW', 'Ships': self._ships}\n elif self._action == 'JOIN':\n game = state[self._name].copy()\n\n # If this is the first JOIN, set HashedBoard1 and Player1 in the\n # store. if this is the second JOIN, set HashedBoard2 and\n # Player2 in the store. Also, initialize TargetBoard1 and\n # TargetBoard2 as empty.\n if 'Player1' not in game:\n game['HashedBoard1'] = self._board\n size = len(self._board)\n game['TargetBoard1'] = [['?'] * size for _ in range(size)]\n game['Player1'] = self._signer_public_key\n else:\n game['HashedBoard2'] = self._board\n size = len(self._board)\n game['TargetBoard2'] = [['?'] * size for _ in range(size)]\n game['Player2'] = self._signer_public_key\n\n # Move to 'P1-NEXT' as both boards have been entered.\n game[\"State\"] = 'P1-NEXT'\n\n state[self._name] = game\n elif self._action == 'FIRE':\n game = state[self._name].copy()\n\n # Reveal the previously targeted space\n if 'LastFireColumn' in game:\n if game['State'] == 'P1-NEXT':\n target_board = 'TargetBoard2'\n else:\n target_board = 'TargetBoard1'\n\n col = ord(game['LastFireColumn']) - ord('A')\n row = int(game['LastFireRow']) - 1\n if self._reveal_space != '-':\n game[target_board][row][col] = 'H'\n else:\n game[target_board][row][col] = 'M'\n\n # calculate number of hits for later determination\n # of win\n number_of_hits = sum(sum([1 if space == 'H' else 0\n for space in row])\n for row in game[target_board])\n else:\n number_of_hits = None\n\n # Update LastFireColumn and LastFireRow in the store so\n # they can be used in the next transaction.\n game['LastFireColumn'] = self._column\n game['LastFireRow'] = self._row\n\n # if the game has been won, change the State\n # to P1-WIN or P2-WIN as appropriate\n total_ship_spaces = sum([len(ship) for ship in game['Ships']])\n if number_of_hits is not None and \\\n total_ship_spaces == number_of_hits:\n if target_board == 'TargetBoard2':\n game['State'] = 'P2-WIN'\n else:\n game['State'] = 'P1-WIN'\n\n if game['State'] == 'P1-NEXT':\n game['State'] = 'P2-NEXT'\n elif game['State'] == 'P2-NEXT':\n game['State'] = 'P1-NEXT'\n\n state[self._name] = game\n else:\n raise InvalidTransaction(\n \"invalid state: {}\".format(state[self._name].copy))\n\n _store_state_data(self._battleship_addr, state, context)", "def update(self, arm, context, reward):\n raise NotImplementedError", "def update(self, arm, context, reward):\n raise NotImplementedError", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n temporal_difference = reward + self.discount * self.computeValueFromQValues(nextState) - self.getQValue(state, action)\n\n val = self.getQValue(state, action) + self.alpha * temporal_difference\n\n self.values[(str(position), action)] = val\n\n with open(\"dataFakeGoal1.txt\", \"a+\") as f:\n f.write(str(position) + \"|\" + action + \"|\" + str(val) + \"\\n\")", "def 
backpropegate(self, trace, reward):\n for i in range(len(trace)-1, 0, -1):\n action = trace[i][1]\n state = trace[i-1][0]\n self.node_pool[state].backprop(action, reward)", "def get_action(self):\n # Actual q and dq\n contacts = np.array(\n [(leg_state in (gait_generator_lib.LegState.STANCE,\n gait_generator_lib.LegState.EARLY_CONTACT))\n for leg_state in self._gait_generator.desired_leg_state],\n dtype=np.float64)\n foot_positions = self._robot.GetFootPositionsInBaseFrame()\n\n robot_com_height = self._estimate_robot_height(contacts, foot_positions)\n robot_com_velocity = self._state_estimator.com_velocity_body_frame\n robot_com_roll_pitch_yaw = np.array(self._robot.GetBaseRollPitchYaw())\n robot_com_roll_pitch_yaw[2] = 0. # To prevent yaw drifting\n robot_com_roll_pitch_yaw_rate = self._robot.GetBaseRollPitchYawRate()\n robot_q = np.hstack(([0., 0., robot_com_height], robot_com_roll_pitch_yaw))\n robot_dq = np.hstack((robot_com_velocity, robot_com_roll_pitch_yaw_rate))\n # Desired q and dq\n desired_com_position = np.array((0., 0., self._desired_body_height),\n dtype=np.float64)\n desired_com_velocity = np.array(\n (self.desired_speed[0], self.desired_speed[1], 0.), dtype=np.float64)\n desired_com_roll_pitch_yaw = np.array((0., 0., 0.), dtype=np.float64)\n desired_com_angular_velocity = np.array(\n (0., 0., self.desired_twisting_speed), dtype=np.float64)\n desired_q = np.hstack((desired_com_position, desired_com_roll_pitch_yaw))\n desired_dq = np.hstack(\n (desired_com_velocity, desired_com_angular_velocity))\n # Desired ddq\n desired_ddq = KP * (desired_q - robot_q) + KD * (desired_dq - robot_dq)\n desired_ddq = np.clip(desired_ddq, MIN_DDQ, MAX_DDQ)\n contact_forces = self._qp_torque_optimizer.compute_contact_force(\n foot_positions, desired_ddq, contacts=contacts)\n\n action = {}\n for leg_id, force in enumerate(contact_forces):\n # While \"Lose Contact\" is useful in simulation, in real environment it's\n # susceptible to sensor noise. 
Disabling for now.\n # if self._gait_generator.leg_state[\n # leg_id] == gait_generator_lib.LegState.LOSE_CONTACT:\n # force = (0, 0, 0)\n motor_torques = self._robot.MapContactForceToJointTorques(leg_id, force)\n for joint_id, torque in motor_torques.items():\n action[joint_id] = (0, 0, 0, 0, torque)\n return action, contact_forces", "def update(self, *args, **kwargs):", "def forward(self, screen, minimap, flat, available_actions):\n # push each input through the network\n screen = self.screen_features(screen)\n minimap = self.minimap_features(minimap)\n flat = self.flat_features(flat)\n\n flattened_screen = screen.view(1, -1)\n flattened_mm = minimap.view(1, -1)\n\n latent_vector = torch.cat([flat, flattened_screen, flattened_mm], 1)\n features = self.combined_features(latent_vector)\n\n value = self.value_predictor(features)\n action = self.policy_action(features)\n\n policy_args = dict()\n for arg in actions.TYPES:\n for dim, size in enumerate(arg.sizes):\n module_name = self.get_argument_module_name(arg, dim)\n operator = getattr(self, module_name)\n policy_args[module_name] = operator(features)\n\n return action, policy_args, value", "def test_updatePlayerbHist_singleaction(self):\n self.assertEqual(self.player.bHist[1], [cardutils.BETSTRING_DICT['CALL']])", "def test_updatePlayerbHist_multiactions_2(self):\n self.assertEqual(self.player.bHist[0][1], cardutils.BETSTRING_DICT['CALL'])", "def update(self):\n #self._switch.odlclient._request_json(self._path, method=\"put\", json={\n # \"flow\": self._odl_inventory()\n #})\n self.remove() # actually, remove only uses self.switch and self.id, so this removes the other entry as well.\n self.deploy()", "def getAction(self, observation):\n \n beliefs = []\n noisyRangeMeasurements, prevAction, gameState = observation\n if self.observeEnable:\n self.inferenceModule.observe(prevAction, noisyRangeMeasurements)\n beliefs.append(self.inferenceModule.getWallBeliefDistribution())\n beliefs.append(self.inferenceModule.getPositionBeliefDistribution())\n self.display.updateDistributions(beliefs)\n return self.chooseAction(gameState)", "def qUpdate(self,state,action,reward,next_state):\r\n #get delta\r\n \r\n #delta = reward + self.gamma * self.Q(next_state,next_action) \\\r\n # - self.Q(state,action)\r\n \r\n #get e update\r\n #self.e = self.gamma *self.lam * self.e - self.grad(state,action)\r\n \r\n \r\n #do update to w\r\n \r\n #self.w = self.alpha * delta * self.e\r\n #get difference between current q and new q\r\n \r\n delta = reward + self.gamma * self.maxQ(next_state)[0] - \\\r\n self.Q(state,action) \r\n #update w\r\n self.w = self.w + self.alpha * delta * self.grad(state,action)" ]
[ "0.62839353", "0.5601004", "0.54203224", "0.523504", "0.523504", "0.523504", "0.523504", "0.52241856", "0.51511395", "0.5150723", "0.5111748", "0.50751257", "0.5067793", "0.50459915", "0.5037544", "0.50247914", "0.4989329", "0.4953486", "0.49498907", "0.49385783", "0.49159616", "0.48950887", "0.48764834", "0.48609462", "0.48405632", "0.48395693", "0.48154777", "0.48101878", "0.48097143", "0.48087364", "0.48033226", "0.47870412", "0.4776434", "0.4756237", "0.47515598", "0.47448295", "0.47307277", "0.4727935", "0.472038", "0.47144988", "0.4698528", "0.46776938", "0.46680045", "0.46629268", "0.4661188", "0.46530014", "0.4650523", "0.46457264", "0.4643604", "0.46368375", "0.46274233", "0.4620578", "0.46120843", "0.45973197", "0.45939007", "0.45909452", "0.45905048", "0.45860282", "0.45812958", "0.456555", "0.45631075", "0.45625898", "0.45609242", "0.4550656", "0.45437258", "0.4542965", "0.45425949", "0.45365798", "0.45345408", "0.45334017", "0.45299077", "0.45294243", "0.45292008", "0.45189095", "0.45073646", "0.45069972", "0.45035726", "0.45033735", "0.45033735", "0.45024854", "0.45005283", "0.4498269", "0.44965446", "0.44945866", "0.44903135", "0.448986", "0.44893357", "0.4488735", "0.4480765", "0.4480765", "0.44795114", "0.44760334", "0.44756752", "0.44680774", "0.4465172", "0.4464293", "0.44641575", "0.4457788", "0.44546714", "0.44532117" ]
0.60782725
1
Calls the belief module's emit_belief method to get and return a Beliefs object with the agent's chosen belief for emission.
def emit_belief(self, args):
    goal, belief = args

    return [{belief: self.belief_module.emit_belief()}]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_belief(self, args):\n goal, belief = args\n\n if isinstance(belief, Beliefs):\n self.belief_module.process_belief(belief)\n self.initialize_action_queue()\n\n return [{}]", "def calculateBeliefs(self):\n\n belief = {}\n\n for question in self.getQuestions():\n q = str(question.id)\n belief[q] = self.HELPER_init_belief()\n\n #print belief[q]\n for answer in self.getQuestionCompletedAnswers(question):\n #print q\n #print str(answer.question.id)\n assert str(answer.question.id) == q\n w_skill = answer.worker.inference_results['EM']['skill']\n # answer.value must be \"0\" or \"1\"\n assert answer.value == \"0\" or answer.value == \"1\"\n #print answer.value, w_skill\n belief[q] = self.HELPER_update_belief(belief[q], answer.value, w_skill)\n #print belief[q]\n\n #print \"Question beliefs:\", belief\n #print \"##################\"\n return belief", "def getBeliefDistribution(self):\n pass", "def update_beliefs(self, corpus_id):\n logger.info('Updating beliefs for corpus \"%s\"' % corpus_id)\n # TODO check which options are appropriate for get_corpus\n corpus = self.get_corpus(corpus_id)\n be = BeliefEngine(self.scorer)\n stmts = list(corpus.statements.values())\n be.set_prior_probs(stmts)\n # Here we set beliefs based on actual curation\n for uuid, correct in corpus.curations.items():\n stmt = corpus.statements.get(uuid)\n if stmt is None:\n logger.warning('%s is not in the corpus.' % uuid)\n continue\n stmt.belief = correct\n belief_dict = {st.uuid: st.belief for st in stmts}\n return belief_dict", "def belief_revision(self):\n\n # Store the coherence of the belief_network before the belief revision has taken place\n network_history = self.belief_network.copy()\n self.coherence_history = self.coherence(network_history)\n\n # Add the newly communicated nodes to the belief_network\n if self.communicated_nodes is not None:\n for node in self.communicated_nodes:\n self.belief_network.nodes[node[0]]['truth_value'] = node[1]\n self.belief_network.nodes[node[0]]['type'] = 'com'\n\n # Get the inferred nodes and its combinations of truth values in order to explore different coherence values\n inferred_nodes = [x for x, y in self.belief_network.nodes(data=True) if y['type'] == 'inf']\n combinations = list(itertools.product([True, False], repeat=len(inferred_nodes)))\n\n # Calculate the coherence for all possible combinations\n\n # Initialise a list to store the different coherence values in\n coherence_values = []\n\n for n in range(len(combinations)):\n # Initialise a count for the number of inferred nodes\n i = 0\n for inferred_node in inferred_nodes:\n self.belief_network.nodes[inferred_node]['truth_value'] = combinations[n][i]\n i += 1\n coherence_values.append(self.coherence(self.belief_network))\n\n # Store all the indices of the maximum coherence values in a list and pick one randomly\n max_coherence = max(coherence_values)\n max_indices = [i for i in range(len(coherence_values)) if coherence_values[i] == max_coherence]\n nodes_truth_values_index = random.choice(max_indices)\n\n # Set the truth values of the inferred nodes to (one of) the maximum coherence option(s)\n i = 0\n for inferred_node in inferred_nodes:\n self.belief_network.nodes[inferred_node]['truth_value'] = combinations[nodes_truth_values_index][i]\n i += 1\n\n # If at least one node is flipped, belief revision has taken place and the coherence should be compared\n # with the previous belief_network before belief revision (trouble_identification)\n # print(\"Network after belief revision:\\n\", 
self.belief_network.nodes(data=True))\n # print(\"Network before belief revision:\\n\", network_history.nodes(data=True))\n if not nx.is_isomorphic(self.belief_network, network_history, node_match=lambda x, y: x['truth_value'] ==\n y['truth_value']):\n # print(\"Trouble identification\")\n repair_initiation = self.trouble_identification()\n else:\n # print(\"No trouble identification\")\n repair_initiation = False\n\n return repair_initiation, self.belief_network", "def getAction(self, observation):\n \n beliefs = []\n noisyRangeMeasurements, prevAction, gameState = observation\n if self.observeEnable:\n self.inferenceModule.observe(prevAction, noisyRangeMeasurements)\n beliefs.append(self.inferenceModule.getWallBeliefDistribution())\n beliefs.append(self.inferenceModule.getPositionBeliefDistribution())\n self.display.updateDistributions(beliefs)\n return self.chooseAction(gameState)", "def belief(self, element):\n return self.bel(element)", "def _init_belief(self):\n belief = []\n for i in range(self._num_goals):\n belief.append(1.0 / self._num_goals)\n self._last_belief_over_history = np.copy(belief)\n return belief", "def _compute_belief(self):\n # Compute current dt\n current_time = time.time()\n\n\n\n\n\n # Get the current human position\n try:\n (current_human_pos, rotation) = self._tf_listener.lookupTransform(self._darias_frame, self._human_frame,\n rospy.Time(0))\n current_human_pos = np.asarray(current_human_pos)\n\n except (tf.ExtrapolationException, tf.ConnectivityException, tf.LookupException):\n return\n\n self._compute_belief_from_pose_and_time(current_human_pos, current_time)", "def get_belief_scores(self):\n return self._belief_scores.copy()", "def belief_conflict(self, args):\n goal, belief = args\n if isinstance(belief, Beliefs):\n if self.belief_module.is_conflicting_belief(belief):\n return [{}]\n\n return []", "def updateAndGetBeliefStates(self, evidences):\n # XXX: Your code here\n\n # if self.iter < 0:\n # np.save('Entropy{}_{}'.format(self.w, self.p), self.entropy)\n # sys.exit()\n #\n # self.iter = self.iter - 1\n\n if (self.m or self.n) is None:\n self.m = self.walls.height\n self.n = self.walls.width\n\n if not self.board:\n for x in np.arange(self.n):\n for y in np.arange(self.m):\n self.board.append((x, y))\n\n if self.transitionMatrix is None:\n self.transitionMatrix = self.createTransitionMatrix()\n\n if self.sensorMatrix is None:\n self.sensorMatrix = self.createSensorModel()\n\n beliefStates = self.beliefGhostStates\n\n # self.entropy.append(self.entropyF(beliefStates))\n\n for i, e in enumerate(evidences):\n \"\"\"\n To manage multiple ghosts.\n \"\"\"\n col_beliefStates = np.reshape(beliefStates[i, :, :], (-1, 1))\n\n index = self.board.index(e)\n O_col = self.sensorMatrix[:, index]\n\n O = np.diag(O_col)\n \"\"\"\n O = Observation matrix.\n \"\"\"\n\n col_bel = np.dot(O, self.transitionMatrix)\n col_beliefStates = np.dot(col_bel, col_beliefStates)\n\n alpha = 1/(np.sum(col_beliefStates))\n col_beliefStates = alpha*col_beliefStates\n\n beliefState = col_beliefStates.reshape((self.n, self.m))\n beliefStates[i, :, :] = beliefState\n\n # XXX: End of your code\n self.beliefGhostStates = beliefStates\n return beliefStates", "def __init__(self, sn, beliefs):\n assert all([type(x) is Belief for x in beliefs])\n self._sn = sn\n self._beliefs = beliefs", "def generate_new_state(self):\n # If simple Beam, return itself.\n # Variable beams should return simple one.\n n_samples = 100000\n samples = np.random.normal(self.photon_energy, self.sigma, 
self.n_spikes*n_samples)\n\n gkde = stats.gaussian_kde(samples)\n\n gkde.set_bandwidth(bw_method=0.25)\n\n xs = np.linspace(self.photon_energy-self.sigma*5, self.photon_energy+self.sigma*5, self.n_spikes+1)\n\n density, bins, patches = plt.hist(samples, bins=xs, histtype=u'step', density=True)\n\n ind = np.where(density == np.amax(density))\n density[ind[0][0]] *= 1.5\n density_renorm = density / density.sum()\n\n photon_energy = np.linspace(self.photon_energy-self.sigma*5, self.photon_energy+self.sigma*5, self.n_spikes+1).tolist()\n fluences = (self.get_photons_per_pulse()*density_renorm/density_renorm.sum())\n\n return [\n Beam(\n photon_energy=photon_energy[i],\n focus_x=self._focus_xFWHM,\n focus_y=self._focus_yFWHM,\n focus_shape=self._focus_shape,\n fluence=fluences[i])\n for i in range(self.n_spikes)\n ]", "def updateAndGetBeliefStates(self, evidences):\n\n beliefStates = self.beliefGhostStates\n # XXX: Your code here\n width = self.walls.width\n height = self.walls.height\n w = self.w\n p = self.p\n pastBeliefStates = self.beliefGhostStates\n\n\n beliefStates = list()\n for i in range(len(evidences)):\n prob = np.zeros((width, height))\n pastProb = pastBeliefStates[i]\n evidence = evidences[i]\n for x in range(evidence[0] - w, evidence[0] + w + 1):\n for y in range(evidence[1] - w, evidence[1] + w + 1):\n if x in range(width) and y in range(height):\n prob[x][y] = 1\n\n for x in range(width):\n for y in range(height):\n if prob[x][y] != 0:\n prob[x][y] *= self.forwarding(x, y, p, pastProb)\n\n alpha = 1/np.sum(prob)\n # Normalization of the probability of the evidence\n for x in range(width):\n for y in range(height):\n if prob[x][y] != 0:\n prob[x][y] *= alpha\n beliefStates.append(prob)\n\n # XXX: End of your code\n self.beliefGhostStates = beliefStates\n return beliefStates", "def beam(self) -> Beam:\n\n return self._beam", "def HELPER_init_belief(self):\n return util.initBelief(self.num_answer_choices, self.num_difficulty_bins)", "def test_edge_features(self):\n k = [4, 4, 4, 4, 4]\n mn = self.create_chain_model(k)\n\n d = 3\n\n for i in range(5):\n mn.set_edge_features((i, i+1), np.random.randn(d))\n\n mn.create_matrices()\n mn.set_unary_weight_matrix(np.random.randn(4, 4))\n mn.set_edge_weight_matrix(np.random.randn(d, 16))\n\n bp = MatrixBeliefPropagator(mn)\n\n bp.infer()\n bp.load_beliefs()\n\n unconditional_marginals = bp.var_beliefs[4]\n\n bp.condition(0, 2)\n bp.infer()\n bp.load_beliefs()\n\n conditional_marginals = bp.var_beliefs[4]\n\n assert not np.allclose(unconditional_marginals, conditional_marginals), \\\n \"Conditioning on variable 0 did not change marginal of variable 4\"\n\n mn.set_edge_features((2, 3), np.zeros(d))\n mn.create_matrices()\n mn.set_unary_weight_matrix(np.random.randn(4, 4))\n mn.set_edge_weight_matrix(np.random.randn(d, 16))\n\n bp.infer()\n bp.load_beliefs()\n\n unconditional_marginals = bp.var_beliefs[4]\n\n bp.condition(0, 2)\n bp.infer()\n bp.load_beliefs()\n\n conditional_marginals = bp.var_beliefs[4]\n\n assert np.allclose(unconditional_marginals, conditional_marginals), \\\n \"Conditioning on var 0 changed marginal of var 4, when the features should have made them independent\"", "def _update_beliefs(self, features,\n beliefs):\n raise NotImplementedError", "def _interaction(self, entity):\n\n # Get parameters\n att_range = np.array([agent.a_range for agent in entity], dtype=float)[:,None]\n att_strength = np.array([agent.get_advantage for agent in entity])[:,None]\n team_index = np.array([agent.team for agent in entity])\n 
alliance_matrix = team_index[:,None]==team_index[None,:]\n att_strength[team_index==TEAM1_BACKGROUND,] += self.BLUE_ADV_BIAS\n att_strength[team_index==TEAM2_BACKGROUND,] += self.RED_ADV_BIAS\n\n # Get distance between all agents\n x, y = np.array([agent.get_loc() for agent in entity]).T\n dx = np.subtract(*np.meshgrid(x,x))\n dy = np.subtract(*np.meshgrid(y,y))\n distance = np.hypot(dx, dy)\n\n # Get influence matrix\n infl_matrix = np.less(distance, att_range)\n infl_matrix = infl_matrix * att_strength\n friend_count = (infl_matrix*alliance_matrix).sum(axis=0)-1 # -1 to not count self\n enemy_count = (infl_matrix*~alliance_matrix).sum(axis=0)\n mask = enemy_count == 0\n\n # Add background advantage bias\n loc_background = [self._static_map[agent.get_loc()] for agent in entity]\n friend_count[loc_background==team_index] += self.STOCH_ATTACK_BIAS\n enemy_count[~(loc_background==team_index)] += self.STOCH_ATTACK_BIAS\n\n # Interaction\n if self.STOCH_ATTACK:\n result = self.np_random.rand(*friend_count.shape) < friend_count / (friend_count + enemy_count)\n else:\n result = friend_count > enemy_count\n result[mask] = True\n\n return result", "def retrieve_solver_belief(self, t_plan=0, t=0):\n\n # get raw info stored from the solver\n # b_target[(v, t)] = beta, 0 <= beta <= 1\n b_target = self.belief[t_plan]\n\n # make it pretty: b = [b_c, b_v1, .... b_vn]\n belief = self.get_belief_vector(b_target, t)\n\n return belief", "def choose(self):\n # pick agent A\n keys = list(self._agents.keys())\n keyA = random.choice(keys)\n agentA = self.model.schedule.agents[keyA]\n\n # pick pick agent B\n keyB = random.choice(agentA.neighbors)\n agentB = self.model.schedule.agents[keyB]\n\n return agentA, agentB", "def HELPER_update_belief(self, old_belief, observation, gamma):\n observation = int(observation)\n #print \"old_belief:\", old_belief, type(old_belief)\n #print \"observation:\", observation, type(observation)\n #print \"gamma:\", gamma, type(gamma)\n\n diffs = [0.1*i for i in range(self.num_difficulty_bins)]\n new_belief = util.updateBelief(old_belief, None, observation, diffs, gamma)\n #print \"new_belief\", new_belief, type(new_belief)\n return new_belief", "def act(self):\n channel_act = copy.deepcopy(self.observation)\n\n for user_act in channel_act['user_acts']:\n # Dialogue Act\n da_conf = self.generate_confidence()\n da_value = user_act[\"dialogue_act\"][\"value\"]\n\n if np.random.random() > da_conf:\n if da_value == UserAct.AFFIRM:\n da_value = UserAct.NEGATE\n elif da_value == UserAct.NEGATE:\n da_value == UserAct.AFFIRM\n else:\n pass\n\n user_act[\"dialogue_act\"][\"value\"] = da_value\n user_act[\"dialogue_act\"][\"conf\"] = self.generate_confidence()\n\n # Intent\n if \"intent\" in user_act:\n intent_value = user_act[\"intent\"][\"value\"]\n if self.intents[intent_value].get(\"speech\", False):\n intent_conf = 1.\n else:\n intent_conf = self.generate_confidence()\n intent_possible_values = self.slots[\"intent\"][\n \"possible_values\"].copy()\n\n if np.random.random() > intent_conf:\n intent_possible_values.remove(intent_value)\n intent_value = np.random.choice(intent_possible_values)\n\n user_act['intent']['value'] = intent_value\n user_act['intent']['conf'] = intent_conf\n\n # Slot Values\n for slot_dict in user_act.get('slots', list()):\n slot_name = slot_dict[\"slot\"]\n slot_value = slot_dict[\"value\"]\n\n if self.slots[slot_name][\"node\"] != \"BeliefNode\":\n slot_conf = 1.0\n else:\n slot_conf = self.generate_confidence()\n\n slot_possible_values = 
self.slots[slot_name].get(\n \"possible_values\")\n\n if slot_possible_values is None:\n slot_possible_values = list()\n\n slot_possible_values = slot_possible_values.copy()\n if len(slot_possible_values) and np.random.random() > slot_conf:\n slot_possible_values.remove(slot_value)\n slot_value = np.random.choice(slot_possible_values)\n\n slot_dict['conf'] = slot_conf\n\n channel_act[\"channel_utterance\"] = self.template_nlg(\n channel_act['user_acts'])\n return channel_act", "def _update_beliefs(self, features,\n beliefs):\n if (len(features) != len(beliefs) or features.ndim != 1):\n raise core.BadFeatureFnError()\n\n assert len(features) == len(beliefs)\n decay = self.rng.binomial(beliefs, self.params.decay_prob)\n updated_beliefs = [\n beliefs[i] + features[i] - decay[i] for i in range(len(beliefs))\n ]\n return updated_beliefs", "def talk(self):\n out = (self.blurbs[self.state][\"talk\"])\n self.next_state(\"talk\")\n return out", "def _update_beliefs(self, features, beliefs):\n self.n_steps += 1\n if self.last_allocation is None:\n return beliefs\n for i_bin in range(self._n_bins):\n self.data[i_bin].append((features[i_bin], self.last_allocation[i_bin]))\n if self.params.burn_steps <= self.n_steps and self.n_steps % self.params.interval == 0:\n ll_model = _CensoredPoisson(\n np.array(self.data[i_bin][-self.params.window:]))\n results = ll_model.fit(disp=0)\n beliefs[i_bin] = results.params[0]\n return beliefs", "def obtain_batch_bandit_feedback(\n self,\n random_state: Optional[int] = None,\n ) -> BanditFeedback:\n random_ = check_random_state(random_state)\n # train a base ML classifier\n base_clf_b = clone(self.base_classifier_b)\n base_clf_b.fit(X=self.X_tr, y=self.y_tr)\n preds = base_clf_b.predict(self.X_ev).astype(int)\n # construct a behavior policy\n pi_b = np.zeros((self.n_rounds_ev, self.n_actions))\n pi_b[:, :] = (1.0 - self.alpha_b) / self.n_actions\n pi_b[np.arange(self.n_rounds_ev), preds] = (\n self.alpha_b + (1.0 - self.alpha_b) / self.n_actions\n )\n # sample action and factual reward based on the behavior policy\n action = np.zeros(self.n_rounds_ev, dtype=int)\n for i, p in enumerate(pi_b):\n action[i] = random_.choice(\n np.arange(self.n_actions, dtype=int), p=p, replace=False\n )\n reward = self.y_full_ev[np.arange(self.n_rounds_ev), action]\n\n return dict(\n n_actions=self.n_actions,\n n_rounds=self.n_rounds_ev,\n context=self.X_ev,\n action=action,\n reward=reward,\n position=None, # position effect is not considered in classification data\n pscore=pi_b[np.arange(self.n_rounds_ev), action],\n )", "def update_belief_once(self, current_observation, last_observation, avg_vel, dt, current_belief):\n # type: (np.ndarray, np.ndarray, float, float, list) -> (list, list)\n\n\n\n new_belief = []\n likelihoods = []\n estimated_positions = []\n normalization_factor = 0.0\n\n # Compute the likelihoods\n for goal_idx in range(self._num_goals):\n obs_likelihood, calculated_position = self.compute_observation_likelihood(current_observation,\n last_observation,\n self._goals[goal_idx],\n avg_vel, dt)\n estimated_positions.append(calculated_position)\n obs_likelihood += 1\n likelihoods.append(obs_likelihood)\n normalization_factor += obs_likelihood * current_belief[goal_idx]\n\n\n\n\n #for i in range(self.importance_of_prior_in_belief_update):\n #normalization_factor = 0.0\n #tmp_belief = []\n # Compute new belief\n for goal_idx in range(self._num_goals):\n prob = (likelihoods[goal_idx] * current_belief[goal_idx])/normalization_factor\n\n new_belief.append(prob)\n\n #tmp_belief 
= np.array(tmp_belief) / normalization_factor\n\n\n #new_belief = tmp_belief\n return [new_belief, estimated_positions]", "def happiness(self):\n return ( self.girl.happiness())\n # self.boy.happiness(self.girl) +", "def convert_output(self, act):\n outlist = []\n if act is not None:\n self.action = act\n outlist = [ act['heater_on'], ]\n\n return outlist", "def beers_get(label=None, page=None, per_page=None): # noqa: E501\n\n\n return query_manager.get_resource(\n label=label,\n page=page,\n per_page=per_page,\n rdf_type_uri=BEER_TYPE_URI,\n rdf_type_name=BEER_TYPE_NAME, \n kls=Beer)", "def makes_offer(self) -> object:\n return self._makes_offer", "def get_beam_current(self):\n raise NotImplementedError", "def beaten(self):\r\n return [self.trainer.beaten()]", "def get_action(self, state):\n\n \"\"\"\n XXX: DO NOT MODIFY THAT FUNCTION !!!\n Doing so will result in a 0 grade.\n \"\"\"\n\n # XXX : You shouldn't care on what is going on below.\n # Variables are specified in constructor.\n if self.beliefGhostStates is None:\n self.beliefGhostStates = state.getGhostBeliefStates()\n if self.walls is None:\n self.walls = state.getWalls()\n\n # @TODO Put this back to normal\n ret = self.updateAndGetBeliefStates(\n self._computeNoisyPositions(state))\n\n if self.i < 25:\n debug = ret[0]\n self.l.append(np.max(debug))\n self.i += 1\n #if debug == 1: # To Stop as soon as convergence happens\n #self.i = 25\n\n prefix = 'data/' # To indicate path\n\n if self.i == 25:\n\n if os.path.exists(os.path.join(prefix, str(self.w) + \"-\" + str(self.p) + \".txt\")):\n os.remove(os.path.join(prefix, str(self.w) + \"-\" + str(self.p) + \".txt\"))\n\n f = open(os.path.join(prefix, str(self.w) + \"-\" + str(self.p) + \".txt\"), \"a\")\n first = True\n for data in self.l:\n if first:\n first = False\n f.write(str(data))\n else:\n f.write(\",\" + str(data))\n self.i += 1\n f.close()\n print(\"Done\")\n plt.plot(range(1, len(self.l)+1), self.l)\n plt.xlabel('Time step')\n plt.ylabel('Maximum probability')\n plt.title('Bayes Filter')\n plt.axis([0, self.i, 0, 1])\n plt.savefig(os.path.join(prefix, str(self.w) + \"-\" + str(self.p) + \".pdf\"), bbox_inches='tight')\n plt.show()\n\n return ret", "def get_bribe(self):\r\n return self.bribe", "def formulate_request(self):\n\n # Initialise a list to store the indices of the nodes to include in the repair initiation\n repair_initiation = []\n\n # Make a copy of the existing belief belief_network in order to explore the coherence of different truth value\n # assignment combinations\n network_copy = self.belief_network.copy()\n\n # Get the not (yet) communicated nodes and its combinations of truth values in order to explore\n # different coherence values\n not_comm_nodes = [x for x, y in network_copy.nodes(data=True) if y['type'] == 'inf' or\n y['type'] == 'own' and y['repair'] is False]\n\n # If there are no nodes that have not been communicated yet, break and return 'False' as a repair initiation\n if not not_comm_nodes:\n return False\n\n combinations = list(itertools.product([True, False], repeat=len(not_comm_nodes)))\n\n # Calculate the coherence for all possible combinations\n\n # Initialise a list to store the different coherence values in and a list for the normalised coherence\n # (normalised over the number of nodes)\n coherence_values = []\n normalised_coherence = []\n\n for n in range(len(combinations)):\n # Initialise a count for the number of inferred nodes\n i = 0\n for not_comm_node in not_comm_nodes:\n network_copy.nodes[not_comm_node]['truth_value'] = 
combinations[n][i]\n i += 1\n coherence = self.coherence(network_copy)\n coherence_values.append(coherence)\n normalised_coherence.append(coherence / len(combinations[n]))\n\n # Store all the indices of the maximum normalised coherence values in a list and pick one randomly\n max_normalised_coherence = max(normalised_coherence)\n max_indices = [i for i in range(len(normalised_coherence)) if normalised_coherence[i] ==\n max_normalised_coherence]\n nodes_truth_values_index = random.choice(max_indices)\n\n # Change the network copy to the truth value combination with the highest coherence\n i = 0\n for not_comm_node in not_comm_nodes:\n network_copy.nodes[not_comm_node]['truth_value'] = combinations[nodes_truth_values_index][i]\n i += 1\n\n # The node(s) to be asked repair over are stored in a list containing tuples (a tuple per node to ask repair\n # over) consisting of the node index and its truth value\n for not_comm_node in not_comm_nodes:\n if network_copy.nodes[not_comm_node]['truth_value'] != \\\n self.belief_network.nodes[not_comm_node]['truth_value']:\n repair_initiation.append((not_comm_node, self.belief_network.nodes[not_comm_node]['truth_value']))\n self.belief_network.nodes[not_comm_node]['repair'] = True\n\n return repair_initiation", "def get_blue():\n # return name of actor, grazing speed, self defense\n return 'Piggy', 2", "def _update_belief_over_history(self):\n # type: () -> list\n\n if len(self.current_init_belief)==0:\n #print\"init normal:\"\n belief = self._init_belief()\n else:\n #print\"init transitions:\"\n #print self.current_init_belief\n belief = np.copy(self.current_init_belief)\n self._last_belief_over_history = np.copy(belief)\n\n #print \"after\"\n if self._history.size() < self._max_belief_history:\n start_idx_history_window = 1\n else:\n start_idx_history_window = self._history.size() - self._max_belief_history\n\n for t in range(start_idx_history_window, self._history.size()):\n dt = self._history.dts[t]\n current_obs = self._history.observations[t]\n last_obs = self._history.observations[t - 1]\n\n vel, avg_vel = self._compute_velocity(last_obs, current_obs, dt)\n\n # TODO: Belief will be computed multiple times here too!\n belief, _ = self.update_belief_once(current_obs, last_obs, avg_vel, dt, belief)\n\n return belief", "def get_node_behaviours(self) -> dict:\n response = requests.get(self.channel, params=\"get_behaviours\")\n response = AllBehavioursMessage.parse_raw(response.content)\n return response.nodes", "def to_bel(self) -> BELGraph:\n graph = BELGraph(\n name='Side Effect Resource (SIDER)',\n version='1.0.0',\n )\n\n it = tqdm(self._get_query(SideEffect), total=self.count_side_effects(), desc='Mapping side effects to BEL')\n for side_effect in it:\n side_effect.add_to_bel_graph(graph)\n\n it = tqdm(self._get_query(Indication), total=self.count_indications(), desc='Mapping indications to BEL')\n for indication in it:\n indication.add_to_bel_graph(graph)\n\n return graph", "def agent(self) -> Entity:\n return self.__agent", "def _start_callback(self, msg):\n\n self.initialize_parameters() # think if we want this\n\n if not msg.goals_file:\n \n rospy.logerr(\"[BeliefTracker] Goals File Path is empty! 
Using default path for the CSV file!\")\n self._init_goals_from_csv(self.pkg_path+'/data/goals.csv')\n #exit(1)\n\n\n\n # Initialize goals and belief\n else:\n self._init_goals_from_csv(msg.goals_file)\n \n self._current_belief = self._init_belief()\n self._radius_around_goal = [0.1 for _ in range(self._num_goals)]\n\n # Allow the Belief computation loop to start\n self._is_belief_tracker_ready = True\n\n rospy.loginfo(\"[BeliefTracker] Belief Tracker started!\")\n\n parameter_file = msg.belief_tracker_parameters_file\n\n if not parameter_file:\n parameter_file = self.pkg_path+\"/data/transition_probabilities/belief_tracker_params_default.pkl\"\n\n transition_history_length = msg.belief_tracker_goal_transition_history\n if transition_history_length == 0:\n print \"transition history length not specified, setting it per default to 1\"\n transition_history_length = 1\n\n self.update_belief_tracker_parameters(parameter_file,transition_history_length)\n\n self._reached_goals = []\n self._timer = rospy.Timer(rospy.Duration(1.0 / self._frequency), self.tick)", "def update_belief(self, state, action, reward):\n self.add_to_state_history(state)\n state = self.get_modified_state()\n self.belief.update(state, action, reward, self.alpha)\n self.alpha *= self.a_rate", "def sense_act_learn(self, sensors, reward):\n \n self.timestep += 1\n features = sensors\n # Calcuate activities of all the features.\n # features = cortex.step(sensors)\n\n # Single out one input for consicous processing.\n attended, attended_activity = self.cingulate.attend(\n features, self.predicted_features)\n self.hippocampus.attend(attended, attended_activity)\n\n # Decide which actions to take.\n decision_scores = self.hippocampus.get_decision_scores(\n self.amygdala.reward_by_feature, self.ganglia.goals)\n actions, decision_index = self.ganglia.decide(\n features, self.predicted_actions, decision_scores)\n '''\n print\n print 'brain'\n print 'ds', decision_scores\n print 'di', decision_index\n print 'actions', actions\n print 'features', features\n '''\n # Make predictions and calculate reactions for the next time step. 
\n self.predicted_features, self.predicted_actions = (\n self.cerebellum.predict(features, actions))\n\n # Learn from this new time step of experience.\n self.amygdala.learn(features, reward) \n self.cerebellum.learn(features, actions)\n self.hippocampus.learn(decision_index)\n\n if (self.timestep % self.backup_interval) == 0:\n self.backup() \n return actions", "def afmetingenBL(self):\n return self._afmetingenBL.get_waarde()", "def _sample_position(self, positions, current_belief):\n new_belief = np.copy(current_belief)\n\n # Threshold Belief and re-normalize\n # If we are very sure of one goal we do not care about the others\n for i in range(self._num_goals):\n if current_belief[i] < self._belief_threshold:\n new_belief[i] = 0.0\n\n # print \"probs belief before:\"\n # print new_belief\n\n # if we are very unsure about one goal we do not use it\n if np.max(new_belief) == 0.0:\n new_belief = np.copy(current_belief)\n for i in range(self._num_goals):\n if current_belief[i] < self._belief_threshold_min:\n new_belief[i] = 0.0\n print \"using old belief above min threshold\"\n\n # this should never happen I think unless we have super many goals\n if np.max(new_belief) == 0.0:\n new_belief = np.copy(current_belief)\n print \"using old belief, should not happen\"\n\n # print \"probs belief:\"\n # print new_belief\n new_belief = new_belief / np.sum(new_belief)\n\n\n idx = np.random.choice(a=np.arange(len(positions)), p=new_belief)\n return np.asarray(positions[idx]), self._goals[idx]", "def ai(self):\n if not self.alive:\n return\n\n assert hasattr(self, 'net'), \"Bird has ai enabled but does not appear to have any agents\"\n\n self.birth_time += 1\n\n activation = self.net.activate(self.get_inputs())[0]\n\n # if activation > 1e3:\n # print('unusually high activation of', activation)\n\n if activation > 0.9:\n self.flap()", "def assign_agents(particle,self):\n\n self.models[particle].state2agents(self.states[particle])\n\n return self.models[particle]", "def getnodebeliefs(self, node_p=None):\n node_p = self.getnodenamed(node_p) # Verify pointer.\n\n nstates = self.getnodenumberstates(node_p)\n cnetica.GetNodeBeliefs_bn.argtypes = [c_void_p]\n cnetica.GetNodeBeliefs_bn.restype = ndpointer(\n 'float32', ndim=1, shape=(nstates,), flags='C')\n # (node_bn* node)\n return cnetica.GetNodeBeliefs_bn(node_p) # prob_bn", "def get_effective_agent(self):\n raise Unimplemented()", "def get_action(self, state):\n\n \"\"\"\n XXX: DO NOT MODIFY THAT FUNCTION !!!\n Doing so will result in a 0 grade.\n \"\"\"\n\n # XXX : You shouldn't care on what is going on below.\n # Variables are specified in constructor.\n if self.beliefGhostStates is None:\n self.beliefGhostStates = state.getGhostBeliefStates()\n if self.walls is None:\n self.walls = state.getWalls()\n return self.updateAndGetBeliefStates(\n self._computeNoisyPositions(state))", "def get_behaviours_output(q: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetBehavioursResult]:\n ...", "def test_bart_gen(self):\n opt = ParlaiParser(True, True).parse_args(['--model', 'bart'])\n bart = create_agent(opt)\n text = \"Don't have a cow, Man!\"\n obs = {\"text\": text, 'episode_done': True}\n bart.observe(obs)\n act = bart.act()\n\n self.assertEqual(act['text'], text)", "def act(self, goal):\n rospy.loginfo(\"Emergency action received goal: \"+str(goal))\n\n\n sam_publisher = rospy.Publisher('/sam_auv_1/thrusters/0/input',\n FloatStamped,\n queue_size = 100)\n\n # sam_publisher = 
rospy.Publisher('/uavcan_vbs_command/',\n # PercentStamped,\n # queue_size = 100)\n\n #self.emergency_activated = False\n #sam_sub = rospy.Subscriber('/sam_auv_1/emergency_butt', Bool, self.emergency_cb)\n\n while not rospy.is_shutdown() and not self.done_once:\n time.sleep(0.5)\n #rospy.loginfo('Emergency waiting for butt')\n #if self.emergency_activated:\n # rospy.loginfo('Butt activated')\n start_time = rospy.get_time()\n elapsed = 0\n rospy.loginfo('Emergency action active...')\n while elapsed < 3:\n if rospy.is_shutdown():\n break\n\n elapsed = rospy.get_time() - start_time\n #fs = FloatStamped()\n fs = PercentStamped()\n h = Header()\n fs.header = h\n fs.data = 0\n sam_publisher.publish(fs)\n time.sleep(0.1)\n # we are done doing the action succesfully\n self.done_once = True\n rospy.loginfo('Emergency action SUCCESS')\n return True\n\n # something went wrong, we fucked up\n rospy.loginfo('Emergency action FAILURE '+ str(self.done_once))\n return False", "def belief_mean(self) -> types.StatesTorch:\n return self._belief_mean", "def test_belief_vector():\n # load graph\n graph_file = 'G7V_test.p'\n g = ext.get_graph(graph_file)\n v_list = [5]\n type_distribution = 'uniform'\n\n b_0 = cp.set_initial_belief(g, v_list, type_distribution)\n assert b_0 == [0, 0, 0, 0, 0, 1, 0, 0]\n\n v_list = [1, 7]\n b_0 = cp.set_initial_belief(g, v_list, type_distribution)\n assert b_0 == [0, 1/2, 0, 0, 0, 0, 0, 1/2]", "def getAction(self):\n # Module player is always assumed player #0\n state = [0] * (4+self.game.numPlayers*2)\n state[0] = (0,1)[self.game.roles[0] == \"Werewolf\"]\n state[1] = (0,1)[self.game.roles[0] == \"Villager\"]\n state[2] = (0,1)[self.game.turnnum == 0]\n state[3] = (0,1)[self.game.turnnum == 1]\n for i in range(len(self.game.claim)):\n state[4+i*2+0] = (0,1)[self.game.claim[i] == \"Werewolf\"]\n state[4+i*2+1] = (0,1)[self.game.claim[i] == \"Villager\"]\n #state[4+i*2+0] = (0,1)[self.game.roles[0] == \"Werewolf\"]\n #state[4+i*2+1] = (0,1)[self.game.roles[0] == \"Villager\"]\n self.module.reset()\n output = self.module.activate(state)\n result = None\n if (self.game.turnnum == 0):\n if output[0] > output[1]: result = \"Werewolf\"\n else : result = \"Villager\"\n else:\n votes = output[2:]\n result = drawGibbs(votes, self.temperature) + 1\n #print (\"votes, result\", votes, result)\n #print ()\n #print (\"input\", state)\n #print (self.game.stateStr())\n #print (\"output\", output, result)\n #print ()\n return [self.pnum, result]", "def bef_ft(self):\n # Join #\n df = self.parent.pool_indicators\n # Sum for everyone #\n cols_sum = {'sw_merch' : 'sum',\n 'sw_foliage': 'sum',\n 'sw_other' : 'sum',\n 'hw_merch' : 'sum',\n 'hw_foliage': 'sum',\n 'hw_other' : 'sum',\n 'sw_coarse' : 'sum',\n 'sw_fine' : 'sum',\n 'hw_coarse' : 'sum',\n 'hw_fine' : 'sum'}\n # Group and aggregate #\n df = df.groupby(\"forest_type\").agg(cols_sum).reset_index()\n # Make new columns #\n df['tot_merch'] = df.sw_merch + df.hw_merch\n df['tot_abg'] = df.sw_merch + df.hw_merch + \\\n df.sw_foliage + df.hw_foliage + \\\n df.hw_other + df.sw_other\n df['bg_biomass'] = df.sw_coarse + df.sw_fine + \\\n df.hw_coarse + df.hw_fine\n # Calculate the biomass expansion factor\n # Ratio of (total above and below ground) / total above ground\n df['bef_tot'] = (df.tot_abg + df.bg_biomass) / df.tot_abg\n # Return #\n return df", "def getBeliefDistribution(self):\n # This essentially gives a point to a location for each particle there, then \n # normalizes the point values so they add up to 1.\n dist = util.Counter()\n for 
part in self.particles: dist[part] += 1\n dist.normalize()\n return dist", "def F(self):\n return self.generic_getter(get_F_potential, \"F\", \"convert_energy\")", "def receive_shipments(self):\n return ReceiveIntensity(self)", "def _beam(self):\n\n return self._beam_factory.simple(self.detectorbase.wavelength)", "def analyze_belief_strength_with_bias(self, G):\r\n n = []\r\n nbs_list = []\r\n for node in G.nodes: #cycles through the nodes of the graph to mine the attributes\r\n n.append(node) #appends each node to a list that will be put into a dictionary\r\n pbs_list = []\r\n og_bs = G.nodes[node]['belief_strength'] #mines the numerical value for a nodes belief strength, from a pre-set node attribute\r\n unc = G.nodes[node]['uncertainty'] #mines the numerical value for a nodes belief uncertainty, from a pre-set node attribute\r\n prob = G.nodes[node]['probability']\r\n for pre in G.predecessors(node):\r\n ew = G.edges[pre, node]['weight'] #mines the numerical value of an edge's weight, from a pre-set edge attribute\r\n pre_bs = G.nodes[pre]['belief_strength'] #mines the numerical value for a predecessors belief strength, from a pre-set node attribute\r\n x = ew * pre_bs #determines how much a node values its neighbor's opinion.\r\n pbs_list.append(x) #puts all values for predecessor belief strangths in a list\r\n if len(pbs_list) == 0:\r\n nbs = og_bs\r\n nbs = int(nbs)\r\n else:\r\n apbs = sum(pbs_list)/len(pbs_list) #calculates the average predecessor belief strength value for a node\r\n if apbs*og_bs > 0:\r\n if apbs > 0:\r\n nbs = min(og_bs + (0.1*prob*unc*apbs), 100)\r\n else:\r\n nbs = max(og_bs + (0.1*prob*unc*apbs), -100)\r\n nbs = int(nbs)\r\n else:\r\n nbs = og_bs\r\n nbs = int(nbs)\r\n nbs_list.append(nbs) #the new belief strengths are appended to a list that will be put into adictionary\r\n change = dict(zip(n, nbs_list)) #creates a dictionary from two lists which stores the nodes as keys and their new belief strengths as values\r\n print(change)\r\n return change #this will be used to update the list in a different function\r", "def goal(self) -> Goal:\n return MaxReward()", "def forward_att(self, eouts, elens, ys, return_logits=False, teacher_logits=None, ctc_trigger_points=None, forced_trigger_points=None):\n bs, xmax = eouts.size()[:2]\n device = eouts.device\n ys_in, ys_out, ylens = append_sos_eos(ys, self.eos, self.eos, self.pad, eouts.device, self.bwd)\n ymax = ys_in.size(1)\n if forced_trigger_points is not None:\n for b in range(bs):\n forced_trigger_points[b, ylens[b] - 1] = elens[b] - 1\n dstates = self.zero_state(bs)\n if self.training:\n if self.discourse_aware and not self._new_session:\n dstates = {'dstate': (self.dstate_prev['hxs'], self.dstate_prev['cxs'])}\n self.dstate_prev = {'hxs': [None] * bs, 'cxs': [None] * bs}\n self._new_session = False\n cv = eouts.new_zeros(bs, 1, self.enc_n_units)\n self.score.reset()\n aw, aws = None, []\n betas, p_chooses = [], []\n lmout, lmstate = None, None\n ys_emb = self.embed_token_id(ys_in)\n src_mask = make_pad_mask(elens).unsqueeze(1)\n tgt_mask = (ys_out != self.pad).unsqueeze(2)\n logits = []\n for i in range(ymax):\n is_sample = i > 0 and self._ss_prob > 0 and random.random() < self._ss_prob\n if self.lm is not None:\n self.lm.eval()\n with torch.no_grad():\n y_lm = self.output(logits[-1]).detach().argmax(-1) if is_sample else ys_in[:, i:i + 1]\n lmout, lmstate, _ = self.lm.predict(y_lm, lmstate)\n y_emb = self.embed_token_id(self.output(logits[-1]).detach().argmax(-1)) if is_sample else ys_emb[:, i:i + 1]\n dstates, 
cv, aw, attn_state, attn_v = self.decode_step(eouts, dstates, cv, y_emb, src_mask, aw, lmout, mode='parallel', trigger_points=forced_trigger_points[:, i:i + 1] if forced_trigger_points is not None else None)\n logits.append(attn_v)\n aws.append(aw)\n if attn_state.get('beta', None) is not None:\n betas.append(attn_state['beta'])\n if attn_state.get('p_choose', None) is not None:\n p_chooses.append(attn_state['p_choose'])\n if self.attn_type in ['gmm', 'sagmm']:\n aw = attn_state['myu']\n if self.training and self.discourse_aware:\n for b in [b for b, ylen in enumerate(ylens.tolist()) if i == ylen - 1]:\n self.dstate_prev['hxs'][b] = dstates['dstate'][0][:, b:b + 1].detach()\n self.dstate_prev['cxs'][b] = dstates['dstate'][1][:, b:b + 1].detach()\n if self.training and self.discourse_aware:\n if bs > 1:\n self.dstate_prev['hxs'] = torch.cat(self.dstate_prev['hxs'], dim=1)\n self.dstate_prev['cxs'] = torch.cat(self.dstate_prev['cxs'], dim=1)\n else:\n self.dstate_prev['hxs'] = self.dstate_prev['hxs'][0]\n self.dstate_prev['cxs'] = self.dstate_prev['cxs'][0]\n logits = self.output(torch.cat(logits, dim=1))\n if return_logits:\n return logits\n loss, ppl = cross_entropy_lsm(logits, ys_out, self.lsm_prob, self.pad, self.training)\n acc = compute_accuracy(logits, ys_out, self.pad)\n aws = torch.cat(aws, dim=2)\n if not self.training:\n self.data_dict['elens'] = tensor2np(elens)\n self.data_dict['ylens'] = tensor2np(ylens)\n self.data_dict['ys'] = tensor2np(ys_out)\n self.aws_dict['xy_aws'] = tensor2np(aws)\n if len(betas) > 0:\n self.aws_dict['xy_aws_beta'] = tensor2np(torch.cat(betas, dim=2))\n if len(p_chooses) > 0:\n self.aws_dict['xy_p_choose'] = tensor2np(torch.cat(p_chooses, dim=2))\n if self.attn_type == 'mocha' or (ctc_trigger_points is not None or forced_trigger_points is not None):\n aws = aws.masked_fill_(tgt_mask.unsqueeze(1).expand_as(aws) == 0, 0)\n loss_quantity = 0.0\n if self.attn_type == 'mocha':\n n_tokens_pred = aws.sum(3).sum(2).sum(1) / aws.size(1)\n n_tokens_ref = tgt_mask.squeeze(2).sum(1).float()\n loss_quantity = torch.mean(torch.abs(n_tokens_pred - n_tokens_ref))\n loss_latency = 0.0\n if self.latency_metric == 'interval':\n assert ctc_trigger_points is None\n assert aws.size(1) == 1\n aws_prev = torch.cat([aws.new_zeros(aws.size())[:, :, -1:], aws.clone()[:, :, :-1]], dim=2)\n aws_mat = aws_prev.unsqueeze(3) * aws.unsqueeze(4)\n delay_mat = aws.new_ones(xmax, xmax).float()\n delay_mat = torch.tril(delay_mat, diagonal=-1, out=delay_mat)\n delay_mat = torch.cumsum(delay_mat, dim=-2).unsqueeze(0)\n delay_mat = delay_mat.unsqueeze(1).unsqueeze(2).expand_as(aws_mat)\n loss_latency = torch.pow((aws_mat * delay_mat).sum(-1), 2).sum(-1)\n loss_latency = torch.mean(loss_latency.squeeze(1))\n elif ctc_trigger_points is not None or 'ctc_sync' not in self.latency_metric and forced_trigger_points is not None:\n if 'ctc_sync' in self.latency_metric:\n trigger_points = ctc_trigger_points\n else:\n trigger_points = forced_trigger_points\n js = torch.arange(xmax, dtype=torch.float, device=device).expand_as(aws)\n exp_trigger_points = (js * aws).sum(3)\n trigger_points = trigger_points.float().unsqueeze(1)\n loss_latency = torch.abs(exp_trigger_points - trigger_points)\n loss_latency = loss_latency.sum() / ylens.sum()\n if teacher_logits is not None:\n kl_loss = distillation(logits, teacher_logits, ylens, temperature=5.0)\n loss = loss * (1 - self.distil_weight) + kl_loss * self.distil_weight\n return loss, acc, ppl, loss_quantity, loss_latency", "def extract_belief(ontology, state, 
threshold=0.3):\n\n \"\"\"\n need to check if the value at argmax is bigger than a threshold\n \"\"\"\n\n request_idx = np.argmax([item[1] for item in state[\"request\"].items()])\n frequency_idx = np.argmax([item[1] for item in state[\"frequency\"].items()])\n illness_type_idx = np.argmax([item[1] for item in state[\"type\"].items()])\n symptom_idx = np.argmax([item[1] for item in state[\"symptom\"].items()])\n escalation_idx = np.argmax([item[1]\n for item in state[\"escalation\"].items()])\n duration_idx = np.argmax([item[1] for item in state[\"duration\"].items()])\n confirmation_idx = np.argmax([item[1]\n for item in state[\"confirmation\"].items()])\n\n # then it is neccessary to map the indices back to words\n\n request = is_plausible(state[\"request\"].items(\n ), ontology, \"request\", request_idx, threshold)\n frequency = is_plausible(\n state[\"frequency\"].items(), ontology, \"frequency\", frequency_idx, threshold)\n illness_type = is_plausible(\n state[\"type\"].items(), ontology, \"type\", illness_type_idx, threshold)\n symptom = is_plausible(state[\"symptom\"].items(\n ), ontology, \"symptom\", symptom_idx, threshold)\n\n escalation = is_plausible(state[\"escalation\"].items(\n ), ontology, \"escalation\", escalation_idx, threshold)\n\n duration = is_plausible(state[\"duration\"].items(\n ), ontology, \"duration\", duration_idx, threshold)\n confirmation = is_plausible(state[\"confirmation\"].items(\n ), ontology, \"confirmation\", confirmation_idx, threshold)\n\n if request is not None:\n request = \"request \"+request\n if frequency is not None:\n frequency = \"frequency \"+frequency\n if illness_type is not None:\n illness_type = \"type \"+illness_type\n if symptom is not None:\n symptom = \"symptom \"+symptom\n if escalation is not None:\n escalation = \"escalation \"+escalation\n if duration is not None:\n duration = \"duration \"+duration\n if confirmation is not None:\n confirmation = \"confirmation \"+confirmation\n values = [request, frequency, illness_type,\n symptom, escalation, duration, confirmation]\n values = [val for val in values if val is not None]\n return sorted(values)", "def greetBow(self):\n\n self.behaviorService.startBehavior(\"caresses/leanforwardorbowing\")", "def getFeatures(self, state, action):\n features = qutils.Qcounter()\n features['bias'] = 1.0\n\n if state is None:\n return features\n else:\n\n if self.id%2 == 0:\n plrCoords = state.board.plr_coords['r']\n oppCoords = state.board.plr_coords['b']\n else:\n plrCoords = state.board.plr_coords['b']\n oppCoords = state.board.plr_coords['r']\n\n goalState = GoalState(state.board.plr_coords['r'],state.board.plr_coords['b'],state.agents[self.id].hand,\n state.board.draft)\n if action['coords'] is not None:\n draftCoords = goalState.CardsToCoords([action['draft_card']])\n else:\n draftCoords = None\n\n features['euclideanDistanceCentroid'] = eucDist(action, plrCoords)\n features['neighbour'] = neighbour(action, plrCoords, oppCoords)\n features['heart'] = heart(action, plrCoords)\n features['blockHeart'] = blockHeart(action, oppCoords)\n features['eHorizontal'] = eHorizontal(state, action, plrCoords, oppCoords)\n features['eVertical'] = eVertical(state, action, plrCoords, oppCoords)\n features['eIandIIIDiag'] = eIandIIIDiagonal(state, action, plrCoords, oppCoords)\n features['eIIandIVDiag'] = eIIandIVDiagonal(state, action, plrCoords, oppCoords)\n features['draftHorizontal'] = draftHorizontal(state, plrCoords, oppCoords, draftCoords)\n features['draftVertical'] = draftVertical(state, plrCoords, 
oppCoords, draftCoords)\n features['draftDiagIandIII'] = draftDiagIandIII(state, plrCoords, oppCoords, draftCoords)\n features['draftDiagIIandIV'] = draftDiagIIandIV(state, plrCoords, oppCoords, draftCoords)\n features['draftJacks'] = DraftJacks(action)\n features['PlayCentre'] = PlayCentre(action)\n features['HeuristicValuePlace'] = HeuristicValue(action, goalState)\n features['HeuristicValueDraft'] = HeuristicValueDraft(action, goalState, draftCoords, self.gamma)\n return features", "def __call__(self):\n return random.choice(self.fakers)", "def action(self):\n obs = self.observation\n\n action = None\n try:\n if SETTINGS_DEAD_CANT_THINK and obs.respawn_in > -1:\n self.debugMsg(\"Sleeping\")\n return (0,0,False)\n\n # Check if agent reached goal.\n if self.goal and point_dist(self.goal, obs.loc) < self.settings.tilesize:\n self.goal = None\n\n # If agent already has a goal\n # check if the motivation is still accurate\n if self.goal:\n self.validateMotivation()\n\n # Drive to where the user clicked\n if self.selected and self.observation.clicked:\n self.motivation = MOTIVATION_USER_CLICK\n self.goal = obs.clicked\n\n if self.goal is None:\n if self.strategy == STRATEGY_DEFENCE:\n action = self.action_defend()\n elif self.strategy == STRATEGY_OFFENCE:\n action = self.action_offence()\n else:\n action = self.action_normal()\n else:\n self.debugMsg(\"Goal already found: (%d,%d)\" % self.goal)\n except Exception:\n self.goal = None\n# self.debugMsg(\"Goal: %s, exception: %s\" % (self.goal, exp), True)\n \n if self.goal is None:\n self.goal = obs.loc\n\n self.updateTrendingSpot()\n if action is None:\n if self.goal == obs.loc:\n return (0,0,False)\n else:\n return self.getActionTriple()\n else:\n return action", "def setup_biosafe(self):\n # Generate dummy data in the right format\n species_presence = pd.DataFrame(\n np.random.randint(2, size=len(self.links_law)),\n columns=['speciesPresence'], index=self.links_law.index)\n\n ecotope_area = pd.DataFrame(\n np.ones(len(self.links_eco2.columns)-1) * 1e5,\n columns = ['area_m2'],\n index = self.links_eco2.columns.values[0:-1])\n\n # Simplify ecotope tables to VR ecotopes\n unique_eco = np.unique(\n np.hstack((self.vr_eco.ecotope1.values,\n self.vr_eco.ecotope2.values)))\n links_eco3 = self.links_eco2.reindex(columns=unique_eco)\n ecotope_area = ecotope_area.reindex(index=unique_eco)\n\n # Run a first version of Biosafe\n self.bsf_model = bsf.biosafe(\n self.legal_weights, self.links_law, links_eco3,\n species_presence, ecotope_area)\n\n #PotTax = self.bsf_model.TFI()\n #PotAll = self.bsf_model.FI()\n return", "def tick_agent(self, agent):\n\n # decay mass\n mass_decay = agent.handle_mass_decay()\n\n # find all food items which are not currently being eaten by this agent, and\n # update global foods list\n remaining_food, food_eaten_or_none = self._filter_objects(\n agent, self.foods, self.handle_food)\n self.foods = remaining_food\n num_food_eaten = len(\n list(filter(lambda x: x != None, food_eaten_or_none)))\n\n if self.with_masses:\n # Iterate over all masses, remove those which were eaten\n remaining_mass, mass_eaten_or_none = self._filter_objects(\n agent, self.masses, self.handle_mass)\n self.masses = remaining_mass\n num_mass_eaten = len(\n list(filter(lambda x: x != None, mass_eaten_or_none)))\n else:\n num_mass_eaten = 0\n\n if self.with_viruses:\n # Iterate over all viruses, remove viruses which were eaten\n remaining_virus, virus_eaten_or_none = self._filter_objects(\n agent, self.viruses, self.handle_virus)\n self.viruses = 
remaining_virus\n num_virus_eaten = len(\n list(filter(lambda x: x != None, virus_eaten_or_none)))\n else:\n num_virus_eaten = 0\n\n # get a list of all agents which have collided with the current one, and see\n # if it eats any of them\n agent_mass_eaten = 0\n for other in self.agents.values():\n agent_mass_eaten += self.handle_eat_agent(agent, other)\n return (\n agent_mass_eaten +\n conf.FOOD_MASS * num_food_eaten +\n conf.VIRUS_MASS * num_virus_eaten +\n conf.MASS_MASS * num_mass_eaten +\n mass_decay)", "def speak(self):\n # Speaks randomly to another agent on the same cell\n anticipated_meaning = None\n cellmates = self.model.grid.get_cell_list_contents([self.pos])\n\n # If other agents on the same cell\n if len(cellmates) > 1:\n hearer = self.random.choice(cellmates)\n\n while (hearer == self): # agents should not talk to themselves\n hearer = self.random.choice(cellmates)\n\n meaning = self.random.choice(self.model.schedule.agents).unique_id\n\n # If the speaker is not acquainted with the meaning\n if meaning not in self.meanings:\n print(\"New meaning added to speaker\")\n self.meanings.append(meaning)\n return Conversation(word=None, meaning=None, success=0.0)\n\n # If the hearer is not acquainted with the meaning\n if meaning not in hearer.meanings:\n print(\"New meaning added to hearer\")\n hearer.meanings.append(meaning)\n return Conversation(word=None, meaning=None, success=0.0)\n\n # 50% chance of having an anticipated meaning default\n if self.random.random() <= self.model.antecipated_prob:\n print(\" \" + str(self.unique_id) +\n \" points at \" + str(meaning))\n anticipated_meaning = meaning\n\n # If the speaker has a word for the meaning\n if meaning in self.meaning2word:\n word = self.meaning2word[meaning]\n\n # If the hearer has a word for the meaning\n if word in hearer.word2meaning:\n # If the hearer has no anticipated meaning\n if anticipated_meaning == None:\n return Conversation(word=word, meaning=meaning, success=1.0)\n # If anticipated meaning different from hearer meaning\n if (anticipated_meaning != None\n and anticipated_meaning != hearer.word2meaning[word]):\n hearer.delete_link(word)\n hearer.create_link(word, anticipated_meaning)\n return None\n # If anticipated meaning same as hearer meaning\n if (anticipated_meaning != None\n and anticipated_meaning == hearer.word2meaning[word]):\n return Conversation(word=word, meaning=meaning, success=1.0)\n\n # If the hearer has no word for the meaning\n else:\n # If anticipated meaning same as speaker meaning\n if (anticipated_meaning != None\n and word not in hearer.word2meaning\n and anticipated_meaning not in hearer.meaning2word):\n hearer.create_link(word, anticipated_meaning)\n return Conversation(word=word, meaning=meaning, success=0.0)\n\n # If the speaker has no word for the meaning\n if meaning not in self.meaning2word:\n return Conversation(word=None, meaning=meaning, success=0.0)", "def play_beergame(self, ntrials=1000, get_output=True):\n pdata = np.zeros((ntrials+1, self.nact))\n pdata[0, :] = np.array([1/self.nact]*self.nact)\n qdata = np.zeros_like(pdata)\n self.choices = []\n self.feedback = []\n\n for t in range(ntrials):\n\n # select bandit arm (action) from state space \n act_i = np.random.choice(self.actions, p=pdata[t, :])\n \n # get reward for current action \n r = self.beergame.get_reward(act_i)\n \n if t>0: \n # update value of selected action\n qdata[t+1, act_i] = update_Qi(qdata[t-1, self.last], qdata[t, act_i], r, self.alpha, self.gamma)\n \n # broadcast old q-values for unchosen actions\n 
for act_j in range(self.nact):\n if act_j == act_i: continue \n qdata[t+1, act_j] = qdata[t, act_j]\n \n self.last = act_i\n # update action selection probabilities and store data\n pdata[t+1, :] = update_Pall(qdata[t+1, :], self.beta)\n self.choices.append(act_i)\n self.feedback.append(r)\n\n self.pdata = pdata[1:, :]\n self.qdata = qdata[1:, :]\n self.make_output_df()\n\n if get_output:\n return self.data.copy()", "def breathe_effect(self, color, from_color, period, cycles, persist, power_on, peak, wait):\n payload = {\"color\": color,\n \"period\": period,\n \"cycles\": cycles,\n \"persist\": persist,\n \"power_on\": power_on,\n \"peak\": peak}\n if from_color is not None:\n payload['from_color'] = from_color\n response = requests.post(self.__api_url('effects/breathe'.format(self.name)),\n data=payload,\n headers=self.headers)\n if wait:\n time.sleep(period * cycles)\n return response.text", "def emb(self, entity):\n fv = []\n fv.extend(self.name_model.emb(entity))\n fv.extend(super().emb(entity))\n # if self.config.debug:\n # print('== emb ==')\n # print('==> name_model: %s' % self.name_model.emb(entity))\n # print('==> sub_ent_model: %s' % self.sub_ent_model.emb(entity))\n # print('== bme ==')\n return fv", "def initialize_beliefs(\n self, *, mean: types.StatesTorch, covariance: types.CovarianceTorch\n ) -> None:\n N = mean.shape[0]\n assert mean.shape == (N, self.state_dim)\n assert covariance.shape == (N, self.state_dim, self.state_dim)\n self.belief_mean = mean\n self.belief_covariance = covariance\n self._initialized = True", "def bevel(*args, bevelShapeType: Union[int, bool]=1, caching: bool=True, cornerType: Union[int,\n bool]=2, depth: Union[float, bool]=0.5, extrudeDepth: Union[float, bool]=1.0,\n nodeState: Union[int, bool]=0, tolerance: Union[float, bool]=0.01, width: Union[float,\n bool]=0.5, constructionHistory: bool=True, joinSurfaces: bool=True, name: AnyStr=\"\",\n numberOfSides: Union[int, bool]=4, object: bool=True, polygon: int=0, range:\n bool=True, q=True, query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr],\n Any]:\n pass", "def createReaction(self):\n return _libsbml.Model_createReaction(self)", "def get_favorite(self):\n raise NotImplementedError()", "def evaluate(self, gameState, action, ghosts):\r\n features = self.getFeatures(gameState, action)\r\n weights = self.getWeights(gameState, action)\r\n return features * weights", "def get_belief_vector(b: dict, t: int):\n\n my_list = [k[1] for k in b.keys() if k[1] == t]\n\n # number of vertices + capture\n nu = len(my_list)\n # set of capture + vertices V_c = [0, 1, ... 
n]\n V_c = ext.get_idx_vertices(nu)[0]\n\n belief = []\n for v in V_c:\n beta = b.get((v, t))\n belief.append(beta)\n\n return belief", "def goals(self):\r\n return Goals(self)", "def aanleg(self):\n return self._aanleg.get_waarde()", "def breedte(self):\n return self._breedte.get_waarde()", "def fBellStates(self) -> Dict[Tuple[int, ...], Optional[float]]:\n warnings.warn(\n DeprecationWarning(\n \"fBellState device specs have been deprecated, and will \"\n \"be removed in release v2.13 (targeted for October 2019)\"\n )\n )\n return {tuple(es.targets): es.fBellState for es in self.edges_specs}", "def step (self, action):\n if self.done == 1:\n print(\"episode done\")\n return [self.state, self.reward, self.done, self.info]\n\n else:\n degree = float(action[0] * HI_ANGLE)\n loc, last_pos = self.state\n\n theta = self.fire.deg_to_rad(degree)\n pos = round(self.fire.calc_dist(theta))\n delta = abs(loc - pos)\n\n self.state[1] = pos\n self.info[\"degree\"] = degree\n self.info[\"theta\"] = round(theta, 3)\n self.info[\"delta\"] = delta\n\n self.render()\n\n if pos <= self.fire.radius:\n # realistically, the launch crew should be dead\n self.reward = -100.0\n elif delta <= self.fire.radius:\n # target hit (within blast radius)\n self.reward = 100.0\n self.done = 1;\n else:\n # reward is the \"nearness\" of the blast destroying the target\n self.reward = round(100.0 * float(abs(loc - delta)) / float(self.fire.range))\n\n return [self.state, self.reward, self.done, self.info]", "def backpropagating(self): \n\n ######################### Configure the sensor inputs given the movement of the agent ######################### \n sensors_result_N = self.agent.sensors(self, direction=3) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(3)+[int(self.agent.get_previous_collision())]\n sensors_result_O = self.agent.sensors(self, direction=2) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(2) + [int(self.agent.get_previous_collision())]\n sensors_result_S = self.agent.sensors(self, direction=1) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(1) + [int(self.agent.get_previous_collision())]\n sensors_result_E = self.agent.sensors(self, direction=0) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(0) + [int(self.agent.get_previous_collision())]\n\n input_nn_N = np.asarray(sensors_result_N).astype(int) # input when the Nord action is performed \n input_nn_O = np.asarray(sensors_result_O).astype(int) # input when the West action is performed\n input_nn_S = np.asarray(sensors_result_S).astype(int) # input when the South action is performed\n input_nn_E = np.asarray(sensors_result_E).astype(int) # input when the West action is performed\n\n l_input = [input_nn_E.reshape(1,145),input_nn_S.reshape(1,145),input_nn_O.reshape(1,145),input_nn_N.reshape(1,145)]\n ######################### Configure the sensor inputs given the movement of the agent #########################\n\n print(\"The reward in baskpropagating is %f\" %(self.agent.reward) ) \n parameters = [self.gamma, self.agent.reward]\n Ui = self.U_list[self.agent.get_previousAction().index(1)]\n\n if not self.end:\n U_list_y = [self.nn.predict(input_nn_E.reshape(1,145)),\\\n self.nn.predict(input_nn_S.reshape(1,145)),\\\n self.nn.predict(input_nn_O.reshape(1,145)),\\\n self.nn.predict(input_nn_N.reshape(1,145))] \n #print(U_list_y)\n maxU = np.max(U_list_y)\n #print(np.max(U_list_y))\n index_input_maxU = np.argmax(U_list_y) # the input given for the backprogating is the 
one with the maximum utility\n input_target = l_input[index_input_maxU] # The input target with the max utility, add to the tuple given during the experience replay\n uprime = self.agent.reward + self.gamma * maxU # input of the utility with the best value\n \n else:\n uprime = self.agent.reward\n input_target = np.array(None)\n \n action = self.agent.get_previousAction().index(1)\n input_nn = self.input_list[action]\n ##### Add to the lesson the action chose in order to go the next state, \n ##### the next state after to have performed the action, and the reward given\n if(self.action_proba[action] > 0.01): # the Pl minimum to choose the action corresponding to the action policy, cf to the paper part experience replay\n #next_states = [copy.deepcopy(input_nn_E).reshape(1,145), copy.deepcopy(input_nn_S).reshape(1,145), copy.deepcopy(input_nn_O).reshape(1,145), copy.deepcopy(input_nn_N).reshape(1,145)]\n self.memory.append((input_nn,action,np.asarray(copy.deepcopy(l_input)),self.agent.reward)) # We add the experiment to the memory of the agent \n \n ############################\n self.nn.train_one_step_other(input_nn,uprime)\n #self.nn.train(input_nn,tf.convert_to_tensor([[uprime]])) # use the method fit to train the neural network", "def agent(self):\n return self.__agent", "def leverage(self):\n return self._leverage", "def act(self, state):\n action = self.actor_model.predict(state)\n return action[0]", "def fire_boundary(self):\n for cell in self._fire_boundary:\n yield cell\n # alternative syntax\n #return (cell for cell in self._fire_boundary)", "def send_proposes(self):\n neighbors = self.model.space.get_neighbors(self.pos, self.range, include_center=False)\n neighbors = list(filter(lambda x: x.type == 'guest', neighbors))\n\n if len(neighbors) > 0:\n options = list(map(lambda x: (x.role, self.action), neighbors))\n know = list(map(lambda x: self.knowledge[x], options))\n # print(\"Knowledges\", probs)\n probs = list(map(lambda x: np.exp(x), know))\n # print(\"Softmax\", probs)\n probs = list(map(lambda x: x / sum(probs), probs))\n # print(\"Normed\", probs)\n if len(neighbors) > 1:\n print(self.unique_id, neighbors, probs, know)\n\n other_agent = random.choices(neighbors, probs)[0]\n self.propose_interaction(other_agent, self.action)", "def __deref__(self) -> \"gr::beamforming::beamformer *\":\n return _beamforming_swig.beamformer_sptr___deref__(self)", "def get_actions(self, ally: Set['Entity'], enemy: Set['Entity']) -> Tuple[\n Set['Action'], Set['Entity'], Set['Entity']]:\n available_actions = set()\n for action in self.actions:\n if action.cool_down.name not in [effect.name for effect in self.effects.effects]:\n available_actions.add(action)\n # print(f'{self.name_color} has {[action.name for action in available_actions]}')\n return available_actions, ally, enemy", "def agent_start(self,thisObs): \n action={'vol':0,'price':0}\n \n \"\"\"Changes for Boltzman Exploration\"\"\"\n #choice=self.pick_action_from_dist()\n #action_bin=self.prob_dist_action[choice]\n #action=self.unbin_action(action_bin,thisObs)\n \n \"\"\"Changes for epsilon greedy method\"\"\"\n action= self.return_random_action(thisObs)\n \n self.lastAction=action\n self.lastObs=thisObs\n return action", "def propagate(self):\n self._check_status(STATUS_IDLE)\n\n # Notify listeners\n self._notify_listeners_start_operation(listener.OPERATION_PROPAGATE)\n\n # Propagate model\n self._set_status(STATUS_PROPAGATING)\n psol = self.agent.propagate()\n self._set_status(STATUS_IDLE)\n\n # Notify listeners\n for lstnr in 
self.listeners:\n lstnr.new_result(self, psol)\n self._notify_listeners_end_operation()\n\n return psol", "def water_giving(self):\n #\n self.clock.start()\n self.show_give_buttons()\n self.show_people()\n in_conversation = False\n last_delay = 0\n #\n while True:\n for button, mouse_button in self.buttons_clicked:\n if button.tag == 'give':\n for delay in self.give_water_to(button.person):\n yield delay\n #\n if not self.awaiting_conversations and not self.end_of_day:\n conversation_item = self.conversation.getAndUseNext(state='day-{0}'.format(self.clock.day + 1), situation='main-story')\n ###\n if conversation_item:\n self.log.debug('Conversation item: {0}'.format(conversation_item))\n guid = conversation_item.guid\n other_item = self.conversation.getEntry(guid - 1)\n self.log.debug('Previous item is {0}'.format(other_item))\n ###\n #\n if conversation_item:\n in_conversation = True\n #\n # Pause so that talk button doesn't immediately appear - visually this confuses the user\n yield last_delay\n last_delay = len(conversation_item.conversation_text) * S['talking-time-per-letter']\n #\n self.hide_give_buttons()\n self.ready_to_talk(conversation_item)\n elif in_conversation:\n yield 1\n self.show_give_buttons()\n self.hide_people()\n in_conversation = False\n #\n # Are we at the drinking\n if self.end_of_day:\n self.awaiting_conversations.clear()\n last_delay = 0\n #\n for delay in self.the_drinking():\n yield delay\n self.clock.start()\n if self.clock.day == 7:\n self.nextState(self.mid_point_onwards())\n #\n yield 0" ]
[ "0.6572719", "0.60047936", "0.58963394", "0.5791217", "0.5663534", "0.5584099", "0.5567305", "0.550817", "0.54517233", "0.53226274", "0.53167677", "0.5304004", "0.51742756", "0.51660204", "0.5156423", "0.5151522", "0.5149159", "0.51191115", "0.5110184", "0.50023305", "0.49014044", "0.48974186", "0.48856696", "0.48717213", "0.48576158", "0.48263758", "0.48238376", "0.4794923", "0.47802514", "0.4777317", "0.47375414", "0.47342643", "0.47330508", "0.47235933", "0.47214907", "0.47038686", "0.4700535", "0.46783844", "0.4670095", "0.46555972", "0.46498594", "0.46416104", "0.46301273", "0.4628875", "0.45919585", "0.45910013", "0.45768493", "0.45716596", "0.45619082", "0.45606348", "0.4558643", "0.4555857", "0.45529786", "0.45463306", "0.4532221", "0.4524917", "0.45221063", "0.45176506", "0.450265", "0.4497106", "0.4493758", "0.44924662", "0.4491148", "0.44896567", "0.44734633", "0.44718158", "0.44714567", "0.44689193", "0.44680658", "0.4462624", "0.44610646", "0.44477996", "0.44459993", "0.44408727", "0.44385377", "0.44377246", "0.44306394", "0.44300985", "0.44280094", "0.44221318", "0.4420451", "0.44194347", "0.4415675", "0.44110662", "0.44064987", "0.4403355", "0.44029188", "0.43976593", "0.4394227", "0.43818322", "0.43778467", "0.43771356", "0.43744156", "0.4367568", "0.4365952", "0.43535283", "0.43529508", "0.43506232", "0.43500894", "0.4344913" ]
0.74441326
0
Checks if an incoming belief is in conflict with internal beliefs. A conflict occurs when the belief is of opposite valence to a current belief. This method does not update own or perceived beliefs.
def belief_conflict(self, args): goal, belief = args if isinstance(belief, Beliefs): if self.belief_module.is_conflicting_belief(belief): return [{}] return []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_conflict(self):\n for diffstat in self.diffstat():\n if diffstat.has_conflict:\n return True\n return False", "def checkConflicts(self):\n\t\treturn", "def refine_conflict(self):\n self._raise_not_supported()", "def check_conflicts(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tcfg = self.cfg\n\t\t# Now consider conflicts\n\t\tself.log('PHASE: conflicts', level=logging.DEBUG)\n\t\terrs = []\n\t\tself.pause_point('\\nNow checking for conflicts between modules', print_input=False, level=3)\n\t\tfor module_id in self.module_ids():\n\t\t\tif not cfg[module_id]['shutit.core.module.build']:\n\t\t\t\tcontinue\n\t\t\tconflicter = self.shutit_map[module_id]\n\t\t\tfor conflictee in conflicter.conflicts_with:\n\t\t\t\t# If the module id isn't there, there's no problem.\n\t\t\t\tconflictee_obj = self.shutit_map.get(conflictee)\n\t\t\t\tif conflictee_obj is None:\n\t\t\t\t\tcontinue\n\t\t\t\tif ((cfg[conflicter.module_id]['shutit.core.module.build'] or\n\t\t\t\t self.is_to_be_built_or_is_installed(conflicter)) and\n\t\t\t\t (cfg[conflictee_obj.module_id]['shutit.core.module.build'] or\n\t\t\t\t self.is_to_be_built_or_is_installed(conflictee_obj))):\n\t\t\t\t\terrs.append(('conflicter module id: ' + conflicter.module_id + ' is configured to be built or is already built but conflicts with module_id: ' + conflictee_obj.module_id,))\n\t\treturn errs", "def check_influence_sanity(self):\n for influence in crest.get_all_influences(self.model):\n assert influence._name is not None, f\"There is an Influence in {influence._parent._name} ({influence._parent.__class__.__name__}) whose name is 'None'\"\n assert influence._name != \"\", f\"There is an Update in {influence._parent._name} ({influence._parent.__class__.__name__}) whose name is empty string\"\n\n assert isinstance(influence.source, crest.Port), f\"Influence {influence._name}'s source is not a crest.Port\"\n assert influence.source in api.get_sources(influence._parent), f\"Influence's source {influence.source._name} ({influence.source}) is not in the sources of entity {influence._parent._name} ({influence._parent})\"\n\n assert isinstance(influence.target, crest.Port), f\"Influence {influence._name}'s target is not a crest.Port\"\n assert influence.target in api.get_targets(influence._parent), f\"Influence's target {influence.target._name} ({influence.target}) is not in the targets of entity {influence._parent._name} ({influence._parent})\"\n\n assert isinstance(influence.function, (crestml.LearnedFunction, types.FunctionType)), f\"Influence {influence._name}'s function needs to be of type types.FunctionType or crestdsl.ml.LearnedFunction\"\n assert len(inspect.signature(influence.function).parameters) == 1, f\"An influence should not have arguments (except the input value)\"", "def checkConflicts(self):\n\t\tapDisplay.printError(\"you did not create a 'checkConflicts' function in your script\")\n\t\traise NotImplementedError()", "def violated(self) -> bool:\n ...", "def checkSpikeBonding (self):\r\n stable = True # If any bonds break this will be set to false\r\n stabilityChecker = True # Checks the result of each function call, if set to false then stable will be set to false\r\n # Go through each atom\r\n for i in range(len(self.mol)):\r\n # Go through each spike\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.mol[i].spikeArray[j].bonded == True:\r\n stabilityChecker = self.stabilitySpike(self.mol[i].spikeArray[j])\r\n if stabilityChecker == False:\r\n stable = False\r\n #print (stable)\r\n if stable == 
True:\r\n print(\"No Bonds have broken \\n\")\r\n else:\r\n print (\"Bonds have broken \\n\")\r\n return stable", "def _resolve_ball_collisions(self) -> bool:\n\n bln_naughty = True\n lng_naughty_loop_count = 0\n lng_naughty_loop_limit = 10\n while bln_naughty:\n lng_naughty_loop_count += 1\n if lng_naughty_loop_count > lng_naughty_loop_limit:\n return False\n bln_naughty = False\n\n \"\"\" Ball vs Ball \"\"\"\n for sprBall1, sprBall2 in TrashyPhysics.collision_pairs_self(\n self.grpBalls, fncCollided=TrashyPhysics.balls_collided):\n bln_naughty = True\n TrashyPhysics.bounce_balls(sprBall1, sprBall2)\n\n \"\"\" Ball vs Bot \"\"\"\n for sprBall, sprRobot in TrashyPhysics.collision_pairs(\n self.grpBalls, self.grpRobots,\n fncCollided=TrashyPhysics.ball_robot_collided):\n bln_naughty = True\n TrashyPhysics.bounce_ball_off_bot(sprRobot, sprBall)\n\n \"\"\" Ball vs Wall \"\"\"\n for ball in filter(lambda x: TrashyPhysics.collided_wall(x), self.lstBalls):\n bln_naughty = True\n TrashyPhysics.bounce_ball_off_wall(ball)\n\n \"\"\" Ball vs Bumper \"\"\"\n # todo\n\n return True", "def has_bond_crossing(self):\n return self.count_bond_collisions() > 0", "def decide_infect(self, other):\n if (self._is_infected and not other._is_infected):\n if random.random() < self._transmission_prob and random.random() < other._infection_prob:\n other._is_infected = True\n\n if other._is_infected and not self._is_infected:\n if random.random() < other._transmission_prob and random.random() < self._infection_prob:\n self._is_infected = True", "def refine(self): # pylint: disable=R0201\n return True", "def isInternal(self):\n if self.data.depend_er_job == self.data.depend_on_job:\n return True\n return False", "def fusable(self) -> bool:\n if not self._pre_check() or not self.has_crossing_len2_ob():\n return False\n new_tiling = self._tiling.add_obstructions(self.obstructions_to_add())\n\n return (\n self._tiling == new_tiling\n and self._check_isolation_level()\n and all(\n self._can_component_fuse_assumption(assumption)\n for assumption in self._tiling.assumptions\n )\n )", "def updateInconsistency(self, x : pd.Series):\n problemname = x.get(Key.ProblemName)\n pb = x.get(Key.PrimalBound)\n db = x.get(Key.DualBound)\n\n obs = self.getObjSense(problemname, x)\n\n if pd.isnull(obs):\n obs = ObjectiveSenseCode.MINIMIZE\n\n if not problemname:\n return\n\n\n #\n # for inconsistency checks, we only consider problems that are consistent\n # with the reference information.\n #\n if self.isReferenceConsistent(x) != ProblemStatusCodes.Ok:\n return\n\n # do not trust versions/settings/solvers that returned an infeasible solution\n if self.isSolInfeasible(x) or (not pd.isnull(pb) and not self.isSolFeasible(x)):\n return\n\n pb = self.getPbValue(pb, obs)\n db = self.getDbValue(db, obs)\n bestpb = self.bestpb.get(problemname, np.inf if obs == ObjectiveSenseCode.MINIMIZE else -np.inf)\n bestpb = min(bestpb, pb) if obs == ObjectiveSenseCode.MINIMIZE else max(bestpb, pb)\n\n bestdb = self.bestdb.get(problemname, -np.inf if obs == ObjectiveSenseCode.MINIMIZE else np.inf)\n if x.get(Key.SolverStatus) == SolverStatusCodes.Infeasible:\n db = infty() if obs == ObjectiveSenseCode.MINIMIZE else -infty()\n\n bestdb = max(bestdb, db) if obs == ObjectiveSenseCode.MINIMIZE else min(bestdb, db)\n\n if (obs == ObjectiveSenseCode.MINIMIZE and not self.isLE(bestdb, bestpb)) or (obs == ObjectiveSenseCode.MAXIMIZE and not self.isGE(bestdb, bestpb)):\n self.inconsistentset.add(problemname)\n else:\n self.bestdb[problemname] = bestdb\n 
self.bestpb[problemname] = bestpb", "def refine_conflict(self):\n # Start refine conflict\n self._check_status(STATUS_IDLE)\n self._set_status(STATUS_REFINING_CONFLICT)\n self._notify_listeners_start_operation(listener.OPERATION_REFINE_CONFLICT)\n\n # Ensure cpo model is generated with all constraints named\n namecstrs = self.context.model.name_all_constraints\n if not namecstrs:\n self.context.model.name_all_constraints = True\n self.cpostr = None\n self.agent.solver.model_sent = False\n\n # Refine conflict\n msol = self.agent.refine_conflict()\n\n # Restore previous name constraints indicator\n self.context.model.name_all_constraints = namecstrs\n\n # Call listeners with conflict result\n for lstnr in self.listeners:\n lstnr.new_result(self, msol)\n\n # End refine conflict\n self._set_status(STATUS_IDLE)\n self._notify_listeners_end_operation()\n\n return msol", "def inferrable(self) -> bool:\n return self._strategy.inferrable", "def partial_change(self):\n return self.attempted_change() and not all(self._get_field_data())", "def b3_correctness(el_a, el_b, system_el2kbid, gold_el2kbid):\n correct = False\n\n if(inSameSet(el_a, el_b, system_el2kbid) and \n inSameSet(el_a, el_b, gold_el2kbid) and\n sameLinking(el_a, el_b, system_el2kbid, gold_el2kbid) #THIS CONDITION DEPARTS FROM THE ORIGINAL BCUBED (extesion for the Entity Linking problem)\n ):\n correct = True\n\n return correct", "def attempted_change(self):\n return any(self._get_field_data())", "def checkForSideChangeRequest(self):\n inThirdRound = self.wonRounds[\"Team1\"] == 1 and self.wonRounds[\"Team2\"] == 1\n oneTeamAt11AndOtherTeamUnder11 = (self.counter[\"Team1\"] == 11 and self.counter[\"Team2\"] < 11) or\\\n (self.counter[\"Team2\"] == 11 and self.counter[\"Team1\"] < 11)\n if inThirdRound and oneTeamAt11AndOtherTeamUnder11:\n self.__notifySideChangeRequest()", "def fix_has_no_advisory(self):\n fixed_in = self.fixed_artifact()\n return fixed_in and fixed_in.vendor_no_advisory", "def fail(self):\n rows, cols, _ = self.bird.img.shape\n # find the top-left coordinates of bird image\n x_b, y_b = self.bird.x + self.env.pad - cols//2, max(self.bird.y + self.env.pad - rows//2, 0)\n \n # check if the bird square intersects with some environment obstacles\n isCollision = (self.env.occ[y_b:y_b + rows, x_b:x_b + cols]).any()\n \n return isCollision", "def check_stability(self):\n\n blocking_pairs = []\n for resident in self.residents:\n for hospital in self.hospitals:\n if (\n _check_mutual_preference(resident, hospital)\n and _check_resident_unhappy(resident, hospital)\n and _check_hospital_unhappy(resident, hospital)\n ):\n blocking_pairs.append((resident, hospital))\n\n self.blocking_pairs = blocking_pairs\n return not any(blocking_pairs)", "def check_bollinger(self):\n upper, lower = self.bollinger_bands()\n if self.daily['Adj Close'][-1] > upper[-1]:\n self.debug += '\\nAbove upper bollinger: sells + 1'\n self.sells += 1\n elif self.daily['Adj Close'][-1] < lower[-1]:\n self.debug += '\\nBelow lower bollinger: buys + 1'\n self.buys += 1", "def need_attention(self):\n msg = [\"not staged\", \"behind\", \"ahead\", \"Untracked\"]\n status_msg = self.status()\n if any([each in status_msg for each in msg]):\n return True\n return False", "def high_business_impact(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"high_business_impact\")", "def high_business_impact(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"high_business_impact\")", "def _check_collisions(self):\n\t\tif 
pygame.sprite.spritecollide(\n\t\t\tself.bolan, \n\t\t\tself.obstacles.obstacles,\n\t\t\tFalse, \n\t\t\tpygame.sprite.collide_mask):\n\t\t\t\tself.is_play = False\n\t\t\t\tself.is_gameover = True\n\t\t\t\tself.bolan.image = self.settings.bolan_dead_image", "def collide_with_flower(self, flower):\n pass", "def bullish_engulfing(self):\n self.data['bullish_engulfing'] = ((self.data['Open'].shift(1) > self.data['Close'].shift(1)) & \\\n (self.data['Close'] > self.data['Open']) & \\\n (self.data['Close'] >= self.data['Open'].shift(1)) & \\\n (self.data['Close'].shift(1) >= self.data['Open']) & \\\n ((self.data['Close']-self.data['Open']) > (self.data['Open'].shift(1)-self.data['Close'].shift(1))))", "def validate_collision(self):\n pass", "def is_equivalence(self) -> bool:", "def is_almost_active(self,\n env\n ):\n flag = any([con.is_almost_active(env) for con in self.constraints])\n return flag", "def test_bayes_update_nondiscriminating(self):\r\n # deletion of non-discriminating evidence should not affect result\r\n for obs, exp in zip(bayes_updates(self.deleted), self.result):\r\n self.assertFloatEqualAbs(obs, exp, 1e-11)\r\n # additional non-discriminating evidence should not affect result\r\n for obs, exp in zip(bayes_updates(self.extra), self.result):\r\n self.assertFloatEqualAbs(obs, exp, 1e-11)", "def is_penalty_event(self):\n if hasattr(self, \"fouls_to_give\"):\n team_ids = list(self.current_players.keys())\n offense_team_id = self.get_offense_team_id()\n defense_team_id = (\n team_ids[0] if offense_team_id == team_ids[1] else team_ids[1]\n )\n if self.fouls_to_give[defense_team_id] == 0:\n if isinstance(self, (Foul, FreeThrow, Rebound)):\n # if foul or free throw or rebound on a missed ft\n # check foul event and should return false is foul\n # was shooting foul and team had a foul to give\n if isinstance(self, Foul):\n foul_event = self\n elif isinstance(self, FreeThrow):\n foul_event = self.foul_that_led_to_ft\n else:\n # if rebound is on missed ft, also need to look at foul that led to FT\n if not self.oreb and isinstance(self.missed_shot, FreeThrow):\n foul_event = self.missed_shot.foul_that_led_to_ft\n else:\n return True\n if foul_event is None:\n return True\n fouls_to_give_prior_to_foul = (\n foul_event.previous_event.fouls_to_give[defense_team_id]\n )\n if fouls_to_give_prior_to_foul > 0:\n return False\n return True\n return False", "def check_loss(self):\n return POKEMON in self.get_game()", "def is_weak(self):\n\n # set the minimum number of keypooints\n keypoint_threshold = 20 if self.detector_method == 'FAST'\\\n else 5\n \n # check if the tracker has less than minimum keypoints to track\n c1 = self.old_points.shape[0] < keypoint_threshold\n \n x,y,w,h = self.bounding_box\n row, col = self.fg_mask.shape\n \n # check if the window is out of the frame\n c2 = x >= col-1 or x < 0\n c3 = y >= row-1 or y < 0\n c4 = x+w >= col-1\n c5 = y+h >= row-1\n \n return c1+c2+c3+c4+c5", "def __gitBisectBad(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitBisect(self.project.getProjectPath(), \"bad\") or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Bisect\"),\n self.tr(\"\"\"The project should be reread. 
Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()", "def is_caught_up_well_enough_for_government_work():\n return config.CAUGHT_UP or (config.BLOCKCHAIN_SERVICE_LAST_BLOCK and config.CURRENT_BLOCK_INDEX >= config.BLOCKCHAIN_SERVICE_LAST_BLOCK - 1)", "def is_concealed(self) -> bool:\n # return not self._exposed\n return sum(self.concealed_part.values()) == 13", "def checkInternalConsistency(self):\n return _libsbml.SBMLDocument_checkInternalConsistency(self)", "def ok_to_confirm_ball_via_playfield_switch(self):\n if not self.balls:\n return True\n else:\n return False\n\n # todo look for other incoming balls?", "def isReferenceConsistent(self, x : pd.Series) -> str :\n\n problemname = x.get(Key.ProblemName)\n pb = x.get(Key.PrimalBound)\n db = x.get(Key.DualBound)\n obs = self.getObjSense(problemname, x)\n sstatus = x.get(Key.SolverStatus)\n\n reference = self.referencedict.get(problemname, (None, None))\n\n logger.debug(\"Checking against reference {} for problem {}\".format(reference, problemname))\n\n referencepb = self.getPbValue(reference[self.__primalidx__], obs)\n referencedb = self.getDbValue(reference[self.__dualidx__], obs)\n\n if self.isUnkn(reference):\n return ProblemStatusCodes.Ok\n\n elif self.isInf(reference):\n if sstatus != SolverStatusCodes.Infeasible and not pd.isnull(pb) and not isInf(pb):\n return ProblemStatusCodes.FailSolOnInfeasibleInstance\n\n elif self.isFeas(reference):\n if sstatus == SolverStatusCodes.Infeasible:\n return ProblemStatusCodes.FailDualBound\n\n else:\n\n pb = self.getPbValue(pb, obs)\n db = self.getDbValue(db, obs)\n if not self.isPbReferenceConsistent(pb, referencedb, obs):\n return ProblemStatusCodes.FailObjectiveValue\n if sstatus == SolverStatusCodes.Infeasible and abs(referencepb) < infty():\n return ProblemStatusCodes.FailDualBound\n if not self.isDbReferenceConsistent(db, referencepb, obs):\n return ProblemStatusCodes.FailDualBound\n\n return ProblemStatusCodes.Ok", "def discrepancy_resolved(self):\n # If there's a discrepancy and distance change matches the existing data, we're good.\n if self.distance_change == self.existing_data:\n return True\n # If recommend_updates, i.e., if self.distance_change == self.new_data, we'll update the data and we're good\n elif self.recommend_updates:\n return True\n else:\n return False", "def _check_family(self):\n for (s, (b, c)), (cond, ref) in families.items():\n if s != self.SYMBOL or len(b) != self._.d:\n continue\n vars = tuple(set(sum(map(variables, b + c), ())))\n sols = _solve([SR(l) == r for l, r\n in zip(self._.b[:-1] + self._.c[1:], b + c)],\n vars)\n if any(checkConditions(cond, sol) for sol in sols\n if is_integral(sol)):\n raise InfeasibleError(refs=ref)", "def is_bijective(self):\n return self.is_injective() and self.is_surjective()", "def _check_overlap(self, fe_commit):\n # +++ Avoid O(b branches * r rev) checks when\n # overlap is impossible because current branch\n # overlaps no other branch.\n if self._current_branch not in self._overlapping_branch_list():\n return\n\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_to_depot_path(gwt_path)\n\n for branch in self._overlapping_branch_list():\n if branch == self._current_branch:\n continue\n if not branch.intersects_depot_path(depot_path):\n continue\n\n LOG.debug(\"_check_overlap() branch {br1} <> {br2}\"\n \" gwt={gwt:<40} {dp}\\n{view}\"\n .format(\n br1 = p4gf_util.abbrev(self._current_branch.branch_id)\n , br2 = p4gf_util.abbrev(branch.branch_id)\n , gwt = 
gwt_path\n , dp = depot_path\n , view = \"\\n\".join(branch.view_p4map.as_array())\n ))\n\n if self._current_branch.is_new_fp_from_push or branch.is_new_fp_from_push:\n current_branch_name = self._current_branch.git_branch_name\n if self._current_branch.is_new_fp_from_push:\n current_branch_name += '(new)'\n other_branch_name = branch.git_branch_name\n if branch.is_new_fp_from_push:\n other_branch_name += '(new)'\n human_msg = (_(\n \"Perforce: Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\\n\"\n \" You are attempting to push and create a new fully populated branch\\n\"\n \" with paths which overlap another branch. Contact your admin\\n\"\n \" to configure non-conflicting destination branch paths.\\n\"\n \" Branches: '{b1}', '{b2}'\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , b1 = current_branch_name\n , b2 = other_branch_name ))\n else:\n human_msg = (_(\n \"Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\"\n \" Paths that overlap multiple Git Fusion branches are read-only.\"\n \" Branches: '{b1}', '{b2}'\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , b1 = self._current_branch.branch_id\n , b2 = branch.branch_id ))\n raise PreflightException(human_msg)", "def isInconsistent(self, problemname : str) -> bool:\n return problemname in self.inconsistentset", "def bolt_check(self):\n for x in self.get_bolts():\n if x.get_velocity() > 0:\n self.set_plyrbolts(1)", "def should_grow_on_food_collision(self):\n return True", "def has_compatible_ligands(self, identity):\n return ((len(self.bad_coords[identity]) == 0) and\n (not self.BAD_COORD_RESIDUE in self.inaccuracies[identity]))", "def convergence_check(self):\n air = self.air_alias.val\n flue_gas = self.fuel_alias.val + '_fg'\n fuel = self.fuel_alias.val\n\n for c in self.outl:\n if not c.fluid.val_set[air]:\n if c.fluid.val[air] > 0.95:\n c.fluid.val[air] = 0.95\n if c.fluid.val[air] < 0.5:\n c.fluid.val[air] = 0.5\n\n if not c.fluid.val_set[flue_gas]:\n if c.fluid.val[flue_gas] > 0.5:\n c.fluid.val[flue_gas] = 0.5\n if c.fluid.val[flue_gas] < 0.05:\n c.fluid.val[flue_gas] = 0.05\n\n if not c.fluid.val_set[fuel]:\n if c.fluid.val[fuel] > 0:\n c.fluid.val[fuel] = 0\n\n c.target.propagate_fluid_to_target(c, c.target)\n\n for i in self.inl:\n if i.m.val_SI < 0 and not i.m.val_set:\n i.m.val_SI = 0.01\n\n for c in self.outl:\n if c.m.val_SI < 0 and not c.m.val_set:\n c.m.val_SI = 10\n c.target.propagate_fluid_to_target(c, c.target)\n\n if self.lamb.val < 1 and not self.lamb.is_set:\n self.lamb.val = 2", "def is_bipartite(self):\n # TO DO: Call coloring algorithm\n return False", "def is_independent(self, A):\n if self.variant.is_bipartite():\n raise ValueError()\n return not any(self.is_edge(v, w) for (v, w) in combinations(A, 2))", "def HELPER_update_belief(self, old_belief, observation, gamma):\n observation = int(observation)\n #print \"old_belief:\", old_belief, type(old_belief)\n #print \"observation:\", observation, type(observation)\n #print \"gamma:\", gamma, type(gamma)\n\n diffs = [0.1*i for i in range(self.num_difficulty_bins)]\n new_belief = util.updateBelief(old_belief, None, observation, diffs, gamma)\n #print \"new_belief\", new_belief, type(new_belief)\n return new_belief", "def verify_ballot_consistency(self) -> bool:\n sbb_contents = self._sbb.get_sbb_contents()\n \n # First, validate the commitment consistency with the initial vote lists and final vote lists.\n for list_idx, proof in 
sbb_contents.consistency_proof.items():\n for vote_idx in range(len(proof)):\n proved_sv = proof[vote_idx]\n tu_list = []\n tv_list = []\n for row_idx, sv in enumerate(proved_sv):\n # Ensure that we are consistent with the initial and the final commitments\n if sv.get('u', None) is not None:\n val_init = sv['u_init']\n val_fin = sv['u_fin']\n val_uv = sv['u']\n val_t = sbb_contents.t_values[list_idx][row_idx][vote_idx]['tu']\n original_commitment = sbb_contents.svr_commitments[row_idx][vote_idx]['com_u']\n final_commitment = sbb_contents.vote_lists[list_idx][vote_idx][row_idx].com_u\n else:\n val_init = sv['v_init']\n val_fin = sv['v_fin']\n val_uv = sv['v']\n val_t = sbb_contents.t_values[list_idx][row_idx][vote_idx]['tv']\n original_commitment = sbb_contents.svr_commitments[row_idx][vote_idx]['com_v']\n final_commitment = sbb_contents.vote_lists[list_idx][vote_idx][row_idx].com_v\n key_init = sv['k_init']\n key_fin = sv['k_fin']\n \n # Verify the input and output commitments\n com_init = util.get_COM(util.bigint_to_bytes(key_init), util.bigint_to_bytes(val_init))\n com_fin = util.get_COM(util.bigint_to_bytes(key_fin), util.bigint_to_bytes(val_fin))\n if com_init != original_commitment:\n raise Exception(\"Failed to open the initial vote commitment\")\n if com_fin != final_commitment:\n raise Exception(\"Failed to open the final vote commitment\")\n \n # Verify the t-values\n if util.t_val(util.bigint_to_bytes(val_init), util.bigint_to_bytes(val_uv), self._M) != val_t:\n raise Exception(\"Failed to verify t value\")\n \n # Add t-values to their respective lists for lagrange checks\n tu_list.append(sbb_contents.t_values[list_idx][row_idx][vote_idx]['tu'])\n tv_list.append(sbb_contents.t_values[list_idx][row_idx][vote_idx]['tv'])\n \n # Check that tu_list and tv_list lagrange to (t, -t)\n rows = len(proved_sv)\n tu0 = self._lagrange(tu_list, rows, rows-1, self._M)\n tv0 = self._lagrange(tv_list, rows, rows-1, self._M)\n if util.val(tu0, tv0, self._M) != 0:\n # TODO: This does not work\n #raise Exception(\"Failed lagrange verification of t values\")\n pass\n return True", "def has_unapplied_change(self):\n for name in self.params_to_display.keys():\n if self._tkvar_changed(name):\n return True\n return False", "def collision_check(self):\n return True", "def checkChanges(self):\n results = [\n self.values[1],\n self.values[f\"-{self.values[1]}-\"],\n self.values[\"-TOGGLE-ALL-\"],\n self.values[\"-INVITED-\"],\n self.values[\"-ASSIGNED-\"],\n self.values[\"-GRADED-\"],\n self.values[\"-BLOCKED-\"] ]\n\n if results == self.oldResults[1::]:\n self.oldResults = [False] + results\n\n elif (self.values[f\"-{self.values[1]}-\"] == [] and \\\n self.values[\"-TOGGLE-ALL-\"] == False and \\\n results[0] != self.oldResults[1]):\n self.window['-OUTPUT-'].update('')\n self.oldResults = [False] + results\n\n else:\n self.oldResults = [True] + results", "def belief_revision(self):\n\n # Store the coherence of the belief_network before the belief revision has taken place\n network_history = self.belief_network.copy()\n self.coherence_history = self.coherence(network_history)\n\n # Add the newly communicated nodes to the belief_network\n if self.communicated_nodes is not None:\n for node in self.communicated_nodes:\n self.belief_network.nodes[node[0]]['truth_value'] = node[1]\n self.belief_network.nodes[node[0]]['type'] = 'com'\n\n # Get the inferred nodes and its combinations of truth values in order to explore different coherence values\n inferred_nodes = [x for x, y in 
self.belief_network.nodes(data=True) if y['type'] == 'inf']\n combinations = list(itertools.product([True, False], repeat=len(inferred_nodes)))\n\n # Calculate the coherence for all possible combinations\n\n # Initialise a list to store the different coherence values in\n coherence_values = []\n\n for n in range(len(combinations)):\n # Initialise a count for the number of inferred nodes\n i = 0\n for inferred_node in inferred_nodes:\n self.belief_network.nodes[inferred_node]['truth_value'] = combinations[n][i]\n i += 1\n coherence_values.append(self.coherence(self.belief_network))\n\n # Store all the indices of the maximum coherence values in a list and pick one randomly\n max_coherence = max(coherence_values)\n max_indices = [i for i in range(len(coherence_values)) if coherence_values[i] == max_coherence]\n nodes_truth_values_index = random.choice(max_indices)\n\n # Set the truth values of the inferred nodes to (one of) the maximum coherence option(s)\n i = 0\n for inferred_node in inferred_nodes:\n self.belief_network.nodes[inferred_node]['truth_value'] = combinations[nodes_truth_values_index][i]\n i += 1\n\n # If at least one node is flipped, belief revision has taken place and the coherence should be compared\n # with the previous belief_network before belief revision (trouble_identification)\n # print(\"Network after belief revision:\\n\", self.belief_network.nodes(data=True))\n # print(\"Network before belief revision:\\n\", network_history.nodes(data=True))\n if not nx.is_isomorphic(self.belief_network, network_history, node_match=lambda x, y: x['truth_value'] ==\n y['truth_value']):\n # print(\"Trouble identification\")\n repair_initiation = self.trouble_identification()\n else:\n # print(\"No trouble identification\")\n repair_initiation = False\n\n return repair_initiation, self.belief_network", "def trouble_identification(self):\n\n # Initiate repair if the coherence is smaller than one time step back\n if self.coherence(self.belief_network) < self.coherence_history:\n repair_initiation = self.formulate_request()\n else:\n # print(\"Not formulating a request\")\n repair_initiation = False\n\n return repair_initiation", "def hasConflicts(self):\n partners = {}\n for first, second in self:\n #print >>sys.stderr, \"first:\", first, \"second:\", second\n if first is None:\n if second is None:\n continue #no pairing info\n else:\n first, second = second, first #swap order so None is 2nd\n if second is None: #check first isn't paired\n if partners.get(first, None) is not None:\n print >>sys.stderr, \"here1\"\n print >>sys.stderr, \"first:\", first, \"second:\", second\n return True\n else:\n partners[first] = None\n else: #first and second were both non-empty: check partners\n if first in partners:\n if partners[first] != second:\n print >>sys.stderr, \"here2\"\n print >>sys.stderr, \"first:\", first, \"second:\", second, \"partners[first]\", partners[first]\n print \"partners:\", partners\n return True\n if second in partners:\n if partners[second] != first:\n print >>sys.stderr, \"here3\"\n print >>sys.stderr, \"first:\", first, \"second:\", second, \"partners[second]:\", partners[second]\n return True\n #add current pair to the list of constraints\n partners[first] = second\n partners[second] = first\n #can only get here if there weren't conflicts\n return False", "def check_collisions(self):", "def hasFallthrough(self) -> bool:\n ...", "def fusable(self) -> bool:\n obs_fusable = self._can_fuse_set_of_gridded_perms(self.obstruction_fuse_counter)\n req_fusable = all(\n 
self._can_fuse_set_of_gridded_perms(counter)\n for counter in self.requirements_fuse_counters\n )\n ass_fusable = all(\n self._can_fuse_assumption(assumption, counter)\n for assumption, counter in zip(\n self._tiling.assumptions, self.assumptions_fuse_counters\n )\n )\n return (\n obs_fusable\n and req_fusable\n and ass_fusable\n and self._check_isolation_level()\n )", "def is_bipartite(self):\n return True", "def isBlocked(self):\n cal = self.request.get('form.widgets.calendarConfig')\n if cal is not None:\n return (cal == ['bloque'])\n wrapper = getSAWrapper('gites_wallons')\n session = wrapper.session\n for heb in getHebergementsForProprio(self.context, session):\n if heb.heb_calendrier_proprio == 'bloque':\n return True\n return False", "def _check_force_backward_true(self):\n prototxt_file = self.settings.caffevis_deploy_prototxt\n\n found = False\n with open(prototxt_file, 'r') as ff:\n for line in ff:\n fields = line.strip().split()\n if len(fields) == 2 and fields[0] == 'force_backward:' and fields[1] == 'true':\n found = True\n break\n\n if not found:\n print '\\n\\nWARNING: the specified prototxt'\n print '\"%s\"' % prototxt_file\n print 'does not contain the line \"force_backward: true\". This may result in backprop'\n print 'and deconv producing all zeros at the input layer. You may want to add this line'\n print 'to your prototxt file before continuing to force backprop to compute derivatives'\n print 'at the data layer as well.\\n\\n'", "def _alienCollide(self):\n for b in self._bolts:\n if self._ship != None and self._ship.collides(b):\n self._ship = None\n self._bolts = []\n self._key = False\n self._lives -= 1", "def checkBijectivity(self, valuePreviouslySet):\n secondGet = pfw.get(self._paramPath)\n\n if secondGet != valuePreviouslySet:\n return secondGet, False\n\n return secondGet, True", "def test_wrong_bridge_config(self):\n stored_target_deploy_status = self.test_config.get(\n 'target_deploy_status', {})\n new_target_deploy_status = stored_target_deploy_status.copy()\n new_target_deploy_status[self.application_name] = {\n 'workload-status': 'blocked',\n 'workload-status-message': 'Wrong format',\n }\n if 'target_deploy_status' in self.test_config:\n self.test_config['target_deploy_status'].update(\n new_target_deploy_status)\n else:\n self.test_config['target_deploy_status'] = new_target_deploy_status\n\n with self.config_change(\n self.config_current(\n application_name=self.application_name,\n keys=['bridge-interface-mappings']),\n {'bridge-interface-mappings': 'incorrect'}):\n logging.info('Charm went into blocked state as expected, restore '\n 'configuration')\n self.test_config[\n 'target_deploy_status'] = stored_target_deploy_status", "def _aliensCollision(self):\r\n for bolt in self._bolts:\r\n for row in range(ALIEN_ROWS):\r\n for col in range(ALIENS_IN_ROW):\r\n if (not self.getAliens()[row][col] is None and\r\n self.getAliens()[row][col].collides(bolt)):\r\n self._score += 100*(ALIEN_ROWS-row)\r\n self._aliens[row][col] = None\r\n if not self._aliensound is None:\r\n self._alienexplode.play()\r\n self._bolts.remove(bolt)\r\n self._alienspeed = self._alienspeed*0.98\r\n return", "def ball_lost(self):\n self.num_balls_known = self.balls\n self.num_balls_missing = (self.machine.config['machine']\n ['balls installed'] - self.balls)\n self.num_balls_live = 0\n # since desired count doesn't change, this will relaunch them\n self.log.debug(\"Ball(s) Marked Lost. 
Known: %s, Missing: %s\",\n self.num_balls_known, self.num_balls_missing)\n\n # todo audit balls lost", "def __game_is_over(self):\n return not (self.__playing and self.__bricks_total > 0 and self.__num_lives > 0)", "def _ftolCheck(self):\n oldLoss = biggestRecentLoss(self.loss, self.lookback)\n newLoss = float(self.loss[-1])\n fracDiff = 2 * (oldLoss - newLoss)/(oldLoss + newLoss)\n \n if fracDiff < self.ftol:\n \n self.converged = True", "def inhabitant_check(self):\n\t\tchanged = False\n\t\tif self.happiness > self.__get_data(\"happiness_inhabitants_increase_requirement\") and \\\n\t\t\t self.inhabitants < self.inhabitants_max:\n\t\t\tself.inhabitants += 1\n\t\t\tchanged = True\n\t\t\tself.log.debug(\"%s: inhabitants increase to %s\", self, self.inhabitants)\n\t\telif self.happiness < self.__get_data(\"happiness_inhabitants_decrease_limit\") and \\\n\t\t self.inhabitants > 1:\n\t\t\tself.inhabitants -= 1\n\t\t\tchanged = True\n\t\t\tself.log.debug(\"%s: inhabitants decrease to %s\", self, self.inhabitants)\n\n\t\tif changed:\n\t\t\t# see http://wiki.unknown-horizons.org/index.php/DD/Economy/Supplying_citizens_with_resources\n\t\t\tself.alter_production_time( 1 + (float(self.inhabitants)/10))\n\t\t\tself._changed()", "def high_business_impact(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"high_business_impact\")", "def is_solved(self):\n raise NotImplementedError()", "def withholding_reconciliation(self):\n\n for inv_brw in self:\n move_ids = [move.id or False\n for move in (inv_brw.move_id, inv_brw.wh_move_id)]\n\n if not all(move_ids):\n continue\n\n line_ids = [line.id\n for move2 in (inv_brw.move_id, inv_brw.wh_move_id)\n for line in move2.line_id\n if line.account_id.id == inv_brw.account_id.id]\n\n if len(line_ids) < 2:\n continue\n\n # /!\\ NOTE: There could be some payments in the invoice let us\n # reconcile them too\n line_ids += [lin2.id for lin2 in inv_brw.payment_ids]\n line_ids = list(set(line_ids))\n\n line_ids = self.env['account.move.line'].browse(line_ids)\n line_ids.reconcile_partial()\n\n return True", "def is_irrecoverable(\r\n self,\r\n x=None,\r\n contact_forces=None,\r\n omega_cmd=None,\r\n ignore_force_check=False):\r\n if x is None:\r\n x = self.x\r\n\r\n psi = x[PSI_IDX]\r\n\r\n # upper ball falling off the lower ball\r\n if np.abs(psi) > np.pi / 2:\r\n return True\r\n\r\n # upper ball touching the ground\r\n if self.p.r2 > self.p.r1:\r\n psi_crit = np.arccos((self.p.r2 - self.p.r1) /\r\n (self.p.r2 + self.p.r1))\r\n if np.abs(psi) > psi_crit:\r\n return True\r\n\r\n # lift off: contact force between lower and upper ball <= 0\r\n if not ignore_force_check:\r\n if contact_forces is None:\r\n contact_forces = self.compute_contact_forces(x, omega_cmd)\r\n\r\n if np.dot(contact_forces[1], self._compute_e_S1S2(x)) <= 0:\r\n return True\r\n\r\n return False", "def TryBalanceWithWater(self): \n extra_waters = self._ExtraWaters()\n if extra_waters is None:\n # cannot balance the reaction with H2O only\n return False\n if extra_waters != 0:\n self._AddCompound('C00001', extra_waters)\n self._Dedup()\n return True", "def _user_assigned_bell(self, bell: Bell) -> bool:\n return not self._bot_assigned_bell(bell)", "def safety_check(self):\n rs = moveit_msgs.msg.RobotState()\n current_joint_angles = self._limb.joint_angles()\n for joint in current_joint_angles:\n rs.joint_state.name.append(joint)\n rs.joint_state.position.append(current_joint_angles[joint])\n result = self._sv.get_state_validity(rs, self._moveit_group)\n return result.valid", "def 
_check_alternative1_stop_conditions(self, changed):\n searching = self._iteration < self._max_iterations\n if not searching:\n self._notify(message=LocalSearchMessage.Stopped)\n elif self._target_fitness:\n if self._solution.fitness >= self._target_fitness:\n self._notify(message=LocalSearchMessage.StoppedTargetAchieved)\n return False\n return searching", "def resolve_conflict(self):\n neighbours = self.nodes\n new_chain = None\n #We're only looking for chains Longer than ours\n max_length = len(self.chain)\n #Grab and verify the chains from all the other nodes in our netwrok\n for node in neighbours:\n response = requests.get(f'http://{node}/chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n #check if the lentgh is longer and the cain is valid\n if length > max_length and self.valid_chain(chain):\n max_length = length\n new_chain = chain\n\n #replace our chain if we're discovered a new valid chain, Longer than ours\n if new_chain:\n self.chain = new_chain\n return True\n\n return False", "def onForbidChanged_(self, forbid, isInc):\n\t\tState.onForbidChanged_(self, forbid, isInc)\n\t\tAI.onForbidChanged_(self, forbid, isInc)", "def need_wells(rxn_class):\n _need_wells = (\n # ReactionClass.Typ.ADDITION,\n ReactionClass.Typ.HYDROGEN_ABSTRACTION,\n ReactionClass.Typ.SUBSTITUTION\n )\n return typ(rxn_class) in _need_wells", "def isFallthrough(self) -> bool:\n ...", "def _get_boost_on_errors(self):\n self._validate_boost_on_errors()\n if self.boost_on_errors == \"auto\":\n val = self._get_validation_strategy()\n if val.get(\"validation_type\", \"\") == \"custom\":\n return False\n if self._get_mode() == \"Explain\":\n return False\n if self._get_mode() == \"Perform\":\n return False\n if self._get_mode() == \"Compete\":\n return True\n if self._get_mode() == \"Optuna\":\n return False\n else:\n return deepcopy(self.boost_on_errors)", "def is_weak(self):\n return self.binding == 'STB_WEAK'", "def check_unstaged_changes(self):\n pass", "def can_satisfy_without_refs(self):\n\n if self.constraint_type in (self.ONE, self.AT_LEAST_ONE):\n result = any(not _is_ref_prop(name) for name in self.property_names)\n\n else:\n # ALL\n result = all(not _is_ref_prop(name) for name in self.property_names)\n\n return result", "def may_lose_data(self, windowing):\n may_finish = all(\n _IncludesMayFinish(t.may_lose_data(windowing)) for t in self.triggers)\n return (\n DataLossReason.MAY_FINISH\n if may_finish else DataLossReason.NO_POTENTIAL_LOSS)", "def count_bond_collisions(self):\n\n errors = 0\n\n for i in range(0, len(self.bonds)):\n for a in range(i + 1, len(self.bonds)):\n result = self._intersection(self.bonds[i], self.bonds[a])\n\n if result:\n errors += 1\n return errors", "def any_neighbor_burning(self):\n neighbors = self.world.get_four_neighbors(self, Patch.null)\n states = [patch.state for patch in neighbors]\n return \"orange\" in states", "def does_match_backwards(self, situation: Perception) -> bool:\n p = self.condition.get_backwards_anticipation(situation)\n if self.effect.does_match(situation, p):\n return True\n return False", "def update_belief_once(self, current_observation, last_observation, avg_vel, dt, current_belief):\n # type: (np.ndarray, np.ndarray, float, float, list) -> (list, list)\n\n\n\n new_belief = []\n likelihoods = []\n estimated_positions = []\n normalization_factor = 0.0\n\n # Compute the likelihoods\n for goal_idx in range(self._num_goals):\n obs_likelihood, calculated_position = 
self.compute_observation_likelihood(current_observation,\n last_observation,\n self._goals[goal_idx],\n avg_vel, dt)\n estimated_positions.append(calculated_position)\n obs_likelihood += 1\n likelihoods.append(obs_likelihood)\n normalization_factor += obs_likelihood * current_belief[goal_idx]\n\n\n\n\n #for i in range(self.importance_of_prior_in_belief_update):\n #normalization_factor = 0.0\n #tmp_belief = []\n # Compute new belief\n for goal_idx in range(self._num_goals):\n prob = (likelihoods[goal_idx] * current_belief[goal_idx])/normalization_factor\n\n new_belief.append(prob)\n\n #tmp_belief = np.array(tmp_belief) / normalization_factor\n\n\n #new_belief = tmp_belief\n return [new_belief, estimated_positions]", "def consistency_check(self):\n for opt, opt_def in self.opt_dict.iteritems():\n selected_imp = opt_def['selected_imp']\n if selected_imp == 'none':\n continue\n else:\n mapped_opt = self.imp2opt_dict[selected_imp]\n\n if mapped_opt != opt:\n emsg = 'ERROR: imp2opt_dict fails to map {i} to {o}'\n print emsg.format(i=selected_imp, o=opt)\n\n for imp, opt in self.imp2opt_dict.iteritems():\n if imp == 'none':\n print 'ERROR: Found \"none\" as a keyword in imp2opt_dict'\n continue\n elif opt == 'none':\n continue\n else:\n selected_imp = self.opt_dict[opt]['selected_imp']\n\n if imp != selected_imp:\n emsg = 'ERROR: imp2opt_dict fails to map {i} to {o}'\n print emsg.format(i=selected_imp, o=opt)", "def check_integrity(self):\r\n nodes = graph.ops(self.inputs, self.outputs)\r\n if self.apply_nodes != nodes:\r\n missing = nodes.difference(self.apply_nodes)\r\n excess = self.apply_nodes.difference(nodes)\r\n raise Exception(\r\n \"The nodes are inappropriately cached. missing, in excess: \",\r\n missing, excess)\r\n for node in nodes:\r\n if node.fgraph is not self:\r\n raise Exception(\"Node should belong to the FunctionGraph.\",\r\n node)\r\n for i, variable in enumerate(node.inputs):\r\n if variable.fgraph is not self:\r\n raise Exception(\r\n \"Input of node should belong to the FunctionGraph.\",\r\n variable, (node, i))\r\n if (node, i) not in variable.clients:\r\n raise Exception(\"Inconsistent clients list.\",\r\n (node, i), variable.clients)\r\n variables = set(graph.variables(self.inputs, self.outputs))\r\n if set(self.variables) != variables:\r\n missing = variables.difference(self.variables)\r\n excess = self.variables.difference(variables)\r\n raise Exception(\r\n \"The variables are inappropriately cached. missing, in excess: \",\r\n missing, excess)\r\n for variable in variables:\r\n if (variable.owner is None and\r\n variable not in self.inputs and\r\n not isinstance(variable, graph.Constant)):\r\n raise Exception(\"Undeclared input.\", variable)\r\n if variable.fgraph is not self:\r\n raise Exception(\"Variable should belong to the FunctionGraph.\",\r\n variable)\r\n for node, i in variable.clients:\r\n if node == 'output':\r\n if self.outputs[i] is not variable:\r\n raise Exception(\"Inconsistent clients list.\",\r\n variable, self.outputs[i])\r\n continue\r\n if node not in nodes:\r\n raise Exception(\"Client not in FunctionGraph.\",\r\n variable, (node, i))\r\n if node.inputs[i] is not variable:\r\n raise Exception(\"Inconsistent clients list.\",\r\n variable, node.inputs[i])" ]
[ "0.60755587", "0.5997811", "0.5935477", "0.5858629", "0.57589734", "0.56303644", "0.5552522", "0.55062824", "0.54827005", "0.54471344", "0.5424391", "0.54060894", "0.53555745", "0.53466696", "0.53354824", "0.5331523", "0.5313019", "0.530491", "0.5267892", "0.52595043", "0.5228276", "0.5223606", "0.5222479", "0.5220137", "0.5218231", "0.51882666", "0.51552117", "0.51552117", "0.514843", "0.51482075", "0.5143261", "0.5142177", "0.5142", "0.51308465", "0.51276577", "0.5113718", "0.51013875", "0.50927556", "0.5090616", "0.5076911", "0.50685656", "0.50613356", "0.5056955", "0.50533354", "0.5052815", "0.5035062", "0.5033832", "0.5033598", "0.5030144", "0.50278795", "0.5026759", "0.5026528", "0.50232136", "0.5021805", "0.50153196", "0.5012928", "0.5011723", "0.5009125", "0.49994987", "0.4991666", "0.49865714", "0.4981262", "0.4980286", "0.49725014", "0.4959702", "0.49540478", "0.49530452", "0.49513018", "0.49493933", "0.49471775", "0.49445367", "0.49405852", "0.4935763", "0.4935019", "0.49186623", "0.49119672", "0.49079388", "0.49029267", "0.49007776", "0.4900412", "0.48981595", "0.4897502", "0.48967338", "0.4893677", "0.4889954", "0.48899388", "0.48856646", "0.4885518", "0.4884088", "0.4875251", "0.48697037", "0.48695326", "0.4866501", "0.4864536", "0.48639098", "0.486355", "0.48634845", "0.48593327", "0.48581988", "0.4851248" ]
0.66045004
0
quad(f, a, b) > \int_a^b f(x) dx Uses some quadrature rule to evaluate the integral.
def quad(f, a, b): S, D = (b+a)/2.0, (b-a)/2.0 def rescaled_f(x): return f(x*D + S)*D return sum(w * rescaled_f(p) for w, p in zip(quad_weights, quad_points))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def quad(func, a, b, args=()):\n\tx_units = a.units\n\tf_units = func(.5*(a+b)).units\n\n\tI, abserr = sciquad(\n\t\tlambda x : func(x*x_units).to(f_units).magnitude,\n\t\ta.magnitude, b.to(x_units).magnitude,\n\t\targs)\n\n\treturn I*x_units*f_units, abserr*x_units*f_units", "def add_quad(a, b):\n s = np.sqrt(np.square(a) + np.square(b))\n return s", "def integrate_f_from0(b):\n integral, err = scipy.integrate.quad(f, 0, b)\n return integral", "def complex_quadrature(func, a, b, **kwargs):\n\n def real_func(x):\n return scipy.real(func(x))\n\n def imag_func(x):\n return scipy.imag(func(x))\n\n real_integral = quad(real_func, a, b, **kwargs)\n imag_integral = quad(imag_func, a, b, **kwargs)\n return (real_integral[0] + 1j * imag_integral[0], real_integral[1:], imag_integral[1:])", "def quad(self, b):\n return b.T @ self.solve(b)", "def integrate(f, inf_lim, sup_lim):\n function = get_function_from_text(f)\n return sp_integrate.quad(function, inf_lim, sup_lim)[0]", "def integrate_fun(fun: Callable, low_b: float, upp_b: float) -> float:\n return integrate.quad(fun, low_b, upp_b)[0]", "def complex_integral(self,func,a,b):\r\n \r\n import scipy\r\n from scipy import array\r\n \r\n def quad_routine(func, a, b, x_list, w_list):\r\n c_1 = (b-a)/2.0\r\n c_2 = (b+a)/2.0\r\n eval_points = map(lambda x: c_1*x+c_2, x_list)\r\n func_evals = list(map(func, eval_points)) # Python 3: make a list here\r\n return c_1 * sum(array(func_evals) * array(w_list))\r\n \r\n def quad_gauss_7(func, a, b):\r\n x_gauss = [-0.949107912342759, -0.741531185599394, -0.405845151377397, 0, 0.405845151377397, 0.741531185599394, 0.949107912342759]\r\n w_gauss = array([0.129484966168870, 0.279705391489277, 0.381830050505119, 0.417959183673469, 0.381830050505119, 0.279705391489277,0.129484966168870])\r\n return quad_routine(func,a,b,x_gauss, w_gauss)\r\n \r\n def quad_kronrod_15(func, a, b):\r\n x_kr = [-0.991455371120813,-0.949107912342759, -0.864864423359769, -0.741531185599394, -0.586087235467691,-0.405845151377397, -0.207784955007898, 0.0, 0.207784955007898,0.405845151377397, 0.586087235467691, 0.741531185599394, 0.864864423359769, 0.949107912342759, 0.991455371120813]\r\n w_kr = [0.022935322010529, 0.063092092629979, 0.104790010322250, 0.140653259715525, 0.169004726639267, 0.190350578064785, 0.204432940075298, 0.209482141084728, 0.204432940075298, 0.190350578064785, 0.169004726639267, 0.140653259715525, 0.104790010322250, 0.063092092629979, 0.022935322010529]\r\n return quad_routine(func,a,b,x_kr, w_kr)\r\n \r\n class Memorize: # Python 3: no need to inherit from object\r\n def __init__(self, func):\r\n self.func = func\r\n self.eval_points = {}\r\n def __call__(self, *args):\r\n if args not in self.eval_points:\r\n self.eval_points[args] = self.func(*args)\r\n return self.eval_points[args]\r\n \r\n def quad(func,a,b):\r\n ''' Output is the 15 point estimate; and the estimated error '''\r\n func = Memorize(func) # Memorize function to skip repeated function calls.\r\n g7 = quad_gauss_7(func,a,b)\r\n k15 = quad_kronrod_15(func,a,b)\r\n # I don't have much faith in this error estimate taken from wikipedia\r\n # without incorporating how it should scale with changing limits\r\n return [k15, (200*scipy.absolute(g7-k15))**1.5]\r\n \r\n return quad(func,a,b)", "def complex_integral(self,func,a,b):\r\n \r\n import scipy\r\n from scipy import array\r\n \r\n def quad_routine(func, a, b, x_list, w_list):\r\n c_1 = (b-a)/2.0\r\n c_2 = (b+a)/2.0\r\n eval_points = map(lambda x: c_1*x+c_2, x_list)\r\n func_evals = list(map(func, 
eval_points)) # Python 3: make a list here\r\n return c_1 * sum(array(func_evals) * array(w_list))\r\n \r\n def quad_gauss_7(func, a, b):\r\n x_gauss = [-0.949107912342759, -0.741531185599394, -0.405845151377397, 0, 0.405845151377397, 0.741531185599394, 0.949107912342759]\r\n w_gauss = array([0.129484966168870, 0.279705391489277, 0.381830050505119, 0.417959183673469, 0.381830050505119, 0.279705391489277,0.129484966168870])\r\n return quad_routine(func,a,b,x_gauss, w_gauss)\r\n \r\n def quad_kronrod_15(func, a, b):\r\n x_kr = [-0.991455371120813,-0.949107912342759, -0.864864423359769, -0.741531185599394, -0.586087235467691,-0.405845151377397, -0.207784955007898, 0.0, 0.207784955007898,0.405845151377397, 0.586087235467691, 0.741531185599394, 0.864864423359769, 0.949107912342759, 0.991455371120813]\r\n w_kr = [0.022935322010529, 0.063092092629979, 0.104790010322250, 0.140653259715525, 0.169004726639267, 0.190350578064785, 0.204432940075298, 0.209482141084728, 0.204432940075298, 0.190350578064785, 0.169004726639267, 0.140653259715525, 0.104790010322250, 0.063092092629979, 0.022935322010529]\r\n return quad_routine(func,a,b,x_kr, w_kr)\r\n \r\n class Memorize: # Python 3: no need to inherit from object\r\n def __init__(self, func):\r\n self.func = func\r\n self.eval_points = {}\r\n def __call__(self, *args):\r\n if args not in self.eval_points:\r\n self.eval_points[args] = self.func(*args)\r\n return self.eval_points[args]\r\n \r\n def quad(func,a,b):\r\n ''' Output is the 15 point estimate; and the estimated error '''\r\n func = Memorize(func) # Memorize function to skip repeated function calls.\r\n g7 = quad_gauss_7(func,a,b)\r\n k15 = quad_kronrod_15(func,a,b)\r\n # I don't have much faith in this error estimate taken from wikipedia\r\n # without incorporating how it should scale with changing limits\r\n return [k15, (200*scipy.absolute(g7-k15))**1.5]\r\n \r\n return quad(func,a,b)", "def evalQuad(a,b,c,x):\n return a * x**2 + b*x + c", "def quad(*args, **kwargs):\n return (42, 0.001)", "def sp_integrate_1D ( func , xmin , xmax , *args , **kwargs ) : \n from scipy import integrate\n ##\n result = integrate.quad ( func , xmin , xmax , *args , **kwargs )\n return result[0]", "def integralFunction(xa, ya, xb, yb):\n return psi(xb, yb) - psi(xa, ya)", "def piecewise_integrate(x, y, a, b):\n assert x[0] == a\n assert x[-1] <= b\n output = 0.\n num_x = len(x)\n if x[-1] == b:\n for idx in range(num_x - 1):\n output += y[idx] * (x[idx+1] - x[idx])\n else:\n for idx in range(num_x):\n if idx < num_x - 1:\n output += y[idx] * (x[idx+1] - x[idx])\n else:\n output += y[idx] * (b - x[idx])\n return output", "def test_quad1():\n xi = np.array([-1., 0., 2.])\n yi = np.array([ 1., -1., 7.])\n c = quad_interp(xi,yi)\n c_true = np.array([-1., 0., 2.])\n print(\"c = \", c)\n print(\"c_true = \", c_true)\n # test that all elements have small error:\n assert np.allclose(c, c_true), \\\n \"Incorrect result, c = %s, Expected: c = %s\" % (c,c_true)", "def f1(x, a, b):\n #return x**43 - b*x**42 + x**7 - x**6 * a + 84*x - 42 * b - 42 * a\n return (x**42 + 42)/(x-a) + (x**6 + 42)/(x-b)", "def quad_interp(x,y,xi) :\n f = interp1d(x,y,kind='quadratic')\n yi = f(xi)\n \n return yi", "def calc_quad(self,mw,A0,A1,A2): \n return (A0 + A1 * mw + A2 * mw**2)", "def integrate_gausskronrod(f, a, b, args=()):\n\n assert b > a\n mid = 0.5*(b+a)\n dx = 0.5*(b-a)\n zi = mid+gausskronrod_nodes*dx\n integrand = f(zi)\n integral_G7 = np.sum(integrand[:7]*gauss_weights)\n integral_K15 = np.sum(integrand*kronrod_weights)\n\n error = 
(200*abs(integral_G7-integral_K15))**1.5\n\n return integral_K15*dx, dx*error", "def sp_integrate_2D ( func ,\n xmin , xmax ,\n ymin , ymax , *args , **kwargs ) :\n from scipy import integrate\n ##\n result = integrate.dblquad ( func ,\n ymin ,\n ymax ,\n lambda x : xmin ,\n lambda x : xmax , \n *args , **kwargs )\n return result[0]", "def integrate(x, y, xmin, xmax):\n indexes = get_interval(x, xmin, xmax)\n integral = np.trapz(y[indexes], x[indexes])\n\n return integral", "def _quad_function(order, *args, **kws):\n params = parameters_spec.copy()\n params.update(kws)\n return quad_function(order, *args, **params)", "def integrate_using_univariate_gauss_legendre_quadrature_unbounded(\n integrand, lb, ub, nquad_samples, atol=1e-8, rtol=1e-8,\n interval_size=2, max_steps=1000, verbose=0, adaptive=True,\n soft_error=False, tabulated_quad_rules=None):\n if interval_size <= 0:\n raise ValueError(\"Interval size must be positive\")\n\n if np.isfinite(lb) and np.isfinite(ub):\n partial_lb, partial_ub = lb, ub\n elif np.isfinite(lb) and not np.isfinite(ub):\n partial_lb, partial_ub = lb, lb+interval_size\n elif not np.isfinite(lb) and np.isfinite(ub):\n partial_lb, partial_ub = ub-interval_size, ub\n else:\n partial_lb, partial_ub = -interval_size/2, interval_size/2\n\n result = __integrate_using_univariate_gauss_legendre_quadrature_bounded(\n integrand, partial_lb, partial_ub, nquad_samples, rtol,\n atol, verbose-1, adaptive, tabulated_quad_rules)\n\n step = 0\n partial_result = np.inf\n plb, pub = partial_lb-interval_size, partial_lb\n while (np.any(np.absolute(partial_result) >= rtol*np.absolute(result)+atol)\n and (plb >= lb) and step < max_steps):\n partial_result = \\\n __integrate_using_univariate_gauss_legendre_quadrature_bounded(\n integrand, plb, pub, nquad_samples, rtol, atol,\n verbose-1, adaptive, tabulated_quad_rules)\n result += partial_result\n pub = plb\n plb -= interval_size\n step += 1\n if verbose > 1:\n print('Left', step, result, partial_result, plb, pub,\n interval_size)\n if verbose > 0:\n if step >= max_steps:\n msg = \"Early termination when computing left integral\"\n msg += f\"max_steps {max_steps} reached\"\n if soft_error is True:\n warn(msg, UserWarning)\n else:\n raise RuntimeError(msg)\n if np.all(np.abs(partial_result) < rtol*np.absolute(result)+atol):\n msg = f'Tolerance {atol} {rtol} for left integral reached in '\n msg += f'{step} iterations'\n print(msg)\n\n step = 0\n partial_result = np.inf\n plb, pub = partial_ub, partial_ub+interval_size\n while (np.any(np.absolute(partial_result) >= rtol*np.absolute(result)+atol)\n and (pub <= ub) and step < max_steps):\n partial_result = \\\n __integrate_using_univariate_gauss_legendre_quadrature_bounded(\n integrand, plb, pub, nquad_samples, rtol, atol,\n verbose-1, adaptive, tabulated_quad_rules)\n result += partial_result\n plb = pub\n pub += interval_size\n step += 1\n if verbose > 1:\n print('Right', step, result, partial_result, plb, pub,\n interval_size)\n if verbose > 0:\n if step >= max_steps:\n msg = \"Early termination when computing right integral. 
\"\n msg += f\"max_steps {max_steps} reached\"\n if soft_error is True:\n warn(msg, UserWarning)\n else:\n raise RuntimeError(msg)\n if np.all(np.abs(partial_result) < rtol*np.absolute(result)+atol):\n msg = f'Tolerance {atol} {rtol} for right integral reached in '\n msg += f'{step} iterations'\n print(msg)\n # print(partial_result, plb, pub)\n\n return result", "def quadratures(f, a=-1, b=1, n=30):\n nodes, weights = gauss_legender_points(n)\n w = to.tensor(weights.reshape(1, 1, -1))\n nodes = to.tensor(nodes.reshape(1, 1, -1))\n\n scale = (b - a) / 2.\n\n x = scale * nodes + (b + a) / 2.\n y = w * f(x)\n y = to.sum(scale * y, dim=-1)\n return y.type(dtype=to.float)", "def isqrt( a, b ):\n return a*a - b", "def trapezoid_integral(f, xrange, intervals):\n \n a, b = min(xrange), max(xrange)\n delta_x = (b-a)/intervals\n x = np.arange(1, intervals)\n \n int_out = f(a)\n int_out += f(b)\n int_out += sum(2*f(a+x*delta_x))\n \n return delta_x/2*int_out", "def integrate(func, a, b, tol=1e-8):\n left_pts = []\n result = integ(func, a, b, tol, 0, left_pts)\n\n return result, left_pts", "def integrate(f, a, b, args=(), minintervals=1, limit=200, tol=1e-10):\n fv = np.vectorize(f)\n\n intervals = []\n\n limits = np.linspace(a, b, minintervals+1)\n for left, right in zip(limits[:-1], limits[1:]):\n I, err = integrate_gausskronrod(fv, left, right, args)\n bisect.insort(intervals, (err, left, right, I))\n\n while True:\n Itotal = sum([x[3] for x in intervals])\n err2 = sum([x[0]**2 for x in intervals])\n err = sqrt(err2)\n\n if abs(err/Itotal) < tol:\n return Itotal, err\n\n # no convergence\n if len(intervals) >= limit:\n return False # better to raise an exception\n\n err, left, right, I = intervals.pop()\n\n # split integral\n mid = left+(right-left)/2\n\n # calculate integrals and errors, replace one item in the list and\n # append the other item to the end of the list\n I, err = integrate_gausskronrod(fv, left, mid, args)\n bisect.insort(intervals, (err, left, mid, I))\n I, err = integrate_gausskronrod(fv, mid, right, args)\n bisect.insort(intervals, (err, mid, right, I))", "def quad_interp(xi,yi):\n\n # check inputs and print error message if not valid:\n\n error_message = \"xi and yi should have type numpy.ndarray\"\n assert (type(xi) is np.ndarray) and (type(yi) is np.ndarray), error_message\n\n error_message = \"xi and yi should have length 3\"\n assert len(xi)==3 and len(yi)==3, error_message\n\n error_message = \"it is not possible to have more than one point in the with the same xi\"\n assert (len(np.unique(xi)) == len(xi)), error_message\n\n # Set up linear system to interpolate through data points:\n\n A = np.array([[1, 1, 1], xi, xi**2]).T\n b = yi\n\n c = solve(A,b)\n \n return c", "def integral(self, f_bin, gamma, malm_pars=np.array([1.]), Pobs=0.00711310498183):\n s = 0.0\n for alpha, beta in zip(self.alpha_vals, self.beta_vals):\n #arg_list = [gamma, alpha, beta, self.low_q, self.high_q, len(malm_pars)]\n arg_list = [gamma, f_bin, Pobs, alpha, beta, 0.0, 1.0, len(malm_pars)]\n arg_list.extend(malm_pars)\n s += quad(self.c_integrand, self.low_q, self.high_q, args=tuple(arg_list))[0]\n #return s*f_bin\n return s\n #return f_bin * np.sum([quad(self.c_integrand, 0, 1, args=arg_list)[0] for alpha, beta in\n # zip(self.alpha_vals, self.beta_vals)])", "def bdq2(f, x, h=1e-5):\n return (3*f(x) - 4*f(x-h) + f(x-2*h))/(2*h)\n raise NotImplementedError(\"Problem 2 Incomplete\")", "def square_gradient(tri, q):\n\n finder = tri.get_trifinder()\n interp = LinearTriInterpolator(tri, q, trifinder = 
finder)\n\n integral = 0.0\n num_triangles, _ = np.shape(tri.triangles)\n\n for n in range(num_triangles):\n ele = tri.triangles[n, :]\n x = tri.x[ele]\n y = tri.y[ele]\n\n a = area(x, y)\n q_x, q_y = interp.gradient(sum(x) / 3, sum(y) / 3)\n\n integral += a * (q_x.data**2 + q_y.data**2)\n\n return integral", "def integral(self, dmax, dmin=0):\r\n # more expressive\r\n #return integrate.quad(lambda r: 2*np.pi*r*self(r), dmin, dmax)[0]\r\n return self.bpsf.integral(dmax, dmin)", "def b(q):\n if q == 0 or q == 1:\n return float(0.0)\n return -(q * log2(q) + (1 - q) * log2(1 - q))", "def integral ( self, xmin , xmax , ymin , ymax , nevents = True ) :\n if self.xminmax() :\n xmn , xmx = self.xminmax()\n xmin = max ( xmin , xmn )\n xmax = min ( xmax , xmx )\n\n if self.yminmax() : \n ymn , ymx = self.yminmax() \n ymin = max ( ymin , ymn )\n ymax = min ( ymax , ymx )\n\n value , todo = 0 , True \n \n ## 1) make a try to use analytical integral (could be fast)\n if self.tricks :\n try:\n if hasattr ( self.pdf , 'setPars' ) : self.pdf.setPars() \n fun = self.pdf.function()\n value , todo = fun.integral ( xmin , xmax , ymin , ymax ) , False \n except:\n pass\n\n ## use numerical integration \n from ostap.math.integral import integral2 as _integral2\n\n extended = self.pdf.canBeExtended() or isinstance ( self.pdf , ROOT.RooAddPdf )\n\n if todo and extended : value = _integral2 ( self , xmin , xmax , ymin , ymax )\n elif todo :\n \n ## use unormalized PDF here to speed up the integration \n ifun = lambda x, y : self ( x , y , error = False , normalized = False )\n value = _integral2 ( ifun , xmin , xmax , ymin , ymax )\n norm = self.pdf.getNorm ( self.vars )\n value /= norm\n\n if nevents and self.pdf.mustBeExtended () :\n evts = self.pdf.expectedEvents( self.vars )\n if evts <= 0 or iszero ( evts ) :\n self.warning ( \"integral: expectedEvents is %s\" % evts )\n value *= evts \n\n return value", "def _G(xi, B):\n try:\n import scipy.integrate\n except:\n raise ImportError(' [PHIDL] To run the microstrip functions you '\n 'need scipy, please install it with '\n '`pip install scipy`')\n return B/sinh(B)*scipy.integrate.quad(_G_integrand, 0, xi, args = (B))[0]", "def bdq1(f, x, h=1e-5):\n return (f(x)-f(x-h))/h\n raise NotImplementedError(\"Problem 2 Incomplete\")", "def mc_integrate1d(f, a, b, N=10000):\n #use the approximation formula with a translate and scale of unit interval\n points = (b-a)*np.random.rand(N)+a\n y = f(points)\n return (b-a)*sum(y)/N", "def _forceInt(x, y, z, dens, b2, c2, i, glx=None, glw=None):\n\n def integrand(s):\n t = 1 / s**2.0 - 1.0\n return (\n dens(\n numpy.sqrt(\n x**2.0 / (1.0 + t) + y**2.0 / (b2 + t) + z**2.0 / (c2 + t)\n )\n )\n * (\n x / (1.0 + t) * (i == 0)\n + y / (b2 + t) * (i == 1)\n + z / (c2 + t) * (i == 2)\n )\n / numpy.sqrt((1.0 + (b2 - 1.0) * s**2.0) * (1.0 + (c2 - 1.0) * s**2.0))\n )\n\n if glx is None:\n return integrate.quad(integrand, 0.0, 1.0)[0]\n else:\n return numpy.sum(glw * integrand(glx))", "def Qfun(Phieq,Phi,Phibar,taurad):\n #note Q is different from Perez-Becker and Showman by a factor of g (for consistency with Phi vs H)\n Q=(1/taurad)*(Phieq-(Phi+Phibar))\n\n return Q", "def integrand(x, n):\n return jn(n, x)", "def _forceInt(x,y,z,a2,b2,c2,n,i):\n def integrand(tau):\n return (x*(i==0) + y*(i==1) + z*(i==2))/(a2*(i==0) + b2*(i==1) + c2*(i==2) + tau) * \\\n _FracInt(x, y, z, a2, b2, c2, tau, n)\n return integrate.quad(integrand, lowerlim(x**2, y**2, z**2, a2, b2, c2), np.inf, epsabs=1e-12)[0]", "def compare_int(f, s, a, b):\n\tintegral_comp 
= []\n\tintegral_array = integral(f,s,a,b)\n\tscintegral_array = scintegral(f,s,a,b)\n\tfor i in range(1,3):\n\t\t\"\"\"Da ich mir nicht sicher war, ob man integ.quad und die\n\t\tMittelwertsregel vergleichen kann, hab ich das weggelassen.\"\"\"\n\t\tintegral_comp.append(integral_array[i] - scintegral_array[i])\n\tprint \"Abweichung bei Trapezregel: \" + str(integral_comp[0])\n\tprint \"Abweichung bei Simpsonregel: \" + str(integral_comp[1])", "def integrate_function():\n def midpoint_integration(f, a, b, n=100):\n h = (b - a)/float(n)\n I = 0\n for i in range(n):\n I += f(a + i*h + 0.5*h)\n return h*I\n\n\n f_formula = sys.argv[1]\n a = eval(sys.argv[2])\n b = eval(sys.argv[3])\n if len (sys.argv) >= 5:\n n = int(sys.arvg[4])\n else:\n n = 200\n\n from scitools.StringFunction import StringFunction\n f = StringFunction(f_formula) # turn formula into f(x) func.\n\n \"\"\"\n >>> g = StringFunction('A*exp(-a*t)*sin(omega*x)',\n independent_variable='t',\n A=1, a=0.1, omega=pi, x=0.5)\n >>> g.set_parameters(omega=0.1)\n >>> g.set_parameters(omega=0.1, A=5, x=0)\n >>> g(0)\n 0.0\n >>> g(pi)\n 2.8382392288852166e-15\n \"\"\"\n\n I = midpoint_integration(f, a, b, n)\n print(\"Integral of {:s} on [{:g}, {:g}] with n ={:d}: {:g}\" \\\n .format(f_formula, a, b, n, I))", "def trapezoidalPost(x,y):\n integral = 0\n for ndx in range(1,len(x)):\n integral+= (y[ndx]+y[ndx-1])/2 * (x[ndx]-x[ndx-1])\n return integral", "def bisection(f, fu, point_a, point_b, point_c, point_d, lower_bound, upper_bound, length):\n n = 1\n theta = 0\n a = lower_bound\n b = upper_bound\n while n <= 100:\n theta = (a + b) / 2.0\n if -1e-6 < f(fu(point_a, point_b, point_c, theta), point_d) - length < 1e-6:\n # print 'Residual', f(fu(point_a, point_b, point_c, theta), point_d) - length\n # print 'iteration', n\n return theta\n else:\n n = n + 1\n if f(fu(point_a, point_b, point_c, theta), point_d) - length > 0:\n b = theta\n else:\n a = theta\n\n print 'failedtheta', theta, 'Residual', f(fu(point_a, point_b, point_c, theta), point_d) - length\n print 'iteration', n\n return False", "def biseccion(func, a, b, tol=1e-4):\n p = (a + b) / 2 \n while np.fabs(func(p)) > tol:\n p = (a + b) / 2 \n if func(a) * func(p) < 0:\n b = p\n elif func(a) * func(p) > 0:\n a = p\n else:\n return p\n return p", "def hypotenuse(a, b):\r\n return (a**2 + b**2)**0.5", "def quatmul ( a, b ):\n\n import numpy as np\n\n assert a.size==4, 'Error in a dimension'\n assert b.size==4, 'Error in b dimension'\n\n return np.array ( [ a[0]*b[0] - a[1]*b[1] - a[2]*b[2] - a[3]*b[3],\n a[1]*b[0] + a[0]*b[1] - a[3]*b[2] + a[2]*b[3],\n a[2]*b[0] + a[3]*b[1] + a[0]*b[2] - a[1]*b[3],\n a[3]*b[0] - a[2]*b[1] + a[1]*b[2] + a[0]*b[3] ], dtype=np.float_ )", "def quo(self, a, b):\n return a / b", "def quo(self, a, b):\n if a % b:\n raise ExactQuotientFailed(a, b, self)\n else:\n return a // b", "def rectangle_integrate(self, a=None, b=None, n=None):\n\n sum_of_rectangles = 0.0\n h = float( (b-a) / n )\n\n x = a\n while x < b:\n\n sum_of_rectangles += self._function(x) * h\n\n x += h \n\n return sum_of_rectangles", "def iqi(f, a, b, ztol, maxiter):\n\txnm2 = a\n\tfnm2 = f(a)\n\txnm1 = b\n\tfnm1 = f(b)\n\txn = a + (b-a)*0.5\n\tfn = f(xn)\n\tfasign = (fnm2 < 0.0)\n\tif (fnm1< 0.0) == fasign:\n\t return None\n \n\tswitch = True\n\tfor i in range(maxiter):\n\t\tprint \"i, xn, fn, a, b, fnm2, fnm1=\", i, xn, fn,a, b, fnm2, fnm1\n\t\t#Check for near equal function values.\n\t\tif abs(fnm2-fnm1)< ztol or \\\n\t\t abs(fnm1-fn) < ztol or\\\n\t\t abs(fn-fnm2) < ztol:\n \n\t\t #ensure 
bisection is used if this is the case\n\t\t if switch:\n\t\t\t print \"switching to bisection\",\n\t\t\t switch = not switch\n\t\tif switch:\n\t\t # perform quadratic interpolation\n\t\t print \"iqi:\",\n\t\t xest = invqinterp(xnm2, xnm1, xn,\n\t\t\t\t\t\t\t fnm2, fnm1, fn)\n\t\telse:\n\t\t print \"biseciton:\",\n\t\t xest = a + (b-a) * 0.5\n \n\t\tswitch = not switch\n \n\t\tfxest= f(xest)\n\t\tprint \"xest, fxest =\", xest, fxest\n\t\tif abs(fxest) < ztol:\n\t\t print \"tolerance met.\"\n\t\t return xest, fxest, i+1\n \n\t\tif (fxest < 0) == fasign:\n\t\t xnm2 = xest\n\t\t fnm2 = fxest\n\t\t a = xest\n\t\telse:\n\t\t xnm1 = xest\n\t\t fnm1 = fxest\n\t\t b = xest\n \n\t\txn = a + (b-a)*0.5\n\t\tfn = f(xn)\n\t\tif abs(b-a) < ztol:\n\t\t return (xn, fn,i+1)\n\treturn xn, fn, maxiter", "def integrate_trapz(f, lower, upper, N=1000):\n a = lower # Lower integration limit\n b = upper # Upper integration limit\n w = (b - a) / N # Width of each trapezoid\n\n # Area of first and last trapezoids\n I = 0.5 * f(a) * w + 0.5 * f(b) * w\n\n # Area of rest of trapezoids\n for i in range(1, N):\n I += f(a + i * w) * w\n\n return I, N", "def get_quadrant(x, y):\n try:\n x = int(x)\n y = int(y)\n except ValueError:\n return (0)\n\n if y >= 0 and x > 0:\n return (1)\n elif y >= 0 and x < 0:\n return (2)\n elif y < 0 and x < 0:\n return (3)\n else:\n return (4)", "def trap(f, lower, upper, numberOfPoly):\r\n def f(x):\r\n return 3*(x**2)\r\n \r\n h = (upper-lower)/numberOfPoly\r\n print(h)\r\n result = (0.5*(f(lower))) + (0.5*(f(upper)))\r\n print(result)\r\n for i in range (1, int(numberOfPoly)):\r\n result += f(lower + i*h)\r\n \r\n result *= h\r\n print('result = %f' % result)\r\n return result", "def is_quad(q):\n P0, P1, P2, P3 = q\n\n # Convert points to ECEF\n p0 = Vector.fromPoint(P0)\n p1 = Vector.fromPoint(P1)\n p2 = Vector.fromPoint(P2)\n p3 = Vector.fromPoint(P3)\n\n # Unit vector along top edge\n v0 = (p1 - p0).norm()\n\n # Distance along bottom edge\n d = (p3 - p2).mag()\n\n # New location for p2 by extending from p3 the same distance and\n # direction that p1 is from p0:\n new_p2 = p3 + v0 * d\n\n # How far off of the plane is the origin p2?\n planepoints = [p0, p1, p2]\n dist = get_distance_to_plane(planepoints, p2)\n\n # Is it close enough?\n if dist / d > OFFPLANE_TOLERANCE:\n on_plane = False\n else:\n on_plane = True\n\n # Fixed quad\n fquad = [p0.toPoint(),\n p1.toPoint(),\n new_p2.toPoint(),\n p3.toPoint()]\n\n return (on_plane, fquad)", "def quadTo(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def integral(ambient_dim, dim, operand, where=None):\n\n return NodeSum(\n area_element(ambient_dim, dim, where)\n * QWeight(where)\n * operand)", "def fdq2(f, x, h=1e-5):\n return (-3*f(x) + 4*f(x+h) - f(x+2*h))/(2*h)\n raise NotImplementedError(\"Problem 2 Incomplete\")", "def integrate_range(self, lower, upper):\n if upper>self.upper:\n upper=self.upper\n if lower<self.lower:\n lower = self.lower\n\n i_l = int(np.floor((lower-self.lower)/self._dx))\n i_u = int(np.floor((upper-self.lower)/self._dx))\n #print \"i_l \",i_l,\" i_u \",i_u\n total = 0.0\n for i in range(i_l,i_u):\n total+= self.y[i]*self._dx\n return total", "def B(q):\n # print('Value q')\n # print(q)\n if q > 0 and q != 0 and q != 1:\n result = -(q*math.log(q,2) + (1-q)*math.log(1-q,2))\n else:\n result = 0\n # print('Result of B')\n # print(result)\n return result", "def a_q(self, phi, ci, tl):\n\t return (self.j(phi, tl)*(ci - self.gamma(tl)))/(4.*(ci + 2.*self.gamma(tl)))", 
"def BisectionMethod(f, a=0, b=1, tol=1e-10):\n\tstart = time()\n\tf_a = f(a)\n\tf_b = f(b)\n\t\n\t# Initialization of errors and iters\n\terrs = []\n\ti = 0\n\n\tif f_a == 0:\n\t\treturn a\n\telif f_b == 0:\n\t\treturn b\n\telif f_a*f_b > 0:\n\t\tprint(\"The function values have the same sign!\")\n\telse:\n\t\terror = b-a\n\t\twhile error > tol:\n\t\t\tc = (b+a)/2\n\t\t\tf_c = f(c)\n\t\t\t\n\t\t\terrs.append(error)\n\t\t\t\n\t\t\tif f_a*f_c > 0:\n\t\t\t\ta = c\n\t\t\t\tf_a = f_c\n\t\t\telif f_a*f_c < 0:\n\t\t\t\tb = c\n\t\t\t\tf_b = f_c\n\t\t\telse:\n\t\t\t\tbreak\n\t\t\terror = b-a\n\t\t\ti = i+1\n\tend = time()\n\treturn c, (end-start), i", "def get_function(rule, domain, **parameters):\n if isinstance(domain, chaospy.dist.Dist):\n lower, upper = domain.range()\n parameters[\"dist\"] = domain\n else:\n lower, upper = numpy.array(domain)\n parameters[\"lower\"] = lower\n parameters[\"upper\"] = upper\n\n quad_function = chaospy.quad.collection.QUAD_FUNCTIONS[rule]\n parameters_spec = inspect.getargspec(quad_function)[0]\n parameters_spec = {key: None for key in parameters_spec}\n del parameters_spec[\"order\"]\n\n for key in parameters_spec:\n if key in parameters:\n parameters_spec[key] = parameters[key]\n\n def _quad_function(order, *args, **kws):\n \"\"\"Implementation of quadrature function.\"\"\"\n params = parameters_spec.copy()\n params.update(kws)\n return quad_function(order, *args, **params)\n\n return _quad_function", "def quad(nodes, weights, f):\n f_eval = f(nodes.T)\n return np.dot(f_eval, weights)", "def evaluate_ising(linear, quad, state):\n\n # If we were given a numpy array cast to list\n if _numpy and isinstance(state, np.ndarray):\n return evaluate_ising(linear, quad, state.tolist())\n\n # Accumulate the linear and quadratic values\n energy = 0.0\n for index, value in uniform_iterator(linear):\n energy += state[index] * value\n for (index_a, index_b), value in six.iteritems(quad):\n energy += value * state[index_a] * state[index_b]\n return energy", "def aireTriangle(b, h):\n return (b * h) / 2", "def eo_numerical(th):\n res = integrate.dblquad(gaussian_func_2dim_extra, th, np.inf, lambda x: th, lambda x: np.inf)\n return res[0]", "def _G_integrand(xip, B):\n try:\n from scipy.special import iv as besseli\n except:\n \"\"\" [PHIDL] To run this function you need scipy, please install it with\n pip install scipy \"\"\"\n return besseli(0, B*sqrt(1-xip**2))", "def quadrant(pAx, pAy, pBx, pBy):\n###############################################################################\n\n if (pBx>pAx and pBy>pAy):\n return 1\n elif (pBx<pAx and pBy>pAy):\n return 2\n elif (pBx<pAx and pBy<pAy):\n return 3\n elif (pBx>pAx and pBy<pAy):\n return 4\n else:\n return 0", "def vert_integrate(self, u, d='up', Q='self'):\n s = \"::: vertically integrating function :::\"\n print_text(s, cls=self)\n\n if type(Q) != FunctionSpace:\n Q = self.Q\n ff = self.ff\n phi = TestFunction(Q)\n v = TrialFunction(Q)\n bcs = []\n # integral is zero on bed (ff = 3,5) \n if d == 'up':\n bcs.append(DirichletBC(Q, 0.0, ff, self.GAMMA_B_GND)) # grounded\n bcs.append(DirichletBC(Q, 0.0, ff, self.GAMMA_B_FLT)) # shelves\n a = v.dx(2) * phi * dx\n # integral is zero on surface (ff = 2,6) \n elif d == 'down':\n bcs.append(DirichletBC(Q, 0.0, ff, self.GAMMA_S_GND)) # grounded\n bcs.append(DirichletBC(Q, 0.0, ff, self.GAMMA_S_FLT)) # shelves\n bcs.append(DirichletBC(Q, 0.0, ff, self.GAMMA_U_GND)) # grounded\n bcs.append(DirichletBC(Q, 0.0, ff, self.GAMMA_U_FLT)) # shelves\n a = -v.dx(2) * phi * dx\n L = u * phi * 
dx\n name = 'value integrated %s' % d \n v = Function(Q, name=name)\n solve(a == L, v, bcs, annotate=False)\n print_min_max(u, 'vertically integrated function', cls=self)\n return v", "def integrate_dphi_phi(self, i, j):\n def func(x):\n return self.shape_function_deriv(i, x) * self.shape_function(j, x)\n i, err = quadrature(func, -1, 1)\n return i", "def test_get_quad_angover_all_float(self):\n result = get_quadrilateral_type(2, 2, 2, 2, 180.1, .1, 180.1, .1)\n self.assertEqual(result, 'invalid')", "def quadrature_calculator(x_points: list, y_points: list) -> float:\n # sorted_y = [p for _, p in sorted(zip(x_points, y_points))]\n sorted_y = [p for _, p in\n sorted(list(zip(x_points, y_points)), key=lambda x: x[0])]\n n = len(y_points)\n sorted_x = sorted(x_points)\n\n trapezoidal_rule = [\n 0.5 * (sorted_x[n + 1] - sorted_x[n]) * (sorted_y[n + 1] + sorted_y[n])\n for n in range(n - 1)]\n\n return float(np.sum(trapezoidal_rule))", "def quad_gaussian(\n order,\n dist,\n rule=\"fejer\",\n accuracy=100,\n recurrence_algorithm=\"\",\n):\n coefficients = construct_recurrence_coefficients(\n order, dist, rule, accuracy, recurrence_algorithm)\n abscissas, weights = coefficients_to_quadrature(coefficients)\n return combine_quadrature(abscissas, weights)", "def analytical_integral_rppd(p, q, r, a, b, c):\n if p < 0:\n return 0.0\n elif q < 0:\n return 0.0\n elif r < 0.0:\n return 0.0\n else:\n return (\n a ** (p + 1)\n * b ** (q + 1)\n * c ** (r + 1)\n * ((-1) ** p + 1)\n * ((-1) ** q + 1)\n * ((-1) ** r + 1)\n / ((p + 1) * (q + 1) * (r + 1))\n )", "def exquo(self, a, b):\n return a // b", "def integrand(u):\n return erfcx(-u)\n #if u < -4.0:\n #return -1. / np.sqrt(np.pi) * (1.0 / u - 1.0 / (2.0 * u**3) + \n ##3.0 / (4.0 * u**5) - \n ##15.0 / (8.0 * u**7))\n #else:\n #return np.exp(u**2) * (1. 
+ erf(u))", "def test_hof(a, b):\n def f(g, x):\n return g(x) * g(x + 10.0)\n\n def g(x):\n return x * b\n\n return f(g, a) + f(g, b)", "def sp_integrate_1D_ ( pdf , xmin , xmax , *args , **kwargs ) :\n if hasattr ( pdf , 'setPars' ) : pdf.setPars() \n func = pdf.function()\n return func.sp_integrate_1D ( xmin , xmax , *args , **kwargs )", "def cross_product_integral(self, a, b):\n\n return self._cross_product_integral(a, b)", "def quadradic_interp(alpha_0, pk, xk):\n top = (alpha_0 ** 2) * (phi_prime(pk, xk))\n bottom = (phi_function(alpha_0, pk, xk) - phi_function(0, pk, xk) - alpha_0 * phi_prime(pk, xk))\n return - top / (2 * bottom)", "def integrate(self,lower,upper,method=True,n=100,jac=None,**kwargs):\n\n #TODO?: vectorize when needed\n\n import scipy.integrate as itg\n if method is True:\n method=self.defaultIntMethod\n\n e,d=None,None\n\n ps=tuple(self.parvals)\n if jac:\n def f(x,*pars):\n return jac(x,*pars)*self.f(x,*pars)\n else:\n f=self.f\n\n if method=='quad':\n res=itg.quad(f,lower,upper,args=ps,full_output=1,**kwargs)\n if len(res) == 4:\n v,e,d,m = res\n from warnings import warn\n warn('Integration message: %s'%m)\n #use these for 2d and 3d\n #elif method=='dblquad':\n # raise NotImplementedError\n #elif method=='tplquad':\n # raise NotImplementedError\n elif method=='fixed_quad':\n res=itg.fixed_quad(f,lower,upper,ps,5 if n is None else n,**kwargs)\n elif method=='quadrature':\n res=itg.quadrature(f,lower,upper,ps,**kwargs)\n elif method=='romberg':\n res=itg.romberg(f,lower,upper,ps,**kwargs)\n else: #sampled techniques\n if n is None:\n n=100\n if np.isscalar(n):\n x=np.linspace(lower,upper,n)\n else:\n x=np.array(n)\n y=f(x,*ps)\n if method=='trapz':\n res=itg.trapz(y,x,**kwargs)\n elif method=='cumtrapz':\n res=itg.cumtrapz(y,x,**kwargs)\n elif method=='simps':\n res=itg.simps(y,x,**kwargs)\n elif method=='romb':\n res=itg.simps(y,np.convolve(x,[1,-1],mode='same').mean(),**kwargs)\n else:\n raise ValueError('unrecognized integration method')\n\n\n self.lastintegrate = res\n return res if np.isscalar(res) else res[0]", "def fdq1(f, x, h=1e-5):\n return (f(x+h) - f(x))/h\n \n raise NotImplementedError(\"Problem 2 Incomplete\")", "def solve_integral(integrand, y):\n solnarr = np.empty(len(y))\n for i in range(len(y)):\n yy = y[i]\n soln = quad(integrand, 0, np.inf, args=(yy))\n solnarr[i] = soln[0]\n return solnarr", "def bisection(f,a,b,tol=1e-4,Max_trial=14):\n pvalues = np.zeros(Max_trial+3)\n fvalues = np.zeros(Max_trial+3)\n fa,fb = f(a),f(b)\n pvalues[0],pvalues[1]=a,b\n fvalues[0],fvalues[1]=fa,fb\n\n if fa*fb>0:\n print('f(a)*f(b)>0 ---> specified a and b do not work!')\n print('spiting out a or b --- whichever is closer')\n \n if np.abs(fa)<=fb:\n return a,f(a)\n else:\n return b,f(b)\n \n \n p=(a+b)/2\n pvalues[2]=p\n fp=f(p)\n fvalues[2]=fp\n err=np.abs(fp)\n\n j=0\n while err > tol and j<Max_trial:\n\n if fa*fp<0:\n b=p\n p=(a+b)/2\n fp = f(p)\n else:\n a=p\n fa = fp\n p=(a+b)/2\n fp = f(p)\n\n err=abs(fp)\n print('-------------------------------------------------------')\n print('bisection trial #'+str(j)+': sol='+str(p)+', mismatch ='+str(err))\n print('-------------------------------------------------------')\n j=j+1\n pvalues[j+2]=p\n fvalues[j+2]=fp\n # print('p='+str(p))\n # print('a,b='+str(a)+'--'+str(b))\n\n pvalues,fvalues=pvalues[:j+3],fvalues[:j+3]\n\n return pvalues,fvalues", "def rectangular_integral(f, xrange, intervals):\n int_out = 0\n delta_x = (max(xrange)-min(xrange))/intervals\n new_xrange = np.linspace(min(xrange), max(xrange), 
intervals)\n for x in new_xrange:\n int_out += f(x)\n return delta_x*int_out", "def gue_correlation(alpha, beta):\n f = lambda x: 1.0 - power(sin(pi*x)/(pi*x), 2)\n result = quad(f, [alpha, beta])\n #x = (alpha + beta)/2.0\n return result", "def exquo(self, a, b):\n return a / b", "def gass_hermite_quad(f, degree, m, c):\n\n points, weights = np.polynomial.hermite.hermgauss( degree)\n\n #function values at given points\n f_x = f(points, m= m, c= c)\n\n #weighted sum of function values\n F = np.sum( f_x * weights)\n\n return F", "def my_func(a,b):\n return (0.+a)*b", "def biquad_coefficients(self):\n a = 10 ** (self.gain / 40)\n w0 = 2 * np.pi * self.fc / self._fs\n alpha = np.sin(w0) / (2 * self.q)\n\n a0 = 1 + alpha / a\n a1 = -(-2 * np.cos(w0)) / a0\n a2 = -(1 - alpha / a) / a0\n\n b0 = (1 + alpha * a) / a0\n b1 = (-2 * np.cos(w0)) / a0\n b2 = (1 - alpha * a) / a0\n\n return 1.0, a1, a2, b0, b1, b2", "def integrate(self, t):", "def biquad_coefficients(self):\n a = 10 ** (self.gain / 40)\n w0 = 2 * np.pi * self.fc / self._fs\n alpha = np.sin(w0) / (2 * self.q)\n\n a0 = (a + 1) + (a - 1) * np.cos(w0) + 2 * np.sqrt(a) * alpha\n a1 = -(-2 * ((a - 1) + (a + 1) * np.cos(w0))) / a0\n a2 = -((a + 1) + (a - 1) * np.cos(w0) - 2 * np.sqrt(a) * alpha) / a0\n\n b0 = (a * ((a + 1) - (a - 1) * np.cos(w0) + 2 * np.sqrt(a) * alpha)) / a0\n b1 = (2 * a * ((a - 1) - (a + 1) * np.cos(w0))) / a0\n b2 = (a * ((a + 1) - (a - 1) * np.cos(w0) - 2 * np.sqrt(a) * alpha)) / a0\n\n return 1.0, a1, a2, b0, b1, b2", "def biquad_coefficients(self):\n a = 10 ** (self.gain / 40)\n w0 = 2 * np.pi * self.fc / self._fs\n alpha = np.sin(w0) / (2 * self.q)\n\n a0 = (a + 1) - (a - 1) * np.cos(w0) + 2 * np.sqrt(a) * alpha\n a1 = -(2 * ((a - 1) - (a + 1) * np.cos(w0))) / a0\n a2 = -((a + 1) - (a - 1) * np.cos(w0) - 2 * np.sqrt(a) * alpha) / a0\n\n b0 = (a * ((a + 1) + (a - 1) * np.cos(w0) + 2 * np.sqrt(a) * alpha)) / a0\n b1 = (-2 * a * ((a - 1) + (a + 1) * np.cos(w0))) / a0\n b2 = (a * ((a + 1) + (a - 1) * np.cos(w0) - 2 * np.sqrt(a) * alpha)) / a0\n\n return 1.0, a1, a2, b0, b1, b2", "def IQ_interpolation(f, x0, x1, y0, y1, x, yval, xtol, ytol):\n _abs = abs\n if y1 < 0.: x0, y0, x1, y1 = x1, y1, x0, y0\n dx1 = dx0 = x1-x0\n f0 = yval-y0 \n if not (x0 < x < x1 or x1 < x < x0):\n try:\n # False position\n x = x0 + f0*dx0/(y1-y0)\n except ZeroDivisionError:\n # Bisection\n x = (x0+x1)/2\n yval_ub = yval + ytol\n yval_lb = yval - ytol\n while _abs(dx1) > xtol:\n y = f(x)\n if y > yval_ub:\n y2 = y1\n x2 = x1\n x1 = x\n y1 = y\n elif y < yval_lb:\n y2 = y0\n x2 = x0\n x0 = x\n y0 = y\n f0 = yval-y\n else: break\n dx1 = x1-x0\n try:\n # Inverse quadratic interpolation\n f1 = yval - y1\n f2 = yval - y2\n d01 = f0-f1\n d02 = f0-f2\n d12 = f1-f2\n f0_d12 = f0/d12\n f1_d02 = f1/d02\n f2_d01 = f2/d01\n x = x0*f1_d02*f2_d01 - x1*f0_d12*f2_d01 + x2*f0_d12*f1_d02\n assert x0 < x < x1 or x1 < x < x0\n except:\n try:\n # False position\n x = x0 + f0*dx1/(y1-y0)\n except:\n # Bisection\n x = (x0+x1)/2\n else:\n # Overshoot to prevent getting stuck\n x = x + 0.1*(x1 + x0 - 2.*x)*(dx1/dx0)**3\n dx0 = dx1\n return x", "def RegulaFalsiMethod(f, a=0.0, b=0.75, tol=1e-10):\n\tstart = time()\n\tf_a = f(a)\n\tf_b = f(b)\n\terror = tol + 1\n\t\n\terrs = []\n\ti = 0\n\n\twhile error > tol:\n\t\tx = (a*f_b - b*f_a) / (f_b - f_a)\n\t\tf_x = f(x)\n\n\t\terrs.append(error)\n\n\t\tif f_a*f_x > 0:\n\t\t\ta = x\n\t\t\tf_a = f_x\n\t\telif f_b*f_x > 0:\n\t\t\tb = x\n\t\t\tf_b = f_x\n\t\telse:\n\t\t\tbreak\n\n\t\terror = np.abs(f_x)\n\t\ti = i+1\n\tend = 
time()\n\treturn x, (end-start), i", "def integrate(self, x, dx):\n raise NotImplementedError(\"Not implemented yet.\")", "def bisection(df, a, b, niter=10):\n for i in xrange(niter):\n mid = (a+b)/2.\n if df(mid) > 0:\n b = mid\n else:\n a = mid\n\n print \"Bisection method converges faster\"\n return (a+b)/2." ]
[ "0.7321034", "0.66224813", "0.6437059", "0.64194804", "0.62829626", "0.62496614", "0.6247555", "0.6206842", "0.6206842", "0.6199654", "0.61690414", "0.6163885", "0.61347663", "0.6107581", "0.60302866", "0.60202134", "0.6018099", "0.6005248", "0.60019904", "0.5973936", "0.59257406", "0.58599657", "0.58041286", "0.58012384", "0.5800743", "0.5697578", "0.5657789", "0.5613508", "0.5597451", "0.5479812", "0.54697436", "0.5428496", "0.54250985", "0.5414989", "0.541311", "0.53803337", "0.53770334", "0.53527105", "0.53385216", "0.53310794", "0.531009", "0.5305022", "0.5273443", "0.5241531", "0.5222423", "0.5210571", "0.51966774", "0.51963127", "0.51932424", "0.5191184", "0.5187609", "0.5165571", "0.5147095", "0.5137549", "0.51372075", "0.51348686", "0.5134393", "0.5133609", "0.5132199", "0.51320684", "0.5107501", "0.5101991", "0.50947267", "0.50848967", "0.5081415", "0.5080468", "0.5078948", "0.50749856", "0.50677896", "0.5065257", "0.5059806", "0.50565016", "0.505249", "0.5050308", "0.50476617", "0.50469154", "0.50399315", "0.50278467", "0.50161624", "0.50115824", "0.50003797", "0.49981406", "0.49955204", "0.4992284", "0.49626556", "0.4961897", "0.49528697", "0.49485475", "0.49462098", "0.4946088", "0.49390385", "0.4936317", "0.49323243", "0.4926315", "0.4921033", "0.49183002", "0.49179602", "0.49045983", "0.4903405", "0.48961237" ]
0.7673257
0
Run setup before each request.
def before_request():
    id = request.headers.get("User-Id", None)
    if id:
        user = User.query.get(id)
        if user is None:
            raise CustomError(400, message="Invalid User-Id in Header.")
        g.user = user
    else:
        g.user = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def before_request():\r\n\r\n\tinit_classes()", "def setup(ctx):\n handle_no_cache(ctx)", "def _setup(self) -> None:\n\t\treturn", "def setup_once():\n # type: () -> None\n _install_httpx_client()\n _install_httpx_async_client()", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def pre_setup(self) -> None:\n if self.__setup_done:\n self.base_logger.error(\"pre_setup was erroneously called twice\")\n raise SetupAlreadyDoneError()", "def prepare(self, request):\n pass", "def setup(self) -> None:\n self.setup_logging()\n self.setup_plugins()\n self.post_setup()", "def setUp(self) -> None:\n super(ReadOnlyActionTestsMixin, self).setUp()\n\n self.request = RequestFactory().request()\n self.siteconfig = SiteConfiguration.objects.get_current()", "def before_request():\n pass", "def setup(self):\n # Have to wait for a server connection before we\n # can run the test\n self.wait_for_server_connections(10)", "def setUp(self):\n self.setup_remote_site()\n self.setup_local_site()", "async def setup(self):\n load_base_templates()\n uris = URI.gather()\n for uri, resource in uris.items():\n methods = resource.methods\n if \"get\" not in methods:\n methods[\"get\"] = None\n\n for method in methods.keys():\n self.app.add_routes([\n getattr(aioweb, method)(uri, resource.process)\n ])\n self.app.add_routes([aioweb.get(\"/hello\", hello)])\n\n # TMP code\n max_age = 3600 * 24 * 365 # 1 year\n setup(self.app, PonyStorage(max_age=max_age))\n self.preparing_task = asyncio.create_task(self.prepare_web())", "def setUp(self):\n self.headers = list()\n self.status = list()", "def setup(self):\n log.msg(\"Fetching required net test inputs...\")\n for net_test_loader in self.netTestLoaders:\n yield self.fetchAndVerifyNetTestInput(net_test_loader)\n\n if self.bouncer:\n log.msg(\"Looking up test helpers...\")\n yield self.lookupTestHelpers()", "def setUp(self):\n test_env_setup()", "def setUp(self):\n self.factory = RequestFactory()", "def setUp(self):\n self.factory = RequestFactory()", "def setUp(self):\n self.factory = RequestFactory()", "def setUp(self):\n self.hass = get_test_home_assistant()\n with requests_mock.Mocker() as mock_req:\n self.setup_api(MOCK_DATA, mock_req)\n self.addCleanup(self.hass.stop)", "def setUp(self):\n super().setUp()\n self.request_factory = RequestFactory()\n self._auth_backend = LTIBackend()", "def before_each_test(self, request):\n self.test_counter = Counter()\n self.check_ref = request.config.getvalue(\"check_ref\")\n self.create_ref = request.config.getvalue(\"create_ref\")", "def dm_setup(self):\n dispatcher.connect(\n self.dequeue_next_page_requests,\n signal=signals.spider_idle\n )\n self._was_setup_called = True", "def setup(self) -> None:\n pass", "def setup(self) -> None:\n pass", "def setup(self) -> None:\n pass", "def setUp(self):\n cache.clear()\n self.factory = APIRequestFactory()", "def setUp(self):\n\n self._set_up()", "def _setup(self, request, config):\n try:\n self._pytester = request.getfixturevalue(\"pytester\")\n except:\n try:\n self._testdir = request.getfixturevalue(\"testdir\")\n except:\n raise RuntimeError(\n \"Unable to load either pytester or testdir fixtures. 
\"\n \"Check if pytester plugin is enabled.\"\n )", "def setUp(self):\n self.response = self.s.get(self.url, params=self.params)", "async def setup(self, ctx):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def do_before(self):\r\n pass", "def prepare(self):\n self.uri = self.request.uri\n self.path = self.request.uri.split('?')[0]\n self.method = self.path.split('/')[-1]\n self.default_methods = {}\n #\n # You can use the before_handler in a local controller to\n # process your own prepare stuff.\n # a common use case is to call: self.print_debug_info().\n # which then applies only to this specific handler.\n # \n before_handler = getattr(self, \"before_handler\", None)\n print(\"calling before_handler for \" + str(self.__class__))\n if callable(before_handler):\n before_handler()", "def setup(self):\n\t\tpass", "def before():\n app.logger.info(\"Local Timestamp: {}\".format(str(datetime.now())))\n app.logger.info(\"Request Method: {}\".format(request.method))\n app.logger.info(\"Request URL: {}\".format(request.url))\n app.logger.info(\"Request Access Route: {}\".format(request.access_route[0]))\n headers = \"\"\n for (key, value) in request.headers:\n # hide authorization header from logs\n if key == \"Authorization\":\n value = \"[provided]\" \n headers += \"{}: {}\\n\".format(key, value)\n app.logger.info(\"Request Headers:{}\\n{}\\n{}\".format(\"-\"*45,str(headers)[:-1], \"-\"*60))\n body = copy.deepcopy(request.json)\n if type(body) is dict and \"password\" in body:\n body['password'] = \"[provided]\"\n app.logger.info(\"Request Body: {}\".format(body))", "def setup(self):\n pass", "def setup(self,**kwargs):\n pass", "def preRunSetup(self):\n self.logDesc(\"Pre Run Setup\") \n #Check for current logged in user\n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)", "def setup(self):\r\n pass", "def setup(self, request_params):\n raise NotImplementedError(\n u\"%s: Method not implemented\", self.__class__.__name__)", "def setup(self):\n self.log.debug('upm - in upm setup()')\n # Add resource setup code here", "def _setup(self):", "def _setup(self):", "def setUp(self):\n input_files = glob.glob(PATH_TO_INPUT_FILES) # Read input (csv) files from current (sw/test) directory.\n if not self.sessionizing:\n self.sessionizing = Sessionizing()\n self.sessionizing.initialize(*input_files)\n if not self.sites_session_counter:\n self.merge_and_sort_input_files(*input_files)\n self.process_input_files()", "def preRunSetup(self):\n self.logDesc(\"Pre Run Setup\")\n #Check for current logged in user\n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)\n #Creates Sample Template if not exists\n self.createSampleTemplate(templateName=self.templateName)", "def before_run_tests(cls):\n pass", "def _fixture_setup(self):\n pass", "def setup( self ):", "async def _setup(self):", "def setUp(self):\n\n installHandler()", "def setUp(self):\n\n installHandler()", "def pre_init(self) -> None:\n self._check_and_set_network()\n self._check_and_apply_migrations()", "async def setup(self):\n pass", "def setup(self, registers):\n \"\"\" tasks before any generation functions are called \"\"\"\n pass", "def setup(self, *args, **kwargs):\n pass", "def setup(self):\n self.build_serverkeyhash()\n 
self.build_agent_pubkey()\n self.load_registration_key()", "def prepare(self):\n # The default implementation takes care of local compilation of the client, if needed. Be sure to call it.\n client.prepare(self)\n # TODO: Add any additional preparations if needed. You're not likely to need those, though.", "def _pre_setup(self):\n apps.clear_cache()\n call_command('migrate', interactive=False, verbosity=0)\n call_command('loaddata', 'initial_data', verbosity=0)\n super(DatatableViewTestCase, self)._pre_setup()", "def post_setup(self, context):\n pass", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n self.client = app.test_client()\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True", "def setup(self):\n pass", "def test_setup(self):\n assert self.http_handler.setup() is None\n self.assert_quantity_in_outbox(0)", "def setUp(self):\n pass #because we dont have anything to setup.", "def default_setup(self, mocker):\n # pylama: ignore=W0201\n self.url = '/api/v0/publish'\n self.client = wsgi.application.test_client()\n self._retryable = mocker.patch.object(wsgi, '_retryable')", "def _setup(self):\n raise NotImplementedError()", "def setup(self) -> None:", "async def _pre_call(self, _request_id: int, request: fastapi.Request, *args, **kwargs) -> None:\n return", "def setUp(self):\n self.core_processor = core_processor.ProcessCores()", "def setUp(self):\n # Get the Flask test client\n self.client = app.test_client()\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setup_hooks(self):\n pass", "def setUp(self):\n self.factory = RequestFactory()\n with patch(\"bookwyrm.suggested_users.rerank_suggestions_task.delay\"), patch(\n \"bookwyrm.activitystreams.populate_stream_task.delay\"\n ):\n self.local_user = models.User.objects.create_user(\n \"mouse@local.com\",\n \"mouse@mouse.mouse\",\n \"password\",\n local=True,\n localname=\"mouse\",\n )\n models.SiteSettings.objects.create()", "def setUp_extra(self):\n pass", "def _handle_first_request(self):\n pass", "def _pre_setup(self):\r\n\r\n # Flush the Mongo modulestore\r\n ModuleStoreTestCase.drop_mongo_collections()\r\n\r\n # Call superclass implementation\r\n super(ModuleStoreTestCase, self)._pre_setup()", "def setUp(self):\n\n # Get the Flask test client\n self.client = app.test_client()\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True", "def setUp(self):\n\n # Get the Flask test client\n self.client = app.test_client()\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True", "def initial(self, request, *args, **kwargs):\n\t\t# Ensure that the incoming request is permitted\n\t\t# self.perform_authentication(request)\n\t\t# self.check_permissions(request)\n\t\t# self.check_throttles(request)\n\t\tpass", "def setup(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError" ]
[ "0.727703", "0.69686836", "0.69070673", "0.68600476", "0.683981", "0.683981", "0.683981", "0.683981", "0.683981", "0.6837512", "0.68245745", "0.6805248", "0.67752415", "0.676644", "0.67472184", "0.67328984", "0.67298096", "0.67094517", "0.66988313", "0.6659573", "0.6658358", "0.6658358", "0.6658358", "0.6623725", "0.661193", "0.6611005", "0.6602891", "0.65889204", "0.65889204", "0.65889204", "0.65833676", "0.65579516", "0.6555587", "0.6548714", "0.6548401", "0.6524638", "0.6524638", "0.6524638", "0.6524638", "0.6524638", "0.6524638", "0.6524638", "0.6524638", "0.6524638", "0.6524638", "0.6524638", "0.6524638", "0.6516338", "0.6505613", "0.65021884", "0.64853483", "0.64661366", "0.64651906", "0.64614993", "0.64563125", "0.64545107", "0.64394987", "0.64372855", "0.64372855", "0.64303684", "0.64243454", "0.6413708", "0.6403852", "0.64012754", "0.640077", "0.6387688", "0.6387688", "0.6387512", "0.6370554", "0.63697183", "0.63650435", "0.63591707", "0.6342773", "0.6341599", "0.6340102", "0.6302547", "0.6302547", "0.63010216", "0.6297801", "0.62906504", "0.6283493", "0.6282236", "0.6280623", "0.6273432", "0.6271583", "0.62678987", "0.62635845", "0.6263382", "0.6263382", "0.6263382", "0.6261198", "0.6260301", "0.62502414", "0.62362367", "0.6233134", "0.6227965", "0.6227965", "0.62277097", "0.6225029", "0.6225029", "0.6225029" ]
0.0
-1
Return whether or not an email or password combination is valid.
def authenticate():
    # Get JSON data from request
    json = request.get_json()
    if 'email' not in json or 'password' not in json:
        raise CustomError(400, message='Must include an email and a password')

    # Check email
    user = User.query.filter_by(email=json['email']).first()
    if user is None:
        raise CustomError(401, message='Email or password were not found.')

    # Check password
    if not check_password_hash(user.password, json['password']):
        raise CustomError(401, message='Email or password were not found.')

    return jsonify({'success': True, 'user': user.to_dict()}), 201
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def valid_auth(email, password):\n if not email or not password:\n return False\n sha = sha1(email).hexdigest()\n user_info = redis.hgetall(\"sl:account:{}\".format(sha))\n\n return bool(\n type(user_info) == dict and\n user_info.get(\"password\") == password\n )", "def check_auth(email, password):\n try:\n user = User.get(User.email == email)\n except User.DoesNotExist:\n return False\n return encrypted_password(email, password) == user.password", "def validate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n cur = get_cursor()\n if not email_exists(cur, self.email.data):\n self.email.errors.append('Please check your email address.')\n return False\n\n return True", "def is_login_valid(email, password):\n user_data = Database.find_one(UserConstants.COLLECTION, {\"email\": email})\n admin_created_user = Database.find_one(UserConstants.COLLECTION, {\"email\": email, \"admin_created\": \"Yes\"})\n if user_data is None:\n # Tell the user that their e-mail doesn't exist\n raise UserErrors.UserNotExistsError(\n \"Email is not recognized. Please use link below to sign-up if you have not created an account.\")\n if admin_created_user is not None:\n # Tell the user to sign up\n raise UserErrors.AdminCreatedUserError(\n \"Your account was created by an admin. Please register with HHT to enjoy the full functionality of the site.\")\n if not sha512_crypt.verify(password, user_data['password']):\n # Tell the user that their password is wrong\n raise UserErrors.IncorrectPasswordError(\"Password does not match the one registered.\")\n\n return True", "def test_credentials(self):\r\n data = self._deep_clean('zekebarge@gmail.com')\r\n error = data.get(ERROR_CODE, None)\r\n if error in (1,2):\r\n raise InvalidCredentialsError(\"Credentials are invalid for user '{}'\".format(self._username))\r\n return True", "def verify_password(email_or_token: str, password: str) -> bool:\n # Secondary method used to simplify unit testing.\n return _verify_password(email_or_token, password)", "def is_valid(self, email=None):\n if not email:\n return False\n\n # RFC 3696\n # In addition to restrictions on syntax, there is a length limit on email addresses.\n # That limit is a maximum of 64 characters (octets) in the \"local part\" (before the \"@\")\n # and a maximum of 255 characters (octets) in the domain part (after the \"@\") for a total\n # length of 320 characters. However, there is a restriction in RFC 2821 on the length of\n # an address in MAIL and RCPT commands of 254 characters. Since addresses that do not fit\n # in those fields are not normally useful, the upper limit on address lengths should\n # normally be considered to be 254.\n\n if len(email) > 254:\n return False\n\n parts = email.split('@')\n if len(parts) > 2 or len(parts[0]) > 64 or len(parts[1]) > 255:\n return False\n\n if not re.match('[a-z0-9\\!\\#\\$\\%\\&\\'\\*\\+\\/\\=\\?\\^\\_\\`\\{\\|\\}\\~\\-]+(?:\\.[a-z0-9\\!\\#\\$\\%\\&\\'\\*\\+\\/\\=\\?\\^\\_\\`\\{\\|\\}\\~\\-]+)*', email.lower()):\n return False\n # A valid mail exchange server is configured!\n return self.valid_mx(parts[1])", "def check_registration_allowed(self, email, username, password):\n message = ''\n status = 'done'\n for provider, options in self.active_authenticators(email, username, password):\n allow_reg = _get_tri_state(options, 'allow-register', True)\n if allow_reg is None: # i.e. 
challenge\n auth_result, msg = provider.authenticate(email, username, password, options)\n if auth_result is True:\n break\n if auth_result is None:\n message = 'Invalid email address/username or password'\n status = 'error'\n break\n elif allow_reg is True:\n break\n elif allow_reg is False:\n message = 'Account registration not required for your account. Please simply login.'\n status = 'error'\n break\n return message, status", "def is_password_valid(password):\n #TODO : This should also be handled by the front_end\n pass", "def check_auth(username, password, expected_user, expected_pw):\n return username == expected_user and password == expected_pw", "def is_valid_password(self, password):\n rex = \"^[a-zA-Z0-9@_+-.]{3,}$\"\n return re.match(rex, password)", "def _verify_password(email: str, password: str) -> bool:\n # Get the user information from the DB.\n user = User.query.filter_by(email=email).first()\n\n # If the user does not exist in the DB, return False.\n if not user:\n return False\n\n # Set flask global state.\n set_globals(token_used=False)\n\n logger.debug(\"Authorized with Email/Password.\")\n\n # Check if the users password is correct.\n return user.verify_password(password=password)", "def password_is_valid(self, password):\n return Bcrypt().check_password_hash(self.password, password)", "def password_is_valid(self, password):\n return Bcrypt().check_password_hash(self.password, password)", "def validate_authentication(self, username, password):\n return self.user_table[username]['pwd'] == password", "def is_email(address):\n try:\n validate_email(address)\n return True\n except:\n return False", "def _check_password(self, password):\n rule = re.compile(constant.password_regex)\n if not rule.match(password):\n return False\n # disallow password from azure guide, yes, it's hard code.\n disallowed = constant.password_disallowed\n return password not in disallowed", "def valid_email(self, email):\n # uses regular expressions\n reg_email = re.compile(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\")\n return re.match(reg_email, email)", "def _email_is_valid(email):\n return VALID_EMAIL_REGEXP.match(email) is not None", "def check_submission(username, password, verify, email):\n if (is_allowed_username(username) +\n is_allowed_password(password) +\n matches_password_verify(password, verify) +\n is_allowed_email(email)) == \"\":\n\n return True\n else:\n return False", "def is_valid_email(self, email):\n rex = \"^[\\w]+[\\d]?@[\\w]+\\.[\\w]+$\"\n return re.match(rex, email)", "def validate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n cur = get_cursor()\n if email_exists(cur, self.email.data):\n self.email.errors.append('This email already exists!')\n return False\n\n return True", "def isvalid(email):\n pattern = re.compile(r\"^([a-zA-Z0-9_\\-]+)@([a-zA-Z0-9]+)\\.\\w{,3}$\")\n return bool(pattern.match(email))", "def check_pass(self):\n if self.validated_data['new_password'] != self.validated_data['confirm_password']:\n raise serializers.ValidationError({\"error\":\"Please enter matching passwords\"})\n return True", "def password_validator(username, password):\n digits = re.search(r'\\d+', password)\n capital_letters = re.search(r'[A-Z]+', password)\n lenght = len(password) > PASSWORD_MIN_LENGTH\n special_symbol = re.search(r'[\\-\\/\\@\\?\\!\\,\\.\\#\\&\\*]+', password)\n\n statement = digits and capital_letters and lenght and special_symbol\n\n if statement:\n return True\n return False", "def IsEmailValid(email):\n return email and 
EMAIL_RE.search(email)", "def is_valid_password(self, pwd):\n if pwd is None:\n return False\n if type(pwd) is not str:\n return False\n if self._password is None:\n return False\n if self._password == hashlib.md5(pwd.encode()).hexdigest().lower():\n return True\n return False", "def check_valid(self, username, password):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"SELECT username,password \\\n FROM users WHERE username = %s\", (username,))\n credentials = cursor.fetchone()\n cursor.close()\n con.commit()\n con.close()\n if credentials is None:\n return False\n if username != credentials[0]:\n return False\n if sha256_crypt.verify(password, credentials[1]):\n return True\n return False", "def is_email_address_valid(email):\n if not re.match(\"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\\.[a-zA-Z0-9-]+)*$\", email):\n return False\n return True", "def password_validator(password: str) -> bool:\n uppercase_regex = re.compile(r\"[A-Z]+\")\n lowercase_regex = re.compile(r\"[a-z]+\")\n digit_regex = re.compile(r\"\\d+\")\n\n if len(password) < 8:\n return False\n if not uppercase_regex.findall(password):\n return False\n if not lowercase_regex.findall(password):\n return False\n if not digit_regex.findall(password):\n return False\n else:\n return True", "def is_valid_email_address (email):\n return valid_email.search(email)", "def valid_password(password):\n val = True\n\n if len(password) < 8:\n val = False\n return val\n\n if not any(char.isdigit() for char in password):\n val = False\n return val\n\n if not any(char.isupper() for char in password):\n val = False\n return val\n\n if not any(char.islower() for char in password):\n val = False\n return val\n\n if val:\n return val", "def password_validates(password):\n if any(char.isdigit() for char in password) \\\n and any(char.isalpha() for char in password) \\\n and len(password) > 5:\n return True\n else:\n return False", "def password_is_valid(password: str) -> bool:\n pattern = re.compile(\n r'^(?=.{8,32}$)(?=.*[A-Z])(?=.*[a-z])(?=.*[0-9])(?=.*[!\"#$%&\\'()*+-./:;<=>?@[\\]^_`{|} ~,\\\\]).*')\n return pattern.match(password)", "def check_password(self, password):\n if self.password_expire_epoch != 0 and \\\n time.time() > self.password_expire_epoch:\n lg.debug(\"Password invalid for user '%s': user password is expired\"\\\n %self.name)\n return False\n if self.password_hash is None:\n lg.debug(\"Password invalid for user '%s': user has no password set\"\\\n %self.name)\n return False\n hash = self._get_pw_hash(password, self.password_salt)\n if hash != self.password_hash:\n lg.debug(\"Password invalid for user '%s': password does not match\"\\\n %self.name)\n return False\n else:\n return True", "def validate_password(self, password):\r\n # the password might be null as in the case of morpace employees\r\n # logging in via ldap. 
We check for that here and return them as an\r\n # incorrect login\r\n if self.password:\r\n salt = self.password[:29]\r\n return self.password == bcrypt.hashpw(password, salt)\r\n else:\r\n return False", "def check_auth(email, password):\n sha = hashlib.sha1()\n sha.update(password)\n user = Users.query.filter_by(email=email).first()\n return user.password == sha.hexdigest()", "def authenticate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n user = self.username.data\n\n cur = get_cursor()\n if email_exists(cur, user):\n user = get_username(cur, user)\n\n if username_exists(cur, user):\n pw_hash = get_pw_hash(cur, user)\n\n if check_password(self.password.data, pw_hash):\n self.username.data = user\n return True\n\n return False", "def is_allowed_password(password):\n if not is_filled_password(password):\n return \"Password is a required field.\"\n elif not is_regex_password(password):\n return \"That is not a valid password.\"\n else:\n return \"\"", "def validate_password(data):\n\n if \"password\" not in data:\n return False\n password = data[\"password\"]\n if len(password) < 4 or len(password) > 16:\n return False\n\n return True", "def verify_password(email_or_token, password):\n if email_or_token == '':\n g.current_user = AnonymousUser()\n return True\n if password == '':\n g.current_user = User.verify_auth_token(email_or_token)\n g.token_used = True\n return g.current_user is not None\n email_or_token = email_or_token.lower()\n user = User.query.filter(func.lower(User.email)==email_or_token).first()\n if not user:\n return False\n g.current_user = user\n g.token_used = False\n return user.verify_password(password)", "def _check_password(self, password):\n if self.password_regex.search(password) is not None:\n print(\"Correct password\")\n return True\n else:\n print(\"Wrong password\")\n return False", "def check_password(self, password):\n return self.password == password", "def validate_email(email):\r\n\r\n\t\tstatus = 'valid'\r\n\r\n\t\ttry:\r\n\t\t\tvalidate_email(email)\r\n\t\t\tuser = AuthTools.get_user_by_email(email)\r\n\r\n\t\t\tif user is not None:\r\n\t\t\t\tstatus = 'taken'\r\n\r\n\t\texcept:\r\n\t\t\tstatus = 'invalid'\r\n\r\n\t\treturn status", "def _credentials_are_valid(self, username, password):\n LDAP_SERVER = 'ldap://xxx.xxx.xxx' # EDIT THIS\n LDAP_USERNAME = '%s@xxx.com' % username # EDIT THIS\n LDAP_PASSWORD = password\n\n try:\n # build a client\n ldap_client = ldap.initialize(LDAP_SERVER)\n # perform a synchronous bind\n ldap_client.set_option(ldap.OPT_REFERRALS, 0)\n ldap_client.simple_bind_s(LDAP_USERNAME, LDAP_PASSWORD)\n except ldap.INVALID_CREDENTIALS:\n ldap_client.unbind()\n # Wrong username or password\n return False\n except ldap.SERVER_DOWN:\n # AD server not available\n return False\n # all is well\n ldap_client.unbind()\n # Login successful\n return True", "def user_login_verify(email, password):\n user_exist = User.user_exists(email)\n if user_exist is True:\n emails_password = \"\".join([i['password']\\\n for i in Data.users if email == i['email']])\n return check_password_hash(emails_password, password)\n return False", "def check_password(password1: str, password2: str) -> bool:\n if password1 == password2:\n return True\n else:\n raise ValueError('Пароли не совпадают')", "def IsValidEmail(s):\n return RE_EMAIL_ONLY.match(s)", "def has_valid_password(self, password):\n return bcrypt.check_password_hash(self.password, password)", "def check(self, password):\n\n if len(password) < self.min_length:\n return False\n\n digits = 
len(findall(r\"\\d\", password))\n if digits < self.min_digits:\n return False\n\n special_chars = sum(v for k, v in Counter(password).items() if k in punctuation)\n if special_chars < self.min_special:\n return False\n\n alpha_chars = sum(v for k, v in Counter(password).items() if k in ascii_letters)\n if alpha_chars < self.min_alpha:\n return False\n\n upper_chars = sum(\n v for k, v in Counter(password).items() if k in ascii_uppercase\n )\n if upper_chars < self.min_upper:\n return False\n\n lower_chars = sum(\n v for k, v in Counter(password).items() if k in ascii_lowercase\n )\n if lower_chars < self.min_lower:\n return False\n\n if self.check_breaches and check_password(password):\n return False\n\n if self.func and not self.func(password):\n return False\n\n return True", "def validate_password(self):\n # source: https://docs.python.org/2/howto/regex.html\n if not re.match(r\"[A-Za-z0-9@#]\", self.password):\n return 'Oops!, invalid password'\n elif len(self.password) < 6:\n return 'Password should be at least six characters long'\n return 'Valid password!'", "def is_valid_password(password):\n if len(password) < MIN_LENGTH:\n return False\n return True", "def is_valid_email_address(self, addr):\n\t\t# the call is blocking, so only syntactic analysis performed\n\t\t# To check if the SMTP server exists change check_mx to True\n\t\t# to check if email address exists change verify to true\n\t\treturn addr is not None and validate_email(addr, verify=False, check_mx=False)", "def validate_password(self, password):\n return self._password == encrypt_password(password,\n b64decode(str(self._salt)))", "def password_check(form,pwd,errors, pwdbis=None):\n length=len(form[pwd])\n if length<8 or length>50:\n errors[pwd] = 'Your password must contain between 8 and 50 letters'\n return False\n\n # searching for digits or symbols\n if not (re.search(r\"\\d\", form[pwd]) or re.search(r\"\\W\", form[pwd])):\n errors[pwd] = 'Your password must contain at least one digit or symbol'\n return False\n\n if not re.search(r\"[a-zA-Z]\", form[pwd]):\n errors[pwd] = 'Your password must contain at least a letter'\n return False\n\n if pwdbis and form[pwd] != form[pwdbis]:\n errors[pwdbis] = 'Must be identical to your password'\n return False\n return True", "def is_valid_password(password):\n assert password is not None\n password = str(password)\n return len(password) >= 8 and any(s.islower() for s in password) \\\n and any(s.isupper() for s in password) \\\n and any(s.isdigit() for s in password)", "def is_email_valid(e_mail):\n pattern = re.compile(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\")\n result = False\n if pattern.match(e_mail):\n result = True\n return result", "def is_valid_email(email):\n if re.search(EMAIL_REGEX, email):\n return True\n else:\n return False", "def validate(self) -> bool:\n if not super().validate():\n return False\n\n # Does the user exist\n user = User.query.filter_by(username=self.username.data).first()\n if not user:\n self.username.errors.append('Invalid username or password')\n return False\n\n # Does given password match user's password\n if not user.check_password(self.password.data):\n self.username.errors.append('Invalid username or password')\n return False\n\n return True", "def verify_email(entered_email):\n return EMAIL_RE.match(entered_email)", "def verify_password(self, password):\n stored_password = self.user_in_db['password']\n password_valid = passwords.verify_password(\n password, stored_password)\n\n if not password_valid:\n # Invalid password\n 
return {'error': 'Invalid email and password combination'}\n\n return {'success': True}", "def validate_password(form, field):\n user = User.query.filter_by(email = form.email.data).first()\n if user:\n if not user.has_password(field.data):\n raise ValidationError(\"Password is incorrect.\")", "def email_is_valid(email: Optional[str]) -> bool:\n if email is None:\n return True\n\n if re.match(r\"^[a-zA-Z0-9]+[\\.]?[a-zA-Z0-9]+[@]\\w+[.]\\w{2,3}$\", email):\n return True\n\n return False", "def check_email_validity(email):\n if email.count('@') != 1:\n return False\n if len(email.split('@')[0]) == 0:\n return False\n if '.' not in email.split('@')[1]:\n return False\n return True", "def valid_password(self, password):\n return check_hash(password, self.password)", "def verify_password(self, password):\n return self.PASSWORD == password", "def is_logged_in_user_valid(user_name, password):\n if user_name.upper() == \"HELLO\" and password == \"World\":\n return True # User input matches user name and password.\n else:\n return False # User input does not match user name and password.s", "def is_valid_password(password, username):\n import string\n if len(password) < 4 or ' ' in password:\n return False\n if username:\n if string.lower(username) in string.lower(password):\n return False\n return True", "def is_valid(first_name, last_name, zip_code, streetnumber, email):\n if len(first_name) < 1:\n messagebox.showerror('Error', 'Vul een geldige voornaam in.')\n return False\n elif len(last_name) < 1:\n messagebox.showerror('Error', 'Vul een geldige achternaam in.')\n return False\n elif len(zip_code.replace(' ', '')) != 6:\n messagebox.showerror('Error', 'Vul een geldig postcode in.')\n return False\n elif 1 < len(streetnumber) > 10:\n messagebox.showerror('Error', 'Vul een geldig huisnummer in.')\n return False\n elif re.search('[@]', email) is None or len(email) < 5 or re.search('[.]', email) is None:\n messagebox.showerror('Error', 'Vul een geldig e-mail in.')\n return False\n else:\n return True", "def check_auth(username, password):\n return username == USERNAME and password == PASSWORD", "def validate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n cur = get_cursor()\n pw_hash = get_pw_hash(cur, session['username'])\n if not check_password(self.old_password.data, pw_hash):\n self.old_password.errors.append('Did not find a match.')\n return False\n\n return True", "def valid_password(lower, upper, letter, password):\n # Note the -1 to turn 1 indexing into 0 indexing\n matches = [idx for idx in (lower, upper) if password[idx - 1] == letter]\n return len(matches) == 1", "def validate_login(self, username, password):\n user = User(self).get(username)\n return user and user['Password'] == password", "def is_email_valid(email):\n\n result = requests.get(\n f'https://api.hunter.io/v2/email-verifier?email={email}&api_key={settings.HUNTER_IO_API_KEY}'\n ).json()\n\n return True if result.get('data').get('status') == 'valid' else False", "def is_email_request_valid(self, to_addr, cc_addr, bcc_addr, topic, text):\n\t\tif not self.is_valid_email_address(to_addr):\n\t\t\tself.log.debug(\n 'Incorrect to_addr %s.' % (to_addr))\n\t\t\treturn False, 'Invalid main recipient.'\n\n\t\tif cc_addr and len(cc_addr) and not self.are_valid_email_addresses(cc_addr):\n\t\t\tself.log.debug(\n 'Incorrect cc_addr list %s submitted.' 
% (cc_addr))\n\t\t\treturn False, 'At least one of cc recipients invalid.'\n\n\t\tif bcc_addr and len(bcc_addr) and not self.are_valid_email_addresses(bcc_addr):\n\t\t\tself.log.debug(\n 'Incorrect bcc_addr list %s.' % (bcc_addr))\n\t\t\treturn False, 'At least one of bcc recipients invalid.'\n\n\t\tif not topic or not len(topic):\n\t\t\tself.log.debug(\n 'Empty topic submitted.')\n\t\t\treturn False, 'Topic cannot be empty.'\n\n\t\tif not text or not len(text):\n\t\t\tself.log.debug(\n 'Empty text submitted.')\n\t\t\treturn False, 'Text cannot be empty.'\n\t\t# valid!\n\t\treturn True, None", "def _validate_credentials(self):\n\n # There should be a client_id and client secret\n return \"client_id\" in self.credentials.keys() and \"client_secret\" in self.credentials.keys() \\\n and self.credentials[\"client_id\"] and self.credentials[\"client_secret\"]", "def valid_password(password):\n if password is None: # SQLite integrity check\n return False\n if len(password) < 8: # Arbitrary length minimum\n return False\n return True", "def check_auth(username, password):\n return username == 'admin' and password in app.config[\"CLAIM_SECRETS\"]", "def check_email_address_validity(email_address):\n\n try:\n validate_email(email_address)\n valid_email = True\n\n except ValidationError:\n valid_email = False\n\n return valid_email", "def valid_pass(password):\r\n if len(password) >= 8 and len(password) <= 25 and password != \"forgot\":\r\n return True\r\n\r\n else:\r\n print(\"This is not a vaid password\")\r\n print(\"Password should be between 8 and 25 letters\")\r\n\r\n return False", "def verify_password(password):\n hash_pass = hashlib.sha1(password + app.config['SECRET_KEY']).hexdigest()\n valid = hash_pass == app.config['VALID_PASSWORD']\n return valid", "def is_valid_password(password):\n from string import ascii_lowercase\n from string import ascii_uppercase\n from string import digits\n\n if len(password) < MIN_LENGTH or len(password) > MAX_LENGTH:\n return False\n\n count_lower = 0\n count_upper = 0\n count_digit = 0\n count_special = 0\n for char in password:\n if char in ascii_lowercase:\n count_lower += 1\n elif char in ascii_uppercase:\n count_upper += 1\n elif char in digits:\n count_digit += 1\n # TODO: count each kind of character (use str methods like isdigit)\n pass\n\n # TODO: if any of the 'normal' counts are zero, return False\n if count_lower == 0 or count_upper == 0 or count_digit == 0:\n return False\n\n # TODO: if special characters are required, then check the count of those\n # and return False if it's zero\n\n if SPECIAL_CHARS_REQUIRED:\n for char in password:\n if char in SPECIAL_CHARACTERS:\n count_special += 1\n if count_special == 0:\n return False\n\n # if we get here (without returning False), then the password must be valid\n return True", "def test_valid_password(self):\n user = User(email=\"test@email.com\", password=\"testpassword\")\n\n self.assertTrue(user.is_valid_password(\"testpassword\"))", "def validate_email(email:str) -> bool:\r\n return email.count(\"@\") == 1 and email.count(\".\") >= 1 and len(email) > 6", "def check_auth(username, password):\n return username == app.config['USERNAME'] and (\n password == app.config['PASSWORD'])", "def verify_password(self, password):\n return self.PASS == password", "def is_valid_email(email):\n assert email is not None\n return validate_email(str(email))", "def verify_password(entered_password):\n return PASSWORD_RE.match(entered_password)", "def test_invalid_password(self):\n user = 
User(email=\"test@email.com\", password=\"testpassword\")\n\n self.assertFalse(user.is_valid_password(\"invalid_password\"))", "def check_password(self, password):\n if not self._password or not password:\n return False\n return check_password_hash(self._password, password)", "def email_validator(email):\n if len(email) > 6:\n if re.match(REGEX_EXP, email) != None:\n return True\n return False", "def valid_email(email):\n email_regex = re.compile(r\"^[\\S]+@[\\S]+.[\\S]+$\")\n return email and email_regex.match(email)", "def verify_email(email):\n email_reg_exp = re.compile(r\"^[\\S]+@[\\S]+.[\\S]+$\")\n return not email or email_reg_exp.match(email)", "def validate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n cur = get_cursor()\n if email_exists(cur, self.email.data):\n self.email.errors.append('This email already exists!')\n return False\n\n if username_exists(cur, self.username.data):\n self.username.errors.append('This username already exists!')\n return False\n\n return True", "def check_auth(username, password):\n return username == 'admin' and password == 'pebble'", "def check_user(self):\n try:\n if (self.get_user()[0][0] == self.username) and (self.check_password(self.password)):\n return True\n else:\n return False\n except:\n return False", "def validate_email(self):\n # source: https://docs.python.org/2/howto/regex.html\n if not re.match(r\"[^@.]+@[A-Za-z]+\\.[a-z]+\", self.email):\n return 'Invalid email address!'\n return self.email", "def check_login(db, useremail, password):\n import hashlib\n\n cursor = db.cursor().execute('SELECT password FROM users WHERE email IS ?', [useremail])\n result = cursor.fetchone()\n if result:\n return result[0] == hashlib.sha1(password.encode()).hexdigest()\n return False", "def is_valid_email(email):\n return \"@\" in email and \".\" in email", "def check_auth(username, password):\n ADMIN_USER = config.CONFIG_VARS['ADMIN_USER']\n ADMIN_PASS = config.CONFIG_VARS['ADMIN_PASS']\n return username == ADMIN_USER and password == ADMIN_PASS", "def check_auth(username, password):\n\n config = get_app_configurations()\n\n with open(config[\"credentials\"], \"r\") as fh:\n u, p = fh.readline().rstrip().split(\",\")\n\n return username == u and password == p" ]
[ "0.7349685", "0.68957275", "0.6839669", "0.6783419", "0.67657584", "0.6671322", "0.66452736", "0.663105", "0.6579197", "0.6508494", "0.6490613", "0.647486", "0.6462656", "0.6462656", "0.6454392", "0.64511716", "0.64275295", "0.64244556", "0.6408592", "0.6401601", "0.64013714", "0.6389929", "0.63709974", "0.6366542", "0.6350101", "0.6329364", "0.63250613", "0.6311483", "0.63056713", "0.63051605", "0.6300021", "0.6292426", "0.6284904", "0.62823606", "0.6273874", "0.62720644", "0.6267129", "0.62669486", "0.62614334", "0.6247489", "0.6238719", "0.6236313", "0.6224139", "0.62229973", "0.6222943", "0.6217394", "0.6214069", "0.6195124", "0.615645", "0.61532736", "0.6152381", "0.61514014", "0.6149554", "0.6143067", "0.6141966", "0.6139463", "0.61373043", "0.6135998", "0.61277294", "0.61214906", "0.6109681", "0.61052614", "0.6093924", "0.608884", "0.6086194", "0.6080187", "0.60745984", "0.60585725", "0.6056533", "0.6052195", "0.6049962", "0.60456955", "0.60440725", "0.604376", "0.6042321", "0.6041088", "0.60394406", "0.60314554", "0.6029389", "0.60269433", "0.60245425", "0.60218495", "0.60191643", "0.6014466", "0.6013725", "0.60120845", "0.6010826", "0.6005928", "0.60004795", "0.5999922", "0.5992993", "0.59919655", "0.59831965", "0.5983172", "0.598081", "0.5980769", "0.5978593", "0.5978121", "0.5971744", "0.5970076", "0.5964246" ]
0.0
-1
Create new User from supplied information.
def signup():
    # Get JSON from request
    json = request.get_json()
    if json is None:
        raise CustomError(400, message="No JSON included or "
                                       "Content-Type is not application/json")

    expected_keys = ['first_name', 'last_name', 'email', 'password']
    if not all(key in json for key in expected_keys):
        raise CustomError(400, message="Must include a first name, last name, "
                                       "email and password.")

    # Check if email is unique
    if User.query.filter_by(email=json['email']).first() is not None:
        raise CustomError(409, message='Email already in use.')

    # TODO: Add password validation
    user = User(json['first_name'], json['last_name'], json['email'],
                json['password'])
    db.session.add(user)
    db.session.commit()

    return jsonify({'success': True, 'user': user.to_dict()}), 201
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(self, validated_data:tuple):\n user = user_details.objects.create(user_name=validated_data[0], email=validated_data[1], password=validated_data[2])\n return user", "def create(cls, **data):\n user = cls()\n for attribute in data:\n if hasattr(user, attribute):\n setattr(user, attribute, data[attribute])\n user.password = data[\"password\"]\n db.session.add(user)\n return user", "def create(self, validated_data):\n return User.objects.create(**validated_data)", "def new_user(first_name, sur_name, user_name, email, password):\n new_user = User(first_name, sur_name, user_name, email, password)\n return new_user", "def create_user_object(self, request):\r\n user = {\r\n \"first_name\": request.form.get(\"first_name\"),\r\n \"last_name\": request.form.get(\"last_name\"),\r\n \"age\": request.form.get(\"age\"),\r\n \"cpr_number\": request.form.get(\"CPR\"),\r\n \"email\": request.form.get(\"email\"),\r\n \"phone_number\": request.form.get(\"phone_number\"),\r\n \"password\": PasswordHasher().hash(request.form.get(\"password\")),\r\n \"bank_account\": str(BankAccount(\"Savings\", 1000.00).store_account().inserted_id),\r\n \"crypto_wallet\": str(CryptoWallet(\"Bitcoin\", 0.0045).store_account().inserted_id)\r\n }\r\n return user", "def create(self, data):\n # ensure 'create()' calls the specific 'create_user()' method\n # note that the 'data' gets validated\n user = get_user_model().objects.create_user(**data)\n return user", "def create(self,validated_data):\n user_obj = User.objects.create(**validated_data)\n return user_obj", "def create(self, validated_data):\n ## overriding default create\n\n user = UserProfile.objects.create_user(\n email = validated_data['email'],\n name = validated_data['name'],\n password=validated_data['password']\n )\n \n return user", "def create(self, validated_data):\n user = User.objects.create(\n first_name=validated_data.get('first_name'),\n middle_name=validated_data.get('middle_name'),\n last_name=validated_data.get('last_name'),\n email=validated_data.get('email'),\n username=validated_data.get('username'),\n mobile_number=validated_data.get('mobile_number'),\n gender=validated_data.get('gender'),\n is_active=validated_data.get('is_active'),\n country=validated_data.get('country'),\n address=validated_data.get('address'),\n role=validated_data.get('role'),\n )\n if self.context['request'].data.get('file_profile_picture') is not None:\n user.profile_picture = self.context['request'].data['file_profile_picture']\n if self.context['request'].data.get('file_signature') is not None:\n user.signature = self.context['request'].data['file_signature']\n user.set_password(validated_data.get('password'))\n user.save()\n return user", "def create_user(email, password, f_name, l_name):\n pass", "def build_user(data: Dict[Any, Any]) -> User:\n return User(**data)", "def create (self, validated_data):\n user = models.UserProfile.objects.create_user(\n email = validated_data ['email'],\n name = validated_data ['name'],\n password = validated_data ['password']\n )\n\n return user", "def create_user(self):\n unique_id = str(uuid.uuid4())\n new_user_properties = {\n \"name\": self.name,\n \"mission_statement\": self.mission_statement,\n \"unique_id\": unique_id,\n \"email\": self.email.lower(),\n \"is_mentor\": True,\n \"is_tutor\": True,\n \"is_visible\": True,\n \"is_available_for_in_person\": True,\n \"is_admin\": True}\n new_user_node = Node.cast(AgoraLabel.USER, new_user_properties)\n try:\n self.graph_db.create(new_user_node)\n except:\n pass\n return 
new_user_node", "def create_user() -> tuple:\n # created new user\n user_data: dict = request.get_json()\n names: str = user_data.get(\"names\")\n surname: str = user_data.get(\"surname\")\n cell: str = user_data.get(\"cell\")\n email: str = user_data.get(\"email\")\n password: str = user_data.get(\"password\")\n uid: str = user_data.get(\"uid\")\n organization_id: str = user_data.get(\"organization_id\")\n\n # Add User View will perform error checking\n return user_view.add_user(organization_id=organization_id, uid=uid, names=names, surname=surname,\n cell=cell, email=email, password=password)", "def create(self, validated_data):\n user = UserProfile.objects.create_user(\n email=validated_data[\"email\"],\n name=validated_data[\"name\"],\n password=validated_data[\"password\"]\n )\n\n return user", "def create(self, **kwargs):\n\n # Normalize the address by lowercasing the domain part of the email\n # address.\n try:\n email_name, domain_part = kwargs['email'].strip().split('@', 1)\n except ValueError:\n pass\n else:\n kwargs['email'] = '@'.join([email_name.lower(), domain_part.lower()])\n \n user = User(**kwargs)\n user.save()\n return user", "def create(self, validated_data):\n user = UserProfile.objects.create_user(\n email=validated_data['email'],\n first_name = validated_data['first_name'],\n last_name = validated_data['last_name'],\n password = validated_data['password']\n )\n return user", "def create(self, validated_data: dict):\n return User.objects.create_user(**validated_data)", "def create(self, validated_data):\n\n # Here we actually create a new user.\n user = models.UserProfile(\n email = validated_data['email'],\n name = validated_data['name']\n )\n\n user.set_password(validated_data['password'])\n\n # Here we save the object to the database.\n user.save()\n\n return user", "def create_user(context, params):\n form_user = dict()\n # form_user['edited_by'] = context.user\n if params.get('username'):\n form_user['username'] = params.get('username')\n else:\n form_user['username'] = create_username(params) # 'email_user{}'.format(MISUser.objects.latest('id').id + 1\n form_user['first_name'] = params.get('first_name')\n form_user['last_name'] = params.get('last_name')\n form_person = create_person(params)\n form_user.update(form_person)\n user = User.objects.create(**form_user)\n user.set_password(params.get('password'))\n\n email = {'label': 'Work', 'val': params.get('email'), 'person': user, 'is_main': True}\n create_email(context, email)\n\n user.save()\n return user", "def create_user(\n *,\n user_in: schemas.UserCreate,\n) -> schemas.User:\n next_user_id = users[-1].id + 1 # type: ignore\n user = schemas.User(\n id=next_user_id,\n email=user_in.email,\n is_active=user_in.is_active,\n is_superuser=user_in.is_superuser,\n full_name=user_in.full_name,\n )\n users.append(user)\n return user", "def create_by_args(self, params):\n signup_args = {}\n for arg in self.signup_args:\n signup_args[arg] = params.get(arg)\n\n # we don't use password, we use the magic raw_password\n del signup_args['password']\n signup_args['password_raw'] = params.get('password')\n prime_key = params[self.prime_key].lower()\n unique_properties = [self.prime_key]\n user_data = self.user_model.create_user(\n prime_key,\n unique_properties,\n **signup_args\n )\n\n if not user_data[0]: # user_data is a tuple\n details = \"Duplicate user id\"\n raise CustomException(error_code='', details=details)\n user = user_data[1]\n user.put()\n return user", "def create_user(user, first_name, last_name, major, bio):\n 
return userAccount.objects.create(user=user, first_name=first_name, last_name=last_name, major=major, bio=bio)", "def create_user(first_name,last_name,email,password):\n\n\tnew_user = User(first_name,last_name,email,password)\n\treturn new_user", "def create_user(self):\n return UserFactory.create()", "def create(self, data):\n # Make User\n username = data['email'].split(\"@\")[0]\n user = User.objects.create_user(**data, username=username, is_verified=False, is_client=True)\n Profile.objects.create(user=user)\n send_confirmation_email.delay(user_pk=user.pk)\n return user", "def _create_user(self, username, email, password, phone, **extra_fields):\n\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, phone=phone, **extra_fields) # using email_id instead of email\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create(self, validated_data):\n instance = User()\n return self.update(instance, validated_data)", "def __create_new_user(user_info: Dict[str, Any], ui_locales: str, oauth_provider: Optional[str] = None,\n oauth_provider_id: Optional[str] = None) -> Tuple[str, str, User]:\n success = ''\n info = ''\n\n _tn = Translator(ui_locales)\n language = DBDiscussionSession.query(Language).filter_by(ui_locales=ui_locales).first()\n # creating a new user_info with hashed password\n LOG.debug(\"Adding user_info for %s\", user_info['nickname'])\n hashed_password = password_handler.get_hashed_password(user_info['password'])\n new_user = User(firstname=user_info['firstname'],\n surname=user_info['lastname'],\n email=user_info['email'],\n nickname=user_info['nickname'],\n password=hashed_password,\n gender=user_info['gender'],\n group=user_info['db_group'],\n oauth_provider=oauth_provider,\n oauth_provider_id=oauth_provider_id)\n DBDiscussionSession.add(new_user)\n settings = Settings(user=new_user,\n send_mails=False,\n send_notifications=True,\n should_show_public_nickname=True,\n language=language)\n DBDiscussionSession.add(settings)\n\n if new_user:\n LOG.debug(\"New data was added with uid %s\", new_user.uid)\n success = _tn.get(Keywords.accountWasAdded).format(user_info['nickname'])\n else:\n LOG.debug(\"New data was not added\")\n info = _tn.get(Keywords.accoutErrorTryLateOrContant)\n\n return success, info, new_user", "def create(self, validated_data):\n username = validated_data.pop('username')\n email = validated_data.pop('email')\n password = validated_data.pop('password')\n user = User.objects.create_user(\n username, email, password, **validated_data)\n return user", "def create(self, validated_data):\n\n user = models.User(\n email=validated_data['email'],\n name=validated_data['name']\n )\n\n user.set_password(validated_data['password'])\n\n user.save()\n\n return user", "def create( self , validated_data ) :\n\n user = models.UserProfile(\n email = validated_data[ 'email' ] ,\n name = validated_data[ 'name' ]\n )\n\n user.set_password( validated_data[ 'password' ] )\n user.save( )\n\n return user", "def new_users(name_first, name_two, email_adress, user_name, pass_word):\n new_user = UserData(name_first, name_two, email_adress, user_name, pass_word)\n\n return new_user", "def new(self):\n\n for req_var in self.required_attribs:\n if req_var not in self.kwargs:\n err = \"The '%s' kwarg is required when creating a new user!\"\n self.logger.error(err % req_var)\n raise ValueError(err % req_var)\n\n self.logger.warn('Creating new user!')\n self.name = self.kwargs.get('name')\n self.email = 
self.kwargs.get('email').lower()\n self.created_on = datetime.now()\n\n try:\n self._id = self.mdb.insert({'email': self.email})\n except pymongo.errors.DuplicateKeyError:\n raise ValueError(\"Email '%s' is already in use!\" % self.email)\n\n if self.save(verbose=False):\n self.logger.warn('Created new user! %s' % self)\n else:\n raise AttributeError('New user record could not be saved!')\n\n self.update_password(self.kwargs.get('password'))", "def _create_user(self, FirstName,LastName, EmailId, MobileNo, password=None, **extra_fields):\n if not (FirstName and LastName):\n raise ValueError(\"The user's Name must be set\")\n if not EmailId:\n raise ValueError('The given EmailId must be set')\n if not password:\n raise ValueError('The given password must be set')\n if not MobileNo:\n raise ValueError('The given mobile must be set')\n EmailId = self.normalize_email(EmailId)\n user = self.model(FirstName =FirstName, LastName =LastName ,EmailId=EmailId, MobileNo=MobileNo, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create(self, validated_data):\n # We override the create function by accessing the method 'create_user(args)'\n # defined in 'objects' that is an object\n # which references UserProfileManager class into UserProfile class.\n user = models.UserProfile.objects.create_user(\n email = validated_data['email'],\n name = validated_data['name'],\n password = validated_data['password']\n )\n\n return user", "def create_user():\n usr = request.get_json()\n if not usr:\n abort(400, {'Not a JSON'})\n elif 'email' not in usr:\n abort(400, {'Missing email'})\n elif 'password' not in usr:\n abort(400, {'Missing password'})\n else:\n new_usr = User(**usr)\n storage.new(new_usr)\n storage.save()\n return jsonify(new_usr.to_dict()), 201", "def create(self, validated_data):\n\n user = models.UserProfile(\n email=validated_data['email'],\n name=validated_data['name']\n )\n\n user.set_password(validated_data['password'])\n user.save()\n return user", "def create(self, validated_data):\n\t\tinstance = Usuario()\n\t\treturn self.update(instance, validated_data)", "def create(self, validated_data):\n\n user = models.UserProfile(\n email=validated_data['email'],\n name=validated_data['name']\n )\n\n user.set_password(validated_data['password'])\n user.save()\n\n return user", "def create_user(uname,password):\n new_user = User(uname,password)\n return new_user", "def create(self, validated_data):\n\n user = models.UserProfile(\n username=validated_data['username'],\n email=validated_data['email'],\n first_name=validated_data['first_name'],\n mobile_number=validated_data['mobile_number'],\n )\n\n user.set_password(validated_data['password'])\n user.save()\n\n return user", "def _create_user(self, ident, password, **extra_fields):\n if not ident:\n raise ValueError('The Email must be set')\n ident = self.normalize_email(ident)\n user = self.model(ident=ident, **extra_fields)\n user.set_password(password)\n\n user.save()\n return user", "def create(cls, sender, instance, created, **kdws):\n if created:\n username = helpers.make_username(instance.first_name, instance.last_name, instance.email)\n user = User(username=username)\n user.save()\n user = User.objects.get(username=username)\n instance.user = user\n instance.save()", "def create(self, validated_data):\n username = validated_data.get('username')\n email = validated_data.get('email')\n password = validated_data.get('password')\n first_name = validated_data.get('first_name', '')\n last_name = 
validated_data.get('last_name', '')\n return User.objects.create_user(username, email, password, first_name=first_name,\n last_name=last_name)", "def create_user():\n body = request.get_json(silent=True)\n if body is None:\n abort(400, jsonify(error=\"Not a JSON\"))\n if 'email' not in body:\n abort(400, jsonify(error=\"Missing email\"))\n if 'password' not in body:\n abort(400, jsonify(error=\"Missing password\"))\n user = models.user.User(**body)\n models.storage.new(user)\n models.storage.save()\n return make_response(jsonify(user.to_dict()), 201)", "def create(self, validated_data):\n user = User(\n email=validated_data['email'],\n username=validated_data['username']\n )\n user.set_password(validated_data['password'])\n user.save()\n return user", "def create_user(UserName=None, MessageAction=None, FirstName=None, LastName=None, AuthenticationType=None):\n pass", "def create_user(entry):\n # only works for first + last name currently\n full_name = entry[5].split()\n email = '{first_name}-{client_id}@{domain}'.format(\n first_name=full_name[0].lower(),\n client_id=str(entry[4]).strip(), # unique email for clients with same name\n domain='example.com')\n password = 'test1234'\n dob = timezone.now() - timedelta(days=(365 * random.randint(18, 99)))\n try:\n user = get_user_model().objects.get(email=email)\n except get_user_model().DoesNotExist:\n user = get_user_model().objects.create_user(email=email, first_name=full_name[0],\n last_name=full_name[1], password=password, dob=dob)\n return user", "def create_user(self):\n return User.objects.create_user(**self.user_data)", "def create_user():\n record = request.get_json()\n if record is None:\n return {\"Error\": \"No data Supplied.\"}, 400\n\n schema = user_schema.load(record)\n\n if UserModel.objects(email=schema['email']):\n return {\"Error\": \"User Data already exists.\"}, 400\n user = UserModel(**schema)\n user.hash_password()\n user.save()\n ser_data = user_schema.dump(user)\n token = Auth.generate_token(ser_data[\"_id\"])\n return {\"message\": \"User Created Successfully\", \"Token\": token, \"id\": str(user.id)}, 200", "def create(self, validated_data):\n user = models.UserProfile.objects.create_user(\n email=validated_data['email'],\n username=validated_data['username'],\n password=validated_data['password'],\n\n )\n\n return user", "def create(self, validated_data):\n user = User.objects.create(username=validated_data['username'],\n email=validated_data['email'],\n first_name=validated_data['first_name'],\n last_name=validated_data['last_name'])\n\n user.set_password(validated_data['password'])\n user.save()\n\n return user", "def create(self, data):\n data.pop('password_confirmation')\n try:\n availability = data.pop(\"availability\")\n babysitter = data.pop(\"user_bbs\")\n user = User.objects.create_user(**data, is_verified=False)\n if babysitter:\n bbs = Babysitter.objects.create(user_bbs=user, **babysitter)\n for shift in availability:\n Availability.objects.create(bbs=bbs, **shift)\n except KeyError:\n logging.info('This is a instance client')\n user = User.objects.create_user(**data, is_verified=False)\n logging.info(f'User created, whit pk {user.pk}')\n client = Client.objects.create(user_client=user)\n logging.info(f'User pk is already to pass {user.pk}')\n send_confirmation_email.delay(username=user.username, email=user.email )\n return user", "def create_user(username, password, user_fname, user_lname, email, profile_picture=\"/static/img/profile_pictures/default.png\"):\n\n user = User(username=username, 
password=password, user_fname=user_fname, user_lname=user_lname, profile_picture=profile_picture, email=email)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def create(self, validated_data):\n user = User.objects.create_user(\n email=validated_data['email'],\n password=validated_data['password'],\n )\n return user", "def create_user(self, **kwargs):\n\n user = self.user_model(**self._prepare_create_user_args(**kwargs))\n return self.put(user)", "def create_user(user_name: str):\n user = User()\n user.username = user_name\n user.save()\n return user", "def _create_user(self, username, email, password, **extra_fields):\n if not username:\n raise ValueError('The given username must be set')\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, **extra_fields)\n user.password = make_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n\n email = self.normalize_email(email)\n #username = self.model.normalize_username(username)\n user = self.model( email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, **fields):\n email = fields.pop('email')\n password = fields.get('password1')\n if not email:\n raise ValueError(\"Email address is required\")\n email = self.normalize_email(email)\n user = self.model(email=email, **fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create(self, validated_data):\n user = get_user_model().objects.create(\n username=validated_data['username'],\n )\n user.set_password(validated_data['password'])\n user.save()\n return user", "def _create_user(self, username, email, password, **extra_fields):\n if not username:\n raise ValueError('The given username must be set')\n email = self.normalize_email(email)\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_new_user(email, password, user_handle, age=None):\n\n return User(email=email,\n password=password,\n user_handle=user_handle,\n age=age)", "def create_user():\n new_dict = request.get_json(silent=True)\n if type(new_dict) is dict:\n if \"email\" not in new_dict.keys():\n return jsonify({\"error\": \"Missing email\"}), 400\n elif \"password\" not in new_dict.keys():\n return jsonify({\"error\": \"Missing password\"}), 400\n else:\n user = User(email=new_dict[\"email\"], password=new_dict[\"password\"])\n for k, v in new_dict.items():\n setattr(user, k, v)\n user.save()\n return jsonify(user.to_dict()), 201\n else:\n return jsonify({\"error\": \"Not a JSON\"}), 400", "def create(self, validated_data):\n\n user = models.User(\n email = validated_data['email'],\n name = validated_data['name'] \n )\n\n # This will encrypt the password first and then assign it to the user.\n user.set_password(validated_data['password'])\n\n # Save user into database.\n user.save()\n\n return user", "def get_or_create_user(cls, **kwargs):\n\n un = kwargs.get('username', u'')\n pw = kwargs.get('password', u'')\n fn = kwargs.get('first_name', u'')\n ln = kwargs.get('last_name', u'')\n em = kwargs.get('email', u'')\n\n user = None\n created = False\n\n if un:\n # created = False\n [user] = User.objects.filter(\n username=un)[:1] or [None]\n elif 
em:\n [user] = User.objects.filter(\n email=em).order_by('-pk')[:1] or [None]\n\n if not user:\n created = True\n user = User.objects.create_user(**{\n 'username': un or Profile.spawn_username(fn[:1], ln),\n 'email': em,\n 'password': pw or uuid.uuid1().get_hex()[:6],\n })\n\n user.first_name = fn\n user.last_name = ln\n user.email = em\n user.save()\n\n if created:\n profile = Profile.objects.create_profile(user)\n sf_id = create_salesforce_contact(profile)\n\n return user, created", "def create_form_user(self, **kwargs):\n user = User.objects.create_user(\n **kwargs\n )\n return user", "def _create_user(self, email, username, full_name, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n if not username:\n raise ValueError('The given username must be set')\n if not full_name:\n raise ValueError('The given full name must be set')\n\n email = self.normalize_email(email)\n username = self.model.normalize_username(username)\n user = self.model(\n email=email, username=username, full_name=full_name, **extra_fields\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def do_user_create():\n target = User(\n request.form['gender'],\n request.form['first_name'],\n request.form['name'],\n request.form['mail'],\n request.form['meter_id'],\n request.form['group_id'],\n secrets.token_hex(33))\n target.set_role(request.form['role'])\n target.nick = request.form['nick']\n db.session.add(target)\n db.session.commit()\n return user_list(\"Created user \" + target.name)", "def users_create():", "def createUser(login_session):\r\n newUser = User_info(name=login_session['username'],\r\n email=login_session['email'],\r\n picture=login_session['picture'])\r\n session.add(newUser)\r\n session.commit()\r\n user = session.query(User_info).\\\r\n filter_by(email=login_session['email']).one()\r\n return user.id", "def create_user():\r\n if not request.is_json or 'name' not in request.get_json() or 'phone_number' not in request.get_json() or 'password' not in request.get_json():\r\n return bad_request('Missing required data.')\r\n try:\r\n return add_user(request)\r\n except:\r\n return bad_request(error_messages['user_exist'])", "def createUser(self):\n if self.user:\n return self.user\n from soc.models.user import User\n from soc.modules.seeder.logic.providers.user import CurrentUserProvider\n properties = {'account': CurrentUserProvider(),\n 'status': 'valid', 'is_developer': self.dev_test}\n self.user = seeder_logic.seed(User, properties=properties)\n return self.user", "def _create_user(self, username, name,\n email, password, **extra_fields):\n if not email:\n raise ValueError('Email field is required')\n email = self.normalize_email(email)\n user = self.model(\n username=username,\n name=name,\n email=email,\n **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, first_name, last_name, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n first_name = first_name\n last_name = self.last_name\n user = self.model(first_name, last_name,email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, user_info):\n username = user_info.name\n password = user_info.password\n os_tenants = create_os_project(username=username, password=password, tenant_name=username)\n ob_client = OBClient()\n project = {\n 'name': username,\n 'description': 
'the project for user %s' % username\n }\n project = ob_client.create_project(project)\n user = {\n 'username': username,\n 'password': password,\n 'enabled': True,\n 'email': \"{}@softfire.local\".format(username),\n 'roles': [\n {\n 'role': 'USER',\n 'project': project.get('name')\n }\n ]\n }\n logger.debug(\"Create openbaton project %s\" % project)\n ob_client = OBClient(project.get('name'))\n user = ob_client.create_user(user)\n logger.debug(\"Create openbaton user %s\" % user)\n\n user_info.ob_project_id = project.get('id')\n # user_info.testbed_tenants = {}\n\n testbed_tenants = {}\n if os_tenants:\n for testbed_name, v in os_tenants.items():\n tenant_id = v.get('tenant_id')\n vim_instance = v.get('vim_instance')\n try:\n vi = ob_client.create_vim_instance(vim_instance)\n logger.debug(\"created vim instance with id: %s\" % vi.get('id'))\n except NfvoException:\n logger.warning(\"Not able to upload vim %s\" % testbed_name)\n testbed_tenants[TESTBED_MAPPING[testbed_name]] = tenant_id\n\n for k, v in testbed_tenants.items():\n user_info.testbed_tenants[k] = v\n logger.debug(\"Updated user_info %s\" % user_info)\n\n return user_info", "def create_user(self):\n if not self.is_valid():\n return None\n # generate a username \n ids = User.objects.values_list('id', flat=True).order_by('-id')[:1]\n if len(ids) > 0:\n # ids[0] will be the maximum value (due to order_by: '-id')\n idnum = ids[0] + 1\n else:\n idnum = 1\n # create User object \n username = \"user%s\" % idnum\n # NOTE: store email in lower case\n email = self.clean_email().lower()\n password = self.clean_password2()\n user = User(username=username, email=email, password='tmp')\n user.save()\n # set the real password\n user.set_password(password)\n # make user inactive (until user has confirmed account)\n user.is_active = False\n # update\n user.save()\n return user", "def create_user(self, *args, **kwargs):\n user = User.objects.create_user(*args, **kwargs)\n return get_profile(user)", "def _create_user(self, password, is_active, is_staff, is_superuser, **extra_fields):\n now = timezone.now()\n if not self.model.USERNAME_FIELD:\n raise ValueError('User model must have set USERNAME_FIELD')\n identifier = extra_fields.get(self.model.USERNAME_FIELD)\n if not identifier:\n raise ValueError((\"User's %s must be set\", self.model.USERNAME_FIELD))\n user = self.model(is_active=is_active, is_staff=is_staff, is_superuser=is_superuser,\n date_joined=now, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def _create(cls, model_class, *args, **kwargs):\n manager = cls._get_manager(model_class)\n # The default would use ``manager.create(*args, **kwargs)``\n return manager.create_user(*args, **kwargs)", "def get_or_create(cls, eppn, **kwargs):\n ucl_id, domain = eppn.split('@')\n user = 
cls.query.filter_by(ucl_id=ucl_id).first()\n if user is None:\n user = cls.create(ucl_id=ucl_id, **kwargs)\n logger.info(\"A new user %s (%s) has been created\", user.ucl_id, user.name)\n else:\n fields = ['name', 'email', 'upi']\n updates = {}\n for field in fields:\n if kwargs[field] != getattr(user, field):\n updates[field] = kwargs[field]\n if updates:\n logger.info(\n \"Updating information for user {} \".format(user.ucl_id)\n + \", \".join(\"{}={}\".format(attr, updates[attr]) for attr in updates)\n )\n user.update(**updates)\n return user", "def _create_user(self, email, username, password, gender=2, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n\n email = self.normalize_email(email)\n username = self.model.normalize_username(username)\n user = self.model(email=email, username=username, gender=gender, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def create_user(self, instance, **attrs):\n instance = self._get_resource(_instance.Instance, instance)\n return self._create(_user.User, instance_id=instance.id, **attrs)", "def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = UserSerializer.create(UserSerializer(), validated_data=user_data)\n businessuser, created = BusinessUser.objects.update_or_create(user=user,\n company_name=validated_data.pop('company_name'), \n reg=validated_data.pop('reg'), \n vat=validated_data.pop('vat'), )\n return businessuser", "def create(self, validated_data):\n\n user_data = {\n \"username\" : validated_data.get(\"username\"),\n \"email\" : validated_data.get(\"email\"),\n \"password\" : validated_data.get(\"password\")\n }\n user = User.objects.create_user(**user_data)\n user.save()\n\n account_data = {\n \"phone\" : validated_data.get(\"phone\"),\n \"type\" : validated_data.get(\"type\"),\n \"lat\" : validated_data.get(\"lat\"),\n \"lang\" : validated_data.get(\"lang\"),\n \"center_point\" : validated_data.get(\"center_point\")\n }\n account = Account(user = user, **account_data)\n account.save()\n\n return user", "def _create_user(self, email, username, firstname, lastname, password, **other_fields):\n\n if not email:\n raise ValueError(_('You must provide an email address'))\n\n email = self.normalize_email(email)\n user = self.model(email=email, username=username, firstname=firstname, lastname=lastname, **other_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create(self, validated_data):\r\n user_data = validated_data.pop('user')\r\n user = UserSerializer.create(UserSerializer(), validated_data = user_data)\r\n profile, created = Profile.objects.update_or_create(user = user,\r\n bio = validated_data.pop('bio'),\r\n location = validated_data.pop('location'),\r\n birth_date = validated_data.pop('birth_date'))\r\n return profile", "def _create_user(self, username, email, password, **extra_fields):\n if not username:\n raise ValueError('Username is required.')\n if not email:\n raise ValueError('Email is required.')\n if not password:\n raise ValueError('Password is required.')\n try:\n with transaction.atomic():\n user = self.model(username=username, email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n except:\n raise", "def create(self, validated_data):\n request = self.context.get('request')\n profile = Profile(**validated_data)\n profile.user = request.user\n profile.save()\n return profile", "def create_user(fname, phone_num):\n\n user = 
User(fname=fname, phone_num=phone_num)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given username must be set')\n email = self.normalize_email(email)\n # Lookup the real model class from the global app registry so this\n # manager method can be used in migrations. This is fine because\n # managers are by definition working on the real model.\n\n user = self.model(email=email, **extra_fields)\n user.password = make_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, **extra_fields):\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.save(using=self._db)\n return user", "def _create_user(self, username, email, password):\n\t\tnow = datetime.now()\n\t\tif username is None:\n\t\t\traise ValueError('Must include username')\n\t\tif email is None:\n\t\t\traise ValueError('Must include email')\n\t\temail = self.normalize_email(email)\n\t\tuser = self.model(\n\t\t\temail=self.normalize_email(email),\n\t\t\tusername=username,\n\t\t\tdate_joined=now\n\t\t)\n\t\tuser.set_password(password)\n\t\tuser.save(using=self._db)\n\t\treturn user" ]
[ "0.7371264", "0.715452", "0.70016223", "0.6986368", "0.69517016", "0.6951686", "0.69164294", "0.6909793", "0.6909042", "0.68905514", "0.68693435", "0.68369526", "0.6824223", "0.6820779", "0.6811556", "0.6802563", "0.68015856", "0.67980665", "0.67485744", "0.67480195", "0.6740202", "0.672642", "0.6701116", "0.66838545", "0.66787535", "0.6668592", "0.66685224", "0.666226", "0.6659394", "0.66573745", "0.66563505", "0.665618", "0.6648955", "0.6646073", "0.6641081", "0.6618858", "0.6617581", "0.6607383", "0.660027", "0.65987426", "0.6580645", "0.6580523", "0.65763193", "0.6575064", "0.6563423", "0.65630597", "0.6551594", "0.65506595", "0.65505576", "0.65479267", "0.6541457", "0.65320027", "0.652737", "0.65253603", "0.65112174", "0.6507377", "0.6502647", "0.6496809", "0.6494914", "0.6487333", "0.64830124", "0.64799917", "0.6469565", "0.6454977", "0.645147", "0.64431757", "0.64370996", "0.6432598", "0.64300865", "0.64282596", "0.6424831", "0.64236414", "0.64228797", "0.6420999", "0.64209586", "0.6418328", "0.6413221", "0.64092416", "0.64076495", "0.64030427", "0.64019483", "0.64019483", "0.64019483", "0.64019483", "0.64019483", "0.64019483", "0.64019483", "0.63901675", "0.63895595", "0.6385274", "0.6379487", "0.6373916", "0.637292", "0.6369068", "0.6365784", "0.63652587", "0.63572246", "0.6355316", "0.6353496", "0.63514453", "0.63499135" ]
0.0
-1
Get a user by its id.
def get_user_by_id(id):
    user = User.query.get(id)
    if user is None:
        raise CustomError(404, 'User with id: {} was not found.'.format(id))
    return jsonify({'success': True, 'user': user.to_dict()})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user_by_id(self, id):\n\t\treturn self.users.get(id)", "def get(self, id):\n\t\ttry:\n\t\t\tflask_app.logger.debug('We are getting the user: %d', id)\n\t\t\treturn user_service.get(id)\n\t\texcept AssertionError as e:\n\t\t\tuser_space.abort(400, e.args[0], status = \"Could not get user\", statusCode = \"400\")\n\t\texcept Exception as e:\n\t\t\tuser_space.abort(500, e.args[0], status = \"Could not get user\", statusCode = \"500\")", "def get(id):\n return User.query.filter_by(id=id).first()", "def get_user_by_id(id):\n user = session.query(User).get(id)\n return user", "def getUser(self, id : int) -> bbUser.bbUser:\n id = self.validateID(id)\n return self.users[id]", "def get_user(id=None, name=None):\n found_id = get_user_id(id, name)\n if not found_id:\n return\n response = utils.checked_api_call(users_api, 'get_specific', id=found_id)\n if response:\n return response.content", "def find_user_by_id(id: str) -> User:\n\n # Find the id user in the database, else return None\n return User.query.get(int(id))", "def get_user_by_id(user_id):\n return User.query.get(user_id)", "def get_user(self, id: utils.Intable) -> User | None:\n id64 = make_id64(id=id, type=Type.Individual)\n return self._connection.get_user(id64)", "def get(cls,id):\n result = execute_query(\"\"\"SELECT * FROM Users Where username = ?\n \"\"\",\n [id])\n try:\n user = User(id,result[0][1])\n except Exception as e:\n return None\n \n return user", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def get_by_id(self, id):\n return self.session.query(User).filter_by(id=id).first()", "def get_single_user(self, id):\n for user in self.users:\n if user['id'] == id:\n return user", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "async def get_user(id: int):\n service = SearchService()\n return await service.get_user(id)", "def _get_user_by_id(self, _id):\n user_resp = self._db.Users(database_pb2.UsersRequest(\n request_type=database_pb2.UsersRequest.FIND,\n match=database_pb2.UsersEntry(global_id=_id)))\n if user_resp.result_type != database_pb2.UsersResponse.OK:\n self._logger.warning(\n 'Could not find user: {}'.format(user_resp.error))\n return None\n if not len(user_resp.results):\n self._logger.warning('Could not find user.')\n return None\n return user_resp.results[0]", "def get_user_from_id(user_id):\n return Users.query.filter_by(id=user_id).first()", "def get_user(id):\n pass", "def get(self, user_id):\n return User.get(user_id)", "def read_user_by_id(\n user_id: int = Path(description=\"User id\", example=1),\n) -> schemas.User:\n user = next((usr for usr in users if usr.id == user_id), None)\n if not user:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"User with id={user_id} doesn't exist\",\n )\n return user", "def get_user(id, **options):\n\n return security_services.get_user(id, **options)", "async def get_user(id: int):\n with Session(engine) as session:\n # TODO return the user based on the ID (and an error if not)\n statement = select(User).where(User.id == id)\n user = session.exec(statement).first()\n if user == None:\n raise HTTPException(status_code=404, detail=\"User ID not found\")\n return {\"user\": user}", "def find_by_id(cls, _id):\n user = cls.query.filter_by(id=_id).first()\n return user", "def get_user(cls, user_id):\n try:\n 
return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None", "def get_user(self, user_id):\n uri = 'users/' + user_id\n return self.make_request(uri)", "def get_user_by_id(cls, userid):\n\n user = User.query.filter_by(user_id=userid).one()\n\n return user", "async def get_user_by_id(self, roblox_id: int) -> User:\n r = await self.request.request(url=f'https://api.roblox.com/users/{roblox_id}', method=\"GET\", noerror=True)\n json = r.json()\n if r.status_code != 200 or not json.get('Id') or not json.get('Username'):\n return None\n return User(self.request, json['Id'], json['Username'])", "def get_user(id):\n if (g.user.id == id):\n return jsonify(g.user.serialize)\n else:\n abort(400)", "def get_user(user_id):\n try:\n return UserModel.objects.get(id=user_id)\n except UserModel.DoesNotExist:\n return None", "def load_user(id):\n return User.query.get(int(id))", "def load_user(id):\n return User.query.get(int(id))", "def load_user(id):\n return User.query.get(int(id))", "def load_user(id):\n return User.query.get(int(id))", "def get(self, id: str = None) -> User:\n query = gql(\n \"\"\"\n query User($id: String) {\n user(id: $id) {\n id\n email\n name\n bio\n company\n avatar\n verified\n profiles\n role\n }\n }\n \"\"\"\n )\n\n params = {\"id\": id}\n\n return self.make_request(query=query, params=params, return_type=\"user\")", "def get_user_by_id(self, user_id):\n query = \"SELECT * FROM users WHERE user_id = %s\"\n self.cursor.execute(query,[user_id])\n result = self.cursor.fetchone()\n return result", "def get_user(self, user_id):\n User = get_user_model()\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None", "def get_user(self, user_id=None):\n raise NotImplementedError", "async def fetch_user(self, id: str):\n user = await self.http.get_user(id)\n return User(state=self.http, data=user)", "async def get_user_byid(request):\n user_id = request.match_info[\"user_id\"]\n try:\n user_id = int(user_id)\n except (ValueError, TypeError):\n return web.Response(text=\"Incorrect value for user_id\", status=400)\n\n currentuser = (\n request.cirrina.db_session.query(User)\n .filter(User.username == request.cirrina.web_session[\"username\"])\n .first()\n )\n\n if user_id == -1 or not currentuser.is_admin:\n user = currentuser\n else:\n user = request.cirrina.db_session.query(User).filter_by(id=user_id).first()\n\n if not user:\n return web.Response(status=404, text=\"User not found\")\n\n data = {\"username\": user.username, \"user_id\": user.id, \"is_admin\": user.is_admin}\n return web.json_response(data)", "def load_user(id):\n return User.get_by_id(int(id))", "def load_user(id):\n\treturn User.query.get(int(id))", "def get_by_id(user_id: int) -> Optional[User]:\n user = User.query.filter_by(id=user_id).first()\n return user", "def load_user(id):\n return Users.query.get(id)", "def load_user(id):\n\n return User.query.get(int(id))", "async def fetch_user(self, id: utils.Intable) -> User | None:\n id64 = make_id64(id=id, type=Type.Individual)\n return await self._connection.fetch_user(id64)", "def get_user(self, user_id):\n return self.my_get_user(self.get_all_dbusers(), user_id)", "def get_by_id(self, id):\n return self.user_cls(id=id, password='secret' + str(id))", "def get_user_by_id(user_id, **options):\n\n return security_services.get_user_by_id(user_id, **options)", "async def getch_user(self, id: str):\n return self.get_user(id) or await self.fetch_user(id)", "async def get(cls, user_id):\n try:\n user = await 
db.one(cls.SELECT_USER, user_id=user_id)\n except exceptions.NoResultFound:\n LOGGER.error(\"Could not find user=%s.\", user_id)\n raise DatabaseError\n except SQLAlchemyError as err:\n LOGGER.error(\"Failed to fetch user=%s. Error: %s\", user_id, err)\n raise DatabaseError\n\n return user", "def get_user(self, user_id):\n try:\n User = get_user_model()\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None", "def get_user(self, user_id):\n _email = self._email_for_user_id(user_id)\n response = self._get('/users?{0}'.format(urllib.urlencode({'search': _email})))\n for _user in response:\n if _user['email'] == _email:\n return _user\n return None", "def getUser(self, id):\n if not isinstance(id, int):\n # Must be a string. Get the UserId first.\n id = self.getUserId(id)\n u = self.users[id]\n while isinstance(u, int):\n id = u\n u = self.users[id]\n u.id = id\n return u", "def get(self, user_id):\n user = UserServices(public_id=user_id).get_an_item()\n if not user:\n api.abort(404)\n else:\n return user", "def getByID(session, id):\n return session.query(User).filter(User.id == id).first()", "def get_user(self, object_id):\n return self.get_object(\"user\", object_id)", "def get_user(id):\n user = User.query.get(id)\n return user_schema.jsonify(user)", "def get_user(id):\n url = 'https://jsonplaceholder.typicode.com/'\n user = requests.get(url + 'users', params={'id': id}).json()\n return user", "async def get_user(self, name=None, id=None) -> User:\n if name:\n return await self.get_user_by_username(name)\n if id:\n return await self.get_user_by_id(id)\n return None", "def read_user_by_id(\n user_id: PyObjectId,\n current_user: Users = Depends(deps.get_current_active_user),\n) -> Optional[User]:\n user = dao.user.get_user_by_id(id=user_id)\n if user == current_user:\n return user\n if not user:\n raise HTTPException(\n status_code=400, detail=\"The user doesn't exist.\"\n )\n # TODO: toggle this if we only want admins to be able to see profiles other than their own.\n # if not dao.user.is_superuser(current_user):\n # raise HTTPException(\n # status_code=400, detail=\"The user doesn't have enough privileges\"\n # )\n return user", "def find_by_id(cls, _id):\n ## Setup Connection & Cursor\n connection, cursor = Database.connect_to_db()\n\n ## Find the user\n query = \"SELECT * FROM {table} WHERE id=?\".format(table=cls.TABLE_NAME)\n result = cursor.execute(query, (_id,)) ## Parameter must always be a tuple\n row = result.fetchone() ## Returns None if no results\n\n ## Create User object if we get data back\n if row:\n user = cls(*row)\n else:\n user = None\n\n ## Close Connection\n connection.close()\n\n return user", "def get_user(self, user_id):\n oauth_user = OAuthioUser.objects.filter(user__id=user_id)\n if oauth_user.exists():\n return oauth_user.get().user", "def get_user(user_id=None):\n users = storage.all('User')\n user = users.get('User' + \".\" + user_id)\n if user is None:\n abort(404)\n else:\n return jsonify(user.to_dict()), 200", "def load_user(id):\r\n\r\n\tuser = User.query.get(int(id))\r\n\tif user is not None:\r\n\t\tuser.id = session['user_id']\r\n\t\treturn user\r\n\telse:\r\n\t\treturn None", "def get_user_by_id(id):\n u = models.User.query.get(id)\n user = {\n 'id': u.id,\n 'name': u.name,\n 'email': u.email,\n 'regID': u.regid,\n 'photo': u.photo\n }\n\n if len(user) == 0:\n abort(404)\n return jsonify({'user': user}), 201", "def user_by_id(user_id):\n user = User.query.filter(User.id == user_id).one_or_none()\n return user", "def get(self, 
id):\n tmp = userDao.get_one_entry(id)\n return tmp", "def get_user_by_id(id):\n user = User.query.get(id)\n result = userSchema.dump(user)\n return jsonify(result)", "def retrieve_user_from_id(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None:\n raise Exception(\"Bad parameter.\")\n return self.database.retrieve_user_from_id(user_id)", "def get_user(user_id):\n user = storage.get(User, user_id)\n if user is None:\n abort(404)\n else:\n return jsonify(user.to_dict())", "def get_user(user_id):\n user = storage.get(\"User\", user_id)\n if user is None:\n abort(404)\n return user.to_dict()", "def get_user(self, user_id):\n return UserModel._default_manager.get(pk=user_id)", "def find_by_id(cls, user_id):\n return UsersModel.query.filter_by(id=user_id).first()", "def get(id):\n users = get_data_from_csv('files/users.csv')\n search_in_csv(users, 'id', id)\n return User(*search_in_csv(users, 'id', id))", "def load_user(id):\n user = db.session.query(User).filter(User.id == id).first()\n return user", "def user_get_by_id(user_id):\n obj = storage.get(\"User\", user_id)\n if obj is None:\n abort(404)\n else:\n return jsonify(obj.to_dict())", "def get_user_by_id(self, user_id: str) -> typing.Optional[User]:\n query_params = {\n \"$select\": \",\".join(\n [\"displayName\", \"id\", \"mail\", \"department\", \"companyName\"]\n ),\n }\n\n request = self._prepare_request(\n method=\"get\",\n resource_path=f\"users/{user_id}\",\n query_params=query_params,\n )\n with requests.Session() as session:\n response = session.send(request=request)\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError as exception:\n if response.status_code == 400:\n return None\n raise exception\n user = response.json()\n return User.from_dict(**user) if user.get(\"id\") else None", "def get_user(self, user_id):\n try:\n return Account.objects.get(pk=user_id)\n except Account.DoesNotExist:\n return None", "def load_user(user_id):\n return models.UserModel.query.get(int(user_id))", "def get_user_by_id(user_id: int) -> User:\n session = Session()\n\n # verify user_id exists\n vote_user: User = session.query(User).filter(User.id == user_id).first()\n session.close()\n\n if not vote_user:\n raise UserNotFoundException\n\n return vote_user", "def get_user(user_id):\n usr = storage.get(User, user_id)\n if usr:\n return jsonify(usr.to_dict())\n else:\n abort(404)", "def get(user_id: int) -> User:\n try:\n user = User.objects.get(id=user_id)\n except User.DoesNotExist:\n logger.error(\n 'Getter(user_id = %d) in BaseUser throws User.DoesNotExist exception.' 
%\n user_id)\n raise NonUserException\n return user", "def load_user(user_id):\n return User.query.get(int(user_id))", "def load_user(user_id):\n return User.query.get(int(user_id))", "def load_user(user_id):\n return User.query.get(int(user_id))", "def load_user(user_id):\n return User.query.get(int(user_id))", "def load_user(user_id):\n return User.query.get(int(user_id))", "def load_user(user_id):\n return User.query.get(int(user_id))", "def user_by_id(self, user_id):\n\n cur = self.db.cursor()\n cur.execute(\n \"\"\"SELECT user_id, username, password, phone, email, role\n FROM users WHERE user_id = %s\"\"\", (user_id, ))\n \n user_from_db = cur.fetchone()\n if cur.rowcount == 1: \n user_id, username, password, phone, email, role = user_from_db\n resp = dict(user_id=user_id, username=username, password=password, phone=phone, email=email, role=role)\n \n return resp\n return None", "def get_user(self, user_id: int) -> dict:\n user = self.call_method('getUser', user_id=user_id)\n return user", "def get(self, no):\n user = get_a_user(no)\n if not user:\n api.abort(404)\n else:\n return user", "def get_user(self, *, id: str) -> Union[User, None]:\n with self.client.create_session() as session:\n user_record = session.query(RDSUser).filter(RDSUser.rk == id).first()\n if not user_record:\n return user_record\n\n manager_record = user_record.manager\n\n manager_name = ''\n if manager_record and manager_record.full_name:\n manager_name = manager_record.full_name\n\n return self._build_user_from_record(user_record=user_record, manager_name=manager_name)", "def load_user(user_id):\n return User.query.get(user_id)", "def load_user(user_id):\n return User.query.get(user_id)", "def get_user(self, user_id=None, nick=None):\n if user_id in self:\n return self[user_id]\n else:\n return User(self, user_id, nick=nick)", "def load_user(user_id):\r\n return User.query.get(int(user_id))", "def load_user(user_id):\n return User.get_by_id(int(user_id))" ]
[ "0.89340585", "0.8621323", "0.85916656", "0.8508148", "0.8499932", "0.84347427", "0.8386604", "0.8384479", "0.8328696", "0.8305895", "0.82829595", "0.82829595", "0.82829595", "0.82829595", "0.8282169", "0.82745594", "0.8266114", "0.82544875", "0.82501876", "0.8243749", "0.8214747", "0.8212782", "0.8201788", "0.82001954", "0.818829", "0.8184199", "0.81802803", "0.81639445", "0.812776", "0.81165797", "0.811113", "0.80775315", "0.8069223", "0.8069223", "0.8069223", "0.8069223", "0.80623317", "0.8050461", "0.8047785", "0.8039932", "0.80116105", "0.80079246", "0.7994597", "0.79871404", "0.7986041", "0.7984022", "0.7978478", "0.79775846", "0.79768294", "0.7975656", "0.7954707", "0.7944356", "0.79353917", "0.79336137", "0.79124194", "0.7880159", "0.78603286", "0.78320813", "0.78206027", "0.78182924", "0.7815266", "0.78095245", "0.77988875", "0.77811027", "0.7767863", "0.77604496", "0.77440035", "0.77397037", "0.77202225", "0.7702343", "0.7697633", "0.7694471", "0.7686224", "0.7686097", "0.7681967", "0.7669158", "0.7665181", "0.7663223", "0.7662771", "0.76615626", "0.7649233", "0.76085657", "0.7605827", "0.75941485", "0.75920296", "0.75736624", "0.75736624", "0.75736624", "0.75736624", "0.75736624", "0.75736624", "0.75664926", "0.7559569", "0.7525715", "0.7524614", "0.75208956", "0.75208956", "0.75179523", "0.75166", "0.7506129" ]
0.8168108
27
Return all of the current user's friends.
def friends():
    friends = [u.to_dict() for u in g.user.get_friends()]
    return jsonify({'success': True, 'friends': friends})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def friends(self) -> List['Friend']:\n\n return self.sObj.getFriends()", "async def get_friends(self) -> List[User]:\n me = await self.get_self()\n r = await self.request.request(url=f'https://friends.roblox.com/v1/users/{me.id}/friends', method=\"GET\")\n data = r.json()\n friends = []\n for friend in data['data']:\n friends.append(User(self.request, friend['id'], friend['name']))\n return friends", "def list_users_friends(self):\n user = self.sp.user(self.user)\n return user", "def get_friends(self):\n edges = DirectedUserToUserEdge.all().filter(\n 'owner_user_id =', self.key().id()).run()\n return db.get([db.Key.from_path('User', edge.friend_user_id) for edge in\n edges])", "def get_friends(user):\r\n try:\r\n friends = user.friends()\r\n return friends[:]\r\n except tweepy.error.RateLimitError:\r\n print(\"Rate limit reached! Waiting...\")\r\n wait_15_minutes()\r\n return get_friends(user)\r\n except tweepy.error.TweepError:\r\n print(\"Skipping user whose information is protected.\")\r\n return list()", "def get_friends(self, user_id):\n # if user_id is alias, replace it with id\n if not self._is_positive_number(user_id):\n user_id = get_names_of_users(set([user_id]))[0].id\n api = pyvkontakte.VkontakteApi()\n return set(api.call('friends.get', user_id=user_id, v='5.8')['items'])", "def signed_up_friends(self):\n friends = self.twitter_oauth.friends\n if not friends:\n return [], []\n return friends, User.query.filter(\n User.username.in_(x.screen_name for x in friends))", "def get_my_friends(self):\n query = read_query('content exploration/my_friends')\n response = self._submit_query(query)\n return [elem['name']['value'].split('/')[-1] for elem in response]", "def list_friends(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.retrieve_friends(user_id)", "def get_friends(self):\n friends = self.graph.get_connections(\"me\", \"friends\")\n return friends['data']", "def get_possible_friends():\n user_list = []\n for user_unprocessed in api_vars.users.find({'public': True}):\n user = user_unprocessed\n user['_id'] = str(user['_id'])\n user_list.append(user)\n # For now, let's break the list at one hundred. 
This is just for the\n # sake of simplicity.\n if len(user_list) >= 100:\n break\n user_data = {'users': user_list}\n json_data = json.dumps(user_data)\n return json_data", "def get_friends(self, force: bool = False) -> List[types.FriendInformation]:\n raise NotImplementedError", "def get_friends(user, data):\n setA = list(\n data.loc[data.user == user].user_friend_list.values)\n setB = list(\n data.loc[data.user_friend_list == user].user\n .values)\n friends = list(set(set(setA).union(setB)))\n return friends", "def get_friends(graph, location_id=\"\", is_user=\"\"):\n user = graph.get_object(\"me\")\n fql = \"SELECT uid, name, profile_url, pic_small, current_location, mutual_friend_count FROM user WHERE uid IN (SELECT uid1 FROM friend WHERE uid2 = \" + user[\"id\"] + \")\"\n if location_id:\n fql += \" AND current_location.id=\" + location_id\n if is_user:\n fql += \" AND is_app_user=\" + is_user\n fql += \" ORDER BY mutual_friend_count DESC\"\n logging.info(fql)\n try:\n fql_friends = graph.fql(fql)\n return fql_friends['data']\n except:\n logging.error(\"There was an error retrieving friends of UID %s\", user[\"id\"])\n return list()", "def friends(self):\n #Guillaume\n friends_list = []\n received = Friendships.objects.filter(request_for=self, status='A')\n for friend in received:\n friends_list.append(friend.request_from)\n sent = Friendships.objects.filter(request_from=self, status='A')\n for friend in sent:\n friends_list.append(friend.request_for)\n return friends_list", "def get_user_friends(user_id):\n\n friends = db.session.query(User_Friend).filter(User_Friend.user_id==user_id).all() \n\n return friends", "def get_friend_list(self):\n self.friends = self.df[['user_id','friends']]", "def friends(self):\n service_root = self._get_webservice_url(\"fmf\")\n return FindFriendsService(service_root, self.session, self.params)", "def get_friends(user_id):\n return list(set(get_following(user_id)) &\n set(get_followers(user_id)))", "def user_list_friend_requests(self):\n email_token = auth.current_user()[0]\n friend_emails = self.friend_database.get_friend_requests(email_token)\n friends = [self.auth_server.profile_query(email) for email in friend_emails]\n return json.dumps(friends), 200", "async def get_friends(_: User = Depends(get_current_user),\n db: Session = Depends(get_db)):\n # Need a way to map GPG keys to users in DB\n pairs = crud.get_name_email_pairs(db)\n return pairs", "def get_friends():\n\n acct = get_current_account(session[\"acct\"])\n get_user_friends(acct, GR_KEY, GR_SECRET)\n search = False\n\n return render_template(\"index.html\", acct=acct, search=search)", "def fetch_friends(self, user, paginate=False):\n\n if USING_ALLAUTH:\n social_app = SocialApp.objects.get_current('facebook')\n oauth_token = SocialToken.objects.get(account=user, app=social_app).token\n else:\n social_auth_backend = FacebookBackend()\n\n # Get the access_token\n tokens = social_auth_backend.tokens(user)\n oauth_token = tokens['access_token']\n\n graph = facebook.GraphAPI(oauth_token)\n\n friends = graph.get_connections(\"me\", \"friends\")\n\n if paginate:\n total_friends = friends.copy()\n total_friends.pop('paging')\n while 'paging' in friends and 'next' in friends['paging'] and friends['paging']['next']:\n next_url = friends['paging']['next']\n next_url_parsed = urlparse.urlparse(next_url)\n query_data = urlparse.parse_qs(next_url_parsed.query)\n query_data.pop('access_token')\n for k, v in query_data.items():\n query_data[k] = v[0]\n friends = graph.get_connections(\"me\", \"friends\", 
**query_data)\n total_friends['data'] = sum([total_friends['data'], friends['data']], [])\n else:\n total_friends = friends\n\n return total_friends", "def get_friends(self, user_id=None, fields='sex,bdate'):\n if user_id is None:\n friends_info = self.vk.friends.get(fields=fields)\n else:\n friends_info = self.vk.friends.get(fields=fields, user_id=user_id)\n return friends_info['items']", "def friends(user_id):\n user = user_grab(user_id)\n if user is None:\n return \"user not found\", 404\n friends = user.get(\"friends\")\n if friends is None:\n friends = []\n data_json = json.dumps({'friends': [str(friend) for friend in friends]})\n return data_json", "def get_potential_friends(user_id):\n\n if not g.user:\n return _get_json_message(\n INVALID_CREDENTIALS_MSG,\n INVALID_CREDENTIALS_STATUS_CODE)\n\n current_user = User.query.get_or_404(user_id)\n\n if current_user.username != g.user.username:\n return _get_json_message(\n INVALID_CREDENTIALS_MSG,\n INVALID_CREDENTIALS_STATUS_CODE)\n\n user_options = User.get_list_of_potential_friends(current_user)\n user_options_serialized = [user.serialize() for user in user_options]\n\n return jsonify(user_options=user_options_serialized)", "def get_queryset(self):\n self.object = self.get_object()\n return self.object.friends.all().exclude(user=self.object.user)", "def find_friends(request):\n find_list = []\n sent_requests = set()\n rec_requests = set()\n sent_f_requests = FriendRequest.objects.filter(\n from_user=request.user\n )\n rec_f_requests = FriendRequest.objects.filter(\n to_user=request.user\n )\n\n me = request.user\n my_friends = me.profile.friends.all()\n my_family = me.relations.all()\n profiles = Profile.objects.exclude(\n user=request.user\n )\n for user in profiles:\n user_friends = user.friends.all()\n for friend in user_friends:\n if friend not in find_list and friend != me:\n if friend not in my_friends and friend not in my_family:\n find_list.append(friend)\n\n template = 'profiles/find_friends.html'\n context = {\n 'find_list': find_list,\n }\n return render(request, template, context)", "def list(self, request, *args, **kwargs):\n from_user = request.QUERY_PARAMS.get('user', None)\n if from_user:\n from_user_id = from_user\n else:\n from_user_id = request.user.id\n\n query = Friend.objects.filter(from_user_id=from_user_id)\n # items = request.QUERY_PARAMS.get('item', 50)\n items = 200\n paginator = Paginator(query, items)\n\n page = request.QUERY_PARAMS.get('page')\n\n try:\n friends = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n friends = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 
9999),\n # deliver last page of results.\n friends = paginator.page(paginator.num_pages)\n user_location = UserLocation.objects.filter(user=request.user).order_by('-modified_at').first()\n context = dict(user_id=request.user.id)\n if user_location:\n context['lat'] = user_location.lat\n context['lon'] = user_location.lon\n serializer = PaginatedFriendSerializer(friends, context=context)\n return Response(serializer.data, status=200)", "def friend_info(self):\n return self._reddit.get(API_PATH['friend_v1'].format(user=self))", "def get_friends_of_friend(friends, data):\n friends_of_friends = []\n for friend in friends:\n friend_list = get_friends(friend, data)\n friends_of_friends.append(friend_list)\n return sum(friends_of_friends, [])", "def get_friends(\n self, recent_tracks: bool, limit: int = 50, page: int = 1\n ) -> ListModel[\"User\"]:\n return self.retrieve(\n bind=User,\n flatten=\"user\",\n params=dict(\n method=\"user.getFriends\",\n user=self.name,\n recenttracks=recent_tracks,\n page=page,\n limit=limit,\n ),\n )", "def list_pending_friends(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.retrieve_pending_friends(user_id)", "def user_list_friends(self):\n email_query = request.args.get('email')\n if not email_query:\n self.logger.debug(messages.MISSING_FIELDS_ERROR % \"email\")\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % \"email\", 400\n email_token = auth.current_user()[0]\n if email_token != email_query and not self.friend_database.are_friends(email_token, email_query):\n self.logger.debug(messages.USER_NOT_AUTHORIZED_ERROR)\n return messages.ERROR_JSON % messages.USER_NOT_AUTHORIZED_ERROR, 403\n friend_emails = self.friend_database.get_friends(email_query)\n friends = [self.auth_server.profile_query(email) for email in friend_emails]\n return json.dumps(friends), 200", "def get_cached_friends(self) -> Optional[List[types.FriendInformation]]:\n raise NotImplementedError", "def common_friends(self, user):\n\n self_friend_ids = set(self.friends.keys()) if self.friends else set()\n other_friend_ids = set(user.fb_profile.friends.keys()) if user.fb_profile.friends else set()\n\n common_friend_ids = self_friend_ids.intersection(other_friend_ids)\n\n return common_friend_ids", "def fetch_friend_ids(self, user, **kwargs):\n friends = self.fetch_friends(user, **kwargs)\n friend_ids = []\n for friend in friends['data']:\n friend_ids.append(friend['id'])\n return friend_ids", "def get_all_books_from_friends(user, KEY, SECRET):\n\n friends = user.friends\n\n if not friends:\n acct = user.account\n friends = get_user_friends(acct, KEY, SECRET)\n if len(friends) == 0:\n print \"no friends data found\"\n flash(\"Add friends on Goodreads in order to see their reading history\")\n\n for friend in friends:\n # if friend.user_id < 32: # TEMPORARY - prevents duplicate data collection\n # continue\n time.sleep(1.00)\n shelves = check_for_shelves(friend.gr_id, KEY)\n get_books_from_shelf(friend.gr_id, 'read', KEY)\n get_books_from_shelf(friend.gr_id, 'currently-reading', KEY)\n print \"Got all books for user \" + friend.gr_url\n\n return", "def getFriends(id):\n u = models.User.query.get(id)\n if not u:\n return jsonify({'error': 'No account found'}), 200\n\n if not u.isFb:\n if int(u.fbid) is not 0:\n u = models.User.query.get(u.fbid)\n if not u.isFb and int(u.fbid) is not 0:\n u = models.User.query.get(u.fbid)\n else:\n return jsonify({'error': 
'No account found'}), 200\n\n session['oauth_token'] = (u.token, '')\n resp = facebook.get('/' + u.fbid + '/friends')\n friends = []\n for f in resp.data['data']:\n friends.append(f['id'])\n\n friends_json = []\n for f in friends:\n u = models.User.query.filter_by(fbid=f).first()\n user = {\n 'id': u.id,\n 'name': u.name,\n 'email': u.email,\n 'regID': u.regid,\n 'photo': u.photo\n }\n friends_json.append(user)\n return jsonify({'friends': friends_json}), 200", "def friends_of_friends(self, node, ids):\n fof = set()\n for id in ids:\n for f in self.users[id]:\n if f != node:\n fof.add(f)\n return fof", "def get(self, request):\n from_user_id = request.QUERY_PARAMS.get('user', None)\n if from_user_id is None:\n from_user_id = request.user.id\n query = Friend.objects.filter(from_user_id=from_user_id)\n items = request.QUERY_PARAMS.get('page_size', 10)\n paginator = Paginator(query, items)\n page = request.QUERY_PARAMS.get('page')\n try:\n friends = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n friends = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n friends = paginator.page(paginator.num_pages)\n user_location = UserLocation.objects.filter(user=request.user)\\\n .order_by('-modified_at')\\\n .first()\n context = {'user_id': request.user.id}\n if user_location:\n context['lat'] = user_location.lat\n context['lon'] = user_location.lon\n serializer = PaginatedFriendSerializer(friends, context=context)\n return Response(serializer.data, status=200)", "def friends(self, node, current_date):\n\n friends = []\n\n for friend in self.network.successors(node):\n # return friends which edge node->friends was created before the current date\n if (self.network[node][friend][self.EDGE_CREATE_TIME] <= current_date):\n friends.append(friend)\n return friends", "def get_users_by_friendid(cls, provider, friendid):\n #return db.GqlQuery(\"SELECT user FROM Logins WHERE provider_name = :1 AND friendids = :2\",provider, friendid)\n count = 0\n if provider == 'twitter':\n query = db.GqlQuery(\"SELECT * FROM TwitterFriendsTBD WHERE friendids = :1 ORDER BY __key__\", friendid)\n elif provider == 'facebook':\n query = db.GqlQuery(\"SELECT * FROM FacebookFriendsTBD WHERE friendids = :1 ORDER BY __key__\", friendid)\n users = []\n while 1:\n logins = query.fetch(1000)\n current_count = query.count()\n count += current_count\n if current_count == 0:\n break\n for alogin in logins:\n users.append(alogin.user)\n query.with_cursor(query.cursor())\n\n \"\"\"\n while 1:\n current_count = query.count()\n count += current_count\n if current_count == 0:\n break\n logins = query.fetch(1000)\n last_key = None\n for alogin in logins:\n users.append(alogin.user)\n last_key = alogin.key()\n query = query.filter('__key__ > ', last_key)\n \"\"\"\n return users", "def get_friends(driver, username):\r\n\r\n driver.get('https://www.facebook.com/' + username + '/friends_all')\r\n scroll_to_end_of_page(driver)\r\n friends = driver.find_elements_by_css_selector('.fsl.fwb.fcb')\r\n for i in range(len(friends)):\r\n friends[i] = friends[i].text\r\n return friends", "def friend_list(request):\n profile = Profile.objects.get(user=request.user)\n context = {\n 'profile': profile,\n }\n return render(request, 'profiles/my_friends.html', context)", "def get_users():\n users = models.User.query.all()\n friends_json = []\n for u in users:\n user = {\n 'id': u.id,\n 'name': u.name,\n 'email': u.email,\n 'regID': u.regid,\n 'photo': 
u.photo\n }\n friends_json.append(user)\n return jsonify({'users': friends_json}), 200", "def get_friends(api, username, limit):\n for friend in tqdm(tweepy.Cursor(api.friends, screen_name=username).items(limit), unit=\"friends\", total=limit):\n process_friend(friend)", "def get_friends(character, _info):\n return map(get_character, character.friends)", "def get_friend_requests(self, user):\n return self.filter(addresser_user=user, status=Friendship.STATUS_PENDING, active=True)", "def get_user_friends_locations_list(\n bearer_token: str, screen_name: str, friends_num: int=50\n ) -> list:\n base_url = 'https://api.twitter.com/'\n search_headers = {\n 'Authorization': f'Bearer {bearer_token}'\n }\n search_params = {\n 'screen_name': f'{screen_name}',\n 'count': friends_num\n }\n search_url = f'{base_url}1.1/friends/list.json'\n response = requests.get(\n search_url, headers=search_headers, params=search_params\n )\n\n data = response.json()\n\n return [\n (user['name'], user['location'])\n for user in data['users']\n if len(user['location']) != 0\n ]", "def get_all_followers(self):\n return get_all_(self.get_followers)", "def vk_friends(request):\n context = {}\n user = request.user\n\n if user.is_authenticated:\n user_social_auth = user.social_auth.get(provider='vk-oauth2')\n access_token = user_social_auth.access_token\n\n api = api_vk.get_vk_api(access_token)\n friends_info = api_vk.get_vk_friends(api)\n account_info = api_vk.get_account_info(api)\n\n context.update({\n \"account_info\": account_info,\n \"friends_info\": friends_info,\n })\n\n return render(request, \"vk_friends.html\", context)", "def get_pending_friendships(self):\n url = 'friendships/pending/'\n return self.send_request(url)", "def list(self, request):\n\n user_profile = get_object_or_404(UserProfile, user=request.user)\n #   Get all sent accepted invitations\n sent = user_profile.creator_friendships.filter(status=1)\n # Get all received accepted invitations\n received = user_profile.invited_friendships.filter(status=1)\n #   Combine results to get all friends:\n friends = []\n for friendship in sent:\n friends.append(UserProfileSerializer(friendship.user_2).data)\n for friendship in received:\n friends.append(UserProfileSerializer(friendship.user_1).data)\n return Response(friends, status=rest_status.HTTP_200_OK)", "def display_all_friends(self):\r\n # Initial count of elements\r\n element_count = len(self.driver.find_elements_by_css_selector(self.CSS_SELECTOR_FRIEND))\r\n print(\"Processing \" + str(element_count) + \" CSS selectors...\")\r\n\r\n # Scroll page to the end\r\n while True:\r\n self.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n time.sleep(3)\r\n\r\n # If the number of elements is the same, the end of page has been reached\r\n if len(self.driver.find_elements_by_css_selector(self.CSS_SELECTOR_FRIEND)) == element_count:\r\n break\r\n\r\n element_count = len(self.driver.find_elements_by_css_selector(self.CSS_SELECTOR_FRIEND))\r\n print(\"Processing \" + str(element_count) + \" CSS selectors...\")\r\n\r\n # Store friends in class variable\r\n self.friends = self.driver.find_elements_by_css_selector(self.CSS_SELECTOR_FRIEND)", "def getFriends(self, users):\n\n\t\tquery = \"\"\"select * from socialgraph\n\t\t\t\t\twhere first_user_id in {}\n\t\t\t\t\tgroup by first_user_id, second_user_id\"\"\".format(users)\n\n\t\tdf = pd.read_sql_query(query, self.conn)\n\n\t\treturn df", "def get_best_friends(self):\n query = read_query('content exploration/best_friends')\n response = 
self._submit_query(query)\n return [(elem['name']['value'], elem['num_chat']['value'].split('/')[-1]) for elem in response]", "def _get_friends_random_list(self, citizen):\r\n \r\n number_friends = int(random.uniform(len(citizen.friends)*0.05, len(citizen.friends)*0.2))\r\n return random.sample(citizen.friends, number_friends)", "def get_user_friends(acct, KEY, SECRET): # this isn't true - evaluate what needs to be returned tomorrow.\n\n new_gr_session = OAuth1Session(\n consumer_key=KEY,\n consumer_secret=SECRET,\n access_token=acct.access_token,\n access_token_secret=acct.access_token_secret\n )\n\n user_id = str(acct.user.gr_id)\n current_page = 1\n\n total, friends = get_friends_page(new_gr_session, user_id, current_page)\n\n # check for no friends first\n if len(friends) == 0:\n flash(\"No Goodreads friends found.\")\n print \"No friends!\"\n\n # friends requests return a list of 30 at a time\n # get total number of pages required.\n total_pages = int(math.ceil(total / float(30)))\n # creates new users and adds friendship relationships to db\n add_user_friendships(friends, acct)\n\n # check for more than 30 friends\n if total_pages > 1:\n\n current_page = 2\n while current_page <= total_pages:\n\n print \"******YOU HAVE MORE FRIENDS*******\"\n\n # wait 1 second between calls, per GR policy\n time.sleep(1.00)\n\n # create new query with updated current_page\n total, friends = get_friends_page(new_gr_session, user_id, current_page)\n add_user_friendships(friends, acct)\n current_page += 1\n\n return None", "def get_queryset(self):\n return FriendRequest.objects.filter(touser=self.request.user)", "def flatten_friends_ids(users):\n friends_ids = []\n for user_id in users:\n friends_ids.extend(users[user_id][\"friends_ids\"]) \n return list(set(friends_ids))", "def get_friends_ids(api, user_id):\r\n # Getting user object:\r\n user = get_user(api, user_id)\r\n\r\n # Getting list of friends of the user:\r\n friends = get_friends(user)\r\n\r\n # Returning ids of friends of the user:\r\n return [friend.id for friend in friends]", "def get_pending_friends(cu_id):\n users = db.session.execute(\n \"\"\"select fr.user_1_id, u.username, u.firstname, u.lastname\n from friend_request as fr inner join userm as u on fr.user_1_id = u.id \n where fr.user_2_id = :cu_id\n and fr.approved is NULL\"\"\",\n {\"cu_id\": cu_id}\n )\n return users", "def get_friends(twitter, screen_name):\n request = robust_request(twitter, 'friends/ids', {'screen_name': screen_name}, max_tries=5)\n friend_list = []\n for r in request:\n friend_list.append(r)\n return sorted(friend_list)", "def show_friends():\n\n\n user_id = session['user_id']\n user = User.query.get(user_id)\n friendship = Friendship.query.get(user_id)\n\n return render_template('friends.html', user=user, friendship=friendship)", "def add_all_friends(twitter, users):\n ###TODO-- Completed\n\n #calling get_friends here to receive friends ID's for all the values of screen_name,\n # limiting the values to receive to 5000\n for user in users:\n user['friends'] = get_friends(twitter, user['screen_name'])[:5000]\n #print(len(user['friends']))", "def get_friends_page(session, user_id, page):\n\n url = 'https://www.goodreads.com/friend/user'\n params = {'id': user_id, 'format': 'xml', 'page': page}\n response = session.get(url, params=params)\n\n doc = untangle.parse(response.content)\n total = int(doc.GoodreadsResponse.friends['total'])\n friends = doc.GoodreadsResponse.friends\n\n return (total, friends)", "def get(self):\n user = users.get_current_user()\n if not 
user:\n self.response.out.write(json.dumps(error_obj('User not logged in.')))\n return\n friend = self.request.get('email')\n if not friend:\n self.response.out.write(json.dumps(error_obj('Must provide email of friend to add.')))\n return\n account = user_info.get_user_account()\n if not friend in account.friend_list:\n self.response.out.write(json.dumps(error_obj('This email is not in your friends list.')))\n return\n friend_account = user_info.get_by_email(friend)\n self.response.out.write(json.dumps(account_info(friend_account)))", "def api_profile_friends_get(profile_id: int):\n\n if is_access_denied(profile_id):\n return jsonify({'error': {'message': 'forbidden'}}), 403\n\n friends = Friend.find_by_profile_id(profile_id)\n \n if friends is None:\n return jsonify({'error': {'message': 'not found'}}), 404\n\n out = [ f.get_fields(with_id=True) for f in friends ]\n\n return jsonify(out), 200", "def get_queryset(self):\n user = self.request.user\n sender = Friendship.objects.filter(sender=user)\n receiver = Friendship.objects.filter(receiver=user)\n return sender.union(receiver)", "def get_info_about_friends (cursor, limit, screen_name=None, user_id=None):\n \n # Must have either screen_name or user_id (logical xor)\n assert (screen_name != None) != (user_id != None), \\\n \"Must have screen_name or user_id, but not both\"\n \n assert (limit > 0), \"The requested number of ids must be higher than 0\"\n \n result = []\n needed = limit\n \n # while there are friends to get and the needed number is still positive\n while cursor != 0 and needed > 0:\n # we can retrieve only 5000 at once\n if needed > MAX_ALLOWED_SCRNMS:\n count_limit = MAX_ALLOWED_SCRNMS\n else:\n count_limit = needed\n \n # depends if we have the screen_name or the id of the follower\n if screen_name != None:\n friends_data = twitterapi.make_twitter_request(twitter_api.friends.list, count=count_limit, screen_name=screen_name, cursor=cursor)\n result = result + friends_data[\"users\"]\n else:\n friends_data = twitterapi.make_twitter_request(twitter_api.friends.ids, count=count_limit, user_id=user_id, cursor=cursor)\n result = result + friends_data[\"users\"]\n \n needed = needed - count_limit\n \n # move to next friends that were not retrieved\n cursor = friends_data[\"next_cursor\"]\n \n # returns the needed results\n return result[:limit]", "def get_all_social_paths(self, user_id):\n if len(self.friendships) > 0:\n visited = {}\n q = Queue()\n q.enqueue([user_id])\n\n while q.size() > 0:\n curr_path = q.dequeue()\n curr_vertex = curr_path[-1]\n\n if curr_vertex not in visited:\n visited[curr_vertex] = curr_path\n\n for friend in self.friendships[curr_vertex]:\n path_copy = curr_path[:]\n path_copy.append(friend)\n q.enqueue(path_copy)\n\n return visited\n\n else:\n print(\"There are currently no friendship paths in the network\")", "def number_of_friends(user):\n user_id = user[\"id\"]\n friend_ids = friendships[user_id]\n return len(friend_ids)", "def search_for_friend():\n\n\n user_id = session['user_id']\n email = request.form.get('email')\n\n user = User.get_user_by_email(email)\n\n user_json = {\n 'first_name': user.first_name, 'last_name': user.last_name, \"friend_id\": user.user_id\n }\n\n return jsonify(user_json)", "async def get_app_users(\n self, **kwargs\n ) -> friends.GetAppUsersResponseModel:\n\n params = self.get_set_params(locals())\n response = await self.api.request(\"friends.getAppUsers\", params)\n model = friends.GetAppUsersResponse\n return model(**response).response", "def 
ReorganizeFriendList(self):\n with sqlite3.connect(self.DBname) as conn:\n c = conn.cursor()\n c.execute(\"select ZID, FRIENDS from user_info\")\n user_list = c.fetchall()\n for user, friends in user_list:\n out = set()\n friends = [x.strip() for x in friends.split(\",\")]\n for friend in friends:\n c.execute(\"select FRIENDS from user_info where ZID = (?)\",[friend])\n TFL = c.fetchone()[0]\n TFL = [x.strip() for x in TFL.split(\",\")]\n if user not in TFL:\n out.add(friend)\n NFL = list(set(friends) - out)\n self.UpdateFriendList(user,NFL)", "def get_accepted_friend_requests(self, user):\n \n return self.filter(addresser_user=user, status=Friendship.STATUS_ACCEPTED, active=True)", "def get_friends_ids(api, user_id):\n url = \"https://api.twitter.com/1.1/friends/ids.json\"\n rate_status = check_rate_limit(api, url)\n remaining_requests = rate_status[\"remaining\"]\n if not remaining_requests:\n delay = rate_status['reset'] - time.time()\n if delay > 0:\n print \"Sleeping {0}...\".format(delay)\n time.sleep(delay) \n rate_status = check_rate_limit(api, url)\n remaining_requests = rate_status[\"remaining\"]\n\n friends_ids = []\n params = {\"user_id\": user_id, \"counter\": 0, \n \"count\": 5000, \"stringify_ids\": True}\n response = api.get(url, params=params)\n friends_ids.extend(response.json().get(\"ids\", []))\n response.close()\n remaining_requests -= 1\n\n while response.json().get('next_cursor'):\n if not remaining_requests:\n delay = rate_status['reset'] - time.time()\n if delay > 0:\n print \"Sleeping {0:,.4} s...\".format(delay)\n time.sleep(delay) \n rate_status = check_rate_limit(api, url)\n remaining_requests = rate_status[\"remaining\"]\n params[\"cursor\"] = response.json().get('next_cursor_str')\n response = api.get(url, params=params)\n friends_ids.extend(response.json().get(\"ids\", []))\n response.close()\n remaining_requests -= 1\n return friends_ids", "def add_all_friends(twitter, users):\n for u_dict in users:\n u_dict['friends'] = get_friends(twitter,u_dict['screen_name'])", "def friends(request):\n return friendslist(request, request.session['id'])", "def get_all_users():\n return Users.query.all()", "def count_friends(users):\n ###TODO-- Completed\n\n #Creating a Counter object, to count the mapping\n c = Counter()\n c.update(friend_id for user in users for friend_id in user['friends'])\n return c", "def get_friends(user_id, fields):\n assert isinstance(user_id, int), \"user_id must be positive integer\"\n assert isinstance(fields, str), \"fields must be string\"\n assert user_id > 0, \"user_id must be positive integer\"\n import requests\n domain = \"https://api.vk.com/method\"\n access_token = '1efb9991613d1e0c7597cae85db190f37bbda497579e92b05af4352bc694c66fd3883d0ff1b875b53a98d'\n user_id = user_id\n\n query_params = {\n 'domain': domain,\n 'access_token': access_token,\n 'user_id': user_id,\n 'fields': fields\n }\n\n query = \"{domain}/friends.get?access_token={access_token}&user_id={user_id}&fields={fields}&v=5.53\".format(\n **query_params)\n response = requests.get(query)\n friends_list = response.json()['response']['items']\n return friends_list", "def users(self, site = None):\r\n uids = self.user_ids()\r\n if uids:\r\n users = Account._byID(uids, True, return_dict = False)\r\n return [self.ajax_user(u) for u in users]\r\n else:\r\n return ()", "def get_all_fans(self):\n return self._fan_list", "def get_common_friends(user, friends, friends_of_friends, data):\n common_friends_list = {}\n friends_set = set(friends)\n for friend_of_friend in 
list(set(friends_of_friends)):\n if int(friend_of_friend) != user and friend_of_friend not in friends:\n friend_of_friend_list = get_friends(friend_of_friend, data)\n score = len(list(friends_set.intersection(friend_of_friend_list)))\n if score in common_friends_list:\n common_friends_list[score].append(friend_of_friend)\n else:\n common_friends_list[score] = [friend_of_friend]\n return common_friends_list", "def get_friends(user_id, fields=\"\"):\r\n assert isinstance(user_id, int), \"user_id must be positive integer\"\r\n assert isinstance(fields, str), \"fields must be string\"\r\n assert user_id > 0, \"user_id must be positive integer\"\r\n query = f\"{domain}/friends.get?user_id={user_id}&fields={fields}&access_token={access_token}&v={v}\"\r\n response = requests.get(query)\r\n return response.json()", "def get_reachable_friends(self, name):\n distance_to_be_reachable = 25\n ant = self.ants[name]\n reachable_friends = [friend for friend in self.ants\n if 3 < distance_to(self.ants[friend].xcor,\n self.ants[friend].ycor,\n ant.xcor, ant.ycor) < distance_to_be_reachable]\n return reachable_friends", "def get_users():\n table_response = USER_FAVORITES_TABLE.scan()\n return table_response['Items']", "def get_all_users():\n return User.query.all()", "async def fetch_blocked_users(self):\n settings = await self.http.get_privacy_settings()\n blocked = []\n for user in settings.get('blockedUsers', []):\n blocked.append(User(state=self.http, data=user))\n\n return blocked", "def getFriends(self, **kwargs):\n screen_name = kwargs.get('screen_name', None)\n user_id = kwargs.get('user_id', None)\n return_type = kwargs.get('return_type', \"id\")\n\n if not screen_name and not user_id:\n return {}\n\n params = {\n 'screen_name': screen_name,\n 'user_id': user_id,\n 'cursor': kwargs.get('cursor', None),\n 'count': kwargs.get('count', None),\n 'stringify_ids': kwargs.get('stringify_ids', None)\n }\n\n uri = self.api_url + '/friends/'\n\n query = \"ids.json\" if return_type == \"id\" else \"list.json\"\n query += createQuery(params)\n \n response = self.session.get(uri + query).json()\n return response", "def get_all_users():", "def getConnectedUsers(self):\n\n\t\treturn self.connectedUsers", "def get_context_data(self, **kwargs):\n context = super(FriendsList, self).get_context_data(**kwargs)\n context['friends'] = self.get_queryset()\n\n return context", "def get_friend_books():\n\n acct = get_current_account(session['acct'])\n user = get_user_by_acct(acct)\n search = False\n\n get_all_books_from_friends(user, GR_KEY, GR_SECRET)\n flash(\"imported all books from your friends!\")\n\n return render_template(\"index.html\", acct=acct, search=search)", "def addFriends(author):\n friends = author.friends.all()\n remote_friends = RemoteFriend.objects.all().filter(author=author)\n friend_list = list()\n if friends:\n for friend in friends:\n friend_dict = {'id': \"{}/api/{}\".format(DOMAIN, friend.id), 'host': friend.host_url,\n 'displayName': friend.username, 'url': \"{}/api/{}\".format(DOMAIN, friend.id)}\n friend_list.append(friend_dict)\n\n if remote_friends:\n for remote in remote_friends:\n friend_dict = {'id': remote.url, 'host': remote.host,\n 'displayName': remote.displayName, 'url': remote.url}\n friend_list.append(friend_dict)\n\n remote = check_remote_friends(author)\n friend_list += remote\n return friend_list", "def get_friends(twitter,userid,count):\n url = 'https://api.twitter.com/1.1/friends/ids.json?&user_id='+str(userid)+'&skip_status=true&include_user_entities=false&count='+str(count) \n 
consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)\n access = oauth.Token(key=access_token, secret=access_token_secret)\n client = oauth.Client(consumer, access)\n li=[]\n try:\n response,data = client.request(url)\n dataStr = data.decode('utf-8') \n if('Rate limit exceeded' in dataStr ):\n print('rate limit exceeded error.. sleep for 15 min')\n time.sleep(61 * 15)\n response,data = client.request(url)\n \n jsonid = json.loads(dataStr)\n li = list(jsonid['ids'])\n \n except:\n pass\n \n return li", "def favorites(self):\n if not self._user_favorites_loaded:\n self._user_favorites = self._getFavorites()\n self._user_favorites_loaded = True\n return deepcopy(self._user_favorites)", "def get_friends_ids(cursor, limit, screen_name=None, user_id=None):\n \n # Must have either screen_name or user_id (logical xor)\n assert (screen_name != None) != (user_id != None), \\\n \"Must have screen_name or user_id, but not both\"\n \n assert (limit > 0), \"The requested number of ids must be higher than 0\"\n \n result = []\n needed = limit\n \n # while there are friends to get and the needed number is still positive\n while cursor != 0 and needed > 0:\n # we can retrieve only 5000 at once\n if needed > MAX_ALLOWED_IDS:\n count_limit = MAX_ALLOWED_IDS\n else:\n count_limit = needed\n \n # depends if we have the screen_name or the id of the follower\n if screen_name != None:\n friends_ids = twitterapi.make_twitter_request(twitter_api.friends.ids, count=count_limit, screen_name=screen_name, cursor=cursor)\n result = result + friends_ids[\"ids\"]\n else:\n friends_ids = twitterapi.make_twitter_request(twitter_api.friends.ids, count=count_limit, user_id=user_id, cursor=cursor)\n result = result + friends_ids[\"ids\"]\n \n needed = needed - count_limit\n \n # move to next friends that were not retrieved\n cursor = friends_ids[\"next_cursor\"]\n \n # returns the needed results\n return result[:limit]" ]
[ "0.80327207", "0.80079186", "0.79557514", "0.77440524", "0.74695307", "0.737918", "0.73686486", "0.7360085", "0.7259125", "0.7249176", "0.71591926", "0.70998305", "0.70950127", "0.70513386", "0.70225525", "0.7008113", "0.6999867", "0.6982058", "0.6899615", "0.68960917", "0.6802415", "0.679378", "0.67759705", "0.6679316", "0.66448534", "0.66274863", "0.66153926", "0.6606536", "0.65730613", "0.65686846", "0.65404356", "0.64921397", "0.64638275", "0.6448418", "0.6435691", "0.6325873", "0.63025326", "0.6276289", "0.6226965", "0.62259454", "0.6190977", "0.61276054", "0.60957843", "0.6087874", "0.60786945", "0.60770947", "0.60533595", "0.6036792", "0.6004402", "0.5996137", "0.59737897", "0.59696996", "0.59603494", "0.5960118", "0.5951705", "0.59374464", "0.59283984", "0.59274083", "0.59190804", "0.5912931", "0.590795", "0.58959985", "0.58943343", "0.5854115", "0.5823585", "0.58078426", "0.58000916", "0.5797843", "0.57967955", "0.5789452", "0.57417077", "0.5723276", "0.5716216", "0.5711655", "0.5703473", "0.5679763", "0.5676489", "0.56656295", "0.5652275", "0.56418234", "0.56262803", "0.56228626", "0.5620682", "0.5595212", "0.5593766", "0.55937237", "0.55925775", "0.55672807", "0.5543717", "0.5542098", "0.55241317", "0.5503436", "0.54907703", "0.5476933", "0.5451254", "0.54476154", "0.54428905", "0.54112285", "0.54043233", "0.53923684" ]
0.7357716
8
List or create friend requests. Create an unconfirmed friendship between two users, or return all friendships which are not yet confirmed for the current user.
def create_friend_request():
    if request.method == "GET":
        friend_requests = [f.to_dict() for f in g.user.get_friend_requests()]
        return jsonify({'success': True, 'friend_requests': friend_requests})

    if request.method == "POST":
        # Get recieving user id from request
        json = request.get_json()
        if json is None:
            raise CustomError(400, message="No JSON included or Content-Type"
                                           "is not application/json")
        if 'recieving_user_id' not in json:
            raise CustomError(400, message="Must include recieving_user_id")
        recieving_user_id = json['recieving_user_id']

        # Get the user object
        recieving_user = User.query.get(recieving_user_id)
        if recieving_user is None:
            raise CustomError(
                404, message='User with id: {} was not found.'.format(
                    recieving_user_id)
            )

        # Check friendship does not already exist
        friendship_exists = Friendship.query.filter(
            (Friendship.actioning_user_id == g.user.id) |
            (Friendship.recieving_user_id == g.user.id),
            (Friendship.actioning_user_id == recieving_user_id) |
            (Friendship.recieving_user_id == recieving_user_id)
        ).first()
        if friendship_exists:
            raise CustomError(
                409,
                message="There is either a pending friend request between the"
                        "two users or the two users are already friends."
            )

        # Insert friend request
        friend_request = Friendship(g.user, recieving_user)
        db.session.add(friend_request)
        db.session.commit()
        return jsonify({'success': True}), 201
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pending_friendships(self):\n url = 'friendships/pending/'\n return self.send_request(url)", "def get_friend_requests(self, user):\n return self.filter(addresser_user=user, status=Friendship.STATUS_PENDING, active=True)", "def friend_request():\n if 'username' not in session:\n return redirect('/login?type=0')\n user1 = session['username']\n user2 = request.form['username']\n now_time = Time.time()\n if not re.search(ID_REG, user2) and user2 != 'admin':\n return jsonify(res=-4)\n # check friend\n with sqlite3.connect('data.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT * FROM friend WHERE user1 = ? AND user2 = ?', [user1, user2])\n if cur.fetchall() or user1 == user2:\n return jsonify(res=-1)\n cur.execute('SELECT * FROM friend_request WHERE user1 = ? AND user2 = ?', [user1, user2])\n if cur.fetchall():\n return jsonify(res=-2)\n cur.execute('SELECT * FROM friend_request WHERE user1 = ? AND user2 = ?', [user2, user1])\n if cur.fetchall():\n return jsonify(res=-3)\n cur.execute('SELECT * FROM user_login WHERE username = ?', [user2])\n if not cur.fetchall():\n return jsonify(res=-4)\n cur.execute('INSERT INTO friend_request VALUES (?, ?, ?)', [user1, user2, now_time])\n conn.commit()\n return jsonify(res=0)", "def list(self, request):\n\n user_profile = get_object_or_404(UserProfile, user=request.user)\n #   Get all sent accepted invitations\n sent = user_profile.creator_friendships.filter(status=1)\n # Get all received accepted invitations\n received = user_profile.invited_friendships.filter(status=1)\n #   Combine results to get all friends:\n friends = []\n for friendship in sent:\n friends.append(UserProfileSerializer(friendship.user_2).data)\n for friendship in received:\n friends.append(UserProfileSerializer(friendship.user_1).data)\n return Response(friends, status=rest_status.HTTP_200_OK)", "def create(self, request):\n\n invited_email = request.data.get(\"email\")\n status = request.data.get(\"status\", False)\n if not invited_email:\n return Response(status=rest_status.HTTP_404_NOT_FOUND)\n try:\n invited_user = UserProfile.objects.get(user__email=invited_email)\n except UserProfile.DoesNotExist:\n return Response(status=rest_status.HTTP_404_NOT_FOUND)\n\n user_sending = get_object_or_404(UserProfile, user=request.user)\n\n if user_sending == invited_user:\n return Response(status=rest_status.HTTP_404_NOT_FOUND)\n\n error = \"\"\n try:\n friendship, _created = FriendShip.objects.get_or_create(\n user_1=user_sending, user_2=invited_user, status=status\n )\n if not _created:\n if friendship.status:\n error = _(\"You already are friend with this user\")\n else:\n error = _(\"A pending invitation is already created\")\n except Exception:\n error = _(\n f\"An error occured when user {user_sending.user.email} invited {invited_user.user.email}\"\n )\n\n data = {}\n status = rest_status.HTTP_200_OK\n if error:\n status = rest_status.HTTP_400_BAD_REQUEST\n data[\"message\"] = error\n else:\n serializer = FriendShipSerializer(friendship)\n data[\"message\"] = \"OK\"\n data[\"content\"] = serializer.data\n return Response(data, status=status)", "def friendship_request_list(request, template_name='/friend/requests_list.html'):\n # friendship_requests = Friend.objects.requests(request.user)\n friendship_requests = FriendshipRequest.objects.filter(rejected__isnull=True)\n\n return render(request, template_name, {'requests': friendship_requests})", "def dispatch(self, request, *args, **kwargs):\n user_to = User.objects.get(pk=kwargs['pk'])\n user_from = self.request.user\n 
###\n if user_to not in wanna_be_friends(user_from):\n friendship = FriendshipInvitation.objects.create(\n from_user=user_from, to_user=user_to, status=\"0\")\n\n notif = Notification.objects.create(sender=user_from,\n receiver=user_to,\n notif_type='friend_request')\n # Aca se ha enviado la solicitud\n else:\n return HttpResponseRedirect(\"/fr_req_fail/\")\n return HttpResponseRedirect(\"/\")", "def accept_request(request, id):\n f_request = FriendRequest.objects.get(id=id)\n if f_request.to_user == request.user:\n f_request.to_user.profile.friends.add(f_request.from_user)\n f_request.from_user.profile.friends.add(f_request.to_user)\n f_request.delete()\n messages.success(\n request,\n f'Your friend request was successfully accepted'\n )\n return redirect('profiles:my_friends')", "def request_to_be_friends(self, user_id, target_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n if target_id is None or len(target_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.create_pending_friend_request(user_id, target_id)", "def user_send_friend_request(self):\n try:\n assert request.is_json\n except AssertionError:\n self.logger.debug(messages.REQUEST_IS_NOT_JSON)\n return messages.ERROR_JSON % messages.REQUEST_IS_NOT_JSON, 400\n content = request.get_json()\n if not FRIEND_REQUEST_MANDATORY_FIELDS.issubset(content.keys()):\n self.logger.debug(messages.MISSING_FIELDS_ERROR % (FRIEND_REQUEST_MANDATORY_FIELDS - set(content.keys())))\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % (\n FRIEND_REQUEST_MANDATORY_FIELDS - set(content.keys())), 400\n email_token = auth.current_user()[0]\n try:\n self.friend_database.create_friend_request(email_token, content[\"other_user_email\"])\n except UnexistentTargetUserError:\n self.logger.debug(messages.USER_NOT_FOUND_MESSAGE % content[\"other_user_email\"])\n return messages.ERROR_JSON % (messages.USER_NOT_FOUND_MESSAGE % content[\"other_user_email\"]), 404\n except UsersAlreadyFriendsError:\n self.logger.debug(messages.USERS_ALREADY_FRIEND_ERROR)\n return messages.ERROR_JSON % messages.USERS_ALREADY_FRIEND_ERROR, 400\n except UnexistentRequestorUserError:\n self.logger.debug(messages.INTERNAL_ERROR_CONTACT_ADMINISTRATION)\n return messages.ERROR_JSON % messages.INTERNAL_ERROR_CONTACT_ADMINISTRATION, 500\n self.notification_database.notify(content[\"other_user_email\"],\n \"New friendship request\", \"From %s\" % email_token,\n {\"kind\": \"friendship_request\",\n \"from\": email_token})\n return messages.SUCCESS_JSON, 200", "def friends(self):\n #Guillaume\n friends_list = []\n received = Friendships.objects.filter(request_for=self, status='A')\n for friend in received:\n friends_list.append(friend.request_from)\n sent = Friendships.objects.filter(request_from=self, status='A')\n for friend in sent:\n friends_list.append(friend.request_for)\n return friends_list", "def get_pending_friends(cu_id):\n users = db.session.execute(\n \"\"\"select fr.user_1_id, u.username, u.firstname, u.lastname\n from friend_request as fr inner join userm as u on fr.user_1_id = u.id \n where fr.user_2_id = :cu_id\n and fr.approved is NULL\"\"\",\n {\"cu_id\": cu_id}\n )\n return users", "def create(self, request):\n friend_obj = Friend.objects.add_friend(\n request.user, # The sender\n get_object_or_404(User, pk=request.data['user_id']), # The recipient\n message=request.data.get('message', '')\n )\n\n return Response(\n 
FriendshipRequestSerializer(friend_obj).data,\n status.HTTP_201_CREATED\n )", "def pending_invitations(self, request):\n\n user_profile = get_object_or_404(UserProfile, user=request.user)\n #   Get all sent pending invitation\n sent = user_profile.creator_friendships.filter(status=0)\n # Get all received pending invitation\n received = user_profile.invited_friendships.filter(status=0)\n #   Serialize all and create a dict from it\n data = {\"sent\": [], \"received\": []}\n for friendship in sent:\n data[\"sent\"].append(FriendShipSerializer(friendship).data)\n for friendship in received:\n data[\"received\"].append(FriendShipSerializer(friendship).data)\n #   Return response with these 2 informations\n return Response(data, status=rest_status.HTTP_200_OK)", "def get_accepted_friend_requests(self, user):\n \n return self.filter(addresser_user=user, status=Friendship.STATUS_ACCEPTED, active=True)", "def confirm_request_to_be_friends(self, user_id, target_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n if target_id is None or len(target_id) == 0:\n raise Exception(\"Bad parameter.\")\n\n if self.database.delete_pending_friend_request(user_id, target_id):\n return self.database.create_friend(user_id, target_id)\n return False", "async def send_friend_request(self):\n\n logging.debug(\"Sending friend request to \" + self.username)\n\n if self.is_friend:\n raise ObjectErrors.AlreadyFriends(\n \"You are already friends with \" + self.display_name)\n\n await self.client.request.post(\n \"/user/%s/friendRequest\" % self.id)", "def post(self):\n\t\tdb = getattr(g, 'db', None)\n\t\tobj = request.get_json()\n\n\t\tif ('username' not in obj) or ('session' not in obj):\n\t\t\treturn {'status':'MISSING_PARAMS'}\n\t\telif not authenticate(obj['username'],obj['session']):\n\t\t\treturn {'status':'AUTH_FAIL'}\n\t\telif ('action' not in obj):\n\t\t\treturn {'status':'MISSING_PARAMS'}\n\t\telse:\n\t\t\taction = obj['action']\n\t\t\tif action == 'ADD' and 'friend' in obj:\n\t\t\t\tqry = \"INSERT INTO friends VALUES ((SELECT id FROM profiles WHERE username = %s),\\\n\t\t\t\t\t(SELECT id FROM profiles WHERE username = %s));\"\n\t\t\t\twith db as cur:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tlines = cur.execute(qry, (obj['username'],obj['friend']))\n\n\t\t\t\t\t\tif lines > 0:\n\t\t\t\t\t\t\treturn {'status':'FRIEND_ADDED'}\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\treturn {'status':'QUERY_FAILED'}\n\t\t\t\t\texcept sql.IntegrityError:\n\t\t\t\t\t\treturn {'status':'DUPLICATE_USER'}\n\t\t\t\t\texcept sql.OperationalError:\n\t\t\t\t\t\treturn {'status':'NO_SUCH_USER'}\n\n\t\t\telif action == 'GET':\n\t\t\t\t\"\"\" Retrieve all friends belonging to user. 
\"\"\"\n\t\t\t\tfriends = [] #accepted, both ends\n\t\t\t\tpending = [] #pending answer from friend\n\n\t\t\t\t# retrieve canonical friends\n\t\t\t\tuserqry = \"SELECT id FROM profiles WHERE username = %s\"\n\t\t\t\tfriendsqry = \"SELECT friend FROM friends WHERE target = ANY(\"+userqry+\")\"\n\t\t\t\tqry = \"SELECT username FROM profiles WHERE id = ANY(\"+friendsqry+\");\"\n\t\t\t\twith db as cur:\n\t\t\t\t\tlines = cur.execute(qry, (obj['username'],))\n\t\t\t\t\tfor friend in cur.fetchall():\n\t\t\t\t\t\tfriends += friend\n\n\t\t\t\t# retrieve pending requests\n\t\t\t\tuserqry = \"SELECT id FROM profiles WHERE username = %s\"\n\t\t\t\tfriendsqry = \"SELECT target FROM friends WHERE friend = ANY(\"+userqry+\")\"\n\t\t\t\tqry = \"SELECT username FROM profiles WHERE id = ANY(\"+friendsqry+\");\"\n\t\t\t\twith db as cur:\n\t\t\t\t\tlines = cur.execute(qry, (obj['username'],))\n\t\t\t\t\tprint \"friends:\"+str(friends)\n\t\t\t\t\tfor req in cur.fetchall():\n\t\t\t\t\t\tif not req[0] in friends:\n\t\t\t\t\t\t\tpending += req\n\n\t\t\t\tif not (len(friends)<=0 and len(pending)<=0):\n\t\t\t\t\treturn {'status':'QUERY_OK', 'friends':friends, 'pending':pending}\n\t\t\t\telse:\n\t\t\t\t\treturn {'status':'NO_FRIENDS'}\n\n\t\t\telif action == 'DELETE' and 'friend' in obj:\n\t\t\t\tqry = \"DELETE FROM friends WHERE target = (SELECT id FROM profiles WHERE username = %s)\\\n\t\t\t\t\tand friend = (SELECT id FROM profiles WHERE username = %s);\"\n\t\t\t\twith db as cur:\n\t\t\t\t\tlines = cur.execute(qry, (obj['username'], obj['friend']))\n\t\t\t\t\tif lines>0:\n\t\t\t\t\t\treturn {'status':'FRIEND_DELETED'}\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn {'status':'QUERY_FAILED'}\n\n\t\t\telse:\n\t\t\t\treturn {'status':'INVALID_ACTION'}", "def friends():\n friends = [u.to_dict() for u in g.user.get_friends()]\n return jsonify({'success': True, 'friends': friends})", "def user_list_friend_requests(self):\n email_token = auth.current_user()[0]\n friend_emails = self.friend_database.get_friend_requests(email_token)\n friends = [self.auth_server.profile_query(email) for email in friend_emails]\n return json.dumps(friends), 200", "def add_friend(request):\n required_fields = ['source_user_id', 'dest_user_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['source_user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # Send friend request\n if not mock_db.add_friend(data['source_user_id'], data['dest_user_id']):\n return Response({'error': str('Error when adding friend!')},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})", "def test_requested_friends_asymmetrical(self):\n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n \n f.requested_friends.add(u)\n self.assertIs(u in f.requested_friends.all(), True)\n self.assertIs(f in u.requested_friends.all(), False)", "def find_friends(request):\n find_list = []\n sent_requests = set()\n rec_requests = set()\n sent_f_requests = FriendRequest.objects.filter(\n from_user=request.user\n )\n rec_f_requests = 
FriendRequest.objects.filter(\n to_user=request.user\n )\n\n me = request.user\n my_friends = me.profile.friends.all()\n my_family = me.relations.all()\n profiles = Profile.objects.exclude(\n user=request.user\n )\n for user in profiles:\n user_friends = user.friends.all()\n for friend in user_friends:\n if friend not in find_list and friend != me:\n if friend not in my_friends and friend not in my_family:\n find_list.append(friend)\n\n template = 'profiles/find_friends.html'\n context = {\n 'find_list': find_list,\n }\n return render(request, template, context)", "def get_friend_request_with_id(id):\n # Get friend request\n friendship = Friendship.query.get(id)\n if friendship is None:\n raise CustomError(\n 404,\n message=\"Friendship with id: {} not found.\".format(id)\n )\n can_view = friendship.actioning_user_id == g.user.id or \\\n friendship.recieving_user_id == g.user.id\n # Check user is has permission to view that request\n if not can_view:\n raise CustomError(\n 401,\n message=\"You are not authorised to view this resource.\"\n )\n\n if request.method == \"GET\":\n return jsonify({'success': True, 'friendship': friendship.to_dict()})\n\n if request.method == \"PATCH\":\n if friendship.recieving_user_id != g.user.id:\n raise CustomError(\n 401,\n message=\"You are not authorised to update this object.\"\n )\n\n json = request.get_json()\n if json is None:\n raise CustomError(400, message=\"No JSON included or Content-Type\"\n \"is not application/json\")\n if 'confirmed' in json:\n friendship.confirmed = json['confirmed']\n\n db.session.commit()\n return jsonify({'success': True, 'friendship': friendship.to_dict()})\n\n if request.method == \"DELETE\":\n db.session.delete(friendship)\n db.session.commit()\n return jsonify({'success': True})", "def accept(self):\n receiver_friend_list = FriendList.objects.filter(user_id=self.receiver_id)\n sender_friend_list = FriendList.objects.filter(user_id=self.sender_id)\n if(receiver_friend_list.exists()):\n receiver_friend_list = receiver_friend_list[0]\n else:\n receiver_friend_list = FriendList.objects.create(user_id=self.receiver_id)\n\n if(sender_friend_list.exists()):\n sender_friend_list = sender_friend_list[0]\n else:\n sender_friend_list = FriendList.objects.create(user_id=self.sender_id)\n\n if receiver_friend_list:\n receiver_friend_list.add_friend(self.sender_id)\n if sender_friend_list:\n sender_friend_list.add_friend(self.receiver_id)\n self.is_active = False\n self.save()", "def accept(request, pk=None):\n # check request is valid or not\n friend_request = get_or_none(FriendRequest, pk=pk)\n if friend_request is None:\n return Response({'status': '400', 'code': 'E_REQUEST_NOT_FOUND',\n 'detail': code['E_REQUEST_NOT_FOUND']}, status=400)\n # Create friend for login user -> request user\n new_friend1 = FriendConnectSerializer(\n data={'user': friend_request.from_user.id, 'friend': friend_request.to_user.id})\n if not new_friend1.is_valid():\n return Response({'status': '400', 'code': 'E_INVALID_PARAMETER_VALUES',\n 'detail': new_friend1.errors}, status=400)\n # Create friend for request user -> login user\n new_friend2 = FriendConnectSerializer(\n data={'friend': friend_request.from_user.id, 'user': friend_request.to_user.id})\n if not new_friend2.is_valid():\n return Response({'status': '400', 'code': 'E_INVALID_PARAMETER_VALUES',\n 'detail': new_friend2.errors}, status=400)\n # Save record 1\n new_friend1.save()\n # Check save or fail\n is_save1 = get_or_none(FriendConnect, user=friend_request.from_user, 
friend=friend_request.to_user)\n if is_save1 is not None:\n return Response({'status': '500', 'code': 'E_NOT_SAVE',\n 'detail': code['E_NOT_SAVE']}, status=500)\n # Save record 2\n new_friend2.save()\n # Check save or fail\n is_save2 = get_or_none(FriendConnect, user=friend_request.to_user, friend=friend_request.from_user)\n # if fail delete record 1\n if is_save2 is not None:\n is_save1.delete()\n return Response({'status': '500', 'code': 'E_NOT_SAVE',\n 'detail': code['E_NOT_SAVE']}, status=500)\n # if every things ok delete request\n friend_request.delete()\n return Response({'status': '200', 'code': 'OK_SEND_FRIEND_REQUEST',\n 'detail': code['OK_ACCEPT_FRIEND_REQUEST']}, status=201)", "def add_friends(self, user1_index, user2_index):\n if user1_index >= self.num_users or user2_index >= self.num_users:\n raise ValueError(\n f\"Number of users is {self.num_users}, but indices \"\n f\"{user1_index} and {user2_index} were requested.\"\n )\n if self.users_hat[user1_index, user2_index] == 0:\n self.users_hat[user1_index, user2_index] = 1\n elif self.is_verbose():\n self.log(f\"User {user2_index} was already following user {user1_index}\")\n if self.users_hat[user2_index, user1_index] == 0:\n self.users_hat[user2_index, user1_index] = 1\n elif self.is_verbose():\n self.log(f\"User {user1_index} was already following user {user2_index}\")", "def send_request(request, id):\n user = get_object_or_404(User, id=id)\n f_request, created = FriendRequest.objects.get_or_create(\n from_user=request.user,\n to_user=user\n )\n if created:\n messages.success(\n request,\n f'Your friend request to {user} has been sent.'\n )\n\n return redirect('/profiles/%s/' % user.profile.slug)\n messages.info(\n request,\n f'You have already sent a friend request to {user}'\n )\n return redirect('/profiles/%s/' % user.profile.slug)", "def get_friends(user, data):\n setA = list(\n data.loc[data.user == user].user_friend_list.values)\n setB = list(\n data.loc[data.user_friend_list == user].user\n .values)\n friends = list(set(set(setA).union(setB)))\n return friends", "def accept_decline_friend(request):\n required_fields = ['source_user_id', 'dest_user_id', 'accept', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['source_user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # if friend request is being accepted\n if data['accept'] == \"yes\":\n if not mock_db.accept_friend(data['source_user_id'], data['dest_user_id']):\n return Response({'error': str('Error when accepting friend request!')},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n # if friend request is not accepted\n elif data['accept'] == \"no\":\n if not mock_db.cancel_friend_request(data['source_user_id'], data['dest_user_id']):\n return Response({'error': str('Error when declining friend request!')},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n # bad request\n else:\n return Response({'error': str('Invalid request. 
Use yes/no in accept field.')},\n status=status.HTTP_400_BAD_REQUEST)\n\n return Response({'status': 'success'})", "def add_user_friendships(friend_page, acct):\n\n friends_list = [] # becomes a list of User objects\n # with db.session.begin():\n for friend in friend_page.user: # loops over page of 30 friends\n gr_id = int(friend.id.cdata.encode('utf8'))\n gr_url = friend.link.cdata.encode('utf8')\n name = friend.name.cdata.encode('utf8')\n image_url = friend.small_image_url.cdata.encode('utf8')\n\n try:\n # if user is already in db, add friendship only\n existing_user = User.query.filter_by(gr_id=gr_id).one()\n friends_list.append(existing_user)\n except:\n new_user = User(gr_id=gr_id, gr_url=gr_url,\n gr_name=name, image_url=image_url)\n db.session.add(new_user)\n print \"added new friend: \" + friend.name.cdata.encode('utf8')\n friends_list.append(new_user)\n\n print friends_list\n db.session.commit()\n\n # after adding missing users to db, add friendship between authorized account\n # and all friends\n for friend in friends_list:\n\n new_friend = Friendship(user_id=acct.user.user_id, friend_id=friend.user_id)\n old_friend = Friendship(user_id=friend.user_id, friend_id=acct.user.user_id)\n db.session.add(new_friend)\n db.session.add(old_friend)\n print \"Added friendship!\"\n\n db.session.commit()", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def add_untracked_friends(self):\n\n self.log.debug(\"CHECK FOR UNTRACKED FRIENDS\")\n friends_ids_api = self.api.friends_ids()\n targets = Target.objects.filter(hunter=self.user)\\\n .exclude(status__in=Target.ON_DECK)\n friends_ids_django = [t.hunted.twitter_id for t in targets]\n untracked_friends_ids = \\\n filter(lambda x: unicode(x) not in friends_ids_django,\n friends_ids_api)\n\n untracked_friends, remainder = lookup_users_by_id(self.api,\n untracked_friends_ids)\n for untracked_friend in untracked_friends:\n \"\"\"These could be people who don't follow us, but we want to follow,\n for example to keep up with news of their company\"\"\"\n twitter_account, created = utils.get_or_create_twitter_account(\n untracked_friend)\n target, created = Target.objects.get_or_create(\n hunter=self.user, hunted=twitter_account)\n if created:\n target.reason = \"External add.\"\n target.status = Target.FOLLOWER\n target.save()\n self.log.debug(\" => add friend: %s\" % twitter_account.screen_name)\n else:\n self.log.debug(\" => we're following, but no reciprocation: %s\" % twitter_account.screen_name)", "def 
auto_follow_followers():\n\n following = set(t.friends.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n followers = set(t.followers.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n\n not_following_back = followers - following\n\n for user_id in not_following_back:\n try:\n t.friendships.create(user_id=user_id, follow=False)\n except Exception as e:\n print(\"error: %s\" % (str(e)))", "def add_friend(self, User):\n if not User in self.friends.all():\n self.friend.add(User)\n #self.save()", "def get_possible_friends():\n user_list = []\n for user_unprocessed in api_vars.users.find({'public': True}):\n user = user_unprocessed\n user['_id'] = str(user['_id'])\n user_list.append(user)\n # For now, let's break the list at one hundred. This is just for the\n # sake of simplicity.\n if len(user_list) >= 100:\n break\n user_data = {'users': user_list}\n json_data = json.dumps(user_data)\n return json_data", "def get_queryset(self):\n return FriendRequest.objects.filter(touser=self.request.user)", "def user_accept_friend_request(self):\n try:\n assert request.is_json\n except AssertionError:\n self.logger.debug(messages.REQUEST_IS_NOT_JSON)\n return messages.ERROR_JSON % messages.REQUEST_IS_NOT_JSON, 400\n content = request.get_json()\n if not FRIEND_REQUEST_MANDATORY_FIELDS.issubset(content.keys()):\n self.logger.debug(messages.MISSING_FIELDS_ERROR % (FRIEND_REQUEST_MANDATORY_FIELDS - set(content.keys())))\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % (\n FRIEND_REQUEST_MANDATORY_FIELDS - set(content.keys())), 400\n email_token = auth.current_user()[0]\n try:\n self.friend_database.accept_friend_request(content[\"other_user_email\"], email_token)\n except UnexistentFriendRequest:\n self.logger.debug(messages.UNEXISTENT_FRIEND_REQUEST % (content[\"other_user_email\"], email_token))\n return messages.ERROR_JSON % (messages.UNEXISTENT_FRIEND_REQUEST %\n (content[\"other_user_email\"], email_token)), 404\n return messages.SUCCESS_JSON, 200", "def get_queryset(self):\n user = self.request.user\n sender = Friendship.objects.filter(sender=user)\n receiver = Friendship.objects.filter(receiver=user)\n return sender.union(receiver)", "def friends(self, node, current_date):\n\n friends = []\n\n for friend in self.network.successors(node):\n # return friends which edge node->friends was created before the current date\n if (self.network[node][friend][self.EDGE_CREATE_TIME] <= current_date):\n friends.append(friend)\n return friends", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n # print(\"WARNING: You cannot be friends with yourself\")\n return False\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n # print(\"WARNING: Friendship already exists\")\n return False\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)\n\n return True", "def create_friends(friend, friendors, create_post = True, visibility = ACL_DEFAULT):\n for friendor in friendors:\n friend.add_friend(friendor)\n friendor.add_friend(friend)\n # FriendRelationship.objects.create(friendor = friendor, friend = friend)\n\n if create_post:\n Post.objects.create(content = TEXT, author = friendor, visibility = visibility)", "def remove_friend(request):\n collected_values = {}\n\n if request.method != 'POST':\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Wrong HTTP verb\"\n return JsonResponse(collected_values, status=400)\n \n uid = request.POST[\"user_id\"]\n oid = request.POST[\"oid\"]\n token = 
request.POST[\"token\"]\n\n # Check auth\n is_valid, collected_values[\"token\"] = check_auth(uid, token, timezone.now())\n if not is_valid:\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Invalid Token\"\n return JsonResponse(collected_values, status=400)\n\n user_raw_query = \"SELECT friends, friend_not_to_add from linx_luser WHERE user_id = {}\".format(uid)\n other_raw_query = \"SELECT friends, friend_not_to_add from linx_luser WHERE user_id = {}\".format(oid)\n with connection.cursor() as cursor:\n cursor.execute(user_raw_query)\n values = cursor.fetchall()\n user_friends = values[0][0]\n if user_friends == None:\n user_friends = \"\"\n user_blocked = values[0][1]\n if user_blocked == None:\n user_blocked = \"\"\n\n cursor.execute(other_raw_query)\n values = cursor.fetchall()\n other_friends = values[0][0]\n if other_friends == None:\n other_friends = \"\"\n other_blocked = values[0][1]\n if other_blocked == None:\n other_blocked = \"\"\n\n friendsr = user_friends.replace(\"[\", \"\").replace(\"]\", \"\")\n split_user_friends = friendsr.split(\",\")\n split_user_friends.remove(oid)\n new_user_friends = \"[\" + \",\".join(split_user_friends) + \"]\"\n \n block_listr = user_blocked.replace(\"[\", \"\").replace(\"]\", \"\")\n block_list = block_listr.split(\",\")\n if block_list is []:\n block_list = [oid]\n else:\n block_list.append(oid)\n new_user_block = \"[\" + \",\".join(block_list) + \"]\"\n\n ofriendsr = other_friends.replace(\"[\", \"\").replace(\"]\", \"\")\n other_friends = ofriendsr.split(\",\") \n other_friends.remove(uid)\n new_other_friends = \"[\" + \",\".join(other_friends) + \"]\"\n\n block_listr2 = other_blocked.replace(\"[\", \"\").replace(\"]\", \"\")\n block_list2 = block_listr2.split(\",\")\n if block_list2 is []:\n block_list2 = [uid]\n else:\n block_list2.append(uid)\n new_other_block = \"[\" + \",\".join(block_list2) + \"]\"\n \n user_raw_query2 = \"UPDATE linx_luser SET friends = \\'{}\\', friend_not_to_add = \\'{}\\' WHERE user_id = {}\".format(new_user_friends, new_user_block, uid)\n other_raw_query2 = \"UPDATE linx_luser SET friends = \\'{}\\', friend_not_to_add = \\'{}\\' WHERE user_id = {}\".format(new_other_friends, new_other_block, oid)\n\n cursor.execute(user_raw_query2)\n cursor.execute(other_raw_query2)\n\n collected_values[\"uid\"] = uid\n collected_values[\"oid\"] = oid\n collected_values[\"token\"] = token\n collected_values[\"raw_query_1\"] = user_raw_query2\n collected_values[\"raw_query_2\"] = other_raw_query2\n\n LOGGER.info(\"Block user request: %v\", collected_values)\n return JsonResponse(collected_values, status=200)", "def accept(self):\n receiver_friend_list = FriendList.objects.get(user=self.receiver)\n if receiver_friend_list:\n receiver_friend_list.add_friend(self.sender)\n sender_friend_list = FriendList.objects.get(user=self.sender)\n if sender_friend_list:\n sender_friend_list.add_friend(self.receiver)\n self.is_active = False\n self.save()", "def connect(request, pk=None):\n # check if user sent request to them self\n if int(request.user.id) == int(pk):\n return Response({'status': '400', 'code': 'E_SAME_USER',\n 'detail': code['E_SAME_USER']}, status=400)\n\n # Check both Users are valid\n from_user = get_or_none(User, pk=request.user.id)\n to_user = get_or_none(User, pk=pk)\n # Return Error Message User is not valid\n if from_user is None or to_user is None:\n return Response({'status': '400', 'code': 'E_USER_NOT_FOUND',\n 'detail': code['E_USER_NOT_FOUND']}, status=400)\n\n # check user have sent request 
before or not\n current_request = get_or_none(FriendRequest, from_user=from_user, to_user=to_user)\n # search current request in reverse way\n if current_request is None:\n current_request = get_or_none(FriendRequest, from_user=to_user, to_user=from_user)\n # Return Error Message request have sent before\n if current_request is not None:\n return Response({'status': '400', 'code': 'E_ALREADY_SEND_REQUEST',\n 'detail': code['E_ALREADY_SEND_REQUEST']}, status=400)\n # Check both users are connect or not\n current_connection = get_or_none(FriendConnect, user=from_user, friend=to_user)\n # Return Error Message both user are friend before\n if current_connection is not None:\n return Response({'status': '400', 'code': 'E_ALREADY_CONNECT',\n 'detail': code['E_ALREADY_CONNECT']}, status=400)\n # Save new request\n new_request = FriendRequest(from_user=from_user, to_user=to_user)\n new_request.save()\n # Check request is save success\n is_created = get_or_none(FriendRequest, from_user=from_user, to_user=to_user)\n # Return Error Message Request is not save\n if is_created is None:\n return Response({'status': '500', 'code': 'E_NOT_SAVE',\n 'detail': code['E_NOT_SAVE']}, status=500)\n # Return Message sent request success\n return Response({'status': '200', 'code': 'OK_SEND_FRIEND_REQUEST',\n 'detail': code['OK_SEND_FRIEND_REQUEST']}, status=201)", "def friendship_accept(request, friendship_request_id):\n #if request.method == 'POST':\n #id1 = get_object_or_404(request.user.friendship_requests_sent,id=friendship_request_id)\n f_request = FriendshipRequest.objects.get(from_user=friendship_request_id, to_user = request.user)\n from_user = request.user\n f_request.accept()\n return render (request , 'reload_page.html')\n #return render(request,'friendship/template_ags/friend_requests.html', {'from_user':from_user})", "async def add_list(\n self, name: str, user_ids: Optional[List[int]] = None, **kwargs\n ) -> friends.AddListResponseModel:\n\n params = self.get_set_params(locals())\n response = await self.api.request(\"friends.addList\", params)\n model = friends.AddListResponse\n return model(**response).response", "def remove_friends(self, user1_index, user2_index):\n if user1_index >= self.num_users or user2_index >= self.num_users:\n raise ValueError(\n f\"Number of users is {self.num_users}, but indices \"\n f\"{user1_index} and {user2_index} were requested.\"\n )\n if self.users_hat[user1_index, user2_index] == 1:\n self.users_hat[user1_index, user2_index] = 0\n elif self.is_verbose():\n self.log(f\"User {user2_index} was not following user {user1_index}\")\n if self.users_hat[user2_index, user1_index] == 1:\n self.users_hat[user2_index, user1_index] = 0\n elif self.is_verbose():\n self.log(f\"User {user1_index} was not following user {user2_index}\")", "def fetch_friendships(friendships, apis, users, excluded, out, target,\n save_frequency=15,\n friends_restricted_to=None,\n friendships_file=\"cache/friendships.json\") -> None:\n friendships.update(get_or_set(out / target / friendships_file, friendships))\n friends_restricted_to = friends_restricted_to if friends_restricted_to else users\n users_ids = set([str(user[\"id\"]) for user in friends_restricted_to])\n excluded = [s.lower() for s in get_or_set(excluded, [])]\n api_idx = 0\n for i, user in enumerate(users):\n if user[\"screen_name\"].lower() in excluded:\n continue\n if str(user[\"id\"]) in friendships:\n print(f\"[{len(friendships)}] @{user['screen_name']} found in cache.\")\n else:\n print(f\"[{len(friendships)}] Fetching friends of 
@{user['screen_name']}\")\n user_friends = []\n previous_cursor, next_cursor = 0, -1\n while previous_cursor != next_cursor and next_cursor != 0:\n try:\n new_user_friends, (previous_cursor, next_cursor) = apis[api_idx].get_friend_ids(user_id=user[\"id\"],\n stringify_ids=True,\n cursor=next_cursor)\n user_friends += new_user_friends\n except tweepy.TooManyRequests as e:\n api_idx = (api_idx + 1) % len(apis)\n print(f\"You reached the rate limit. Moving to next api: #{api_idx}\")\n sleep(15)\n except tweepy.TweepyException as e:\n print(f\"failed at api: #{api_idx}\")\n print(\"...but it failed. Error: {}\".format(e))\n user_friends = []\n break\n except (requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout) as e:\n print(e) # Why do I get these?\n sleep(5)\n\n common_friends = set(user_friends).intersection(users_ids)\n friendships[str(user[\"id\"])] = list(common_friends)\n # Write to file\n if i % save_frequency == 0:\n get_or_set(out / target / friendships_file, friendships.copy(), force=True)\n get_or_set(out / target / friendships_file, friendships.copy(), force=True)", "def addFriends(author):\n friends = author.friends.all()\n remote_friends = RemoteFriend.objects.all().filter(author=author)\n friend_list = list()\n if friends:\n for friend in friends:\n friend_dict = {'id': \"{}/api/{}\".format(DOMAIN, friend.id), 'host': friend.host_url,\n 'displayName': friend.username, 'url': \"{}/api/{}\".format(DOMAIN, friend.id)}\n friend_list.append(friend_dict)\n\n if remote_friends:\n for remote in remote_friends:\n friend_dict = {'id': remote.url, 'host': remote.host,\n 'displayName': remote.displayName, 'url': remote.url}\n friend_list.append(friend_dict)\n\n remote = check_remote_friends(author)\n friend_list += remote\n return friend_list", "def remove_friend(request):\n required_fields = ['source_user_id', 'dest_user_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['source_user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # delete friend from user profile\n if not mock_db.remove_friend(data['source_user_id'], data['dest_user_id']):\n return Response({'error': str('Error when removing friend from the profile!')},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})", "def list(self, request, *args, **kwargs):\n from_user = request.QUERY_PARAMS.get('user', None)\n if from_user:\n from_user_id = from_user\n else:\n from_user_id = request.user.id\n\n query = Friend.objects.filter(from_user_id=from_user_id)\n # items = request.QUERY_PARAMS.get('item', 50)\n items = 200\n paginator = Paginator(query, items)\n\n page = request.QUERY_PARAMS.get('page')\n\n try:\n friends = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n friends = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 
9999),\n # deliver last page of results.\n friends = paginator.page(paginator.num_pages)\n user_location = UserLocation.objects.filter(user=request.user).order_by('-modified_at').first()\n context = dict(user_id=request.user.id)\n if user_location:\n context['lat'] = user_location.lat\n context['lon'] = user_location.lon\n serializer = PaginatedFriendSerializer(friends, context=context)\n return Response(serializer.data, status=200)", "def create_friend(user_id, friend_user_id):\n\n friend = User_Friend(user_id=user_id, friend_user_id=friend_user_id)\n\n db.session.add(friend)\n db.session.commit()\n\n return friend", "async def get_friends(self) -> List[User]:\n me = await self.get_self()\n r = await self.request.request(url=f'https://friends.roblox.com/v1/users/{me.id}/friends', method=\"GET\")\n data = r.json()\n friends = []\n for friend in data['data']:\n friends.append(User(self.request, friend['id'], friend['name']))\n return friends", "def ReorganizeFriendList(self):\n with sqlite3.connect(self.DBname) as conn:\n c = conn.cursor()\n c.execute(\"select ZID, FRIENDS from user_info\")\n user_list = c.fetchall()\n for user, friends in user_list:\n out = set()\n friends = [x.strip() for x in friends.split(\",\")]\n for friend in friends:\n c.execute(\"select FRIENDS from user_info where ZID = (?)\",[friend])\n TFL = c.fetchone()[0]\n TFL = [x.strip() for x in TFL.split(\",\")]\n if user not in TFL:\n out.add(friend)\n NFL = list(set(friends) - out)\n self.UpdateFriendList(user,NFL)", "def resolve_follow_requests(self, info):\n user = info.context.user\n return FollowRequest.objects.filter(following=user.id, pending=True).order_by(\"-request_date\")", "def friends(self):\n service_root = self._get_webservice_url(\"fmf\")\n return FindFriendsService(service_root, self.session, self.params)", "def user_list_friends(self):\n email_query = request.args.get('email')\n if not email_query:\n self.logger.debug(messages.MISSING_FIELDS_ERROR % \"email\")\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % \"email\", 400\n email_token = auth.current_user()[0]\n if email_token != email_query and not self.friend_database.are_friends(email_token, email_query):\n self.logger.debug(messages.USER_NOT_AUTHORIZED_ERROR)\n return messages.ERROR_JSON % messages.USER_NOT_AUTHORIZED_ERROR, 403\n friend_emails = self.friend_database.get_friends(email_query)\n friends = [self.auth_server.profile_query(email) for email in friend_emails]\n return json.dumps(friends), 200", "def add_friend(request, profile_pk, friend_pk):\n\n profile_object = Profile.objects.get(pk=profile_pk)\n friend_object = profile_object.get_friend_suggestions().get(pk=friend_pk)\n \n profile_object.friends.add(friend_object)\n profile_object.save()\n\n return redirect(reverse('show_profile_page', kwargs={'pk': profile_pk}))", "async def send_friend_request(self, TargetId: int):\n data = {\n 'targetUserId': TargetId\n }\n e = await self.request.request(url=f'https://friends.roblox.com/v1/users/{TargetId}/request-friendship',\n method='post',\n data=data)\n return e", "def list_pending_friends(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.retrieve_pending_friends(user_id)", "def foaf_ids_bad(user):\n return [foaf_id \n for friend_id in friendships[user[\"id\"]]\n for foaf_id in friendships[friend_id]]", "def addFriendship(self, userID, friendID):\n if userID == friendID:\n print(\"WARNING: You 
cannot be friends with yourself\")\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)", "def addFriendship(self, userID, friendID):\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)", "def friends_of_friends(self, node, ids):\n fof = set()\n for id in ids:\n for f in self.users[id]:\n if f != node:\n fof.add(f)\n return fof", "def get_friends(self, force: bool = False) -> List[types.FriendInformation]:\n raise NotImplementedError", "def new_friends(self, G):\r\n H = G.to_undirected() #creates an undirected copy of the original graph\r\n n = nx.preferential_attachment(H) #uses the preferential_attachment method from networkx to create friends\r\n for u, v, p in n:\r\n chance = random.randint(0, 100) #chance is a randomly generated number between 0 and 100\r\n if p >= len(G.edges) and chance >= 90: #creates a new relationship (edge) between two nodes if their preferential\r\n G.add_edge(u, v, weight=random.uniform(-1, 1)) #attachment number is higher than the total number of edges and\r\n else: #chance is greater than 90.\r\n continue\r\n return G", "def get_friend_list(self):\n self.friends = self.df[['user_id','friends']]", "def make_friend(user_id, friend_id):\n # Find out if the user exists\n user_a = user_grab(user_id)\n if user_a is None:\n return \"user not found\", 404\n\n # Find the other user\n user_b = user_grab(friend_id)\n if user_b is None:\n return \"user not found\", 404\n\n # Get their friend list\n friends_current = user_a.get(\"friends\")\n friends_updated = []\n if friends_current is not None:\n for friend in friends_current:\n if friend == friend_id:\n return user_b\n friends_updated = friends_current\n friends_updated.append(str(user_b['_id']))\n api_vars.users.update({'_id': ObjectId(user_id)},\n {'$set': {'friends': friends_updated}})\n return json.dumps(user_b)", "async def edit(\n self, user_id: int, list_ids: Optional[List[int]] = None, **kwargs\n ) -> base.OkResponseModel:\n\n params = self.get_set_params(locals())\n response = await self.api.request(\"friends.edit\", params)\n model = base.OkResponse\n return model(**response).response", "def getFriendshipsIncoming(self, **kwargs):\n params = {\n \"cursor\": kwargs.get(\"cursor\", None),\n \"stringify_ids\": kwargs.get(\"stringify_ids\", None)\n }\n\n query = createQuery(params)\n uri = self.api_url + '/friendships/incoming.json'\n response = self.session.get(uri + query).json()\n return response", "def get(self):\n user = users.get_current_user()\n if not user:\n self.response.out.write(json.dumps(error_obj('User not logged in.')))\n return\n friend = self.request.get('email')\n if not friend:\n self.response.out.write(json.dumps(error_obj('Must provide email of friend to add.')))\n return\n account = user_info.get_user_account()\n if not friend in account.friend_list:\n self.response.out.write(json.dumps(error_obj('This email is not in your friends list.')))\n return\n friend_account = user_info.get_by_email(friend)\n self.response.out.write(json.dumps(account_info(friend_account)))", "def accept_invite_requests(invite_requests):\n for invite_request in invite_requests:\n accepting_user = 
invite_request.to_facebook_user.profile.user\n graph = accepting_user.profile.get_offline_graph()\n facebook = FacebookUserConverter(graph)\n # Delete the request\n facebook.delete_request(invite_request)\n logger.info('Invite request deleted')", "def add_friend():\n if request.method == 'POST':\n username = get_username()\n user_id = get_id_from_username(username)\n friend_to_add = get_id_from_username(request.form['add_user'])\n if not friend_to_add or friend_to_add==user_id:\n return redirect(url_for('message.converse'))\n add_friend_db(user_id, friend_to_add)\n return redirect(url_for('message.converse'))", "def friendship_status_with(self):\n email_query = request.args.get('other')\n if not email_query:\n self.logger.debug(messages.MISSING_FIELDS_ERROR % \"other\")\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % \"other\", 400\n email_token = auth.current_user()[0]\n response = \"no_contact\"\n if self.friend_database.are_friends(email_token, email_query):\n response = \"friends\"\n elif self.friend_database.exists_friend_request(email_query, email_token):\n response = \"received\"\n elif self.friend_database.exists_friend_request(email_token, email_query):\n response = \"sent\"\n return json.dumps({\"status\": response}), 200", "def test_request_friend(self):\n self.test_login_user()\n self.test_create_user('b')\n url = reverse('MGA:send_friend_request')\n data = {'id': 2}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def user_reject_friend_request(self):\n try:\n assert request.is_json\n except AssertionError:\n self.logger.debug(messages.REQUEST_IS_NOT_JSON)\n return messages.ERROR_JSON % messages.REQUEST_IS_NOT_JSON, 400\n content = request.get_json()\n if not FRIEND_REQUEST_MANDATORY_FIELDS.issubset(content.keys()):\n self.logger.debug(messages.MISSING_FIELDS_ERROR % (FRIEND_REQUEST_MANDATORY_FIELDS - set(content.keys())))\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % (\n FRIEND_REQUEST_MANDATORY_FIELDS - set(content.keys())), 400\n email_token = auth.current_user()[0]\n try:\n self.friend_database.reject_friend_request(content[\"other_user_email\"], email_token)\n except UnexistentFriendRequest:\n self.logger.debug(messages.UNEXISTENT_FRIEND_REQUEST % (content[\"other_user_email\"], email_token))\n return messages.ERROR_JSON % (messages.UNEXISTENT_FRIEND_REQUEST %\n (content[\"other_user_email\"], email_token)), 404\n return messages.SUCCESS_JSON, 200", "def addfriend(self, second_user_id):\n second_user = User.objects.get(id=second_user_id)\n new_friendship = Friendship.objects.create(friend_user=self, friend=second_user.gameplanuser)\n new_friendship.save()", "def test_rejectFriend(self):\n \n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n u.requested_friends.add(f)\n # c = Client()\n # c.post(\"/requestFriend\",{'Friend':f.id,'User':u.id})\n # client = Client()\n # response = client.get(\"/requestFriend\")\n # request = response.wsgi_request \n\n #request.POST({'Friend':f.id,'User':u.id})\n #response = self.client.get(reverse('meetup_finder_app:requestFriend'))\n #f.requested_friends.add(u)\n #requestFriend(request)\n data = {'Friend':f.id,'User':u.id}\n #request = self.factory.post('/a/test/path/', data, content_type='application/json')\n # request = self.factory.post('/requestFriend/', data, content_type='application/json')\n # 
print(request.POST['User'])\n # request.user = self.user\n # requestFriend(request)\n\n # poll_1 = Poll.objects.get(pk=1)\n # self.assertEqual(poll_1.choice_set.get(pk=1).votes, 1)\n\n resp = self.client.post('/rejectFriend/', {'User': u.id, 'Friend': f.id})\n self.assertEqual(resp.status_code, 302)\n\n self.assertIs(f not in u.requested_friends.all(), True)", "def test_requestFriend(self):\n \n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n # c = Client()\n # c.post(\"/requestFriend\",{'Friend':f.id,'User':u.id})\n # client = Client()\n # response = client.get(\"/requestFriend\")\n # request = response.wsgi_request \n\n #request.POST({'Friend':f.id,'User':u.id})\n #response = self.client.get(reverse('meetup_finder_app:requestFriend'))\n #f.requested_friends.add(u)\n #requestFriend(request)\n data = {'Friend':f.id,'User':u.id}\n #request = self.factory.post('/a/test/path/', data, content_type='application/json')\n # request = self.factory.post('/requestFriend/', data, content_type='application/json')\n # print(request.POST['User'])\n # request.user = self.user\n # requestFriend(request)\n\n # poll_1 = Poll.objects.get(pk=1)\n # self.assertEqual(poll_1.choice_set.get(pk=1).votes, 1)\n\n resp = self.client.post('/requestFriend/', {'User': u.id, 'Friend': f.id})\n self.assertEqual(resp.status_code, 302)\n\n self.assertIs(u in f.requested_friends.all(), True)", "def addFriendship(self, userID, friendID):\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n return False\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n return False\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)\n return True", "def addFriendship(self, userID, friendID):\n # adding a edge between two vertices\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)", "def people_view(request):\n if request.user.is_authenticated:\n # TODO Objective 4: create a list of all users who aren't friends to the current user (and limit size)\n\n all_users = models.UserInfo.objects.all()\n myuserInfo = models.UserInfo.objects.get(user=request.user)\n myFriends = myuserInfo.friends.all()\n\n all_people = []\n for personInfo in all_users.all():\n if personInfo not in myFriends:\n all_people.append(personInfo)\n\n num_visits = request.session.get('num_visits', 0)\n\n listUpperBound = num_visits + 2\n peopleSize = len(all_people)\n\n new_list = []\n if listUpperBound < peopleSize - 1:\n for i in range(listUpperBound):\n new_list.append(all_people[i])\n else:\n new_list = all_people\n\n # TODO Objective 5: create a list of all friend requests to current user\n friend_list = models.FriendRequest.objects.filter(to_user=myuserInfo)\n given_list = models.FriendRequest.objects.filter(from_user=myuserInfo)\n\n sent_list=[]\n for stuff in given_list:\n sent_list.append(stuff.to_user)\n\n print(sent_list)\n\n friend_requests = []\n\n for friend in friend_list:\n friend_requests.append(friend.from_user)\n\n context = { 'user_info' : myuserInfo,\n 'all_people' : all_people,\n 'num_visits' : num_visits,\n 'new_list' : 
new_list,\n 'friend_requests' : friend_requests,\n 'sent_list' : sent_list}\n\n return render(request,'people.djhtml',context)\n\n request.session['failed'] = True\n return redirect('login:login_view')", "def requests(request):\n to_user = request.user\n profile = get_object_or_404(Profile, user=request.user)\n rec_f_requests = FriendRequest.objects.filter(\n to_user=profile.user\n )\n\n context = {\n 'profile': profile,\n 'rec_f_requests':rec_f_requests\n }\n return render(request, 'profiles/my_requests.html', context)", "def unfriend(request, pk=None):\n # Check user id and friend id\n if int(request.user.id) == int(pk):\n return Response({'status': '400', 'code': 'E_SAME_USER',\n 'detail': code['E_SAME_USER']}, status=400)\n # Check 2 user is valid\n current_user = get_or_none(User, pk=request.user.id)\n friend = get_or_none(User, pk=pk)\n # if 1 or 2 user is not valid\n if current_user is None or friend is None:\n return Response({'status': '400', 'code': 'E_USER_NOT_FOUND',\n 'detail': code['E_USER_NOT_FOUND']}, status=400)\n # get connect of request user -> friend\n # from_user=friend.to_user, to_user=request.user\n current_connection = get_or_none(Friend, from_user=current_user, to_user=friend)\n if current_connection is None:\n return Response({'status': '400', 'code': 'E_REQUEST_NOT_FOUND',\n 'detail': code['E_REQUEST_NOT_FOUND']}, status=400)\n # get connect of friend to request user\n # reverse_connection = get_or_none(FriendConnect, user=friend, friend=current_user)\n #if reverse_connection is None:\n # return Response({'status': '400', 'code': 'E_REQUEST_NOT_FOUND',\n # 'detail': code['E_REQUEST_NOT_FOUND']}, status=400)\n # Delete\n current_connection.delete()\n #reverse_connection.delete()\n # if every thing ok\n return Response({'status': '200', 'code': 'OK_UNFRIEND',\n 'detail': code['OK_UNFRIEND']}, status=200)", "def add_friend(self, account):\n if not account in self.friends.all():\n self.friends.add(account)\n self.save()", "def test_rescindRequest(self):\n \n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n f.requested_friends.add(u)\n # c = Client()\n # c.post(\"/requestFriend\",{'Friend':f.id,'User':u.id})\n # client = Client()\n # response = client.get(\"/requestFriend\")\n # request = response.wsgi_request \n\n #request.POST({'Friend':f.id,'User':u.id})\n #response = self.client.get(reverse('meetup_finder_app:requestFriend'))\n #f.requested_friends.add(u)\n #requestFriend(request)\n data = {'Friend':f.id,'User':u.id}\n #request = self.factory.post('/a/test/path/', data, content_type='application/json')\n # request = self.factory.post('/requestFriend/', data, content_type='application/json')\n # print(request.POST['User'])\n # request.user = self.user\n # requestFriend(request)\n\n # poll_1 = Poll.objects.get(pk=1)\n # self.assertEqual(poll_1.choice_set.get(pk=1).votes, 1)\n\n resp = self.client.post('/rescindRequest/', {'User': u.id, 'Friend': f.id})\n self.assertEqual(resp.status_code, 302)\n\n self.assertIs(u not in f.requested_friends.all(), True)", "def get_friends(self):\n edges = DirectedUserToUserEdge.all().filter(\n 'owner_user_id =', self.key().id()).run()\n return db.get([db.Key.from_path('User', edge.friend_user_id) for edge in\n edges])", "def get(self, request):\n from_user_id = request.QUERY_PARAMS.get('user', None)\n if from_user_id is None:\n from_user_id = request.user.id\n query = 
Friend.objects.filter(from_user_id=from_user_id)\n items = request.QUERY_PARAMS.get('page_size', 10)\n paginator = Paginator(query, items)\n page = request.QUERY_PARAMS.get('page')\n try:\n friends = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n friends = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n friends = paginator.page(paginator.num_pages)\n user_location = UserLocation.objects.filter(user=request.user)\\\n .order_by('-modified_at')\\\n .first()\n context = {'user_id': request.user.id}\n if user_location:\n context['lat'] = user_location.lat\n context['lon'] = user_location.lon\n serializer = PaginatedFriendSerializer(friends, context=context)\n return Response(serializer.data, status=200)", "def show_friends():\n\n\n user_id = session['user_id']\n user = User.query.get(user_id)\n friendship = Friendship.query.get(user_id)\n\n return render_template('friends.html', user=user, friendship=friendship)", "async def delete_all_requests(\n self, **kwargs\n ) -> base.OkResponseModel:\n\n params = self.get_set_params(locals())\n response = await self.api.request(\"friends.deleteAllRequests\", params)\n model = base.OkResponse\n return model(**response).response", "def add_friend_to_trip(request, trip_id, user_id):\n try:\n trip = Trip.objects.get(pk=trip_id)\n if request.user not in trip.users.all():\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n\n user = User.objects.get(pk=user_id)\n if user in trip.users.all():\n error_message = \"User already associated with trip\"\n return Response(error_message, status=status.HTTP_400_BAD_REQUEST)\n\n trip.users.add(user)\n except Trip.DoesNotExist:\n error_message = \"Trip does not exist\"\n return Response(error_message, status=status.HTTP_400_BAD_REQUEST)\n except User.DoesNotExist:\n error_message = \"User does not exist\"\n return Response(error_message, status=status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n return Response(str(e), status=status.HTTP_400_BAD_REQUEST)\n\n return Response(status=status.HTTP_200_OK)", "def get_accepted_registration_requests(self,user,site):\n return self.filter(project=site,\n user=user,\n status=RegistrationRequest.ACCEPTED)", "def get_potential_friends(user_id):\n\n if not g.user:\n return _get_json_message(\n INVALID_CREDENTIALS_MSG,\n INVALID_CREDENTIALS_STATUS_CODE)\n\n current_user = User.query.get_or_404(user_id)\n\n if current_user.username != g.user.username:\n return _get_json_message(\n INVALID_CREDENTIALS_MSG,\n INVALID_CREDENTIALS_STATUS_CODE)\n\n user_options = User.get_list_of_potential_friends(current_user)\n user_options_serialized = [user.serialize() for user in user_options]\n\n return jsonify(user_options=user_options_serialized)", "def get_queryset(self):\n self.object = self.get_object()\n return self.object.friends.all().exclude(user=self.object.user)", "def add_friend():\n\n\n user_id = session['user_id']\n add_friend = request.form.get(\"add-friend\")\n friend_id = request.form.get(\"friend_id\")\n friendship = Friendship.add_friend(user_id, friend_id)\n\n print \"This is the friend id\", friend_id\n\n return 'friend added'", "def get_user_friends(acct, KEY, SECRET): # this isn't true - evaluate what needs to be returned tomorrow.\n\n new_gr_session = OAuth1Session(\n consumer_key=KEY,\n consumer_secret=SECRET,\n access_token=acct.access_token,\n access_token_secret=acct.access_token_secret\n )\n\n user_id = str(acct.user.gr_id)\n current_page = 1\n\n 
total, friends = get_friends_page(new_gr_session, user_id, current_page)\n\n # check for no friends first\n if len(friends) == 0:\n flash(\"No Goodreads friends found.\")\n print \"No friends!\"\n\n # friends requests return a list of 30 at a time\n # get total number of pages required.\n total_pages = int(math.ceil(total / float(30)))\n # creates new users and adds friendship relationships to db\n add_user_friendships(friends, acct)\n\n # check for more than 30 friends\n if total_pages > 1:\n\n current_page = 2\n while current_page <= total_pages:\n\n print \"******YOU HAVE MORE FRIENDS*******\"\n\n # wait 1 second between calls, per GR policy\n time.sleep(1.00)\n\n # create new query with updated current_page\n total, friends = get_friends_page(new_gr_session, user_id, current_page)\n add_user_friendships(friends, acct)\n current_page += 1\n\n return None", "def get_pending_registration_requests(self,user,site):\n\n return self.filter(project=site,\n user=user,\n status=RegistrationRequest.PENDING)" ]
[ "0.66678417", "0.65838015", "0.64492136", "0.6419166", "0.6355479", "0.6347656", "0.63212603", "0.6301099", "0.6292718", "0.62728006", "0.6239098", "0.62126476", "0.61244184", "0.60886", "0.6074877", "0.6053998", "0.6042511", "0.60373974", "0.59732705", "0.5967323", "0.59309", "0.5902279", "0.5878904", "0.58686084", "0.5834719", "0.57897013", "0.5774579", "0.572391", "0.5668115", "0.5655726", "0.5640195", "0.561569", "0.561569", "0.561569", "0.5583479", "0.5520632", "0.5516513", "0.55102324", "0.54781914", "0.545685", "0.54552543", "0.5405855", "0.53945297", "0.5370485", "0.53530896", "0.5339244", "0.5318966", "0.53161883", "0.53066516", "0.5254827", "0.5251047", "0.52480704", "0.5244704", "0.5236959", "0.52176607", "0.5196515", "0.517775", "0.5156423", "0.5154537", "0.51491106", "0.51462376", "0.5128707", "0.5126347", "0.51247895", "0.51027673", "0.51027673", "0.509549", "0.5082912", "0.5082842", "0.5080084", "0.5073092", "0.5060247", "0.50507855", "0.5050752", "0.50370884", "0.50288546", "0.5025565", "0.50237787", "0.50031906", "0.4984165", "0.49677438", "0.49666858", "0.4951746", "0.49484247", "0.49318615", "0.4929471", "0.49145868", "0.48993504", "0.48934656", "0.48820335", "0.4881649", "0.48809102", "0.48783585", "0.48472798", "0.4841219", "0.48337376", "0.48235896", "0.480019", "0.4799238", "0.47975925" ]
0.7035371
0
Get, update or delete friendship with the specified id.
def get_friend_request_with_id(id): # Get friend request friendship = Friendship.query.get(id) if friendship is None: raise CustomError( 404, message="Friendship with id: {} not found.".format(id) ) can_view = friendship.actioning_user_id == g.user.id or \ friendship.recieving_user_id == g.user.id # Check user is has permission to view that request if not can_view: raise CustomError( 401, message="You are not authorised to view this resource." ) if request.method == "GET": return jsonify({'success': True, 'friendship': friendship.to_dict()}) if request.method == "PATCH": if friendship.recieving_user_id != g.user.id: raise CustomError( 401, message="You are not authorised to update this object." ) json = request.get_json() if json is None: raise CustomError(400, message="No JSON included or Content-Type" "is not application/json") if 'confirmed' in json: friendship.confirmed = json['confirmed'] db.session.commit() return jsonify({'success': True, 'friendship': friendship.to_dict()}) if request.method == "DELETE": db.session.delete(friendship) db.session.commit() return jsonify({'success': True})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_friend(request, id):\n user = request.user\n friend = get_object_or_404(User, id=id)\n user.profile.friends.remove(friend)\n friend.profile.friends.remove(user)\n messages.success(\n request,\n 'User deleted from your friends list'\n )\n return redirect('profiles:profile')", "async def delete(\n self, user_id: Optional[int] = None, **kwargs\n ) -> friends.DeleteResponseModel:\n\n params = self.get_set_params(locals())\n response = await self.api.request(\"friends.delete\", params)\n model = friends.DeleteResponse\n return model(**response).response", "def update_ship(id):\n data = request.get_json()\n print(data)\n for ship in db['ships']:\n if ship['id'] == id:\n if data['name']:\n ship['name'] == data['name']\n if data['age']:\n ship['age'] == data['age']\n return ship, status.HTTP_202_ACCEPTED\n return {}, status.HTTP_404_NOT_FOUND", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def getFriends(id):\n u = models.User.query.get(id)\n if not u:\n return jsonify({'error': 'No account found'}), 200\n\n if not u.isFb:\n if int(u.fbid) is not 0:\n u = models.User.query.get(u.fbid)\n if not u.isFb and int(u.fbid) is not 0:\n u = models.User.query.get(u.fbid)\n else:\n return jsonify({'error': 'No account found'}), 200\n\n session['oauth_token'] = (u.token, '')\n resp = facebook.get('/' + u.fbid + '/friends')\n friends = []\n for f in resp.data['data']:\n friends.append(f['id'])\n\n friends_json = []\n for f in friends:\n u = models.User.query.filter_by(fbid=f).first()\n user = {\n 'id': u.id,\n 'name': u.name,\n 'email': u.email,\n 'regID': u.regid,\n 'photo': u.photo\n }\n friends_json.append(user)\n return jsonify({'friends': friends_json}), 200", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n # print(\"WARNING: You cannot be friends with yourself\")\n return False\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n # print(\"WARNING: Friendship already exists\")\n return False\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)\n\n return True", "def remove_relation(request, id):\n user = request.user\n relation = get_object_or_404(User, id=id)\n user.profile.relations.remove(relation)\n user.profile.friends.add(relation)\n messages.success(\n request,\n 'Family member removed to your friends list'\n )\n return redirect('profiles:my_friends')", "def delete_request(request, id):\n f_request = FriendRequest.objects.get(id=id)\n 
f_request.delete()\n messages.success(\n request,\n f'Your friend request has been removed.'\n )\n return redirect('profiles:my_requests')", "def add_relation(request, id):\n user = request.user\n friend = get_object_or_404(User, id=id)\n user.profile.relations.add(friend)\n user.profile.friends.remove(friend)\n messages.success(\n request,\n 'Friend added to your family list'\n )\n return redirect('profiles:my_family')", "def api_profile_friends_get(profile_id: int):\n\n if is_access_denied(profile_id):\n return jsonify({'error': {'message': 'forbidden'}}), 403\n\n friends = Friend.find_by_profile_id(profile_id)\n \n if friends is None:\n return jsonify({'error': {'message': 'not found'}}), 404\n\n out = [ f.get_fields(with_id=True) for f in friends ]\n\n return jsonify(out), 200", "def make_friend(user_id, friend_id):\n # Find out if the user exists\n user_a = user_grab(user_id)\n if user_a is None:\n return \"user not found\", 404\n\n # Find the other user\n user_b = user_grab(friend_id)\n if user_b is None:\n return \"user not found\", 404\n\n # Get their friend list\n friends_current = user_a.get(\"friends\")\n friends_updated = []\n if friends_current is not None:\n for friend in friends_current:\n if friend == friend_id:\n return user_b\n friends_updated = friends_current\n friends_updated.append(str(user_b['_id']))\n api_vars.users.update({'_id': ObjectId(user_id)},\n {'$set': {'friends': friends_updated}})\n return json.dumps(user_b)", "def delete_favorite(self, id):\n path = self._get_path('alter_favorite').format(id=id)\n \n return self._clean_return(self._DELETE(path))", "def scrap_ship(self, ship_id):\n r = requests.delete(self.base_url + f'/users/{self.username}/ships/{ship_id}', headers=self.auth_header)\n return r.text", "def addFriendship(self, userID, friendID):\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)", "def addFriendship(self, userID, friendID):\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)", "def removeFollower(self,id):\n # DELETE /followers/$id\n pass", "def accept_request(request, id):\n f_request = FriendRequest.objects.get(id=id)\n if f_request.to_user == request.user:\n f_request.to_user.profile.friends.add(f_request.from_user)\n f_request.from_user.profile.friends.add(f_request.to_user)\n f_request.delete()\n messages.success(\n request,\n f'Your friend request was successfully accepted'\n )\n return redirect('profiles:my_friends')", "def send_request(request, id):\n user = get_object_or_404(User, id=id)\n f_request, created = FriendRequest.objects.get_or_create(\n from_user=request.user,\n to_user=user\n )\n if created:\n messages.success(\n request,\n f'Your friend request to {user} has been sent.'\n )\n\n return redirect('/profiles/%s/' % user.profile.slug)\n messages.info(\n request,\n f'You have already sent a friend request to {user}'\n )\n return redirect('/profiles/%s/' % user.profile.slug)", "def friends(user_id):\n user = user_grab(user_id)\n if user is None:\n return \"user not found\", 404\n friends = user.get(\"friends\")\n if friends 
is None:\n friends = []\n data_json = json.dumps({'friends': [str(friend) for friend in friends]})\n return data_json", "def delete(self,id):\r\n return delete(id=id)", "def delete(self, id):\n response = remove_location(id)\n return response", "def addFriendship(self, userID, friendID):\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n return False\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n return False\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)\n return True", "def board_by_id(request, id):\n if request.method == 'GET':\n try:\n board = Board.objects.get(id=id)\n return Response(BoardSerializer(board).data)\n except ObjectDoesNotExist:\n return Response({\n \"id\": -1,\n \"error\": \"invalid id\"\n })\n if request.method == 'DELETE':\n try:\n Board.objects.get(id=id).delete()\n return Response({\n \"success\": True\n })\n except ObjectDoesNotExist:\n return Response({\n \"success\": False\n })", "def getFollowings(self,id=None,**kwargs):\n # GET /followings [/$id]\n debugMain('getEntitiesIFollow')\n if id is None:\n return self._genericGet('/followings',**kwargs)\n else:\n return self._genericGet('/followings/%s'%id,**kwargs)", "def addFriendship(self, userID, friendID):\n # adding a edge between two vertices\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)", "def add_favorite(self, id):\n path = self._get_path('alter_favorite').format(id=id)\n \n return self._clean_return(self._PUT(path))", "def show_friends():\n\n\n user_id = session['user_id']\n user = User.query.get(user_id)\n friendship = Friendship.query.get(user_id)\n\n return render_template('friends.html', user=user, friendship=friendship)", "def delete(self, id):\n try:\n deleted_id = self.borrow_repo.remove_one_by_id(id)\n if deleted_id:\n self.write({'id': deleted_id})\n else:\n self.write_not_found(\n 'A request with id {} was not found'.format(id)\n )\n except BumerangError as e:\n self.set_status(500)\n self.finish({'error': str(e)})", "def get_friends(self, user_id):\n # if user_id is alias, replace it with id\n if not self._is_positive_number(user_id):\n user_id = get_names_of_users(set([user_id]))[0].id\n api = pyvkontakte.VkontakteApi()\n return set(api.call('friends.get', user_id=user_id, v='5.8')['items'])", "def get_friend_user_object(friend_user_id):\n\n user_id = friend_user_id\n friend = User.query.filter(User.user_id == user_id).first()\n\n return friend", "def get(self, id):\n return Freigabe.find_by_id(id)", "def delete(id):\n party_to_delete = Party.get_party_by_id(id=id)\n if party_to_delete:\n Party.delete_party(id=id)\n return make_response(jsonify({\n \"status\": 204,\n \"message\": \"deleted\"\n }))\n return make_response(jsonify({\n \"status\": 404,\n \"error\": \"No party found with that id\"\n }), 404)", "def updateOne(id):\n print(inspect.stack()[1][3])\n # read data from the API call\n req_data = request.get_json()\n\n query = select([Followup]).where(Followup.columns.id == id)\n ResultProxy = connection.execute(query)\n ResultSet = ResultProxy.fetchone()\n if(not ResultSet):\n return {'error': 'Unable to Find the given client'}\n\n # Update the URL\n json_data = {}\n\n for 
req in req_data:\n if (req in Followup.c.keys()):\n json_data[req] = req_data[req]\n\n query = (\n update(Followup).\n where(Followup.columns.id == id).\n values(json_data)\n )\n ResultProxy = connection.execute(query)\n if(not ResultProxy):\n return {'error': 'Unable to Update the given client'}\n return {'status': \"Update Succesful\"}", "def removeFriend(self, user):\n user = user if isinstance(user, MyPlexUser) else self.user(user)\n url = self.FRIENDUPDATE.format(userId=user.id)\n return self.query(url, self._session.delete)", "async def edit(\n self, user_id: int, list_ids: Optional[List[int]] = None, **kwargs\n ) -> base.OkResponseModel:\n\n params = self.get_set_params(locals())\n response = await self.api.request(\"friends.edit\", params)\n model = base.OkResponse\n return model(**response).response", "def get_potential_friends(user_id):\n\n if not g.user:\n return _get_json_message(\n INVALID_CREDENTIALS_MSG,\n INVALID_CREDENTIALS_STATUS_CODE)\n\n current_user = User.query.get_or_404(user_id)\n\n if current_user.username != g.user.username:\n return _get_json_message(\n INVALID_CREDENTIALS_MSG,\n INVALID_CREDENTIALS_STATUS_CODE)\n\n user_options = User.get_list_of_potential_friends(current_user)\n user_options_serialized = [user.serialize() for user in user_options]\n\n return jsonify(user_options=user_options_serialized)", "def put(self, id):\n return Contacts().update_one(id, request.json)", "def delete(self, id):\n\n ns.abort(404, 'This API is not supported yet.')", "def list_friends(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.retrieve_friends(user_id)", "def patch(id):\n\n if not request.json or not 'name' in request.json:\n return make_response(jsonify({\n \"status\": 400,\n \"error\": \"Party name is required\"\n }), 400)\n\n data = request.get_json(force=True)\n if isinstance(data['name'], int):\n return make_response(jsonify({\n \"status\": 400,\n \"error\": \"Name should be of type strings\"\n }), 400)\n\n if Party.get_party_by_name(data[\"name\"]):\n return make_response(jsonify({\n \"status\": 409,\n \"error\": \"Party name already taken\"\n }), 409)\n if Validate.validate_empty_string(data_inputed=data[\"name\"]):\n return make_response(jsonify({\n \"status\": 400,\n \"error\": \"Party name cannot be empty\"\n }), 400)\n update_data = request.get_json(force=True)\n party_to_edit = Party.get_party_by_id(id=id)[0]\n party_to_edit = Party.update_party(update_data=update_data,id=id)\n return make_response(jsonify({\n \"status\": 201,\n \"data\": party_to_edit\n }), 201)", "def delete(self, id):\n return Contacts().delete_one(id)", "def get_user_friends(user_id):\n\n friends = db.session.query(User_Friend).filter(User_Friend.user_id==user_id).all() \n\n return friends", "def delete(self, id):\n url = self._format_url(self.url + \"/{id}\", {\"id\": id})\n\n return self._make_request('delete', url)", "def put(self, id):\n activity = Activity().get(id)\n if not activity:\n abort(404, \"Activity not found\")\n\n return activity._update(request.json)", "def get_friends(user_id):\n return list(set(get_following(user_id)) &\n set(get_followers(user_id)))", "def delete(self, id):\n return self.app.post('/delete/' + str(id), data=dict(id=id),\n follow_redirects=True)", "def add_friend():\n\n\n user_id = session['user_id']\n add_friend = request.form.get(\"add-friend\")\n friend_id = request.form.get(\"friend_id\")\n friendship = 
Friendship.add_friend(user_id, friend_id)\n\n print \"This is the friend id\", friend_id\n\n return 'friend added'", "def delete(self, id):\n context = request.environ.get('context')\n dbapi.netdevice_data_delete(context, id, request.json)\n return None, 204, None", "def get(self,id):\r\n person = get_one(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def delete(self, id):\n delete_entry(id)\n return None, 204", "def request_to_be_friends(self, user_id, target_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n if target_id is None or len(target_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.create_pending_friend_request(user_id, target_id)", "def Get(id):\n try:\n bug = Bug.get_by_id(id)\n if not bug:\n raise InvalidIdError\n except (db.Error, InvalidIdError), e:\n logging.error('bug.Get: Exception while retrieving bug (%s): %s', id, e)\n raise InvalidIdError('Bug not found [id=%s].%s' % (id, e))\n return bug", "def delete(self, id: str) -> Any:\n\n return self.client.delete(self._url(id))", "def list_pending_friends(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.retrieve_pending_friends(user_id)", "def delete_profile(cls, id):\n return cls.objects.filter(id == id).delete()", "async def send_friend_request(self, TargetId: int):\n data = {\n 'targetUserId': TargetId\n }\n e = await self.request.request(url=f'https://friends.roblox.com/v1/users/{TargetId}/request-friendship',\n method='post',\n data=data)\n return e", "def my_ship_info(self, ship_id):\n r = requests.get(self.base_url + f'/users/{self.username}/ships/{ship_id}', headers=self.auth_header)\n return r.text", "def delete(self, id=None):\n if id is not None:\n self.where('id', '=', id)\n\n sql = self._grammar.compile_delete(self)\n\n return self._connection.delete(sql, self.get_bindings())", "def get(self,id):\r\n person = get_one_by_persons_id(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def delete(self, id):\n return self._post(\n request=ApiActions.DELETE.value,\n uri=ApiUri.ACTIONS.value,\n params={'id': id}\n )", "def delete(self, id):\n return self._post(\n request=ApiActions.DELETE.value,\n uri=ApiUri.ACTIONS.value,\n params={'id': id}\n )", "def deleteOne(id):\n print(inspect.stack()[1][3])\n query = Followup.delete().where(Followup.columns.id == id)\n ResultProxy = connection.execute(query)\n if(not ResultProxy):\n return {'error': 'Unable to find the given client'}\n return {'status': \"Delete Succesful\"}", "def put(self, id):\n return userDao.update(id, api.payload)", "def delete(self, id=None):\n if id:\n boat = test4ValidEntity(id)\n if boat == None:\n self.response.set_status(404)\n else:\n if boat.at_sea == False:\n query = Slip.query(Slip.current_boat == boat.id)\n result = query.fetch(limit = 1)\n for match in result:\n match.current_boat = None\n match.arrival_date = None\n match.put()\n boat.key.delete()\n self.response.write(\"Boat has been deleted!\") \n else:\n boat.key.delete()\n self.response.write(\"Boat has been deleted!\")", "def delete(self, id):\n raise NotImplementedError", "def delete(self, id: str) -> dict:\n r = requests.delete(self.url + '/{}'.format(id), headers=self.headers)\n\n return r.json()", "def friend_info(self):\n return 
self._reddit.get(API_PATH['friend_v1'].format(user=self))", "def put(self, id):\n return update_msg(request.json, id)", "def delete(self, id):\r\n try:\r\n self.valid_args()\r\n inst = db.session.query(self.__class__).get(id)\r\n if inst is None:\r\n raise NotFound\r\n getattr(require, self.__class__.__name__.lower()).delete(inst)\r\n db.session.delete(inst)\r\n db.session.commit()\r\n self._refresh_cache(inst)\r\n return '', 204\r\n except Exception as e:\r\n return error.format_exception(\r\n e,\r\n target=self.__class__.__name__.lower(),\r\n action='DELETE')", "def patch(self, id=None):\n if id:\n boat = test4ValidEntity(id)\n if boat == None:\n self.response.set_status(404)\n else:\n boat_data = json.loads(self.request.body)\n if 'name' in boat_data:\n boat.name = boat_data['name']\n if 'type' in boat_data:\n boat.type = boat_data['type']\n if 'length' in boat_data:\n boat.length = boat_data['length']\n boat.put()\n boat_dict = boat.to_dict()\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(boat_dict))", "def contact(self, id):\n try:\n json = self.skype.conn(\"POST\", \"{0}/users/batch/profiles\".format(SkypeConnection.API_USER),\n json={\"usernames\": [id]}, auth=SkypeConnection.Auth.SkypeToken).json()\n contact = SkypeContact.fromRaw(self.skype, json[0])\n if contact.id not in self.contactIds:\n self.contactIds.append(contact.id)\n return self.merge(contact)\n except SkypeApiException as e:\n if len(e.args) >= 2 and getattr(e.args[1], \"status_code\", None) == 403:\n # Not a contact, so no permission to retrieve information.\n return None\n raise", "def unfriend(request, pk=None):\n # Check user id and friend id\n if int(request.user.id) == int(pk):\n return Response({'status': '400', 'code': 'E_SAME_USER',\n 'detail': code['E_SAME_USER']}, status=400)\n # Check 2 user is valid\n current_user = get_or_none(User, pk=request.user.id)\n friend = get_or_none(User, pk=pk)\n # if 1 or 2 user is not valid\n if current_user is None or friend is None:\n return Response({'status': '400', 'code': 'E_USER_NOT_FOUND',\n 'detail': code['E_USER_NOT_FOUND']}, status=400)\n # get connect of request user -> friend\n # from_user=friend.to_user, to_user=request.user\n current_connection = get_or_none(Friend, from_user=current_user, to_user=friend)\n if current_connection is None:\n return Response({'status': '400', 'code': 'E_REQUEST_NOT_FOUND',\n 'detail': code['E_REQUEST_NOT_FOUND']}, status=400)\n # get connect of friend to request user\n # reverse_connection = get_or_none(FriendConnect, user=friend, friend=current_user)\n #if reverse_connection is None:\n # return Response({'status': '400', 'code': 'E_REQUEST_NOT_FOUND',\n # 'detail': code['E_REQUEST_NOT_FOUND']}, status=400)\n # Delete\n current_connection.delete()\n #reverse_connection.delete()\n # if every thing ok\n return Response({'status': '200', 'code': 'OK_UNFRIEND',\n 'detail': code['OK_UNFRIEND']}, status=200)", "def friends(request):\n return friendslist(request, request.session['id'])", "def user(self, id):\n json = self.skype.conn(\"POST\", \"{0}/batch/profiles\".format(SkypeConnection.API_PROFILE),\n auth=SkypeConnection.Auth.SkypeToken, json={\"usernames\": [id]}).json()\n if json and \"status\" not in json[0]:\n return self.merge(SkypeUser.fromRaw(self.skype, json[0]))\n else:\n return None", "def put(self, id):\r\n try:\r\n self.valid_args()\r\n existing = db.session.query(self.__class__).get(id)\r\n if existing is None:\r\n raise NotFound\r\n getattr(require, 
self.__class__.__name__.lower()).update(existing)\r\n data = json.loads(request.data)\r\n # may be missing the id as we allow partial updates\r\n data['id'] = id\r\n # Clean HATEOAS args\r\n data = self.hateoas.remove_links(data)\r\n inst = self.__class__(**data)\r\n db.session.merge(inst)\r\n db.session.commit()\r\n self._refresh_cache(inst)\r\n return Response(json.dumps(inst.dictize()), 200,\r\n mimetype='application/json')\r\n except IntegrityError:\r\n db.session.rollback()\r\n raise\r\n except Exception as e:\r\n return error.format_exception(\r\n e,\r\n target=self.__class__.__name__.lower(),\r\n action='PUT')", "def unfollow(self,id):\n # DELETE /followings/$id\n debugMain('unfollow')\n resource = '/followings/%s'%id\n requestUrl = self.apiRootUrls[0] + resource\n debugRequest('unfollowing: %s'%requestUrl)\n r = self.session.delete(requestUrl)\n \n debugDetail('request headers:')\n debugJson(r.request.headers)\n debugDetail()\n debugDetail(' -- -- -- --')\n debugDetail()\n debugDetail('response headers:')\n debugJson(r.headers)\n debugDetail()\n \n if r.status_code is not 200:\n debugError('failed to unfollow.')\n debugDetail()\n return False\n return True", "def friendship_reject(request, friendship_request_id):\n #if request.method == 'POST':\n #f_request = get_object_or_404(request.user.friendship_requests_received,id=friendship_request_id)\n f_request = FriendshipRequest.objects.get(from_user=friendship_request_id, to_user = request.user)\n from_user = request.user\n f_request.reject()\n return render(request , 'reload_page.html')\n #return render(request,'friendship/template_ags/friend_requests.html', {'from_user':from_user})", "def friendship_accept(request, friendship_request_id):\n #if request.method == 'POST':\n #id1 = get_object_or_404(request.user.friendship_requests_sent,id=friendship_request_id)\n f_request = FriendshipRequest.objects.get(from_user=friendship_request_id, to_user = request.user)\n from_user = request.user\n f_request.accept()\n return render (request , 'reload_page.html')\n #return render(request,'friendship/template_ags/friend_requests.html', {'from_user':from_user})", "def get(self, id: str, privileges: 'Optional[List[str]]' = None) -> 'Optional[User]':\n return self._get(schema=UserSchema(), id=id, privileges=privileges)", "def getFollowers(self,id=None,**kwargs):\n # GET /followers [/$id]\n debugMain('getFollowers')\n if id is None:\n return self._genericGet('/followers',**kwargs)\n else:\n return self._genericGet('/followers/%s'%id,**kwargs)", "def patch(self, id=None):\n if id:\n boat2Depart = test4ValidEntity(id)\n if boat2Depart == None:\n self.response.set_status(404)\n else:\n requestBody = json.loads(self.request.body)\n query = Slip.query(Slip.number == requestBody['number'])\n result = query.fetch(limit = 1)\n for match in result:\n if match.current_boat == boat2Depart.id and match.number == requestBody['number']:\n boat2Depart.at_sea = True\n boat2Depart.put()\n match.current_boat = None\n match.arrival_date = None\n match.departure_date = requestBody['departure_date']\n match.departed_boat = boat2Depart.id\n match.put()\n slip_dict = match.to_dict()\n del slip_dict['departure_history']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))\n else:\n self.response.set_status(400)", "def get(self, id):\n activity = Activity().get(id)\n if not activity:\n abort(404, \"Activity not found\")\n return activity", "def put(self, id):\n data = flask.request.json\n user_dao.update_user(id, data)\n 
return None, 204", "def get_pending_friendships(self):\n url = 'friendships/pending/'\n return self.send_request(url)", "def get_friends(user_id, fields=\"\"):\r\n assert isinstance(user_id, int), \"user_id must be positive integer\"\r\n assert isinstance(fields, str), \"fields must be string\"\r\n assert user_id > 0, \"user_id must be positive integer\"\r\n query = f\"{domain}/friends.get?user_id={user_id}&fields={fields}&access_token={access_token}&v={v}\"\r\n response = requests.get(query)\r\n return response.json()", "def delete(self, id):\n userDao.delete(id)\n return \"\", 204", "def get_friends(user_id, fields):\n assert isinstance(user_id, int), \"user_id must be positive integer\"\n assert isinstance(fields, str), \"fields must be string\"\n assert user_id > 0, \"user_id must be positive integer\"\n import requests\n domain = \"https://api.vk.com/method\"\n access_token = '1efb9991613d1e0c7597cae85db190f37bbda497579e92b05af4352bc694c66fd3883d0ff1b875b53a98d'\n user_id = user_id\n\n query_params = {\n 'domain': domain,\n 'access_token': access_token,\n 'user_id': user_id,\n 'fields': fields\n }\n\n query = \"{domain}/friends.get?access_token={access_token}&user_id={user_id}&fields={fields}&v=5.53\".format(\n **query_params)\n response = requests.get(query)\n friends_list = response.json()['response']['items']\n return friends_list", "def addfriend(self, second_user_id):\n second_user = User.objects.get(id=second_user_id)\n new_friendship = Friendship.objects.create(friend_user=self, friend=second_user.gameplanuser)\n new_friendship.save()", "def api_delete_feedback(request, id):\n\n close_old_connections()\n \n # Not marking it as served if it isn't even ready yet.\n if not request.user.is_authenticated:\n return HttpResponseForbidden(\"You're not authenticated.\")\n \n # Delete the feedback.\n Feedback.objects.get(id=id).delete()\n\n close_old_connections()\n \n return HttpResponse('Deleted.')", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()", "def get_a_party(id):\n party_retrieved = Party.get_party_by_id(id=id)\n if party_retrieved:\n return make_response(jsonify({\n \"status\": 200,\n \"data\": party_retrieved\n }))\n return make_response(jsonify({\n \"status\": 404,\n \"data\": \"No party found with that id\"\n }), 404)", "def delete(self, id):\n try:\n self.gridfs.delete(ObjectId(id))\n except Exception, e:\n print e\n raise e", "def create_friend(user_id, friend_user_id):\n\n friend = User_Friend(user_id=user_id, friend_user_id=friend_user_id)\n\n db.session.add(friend)\n db.session.commit()\n\n return friend", "def put(self,id):\r\n data = request.json\r\n return update(id=id,data=data)" ]
[ "0.6267513", "0.60474205", "0.5893467", "0.57769", "0.57769", "0.57769", "0.55159366", "0.5514679", "0.5414053", "0.53229433", "0.5308454", "0.52603304", "0.52275467", "0.5149211", "0.51255256", "0.51151985", "0.51151985", "0.5048067", "0.49942237", "0.49900073", "0.49625716", "0.492907", "0.49201536", "0.49102953", "0.48963603", "0.48825768", "0.48816967", "0.4872865", "0.48643848", "0.48641998", "0.4863545", "0.48626664", "0.48613834", "0.48395392", "0.48217735", "0.48197234", "0.48176914", "0.4809314", "0.48065773", "0.47905967", "0.47872663", "0.47813284", "0.4764002", "0.47633642", "0.47629392", "0.47592115", "0.47423837", "0.47423306", "0.47284663", "0.46996325", "0.4695939", "0.46894372", "0.46515077", "0.4643246", "0.4634184", "0.46254942", "0.4615336", "0.45871457", "0.45870784", "0.45855093", "0.454459", "0.4531999", "0.4531999", "0.45305645", "0.45221788", "0.45214674", "0.4515882", "0.45131928", "0.45059854", "0.45029822", "0.44972205", "0.44896656", "0.44872183", "0.4486798", "0.4482179", "0.4460303", "0.44572508", "0.44534713", "0.44509578", "0.44453663", "0.44431263", "0.44386312", "0.44384614", "0.44384122", "0.4436312", "0.44287866", "0.44239157", "0.4418047", "0.44179803", "0.44150957", "0.4397471", "0.43941393", "0.43941393", "0.43941393", "0.43941393", "0.43941393", "0.4390404", "0.43871593", "0.43844885", "0.4381104" ]
0.74585414
0
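The snippets in the row above repeatedly reverse named routes such as 'profiles:my_requests' and 'profiles:my_friends'. For orientation, a minimal URLconf sketch for wiring views with the send_request(request, id) / accept_request(request, id) signatures into such a namespace is given below; the app layout, module path, and URL patterns are illustrative assumptions only.

    # urls.py of a hypothetical "profiles" app; view names mirror the
    # friend-request snippets above, the paths themselves are invented.
    from django.urls import path

    from . import views

    app_name = 'profiles'
    urlpatterns = [
        path('friend-request/send/<int:id>/', views.send_request, name='send_request'),
        path('friend-request/accept/<int:id>/', views.accept_request, name='accept_request'),
    ]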
sendEmails will send an email to every email address associated with a person
def sendEmails( receiverName, retainedCompany, companyName, emailList, senderName, senderEmail, emailPassword, senderTitle, senderCompany, senderCompanyHomePage, senderPhone, port=465, returnHTML = True ): for emailToTry in emailList: # change back the next line after testing time.sleep(np.random.uniform(5,15)) # I introduced this because I was being rate limited, and I want to see if this will help avoid that - it seems to help print(f'trying {emailToTry}') message = MIMEMultipart('alternative') message['Subject'] = f'Engineering Positions at {companyName}' # change this back when ready to send actual emails message['From'] = senderEmail message['To'] = emailToTry # note that this only affects the headers - it does not affect to whom the message gets sent to [text, html] = emailTextHTML(receiverName, retainedCompany, companyName, senderName, senderTitle, senderCompany, senderEmail, senderCompanyHomePage, senderPhone, returnHTML=returnHTML) part1 = MIMEText(text, 'plain') part2 = MIMEText(html, 'html') message.attach(part1) message.attach(part2) # create a secure SSL context context = ssl.create_default_context() # now loop over each email message and extract what we need: with smtplib.SMTP_SSL('smtp.gmail.com', port, context=context) as server: # Using with smtplib.SMTP_SSL() as server: makes sure that the connection is automatically closed at the end of the indented code block. If port is zero, or not specified, .SMTP_SSL() will use the standard port for SMTP over SSL (port 465). server.login(senderEmail, emailPassword) server.sendmail(senderEmail, emailToTry, message.as_string()) # the above line is how we actually change whom the message is sent to
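A minimal usage sketch for the sendEmails function above follows. Every concrete value (names, addresses, password, phone number) is a hypothetical placeholder, and the sketch assumes the function's own dependencies (emailTextHTML, numpy as np, time, ssl, smtplib, and the email.mime classes) are already imported in the module that defines it.

    # Hypothetical call; replace the placeholder values before real use.
    if __name__ == '__main__':
        sendEmails(
            receiverName='Jane',
            retainedCompany='Acme Search Partners',
            companyName='ExampleCorp',
            emailList=['jobs@example.com', 'careers@example.com'],
            senderName='John Doe',
            senderEmail='john.doe@example.com',
            emailPassword='app-specific-password',  # never hard-code a real password
            senderTitle='Technical Recruiter',
            senderCompany='Example Staffing',
            senderCompanyHomePage='https://example.com',
            senderPhone='555-0100',
            port=465,
            returnHTML=True,
        )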
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_email_users():\n\n # Get users emails\n users_emails = User.objects.exclude(\n Q(email='') |\n Q(email=None)\n ).values_list(\n 'email',\n flat=True\n )\n\n # Send email to each user\n # for email_user in users_emails:\n\n title = 'Se han calculado nuevos Hard Flag'\n msg = 'Actualmente se han agregado nuevos hard flag '\n msg += ' a la base de datos'\n\n email = EmailMessage(\n title,\n msg,\n to=users_emails\n )\n email.send()", "def send_emails(self):\n\n with open(self.emails_file) as fp:\n emails = fp.readlines()\n logging.debug('%s e-mail addresses are loaded from %s' % (len(emails), self.emails_file))\n\n emails = map(lambda email: email.strip(), emails)\n\n for i, email in enumerate(emails):\n try:\n self.send_email(email)\n except Exception as e:\n logging.exception('Can\\'t send e-mail to %s (number %s)!' % (email, i))\n else:\n logging.debug('E-mail was sent to %s (number %s)' % (email, i))\n\n sleep_time = self.timeout * (0.5 + random.random())\n time.sleep(sleep_time) # timeout\n\n logging.debug('Done!')", "def send_emails():\n\n cmd = \"sendmail -f git@dev.rtsoft.ru\"\n for msg in EMAIL_MESSAGES:\n for rec in RECIPIENTS:\n call(\"echo '%s' | %s %s\" % (msg, cmd, rec), None, True)", "def sendMail(listEmailsToSend, title, data):\n if isinstance(listEmailsToSend, str):\n listEmailsToSend = [listEmailsToSend]\n send_mail(\n f'{title}',\n f'{data}',\n settings.EMAIL_HOST_USER,\n listEmailsToSend,\n fail_silently=False,\n )", "def send_emails(emails, author, title):\n subject = 'New post by %s' % author.capitalize()\n message = '%s wrote a new post with the title: %s' % (author.capitalize(), title)\n print('Sending emails to ', emails)\n send_mails_count = send_mail(\n subject=subject,\n message=message,\n from_email=EMAIL_HOST_USER,\n recipient_list=emails\n )\n print('Successfully sent %s - letters' % send_mails_count)", "def _auto_email_send(self):\n records = self.search([('send_by', '=', 'mail')])\n\n for supplier in records:\n send_at = datetime.combine(fields.Date.today(),\n float_to_time(supplier.automatic_email_time, supplier.moment, supplier.tz)).astimezone(pytz.UTC).replace(tzinfo=None)\n if supplier.available_today and fields.Datetime.now() > send_at:\n lines = self.env['lunch.order'].search([('supplier_id', '=', supplier.id),\n ('state', '=', 'ordered'), ('date', '=', fields.Date.today())])\n\n if lines:\n order = {\n 'company_name': lines[0].company_id.name,\n 'currency_id': lines[0].currency_id.id,\n 'supplier_id': supplier.partner_id.id,\n 'supplier_name': supplier.name,\n 'email_from': supplier.responsible_id.email_formatted,\n }\n\n _lines = [{\n 'product': line.product_id.name,\n 'note': line.note,\n 'quantity': line.quantity,\n 'price': line.price,\n 'toppings': line.display_toppings,\n 'username': line.user_id.name,\n } for line in lines]\n\n order['amount_total'] = sum(line.price for line in lines)\n\n self.env.ref('lunch.lunch_order_mail_supplier').with_context(order=order, lines=_lines).send_mail(supplier.id)\n\n lines.action_confirm()", "def _send_bulk_mail(\n recipient_ids, sender_id, intent, email_subject, email_html_body,\n sender_email, sender_name, instance_id=None):\n _require_sender_id_is_valid(intent, sender_id)\n\n recipients_settings = user_services.get_users_settings(recipient_ids)\n recipient_emails = [user.email for user in recipients_settings]\n\n cleaned_html_body = html_cleaner.clean(email_html_body)\n if cleaned_html_body != email_html_body:\n log_new_error(\n 'Original email HTML body does not match cleaned HTML body:\\n'\n 
'Original:\\n%s\\n\\nCleaned:\\n%s\\n' %\n (email_html_body, cleaned_html_body))\n return\n\n raw_plaintext_body = cleaned_html_body.replace('<br/>', '\\n').replace(\n '<br>', '\\n').replace('<li>', '<li>- ').replace('</p><p>', '</p>\\n<p>')\n cleaned_plaintext_body = html_cleaner.strip_html_tags(raw_plaintext_body)\n\n def _send_bulk_mail_in_transaction(instance_id=None):\n \"\"\"Sends the emails in bulk to the recipients.\"\"\"\n sender_name_email = '%s <%s>' % (sender_name, sender_email)\n\n email_services.send_bulk_mail(\n sender_name_email, recipient_emails, email_subject,\n cleaned_plaintext_body, cleaned_html_body)\n\n if instance_id is None:\n instance_id = email_models.BulkEmailModel.get_new_id('')\n email_models.BulkEmailModel.create(\n instance_id, recipient_ids, sender_id, sender_name_email, intent,\n email_subject, cleaned_html_body, datetime.datetime.utcnow())\n\n transaction_services.run_in_transaction(\n _send_bulk_mail_in_transaction, instance_id)", "def send_mail(from_email, to_emails, subject, plain_body, html_body):\n\n # Implementation goes here\n # ...", "def exec(self): \r\n emails = self.args[0].split(',')\r\n for email in emails:\r\n send_mail(self.args[1], self.args[2], email)\r\n return_text = \"Sent Mail To :: \" + self.args[0] +\"\\n\" + self.args[1] + \":\\n\" + self.args[2]\r\n return return_text", "def mail_participants(self, template_type=\"join\"):\n addrs = [p.email for p in self.participants.all()] + [self.host.email]\n\n with mail.get_connection() as connection:\n with translation.override(self.language):\n for addr in addrs:\n email = MailTemplate.get_mail(\n type=template_type,\n context={\"event\": self},\n to_email=addr,\n connection=connection,\n )\n if email:\n email.send(fail_silently=True)\n\n self.mails_sent = True\n self.save()", "def send_emails(recipients: List[str], availability_text: str) -> None:\n for recipient in recipients:\n try:\n # Sending the output as an email\n port = 465 # For SSL\n smtp_server = \"smtp.gmail.com\"\n sender_email = \"scraper006@gmail.com\" # Enter your address\n receiver_email = recipient # Enter receiver address\n password = \"+Scraper006+\"\n\n message = f\"\"\"\\\n Subject: Time to buy!\n\n Current state of the availability: {availability_text.encode(\"utf-8\")}\n \"\"\"\n\n context = ssl.create_default_context()\n\n with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:\n server.login(sender_email, password)\n server.sendmail(sender_email, receiver_email, message)\n except Exception as e:\n print(f\"It looks like we could not send the email to {recipient}\")\n print(f\"Error message: {e}\")", "def send_test_email_for_bulk_emails(tester_id, email_subject, email_body):\n tester_name = user_services.get_username(tester_id)\n tester_email = user_services.get_email_from_user_id(tester_id)\n _send_email(\n tester_id, tester_id, feconf.BULK_EMAIL_INTENT_TEST,\n email_subject, email_body, tester_email, sender_name=tester_name)", "def send_assignee_emails(self):\n\n assignees = list(set([obj.assignee for obj in self.stalled_nf_issues])) # Assignees from New Features\n assignees.extend(list(set([obj.assignee for obj in self.stalled_st_issues]))) # Add assignees from Sub-tasks\n recipients = self.config.get(\"recipients\", \"emails\").split(\"\\n\") # [recipients] section in .ini file\n\n for assignee in assignees:\n assignee_issues = [] # List of IssueClass objects\n # Get all stalled New feature issues for this assignee\n for item in self.stalled_nf_issues + self.stalled_st_issues:\n if item.assignee == 
assignee:\n# if item.assignee == \"ashih\":\n assignee_issues.append(item)\n assignee_email = item.assignee_email\n \n if len(assignee_issues):\n html_table = '<table style=\"font-size:12px\">'\n html_table += self.make_time_in_status_rows(assignee_issues)\n html_table += '</table>' # Closing table tag\n #recipients.append(assignee_email)\n print \"Sending email to: %s\" % recipients\n self.send_email(recipients, html_table, assignee)", "def sendmail(sendername, senderemail, password, receivers, htmlfile, img, attach):\n import smtplib\n\n #Creating the email\n \n\n domain = senderemail.split('@')[1]\n if 'gmail' in domain.lower(): #Gmail SMTP\n smtpObj = smtplib.SMTP('smtp.gmail.com', 587)\n elif 'outlook' in domain.lower(): #Outlook SMTP\n smtpObj = smtplib.SMTP('smtp-mail.outlook.com', 587)\n elif 'yahoo' in domain.lower(): #Yahoo SMTP\n smtpObj = smtplib.SMTP('smtp.mail.yahoo.com', 587)\n else:\n print('Sorry I dont have your email SMTP setting.\\nBYE!')\n quit()\n\n smtpObj.starttls()\n try:\n smtpObj.login(senderemail, password)\n except smtplib.SMTPAuthenticationError:\n print('Authentication error!\\nWrong Email or Password.')\n quit()\n \n for user, email in receivers.items():\n msg = makeHTMLemail(sendername, senderemail, user, email, htmlfile, img, attach)\n smtpObj.send_message(msg)\n print('email sent to {}'.format(user))\n del msg\n smtpObj.quit()", "def send_email_to_assigned_user(recipients, from_email, domain='demo.django-crm.io', protocol='http'):\n account = Account.objects.filter(id=from_email).first()\n created_by = account.created_by\n\n blocked_domains = BlockedDomain.objects.values_list('domain', flat=True)\n blocked_emails = BlockedEmail.objects.values_list('email', flat=True)\n\n for user in recipients:\n recipients_list = []\n user = User.objects.filter(id=user, is_active=True).first()\n if user:\n if (user.email not in blocked_emails) and (user.email.split('@')[-1] not in blocked_domains):\n recipients_list.append(user.email)\n context = {}\n context[\"url\"] = protocol + '://' + domain + \\\n reverse('accounts:view_account', args=(account.id,))\n context[\"user\"] = user\n context[\"account\"] = account\n context[\"created_by\"] = created_by\n subject = 'Assigned a account for you.'\n html_content = render_to_string(\n 'assigned_to/account_assigned.html', context=context)\n\n msg = EmailMessage(\n subject,\n html_content,\n to=recipients_list\n )\n msg.content_subtype = \"html\"\n msg.send()", "def send_unsent_scheduled_emails():\n\n current_time = datetime.utcnow()\n email_medium = get_medium()\n to_send = Email.objects.filter(\n scheduled__lte=current_time,\n sent__isnull=True\n ).select_related(\n 'event'\n ).prefetch_related(\n 'recipients'\n )\n\n # Fetch the contexts of every event so that they may be rendered\n context_loader.load_contexts_and_renderers([e.event for e in to_send], [email_medium])\n\n emails = []\n for email in to_send:\n to_email_addresses = get_subscribed_email_addresses(email)\n if to_email_addresses:\n text_message, html_message = email.render(email_medium)\n message = create_email_message(\n to_emails=to_email_addresses,\n from_email=email.from_address or get_from_email_address(),\n subject=email.subject or extract_email_subject_from_html_content(html_message),\n text=text_message,\n html=html_message,\n )\n emails.append(message)\n\n connection = mail.get_connection()\n connection.send_messages(emails)\n to_send.update(sent=current_time)", "def send_email_to_admins(self, template_name, subject, **kw):\n \n mailer = 
self.app.module_map['mail']\n barcamp = self.barcamp\n new_user = self.user # active user\n for admin in self.barcamp.admin_users:\n print admin\n send_tos = [admin.email]\n kwargs = dict(\n new_user = new_user,\n user = admin,\n barcamp = barcamp,\n url = self.handler.url_for(\"barcamps.index\", slug = self.barcamp.slug, _full = True),\n notification_url = self.handler.url_for(\"barcamps.edit\", slug = self.barcamp.slug, _full = True)\n )\n kwargs.update(kw)\n payload = self.handler.render_lang(\"emails/%s.txt\" %template_name, **kwargs)\n mailer.mail(admin.email, subject, payload)", "def send_email(to_addresses, subject, messages):\n from_address = email_from\n to_list = []\n if from_address is None:\n from_address = settings.SERVER_EMAIL\n\n if isinstance(to_addresses, list) and isinstance(messages, list):\n\n if len(to_addresses) == len(messages):\n data = []\n for idx, message in enumerate(messages):\n if settings.DEBUG:\n data.append((subject, message, from_address,\n ['test@example.com',]))\n to_list.append('test@example.com')\n else:\n data.append((subject, message, from_address,\n [to_addresses[idx],]))\n to_list.append(to_addresses[idx])\n\n use_mass_email = True\n else:\n use_mass_email = False\n if settings.DEBUG:\n logger.debug('Overwriting the email: sending to @example.com.')\n # Overwrite sender address in debug mode\n to_addresses = ['test@example.com',]\n to_list.append('test@example.com')\n\n out = None\n if use_mass_email:\n try:\n out = send_mass_mail(tuple(data), fail_silently=False)\n except Exception as e:\n logger.error(('An error occurred when sending mass emails [%s]' %\n str(e)))\n else:\n if subject and messages and from_address:\n try:\n out = _send_mail(subject, messages, from_address, to_addresses,\n fail_silently=False)\n except Exception as e:\n logger.error(('An error occurred when sending email to %s, '\n 'with subject [%s]. 
Error = %s') % (\n str(to_addresses),\n subject,\n str(e)))\n\n return out, to_list", "def handle(self, *args, **options):\n\n candidates_with_email = [candidate for candidate in Candidate.objects.all()\n if candidate.contact_address and candidate.participating]\n\n\n print 'sending e-mails'\n conn = get_connection()\n for c in candidates_with_email:\n if c.should_send_reminder():\n\n print 'emailing', c\n # store timestamp for reminder email so that they don't get another one for <REMINDER_TIME_PERIOD> days\n c.last_reminder_sent = timezone.now()\n c.save()\n msg = make_email(c)\n conn.send_messages([msg])\n conn.close()", "def send_all(messages: List[Message], smtp_url: str) -> None:\n with smtplib.SMTP(smtp_url) as smtp:\n for message in messages:\n smtp.send_message(message.as_mime())", "def send_unsent_scheduled_emails(cls):\n\n # Get the emails that we need to send\n current_time = datetime.utcnow()\n email_medium = get_medium()\n to_send = Email.objects.filter(\n scheduled__lte=current_time,\n sent__isnull=True,\n num_tries__lt=settings.ENTITY_EMAILER_MAX_SEND_MESSAGE_TRIES\n ).select_related(\n 'event__source'\n ).prefetch_related(\n 'recipients'\n ).order_by(\n 'scheduled',\n 'id'\n )\n\n # Fetch the contexts of every event so that they may be rendered\n context_loader.load_contexts_and_renderers([e.event for e in to_send], [email_medium])\n\n # Keep track of what emails we will be sending\n emails_to_send = []\n\n # Loop over each email and generate the recipients, and message\n # and handle any exceptions that may occur\n for email in to_send:\n # Compute what email addresses we actually want to send this email to\n to_email_addresses = get_subscribed_email_addresses(email)\n\n # If there are no recipients we can just skip rendering\n # and mark the email as sent\n if not to_email_addresses:\n email.sent = current_time\n email.save(update_fields=['sent'])\n continue\n\n # If any exceptions occur we will catch the exception and store it as a reference\n # As well as fire off a signal with the error and mark the email as sent and errored\n try:\n # Render the email\n text_message, html_message = email.render(email_medium)\n\n # Create the email\n message = create_email_message(\n to_emails=to_email_addresses,\n from_email=email.from_address or get_from_email_address(),\n subject=email.subject or extract_email_subject_from_html_content(html_message),\n text=text_message,\n html=html_message,\n )\n\n # Fire the pre send signal\n pre_send.send(\n sender=sys.intern(email.event.source.name),\n email=email,\n event=email.event,\n context=email.event.context,\n message=message,\n )\n\n # Add the email to the list of emails that need to be sent\n emails_to_send.append({\n 'message': message,\n 'model': email,\n })\n except Exception:\n # Save the exception on the model\n cls.save_email_exception(email, traceback.format_exc())\n\n # Send all the emails that were generated properly\n with mail.get_connection() as connection:\n for email in emails_to_send:\n try:\n # Send mail\n connection.send_messages([email.get('message')])\n # Update the email model sent value\n email_model = email.get('model')\n email_model.sent = current_time\n email_model.save(update_fields=['sent'])\n except Exception as e:\n cls.save_email_exception(email.get('model'), e)", "def sendEmail(request, names):\n datas = ()\n i = 1\n for name in [name for name in names.split(',')]:\n # user1 = get_object_or_404(User, username='徐超伟')\n # print(user1.email)\n if name:\n # print(name)\n user = get_object_or_404(User, 
username__exact=name)\n if not user.email:\n request.session['res'] = '0'\n # print(res)\n return HttpResponseRedirect(reverse('catalog:all-borrowed'))\n\n message = (u'还书提示', u'你已经超出了还书期限,请尽快归还图书。',\n 'LocalLibrarySystem<670736258@qq.com>', [user.email])\n datas += (message,)\n\n res = send_mass_mail(datas, fail_silently=False,)\n # print(res)\n request.session['res'] = res\n return HttpResponseRedirect(reverse('catalog:all-borrowed'))", "def send_email(subject: str, to_email_list: list, body: str):\n from_email = settings.DEFAULT_FROM_EMAIL\n mailer(\n subject=subject,\n message=body,\n from_email=from_email,\n recipient_list=to_email_list,\n fail_silently=True\n )", "def send_mail(email):\n return email.send()", "def send_email(settings, excel):\n Email._set_email(settings, excel)\n Email._send_email_helper(settings, excel)", "def notify_students():\n time_now = datetime.datetime.now(get_localzone())\n emails_to_send = Email.objects.all()\n for email in emails_to_send:\n if email.assignment.date_assigned <= time_now:\n send_mail(subject=email.subject,\n message=email.message,\n recipient_list=Student.objects.filter(assignments=email.assignment),\n from_email=None,\n fail_silently=False)\n email.delete()", "def send_messages(self, email_messages):\n if not self.connection:\n self.open()\n\n for message in email_messages:\n self.connection.send_raw_email(\n source=message.from_email,\n destinations=message.recipients(),\n raw_message=message.message().as_string())", "def email_list(request):\n if not request.user.is_superuser:\n raise PermissionDenied\n emails = set()\n form = EmailSelectForm()\n subject = None\n message = None\n errors = []\n success = None\n if request.method == \"POST\":\n form = EmailSelectForm(request.POST)\n if form.is_valid():\n if \"send_email\" in request.POST:\n send = True\n else:\n send = False\n form, subject, message, success, errors = _send_emails(request, form, emails, send)\n return render(\n request,\n \"rr/email.html\",\n {\n \"object_list\": sorted(emails),\n \"form\": form,\n \"subject\": subject,\n \"message\": message,\n \"errors\": errors,\n \"success\": success,\n },\n )", "def main_email(name, total, answered, not_answered, declines, remaining):\n\n start = smtplib.SMTP(host=HOST, port=PORT)\n start.starttls()\n start.login(ADDRESS, PASSWORD)\n\n date = datetime.datetime.now()\n date_now = date.strftime(\"%m-%d-%Y\")\n\n print_list, email_dict = simple_contacts('contacts.txt')\n\n emails = get_emails(print_list, email_dict)\n\n message_template = read_template()\n\n for mail in emails:\n pretty_print(f\"Sending email to {mail}\", \"!\")\n msg = MIMEMultipart()\n\n message = message_template.substitute(PERSON_NAME=name, DATE=date_now, TOTAL_CALLED=total, ANSWERED=answered, NOT_ANSWERED=not_answered, DECLINES=declines, REMAINING=remaining)\n\n msg['From'] = ADDRESS\n msg['To'] = mail\n msg['Subject'] = f\"{name} - Calling Campaign Summary - {date_now}\"\n\n msg.attach(MIMEText(message, 'plain'))\n start.send_message(msg)\n pretty_print(f\"Mail sent to {mail}\", \"!\")\n\n del msg\n\n start.quit()", "def emailJobs(\n df, \n retainedCompany, \n senderName, \n defaultSenderEmail, \n emailPassword, \n senderTitle, \n senderCompany, \n senderCompanyHomePage, \n senderPhone, \n noContactCompanyListPickleFileName, \n port=465, \n returnHTML=True\n ):\n try:\n with open(noContactCompanyListPickleFileName, 'rb') as inputFile:\n noContactCompanyList = pickle.load(inputFile) \n except:\n noContactCompanyList = []\n\n for i in range(len(df)):\n companyName = 
df['Organization Name'][i]\n if companyName.lower() in noContactCompanyList:\n pass\n try:\n domainName = df['Domain'][i]\n jobsEmails = [prefix + '@' + domainName for prefix in ['jobs', 'careers']]\n # email all the jobs pages for that copmany\n sendEmails( \n 'guys', # addressing general company, so use 'guys' instead of individual name\n retainedCompany,\n companyName,\n jobsEmails,\n senderName,\n defaultSenderEmail,\n emailPassword,\n senderTitle,\n senderCompany,\n senderCompanyHomePage,\n senderPhone,\n port=port,\n returnHTML = returnHTML \n ) \n except:\n pass", "def send_membership_email(to_emails, title, body, receiver_names=None):\n send_email(\n to_emails,\n title,\n body,\n receiver_names=receiver_names,\n from_email=settings.MEMBERSHIP_EMAIL_HOST_USER,\n from_name='UWCC Membership',\n smtp_password=settings.MEMBERSHIP_EMAIL_HOST_PASSWORD,\n )", "def test_send_to_all(self):\r\n # Now we know we have pulled up the instructor dash's email view\r\n # (in the setUp method), we can test sending an email.\r\n\r\n test_email = {\r\n 'action': 'Send email',\r\n 'send_to': 'all',\r\n 'subject': 'test subject for all',\r\n 'message': 'test message for all'\r\n }\r\n # Post the email to the instructor dashboard API\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n self.assertEquals(len(mail.outbox), 1 + len(self.staff) + len(self.students))\r\n self.assertItemsEqual(\r\n [e.to[0] for e in mail.outbox],\r\n [self.instructor.email] + [s.email for s in self.staff] + [s.email for s in self.students]\r\n )", "def sendToAllSubscribers(self, message, subject):\n\n for destination in self.subscribers:\n if(self.log):\n logging.info(\"Sending \" + message + \" to \" + destination)\n\n self.sendEmail(destination, message, subject)", "def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))", "def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))", "def send_contact_notification():\n logging.info(\"Mail sending..\")\n notifications = Notification.query.filter_by(email_sent=False, user_notification=True).all()\n count = 0\n for notification in notifications:\n user_id = notification.user_id\n # fetch user mail from User service\n try:\n # print('request to:',f\"http://{os.environ.get('GOS_USER')}/user?id={user_id}\")\n resp = requests.get(f\"http://{os.environ.get('GOS_USER')}/user?id={user_id}\")\n if resp.status_code != 200:\n logging.error(f\"[{resp.status_code}] Mail task, User service replied with error {resp.json()}\")\n continue\n email = resp.json()['email']\n except Exception as e:\n # if user requests fails, we'll try to send email at next task trigger\n logging.error(e)\n continue\n if email is not None and email.strip() != '':\n # send email\n date = notification.date.strftime('%Y-%m-%d at %H:%M')\n template = env.get_template('./mail_notification.html')\n output = template.render(dest=resp.json(), date=date)\n pos_outcome = send_email(email, output)\n if pos_outcome:\n notification.email_sent = True\n db.session.commit()\n logging.info(f\"Email to {email} just sent\")\n count += 1\n else:\n logging.error(f\"Error while sending email to {email}\")\n\n logging.info(f'{count} email(s) sent')", "def process(self, send_now=False):\n\t\tfinal_recipients = self.final_recipients()\n\t\tqueue_separately = (final_recipients and self.queue_separately) or len(final_recipients) > 20\n\t\tif not 
(final_recipients + self.final_cc()):\n\t\t\treturn []\n\n\t\tqueue_data = self.as_dict(include_recipients=False)\n\t\tif not queue_data:\n\t\t\treturn []\n\n\t\tif not queue_separately:\n\t\t\trecipients = list(set(final_recipients + self.final_cc() + self.bcc))\n\t\t\tq = EmailQueue.new({**queue_data, **{\"recipients\": recipients}}, ignore_permissions=True)\n\t\t\tsend_now and q.send()\n\t\telse:\n\t\t\tif send_now and len(final_recipients) >= 1000:\n\t\t\t\t# force queueing if there are too many recipients to avoid timeouts\n\t\t\t\tsend_now = False\n\t\t\tfor recipients in frappe.utils.create_batch(final_recipients, 1000):\n\t\t\t\tfrappe.enqueue(\n\t\t\t\t\tself.send_emails,\n\t\t\t\t\tqueue_data=queue_data,\n\t\t\t\t\tfinal_recipients=recipients,\n\t\t\t\t\tjob_name=frappe.utils.get_job_name(\n\t\t\t\t\t\t\"send_bulk_emails_for\", self.reference_doctype, self.reference_name\n\t\t\t\t\t),\n\t\t\t\t\tnow=frappe.flags.in_test or send_now,\n\t\t\t\t\tqueue=\"long\",\n\t\t\t\t)", "def send_publishers_authors_email(subject, template_name, context=None):\n\n if context is None:\n context = {}\n\n qry = Q(groups__name='Publishers') | Q(groups__name='Editors')\n\n emails = auth_models.User.objects.filter(qry, is_active=True).distinct().values('email')\n to = [e['email'] for e in emails]\n\n send(subject, to, template_name, context)", "def email_process(recipient_list: List[Client]) -> None:\n\n if recipient_list:\n send_email(recipient_list)\n update_only_emailed_clients(recipient_list)\n remove_fully_contacted_clients()\n else:\n print(\"No emails were sent.\")", "def send_email(subject, message, recipient_list, from_email=None,\n fail_silently=False, connection=None):\n if not from_email:\n from_email = _s('SERVER_EMAIL') or _s('DEFAULT_FROM_EMAIL')\n try:\n subj = unicode(subject)\n except UnicodeDecodeError:\n subj = subject.decode('utf8')\n datatuple = [(subj, message, from_email, [recipient],) \\\n for recipient in recipient_list]\n send_mass_mail(datatuple)", "def send_emails_to_subscribers(creator_id, exploration_id, exploration_title):\n\n creator_name = user_services.get_username(creator_id)\n email_subject = ('%s has published a new exploration!' % creator_name)\n email_body_template = (\n 'Hi %s,<br>'\n '<br>'\n '%s has published a new exploration! 
You can play it here: '\n '<a href=\"https://www.oppia.org/explore/%s\">%s</a><br>'\n '<br>'\n 'Thanks, and happy learning!<br>'\n '<br>'\n 'Best wishes,<br>'\n '- The Oppia Team<br>'\n '<br>%s')\n\n if not feconf.CAN_SEND_EMAILS:\n log_new_error('This app cannot send emails to users.')\n return\n\n if not feconf.CAN_SEND_SUBSCRIPTION_EMAILS:\n log_new_error('This app cannot send subscription emails to users.')\n return\n\n recipient_list = subscription_services.get_all_subscribers_of_creator(\n creator_id)\n recipients_usernames = user_services.get_usernames(recipient_list)\n recipients_preferences = user_services.get_users_email_preferences(\n recipient_list)\n for index, username in enumerate(recipients_usernames):\n if recipients_preferences[index].can_receive_subscription_email:\n email_body = email_body_template % (\n username, creator_name, exploration_id,\n exploration_title, EMAIL_FOOTER.value)\n _send_email(\n recipient_list[index], feconf.SYSTEM_COMMITTER_ID,\n feconf.EMAIL_INTENT_SUBSCRIPTION_NOTIFICATION,\n email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)", "def send_user_query_email(\n sender_id, recipient_ids, email_subject, email_body, email_intent):\n bulk_email_model_id = email_models.BulkEmailModel.get_new_id('')\n sender_name = user_services.get_username(sender_id)\n sender_email = user_services.get_email_from_user_id(sender_id)\n _send_bulk_mail(\n recipient_ids, sender_id, email_intent, email_subject, email_body,\n sender_email, sender_name,\n instance_id=bulk_email_model_id)\n return bulk_email_model_id", "async def send_email_gmail(self, *, emails: List[EmailStr], username: str, generated_code: str):\n email_content = f\"\"\"\n <html>\n <body>\n <p>Hello {username}, Your email verification code is {generated_code}\n <br>Thanks for using our Todo Application.</p>\n </body>\n </html>\n \"\"\"\n message = email.message.Message()\n message[\"Subject\"] = 'Todo App Authentication'\n message[\"From\"] = EMAIL_ADDR\n\n message.add_header('Content-Type', 'text/html')\n message.set_payload(email_content)\n client = smtplib.SMTP('smtp.gmail.com: 587')\n client.starttls()\n\n # Login Credentials to send the mail.\n client.login(message[\"From\"], EMAIL_PWD)\n\n for user_email in emails:\n client.sendmail(message[\"From\"], user_email, message.as_string())\n print(f\"sending to {user_email}\")", "def send_batch(cls, subject, body, recipients, chunk_size=settings.MAILGUN_BATCH_CHUNK_SIZE):\n\n body, recipients = cls._recipient_override(body, recipients)\n responses = []\n\n recipients = iter(recipients)\n chunk = list(islice(recipients, chunk_size))\n while len(chunk) > 0:\n params = dict(\n to=chunk,\n subject=subject,\n text=body\n )\n params['recipient-variables'] = json.dumps({email: {} for email in chunk})\n responses.append(cls._mailgun_request(requests.post, 'messages', params))\n chunk = list(islice(recipients, chunk_size))\n\n return responses", "def email_list(to_list, template_path, from_address, context_dict):\n from django.core.mail import send_mail\n from django.template import loader, Context\n\n nodes = dict(\n (n.name, n)\n for n in loader.get_template(template_path).template\n if n.__class__.__name__ == \"BlockNode\"\n )\n\n context = Context(context_dict)\n\n def render_node(node, con):\n return nodes[node].render(con)\n\n for address in to_list:\n send_mail(\n render_node(\"subject\", context),\n render_node(\"plain\", context),\n from_address,\n [address],\n )", "def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending EMail to 
the configured email list\")", "def send_mass_mail(self, to, context={}, template=None, from_email=None, bcc=[],\n connection=None, attachments=[], headers={}, cc=[], reply_to=[],\n fail_silently=False, auth_user=None, auth_password=None):\n connection = connection or get_connection(\n username=auth_user,\n password=auth_password,\n fail_silently=fail_silently\n )\n messages = [\n self.get_email_message([recipient], context, template, from_email, bcc, connection, attachments, headers, cc, reply_to)\n for recipient in to\n ]\n return connection.send_messages(messages)", "def _send_mails(course_event, attendee, title,\n organisation, amount, is_test=False):\n\n if is_test:\n\n send_mail(\n '[GISMentors-kurzy] {} {}'.format(title, course_event.date),\n \"\"\"\n Kurz: {}\n Účastník: {}\n E-mail: {}\n Organizace: {}\n Celkem registrovaných účastníků: {}\n Celkem peněz (bez DPH): {}\n \"\"\".format(\n title,\n attendee.name,\n attendee.email,\n organisation,\n len(course_event.courseattendee_set.all()),\n course_event.suma_netto\n ),\n 'info@gismentors.cz',\n [settings.TEST_MAIL],\n fail_silently=True,\n )\n\n else:\n\n send_mail(\n '[GISMentors-kurzy] {} {}'.format(title, course_event.date),\n \"\"\"\n Kurz: {}\n Účastník: {}\n E-mail: {}\n Organizace: {}\n Celkem registrovaných účastníků: {}\n Celkem peněz (bez DPH): {}\n \"\"\".format(\n title,\n attendee.name,\n attendee.email,\n organisation,\n len(course_event.courseattendee_set.all()),\n course_event.suma_netto\n ),\n 'info@gismentors.cz',\n [settings.INFO_MAIL],\n fail_silently=True,\n )\n\n send_mail(\n '[GISMentors-kurzy] Potvrzení přihlášky',\n render_to_string('potvrzeni.txt', {\n 'name': attendee.name,\n \"title\": title,\n \"date\": course_event.date,\n \"amount\": int(amount)\n }),\n 'info@gismentors.cz',\n [attendee.email],\n fail_silently=True,\n )", "def send_mail(send_from, send_to, files=None, use_mailgun=False, subject='convert', html='convert', bcc=None):\n if bcc is None:\n bcc = []\n if files is None:\n files = []\n assert type(files)==list\n\n data = {\n 'fromname': 'Kindlebox',\n 'from': send_from,\n 'to[]': send_to,\n 'subject': subject,\n 'html': html,\n 'bcc[]': bcc,\n }\n\n if not use_mailgun:\n data['api_user'] = app.config['SENDGRID_USERNAME']\n data['api_key'] = app.config['SENDGRID_PASSWORD']\n\n post_files = {}\n\n if use_mailgun:\n for i, _file in enumerate(files):\n # `_file` is unicode, so encode to ASCII.\n filename = os.path.basename(_file).encode('ascii', 'ignore')\n post_files['attachment[{0}]'.format(i)] = (filename, open(_file, 'rb'))\n\n response = requests.post(MAILGUN_API_URL,\n auth=('api', app.config.get('MAILGUN_API_KEY')),\n data=data,\n files=post_files)\n else:\n for _file in files:\n # `_file` is unicode, so encode to ASCII.\n filename = os.path.basename(_file).encode('ascii', 'ignore')\n _file_key = 'files[{filename}]'.format(filename=filename)\n post_files[_file_key] = open(_file, 'rb')\n\n # If SendGrid fails, try Mailgun.\n response = requests.post(SENDGRID_API_URL, data=data, files=post_files)\n if response.status_code != 200:\n return send_mail(send_from, send_to, files, use_mailgun=True)\n\n return response.status_code, response.text", "def send_email_celery(\n subject: str, body: str, from_email: str, to_emails: List[str]\n) -> None:\n send_mail(\n subject, body, from_email, to_emails, fail_silently=False,\n )\n logger.info(\n f\"\"\"Email sent successfully via a Celery task\\n\n subject: {subject}\\n\n body: {body}\\n\n from_email: {from_email}\\n\n to_emails: 
{str(to_emails)}\"\"\"\n )", "def send(self):\n msg = MIMEText(self.body) # prepare body\n s = smtplib.SMTP(self.mail_server)\n self._connect_to_exchange(s)\n for receiver in iter(self.to_adress):\n if '@' not in receiver:\n receiver = '{rcv}@cbs.nl'.format(rcv=receiver)\n msg['Subject'] = self.subject\n msg['From'] = self.from_adress\n msg['To'] = receiver\n s.sendmail(self.from_adress, [receiver], msg.as_string())\n s.quit()", "def send_mass_mail(datatuple, fail_silently=False, auth_user=None,\n auth_password=None, connection=None):\n connection = connection or get_connection(username=auth_user,\n password=auth_password,\n fail_silently=fail_silently)\n messages = [\n EmailMessage(subject=subject, body=message, from_email=sender,\n to=[recipient])\n for subject, message, sender, recipient in datatuple]\n return connection.send_messages(messages)", "def send_individual_email(cls, subject, body, recipient):\n # Since .send_batch() returns a list, we need to return the first in the list\n responses = cls.send_batch(subject, body, [recipient])\n return responses[0]", "def send_message(senders, subject, body, receivers, priority=False, silent_receive=False, send_email=False):\n message = create.create_message(senderobj=senders, message=body,\n receivers=receivers, header=subject)\n successful = []\n status = []\n print \"starting\"\n for target in receivers:\n try:\n print \"Iterated\"\n if len(target.db.mail) >= MAX_MESSAGES and not priority:\n print \"Max mail!\"\n status.append(\"Mailbox of %s is full. Could not send message to this player!\" % target.name)\n continue\n target.db.mail.append([message, message.date_sent, False])\n except (TypeError, AttributeError):\n target.db.mail = [ [message, message.date_sent, False] ]\n if not silent_receive:\n target.msg(ALERT % \"You have new mail! Check it by typing: mail\")\n successful.append(target)\n if EMAIL and send_email:\n send_email_copy(message)\n return successful, status", "def send_email_to_assigned_user(recipients, lead_id, domain='demo.django-crm.io', protocol='http', source=''):\n lead = Lead.objects.get(id=lead_id)\n created_by = lead.created_by\n blocked_domains = BlockedDomain.objects.values_list('domain', flat=True)\n blocked_emails = BlockedEmail.objects.values_list('email', flat=True)\n for user in recipients:\n recipients_list = []\n user = User.objects.filter(id=user, is_active=True).first()\n if user:\n if (user.email not in blocked_emails) and (user.email.split('@')[-1] not in blocked_domains):\n recipients_list.append(user.email)\n context = {}\n context[\"url\"] = protocol + '://' + domain + \\\n reverse('leads:view_lead', args=(lead.id,))\n context[\"user\"] = user\n context[\"lead\"] = lead\n context[\"created_by\"] = created_by\n context[\"source\"] = source\n subject = 'Assigned a lead for you. 
'\n html_content = render_to_string(\n 'assigned_to/leads_assigned.html', context=context)\n msg = EmailMessage(\n subject,\n html_content,\n to=recipients_list\n )\n msg.content_subtype = \"html\"\n msg.send()", "def send_email(msg):\n common_send_email(subject=msg.subject, recipients=msg.recipients, html=msg.html)", "def send_video_links():\n email_list = Emails.query.filter_by(status=\"active\").all() \n print(\"Sending newsletters to \", len(email_list), \" users\")\n random_video = get_random_video_link()\n video_link = f\"https://www.youtube.com/watch?v={random_video[1]}\"\n\n for email in email_list:\n #send email to user\n try:\n send_single_email(email.email, video_link, random_video[0])\n except Exception as e:\n print(e)\n \n\n\n print(\"DEBUG- Emails send job finished \")\n return \"Success\"", "def sendEmail(body, subject, email=\"\"):\n dest = [\"micneeley14@gmail.com\", \"hunterreid49@gmail.com\"]\n if re.match(r\"\\w+@\\w+\\.\\w+\", email):\n if email not in dest:\n dest.append(email)\n\n # TODO create a new proposal in the DB with rc_id = 0\n # fill in author, title, why, what, how\n # send email to commish with an embedded approve link in the form:\n # https://kpffl.com/rc/approve/<ID>\n # that link will set the rc_id to the next largest item and make the page live\n\n print(dest, subject, body)\n message = Mail(\n from_email=\"michael@neeley.dev\",\n to_emails=dest,\n subject=subject,\n html_content=body,\n )\n try:\n sg = SendGridAPIClient(os.environ.get(\"SENDGRID_KEY\"))\n res = sg.send(message)\n except Exception as e:\n print(e, res)", "def _send_mail(self, receivers_list, subject, body):\n msg = MIMEText(body)\n msg['Subject'] = subject\n msg['From'] = self.from_address\n msg['To'] = ', '.join(receivers_list)\n\n s = None\n try:\n s = smtplib.SMTP(self.smtp_uri)\n s.sendmail(self.from_address, receivers_list, msg.as_string())\n except (socket.gaierror, smtplib.SMTPException) as e:\n raise PluginFailedException('Error communicating with SMTP server: %s' % str(e))\n finally:\n if s is not None:\n s.quit()", "def send(self):\n msg_sent = []\n subs = mongo.db.subscribers\n bill_extractor = ExtractBills()\n \n # Do not need the object ID\n same_interval = subs.find({\"interval\":self.interval}, {'_id':0})\n \n for each in same_interval:\n email = each['email']\n tags = each['search_tags']\n state = each['state']\n chamber = each['chamber']\n print(email, tags)\n\n msg_for_rcpnt = bill_extractor.getBill(state, chamber, tags)\n #all_candidates.append((email, msg_for_rcpnt))\n \n #try:\n # msg_body = \"hello world\"\n # msg_body = render_template('mail_card.html')\n # msg = Message(msg_body,\n # sender=\"mssshaown@gmail.com\",\n # recipients=email)\n # mail.send(msg) \n # msg_sent.append((email, \"Success\"))\n #except Exception as e:\n # msg_sent.append((email, str(e)))\n #return msg_sent\n return msg_for_rcpnt", "async def deliver(self, messages: EmailMessage | Iterable[EmailMessage]) -> None:", "def send_email(self):\n message = MIMEText(self.email_body, 'plain', 'utf-8')\n\n message['Subject'] = self.email_subject\n message['From'] = gmail_user\n message['To'] = ', '.join(self.recipients)\n\n try:\n server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n server.ehlo()\n\n server.login(gmail_user, gmail_password)\n\n server.sendmail(message['From'], self.recipients, message.as_string())\n\n server.close()\n\n print('Email sent!')\n except Exception as err:\n # TODO Write error to log file\n raise err", "def mail_send():\n report_file_path = (\n 
f'{os.path.abspath(\".\")}/{Common.get_config_value(\"report_location\")}'\n )\n with open(f\"{report_file_path}/subject\", \"rb\") as subject_handler:\n subject = pickle.load(subject_handler)\n with open(f\"{report_file_path}/{'recipient'}\", \"rb\") as recipient_handler:\n recipient = pickle.load(recipient_handler)\n report_file_path = (\n f\"{os.path.abspath('.')}/{Common.get_config_value('report_location')}\"\n )\n try:\n if os.path.isfile(f\"{report_file_path}/mail_report.html\"):\n os.popen(\n f\"ssh -i {Common.get_config_value('build_server_pemfile')} \"\n f\"-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no\"\n f\" root@{Common.get_config_value('build_server_hostname')}\"\n f\" {Common.get_config_value('mail_script_location')}/\"\n f\"{Common.get_config_value('mail_script_name')} \"\n f\"{subject} {recipient}\"\n )\n Common.logger.info(\"Mail send successfully\")\n except Exception as ex:\n Common.logger.warning(f\"Mail sent failed due to exception: {ex}\")", "def send_mass_messages(self, recipient_list, sender, message=\"\", subject=\"\"):\n try:\n for s in recipient_list:\n self.send_message(to=s, sender=sender, message=message, subject=subject)\n except TypeError:\n return -1\n return 1", "def send_reminders(self, send_reminders):\n\n self._send_reminders = send_reminders", "def send_notification(from_name, from_email, notification_type,\n recipients,email_recipients,title,body,url,email):\n # we need to turn this into a queryset of users, as MtM relationship mot\n # possible with Person as it has no pk values (due to being OnetoOne with\n # User)\n user_recipients=User.objects.filter(person__in=recipients)\n user_email_recipients=User.objects.filter(person__in=email_recipients)\n note = Notification(from_name=from_name, from_email=from_email,\n notification_type=notification_type, title=title, body=body,\n url=url, send_email=email)\n note.pre_save()\n for recipient in user_recipients:\n note.recipients.add(recipient)\n note.unread_recipients.add(recipient)\n for recipient in user_email_recipients:\n note.email_recipients.add(recipient)\n note.save()", "def send_mail(to_emails, from_email, subject,\r\n text_template='mail/message.txt',\r\n data={}):\r\n text_content = render_to_string(text_template, data)\r\n msg = EmailMultiAlternatives(subject, text_content, from_email, to_emails)\r\n msg.send()", "def send_messages(self, email_messages):\n from post_office.mail import create\n from post_office.utils import create_attachments\n\n if not email_messages:\n return\n\n for email_message in email_messages:\n subject = email_message.subject\n from_email = email_message.from_email\n headers = email_message.extra_headers\n message = email_message.message()\n\n # Look for first 'text/plain' and 'text/html' alternative in email\n plaintext_body = html_body = ''\n for part in message.walk():\n if part.get_content_type() == 'text/plain':\n plaintext_body = part.get_payload()\n if html_body:\n break\n if part.get_content_type() == 'text/html':\n html_body = part.get_payload()\n if plaintext_body:\n break\n\n attachment_files = {}\n for attachment in email_message.attachments:\n if isinstance(attachment, MIMEBase):\n attachment_files[attachment.get_filename()] = {\n 'file': ContentFile(attachment.get_payload()),\n 'mimetype': attachment.get_content_type(),\n 'headers': OrderedDict(attachment.items()),\n }\n else:\n attachment_files[attachment[0]] = ContentFile(attachment[1])\n recipients = filter_blacklisted_recipients(email_message.to)\n cc = 
filter_blacklisted_recipients(email_message.cc)\n bcc = filter_blacklisted_recipients(email_message.bcc)\n if not len(recipients + cc + bcc):\n continue\n email = create(sender=from_email,\n recipients=recipients,\n cc=cc,\n bcc=bcc,\n subject=subject,\n message=plaintext_body,\n html_message=html_body,\n headers=headers)\n\n if attachment_files:\n attachments = create_attachments(attachment_files)\n\n email.attachments.add(*attachments)\n\n if get_default_priority() == 'now':\n email.dispatch()", "def send(self):\n log.debug('send {} messages'.format(len(self.messages)))\n smtp = self._connect_smtp()\n if smtp is not None:\n for msg in self.messages:\n #TODO: There could be any exception in here somewhere\n log.debug('message: \\n\\r{}'.format(msg.as_string()))\n try:\n smtp.sendmail(msg['From'], msg['To'], msg.as_string())\n except smtplib.SMTPRecipientsRefused as err:\n log.warn('Recipient refused for following message: \\n\\r{}'.format(msg.as_string()))\n log.warn(err)\n except smtplib.SMTPException as err:\n log.critical('something went wrong with sending message: \\n\\r{}'.format(msg.as_string()))\n log.critical(err)\n smtp.quit()\n else:\n log.warning('emails did not get sent because of exception in connection')", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def send_email(email: str, name: str, message, db: Session):\n msg = MIMEText(message)\n msg[\"Subject\"] = name\n msg[\"From\"] = \"info@reach.com\"\n msg[\"To\"] = email\n with smtplib.SMTP(host=\"localhost\", port=8025) as s:\n try:\n s.sendmail(msg[\"From\"], [email], msg.as_string())\n logger.info(\"Recipient reached at {}\".format(email))\n except smtplib.SMTPRecipientsRefused:\n logger.error(\"Recipient refused at {}\".format(email))\n raise\n mark_person_emailed(db, email)", "def send_email(subject, sender, recipients, html_body):\n\n try:\n # Create a new SendGrid Mail object with the arguments given\n message = Mail(\n from_email=sender,\n to_emails=recipients,\n subject=subject,\n html_content=html_body)\n\n # We prepare a new Thread here to send the email in the background. 
This takes in the send_async_email\n # function as its target and runs the function with the parameters passed through args.\n Thread(target=send_async_email,\n args=(current_app._get_current_object(), message)).start()\n\n except Exception as e:\n print(e)\n # FIXME: should do some type of error handling here or allow error to bubble up", "def send_email(request):\n # send emails and return some manner of success response\n send(**request.params)\n return {'success': 'mail sent!'}", "def test_send_mass_html_mail_to_send_no_email(self, send_mass_html_mail__mock: Mock):\n self.family.guests.add(\n Guest(name=\"Pierre\", email=None, phone=\"0123456789\", female=False, family=self.family),\n bulk=False\n )\n events = Event.objects.filter(pk=self.event.pk)\n\n admin.EventAdmin.send_mail(Mock(), None, events)\n\n recipient = list(send_mass_html_mail__mock.call_args[0][0])[0][4]\n self.assertListEqual(list(recipient),\n [\"Françoise <valid@example.com>\", \"Jean <valid@example.com>\"])", "def send_shared_objects_bulk(objs, current_user, user, send_mail, auth_token=None):\n if not send_mail or not objs:\n return\n\n from urlparse import urljoin\n from flask import request, render_template\n from solariat.mail import Message\n from solariat_bottle.app import MAIL_SENDER as mail\n\n #_subject = get_var('SHARE_%s_SUBJECT_BULK' % msg_type.upper())\n #_body = get_var('SHARE_%s_BODY_BULK' % msg_type.upper())\n\n # gets object type\n obj_type = objs[0].__class__.__name__\n temp = obj_type\n if obj_type.endswith('Channel'):\n temp = 'Channel'\n if obj_type.endswith('ContactLabel'):\n temp = 'Contact Label'\n if obj_type == 'SmartTagChannel':\n temp = 'Tag'\n obj_type = temp\n\n objs_for_email = []\n for obj in objs:\n path = get_object_link(obj)\n if auth_token:\n link = \"%susers/%s/password?auth_token=%s&next=/%s\" % (\n request.host_url, user.email, auth_token.digest, path)\n else:\n link = urljoin(request.host_url, path)\n objs_for_email.append((\n getattr(obj, 'title', None) or unicode(obj),\n link\n ))\n\n msg = Message(subject=\"New shared %s\" % obj_type)\n msg.recipients = [user.email]\n msg.body = render_template(\n \"mail/send_shared_objects_bulk.html\",\n objs=objs_for_email,\n username=current_user.email,\n object_type=obj_type\n )\n #LOGGER.debug(msg.body)\n mail.send(msg)", "def send(self):\n\n if not self.from_email:\n self.from_email = getattr(settings, 'DRIP_FROM_EMAIL', settings.DEFAULT_FROM_EMAIL)\n MessageClass = message_class_for(self.drip_model.message_class)\n\n count = 0\n for user in self.get_queryset():\n message_instance = MessageClass(self, user)\n try:\n result = message_instance.message.send()\n if result:\n SentDrip.objects.create(\n drip=self.drip_model,\n user=user,\n from_email=self.from_email,\n from_email_name=self.from_email_name,\n subject=message_instance.subject,\n # body=message_instance.body\n )\n count += 1\n except Exception as e:\n logging.error(\"Failed to send drip %s to user %s: %s\" % (self.drip_model.id, user, e))\n\n return count", "def mailing_all_users(request):\n form = MalingUsersForm(request.POST or None)\n if request.method == 'POST':\n mail_users(form, get_all_users_email_list())\n messages.success(request, 'Successfully mailed ')\n return redirect(reverse('mailing_all_users'))\n return render(\n request,\n 'admin_panel/mailing/mailing_all_users.html',\n {\n 'form': form,\n }\n )", "def send(self, from_email, to_list, cc_list, bcc_list, subject, text):\n\n to_address_list = []\n\n if len(to_list) > 0:\n for to_address in to_list:\n 
to_address_list.append(\n {\n \"email\": to_address,\n \"type\": \"to\"\n }\n )\n\n if len(cc_list) > 0:\n for cc_address in cc_list:\n to_address_list.append(\n {\n \"email\": cc_address,\n \"type\": \"cc\"\n }\n )\n\n if len(bcc_list) > 0:\n for bcc_address in bcc_list:\n to_address_list.append(\n {\n \"email\": bcc_address,\n \"type\": \"bcc\"\n }\n )\n\n sendgrid_data = {\n \"key\": sendgrid_key,\n \"message\": {\n \"text\": text,\n \"subject\": subject,\n \"from_email\": from_email,\n \"to\": to_address_list\n },\n \"async\": False,\n }\n\n response = requests.post(\n sendgrid_url,\n data=json.dumps(sendgrid_data)\n )\n\n if response.ok:\n status = 0\n else:\n status = 1\n\n message = str(response.content)\n\n return status, message", "def send(self, smtp_server_instance: SMTPServer = None):\n\t\tif not self.can_send_now():\n\t\t\treturn\n\n\t\twith SendMailContext(self, smtp_server_instance) as ctx:\n\t\t\tmessage = None\n\t\t\tfor recipient in self.recipients:\n\t\t\t\tif recipient.is_mail_sent():\n\t\t\t\t\tcontinue\n\n\t\t\t\tmessage = ctx.build_message(recipient.recipient)\n\t\t\t\tif method := get_hook_method(\"override_email_send\"):\n\t\t\t\t\tmethod(self, self.sender, recipient.recipient, message)\n\t\t\t\telse:\n\t\t\t\t\tif not frappe.flags.in_test:\n\t\t\t\t\t\tctx.smtp_server.session.sendmail(\n\t\t\t\t\t\t\tfrom_addr=self.sender, to_addrs=recipient.recipient, msg=message\n\t\t\t\t\t\t)\n\n\t\t\t\tctx.update_recipient_status_to_sent(recipient)\n\n\t\t\tif frappe.flags.in_test:\n\t\t\t\tfrappe.flags.sent_mail = message\n\t\t\t\treturn\n\n\t\t\tif ctx.email_account_doc.append_emails_to_sent_folder:\n\t\t\t\tctx.email_account_doc.append_email_to_sent_folder(message)", "def test_send_email(self):\n\t\trecipient = \"\"\n\t\tself.email.send_email(self.subject, recipient, self.content)", "def test_send_mass_html_mail_to_send(self, send_mass_html_mail__mock: Mock):\n events = Event.objects.filter(pk=self.event.pk)\n\n admin.EventAdmin.send_mail(Mock(), None, events)\n\n self.assertIsInstance(send_mass_html_mail__mock.call_args[0], Iterable)\n to_send = list(send_mass_html_mail__mock.call_args[0][0])\n expected_subject = \"Save the date\"\n\n self.assertEqual(len(to_send), 1)\n to_send = list(to_send[0])\n self.assertEqual(len(to_send), 5)\n subject, text, html, from_email, recipient = to_send\n self.assertEqual(subject, expected_subject)\n self.assertEqual(text, self.expected_text)\n self.assertEqual(html, self.expected_html)\n self.assertIsNone(from_email)\n self.assertListEqual(list(recipient),\n [\"Françoise <valid@example.com>\", \"Jean <valid@example.com>\"])", "def send_main_email(self):\n\n print \"Sending main email\"\n \n # Make an html table to be body of email\n html_table = '<table style=\"font-size:12px\">'\n html_table += self.make_nfs_changed_rows(\"sprint\") # New features only\n html_table += self.make_nfs_changed_rows(\"status\") # New features only\n html_table += self.make_time_in_status_rows(self.stalled_nf_issues) \n html_table += self.make_time_in_status_rows(self.stalled_st_issues) # Sub-tasks\n html_table += '</table>' # Closing table tag\n\n recipients = self.config.get(\"recipients\", \"emails\").split(\"\\n\") # [recipients] section in .ini file\n \n# emails = self.config.items('recipients')\n# for key, email in emails:\n# recipients = ', '.join(self.config.items('recipients'))\n \n print recipients\n# sys.exit()\n self.send_email(recipients, html_table)", "def create_user_emails_sheets_all():\n input_range = \"Sheet1\"\n\n sheetsService = build(\n 
'sheets', 'v4', credentials=credentials, cache_discovery=False)\n\n # Empty sheet\n sheetsService.spreadsheets().values().clear(\n spreadsheetId=spreadsheet_id, range=input_range).execute()\n\n # Get all basic users' email\n users = list(User.objects.filter(is_active=True,\n role=\"BU\").values('email', 'username', 'role', 'profile_id'))\n\n # Check their consent status and update accordingly\n subscribers = []\n for user in users:\n if user['profile_id'] != None:\n profile = SubscriberProfile.objects.get(id=user['profile_id'])\n status = profile.consent_status\n if status == \"IMPLIED\" and profile.expired_at < date.today():\n profile.consent_status = \"EXPIRED\"\n profile.save()\n elif status == \"EXPRESSED\" or status == \"IMPLIED\":\n user.pop('profile_id')\n subscribers.append(user)\n # Get newsletter only users' email\n nlusers = list(NLUser.objects.all())\n\n # Check their consent status and update accordingly\n for nluser in nlusers:\n status = nluser.consent_status\n if status == \"IMPLIED\" and nluser.expired_at < date.today():\n nluser.consent_status = \"EXPIRED\"\n nluser.save()\n elif status == \"EXPRESSED\" or status == \"IMPLIED\":\n subscribers.append({\"email\": nluser.email, \"username\": nluser.first_name,\n \"role\": \"NL\"})\n\n # Get all basic users' email\n restaurant_owners = list(\n User.objects.filter(is_active=True, role=\"RO\").values('email', 'username', 'role'))\n\n # Append user info into values (only users that has email verified)\n values = [['Email', 'Username', 'Role']]\n for subscriber in subscribers:\n values.append(list(subscriber.values()))\n for restaurant_owner in restaurant_owners:\n values.append(list(restaurant_owner.values()))\n\n body = {\n 'values': values\n }\n\n try:\n sheetsService.spreadsheets().values().update(spreadsheetId=spreadsheet_id, range=input_range,\n valueInputOption=\"USER_ENTERED\", body=body).execute()\n except HttpError as error:\n print('An error occurred: %s' % error)\n raise error\n # return None\n\n # Automatically format the sheets\n requests = [\n {\n \"autoResizeDimensions\": {\n \"dimensions\": {\n \"sheetId\": 0,\n \"dimension\": \"COLUMNS\",\n \"startIndex\": 0,\n \"endIndex\": 3\n }\n }\n },\n {\n \"repeatCell\": {\n \"range\": {\n \"sheetId\": 0,\n \"startRowIndex\": 0,\n \"endRowIndex\": 1,\n \"startColumnIndex\": 0,\n \"endColumnIndex\": 3\n },\n \"cell\": {\n \"userEnteredFormat\": {\n \"textFormat\": {\n \"bold\": True\n }\n }\n },\n \"fields\": \"userEnteredFormat(textFormat)\"\n }\n }\n ]\n\n body = {\n 'requests': requests\n }\n\n try:\n sheetsService.spreadsheets().batchUpdate(\n spreadsheetId=spreadsheet_id, body=body).execute()\n except HttpError as error:\n print('An error occurred: %s' % error)\n raise error", "def send(\r\n self,\r\n to = '', #list of email addresses - Required\r\n subject='None', #message's subject - Required\r\n message_text='None', #message body in plain text - Required\r\n message_html=None, #message body in html - Optional\r\n attachments=None, #list of truples [(filename, file_contents)] - Optional\r\n cc = None, #list of email addresses to CC message to\r\n bcc = None, #list of email addresses to BCC message to\r\n reply_to = None, #single email address to have replies send to\r\n ): \r\n if not isinstance(to, list):\r\n to = [to]\r\n\r\n try:\r\n if self.settings.private.email_server == 'gae':\r\n from google.appengine.api import mail\r\n #untested on GAE, but in theory should work\r\n #http://code.google.com/appengine/docs/python/mail/emailmessagefields.html\r\n 
mail.send_mail(sender=self.settings.private.email_sender, to=to,\r\n subject=subject, body=message_text, html=message_html, attachments=attachments, cc = cc,\r\n bcc = bcc, reply_to = reply_to)\r\n else:\r\n\r\n msg = self.buildMIME(sender = self.settings.private.email_sender,\r\n recipients = to, subject = subject,\r\n message_text = message_text, message_html = message_html,\r\n attachments = attachments,\r\n cc = cc, bcc = bcc, reply_to = reply_to)\r\n #print 'message'+msg.as_string()\r\n #Build MIME body\r\n (host, port) = self.settings.mail.server.split(':')\r\n\r\n if self.settings.mail.ssl: \r\n try:\r\n server = smtplib.SMTP_SSL(host, port)\r\n except:\r\n # ERROR python <= 2.6\r\n pass\r\n else:\r\n server = smtplib.SMTP(host, port)\r\n\r\n if self.settings.mail.login:\r\n try:\r\n server.ehlo_or_helo_if_needed()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in HELO\")\r\n\r\n if self.settings.mail.use_tls:\r\n try:\r\n server.starttls()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in STARTTLS\")\r\n except SMTPException:\r\n logger.info(\"Server does not support TLS\")\r\n\r\n except RuntimeError:\r\n logger.info(\"Python version does not support TLS (<= 2.6?)\")\r\n\r\n try:\r\n server.ehlo_or_helo_if_needed()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in HELO\")\r\n\r\n (username, password) = self.settings.mail.login.split(':')\r\n try:\r\n server.login(username, password)\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in LOGIN\")\r\n\r\n except SMTPAuthenticationError:\r\n logger.info(\"Invalid username/password combination\")\r\n\r\n except SMTPException:\r\n logger.info(\"SMTP error in login\")\r\n\r\n try:\r\n server.sendmail(self.settings.private.email_sender, to, msg.as_string())\r\n server.quit()\r\n\r\n except SMTPRecipientsRefused:\r\n logger.info(\"All recipients were refused. 
Nobody got the mail.\")\r\n\r\n except SMTPHeloError:\r\n logger.info(\"The server didn't reply properly to the HELO greeting.\")\r\n\r\n except SMTPSenderRefused:\r\n logger.info(\"The server didn't accept the from_addr.\")\r\n\r\n except SMTPDataError:\r\n logger.info(\"The server replied with an unexpected error code (other than a refusal of a recipient).\")\r\n \r\n except Exception, e:\r\n return False\r\n return True", "def add_recipients(df, all_emails):\n user = df[\"sender\"].iloc[0] # ID of the user\n emails = all_emails[user]\n df[\"emails\"] = str(list(emails))\n df[\"emails\"] = df[\"emails\"].map(literal_eval)\n return df", "def send_email_week():\n\n cars_all = Car.objects.all()\n title_list = []\n today = now()\n for car in cars_all:\n if (today.day - car.created.day) > 7:\n new_car = car.title\n title_list.append(new_car)\n\n for item in Subscriber.objects.all():\n email_adress = item.email\n data = {\n 'email': email_adress,\n 'title': title_list,\n }\n email_body = render_to_string('main/email_add_ad.html', data)\n msg = EmailMultiAlternatives(subject='Обьявления машин', to=[email_adress, ])\n msg.attach_alternative(email_body, 'text/html')\n msg.send()", "def send_notifications():\n due_notifications = Notification.query.filter(Notification.delivery_date <= datetime.now(timezone.utc))\n for notification in due_notifications:\n send_notification.delay(notification.id)", "def get(self):\n app_id = app_identity.get_application_id()\n users = User.query(User.email != None)\n for user in users:\n games = Game.query(Game.user == user.key, Game.game_over == False)\n if games:\n subject = \"This is a reminder!\"\n body = \"Hello {}, you have some unfinished games.\".format(\n user.name)\n\n mail.send_mail('noreply@{}.appspot.com'.format(app_id),\n user.email, subject, body)", "def send_messages(self, email_messages): \n\t\tif not email_messages: \n\t\t\treturn \n\t\tself._lock.acquire() \n\t\ttry: \n\t\t\tnew_conn_created = self.open() \n\t\t\tif not self.connection: \n\t\t\t\t# We failed silently on open(). \n\t\t\t\t# Trying to send would be pointless. \n\t\t\t\treturn \n\n\t\t\tnum_sent = 0 \n\t\t\tfor message in email_messages: \n\t\t\t\tsent = self._send(message)\n\t\t\t\tif sent: \n\t\t\t\t\tnum_sent += 1 \n\t\t\tif new_conn_created: \n\t\t\t\tself.close()\n\t\tfinally: \n\t\t\tself._lock.release() \n\t\treturn num_sent", "def send_email(jobs):\n jobs = jobs\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n\n server.login(EMAIL, PASS)\n\n subject = f\"Job Scraper Results\"\n\n if jobs != \"Not working\":\n body = []\n job_ids = [\n jobs[x] for x in sorted(jobs.keys(), key=lambda x: jobs[x][0], reverse=True)\n ][:25]\n for jobID in job_ids:\n score, link, title, company, date_posted, location, full_text = jobID\n body.append(\n f\"({score}) {title} at {company} in {location} posted \\\n {date_posted[5:11]}\\n{link}\\n... 
{full_text[100:500]} ...\"\n )\n if len(body) == 0:\n body = body + (\"\\nNo results.\")\n body = \"\\n\\n\\n\".join(body)\n body = body.encode(\"ascii\", \"ignore\").decode(\"ascii\")\n msg = f\"Subject: {subject}\\n\\n{body}\"\n else:\n msg = f\"Subject: {subject} - {jobs}\\n\\n{jobs}\"\n\n msg = f\"From: {EMAIL}\\r\\nTo: {EMAIL}\\r\\n\" + msg\n\n server.sendmail(EMAIL, EMAIL, msg)\n\n timezone_ny = pytz.timezone(\"America/NEW_York\")\n datetime_ny = datetime.now(timezone_ny)\n print(f\"E-mail was sent at {datetime_ny.strftime('%H:%M')}.\\n\\n\")\n\n server.quit()", "def bulk_convert_events_to_emails():\n\n # Get the email medium\n email_medium = get_medium()\n\n # Get the default from email\n default_from_email = get_from_email_address()\n\n email_params_list = []\n\n # Find any unseen events and create unsent email objects\n for event, targets in email_medium.events_targets(seen=False, mark_seen=True):\n\n # Check the event's context for a from_address, otherwise fallback to default\n from_address = event.context.get('from_address') or default_from_email\n\n email_params_list.append(dict(\n event=event,\n from_address=from_address,\n recipients=targets\n ))\n\n # Bulk create the emails\n Email.objects.create_emails(email_params_list)", "def test_email():\n recipients = configs[\"email_to\"].split(\", \")\n email_body = test_email_content()\n if configs[\"smtp_ssl\"] == 1:\n server = smtplib.SMTP_SSL(configs[\"smtp_server\"])\n elif configs[\"smtp_tls\"] == 1:\n server = smtplib.SMTP(configs[\"smtp_server\"])\n server.starttls()\n else:\n server = smtplib.SMTP(configs[\"smtp_server\"])\n\n if configs[\"smtp_authentication\"] == 1:\n server.login(configs[\"username\"], configs[\"password\"])\n\n server.sendmail(configs[\"email_from\"], recipients, email_body)\n server.quit()", "def send_email( # pylint: disable=too-many-arguments\n recipients, subject, body, html_body=None, reply_to=None, swallow_errors=False\n):\n recipients = enforce_list(recipients)\n reply_to = enforce_list(reply_to or [])\n\n params = {\n \"from_email\": settings.DEFAULT_FROM_EMAIL,\n \"to\": recipients,\n \"subject\": subject,\n \"body\": body,\n \"reply_to\": enforce_list(reply_to or settings.DEFAULT_FROM_EMAIL),\n }\n if html_body:\n message = EmailMultiAlternatives(**params)\n message.attach_alternative(html_body, \"text/html\")\n else:\n message = EmailMessage(**params)\n\n logger.info(\"Sending email with subject %s\", subject)\n try:\n message.send()\n except Exception:\n logger.exception(\"Error sending email with subject %s\", subject)\n if not swallow_errors:\n raise", "def send_email(subject, sender, recipients, text_body, html_body):\n msg = Message(subject=subject, sender=sender, recipients=recipients)\n msg.body = text_body\n msg.html = html_body\n mail.send(msg)", "def _send_email_helper(settings, excel):\n try:\n server = smtplib.SMTP(settings.smtp_server, str(settings.smtp_port))\n server.starttls()\n server.login(settings.user,settings.password)\n dest = [str(settings.user), str(settings.dest_addr)]\n server.sendmail(settings.user, dest, Email._set_email(settings,excel).as_string())\n server.quit()\n\n FileHelper.archive(settings, excel)\n excel.clear_sheet()\n excel.gen_dates()\n Popups.email_sent()\n except Exception:\n print(\"Send email failed.\")", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def send_email(subject, sender, recipients, text_body, html_body):\n\t\tmsg = Message(subject, sender=sender, 
recipients=recipients)\n\t\tmsg.body = text_body\n\t\tmsg.html = html_body\n\t\tmail.send(msg)", "def send_mail(user, subject, message, from_email, recipient_list, fail_silently=False,\\\n auth_user=None, auth_password=None, connection=None, html_message=None):\n message = smart_text(gpg.sign(message))\n try:\n Profile.objects.get(user= user).pgp_gpg_public_key\n message = smart_text(gpg.encrypt(message))\n except:\n pass\n send_email_django(subject, message, from_email, recipient_list, fail_silently,\\\n auth_user, auth_password, connection, html_message)\n return True", "def generate_fake_emails(amount):\n extensions = ['com', 'net', 'org', 'gov']\n domains = [\"hotmail\", \"gmail\", \"aol\",\n \"mail\", \"mail\", \"yahoo\"]\n emails = []\n for _ in range(amount):\n domain = domains[random.randint(0, len(domains)-1)]\n extension = extensions[random.randint(0, len(extensions)-1)]\n user_name = random_str(random.randint(5, 10))\n email = ''.join([user_name, '@', domain, '.', extension])\n emails.append(Email(data=email))\n Email.objects.bulk_create(emails)", "def send_santas_list(santa, person, budget=None):\n if budget:\n msg = f'Your secret Santa for {person.name} with a budget of {budget}'\n else:\n msg = f'Your secret Santa for {person.name}'\n\n subject = 'Your Foggy Secret Santa'\n\n santas_helper = 'punnyfoggersanta@gmail.com'\n password = 'doubledragon1986'\n message = f\"\"\"From: {santas_helper}\\nTo: {santa.name}\\nSubject: {subject}\\n\\n{msg}\"\"\"\n\n try:\n print (1)\n server = smtplib.SMTP('smtp.gmail.com:587')\n server.ehlo()\n print(2)\n server.starttls()\n print(server)\n server.login(santas_helper, password)\n print(3)\n server.sendmail(santas_helper, santa.email, message)\n server.close()\n print(f'successfully sent the mail email to {santa.name}')\n\n except:\n print(\"failed to send mail\")" ]
[ "0.76834345", "0.7484118", "0.71810454", "0.7061762", "0.70294595", "0.69372624", "0.68464893", "0.6761992", "0.6630803", "0.6604708", "0.6564017", "0.65435404", "0.64834946", "0.64148235", "0.6410648", "0.6408025", "0.63622856", "0.63590986", "0.6358885", "0.6347821", "0.6289376", "0.62511283", "0.6243546", "0.62341577", "0.6202016", "0.6179673", "0.6177762", "0.61597645", "0.61568177", "0.6152813", "0.614577", "0.61446726", "0.6111692", "0.611021", "0.611021", "0.6096779", "0.60960346", "0.60916555", "0.6084207", "0.6076201", "0.60751283", "0.6069454", "0.6005451", "0.59925735", "0.59901273", "0.5977368", "0.5965938", "0.59520704", "0.5947068", "0.5930919", "0.5924688", "0.5924441", "0.5921183", "0.59027636", "0.5898048", "0.5897861", "0.5891981", "0.58412737", "0.58304", "0.5804138", "0.5802212", "0.5801188", "0.5795676", "0.5793847", "0.5786199", "0.57716835", "0.5766599", "0.5766358", "0.5762276", "0.5753588", "0.57440716", "0.5739256", "0.5736101", "0.5735016", "0.56999505", "0.56866753", "0.5683191", "0.56658757", "0.56551874", "0.56547457", "0.5654478", "0.5643581", "0.56425357", "0.5641747", "0.5637308", "0.5626513", "0.5622354", "0.5617215", "0.5611769", "0.56090486", "0.56021416", "0.5590713", "0.5589659", "0.55888504", "0.55884016", "0.55804193", "0.55759066", "0.5575268", "0.5570297", "0.55702895" ]
0.6047232
42
outLookSender is not utilized in this module, but the function was written in case we want to send from an Outlook account in the future
def outLookSender(receiverAddress, receiverName, retainedCompany, companyName, senderName, senderTitle, senderCompany, senderEmail, senderCompanyHomePage, senderPhone, returnHTML=False): subj = f'Engineers from {retainedCompany} Search' if returnHTML: [text, html] = emailTextHTML(receiverName, retainedCompany, companyName, senderName, senderTitle, senderCompany, senderEmail, senderCompanyHomePage, senderPhone, returnHTML=returnHTML) else: [text] = emailTextHTML(receiverName, retainedCompany, companyName, senderName, senderTitle, senderCompany, senderEmail, senderCompanyHomePage, senderPhone, returnHTML=returnHTML) outlook = app('Microsoft Outlook') msg = outlook.make( new=k.outgoing_message, with_properties={ k.subject: subj, k.plain_text_content: text } ) msg.make( new=k.recipient, with_properties={ k.email_address: { k.name: receiverName, k.address: receiverAddress } } ) msg.send()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pcorMacVerification(window,refrenceid,objectidentifier,texttoenter):\n try:\n buttons = getAppButtons(window)\n atomacclick(buttons[9])\n childwindow = refrenceid.windowsR()\n protectMoreDevicestitle = getApplicatontitle(childwindow[0])\n entertext(protectMoreDevicestitle,objectidentifier,texttoenter)\n except Exception as er:\n return False\n print \"Not able to able to send mail\"", "def __init__(self):\n self.outlook = win32.Dispatch('outlook.application')\n locale.setlocale(locale.LC_ALL, '')", "def replyMessage(_email, _name):\n\n _mailer = app.config['MAIL_USERNAME']\n mesg = Message(\"Message Received\", sender=('iSOLveIT Contact', f'{_mailer}'), recipients=[_email])\n mesg.body = f'''Hello {_name},\nThe message you sent to Randy has been received. \nRandy will contact you within 24 hours.\nThank you.\n\nRegards,\nRandy\n\nDate Sent: {dt.now(tz=GMT_tz).strftime('%B %d, %Y, %H:%M ') + 'GMT'}\n'''\n mail.send(mesg)\n return 'OK'", "def sender(self):\n key, alt = ('Sender', 'From') if not self.resent else \\\n ('Resent-Sender', 'Resent-From')\n value = self.get(key) or self.get(alt)\n _, addr = getaddresses([value])[0]\n return addr", "def test_send_subscribe_email(self):\n #Verifica se foi enviado 1 e-mail, o este não envia e-mail\n self.assertEqual(1, len(mail.outbox))", "def test_send_to_self(self):\r\n # Now we know we have pulled up the instructor dash's email view\r\n # (in the setUp method), we can test sending an email.\r\n test_email = {\r\n 'action': 'send',\r\n 'send_to': 'myself',\r\n 'subject': 'test subject for myself',\r\n 'message': 'test message for myself'\r\n }\r\n # Post the email to the instructor dashboard API\r\n response = self.client.post(self.send_mail_url, test_email)\r\n self.assertEquals(json.loads(response.content), self.success_content)\r\n\r\n # Check that outbox is as expected\r\n self.assertEqual(len(mail.outbox), 1)\r\n self.assertEqual(len(mail.outbox[0].to), 1)\r\n self.assertEquals(mail.outbox[0].to[0], self.instructor.email)\r\n self.assertEquals(\r\n mail.outbox[0].subject,\r\n '[' + self.course.display_name + ']' + ' test subject for myself'\r\n )", "def start():\r\n\r\n try:\r\n server.starttls()\r\n print(\"Successful connection to Outlook server\")\r\n print(\"--------------------------\")\r\n sender = input(\"Enter your Outlook email address: \")\r\n pwd = input(\"Enter your Outlook password: \")\r\n print(\"--------------------------\")\r\n server.login(sender, pwd)\r\n print(\"Successfully logged into Outlook\")\r\n print(\"--------------------------\")\r\n return sender\r\n except Exception as e:\r\n print(\"Unable to login. 
Check that the login information is correct\")\r\n print(e)\r\n print(\"--------------------------\")\r\n quit()", "def outlook(self):\n if \"outlook\" in self._prop_dict:\n if isinstance(self._prop_dict[\"outlook\"], OneDriveObjectBase):\n return self._prop_dict[\"outlook\"]\n else :\n self._prop_dict[\"outlook\"] = OutlookUser(self._prop_dict[\"outlook\"])\n return self._prop_dict[\"outlook\"]\n\n return None", "def test_using_invite_use_host_in_from_email(self, send_mass_html_mail__mock: Mock):\n events = Event.objects.filter(pk=self.event.pk)\n\n admin.EventAdmin.send_mail(Mock(), None, events)\n\n to_send = list(send_mass_html_mail__mock.call_args[0][0])\n from_email = to_send[0][3]\n self.assertEqual(from_email, \"Marie <test_using_invite_use_host_in_from_email@example.com>\")", "def send(\r\n self,\r\n to = '', #list of email addresses - Required\r\n subject='None', #message's subject - Required\r\n message_text='None', #message body in plain text - Required\r\n message_html=None, #message body in html - Optional\r\n attachments=None, #list of truples [(filename, file_contents)] - Optional\r\n cc = None, #list of email addresses to CC message to\r\n bcc = None, #list of email addresses to BCC message to\r\n reply_to = None, #single email address to have replies send to\r\n ): \r\n if not isinstance(to, list):\r\n to = [to]\r\n\r\n try:\r\n if self.settings.private.email_server == 'gae':\r\n from google.appengine.api import mail\r\n #untested on GAE, but in theory should work\r\n #http://code.google.com/appengine/docs/python/mail/emailmessagefields.html\r\n mail.send_mail(sender=self.settings.private.email_sender, to=to,\r\n subject=subject, body=message_text, html=message_html, attachments=attachments, cc = cc,\r\n bcc = bcc, reply_to = reply_to)\r\n else:\r\n\r\n msg = self.buildMIME(sender = self.settings.private.email_sender,\r\n recipients = to, subject = subject,\r\n message_text = message_text, message_html = message_html,\r\n attachments = attachments,\r\n cc = cc, bcc = bcc, reply_to = reply_to)\r\n #print 'message'+msg.as_string()\r\n #Build MIME body\r\n (host, port) = self.settings.mail.server.split(':')\r\n\r\n if self.settings.mail.ssl: \r\n try:\r\n server = smtplib.SMTP_SSL(host, port)\r\n except:\r\n # ERROR python <= 2.6\r\n pass\r\n else:\r\n server = smtplib.SMTP(host, port)\r\n\r\n if self.settings.mail.login:\r\n try:\r\n server.ehlo_or_helo_if_needed()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in HELO\")\r\n\r\n if self.settings.mail.use_tls:\r\n try:\r\n server.starttls()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in STARTTLS\")\r\n except SMTPException:\r\n logger.info(\"Server does not support TLS\")\r\n\r\n except RuntimeError:\r\n logger.info(\"Python version does not support TLS (<= 2.6?)\")\r\n\r\n try:\r\n server.ehlo_or_helo_if_needed()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in HELO\")\r\n\r\n (username, password) = self.settings.mail.login.split(':')\r\n try:\r\n server.login(username, password)\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in LOGIN\")\r\n\r\n except SMTPAuthenticationError:\r\n logger.info(\"Invalid username/password combination\")\r\n\r\n except SMTPException:\r\n logger.info(\"SMTP error in login\")\r\n\r\n try:\r\n server.sendmail(self.settings.private.email_sender, to, msg.as_string())\r\n server.quit()\r\n\r\n except SMTPRecipientsRefused:\r\n logger.info(\"All recipients were refused. 
Nobody got the mail.\")\r\n\r\n except SMTPHeloError:\r\n logger.info(\"The server didn't reply properly to the HELO greeting.\")\r\n\r\n except SMTPSenderRefused:\r\n logger.info(\"The server didn't accept the from_addr.\")\r\n\r\n except SMTPDataError:\r\n logger.info(\"The server replied with an unexpected error code (other than a refusal of a recipient).\")\r\n \r\n except Exception, e:\r\n return False\r\n return True", "def test_send_email(self):\n\t\trecipient = \"\"\n\t\tself.email.send_email(self.subject, recipient, self.content)", "def sendmail(self, *args, **kwargs):\n #FUTURE: the EmailMessage attributes could be found by introspecting\n # the encoded message.\n message = mail.EmailMessage('SUBJECT', 'BODY', 'FROM', ['TO'])\n mail.outbox.append(message)", "def test_send_email_on_invite(self):\n\n league = self.create_league()\n\n season = self.create_season(league)\n team = self.create_team(season)\n\n player = self.create_player()\n\n send_user_email_on_join(player, team.id)\n\n self.assertEqual(len(mail.outbox), 1)\n\n # if testing manually:\n # import pathlib\n # pathlib.Path(\"test_email.html\").write_text(last_sent.body)", "def sendmail(sendername, senderemail, password, receivers, htmlfile, img, attach):\n import smtplib\n\n #Creating the email\n \n\n domain = senderemail.split('@')[1]\n if 'gmail' in domain.lower(): #Gmail SMTP\n smtpObj = smtplib.SMTP('smtp.gmail.com', 587)\n elif 'outlook' in domain.lower(): #Outlook SMTP\n smtpObj = smtplib.SMTP('smtp-mail.outlook.com', 587)\n elif 'yahoo' in domain.lower(): #Yahoo SMTP\n smtpObj = smtplib.SMTP('smtp.mail.yahoo.com', 587)\n else:\n print('Sorry I dont have your email SMTP setting.\\nBYE!')\n quit()\n\n smtpObj.starttls()\n try:\n smtpObj.login(senderemail, password)\n except smtplib.SMTPAuthenticationError:\n print('Authentication error!\\nWrong Email or Password.')\n quit()\n \n for user, email in receivers.items():\n msg = makeHTMLemail(sendername, senderemail, user, email, htmlfile, img, attach)\n smtpObj.send_message(msg)\n print('email sent to {}'.format(user))\n del msg\n smtpObj.quit()", "def send_owner_message(): \n data = order_obj.send_owner_message(request.forms)\n return data", "def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))", "def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))", "def email_body_appointment_confirmation_for_seller(meeting, buyer_profile, sellr_profile, msg_user_link='https://INSPRITE.co/message/USER'):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + 
'<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Fantastic! You accepted <a href=\"https://127.0.0.1:5000/profile?' + buyer_profile.prof_id + '\" style=\"color:#1488CC\">' + buyer_profile.prof_name + '\\'s proposal.</a><br><br>'\n\tmsg = msg + '\\t\\t\\t Check out the details:<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tTime: ' + meeting.meet_ts.strftime('%A, %b %d, %Y %H:%M %p') + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDuration: ' + meeting.get_duration_in_hours() + ' hours<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tLocation: ' + str(meeting.meet_location) + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tFee: $' + str(meeting.meet_cost) + '<br><br>'\n\tmsg = msg + '\\t\\t\\t Need to edit, manage or update the appointment? <a href=\"https://127.0.0.1:5000/dashboard\" style=\"color:#1488CC\">Go for it</a>, or send <a href=\"' + msg_user_link + '\" style=\"color:#1488CC\"> ' + buyer_profile.prof_name + ' a message.</a><br><br>We know life can be busy, so we\\'ll send you a reminder 24 hours in advance too.</font>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://maps.googleapis.com/maps/api/staticmap?center=' + meeting.meet_location + '&zoom=15&size=400x450&markers=size:large%8Ccolor:0xFFFF00%7Clabel:Insprite%7C' + meeting.meet_location + '\"><br>'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" 
valign=\"middle\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:10px;\"><a href=\"mailto:thegang@insprite.co\" style=\"color:#1488CC\">Contact Us</a> '\n\tmsg = msg + '| Sent by <a href=\"https://insprite.co\" style=\"color:#1488CC\">Insprite</a>, California, USA. | <a href=\"#\" style=\"color:#1488CC\">Unsubscribe</a></font><br>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr> <td style=\"border-top: 0px solid #333333; border-bottom: 0px solid #FFFFFF;\">'\n\tmsg = msg + '<img width=\"596px\" src=\"http://ryanfbaker.com/insprite/footerImage.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def service_sendTestMail(self, context, sender=None, recipient=None):\n\n if sender is None:\n sender = self.config.sender_mail\n else:\n sender = sender.strip()\n\n if recipient is None:\n recipient = self.config.admin_mail\n else:\n recipient = recipient.strip()\n\n # TODO fr / en\n # add fqdn\n msg_text = u\"\"\"Bonjour,\nCe message de test a été envoyé depuis l'interface d'administration\nd'EdenWall. Si vous l'avez reçu, cela confirme que la configuration\nen place au moment de l'envoi vous permet de recevoir les messages\nsystème (alertes et informations) de votre pare-feu EdenWall.\"\"\"\n if context.isUserContext():\n session = context.getSession()\n msg_text += u\"\\n\\nL'envoi ce de message a été déclenché par une action utilisateur.\\nInformations de traçage: %s\\n\" % (session,)\n\n msg = MIMEText(msg_text.encode('ISO-8859-1'), 'plain', 'ISO-8859-1')\n msg['Subject'] = 'EdenWall : test mail'\n\n if check_mail(sender):\n msg['From'] = sender\n else:\n raise NuConfError(CONTACT_INVALID_SENDER, \"'sender' e-mail : invalid e-mail address\")\n\n if check_mail(recipient):\n msg[\"To\"] = recipient\n else:\n raise NuConfError(CONTACT_INVALID_RECIPIENT, \"'recipient' e-mail : invalid e-mail address\")\n\n return self.sendTestMail('127.0.0.1', msg['From'], [msg['To']], msg.as_string())", "def send(self):\n return get_current_sender().sendmail(self)", "def receive(self, email):\n self.inbox += email", "def sendEmail(_name, _email, _body):\n\n _mailer = app.config['MAIL_USERNAME']\n msg = Message(\"Contact Form\", sender=('iSOLveIT Contact', f'{_mailer}'), recipients=[f'{_mailer}'])\n msg.body = f'''{_body}\n\n\nSender's Name: {_name}\nSender's Email: {_email}\nDate Sent: {dt.now(tz=GMT_tz).strftime('%B %d, %Y, %H:%M ') + 'GMT'}\n'''\n mail.send(msg)\n return 'OK'", "def define_sender(self, email=\"\", name=\"\"):\n if not email:\n return\n if not name:\n name = False\n self.from_who = _email.formataddr( (name, email) )", "def send(self, **kwargs):\n if hasattr(self.object, 'member'):\n self.add_to(self.object.member.user.email)\n elif hasattr(self.object, 'membership'):\n self.add_to(self.object.created_by.email)\n return super(GrantedAccessMailer, self).send(**kwargs)", "def email_body_appointment_confirmation_for_buyer(meeting, buyer_profile, sellr_profile, msg_url=\"https://127.0.0.1:5000/message?profile=xxxx\"):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px 
solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Ain\\'t life grand? Meeting\\'s on! <a href=\"https://127.0.0.1:5000/profile?'+ sellr_profile.prof_id + ' style=\"color:#1488CC\">\"' + sellr_profile.prof_name + '\" accepted your proposal.</a><br><br>'\n\tmsg = msg + '\\t\\t\\t Check out the details: <br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tTime: ' + meeting.meet_ts.strftime('%A, %b %d, %Y %H:%M %p') + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDuration: ' + meeting.get_duration_in_hours() + ' hours<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tLocation: ' + str(meeting.meet_location) + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tFee: $' + str(meeting.meet_cost) + '<br><br>'\n\tmsg = msg + '\\t\\t\\t Need to edit, manage or update the appointment? <a href=\"https://127.0.0.1:5000/dashboard\" style=\"color:#1488CC\">Go for it</a>, or send <a href=\"'+msg_url+'\" style=\"color:#1488CC\">\"' + sellr_profile.prof_name + '\" a message.</a><br><br></font>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://maps.googleapis.com/maps/api/staticmap?center=' + meeting.meet_location + '&zoom=15&size=400x450&markers=size:large%8Ccolor:0xFFFF00%7Clabel:Insprite%7C' + meeting.meet_location + '\">'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + 
'\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:10px;\"> <a href=\"mailto:thegang@insprite.co\" style=\"color:#1488CC\">Contact Us</a>'\n\tmsg = msg + '\\t\\t| Sent by <a href=\"https://insprite.co\" style=\"color:#1488CC\">Insprite</a>, California, USA. | <a href=\"#\" style=\"color:#1488CC\">Unsubscribe</a></font>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr> <td style=\"border-top: 0px solid #333333; border-bottom: 0px solid #FFFFFF;\">'\n\tmsg = msg + '\\t\\t<img width=\"596px\" src=\"http://ryanfbaker.com/insprite/footerImage.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def send_email(path=OUTLOOK_EXE,subject=\"\",message=\"\",recipients=None,\r\n cc=None,bcc=None,attachments=None):\r\n # Check if outlook is an active process.\r\n if not is_running(os.path.basename(path)):\r\n\r\n # Launch Outlook executable.\r\n cmdspec = 'start \"\" /B /MIN ' + '\"' + path + '\"'\r\n subprocess.run(cmdspec, shell=True, capture_output=False)\r\n\r\n # Wait until `OUTLOOK.EXE` registers in tasklist.\r\n while True:\r\n if not is_running(os.path.basename(path)):\r\n time.sleep(.25)\r\n else:\r\n break\r\n\r\n # Send message via Outlook and logged in user.\r\n outlook = win32com.client.Dispatch(\"Outlook.Application\")\r\n mail = outlook.CreateItem(0)\r\n mail.Subject = subject\r\n mail.HtmlBody = message\r\n\r\n if recipients is not None:\r\n if hasattr(recipients, \"strip\"):\r\n recipients = [recipients]\r\n [mail.Recipients.Add(i) for i in recipients]\r\n\r\n if attachments is not None:\r\n if hasattr(attachments, \"strip\"):\r\n attachments = [attachments]\r\n [mail.Attachments.Add(i) for i in attachments]\r\n\r\n mail.send\r\n\r\n return(None)", "def test_using_invite_use_host_in_from_email(self, send_mass_html_mail__mock: Mock):\n self._send_form()\n\n to_send = list(send_mass_html_mail__mock.call_args[0][0])\n from_email = to_send[0][3]\n self.assertEqual(from_email, \"Marie <test_using_invite_use_host_in_from_email@example.com>\")", "def test_using_invite_use_host_in_from_email(self, send_mass_html_mail__mock: Mock):\n self._send_form()\n\n to_send = list(send_mass_html_mail__mock.call_args[0][0])\n from_email = to_send[0][3]\n self.assertEqual(from_email, \"Marie <test_using_invite_use_host_in_from_email@example.com>\")", "def test_invitation_email(self):\n queryset = models.Invitation.objects.filter(id=self.invitation.id)\n self.admin_instance.send_new_activation_email(self.some_request, queryset)\n # check whether there is a mail in the outbox\n self.assertEqual(len(mail.outbox), 1)\n # check subject\n self.assertEqual(\n mail.outbox[0].subject,\n \"Er is een account voor u aangemaakt op sso.lizard.net\",\n )\n 
self.assertEqual(mail.outbox[0].to, [\"reinout@example.org\"])\n # check mail starts with 'Hallo Reinout,'\n self.assertTrue(mail.outbox[0].body.startswith(\"Hallo Reinout,\"))", "def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n # run email job\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)", "def send(to, msg_type, update, sender=None):\n # The value of agent used to be identity.current.user_name in TG land, but\n # now we need to somehow get it off of the pyramid request, or have it\n # passed into this function.\n agent = 'TBD'\n for person in iterate(to):\n send_mail(sender, person, '[Fedora Update] [%s] %s' % (msg_type,\n update.title), messages[msg_type]['body'] %\n messages[msg_type]['fields'](agent, update))", "def mail2project(self, message) :\n \n # keep copy of original message for error handling\n original_message = email.message_from_string(message)\n \n #keep trac email : \n trac_mail = self.env.config.get('notification', 'smtp_replyto')\n \n # whether or not to email back on error\n email_errors = self.env.config.getbool('mail', 'email_errors', True)\n \n # lookup the message\n message = self.lookup(message)\n # get the handlers\n handlers = ExtensionPoint(IEmailHandler).extensions(self.env)\n _handlers = self.env.config.getlist('mail', 'handlers')\n if not _handlers: # default value\n _handlers = [ 'RemoveQuotes', 'ReplyToTicket', 'EmailToTicket' ]\n handler_dict = dict([(h.__class__.__name__, h)\n for h in handlers])\n handlers = [handler_dict[h] for h in _handlers\n if h in handler_dict ]\n # handle the message\n warnings = []\n\t#is this email treated ?\n\temail_treated = False\n for handler in handlers:\n if not handler.match(message) :\n continue\n try:\n\t\temail_treated = True\n\n message = handler.invoke(message, warnings)\n except Exception, e:\n # handle the error\n print \"Exception in user code:\"\n print '-'*60\n traceback.print_exc(file=sys.stdout)\n print '-'*60\n raise\n except EmailException, e:\n if email_errors and original_message['from']:\n subject = reply_subject(original_message['subject'])\n response = 'Subject: %s\\n\\n%s' % (subject, reply_body(str(e), original_message))\n send_email(self.env,\n trac_mail,\n [ original_message['from'] ],\n response\n )\n warnings = [] # clear warnings\n return\n else:\n raise\n \n # if the message is consumed, quit processing\n if not message:\n break\n \n\tif not email_treated :\n\t warnings.append(\"Your email was not treated. 
It match none of the condition to be treated\")\n # email warnings\n if warnings:\n \n # format warning message\n if len(warnings) == 1:\n body = warnings[0]\n pass\n else:\n body = \"\\n\\n\".join([\"* %s\" % warning.strip() \n for warning in warnings])\n \n # notify the sender\n subject = reply_subject(original_message['subject'])\n response = 'Subject: %s\\n\\n%s' % (subject, reply_body(body, original_message))\n send_email(self.env,\n trac_mail,\n [ original_message['from'] ],\n response\n )", "def exec(self): \r\n emails = self.args[0].split(',')\r\n for email in emails:\r\n send_mail(self.args[1], self.args[2], email)\r\n return_text = \"Sent Mail To :: \" + self.args[0] +\"\\n\" + self.args[1] + \":\\n\" + self.args[2]\r\n return return_text", "def send_message(message, destination):\n\n #Your code here\n pass", "def send_email(self, message):\n pass", "def send_object(self):\n for object_ in self.objects:\n strCC = '; '.join([object_.ter_dir_email, object_.successor_email])\n strCC += \"; ekb.inkas.net@maxus.ru; schugunov@svyaznoy.ru\"\n strSubject = \"Инкассация и вывоз POS-терминала при закрытии ТТ\"\n outMail = self.outlook.Application.CreateItemFromTemplate(\n CLOSING_MAIL_TEMPLATE\n )\n fixture = {\n 'дата+1': self.event_date.strftime('%d.%m.%Y'),\n 'преемник': object_.successor_full_name,\n 'имяТТ': f'ЦМС {object_.object_code[-4:]} {object_.object_name}'\n }\n HTML_body_without_signature = outMail.HTMLBody\n outMail.Display()\n for k, v in fixture.items():\n HTML_body_without_signature = HTML_body_without_signature.replace('{' + k + '}', v)\n\n outMail.HTMLBody = HTML_body_without_signature\n outMail.To = object_.object_SAP_code\n outMail.CC = strCC\n outMail.Subject = strSubject\n outMail.importance = 2\n if datetime.now().date() + timedelta(days=1) < self.event_date:\n outMail.DeferredDeliveryTime = \\\n (self.event_date - timedelta(days=1)).strftime('%d.%m.%Y') + \" 17:00\"", "def send(self, email):\n client = self.clients[email.addressee]\n client.receive(email)", "def send_email(to, subject, body, attachment=None):\n outlook = win32.Dispatch('outlook.application')\n new_mail = outlook.CreateItem(0)\n new_mail.Subject = subject\n new_mail.HTMLBody = body\n new_mail.To = to\n\n if attachment:\n new_mail.Attachments.Add(attachment)\n\n new_mail.Send()", "def test_sendimmessages(self):\n pass", "def sendsms(window,refrenceid,image,email):\n try:\n buttons = getAppButtons(window)\n atomacclick(buttons[10])\n childwindow = refrenceid.windowsR()\n protectMoreDevicesbuttons = getAppButtons(childwindow[0])\n protectMoreDevicestitle = childwindow[0].getApplicatontitle()\n ldtp.enterstring(protectMoreDevicestitle,image,email)\n #Need to write after click\n except Exception as er:\n return False\n print \"Not able to send SMS\"", "def sendMsg(dest):\n # need to grab these creds from config\n jconfig = getConfig()\n if jconfig == -1:\n print(\"::unable to parse tardigrade credentials\")\n return 1\n if 'tardigrade' not in jconfig[2]:\n print(\"::unable to parse tardigrade credentials. 
Add to config using (-E) --email-creds-init\")\n return 1\n username=jconfig[2]['tardigrade']['username']\n password=jconfig[2]['tardigrade']['password']\n fromaddr=username\n toaddrs=dest\n msg=listFeeds(1)\n server = smtplib.SMTP('smtp.gmail.com:587')\n server.starttls()\n server.login(username,password)\n server.sendmail(fromaddr, toaddrs, msg)\n server.quit()\n print(\"::msg sent\")", "def createTMsender():\n global s_tmSender\n nctrsTMfields = GRND.NCTRS.NCTRStmFields()\n nctrsTMfields.spacecraftId = int(UTIL.SYS.s_configuration.SPACECRAFT_ID)\n s_tmSender = TMsender(\n portNr=int(UTIL.SYS.s_configuration.NCTRS_TM_SERVER_PORT),\n nctrsTMfields=nctrsTMfields)\n if not s_tmSender.openConnectPort(UTIL.SYS.s_configuration.HOST):\n sys.exit(-1)", "def envelope_sender(self):\n envelope_sender = None\n # TODO: Make this check better as soon as SMTP from and sender are \n # Addresses, not AddressLists anymore.\n if self.smtp_from != None and len(self.smtp_from) > 0:\n envelope_sender = self.smtp_from\n elif self.sender != None and len(self.sender) > 0:\n envelope_sender = self.sender\n else:\n envelope_sender = self.author\n return Address(envelope_sender)", "def recipient(self):\n\t\trecipient = re.search(r\"([Tt]\\s*o )(.*)(from.*)\",self.raw_text()[:250])\n\t\t\n\t\tif recipient: \t\n\t\t\trecipient = recipient.group(2) \t\n\t\t\trecipient = re.sub(r\"(\\w+\\s*\\w+),.*\",r\"\\1\",recipient) #attempting to clear out titles and such\n\t\t\t# recipient = re.sub(r\"([sS]ecre[a-z]+ of the \\w+).*\",\"Secretary of the Navy\",recipient) \t\n\t\t\treturn recipient\n\t\treturn \"Unknown\"", "def send_mail(self, subject):\r\n pass", "def func_from(self, data, get_recv):\n if get_recv:\n checking = bytes(data).decode().encode('ascii', 'ignore').decode()\n else:\n checking = bytes(data).decode().encode('ascii', 'ignore').decode().splitlines()[0]\n data_list = checking.split(':')\n remove_bracket = str(data_list[1])\n remove_bracket = remove_bracket[2:-1]\n data_list[1] = remove_bracket\n check = data_list[0].lower().rstrip()\n if check == 'mail from':\n message = self.conf_th_ic.get_item(q_key='std-messages').get(check)\n self.func_sender(message)\n return True", "def sendTemplate( self, template, mto=None, mfrom=None, subject=None, \\\n from_member=None, raise_exc=MissingValue, IsAntiSpam=None, \\\n namespace=None, lang=None, REQUEST=None, **kw ):\n context = aq_parent( self )\n\n if callable( template ):\n id = template.getId()\n\n else:\n id = template\n skins = getToolByName( context, 'portal_skins' )\n try:\n template = getattr( skins.getSkinByName('Mail'), id )\n except ( AttributeError, KeyError ):\n raise KeyError, id\n # XXX move this to skins tool\n template = aq_base( template ).__of__( context )\n\n if not _checkPermission( CMFCorePermissions.ReplyToItem, template ):\n raise Unauthorized, id\n\n # XXX must use language from recipients' settings\n lang = lang or getToolByName( self, 'msg' ).get_default_language()\n\n if type(mto) is StringType:\n mto = (mto,)\n elif isinstance( mto, MemberData ):\n mto = list(mto.getMemberName())\n\n membership = getToolByName( self, 'portal_membership', None )\n if not membership:\n return None\n\n count = 0\n check_list_to = []\n\n for x in mto:\n if x in check_list_to or not x:\n continue\n\n if x.find('@') > -1:\n member_email = x\n IsAntiSpam = 0\n else:\n member_email = membership.getMemberById( x ).getMemberEmail()\n if IsAntiSpam is None:\n IsAntiSpam = CheckAntiSpam( self, x )\n\n mail_text = template( context, namespace or REQUEST, lang=lang, 
IsAntiSpam=IsAntiSpam, **kw )\n\n msg = self.createMessage( source=mail_text )\n if self.send( msg, mto=(member_email,), mfrom=mfrom, subject=subject, from_member=from_member, raise_exc=raise_exc ):\n check_list_to.append( x )\n count += 1\n\n #if count:\n # LOG('Mail.sendTemplate', INFO, \"message was sent: mfrom %s, mto %s, subject %s\" % (mfrom, mto, subject) )\n return count", "async def _return_exported_sender(self: 'TelegramClient', sender):\n async with self._borrow_sender_lock:\n self._log[__name__].debug('Returning borrowed sender for dc_id %d', sender.dc_id)\n state, _ = self._borrowed_senders[sender.dc_id]\n state.add_return()", "def send_created_email(self):\n if settings.NOTIFY_NEW_REG:\n to = settings.NOTIFY_NEW_REG\n message = \"\"\"\\\nGreetings,<br><br>\n\nA new vehicle registration has been submitted by %s.<br><br>\n\nGo here to view or edit the request: <br>\n<a href=\"%s\">%s</a>\n<br><br>\nSincerely,<br><br>\nThe Janelia Parking Permit Program\n \"\"\" % (self.user_display_name(), self.get_edit_url(True), self.get_edit_url(True))\n subject = 'A new parking permit request has been entered'\n from_email = 'parkingpermit-donotreply@janelia.hhmi.org'\n text_content = re.sub(r'<[^>]+>','',message)\n html_content = message\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "def testMailSent(self):\n self.sendEmail()\n messages = self.mail_stub.get_sent_messages(to='trigger@ifttt.com')\n self.assertEqual(1, len(messages))\n self.assertEqual('trigger@ifttt.com', messages[0].to)", "def emailobj(self):\r\n try:\r\n if self.glob_data['EmailNotifications'].lower() == 'yes':\r\n if \"Email\" in self.glob_data:\r\n recipients = self.glob_data['Email']['recipients']\r\n if not recipients or recipients == 'None':\r\n logger.warn(\"Recipients list is not provided hence \\\r\n not sending email notifications\")\r\n else:\r\n return Message()\r\n except KeyError:\r\n logger.warn('MailNotifications are disabled')\r\n\r\n try:\r\n emailobj = Maillib(recipients)\r\n return emailobj\r\n except MailConnectionException as err:\r\n logger.warn(err.message)\r\n return Message()\r\n except MailRecipientsException as err:\r\n logger.warn(err.message)\r\n return Message()", "def add_users_from_file_process_inbox(self,\n input_file,\n out_file,\n look_for=\"yes\",\n email_details=None):\n if email_details is None:\n email_details = {}\n if not os.access(out_file, os.W_OK):\n print('Can''t write to output file. 
Please close ' + str(out_file))\n return\n ol = win32com.client.Dispatch(\"Outlook.Application\").GetNamespace(\"MAPI\")\n inbox = ol.GetDefaultFolder(6) # 6=olFolderInbox\n for no in range(inbox.Items.Count-1, -1, -1):\n msg = inbox.Items[no]\n # If look_for found in subject line\n if msg.Subject.lower().find(look_for.lower()) != -1:\n csv_file_read = open(input_file, 'r')\n approved_users_csv = csv.DictReader(csv_file_read)\n found_email_in_spreadsheet = False\n for row in approved_users_csv:\n try:\n # Exchange users are a pain - you have to look up their email\n if msg.SenderEmailType == \"EX\":\n from_address = msg.Sender.GetExchangeUser().PrimarySmtpAddress\n else:\n from_address = msg.SenderEmailAddress\n except:\n # msg.SenderEmailType raises error on (Message Recall mails i think)\n continue\n if from_address.lower().strip() == row['email'].lower().strip():\n found_email_in_spreadsheet = True\n # Add to ICE, write to outputFile\n csv_file_write = open(out_file, 'a', newline='')\n writer = csv.DictWriter(csv_file_write, approved_users_csv.fieldnames)\n print('---\\nProcessing: Firstname: ' + row['firstName'] + ' Surname: ' + row['surname'] +\n ' Email: ' + row['email'].lower().strip())\n if not self.password_validates(row['newPassword']):\n comment = \"ICE won't accept this password even if i try it!\"\n # Shouldn't ever get here if input_file is validated, but might want to\n # consider emailing someone the fail here?\n else:\n try:\n self.login()\n except:\n print('Warning - cant log into ICE, I need to tell someone!')\n # TODO Email simon\n return\n comment = self.add_user(row)\n self.log_off()\n print(comment)\n # i = datetime.now()\n # row['Status'] = comment + ' (%s/%s/%s %s:%s)' % (i.day, i.month, i.year, i.hour, i.minute)\n row['Status'] = comment + ' (' + datetime.now().strftime('%d %b %Y %H:%M') + ')' # 01 Jan 1900 19:00\n writer.writerow(row)\n csv_file_write.close()\n # Email end user\n if comment == 'User added successfully':\n # Email username\n self.email_out(row, email_details['userHTMLFile'], email_details['userSubject'],\n email_details['userAttachFolder'], email_details['fromAddress'])\n time.sleep(30)\n # Email password\n self.email_out(row, email_details['passHTMLFile'], email_details['passSubject'],\n email_details['passAttachFolder'], email_details['fromAddress'])\n # Email UHB\n # forward_message = msg.Forward()\n # forward_message.To = email_details['UHBAddress']\n # forward_message.Send()\n # Move original email\n msg.Move(inbox.Folders(email_details['processed_folder']))\n else: # Failed to add successfully\n msg.Move(inbox.Folders(email_details['failed_folder']))\n if not found_email_in_spreadsheet:\n msg.Subject += ' [email address not found in spreadsheet]'\n msg.Save()\n msg.Move(inbox.Folders(email_details['failed_folder']))\n else: # search term not found in subject line\n msg.Subject += ' [search term not found in subject line]'\n msg.Save()\n msg.Move(inbox.Folders(email_details['failed_folder']))", "def manage_addMailSender( self, id='MailHost', title='', host=None, port=None, REQUEST=None ):\n self._setObject( id, MailSender( id, title, host, port ) )\n\n if REQUEST is not None:\n REQUEST.RESPONSE.redirect( REQUEST.URL1 )", "def test_21_inbox_redirection(self):\n cr, uid, user_admin, group_pigs = self.cr, self.uid, self.user_admin, self.group_pigs\n model, act_id = self.ir_model_data.get_object_reference(cr, uid, 'mail', 'action_mail_inbox_feeds')\n model, port_act_id = self.ir_model_data.get_object_reference(cr, uid, 'portal', 
'action_mail_inbox_feeds_portal')\n # Data: post a message on pigs\n msg_id = self.group_pigs.message_post(body='My body', partner_ids=[self.partner_bert_id, self.partner_chell_id], type='comment', subtype='mail.mt_comment')\n\n # No specific parameters -> should redirect to Inbox\n action = self.mail_thread.message_redirect_action(cr, self.user_raoul_id, {'params': {}})\n self.assertEqual(action.get('type'), 'ir.actions.client',\n 'URL redirection: action without parameters should redirect to client action Inbox')\n self.assertEqual(action.get('id'), act_id,\n 'URL redirection: action without parameters should redirect to client action Inbox')\n\n # Bert has read access to Pigs -> should redirect to form view of Pigs\n action = self.mail_thread.message_redirect_action(cr, self.user_raoul_id, {'params': {'message_id': msg_id}})\n self.assertEqual(action.get('type'), 'ir.actions.act_window',\n 'URL redirection: action with message_id for read-accredited user should redirect to Pigs')\n self.assertEqual(action.get('res_id'), group_pigs.id,\n 'URL redirection: action with message_id for read-accredited user should redirect to Pigs')\n\n # Bert has no read access to Pigs -> should redirect to Inbox\n action = self.mail_thread.message_redirect_action(cr, self.user_bert_id, {'params': {'message_id': msg_id}})\n self.assertEqual(action.get('type'), 'ir.actions.client',\n 'URL redirection: action without parameters should redirect to client action Inbox')\n self.assertEqual(action.get('id'), act_id,\n 'URL redirection: action without parameters should redirect to client action Inbox')\n\n # Chell has no read access to pigs -> should redirect to Portal Inbox\n action = self.mail_thread.message_redirect_action(cr, self.user_chell_id, {'params': {'message_id': msg_id}})\n self.assertEqual(action.get('type'), 'ir.actions.client',\n 'URL redirection: action without parameters should redirect to client action Inbox')\n self.assertEqual(action.get('id'), port_act_id,\n 'URL redirection: action without parameters should redirect to client action Inbox')", "def test_private_message_sends_email(self, get_current):\n get_current.return_value.domain = \"testserver\"\n\n s, c = Setting.objects.get_or_create(user=self.to, name=\"email_private_messages\")\n s.value = True\n s.save()\n # User has setting, and should recieve notification email.\n\n assert Setting.get_for_user(self.to, \"email_private_messages\")\n\n self.client.login(username=self.sender.username, password=\"testpass\")\n post(self.client, \"messages.new\", {\"to\": self.to, \"message\": \"a message\"})\n subject = \"[SUMO] You have a new private message from [{sender}]\"\n\n attrs_eq(\n mail.outbox[0],\n to=[self.to.email],\n subject=subject.format(sender=self.sender.profile.name),\n )\n starts_with(\n mail.outbox[0].body, PRIVATE_MESSAGE_EMAIL.format(sender=self.sender.profile.name)\n )", "def handle_inbound_sms(to, from_):\n body = MessageRequest()\n body.application_id = MESSAGING_APPLICATION_ID\n body.to = [from_]\n body.mfrom = to\n body.text = \"The current date-time is: \" + str(time.time() * 1000) + \" milliseconds since the epoch\"\n try:\n messaging_client.create_message(MESSAGING_ACCOUNT_ID, body)\n except Exception as e:\n print(e)\n return None", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n\n lists,nextPageToken = ListMessages(service,user_id = 'me',q='subject:tradingview')\n # print (lists)\n mes,mes_str = 
GetMimeMessage(service,user_id = 'me',msg_id = lists[0]['id'])\n print (mes)\n\n\n j = 0\n for part in mes.walk(): \n j = j + 1 \n fileName = part.get_filename() \n contentType = part.get_content_type() \n mycode=part.get_content_charset(); \n # 保存附件 \n if fileName:\n print ('hhhhhhhhhhhhh')\n elif contentType == 'text/plain' or contentType == 'text/html': \n #保存正文 \n data = part.get_payload(decode=True) \n content=str(data); \n # if mycode=='gb2312': \n # content= mbs_to_utf8(content) \n #end if \n # nPos = content.find('降息') \n # print(\"nPos is %d\"%(nPos)) \n # print >> f, data \n # 正则替换掉所有非 <a></a>的标签 <[^>|a]+>\n # reg = re.compile('<[^>|a]+>')\n contentTxt = re.compile('<[^>|a]+>').sub('',content)\n print (reg.sub('',content))\n #end if \n\n\n \n # help(mes)\n # for i in mes.values():\n # print (i)\n # # print (mes[i]);\n # print (\"----------\")\n # print (mes['from'])\n # print (type (mes))\n # # print \n # parsed = Parser().parsestr(mes)\n # print (parsed)\n # print (mes)\n # for i in mes:\n # print (i)\n # for item in lists:\n # mes = GetMimeMessage(service,user_id = 'me',msg_id = item['id'])\n # # print (mes)\n # parsed = Parser().parsestr(mes)\n # print (parsed)", "def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)\n self.stopRouter()", "def test_email_good(get_email, capsys):\n e = get_email\n e.send()\n out, err = capsys.readouterr()\n assert \"Message sent\" in out", "def email_out(row, html_file, subject, attach_folder, from_address):\n raw_html = open(html_file, 'r').read()\n html = raw_html.replace('$firstname', row['firstName'])\n html = html.replace('$surname', row['surname'])\n html = html.replace('$username', row['username'])\n html = html.replace('$password', row['newPassword'])\n html = html.replace('email', row['email'])\n attachments = []\n if os.path.exists(attach_folder):\n attachments = [os.path.join(os.getcwd(), attach_folder, fn) for fn in os.listdir(attach_folder)]\n o = Outlook()\n o.send(True, row['email'], subject, '', html, attachments=attachments, account_to_send_from=from_address)", "def handle_inbound_sms_call_me(to, from_):\n handle_call_me(to, from_)", "def step_impl_the_msg_to_is_set_to_internal_as_string_not_array(context):\n context.bdd_helper.message_data[\"msg_to\"] = context.bdd_helper.internal_id_specific_user", "def msg_to_sender(received_msg):\r\n msg_reply = Message()\r\n msg_reply.to = str(received_msg.sender)\r\n msg_reply.set_metadata(\"performative\", \"inform\")\r\n return msg_reply", "def sender(self) -> str:", "def send_ctr_alert(date, ctr):\n sender = \"team1_rs@outlook.com\"\n receivers = [\"alexa.hernandez@mail.mcgill.ca\"]\n msg = MIMEText(\n f\"Hello Team1,\\n\\nToday's CTR has dropped below {str(MIN_CTR*100)}%. 
The CTR is {str(ctr*100)}%.\\nPlease \"\n f\"investigate immediately.\"\n )\n\n msg[\"Subject\"] = \"Team1 Recommendation Service - CTR Alert\"\n msg[\"From\"] = sender\n msg[\"To\"] = \";\".join(receivers)\n\n try:\n smtpObj = smtplib.SMTP(\"smtp.office365.com\", 587)\n smtpObj.ehlo()\n smtpObj.starttls()\n smtpObj.login(\"team1_rs@outlook.com\", \"team1*rs\")\n smtpObj.sendmail(sender, receivers, msg.as_string())\n print(\"Successfully sent email\")\n except smtplib.SMTPException as e:\n print(\"Error: unable to send email\")", "def send_sender_activation_email(self, email):\n logger.info(\"Function call: send_sender_activation_email for '{}'\".format(email, ))\n return self.__handle_error('Empty sender email') if not email else self.__handle_result(self.__send_request('senders/{}/code'.format(email, )))", "def onAboutLeoEmail(self,event=None):\n \n try:\n import webbrowser\n webbrowser.open(\"mailto:\" + self.email)\n except:\n g.es(\"not found: \" + self.email)", "def send( self, msg, mto=None, mfrom=None, subject=None, encode=None, from_member=None, \\\n IsAntiSpam=None, IsReturnReceiptTo=None, IsConfirmReadingTo=None, \\\n object_url=None, raise_exc=MissingValue ):\n count = 0\n if not self.address():\n LOG( 'MailSender.send', TRACE, 'SMTP address is not defined')\n return 0\n\n try:\n if not isinstance( msg, MailMessage ):\n count = MailHost.send( self, msg, mto, mfrom, subject, encode )\n LOG( 'MailSender.send', TRACE, 'sent mail messages: count [%s], from %s to %s ' % ( count, mfrom, mto ))\n self.close()\n return count\n\n if subject is not None:\n msg.set_header( 'subject', subject )\n else:\n subject = ''\n\n if 'date' not in msg:\n msg.set_header( 'date', formatdate( None, 1 ) )\n if 'message-id' not in msg:\n msg.set_header( 'message-id', make_msgid() )\n if 'x-mailer' not in msg:\n msg.set_header( 'x-mailer', Config.MailerName % self._class_version )\n\n membership = getToolByName( self, 'portal_membership', None )\n properties = getToolByName( self, 'portal_properties', None )\n if membership is None or properties is None:\n return 0\n\n member = mname = None\n\n if mfrom is None:\n if from_member and not membership.isAnonymousUser():\n member = membership.getAuthenticatedMember()\n elif 'from' in msg:\n mfrom = parseaddr( msg.get( 'from', decode=1 ) )[1]\n if not mfrom:\n mfrom = properties.getProperty( 'email_from_address' )\n else:\n mname = properties.getProperty( 'email_from_name' )\n if IsAntiSpam != 0:\n try: mfrom = properties.getProperty( 'email_antispam' )\n except: pass\n if not mfrom:\n mfrom = properties.getProperty( 'email_from_address' )\n else:\n mname = None\n else:\n if type(mfrom) is StringType:\n if mfrom.find('@') < 0:\n member = membership.getMemberById( mfrom )\n elif isinstance( mfrom, MemberData ):\n member = mfrom\n\n if member is not None:\n mname = member.getMemberName()\n mfrom = member.getMemberEmail()\n\n if not mfrom:\n mfrom = getSecurityManager().getUser().getUserName()\n\n if 'from' not in msg:\n msg.set_header( 'from', (mname, mfrom) )\n\n list_to = None\n\n if mto is None:\n mdict = {}\n for header in ( 'to', 'cc', 'bcc', 'resent-to', 'resent-cc' ):\n for mname, email in getaddresses( msg.get_all( header ) ):\n if email:\n mdict[ email ] = header\n mto = mdict.keys()\n elif 'to' in msg:\n list_to = []\n\n if 'bcc' in msg:\n msg.remove_header( 'bcc' )\n\n if IsReturnReceiptTo:\n msg.set_header( 'Disposition-Notification-To', mfrom )\n msg.set_header( 'Return-Receipt-To', mfrom )\n\n if IsConfirmReadingTo:\n msg.set_header( 
'Return-Receipt-To', mfrom )\n msg.set_header( 'Disposition-Notification-To', mfrom )\n msg.set_header( 'X-Confirm-Reading-To', mfrom )\n\n no_mail = membership.getGroupMembers('_NO_MAIL_') or []\n if mto and type(mto) is StringType:\n mto = [ mto ]\n check_list_to = []\n\n for item in mto:\n member = None\n if type(item) is StringType:\n if item.find('@') < 0:\n member = membership.getMemberById( item )\n elif isinstance( item, MemberData ):\n member = item\n\n if member is not None:\n mname = member.getMemberName()\n email = member.getMemberEmail()\n else:\n mname = None\n email = str(item)\n\n if member is not None and member.getUserName() in no_mail:\n continue\n\n if not email or email == '' or email == 'None' or email.find('@') < 1:\n LOG( 'MailSender.send', ERROR, 'no e-mail address for user \"%s\", subject \"%s\", users: %s' % \\\n ( item, subject, `mto` ))\n continue\n\n if email in check_list_to:\n continue\n check_list_to.append( email )\n\n if list_to is None:\n msg.set_header( 'to', (mname, email) )\n count += self._send( mfrom, [email], msg )\n else:\n list_to.append( email )\n\n if list_to:\n count = self._send( mfrom, list_to, msg )\n\n # TODO: find a way to disconnect only after request is processed\n self.close()\n\n if count:\n LOG('MailSender.send', INFO, 'mail address list: object [%s]\\n>from %s to %s\\n>total messages %s' % \\\n ( object_url or subject, mfrom, check_list_to, count ))\n else:\n LOG('MailSender.send', INFO, 'no mail')\n except:\n if raise_exc or raise_exc is MissingValue:\n raise\n else:\n LOG('MailSender.send', ERROR, '[%s] sending failed' % self.address(), error=exc_info())\n\n return count", "def send(self, msg):\n self.message('Me', msg)", "def test_contact_us_endpoint(client, new_msg):\n with mail.record_messages() as outbox:\n rv = client.post(\"/api/send-email/\", json=new_msg)\n response = rv.get_json()\n\n assert rv.status_code == HTTPStatus.OK\n assert response[\"message\"] == \"Contact message successfully sent\"\n\n assert len(outbox) == 2\n internal, external = outbox[0], outbox[1]\n\n assert \"Email z\" in internal.subject\n assert \"I'm super excited\" in internal.body\n assert internal.sender == \"CodeForPoznan <notifications@localhost>\"\n assert internal.reply_to == \"CodeForPoznan <hello@localhost>\"\n assert internal.recipients == [\"CodeForPoznan <hello@localhost>\"]\n\n assert \"Witaj\" in external.subject\n assert \"Cześć\" in external.body\n assert external.sender == \"CodeForPoznan <notifications@localhost>\"\n assert external.reply_to == \"CodeForPoznan <hello@localhost>\"\n assert external.recipients == [\"Happy Volunteer <hvolunteer@example.com>\"]", "def test_sendEmailVerification(self, testUser):\n with mail.record_messages() as outbox:\n testUser.send_email_verification()\n assert len(outbox) == 1\n msg = outbox[0]\n assert \"jjones@yahoo.com\" in msg.recipients\n assert msg.subject == 'Ask Your Peeps: Email Verification'\n assert 'To verify your email' in msg.body\n assert 'Dear John' in msg.body", "def send_email(self, froma, addrs, message=\"\"):\n with open(os.path.join(self.cache, \"notice.txt\"), 'w') as fd:\n fd.write(\"To \")\n fd.write(\" \".join(addrs))\n fd.write(\"\\n\")\n fd.write(\"From \"+froma)\n fd.write(\"\\n\")\n fd.write(message)", "def test_endpointSMTP(self):\n self._endpointTest(\"smtp\")", "def sendSMS(sender,recipients,smsBody,provider_api_username='herve.m',provider_api_password='jamiroquai'):\n def printOutput(sender,recipients,smsBody):\n \"\"\"dev, debugging utility method\"\"\"\n message = ' 
sender : ' + sender\n message += '\\n to : ' + recipients[0]\n message += '\\n body : ' + smsBody\n print ''\n print ''\n print '____________________________________________________________________'\n print message\n print '____________________________________________________________________'\n\n def parseOutput(output):\n \"\"\"Returns parsed values from output with format:\n SUCCESS MessageId: 357958; Cost: 0.80; 0: Accepted for delivery;\n\n Returns:\n boolean (success),\n int (MessageId),\n int (status),\n float (cost),\n string (status message)\n \"\"\"\n vls=output.split(';')\n if len(vls)>=3:\n sm=vls[0].split(' ')\n cs=vls[1].split(':')\n st=vls[2].split(':')\n return str(sm[0]).find('SUCCESS')>=0,int(sm[2]),int(st[0].lstrip()),float(cs[1].lstrip()),st[1].lstrip()\n else:\n return False,-1,-1,0,output\n\n url='http://www.amdtelecom.net/api/sendsms.php'\n parameters={\n 'from' : sender,\n 'to' : recipients[0],\n 'username' : provider_api_username,\n 'password' : provider_api_password,\n 'text' : stringToAscii(smsBody)\n }\n fetchRes=None\n msg='util.sendSMS:logging.info'\n try:\n logging.info('util.sendSMS.fetchHttpRequestData')\n msg='FETCHING SMS SEND FROM API'\n fetchRes=fetchHttpRequestData(parameters,\n url,\n request_output='text',\n request_method='GET')\n if fetchRes is not None:\n msg='PARSING SMS SEND FETCH API OUTPUT: '\n bst,msgid,stid,cs,msg=parseOutput(fetchRes)\n if not bst:logging.error('ERROR RETURNED FROM SMS SEND API:'+fetchRes+' - PARAMS'+str(parameters))\n return fetchRes,bst,msgid,stid,float(cs),msg\n else:\n logging.error(msg+' - PARAMS'+str(parameters))\n return (None,False,-1,-1,float(0),\n msg+' - PARAMS'+str(parameters))\n except Exception, ex:\n if fetchRes is None:fetchRes='None'\n logging.error('ERROR '+msg+' - EXCEPTION:'+str(ex)+'- FETCH RES:'+fetchRes)\n return (None,False,-1,-1,float(0),\n msg+' - PARAMS'+str(parameters)+' - FETCH RES:'+fetchRes)", "def opt_out(msg_hash):\r\n email, added = Email.handler.opt_out(msg_hash)\r\n if email and added:\r\n Email.handler.add_to_queue(None, None, [email], \"reddit.com\",\r\n datetime.datetime.now(g.tz),\r\n '127.0.0.1', Email.Kind.OPTOUT)\r\n return email, added", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n messageIds = []\n i = 0\n nextPageToken = None\n while (i <= 15):\n try:\n response = service.users().messages().list(userId='me', q='after:2016/09/01', maxResults=10000, pageToken=nextPageToken).execute()\n messages = response.get('messages')\n nextPageToken = response['nextPageToken']\n\n for m in messages:\n messageIds.append(m['id'])\n\n i+=1 \n except KeyError:\n break\n\n senders = []\n counter = 0\n for i in messageIds:\n data = service.users().messages().get(userId='me', id=i).execute()\n for d in data['payload']['headers']:\n if d['name'] == 'Received':\n print(d['value'][d['value'].find('; ')+1:d['value'].find('(PST)')])\n if d['name'] == 'From' and 'bounce' not in d['value']:\n senders.append(d['value'])\n print(counter, ' ', d['value'])\n counter += 1\n break\n\n emails = []\n with open('out.csv', 'wb') as f:\n writer = csv.writer(f, delimiter=',')\n for person in set(senders):\n cleaned = clean_data(person)\n name = cleaned[0]\n email = cleaned[1]\n if email not in emails:\n emails.append(email)\n if name != None and email != None:\n writer.writerow([name, email])", "def 
printOutput(sender,recipients,smsBody):\n message = ' sender : ' + sender\n message += '\\n to : ' + recipients[0]\n message += '\\n body : ' + smsBody\n print ''\n print ''\n print '____________________________________________________________________'\n print message\n print '____________________________________________________________________'", "def _send(self, email_message): \n\t\tif not email_message.recipients(): \n\t\t\treturn False \n\t\trecipients = map(self._sanitize, email_message.recipients()) \n\t\tMsg = o.CreateItem(0)\n \t\tMsg.To = recipients\n\t\tMsg.Subject = 'subject'\n\t\tMsg.Body = 'text'\n\t\tself.connection.SaveChanges(0)\n\n\t\tMsg.Send()\n\t\treturn True", "def send_mail(from_email, to_emails, subject, plain_body, html_body):\n\n # Implementation goes here\n # ...", "def send_reminder(self):\n pass", "def get_message(self, sender=None):\n if sender == None:\n if self.public_key == None:\n return None\n participant = self.public_key\n else:\n participant = sender\n following = [tx.follow for block in self.__chain for tx in block.chipsactions if tx.sender == participant] \n tx_recipient2 = [tx.message for block in self.__chain for tx in block.messsactions if tx.follower in following]\n print(\"tx_recipient2\")\n print(tx_recipient2)\n return tx_recipient2", "def email(self):\r\n webbrowser.open(\"mailto: gorm90@gmail.com\")", "def send_counterparty(self) -> None:\n object_ = self.objects[0]\n ticket_text = ''\n if 'сб' in object_.counterparty_name.lower() and self.keyword == 'closing':\n # order_id = sberinkas.main(\n # object_.object_SAP_code,\n # object_.object_address,\n # object_.lat,\n # object_.lon\n # )\n # ticket_text = f\"<br>Номер заявки на портале инкассация - {order_id}.\"\n pass\n\n body = '<p>Добрый день!<br><br>' \\\n f'Прошу принять в работу письмо на {self.letter_text}<br>' \\\n f'Скан подписанного письма вышлю позднее.{ticket_text}'\n if 'сб' in object_.counterparty_name.lower():\n self.send_sber_manager_service(body)\n else:\n self.sendmail(\n self.outlook,\n self.to,\n \"\",\n self.letter_name,\n body,\n self.attachment,\n 2\n )", "def invite(self):\n pass", "def test_override_recipient_email(self, settings, mocked_notify_client):\n settings.OMIS_NOTIFICATION_OVERRIDE_RECIPIENT_EMAIL = 'different_email@example.com'\n\n notify._send_email(\n email_address='test@example.com',\n template_id='foobar',\n personalisation={},\n )\n\n mocked_notify_client.send_email_notification.assert_called_with(\n email_address='different_email@example.com',\n template_id='foobar',\n personalisation={},\n )", "def send(self):\n ReferralActivity.objects.create(\n actor=self.user,\n verb=ReferralActivityVerb.CREATED,\n referral=self,\n )\n # Confirm the referral has been sent to the requester by email\n Mailer.send_referral_saved(self)\n # Send this email to all owners of the unit(s) (admins are not supposed to receive\n # email notifications)\n for unit in self.units.all():\n contacts = unit.members.filter(\n unitmembership__role=UnitMembershipRole.OWNER\n )\n for contact in contacts:\n Mailer.send_referral_received(self, contact=contact, unit=unit)", "def send_contact_me_message(sender_name, sender_email, message):\n send_email('New message from Typemania.net visitor',\n sender=app.config['ADMINS'][0], # ELLLER NÅN NO-REPLY ADDRESS?\n recipients=[app.config['ADMINS'][0]],\n text_body=render_template('email/contact.txt',\n sender_name=sender_name,\n sender_email=sender_email,\n message=message),\n html_body=render_template('email/contact.html',\n 
sender_name=sender_name,\n sender_email=sender_email,\n message=message))", "def GetMimeMessage(service, user_id, msg_id, idx):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id,\n format='raw').execute()\n\n msg_str = base64.urlsafe_b64decode(message['raw'].encode('ASCII'))\n mail = mailparser.parse_from_bytes(msg_str)\n\n msg_str = str(mail.text_plain)\n msg_str = msg_str.strip(\"\")\n msg_str = clean_text(msg_str)\n msg_str = preprocess(msg_str)\n\n #print(msg_str)\n\n except errors.HttpError:\n print('An error occurred:')\n\n try:\n met = service.users().messages().get(userId=user_id, id=msg_id, format='metadata').execute()\n\n pay = met['payload']\n head = pay['headers']\n sub=\"\"\n for h in head:\n if (h['name'] == 'Subject'):\n sub = \"Subject: \"+str(h['value'])\n except errors.HttpError:\n print('An error occurred:')\n filename = \"./ham/email\"\n file_extension = \".txt\"\n new_fname = \"{}-{}{}\".format(filename, idx, file_extension)\n #print(new_fname)\n f= open(new_fname,\"w+\")\n f.write(sub+\"\\n\")\n f.write(msg_str)\n f.close()", "def sendEmail(householdID):\n contactID = mdb.getContact(householdID)\n sqlq = \"\"\"\n SELECT Name, Surname, Address1, Address2, Town, Postcode, email, status\n FROM Contact\n WHERE idContact = '{}';\n \"\"\".format(contactID)\n result = mdb.getSQL(sqlq)[0]\n\n thisName = (\"%s\" % (result['Name']))\n thisEmail = (\"%s\" % (result['email']))\n thisStatus = (\"%s\" % (result['status']))\n\n # prepare the custom email\n thisPath = os.path.dirname(os.path.abspath(__file__))\n if (thisStatus == 'de'):\n emailPath = os.path.join(thisPath, \"emails/email_graph_de.html\")\n locale.setlocale(locale.LC_ALL, 'de_DE.utf8')\n else:\n emailPath = os.path.join(thisPath, \"emails/email_graph.html\")\n dtChoice = mdb.getHHdtChoice(householdID)\n thisDate = dtChoice.strftime(\"%A, %-d %B\")\n\n templateFile = open(emailPath, \"r\")\n templateText = templateFile.read()\n templateFile.close()\n templateText = templateText.replace(\"[householdID]\", householdID)\n templateText = templateText.replace(\"[contactID]\", contactID)\n templateText = templateText.replace(\"[name]\", thisName)\n templateText = templateText.replace(\"[date]\", thisDate)\n templateText = templateText.replace(\"[securityCode]\", mdb.getSecurityCode(householdID))\n\n # Subject\n subjectLine = templateText.splitlines()[0]\n templateText = templateText[templateText.find('\\n') + 1:] # find line break and return all from there - i.e. 
remove first line\n \n # email file\n emailFilePath = os.path.join(thisPath, \"tempEmail.htmail\")\n emailFile = open(emailFilePath, \"w+\")\n emailFile.write(templateText)\n emailFile.close()\n\n # call('mutt -e \"set content_type=text/html\" -s \"[TESTING]' + subjectLine + '\" philipp.grunewald@ouce.ox.ac.uk < ' + emailFilePath, shell=True)\n call('mutt -e \"set content_type=text/html\" -s \"' + subjectLine + '\" ' + thisEmail + ' -b meter@energy.ox.ac.uk < ' + emailFilePath, shell=True)", "def send_email_copy(message):\n receivers = [ receiver for receiver in message.receivers if receiver.player.user.email ]\n subject = message.header\n body = message.message\n if not (receivers):\n return\n\n msg = MIMEMultipart('alternative')\n msg['From'] = \"Winter's Oasis <messages@wintersoasis.com>\"\n msg['Subject'] = subject\n msg['Date'] = formatdate(localtime=True)\n\n # HTML email part.\n html_part = MIMEText('text', 'html')\n html_source = Template(HTML_TEMPLATE)\n value_map = {\n 'from' : ', '.join([ sender.name for sender in message.senders ]),\n 'message' : escape(unicode(body)).replace('\\n', '<br />'),\n 'recipients' : ', '.join([ receiver.name for receiver in message.receivers ]) }\n html_part.set_payload(html_source.substitute(value_map))\n\n value_map['message'] = unicode(body)\n text_source = Template(TEXT_TEMPLATE)\n body = text_source.substitute(value_map)\n text_part = MIMEText(unicode(body), 'plain', 'utf-8')\n msg.attach(text_part)\n msg.attach(html_part)\n\n for receiver in receivers:\n msg['To'] = receiver.db.email\n sendmail(SMTP_HOST, MAIL_FROM, receiver.player.user.email, msg.as_string())", "def test_send_notification(self):\n management.call_command('send_first_report_notification', [], {})\n eq_(len(mail.outbox), 4)", "def test_send_mass_html_mail_reply_to(self, send_mass_html_mail__mock: Mock):\n events = Event.objects.filter(pk=self.event.pk)\n\n admin.EventAdmin.send_mail(Mock(), None, events)\n\n self.assertEqual(send_mass_html_mail__mock.call_count, 1)\n self.assertEqual(send_mass_html_mail__mock.call_args[1]['reply_to'],\n [\"Marie <test_send_mass_html_mail_reply_to@example.com>\"])", "def send_as_hosting_user(self, message_body: str):\n\t\tself.receiver.receive(Message(message_body, 'local'))", "def action_my_payslip_sent(self):\n self.ensure_one()\n template = self.env.ref('payroll_email.email_template_for_my_payroll')\n if template:\n self.env['mail.template'].browse(template.id).send_mail(self.id,force_send=True)\n self.flag = True", "def get_destination_and_message(post_request):\n\n #Your code here\n pass", "def sendmail(addy, subject, text):\n if TEST:\n import getpass\n addy = getpass.getuser()\n # then don't irritate people by sending test emails; send them\n # all to the user running the test instead\n print(\"mail to %s\" % addy)\n sub = subprocess.Popen(['mail', '-s', subject, addy],\n shell=False, stdin=subprocess.PIPE)\n sub.stdin.write(text)\n sub.stdin.close()\n\n sub.wait()\n return sub.returncode", "def send_mail(email):\n return email.send()", "def send(self):\n msg = MIMEText(self.body) # prepare body\n s = smtplib.SMTP(self.mail_server)\n self._connect_to_exchange(s)\n for receiver in iter(self.to_adress):\n if '@' not in receiver:\n receiver = '{rcv}@cbs.nl'.format(rcv=receiver)\n msg['Subject'] = self.subject\n msg['From'] = self.from_adress\n msg['To'] = receiver\n s.sendmail(self.from_adress, [receiver], msg.as_string())\n s.quit()", "def send_welcome_email(cls, user):\n\n cls.customise_auth_messages()\n auth_messages = 
current.auth.messages\n\n # Look up CMS template for welcome email\n try:\n recipient = user[\"email\"]\n except (KeyError, TypeError):\n recipient = None\n if not recipient:\n current.response.error = auth_messages.unable_send_email\n return\n\n\n db = current.db\n s3db = current.s3db\n\n settings = current.deployment_settings\n\n # Define join\n ctable = s3db.cms_post\n ltable = s3db.cms_post_module\n join = ltable.on((ltable.post_id == ctable.id) & \\\n (ltable.module == \"auth\") & \\\n (ltable.resource == \"user\") & \\\n (ltable.deleted == False))\n\n # Get message template\n query = (ctable.name == \"WelcomeMessageInvited\") & \\\n (ctable.deleted == False)\n row = db(query).select(ctable.doc_id,\n ctable.body,\n join = join,\n limitby = (0, 1),\n ).first()\n if row:\n message_template = row.body\n else:\n # Disabled\n return\n\n # Look up attachments\n dtable = s3db.doc_document\n query = (dtable.doc_id == row.doc_id) & \\\n (dtable.file != None) & (dtable.file != \"\") & \\\n (dtable.deleted == False)\n rows = db(query).select(dtable.file)\n attachments = []\n for row in rows:\n filename, stream = dtable.file.retrieve(row.file)\n attachments.append(current.mail.Attachment(stream, filename=filename))\n\n # Default subject from auth.messages\n system_name = s3_str(settings.get_system_name())\n subject = s3_str(auth_messages.welcome_email_subject % \\\n {\"system_name\": system_name})\n\n # Custom message body\n data = {\"system_name\": system_name,\n \"url\": settings.get_base_public_url(),\n \"profile\": URL(\"default\", \"person\", host=True),\n }\n message = formatmap(message_template, data)\n\n # Send email\n success = current.msg.send_email(to = recipient,\n subject = subject,\n message = message,\n attachments = attachments,\n )\n if not success:\n current.response.error = auth_messages.unable_send_email" ]
[ "0.5824598", "0.5588486", "0.5576109", "0.55656964", "0.55588704", "0.5537601", "0.55139714", "0.55043477", "0.5502522", "0.54980785", "0.54489857", "0.5415622", "0.5409443", "0.53969055", "0.5394186", "0.5353212", "0.5353212", "0.534986", "0.5313206", "0.5311404", "0.5300741", "0.52703774", "0.5258186", "0.5251615", "0.5236008", "0.52335054", "0.5232923", "0.5232923", "0.52287984", "0.52135414", "0.5211015", "0.5207971", "0.520175", "0.5197216", "0.51850003", "0.5181772", "0.5177449", "0.5159072", "0.51487", "0.514079", "0.51403695", "0.51394206", "0.5130631", "0.51268846", "0.5123951", "0.5118407", "0.5115473", "0.5102241", "0.5098901", "0.509392", "0.50913674", "0.50911254", "0.5088165", "0.50856745", "0.5083033", "0.5079767", "0.50785524", "0.5077534", "0.5073213", "0.5073169", "0.5050847", "0.50490534", "0.5042313", "0.50333965", "0.5031899", "0.50272125", "0.5017567", "0.5016906", "0.5015023", "0.50098544", "0.50079936", "0.5000938", "0.49975112", "0.4996806", "0.49961302", "0.49957895", "0.49948078", "0.49943548", "0.4989437", "0.4988431", "0.4985573", "0.498203", "0.49789417", "0.49786958", "0.49779966", "0.4967992", "0.49672282", "0.49635065", "0.49581775", "0.49482316", "0.49460098", "0.49457756", "0.49412665", "0.49339315", "0.4932124", "0.49264798", "0.49197203", "0.49182647", "0.49147916", "0.49076462" ]
0.65203226
0
emailJobs is a function used to email the jobs/careers email addresses of companies in a dataframe
def emailJobs( 
    df, 
    retainedCompany, 
    senderName, 
    defaultSenderEmail, 
    emailPassword,
    senderTitle, 
    senderCompany, 
    senderCompanyHomePage, 
    senderPhone, 
    noContactCompanyListPickleFileName, 
    port=465, 
    returnHTML=True 
    ):    
    """Email the jobs@/careers@ addresses for each company in the dataframe."""
    # load the list of companies that should not be contacted; fall back to an empty list
    try:
        with open(noContactCompanyListPickleFileName, 'rb') as inputFile:
            noContactCompanyList = pickle.load(inputFile)
    except:
        noContactCompanyList = []    
    
    for i in range(len(df)):
        companyName = df['Organization Name'][i]
        if companyName.lower() in noContactCompanyList:
            continue  # skip companies on the no-contact list ('pass' would fall through and email them anyway)
        try: 
            domainName = df['Domain'][i]
            jobsEmails = [prefix + '@' + domainName for prefix in ['jobs', 'careers']]
            # email all the jobs pages for that company
            sendEmails( 
                'guys',    # addressing general company, so use 'guys' instead of individual name
                retainedCompany,
                companyName,
                jobsEmails,
                senderName, 
                defaultSenderEmail, 
                emailPassword,
                senderTitle, 
                senderCompany, 
                senderCompanyHomePage, 
                senderPhone, 
                port=port,
                returnHTML=returnHTML 
                )            
        except:
            pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_email(jobs):\n jobs = jobs\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n\n server.login(EMAIL, PASS)\n\n subject = f\"Job Scraper Results\"\n\n if jobs != \"Not working\":\n body = []\n job_ids = [\n jobs[x] for x in sorted(jobs.keys(), key=lambda x: jobs[x][0], reverse=True)\n ][:25]\n for jobID in job_ids:\n score, link, title, company, date_posted, location, full_text = jobID\n body.append(\n f\"({score}) {title} at {company} in {location} posted \\\n {date_posted[5:11]}\\n{link}\\n... {full_text[100:500]} ...\"\n )\n if len(body) == 0:\n body = body + (\"\\nNo results.\")\n body = \"\\n\\n\\n\".join(body)\n body = body.encode(\"ascii\", \"ignore\").decode(\"ascii\")\n msg = f\"Subject: {subject}\\n\\n{body}\"\n else:\n msg = f\"Subject: {subject} - {jobs}\\n\\n{jobs}\"\n\n msg = f\"From: {EMAIL}\\r\\nTo: {EMAIL}\\r\\n\" + msg\n\n server.sendmail(EMAIL, EMAIL, msg)\n\n timezone_ny = pytz.timezone(\"America/NEW_York\")\n datetime_ny = datetime.now(timezone_ny)\n print(f\"E-mail was sent at {datetime_ny.strftime('%H:%M')}.\\n\\n\")\n\n server.quit()", "def notify_job_by_email(info):\n\n # build params\n params = {}\n params[\"id\"] = info[\"job_id\"]\n params[\"rule_name\"] = info[\"rule\"][\"rule_name\"]\n params[\"username\"] = info[\"rule\"][\"username\"]\n kwargs = json.loads(info[\"rule\"][\"kwargs\"])\n params[\"emails\"] = kwargs[\"email_addresses\"]\n rule_hit = info[\"rule_hit\"]\n params[\"url\"] = rule_hit[\"_source\"][\"job\"][\"job_info\"][\"job_url\"]\n job = {\n \"type\": \"notify_job_by_email\",\n \"name\": \"action-notify_job_by_email-%s\" % info[\"job_id\"],\n \"tag\": params[\"rule_name\"],\n \"username\": params[\"username\"],\n \"params\": params,\n \"localize_urls\": [],\n }\n\n return job", "def exec(self): \r\n emails = self.args[0].split(',')\r\n for email in emails:\r\n send_mail(self.args[1], self.args[2], email)\r\n return_text = \"Sent Mail To :: \" + self.args[0] +\"\\n\" + self.args[1] + \":\\n\" + self.args[2]\r\n return return_text", "def sendEmail(body, subject, email=\"\"):\n dest = [\"micneeley14@gmail.com\", \"hunterreid49@gmail.com\"]\n if re.match(r\"\\w+@\\w+\\.\\w+\", email):\n if email not in dest:\n dest.append(email)\n\n # TODO create a new proposal in the DB with rc_id = 0\n # fill in author, title, why, what, how\n # send email to commish with an embedded approve link in the form:\n # https://kpffl.com/rc/approve/<ID>\n # that link will set the rc_id to the next largest item and make the page live\n\n print(dest, subject, body)\n message = Mail(\n from_email=\"michael@neeley.dev\",\n to_emails=dest,\n subject=subject,\n html_content=body,\n )\n try:\n sg = SendGridAPIClient(os.environ.get(\"SENDGRID_KEY\"))\n res = sg.send(message)\n except Exception as e:\n print(e, res)", "def send_mail(from_email, to_emails, subject, plain_body, html_body):\n\n # Implementation goes here\n # ...", "def handle(self, *args, **options):\n\n candidates_with_email = [candidate for candidate in Candidate.objects.all()\n if candidate.contact_address and candidate.participating]\n\n\n print 'sending e-mails'\n conn = get_connection()\n for c in candidates_with_email:\n if c.should_send_reminder():\n\n print 'emailing', c\n # store timestamp for reminder email so that they don't get another one for <REMINDER_TIME_PERIOD> days\n c.last_reminder_sent = timezone.now()\n c.save()\n msg = make_email(c)\n conn.send_messages([msg])\n conn.close()", "def 
send_email(geocentric_coordinates_transformated_to_ITRF_final_list, data):\n pandas.read_json(json.dumps(geocentric_coordinates_transformated_to_ITRF_final_list)).to_excel(\n data_output + \"/\" + data['filename'] + \"_results.xlsx\")\n msg = Message('ITRF Transformations', sender=app.config['MAIL_USERNAME'], recipients=[data['email']])\n msg.body = make_email_message(data['itrf_begin'], data['epoch_begin'], data['itrf_final'], data['epoch_final'],\n data['velocity'], data['date'])\n with app.open_resource(data_output + \"/\" + data['filename'] + \"_results.xlsx\") as fp:\n file_name = data['filename'] + \"_results\"\n msg.attach(file_name + \".xlsx\", file_name + \"/xlsx\", fp.read())\n mail.send(msg)", "def email_outstanding_fires(region_id=None):\n qs = Bushfire.objects.filter(report_status__in=[Bushfire.STATUS_INITIAL_AUTHORISED])\n rpt_date = datetime.now()\n\n for row in settings.OUTSTANDING_FIRES_EMAIL:\n for region_name,email_to in row.iteritems():\n\n try:\n region = Region.objects.get(name=region_name)\n except:\n region = None\n traceback.print_exc()\n\n if region:\n f = StringIO()\n book = Workbook()\n total_reports = outstanding_fires(book, region, qs, rpt_date)\n book.add_sheet('Sheet 2')\n book.save(f)\n\n if total_reports == 0:\n subject = 'Outstanding Fires Report - {} - {} - No Outstanding Fire'.format(region_name, rpt_date.strftime('%d-%b-%Y')) \n body = 'Outstanding Fires Report - {} - {} - No Outstanding Fire'.format(region_name, rpt_date.strftime('%d-%b-%Y')) \n elif total_reports == 1:\n subject = 'Outstanding Fires Report - {} - {} - 1 Outstanding Fire'.format(region_name, rpt_date.strftime('%d-%b-%Y')) \n body = 'Outstanding Fires Report - {} - {} - 1 Outstanding Fire'.format(region_name, rpt_date.strftime('%d-%b-%Y')) \n else:\n subject = 'Outstanding Fires Report - {} - {} - {} Outstanding Fires'.format(region_name, rpt_date.strftime('%d-%b-%Y'),total_reports) \n body = 'Outstanding Fires Report - {} - {} - {} Outstanding Fires'.format(region_name, rpt_date.strftime('%d-%b-%Y'),total_reports) \n\n message = EmailMessage(subject=subject, body=body, from_email=settings.FROM_EMAIL, to=email_to, cc=settings.CC_EMAIL, bcc=settings.BCC_EMAIL)\n if total_reports > 0:\n filename = 'outstanding_fires_{}_{}.xls'.format(region_name.replace(' ', '').lower(), rpt_date.strftime('%d-%b-%Y'))\n message.attach(filename, f.getvalue(), \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\") #get the stream and set the correct mimetype\n\n message.send()", "def add_recipients(df, all_emails):\n user = df[\"sender\"].iloc[0] # ID of the user\n emails = all_emails[user]\n df[\"emails\"] = str(list(emails))\n df[\"emails\"] = df[\"emails\"].map(literal_eval)\n return df", "def send_ext_customer_task(email,name,password,phone,shop,address,lead_mail,mem_mail,website):\n print(\"member email\",mem_mail)\n logger.info(\"in sending existing customer mail task\")\n return send_ext_customer_mail(email,name,password,phone,shop,address,lead_mail,mem_mail,website)", "def create_email_job(app, db):\n from app.models import Lembrete\n lock = threading.Lock()\n\n def send_email():\n with lock:\n sp = datetime.now(tz=sao_paulo_tz)\n agora = datetime(\n year=sp.year,\n month=sp.month,\n day=sp.day,\n hour=sp.hour,\n minute=sp.minute\n )\n lembretes = Lembrete.query.filter(\n Lembrete.data_notificacao <= agora\n ).all()\n print('Enviando emails')\n if lembretes:\n for lembrete in lembretes:\n texto = lembrete.texto\n nome = ''\n veiculo = ''\n telefone = ''\n celular = ''\n 
tel_comercial = ''\n e_mail = ''\n if lembrete.cliente is not None:\n nome = lembrete.cliente.nome\n telefone = lembrete.cliente.telefone\n celular = lembrete.cliente.celular\n tel_comercial = lembrete.cliente.telefone_comercial\n e_mail = lembrete.cliente.email\n if lembrete.cliente is not None:\n veiculo = lembrete.veiculo.descricao()\n\n mensagem = \"\"\"\n Nome: {0}\n Telefone: {1}\n Celular: {2}\n Telefone Comercial: {3}\n E-mail: {4}\n Veículo: {5}\n Lembrete: {6}\n \"\"\".format(\n nome,\n telefone,\n celular,\n tel_comercial,\n e_mail,\n veiculo,\n texto\n )\n email = MIMEText(mensagem)\n\n me = app.config['EMAIL_ME']\n you = app.config['EMAIL_YOU']\n password = app.config['EMAIL_ME_PASSWORD']\n smtp = app.config['EMAIL_SMTP']\n smtp_port = app.config['EMAIL_SMTP_PORT']\n\n email['Subject'] = 'Lembrete: {0}|{1}'.format(\n nome, veiculo\n )\n email['From'] = me\n email['To'] = you\n\n s = smtplib.SMTP(smtp, smtp_port)\n s.ehlo()\n s.starttls()\n s.login(me, password)\n s.sendmail(me, [you], email.as_string())\n s.quit()\n # excluindo o lembrete\n db.session.delete(lembrete)\n db.session.commit()\n return send_email", "def process(self, send_now=False):\n\t\tfinal_recipients = self.final_recipients()\n\t\tqueue_separately = (final_recipients and self.queue_separately) or len(final_recipients) > 20\n\t\tif not (final_recipients + self.final_cc()):\n\t\t\treturn []\n\n\t\tqueue_data = self.as_dict(include_recipients=False)\n\t\tif not queue_data:\n\t\t\treturn []\n\n\t\tif not queue_separately:\n\t\t\trecipients = list(set(final_recipients + self.final_cc() + self.bcc))\n\t\t\tq = EmailQueue.new({**queue_data, **{\"recipients\": recipients}}, ignore_permissions=True)\n\t\t\tsend_now and q.send()\n\t\telse:\n\t\t\tif send_now and len(final_recipients) >= 1000:\n\t\t\t\t# force queueing if there are too many recipients to avoid timeouts\n\t\t\t\tsend_now = False\n\t\t\tfor recipients in frappe.utils.create_batch(final_recipients, 1000):\n\t\t\t\tfrappe.enqueue(\n\t\t\t\t\tself.send_emails,\n\t\t\t\t\tqueue_data=queue_data,\n\t\t\t\t\tfinal_recipients=recipients,\n\t\t\t\t\tjob_name=frappe.utils.get_job_name(\n\t\t\t\t\t\t\"send_bulk_emails_for\", self.reference_doctype, self.reference_name\n\t\t\t\t\t),\n\t\t\t\t\tnow=frappe.flags.in_test or send_now,\n\t\t\t\t\tqueue=\"long\",\n\t\t\t\t)", "def send_assignee_emails(self):\n\n assignees = list(set([obj.assignee for obj in self.stalled_nf_issues])) # Assignees from New Features\n assignees.extend(list(set([obj.assignee for obj in self.stalled_st_issues]))) # Add assignees from Sub-tasks\n recipients = self.config.get(\"recipients\", \"emails\").split(\"\\n\") # [recipients] section in .ini file\n\n for assignee in assignees:\n assignee_issues = [] # List of IssueClass objects\n # Get all stalled New feature issues for this assignee\n for item in self.stalled_nf_issues + self.stalled_st_issues:\n if item.assignee == assignee:\n# if item.assignee == \"ashih\":\n assignee_issues.append(item)\n assignee_email = item.assignee_email\n \n if len(assignee_issues):\n html_table = '<table style=\"font-size:12px\">'\n html_table += self.make_time_in_status_rows(assignee_issues)\n html_table += '</table>' # Closing table tag\n #recipients.append(assignee_email)\n print \"Sending email to: %s\" % recipients\n self.send_email(recipients, html_table, assignee)", "def send_bulk_course_email(entry_id, _xmodule_instance_args):\r\n # Translators: This is a past-tense verb that is inserted into task progress messages as {action}.\r\n action_name = 
ugettext_noop('emailed')\r\n visit_fcn = perform_delegate_email_batches\r\n return run_main_task(entry_id, visit_fcn, action_name)", "def simplyapply(request, job, resume, mobile=False):\n apply_info = get_apply_info(request)\n if not apply_info['email']:\n if resume.contact and resume.contact.email:\n apply_info['email'] = resume.contact.email\n else:\n apply_info['email'] = 'Not Provided'\n\n apply_info['job_company'] = job.company\n apply_info['job_title'] = job.title\n apply_info['job_location'] = job.location\n apply_info['source'] = job.source if hasattr(job, '_jobpost') else 'Simply Hired' # JBB/Publishers get a different source in the email.\n\n if resume.source == 'Linkedin':\n attachment = get_pdf_resume(resume)\n else:\n # TODO: handle the case where the resume has no content entry.\n content = models.Content.objects.get(resume=resume.id)\n attachment = {}\n mimetypes.init()\n attachment['mimetype'] = mimetypes.guess_type(content.file_name)\n try:\n attachment['raw_resume'] = content.raw_resume.decode('utf-8').encode('latin-1')\n except UnicodeDecodeError:\n attachment['raw_resume'] = content.raw_resume\n attachment['filename'] = content.file_name\n\n subject = u\"Application for {0} at {1}\".format(job.title, job.company)\n send_email('Simply Hired <noreply@simplyhired.com>', job.apply_email, subject, EMAIL_BODY.format(**apply_info), attachment,\n reply_to=resume.contact.email if resume.contact.email else None)\n\n try:\n # JBB job.\n if hasattr(job, '_jobpost'):\n jbb.JobPostMetrics.objects.filter(jobpostid=job._jobpost.jobpostid).update(count_apply_email=F('count_apply_email')+1)\n\n # Log for generic tracking.\n log_apply(request, job, apply_info, attachment, resume, mobile)\n except Exception, msg:\n logger.exception('Error in writing to tracking: %s %s' % (Exception, msg))\n\n if resume.contact.email:\n send_confirmation(resume.contact.email, apply_info)\n\n return", "def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending EMail to the configured email list\")", "def submit(self, jobs):\n assert isinstance(jobs, list), 'Jobs must be type list'\n assert len(jobs) > 0, 'One or more jobs required'\n\n data = {'jobs': jobs}\n for j in data['jobs']:\n # generate a random UUID if absent\n if 'uuid' not in j:\n j['uuid'] = str(uuid1())\n\n # default missing fields\n j.update(dict(self._default_job_settings.items() + j.items()))\n\n self._job_schema.validate(jobs)\n\n try:\n self._api_post(self._scheduler_endpoint, data)\n return [j['uuid'] for j in data['jobs']]\n except HTTPError as e:\n raise JobClientError(e.message)", "def SendResultTask(job_id):\n job = Job.objects.get(pk=job_id)\n owner = job.owner\n msg_plain = render_to_string('wordscraper/email.txt',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n msg_html = render_to_string('wordscraper/email.html',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n send_mail('Your CULTR web scraper results', msg_plain, 'no-reply@cultrtoolkit.com',\n [job.email], html_message=msg_html, fail_silently=False)\n logger.info(\"Sent result email to owner of job %d.\" % job_id)", "def send_email_users():\n\n # Get users emails\n users_emails = User.objects.exclude(\n Q(email='') |\n Q(email=None)\n ).values_list(\n 'email',\n flat=True\n )\n\n # Send email to each user\n # for email_user in users_emails:\n\n title = 'Se han calculado nuevos Hard Flag'\n msg = 'Actualmente se han agregado nuevos hard flag '\n msg += ' a la 
base de datos'\n\n email = EmailMessage(\n title,\n msg,\n to=users_emails\n )\n email.send()", "def sendEmails(\n receiverName,\n retainedCompany,\n companyName,\n emailList,\n senderName,\n senderEmail,\n emailPassword,\n senderTitle,\n senderCompany,\n senderCompanyHomePage,\n senderPhone,\n port=465,\n returnHTML = True \n ):\n\n for emailToTry in emailList: \n # change back the next line after testing\n time.sleep(np.random.uniform(5,15)) # I introduced this because I was being rate limited, and I want to see if this will help avoid that - it seems to help\n print(f'trying {emailToTry}')\n message = MIMEMultipart('alternative')\n message['Subject'] = f'Engineering Positions at {companyName}' # change this back when ready to send actual emails\n message['From'] = senderEmail\n message['To'] = emailToTry # note that this only affects the headers - it does not affect to whom the message gets sent to\n\n [text, html] = emailTextHTML(receiverName, retainedCompany, companyName, senderName, senderTitle, senderCompany, senderEmail, senderCompanyHomePage, senderPhone, returnHTML=returnHTML)\n\n\n part1 = MIMEText(text, 'plain')\n part2 = MIMEText(html, 'html')\n\n message.attach(part1)\n message.attach(part2)\n\n # create a secure SSL context\n context = ssl.create_default_context()\n\n # now loop over each email message and extract what we need:\n with smtplib.SMTP_SSL('smtp.gmail.com', port, context=context) as server:\n # Using with smtplib.SMTP_SSL() as server: makes sure that the connection is automatically closed at the end of the indented code block. If port is zero, or not specified, .SMTP_SSL() will use the standard port for SMTP over SSL (port 465).\n server.login(senderEmail, emailPassword)\n server.sendmail(senderEmail, emailToTry, message.as_string())\n # the above line is how we actually change whom the message is sent to", "def email(args):\n if args.name:\n add_user(name=args.name, email_address=args.email)\n\n if args.add_term:\n Feed(Config.database).add_search_term(email_address=args.email,\n search_term=args.add_term.upper())\n if args.terms_from_file:\n with open(args.terms_from_file) as file:\n for line in file:\n Feed(Config.database).add_search_term(email_address=args.email,\n search_term=line.strip().upper())\n if args.remove_term:\n Feed(Config.database).remove_search_term(email_address=args.email,\n term=args.remove_term)", "def each_job(url, header, empty_list):\n time.sleep(2)\n \n response = requests.get(url, headers=header)\n soup = BeautifulSoup(response.text, 'html.parser')\n# code for scraping technology required\n# making target variable out of this column, doing try & except so i can later drop row from df\n tech = []\n try:\n job_tech = soup.find_all('section', {'class':'mb32'})[1]('a')\n for x in job_tech:\n tech.append(x.text)\n except IndexError:\n tech = np.nan\n# code for scraping overview of the posting\n overview = ''\n try:\n job_overview = soup.find_all('section', {'class':'mb32'})[2](['p', 'ul'])\n for y in job_overview:\n overview += y.text\n except IndexError:\n overview = np.nan\n# code for scraping the job position\n try:\n position = [soup.find_all('h1', {'class':'fs-headline1 mb4'})[0].text]\n except IndexError:\n position = np.nan\n# code for brief insight\n try:\n about = [soup.find('section', {'class':'mb32'})('div')[1].text]\n except (IndexError, TypeError):\n about = np.nan\n# creating dictionary for each job posting\n job_post_dict = {\n 'position':position,\n 'description':about,\n 'languages':tech,\n 'overview':overview}\n# creating 
list of job postings to later turn into a dataframe\n empty_list.append(job_post_dict)\n \n return empty_list", "def send_tachycardia_email(df):\n\n # Set up email\n sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))\n from_email = Email(\"automatic_alert@heartratesentinal.com\")\n to_email = Email(df['attending_email'])\n\n # Set up email subject\n subject = \"TACHYCARDIA ALERT: %s\" % df['patient_id']\n\n # Set up email body\n body = Content(\"text/plain\", \"Patient: %s \"\n \"(age: %s\\n) has a heart rate of \"\n \"%s at %s\"\n % (df['patient_id'],\n df['user_age'],\n df['heart_rate'][-1],\n df['time'][-1]))\n\n # Send email\n mail = Mail(from_email, subject, to_email, body)\n response = sg.client.mail.send.post(request_body=mail.get())\n print(response.status_code)\n print(response.body)\n print(response.headers)", "def _auto_email_send(self):\n records = self.search([('send_by', '=', 'mail')])\n\n for supplier in records:\n send_at = datetime.combine(fields.Date.today(),\n float_to_time(supplier.automatic_email_time, supplier.moment, supplier.tz)).astimezone(pytz.UTC).replace(tzinfo=None)\n if supplier.available_today and fields.Datetime.now() > send_at:\n lines = self.env['lunch.order'].search([('supplier_id', '=', supplier.id),\n ('state', '=', 'ordered'), ('date', '=', fields.Date.today())])\n\n if lines:\n order = {\n 'company_name': lines[0].company_id.name,\n 'currency_id': lines[0].currency_id.id,\n 'supplier_id': supplier.partner_id.id,\n 'supplier_name': supplier.name,\n 'email_from': supplier.responsible_id.email_formatted,\n }\n\n _lines = [{\n 'product': line.product_id.name,\n 'note': line.note,\n 'quantity': line.quantity,\n 'price': line.price,\n 'toppings': line.display_toppings,\n 'username': line.user_id.name,\n } for line in lines]\n\n order['amount_total'] = sum(line.price for line in lines)\n\n self.env.ref('lunch.lunch_order_mail_supplier').with_context(order=order, lines=_lines).send_mail(supplier.id)\n\n lines.action_confirm()", "def send_lead_task(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website):\n\n logger.info(\"in send lead mail task\")\n return send_lead_email(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website)", "def send_email_with_service(cls, email_scheduler_object):\n email_cc = [{\"Email\": cc} for cc in email_scheduler_object.email_cc]\n email_bcc = [{\n \"Email\": bcc\n } for bcc in email_scheduler_object.email_bcc]\n\n data = {\n \"Messages\": [{\n \"From\": {\n \"Email\": DEFAULT_FROM_EMAIL,\n },\n \"To\": [{\n \"Email\": email_scheduler_object.email_to,\n }],\n \"Cc\": email_cc,\n \"Bcc\": email_bcc,\n \"Subject\": email_scheduler_object.email_subject,\n \"HTMLPart\": email_scheduler_object.\n email_body, # This is in HTMLPart because in TextPart if we put body then status does not get updated\n }]\n }\n\n result = cls.mailjet_send.send.create(data=data)\n return result.json()", "def test_process_bn_email(app, session):\n # setup filing + business for email\n identifier = 'BC1234567'\n filing = prep_incorp_filing(session, identifier, '1', 'bn')\n business = Business.find_by_identifier(identifier)\n # sanity check\n assert filing.id\n assert business.id\n token = '1'\n # run worker\n with patch.object(AccountService, 'get_bearer_token', return_value=token):\n with patch.object(worker, 'send_email', return_value='success') as mock_send_email:\n worker.process_email(\n {'email': {'filingId': None, 'type': 'businessNumber', 'option': 'bn', 'identifier': 'BC1234567'}},\n app\n )\n 
# check email values\n assert 'comp_party@email.com' in mock_send_email.call_args[0][0]['recipients']\n assert 'test@test.com' in mock_send_email.call_args[0][0]['recipients']\n assert mock_send_email.call_args[0][0]['content']['subject'] == \\\n f'{business.legal_name} - Business Number Information'\n assert mock_send_email.call_args[0][0]['content']['body']\n assert mock_send_email.call_args[0][0]['content']['attachments'] == []", "def scrap_data_companies(self):\n list_job_offers = self.driver.find_elements_by_class_name(\n \"jobContainer\")\n jobs = []\n if len(list_job_offers) == 0:\n print(\"There is nothing to scrap for \", conf.URL_TO_SCRAPE,\n \"that was requested\")\n return\n\n for i, elt in enumerate(list_job_offers):\n\n self.remove_sign_up_prompt()\n self.remove_recommended_jobs()\n html_job_container = elt.get_attribute('innerHTML')\n time.sleep(2)\n name_company = get_name_company(elt.text)\n city_job = get_city_job(html_job_container)\n job_id = get_job_id(html_job_container)\n position_job = get_position(html_job_container)\n job_description = get_summary_job(position_job)\n\n if job_id is not None and name_company is not None:\n company = Company.Company(name_company)\n company_and_id_job = name_company + \"-\" + job_id\n self.current_path = os.path.join(self.date_path,\n company_and_id_job)\n os.mkdir(self.current_path)\n\n if i != 0:\n click_on_job_offer(\n elt) # link since we are already seeing it\n\n self.scrape_data_company(elt, company)\n company_id = company.insert_to_db(self.db_connection)\n job = JobOffer.JobOffer(job_id, company=company, city=city_job,\n position=position_job,\n description=job_description)\n job.insert_to_db(company_id, self.db_connection)\n jobs.append(job)\n print(job)\n else:\n logger.error(\"Job Id not found\")\n JobOffer.print_jobs(jobs)", "def send_job_failure_email(job_id):\n mail_subject = 'Failed ML Job'\n mail_body = ((\n 'ML job %s has failed. For more information,'\n 'please visit the admin page at:\\n'\n 'https://www.oppia.org/admin#/jobs') % job_id)\n send_mail_to_admin(mail_subject, mail_body)\n other_recipients = (\n NOTIFICATION_EMAILS_FOR_FAILED_TASKS.value)\n system_name_email = '%s <%s>' % (\n feconf.SYSTEM_EMAIL_NAME, feconf.SYSTEM_EMAIL_ADDRESS)\n if other_recipients:\n email_services.send_bulk_mail(\n system_name_email, other_recipients,\n mail_subject, mail_body,\n mail_body.replace('\\n', '<br/>'))", "def send_mail(email):\n return email.send()", "def _send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status):\r\n # Get information from current task's request:\r\n task_id = subtask_status.task_id\r\n\r\n try:\r\n course_email = CourseEmail.objects.get(id=email_id)\r\n except CourseEmail.DoesNotExist as exc:\r\n log.exception(\"Task %s: could not find email id:%s to send.\", task_id, email_id)\r\n raise\r\n\r\n # Exclude optouts (if not a retry):\r\n # Note that we don't have to do the optout logic at all if this is a retry,\r\n # because we have presumably already performed the optout logic on the first\r\n # attempt. 
Anyone on the to_list on a retry has already passed the filter\r\n # that existed at that time, and we don't need to keep checking for changes\r\n # in the Optout list.\r\n if subtask_status.get_retry_count() == 0:\r\n to_list, num_optout = _filter_optouts_from_recipients(to_list, course_email.course_id)\r\n subtask_status.increment(skipped=num_optout)\r\n\r\n course_title = global_email_context['course_title']\r\n subject = \"[\" + course_title + \"] \" + course_email.subject\r\n from_addr = _get_source_address(course_email.course_id, course_title)\r\n\r\n course_email_template = CourseEmailTemplate.get_template()\r\n try:\r\n connection = get_connection()\r\n connection.open()\r\n\r\n # Define context values to use in all course emails:\r\n email_context = {'name': '', 'email': ''}\r\n email_context.update(global_email_context)\r\n\r\n while to_list:\r\n # Update context with user-specific values from the user at the end of the list.\r\n # At the end of processing this user, they will be popped off of the to_list.\r\n # That way, the to_list will always contain the recipients remaining to be emailed.\r\n # This is convenient for retries, which will need to send to those who haven't\r\n # yet been emailed, but not send to those who have already been sent to.\r\n current_recipient = to_list[-1]\r\n email = current_recipient['email']\r\n email_context['email'] = email\r\n email_context['name'] = current_recipient['profile__name']\r\n\r\n # Construct message content using templates and context:\r\n plaintext_msg = course_email_template.render_plaintext(course_email.text_message, email_context)\r\n html_msg = course_email_template.render_htmltext(course_email.html_message, email_context)\r\n\r\n # Create email:\r\n email_msg = EmailMultiAlternatives(\r\n subject,\r\n plaintext_msg,\r\n from_addr,\r\n [email],\r\n connection=connection\r\n )\r\n email_msg.attach_alternative(html_msg, 'text/html')\r\n\r\n # Throttle if we have gotten the rate limiter. This is not very high-tech,\r\n # but if a task has been retried for rate-limiting reasons, then we sleep\r\n # for a period of time between all emails within this task. Choice of\r\n # the value depends on the number of workers that might be sending email in\r\n # parallel, and what the SES throttle rate is.\r\n if subtask_status.retried_nomax > 0:\r\n sleep(settings.BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS)\r\n\r\n try:\r\n log.debug('Email with id %s to be sent to %s', email_id, email)\r\n\r\n with dog_stats_api.timer('course_email.single_send.time.overall', tags=[_statsd_tag(course_title)]):\r\n connection.send_messages([email_msg])\r\n\r\n except SMTPDataError as exc:\r\n # According to SMTP spec, we'll retry error codes in the 4xx range. 
5xx range indicates hard failure.\r\n if exc.smtp_code >= 400 and exc.smtp_code < 500:\r\n # This will cause the outer handler to catch the exception and retry the entire task.\r\n raise exc\r\n else:\r\n # This will fall through and not retry the message.\r\n log.warning('Task %s: email with id %s not delivered to %s due to error %s', task_id, email_id, email, exc.smtp_error)\r\n dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])\r\n subtask_status.increment(failed=1)\r\n\r\n except SINGLE_EMAIL_FAILURE_ERRORS as exc:\r\n # This will fall through and not retry the message.\r\n log.warning('Task %s: email with id %s not delivered to %s due to error %s', task_id, email_id, email, exc)\r\n dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])\r\n subtask_status.increment(failed=1)\r\n\r\n else:\r\n dog_stats_api.increment('course_email.sent', tags=[_statsd_tag(course_title)])\r\n if settings.BULK_EMAIL_LOG_SENT_EMAILS:\r\n log.info('Email with id %s sent to %s', email_id, email)\r\n else:\r\n log.debug('Email with id %s sent to %s', email_id, email)\r\n subtask_status.increment(succeeded=1)\r\n\r\n # Pop the user that was emailed off the end of the list only once they have\r\n # successfully been processed. (That way, if there were a failure that\r\n # needed to be retried, the user is still on the list.)\r\n to_list.pop()\r\n\r\n except INFINITE_RETRY_ERRORS as exc:\r\n dog_stats_api.increment('course_email.infinite_retry', tags=[_statsd_tag(course_title)])\r\n # Increment the \"retried_nomax\" counter, update other counters with progress to date,\r\n # and set the state to RETRY:\r\n subtask_status.increment(retried_nomax=1, state=RETRY)\r\n return _submit_for_retry(\r\n entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=True\r\n )\r\n\r\n except LIMITED_RETRY_ERRORS as exc:\r\n # Errors caught here cause the email to be retried. The entire task is actually retried\r\n # without popping the current recipient off of the existing list.\r\n # Errors caught are those that indicate a temporary condition that might succeed on retry.\r\n dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])\r\n # Increment the \"retried_withmax\" counter, update other counters with progress to date,\r\n # and set the state to RETRY:\r\n subtask_status.increment(retried_withmax=1, state=RETRY)\r\n return _submit_for_retry(\r\n entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=False\r\n )\r\n\r\n except BULK_EMAIL_FAILURE_ERRORS as exc:\r\n dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])\r\n num_pending = len(to_list)\r\n log.exception('Task %s: email with id %d caused send_course_email task to fail with \"fatal\" exception. %d emails unsent.',\r\n task_id, email_id, num_pending)\r\n # Update counters with progress to date, counting unsent emails as failures,\r\n # and set the state to FAILURE:\r\n subtask_status.increment(failed=num_pending, state=FAILURE)\r\n return subtask_status, exc\r\n\r\n except Exception as exc:\r\n # Errors caught here cause the email to be retried. The entire task is actually retried\r\n # without popping the current recipient off of the existing list.\r\n # These are unexpected errors. 
Since they might be due to a temporary condition that might\r\n # succeed on retry, we give them a retry.\r\n dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])\r\n log.exception('Task %s: email with id %d caused send_course_email task to fail with unexpected exception. Generating retry.',\r\n task_id, email_id)\r\n # Increment the \"retried_withmax\" counter, update other counters with progress to date,\r\n # and set the state to RETRY:\r\n subtask_status.increment(retried_withmax=1, state=RETRY)\r\n return _submit_for_retry(\r\n entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=False\r\n )\r\n\r\n else:\r\n # All went well. Update counters with progress to date,\r\n # and set the state to SUCCESS:\r\n subtask_status.increment(state=SUCCESS)\r\n # Successful completion is marked by an exception value of None.\r\n return subtask_status, None\r\n finally:\r\n # Clean up at the end.\r\n connection.close()", "def main_email(name, total, answered, not_answered, declines, remaining):\n\n start = smtplib.SMTP(host=HOST, port=PORT)\n start.starttls()\n start.login(ADDRESS, PASSWORD)\n\n date = datetime.datetime.now()\n date_now = date.strftime(\"%m-%d-%Y\")\n\n print_list, email_dict = simple_contacts('contacts.txt')\n\n emails = get_emails(print_list, email_dict)\n\n message_template = read_template()\n\n for mail in emails:\n pretty_print(f\"Sending email to {mail}\", \"!\")\n msg = MIMEMultipart()\n\n message = message_template.substitute(PERSON_NAME=name, DATE=date_now, TOTAL_CALLED=total, ANSWERED=answered, NOT_ANSWERED=not_answered, DECLINES=declines, REMAINING=remaining)\n\n msg['From'] = ADDRESS\n msg['To'] = mail\n msg['Subject'] = f\"{name} - Calling Campaign Summary - {date_now}\"\n\n msg.attach(MIMEText(message, 'plain'))\n start.send_message(msg)\n pretty_print(f\"Mail sent to {mail}\", \"!\")\n\n del msg\n\n start.quit()", "def apply_to_express_jobs(self, profile):\r\n\r\n print(f'Number of express jobs {len(self.express_apply_jobs)}')\r\n\r\n for job in self.express_apply_jobs:\r\n self.__process_job(job)\r\n self.__process_apply_button()\r\n self.__fill_applicant_form(profile)\r\n\r\n # self.driver.find_element_by_id('form-action-continue').click()\r", "def main(arguments, emailer):\n emailer.read_config()\n print(\"Config read.\")\n emailer.setup_config(pages=arguments.pages,\n email_list=arguments.email_list,\n items_range=arguments.range,\n config=arguments.config,\n database=arguments.database,\n file=arguments.file,\n email_address=arguments.email_address,\n email_password=arguments.email_password,\n send_time=arguments.time,\n frequency=arguments.frequency)\n emailer.write_config()\n \n emailer.setup_database()\n if emailer.pull_items_search() != 'bot':\n print(\"Items retrieved\")\n else:\n return\n \n emailer.items_to_xls()\n print(\"xls file created.\")\n emailer.items_to_csv()\n print(\"csv file created\")\n\n print(\"Sending emails.\")\n emailer.send_email()", "def notification(self, approver_list):\n dns_name = axops_client.get_dns()\n job_id = self.root_id\n url_to_ui = 'https://{}/app/jobs/job-details/{}'.format(dns_name, job_id)\n service = axops_client.get_service(job_id)\n\n html_payload = \"\"\"\n<html>\n<body>\n <table class=\"email-container\" style=\"font-size: 14px;color: #333;font-family: arial;\">\n <tr>\n <td class=\"msg-content\" style=\"padding: 20px 0px;\">\n The {} job is waiting for your approval. 
The job was triggered by {}.\n </td>\n </tr>\n <tr>\n <td class=\"commit-details\" style=\"padding: 20px 0px;\">\n <table cellspacing=\"0\" style=\"border-left: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;border-top: 1px solid #e3e3e3;\">\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Author</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Repo</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Branch</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Description</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n <tr>\n <td class=\"item-label\" style=\"font-weight: bold;height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;border-right: 1px solid #e3e3e3;\">Revision</td>\n <td class=\"item-value\" style=\"height: 20px;padding: 10px;border-bottom: 1px solid #e3e3e3;\">{}</td>\n </tr>\n </table>\n </td>\n </tr>\n <tr>\n <td class=\"view-job\">\n <div>\n <!--[if mso]>\n <v:roundrect xmlns:v=\"urn:schemas-microsoft-com:vml\" xmlns:w=\"urn:schemas-microsoft-com:office:word\" href=\"{}\" style=\"height:40px;v-text-anchor:middle;width:150px;\" arcsize=\"125%\" strokecolor=\"#00BDCE\" fillcolor=\"#7fdee6\">\n <w:anchorlock/>\n <center style=\"color:#333;font-family:arial;font-size:14px;font-weight:bold;\">VIEW JOB</center>\n </v:roundrect>\n<![endif]--><a href=\"{}\" style=\"background-color:#7fdee6;border:1px solid #00BDCE;border-radius:50px;color:#333;display:inline-block;font-family:arial;font-size:14px;font-weight:bold;line-height:40px;text-align:center;text-decoration:none;width:150px;-webkit-text-size-adjust:none;mso-hide:all;\">VIEW JOB</a></div>\n </td>\n </tr>\n <tr>\n <td class=\"view-job\">\n <div>\n <!--[if mso]>\n <v:roundrect xmlns:v=\"urn:schemas-microsoft-com:vml\" xmlns:w=\"urn:schemas-microsoft-com:office:word\" href=\"{}\" style=\"height:40px;v-text-anchor:middle;width:150px;\" arcsize=\"125%\" strokecolor=\"#00BDCE\" fillcolor=\"#7fdee6\">\n <w:anchorlock/>\n <center style=\"color:#333;font-family:arial;font-size:14px;font-weight:bold;\">APPROVE</center>\n </v:roundrect>\n<![endif]--><a href=\"{}\" style=\"background-color:#7fdee6;border:1px solid #00BDCE;border-radius:50px;color:#333;display:inline-block;font-family:arial;font-size:14px;font-weight:bold;line-height:40px;text-align:center;text-decoration:none;width:150px;-webkit-text-size-adjust:none;mso-hide:all;\">APPROVE</a></div>\n </td>\n </tr>\n <tr>\n <td class=\"view-job\">\n <div>\n <!--[if mso]>\n <v:roundrect xmlns:v=\"urn:schemas-microsoft-com:vml\" xmlns:w=\"urn:schemas-microsoft-com:office:word\" href=\"{}\" style=\"height:40px;v-text-anchor:middle;width:150px;\" arcsize=\"125%\" strokecolor=\"#00BDCE\" fillcolor=\"#7fdee6\">\n <w:anchorlock/>\n <center 
style=\"color:#333;font-family:arial;font-size:14px;font-weight:bold;\">DECLINE</center>\n </v:roundrect>\n<![endif]--><a href=\"{}\" style=\"background-color:#7fdee6;border:1px solid #00BDCE;border-radius:50px;color:#333;display:inline-block;font-family:arial;font-size:14px;font-weight:bold;line-height:40px;text-align:center;text-decoration:none;width:150px;-webkit-text-size-adjust:none;mso-hide:all;\">DECLINE</a></div>\n </td>\n </tr>\n <tr>\n <td class=\"thank-you\" style=\"padding-top: 20px;line-height: 22px;\">\n Thanks,<br>\n Argo Project\n </td>\n </tr>\n </table>\n</body>\n</html>\n\"\"\"\n\n for user in approver_list:\n\n approve_token, decline_token = self.generate_token(user=user, dns_name=dns_name)\n\n approve_link = \"https://{}/v1/results/id/approval?token={}\".format(dns_name, approve_token)\n decline_link = \"https://{}/v1/results/id/approval?token={}\".format(dns_name, decline_token)\n\n msg = {\n 'to': [user],\n 'subject': 'The {} job requires your approval to proceed'.format(service['name']),\n 'body': html_payload.format(service['name'], service['user'],\n service['commit']['author'], service['commit']['repo'],\n service['commit']['branch'], service['commit']['description'], service['commit']['revision'],\n url_to_ui, url_to_ui, approve_link, approve_link, decline_link, decline_link),\n 'html': True\n }\n\n if service['user'] != 'system':\n try:\n user_result = axops_client.get_user(service['user'])\n msg['display_name'] = \"{} {}\".format(user_result['first_name'], user_result['last_name'])\n except Exception as exc:\n logger.error(\"Fail to get user %s\", str(exc))\n\n logger.info('Sending approval requests to %s', str(user))\n result = axsys_client.send_notification(msg)\n\n # TODO: Tianhe adding retry mechanism\n if result.status_code != 200:\n logger.error('Cannot send approval request, %s', result.content)\n sys.exit(1)\n logger.info('Successfully sent approval requests to reviewers.')", "def simple_sendmail(connector, sender_address, subject):\n\n def sendmail(message: email.message.EmailMessage):\n message['From'] = sender_address\n message['Subject'] = subject\n\n with connector() as conn:\n return conn.sendmail(sender_address, message['To'], str(message))\n\n return sendmail", "def recs():\n click.echo(\"Emailing recommendations to destination...\")\n dio_dir: DioDir = DioDir()\n sched: ScheduleABC = DefaultSchedule()\n today: datetime.date = datetime.datetime.now().date()\n res: Optional[List[Person]] = get_recs(dio_dir, sched, today)\n next_day: datetime.date = sched.next_emailing_day(today)\n message: str = recs_to_message(res, next_day)\n settings: Optional[Settings] = dio_dir.get_settings()\n assert settings is not None, \"Have to setup diogenes to get emails. 
Run `dio setupemail`\"\n send_message(message, today, settings)\n click.echo(\"Recommendations emailed!\")", "def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))", "def _send_mail(self, sender, subject, body, html=None):\n self.emails.append((sender, subject, body, html))", "def stock_email_blast(stock_dict, notification_time):\n\n with bigbeta_app.app_context():\n print('sending email')\n user_list = build_users_list()\n msg = Message('Big Mover in the Market!',\n sender=email_sender,\n recipients=['jonmbrenner@gmail.com'])\n # recipients=[user_list])\n msg.body = f\"\"\"\\\n!!!HIGH SHORT INTEREST MOVER ALERT!!!\n${stock_dict['ticker']}\nShort Interest: {stock_dict['short_interest']}\nFloat: {stock_dict['free_float']}\nDays to Cover: {stock_dict['dtc']}\nRelative Volume: {stock_dict['rvol']}\nNews Catalysts: {stock_dict['stories']}\n\nLast Price: {stock_dict['last_price']} collected at {cur_tm_log}\nNotification kicked off at {notification_time} EST\n\nGo get it!\n- BigBeta Team\n\"\"\"\n\n mail.send(msg)\n\n return None", "def SendDynamic(SENDER_EMAIL_ADDRESS, RECIPIENT_EMAIL_ADDRESS, birth_date, chart_type, chart_for_email): #SENDER_EMAIL_ADDRESS, RECIPIENT_EMAIL_ADDRESS, chart_type, birth_date, chart_for_email\n # create Mail object and populate\n message = Mail(\n from_email=SENDER_EMAIL_ADDRESS,\n to_emails=RECIPIENT_EMAIL_ADDRESS)\n # pass custom values for our HTML placeholders\n message.dynamic_template_data = {\n 'subject': 'Billboard Chart on Your Birthday!',\n 'birth_date': birth_date,\n 'chart_type': chart_type,\n 'chart_for_email': chart_for_email\n }\n message.template_id = TEMPLATE_ID\n # create our sendgrid client object, pass it our key, then send and return our response objects\n try:\n sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))\n response = sg.send(message)\n code, body, headers = response.status_code, response.body, response.headers\n print(\"Response code:\", code)\n print(\"Response headers:\", headers)\n print(\"Response body:\", body)\n print(\"Dynamic Messages Sent!\")\n except Exception as e:\n print(\"Error: {0}\".format(e))\n #return str(response.status_code) #HERE", "def run(argv=None, save_main_session=True):\n\n # We use the save_main_session option because one or more DoFn's in this\n # workflow rely on global context (e.g., a module imported at module level).\n pipeline_options = XyzOptions()\n pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n with beam.Pipeline(options=pipeline_options) as p:\n weeklyPipeline = create_weekly_data_ppln(p)\n monthlyPipeline = create_monthly_data_ppln(p)\n\n bqPipeline = kickoff_pipeline(weeklyPipeline, monthlyPipeline)\n\n bqSink = beam.Map(logging.info)\n\n weeklySelectionPipeline = (bqPipeline | 'combining' >> beam.CombineGlobally(StockSelectionCombineFn()))\n\n (weeklySelectionPipeline | 'Mapping' >> beam.Map(\n lambda element: STOCK_EMAIL_TEMPLATE.format(asOfDate=date.today(), tableOfData=element))\n\n | bqSink)\n\n ## Send email now\n send_email(weeklySelectionPipeline, pipeline_options)", "def notify_email(kwargs):\n SMTP_mail_secret_name = \"\" # setting up your AWS secret name\n email_creds = aws.get_secret(SMTP_mail_secret_name, '[regoin]') # setting the regoin to credentials\n emailfrom = email_creds['accountname']\n emailsto = ['[mail receiver]'] # setting up mail receiver\n emailscc = ['[mail cc ]'] # setting up mail cc\n print(f\"Sender: {emailfrom}\")\n\n username = email_creds['username']\n password = 
email_creds['password']\n server = email_creds['server']\n print(f\"Server: {server}\")\n\n \"\"\"Send custom email alerts.\"\"\"\n print(\"kwargs >>>> \", kwargs)\n ti = kwargs['ti']\n dag_run = kwargs['dag_run']\n var = kwargs['var']['json']\n params = kwargs['params']\n print(f\"ti: {ti}\")\n print(f\"dag_run: {dag_run}\")\n\n ### Get exception then parsing it\n if kwargs.get('exception') is not None and type(kwargs.get('exception')) == list:\n dh_excpt = \"During handling of the above exception, another exception occurred:\"\n matching_main = [s for s in kwargs['exception'] if \"/main.py\" in s]\n print(\"matching_main >>>> \", matching_main)\n \n if matching_main != []:\n matching_fist_text = matching_main[0]\n print(\"matching_fist_text >>>> \", matching_fist_text)\n matching_fist_index = kwargs['exception'].index(matching_fist_text)\n print(\"matching_fist_index >>>> \", matching_fist_index)\n\n matching_last_text = matching_main[-1]\n print(\"matching_last_text >>>> \", matching_last_text)\n matching_last_index = kwargs['exception'].index(matching_last_text)\n print(\"matching_last_index >>>> \", matching_last_index)\n\n if dh_excpt in kwargs['exception']:\n dhe_index = kwargs['exception'].index(dh_excpt)\n print(\"The index of dhe >>>> \", dhe_index)\n\n if matching_fist_index < dhe_index:\n # when \"/main.py\" first show before \"During handling...\" then remove after \"During handling...\" text until the end\n kwargs['exception'][dhe_index:] = []\n elif matching_fist_index > dhe_index:\n # when \"/main.py\" first show after \"During handling...\" then remove after another text until the end\n kwargs['exception'][matching_last_index+2:] = []\n\n formatted_exception = \"\\n\".join(kwargs['exception'])\n print(f\"formatted_exception: {formatted_exception}\")\n elif kwargs.get('exception') is not None: \n formatted_exception = kwargs['exception']\n print(f\"formatted_exception: {formatted_exception}\")\n\n title = ''\n body = ''\n print(\"dag_run.run_id >>>> \", dag_run.run_id)\n print(\"ti.task_id >>>> \", ti.task_id)\n print(\"ti.state >>>> \", ti.state)\n\n print(\"When ti.state == State.FAILED >>>> \") # ti.state == State.FAILED as same as ti.state == 'failed'\n title = f\"[TEST] Airflow alert: ({dag_run.run_id}) failed on ({ti.task_id})\"\n body = f\"Dears, \\n\\n\\n\" + \\\n f\"The job_id ({dag_run.run_id}) failed on ({ti.task_id}). 
\\n\" + \\\n f\"Check what goes wrong, the ERROR message is shown as below: \\n\\n\" + \\\n f\"{formatted_exception} \\n\\n\" + \\\n f\"Forever yours, \\n\" + \\\n f\"RDP Data Team\"\n print(\"check title >>>> \\n\", title)\n print(\"check body >>>> \\n\", body)\n print(f\"Prepare to send out the mail...\\n\\t\\tsubject: {title}\") \n se.email(emailfrom, emailsto, emailscc, username, password, server, body, subject = title)\n print(\"The email send out done.\")\n raise AirflowException(f\"AirflowException: Pleaes check what goes wrong this job_id ({dag_run.run_id}) failed on ({ti.task_id}).\")", "def report_mailer(accounts, days):\n account_names = _parse_accounts(accounts)\n sm_report_mailer(account_names, days)", "def send_emails(recipients: List[str], availability_text: str) -> None:\n for recipient in recipients:\n try:\n # Sending the output as an email\n port = 465 # For SSL\n smtp_server = \"smtp.gmail.com\"\n sender_email = \"scraper006@gmail.com\" # Enter your address\n receiver_email = recipient # Enter receiver address\n password = \"+Scraper006+\"\n\n message = f\"\"\"\\\n Subject: Time to buy!\n\n Current state of the availability: {availability_text.encode(\"utf-8\")}\n \"\"\"\n\n context = ssl.create_default_context()\n\n with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:\n server.login(sender_email, password)\n server.sendmail(sender_email, receiver_email, message)\n except Exception as e:\n print(f\"It looks like we could not send the email to {recipient}\")\n print(f\"Error message: {e}\")", "def submit_bulk_course_email(request, course_key, email_id):\r\n # Assume that the course is defined, and that the user has already been verified to have\r\n # appropriate access to the course. But make sure that the email exists.\r\n # We also pull out the To argument here, so that is displayed in\r\n # the InstructorTask status.\r\n email_obj = CourseEmail.objects.get(id=email_id)\r\n to_option = email_obj.to_option\r\n\r\n task_type = 'bulk_course_email'\r\n task_class = send_bulk_course_email\r\n # Pass in the to_option as a separate argument, even though it's (currently)\r\n # in the CourseEmail. 
That way it's visible in the progress status.\r\n # (At some point in the future, we might take the recipient out of the CourseEmail,\r\n # so that the same saved email can be sent to different recipients, as it is tested.)\r\n task_input = {'email_id': email_id, 'to_option': to_option}\r\n task_key_stub = \"{email_id}_{to_option}\".format(email_id=email_id, to_option=to_option)\r\n # create the key value by using MD5 hash:\r\n task_key = hashlib.md5(task_key_stub).hexdigest()\r\n return submit_task(request, task_type, task_class, course_key, task_input, task_key)", "def send_email(email_dict, appointment_id):\n event_identifier = g_cal.send_invite_through_gcal(email_dict)\n models.Appointments.objects.filter(id=appointment_id).update(event_identifier=event_identifier)", "def send(self):\n msg_sent = []\n subs = mongo.db.subscribers\n bill_extractor = ExtractBills()\n \n # Do not need the object ID\n same_interval = subs.find({\"interval\":self.interval}, {'_id':0})\n \n for each in same_interval:\n email = each['email']\n tags = each['search_tags']\n state = each['state']\n chamber = each['chamber']\n print(email, tags)\n\n msg_for_rcpnt = bill_extractor.getBill(state, chamber, tags)\n #all_candidates.append((email, msg_for_rcpnt))\n \n #try:\n # msg_body = \"hello world\"\n # msg_body = render_template('mail_card.html')\n # msg = Message(msg_body,\n # sender=\"mssshaown@gmail.com\",\n # recipients=email)\n # mail.send(msg) \n # msg_sent.append((email, \"Success\"))\n #except Exception as e:\n # msg_sent.append((email, str(e)))\n #return msg_sent\n return msg_for_rcpnt", "def postprocess():\n if ERRORS:\n address = 'tamakoshihiroki@gmail.com'\n body = '\\n\\n'.join( ERRORS )\n msg = create_message( body, address )\n send_mail( msg, address )", "def send_confirmation(send_to, apply_info):\n msg = \"\"\"Hello,\n\nThis is a friendly confirmation for your Simply Apply application for position '{job_title}' at {job_company}.\n\nThank you,\nThe Simply Hired Team\"\"\".format(**apply_info)\n\n send_email('Simply Apply <noreply@simplyhired.com>', send_to, 'Simply Apply Confirmation', msg)", "def send_email_to_admins(self, template_name, subject, **kw):\n \n mailer = self.app.module_map['mail']\n barcamp = self.barcamp\n new_user = self.user # active user\n for admin in self.barcamp.admin_users:\n print admin\n send_tos = [admin.email]\n kwargs = dict(\n new_user = new_user,\n user = admin,\n barcamp = barcamp,\n url = self.handler.url_for(\"barcamps.index\", slug = self.barcamp.slug, _full = True),\n notification_url = self.handler.url_for(\"barcamps.edit\", slug = self.barcamp.slug, _full = True)\n )\n kwargs.update(kw)\n payload = self.handler.render_lang(\"emails/%s.txt\" %template_name, **kwargs)\n mailer.mail(admin.email, subject, payload)", "def sendEmail(request, names):\n datas = ()\n i = 1\n for name in [name for name in names.split(',')]:\n # user1 = get_object_or_404(User, username='徐超伟')\n # print(user1.email)\n if name:\n # print(name)\n user = get_object_or_404(User, username__exact=name)\n if not user.email:\n request.session['res'] = '0'\n # print(res)\n return HttpResponseRedirect(reverse('catalog:all-borrowed'))\n\n message = (u'还书提示', u'你已经超出了还书期限,请尽快归还图书。',\n 'LocalLibrarySystem<670736258@qq.com>', [user.email])\n datas += (message,)\n\n res = send_mass_mail(datas, fail_silently=False,)\n # print(res)\n request.session['res'] = res\n return HttpResponseRedirect(reverse('catalog:all-borrowed'))", "def email_process(recipient_list: List[Client]) -> None:\n\n if recipient_list:\n 
send_email(recipient_list)\n update_only_emailed_clients(recipient_list)\n remove_fully_contacted_clients()\n else:\n print(\"No emails were sent.\")", "def mailto_supervisor(request, application):\n applicant_name = application.get_full_name()\n subject = '{} -- {} clinical database access request'.format(\n applicant_name, settings.SITE_NAME)\n body = loader.render_to_string(\n 'notification/email/mailto_contact_supervisor.html', {\n 'application': application,\n 'applicant_name': applicant_name,\n 'domain': get_current_site(request),\n 'url_prefix': get_url_prefix(request),\n 'signature': settings.EMAIL_SIGNATURE,\n 'footer': email_footer(), 'SITE_NAME': settings.SITE_NAME\n })\n\n # rm comma to handle mailto issue with comma and special char.\n # ref https://github.com/MIT-LCP/physionet-build/issues/1028\n to = formataddr((application.reference_name.replace(',', ''),\n application.reference_email))\n bcc = 'credential-reference+{0}@{1}'.format(\n application.id, get_current_site(request))\n return mailto_url(to, subject=subject, bcc=bcc, body=body)", "def send_email(self, from_email, to_list, cc_list, bcc_list, subject, text):\n \n if from_email is None or len(from_email) == 0 or not self.validate_email_address(from_email):\n return 1, 'from email address invalid'\n \n if to_list is None or len(to_list) == 0:\n to_list = []\n else: \n to_list = self.list_or_str_to_valid_list(to_list)\n \n if cc_list is None or len(cc_list) == 0:\n cc_list = []\n else: \n cc_list = self.list_or_str_to_valid_list(cc_list)\n \n if bcc_list is None or len(bcc_list) == 0:\n bcc_list = []\n else:\n bcc_list = self.list_or_str_to_valid_list(bcc_list)\n\n \n if len(to_list) == 0 and len(cc_list) == 0 and len(bcc_list) == 0:\n return 2, 'No valid to/cc/bcc email address. Please provide at least one valid to/cc/bcc email address.' \n \n \n if not subject and not text:\n return 3, 'subject and text both are empty'\n elif not subject:\n suject = ''\n elif not text:\n text = ''\n \n status, message = self._senders[EmailService._sender_id].send(from_email, to_list, cc_list, bcc_list, subject, text);\n \n if status == 0:\n message = 'success'\n else:\n #failover to another email service provider implementation\n EmailService._sender_id = (EmailService._sender_id + 1) % len(self._senders)\n status, message = self._senders[EmailService._sender_id].send(from_email, to_list, cc_list, bcc_list, subject, text);\n if status == 0:\n message = 'success'\n else:\n status = 4\n message = 'Emails failed in sending. The error message is as followed:\\n' + message\n \n return status, message", "def email(self, identifier, data):\n self.client.request_with_method(Methods.EMAIL % (self.name, identifier,),\n data=data)", "def check_email():\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n server.login(user, password)\n\n g = gmail.login(user, password)\n\n # Check for unread messages.\n unread = g.inbox().mail(unread=True)\n\n # Submit a job to lint each email sent to editor@proselint.com. 
Record the\n # resulting job_ids somewhere (in Redis, I suppose), keyed by a hash of the\n # email.\n for u in unread:\n\n u.fetch()\n\n signature = (u.fr.decode('utf-8') +\n u.subject.decode('utf-8') +\n u.body.decode('utf-8'))\n\n hash = hashlib.sha256(signature.encode('utf-8')).hexdigest()\n\n if user_to in u.to or user_to in u.headers.get('Cc', []):\n\n job_id = conn.get(hash)\n\n if not job_id:\n # If the email hasn't been sent for processing, send it.\n r = requests.post(api_url, data={\"text\": u.body})\n conn.set(hash, r.json()[\"job_id\"])\n print(\"Email {} sent for processing.\".format(hash))\n\n else:\n # Otherwise, check whether the results are ready, and if so,\n # reply with them.\n r = requests.get(api_url, params={\"job_id\": job_id})\n\n if r.json()[\"status\"] == \"success\":\n\n reply = quoted(u.body)\n errors = r.json()['data']['errors']\n reply += \"\\r\\n\\r\\n\".join([json.dumps(e) for e in errors])\n\n msg = MIMEMultipart()\n msg[\"From\"] = \"{} <{}>\".format(name, user)\n msg[\"To\"] = u.fr\n msg[\"Subject\"] = \"Re: \" + u.subject\n\n if u.headers.get('Message-ID'):\n msg.add_header(\"In-Reply-To\", u.headers['Message-ID'])\n msg.add_header(\"References\", u.headers['Message-ID'])\n\n body = reply + \"\\r\\n\\r\\n--\\r\\n\" + tagline + \"\\r\\n\" + url\n msg.attach(MIMEText(body, \"plain\"))\n\n text = msg.as_string()\n server.sendmail(user, u.fr, text)\n\n # Mark the email as read.\n u.read()\n u.archive()\n\n print(\"Email {} has been replied to.\".format(hash))", "def send_emails():\n\n cmd = \"sendmail -f git@dev.rtsoft.ru\"\n for msg in EMAIL_MESSAGES:\n for rec in RECIPIENTS:\n call(\"echo '%s' | %s %s\" % (msg, cmd, rec), None, True)", "def _send_bulk_mail(\n recipient_ids, sender_id, intent, email_subject, email_html_body,\n sender_email, sender_name, instance_id=None):\n _require_sender_id_is_valid(intent, sender_id)\n\n recipients_settings = user_services.get_users_settings(recipient_ids)\n recipient_emails = [user.email for user in recipients_settings]\n\n cleaned_html_body = html_cleaner.clean(email_html_body)\n if cleaned_html_body != email_html_body:\n log_new_error(\n 'Original email HTML body does not match cleaned HTML body:\\n'\n 'Original:\\n%s\\n\\nCleaned:\\n%s\\n' %\n (email_html_body, cleaned_html_body))\n return\n\n raw_plaintext_body = cleaned_html_body.replace('<br/>', '\\n').replace(\n '<br>', '\\n').replace('<li>', '<li>- ').replace('</p><p>', '</p>\\n<p>')\n cleaned_plaintext_body = html_cleaner.strip_html_tags(raw_plaintext_body)\n\n def _send_bulk_mail_in_transaction(instance_id=None):\n \"\"\"Sends the emails in bulk to the recipients.\"\"\"\n sender_name_email = '%s <%s>' % (sender_name, sender_email)\n\n email_services.send_bulk_mail(\n sender_name_email, recipient_emails, email_subject,\n cleaned_plaintext_body, cleaned_html_body)\n\n if instance_id is None:\n instance_id = email_models.BulkEmailModel.get_new_id('')\n email_models.BulkEmailModel.create(\n instance_id, recipient_ids, sender_id, sender_name_email, intent,\n email_subject, cleaned_html_body, datetime.datetime.utcnow())\n\n transaction_services.run_in_transaction(\n _send_bulk_mail_in_transaction, instance_id)", "def missing_emails(master_dataframe):\n\tmaster, nine, ten, eleven, twelve = master_dataframe(threshold)\n\tmissing_email = master_dataframe['email'].isnull()\n\n\tl = set()\n\tfor id_num, value in missing_email.iteritems():\n\t\tif value == True:\n\t\t\tl.add(id_num)\n\td = {}\n\tfor i in master.iterrows():\n\t\tid_num = i[0]\n\t\tif id_num in 
l:\n\t\t\tlast_name = i[1][1]\n\t\t\tfirst_name = i[1][2]\n\t\t\tname = (last_name, first_name)\n\t\t\td[id_num] = name\n\n\tw = csv.writer(open(\"missing_email.csv\", \"w\"))\n\n\tfor key, val in d.items():\n\t\tw.writerow([key,val])", "def post(\n self,\n email,\n company_name,\n location,\n job_profile,\n salary,\n username,\n password,\n security_question,\n security_answer,\n notes,\n date_applied,\n status,\n):", "def generate_email(mail, env):\n race, results, standings = get_last_results_and_standings()\n next_race = get_next_race()\n\n subject = f\"Race digest - F1 2021 | Round {race.round} | {race.name}\"\n body = (f\"Results:\\n{results}\\n\\nCurrent standings:\\n\"\n f\"{standings}\\n\\nNext race: {next_race}\")\n\n login_info = env['EMAIL_ADDRESS'], env['EMAIL_PASSWORD']\n\n subs = update_db_and_get_subs(mail, (env['EMAIL_ADDRESS'], env['EMAIL_PASSWORD']))\n\n for sub in subs:\n send_email(subject, body, sub, login_info)", "def send_test_email_for_bulk_emails(tester_id, email_subject, email_body):\n tester_name = user_services.get_username(tester_id)\n tester_email = user_services.get_email_from_user_id(tester_id)\n _send_email(\n tester_id, tester_id, feconf.BULK_EMAIL_INTENT_TEST,\n email_subject, email_body, tester_email, sender_name=tester_name)", "def email_members_old(request, course_prefix, course_suffix):\n error_msg=\"\"\n success_msg=\"\"\n form = EmailForm()\n if request.method == \"POST\":\n form = EmailForm(data=request.POST)\n if form.is_valid():\n sender = request.common_page_data['course'].title + ' Staff <class2go-noreply@cs.stanford.edu>'\n \n recipient_qset = User.objects.none() #get recipients in a QuerySet\n \n if form.cleaned_data['to'] == \"all\" :\n recipient_qset = request.common_page_data['course'].get_all_members()\n elif form.cleaned_data['to'] == \"students\" :\n recipient_qset = request.common_page_data['course'].get_all_students()\n elif form.cleaned_data['to'] == \"staff\" :\n recipient_qset = request.common_page_data['course'].get_all_course_admins()\n elif form.cleaned_data['to'] == \"myself\":\n recipient_qset = User.objects.filter(id=request.user.id)\n #pdb.set_trace()\n courses.email_members.tasks.email_with_celery.delay(\n form.cleaned_data['subject'],\n form.cleaned_data['message'],\n sender,\n recipient_qset.values_list('email',flat=True),\n course_title=request.common_page_data['course'].title,\n course_url=request.build_absolute_uri(reverse('courses.views.main', args=[course_prefix, course_suffix])))\n success_msg = \"Your email was successfully queued for sending\"\n #form = EmailForm()\n \n else:\n error_msg = \"Please fix the errors below:\"\n \n context = RequestContext(request)\n return render_to_response('email/email.html',\n {'form': form,\n 'error_msg': error_msg,\n 'success_msg': success_msg,\n 'course': request.common_page_data['course'],\n 'common_page_data': request.common_page_data},\n context_instance=context)", "def send_email(subject, sender, recipients, html_body):\n\n try:\n # Create a new SendGrid Mail object with the arguments given\n message = Mail(\n from_email=sender,\n to_emails=recipients,\n subject=subject,\n html_content=html_body)\n\n # We prepare a new Thread here to send the email in the background. 
This takes in the send_async_email\n # function as its target and runs the function with the parameters passed through args.\n Thread(target=send_async_email,\n args=(current_app._get_current_object(), message)).start()\n\n except Exception as e:\n print(e)\n # FIXME: should do some type of error handling here or allow error to bubble up", "def send_mail(month: str, data: list):\n\n V2RayLogger.debug('SMTP server: {0}:{1}.'.format(Config.get('mail_host'), Config.get('mail_port')))\n smtp = smtplib.SMTP_SSL(Config.get('mail_host'), Config.get('mail_port'))\n V2RayLogger.debug('SMTP login with: {0}:{1}.'.format(Config.get('mail_user'), Config.get('mail_pass')))\n smtp.login(Config.get('mail_user'), Config.get('mail_pass'))\n V2RayLogger.debug('SMTP login successful.')\n\n for row in data:\n V2RayLogger.debug('Send email: {0}:{1}.'.format(row[0], row[1]))\n message = '<tr align=left><th align=\"left\">{0:30s}</th><th align=\"left\">{1:9s}</th></tr>\\n'.format(\n row[0], row[1])\n message = MIMEText(message, 'html')\n message['Subject'] = Header(Config.get('mail_subject') + ': {0}'.format(month))\n message['From'] = Config.get('mail_user')\n message['To'] = row[0]\n\n smtp.sendmail(Config.get('mail_user'), row[0], message.as_string())\n V2RayLogger.info('Send traffic to: {0}.'.format(row[0]))", "def mail_method(self,address,city,state,zip,name):\n id = self.find_employee_id(name)\n if id in self.pymthd:\n self.pymthd[id] = \"Mailed Check\"\n print(\"{}{}\".format(name, \" was successfully changed to Mailed Check\"))\n self.emp_dict[id][1] = address\n self.emp_dict[id][2] = city\n self.emp_dict[id][3] = state\n self.emp_dict[id][4] = zip\n self.emp_dict[id][6] = \"2\"\n return self.pymthd, self.emp_dict\n else:\n print(\"Error- employee not found\")\n self.employee_data()", "def _submit_jobs(self,\r\n jobs_fp,\r\n job_prefix):\r\n cmd = '%s -ms %s %s' % (self._cluster_jobs_fp,\r\n jobs_fp,\r\n job_prefix)\r\n stdout, stderr, return_value = qiime_system_call(cmd)\r\n if return_value != 0:\r\n msg = \"\\n\\n*** Could not start parallel jobs. 
\\n\" +\\\r\n \"Command run was:\\n %s\\n\" % cmd +\\\r\n \"Command returned exit status: %d\\n\" % return_value +\\\r\n \"Stdout:\\n%s\\nStderr\\n%s\\n\" % (stdout, stderr)\r\n raise RuntimeError(msg)\r\n\r\n # Leave this comments in as they're useful for debugging.\r\n # print 'Return value: %d\\n' % return_value\r\n # print 'STDOUT: %s\\n' % stdout\r\n # print 'STDERR: %s\\n' % stderr\r\n\r\n return stdout, stderr, return_value", "def getEmail(self, data):\r\n\t\tprint('test')\r\n\t\t# Empty array to hold unique emails\r\n\t\tno_dp_email = []\r\n\r\n\t\t# Loop through each row in the dataframe...\r\n\t\tfor row in data.itertuples():\r\n\t\t\tprint('test')\r\n\r\n\t\t\t# Parse through the row's keywords string for emails...\r\n\t\t\temails = re.findall(\"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4}\", row.keywords)\r\n\t\t\tprint(emails)\r\n\t\t\tprint('test')\r\n\r\n\t\t\t# For each email in the array...\r\n\t\t\tfor email in emails:\r\n\t\t\t\tprint('test')\r\n\r\n\t\t\t\temail = str(email)\r\n\r\n\t\t\t\t# Append this email onto the array if it is not a repeat\r\n\t\t\t\tif email not in no_dp_email:\r\n\t\t\t\t\tprint('test')\r\n\r\n\t\t\t\t\tno_dp_email.append(email)\r\n\t\t\r\n\t\t# return array of unique emails\r\n\t\treturn no_dp_email", "def send_video_links():\n email_list = Emails.query.filter_by(status=\"active\").all() \n print(\"Sending newsletters to \", len(email_list), \" users\")\n random_video = get_random_video_link()\n video_link = f\"https://www.youtube.com/watch?v={random_video[1]}\"\n\n for email in email_list:\n #send email to user\n try:\n send_single_email(email.email, video_link, random_video[0])\n except Exception as e:\n print(e)\n \n\n\n print(\"DEBUG- Emails send job finished \")\n return \"Success\"", "def position_applicants_send_email(id):\n if current_user.id is None:\n abort(403)\n else:\n form = ContactForm(request.form)\n if request.method == 'POST' and form.validate():\n position = db.session.query(Job).get(id)\n if position is None:\n abort(404)\n emails = [u.email for u in position.users]\n message = Message(subject=form.subject.data,\n sender='info@mediville.com',\n reply_to='info@mediville.com',\n recipients=[''],\n bcc=emails,\n body=form.text.data)\n mail.send(message)\n flash(\"Message was send.\", 'success')\n return redirect(url_for('organisations.view_applicants', id=id))\n return render_template('organisations/message_send_form.html', form=form)", "def send_mail():\n msg = MIMEMultipart()\n msg[\"From\"] = \"SIRP-Reminders@company.com\"\n msg[\"To\"] = SENT_TO\n msg[\"Subject\"] = \"The Hive Case Metrics\"\n msg.attach(MIMEText(\"Attached are the requested case metrics in .XLSX format.\"))\n part = MIMEBase(\"application\", \"octet-stream\")\n part.set_payload(open(\"Hive Metrics.xlsx\", \"rb\").read())\n encoders.encode_base64(part)\n part.add_header(\"Content-Disposition\", 'attachment; filename=\"Hive Metrics.xlsx\"')\n msg.attach(part)\n smtp = smtplib.SMTP(SMTP_SERVER)\n smtp.starttls()\n smtp.sendmail(msg[\"From\"], msg[\"To\"].split(\",\"), msg.as_string())\n smtp.quit()", "def send_email(email_body, make_network_requests):\n\n if make_network_requests:\n ses = boto3.client(\"ses\", region_name=AWS_REGION)\n ses.send_email(\n Source=EMAIL_FROM,\n Destination={\"ToAddresses\": [EMAIL_TO]},\n Message={\n \"Subject\": {\"Data\": EMAIL_SUBJECT},\n \"Body\": {\"Text\": {\"Data\": email_body}},\n },\n )\n else:\n print(email_body)", "def get_mail_merge(threshold):\t\n\tmaster = 
master_dataframe(threshold)\n\tmaster.dropna(subset=['email'],inplace=True)\n\n\twriter = pd.ExcelWriter('MAIL_MERGE.xlsx')\n\tmaster.to_excel(writer,'ALL STUDENTS')\n\twriter.save()", "def test_get_jobs_by_encrypted_email(self):\n email = \"some.test@crim.ca\"\n body = {\n \"inputs\": [{\"id\": \"test_input\", \"data\": \"test\"}],\n \"outputs\": [{\"id\": \"test_output\", \"transmissionMode\": EXECUTE_TRANSMISSION_MODE_REFERENCE}],\n \"mode\": EXECUTE_MODE_ASYNC,\n \"response\": EXECUTE_RESPONSE_DOCUMENT,\n \"notification_email\": email\n }\n with contextlib.ExitStack() as stack:\n for runner in mocked_process_job_runner():\n stack.enter_context(runner)\n path = \"/processes/{}/jobs\".format(self.process_public.identifier)\n resp = self.app.post_json(path, params=body, headers=self.json_headers)\n assert resp.status_code == 201\n assert resp.content_type == CONTENT_TYPE_APP_JSON\n job_id = resp.json[\"jobID\"]\n\n # verify the email is not in plain text\n job = self.job_store.fetch_by_id(job_id)\n assert job.notification_email != email and job.notification_email is not None\n assert int(job.notification_email, 16) != 0 # email should be encrypted with hex string\n\n path = get_path_kvp(sd.jobs_service.path, detail=\"true\", notification_email=email)\n resp = self.app.get(path, headers=self.json_headers)\n assert resp.status_code == 200\n assert resp.content_type == CONTENT_TYPE_APP_JSON\n assert resp.json[\"total\"] == 1, \"Should match exactly 1 email with specified literal string as query param.\"\n assert resp.json[\"jobs\"][0][\"jobID\"] == job_id", "def send_mail(self, subject):\r\n pass", "def sendEmail(householdID):\n contactID = mdb.getContact(householdID)\n sqlq = \"\"\"\n SELECT Name, Surname, Address1, Address2, Town, Postcode, email, status\n FROM Contact\n WHERE idContact = '{}';\n \"\"\".format(contactID)\n result = mdb.getSQL(sqlq)[0]\n\n thisName = (\"%s\" % (result['Name']))\n thisEmail = (\"%s\" % (result['email']))\n thisStatus = (\"%s\" % (result['status']))\n\n # prepare the custom email\n thisPath = os.path.dirname(os.path.abspath(__file__))\n if (thisStatus == 'de'):\n emailPath = os.path.join(thisPath, \"emails/email_graph_de.html\")\n locale.setlocale(locale.LC_ALL, 'de_DE.utf8')\n else:\n emailPath = os.path.join(thisPath, \"emails/email_graph.html\")\n dtChoice = mdb.getHHdtChoice(householdID)\n thisDate = dtChoice.strftime(\"%A, %-d %B\")\n\n templateFile = open(emailPath, \"r\")\n templateText = templateFile.read()\n templateFile.close()\n templateText = templateText.replace(\"[householdID]\", householdID)\n templateText = templateText.replace(\"[contactID]\", contactID)\n templateText = templateText.replace(\"[name]\", thisName)\n templateText = templateText.replace(\"[date]\", thisDate)\n templateText = templateText.replace(\"[securityCode]\", mdb.getSecurityCode(householdID))\n\n # Subject\n subjectLine = templateText.splitlines()[0]\n templateText = templateText[templateText.find('\\n') + 1:] # find line break and return all from there - i.e. 
remove first line\n \n # email file\n emailFilePath = os.path.join(thisPath, \"tempEmail.htmail\")\n emailFile = open(emailFilePath, \"w+\")\n emailFile.write(templateText)\n emailFile.close()\n\n # call('mutt -e \"set content_type=text/html\" -s \"[TESTING]' + subjectLine + '\" philipp.grunewald@ouce.ox.ac.uk < ' + emailFilePath, shell=True)\n call('mutt -e \"set content_type=text/html\" -s \"' + subjectLine + '\" ' + thisEmail + ' -b meter@energy.ox.ac.uk < ' + emailFilePath, shell=True)", "def sendMail(listEmailsToSend, title, data):\n if isinstance(listEmailsToSend, str):\n listEmailsToSend = [listEmailsToSend]\n send_mail(\n f'{title}',\n f'{data}',\n settings.EMAIL_HOST_USER,\n listEmailsToSend,\n fail_silently=False,\n )", "def send_emails(self):\n\n with open(self.emails_file) as fp:\n emails = fp.readlines()\n logging.debug('%s e-mail addresses are loaded from %s' % (len(emails), self.emails_file))\n\n emails = map(lambda email: email.strip(), emails)\n\n for i, email in enumerate(emails):\n try:\n self.send_email(email)\n except Exception as e:\n logging.exception('Can\\'t send e-mail to %s (number %s)!' % (email, i))\n else:\n logging.debug('E-mail was sent to %s (number %s)' % (email, i))\n\n sleep_time = self.timeout * (0.5 + random.random())\n time.sleep(sleep_time) # timeout\n\n logging.debug('Done!')", "def search_for_email_given_job(job_description: str, contacts: str):\n # create an empty list of the contacts\n contacts_list = []\n # --> refer to the file called inputs/contacts.txt to learn more about\n # the format of the comma separated value (CSV) file that we must parse\n # --> iterate through each line of the file and extract the current job\n for contact_line in csv.reader(\n contacts.splitlines(),\n quotechar='\"',\n delimiter=\",\",\n quoting=csv.QUOTE_ALL,\n skipinitialspace=True,\n ):\n # TODO: extract the current job for the contact on this line of the CSV\n # TODO: the job description matches and thus we should save it in the list\n # return the list of the contacts who have a job description that matches\n return contacts_list", "def scrape_delta_jobs(args):\n today = date.today()\n today = today.strftime(\"%m/%d/%Y\")\n kwargs = parse_args(args)\n\n if kwargs is None:\n return\n\n browser = webdriver.Firefox()\n browser.get(base_url)\n actionChains = ActionChains(browser)\n try:\n link = browser.find_element_by_link_text(\"Search All Jobs\")\n actionChains.move_to_element(link).click(link).perform()\n time.sleep(3)\n html = browser.page_source\n soup = BeautifulSoup(html)\n table = soup.find('table')\n jobs_link = table.findAll('a')[-1]['href']\n browser.execute_script(jobs_link)\n time.sleep(3)\n html = browser.page_source\n soup = BeautifulSoup(html)\n table = soup.find('table')\n rows = table.findAll('tr')\n\n jobs = {}\n titles = []\n for row in rows:\n try:\n location = row.find('td', {'class': 'column-3'}).text\n # modify this line to adjust location\n if 'GA-Atlanta-ATG' in location:\n date_posted = row.find('td', {'class': 'column-5'}).text\n department = row.find('td', {'class': 'column-4'}).text\n title = row.find('td', {'class': 'column-1'})\n link = title.find('a')['href']\n if today in date_posted:\n jobs[title.text] = {\n 'date_posted': date_posted,\n 'department': department,\n }\n titles.append(title.text)\n except:\n pass\n\n formatted_titles = []\n for title in titles:\n formatted_titles.append(title.rstrip().encode('utf-8'))\n\n sentence = \"\"\n for title in formatted_titles:\n sentence = sentence + \\\n \" New Job Posting for Postion: {} 
\\n\".format(title)\n sender = kwargs['sender']\n receivers = kwargs['recipient']\n\n message = \"\"\"From: {}\n To: {}\n Subject: New Delta Jobs for {}\n\n {}\n\n {}\n \"\"\".format(sender, receivers, today, base_url, sentence)\n if len(sentence) > 0:\n server = smtplib.SMTP('smtp.gmail.com:587')\n server.ehlo()\n server.starttls()\n server.login(sender, kwargs['password'])\n server.sendmail(sender, receivers, message)\n browser.close()\n except:\n browser.close()", "def email_body_cancellation_from_seller_to_buyer():\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\"> <a href=\"#\" style=\"color:#1488CC\">{Insert user - seller}</a> cancelled your appointment.<br><br>'\n\tmsg = msg + '\\t\\t\\t Check out <a href=\"#\" style=\"color:#1488CC\">{Insert seller}</a>\\'s availability, and send a new proposal. 
(Sometimes, a little reshuffling can really make things happen!)</font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def send_email(\n template_name: str,\n to: typing.Union[str, typing.List[str]],\n personalisation: dict = None,\n reference: str = None,\n staff_email: bool = None,\n retry_attempts: int = 2,\n spoolable_ctx: Context = None,\n):\n client = NotifyClient.shared_client()\n try:\n client.send_email(\n template_name=template_name,\n to=to,\n personalisation=personalisation,\n reference=reference,\n staff_email=staff_email,\n )\n except APIError as e:\n should_retry = (\n # no retry without uWSGI spooler\n spoolable_ctx.spooled\n # no retry if run out of retry attempts\n and retry_attempts\n # retry only for \"service unavailable\" / \"internal error\" statuses\n and 500 <= e.status_code < 600\n # …unless it was caused by an invalid json response\n and not isinstance(e, InvalidResponse)\n )\n if should_retry:\n send_email(\n template_name=template_name,\n to=to,\n personalisation=personalisation,\n reference=reference,\n staff_email=staff_email,\n retry_attempts=retry_attempts - 1,\n )\n else:\n raise e", "def fusion_api_send_email(self, body, api=None, headers=None):\n param = \"/send-email\"\n return self.email.post(body, api, headers, param)", "def email(self):\n return \"{}.{}@company.com\".format(self.first, self.last)", "def send_email_celery(\n subject: str, body: str, from_email: str, to_emails: List[str]\n) -> None:\n send_mail(\n subject, body, from_email, to_emails, fail_silently=False,\n )\n logger.info(\n f\"\"\"Email sent successfully via a Celery task\\n\n subject: {subject}\\n\n body: {body}\\n\n from_email: {from_email}\\n\n to_emails: {str(to_emails)}\"\"\"\n )", "def mark_email(args):\n cache.get_default().set_email(args.address, args.is_valid)\n print('{!r:} marked as {:s}valid.'.format(args.address, '' if args.is_valid else 'in'))", "def clean_email_column(df: pd.DataFrame) -> pd.DataFrame:\n try:\n df[\"Email\"] = df[\"Email\"].apply(_clean_email_strings)\n return df\n except ValueError:\n print(\"'Email' column not found, please check the input file structures.\")", "def _send_email_helper(settings, excel):\n try:\n server = smtplib.SMTP(settings.smtp_server, str(settings.smtp_port))\n server.starttls()\n server.login(settings.user,settings.password)\n dest = [str(settings.user), 
str(settings.dest_addr)]\n server.sendmail(settings.user, dest, Email._set_email(settings,excel).as_string())\n server.quit()\n\n FileHelper.archive(settings, excel)\n excel.clear_sheet()\n excel.gen_dates()\n Popups.email_sent()\n except Exception:\n print(\"Send email failed.\")", "def run(job, logger=None):\n one_day = datetime.datetime.now() + datetime.timedelta(days=int('{{ number_of_days }}'))\n date_string = \"{:%m/%d/%Y}\".format(one_day)\n job.set_progress(\"Setting expiration date for servers in this job to: {}\".format(date_string))\n\n for server in job.server_set.all():\n server.set_value_for_custom_field(\"expiration_date\", date_string)\n\n return \"\", \"\", \"\"", "def process_multiple_files(self, filepaths, email_col='EMAIL',min_size=100, threshold=0.05):\r\n new_paths = []\r\n for f in filepaths:\r\n df = pd.read_csv(f)\r\n df = self.pre_process_frame(df, col=email_col)\r\n orig_size = df.index.size\r\n FLAG = True\r\n if orig_size < min_size:\r\n pass\r\n else:\r\n print(\"Cleaning {}\".format(f))\r\n \r\n try: # Try to first do an easy match with results directly from the database. (Very fast compared to API calls)\r\n self.count_matching_emails(df, col=email_col, verify_integrity=True, thresh=threshold)\r\n except Exception as e:\r\n \r\n print(\"{}\\n Calling missing emails from remote server.\".format(e))\r\n df = self.deep_clean_frame(df,dealno=0,clean_col=email_col) # The long way - calling the API.\r\n \r\n try:\r\n self.deep_processing_rerun(dealno=0,thresh=0.05,max_tries=5) # Handling records stuck in processing.\r\n count = self.count_matching_emails(df, col=email_col, verify_integrity=True, thresh=threshold)\r\n print(\"Successfully matched {} records\".format(count))\r\n except Exception as e:\r\n \r\n FLAG = False # Stop this from finalizing...too many records stuck in processing/not in database...somethings wrong.\r\n print(\"Failed to reprocess some records for {}\\n Error: {}\".format(f,e))\r\n \r\n if FLAG:\r\n df = self.suppress_email_frame(df, col=email_col, clean_type=1)\r\n new_path = os.path.splitext(f) + \"ListWised.csv\"\r\n df.to_csv(new_path, index=False)\r\n new_paths.append(new_path)\r\n \r\n self.deep_processing_rerun_all() # Wraps up making one last try at rerunning any emails stuck in processing (for next time).\r\n return new_paths", "def email_article_summary(to_address, summary_filename, start_year, start_month, end_year, end_month, num_articles):\n \n host = HOST\n from_address = FROM_ADDRESS\n body = \"\"\"\n Good morning,\n \n There were %i peer-reviewed papers produced by researchers at this institute between %i/%i and %i/%i. A summary file containing the front page from each article is attached with this email. 
Please print out these summary pages, highlight the author(s) on each article and pin them to the monthly papers noticeboard.\n \n Thanks a bunch,\n \n Skynet.\n \n \"\"\" % (num_articles, start_month, start_year, end_month, end_year, )\n \n recipients = [to_address, ADMIN_ADDRESS]\n \n logging.info(\"Preparing summary email report for %s\" % (', '.join(recipients), ))\n \n successful = True\n for recipient in recipients:\n \n message = MIMEMultipart()\n message[\"From\"] = from_address\n message[\"To\"] = recipient\n message[\"Subject\"] = \"Refereed papers summary between %i/%i and %i/%i\" % (start_month, start_year, end_month, end_year, )\n message[\"Date\"] = formatdate(localtime=True)\n \n message.attach(MIMEText(textwrap.dedent(body).lstrip()))\n \n part = MIMEBase('application', 'octet-stream')\n part.set_payload(open(summary_filename, 'rb').read())\n Encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment; filename=\"%s\"' % os.path.basename(summary_filename))\n message.attach(part)\n \n server = smtplib.SMTP(host)\n \n try:\n failed = server.sendmail(from_address, to_address, message.as_string())\n server.close()\n \n except Exception as e:\n logging.critical(\"Unable to send email to %s. Error: %s\" % (recipient, str(e), ))\n successful = False\n \n else:\n logging.info(\"Email successfully sent to %s\" % recipient)\n \n \n return successful", "def _send_mails(course_event, attendee, title,\n organisation, amount, is_test=False):\n\n if is_test:\n\n send_mail(\n '[GISMentors-kurzy] {} {}'.format(title, course_event.date),\n \"\"\"\n Kurz: {}\n Účastník: {}\n E-mail: {}\n Organizace: {}\n Celkem registrovaných účastníků: {}\n Celkem peněz (bez DPH): {}\n \"\"\".format(\n title,\n attendee.name,\n attendee.email,\n organisation,\n len(course_event.courseattendee_set.all()),\n course_event.suma_netto\n ),\n 'info@gismentors.cz',\n [settings.TEST_MAIL],\n fail_silently=True,\n )\n\n else:\n\n send_mail(\n '[GISMentors-kurzy] {} {}'.format(title, course_event.date),\n \"\"\"\n Kurz: {}\n Účastník: {}\n E-mail: {}\n Organizace: {}\n Celkem registrovaných účastníků: {}\n Celkem peněz (bez DPH): {}\n \"\"\".format(\n title,\n attendee.name,\n attendee.email,\n organisation,\n len(course_event.courseattendee_set.all()),\n course_event.suma_netto\n ),\n 'info@gismentors.cz',\n [settings.INFO_MAIL],\n fail_silently=True,\n )\n\n send_mail(\n '[GISMentors-kurzy] Potvrzení přihlášky',\n render_to_string('potvrzeni.txt', {\n 'name': attendee.name,\n \"title\": title,\n \"date\": course_event.date,\n \"amount\": int(amount)\n }),\n 'info@gismentors.cz',\n [attendee.email],\n fail_silently=True,\n )", "def send_email(self,to, subj):\r\n\r\n \"\"\" Currently not implemented. 
\"\"\"\r\n print(to+'-'+subj)\r\n print(self.body)\r\n # Send the finalized email here.\r", "def process_domains(self, save_path=None):\r\n emails = self.db.read_sql(\"SELECT * FROM emails\")\r\n emails.loc[:, email2] = emails.loc[:, email].apply(self.parse_email) \r\n emails.loc[:, DOMAIN] = emails.loc[:, email2].apply(self.get_domain)\r\n emails.drop_duplicates([DOMAIN], inplace=True)\r\n if save_path:\r\n emails.to_csv(save_path, index=False)\r\n emails.loc[:,DOMAIN].to_sql(DOMAINS, self.db.con, if_exists='append', index=False)", "def send_email(self, recipients, html_data, assignee=None):\n\n msg = MIMEMultipart('alternative')\n# msg['Subject'] = \"Jira Alert - Stagnant Jiras %s\" % self.options.fl_project\n msg['Subject'] = \"Jira Alert - Stagnant Jiras\"\n msg['From'] = 'jira.alert@lsi.com'\n if assignee:\n msg['To'] = assignee\n msg['Cc'] = ', '.join(recipients) # Assignee emails\n else:\n msg['To'] = ', '.join(recipients) # Main email\n \n html1 = \"<!DOCTYPE html><html><head><meta charset=\\\"utf-8\\\"/><title>HTML Reference</title></head><body>\"\n \n html2 = \"</body></html>\"\n \n final_message = \"%s%s%s\" % (html1, html_data, html2)\n html_message = MIMEText(final_message, 'html', _charset='utf-8')\n msg.attach(html_message)\n \n # Send the message via our own SMTP server.\n s = smtplib.SMTP('localhost')\n s.set_debuglevel(1)\n# s.sendmail('richard.leblanc@lsi.com', recipients, msg.as_string())\n s.sendmail('jira.alert@lsi.com', recipients, msg.as_string())\n s.quit()", "def execute_automatic_email(self, request, pk=None):\n try:\n retreat = Retreat.objects.get(pk=pk)\n except Exception:\n response_data = {\n 'detail': \"Retreat not found\"\n }\n return Response(response_data, status=status.HTTP_400_BAD_REQUEST)\n\n try:\n email = AutomaticEmail.objects.get(\n id=int(request.GET.get('email'))\n )\n except Exception:\n response_data = {\n 'detail': \"AutomaticEmail not found\"\n }\n return Response(response_data, status=status.HTTP_400_BAD_REQUEST)\n\n # Notify a user for every reserved seat\n emails = []\n for reservation in retreat.reservations.filter(is_active=True):\n if reservation.automatic_email_logs.filter(email=email):\n pass\n else:\n send_automatic_email(reservation.user, retreat, email)\n AutomaticEmailLog.objects.create(\n reservation=reservation,\n email=email\n )\n emails.append(reservation.user.email)\n\n response_data = {\n 'stop': True,\n 'emails': emails\n }\n return Response(response_data, status=status.HTTP_200_OK)", "def sendjob(self,bashscript):", "def noticeEMail(starttime, usr, psw, fromaddr, toaddr, subject, jobmsg):\n\n # Calculate run time\n runtime=datetime.datetime.now() - starttime\n\n # Initialize SMTP server\n server=smtplib.SMTP('smtp.gmail.com:587')\n server.starttls()\n server.login(usr, psw)\n\n # Send email\n senddate=datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')\n subject=subject\n m=\"Date: %s\\r\\nFrom: %s\\r\\nTo: %s\\r\\nSubject: %s\\r\\nX-Mailer: My-Mail\\r\\n\\r\\n\" % (senddate, fromaddr, toaddr, subject)\n msg='{}\\nJob runtime: {}'.format(jobmsg, str(runtime))\n\n server.sendmail(fromaddr, toaddr, m+msg)\n server.quit()", "def test_using_invite_use_host_in_from_email(self, send_mass_html_mail__mock: Mock):\n events = Event.objects.filter(pk=self.event.pk)\n\n admin.EventAdmin.send_mail(Mock(), None, events)\n\n to_send = list(send_mass_html_mail__mock.call_args[0][0])\n from_email = to_send[0][3]\n self.assertEqual(from_email, \"Marie <test_using_invite_use_host_in_from_email@example.com>\")" ]
[ "0.6686441", "0.573404", "0.5680976", "0.5586321", "0.5568828", "0.5565883", "0.5497524", "0.54533464", "0.537631", "0.5351467", "0.5310948", "0.5301148", "0.52795285", "0.5278291", "0.5266971", "0.52349377", "0.5156166", "0.51303834", "0.51244825", "0.5099825", "0.5099411", "0.5093604", "0.50920093", "0.5091304", "0.50840986", "0.5071165", "0.506223", "0.5058389", "0.50583375", "0.50443554", "0.5044086", "0.50358284", "0.5029595", "0.5014934", "0.5013695", "0.5011931", "0.5005724", "0.4998253", "0.4998253", "0.49901277", "0.4981038", "0.4980979", "0.49750912", "0.49715942", "0.49703056", "0.49664778", "0.49626005", "0.49249157", "0.49193197", "0.49191436", "0.49153477", "0.49121922", "0.49101135", "0.49091753", "0.49086368", "0.49043125", "0.4903437", "0.49019042", "0.4894159", "0.48915088", "0.48851597", "0.48824877", "0.488039", "0.487937", "0.48641822", "0.48584223", "0.48494494", "0.4841285", "0.4841208", "0.48347387", "0.48341432", "0.48260552", "0.48201135", "0.4817791", "0.4812535", "0.4808689", "0.48086405", "0.4806972", "0.4798074", "0.47872323", "0.47865248", "0.47799465", "0.4770004", "0.47659704", "0.4764854", "0.47589388", "0.47558767", "0.4755431", "0.47466207", "0.4740894", "0.47387108", "0.4737044", "0.47336113", "0.473021", "0.47286323", "0.472273", "0.4721897", "0.47140387", "0.47140348", "0.47133416" ]
0.8165515
0
Return checked to HTML input (checkbox)
def check_ignore(self): return "checked" if not self.ignore else ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uiCheckboxChecked(checkbox):\n\n return clibui.uiCheckboxChecked(checkbox)", "def htmlCheckbox(labelText, parName, args, labelAttr='', attr=''):\n snippet = htmlLabel(labelText,parName,labelAttr)\n checked = 'checked=\"checked\"' if parName in args else ''\n snippet += '<input type=\"checkbox\" name=\"%s\"%s%s/>\\n' % (parName,sep(checked),sep(attr))\n return snippet", "def checkbox(self):\r\n return self._checkbox", "def get_checked(self):\n return self._checked", "def get_value(self):\n return bool(self._chb_bool.isChecked())", "def boolean(self, label, component, config, name, default=False):\n\n default = self.setting(config, name, default)\n return st.checkbox(label, value=default, key=component + name)", "def value(self):\n if self.isCheckable():\n return self.isChecked()\n else:\n return None", "def uiCheckboxSetChecked(checkbox, checked):\n\n clibui.uiCheckboxSetChecked(checkbox, checked)", "def action_checkbox(self, obj):\n if self.check_concurrent_action:\n return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME,\n force_str(\"%s,%s\" % (obj.pk, get_revision_of_object(obj))))\n else: # pragma: no cover\n return super().action_checkbox(obj)", "def is_checked(self):\n\treturn self._Widget__w['isChecked'] == 'true'", "def check(self, element_tuple, *, wrapper_element_tuple=None):\n self.log_info(f\"Browser.check: Setting {element_tuple} checkbox to checked\")\n checkbox = self.CORE.find_element(*self.format_element(element_tuple))\n if not checkbox.is_selected():\n if wrapper_element_tuple is not None:\n self.log_info(f\"Browser.check: Wrapper element was provided, clicking {wrapper_element_tuple} instead\")\n self.click(wrapper_element_tuple)\n else:\n self.click(element_tuple)\n else:\n self.log_info(f\"Browser.check: Skipping action as {element_tuple} is already checked\")\n return", "def checkbox(self, label, initial=False, handler=None, **kwargs):\n handler = self._changed_handler(handler)\n cb = wx.CheckBox(self, label=label)\n #cb.span = 2\n cb.SetValue(initial)\n cb.Bind(wx.EVT_CHECKBOX, handler)\n self.pack(\"\", cb, **kwargs)\n return cb", "def _generateMenuItemCheckedState(self, obj, **args):\n result = []\n if not args.get('mode', None):\n args['mode'] = self._mode\n args['stringType'] = 'checkbox'\n indicators = self._script.formatting.getString(**args)\n if obj.getState().contains(pyatspi.STATE_CHECKED):\n result.append(indicators[1])\n return result", "def uiCheckboxText(checkbox):\n\n clibui.uiCheckboxText.restype = ctypes.c_char_p\n text = clibui.uiCheckboxText(checkbox)\n\n return text.decode()", "def CheckBoxClicked(self,chkb):\r\n\r\n print(\"{} Selecionado.\", format(chkb.text()))", "def option(name, checked, value):\n chkStr = u''\n if checked:\n chkStr = u'checked=\"checked\"'\n html = (u'<input type=\"radio\" name=\"%s\"'\n u' value=\"%s\" %s/>\\n' % \n (name, value, chkStr))\n return html", "def bool_checkbox(init: bool = False, descr: str = '', data_type: type[Data] = Data):\n\n class StdInpWidget_BoolCheckBox(StdInputWidgetBase, QCheckBox):\n def __init__(self, params):\n StdInputWidgetBase.__init__(self, params)\n QCheckBox.__init__(self)\n\n # tooltip\n self.setToolTip(self.__doc__)\n\n self.stateChanged.connect(self.state_changed)\n\n # initial value\n with self._prevent_update:\n self.setChecked(init)\n\n @property\n def val(self) -> data_type:\n return data_type(self.isChecked())\n\n def load_from(self, val: Data):\n with self._prevent_update:\n self.setChecked(val.payload)\n\n def state_changed(self, _):\n 
self.on_widget_val_changed(self.val)\n\n def val_update_event(self, val: Data):\n if isinstance(val.payload, bool):\n with self._prevent_update:\n self.setChecked(val.payload)\n\n StdInpWidget_BoolCheckBox.__doc__ = descr\n\n return StdInpWidget_BoolCheckBox", "def is_checkbox(field):\n return isinstance(field.field.widget, forms.CheckboxInput)", "def setChecked(self,selected,flag=True):\n if flag:\n qtflag = QtCore.Qt.Checked\n else:\n qtflag = QtCore.Qt.Unchecked\n \n for s in selected:\n for i in self.input.findItems(s,QtCore.Qt.MatchExactly):\n i.setCheckState(qtflag)", "def getCheckBoxValue(self, checkBox):\n\n\t\tif checkBox.checkState() == QtCore.Qt.Checked:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def value(self):\n return self.input.checkState() == QtCore.Qt.Checked", "def add_check_box(self, name, caption, value=False, label=None, add_indicator=None, location=(None,0)):\n widget=QtWidgets.QCheckBox(self)\n widget.setText(_translate(self.name,caption,None))\n widget.setObjectName(_fromUtf8(self.name+\"_\"+name))\n widget.setChecked(value)\n return self.add_simple_widget(name,widget,label=label,add_indicator=add_indicator,location=location)", "def test_widget_is_checkbox():\n form = ExampleForm()\n field = form[\"checkbox\"]\n assert is_checkbox(field) is True", "def uiNewCheckbox(text):\n\n # Set return type\n clibui.uiNewCheckbox.restype = ctypes.POINTER(uiCheckbox)\n\n return clibui.uiNewCheckbox(bytes(text, 'utf-8'))", "def uiCheckboxSetText(checkbox, text):\n\n clibui.uiCheckboxSetText(checkbox, bytes(text, 'utf-8'))", "def setValue(self,val):\n if val:\n self.input.setCheckState(QtCore.Qt.Checked)\n else:\n self.input.setCheckState(QtCore.Qt.Unchecked)", "def _generateCheckedState(self, obj, **args):\n result = []\n if not args.get('mode', None):\n args['mode'] = self._mode\n args['stringType'] = 'checkbox'\n indicators = self._script.formatting.getString(**args)\n state = obj.getState()\n if state.contains(pyatspi.STATE_CHECKED):\n result.append(indicators[1])\n elif state.contains(pyatspi.STATE_INDETERMINATE):\n result.append(indicators[2])\n else:\n result.append(indicators[0])\n return result", "def checkmark_if_true(condition):\n\n if condition:\n html = '<i class=\"fa fa-check-square-o\"></i>'\n else:\n html = '<i class=\"fa fa-square-o\"></i>'\n return mark_safe(html)", "def value(self):\n if self._check_:\n f = self.getChecked\n else:\n f = self.getSelected\n return f()", "def value(self):\n for rb in self.rb:\n if rb.isChecked():\n return str(rb.text())\n return ''", "def action_checkbox(self):\n self.checkbox_online_var = not self.checkbox_online_var", "def checkBox(parent,label='',pos=defPos,size=defSize,style=0,val=defVal,\r\n name='checkBox',id=defId,onCheck=None,tip=None):\r\n gCheckBox = wx.CheckBox(parent,id,label,pos,size,style,val,name)\r\n if onCheck: gCheckBox.Bind(wx.EVT_CHECKBOX,onCheck)\r\n if tip: gCheckBox.SetToolTip(tooltip(tip))\r\n return gCheckBox", "def add_checkbox(\n self,\n name: str,\n label: str,\n default: bool = False,\n ) -> None: # noqa: E501\n self._client.results[name] = default\n self._client.add_element(\n name=str(name), element=Checkbox(label=str(label), value=bool(default))\n )", "def check_box(self, grid: object, name: str, xposition: int, yposition: int,\n synchronize: bool = False, xspan: int = 1, yspan: int = 1) -> QtWidgets.QCheckBox:\n label = QtWidgets.QLabel()\n label.setText(TR().tr(name) + ':')\n grid.addWidget(label, yposition, xposition, 1, 1)\n\n input = QtWidgets.QCheckBox()\n input.setObjectName(name)\n 
if synchronize:\n self.synchronize(input)\n grid.addWidget(input, yposition, xposition + 1, yspan, xspan)\n input.stateChanged.connect(self.data_changed)\n\n return input", "def update_checked(self):\n self._is_checked = self.checkState() == Qt.Checked", "def update_checked(self):\n self._is_checked = self.checkState() == Qt.Checked", "def update_checked(self):\n self._is_checked = self.checkState() == Qt.Checked", "def storeCheckBoxValue(self):\n\n\t\tcategory, attr = self.getWidgetMeta(self.sender())\n\t\tvalue = self.getCheckBoxValue(self.sender())\n\t\tself.storeValue(category, attr, value)", "def on_checkBox_kongtoukai_clicked(self, checked):\n # TODO: not implemented yet\n raise NotImplementedError", "def clicked_checkbox_use_antimasks(self):\n # TODO\n self._get_selected_model().metadata[\"antimask_flag\"] \\\n = self.checkbox_use_antimasks.isChecked()\n return None", "def elementIsChecked(self, element_tuple):\n result = self.CORE.find_element(*self.format_element(element_tuple)).is_selected()\n self.log_info(f\"Browser.elementIsChecked: {element_tuple} is {'' if result else 'not '}checked\")\n return result", "def DrawCheckBox(*args, **kwargs):\n return _gdi_.RendererNative_DrawCheckBox(*args, **kwargs)", "def form_CheckboxMultiChoice(request):\n schema = schemaish.Structure()\n schema.add('multiChoice', schemaish.Sequence(schemaish.String()))\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['multiChoice'].widget = formish.CheckboxMultiChoice(options)\n return form", "def GridCheck(Parent,DefaultSelected,Row,Column):\r\n dummyvar = IntVar()\r\n C = Checkbutton(Parent,var=dummyvar)\r\n if DefaultSelected == 1:\r\n C.select()\r\n C.grid(row=Row,column=Column)\r\n C.isChecked = dummyvar\r\n return C", "def _add_checkbox(self, text, state_changed, tooltip, checked=True,\n enabled=True, button_label=True):\n cbox = QtWidgets.QCheckBox('' if button_label else text, self)\n self.control.layout().addWidget(cbox)\n btn = None\n if button_label:\n btn = QtWidgets.QPushButton(text, self)\n self.control.layout().addWidget(btn)\n\n def cb(checked, cbox=cbox, state_changed=state_changed):\n state_changed(cbox.isChecked(), one_shot=True)\n\n btn.clicked.connect(cb)\n btn.setToolTip(tooltip)\n cbox.setChecked(checked)\n cbox.setEnabled(enabled)\n cbox.stateChanged.connect(state_changed)\n cbox.setToolTip(tooltip)\n self.control.layout().addItem(QtWidgets.QSpacerItem(20, 0))\n return cbox", "def _generateCellCheckedState(self, obj, **args):\n result = []\n if self._script.utilities.hasMeaningfulToggleAction(obj):\n oldRole = self._overrideRole(pyatspi.ROLE_CHECK_BOX, args)\n result.extend(self.generate(obj, **args))\n self._restoreRole(oldRole, args)\n\n return result", "def IsChecked(self):\r\n\r\n return self.GetValue()", "def select_checkbox(self, locator):\n self._info(\"Selecting checkbox '%s'.\" % locator)\n locator = self._parse_locator(locator)\n if not self._selenium.is_checked(locator):\n self._selenium.check(locator)", "def getResult(self):\n self.show(modal=True)\n self.exec_()\n b = self.clickedButton()\n if not b: # b == 0 or b is None\n b = self.defaultButton()\n if b:\n res = str(b.text())\n else:\n res = ''\n if self.checks:\n return res,[c.isChecked() for c in self.checks]\n else:\n return res", "def _checkbutton_toggle(self):\n new_value = self.value_checkbutton.var.get()\n if self.master.change_field_value(self.field_name, new_value):\n self.value_checkbutton.config(fg=\"#3F3\" if new_value else \"#F33\", text=\"ON\" if new_value else 
\"OFF\")\n else:\n self.value_checkbutton.var.set(not new_value)", "def _create_boolean_widget(self,frame,name,widget_options):\n # CB: might be necessary to pass actions to command option of Checkbutton;\n # could be cause of test pattern boolean not working?\n return T.Checkbutton(frame,variable=self._tkvars[name],**widget_options)", "def selecionar_registros_is_checked(self):\n state = self.Registers_Checkbox.isChecked()\n\n self.Registers_Field.setEnabled(state)", "def on_checkBox_duotoukai_clicked(self, checked):\n # TODO: not implemented yet\n raise NotImplementedError", "def on_action_toggled(self, content):\n checked = content['checked']\n self.set_guarded(checked=checked)\n self.toggled(checked)", "def on_checkBox_kongtouping_clicked(self, checked):\n # TODO: not implemented yet\n raise NotImplementedError", "def set_checked(self, checked):\n self._checked = checked", "def form_Boolean(request):\n schema = schemaish.Structure()\n schema.add('myBooleanField', schemaish.Boolean())\n form = formish.Form(schema, 'form')\n return form", "def on_checkBox_duotouping_clicked(self, checked):\n # TODO: not implemented yet\n raise NotImplementedError", "def getCheck(self) -> list:\n results = []\n for i in self.checkboxs:\n if i.isChecked():\n results.append(i)\n return results", "def checkMyWorkBox(self):\n self.util.waitForElementToBePresent(self.element.my_work_checkbox)\n checkbox = self.util.driver.find_element_by_xpath(self.element.my_work_checkbox)\n if not checkbox.is_selected():\n self.util.clickOn(self.element.my_work_checkbox)", "def check_toggle(self, toggle_button, input_value, saved_value):\n if toggle_button and saved_value is None:\n print(\"(check toggle) toggle on, saving value\")\n saved_value = input_value\n elif toggle_button:\n print(\"(check toggle) toggle off, not saving value\")\n saved_value = None\n return saved_value", "def checked(self, checked):\n\n self._checked = checked", "def is_checked(self):\n return self.data.get(\"complete\")", "def form_CheckboxRequired(request):\n schema = schemaish.Structure()\n schema.add('checkbox', schemaish.Boolean(validator=validatish.Required()))\n\n form = formish.Form(schema, 'form')\n return form", "def click_guarantee_cli_checkbox(self):\n self.click_element(self.guarantee_cli_checkbox_locator)", "def is_checkboxes(field):\n return isinstance(field.field.widget, forms.CheckboxSelectMultiple)", "def click_include_cli_checkbox(self):\n self.click_element(self.include_cli_checkbox_locator)", "def click_automate_generation_checkbox(self):\n self.click_element(self.automate_generation_checkbox_locator)", "def IsItemChecked(self, item):\r\n\r\n return item.IsChecked()", "def set_dayu_checked(self, value):\n button = self._button_group.button(value)\n button.setChecked(True)\n self.sig_checked_changed.emit(value)", "def test_checkboxtextgroup(self):\r\n self.check_group('checkboxtextgroup', 'choice', 'checkbox')", "def create_type_widget(self):\n self._chb_bool = QtWidgets.QCheckBox()\n return self._chb_bool", "def flagEnable(self, item):\n if item.checkState() == Qt.Checked:\n logging.debug('\"%s\" Checked' % item.text())\n self.flags[item.text()].enable()\n elif item.checkState() == Qt.Unchecked:\n logging.debug('\"%s\" Unchecked' % item.text())\n self.flags[item.text()].disable()\n else:\n logging.debug('\"%s\" Clicked' % item.text())", "def new_varEnabledWidget():\n newWidget = QtGui.QCheckBox()\n newWidget.setChecked(True)\n return newWidget", "def setValue(self,val):\n if self.isCheckable():\n self.setChecked(val)", "def 
header_field_should_be_checked(self, label):\n locator = lex_locators[\"record\"][\"header\"][\"field_value_checked\"].format(label)\n self.selenium.page_should_contain_element(locator)", "def _format_bool_(value):\n\n from ocgis.util.helpers import format_bool\n\n return format_bool(value)", "def dir_checked():\n return abspath('checked')", "def _paramFixCheck(self, default_value: bool = False) -> QtWidgets.QCheckBox:\n widget = QtWidgets.QCheckBox('')\n widget.setChecked(default_value)\n widget.setToolTip(\"when fixed, the parameter will be fixed to the \"\n \"initial guess value during fitting\")\n return widget", "def format_bool(b):\n return \"YES\" if b else \"NO\"", "def set(cls, text: str, title: str, graceful: bool = True) -> str:\n result, found, modified = cls._do_checkbox_setting(text, title, ('[ ]', '[x]', 1))\n\n if not found:\n raise UserInputError(\"Checkbox with title {!r} was not found in the provided text\".format(title))\n\n if not graceful and not modified:\n raise UserInputError(\"Checkbox with title {!r} was already set\".format(title))\n\n return result", "def bool_to_python(self, value):\r\n if value == 'true':\r\n return True\r\n elif value == 'false':\r\n return False", "def value(self):\n return self.element.is_selected()", "def boolToText(boolval):\n ret = libxml2mod.xmlBoolToText(boolval)\n return ret", "def get_bool(self, name, default=False):\n return self.get_as(self.parse_bool, name, default, value_type=bool)", "def uiCheckboxOnToggled(checkbox, callback, data):\n c_type = ctypes.CFUNCTYPE(\n ctypes.c_int, ctypes.POINTER(uiCheckbox), ctypes.c_void_p)\n c_callback = c_type(callback)\n\n clibui.uiCheckboxOnToggled(checkbox, c_callback, data)\n\n return c_callback", "def on_action_clicked(self, content):\n checked = content['checked']\n self.set_guarded(checked=checked)\n self.clicked(checked)", "def _check_state(self):\n if (self.stock_checker.isChecked() or self.future_checker.isChecked()) and self.name.buddy.text():\n self.btn_ok.setEnabled(True)\n self.btn_ok.setDefault(True)\n else:\n self.btn_ok.setEnabled(False)", "def check_button_checking_name(self):\r\n # Apply customizations if present\r\n if 'custom_checking' in self.text_customization:\r\n return self.text_customization.get('custom_checking')\r\n\r\n _ = self.runtime.service(self, \"i18n\").ugettext\r\n return _('Checking...')", "def getCheckBoxState( self, cCtrlName ):\n oControl = self.getControl( cCtrlName )\n return oControl.getState();", "def processCheckboxes(checkboxes, options):\n\n tag_selection_bin = list()\n tag_selection = list()\n for checkbox in checkboxes:\n tag_selection_bin += list(checkbox.state())\n\n for i, tag in enumerate(tag_selection_bin):\n if tag:\n tag_selection.append(options[i])\n\n return tag_selection", "def uiCheckboxPointer(obj):\n\n return ctypes.cast(obj, ctypes.POINTER(uiCheckbox))", "def ok_if_true(condition):\n\n if condition:\n html = '<i class=\"fa fa-check\"></i>'\n return mark_safe(html)\n return ''", "def get_dayu_checked(self):\n return self._button_group.checkedId()", "def DoCheck(self,event):\r\n index = event.GetSelection()\r\n item = self.items[index]\r\n if self.list.IsChecked(index):\r\n self.data.check(item)\r\n else:\r\n self.data.uncheck(item)\r\n #self.list.SetSelection(index)\r", "def __init__(self,name,value,*args,**kargs):\n if 'text' in kargs:\n text = kargs['text']\n else:\n text = str(name)\n kargs['text'] = '' # Force no label\n self.input = QtGui.QCheckBox(text)\n InputItem.__init__(self,name,*args,**kargs)\n self.setValue(value)\n 
self.layout().insertWidget(1,self.input)", "def get_bool2(self):\n pass", "def form_CheckboxMultiChoiceDefault(request):\n schema = schemaish.Structure()\n schema.add('multiChoice', schemaish.Sequence(schemaish.Integer()))\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['multiChoice'].widget = formish.CheckboxMultiChoice(options)\n form['multiChoice'].default = [2]\n return form", "def post_formatter(self, value):\n if isinstance(value, bool):\n return value and 'true' or None\n return value", "def addCheck(self,text):\n grid = self.layout()\n nr,nc = grid.rowCount(),grid.columnCount()\n check = QtGui.QCheckBox(text)\n grid.addWidget(check,nr,1)\n return check" ]
[ "0.73874754", "0.7013393", "0.67862654", "0.6326686", "0.6137979", "0.61240286", "0.61000293", "0.60839576", "0.60712683", "0.60165775", "0.5976162", "0.59326965", "0.58932775", "0.588823", "0.5887761", "0.5884953", "0.5875955", "0.5836536", "0.5805211", "0.57928985", "0.5773402", "0.57563525", "0.57225513", "0.57158434", "0.56914425", "0.56774896", "0.5669447", "0.5666948", "0.5649195", "0.56387925", "0.5633546", "0.5622085", "0.56190836", "0.5602721", "0.5588512", "0.5588512", "0.5588512", "0.55775243", "0.5564018", "0.556169", "0.55417466", "0.55060667", "0.5496894", "0.54854035", "0.54833984", "0.54763883", "0.54711086", "0.54578453", "0.5452947", "0.5449283", "0.5445576", "0.54248565", "0.5419647", "0.5379333", "0.53563464", "0.5294485", "0.5279601", "0.52749896", "0.52727073", "0.5262072", "0.52502805", "0.52369505", "0.52259815", "0.5223045", "0.52147865", "0.5210697", "0.5208127", "0.52060753", "0.519265", "0.51751727", "0.5139543", "0.51126295", "0.51080966", "0.51065457", "0.5097659", "0.5084836", "0.5074932", "0.5068881", "0.506555", "0.5054048", "0.50530654", "0.50253534", "0.50191987", "0.50182414", "0.50122166", "0.5011157", "0.50108814", "0.5001472", "0.49807042", "0.49750116", "0.49572524", "0.49513352", "0.49511895", "0.4930204", "0.4928971", "0.49167958", "0.4906048", "0.48846352", "0.48791382", "0.48763913" ]
0.49396482
93
Function for updating packages
def update(self, package=None): if package: query = package to_update = package.package.name else: query = PackageUpdate.objects.filter(server=self, ignore=False) to_update = " ".join(list(query.values_list('package__name', flat=True))) if self.os == 0: cmd = "apt-get install --only-upgrade {}".format(to_update,) elif self.os == 1: cmd = "yum update -y {}".format(to_update,) r = self.send_command(cmd) query.delete() if PackageUpdate.objects.filter(server=self, ignore=False).count() == 0: self.status = 0 self.save()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_package(self, **kwargs):\n logging.warning('Updating a package removes all existing data. '\n 'If you wish to keep the existing data, use `CachedCKAN.patch_package`.')\n results = self.api.action.package_update(**kwargs)\n self.get_ckan_metadata(True)\n return results", "def update_os_packages(self):\n self.summarize_operation(\"Updating OS Packages\")\n print subprocess.call(shlex.split(\"sudo apt-get update -y\"))", "def upgrade_packages():\n\n require('environment', provided_by=env.environments)\n system.update_apt_sources()\n system.upgrade_apt_packages()", "def update(package:str, path:str=None):\r\n logging.info(\"Checking For Updates On Package {}\".format(package))\r\n\r\n if path is None:\r\n path = get_site_packages_path()\r\n current = read_package_info(package, path)\r\n new = get_package_info(package)\r\n\r\n if current[\"version\"] != new[\"version\"]:\r\n logging.info(\"Updating Package {} From v{} To v{}...\".format(package, current[\"version\"], new[\"version\"]))\r\n uninstall(package, path)\r\n install(package, None, path)\r\n logging.info(\"Finished Updating Package {} From v{} To v{}\".format(package, current[\"version\"], new[\"version\"]))\r\n else:\r\n logging.info(\"No Updates Found For Package {}\".format(package))", "def update_from_repo():\n\treturn", "def pipupdate():\n\n packages = [d for d in pkg_resources.working_set]\n subprocess.call('pip install --upgrade ' + ' '.join(packages))", "def upgrade(packages):\n setup_audit_log()\n packages = CFG.package_specs(packages)\n if not packages:\n inform(\"No packages installed, nothing to upgrade\")\n sys.exit(0)\n\n for pspec in packages:\n perform_install(pspec, is_upgrade=True, force=False, quiet=False)", "def update(self, iterable):\n for package in iterable:\n self.add_package(package)", "def update(self):\n self.content = self.get_content()\n self.dependencies = self.content['requirements']['run']\n self.pythonversion = self.content['extra']['pythonversion']\n self.package_name = self.content['package']['name']", "def update():\n require('PROJECT_NAME')\n\n with cd(utils.home('apps', env.PROJECT_NAME)):\n run('hg pull')\n run('hg up')", "def upgrade_os_packages(self):\n self.summarize_operation(\"Upgrading OS Packages\")\n print subprocess.call(shlex.split(\"sudo apt-get upgrade -y\"))", "def update_package_files(self) -> None:\n # create the package folder\n self.package_path.mkdir(parents=True, exist_ok=True)\n\n self.clean() # Delete any previous *.py? 
files\n self.copy_stubs()\n self.create_readme()\n self.create_license()", "def update_package_data(distribution):\r\n build_py = distribution.get_command_obj(\"build_py\")\r\n build_py.finalize_options() # Updates package_data\r", "def update_package_data(distribution):\n build_py = distribution.get_command_obj('build_py')\n # distribution.package_data = find_package_data()\n # re-init build_py options which load package_data\n build_py.finalize_options()", "def update_package_data(distribution):\n build_py = distribution.get_command_obj('build_py')\n # distribution.package_data = find_package_data()\n # re-init build_py options which load package_data\n build_py.finalize_options()", "def update(pkg_name):\n\n vendor_file = os.path.join('vendor', 'vendor.json')\n target = 'golang.org/x/{}'.format(pkg_name)\n\n with open(vendor_file) as content:\n deps = json.load(content)\n packages = [dep['path'] for dep in deps['package'] if dep['path'].startswith(target)]\n revision = '@{revision}'.format(revision=args.revision) if args.revision else ''\n packages = ['{pkg}{revision}'.format(pkg=pkg, revision=revision) for pkg in packages]\n cmd = ['govendor', 'fetch'] + packages\n if args.verbose:\n print(' '.join(cmd))\n subprocess.check_call(cmd)", "def update_packages(self, config_file):\n entries = yacman.load_yaml(config_file)\n self.update(entries)\n return True", "def upgrade(repo, package, editable):\n if repo.upgrade(package, editable):\n click.echo('Done.')", "def import_and_update(distribution: str):\n fetch_packages(distribution)\n update_snapshot(distribution)", "def update_package(self, package):\n if package is not None:\n self._package_cache.add(package.id, package)", "def update_pkg_metadata(self, pkg, version=None, **kwargs):\n pass", "def update(self, gppkg_filename):\n run_command(\"gppkg --update %s\" % gppkg_filename)\n self.assertTrue(self.check_install(gppkg_filename))", "def run_update():\n\n args = _parse_arguments()\n\n # get dependencies\n dependencies = get_dependencies(args.folder)\n\n # get update config of dependencies\n update_info = get_update_info()\n\n install_queue = build_queue(\n update_info, dependencies, args.archive\n )\n\n print(\"install_queue\", install_queue)\n if install_queue is not None:\n build_wheels(install_queue)\n install_wheels(install_queue)", "def update_package(self) -> bool:\n log.info(f\"- Update {self.package_path.name}\")\n log.trace(f\"{self.package_path.as_posix()}\")\n\n # check if the sources exist\n ok = self.are_package_sources_available()\n if not ok:\n log.debug(f\"{self.package_name}: skipping as one or more source stub folders are missing\")\n self.status[\"error\"] = \"Skipped, stub folder(s) missing\"\n shutil.rmtree(self.package_path.as_posix())\n self._publish = False # type: ignore\n return False\n try:\n self.update_package_files()\n self.update_included_stubs()\n self.check()\n except Exception as e: # pragma: no cover\n log.error(f\"{self.package_name}: {e}\")\n self.status[\"error\"] = str(e)\n return False\n return True", "def update_repo(self, sign=True, verbose=False):\n self.ensure_correct_user()\n\n keyname=self.read_keyname()\n self.show(f'cd {self.repo_path}')\n os.chdir(self.repo_path)\n cmds=[\n 'apt-ftparchive packages . > Packages',\n 'gzip -c Packages > Packages.gz',\n 'apt-ftparchive release . 
> Release',\n ]\n for cmd in cmds:\n self.show(cmd)\n s=call(cmd, shell=True)\n if s!=0:\n print('error:', cmd, file=sys.stderr)\n return 1\n self.report('Updated Packages and Release.')\n if sign:\n s = self.sign_release(keyname)\n self.report('Updated repo:', self.repo_path)\n return s", "def packages():", "def patch_package(self, **kwargs):\n results = self.api.action.package_patch(**kwargs)\n self.get_ckan_metadata(True)\n return results", "def update_packages(self, packages: Packages, source=\"conda\") -> None:\n self[source] = self.get(source, {})\n self._update_packages(self[source], packages)", "def run(self):\n self.update_repos()", "async def update(self, ctx):\n # read original contents of pipfile\n with open('Pipfile') as f:\n original_pipfile = f.read()\n\n # run git pull. If nothing new is pulled, exit here.\n pull_output = await ctx.invoke(ctx.bot.get_command('pull'))\n\n if 'updating' not in pull_output.lower():\n return\n\n commit_message = subprocess.run(['git', 'log', '-1', '--pretty=%B'], stdout=subprocess.PIPE)\n await ctx.send('```yaml\\n{}```'.format(commit_message.stdout.decode('utf-8')))\n\n # read new contents of pipfile\n with open('Pipfile') as f:\n new_pipfile = f.read()\n\n # if no package changes, we just reload the changed extensions.\n # Unless if the main file was changed, which cannot be reloaded,\n # in which case the bot must be restarted.\n if new_pipfile == original_pipfile:\n pattern = r\" cogs\\/(.*).py *\\| [0-9]{1,9} \\+{0,}-{0,}\\n\"\n names = re.findall(pattern, pull_output)\n if not names or 'main' not in names:\n reload_cmd = ctx.bot.get_command('reload')\n for name in names:\n # first subgroup is either helpers or commandcogs, which we don't care about\n await ctx.invoke(reload_cmd, extension_name=name[0])\n await ctx.send('Up to date.')\n return\n\n else:\n # run pipenv install to get all the latest packages\n await ctx.send('Running `pipenv install`, please hold...')\n # Note: when tested in the wild, the bot seemed to be restarted by systemd hereish\n res = subprocess.run(['pipenv', 'install'])\n if res.returncode != 0:\n await ctx.send(\n 'Uh oh, found an error while running `pipenv install`. Time for you to get on fixing it.')\n return\n\n # give a verbal notice if our service file (which restarts us) is not running\n res = subprocess.run(['systemctl', 'status', 'mothbot'], stdout=subprocess.PIPE)\n if res.returncode != 0:\n await ctx.send('WARNING: Error fetching mothbot.service status. Make sure I get restarted.')\n elif 'Active: active (running)' not in res.stdout.decode('utf-8'):\n await ctx.send('WARNING: mothbot.service does not appear to be running. 
Restart me manually.')\n\n # logout\n await ctx.bot.logout()", "def update_package(self, *args):\r\n\r\n temp = (self.newProj.device[0],\\\r\n self.newProj.device[1],\\\r\n self.devPackage.get(),\\\r\n self.newProj.device[3],\\\r\n self.newProj.device[4])\r\n\r\n del self.newProj.device\r\n\r\n self.newProj.device = temp\r\n\r\n kT.debug_log(self.newProj.device)\r\n\r\n del temp\r\n\r\n return", "def update(*args):", "def __update_package(item):\n\n conn = sqlite3.connect(DTF_DB)\n cur = conn.cursor()\n\n # Remove the line first.\n sql = ('DELETE FROM packages '\n \"WHERE name='%s'\" % item.name)\n\n cur.execute(sql)\n\n entry = [(item.name, item.version, item.author,\n item.install_name)]\n\n # Update a Package Entry\n sql = ('INSERT INTO packages (name, version, '\n 'author, install_name)'\n 'VALUES (?, ?, ?, ?)')\n\n cur.executemany(sql, entry)\n conn.commit()\n\n return cur.rowcount", "def update():", "def update():", "def update_go_deps(self):\n self.go_version()\n env = self.m.step.get_from_context('env', {})\n env.update(self.go_env)\n with self.m.step.context({'env': env}):\n self.m.run.with_retry(\n self.m.step,\n 'update go pkgs',\n UPDATE_GO_ATTEMPTS,\n cmd=[self.go_exe, 'get', '-u', '-t', '%s/...' % INFRA_GO_PKG])", "def sub_install_packages():\n sudo('apt-get update') # Update repository links\n sudo('apt-get -y upgrade') # Upgrade the system\n package_str = ' '.join(INSTALL_PACKAGES)\n sudo('apt-get -y install ' + package_str) # Install the packages", "def repackage(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))", "def patch_repos(self):", "def do_project_update(cs, args):\n raise NotImplementedError", "def update( ):\r\n pass", "def upgrade(self):", "def upgrade(self):", "def update_addon(self):\n if self.verbose:\n print(\"Addon name\", self.name)\n print(\"Addon repo\", self.repo)\n print(\"GitHub org\", self.org)\n\n if not self.skip_custom:\n # Add-on spesific updates\n if self.name == 'tautulli':\n self.addon_tautulli()\n elif self.name == 'matrix':\n self.addon_matrix()\n elif self.name == 'phlex':\n self.addon_phlex()\n elif self.name == 'magicmirror':\n self.addon_magicmirror()\n elif self.name == 'mqtt':\n self.addon_mqtt()\n elif self.name == 'home-panel':\n self.addon_home_panel()\n elif self.name == 'ssh':\n self.addon_ssh()\n elif self.name == 'tasmoadmin':\n self.addon_tasmoadmin()\n\n if not self.skip_apk:\n # Update APK packages\n print('Checking for apk uppdates')\n self.repoupdater.update_apk()\n\n if not self.skip_pip:\n # Update PIP packages\n print('Checking for pip uppdates')\n self.repoupdater.update_pip()", "def apt_update():\n print('>> apt update')\n with hide('output'):\n r = sudo('apt update')\n if r.find('packages can be upgraded') == -1:\n raise FabricCommandError(f'Result = {r}')\n print('>>> Success apt update')", "def update_server():\n log('Atualizando pacotes', yellow)\n sudo('apt-get -y update')", "def update_data(update_method):\n log.debug('Starting update')\n cmd = ['/usr/bin/python', wf.workflowfile('update.py')]\n if update_method == 'force':\n cmd.append('--update')\n cmd.append('force')\n\n # Update projects data\n log.debug('Run update command : {}'.format(cmd))\n run_in_background('update', cmd)\n\n return 0", "def update_submodules(options, project_directory=None):\n pass", "def test_reinstall_packages():\n\tassert packaging.install_packages(pkgs) == None", "def apt_update(self, force_refresh: bool = False):\n if not self.apt_updated or force_refresh:\n self.run(\"/\", \"root\", [\"apt-get\", \"update\"])\n 
self.apt_updated = True", "def update(self, args):\n pass", "def update():\n call('git -C ~/norminette+ pull', shell=True)", "def test_upgrade_with_fromrepo(self):\n pkg_cmd = MagicMock(return_value={\"retcode\": 0})\n\n with patch.dict(pkgng.__salt__, {\"cmd.run_all\": pkg_cmd}):\n with patch(\"salt.modules.pkgng.list_pkgs\", ListPackages()):\n result = pkgng.upgrade(fromrepo=\"FreeBSD\")\n expected = {\n \"gettext-runtime\": {\"new\": \"0.20.1\", \"old\": \"\"},\n \"p5-Mojolicious\": {\"new\": \"8.40\", \"old\": \"\"},\n }\n self.assertDictEqual(result, expected)\n pkg_cmd.assert_called_with(\n [\"pkg\", \"upgrade\", \"-y\", \"--repository\", \"FreeBSD\"],\n output_loglevel=\"trace\",\n python_shell=False,\n )", "def updates_check(self,request):\n\t\tp0 = subprocess.Popen(['LC_ALL=C apt-get update'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\t\t(stdout,stderr) = p0.communicate()\n\n\t\tp1 = subprocess.Popen(['LC_ALL=C apt-get -u dist-upgrade -s'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\t\t(stdout,stderr) = p1.communicate()\n\n\t\tresult = {}\n\t\tresult['install'] = []\n\t\tresult['update'] = []\n\t\tresult['remove'] = []\n\t\tfor line in stdout.split('\\n'):\n\t\t\t# upgrade:\n\t\t\t# Inst univention-updater [3.1.1-5] (3.1.1-6.408.200810311159 192.168.0.10)\n\t\t\t# inst:\n\t\t\t# Inst mc (1:4.6.1-6.12.200710211124 oxae-update.open-xchange.com)\n\t\t\t#\n\t\t\t# *** FIX ***\tthe above example lines ignore the fact that there's\n\t\t\t#\t\t\t\tsome extra text (occasionally) after the last closing\n\t\t\t#\t\t\t\tparenthesis. Until now, I've seen only a pair of empty\n\t\t\t#\t\t\t\tbrackets [], but who knows...\n\t\t\tmatch = re.search('^Inst (\\S+)\\s+(.*?)\\s*\\((\\S+)\\s.*\\)',line)\n\t\t\tif match:\n\t\t\t\tpkg = match.group(1)\n\t\t\t\told = match.group(2)\n\t\t\t\tver = match.group(3)\n\t\t\t\tif old:\n\t\t\t\t\tresult['update'].append([pkg,ver])\n\t\t\t\telse:\n\t\t\t\t\tresult['install'].append([pkg,ver])\n\t\t\telif line.startswith('Remv '):\n\t\t\t\tl=line.split(' ')\n\t\t\t\tpkg = l[1]\n\t\t\t\tver = _('unknown')\n\t\t\t\tif len(l) > 2:\n\t\t\t\t\tver = l[2].replace('[','').replace(']','')\n\t\t\t\tresult['remove'].append([pkg,ver])\n\n\n\t\t# sort package names?\n\t\tresult['update'] = sorted(result['update'])\n\t\tresult['install'] = sorted(result['install'])\n\t\tresult['remove'] = sorted(result['remove'])\n\n\t\tself.finished(request.id,result)", "def test_component_update_get_packages(self):\n MockPopen.mock_stdout = 'Inst a [old] (new from)\\nInst b (new from)\\nRemv c (old PKG)\\nRemv d PKG'\n installed, upgraded, removed = self.u.component_update_get_packages()\n self.assertEqual([('b', 'new')], installed)\n self.assertEqual([('a', 'old', 'new')], upgraded)\n self.assertEqual([('c', 'old'), ('d', 'unknown')], removed)", "def update(self, commit, **kwargs):\n self._pkg_changes(commit=self.commit, **kwargs)\n self.commit = commit", "def _provision_package(self):", "def __init__(self):\n self.update_os_packages()\n self.upgrade_os_packages()", "def __gitSubmodulesUpdate(self):\n self.vcs.gitSubmoduleUpdate(self.project.getProjectPath())", "def update_requirements():\n\n require('code_root', provided_by=env.environments)\n requirements = os.path.join(env.code_root, 'requirements')\n sdists = os.path.join(requirements, 'sdists')\n base_cmd = ['pip install']\n base_cmd += ['-q -E %(virtualenv_root)s' % env]\n base_cmd += ['--no-index --find-links=file://%s' % sdists]\n # install GDAL by hand, before anything else that might depend 
on it\n cmd = base_cmd + ['--no-install \"GDAL==1.6.1\"']\n sudo(' '.join(cmd), user=env.deploy_user)\n # this directory won't exist if GDAL was already installed\n if files.exists('%(virtualenv_root)s/build/GDAL' % env):\n sudo('rm -f %(virtualenv_root)s/build/GDAL/setup.cfg' % env, user=env.deploy_user)\n with cd('%(virtualenv_root)s/build/GDAL' % env):\n sudo('%(virtualenv_root)s/bin/python setup.py build_ext '\n '--gdal-config=gdal-config '\n '--library-dirs=/usr/lib '\n '--libraries=gdal1.6.0 '\n '--include-dirs=/usr/include/gdal '\n 'install' % env, user=env.deploy_user)\n # force reinstallation of OpenBlock every time\n with settings(warn_only=True):\n sudo('pip uninstall -y -E %(virtualenv_root)s ebpub ebdata obadmin' % env)\n for file_name in ['ebpub.txt', 'ebdata.txt', 'obadmin.txt', 'openrural.txt']:\n apps = os.path.join(requirements, file_name)\n cmd = base_cmd + ['--requirement %s' % apps]\n sudo(' '.join(cmd), user=env.deploy_user)", "def cmd_update(self):\n self.update_repository()\n results = self.results.getvalue()\n if results:\n print('---')\n print(results, end='')", "def update_requirements():\n with cd(REMOTE_REPO_DIR):\n cmd = ['npm install']\n # cmd += ['--requirement %s' % os.path.join(CODE_DIR,'requirements.txt')]\n run(' '.join(cmd))", "def update_package_documentation(\n version_suffix: str,\n git_update: bool,\n answer: str | None,\n package_id: str,\n force: bool,\n verbose: bool,\n base_branch: str,\n):\n provider_package_id = package_id\n verify_provider_package(provider_package_id)\n with with_group(f\"Update release notes for package '{provider_package_id}' \"):\n console.print(\"Updating documentation for the latest release version.\")\n make_sure_remote_apache_exists_and_fetch(git_update, verbose)\n only_min_version_upgrade = os.environ.get(\"ONLY_MIN_VERSION_UPDATE\", \"false\").lower() == \"true\"\n regenerate_missing_docs = os.environ.get(\"REGENERATE_MISSING_DOCS\", \"false\").lower() == \"true\"\n if not only_min_version_upgrade:\n if not update_release_notes(\n provider_package_id,\n version_suffix,\n force=force or regenerate_missing_docs,\n verbose=verbose,\n answer=answer,\n base_branch=base_branch,\n regenerate_missing_docs=regenerate_missing_docs,\n ):\n # Returns 64 in case of skipped package\n sys.exit(64)\n update_min_airflow_version(provider_package_id=provider_package_id, version_suffix=version_suffix)", "def update_pypkg(\n major: int,\n minor: int,\n patch: int,\n *,\n is_rc: bool,\n is_dev: bool,\n rc_ver: Optional[int] = None,\n) -> None:\n version = f\"{major}.{minor}.{patch}\"\n if is_rc:\n assert rc_ver\n version = version + f\"rc{rc_ver}\"\n if is_dev:\n version = version + \"-dev\"\n\n pyver_path = PY_PACKAGE / \"treelite\" / \"VERSION\"\n with open(pyver_path, \"w\", encoding=\"utf-8\") as fd:\n fd.write(version + \"\\n\")\n\n pyprj_path = PY_PACKAGE / \"pyproject.toml\"\n with open(pyprj_path, \"r\", encoding=\"utf-8\") as fd:\n pyprj = fd.read()\n matched = re.search('version = \"' + r\"([0-9]+\\.[0-9]+\\.[0-9]+.*)\" + '\"', pyprj)\n assert matched, \"Couldn't find version string in pyproject.toml.\"\n pyprj = pyprj[: matched.start(1)] + version + pyprj[matched.end(1) :]\n with open(pyprj_path, \"w\", encoding=\"utf-8\") as fd:\n fd.write(pyprj)", "def update():\n with cd(env.directory):\n\n # update plone\n result = sudo('git pull', user=env.deploy_user)\n quick_update = 'Already up-to-date.' 
in result\n\n if quick_update:\n # Plonesite Recipe replaces site on the fly\n print 'UPDATE: No full Buildout required: {0:s}'.format(result)\n # buildout\n stop()\n sudo('./bin/buildout install plonesite', user=env.deploy_user)\n start()\n\n else:\n stop()\n sudo('git checkout {}'.format(env.branch), user=env.deploy_user)\n\n # bootstrap\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n\n sudo('rm -rf ./var/blobstorage', user=env.deploy_user)\n sudo('rm -rf ./var/filestorage', user=env.deploy_user)\n sudo('rm .installed.cfg', user=env.deploy_user)\n\n # buildout\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start zope\n start()\n sudo('./bin/zeoclient_debug adduser admin admin', user=env.deploy_user) # noqa: E501\n\n # load page twice to fill cache and prevent a bug showing raw html\n sudo('/usr/bin/wget -S -qO- demo.starzel.de > /tmp/demo.starzel.de.html', user=env.deploy_user) # noqa: E501\n sudo('/usr/bin/wget -S -qO- demo.starzel.de > /tmp/demo.starzel.de.html', user=env.deploy_user) # noqa: E501", "def up_to_date(self, gyp_file, target=None, **kw):\n raise NotImplementedError", "def update_dependencies():\n pip = env.virtualenv.child('bin', 'pip')\n reqs = env.code_dir.child('deploy-requirements.txt')\n sudo('%s -q install -U pip' % pip)\n sudo('%s -q install -r %s' % (pip, reqs))", "def populate_package(package_count: int) -> None:\n logging.info(f\"Fetching {package_count} packages\")\n response = CurlController.send_get_request(url=CONFIG.EXTERNAL_API.ALL_PACKAGES)\n get_version = False\n count = 0\n temp_dir = filestore.generate_temp_dir()\n # Local Testing\n # response_arr = ['Package: A3', 'Version: 1.0.0', 'Depends: R (>= 2.15.0), xtable, pbapply', 'Suggests: randomForest, e1071', 'License: GPL (>= 2)', 'MD5sum: 027ebdd8affce8f0effaecfcd5f5ade2', 'NeedsCompilation: no', '', 'Package: aaSEA', 'Version: 1.1.0', 'Depends: R(>= 3.4.0)', 'Imports: DT(>= 0.4), networkD3(>= 0.4), shiny(>= 1.0.5),', ' shinydashboard(>= 0.7.0), magrittr(>= 1.5), Bios2cor(>= 2.0),', ' seqinr(>= 3.4-5), plotly(>= 4.7.1), Hmisc(>= 4.1-1)', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: 0f9aaefc1f1cf18b6167f85dab3180d8', 'NeedsCompilation: no', '', 'Package: AATtools', 'Version: 0.0.1', 'Depends: R (>= 3.6.0)', 'Imports: magrittr, dplyr, doParallel, foreach', 'License: GPL-3', 'MD5sum: 3bd92dbd94573afb17ebc5eab23473cb', 'NeedsCompilation: no', '', 'Package: ABACUS', 'Version: 1.0.0', 'Depends: R (>= 3.1.0)', 'Imports: ggplot2 (>= 3.1.0), shiny (>= 1.3.1),', 'Suggests: rmarkdown (>= 1.13), knitr (>= 1.22)', 'License: GPL-3', 'MD5sum: 50c54c4da09307cb95a70aaaa54b9fbd', 'NeedsCompilation: no', '', 'Package: abbyyR', 'Version: 0.5.5', 'Depends: R (>= 3.2.0)', 'Imports: httr, XML, curl, readr, plyr, progress', 'Suggests: testthat, rmarkdown, knitr (>= 1.11), lintr', 'License: MIT + file LICENSE', 'MD5sum: e048a3bca6ea32126e6c367415c0bfaf', 'NeedsCompilation: no', '', 'Package: abc', 'Version: 2.1', 'Depends: R (>= 2.10), abc.data, nnet, quantreg, MASS, locfit', 'License: GPL (>= 3)', 'MD5sum: c9fffe4334c178917f762735aba59653', 'NeedsCompilation: no', '', 'Package: abc.data', 'Version: 1.0', 'Depends: R (>= 2.10)', 'License: GPL (>= 3)', 'MD5sum: 799079dbbdd0cfc9d9c61c3e35241806', 'NeedsCompilation: no', '', 'Package: ABC.RAP', 'Version: 0.9.0', 'Depends: R (>= 3.1.0)', 'Imports: graphics, stats, utils', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: 38c65a7251d28ef2462ee430ded95700', 'NeedsCompilation: no', '', 'Package: abcADM', 'Version: 1.0', 'Imports: Rcpp 
(>= 1.0.1)', 'LinkingTo: Rcpp, BH', 'License: GPL-3', 'MD5sum: 8134f67912b506194e3dab4ccd6e75f7', 'NeedsCompilation: yes', '', 'Package: ABCanalysis', 'Version: 1.2.1', 'Depends: R (>= 2.10)', 'Imports: plotrix', 'License: GPL-3', 'MD5sum: 678e03837e25a922bf71bafe1f8de617', 'NeedsCompilation: no', '', 'Package: abcdeFBA', 'Version: 0.4', 'Depends: Rglpk,rgl,corrplot,lattice,R (>= 2.10)', 'Suggests: LIM,sybil', 'License: GPL-2', 'MD5sum: c84d45a85d8ab6bbe517365e8845db83', 'NeedsCompilation: no', '', 'Package: ABCoptim', 'Version: 0.15.0', 'Imports: Rcpp, graphics, stats, utils', 'LinkingTo: Rcpp', 'Suggests: testthat, covr', 'License: MIT + file LICENSE', 'MD5sum: a62ed03650273c09899655065437078f', 'NeedsCompilation: yes', '', 'Package: ABCp2', 'Version: 1.2', 'Depends: MASS', 'License: GPL-2', 'MD5sum: e920282d5a369df71e15241be40cb60e', 'NeedsCompilation: no', '', 'Package: abcrf', 'Version: 1.8.1', 'Depends: R(>= 3.1)', 'Imports: readr, MASS, matrixStats, ranger, doParallel, parallel,', ' foreach, stringr, Rcpp (>= 0.11.2)', 'LinkingTo: Rcpp, RcppArmadillo', 'License: GPL (>= 2)', 'MD5sum: 4d5a304f46d117226791523cef4e2427', 'NeedsCompilation: yes', '', 'Package: abcrlda', 'Version: 1.0.3', 'Imports: stats', 'License: GPL-3', 'MD5sum: 651e6e18e08916b443aaf011b5a63525', 'NeedsCompilation: no', '', 'Package: abctools', 'Version: 1.1.3', 'Depends: R (>= 2.10), abc, abind, parallel, plyr, Hmisc', 'Suggests: ggplot2, abc.data', 'License: GPL (>= 2)', 'MD5sum: c5937b65837ef7e6bfbe141cea257f40', 'NeedsCompilation: yes', '', 'Package: abd', 'Version: 0.2-8', 'Depends: R (>= 3.0), nlme, lattice, grid, mosaic', 'Suggests: boot, car, ggplot2, plyr, HH, ICC, vcd, Hmisc', 'License: GPL-2', 'MD5sum: 1913d76a0fbc44222709381f63f385b9', 'NeedsCompilation: no', '', 'Package: abdiv', 'Version: 0.2.0', 'Imports: ape', 'Suggests: testthat (>= 2.1.0), vegan', 'License: MIT + file LICENSE', 'MD5sum: 80931c0ca85ba5386000bf617552c5ce', 'NeedsCompilation: no', '', 'Package: abe', 'Version: 3.0.1', 'License: GPL (>= 2)', 'MD5sum: 9c151db5397422c8927dee41dabfbfab', 'NeedsCompilation: no', '', 'Package: abess', 'Version: 0.3.0', 'Depends: R (>= 3.1.0)', 'Imports: Rcpp, MASS, methods, Matrix', 'LinkingTo: Rcpp, RcppEigen', 'Suggests: testthat, knitr, rmarkdown', 'License: GPL (>= 3) | file LICENSE', 'MD5sum: e0ea7d068147c49c011c7135ab290bd3', 'NeedsCompilation: yes', '', 'Package: abf2', 'Version: 0.7-1', 'License: Artistic-2.0', 'MD5sum: 6792a51c6fb3e239165d69aa8a71d3cd', 'NeedsCompilation: no', '', 'Package: abglasso', 'Version: 0.1.1', 'Imports: MASS, pracma, stats, statmod', 'Suggests: testthat', 'License: GPL-3', 'MD5sum: 18bd0759cd005c5ac6fb515799b3f3d8', 'NeedsCompilation: no', '', 'Package: ABHgenotypeR', 'Version: 1.0.1', 'Imports: ggplot2, reshape2, utils', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: ca4397ba7390c0e0a3728c0cda864494', 'NeedsCompilation: no', '', 'Package: abind', 'Version: 1.4-5', 'Depends: R (>= 1.5.0)', 'Imports: methods, utils', 'License: LGPL (>= 2)', 'MD5sum: 136f981e1c4f618b64a87faaa7797c97', 'NeedsCompilation: no', '', 'Package: abjutils', 'Version: 0.3.1', 'Depends: R (>= 4.0)', 'Imports: dplyr, magrittr, purrr, rlang, rstudioapi, stringi, stringr,', ' tidyr', 'Suggests: testthat', 'License: MIT + file LICENSE', 'MD5sum: a596c07aaa7f82e5d123b2f7354e5b55', 'NeedsCompilation: no', '', 'Package: abmR', 'Version: 1.0.2', 'Depends: R (>= 3.5)', 'Imports: sp, rgdal, table1, googledrive, swfscMisc, geosphere,', ' kableExtra, gtsummary, ggplot2, gstat, purrr, rnaturalearth,', ' 
rnaturalearthdata, sf, tmap, raster, utils, stats, methods,', ' rgeos', 'Suggests: jpeg, knitr', 'License: GPL (>= 3)', 'MD5sum: cf96d']\n response_arr = response.decode(\"utf-8\").split(\"\\n\")\n with temp_dir:\n for item in response_arr:\n if count >= package_count:\n break\n if get_version:\n # Fetching the version, once we have the package name\n package_version = Command.get_package_version(item=item)\n if package_version:\n # Generating the required URL for the package to fetch the details\n package_url = Template(\n CONFIG.EXTERNAL_API.PACKAGE_DETAIL\n ).substitute(\n package_name=package_name,\n separator=\"_\",\n package_version=package_version,\n )\n logging.info(f\"Downloading {package_url}\")\n # Downloading the details of the package and extracting the DESCRIPTION file\n extract_file_path = filestore.join_paths(\n prefix=package_name,\n suffix=CONFIG.EXTERNAL_API.DETAIL_FILE_NAME,\n )\n target_dir = filestore.download_file(\n url=package_url,\n temp_dir=temp_dir,\n extract_file_path=extract_file_path,\n )\n # Reading contents of DESCRIPTION file\n package_details = filestore.join_paths(\n prefix=temp_dir.name,\n suffix=extract_file_path,\n )\n with open(package_details) as details_file:\n for line in details_file:\n if line.startswith(PackageInfoPrefix.PUBLICATION_DATE):\n publication_time_str = (\n Command.get_publication_timestamp(line)\n )\n publication_timestamp = (\n datetime_util.string_to_datetime(\n publication_time_str\n )\n )\n elif line.startswith(PackageInfoPrefix.TITLE):\n title = Command.get_package_title(line)\n elif line.startswith(PackageInfoPrefix.DESCRIPTION):\n description = Command.get_package_description(line)\n elif line.startswith(PackageInfoPrefix.AUTHOR):\n (\n author_name,\n author_email,\n ) = Command.get_package_author(line)\n elif line.startswith(PackageInfoPrefix.MAINTAINER):\n (\n maintainer_name,\n maintainer_email,\n ) = Command.get_package_maintainer(line)\n\n package_info_dict = {\n \"name\": package_name,\n \"version\": package_version,\n \"publication_timestamp\": publication_timestamp,\n \"title\": title,\n \"description\": description,\n \"author_name\": author_name,\n \"author_email\": author_email,\n \"maintainer_name\": maintainer_name,\n \"maintainer_email\": maintainer_email,\n }\n logging.info(package_info_dict)\n obj = PackageManager.create_object(\n create_data=package_info_dict\n )\n if obj == CONFIG.DB.FAILURE:\n raise Exception(f\"Could not insert package in DB\")\n count += 1\n get_version = False\n # Fetching the package name\n package_name = Command.get_package_name(item=item)\n if package_name:\n get_version = True", "def update_list(self):\n\t\ttry:\n\t\t\tassert(not self.master.TransactionInProgress)\n\t\t\tself.master.Vacuum()\n\n\t\t\tself.fetch_repo_file(\"/torrent\", self.config[\"daemon\"][\"rootdir\"] + \"/torrent\", \"wb\")\n\t\t\tself.master.master = json.loads(self.fetch_repo_file(\"/package-index.json\", True).decode('utf-8'))\n\t\t\tself.torrent_info = lt.torrent_info(self.config[\"daemon\"][\"rootdir\"] + \"/torrent\")\n\n\t\t\t\"\"\" Find pre-downloaded files \"\"\"\n\t\t\tpre_downloaded = {}\n\t\t\ti = 0\n\t\t\tfor f in self.torrent_info.files():\n\t\t\t\tif self.valid_tpkg_file(f.path):\n\t\t\t\t\tpre_downloaded[i] = f\n\t\t\t\ti += 1\n\n\n\t\t\t\"\"\" Default torrent params \"\"\"\n\t\t\tparams = {\n\t\t\t\t\"save_path\": self.config[\"daemon\"][\"rootdir\"],\n\t\t\t\t\"ti\": self.torrent_info\n\t\t\t}\n\t\t\t\n\t\t\t\"\"\" Set torrent handler \"\"\"\n\t\t\tself.handler = 
self.ses.add_torrent(params)\n\n\t\t\t\"\"\" Set chunk priority to 0 (don't download) \"\"\"\n\t\t\tfor p in range(self.torrent_info.num_pieces()):\n\t\t\t\tself.handler.piece_priority(p, 0)\n\n\t\t\tfor i in self.torrent_info.files():\n\t\t\t\tif i in pre_downloaded:\n\t\t\t\t\tpr = self.torrent_info.map_file(i, 0, pre_downloaded[i].size)\n\t\t\t\t\tn_pieces = pr.length / self.torrent_info.piece_length() + 1\n\n\t\t\t\t\tfor p in range(self.torrent_info.num_pieces()):\n\t\t\t\t\t\tif p in range(pr.piece, pr.piece + n_pieces):\n\t\t\t\t\t\t\tself.handler.piece_priority(p, 7)\n\n\t\texcept Exception as e:\n\t\t\tsys.stderr.write(\"Failed to update package list: {0}\\n\".format(e))\n\t\t\ttraceback.print_exc()\n\t\t\tself.write_line(\"Error: XXX - Failed to update package list.\")", "def test_upgrade_without_fromrepo(self):\n pkg_cmd = MagicMock(return_value={\"retcode\": 0})\n\n with patch.dict(pkgng.__salt__, {\"cmd.run_all\": pkg_cmd}):\n with patch(\"salt.modules.pkgng.list_pkgs\", ListPackages()):\n result = pkgng.upgrade()\n expected = {\n \"gettext-runtime\": {\"new\": \"0.20.1\", \"old\": \"\"},\n \"p5-Mojolicious\": {\"new\": \"8.40\", \"old\": \"\"},\n }\n self.assertDictEqual(result, expected)\n pkg_cmd.assert_called_with(\n [\"pkg\", \"upgrade\", \"-y\"],\n output_loglevel=\"trace\",\n python_shell=False,\n )", "def main():\n\n local_pkgs = set(os.listdir(GIT_FOLDER))\n local_pkgs = set([it.replace('.git', '') for it in local_pkgs])\n\n pkgdb_info = pkgdb_pkg_branch()\n\n pkgdb_pkgs = set(pkgdb_info.keys())\n\n ## Commented out as we keep the git of retired packages while they won't\n ## show up in the information retrieved from pkgdb.\n\n #if (local_pkgs - pkgdb_pkgs):\n #print 'Some packages are present locally but not on pkgdb:'\n #print ', '.join(sorted(local_pkgs - pkgdb_pkgs))\n\n if (pkgdb_pkgs - local_pkgs):\n print 'Some packages are present in pkgdb but not locally:'\n print ', '.join(sorted(pkgdb_pkgs - local_pkgs))\n\n tofix = set()\n for pkg in sorted(pkgdb_info):\n pkgdb_branches = pkgdb_info[pkg]\n git_branches = get_git_branch(pkg)\n diff = (pkgdb_branches - git_branches)\n if diff:\n print '%s missing: %s' % (pkg, ','.join(sorted(diff)))\n tofix.add(pkg)\n branch_package(pkg, diff)\n\n if tofix:\n print 'Packages fixed (%s): %s' % (\n len(tofix), ', '.join(sorted(tofix)))", "def update(self):\n with settings(user=self.serviceUser):\n self.venv.create()\n\n self.venv.install_twisted()\n self.venv.install(\" \".join(\"\"\"\n psycopg2==2.7.5\n pygments==2.2.0\n spambayes==1.1b3\n trac==1.2.2\n trac-github==2.3\n requests_oauthlib==1.0.0\n svn+https://svn.edgewall.org/repos/trac/plugins/1.2/spam-filter@15310\n git+https://github.com/twisted-infra/twisted-trac-plugins.git\n \"\"\".split()))\n\n # This is txacme v2 but is not yet released.\n # Should be replaced on we have txacme v2.\n # See https://github.com/twisted/txacme/pull/158\n self.venv.install(\n \"--index=https://pypi.chevah.com/simple txacme==1.0.0.chevah4\")\n\n run('mkdir -p ' + self.configDir)\n put(os.path.dirname(__file__) + '/*', self.configDir,\n mirror_local_mode=True)", "def do_update(args):\n # if args.verbosity > 0:\n log.info(\"Verbosity: %d\" % args.verbosity)\n log.info(\"Data directory: %s\" % get_data_dir(args))\n log.info(\"Updating...\")\n csl = update_list(args, 'csl')\n # if args.verbosity > 0:\n log.info(\"Done.\")\n return True", "def main():\n parser = setup_parser()\n args = parser.parse_args()\n\n global LOG\n if args.debug:\n LOG.setLevel(logging.DEBUG)\n\n changes = 
retrieve_pkgdb_change()\n LOG.debug('%s changes retrieved' % len(changes))\n orphaned = {}\n unorphaned = {}\n changed = {}\n for change in changes:\n pkg_name = change['msg']['package_listing']['package']['name']\n owner = change['msg']['package_listing']['owner']\n branch = change['msg']['package_listing']['collection']['branchname']\n user = change['msg']['agent']\n LOG.debug('%s changed to %s by %s on %s' % (\n pkg_name, owner, user, branch))\n pkg = PkgChange(\n name=pkg_name,\n summary=change['msg']['package_listing']['package']['summary'],\n branch=branch,\n new_owner=owner,\n user=user,\n )\n\n if owner == 'orphan':\n if pkg_name in orphaned:\n orphaned[pkg_name].add_branch(branch)\n else:\n orphaned[pkg_name] = pkg\n elif owner == user:\n if pkg_name in orphaned:\n del(orphaned[pkg_name])\n\n if pkg_name in unorphaned:\n unorphaned[pkg_name].add_branch(branch)\n else:\n unorphaned[pkg_name] = pkg\n else:\n if pkg_name in orphaned:\n del(orphaned[pkg_name])\n\n if pkg_name in changed:\n changed[pkg_name].add_branch(branch)\n else:\n changed[pkg_name] = pkg\n\n # Orphaned packages might have been deprecated:\n retired_info = retrieve_pkgdb_retired()\n retired = {}\n for pkg in retired_info:\n pkg_name = pkg['msg']['package_listing']['package']['name']\n LOG.debug('Retired: %s' % (pkg_name))\n if pkg_name in orphaned:\n pkg = orphaned[pkg_name]\n del(orphaned[pkg_name])\n pkg.new_owner = 'retired'\n retired[pkg_name] = pkg\n\n hours = int(DELTA) / 3600\n report = 'Change in ownership over the last %s hours\\n' % hours\n report += '=' * (40 + len(str(hours))) + '\\n'\n\n report += '\\n%s packages were orphaned\\n' % len(orphaned)\n report += '-' * (len(str(len(orphaned))) + 23) + '\\n'\n for pkg in orphaned:\n report += orphaned[pkg].to_string() + '\\n'\n report += ' ' * 5 + orphaned[pkg].summary + '\\n'\n report += ' ' * 5 + 'https://admin.fedoraproject.org/pkgdb/'\\\n 'acls/name/%s\\n' % orphaned[pkg].name\n\n report += '\\n%s packages unorphaned\\n' % len(unorphaned)\n report += '-' * (len(str(len(unorphaned))) + 20) + '\\n'\n for pkg in unorphaned:\n if unorphaned[pkg].unorphaned():\n report += unorphaned[pkg].to_string() + '\\n'\n\n report += '\\n%s packages were retired\\n' % len(retired)\n report += '-' * (len(str(len(retired))) + 23) + '\\n'\n for pkg in retired:\n report += retired[pkg].to_string() + '\\n'\n report += ' ' * 5 + retired[pkg].summary + '\\n'\n report += ' ' * 5 + 'https://admin.fedoraproject.org/pkgdb/'\\\n 'acls/name/%s\\n' % retired[pkg].name\n\n report += '\\n%s packages changed owner\\n' % len(changed)\n report += '-' * (len(str(len(changed))) + 23) + '\\n'\n for pkg in changed:\n if not changed[pkg].unorphaned():\n report += changed[pkg].to_string() + '\\n'\n\n report += '\\n\\nSources: https://github.com/pypingou/fedora-owner-change'\n\n if args.nomail:\n print report\n else:\n send_report(report)", "def updateFileInfo(self, data, pid):\n self.db.updateLinkInfo(data)\n self.evm.dispatchEvent(\"packageUpdated\", pid)", "async def update(self) -> None:\n # pause logic\n if not self.running.is_set():\n self.add_to_output(\"Paused...\")\n await self.running.wait()\n\n # tell the user we are updating\n self.add_to_output(f\"Updating...\")\n # create ssh connection to miner\n try:\n conn = await self.get_connection(\"root\", \"admin\")\n # tell the user we are sending the update file\n self.add_to_output(\"Sending upgrade file...\")\n # send the update file\n await self.send_file(UPDATE_FILE_S9, \"/tmp/firmware.tar\")\n # install the update and collect the 
result\n result = await conn.run(f'sysupgrade /tmp/firmware.tar')\n self.add_to_output(result.stdout.strip())\n # tell the user the update completed\n self.add_to_output(f\"Update completed...\")\n except OSError:\n self.add_to_output(f\"Unknown error...\")", "def update():\n # plugins dependencies\n install_dependencies()\n\n # git pull\n with cd('.vim'):\n print(green('Updating .vim folder'))\n git_pull()\n\n print(green('Updating submodules'))\n run('git submodule update --init')\n\n # plugins update\n cmd = 'vim +NeoBundleUpdate +qa!'\n run(cmd)", "def updatecheck(self):\n self.comp('packmanager').updatecheck_allpacks()", "def upgrade_dependencies():\n # upgrade pip\n print(\"Upgrading/installing any required dependencies...\")\n subprocess.run([\"python\", \"-m\", \"pip\", \"install\", \"--user\",\n \"--upgrade\", \"pip\", \"--no-warn-script-location\"],\n shell=True, check=True)\n print(\"pip package manager has been upgraded to the latest version\")\n\n # upgrade/install dependencies such as robot framework\n subprocess.run([\"python\", \"-m\", \"pip\", \"install\", \"--user\",\n \"--upgrade\", \"--no-warn-script-location\", \"-r\",\n os.path.join(os.path.curdir, \"requirements.txt\")],\n shell=True, check=True)\n print(\"Robot framework has been upgraded to the latest version\")\n print(\"PyQT5 has been upgraded to the latest version\")", "def upgrade_server():\n log('Atualizando programas', yellow)\n sudo('apt-get -y upgrade')", "def update(self, *args, **kw):\n pass", "def refresh():\n\n with connection() as cursor:\n cursor.execute(\"\"\"\n UPDATE\n package_stats\n SET\n installs_rank = sq.rank\n FROM\n (\n SELECT\n p.name AS package,\n row_number() OVER (ORDER BY ic.unique_installs DESC) AS rank\n FROM\n packages AS p INNER JOIN\n install_counts AS ic ON p.name = ic.package\n ORDER BY\n ic.unique_installs DESC\n ) AS sq\n WHERE\n sq.package = package_stats.package\n \"\"\")\n\n cursor.execute(\"\"\"\n UPDATE\n package_stats\n SET\n trending_rank = NULL,\n z_value = NULL;\n\n UPDATE\n package_stats\n SET\n trending_rank = sq.z_value_rank,\n z_value = sq.z_value\n FROM\n (\n SELECT\n package,\n z_value,\n row_number() OVER (ORDER BY z_value DESC) AS z_value_rank\n FROM\n (\n SELECT\n p.name AS package,\n (t.installs - h.installs_mean) / (h.installs_stddev + 0.000001) AS z_value\n\n FROM\n packages AS p INNER JOIN\n (\n SELECT\n package,\n avg(installs) AS installs_mean,\n stddev_pop(installs) AS installs_stddev\n FROM\n daily_install_counts\n WHERE\n date >= (CURRENT_DATE - interval '1 day' - interval '6 weeks')\n GROUP BY\n package\n ) AS h ON p.name = h.package INNER JOIN\n\n -- The two most recent full days of stats\n (\n SELECT\n package,\n (SUM(installs) / 2) AS installs\n FROM\n daily_install_counts\n WHERE\n date = CURRENT_DATE - interval '1 day' OR\n date = CURRENT_DATE - interval '2 days'\n GROUP BY\n package\n\n ) AS t ON p.name = t.package\n\n WHERE\n -- Make sure the packages have some users\n t.installs > 10\n\n ORDER BY\n z_value DESC\n\n LIMIT 100\n ) AS z\n ) AS sq\n WHERE\n sq.package = package_stats.package\n \"\"\")", "def test_non_vendor_update_references_to_upgraded_packages(\n self,\n ): # pylint: disable=unused-argument\n self.assert_dependency_updated(\n ComponentType.CONNECTION,\n \"my_connection\",\n \"protocols\",\n {DefaultMessage.protocol_id},\n )\n self.assert_dependency_updated(\n ComponentType.SKILL, \"my_skill\", \"protocols\", {DefaultMessage.protocol_id}\n )\n self.assert_dependency_updated(\n ComponentType.SKILL, \"my_skill\", \"skills\", 
{ERROR_SKILL_PUBLIC_ID}\n )", "def publish_updates():\n run_subprocess(['osg-batch-update'])", "def install_api(self, packages):\n\n for pkg in packages:\n if pkg not in self.installed_packages:\n try:\n self.base.install(pkg)\n except:\n print(\"dnf error finding: \" + pkg)\n self.base.resolve()\n self.base.download_packages(self.base.transaction.install_set)\n self.base.do_transaction()\n self._get_dnf()", "def refresh(self):\n self.update_from_file()\n self.update_from_env()", "def update(appname, use_appimageupdate=True):\n z = Zap(appname)\n z.update(use_appimageupdate=use_appimageupdate)", "def test_smart_update(self):\n if os.getuid() != 0:\n return self.skipTest(\"root privileges required to opt in\")\n updater = AptMirrorUpdater()\n # Remove all existing package lists.\n updater.clear_package_lists()\n # Verify that package lists aren't available.\n assert not have_package_lists()\n # Run `apt-get update' to download the package lists.\n updater.smart_update()\n # Verify that package lists are again available.\n assert have_package_lists()", "def post_install(self, installable_pkgs):\n pass", "def upgrade_if_needed(self, restart = True, dependencies = False):\n if self.check():\n print \"Upgrading %s\" % self.pkg\n self.upgrade(dependencies)\n if restart:\n self.restart()", "def force_update():\n # TODO: IS THERE A WAY TO ONLY REFRESH FOR A GIVEN YEAR?\n # TODO: FIND A WAY TO DO THIS ASYNCHRONOUSLY\n print('Starting update...')\n # TODO: THIS IS A PRETTY BAD WORKAROUND. WE SHOULD FIND A WAY TO PROVIDE THE SCRIPTS WITH THE 'LANDTAGSWAHLDB' PACKAGE\n sql_path = pathlib.Path(current_app.instance_path).parent.parent / 'sql-scripts' / 'UpdateViews.sql'\n with open(sql_path) as sql_file:\n script = sql_file.read()\n db = db_context.get_db()\n db.run_script(script)\n db.commit()\n return 'Success'", "def get_package_data(name, package=None):\n if not package:\n package = models.Package(name=name)\n releases = {}\n else:\n releases = package.get_all_releases()\n\n client = xmlrpclib.ServerProxy('http://pypi.python.org/pypi', transport=Urllib2Transport())\n\n versions = client.package_releases(package.name, True)\n\n # package_releases() method is case-sensitive, if nothing found\n # then we search for it\n # XXX: Ask pypi to make it case-insensitive?\n if not versions:\n for item in client.search({'name': name}):\n if name.lower() == item['name'].lower():\n package.name = name = item['name']\n break\n else:\n logger.info(\"No packages found matching %r\", name)\n return\n\n # Retry retrieving the versions with the new/correct name\n versions = client.package_releases(package.name, True)\n\n # Save the package if it is new\n if not package.pk:\n package.save()\n\n for version in versions:\n release, files = releases.get(version, (None, {}))\n if not release:\n release = models.Release(package=package, version=version)\n release.save()\n\n data = client.release_data(package.name, release.version)\n\n release_form = forms.PypiReleaseDataForm(data, instance=release)\n if release_form.is_valid():\n release_form.save()\n\n release_files = client.package_urls(package.name, release.version)\n for info in release_files:\n release_file = files.get(info['filename'])\n if not release_file:\n release_file = models.ReleaseFile(\n release=release, filename=info['filename'])\n\n release_file.python_version = info['python_version']\n release_file.filetype = info['packagetype']\n release_file.url = info['url']\n release_file.size = info['size']\n release_file.md5_digest = info['md5_digest']\n 
release_file.save()\n\n package.update_timestamp = now()\n package.save()", "def modify_package_state(self):\n ns_inst = NSInstModel.objects.filter(id=self.ns_inst_id)\n ns_insts = NSInstModel.objects.filter(nspackage_id=ns_inst[0].nspackage_id)\n if len(ns_insts) == 1:\n sdc_run_catalog.modify_nsd_state(ns_inst[0].nspackage_id, 0)", "def rewrite_packaging(pkg_files, new_root):\n for file in pkg_files.glob('*.py'):\n text = file.text()\n text = re.sub(r' (pyparsing)', rf' {new_root}.\\1', text)\n text = text.replace(\n 'from six.moves.urllib import parse',\n 'from urllib import parse',\n )\n file.write_text(text)", "def updateVersions(self):\r\n f = open('../versions.pckl', 'wb')\r\n pickle.dump(self.versions, f)\r\n f.close()", "def add_software_update(session, data, username='system_user'):\n session = validate_session(session)\n operation = operation_exists(session, data['operation_id'])\n node_id = data['node_id']\n if node_id:\n if operation:\n results = add_results_non_json(session, node_id=node_id,\n oper_id=data['operation_id'],\n result=True, results_received=datetime.now()\n )\n for update in data['data']:\n update_exists = package_exists(session, update['toppatch_id'])\n if not update_exists:\n if not 'kb' in update:\n update['kb'] = None\n if not 'version' in update:\n update['version'] = None\n app_update = Package(update['toppatch_id'],\n update['version'], update['kb'],\n update['vendor_id'], update['name'],\n update['description'], update['support_url'],\n update['severity'], date_parser(update['date_published']),\n update['file_size']\n )\n if app_update:\n try:\n session.add(app_update)\n session.commit()\n except:\n session.rollback()", "def _package_upgrades(args, env_attrs):\n\n overrides = env_attrs.get('override_attributes')\n if overrides.get('osops'):\n osops = overrides['osops']\n else:\n osops = overrides['osops'] = {}\n\n if args.get('disable_pkg_upgrades') is True:\n osops['do_package_upgrades'] = False\n else:\n osops['do_package_upgrades'] = True\n return env_attrs", "def set_package(self, pkg): \n self.pkg = pkg", "def update(self, *args, **kwargs):", "def __update_vnf_package(cls, vnf_package, location, size, multihash):\n vnf_package.algorithm = CONF.vnf_package.hashing_algorithm\n vnf_package.location_glance_store = location\n vnf_package.hash = multihash\n vnf_package.size = size\n vnf_package.save()" ]
[ "0.7270838", "0.72650665", "0.7251651", "0.7214989", "0.7085298", "0.7072634", "0.66923255", "0.668152", "0.6604724", "0.65995777", "0.65975875", "0.6594511", "0.656834", "0.6567808", "0.6567808", "0.6559777", "0.65499485", "0.65261453", "0.64859855", "0.64815766", "0.64602417", "0.64236444", "0.6409327", "0.6389884", "0.638869", "0.6378217", "0.6372137", "0.6354302", "0.6340421", "0.632727", "0.6323655", "0.63236254", "0.63180006", "0.63068616", "0.63068616", "0.62790287", "0.62683296", "0.6249588", "0.6230507", "0.62239003", "0.6218742", "0.6216929", "0.6216929", "0.60976624", "0.6043172", "0.6031396", "0.6030181", "0.6027685", "0.6013078", "0.60120565", "0.5998028", "0.598029", "0.59789646", "0.5960652", "0.5950423", "0.59455657", "0.5944466", "0.5932739", "0.59311146", "0.59304845", "0.5901174", "0.5888432", "0.58759564", "0.5869324", "0.5863093", "0.5834848", "0.5828777", "0.5826459", "0.58220345", "0.5811959", "0.5810578", "0.57959", "0.5795074", "0.57790434", "0.57786286", "0.57784057", "0.57625836", "0.5761102", "0.5755343", "0.5747584", "0.57367647", "0.5734662", "0.5734483", "0.57247746", "0.5722415", "0.570568", "0.5693026", "0.56843185", "0.5680765", "0.5675765", "0.5674623", "0.5665282", "0.5657061", "0.5654844", "0.5646905", "0.5645778", "0.56393015", "0.563692", "0.56319356", "0.5629448" ]
0.7036477
6
Display HTML icon of OS distribution.
def show_os_icon(self): if self.os == 0: return "<i class='devicon-debian-plain'></i>" elif self.os == 1: return "<i class='devicon-redhat-plain'></i>" else: return "?"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def downloadicon_name(self):\n return 'platform_%s.gif' % \\\n re.sub(r'\\W', '_', self.context.getPlatform()).lower()", "def icon(self) -> str:\n return ICON_SERVER", "def icon(self):\n return \"mdi:hubspot\"", "def icon(self):\n return ICON_BUS", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def api_get_icon():\n pkg_name = request.args.get('pkg')\n if pkg_name:\n pkg_files = Database().db.get_pkg_files(pkg_name)\n for src in pkg_files:\n if src.startswith(\"/usr/share/icons/hicolor/32x32/apps/\"):\n return send_file(src, as_attachment=False)\n return send_file(\"static/images/null.gif\")\n else:\n src = request.args.get('i')\n if not os.path.isfile(src):\n #abort(404)\n return send_file(\"static/images/null.gif\")\n return send_file(src, as_attachment=False)", "def icon(self):", "def get_icon_title(self): # real signature unknown; restored from __doc__\n return \"\"", "def icon(self):\n return None", "def icon(self):\n return None", "def get_icon_name(self):\n return 'gramps-notes'", "def get_icon(self):\n raise NotImplementedError", "def get_icon(self):\r\n raise NotImplementedError", "async def icon(self, ctx: lifesaver.Context):\n if not ctx.guild.icon:\n await ctx.send(\"This server doesn't have a custom icon.\")\n return\n\n await ctx.send(ctx.guild.icon.replace(format=\"png\"))", "def icon(self):\n return self.ICON", "def icon(self):\n return self.ICON", "def icon(self):\r\n try:\r\n return self.data['icon_url_base']+self.data['icon_url_name']\r\n except KeyError:\r\n return ''", "def icon(self):\n if self.device_class:\n return None\n\n return ICONS.get(self.tesla_device.type)", "def icon(self):\n if self._sensor_type == DEVICE_TYPE_DOORBELL:\n if self._camera_data[\"event_ring_on\"]:\n return \"mdi:bell-ring-outline\"\n return \"mdi:doorbell-video\"", "def icon(self):\n return self._metadata[2]", "def icon(self) -> str:\n return self._icon", "def icon(self) -> str:\n return self._icon", "def icon(self) -> str:\n return self._icon", "def icon(self):\n return self._sensor[CONF_ICON]", "def icon(self) -> str:\n return ICON_CORAL", "def icon(self) -> str:\n return ICON_CORAL", "def icon(self) -> str | None:\n if isinstance(self.wemo, CoffeeMaker):\n return \"mdi:coffee\"\n return None", "def icon(self):\n if \"icon\" in self._typeconf:\n return self._typeconf[\"icon\"]", "def icon(self) -> str | None:\n return self._get_sensor_type()[1]", "def icon(self) -> str:\n return \"mdi:thermometer\"", "def icon(self):\n return self._config.get(CONF_ICON)", "def icon(self):\r\n icon_path = \":/plugins/pdok_services/icon.png\"\r\n icon = QtGui.QIcon(icon_path)\r\n return icon", "def icon(self):\n if self._name in [HA_MONTH_ENERGY_KWH, HA_LAST_ENERGY_KWH]:\n return ICON_GAS\n else:\n return ICON_PRICE", "def toolIcon(self):\n if self.tmFile is not None:\n iconFile = pFile.conformPath(os.path.join(os.path.dirname(self.tmFile), '__ico__.png'))\n if os.path.exists(iconFile):\n return iconFile", "def icon(self):\n return DEFAULT_ICON", "def getIcon(self):\n return \":/icons/Ship_Instance.svg\"", "def icon(self) -> str | None:\n return self._icon", "def getIconPath(self):\n return '/zport/dmd/img/icons/noicon.png'", "def get_icon(self):\n return self.ICON", "def icon(self):\n return \"mdi:brightness-6\"", "def 
icon(self):\n\n # look for icon one level up from this hook's folder in \"icons\" folder\n return os.path.join(\n self.disk_location,\n os.pardir,\n \"icons\",\n \"review.png\"\n )", "def icon(self):\n return self.__icon", "def icon(self):\n return self.sensor_type[\"icon\"]", "def icon(self):\n return 'mdi:text-to-speech'", "def get_icon(self):\n\t\treturn QIcon()", "def icon(self) -> str | None:\n value = self.entity_description.icon\n if self.entity_description.key == \"weather\":\n value = self.state\n if value is None:\n value = \"sunny\"\n elif value == \"partlycloudy\":\n value = \"partly-cloudy\"\n value = f\"mdi:weather-{value}\"\n\n return value", "def icon(self) -> str:\n return icon_for_battery_level(\n battery_level=self._device.battery_level,\n charging=self._device.battery_status == \"Charging\",\n )", "def icon(self):\n return \"mdi:eyedropper-variant\"", "def icon(self):\n return 'mdi:broom'", "def icon(self):\n if not self.device_class:\n return ICONS.get(self.entity_type)\n return None", "def icon(self):\n return \"mdi:car\"", "def GetIcon(old=False):\r\n\r\n # Imaris icons used with permission from Bitplane\r\n Icon8 = \"eJzsfQd4VNW2/yDSpCQhvTfSe++Z9J7pvfeSKZkkkw4kJKE3BVTs2EEBBRsqIkgRQXqV3nsHARFY/73PSRC83qved32+/3vs71vfnnrOKruv9VuHQulHGUhxdKSgOoBieJJC0VEolIAA8v189Plm9Fl8fO/7EAqlyZVCycsj34cXUCh+WRSKQtH7/RwKpWQMhdLZ2fv94H6US079KOHomo74uhTy898rUqX+d3/zW0UgMxF1TV2Lf421habS14TwxdIn/8w12MJarZjTOYNT+uWXZRmtDWaryC2/SOkbGFjyyO9oLHnvvZpGmevtg+3tHRSxLs+Nzz49WUwHYMScBlrCUQgIFCxOS5dvENdWD3j4/1y50bGyuHGr3KjQW+1Nz9iae4aq1Sd15tJbwKCsgGrKQuAEnYCkYVOgsqhpW2KsGbxctUfdXFRrEx2mrhJlL7/AjP7wp0LX9+8EuzffpHOEkQY67KJTPoQiyjzIp8wCcdx5yPf4FHJdXj1fY5x+LDGscU/a4DdAEbnvjjLv0F1e4r77XPfj4DSkHTKCFjbwI89APKUTwiltkOj9LPToATJit0LuqG+gKvXVm+PGrL6iit66xFJ88bKx+DLYkJw1aT+DC8UMcf4LvuClX7kTSLFDTPAH0GUEEDAvQn7+YcjP2QNF0XuhpnjH3SbBqUMN3KP7x8gBpjQAdCgBAgc/Db6DJp+S8HZPEBYCqJJvE7rLyziK/nsaWLRLUKe/BTPHAMxqhJ+n1QF0mwA6EX/TmwEyIz4H5/4Nd+r0C4pNE1PcaPFvzav2WwOVkfugKvkIVGScBWbeNZDT7oOJj3hmAqjLAfj5AKzsu5Cdsgc8Rhiu2fRrhfba18ymRn4xK24HMNzWASNgLzDiyeuUph1FdBhK0/dBUcouyI7eDrlR391Mj/xmZ6bPy1Zr7dzxje0Tlyi4n3zLSD4EEu+jIHTfBtX+26AgdDPkhq+HrJCvIDf04/crcqZ2CblCv9EbKP07vqA80kbLg3dx+PGnQDPqFIjd9kGF73eQF7QC8gI/gRy/uT3tX1D6SxTmqQZzPZ3LF/r+VjsWex16Vu52Clieu6DAfzXkBH4GuX7zbyV6a4l2qDPa3YSy2qliceswJov9yH+TAtqJOt15ul+h+0fNVN8PZlF936/Ndh43IMfn5X74O5ma7GsytfnPdK9/WQBI2oJo3I9oHMF36vfob1YiOoI/c0B1HKJmCuVe7//wOBOAKI/yx8aZx4Us5vqWB6/lyjGD/7vvrzO3hOgtzTadqWVxbaO9scZaX8cVCEP5QrEb/p7BFvxl9+ZLmieLZPYtUkXzSpm6/hWxyjTBam/gavX6QDaH68XmiUPpbMvgnHzef/zebH5DK5NrahfT8oN5GTue5uSdOcBOO3qt2G/+D4nJsmnmWm0gX8gdmUuVER2SWsj/U9c31TUn662NMou9lYrf29vGkvcV1CBq8OPLlAx25lYNHY3jbDQeM9G4iF/L0RiZ7vgyRIQqDhvNqsDEBPWLKSnij37vftW9duJKtKUcgWGh2mDqESlqms0NLR+0do5/9uHfChSqUlb+1dEWOxrvqwEk/qeBM3g7MPp9C6yki8APOAF+HgYIHSX6WiJteS46kgfJKRLjr+85ahSNqAMCyJpNH9NJz58M1GQ7KI1qEZsvVjI5UrZKb51R39pZam5oJn5HL/lm2uRpSNaUn4E/5Ac0365E9ClUUpZC2ZBvQZx6F+IGTYZIjxbITtPOYrGaIDpKDcF+unsB3tr1iF5D1IOoyd9LUxfsZWgtjJg5RVy4BBiRH0JF/Jy7JcHPb0x1nwmernoIDVa/KZLppIR+SmwB48YBGNHcIxi4E+iUL4j7VlAWQxnlPSgb9DVI0wCog96GdJc5kOgw7oxA3LSzumw0JEc1gLuTFjxG6ghycdRAvNNUqBj2BWhK1oA8ZxOwIpAOY5ddZHhugryhi8DN0QCZnm9CRqqqlpC9Eka3qgFEA/dCFeVjRO9BKWU+lFDeIdYORa4boSb+PuQ5LkNrh/mQ6fASFMR0fFxTMwM4lZNvJ4a3nHN+qgZGDNFC6rB5wHXaBqKIPWCkHbknSPkBBCkb73CjDt/njjoCVU+tg5GDzRDoMQ8yfGdvwveXi2GHPOgcIW8+5UXIo8wFKmUOZCHKpEwBdtEJqAm9Ablo3qL6fgX5bouB6v7mT3RG897pU9/+sGvylMkJHj23qU++BzyHH0DkehpquHvOKnKP3JLl7gZO/AEQJp4DSdxl4A49CF5PtsEAihUygr8CSsAT/bVCuI/1nonWSImUCYh6IIYyCSIpYyGEMgOmI9toMi9AVvRmyItAc2/w11Dg9ymURM47JuBMg5fmrtkxoWf5IXnQZpA7XkF2vHDLVnL7hib/
JKjzzoAi+w4oqdfBhNYyipGXILT/ZBhIkUA8mvvjHcYzlVxAOl+B7tkJoyhNEECxgVf/WnAfrAGT9SeYhdY3dfrzkJ68E9LQOiQvZhNkh3wLeX4rgV38/AVJ1WJobth1t0wyOqXdtuJtC3fHJlP+1Tumsgv3mlHfbUdrKrz+apeivhVxFZIHvQSDKDLwc38J0p1ff0aK7l/R/2vwQTrxpNSAK0UF7pQWEEp/hmfQmmwiIi7zNOTlHYDMzN0EH9nx2yE7fCsUBG772cJZcdtasROaWBeW9LQcWVfXNmaOXXz0EL43XjPObAV4sRsAr9MMGQB5Tp8Q8j/1RBtkui38qqTk7jFp8g2IQ7YPH/gcpIfvBKMNYBwagxRZSDdF54BKPQ5FqB2Ulx+HqvKjUFV8ECqo+4GedhAkacd/tlUcuFlfdu6+hXXkfn35T9CO7t0mRnJrAcYiGoPadwuSf5IFXS9sDyE/pkzHdw7ohUsm9o15XPQbK6prc9C9E29AVdpJYt2ZmXoCMjJOQ2EhWjtWnwel6BIY5ZehRnIFGqS3YKIJ7o+V3L+OeL7ThfTdoybXslj3+N54bTqxFuDlyQBK+llwRDYegPQcOXjalSbrG3pV4tIU6ahDe9TBJ0HucwrYQUeAFovWeomIUg9BEV77ZpyEwqyzUJ5/EXj0q2BW3YZOZJvZ4wHemA3w+lR0ffR6TgfAM+1I343kPfHaeLSe1MNYJL+AcQ1Chs9E8hvAf2jr7Trde61NTS1Oz8LQ/mL6TD49cvEOvF6u8t8CFaP2QmXcD2i9ewAqUo5COeKhLPsMVOZeAnrhdRDTfwIz0lkXsu0kdL/xqB6L2hoeT+zo81pkBxMb2Z1OrrHxej4v+xJkBX0BQ580gJeT6bJF/XmbzWZzlGuZDh09LWFqizCRVTyuoTpo8S2697dAC9gOVSH7oToB83EIaCnHoTr9DFrHn4dqdC167jW0br8NnNw7iO4+IDYaT5lZPyG6hb6/Aczsa1CVdRG132OQFrUDgkeMBn9P7S6bYeU4odp/cJ3uY21X5ztPq7SGDKlaXCKv/uo63e0bIMh7G1QFovtH7Qdmwn5yH4HaRFXa6V5ezhJUmXkGyvH7zFNob3CS2COUpR8i9ggFqbshD/Wd5OidkB6zEXJ93oMoL/uYhpr1rxvkqxlaxVyDtd5e1NDYSNMZRrdWp+4DXsx+YDt/BxxXNIZ7bQW6/x6oDtsPtBhSHxVJP0BZ8j4oRuNsMdq34H1IfuJOyELjRHb0VsiO2AjZkWuAGrkCqOHLruWGfbIxP2zREmrgks4cvzfyiHWB9vOxNTVzJhu1z7w0pmfyvIaG2YtEjOU/lKFr8ZNQ/0JtUeS8BfhumxEP24HmtxvpYheUhu6EPNRX8yN2QE7EFsiI3ABZEWshI2wVUEOXQ0nswv2FoYtbKjOmM2TiPIex9ylPCNsog0QaltfDc3Zr9xh/e/O0bVZ751djx0+Zb7POaC9LRvsspFs8dmsiT4HU/TAIXHYD130H0Hw2Q5n/JigO2gDUkLWQHfoN5IZ8DTmjlgM14MO7haHzxtKry33YIu0yvojjKJHKwmVyhTONznBkMFguAonKTShVPbj/hNmd/7B2ocft+Zmdchbp/yCow86DyvM0iF0OAM9jJ3H/Ev9vgRqI5AxaDtnBX0JuENpX+70DWb6TU4l1B08czeDqppit9ZaWlpbIahrdmc5gOvOF0nB1/Ye/t3SiVPp/1ySIPQLq6EugCz0Haq8zIHE9DHy0f6P5fI/2cGshN/AroAZ/TuwD8/wWQqbPrAP4vzSWhMKT1gwWK01csaJ2FVpLbxJJVXlMNtNLLNN7/d69+wrXZ/c3htBLYAhCc5jrWXT/g2j/uIPYfxYErIScgC/IfaT/EqD6vAUZ3tMrUgN7iP/yJTqiVuntdsTDLb6kFkQSe+kfvXdW4BSiZrismCF02QtS16PAc0dt32szFKOxKc//S+Le2f6fIdkXQ7b3SzP+1fVWffvBE7gWitv+KAsUz8A8og7wljuXO78/udTt00PFnkjXPp+iffdSyEb3RX14LdoDE3JFBqr/8LX/TPEOePSsKd7b4pzo2TCy731a8KS/5L7/Vwo8VO4hOoLoNhXgijPMPtL/flYnxad/J4WCqe8Eoh/lV+cJKxFdQXQXkRl9P5r4fT+YRBkOtym+cJfi13GXQqXep6ALd8IjBZ9TxCNSUB6fUzwu//3l4fMeXNichn5yVfsTA3/1OxbnP3/m8V8tSr3tweuO8RODW8Z05lobmmgGq51usNiK1DpTCF8kdvwbWfynRSC1ELXB2hZssjU3SRT1i0Qy+yb0ekKtvdFstNbZVPqaHJ5AFMLlC8OFEulw/Pu/8vzrzxRx+cR+IllLk1TZ8LlKa5+h0oy9oFCNPi+StJxDcnxqa2qrs9ps4Wj95cficLxZPEEYVyh3r6DXEOfbf8UZ2h8pTF4NRWnofoLDtz/P4tUf5fBtb0vVent1NZfHiJ2h5ocvtNAjnh1bkGGZxBaLadZ6Q7hao/Xm8HhuLK4gpLhMTa9gGX7dPf6tYmlo+qfflVQV/sNnLAF5vs4R2iVMnuU1vkiZKJZmBHFiVk/n51w9ySuHG6xyuMOl/vwzM+DA7RSvrssJ8YoFMoU8RW80eLM4bJfCQnFOZqZi0uRX7X/6DPG3yujuScNRn6vUmxtmGmqb3rQ2tnSb6ptH4e/6zhhxweeMuHDFzU4snvV9tYLuRstcWsoruHSSKwRgor0yHe3Z6XyShFwAlvdBCPFowmePZ0tKRRUGg8E7PV2RmZQkh8xsSR6+XmoG8w/zis8kqxnk2lSq1Y+Qqkxipc7Srbc2LBLL9RPQ+7GI92UtnRO+b+0cn4V/1zqu55FrsAW1Y9Vqph+LerJFzId7UgOAHPHNQ1O7IO4qCEJPgSj8DCBbgLwYIPOp+eDloYPwEOk+JkeaWlFh1SObQEKC+FpBkcofX3PFAe0f4p9WTZ5XckXaODq97svSItv90nLtUYPZbJbINVy5WstDZEIyPN3QPm5DW9cEwh+kNpD+GRa/PkSqZ2fyi861GS0AFhvJoywM7XVG7APOgI1Ao6whfI60wZsBn00xnXaA7/A6CPEzQFgI/wtLbWN+WZkZIiN5kJAo3pGaaR7+W7x6eXEenMfi0ncmy2C2hHIrJx5lFsyC4vgpkJvUADyxpoUnEbMKikR1efkyvUQhSmHz9a9Z7O3z8H+QLMR/BVJ+rLB4R73JCjChG5/X3gPByFPAH7gV8f0NcXaKzzDJ89NlxBmA2Pc0RA2YClEeoyHUzQwZKfIepbp9a0aGHqKilBAdroFAH80hRG8FeGvrgnzVNFRHI/JD5OPvqfX28FD4RHo2+KX79YQKil9ZLCpaCMzEhVAe+gaUJExF+/3Ombm+z0CCTyf4e+oh0E93JSNDXMZkq6faR3d6YN5FdsoT5bkfC41GuP3qCwDq5DsgGHEE2P039vp4yfPmX85+PwZm1nVQBJ+HzCfmQYrrTEgY2QVRrg3nK6vqPtTruiApvgawLzjIxwA+bjrwcdc
+ID8P8r2XmwYi3Nog0+VpKAl4FeQly0GcuQ5YYV9DdeRSYKS8dqDa44vrFa6fQdqwueDiWAOjPOohyqvpUimNS+WJdNFE28lcOJRRfXXZSy8h3jNQ/xxxDJiUDQ/p/EPCT12OiDw//hCY2TdAEXQJ8vqjPaLz65Dh9CwkO0y8nx3W/BGNbr9ur58FSZH1kB7TDK4O5Dm2u5P+ATkNV6Pfz4Yqhy+B7rAaNKWrQV2+DfgJO4ATsQn4KZ9fZwV9f5vhsx0Yrt9C9sD54DbcAjEecyDbaz7E+dd9om9mEn4jVjH42ixwy47GGonjKeAh3rHOsa4xz2W9597llLfQ63mI/8XAR31D53MJioaifabrIsh1eps4/04fMeNQeXndFxJJDxhUc6A0p/vn1JjWmy5DjeA8XA+4dkSUOewNYDtsAYbTNpDEbgcDa999df5hECXvBmHKlvuc2K232OGH7uPzvkq37VAx8AtwHVILjkMaIS3kK8hwf+lqcrognmj7lWCuUSPdoz05u98mgu9ixGsx5Q3inL6Q8ipRF1BehjzKc4TO8Vmm3vUyFLiuAqrXMsh1+xDyRs6HbKfXoChs5ntsXvOF9tZXwGqe9o7ZOuGVxIDOGx4D7eA6qBZwbADHAel55D7gex8GI2vfbT398BlZ5gkQpe8Dcc7G+7TwA/d4sWdAGHMaWF5HgDlgC/gOaifONr3c34TsoBV3E1y6iMFNyIQ1mkoA/vDDRFvp4xOf9edSZkEO5WnIRJRBmQGplAlQFr2XOEOV+J0BasA6oI5aA/kBK4Dq/QlhiwLvt4FV1fkRnzseXn118fMTpk1tstlmfBQ7rOcu9YnFwH1qO4hGHgKe2xlQ5xy+ZxZu2aWkHrwnyzwGquItgH3qjNhjIEv5ESRJiEJPgGTgAYjoPxUGUTQwkGKFxMjtkOL28ryRbhUDxGy4o0i9g9rN94SfAvOZTJmCeJ1E1Nh3EEvpQtQGMeh9bS0ao/A8lnyEOCunRn9PnFdh/0GR/5eI/yVQ7P/uZR6n84yU8yK88/baVVOnv/9uz5gPdvAcN9wXDT0IYpcLoI08d9/KPn7MVHTnriH3CqiLd4Ei8wJIM26BLPsaKHKvgQHNPaq46yAefAKS+r2A9K+BIRQJBPl/Bum+H+yMd+pwwvOpNPIS0eYx79jfEE0ZjfafLRCGXocNbIPAJ+3gR6mHipJT8M7zAB2IfwX9KGQkbofMuG1Ajd8M1PD1hC3y/L+CfJ9l96vTXzwjYk+/rxG9B7Omb4EJXbvBpFm/VRGy6b1a6v7TpsKLl2oL793TZ94GU/FJMBSfJ3whtdUADWyAOtYdsNLugCHrZ5A5XoDM/gvgKYoB2UABro6TCD9N0shZfph/GZpXC1EfxXoOozRBMOLVv9dv4fyEAfUdOcRFfQvvL0C6R21n9jj0H/5xSE/fCSkJuyAlZhfkJm6F3KiNkBNGnuHl+qy4xac/c0XFfB8MwlWg02w/RROpQvTijyo7275YUlf70uvG4ovXLBWHThoqT99uYpK+FOzbwOf6k+pIakDy4LG6YMhnMAzxg/kf0q8RcpC9Y90mVfKRrOLYS6jdv0X4eTDPmNyQrCNRf3FAdXLEWpg5C2Ai9ps0AoxuuAU02gmgFu6HjIw9kJFKykHYI3IrZIZtgtyg76Ek+LurVsHHt83MtWAu23m/jX/x+9k9F8Z3ta96G63HehpaJ75mFx/da6cD4FgbzDuOt8F+mRfGA8xqBRiL1jCahFtQPHI9OFPsBP+DEH/x7osg3nlmJ7caTglysK9oFfhQzOCK2pgL+o0XRQcRw9+E0sKz0NGFrmsCwDqqEf8ELNo5KCk5AUVFxxAdgfz8g5CdvQ9yMndDXsYOKEnbAUXxu6E8fB/wknfeaOBsvmMrPwoNpVfvNkmvH7RJj16p0yzfWcc4d6NDSvp0sO6xLwn7NzBh3WP/Vgsa17F81YEHwfvJzl7+NRA69BVIdHpmgbBq5zo2soEk/TZUDd8ICYPnQ47HeijJ+RH4SCdNSB/Yb2RCMvLRZ2V55yAv+wTi9wTk5Z2A0tKTUFFxAqqqTgCt8jgwEJ+M4sPAzjuE1hlHQJx1ErQFp360lO6/UVdyDkxlh8Ba/cPtumK4P4YLD3xSmH88rvX5h1rlpG8K+6rmtKPxvegcBA2ZifqvghhHfftPh3iHmesVghefZVbDUSaSky0mCa+ZpUrS12JHfcqQhNagKZehKv0UUNOOQxaitLSTkIV4y8s7g2Q4S9hEwLkACuF5RBdBw70IOs4lqGFfhyb+PehUw+1m3o1zDYybF7vEcK8b8ysl/XGYd8wr5hkT5h/7lbCPCcdevT4T6VFzG+Kc30b8qwj9u6KxJWbYlCMG+QvPNIp3MQTUqwewH0iD48VKkb7TEEXfAWnUReDGHQd60mEoTz0EZelHCN8bliMbyZObeQYKcs+hsekC8OiXQSm6hsaZm2CvuQ1tlp+hG9nvmQ6yPT/fQbbpWah9TG/obS+9NN7y24T9nM+h/7YiWdICv+4dgwzgiPpCxPDuy2bFO8998Nl0d3XBfC9e0Moevt/396Qe+0DouR/4vvtRu0OE2jH2l5UnIEo5CMVIDuwDKkFzZkHmKcJ/V0o9D1VFaA6vvA5a0U2o092FLsTn0+jeLyP9vfUsqcdXe/15z6M+OmsM6UfFfXZSrwy4zWO/HvYvNitJakXvNYK7kI/GOY/BbQT/TgNMEOzQfq1G8d7H85d0jDSbrSPq33AbxOTp/dnZ896lh356tcrvG6jyWQ+0gK1QHbwHKiJ+IPxL5Yn7oCyZlKE8/QSS4xTh/6vIuQjVeVeAWXwdRLTbYBTeg0bs40RtYEYHOe7i+LxpraReuyy/+AWb5GR7rRc/6h/UofFTitoDreAGZCX9AMnur6D2b0BrKT34j7SdsciWffPKR8XD0VbHEe3HHQ0WrfuceV3RQo69jJH68puVwUuu0bxXQbXPBsJnSMO+y8h9pJ8s8QDhs6tEbakMtSXSV3cBaDmXgUG9Csy8G8AtugnSsrtE/ImWSfKFSf8Qf3j9oizt3TMVkjGDnBxM94CVfQdo2TeRjS9CXNx+yEVtyPOpemI96ONm2GpRffPFmAk0J6PR6KBSK510Zo5rz6SuOKVGGy9VaUtE/Do+PemVg1WenwDN81vCz1QZsBsqRiEZovdBVTxqW0kHoTrlKFT2+h7L088RcmC/ZnX2FWDn3kB83AR2zi3CJ8rO+Ykg7AfF7xloDia//5FYm2OfKP4fIwuNGdkXCP9lbuoxiEb3S0fr61SX54h9RYCXbn6tbv1io7HDR64UDjdqXg5orf3k88k9S5+XqxWRaM9ZJlXLK5TC1/YxgpDuXZYBw301ML3RftJ/B1QH7SP8lnSkl6r4A0Qfr0o+RvjX8ViFfaiVmeeAlnGekKePqrLOE35eXFdmne31s54m/dCoPZZnkDGYJWn7oTB9L+Sn7oas5J0Qi9ZtaWi9hf00ce5jIMTbwrQZN71qrXk5Nr2M0t+qWDGzRj9dM258N725rbtIqlKXoT20iV/+9U1B8g
Fgem4D+siVwHZbA3SP74Hhs5OI/aSPOgCMKFIOZsLBXjmOkDYh4kGP9vp8j0BJ6hHS95t2kOCvJHUfqvegMWEnFCIeC5J29PpptxOxJZlR3xH+08zwb9Da5GvIDfkSrXuXHsnxfGFCketLA2yGzVNt+rWzjfJvF6gVk0QqnYpqb2krHdfdzZBqBJU61YuLaeg+7GS0No89ADwkA3PkGsJ3zCZk2E74j2lBBwhbYF82Hqsq4/eifr4XSpL2QCnal2A/cn4q5g+tlXp5zER7l/TorYjP71G7+A7xuAbxuArywpcjWgb5YUvOU0MXLy8IfW9Ofvi8hqKYlxjUgA/9qf7vuhR6v9Yfr/9N2i/LDcqvNmiNDUyldDLa2nd1t3dPfK574rOL1aquTn7ZunMVaT8ADfHAS0B7pJDDIHTfAQLnjcBz3QQcj81A9+n1PwehNUMw9kHvRvuYPVAYuRtyEVGjdqC6T58bCV4f1ik1fNmd/NCPz+eHLVpVEvN2R0XaZI5SmTB8PQztJ5tBGSBqoQzmyYpcpXqxw6/PM6zWl31sdXO+ra+f851ONnfT2AmT1nZOnLZ2dNeUdWrhkiPlaTvulSP70pH+sd9aEXOciPmVuuwGgctO4LrtJNoVtgOOAa4I3AZFwdugOGQL5OJ1XNhGxOc6QrcZYashO3QlagPYx/sJ5I9avKUo8p1p9KrRO9i0mljMj8qe7qwwMLxEEp4D9m3TaAwHOoM5ki+SjqqxtvT/Nf/N49qeaOuaoLA3T9pgbRz9SeOY7neR/sc0NE0tqozfuRjzzkZ9kJVyEjiRB0AVcxYU/icJv7PM+QcQ9vLP9t4GlX7fQ5n/RigK/BYKgtdCVsgqRF8T/JI8Y7/0R4DsfzQ/+PVUellNApNT5s3iG7awhcpavlDwlEimSKqpqXHn8HhOdCbTCcngxGJxPAUiVTCxX3zIR/97pTR401ga6m9cNB6yE08AN/IQaCLPgCb4NCjcT4LC5RAIXfegPrGDiKOo9NsApWhPWRDwDeQFrYRstMfAhP35uUHLcBwBUH1fO53tM8EJX5/Dk3mxudwQBkc3mcHVLdIZzXKtXh9hq6v3ZrLZ2Lc/ksXhufMEkki+pO5Px8yW+yyPq4zc+yMn6TiwY9C+L+o4aCMuEvwr3c8S/m+R216Cf+wDx/zjGIC+GASC70AyFiA/APul50Gmz4zuvuuzePYneCJVAlcgTq6oYkQ2NI9b09TW2SGVy2M5XJ4Pg8Vx4wlEQUKpNoD8vfLPikCh+a17CcdLKNAeRxN9DfShF0AXcAYUaP+N+cex6zh+oNp7E5T4rSfi0HEMQ27glwTvOYh3HEuA4yiovs9fS/Mdl0JclyUhrk/nKZ+QKK2lIoV1vkBm/Umpa7htsDQtkkhlmSwOx5cvVoaJlaZBf5rxhwrHZ+c2TcR5MIZeAS3ay2l9z/bGPhwGvvsPRPsvR+uLEqT7vhgIkvfPUJv5mIhFoPq8gXT/9Oe/vnZfTITGaC9W6e3bJErbHbGiFiRy+0GewJqv0NT7/7t8ZweR8RL5LnMcWe4bl+E2j3nHbUfudoKI3cCxI7jtlKF1Ho6hyAlY/oD3B7r3eQ+yvV4+nuHT5YyvFxto+s37sdg6B4HULEL0NFdkmSGStlD/Xd77Sl/MR5nLOwNKXBfUsUauA5kLmstc8BnOXkL31Wg9UeS7ljh7yO6NASGxDB8T8RhZPvNOZXo/G4KvkxjY/Jv34Qh+idPomjjpAaKgklP6p+JDfqvgmJEsv+nE62ERmcMKnF9/nub8+Qm625obVe5r7xd7fQMFPiuA6reMiF2h+mO9f/Qz1XfRJdRnl2Z6Tyb0/lfFkvzRMipA/OD10JSEoenu0wtz3F+xZnu8MT3H88152d5vv0X1fvu1bJ+3JuZ6vy7P9pwd0vf75KDOv4Plfyg4bibO3/7oh09RKHHedQNSPMYOTPBsfgRnkzXqWUpC4D/3xz0uj8vfWchooT9Q+5P1XQeyvnqLrG/2uwyAZpgLlMtwl06h3Kbo4eppHLcUDEevUyhHKJSOlUPJWCYKmkU7cd3/H2sKftk3aj5ae/wan/VbJa+3VvTWfdEnfZ7ijk6ybugkr1vbWxOTI5plcPSkA5D8UFG9DnH1NZYbcf1ab32ZrB1W47qT4nAU1ysp/j/h+goOwkL1bUoHqbfuP6TX8F7eSXYCfl/Qx+VxeVwel8flDxW1ttlDpW5xLS0tH0Ityn3i7+bnrypqIxmzaGvpcmvp6JHZ2zon1Ta1P2eub5pQU1vfbaqt60a1ViiWJvBFYn+ZQvOnchb8Ty+mulY3vbl5hqW+cQJPZPuWw68/gmy/oLbRbkCf4bjHOoPFxkayj+IJRKFcvjAevQ7E/2Xz/uvxan9H4YnJ8xSputms0NiX6kxmrlLb+oFW1wEa3VgQy9pBIGoBsbxxdUPzmK66+oZok6U2mMPj+TDZXG8WmxOIdBErkKie+ptF+dNFrCBhukJJ8xtsbiOwuPUgkTf9JBDVL6moNrTQWeoytYkZbGop81capYkCiVmpqbGxFRpdjNlqDZbK5V4MJsudyWL7MDni1Aq6IfFvFulPF76k+Xk6qxZYHPMLErkhmydk+sjtqb60XCuNFbnwWWbI12vYwV/vKPdfsC8jeNyapBTpVKFUXiRVqtPUWn2wVqf3ojOYrgwWx6ugSFFbXKZO/7tl+qMFyU5jcKwnJXJVjEAkCRDURXmxEl4x8JL2HBXk3AMxG4DLJeM9FZUAsugLUOz4IYQFGSAhXrFEIJbky5SaUTq9wYvGYLiM6UodkpYuh9JKtc9fFUOs1JOx2Wh8dq21N9OM1sY5WnPD14bapm+s9pb51oYmDf6+ps7+L6+DC4tn26fUKAL5QrEvm2qK46cf2iFkAjClpP++WoZI9AsRcS7xNyBv6Pvg52OA8BD5z8VlQpFcqR6F2oGnrSl5cGKCGtLSxFfwxiS/WPgfk5svIWN0DVaLt7m+XqOtsbQg2VeKpAqLVG1ZqNCau2rqWla0d01c39LZs2rMuPGDjbW/fc6GC5tfN1ksUSeL9eXu/NgVAi72maoBpGYgYi7EfCQr9vvmAAjSAETlSCdCADX6XOp5GiKGjIcgfyOEBEmgrELA1GgNgYWF+oL4WBXExkogI1O68j8lu0KrIGq51kSvZuo3FhVrgM3Vf6k2Wtolco1GpjI3CcSyAqlKazVY7e81je36DLWPf5kHgSuwzKppLXcXpu2xs+kAlmYArRERklGcheQNOw9C15NE7C3/qd3AG7ILBJEXgMklcxGUDPwQXBwNEOyvhVGBwmsSmTTVaGwvyc01QFyMGBEfsnMVlv+UDth8y/TqykaoLOyAgvQmyMtTXTbW1tZKFCquSCbLUqgNdolCZxTJFEy0XhlvsbdvaxrTI/uH6wgMOHeSWGUShklyjtpxfPfkZwCMWtJ3rgi/BLyRx4A7ZAewn9xAxO5WUpYTsaQMylKgFZNjAfup78F3cAuEB9gg0EsDoaN4q7Rmnq9WN+5OUoKQiEGOixcC6g+5D98/MJDxp2VnMcY2ckqnA
6dgLlSmzIaCyEmQm4DnK90ksVqaX0mXN2ZmCuoyMqT11TRxkUDMjqpiaN5D65lNv6lLgUmrLtkqwbK/9g5AvR619SRkc++zwBm2FzhPfv8gZplGWfZI7C8t+gjRBwQjDkHkk9Mg0rkDIryaINhDB5npIhuNYXtPremAqEgFREfJITlRAQHeagjy1W5E9Hywv9rk66HLCQtSjUqKl/2DL/DXhVY8PlRQ8iqICxcAK+19qAx/C0pC50JxwmSgVZtsecGTr2T4ToZY/3bw89JBoI8OIsO0X2mM7OBqhvr99q7u6F9fk8doseE2v+gjgNZaZPf4OyByPgvcwTuB+cR6MsYcyd0Xr41jQvticcvdfwAl0ptw5BlIp7wK8cOmQarveIgc2YZjuH/kiWo0YnE78HgtEBGugfgYPcRHGR/EPWPy99Q+oADvB/QjoquIrvl6aX6O9miBJJceYOe+BbKSL0CUvQLYUZ8DI2QJVES+A7ScmUcrfd7bXe6xGArd34WU4TPBw8mI4zkgyKsGIkbplklkjDCu2KR5WHaOqiqZUXhv2/OvAsyciNpy/F2QIVk4OL9Hv7VI9uVEHGZf3DSW+5E4ZLedhPw4X0Q++ixjxIuQ4f0MJDhMhjjHsZDoXzenvKr2qN0+DYryGyA+wgIZCQ0wytcIXi66f0nuLhqIcG6HYocFUOqwCNijPgZ15degLlsHwoRNwAlbC6zw5cBJXgzMyKXbcSxKtccaKB/xKSQPfg48HCxIz3rIcJsDca7jIClJqmRwVI90OF7+qgYl6usL3gZQonFdiOwuGLgd2JQ1D9mclLv6odjryt68I6Ve+0CP4xEdr0BJ/08ge8Q7kOvxKqSPeA5SRkyDxBFdeC1lKymzwYQJLxOyp0Y1Q1Z824N8KH0x5K4Ov9ROw7WQPGIOMB2+BrrTKqA7rAVNxRrQ0baCIn8HcGN2ASdiC3DjVoMg/eNjdO9dwPTdRfieKhxWQebAt8B1mBW8nGqJHAZUr/cgxrP5qEghyXhkLCmHHTifTC1a28i9zoFgwC5gob6O5e6TubyXsOw4jru8N5a7FLX3ioSjUIPkVz51DUoGL4NcpyVAdXsHcpxehwyHF1B7eBqyPLoWMti2LRzOWJg08XXIThgDpVndkBo9Gvri0PtoJHqP87NQh72DZP8O2E6bgDlyK4hjtkENcw/oKw+DLH0fCBL2AD9xO4hyVt9iBR68xRh1CDiIqr33Q5njOih98lNwHlIHTk/VgBeyA8b6p7u+BGmZ8gftXxF7JJiPxi6cm0IVfgP4g/cBk7IScC66PvuSsj5KOKaazP/yNFTkA9ShOVAz5CLkI1vlu38JVPclkOvyHhGDn+X4KqI5wKDZaqvpzdDSNBc62l8HLr17p0rd+UqM9xjwQHy6DrGA0yATDB2khayn5gNrxGYiFp/jshvY7vtBj2Q3cQ7dUuedAmnqIUT7QErdCJWR627wo48BJ+Y4CBBxgo5BpdMWYPb/FnwGtcIIdM0hFBnEor5C9fsCYr1aHoD6+cXQLVeTsY0YZ0CnrOvFczwao98nL6ZcgmYRlIe+xzGdzej/yuE4Z986yPNdBXleX0Cex8dE7DuO4c91fh0KAuZu44oa1lRVdMJzs5fcmjh1krZr8rTp9fVPfxL6VBf4PTkaPJ+0Q/7AxcAbthX4jnuB73IIcJ4/ZfY+MIv2XTBU79+tyD4F8rSjIM3eC+KC9ffLAw8TsZjChIvAj7sIvEgkx8h9wHtiJ4Q+OQmGDTDCIIoKXEfOhfTwDZDiOhv65OdWw7dKtLZVRN0C0eCdhN2xvA/H+fdRNmUGZFGmoTF+GpHrB8fSUwP3wOhWgMYEAI73Scjy/w7ykZ4LRn0DhUErIN/3M6It5LstRHp5G+h5k+bhPsBlTLq09OPlMydNnVHXOWHK0w31z30fQZkARU8sBd6gTSBEc67E+ShI3M+CLPgkWCWHbus5G76X5+wHefYxkGQeQGPgBiTveeBhbEDcYZCk/QSylFuon1wGntspEPbfD7H9n4HhT9b0xhcb0BpkL2R7fAj+Xpo899goCprzbmtxrH3gaeAg22N7Y1wDljENyYgpuZdSKD1EviGcZzGOMhrRWML2OC67NgKAl7gPMsO2QF7MFqBGfEvkXsTxKoU45sP7E8j3/gAKPBfcEIrbt3JZE+4rBPN+/uzTzcunTntt8tjOee+Pa5q/Fc832A4YryNxOweywLNQU3Lktl12bKO14C4Ycn4ERS6SvWIzGgMuoXbwI4jTb4A44yLIsq6DDq3N1Sk/gSL4LEgGHIYMJM+IAaZe+RXg6/s5ZAeuhRjncYTjG8dYa1HbFTkdJeZ1bPckZNcEShchZzSSEcfvRw3oRmubTghD74MpdghAuqQm7YFXXgEYh/Rnr/wJeEUHiFxDWX3Yg+gNkB1Cxr0QbcFnGaGHkpA3NwuF44FPfwZ00kWw6P0fYNL4tTBlwnZot6+5pPLec1s9/Cqo/C6BMebqvQbetTNWNMZgTII+5zLoy/aCLv8iaLPvgibvPhhL74Gp/GfQl14n8lpivII68hqIhxyH3H6LwOlJa6/8KnBzngmpkdsheeScPUT7R7yrqWjOG76HGOtSkN1jevNoYqwFlhVT0IBmCBhYD76DasF7iB4i3d6C1g6A958H6EFz/4R6tF5iHIKstB2QjOYlrIeCpC1E/iecOzI3GOkBxy/5f4Xmxi+AWTx9p5Q/E3jVL4BW+jnMnb0H2pvQOpO55WtVR1KMkbd4Rkft6m02/v5dpty7YMm+AwaMHSk7gsaq88R4hWUlckZJf4lLt7FuQC3ShS7pJxA4nYXiAV+C+4CG3hxKZB8oyNqFc0lBn/1VOIZ+6C6i38eh9h1CyF1P5LbCeA0P9B+Me3DqrwWnIXIY5T4ZlAaA1V+heYNN5jLqbr4FXNYRyELXTkvaBUlxiKJ2ATVlC5HbJidyA4HhyAleDTl+30CB96ofRfzJlzSc90BT/TnoJOtBLt+xt1pGcRWK5cF1in0dXR0ffdDa0fFMDWv79+aCG2Bh7tpXU33yJsYN4Nh8HJPfh+vAMdYYX4BzKeHP9On3ib5T9tQ6CBg8+iH5ZWjOXQE5rkvAx1OehOXHMcI8hz1ofHuOsHtfbq0+uZ0pOnCkaGA4RQQRaE6XorXSZ58hmZG+cc6tWWjNaNFfBhrtGOTn74fMzL0EhiU1cTehhz48Do7RzQrbiNoCGiMDN0JZ6JpzVukHP9s4K8FUsR6s5XtgjOjS/u6W5YUvTL/11djR82dZG9rG1TW3Tmzp7nm3QXDqgK2clL1TTRLOGYXjzXEc/Qu9+aTGoz1rfSmZP6wMrRvCnprwkPwqCHZ7FXLclkO4c7OOWQ0/Y9yowOsEMZfhHLDevVgXLD+pA7Q+Q//LGbUbMH723XdJfAHGG3WakOzq66jdniXyfRUXH4bCwoNApe6HbDQ/ZWTsgszUnZCZuBOoydshNw7HfO6EgpBdUBT4A3Djvz/TqlgPjZztUFdyCOxFV6CVe/ndsdabJ22KrbsbWibOa7DN/66Ocek2xjWMRn2t
kUvKjjE3GDeAbY51gGscc4+/w20Ejxd0n2OQitZgOIdZn/wO/bvRnLQKYhw7p3JK761nIznkqbeBMXADIb9Lr90xZsYb1amOS4GL48xt6D4zSR7syA56RKLKq8AoO4dkPolsf5zAzpSUHIPS0qNIF0fR54ehKO8A5Of8AIVIH8Wof5Sk7SJid3GsOxfNW4rcrWfsrN3QUHIKGisvg63syqV6ybV7tYITgPo/1KJxrZVL4jpwX8fyYxtPNJHtHuM5MGGMQV/utAY+mddMnHIacka+B08+ZH9MGGMY6zB+saj0qy+Zved5krSfgeN2DK29lkG64xdQHnqIwBTjXGumenR/pO8WpNf6fLxPuAfM7CtQlXcGcjJOojb/C/YG66Ks7BRUVJwCZvVJghhVJ4BVeRRYaPxiEzicI8DJOQrCnNMgR2O6ofTk2bqyvTfqSi6AsegImMoOgLlyzw3Uju/idoblb34Ik4NxUWPkv2BbWnqxLhirgHEuU1C/eGEc4ptxE3Ldv3qAUcOyD0B15rDFEDNiwhq1ctx0QcH9vX1YdqwL3B6I9wqkF9TX9bg9Cck1njEFyZ50C+ipF6A84zTkp5+AjBQSu5OOXmdmnkJt/ySUFp6G6nKcfw7toVnnkH7Pg1xwjiAFeq1An6kYF0DHuAIW1k3CpkiuH+2sKyfrq8+faay6e74T3RMTxnp19OJ6msQk9cmLCWM0MGHbE+2id0x48xmyf2b7bwLvgS0kxo2QX4Pm8+ex/D+oxbPfeOPZJYnCrOu38FzYh1fC5yBaKbmubyxGfTwJ6SHmJ5DGXwJO4ikCD4Bj54szjhIYBIz7wXn3SOzPKchBdi0pOAcVaK5i09HahH8ZtLJrUIPGijr9dbDrf4QG7S1o0twhsCYYj4TxVAQGCPE/DbXrycZfaLzxoTZvJMd8jKfBbb77oX6AazwG4HHwlenoOkgP2Wg+jneY2TsG6og5MKj/RIge0XVGK35twYdLF4c26Tt8hYE/rNfEXgJ9ElpPoPWsKQrVYT+CPuQiSMNOgjDqMHDiDqJ+ux8qk9AeK/kAlOB8f+lHiJx/WAc47x817TSBf8rLPgeVheeBVnIJ+MjOcgGaw9S3wF5zB1pr7xE4oqkdZE5AzOs8RC9NJHFEuO1iXNSzY3r10krqCM9xfbgibGdMvx4DCD2g76ei/+F75KG9ItVjce8YYIDB/fTg2b8Voh3GXTFK3130/uK3AsePH++6GYY+UR3yGofj+/lhofdGELhtJ3Kocbz3AMtvL5FTD+NbME6HwE0l7iVyDZKYo8O97YHUQQHOv5h1FoqyzkNZ3gWoQuM6xlApeTfBrPwJ6eA+9DSTOKrnppI5ETGWCrdZjKfCuRdf6MVT9eVIxPI/jKnqyyOOMW2tvbiqvr5A4JbQez3qy9m4jfqvBteBNkL+YQO04IzWcWGOo68YZe9tXbR0rmdXd4+rXq8frlRo0MpzaL+q7EksVsrbO+ghnwMd7ecqvdYBzXcjVAdsJbAMlUgPVVG9uCtEGOtT3osfK0XtoBjpoDTrNCK0BsFYndxLQMu/SmCwBFU3kR7ugEVK4tcmINs+PY7Es2F6tpvEZM3A81kzicnqriX7Mm7zWNbWXlxWHzbLJvwFn4VzpGP8kwzNT6yS25CSchKtPbZAwsgpBEbLaSiS/6kaCHZuOFMjW3r6/a8sjh2d41z0euMIjUYzQqXWjDSaa/2fmTspiVM9hl8d9+ZuZvgywHitSu9vkT42EZgtjD2rCt0H1TGkHnCfwHqoSCWxWzinIsb/YBwQ1gHO/1hFRfMl9Tqw0HqOV3ITRBV3QIHP2hH/Nilps0b1o/Z8WEYL91EZMcZL3Zu7A5MkvxfnlXsfGLl30P2uQ3LyCYgK20P0AZzH0nWYGVwcdeDratppkX91/LmFeQ5tbW3OOoPBQa3VOmp1emdLXUPg5ClTsxRqXbzaIs2i5Y+pr4h860KV36dQ5fENVHl/B1V+m6EycAeJXQvZT+QA/QW/dgjp4RiB/erDsJHYqIuEHjCWjZZD6gLjv3A+TH7+PUBzEZF3E5O4F7eGqU8uEZUkjGPjZ9/vxbLdJfBsffkzMdFyrgM99wqUof6XmHgEQpGdstE+DJ9ND+1nIs5e/T10X1nVa/c2tRu96uobnLRarYNKo3FU6UQjLQ1i7/GTu5Pkam20UqMvkaq0hSqrqICVN2tTlc9CqEbrRxL39j2Bj6kK3EOMDRUR+4mc+3iMpCUc7M29eYzIv1mZeuqh3JskDq4q6zKBa8M5PzHOrY/IPKA3H8jT9578rhcPh/a7+L+MrKskJo643kVCzwT+DM/FqD+mIJuEh6M1eQRae6N9m49LDXG+7Oelm1Or27DRUjMjBuP85ArlCImCMbzJ8mXtGPunt6ZP+fQZiZIfjOSvkKu0RVK1qlKlmrZZkLIfSl0/AxoijJtjeG5E68ytqE/sJMbIPrxWnx4qe3FneM6s7NUDxtDRkB4whq7igS4uovePEv6u4kFN4usw9u5hbB3GepYS/Q2NPemHoDDtIBSiMR9j7HLQ2jsezX8kxm4DEcue7DoVgj0tEOpryrfqtyyz1bxRKpXJh4tFiqEW3ccJtZqle+taavImTJkmtDdOSVdoDZWoHZRKFUa+XPDGBVHWYeCi9lTpsJbADrJcVwPLfSOwfDZDte8uQgcYt0YLP0DkPsV6oPXh75KOEnog87GeJMYIXBNyPEwEXvA4IVdZxtEHspHzzEEiR2sfRg/nas3vxelhDBy1FwOH953paP+N5cZ4PYzVIvA3Hm9Aiuf4z/D+t65m6wsN1k8bBXz5cLl44giTfPVeTY0q12C25nd0ja/u7JlQjeSvQuNAlckyZjYzZ9M1Bprz+Gj+4/nugWoHZHuX5cB2XUvqwGsrcQbNQP0BY4zpwQeBhXRAjz2A9HCA0AMeI/uIwB33Uh/2sDiVlK04dS+JQXwY44f2UTkJv2ARH8b5ZUSuI/LHkvi5VZAduoLI7UpgE4M/gbzAJfep/gu+zvN5mXgASkxADcWq39Reb/z2oEm1Rm6Uf7dLqbaUa/Q1xbqamtKm1rbyiZMnM5RaPVeqljP0irc20NAcx0D84LMuccxBEPrsBPbIDcBxXkXmvUU6YHpuITCYGL/IxDoYRWIY8bqhHM0VFYjK4vdCaTyJY8RUiPZDmLCMVGRHanxfXtydj8iJ7YjlJHF3qyCrV8b8iM+hMOIjKAxdfL4gZNGqgrB3XywIeb0lP3Quvyp7LMuHYv7NuCyL/oNko2rDGZ180dMCeWGiQjaaY7a1WVvGTuhsHzdx1tSZz38qFXfU64yjWwTl392uykDrvoRdwEX9WdSLexR47Qahy/cE9pHruhHY7ptITKr3DgL/iNtCOcZpo/1zScgeAgOZH0HiIAsJHOQuYk+cFYXsGrWtV86ND2R92KYEZi/0C8gLXYqus+hIUcT7L1akzuopjp+SaeqiDJyJ1nAYF0kX5A4lMIZ0potQogz9Z75Es3zb0FrT29+ZDW+sbe+avFRVvehgS2fPyo6JUxeO6Zn8WcfEyR/o5K/
vsuqXnMd5fjHusBDxWxG7l5BfHn0MZL7HQez+A4idtxGEn5vCQYTzD/fhJ/E8WRa0E4pHbUc62IHstB2oYdsgF5+X4nOR8A2QHfEtpBH4T1LePkwlbr84Z29R2Ad3q7NeOFmVPaMlmjKWeBaK1k4bwuULY9RqtTNPIMDYyhE0BsMByT6CzmSOZLLZvkqtxe2fyY9Lc2f3802jJ2yoNb+4R6cd/0lr58T1HROmruuaPGPNmPGTN5gMs1ajdn+2ALXHCtQvmWjs5iD786IPgDr2DGhCjoPM/SjIXX8g8h5j7CVeN+M2QOSB9t0CZf5boDxgM5QGbYaiUZshL2Qjou+I3PDZoWRbxvISuMzQh/IUByyG/OA3VxfHTZeJbBQnFt9wnieUcfkisSfmXSiVh3J4PG+z2ezG4nAckMxYBw5YB0wW200gViX9K9lbO7spYub0J9q7J75qb53wfW1T+yd1zWMWNo/tnt46bqKuuaM7utRpW24lsj2WnY3mIQ7O9Zx0HFhofFfHnAdt6AlQeZ0isacu+x7ogOm+g8Cf4jVCBVo7l/p/R2BQ84LWIPqGkDVn1AqCsJ2xzBiLmhP0MYGLzPN7Y1Ou60uOQnVlEJPNDWawGP5MrnETm69fyBOKymQKxRMiqSLbYq31QutWFwaT1YdNdUSvnVlsjpdApAr7V/Lj0jRm3O/9hFKK2isDzVPM9HPATD4FvPhjBG5VEXUatCGnQeOLyO3EA+yqyG03YX+m12ZivYwxoDgPdWHAGuI8HD8TCM9FfTWWG2NZSRzuAsj2mbsX3zc9fDQFTvlQOHxJGrJvCJOjH8/gGEAkUchkSg2LzeWHd47rCkRt3xnLjLGtWAccnsCbKxBG8cW1w35XuD9QKgPXfkpH8xUr5TSwEo4DPwbthYMPgy4S57w+Q8j/ADvquh+4D8mPbY8xpBiHWUhgeFc8wMH2yY4xjRjHS+BJfeZCqk9nYmJQA3HvaoaYgmzuxhFI0lkcRgCLp9HSWeLqnskzlvZMmEA119ano7buits70oELxscKRJJgZPt/iHP4d0pByIuUaq9Po8qCdyDZD6P5/CBwIg6DOvoKaMMvgXbUGdD6/YKdxfhNnP8at//qPvmR7fPxMzFwDqPAL39lcxLPie2OscBZ3lOP/BYfPLEhlidUVXG4bD++qMpZb2wFW+PYgwqNrkosU+agcTAAjXfuXD7fVyiWxir1DSNY/D+Wq/CPlGq/NfNxm5fHngN93C3QR10FY/gFIue4xvvsP8qPMaCE/N8h+5P4bQJDHPQFSYF9dl9KYHGxfxDbPs2n558+RE6issUrtLYFfKllL19iAYzJNde3XZUqLdPEclkym8MNQP3ET6KoifqPCU7Bz9UisYZojXtWGXX+vj7yGoE71oWcJ+RXeWLs7qkH2Ok++fHYV+5D4qcfYJCDHsUhE7b3fZ+wfbb3M7f/FR86SwOlrFQ7WFNjf0ult9+WKG0gVdeBSI5qZfMaDlsbK1c3VP8nZe8rOUEzKXGe+uFsjw1XVEGnQBN8lpC9D3dNYpdJ7DXGjuN1YHkvBpiUffkjGGwCU9tr+xyf+YhehnSfKb/LO0dI4m15oppMocyyVCAx3+eIzMATWdG42PQiJ/eb/jJ1w1+hAkp64AQKJfnJJ2mun2zBYz2WG5PM9SRhe/5Dbb/KcxNh+2Ikf77/14/ITsj/wPaLiJz8md6z5/4ZXphcEmv/9qKX+ptsrcH34CiB6OOL/71nMP7REuVPxp1mu82poY1cfl88cieIMPYbjfsE9tudtD0+IynqlR0/AyDb/yHZ/fGzCD4icODZ3u+iMe/ZeX8p039hyXKd3VLkvOAKA+2BmG4bgIb2QJWe30Gp1zoo9PmGeA4Slh3L3EdYdmz3LJ95V5HdiTYfE6j5vVv9jy6RPvXJme6zp+S4v7IF77FzPRdAntdCNK4thlzvD1C9kMxZ4PPW9Rzv197J8X62Ev8vN+Sdv5v1/0jpHzTikfc+ATTHBO/2+BSvnoxUz8npqI7uF0158CzRx/j1x+VxeVwel7+vwJ8tVzrIurv3fR75/q4/+fb2FSpR/7zS4Wv0ruMWTnMAN/wvUfrPBJg58wKl30y477AFLfInwv1Bp69QKJ0ddwe7HyHTFTisJNnCdQCqB3WS73GNn4uBsQH9emuc26APK/BIZocne+sRv3r/r0peb63orf9ZnoeOI731FbIeTdb92nvrZvL7QR8QdT94mbggFYi/DwKyTgNYhQW9jhSF6n69df/7qD6C6w6iHnQbKRVddxBOjnEb55FAF7iLazLHA6nylRTSAlf6/WlbAvk8H8ziSsrjPBGPy+PyuDwuj8vj8rg8Lo/L/75CY3J/9zcs7v+8Zwf+/16MltZH3ksUZgetsTFCoxubqtCag5gsrhuLw3NiCwW/i/9+XH6/9OVtwkVrau7XNKZnRH1zV2LLmEmjG9o65tma2l6utTfPNDc0Taqx1k8x19mnGGsbzAq1rlwolScLJDJ/gVgyWCRVPLbHnyxCec2D1/bRXfF1TWPM1sbRL9TUt32m1DW8oTHauoyWWoveXFuHyKY1Wep0Jms9et1Z1zLGJBBJInE+LZ5QlMQTiMKQPVz6rscR/Ofyp/xvLHItmV7EVN88WGdumac2NMw3WurscnVtF4tru8Lm1d0WSppvGmpGf9U8ekxdc1u7ob65zWKpb7QZrXVGHO+M2n0w0n8whyfAFIYoBn0WK9cbCd/F/695vf7K0pczTKS39VPqm0sUmqbD2pq6Vm2NRarS2yxiWfNBjX4smC09BOlrxhG5xDT60d/UN47tHt3Z09bR1c2rrWuIUGv1gUKJ1A/NBz695M/icEJRX0hC45GzwdZF3KuS8efzvv9vLAKZiUJnNRKvxYr26QJpC3CFDYeEkrr5CnXLKoWm7WeZog2E4uaTDKZtbVW1eX55lWluJc30XDXDMo/Osizgi2ufVRsb2k21dVSzrT7caqsPVqg0Pkwux4PB4ngS+czY3CAuTxjL4Oge5I8orhD/U77+LxScL66SXke8FkqbP+Pw7cBg2wCNNUjfjSCUNHzL4Zm7OTxdKYenyuaKlGkihTRerudHK4zcaJWJH6c0ylMVGl2eXGMsVWh1VLlam4LGoUicU89ktvjy+AJ3Bovl1msHPzaXG15Rrev+Hdb+TxXU5pewOQ1AY1pvsnjWzWJZrVkik4UIxFx/kUwwii/mR3Np5kxe8bjiyuQZtvLIF18uD3/1/cKwZz/NDe/5OCPWOi+fqtRX0qR0gURVotRos8QyZaxGZwg2WSy+QpEYx9a5IvJANvAtKxflFJRoZlbQDcScUFIl/btV8N9euCILpbHtaZwzr4XJrQM6y/Q5R2Dgy5VCH76Q7y+Q8kcJR7v50NPHcllRn73Ai9y5nZd4AMQp50GM8zYgkiVeAnHYYaB7fgdU9xcgOrD2YkyM8ousLGmrSivPkikUiag/jML9QCKVudEZTBc6k+leTeP6Z+coPigoUkzAvKRl/fkcWn9VMf0qz15hQdEgmdqWZahtkeutjQZrY5vJ3NBSVd
6QP6BN8S3PEl3udY3m+N60etaDkoxEH4bM2GbpNz898Yt7+KSI47VfxtmerchV3YKqfItSJRuIJu6B4mKlYiQzEOkZDFixC/o9WfIVh1BK8MFLsPpDhsQK36JKHE+gsVXYbKpgjJMA/2CMBjXxqIjXkHChQo4rw6H36F0xOwtQtGaTgj9IAOunWLgkhUNfXwCtItpjuM9oBxualBMMN9W7A1FckNHtMMYqMaZLbZK+6lywS5O1MjNoiDVi4KdUV6hT9fOClwlvmGEY00M0oVf0EbViG7m5+jrW49RoUA3jzoUOx9BjnYX6ZlDSJB/i1zS9208rnK+j5d/gyibjxHcbAq87XvAYJ8Bpb0j1O1doXnbH16vx8CzUwQMqSEwe2TCTSiBUWgFD6GG9NVw+AqDYKbX6tXBULzhCYe5AXD5iea0K463nHtdEX2uDPKBbpAY5JAopU320ao621LHetkQI+STTbDr5winghSEuc5B6+ztmD7xKKa9sRf931qGNpM/R+/8G6hyvYOWxt9Qab5OcnsbHf0e8D3gYs0JhMZMgXtma5g9O8DDtQI6r3AovHSQhyshjZNDmeMKbetAuFTHwVCeBpe8BDjHxcHonQtPbUe4q1oj4HwhQk4WWfnoTTPEaSRDb5jg8HkAQg4XwudgFhwXkA2bEgyfz5IQvq0Isb+U0Xq2RPiuEgQcSYH3qUjkfDsafd7/DpWDN6CozRaUp5wk7PkMvUIbacwPCbPR67A6vo/enyxqz8AmFETsQmibyfCNHwpTXA1ch5DufCsHru/lwPBBKgwzU+A6Mwuen7SEeXEJPBYXwbicPl9NdF4dBufFQXDp7svP/MTXTJANMELW3xXKd72hnOUD88Zk5D7ojlaNQ5BHei32Zlt478ggnoyCw4YwKL81w7tXLyR77UF7DTDUG+jnSePzB/oEW8c5NMHas3iWSvMtwkdP+FnIUMJvnQnztFReQpZ0D+JsliCi2RyEN38fkcJcRAizqX2IYOE1xAiLkSPbj46mx8jWbUWYbhbCAt5HSMSbhME0sKvQQtLBGeqJXnBZEQntijBovwuDx6YkeO5Kh9OmKMiW+MKmsxZ2r7vCc2cq/PZmQ7PBhCCPiSgWiNdlDZhGGPnrfuD4mZ0XsHOcvhFNGBgLPubRaU3oFvyYcJyF7+Wz84UeIdY4HYYj4hVLESl+RDZtNT/fYX2sfBGixQUk618h32k3KtyuId+BndfvRhXpYJfPI2D+Ih6+P6fDvDcV2u/DoJhFvDSI+H4gYcxxxlrpUp8rLkfj63RT/aGa7IWkw22Reon4dWoCgmWvIk22Ezn2dzA0sgFrR1rx89gMRnML8UwTeoc38X3SKfkWjMuw7sWPSLbOkZ1JdfJ/hFL9KdJLp0hvfYt09WbCeQeRpl5Hsr4P2ZqdZKt/RKr6JyQovuFnRrnanagJOkr2bTwK0ReGjbEQp5shm2GGpI++kfDGBcVo0/eiVJYo8ZQXiTM9dxrPp8J9dTzk4wxwGh+GONk3iLFdhHDZB0iS7EBfsl+LegA/jrL6An0jLfxMpcb3Ptr73OJYaFDcM7JpFn6+zObCzln6RzcSpnvC58HOtTLVO/hcWhjP0Dj30m/voJXraVR53iKbsZvs3zFkOrL1Wga3oylQLvKHpL/+nKyNyzDbt43T9OtjGj0HhG+Q+Csvy5I1jxXjTHBaRrqBvif/3OeCZInL+qCwyQ0ZzQ4j1/EA4Zn1yNccRfeAJnTyu4eZ5Q34oMLKHx3IJ2P3Z2fAbTwuULvC/bCx6exM0MLlg/kyHfzY9+4SZj1K9u4q8nX7OJ3ZfGp8b2EAzbHKfBuZiuN8X79z0K8ItfkCkrGu9+xaa98hjGOU+SgDPGZHTouvbYcQwiC62cEgHwBiT5Lr4a4X7ZIdKpjdChWmT0+33dtU7f6E/A7ws9OuAaRfQuqIN5owMcuC2e3YuSU7s2vk5zZ9Iuppbo+Q5rgGE3Iu8/F3D26gVk/8ZaHx1/KzPna2U+19Ey2J3sw+ZznuQBv3q4RLHiNLdYDm/yuW9aXxB/+KgGYfkh9lPK19xW+T+4qIq/qfgmDYFAaHFT7Xm3/hvFu63mOF8guftYQjNskrDAku75j6OJmTlyRLfkSR+ipqPB+jwOEsKlzvorPfM+LlBi6nDO+8U2Y9C2LvuwXVc5nt4Hcbqar1aEd2YEgci5NkvP+Y+OIGtw1tTdfIxv1COI/5Cdf4OVmp/jTZvhXcj8hSHqfr3cO6MST3oVdJN30F8VsjVMu8YXqnFIFl/eHp3XdPrOTTUYnCvO75wuw+JiGn2qZGfcBvT+YT/6PpCDJOR4btARTpTqFAewzZyqOoNj8lXPOIxvmY1vou8c2zf/GF2RowPmJjZWen7Oy2O62TVa828TNGJh9s7K1cz3Nb157wKTuPzHHciwqPazwOgOmidNV29A1/jPFpT5GvPIGE0GXwze6PgqCl6Ki5gg4uvzwwCEXdHGOC33CdkHBJ3yUaygpPOH4SAP2eELh1ao0UYRPHOamKLchQ7EUb411Uuj/gfNI3spF4oYFk8THJahM/e5yUDa4rRyZbdU530pfsvGRgbD2tVxOXaaZTW7tdIF4/ROO9TvbuHpdVdjaapdpPNDpCtP+Fn5FmyYmvBMLfMeeQ8s5kmLq3RPDbbeE3Px/GBRFNDnN9IM71hOlACpy3RNLaBEC3JRTq18IRa7sEadIdNP5tSJfvQjsT+eGEdUYnW8/GGc0HxLBz4yZub1ks7Vvk07CYTDaPt4mnRqZY+Dly54Cn9D0aOz/zbSC6n0Ua0YRhi56hdYT1DnHcFymbhwzxANLE3UgX9yDH7hyyAw4jeE5fJJm+g3SuKxSrfaDaFgjlz0FQbgiGx3HyH38thGlfChwZhv3SFQHBE5BBOC1bcQKt9bfJjhCfhDfivTKrr8X8lG8GWON/Gb07+T/B6NRGLgcM+zOd805r6xyYfDDdb+WpRvQh+1CgO0x6czuP1Wilv8LtlVnWnuzLpL2J4g9vJojfz00WN65KFbavMyxocVM6zwBdbirR2gvK1UFQbw6D6sdgOBIuMOyIB+lQKOaR3/ux6w2H6siDybY/W7LEk2jpdA1VHo/QybeO1vkO6Zmn3Nf6tJP1jJadFbPx9gp7RnzxDK/kWe0xiyf7uAPwCX2P6SbmVzLZ7uT/kJ83ZznsRopiA3oEMdx6DJ7Nu8LDrsV6e1GI9/QUZGWmk+os8QvtOOGZxPVczBzdzCBIR+ggvu8F+Uyv5/Lp5pPiZI9vpBPcZ9gOcxnQfKBTG7vejrFCsuAQKkz9PlG2xpKjOIV2bs9R5f4IncnWdAt+xM85hyc9wxslz/FZF+DDKvAxM33T1nSL5PQ5x2yjaC0+rLTgq97AEtJ/i3pa16GD70Mus0nyteTTrEUa8WWw5DUY/DLqvE3tb4lOiufhDqNPF9ldeCPO/vMJHpp2IzzXJB5zmRcG+27OEIe7pcgqXTz+ln8SLQjNfCS9fgwVp5Ot/Ropsq3IV59FW3drrEa/KNL7gY+QIu5HPPktnfzvcVozn5HJa7nbVY
zLfMjjmruHPKP53OHxwYz+jNcWdCY7RnJTZryCJHEdYZ+PESgbB3eh7SDdkcDVum/8GG0hGeLy1CZC+F71nutp1WxP+G/JhryH8aw0XTPy7/lXAdLxXzOMkCkeQZ7qDKpMt9HVvw5vFDN+biD+qeM2a1pxPQbE3iR9fRjVPte47Z1dxfZA67k+ZXaAyUHfyDqyYc8wgnTm7EoaP/HSIJLhLMVhRMjmIkicAJNt217cr/4196LpSBqkX3hDs5Z8S/KxNJ8HwfdmAXTvBF0me+v498buLRnUIluzAywuLU91Dj38GzCM5HJiJnhM2uRshr2ekf5uwqddrHudTCemK/eRfbLayO+HWm0Yk2kW2z+I9GUrw22a82OsGkbXSbMgR3WS/Pc3ECCOqA9sNqaa3dv8beKaoItFCL5UDNlIt0af3ZmW0BPFcHzfHx7fxTc2q3Jo83fpLo5xI/l/VuB4Bq0Nd1DmfB+9gpowjmzRDNLp77Sy0pPFfLDYgw+Jlp91tfJzC9KDcdI1xF/P+OfMhk0jWX2H9FMXkvly4x3CO/X4pIb4Rn8NQfZvwlXM3+or9PUUTgsS77Wp+1w+CYMwzWWS1/nsC8bJIXWSYUZ/l51xq6UDjbB3li3+e2MXREHuJXbbFStbRrb7BN3jEkp119AtoB5jme9E+o3FLTA9w+bA9Dvbt90zzRo7OTzxKcIkHyFU+Sr3tyrI363yvE56hfCl13MMiAB6hzShhfYmycxaeEk6XmD3tRsuCXZdGn3DaWbwMyFGmsA+c9kXN8+0LJ5t3DdTfxfU3+mjEPJlxeF/NHapKAnyEXvtiRbnI1qyBClO81HufhJtDPdR7VmLnqTnmB5kenx8poXsf4PVzhJ/7JpqjcMYl0xYh/g8RDEFNcHHSL/UotDpONnrrSh2Oo2OPrXIVZ5FqnQ/woWZz9Uy3+Hat3zGG7+OhuN0v/NCc8Hn9/GoN4TVuKyLhm2s4rWgS8XLfb9MsdBc3P963GpZoOAr9h9EeqYxWb6ObPV+FLn8wu1fqOwdwjmEYQmTsFidt1o+wIaxVn5n8Vq9CdMw+/ol6cP19PlsWpuunozuZzA5t47Gfw95GsL0yj10HfI7nDYhRrIU0dLPYCzPbnD6OuCp02LyRSd7Lv1rmbRZ6B7mfja10XmMf2P0ltL6gBUZTfQd778ev1nWVRYnLn2eqzyFDPl+wt77ybY+Jj/tDNmUdXzfY0j8QwxOOocEmx2Ylv+YsM0D0n21NC8WW3wfw5JqaQ4PSF7vEia6jnEpz8g2P6N5nEeibD0yVDsIy/yCWMm3iLB5H4aoXKjXm6H+yB92VboP/iYvFwsyl1/ibhs/jkDkwjz4fJuK5mrblH90P/1le9letpftv6mx7s/3jYJgetGrWV+72fr+4mRrP22zIAHLrdosCJMEoS7dGthUN8nax8Haq170yhd95O+95UVgUsIfhixZ/y68iJV60ae/Y+3r0qz99Tprr37xf48XP3P4y2s0Wq/TjPpJ1NtQj8mCYD+MejZGCc2D+pSD1n7cfetPMIjniDU7T/OrpX4TrMOZ9O4/QMcXPYvTyqA2WXgZp/WyvWwv28v2sr1sL9t/fbMbpl8Yd6QMLC5F/ZU/5LPNSPRcg1zZcR6fxM56ksSf0d7vAoanXUe/uKvoHXUNfaJvonv4TQyIv4mR6dbXfWNuotC4C8mqVWjrfRa5ztuQpfsZxcZ9SFavRIx8ISKaz4RbahtIEqSPREHQ/TPnLpTLOoTsyH+QcKsSqoUBsH9NA/+8CSiyv48C5RW0dL6Otu63Ue11C6/lW/i+GDuDZXvZbB+M1YNgZzYsL4udWbIz2NGpdagwXeR7lDW+t3kuWonLKWSotyJevhyx9l/Bx9QPsjy1RSa1y/inzb2NemrQ1lwk3msH+btekKY6QtJDDX1lEXIk59FS/RidvOvRPaiBz3dcpnX/le+7tga+HwZ83d8akzo2zYLX8oBXc635SxPou/0iGzA0Dmjv9RB5jkeQptiGFPkGREkXQmMbBWmeAmKIKv1/et4StdRk20W3JWhnLtIed4RshgnSNEeI7fUQ+xlhVy45k6LYeaGtIzAqBrwWBjvzYo3tKbL9cXauxPKxDrxljSfr6NvAY3HZOQyrMzIoGhgSa41rrfaq5efI8eJy5Gh3oEfwHWTo50FjCPvxf3ruYpRjsv1Alxu+ezKRcLcK4ruekOZoINbo+XmymKX7WpAJymz7s4tbKBrQ0wx8N5BoUMbOCh4RDeqwsCswMLqJ1rmB7+193Rd4t7U1LmFEkgU9QupRaWIxIY8wLLEeVV63Oe8nEO/HKD9Gr4CH6OpZDze78g02guD1P8LrohAodjbMkwwzWlxWRyHqcised8jn3sYFYifDDTFD2/P376eJO18pEevQUiR+J15eSuvO4kFY7lmxnuXYHkWOZj9auZ1Bv9gzmF7yFK3dLpGcn0ULw2mec8vOf1q7X6LXR+i7u5GgWIEQcRq87PrBaNsKskoD7Ku0DZIkhx0Sk2KuTLBvR/rQn5rtf9m8fZsHO47wWuC9KqVBOc8PygV+CD5TCPUsP0hTiOczdXfEti5jRW+Hf7XHGif7OrFAfs3SQm5BpQb4tB0wo0UDCnVneawnswtsTVMVmxAtWY5k+RYex1rldQn5ugM8LqLM7Tw6Bz6k/ldkOexCknIVwsS3YWieAZsQAa474uC/LwfOX4bzPGxJtRMkwUrI1OI5UZC+IwrNff5oXvaCYKcK1/1hDGLzYJmbfLzpfZfPwxuCDuXBf2MWpGNd4boxAbqvwiDJ1TwUgxzeFguc9H/r915iV32S+GNdpvwICuS30NOnCYu7sTMbC9p51qLMlZ3L/0prTnre+yGfc6nhOLcHleab6BX2GN2C76O99z0MYvnhHleRqlqHCHEW9EIu3PsGoJrHkA5H1pMuKHrUB4kXK+H2QxzEce6Q5GsgC1XXiwHqjWKcZqW82vC9vMR5pdQo/1Gml+9TDTRddP0o4qlMsP03+/aCp6BSfuF/y+9INoJPFEAzMwC2FRoe/+UwPwDScudbokL0/Xt8Ey6+F5sibkW+6hxau9zB4EgLP1djZ7k8jirhAVoaLqJ/lLW2EVv7DPVObusYLVq7/8pjLwqdD6HUeAR5ur1IVq7h668VouDazxdtG0Yh72F3JN1vh46YjG6Yig6YiJy73eC+MRHyd7x4bLOstxEK4g/Dymget6waboK8nyvk/d1gH6++ZqeXjRD+Ira++QzDrIAT+bwGReyhVrDt7wTnpeFQzyGeb+lUJ2oVif+e3MTKvmqZLZ5FW2dgJNmwD2nO89qD5xyzs96fx7PY/zqMy2jCq2QbWZ2nQbF1aGu+SDQ4wuUjWfEjjxeLI72fQH2C/BuwM1tFcyMiV2Qj+1FXeG1Lg+aLYCQerkDLukFoicHohFeIN8Yg7nw5j+lj8Yekt6CYZOb1KxwXBUFB9loxxgO+WzOheyMQtmmqw0SDbJtOjt3Dz7ZACQYhna6vXRIG084UZDzqzOIALVKVrN2f0Rtxsm+6ZItn0FLzGKOJWisHA
0ffBfZOB+aT/fuqj9XOM16YmG3h+eksdqt/zAPkaQ6h2Pkk8p2YjtjCY4cKnA5Z5V/yNhxkfoi/0Artaa0rHg9H6C7i0UXBMP2UhNw73Wj+k9EVU9CZ+lb1gxB5sgWcl0VA8qobbPs4E0ZxgawT2auWzoitrUDW866IPVYG7Rhf2IbJ4TkzFpq14bCZ7fag+Ty3h1l13RB9pBWa56q2/1m9mSB+PzRLPIUSx1sYHGPNs3mrZT020rrvfxPY9qr1jJHFZ0ym//UMfc7PFVk8Vwv9bzy+iMWQdAl4xOOlWAwM05dRsnlc/sM/SEU0qhD3oAppTzsh9EIJ4i63Qd6T7miLETzGvwj9aB0HoA29LmsYgqwrnRG4PQfa+UGQTnLjcQLagzEwnEnmMdw5lh4I2ZQHuxw1bMLFTTLBLoywzbmoa62hmxEIO6NsyJ+df6K4piZHvIAy3SN0C3xG83tGeu0hrzcwJr2WsE8TPxtm55bsHJvhPYYRWZ2yAYR92pqvkuwfpt9c4/FuLJ7eKv8z4G7TAqJJCcX3/rD73huSH8gubQ6B4Wgi/C7mw+e3HHifzYL3Beov5yD0dgtk1nclao0h3piEqqbRyLrWGUE/ZSNobx5iLpTD/dc0OJ9NQC56o/h2X2hH+NwjmbjA9Ib6Y39IExyOK4Xmyj87/zRhV2CG9EhTK+0DtPd8SDr+MdqartNa7kGU/XdIFNejyOUYhibeJSxYh/Y+V0kO6jlOGBRjQY3PI9IDh3jsR7bjTsIFp9HKcIn8n0/hLm0BuVQN5duecGA1SBb4Q1zjD9XhcE4HyVek977zg/hzIFT7wuByJgHmq5kIul+MmGcVPB+hHCPR6sEgqD72g+7zEOiXRUEkesp2ByL5SQe0ahoMjw1J3N7LCNuRnUj+R3CDY0u/TnHqZQ0ZsoPIILFhZ6dpys2IkX6JaPEzHlcRI/sS8bLVPNcsTbkF2Zqt6OBzm/ylO2TvrvC4x0zHTdznSVP9zK/D6g34i0PhKPGBPJFokOcCeVsD2Fkvq40SfKAA9m+5w21dPIz7E2G+kkkyMJhkYBQyyEJEPGtBPJIJv2PpCNySAeUsXyje84Z0nBtkpU61kndNOyQ7/RHxsDXaYSxizpRB0skZUsEu/8/Mm+xsjXSc02Zdj0Qk2K1GnvIMCrUnES9dw/P+i3SneSxhvPxrkuX5iBEXWfMAxA8QSe9T5NtonS9z/JdHNMty3EpzXgp29h8hfsjiOaiNJgyUDWWyIzz3pUG/NZbb6d54E2UYCu23ofDenYbA/WS/f0pH1lcVSJiYD88uoXCODoGzSzSc5NFwliZDtzQUiqlmiGPcoejnXi/2MFwVfwqAy2+JKEZ/soET4bQwFPbhqkuiYJchCvZq8a/ODejPRszW5WneCditeN0T9hMc4R3aH5nNDqPE4S7aeTyhNb1Ja3sfvUMb0c2vHi115zmmS1P/TDjoHHJ023ksUoZ6O48LZXF/uZq9PL8gRlxItJmNQHEMvMROMIrZcCT4IWtuC3UrPVRf+EO/Mgq+K9LgPz0Rrp0C4FESDpNPEgxCOlyEfNKZpfAQOsBfGA4/uyHwlfSDp7QGLhvIPk41cZzAcL3zmii4H0iG/lgCQq+Xko0Zh8TLlZBPJz8nTA3RRnZPVMrPioEOu8UEzW7yd3bLexh/c5jpD8cFgZAON0DS35n01PvIFM6iSkY+XRL5eGTrPmnN4sCeYHTlJfRuexIDkp6hjdtVXm+nzPU86QPy8/UneIwjw3sM/zPZj1bNR5DiFQQJkxDc7BUE2oyDqVkFdEIYFALhPEGEPUF+G6EZJALJhGCCY7NIOEty4C6rgknSnnRmG5p/W3gJ3REgjKI2Ah7N2kDxQyCcV0VCOyeQ5MALjl8GQ7cqHIplAVAtCYTfniyk3axB6PEiKD/0gbRUB4mbHPa+ctiVOMKumxNkw1yhGmWGsr87HAaaoBvhD1PPbHgNaInEaSPQ+ZsZGLLuA3T4fjqylw1ByvwRyBo2B73jH6Gz7xOa+288PrIr2Qlm8/pFNvE6QSyetb2ZfF/zXvhMqIbLmDjoShLhEp8Bp6xEaDoFwXEoYdLxdM+Z3tDO84XzV0HQfx8Klx+C4LaR9NpKP2gXmqGdbYJmqju0ND7ntqFwzcmBPj4VgZeLkPKwA1zXxkH2lhkyxgtTPCCb4M5qzUAx14cwdTwiTpXCe38GtDw/Igxey5MQvqMIqefaI/tWV+Q86I7Me52RfKs9Yq63RsjTFATfzkTmmgkY9PpW9B10FDV5F1EVcRtdvBrQxRXoFdLEcwlYLHvnwCfoEviUxy2y/aHf8yNYLGm16R7y3fYhPpH0ZtgsBHm/As+0bjAPL4dpeiu4vVICj6nFMExIh350BoxDcuA2ogTeE2vg+VYVPGe1h3leJTyWF8KD/Bi3fUkwHo2Ay8YAGLvTOg+gOY+n+b5OemC0O2SdDdyHUBBNFR/4QEcYkOUt5t3vjkrLKG5HW4HW8XFHBB4vgHFDHBzXhkG1gWzPl2Y4zoiAf9Z0JLjtQKWEsL4nyxti8Ts05yALr4HVOwLczvN5RoPnI7A6RG1NN9Ap4C73g1gsKotj7h3eiArSCUXKE8iU7UCCZAWimy14kQsyG2HC24gUPqb+LXr/PkKE6fx1gvA9MoSdqNLeQ3v9faRIf0SYfBZC9e8jOOBN+IdPgCSP5KeG/PdRNO9eRkg7uYDpMDZnx8+DoPkyBJ47UhF/vS1Sb3VA6OkSOJHOkH3mDbvxBtiO0UM+yxuaz4MhLvWD5ls/uFako0i4jyKbRyiSPcOAENIBLUj2K6xxb2xebM5s7VlcMaMBi839fa5VnnfRI/Qp/5w1Viuqe/BzdPR/xPUEqw3GbEKUOI/7gyyXhOeWKFYSRlpL/1tMfsJb3GawvAyWX9Kd5Kmtxw2kyjcgUbISGdLNyFXsYDkZsK/Uwb61FopB7nD5MgIem5PgsioKjsuD4UDN8bMgiLNpvgtMsPvaE6pVwVZMlEr+/wIfmM5moLipH+LPVEB/jvROYRwSm5HtEy+hUFaHnl7A1+Tz/DDSGg/I9vvYnPpEMNzbiJ5hjTzmmNVjYzGlLH+jT4Q1Zp99zr7LaMW+z2LIWQ4KyzFOJ9sRIc7m+yIpNG9mK1j+GXvNsGK8fBnRhvCGbCHR5jsU64/xPAvmS5QajmJA/B3ov42C5w/JCN6dR2tbBM/9adCuCoN8gS/EN0gfVJOv0N1A9sGN2cin4uc+tzSHohs9jqdCM86H+5BBG7KRf4ywxckCKCeEIdBxLKIl85Eh34d8+XUUKx/w/F3mA7H4tumlVh7g+TeRjAYNVp1HOmBSjoXnULC4ctbYPiGjw+AXeQk9Q5t4/grbGykju8nmmabewPNXrHktu+izH3jcObOfDD8WOh1Gqmot4Y3PkKPdyf3rLO0mdAw9Qnh4IrfxCfcqoSWfR5xF2PF1E9d9sgrnRnGAq0V8zfxY7KhPFuWiWiYIznadnYZLvvGp87tdAK+VyZD3doWiC/kSk0MI0y1Gku2PCBffJf77FEnEZ2XOt/F2qXX+
bL/v9zhilsfDcki6EW929H9Auu8BjxtmeRlvtrDWr2J+E6tTwOgwOo3pDAuXgy5EK5bfkE9+EcPU6aotvMYmW9801QaeZ1Lje4/osY/7UqyuWobDJu5L5Wr2E898iwTVMiTfbw+fMzkQF5JtG+nK8vcfkA68Ln7tB8V48zPbHPU05SCP7aJOsVYMc9ggVrqc1UzxfarfG2+RbydbuTUKiuXkf6w1IyR9HI/FT5D9SDy3mOhtnX978xPS9fUYmfIYX/ezWPd/08HzY1huEsvR4PafMF/PsPu8Pl/X4Hs874D5xCx+ls2d/YbJBovNZ/kpjG9YXiyr5VhCGIKds7QkHmCfMX5g9RwGxjYRjZ+iynwHOapj6BV6n/zNRqLXNiQov4aM/AbJB8TnvQ3XxEr9ILkgt+d7IEs8NsY8bIuI97IfygLVd6QJ1v1t5fs+UC8MhMNHAczuP5YMMHxqM0de6e5duTFBWEU8f4jW+wYq3e4i3/EE+bK/oqt/I/qG1XNcMyS+nsfDsz0Plr/C8iW6BT0lW/8U5e6XeU1O5v93D64lf9maG8LOAZieZPLP6jf2IF+Z4UPmI7CcU5a3xWxovtM+osFOTkdrLb0bRM86XvOr2OkMUmQHMKNFE49PzlTvRrhkLmRj3Z/K2ukXEYZVsj0gwWCb4dDH9HrI1vy7mY3dEHi7GO77U1hsN1QtjJC1duZYUVrtvEgU5HzPM0AYlBVr+1V9rpJwjNtT9A8G2ruRjaO+L61xOxPZ9LRGvv/Pzng+am/1+5kMMJvH9F/XoCc8N6BvhIXH5aboPsabpfc5//eNZHkndXxfnOkMZit/94vZOjN9yHADw45sn5g15i8zW8ryq1obryJZtpn0/0Us7A5eDy+TeCVU+i6bz0Gx3GWZZrDXVo+PYh54/pAEr1/S4LE3iTCgL+w+84D0M696zZqw+8o3vSD2Mt4XjYp4opXWPkWaqzC6TAiXvfs0R3ESRepr6OT1DBWu18i2/IZqM6v/SLwf+pzHIzM9xviXxSWz8x+WD8E+Y/v+HfweWnOygh/xOgCsDng78ptZjiXTlYzn25P/0M7rFqq8bpIOeMj305ksszq2LF+IyT/zH9jaszqn7LykhGSj0u0BUmX7yJbe5zqI8R/LtQ6z/QTqOf5w+i4c5mPx0O8hG044UT7Q94myMnyfc2nWVEN2fmVAVqsQnSROIZnnFSZ2Miy3G2v4MOJki0fua8LhFlCFNJsdyJQfQInuHKo87iFZup3ns1ST3Fd7PuB7OgOinxLmecJrlrD9DlaLkvVMrzH71ptowObKcoiYbq8y3+JYYHC8VU6Y/u9DcsDmzfR+W1p/Nv9S/VmOnxnfsDw8lpvG+KDa8w7p38+5jLM9tFySqeFJ9VgxgO5dTPIvI/rYroL0W2fYv22EtkMu4pNmosx/A+KEL3fnCgdGthKu9IkSZn4Q4zrkRJxs9B612W2TfQ+2FxqBlMZ20H+WjFi7ZTyPKUXciFKX35BFvnyG/BeUG+5wvcfy11jNUVb/nsk/W0NWl4/tefH5p1tzTdkcGR0YDbqHWGtmstxNlgs5kMkFzZ3pQPZ5pfkG9xdYTX+mM1oZz/N8SpYTzfbJeD662w2S8Z28jmiqcgOyVHtQ43Uf1R4PkCk9hRzHY0gYNh2B0eNR5rMBA11ItnR0X/OBRoUQ2VEwChU2ec3nO75tvqKZ4Q1pmY7XxtAuCoFhXSzkyz3g7T8Qac23IUn8CemEpZLF9UgVd6Ot612UG8nvNT9E7zBrTjDDe2xuTM7ZXhhbd4Z1phRY15bxPtN1Y9OtuKBrYB3P72ax8cwfYPuDwxKZDnxO/vFlfkbE9hEYpmP6jeXGtXG/hgLtCRrLTmSrDiNLcYjnyKXKN9O4tvF5Z0jPoEJoQkXHvYh6vzd88zsjuG81QidVwHMS+Q7Twy3O7wZCxWrrzvdHwIk8eB3OgGwBvf8mCM7roiFfZYK+vAhJAuEs8WekyDciTb6N72+w3A1W17EPYV6WT8NqPTP7zebH8gDZGrN17R/dxPMsmB5guYxMLzBdxz6z4j+mE58Sv9Ry/cdkgf2O9UwGWI0Ktp/UQn+OX5PlW7G8w1QaC8u1jpUt4flBrKWK20k+DyJX/huKmhFmKNuGsHn9kE+0kH5hhOOmALidiYPhSCyc98dAvT4UIuF4h3XkR26JgccvKTz/WrcyHI7kV6qmBSNGshBpkt38unnqU9zWFel+Q4+g5+gfTnMhmz23ndXOzSV9z/Qu8+UYnu9FMs7wCzvzZ/vfTA+yXDA2b0aP99rgRX1gK0bqQzaE8U930v9MNqq973I5y1TtIvmq5XxSpD1L89+FQHEc3x9iss/yCpPFdVw2WZ5hZvMjiAn/Cupf/eFITBeumc33PqSzPSFZTvhnuS/fN1T+FAz9wXhE3WuN2Ott4H8sh++Hq2j9JfOcYY7vjnRhL11zJ887LGc1pnwaaCxPuZ8xh3Db8v7WPW7G46z2KcuN/T03j535sHVl82M0YPaA9cw3YvWdWFwE6xntmH5g8sHsfrfgen4WVuNzl+eVpas2Ywjphu6Bz5CtPkA+zwy4iSXwFDve85X1LwoSJwVFyuZGx4srSuNlK7rGC8t7On2Y9YVqbyCkg/WQjXGC+LE3lN8HQvdLDBx2RPB8RYfN4dBuiYRuM/lAP4RB/MQbsnfMkL7vfk/ewX17rGRxQ5bsBLLlJ1Gqu0L25SEq3clnC2rkZ3fjMu/xHK2fJ1hzb+a0s557vf/C/2O5ojU+D3neLlt7ZgsYj7D8OdZ/2tlaL5r97s3S32sGs3PDu3z/nOl7dm7KcmpqfG6ge0Aj+XxfwySphEGacVbrEBolOAvRf7lH17JatCvUTpIrjnoW+uzNgtjPFQ6vekOc6w050YD5sMzfEV8z3ZW96nFUOtHtZ8kY42eS0cbpssGuQ8RuhixhrSANF2bMSJT9YGH6JF95AR3MhHVM5Fu6kW023yUs94j08SXi23q+9iyfivk8i3ta4x9eK7DydrfgOo5P+kfVc1+f6Qj2fBF2NsRiQlitI5YP+UUPa/5tP9Id7PtM97N1T1Ks4TghS7WP+1k+0j5wk5cgSj71nklWdEYqs0Ge6vv9JZqrm9LsdiwrEPa9mi2s6SAf6zEz+HAB3++Uk88vTvbYSL7/J7K+xs5iZ0O0mK79w9r/etuUSX7iYNIvc7ney1EeQwun68QDN9EtoI5sO7PzTdy+dQ9+TBjkMOGMw/w5Byyu46MaqwwwHcDyoVvoL5C/18D5nPl27Xmu90OSBQuvmcx85RUDgWX9rH4AuzbL285idWTk3yNOXEo+xgKESKdBr8hcrpkbeEE3mjBasRPklQZoYiOva4XQdoooY6V0iO6TZu1l+9wXxiJgXw4/AxPbuNz6s/v53mKPBaHi64ghvZIorgLxAOn77chzOIl2HrU875XZNZaby/K4S50v0Tj3kZ5cyX00do79dpmFryfzZ8ZnWPj8e4XfxMc1Fs7nA2Ke89zYasIKjB+YLWC+0nLCLO/S60n0WZXpLt13J587q9HuLw6Bp6T
DPDZGw+mE056nM6H8LhDKbwMhm+UOSbYaiq5GuKyJ4n5LzJU2rFZ5kzTRcaQoSP/Ucx58xD6dWS2KZNKjOfJTXO7L9DdI55HvQrL3Cqup15FhOWaz6tEzpIGwfQP3c0enPSM8Rv6YwwHC6Hd5jQbWGAafnEvrSTzNns/DdP675VY5Z/Rj9UVGJVtzJFnO51tEn/5k+7IVxxEvW4kQWotg8RWYxIpZfA9+UPMIzxOZjX5nc6FY4g/xUx++J6f40Jvv6WlXhcP/biH0X0agmbd95z+77oJEUIaJ714o0p5BiRPhO/lR8u/u8HzHgZHWHPmhhL3ZWe7UAvBzq+7EA0zWGd+uGGRdb4bTMgiDtfO6zGteLx9oPQNm/2ON6UVGAyY7HX3JF3S9jT7k37J4KaYPBxI+LHA4hxjpEpBe543W/o0X5w9OvusyTiY21CD6bjkcFwbBfpwRgecLEXe+DeTvecFzQwqCLxfDbpB+3z8w92Zhspnb8tQn0c0PhB3rUOJ4E939aW1IhkeRLXuXdNzbzE9NYnWuLZjX0cLrmzEZZ/qcxXb9XrOE+aYJsjWE6Z/yeibMRjAdz849Gf+wmtm9aM5t3e+hWHeZ9MZjbJpI9oHsY77DJcRKl8NH7EWt901q7fkYs8RAj+Xxv3qsTWQ8X+d1OQdhewphm6q67rwhembsnba1Tl+FQVpJfmuZ8ylJnubfxHv8UQsWJ49iPhSrr5DreBRFmvNo5cRw/TPy5wnDZ7G8ZQunAZsH86uY3mb6m/E+wzysBjzz45ndL3Y5jST5ar53NyCK2QELJmY1cJw4lWjwKvFB72ALzy3uRTLE6jkyvu/iw2oPbqH1HgZnMWaNThrGnzsm76zP9t+Sec99ZTxsq7WLhYhm7oqfgub7HsyGskS/ldNnlq5QNt+bxWdBJkj+Zj7l32o6MbTMTxzSECcuQ57mICo9rqJQcxotnQlvmx7zsY0nDN+dbP7rhdaamgy3MBvPdDWLc2R665e3wGuQz2rLsPBThEs+hdGuHHluaziWYXXLmWywmv+sTltX3wZ0ocZ4q4vvc6L3XaRKf+Fnpu5iS1azjteAl3XWD3BfFQ/nT8JgU+zwLzUEbRZ41Pj/mgeXCYGX+PwHqIMMG2LhONEHEpV02J+Zu1YMrPISu1n3VpmuVy9B59CDqHFvRAvCO+29HvB6cf0j6vBmyf/LRR+aYOG1kNg+BstlZ9j24NvWfPD3CNeOjH9AurEWBa7r4SmvIZlp4rUMWawH28PLIt+1WHcWHbyeoKtPE+GLi+SnHiCstx4hdq8+1wohZTR/k8MoryWmDUk8LskmVjH0X8nsm85mwy8JDbqPgmHTvHmh7Ef/DtGXyqEZ5HVH+HdqsEpFWzdXMX8Z0y3piu3kS2xHlITVIXofPnZjkOLwLfl2teRz3SL9dIfL6iq6+9J+1vVnezpsL5OdYfFc/hxrzSKW18xiPSYS7htH/lC56TjCJB/zZyKwfRtW26RQd5zXCUmj+3b0u4lMzU9Ile3iuD1WWAwXl8wryjHOi/Wfhj9nZ9sOb/s12CQqqv6NzjIIto5bw694HEiBPFV3yGl5+Pr0h52h7OV+6u/b926+5DtcZmcJ6YrdJPOEbxz2Eaa/yfcP4hVLeM422zcrd73Ka1v0CL1LftoF/DTCgg3jrbna7PkKnfyeYhjxwnCyZazmwg7yA1j9qY9JY9W4Ek/rrpMtf87tO8uRztccQ7pyB3IdjvBWSHghTv4lYmSLEGk3B25pLaFbGgDX9XHkf4c2yYa5rm0eJP5h/VD5usCV/jcLYJgQBPMrUSjFYKgGetQKL55X+LdapPjxRIZp81S/clzH9m1bu53hdfwLHM6D2T8We5yvPcR9zalF99Ej4ggChNXoE/iQMP1tjE27iyl5tdwnZ8+WYnvO3UPukC28jU863CWsex0V7hfRM+g2ZpTex2t5D0jur9J1t3Asn6HcTn77AbIRP5D/+iXC7N6Eq7EQyq/c4bgiCNIxbrBrrV3y78mvZJHXOOP+BBg+jYB7/2Ck36D1H2mCja2N/5/Vfy/by/ayvWwv28v2sv2fbv/Jv82C0Owv+xdVBSz21veNKmvPCyhQfwFWF2YKL38Ei8RamKFRbe0TTNa+2QJrb/N7n44XBRmsvT698UVf+6Igw+Y/rHHwN//crF2zF3UhlC/6tC0XeG+xsfa1g6zXfTFMQfL77SZbe+nfvLikznp9dZ2aD1NZB/67yAfglx0ssZalqLX2zS6oWX0IQXJ0AZ+der219xi3gP88pWkTLzuR3LSJX0bdmH7B2pv+s6vH6kR0psaH8bJOxMv2sr1sL9vL9rK9bC/by/ayvWwv28v2sr1s/8cbK84pGWW8HrGrCLE7W8BtSQy0n4ZA8aknbBbZIypkAbJtD/M85FztbmRptyBbt4nXqsrW7kSl7xn0iz+LwSm/oXPYaXQIPoO+cefQPfI8ekZfQC9qvWPPY2gae30enUKs/6sOPI0UB1bb6nOUeuxBa68jyHH+GWlauq7zTygwbkGuywbEKhYiRvE5wsV32Hk4fM0DIeuhh2hUNIiCnd8/m37/afp3cPjQvCHZUtLUH36HsuG4KhTyhd5QzPBFQMmryFdeR4H8CgrVl1CivYQywzWUOp3jsc4dfWt5HMb0F/EILI6KxRKx1+wsnsWP9ou0xtmyWDMWc8TisVkMKqs/0SngFgqcjvJcIRaby3LPczR7ee2dloYzKHE5iVTlOh53lyD/DlGyTxAueReO7SMhTVBaREHa5Z9Nv/9okxHbN2vnsNj1+1hUYARCThexnA9rnkuVBqregciyP4oCCdFIcQ2Fqqtorb+DavMjdPN/jmHxDTx2ncUAsWdMsRjmMenW/LUvX9Q7YbFNjNbDEqzxUCxOhq0Lq3XFaj+xZywOiWtEhekKrcU9/hyecvdLPJep0OkYzwlKV7Fc+GU8x4/lzIc1fxO64GhIKx0hquVz/tl0/I/R3t7Dpp1mh+vqOLTEIASeLIBkntlanyTIAWIHA+ynOsCc1x35NjdQKL+Dlo6P0MWnEVWmRzyfnsXesfgzVkeHxZyx2CsWU8piqlmcGXsmBIvDY7G4LNaW5R1NLyb6p1tjVNjvee2NfOt1+kdaa/L0DQfaed5Hke4kMlW7kaHciRT5eiSI3yGS+N9NUgTRSaQxOkP0Ub/2z6blP9rEYHU72y66Wx4bk1CJ0Qgi2tvPMvGcTzGSeKpSz+OEZX31kFVqkKRej2JpPdobLBhGumREvJVev+fzsPh9xsss7o3FM7I4MBb7tprov3Ei8ONoa/xzJ7/nGBrfxPUQ+//ELGtMNIvz7hfBcuXAYz/7hluf21nqcg7pym1c76QqNqPE+QxydDtgsh0IR2kgZCnqG2Kco+mfTc8/2+SC0FzM0c20H26A2+YEzvdhZ0pgP5do355on6BhzyLmcatidyPEAifYpgjDUsUdPxaJpJtdgF7ewEeV1rir117oExZzymLN2LOhWXwt0y3j0oA1w4FfZgCbJ1tzEXqGNPJnlDGeZzmm3D7EWGvudfZr4r9j69
cnvJHnI7J8PPbsOJZzHSlZhG4hlzCOyVv8c2S6LIHK1vedfzZN/zTPC4JCbO2yRjLZDcat8Shs6M1rh9jPIdp3MUBM0lppP5Jo35Hep+t+E0MdMthvU2Xb5haId1HhCNTQGrxF9Ns4wVqzkD1jjcV/T85p5M8A4s8aTAePhWYx3Ow5JOyZ1190s64Vs8O9wsDjxJg+6uz/jD/HrpN/Lc8RnZjdRPbagkqet7uV53Sz/PZQyfvI1a/EwEC6Bv0+X7sLjs3CvrMVBId/Nm3/XmN/YoC6jGh7WjrZHepvg5F2rwNCTxZB8iHRvivRPJloX0n8P9AVYhW9T9N+Jark/1JXL1FcMyJfvIVWCrKNKtLTAaTX+1p5vmfYU6LFMeT/f+29d3yUdfY2PBiSzNxTMum99957h5CEkkCk9w7CAiKIihVBsZd1d+1dUbF3QZQmCqiAuAoIIkiVpiAgNbmec507Qd/dZ5/P7vo+7/v7Qz6f+zM3M8lk5ny/33Ou064TsFG5hCbn/oQxaccEv2xSTr1ras5getEpPCu66YNrWI/bqnM3B8UewvCkn7SfkHP62DvYHLpN+UfIT8ceQ/bLc540593lGvcj3TZf8M8zyPd4G/GOKTC6+sGW73PICHY8YVhsgwxL5zyDFPz/A+Susk/2HijyXW9cHQXb/CjYn0pUuad/1R3GQ4JzhovMi0Xn9A3m3EEYfYI3GqX+/f/xffKMx0q6GTtU/s0G0E923A2iB96ZafY59Qn7Xvuq2Q9XLhdnjynHoXOp6I91KLa9g+bIFaJXDmBI/A86w1H57vzXa48+eWDYw8o5xpwlzR4/9i/W+a5RDjRyH2UZdyLZmIVQoxq+F8XAyPSF7Q6Tu8M2ub0/uVLWI871g2EzXjYs3jMEm/Y0LrIF/3+83y/qXOAYahsXui7quUKErSiG7T72UMUg/tMa5GxthuOJRFhHyz7PFltbH8h+lI3yONQwKwb+6T3lO3tXG5/vIq9oH9n/XIehgcCDgi3fuoz48jR6BG5Trlz2RjcEbDC5AXyW6zpwVmm21+Mo9H4HXXzW6F5XnrFA8muu1b5xcnA2idzJOzk2/Rc9T1wD9lOXOd5UnqZU42qEGFXw7mSB98wQhK0qRtKnXRD2TqHy0DnvT1D+GGu/QNgyBcP52WF42Y4ZnrbXZS26/9+WvUetq6dxWcT6oCezkLOpF7qeG4uAJzPgdW24zsBN/6oR/q9kwjpJ9HuGmzp/r9jYP/07vKHFxmsvdrVvQoNDsLn7Z/QT7TQtDfiz2N6HhrDH8axgxsNa08xeJfYqN/ivk72+AS1yPvqEfac+Fft0+0Z+r77AJdnkANqlPe2cA3xx5HatiTc5MY7J72w3+U9F/+QZj+o8tnCvRhguB2I+Lkf92Qno2zYDNUdGKOdh7pZmJH/eFUFv5OjMXNuMcNh6BcBI9YHh74DhsK8yLrJeImejUtajyrAaVVZPa5XV4lFls1xUIc8PNMKdY21d/K73m5P4RPT83DX/jtxtPQNqjZkRi8mXFbeqComra5GxuTuCHs6E1/BA5dOJXlGOgNeyYb1SdHyh3y75TLOMASH/Vh8Sr1zjwdfLjQ9QaWNP6xfiE+/CoNBzmCM4Z9Es8bcuYb/8ee2Hvjhin/bLkSeB16jkk8ofQq4Ingf2yU8rPKX9mH3CdsrvHFafbVTyMe0lZn07+3fZX9ocslVtsCn/6xHsUQlXjC/6nJ2GCbgFw3Ed6k+PR4OsxWDl2bsefU5fiuLv++l3dj8lOHWu6N+xsud6yFmvFFtXKjq3LkB1rmN6JOyjwkzdFeuEEeOCTX7O7/5URDycB6OLLNz/QS7ek0LyXM+k6GzM7O+akPJpHUIXFcJ+Y4zJhSp4xv1EivJbKddRlf8Ozj3/T85VJ8PiUWS8tKXG+AKc+djTd7f2vk7JbMO9ov9fY6/eJLMPhH7AjJITaArZrtwe7Pe7sp3rmxwd7GGs8flIdPvnanfJU9Iz+CuR9VHFPDwj3YM3oHf4NjQErhP9swJlzjfV/pIDLsAjG0aEDZXbB2NU2xz0w0yUHhqE0qODlJtpmKzHONyESbgNg85fieqDwxG7sgLuJ1Nhvz0WxnVRMMhpM12uG6Ph90IGoldVIExk5vtoKhyz5PVBwWIbQ2DtK+sV6PjcsHjm/CvZeN0U8bLPB5nI3dcHXY+NQfHu/nDfnQSvnv6qC12PJcNxj5xF8aeMWv9TRmfbv3yvf3X5yBeuMFb80M3YhYsDfsGIaJGnYPYba8UH6Gb2yrHHgH2WxESLr2Jf6Vnl2p2Sf079Yfpa7K3nuaAN5nxr9pJT/5MbhjajPmAduorOLzQWoth4tT3m86py9lH+7MFyWIIQ0D0MGd/2QNHOvija3hdxKyuRsq4OzWemYhTmokXWpGfrFOWRHYd5aDk7HQXb+yBqSan2qNnviiP/iLkO10TB/9F0RH9YhogPShDwchacDyaqznBeJ3ZTMIq1zPcXm8OY+4/YqvNlwZOcr6Sg+OBA5T6dgjtRuKUvOk8JgvPxJCSs76L8eLarI7jvuZb/lQ1KMKbEVBqrzna3H0K/wPMYKz7YzXXQmY3PCa58/VKR+WzT/2Wf52e3m7Ef+lYdvjE5HCaKDppW0Kb9/pNyT8m+36J2gpxF1a5lKLQtRInIvdzxLsqdb8u1SGzvW8p/S86rOGM0PMVUZT9Si6Gyz7v/OBFpG+rhL3vYdlc0gh7PQun6/mj6eQqaxLdskFMwCFeLnpqvfH5dfxqNzK96iL0u0Dl7xjzB35dHwDZNsJ9gRNc9CXDdJ7K/Iw72udGwTwpH1Iel8HsiDdZRsg5Zru9tFu9RamunBVb4vpiOulPj0R9XofHsJOWhDHguC50fiESOnIemc1Pg/2KmcucZTmP6f2vX04258VXGJ22NjgPo7XsCo+NbcUuDyZ++RvyrDeLjfnar/H+q2VPPM8AeYs4UZfyN/VhXVrZhVMpp5V4ze8vPap8y9Q25RNjrRrzJnh/Orle+JP9PVf+wNyjXeABJtmkwOrkR914p6jFZ9cyIc9ejZs9w7bMkF5rj0USEvJ6Hyh1DRP9cpeeB3Mxj5JEcrAPbrkDtoRG6boGv5sD5N8FMNwhmmhJm8m3JZZCHbnio9jN3Oz0OXY6ORurn3eD7YAq8RSd5hxhrPdMchxMWVelZi91WC8eqdBhLU+F9fwxcKzPRF7PQ5dBo2G6TNU73Wfx7cFW2cVdNlbG6rZv9OzSK3R0YflLjC/Rn2fP40Rxg/W0mvy9jbpy/TJnTv2VMjnZhdlWb2lVy/U0vPqtxnwEx+0Xmn+nMePIHsLeO/hdtAuXfEr5TsRM5UGl/04158LEkI+fOKvSQ/V1+fhha2mZitMi29/lLUbSrL1LXd0PWlz1Q+G0L6o+PE7nfqHInL3B/XIEhcg4G4kq0tE5H3ZExyPp7D4QvKlL7QP4v6yzBTFyLMSL/piDEfluL4uO0LdPRdH4Ksr7qCdftCfAuEEwV74Lj7jhYH46FTXxb2+yIj73/En0s9MtS1UcpX9bDa0wQDKvtn/rV/pNLs
E+l4P+2Bvte9HQfxKiE8xonI2fY8MQTmFnyi/ZKsxeNfYcb7zbvGXNjjIj9eYxJTMk/r3xTvNiXylmo/aJMDi5y74zPOKWcNOSPIscAsX+t4CXKn/NH2Hsd2rkeAREBqNk/EvGHuyH4u1LE7emKtH09kbKjEenf9UD18RHo2fYn9JKrqW0qmtumiS66BDXnRqPizHB0PT9W5cn1GI5rMejslag/Mg45m5sQubgYPk+mwCDvxNQw2D9Kg21NGqK+q0bFL8OU57vuxFjELa8wee9j7MRSnxnDQnO9xffyvjHix5h1VbL2E+BekAprtd9Zw9LpP8I7/3hlGXcn1BjrNAatvK7RJzFefCTG/8elcQ70j7gki/Ilf9tZ7f1lLI46iP2wPAuUP+eAcN8zB0MbQL5j6iP2ApLPifiUvgB50cjlQ75D+m6M/bPnn/wmUUYzbBYbjCmhcHyUrjxm1reTYH0vCfYP0uD+JNvkg97dgLTdPZD6fSOSdtQjZXcjUvZ1R/K+Rr1yfmpB5ZkRaJZzRDtNLDtWbPWQ1tnocfQSZG3qichnC5C1owl5B/oi5Jsy+HyRg8jtVSj5ZbBya9fsGwGH4EzPdPs34jtcIT7V68Q8oUsKlVPWOlT2fqjjid8je15RRl//KmPNUcaAevn8iDGJrRgULb5W5EEMT/gZQ+KOyPWj+FubUG5frvGDcek/YnrRaY1RMwdA7kXa4A6uHcbk+EguujGpZ9o5xNZqvIEcWvSjmfsi/qfPlm9/HInGFEQY3eG0+MOa54J1QbzyA3Emtu3lRNjelHX4IAWuT7PgXpsN54oM2N8X3LlIcOWSNPisyoLf53kI+boUEVsrEb2zBmmHe6HgZH9UnhuBxrZJGCC6aZzYa9qM5k2TEPZaPvLWN6suz9jXCz7rc+DamIO8Y/1U7118/DKEvJAL7xFBsI0OUWzF82ObIZgn0rmJcc/fK39eZd7vL6s1vkZ35wEMiTqh85fZ7zoy8Rf1pyqc7yvnYqFtAfKtzwqOeQ8VjmXoLnLkrOARyYcVY/aJ+BaXl59Wnpz5YsPHpp7FCNFhg2OZh9yo2IeYh3a4wX8DmoK/Qf+Ig/Jei5Fmm4NI2f8+nSLgzPRF4Ls58H0hHeGvFgreS4brtTQ412Qi4JsiBG8shvFykq6P8UIijDdTYF0smOfjNPhuzEP49grE7a1D4oEGpBzugcyjzcg42oTcExej4vwI1Lddgm5bRisXqvOxJPg9nY6w1wvgtyQbtpWiV1anInpPrXLC0u9jbMf5VDKcDyXqGhg1/m2GpXPl/xuyt3haAlJCZ6+tsn6MGmMDOF+CfhR50hsD/i57fjHybU8p7yJxIrE6+4jJjVoma1JhX4nuAZvl+krk+CEag9ZjfNpxDI46joFRh1TfDE34UTlG6/zXgL3/5NesJhaiHXB9LL7AW8ptGWeMQ6BV/FK3FY5s8WPjxAam+sJWIfezIhC/sRbVZ8coJvcRv9R2TwzClxQjY1tPJHzTFcFflSDtYE/Uib9Mjn5ysleKtSwRhFog0swRS53R1ge58pi3o1l5yl0PCCb9a4L60caVsq/vim2zvZsMr4+SEP59pdoW2pPKA0MVryqfT4nfedn7vyv2J/bE194tYLzjjtDvEzMvVb47cj/V+Xyp8i8x3kCp8Q4qHSuUk4pcnNTXhfZnZB0e1rUgD2uBsUCfK3G+gCKHyctabluBnv47taeceoZr2Tdqp+ihzTrvp8j+gvK40vdi3jdH9H+mcbvY4JsRI3DC4emGo28Q3HMTEHB/OvyfSIfvgnSU7xwk0rhVMQ/nflhvjVJcGvR6DoI/zEfUpkp0w0TR97eKv3QXBv8yW/T4cCS/X4nweakIGhsHvz4R8KuS3ysVLPlgknJfGreITG8Q+Y8IoV753JgffSNtj7E2HTF7u6C+daLaj24/jTP1T48A1gZM+a/2u9hr+4CQe+2XRhywzQ+Hz1XiX3s/iEqryN6+FX1Df9RcIOVOjo4Gv03KJ8FYTmPARtVD9FvJMV9oXyB793XF8Jm225VLvkDWosKxRPnpLw7fo3F/2luuaa17pcY7lWfY/qhya5mx58vU/yIPEh9dHhFwFfkhZXM90vb0QOiyIkStKkPjCcrhJsV/1T+NUE7x6NUVqDo6XGdtpD1XhbT5xUgZVYCoihT4xvjD5e8vuDYOQZZyhFrqEWKpRoSlSa5mBIj/5PyzyJ/+8mzzsnUPOGVrCdpnW5AAxycZCNpSjBzRW0MFS42SdSd3uZU+b5zrhGHx/Ld9Xvnnae3qO9X31sRDuubXRcH7Oj+El/VBueV91T3dnT8I/j+FfuGH0Cdkr3L2D4g6LDr8JEYmn8TguMMa26fu4Kx48lWRn5U5l8agdSg2XtK4QpXIumfgZn2e8dCm0C2K+csd7+ie5/4n5qTsGfehzCOMngg0chFgZMNl52wCDzgy/eB6NgXOjzIR9GUx4rZ1Qfbu3khfXo+sJ2sRfnUqwicnIra3nIHQBLgtsXJlIEBl3Ygwj0ZEefZFgo38MpPUxnONk4xL5W+OQsSyYuV8tk0Vv2B6OPwXiLxfFNv+lwT4vJqGoC+KEL6tAkkHGnXuyETBUXWHxsBvYYb6dNSPRifrYsPb+KvhMGYZdmO4cZGtTvBSNzkfvOrEN+5ty3PfKH7fNyELchAoZ1XjVNPC4X25L5ITZ6Ha41N0MTYpr/LI2HOas51ZbM4PYb6dNUDzBFdenmfmx8YlnNM5QqzrYXyN+SzG+InpmdPi+vQM3qzxIL7WHLZN183E+s8g23Yvsq13y5m5RePOnD2RKvgz1hiOIEP8JSMSdrtT1qAzrE5v2IrcsJX6wprihHewTZ53CE51w2UJh59F1sZShWjLYCR7zUCa97VI8Z6FZOtMpNquVr8ui7VdotvIdZNkTEO0MRAhRiWClxXA9XSycu0zTuFPO7ykCOR2Ic6P29IFQZ8XIuzLMhQc6Ie+rTNxcetl4qf1gI9gAuaGDOYirAYMw6DuglHgC6PaX/PrnIVjTAyH87Y4jQX6v5IF4w5zLoTtavE/ZsQg1/oAaiyfo96yF/28RN4i42cEOy4Q//d58W+fYz5+0FncM/gAZgz4GuPHfIxLR36t68N4NPMrxJCM55OnuSVip+a7aHeHJfwka7Af/SJ3KbcvY6LUQYWuZ1RfpV80H+mWucjoNA+ZHrci3XMukj1nIKbzYIR2qoZfp0TRHaGwW5wiby+9rJ28YLf5we2SPRqQAL/gLAQEFyEgqBiB/hUIdXdDmL0HQryrEeJRjeBO5aJzKhHXaRRSLroK6Z3nIbXzNYj2GgCfdzPgfDkF9gcS4LwzXudBMD7n+2IGgt/NU+5s8qJzbkLgOzlI/7IB5fsGofbISGRs7K4zmmxXiS6qDxBfQGRvkzUIl8cKf9j6BWndg3VEsGJX6+AgeLb4wbPJT2csdG5xw7slFD7F+XCV5SKwS3dkNl+HIVOewuR7H8JVTzyNyS/8Db3emIrSZX2Q9UkD0j9uRMbCoWga8yqmZkJ1En1bxtiYCxiZclz5NMmlOa6dV5jz
F1j7Rt5N5h8vDpJzkrAUkX+rg/99qfAbnoOAGpFdXilCkrsiPEb0UEIp3HmpcNXGwtUkMhkQCffkaLivjIPvHJH5HUkIekD0w2NyiQxCxDaHLcxEyJPyfn+Oha/YU59rI+GSM24fFQyjORD2smC40mIRGCnrEdYDgUFlsC5KQtzfu6Dm4AiEv1ukeR274Bv7HbEw5H2M+wVr3hurPI2OhxM1vhq5qAQF21pQsnsA4tfUIIB7+s9xsM4Ig/egQHj19te8jPXyMFnTOAQ8nI6oF4uQtLgGqR90RebKBuSs6oG81U0o/qovyvYIJj7YH8U/9kTB+Sq574emHZei75brMHTZX9H//pfQ87o3MWDARgwu3oOh4ccxJrJNub/oXxFXEl+St3w0Oa/Tz+icC/KX0iebkt+m3M6sSyTPKX+2hTx6ReJPzLoJ8X/th7AreyB8QAsC+1bBOS1JcEkcAgWHh27MQvB60S/rsxC9owRR35Yg7KsChPw9D5FbShD5Nf+fj8hthYjfW46E/RWI2JGP6N2FiNiZJz5AKWJ3lCH+2wokfF2J2DXyHktyEcFZGncnik7pjy6t41CydyD8XslUvkiV/c3R5IyEcW2UWa9za4zG8oiXfGWNI94TzPt32a+bBfd+XovQpYUIWpyHqI/KkPpVA4q+74+6o+NEX11+ISbVD7M0vsFYavWJkSg9MhTZ+5qQvF8+1658xG+oR+mfb8MVzYdwV0/gGpHX1BhgmE8rhvudx+hI2cui90ennNI4G2VPfmHKlryTY+Q5Ps84G/mkOWuB/jA5eclFzXVgvShnEwwW29HTR2yyZR2qPJejyms5Ki5ahILOTyHb/mekxVyPhC4TENtvDKJ7DUNk94GIaByEkMZGBHUVnF/TgMDyWoRk90RoaW9EVoxEbM0URPUch6iR4xA79k+InTgZkbOGIfTWngh+WH7nFdH3S4sRul70+eZ8BInfHHWl+MyXx4M1U5q/IgadF20+TglXfjP6HZzDyjyL66+J6rOFvCny+qQaWSL/8v1D0HRiisbAic14MS5ed2Y88sVmxHxZg6A1hXB/ZPrs6ls8H4fOD4rNuT4RIYMHI6XiblT5ih0VpDQlTuSe1YYR8eK7xp/DqKTTWldFHmbmGSl3crZzP5PHelo7zzXlSj5vnXURd6A9P3ZKOR+ZHyMXJh8ZgyPnK3P0rI3gPAPaZc6BU9/CJj6e54PIttyHfMujyLH8BZmW2+T+EXnuHrEXc5BluUPuxaZabpbXbkWB5XHBcO+h0rIUjZ03Y7Bb/D/3UfSybkWZ5W3kejyEXMfDyA7inJVbkJI/FwnFl8I7xcH8CwzWSJE39MZo85oUbs59lecUC8m+d4mvxlk5xEjkKSvfPQh9z83AaNyoMabupyab8t5UDd/l2fB+MR6ef46E160R8LpNHu8WvMlY6hvitz8VD+e8IGTPnIEell/QbGlDo+ch9HadxZAwkVE2dEbupXlmrdvlpab8dHZMrsk73SFP1uiSS5czJ6a0c4qTr4z5Rubrp+Sd1frQy9trSTmPhOeGNoH2oH/0XvXLGBviGrBevcPPZl0EfYoc4z6tU2GtFh+LxX8oERzL14hrso175bXXFHsxz0COLH4Onk2ucbHxMoptgnu95Hc7L0R5J/EjOz0NY2yYOV92drvsuecHhsAmNpPnwC2+FvMNgS9mqe4PkMeoD0XHbKxH/u6Lkbu7BXFf1SJgdb7GpzyfiIHXPZGwXhdh1r3NkLPz13j4i20il6K/+Ir2F5MQtbUKrvEBCO/ahLrOWzX2WW/sRQ/jBPr4tOF6kdf9A834Jjl+GefnPp7Zvg6UH3UKL+qUGaUmn33HbBeuD88BOZ1Zc05bwFgoHzvOAOcDUG/RXlNevQQbMS5kyngh8u2PyRr8TR4f1Vko9NsYt6Ccyx1v6zwMxkEo+2xZH+ZwiG3pE1a6FmvujXMYNA/htwaVziXmPBV5L+aF6kOWmbJnTVrPQNgaAmDtGaD1Iz73J2neIPL9EtU1rG3wfSkDfuTLXpwL92vpMJ5KgNeTIu+nY+H9TBxsz4q/tiAJxiMJ5tnpJphogeir97OR+FVX1BwbifJdg5Cwohox22sR9noa/AOLUOb1rvi934r894j8j6OPE7ha5PTKZJNnkrFM5hs7ZmnrnJ28X+fMjMsQ3SSynlZgvsa9zvoTXpcWtck+P6Uchnxtcp6przrm1FCXcY/SbgyQc0A/meegznet5uW5v/OMh9pjTW+q7Ln3Nf4kr1POfI4y5XP0w3nlGY+j0HhBZ71xFixr3onROKOl3LFYsMIBjM/bAZvoeetV4fCaKvK6LhIBT2ciZlm51h1Rv4csLoD75XS4nkvW+jbFQqx7+VOY5jYNznR5NB7GohS41+Yg5huR7WeVsM+S9013wfG84LSvi5F79OILudKirX1hrJPnHy5Egn0q8r2fQKV9pfKo93KcRB9XKwYFtuG+PuZcF3IVs96c+TDG9i9rn11A2REDUfbU51N13hPt8wk9L6xLZy0u9zvzxDw/rKvumIvEc8TfMW14m+In5uxZU0SZMV7aMSOP+55ng1etz8r2uUCv6PPUN7yqXEvaZwYtknVZoj52vu1JXSfOYONZ5Jkos7+P4Ul7cUWXnYhcWoL4tTU6B4z5ftY3ZG/thYiPSuH7diYczyWZ8r0nxszndwkw62gp+1mR5uO08DPG7TE/2Belnvf5KAsRWyrVd3NOkTXoHoCY+wtRuKwZxSv6oHr7MAQvyYL9ylQkeV2KbK+79IyXGK+hi0POpGMnOF+z2fc47uwFvDDRnCdOzl1yjjPn1TE/iXv/kpxWlSHxJnn4yafNXIGp59t03zMnw7rnjlp05e1v11cdM6s6ZjCxboL5BeKnlojvVNacT8wYB/ld6/3Wq09NrnLabMYzOJ+ItY2sB2NsqovvKq0Ho09Ie8LcJmN8zDdz/xfbX0VD0CoMTvtUZ3ZynivjSXW/CNb6tBLudzJhPJMIG/G/4E6Nd86JNmuKhoSY85smhH1vDA8daFwdFW3cFKW1tNZXEkOst0fdKna3NfSbMhQdHYDIZwp0hrl3jfhbsTbYevvCMS8BWY47UeGxVM7o83rGyf1dbLyOCmMZqm1fYFD4Lxq/J6cxuUDJ9Uo7wHwLMT3lNqkd89DH4hwvcvhThpx9xDroSwuJ9U9ibn0bHh5uzoTgOWBvBteBuJTXzHZ7wfegPR6edFRtM/1mngGtFRWMxBoK1hb1jfpeZ0N0Fx3O+BPj3fwd/jxjTPw59tdwlgbXkrqrwvXehXlKpQ6z5qXQ+bRg86s1T1l8ZCCCVxeqDufce8X8l0eaPsADcabMc92HROcvc7yawr290stiCTcslijDclG64bIPsiW4brEPD11rvJTU5r0kGXE/1CF9fy8ELsuD84UU+L+cicTNZUjpNh21lo2ocTBe9ppiDMZ+Od+B/P91jq0Ym3RWuYPJwUy+3VVzzTXoyDXqbJXsNsU5Q9XGitxEfzBXw/pE7nXOIhka/7POHn56HDmczfVjDw1fp+z5yNwZ73mmuAb
0I4hz+d6MadT7rdNcDeVHPcN4B2N6nGXOOY7MIfSV/U/sy5rriyN3aI31MPHzBsce0rnode4Nyh0/POmg6jDqpyq/N5C1rxnRm6vhXJIO68NiQ68RmU8OJx49ZUwM+0b2/znmdxyL02AfHvazxWIZKnZ4iLNfWJvhtms9HWMN3OP2a6LA+FrynkYYq9Jg+zAFxrspsL9s2g7rS5EIvKSqrcjyPCqNj5V/t9j2pvKOE58Vib0iF3pPv10622lkoshWdMmMkpN4bGSb1qNwrm/HnDdTRqcUu9C+Ue7ck8R7QxMOqI2jvr04Yo/87BldD/pjHTliriNtgdmrZ+o1nqdhiUfVh6M8NVfsuwG9Q7fr32Dussa9QmN+9C9oJzgrijXZY9NOaE0GbQuxwLD4Y+gdvBPdfDaiu98O3N3HrJdh3wf1VLFrAZwfCo55PVlrmLVmbmTofqN/8PVGmX8s65XtjybEe7+RuCFwWzHqD44XXykeHpaLtlp9bGe1tplrJThVZ2c9nwY/2eMBr2absQvOXJwRccSYG/W29fbQ+a7mxKczvOa3VXgvM2eXGCIrX9lHQbvQIL5XtWO1nIm1uDhsv8i/VXyv0+gfyXry7YpP7utnzo8griEOpf7hHuW+Z2yHtYjU2Tz7vUX/Mt7JvPvguP3685ddmDvYdmFuE/c8bS/9OK4p8Sj9AcbqqIu4xzmPxdzfregXvQvV7qVaQ8Tadv6tppCteib6iV7iPJ8xqceV635g1BH9XlXGWoxIOIbXppv1NAOi96DE9jby7A+pH6T6fUJYq9ESfLMR7XT9Q8ze2nlu2JqE77tpzQ99rJgXiuF3RQK8q3xhLXDDXuIP+0yd+WDWGgk+MkaFHjKq/acaFg/lBi6zjPfMsNz0Zan1XZ3t0uDagQGhpzA5VfZckuyVGPFHI48odvmTyHVMsvgANWZf0fQizrdpxaPDTRvAfiTu2WntNnik/A7nfVF+jIdypk+HXqJcuonNfHzkecWx7FtlnQrnfnXgVtperiX9N8aOWJ/C3+utMesv1c4Sv3OWHuckUebU8eb87JUa8za5xpnjPKXzmDg7jtzTNU6x2+6/Y1L2Wbw9y8TRXFvaumz7PToPVPb8N0aWb+pvZJ5wkZ/nGM869/3Oy6P3Z33WQ21E1qE+CNlSqnnl2K9rEXh/OsKeyIU1323W/tJ/nqT+21NGuDP0t+uYZEz7sNB4TmS/ET3d+9BfZH+pfN/xiUCL/ymMipf9STwoWHBwzFH0Cz+CyTlnVN/rbJVyMwdA3n3OU6AOIsaZVmD6mFwH2t5RKT+r7aV/wD3IHse4zrJvohZgXrczmkugf0B7Oz7znM7p49w50w84L+sh5y7pmMq+o9egVvwB6hvOHWTMgn0z3P/0mamPyh2L9DnONx4c8xOGx3P+7wF09fkMJdb3MTT2R+2hZf8Uvw/tN/Vtltg8oyVora3Ad753mvOGzoWOBV69/Tf5XhHXGvFoHlJX1yFvTwsyv+uF4JUFsL2WBOvCBFhfSNBZ4MFritSv1R6jS0V3dQ/cYM/0vcCf7WexdO5ksaRGOHotoD9fbaxBo88uNPsdwpCoXzCjoBUtQQfRzbEDfUOPYIz4iqNE5w+L+0XrfsaJLuC+pJ6gfaTM6YNR/tp/3e4Pd6wB663GiA6mLu+Yd894QJHjWZHj38U+/HRhNix95sntmJN2kjFR2pERin1+UfzJHIKJf1bL/Vb9W+MzT2kOgb0faosDv0JXeZ02tUL8W87IGhXfqrPhuji/QLltlWDdkzqXgnMMOKPvkuwTystNf9mYHPaLfXYU/O5NQezrZcj+qheKDg9AwuauCFyeC/srifB8KBLed0a02m6L/N52W9R645bodwX3v+41O+xu7+mhD4u/u92YHbXbbrJPevnNT5obcXP2JutA3x2OzNBz6V436pyuRp+derHGfHDUCTQH7kG5Vfwu5xb0CfpB9P4P6B9xCJdkCk6sbp+n0sWUF+VPvc9+pDvbL/pWxKKUC/2wsTpb+aw5gzTHnCHJfgDy8HP2Jn+mI25EH5jrxhwB7a0ZB9rTjhsPav6Ac+2p21nDyHjBZSVtque4z5mHZt8250BSz9W6VyHX9ojG7/pF7NO5jOR/7x6wVfTnSZ0fwT5Z6sCxKcdRZlumdQKMZQa+kaPzWZO/6YaQNTnwXBCITjf5wGuS/1nbiNC1jl6RT7orUwYH+BcnOi2+of+Y1/W5Jtlt9At+1RgfttTrrsgf03f2RLdT42FfGIWYnNEov2gxauxrdZZbpfEJ+oYdwNjkc7I/1un/ewf+gGGxZ8TenhK887POuuL+Hasz3Y5qDHNawVnFLsSPt/Qy+3t5BojnGX/riCdwf09r92upW5h7ZPyF+Jyzby/s/Vzzd3R+Wl6brhvnX9He8neIdVgfqn1LsgbU2fQrWGM6gnN4grfpHu4V8rV83nO6xpwNRl+L/i9nxdJ3ox6bWXpOa7c5I3CO7Kturs0osy5FqfVt5WJwLIhH50f80GmeG87h+Ugqm4rCmHuRY70LRR5PnSzxeG5rjfH+hgbfVcsqvN58p97vhdUXJz75RXPY+2+nuUbf7V1g/9AYGwqvGyO075e1qCmH6+B/WwnyOj2q88pq7J+i1HhP53kNiz0u+2Kj/P339bNwpiDjzCMTRQbx3Hein+KO6iwx1m9yv7GfcZr4U/Sd2MtCX4ry57lQvFdoxhE6YqI6VzGnTXUJZ3BzLWibiXtoS6ivKM8r2+Oi/L+Zq/xB9T/lzZkAfQRjEtfSn6VfzZltl2Sd1/+zhpQ+7cDoA7IeW9UOVIq+4zxa+mXkE+kRuAXTOOc0W/aDYIx653co8/pIMMEmlIy9Ex5/c8E2NQpRFcNQEfUCBobsx5Xys3c3cJbpx+savX5Uu/yoBZ5hlpJMR0b0HFe3xB/901MR7lsG37hoWHv7a/yUNe+JK2u0D8y5JB5xaRPb9/5nOsOvSnQj52rVOteK7JfI858L1v8efYL3Cgbdq/NpGOtnfxbnEeoMX7EBxO60fcwtUt4606/exNJzG36N7fPivuZadMSYua+pY2hXGeecnGueI8YmiIGmF7epH0C9Qtlz39OO9A79VmvWmU9WPRRs9q6O0vzmCcX9nAPJPpuLQwUv+W1HnetLNLi3oIf/dvmuH2lMq9axQWS+A3X271Ft3Sy+/Zdo9jiC5pZPEbMuDUXJj6CX6La7Zb/cmCV6M1owXazYwoR1CLe0/IXUwVZX51usyfZltv4BJ9x3JCD0zTz43BMH74m+sF0RBvf9yQh4KQsRHxQj4PVseL8UjoCBFSiwPCn6ZaX4VItQYryp+qfasVLOwSKd/9EcsFf2/QkMEFvVP4L2lziE2KXNxOvtcU6dKya6Y1jCUV2XjrlqHXPniIO43/nIi/ubZ4QxHuIizYnJGaAOoq7nrFL6YIxPj5e17fCZhsu+p82lz8b4J30H1q/0j/pBseX4zNN6LvpG7EWfkF1an1Ql36nG8ZnOSWXfGmeE8rxzdiZnFFYbn6GLwdla3wjO+A6D/M5hSqzs7SfnobD/zS
iLuxsZOVORUz0GiXIGwsq7IKibyLc5Eu5hYfC9NBbOq2LgvD0efs9nIGdbM6qODUf40mIYD8Zpr6nfaxkIfC8X/m9kw/5SPBw3J2o8ocz7Pf0MrGFj7y6xgfmZV+p8w9GJZ/RcDo8RDJ4F5Sahb8S9SZlQfsSE1Bn0j8amn9W5LtPa+15Y939vOxalDmEPkuqnOtM+sza9Y8at+mkiX9as0F8weWvadN7z2IxTuubUQcRBrJerao/1sIaIupCfi2tLW1DjXC17faN+tyLBkfnGE2adnmAM1nFXG5/qviPe62LfhHq74A7nfvTyFt1qbUXNy7cg66YrMNByHp63+8D9cjRiNhQg43vRHTu6IHVnV6TvakTClq4I+aQIPm+mw+e1NPgvzkHIyiIEry7SugnH00l6+b+RhZDF+fB5KQ22JyOQkDIV5Z2WaO0aP1OVfbXonM91/9c4BYP6bsWQ6GM6a4ozRWeJLDhzilwx/I7ENB24nzFNxpQ7ciY8+9QbnKX+F85lGmDiIupxngvqJ+om1j7/uX1mE9fGjFm3aZyU54FYaGo7/mEfJJ+jruMasaZI+wN8VguG2ah2YUqe6C9Zqwbfr1X+Zfb3NO/CnkniyCrjYzTY94msv1e5VxmrdWY2fU321dZ775T1/BLpr06Be08w0nPuQYNlt8rPtTAVgSvzEfKZ+LUrcuBemgXXova4xMJEGHLZ30iGz6IM+C/J0bmlsRuqEfBmNtijFPJevvb/Wl+MQPDIepRYzDhmldrd9ahzfo1G9zfy2TfLfvoWvQJ3CgaTvS+6epLIX/3c/mZsh/uZfRf00znHjBiUOp3y43mgTzA0/qickVbVP5ztxv6LjhgaZU3dRJxEzhr6Ch0xH43bFf3q604rNO9pI0xbc1axUoPfF1rH29X9mc5kIl8H55w3+m2Wa4ue40zjTsQYQ7Vng74Na1UZUyk13lYemwpjuWI76p9a20bUdFqH6JvGwnkkFt4PhCC8ugV5QY+Zcc07Y2B9Ol7ztdYXE2AT/4pyt72SBEPk7l6eheivq1FyZCAKd7Futw+ytvVCwHs58H09E27RP/bH5XfvDkda+FxUdF6qcTTKv87xDZr9D4g/clwxziDZ9/QRyZ9xXY05A455Rs6v5lxR+onT8ts01rlqHvDoSFOmzC9yfzPGQ118SbZpR4lDiUcZk+B5oU8wv/2Ra0kdxbW4sT32Nrs9dj01/1fMxJgnzwPPF/PF1c5VWnPNxylid6gX2Y9Q7/d3wTQf6cwy1q9R/qxnk8fP5HFcpnHH0Dzj4aFFxovXyhr8rdh49ZVi4/WlRV4vrEj3ufk1n+VZZ3035MA2RzDL1AB4c04r627vjYX9ZdEli9LgfD9dL5/lmfD9JAdB64sQ822t1lQXHh+AzN1NiFhbpjMQ7S8mw/5CkuYhvW8Lhn+vEpR1ekd1IPd9tf0zPX89/Xajf/iPGk+bmNGms936Rx4RfX0eS2abM2xXzAE+vhk6n/OK8jad28ecC2cUUr9QbowrEKfQV6I9pa7ia5Q7uZaosyhz6n5yZd3XfqY6OGyUO6jBfC/mwbiulDn1+wSxrex54pzMCvsyzRGWiE4fkbQfV8u6T8xolXP7ndbBxxvjEW40ytWAMKPutX+3/tV3bc6msI1lglkiYJ8WAZ87E3SepH1hEvw+zkXI30sQuL4QvqtFB4nsmctizYhrWabGSe2LU2F/OwWsy7X9LRbG7bJ2cnaMW6JOeY22vZvqvvZQpbcZ1+xq3yKY6xv08t+tvu6gyJ8FZ+4TjH8c49PaNDcxKO47wfCnL/R2vXOFyfvDvCH9dPqMjNlTlh1cTNMKW3XuOPH8n0QnU6fQJ6ZsyfnD9eI6MG7HteTcWT7H/AvfU+cydzPttzmT+5z4F/sVB43POK0xs0rHUuWRKLcvUh9gmpyV4XEnFDek2q5HpNGCQFvBOau3fTblavcx6j28LeX/Su6BvlHKYRP4Yd6Kgr39NI9onxwB/ztTdO8azyXC8UqKrIM539P4SxyMe2JhuyPG7G+8Ra55UewP+Nl2Y9Qe47qor+U9lhqXhN9pTArv12mOJSDZcllFgfezp2n32T9B29PDdRAjBWtpPCScs4x/QkvYfsH4xwR7nJXv9rXYhG0ix9Oqx9nbS53N2ZwLJ5l9Rey5vrXJ1DGM/VCPMwZJ+TNeQ14I7mniHuod7nXqHMr+lUvNvDHXkf4n35c9Y+QVMmcfQzEt60XpW9CnqnB8oPkqYvsq51LRNyvRJ+ggujg2Isd2P6JtAxDfeQSKHH853+j/ycq4wEGLPMM8EegOPzgocc1zPYO//LrOd/UWeY/Npc6XPmuO+HhJhePl52Mts0N8FiR9Ubx3ABzXm7F+17z2PUwdNDfajIXOijhmzIxYL68/aUwLn21MCR9sjAttMIYEZxg1AYFGJ6vtn9bXkh6VYly1h30lJWJ/KowPNc7Z3Uf2fuRZDIk6g/5hx2QPndEZi4Nj2U93DhOyGFcw+xUZg7yy4rzYzvPKcUV7zFgn4/20p3Pa9yz1BnE7fST2A4zLOKF2mbE0rf/RGrez+vPkalrYrsN4z7msjEGSu49nhhiXe17PosaMN2l+kHn1Usdb2pPEOaGVto9RYnsD8baxiDCaWgOzS1uD4ouQ7BoJX1ssvGydEGBLQzdjzcHu9n1Pd7F/Pb9rp6+mNFh21uZaHsryDU4p7FzieCR0YR6K9w+A7eYoM2/OfMtN0R+J7/qyMSNiiuzlcmNIaOB/0kthN1w5ogv3sX473ZgrWOAx1Z/0+xp8tqF34H70Djig85xHJ53BDV1MHEN/h3Eb4pZpBeeV46dG9lyNzyrlq6LNo24nxxX1Ee0s7e9l7XkS1hL0Cv5OzsN53e9cG+pz4pnBcT/qxTg/bQH3PvUa9z5l/9bl5joQN9EW0AYMjf9JdU8Xn081jlDMHhnZT8xNE9+zxiqiU48tzqGJl7teSTgX/EQKvMbaYRsaAPuEcBhjQuBZ5LndYrWMF5/Vz1Jq6dV5oNfztulBO4zhwfCcFITivf2RsqZOaw2VA6wp6Njv6R8S29Mz1hh2knXzacYc+ax/U14F4i/OF+WczVrHOlmHb9ESchgDo3+SfX9COfLGC+6mDBmjbwndLZh6reyx95BvfV5jxozBjE49LnI9r/KjDiGm4ZrxPAxPPKKzayfnHVO58qwwHsQ9zRh0/+h9giUPasyBMVTFqn3NGDBjwa9fZs5u5Yxn9hhMEPzZ6LdJ53RS7xcaz2o+mjNDyQsUZ4zZ429JSbXstsSGf1mOimPD4P9utmJ3nxdSNb9tZ119A+vAHW22YSEIfasAwYLNPa4JQcpn3VBxeCi8b5Q93xxEnpSbjEjnP8Uy/90r2hhUnGLMOp9vPG76tvSxFPOsVp+LcTadaxu8ByPiTmFmIf3bsyK3n+U6qf4OcTd7E1tCDmieekj8Aa2TYRyddRpNodvQT3x/2lbaAsqPXJLUHbOrWtVuNgRsEhtwVG0vX9fcWAV95ZNmTbT4TsSYlDHXh37Z07JeCy+B5mOZ4ye+4ezdrs6No
jtXgHmiLOMuzY8kybmOMvptDTIKo/i9Pa8Nr3evyUH4lnK4F2XC9Vqq1ujYn5XracHsN0VpjQh7KVij6fN8KtzvZaKubQL8n82Ad1ff/UaCT5ffs+95yZ74gj1w3O9V9o8E52/Wi77exSGHtHZ2dNI59a844/qBgeYcVc4IJzee5pyyGHdolf+f1/1JHUEZjU49qrqYuSfWM0wUO0H9c2ezKT9iG50zLnt3WCLj9odVL/FnHhxq4k/qI/rNwxJ+VltDnTe70pzr/cgws6aLtRWcvX5J+nnxETep/8SzS13DGeSZxq3E9l8bhj2843t3vjfizeS9jSg43A9By/PhXCgY5qF42Bck6nq4X0wza3VmCb58PEVxY8qRHojdXAuvCUFnDYdR/HtlH2Z0bcqw3abz4ut8NqDWtVb1PWXfJ0Bsazp0jjnrZzlvmX7uVZWteFCwzfMTzZjAiKSTaocvK2xT/hL6WW+Iv/vCZHMNWM+mPUPk6wncJDbhiOoL6m+dhz3GxKYdPi/xDLFRB+5nnRZnvbNvbEjsMYxLPaufaX6jyR/BtWbMg8/18P0e5balqu/ZK5RqzNZ+oSRj2ka3ER3U8b07DXOPiF1bhWqMQZdzYxDzeSUcj8mevzEK9icTkba3J3J39IbzcXnu1mj4PpuG6lMjUdo6FPZnEmHt6v/y75W9jxGZnGa7cXcX1+eCbU7KPj+j+ZN611Y0+f2AMYltuKLEjCuQZ5Y+Lm3ulFyTC+DSglZczjxS9inlBdB4fIW5Z8lrwhqBBRNMXU+c2RS6VXFgmfGBYJ9T6iswLsFcNm0of4/2gLqHZ4P/J04lD+KYlDMYHn8CvUN2oTloHy6Tv03OUHJb8v0niB/SJ/iA4hv2UqYYVyoHX4ZxExKNqa/aDO8L/F2W7vYRkS8WofHMJCQf6I6k/Q3I3dtH+xetY0O09iD1UE80YRpKtw6AzyMm/0z8oipEfFxK3o2fjCDH7+Ly9jOS3RnG/B2M7/ULPo5xyZwl3oaLg35Ek/8BDAj7BdNFxlfLGecakO+UZ/wv/UxeYMYnpxe0qd6gPaWciROJZygPYk3GH+hrMb/CujTWIjDXTRzYGPSF2lf6VLQHPC+0vR2xhcdHmmeDen1q3nkMjT2BAZGH0S9c7ErADlxTfVZzgKzp5Z5oDjygtqrIeEl74th7J/r+tGCKq377vT0G+d0Y81YpEtd1AbnAmAM3PknTvpW8jc3w6uMPW6rrLduatBsSt9cfLDkwEM5H5QxMDIV3lRve9X4/2OsD83/v3hed+FiZ8Z7qHeZQmoJ2qG/b5P+D6J3DYsNOYWxyKyZltGF2uSmHO8QePjzElA1nqhNPEqtQ9stvMP0jzeu2mDhxwcR2HMPaBtFNI5KPahyYmKjG9TEGRB1S7Mr1oq6hLWBdxBXkK+5u2pnrq029Mjz+F83rjEyQz5V6Rs8debTI6zRRzlaNbaP6K+x/DDO6MabwYohRkXZhz1ssnayTQ+/L+rwHMrb2gPXaCHKSvGebHFrpsSB6TtiXpcje3qw9bjaL5wj9nXs8wr0Wxm03HoxX7isj3Q3D4nXX75V9rDGijvz4rNdkzSDnmDMOSz3Eud69Aw+iX9hRDIw6prrnSrF3VzBemXYOs0rbFGewlv/ZCWYcgPJmjXKHb8q9T93z2W3Au1eaa3RrT7HT6acF529FtQ95+p9Apc87GJ7wS3s8+Zzgy6Oi106LfuPZknPQVewO8+uUf+xpraug/SHG+VN2m9qjCSnnxFZtFZ3/AYjfYowh8DfS3v/t95V/bvvlkW9mbuyB5C+7wXpFOKy1fvMuvF5r83e8k3Yu92ALAq5PRmeH19CO1wQDzbc/lQTXo8la12942576PbKXz1Ymn/Eo9SJxMbmqm4K3aw6oXjB4T//vFPMwxjkm+QymF5q47irmn9LPae0U9QplTf3MOmZe1EOM1XM9GG97Z5bJsbTiBhOf8NyQ+21myWnkWp9AtPcIBHgXosj3YbHbzDe2oTmUvJObFe8zRzwy6TgukfM3Ma1V7NNpTEg1z+JUsT+s8eof+jPq7NtE569WnUafUfDESS/DEnFBtkEe+faZkZsT1tQg/P1ieI4Pgq3cb/b/Y32ybJ7GG8nf5R/th5iF8jMp9vs7XvO6OeJB1o+Hv1UIa70/DE/ruP9e9uk14UbDCcb8iDcr7ctRZl+MHqHrcAe5rOW09gk+jO6+36IpcBdGJ5/WXMXMIsF8xW2q8xnDZxySsRyz5rhNcXpHPIG4hTqfdeaU/0dzTT1OffWc6KdxqbvEL92onIfZjtvh65WNUenfKZ5kvJ41amat4Tqtdesta8IaRtZw0caSy29w5Cmtfenm2Ka5z3JjidhcM5bpa8Rf0PcXldjH+MyNP5W8sQ5Br+TCa1Bgq+iQIf872dgWJt4fu6UGkStKYWsKOGy1WLQO3HgqYXPZj4MR8WIhvHJd3xgWi+d/I3uH4b44ymg5lWncpjaKNVv0TRj/pl9YHHCH2ICtGBrRJj7UQbV15OIkruwVsE/0TqvGXihbypp6mjJjPeH0IrNeh3k95mpZ70M/i/pn472mHebPPi3r0NvnGO4WTHOz2AXW2wR27oZe0Uswtxsx0kl9jnWy9BeIlSodH2gcoYf/VgyMPIqhYpcYi+pq36xx8XLjQ5SKf55lu7stolPjB5yP4hlh6+3d4r8q6JFM5T4MejWHuv6QEe2q+1fysT4eNypgVR4i1pXBdW0srN7WayzPePsELSs436dtOgLuS4NXnP2v/6ncnUZwidihFfHGON0jrImtda7XehfWDJm9aQ8ixXsespw3ab/I4IhTGt8nv+mwuKMYlXRWscqbM814C/czY8Ad9cus7xkSR9/0vMpeMUy9ic0/vcXEKFwzrRdLaMPlOW24tVH0tt/HiL/oevSP/Ub1F3ur+0bsVm5c6kTWV1SJfuTFmu9LC06gOuBlFFlf11wE84TldrElXs8iyqPfWa88+7O2McGf+t2ahNhlFYhbXwO3+E3egwJ3GrE+6f8nOXk9EFPuXpGNqE2CMRcXw5bms8djlN+18du7aq+t711J8Iq13/qfyD7EqGqONUae556nb8saCuZs+ZmZT6tzbxQbuFhjUrk2WQPbbOUN589099uKRv/NGBh9CBMz2zAo7kfcP7BNOQypV1bdZM5LoK6nb0SOpck559VPYxzmesHxfxZMs3Y+sPomcw2INclBOTCgFVUX7UO591q0hO4yc+3dTH44xu6ag7+TPb9W8UCD7yb5nOvFNm0UH/wIKlyva20a8+Xs/SnyWIgE6yVwXRKL4IWZSPysi+ZTA9/OBTncrX0Dl9r/DV7ti24IDnYty/gl6ttq5O5rgWtgGIyaABScGYgWzID/g7L/UxzP/ruyTzAm5os9OlNkLFR809W1QTEOzypr9QdG7dFcbUvoHp3ZQf78EqfoJPvz8n3Xif7Zj3Hpx0Uu52Wft2ltGPsppmeexLsz2vDWTPF1W1rx/IQ2/LV/m+CYo5iQcVrjEZNzzNkVN9S1iR/WhqXXtOGD2fI7l7XJ2pzHlIxW
jI49j8miz//a1/R/72lpE1tzBt0Dtqi8Kx3LFKPW+36JevffxTf/AiXWxSiyca7L05onz/Caj5iQIfC9ORVhH+Url6//8xmM27RaJ4ZsELzyv9X1/+qyv5n8QcC6ApScFH3/WB5sCS407p6AoW3XIvh50WHZrnW2f3POtezpebRN7MFqcH2r8WPGFVjP0tXxFUal7hTMsQvd3d+jwf2N+JS70M39hdZekONxeMIB0ekHMa1oN27qvg9d/Zch1/NpdLeJXUjYjcm5+7SO/traH3BT42HRG7sEz4vNTj0gNnMnBsbs0h7EcRm7ZQ134+bGPaKXyJ8kfyv0a3ltk/yNbbi6eieurtoveJ545wfUuj4V2X+oOUPy/lQ5V+q+oY/CvDgxc4HarOsRdVFfuKbFw7UoAc6HkmCdFwnviSGMma23W7wD/lNdbXs8fr7z7TREr6sUeWfDu9IXaW92RfG3A+D3nKxrnf9Or3/B+/7H9cf1x/XH9cf1x/XH9cf1x/XH9cf1x/XH9cf1x/XH9cf1x/XH9X/9+p/wb6cWG/zz/RwdsGreW369b7P8+jOnLcsv3B/1+PV+p/fyCz8/J1rvffRXb9jZ8UanO5l/zBLNH2+/l1fm+PzmPvrC/Q2WX++jLNUX7r0sN1y4t4T85v4qYHnH/XT8+nyn39x7/Hrf2ePXn/fy6PjCct8J5y0X/t1w1PL/y7/pv956/ObjhP967/G4xdpx7/OJ5XTHffVyjwuf+YY53js77s9bzGXhvznV7WttoXgu/KoswIUfsXT6zZ/t9F9L4bfyw9EL38tD7qPbX/JYdvTCDnBE77ywKhHRN3ZsSUtJ519X6CrZWW3t96dlk5y/8Bkv3HvslA3T/rV8lpfcgPY/FX13xYUNUx396/311XLf/mevRUXHn+10DBEdf6rTRwi5sDG8MeeC1ESUHW+v9/+T//0vFFslQA==\"\r\n \r\n if old == True:\r\n s = Icon7\r\n else:\r\n s = Icon8\r\n\r\n return s.decode('base64').decode('zlib')", "def get_icon(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetIcon', self.handle)", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\n return self._icon", "def icon(self):\r\n return self._icon", "def topcoat_icons_script_url():\n return static('topcoat-icons/font/icomatic.js')", "def icon(self):\n if self.zone_variable == \"temperature\":\n return \"mdi:thermometer\"\n if self.zone_variable == \"humidity\":\n return \"mdi:water-percent\"", "def create_tray_icon(self):\n tray_icon = gtk.StatusIcon()\n tray_icon.set_from_file(AFM_LOGO_PATH)\n tray_icon.connect('popup-menu', self.popup_menu)\n tray_icon.set_tooltip('Audio Failure Monitor')\n tray_icon.set_visible(True)\n return tray_icon", "def image(name):\n\n # the path where all the images area\n if getattr(sys, 'frozen', False):\n # The application is frozen\n datadir = os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n # Change this bit to match where you store your data files:\n datadir = os.path.dirname(__file__)\n return str(os.path.join(os.path.abspath(datadir), \"icons\", name))", "def icon(self) -> str | None:\n icons = self.entity_description.on_off_icons\n return icons[0] if self.is_on else icons[1]", "def getIconPath(self):\n try:\n return self.primaryAq().zIcon\n except AttributeError:\n return '/zport/dmd/img/icons/noicon.png'", "def _icons(self):", "def get_icon(self):\r\n return get_icon(self.ICON)", "def icon(self):\n return self._sensor_type.icon", "def icon(self):\n value = SENSOR_TYPES[self._type][3]\n if self._type == \"weather\":\n value = self.state\n if value is None:\n value = \"sunny\"\n elif value == \"partlycloudy\":\n value = \"partly-cloudy\"\n value = f\"mdi:weather-{value}\"\n\n return value", "def icon(self):\n value = 
SENSOR_TYPES[self._type][3]\n if self._type == \"weather\":\n value = self.state\n if value is None:\n value = \"sunny\"\n elif value == \"partlycloudy\":\n value = \"partly-cloudy\"\n value = f\"mdi:weather-{value}\"\n\n return value", "def do_icon(srcfn, magnitude):\n img = Image.open(\"%s.png\" % (srcfn, ))\n draw = ImageDraw.Draw(img)\n (width, _height) = FONT.getsize(magnitude)\n # 40 pixel wide, we want to center it\n x0 = int(20 - (width / 2.))\n draw.text((x0, 8), magnitude, font=FONT, fill=(0, 0, 0, 255))\n img.save((\"../../htdocs/icons/lsr/%s/%s.png\"\n ) % (srcfn, magnitude))\n del img\n del draw", "def icon(self):\n ret_icon = self._icon\n if self.player_name == \"lower\":\n ret_icon = self._icon.lower()\n if self.is_promoted:\n ret_icon = \"+\" + ret_icon\n return ret_icon", "def icon(self):\n return \"mdi:solar-power\"", "def icon(self):\n status_icon = \"mdi:fireplace-off\"\n if self._product.get_key(\"STATUS\") == 6:\n status_icon = \"mdi:fireplace\"\n elif self._product.get_data_config_json()[\"_flag_error_status\"]:\n status_icon = \"mdi:alert\"\n\n return status_icon", "def logo(self):\n self.def_logo(0x21)\n self.send(\"\\x21\\x22\\x08\\x08\\x0a\\x23\\x24\")\n self.reset_codepage()", "def icon(self) -> str:\n return ICON_SPEEDOMETER" ]
[ "0.6832323", "0.68212587", "0.6478877", "0.6392684", "0.63691825", "0.63691825", "0.63691825", "0.63691825", "0.63691825", "0.63691825", "0.63691825", "0.63691825", "0.63691825", "0.63691825", "0.63597417", "0.63435024", "0.63178104", "0.6314043", "0.6314043", "0.6243124", "0.6194222", "0.61826044", "0.6180889", "0.61762774", "0.61762774", "0.61458737", "0.6119743", "0.6058779", "0.6055613", "0.60552055", "0.60552055", "0.60552055", "0.6036389", "0.6019203", "0.6019203", "0.6007862", "0.600737", "0.5987736", "0.59801483", "0.5966208", "0.59590715", "0.5936111", "0.5913722", "0.5908105", "0.5896683", "0.58932227", "0.58714414", "0.5868312", "0.5855714", "0.5850509", "0.5839516", "0.5817686", "0.5807332", "0.5806738", "0.5795815", "0.5790496", "0.57903427", "0.5784916", "0.5780219", "0.57522607", "0.57485044", "0.57378256", "0.57356477", "0.57356477", "0.57356477", "0.57356477", "0.57356477", "0.57356477", "0.57356477", "0.57356477", "0.57356477", "0.57356477", "0.57356477", "0.57356477", "0.57356477", "0.57356477", "0.57356477", "0.57356477", "0.57356477", "0.57356477", "0.57356477", "0.57356477", "0.57171893", "0.5714745", "0.5713448", "0.5711882", "0.56945205", "0.56889576", "0.56869435", "0.5684554", "0.5683121", "0.5682264", "0.5674741", "0.5674741", "0.5655169", "0.5644076", "0.5639096", "0.563867", "0.5636454", "0.5624454" ]
0.83448386
0
Sets "_total_posts" as amount of posts in the VK domain.
async def _set_total_posts_in_domain(self) -> None:
    logger.info('Getting total posts in "vk.com/%s"...', self.vk_domain)
    params = {
        "v": settings.VKAPI_VERSION,
        "access_token": settings.VKAPI_TOKEN,
        "count": 1,  # Enough just to get total post in domain.
        "domain": self.vk_domain,
    }
    # Data fetching.
    response = await vk_asynchronous_request(
        self._url_wall_get,
        params,
        domain=self.vk_domain,
    )
    self._total_posts_in_domain = response["response"]["count"]
    logger.info("Total posts in VK domain: %s", self._total_posts_in_domain)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def posts_count(self):\n return Post.objects.filter(user__username = self.user.username).count()", "def num_posts(self):\n\n return FlicketTicket.query.filter_by(started_id=self.id).count() + FlicketPost.query.filter_by(\n user_id=self.id).count()", "def _update_count(self):\n self._count = len(self._items)", "def _set_all_page_num(self):\n res = get(self.url, headers=self.headers)\n post_num = re.findall(r'微博\\[(\\d+)\\]', res.text)[0]\n page_num = re.findall(r'\\/(\\d+)页', res.text)[0]\n self._current_page -= 1\n self._all_page_num = int(page_num)\n self._all_post_num = int(post_num)", "def page_count(self):\r\n postcount = self.post_set.count()\r\n max_pages = (postcount / get_paginate_by())\r\n if postcount % get_paginate_by() != 0:\r\n max_pages += 1\r\n return max_pages", "async def set_post_number(self, ctx: commands.Context, post_num: int = 0):\n await ctx.cfg_channel.current_post_num.set(post_num)\n await ctx.send(\"Current auto-post number has been set to {}\".format(post_num))\n await ctx.cfg_channel.last_post_time.set(0)", "def total_hits(self, total_hits):\n\n self._total_hits = total_hits", "def get_number_of_posts(self):\n return self._performed_actions[WRITE_POST]", "def __len__(self):\n return len(self._blogposts)", "def set_article_count(cls, count):\n return cls.db.set(\"article_count\", count)", "def page(self):\n data = super(RunningCountPaginator, self).page()\n try:\n obj_count = len(data[self.collection_name])\n if obj_count:\n obj_count += self.get_offset()\n else:\n obj_count = -1\n data['meta']['running_count'] = obj_count\n del data['meta']['total_count']\n except KeyError:\n pass\n return data", "def set_n_comments_observed_task2(posts):\n for p in posts:\n p['n_comments_observed'] = get_hostile_indices(p)[0] + 1", "def new_posts(self, number_posts=5) -> Type[QuerySet]:\n return self.published_posts()[:number_posts]", "def add_main_post_into_ds(post):\n global ds_size\n if post is not None and not post.is_in_ds:\n duplicates = find_all_duplicates_for_post(post)\n if (len(duplicates)) > 0:\n add_post_into_ds(post, ds_size, DS_MAIN_POST)\n add_duplicates_into_ds(duplicates, ds_size)\n ds_size += 1", "def get_num_postings(\n res: List[Dict[str, Any]],\n account_id: str = MAIN_ACCOUNT,\n balance_dimensions: BalanceDimensions = None,\n) -> int:\n balance_dimensions = balance_dimensions or BalanceDimensions()\n return len(get_postings(res, account_id, balance_dimensions))", "def postings(self, postings):\n if postings:\n self._postings = postings", "def setCount(self, num):\n self.count=num", "def set_total(self):\n\n self.total = 0\n for item in self.items.all():\n self.total += item.price\n self.save()", "def get_vote_count(self, post):\n return post.vote_set.count()", "def count(self, value):\n \n self._count = int(value)", "def update_count(self):\n pass", "def add(self, posts):\n for post in posts:\n self._feed.add(FeedEntry(\n summary=post.summary,\n title=post.title,\n title_type='html',\n url=post.url,\n updated=post.date,\n ))", "def num_links(self, num_links):\n self._num_links = num_links", "def count(self, count: int) -> None:\n self._count = count", "def correct_counts():\n articles = mongo.db[app.config['ARTICLES_COLLECTION']]\n monitors = mongo.db[app.config['MONITORS_COLLECTION']]\n unique = articles.distinct('feed_source', dict())\n for link in unique:\n count = articles.count({'feed_source': link})\n monitors.update({'metadata.rss_link': link}, {'$set': {'hits': count}})", "def get_context_data(self, **kwargs):\n data = 
super().get_context_data(**kwargs)\n post = get_object_or_404(BlogPost, url=kwargs['slug'])\n\n if not self.request.user.is_authenticated:\n BlogPost.objects.filter(pk=post.pk).update(\n views_count=F('views_count') + 1,\n real_views_count=F('real_views_count') + 1\n )\n\n data['post'] = post\n return data", "def count(self, count: int):\n\n self._count = count", "def _count(self):\n if self._count_valid:\n return self._total_results\n\n url = self._build_url(\"/_search\")\n request = self._build_request(0, -1)\n resp = self._cb.post_object(url, body=request)\n result = resp.json()\n\n self._total_results = result[\"num_found\"]\n self._count_valid = True\n\n return self._total_results", "def _count(self):\n if self._count_valid:\n return self._total_results\n\n url = self._build_url(\"/_search\")\n request = self._build_request(0, -1)\n resp = self._cb.post_object(url, body=request)\n result = resp.json()\n\n self._total_results = result[\"num_found\"]\n self._count_valid = True\n\n return self._total_results", "def posts(self, limit=100, all=False):\n source, edge = self.id, \"feed\"\n return lazygen(Post, source, edge,\n limit=limit, get_all=all)", "def count_view(self):\n self.count_views += 1\n self.save(update_fields=['count_views'])", "def update_posts(accounts):\n # print(account.columns)\n for index, post in accounts.iterrows():\n\n # If a post with this URL already exists in database, then continue with next one\n if collection.count_documents({'Posts.URL': post['URL']}, limit=1) != 0:\n print('Post with url ', post['URL'], ' already exists')\n continue\n # Get tags from all posts\n # hashtags = []\n try:\n hashtags = list({tag.strip(\"#\") for tag in post['Description'].split() if tag.startswith(\"#\")})\n except:\n hashtags = []\n # get preprocessed description\n description_without_hashtags, description_preprocessed = preprocess_description(str(post['Description']))\n # update collection with posts\n collection.update_one(\n {\n 'Codename': post['User Name']\n },\n {\n '$push': {\n 'Posts': {'Followers at Posting': post['Followers at Posting'],\n 'Post Created': post['Post Created'],\n 'Post Created Date': post['Post Created Date'],\n 'Post Created Time': post['Post Created Time'],\n 'Type': post['Type'],\n 'Total Interactions': post['Total Interactions'],\n 'Likes': post['Likes'],\n 'Comments': post['Comments'],\n 'Views': post['Views'],\n 'URL': post['URL'],\n 'Link': post['Link'],\n 'Photo': post['Photo'],\n 'Title': post['Title'], # not\n 'Description': post['Description'],\n 'description_without_hashtags': description_without_hashtags,\n 'description_preprocessed': description_preprocessed,\n 'Hashtags': hashtags,\n 'Image Text': post['Image Text'],\n 'Sponsor Id': post['Sponsor Id'],\n 'Sponsor Name': post['Sponsor Name'],\n 'Overperforming Score': post['Overperforming Score (weighted — Likes 1x Comments 1x )']\n }\n }\n }\n )", "def update_count(self):\n pass # Do nothing", "def count(self, count):\n\n self._count = count", "def count(self, count):\n\n self._count = count", "def count(self, count):\n\n self._count = count", "def count(self, count):\n\n self._count = count", "def pages_count(self):\n return self._pages_count", "async def _total_players(self, ctx: Context, number: int):\n\n await self.config.guild(ctx.guild).total_players.set(number)\n\n await ctx.send(_(\"Set total players to `{}`.\").format(number))", "def __get_total_pages(self):\n \"\"\n if self.__total_pages is None:\n if self.count == 0 and not self.allow_empty_first_page:\n self.__total_pages = 0\n 
else:\n hits = max(1, self.count)\n self.__total_pages = int(ceil(hits / float(self.per_page_limit)))\n return self.__total_pages", "def add_count(self):\n self.count += 1", "def updateWordCounts():\n emaildata = loadEmailData()\n englishwords = importDictionary()\n countAllWords(emaildata, englishwords)", "def set_entity_count(cls, count):\n return cls.db.set(\"entity_count\", count)", "def PagesCount(self, default=None):\n return self.data.get('metadata', {}).get('number_of_pages', default)", "def update_count(self, source, count):\n if source in self._counts:\n self._total_count -= self._counts[source]\n self._counts[source] = count\n self._total_count += count\n self.change_text()", "def vm_count(self, vm_count):\n\n self._vm_count = vm_count", "def vm_count(self, vm_count):\n\n self._vm_count = vm_count", "def num_pages(self):\n if self.count == 0 and not self.allow_empty_first_page:\n return 0\n hits = max(1, self.count + self.delta - self.orphans)\n return ceil(hits / self.per_page)", "def _update_counts(self, msg, subtype, by):\n\n try:\n counts = self.get_local(msg, \"counts\")\n except KeyError:\n counts = defaultdict(int)\n\n counts['all'] += by\n counts[subtype] += by\n self.set_local(msg, \"counts\", counts)", "def update_total_rolls(self):\n\n # Incremene the attribute by 1\n self._total_rolls += 1", "def page_counts(self):\n return 1 + (self.total_count - 1) / self.page_size", "def _get_paginated_content_count(_user_uri, _start_index, _count=100):\n _content_uri = f\"{base_url}/contents?filter=author({_user_uri})&count={_count}&startIndex={_start_index}\"\n _response = core.get_request_with_retries(_content_uri)\n if _response.status_code == 200:\n _response_json = _response.json()\n _content_count = len(_response_json.get('list'))\n else:\n _content_count = 0\n return _content_count", "def question_count_published(self, obj):\n return obj.questions.published().count()", "def count_total():\r\n trans = transaction.begin()\r\n StatBookmarkMgr.count_total_bookmarks()\r\n trans.commit()", "def total_count(self) -> int:\n return self.__total_count", "def update_post(reply_to, connection):\n\n database = connection['test']\n collection = database['posts']\n\n collection.update({\"_id\" : ObjectId(reply_to)},\n {\"$inc\" : {\"num_replies\" : 1}})", "def tt_counts(self, tt_counts):\n\n self._tt_counts = tt_counts", "def count_total_tags():\r\n total = TagMgr.count()\r\n stat = StatBookmark(attrib=TAG_CT, data=total)\r\n DBSession.add(stat)", "def direct_count(self, direct_count):\n\n self._direct_count = direct_count", "def page_size(self):\n return 0 if self.hits is None else len(self.hits)", "async def totalImages(self, tags):\n with async_timeout.timeout(10):\n url = self.urlGen(tags=tags, PID=0)\n async with self.session.get(url=url) as XMLData:\n XMLData = await XMLData.read()\n XMLData = ET.XML(XMLData)\n XML = self.ParseXML(XMLData)\n return int(XML['posts']['@count'])\n return None", "def dhcp_total(self, dhcp_total):\n\n self._dhcp_total = dhcp_total", "async def update_stats(self):\r\n\r\n\t\twhile True:\r\n\t\t\tlogging.info('Attempting to post server count')\r\n\t\t\ttry:\r\n\t\t\t\tawait self.dblpy.post_server_count()\r\n\t\t\t\tlogging.info(f'Posted server count ({len(self.bot.guilds)})')\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tlogging.exception(f'Failed to post server count\\n{type(e).__name__}: {e}')\r\n\t\t\tawait asyncio.sleep(1800)", "def post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n post.views += 1\n post.save()\n return render(request, 
\"postdetail.html\", {\"post\": post})", "def update_all_posts():\n for post in CURRENT_POSTS:\n update_tag(post)", "def post(request, slug):\n\tsingle_post = get_object_or_404(Post, slug=slug)\n\tsingle_post.views += 1\n\tsingle_post.save()\n\treturn render(request, 'blog/single_post.html', {'single_post': single_post})", "def set_total(self, valeurs):\r\n \r\n self._total = valeurs", "def set_Count(self, value):\n super(MoneyReceivedInputSet, self)._set_input('Count', value)", "def updateBotCounts(self, nextCard):\n nextVal = dnUtil.getValue(nextCard)\n state = self.getState()\n counts = self.getCounts(state)\n newCount = counts.copy()\n for value in dnUtil.valuesList:\n if counts[value][2] == 0:\n continue\n update = self.updateCount(value, nextVal, counts[value])\n newCount[value] = update\n self.setCounts(newCount)", "def total(self, total):\n\n self._total = total", "def total(self, total):\n\n self._total = total", "def total(self, total):\n\n self._total = total", "def total(self, total):\n\n self._total = total", "def total(self, total):\n\n self._total = total", "def total(self, total):\n\n self._total = total", "def total(self, total):\n\n self._total = total", "def total(self, total):\n\n self._total = total", "def resetItemsFreqDist(self):\n from models import PostItem\n\n results = PostItem.most_common.delete_everything()\n #results = PostItem.objects.all().delete()\n print \"Resetting %s items...\" % results.count()\n return\n updated = 0\n for res in results:\n if res.numeric != 0:\n print \"Resetting: %s # %s\" % (res.word, updated)\n res.numeric = 0\n res.save()\n updated += 1\n return updated", "def set_count(self, count):\n self._count = count", "def update_count(self, source, geometry, count):\n if source in self._counts:\n if geometry in self._counts[source]:\n self._total_count -= self._counts[source][geometry]\n self._counts[source][geometry] = count\n else:\n self._counts[source] = {geometry: count}\n self._total_count += count\n self.change_text()", "def set_count(self, count, asset=None):\n self._set_property('pc:count', count, asset)", "def number_of_pages(self, number_of_pages):\n self._number_of_pages = number_of_pages", "def __init__(self, ):\n super().__init__()\n self.duplicate_posts = 0\n self.new_posts = 0\n self.client = MongoClient('mongodb://{}:{}/'.format(_config['db']['hostname'], _config['db']['port']))\n self.db = self.client[_config['db']['database']]\n self.posts = self.db[_config['db']['collection']]\n self.posts.ensure_index(\"postId\", unique=True)", "def pagecount(self):\r\n \r\n return len(self.results) // self.perpage + 1", "def __init__(self):\n self.cnt = 0\n self.follow_map = defaultdict(set)\n self.posts = defaultdict(list)", "def message_count(self, message_count):\r\n\r\n self._message_count = message_count", "def set_total_threads(self, total_thread: int) -> 'General':\n self.total_threads = total_thread\n return self", "def __len__(self):\n\n num_pages = self.get_num_pages()\n\n self.logger.info(f\"Num pages: {num_pages}\")\n\n params = {\"page\": num_pages}\n url = add_query_params(self.url, params)\n\n # get the amount of data on last page\n data, _, result = self.retrieve_data(url)\n\n if result == GithubApiResult.SUCCESS: \n return (100 * (num_pages -1)) + len(data)\n\n self.logger.debug(\"Unable to retrieve data length from api\")\n return 0", "def status_counts(self, status_counts):\n\n self._status_counts = status_counts", "def get_no_of_pages(self, keyword, since=None, to=None):\n response = self.get_news(keyword, since, 
to)\n total_results = json.loads(response)['totalResults']\n return 1 if total_results/20 == 0 else ceil(total_results/20)", "def count_term(count):\n return ngettext('{0} post', '{0} posts', count).format(count)", "def user_count(self, user_count):\n\n self._user_count = user_count", "def user_count(self, user_count):\n\n self._user_count = user_count", "def hyperv_host_count(self, hyperv_host_count):\n\n self._hyperv_host_count = hyperv_host_count", "def pages_count(self, pages_count):\n if pages_count is None:\n raise ValueError(\"Invalid value for `pages_count`, must not be `None`\")\n\n self._pages_count = pages_count", "def get_total_count(self):\n return self.total_count", "def thread_posts_append(thread, post, initiator):\n thread.length += 1\n thread.updated = datetime.utcnow()", "def create_total_count(self) -> int:\n assert self.count_map is not None, 'count map is not initialized'\n\n res = sum(self.count_map.values())\n self.total_count = res\n return res", "def totalCount(self):\n return sum(self.values())", "def totalCount(self):\n return sum(self.values())" ]
[ "0.63555706", "0.6204205", "0.56337523", "0.55793494", "0.5573961", "0.5543833", "0.5520672", "0.55201143", "0.5465146", "0.5398032", "0.53932655", "0.53263414", "0.52809906", "0.52755684", "0.5259448", "0.5227877", "0.51914954", "0.5175302", "0.51708394", "0.5170826", "0.51669246", "0.5157397", "0.5154564", "0.5126761", "0.5123188", "0.5097314", "0.50963193", "0.5094919", "0.5094919", "0.5079578", "0.5074809", "0.50714874", "0.5064956", "0.50616443", "0.50616443", "0.50616443", "0.50616443", "0.50599885", "0.50471514", "0.5046447", "0.5027432", "0.5008795", "0.50010073", "0.49882793", "0.49819583", "0.49759063", "0.49759063", "0.49746546", "0.49641967", "0.4960945", "0.49433395", "0.49316117", "0.4930336", "0.49269584", "0.4926935", "0.49243385", "0.49152952", "0.49148047", "0.4914042", "0.4904275", "0.49000382", "0.4892504", "0.4890594", "0.48863629", "0.48832226", "0.48695248", "0.48695207", "0.48638916", "0.486132", "0.48508754", "0.48508754", "0.48508754", "0.48508754", "0.48508754", "0.48508754", "0.48508754", "0.48508754", "0.48473305", "0.48419535", "0.48380813", "0.48323444", "0.4824615", "0.4824353", "0.48144653", "0.48105726", "0.47989333", "0.4795716", "0.47894982", "0.47785318", "0.4768151", "0.47639057", "0.4761778", "0.4761778", "0.47564152", "0.47546342", "0.47530553", "0.47352016", "0.47340444", "0.4729451", "0.4729451" ]
0.84554565
0
Fetches posts from a VK domain asynchronously and puts them into the "posts" attribute.
async def fetch_posts(self) -> None: async def fetch_posts_for_offset(offset) -> list: logger.info( "(offset %i) Start fetching posts from vk.com/%s...", offset, self.vk_domain, ) # VK Script code for /execute method. vks_code = get_wall_post_template.substitute( { "domain": self.vk_domain, "offset": offset, "posts_per_portion": self._posts_per_portion, "execution_times": self._execution_times, } ) params = { "v": settings.VKAPI_VERSION, "access_token": settings.VKAPI_TOKEN, "code": vks_code, } url = self._url_execute # Posts fetching. resp_json = await vk_asynchronous_request( url, params, domain=self.vk_domain, offset=offset, ) logger.info( "(offset %i) End fetching posts from vk.com/%s...", offset, self.vk_domain, ) # Gathered posts handling. posts_from_vk = resp_json["response"]["items"] posts = posts_as_schemas(posts_from_vk) del posts_from_vk return posts # Checks and preparations. await self._set_total_posts_in_domain() if not self._total_posts_in_domain: return # Creating tasks for fetching. tasks = [] posts_per_task = self._posts_per_portion * self._execution_times offsets = list(range(0, self._total_posts_in_domain, posts_per_task)) for offset in offsets: tasks.append(asyncio.create_task(fetch_posts_for_offset(offset))) # Running tasks. logger.info("Start fetching posts from vk.com/%s...", self.vk_domain) results = await asyncio.gather(*tasks) logger.info("End fetching posts from vk.com/%s...", self.vk_domain) # Flatting results from many tasks into one list. self._posts = [post for result in results for post in result] # Final actions. if self.sort_by_likes: self._posts = list(sorted(self.posts, key=lambda p: p.likes, reverse=True)) if self.amount_to_fetch: self._posts = self._posts[: self.amount_to_fetch]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch_posts():\n get_chain_address = F\"{CONNECTED_NODE_ADDRESS}/chain\"\n response = requests.get(get_chain_address)\n if response.status_code == 200:\n content = []\n chain = json.loads(response.content)\n for block in chain[\"chain\"]:\n for tx in block[\"transactions\"]:\n tx[\"index\"] = block[\"index\"]\n tx[\"hash\"] = block[\"previous_hash\"]\n content.append(tx)\n \n global posts \n posts = sorted(content,\n key=lambda k: k['timestamp'],\n reverse=True)", "def fetch_posts():\n get_chain_address = \"{}/chain\".format(CONNECTED_NODE_ADDRESS)\n response = requests.get(get_chain_address)\n if response.status_code == 200:\n content = []\n chain = json.loads(response.content)\n for pos, block in enumerate(chain[\"chain\"]):\n if pos ==0:\n pass\n else:\n for tx in list(block[\"transactions\"].values()):\n tx[\"index\"] = block[\"index\"]\n tx[\"hash\"] = block[\"previous_hash\"]\n content.append(tx)\n\n global posts\n posts = sorted(content, key=lambda k: k['timestamp'],\n reverse=True)", "def get_posts():\n url = app.config['POSTS_ENDPOINT']\n response = requests.get(url, params={})\n if response.status_code == 200:\n return parse_posts(response.json())\n raise RuntimeError('Error in retrieving posts.')", "async def _set_total_posts_in_domain(self) -> None:\n\n logger.info('Getting total posts in \"vk.com/%s\"...', self.vk_domain)\n\n params = {\n \"v\": settings.VKAPI_VERSION,\n \"access_token\": settings.VKAPI_TOKEN,\n \"count\": 1, # Enough just to get total post in domain.\n \"domain\": self.vk_domain,\n }\n\n # Data fetching.\n response = await vk_asynchronous_request(\n self._url_wall_get,\n params,\n domain=self.vk_domain,\n )\n\n self._total_posts_in_domain = response[\"response\"][\"count\"]\n logger.info(\"Total posts in VK domain: %s\", self._total_posts_in_domain)", "def get_posts(self): #return list of posts that are associated with this blog_id\n return Post.find_posts_for_blog_id(self.blog_id) #this will return a list of posts objects", "def remotePostList(host, posts, public):\n post_list = list()\n posts = posts.get('posts')\n for post in posts:\n author = remoteAddAuthor(post.get('author'))\n title = post.get('title')\n description = post.get('description')\n contentType = post.get('contentType')\n content = post.get('content')\n published = utc.localize(datetime.strptime(post.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ'))\n visibility = post.get('visibility')\n unlisted = post.get('unlisted')\n id = post.get('id')\n origin = post.get('source')\n comments = remoteCommentList(post)\n count = post.get('count')\n next = \"{}/api/posts/{}/comments\".format(DOMAIN, id)\n if host.endswith(\"/\"):\n host = host[:-1]\n source = \"{}/posts/{}\".format(host, post.get('id'))\n\n post_dict = {'author': author, 'title': title, 'description': description,\n 'contentType': contentType, 'content': content, 'published': published,\n 'visibility': visibility, 'unlisted': unlisted, 'id': id,\n 'comments': comments, 'origin': origin,\n 'source': source, 'count': count, 'next': next}\n post_list.append(post_dict)\n return post_list", "def _urlfetch_async(**kwargs):\n return ndb.get_context().urlfetch(**kwargs)", "def get_posts(wp):\n from wordpress_xmlrpc.methods.posts import GetPosts\n\n all_posts = []\n\n offset = 0\n increment = 20\n while True:\n posts = wp.call(GetPosts({'number': increment, 'offset': offset, 'post_type': 'post'}))\n if len(posts) == 0:\n break # no more posts returned\n for post in posts:\n all_posts.append(post)\n\n offset = offset + increment\n\n return 
all_posts", "def get(self):\n return get_all_posts()", "def get_posts(self):\n return self.blog_posts.all()", "def posts(self, limit=100, all=False):\n source, edge = self.id, \"feed\"\n return lazygen(Post, source, edge,\n limit=limit, get_all=all)", "def get_posts():\n db = psycopg2.connect(\"dbname=forum\")\n c = db.cursor()\n query = \"SELECT content, time FROM posts ORDER BY time DESC\"\n c.execute(query)\n rows = c.fetchall()\n POSTS = rows\n db.close()\n return POSTS", "def posts_get():\n \n\n # Get and filter the posts from the database\n songs = session.query(models.Song).all()\n \n # Convert the posts to JSON and return a response\n data = json.dumps([song.as_dictionary() for song in songs])\n return Response(data, 200, mimetype=\"application/json\")", "def fetch_post(page_num):\n req = POST_API.format(page_num=page_num)\n try:\n response = requests.get(req)\n response.raise_for_status()\n posts = response.json()\n objects = list()\n for json_post in posts:\n fetch_author.delay(json_post.get('author'), json_post.get(\"_links\", dict()).get('authors', []))\n title = BeautifulSoup(json_post.get('title', dict()).get('rendered', \"\"), \"lxml\").text\n content = BeautifulSoup(json_post.get('content', dict()).get('rendered', \"\"), \"lxml\").text\n post = Article(id=json_post.get('id'),\n date=json_post.get('date_gmt', datetime.now()),\n modified=json_post.get('modified_gmt', datetime.now()),\n title=title,\n content=content,\n author_id=json_post.get('author')\n )\n objects.append(post)\n s = Session()\n s.bulk_save_objects(objects)\n s.commit()\n\n except requests.exceptions.HTTPError as error:\n raise Reject(error)\n except Exception as ex:\n raise Reject(ex)", "def get_posts(url):\r\n feed = feedparser.parse(url)\r\n return feed.entries", "def get_posts(self):\r\n postList = []\r\n for tag in self.setting.imgurTags:\r\n try:\r\n req = requests.get('%s%s' % (self.setting.tagLink, tag), headers=self.setting.imgurHeaders)\r\n for post in req.json()['data']['items']:\r\n p = self.json_to_post(post, tag)\r\n if p is not None:\r\n postList.append(p)\r\n except Exception as e:\r\n self.logger.log(logger.LogLevel.CRITICAL, 'imgur.get_posts exception(%s): %s' % (tag, e))\r\n break\r\n return postList", "def getPosts(self):\n # TODO do we really need threading here or it can just do fine without\n allPosts = []\n threads = []\n feedTime = self.startTime\n for oneUrl in self.newsFeeds:\n thread = FeedparserThread(oneUrl, self.startTime, allPosts)\n threads.append(thread)\n thread.start()\n\n # Joining all threads into one\n for thread in threads:\n thread.join()\n\n return allPosts", "def getMyPosts():\n \n cur, user_id = initialise(3)\n cur.execute(\"SELECT username FROM users WHERE id = ?\", [user_id])\n name = cur.fetchall()[0][0]\n cur.execute(\"SELECT * FROM posts WHERE name = ?\", [name])\n posts = cur.fetchall()\n return posts", "def task_fetch_posts(\n author_id,\n count=28,\n posts_out='data/posts_data.xlsx'):\n\n # Create query instances for posts\n post_query = Query(PostParser)\n\n # Query posts data\n post_data = post_query.query_all(POSTS_QUERY_HASH_PARAM, {\n \"id\": author_id,\n \"first\": 50,\n }, count)\n logger.info(\"Count of posts data: %d\" % len(post_data))\n\n # Save the posts data\n post_data_df = pd.DataFrame(post_data)\n post_data_df.to_excel(posts_out, encoding='utf-8', index=False)\n logger.info(\"Save the posts data to %s.\" % posts_out)", "def get_posts(account, pages=10, timeout=5, sleep=0):\n global _session, _timeout\n\n url = 
f'{_base_url}/{account}/posts/'\n\n _session = HTMLSession()\n _session.headers.update(_headers)\n\n _timeout = timeout\n response = _session.get(url, timeout=_timeout)\n html = response.html\n cursor_blob = html.html\n\n while True:\n for article in html.find('article'):\n yield _extract_post(article)\n\n pages -= 1\n if pages == 0:\n return\n\n cursor = _find_cursor(cursor_blob)\n next_url = f'{_base_url}{cursor}'\n\n if sleep:\n time.sleep(sleep)\n\n try:\n response = _session.get(next_url, timeout=timeout)\n response.raise_for_status()\n data = json.loads(response.text.replace('for (;;);', '', 1))\n except (RequestException, ValueError):\n return\n\n for action in data['payload']['actions']:\n if action['cmd'] == 'replace':\n html = HTML(html=action['html'], url=_base_url)\n elif action['cmd'] == 'script':\n cursor_blob = action['code']", "def api_get_thread_posts(request, opening_post_id):\n\n opening_post = get_object_or_404(Post, id=opening_post_id)\n thread = opening_post.get_thread()\n posts = thread.get_replies()\n\n json_data = {\n 'posts': [],\n 'last_update': None,\n }\n json_post_list = []\n\n for post in posts:\n json_post_list.append(_get_post_data(post.id))\n json_data['last_update'] = datetime_to_epoch(thread.last_edit_time)\n json_data['posts'] = json_post_list\n\n return HttpResponse(content=json.dumps(json_data))", "def recent_posts(self):\n\n try:\n jsondoc = json.load(urllib.urlopen(\"http://reddit.com/user/%s.json\" % self.username))\n except:\n raise self.DoesNotExist\n \n posts = []\n for item in jsondoc['data']['children']:\n if item['kind'] == 't1':\n posts.append(Comment(item['data']))\n elif item['kind'] == 't3':\n posts.append(item['data'])\n\n return posts", "def get_public_posts(server_posts):\n public_list = server_posts\n servers = Server.objects.all()\n\n for server in servers:\n if server.username and server.password:\n host = server.hostname\n if not host.endswith(\"/\"):\n host = host + \"/\"\n server_api = \"{}posts\".format(host)\n try:\n s = requests.Session()\n # https://stackoverflow.com/questions/15431044/can-i-set-max-retries-for-requests-request\n retries = Retry(total=5,\n backoff_factor=0.1,\n status_forcelist=[500, 502, 503, 504])\n\n s.mount('http://', HTTPAdapter(max_retries=retries))\n s.mount('https://', HTTPAdapter(max_retries=retries))\n\n r = s.get(server_api, auth=(server.username, server.password))\n\n if r.status_code == 200:\n posts = remotePostList(server.hostname, r.json(), public_list)\n public_list.extend(posts)\n public_list = sorted(public_list, key=lambda k: k['published'], reverse=True)\n public_list = [next(v) for k, v in groupby(public_list, lambda d: d[\"id\"])]\n\n except:\n print('error')\n return public_list", "def generatePosts(self,**kwargs):\n oldestTimeSoFar = None\n while True:\n if oldestTimeSoFar is None:\n items = self.getPosts(**kwargs)\n else:\n items = self.getPosts(before_time=oldestTimeSoFar,**kwargs)\n if not items:\n return\n for item in items:\n yield item\n oldestTimeSoFar = item['published_at']\n time.sleep(0.5)", "def get_posts(account, pages=10, timeout=5, sleep=0):\n\n url = f'{_base_url}/{account}/posts/'\n\n session = HTMLSession()\n session.headers.update({'Accept-Language': 'en-US,en;q=0.5'})\n\n response = session.get(url, timeout=timeout)\n html = response.html\n cursor_blob = html.html\n\n while True:\n for article in html.find('article'):\n yield _extract_post(article)\n\n pages -= 1\n if pages == 0:\n return\n\n cursor = _find_cursor(cursor_blob)\n next_url = f'{_base_url}{cursor}'\n\n 
if sleep:\n time.sleep(sleep)\n\n try:\n response = session.get(next_url, timeout=timeout)\n response.raise_for_status()\n data = json.loads(response.text.replace('for (;;);', '', 1))\n except (RequestException, ValueError):\n return\n\n for action in data['payload']['actions']:\n if action['cmd'] == 'replace':\n html = HTML(html=action['html'], url=_base_url)\n elif action['cmd'] == 'script':\n cursor_blob = action['code']", "def get_post(self):\n\n if self.gotten: return\n self.get_text()\n self.get_keywords()\n self.get_poll()\n self.get_schedule()\n self.get_expiration()\n self.get_files()\n self.set_text()\n if Settings.get_performer_category() or self.hasPerformers:\n self.get_performers()\n else:\n self.performers = \"unset\"\n self.gotten = True", "async def scrape_and_post(self):\n # Scrape latest challenge posts\n challenges = self.scraper.scrape()\n await self._update_rooms(challenges)", "def get(self):\n\n self.render_posts()", "async def getPostData(self, PostID):\n url = self.urlGen(id=str(PostID))\n XML =None\n with async_timeout.timeout(10):\n async with self.session.get(url=url) as XML:\n XML = await XML.read()\n XML = self.ParseXML(ET.XML(XML))\n data = XML['posts']['post']\n return data\n return None", "def run(self) -> None:\n self.urls_list = self._create_api_ulr_list()\n self.results = self._sort_results(\n AsyncGetAPI(\n self.urls_list, self.threads, max_requests=self.max_requests\n ).results\n )", "def run(self):\n results = self.fetch()\n return results", "def iter_all_posts(self, limit=None): # new\n feed = self.get_feed(limit=999999)\n posts = feed[\"threads\"]\n if limit:\n posts = posts[:limit]\n for post in posts:\n yield post", "def posts_for_feed():\n user_id = session.get('user_id')\n friend_posts = Post.query.join(Friend, db.and_(Post.user_id == Friend.user_2,\n Friend.active == True)).outerjoin(Comment, db.and_(Comment.post_id == Post.post_id,\n Comment.active == True)).filter(Friend.user_1 == user_id,\n Post.active == True).order_by(Post.post_id.desc()).all()\n\n post_list = []\n for post in friend_posts:\n post_list.append(post.to_dict_for_json())\n\n resp = make_response(jsonify(post_list), 200)\n return resp", "def load_posts(post_ids, current_user_id=None):\r\n logging.warn(\"Ids===={}\".format(post_ids))\r\n\r\n # If list is not used, or any call that trigger __iter__ will end up with the query syntax\r\n # rather than the data itself.\r\n #posts_query = Post.objects.filter(id__in=post_ids).limit(100).allow_filtering()\r\n #post_counters = list(PostCounter.objects.filter(id__in=post_ids).limit(100).allow_filtering())\r\n\r\n post_objects = []\r\n # ok ,\r\n for post_id in post_ids:\r\n p = Post.objects.get(id=post_id)\r\n\r\n try:\r\n pc = PostCounter.objects.get(id=post_id) #filter(lambda x: x.id == post.id, post_counters)\r\n stats = pc._as_dict()\r\n del stats['id']\r\n p.__dict__['statistics'] = stats\r\n except DoesNotExist, dne:\r\n pass\r\n\r\n if current_user_id is not None:\r\n try:\r\n pv = PostVote.objects.get(post_id=post_id, user_id=current_user_id)\r\n p.__dict__['upvoted'] = True\r\n except DoesNotExist, dne:\r\n pass\r\n post_objects.append(p)\r\n\r\n return post_objects", "def load_posts():\n \n with open(FILE_NAME, 'r') as f:\n return pickle.load(f)", "def task_fetch_tag_posts(\n tag_name,\n count=100,\n posts_out='data/tag_posts_data.xlsx'):\n # Create query instances for posts\n post_query = Query(TagPostParser)\n\n # Query posts data\n post_data = post_query.query_all(TAG_POSTS_QUERY_HASH_PARAM, {\n \"tag_name\": tag_name,\n 
\"first\": 50,\n }, count)\n logger.info(\"Count of posts data: %d\" % len(post_data))\n\n # Save the posts data\n post_data_df = pd.DataFrame(post_data)\n post_data_df.to_excel(posts_out, encoding='utf-8', index=False)\n logger.info(\"Save the posts data to %s.\" % posts_out)", "def _populate_posts(self, channel, url):\n import feedparser\n\n Post = get_model('articles', 'Post')\n Image = get_model('images', 'Image')\n\n parser = feedparser.parse(url)\n\n for entry in parser['entries']:\n # Some entries are incomplete and have only the title, need to\n # ignore these entries.\n if not entry.get('summary'):\n continue\n\n # The title may have only 140 characters\n title = self._truncate_string(entry['title'], 140)\n slug = slugify(title)\n headline = entry['summary']\n\n # Some entries do not have the 'content' field, in this case we\n # get the 'summary' field instead.\n if entry.get('content'):\n content = entry['content'][0]['value']\n else:\n content = entry['summary']\n\n # When we find a entry that already is registered we don't need\n # continue because the following registries already be registered.\n exists = Post.objects.filter(slug=slug).count()\n if exists:\n break\n\n # Check if has some image in the post content.\n # NOTE: For the best user experience we use only the posts that\n # have images.\n image_url = self._get_image_url_in_content(content)\n if image_url:\n main_image = Image.objects.create(\n title=title,\n slug=slug,\n archive_link=image_url,\n published=True,\n user=self._user\n )\n # Generate the 'short_title' based on 'content'\n short_title = re.sub('<[^<]+?>', '', content).encode('utf-8')\n short_title = self._truncate_string(short_title.strip(), 140)\n\n post = Post.objects.create(\n title=title,\n short_title=short_title,\n slug=slug,\n headline=headline,\n content=content,\n channel=channel,\n main_image=main_image,\n show_on_root_channel=True,\n published=True,\n hat='',\n user=self._user\n )", "def get_posts():\n\n\tposts = []\n\n\trows = db().select(db.survey.ALL, orderby=~db.survey.created_on)\n\tfor i, r in enumerate(rows):\n\t\t\n\t\t\tt = dict(\n\t\t\t\tuser_email = r.user_email,\n\t\t\t\tuser_name = get_user_name_from_email(r.user_email),\n\t\t\t\tquestion = r.question,\n\t\t\t\tcreated_on = r.created_on,\n\t\t\t\topt1 = r.opt1,\n\t\t\t\topt2 = r.opt2,\n\t\t\t\topt3 = r.opt3,\n\t\t\t\topt4 = r.opt4,\n\t\t\t\tres1 = r.res1,\n\t\t\t\tres2 = r.res2,\n\t\t\t\tres3 = r.res3,\n\t\t\t\tres4 = r.res4,\n\t\t\t\t#created_on_human = humanize.naturaltime(r.created_on),\n\t\t\t\tupdated_on = r.updated_on,\n\t\t\t\t#updated_on_human = r.updated_on_human,\n\t\t\t\tid = r.id,\n\t\t\t)\n\t\t\tposts.append(t)\n\n\tlogged_in = auth.user_id is not None\n\temail = None\n\tif logged_in:\n\t\temail = auth.user.email\n\n\treturn response.json(dict(\n\t\tposts=posts,\n\t\tlogged_in=logged_in,\n\t\temail=email,\n\t))", "def get_posts(self):\n return Post.select().where (Post.user == self)", "def postList(posts):\n post_list = list()\n for post in posts:\n visible_to = list()\n visible = post.visibleTo.all()\n if visible:\n for author in visible:\n auth = \"{}/api/author/{}\".format(DOMAIN, author.id)\n visible_to.append(auth)\n\n comments = commentList(post)\n comment_url = \"{}/api/posts/{}/comments\".format(DOMAIN, post.id)\n post_dict = {'author': addAuthor(post.author), 'title': post.title, 'description': post.description,\n 'contentType': post.contentType, 'content': post.content, 'published': post.published,\n 'visibility': post.visibility, 'visibleTo': visible_to, 
'unlisted': post.unlisted, 'id': post.id,\n 'comments': comments[:5], 'next': comment_url, 'count': len(comments),\n 'origin': \"{}/api/posts/{}\".format(DOMAIN, post.id),\n 'source': \"{}/api/posts/{}\".format(DOMAIN, post.id)}\n post_list.append(post_dict)\n return post_list", "def getPosts():\n\n cur, user_id = initialise(3)\n cur.execute(\"SELECT username FROM users WHERE id = ?\", [user_id])\n name = cur.fetchall()[0][0]\n cur.execute(\"SELECT * FROM posts WHERE name IN (SELECT following FROM followers WHERE user = ?) OR name = ?\", (name, name))\n posts = cur.fetchall()\n return posts", "def json_posts_latest():\n posts = posts_base.order_by(Post.pubdate.desc())[:app.config['FEEDITEMS']]\n out = {'posts': []}\n for post_result in posts:\n post_dict = get_public_post_dict(post_result[0], post_result[2])\n out['posts'].append(post_dict)\n\n return jsonify(out)", "def api_get_threads(request, count):\n\n if PARAMETER_TAG in request.GET:\n tag_name = request.GET[PARAMETER_TAG]\n if tag_name is not None:\n tag = get_object_or_404(Tag, name=tag_name)\n threads = tag.threads.filter(archived=False)\n else: \n threads = Thread.objects.filter(archived=False)\n\n if PARAMETER_OFFSET in request.GET:\n offset = request.GET[PARAMETER_OFFSET]\n offset = int(offset) if offset is not None else 0\n else:\n offset = 0\n\n threads = threads.order_by('-bump_time')\n threads = threads[offset:offset + int(count)]\n\n opening_posts = []\n for thread in threads:\n opening_post = thread.get_opening_post()\n\n # TODO Add tags, replies and images count\n opening_posts.append(_get_post_data(opening_post.id,\n include_last_update=True))\n\n return HttpResponse(content=json.dumps(opening_posts))", "def pull_articles(self, *args, **kwargs):\n tasks.pull_articles()\n return Response({})", "def get(self):\n url = \"http://twitter.com/statuses/public_timeline.json\"\n task = taskqueue.Task(\n url='/tasks/fetch',\n params={'url': url}\n )\n task.add('fetch')", "async def getposts(ctx, theme):\n q = Query(limit=100, tag=\"travelfeed\")\n for post in Discussions_by_created(q):\n continent_code = get_location(post['body'], \"continentcode\")\n link = \"https://steemit.com/\"+construct_authorperm(post['author'], post['permlink'])\n if post['author'] in curatorlist or post['author'] in whitelist:\n continue\n elif (continent_code == \"AF\" or continent_code == \"OC\" or continent_code == \"AN\") and (theme == \"Africa\" or theme == \"Oceania\" or theme ==\"Australia\" or theme == \"australiaoceaniaafrica\"):\n await bot.say(link)\n elif continent_code == \"AS\" and theme == \"Asia\":\n await bot.say(link)\n elif continent_code == \"EU\" and theme == \"Europe\":\n await bot.say(link)\n elif (continent_code == \"SA\" or continent_code == \"NA\") and theme == \"America\":\n await bot.say(link)\n elif (\"food\" in post['body'] or \"eat\" in post['body'] or \"restaurant\" in post['body']) and (theme == \"Food\" or theme ==\"foodoftheworld\"):\n await bot.say(link)\n elif (\"advice\" in post['body'] or \"budget\" in post['body'] or \"learn\" in post['body']) and (theme == \"Advice\" or theme == \"Travel Advice\" or theme == \"traveladvice\"):\n await bot.say(link)", "async def fetch_data(self) -> T:", "def get_posts(username):\r\n\r\n # Authenticate to Twitter\r\n auth = tweepy.OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)\r\n auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)\r\n\r\n api = tweepy.API(auth)\r\n\r\n try:\r\n 
api.verify_credentials()\r\n print(\"Authentication OK\")\r\n except:\r\n print(\"Error during authentication\")\r\n\r\n alltweets=[]\r\n\r\n new_tweets = api.user_timeline(screen_name = username,count=200,tweet_mode='extended')\r\n status = new_tweets[0]\r\n json_str = json.dumps(status._json)\r\n\r\n #convert to string\r\n json_str = json.dumps(status._json)\r\n #deserialise string into python object\r\n parsed = json.loads(json_str)\r\n print(json.dumps(parsed, indent=4, sort_keys=True))\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # save the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n # keep grabbing tweets until there are no tweets left to grab\r\n while len(new_tweets) > 0:\r\n print(f\"getting tweets before {oldest}\")\r\n\r\n # all subsiquent requests use the max_id param to prevent duplicates\r\n new_tweets = api.user_timeline(screen_name=username, count=200, max_id=oldest,tweet_mode='extended')\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # update the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n print(f\"...{len(alltweets)} tweets downloaded so far\")\r\n\r\n\r\n outtweets=[]\r\n\r\n\r\n for item in alltweets:\r\n\r\n mined = {\r\n 'tweet_id': item.id,\r\n 'name': item.user.name,\r\n 'screen_name': item.user.screen_name,\r\n 'retweet_count': item.retweet_count,\r\n 'lang' : item.lang,\r\n 'text': item.full_text,\r\n 'mined_at': datetime.datetime.now(),\r\n 'created_at': item.created_at,\r\n 'favourite_count': item.favorite_count,\r\n 'hashtags': item.entities['hashtags'],\r\n 'status_count': item.user.statuses_count,\r\n 'location': item.place,\r\n 'source_device': item.source\r\n }\r\n\r\n try:\r\n mined['retweet_text'] = item.retweeted_status.full_text # In case the tweet is a RT, there is a need to\r\n # retrieve the retweet_text field which contains the full comment (up to 280 char) accompanying the retweet\r\n except:\r\n mined['retweet_text'] = ''\r\n\r\n outtweets.extend([mined])\r\n\r\n return outtweets", "def get_all_posts_from_collection(self):\n response = self.get_comments_all_posts(PAYLOAD)\n collection = (response.json())\n return collection", "def get_post(self):\n\t\tself.post = graph.get_object(POST_ID)", "def get_posts():\n\n error_on_unauthorized()\n \n posts = Post.query.order_by(Post.id)\n total_num = posts.count()\n\n if total_num == 0:\n return jsonify(total=0, uploads=[])\n\n try:\n count = int(request.args.get('max', total_num))\n page = int(request.args.get('page', 1))\n origin = request.args.get('origin', None)\n\n if count <= 0 or page <= 0:\n raise APIError(422, \"Query parameters out of range\")\n\n if origin is not None:\n posts = posts.filter(User.origin == origin)\n\n begin = (page - 1) * count\n end = min(begin + count, total_num)\n \n return jsonify(total=total_num, posts=[p.to_dict() for p in posts.all()[begin:end]]), 200\n except ValueError:\n raise APIError(422, \"Invalid query parameter\")", "def get_all_posts_json():\n\n posts = [\n {\n \"postId\": post.post_id,\n \"postPrompt\" : crud.get_prompt_by_prompt_id(post.prompt_id),\n \"postText\": post.post_text,\n \"location\": post.user_facing_location,\n \"dateCreated\": post.created_at,\n \"toneQualities\": crud.get_tone_qualities_by_post_id(post.post_id),\n }\n for post in crud.get_post_by_user_id(session['user_id'])\n ]\n\n return jsonify(posts)", "def posts_as_schemas(posts_from_vk: list[dict]) -> list[Post]:\n posts = []\n\n for post_from_vk in posts_from_vk:\n try:\n 
post = Post(\n date=post_from_vk[\"date\"],\n likes=post_from_vk[\"likes\"][\"count\"],\n text=post_from_vk[\"text\"],\n path=f\"wall{post_from_vk['owner_id']}_\" f\"{post_from_vk['id']}\",\n photos=[],\n videos=[],\n )\n except KeyError as exc:\n logger.error(\"No key %s for post: %s\", exc, post_from_vk)\n continue\n\n # Collect attachments (photos, videos etc.).\n if \"attachments\" in post_from_vk:\n attachments = post_from_vk[\"attachments\"]\n for attachment in attachments:\n if attachment[\"type\"] == \"photo\":\n try:\n photo = PostPhoto(url=\"\")\n photo.url = attachment[\"photo\"][\"sizes\"][-1][\"url\"]\n post.photos.append(photo)\n except KeyError as exc:\n logger.error(\"No key %s for photo: %s\", exc, post_from_vk)\n\n elif attachment[\"type\"] == \"video\":\n video = PostVideo(first_frame_url=\"\")\n video_from_vk = attachment[\"video\"]\n if \"first_frame\" in video_from_vk:\n video.first_frame_url = video_from_vk[\"first_frame\"][-1][\"url\"]\n elif \"image\" in video_from_vk:\n video.first_frame_url = video_from_vk[\"image\"][-1][\"url\"]\n else:\n logger.error(\"No video image found: %s\", post)\n continue\n post.videos.append(video)\n\n posts.append(post)\n\n return posts", "def get_posts(self):\r\n\r\n sub_dict = {\r\n 'selftext': [], 'title': [], 'id': [], 'sorted_by': [],\r\n 'num_comments': [], 'score': [], 'ups': [], 'downs': []}\r\n csv = f'{self.sub}_posts.csv'\r\n\r\n # Attempt to specify a sorting method.\r\n sort, subreddit = self.set_sort()\r\n\r\n # Set csv_loaded to True if csv exists since you can't\r\n # evaluate the truth value of a DataFrame.\r\n df, csv_loaded = (pd.read_csv(csv), 1) if isfile(csv) else ('', 0)\r\n\r\n print(f'csv = {csv}')\r\n print(f'After set_sort(), sort = {sort} and sub = {self.sub}')\r\n print(f'csv_loaded = {csv_loaded}')\r\n\r\n print(f'Collecting information from r/{self.sub}.')\r\n\r\n for post in subreddit:\r\n\r\n # Check if post.id is in df and set to True if df is empty.\r\n # This way new posts are still added to dictionary when df = ''\r\n unique_id = post.id not in tuple(df.id) if csv_loaded else True\r\n\r\n # Save any unique posts to sub_dict.\r\n if unique_id:\r\n sub_dict['selftext'].append(post.selftext)\r\n sub_dict['title'].append(post.title)\r\n sub_dict['id'].append(post.id)\r\n sub_dict['sorted_by'].append(sort)\r\n sub_dict['num_comments'].append(post.num_comments)\r\n sub_dict['score'].append(post.score)\r\n sub_dict['ups'].append(post.ups)\r\n sub_dict['downs'].append(post.downs)\r\n sleep(0.1)\r\n\r\n new_df = pd.DataFrame(sub_dict)\r\n\r\n # Add new_df to df if df exists then save it to a csv.\r\n if 'DataFrame' in str(type(df)) and self.mode == 'w':\r\n pd.concat([df, new_df], axis=0, sort=0).to_csv(csv, index=False)\r\n print(\r\n f'{len(new_df)} new posts collected and added to {csv}')\r\n elif self.mode == 'w':\r\n new_df.to_csv(csv, index=False)\r\n print(f'{len(new_df)} posts collected and saved to {csv}')\r\n else:\r\n print(\r\n f'{len(new_df)} posts were collected but they were not '\r\n f'added to {csv} because mode was set to \"{self.mode}\"')", "def post(self):\n url = self.request.get('url')\n try:\n response = urlfetch.fetch(url)\n if response.status_code == 200:\n items = simplejson.loads(response.content)\n key = Batch(pickled_items=pickle.dumps(items)).put()\n if key:\n taskqueue.Task(\n url='/tasks/etl',\n params={'batch_id': key.id()}\n ).add('etl')\n else:\n logging.info(\"Fetch failed, got response %d\" % response.status_code)\n except urlfetch_errors.DownloadError, e:\n 
logging.info(\"Twitter responded too slowly. %s\" % e.message)", "def run_async(self) -> StoryHolderDict:\n self.add_futures(self.j_dict)\n loop = asyncio.get_event_loop()\n get_url_futures = asyncio.gather(\n *[f for f in self.responses.values()])\n find_text_futures = asyncio.gather(\n *[f for f in self.find_futures_list])\n\n final_future = asyncio.gather(get_url_futures, find_text_futures)\n\n if not run_from_ipython:\n loop.run_until_complete(final_future)\n else:\n asyncio.ensure_future(final_future)\n return NewsDump.story_dump", "def run_get_post(m):\n\n doc = get_doc(m)\n assert doc is not None\n\n wp = get_wp(m)\n\n post = find_post(wp, doc.identifier)\n\n if post:\n post.content = \"…content elided…\"\n from pprint import pprint\n pprint(post.struct)\n return\n else:\n warn(f\"Didn't find post for identifier {doc.identifier}\")\n return", "def task_fetch_tag_posts_and_comments(\n tag_name,\n count=100,\n posts_out='data/tag_posts_data.xlsx',\n comments_out='data/tag_comments_data.xlsx'):\n\n # Create query instances for posts and comments\n post_query = Query(TagPostParser)\n comment_query = Query(CommentParser)\n\n # Query posts data\n post_data = post_query.query_all(TAG_POSTS_QUERY_HASH_PARAM, {\n \"tag_name\": tag_name,\n \"first\": 50,\n }, count)\n logger.info(\"Count of posts data: %d\" % len(post_data))\n\n # Save the posts data\n post_data_df = pd.DataFrame(post_data)\n post_data_df.to_excel(posts_out, encoding='utf-8', index=False)\n logger.info(\"Save the posts data to %s.\" % posts_out)\n\n # Query comments data of posts\n comment_data = []\n for i, post in enumerate(post_data):\n logger.info(\"Get comment of %d %s\" % (i, post['short_code']))\n comment_data_of_one_post = comment_query.query_all(COMMENTS_QUERY_HASH_PARAM, {\n \"shortcode\": post['short_code'],\n \"first\": 50,\n }, 100)\n for comment in comment_data_of_one_post:\n comment['post_short_code'] = post['short_code']\n comment_data.extend(comment_data_of_one_post)\n logger.info(\"Count of comment_data: %d\" % len(comment_data))\n\n # Save the comments data\n comment_data_df = pd.DataFrame(comment_data)\n comment_data_df.to_excel(comments_out, encoding='utf-8', index=False)\n logger.info(\"Save the comments data to %s.\" % comments_out)", "def get_feed_entries_task():\n get_feed_entries()\n logger.info(\"Entries for Feed\")", "def get_posts(self, url=None):\n if not url:\n url = self.base_url\n\n self.log.debug(\"Getting URL: %s\", url)\n page_data = json.loads(urlopen(url).read().decode(\"utf-8\"))\n\n for post in page_data.get(\"data\", []):\n if \"message\" not in post:\n continue\n\n for word in self.keywords:\n if word in post[\"message\"]:\n self.log.debug(\"Emitting post: %s\", post[\"id\"])\n yield post\n break\n\n paging = page_data.get(\"paging\", {})\n\n if \"next\" in paging:\n for post in self.get_posts(paging[\"next\"]):\n yield post\n\n return", "def parse_posts(posts_dict):\n return posts_dict['posts']", "def task_fetch_posts_and_comments(\n author_id,\n count=28,\n posts_out='data/posts_data.xlsx',\n comments_out='data/comments_data.xlsx'):\n\n # Create query instances for posts and comments\n post_query = Query(PostParser)\n comment_query = Query(CommentParser)\n\n # Query posts data\n post_data = post_query.query_all(POSTS_QUERY_HASH_PARAM, {\n \"id\": author_id,\n \"first\": 50,\n }, count)\n logger.info(\"Count of posts data: %d\" % len(post_data))\n\n # Save the posts data\n post_data_df = pd.DataFrame(post_data)\n post_data_df.to_excel(posts_out, encoding='utf-8', index=False)\n 
logger.info(\"Save the posts data to %s.\" % posts_out)\n\n # Query comments data of posts\n comment_data = []\n for i, post in enumerate(post_data):\n logger.info(\"Get comment of %d %s\" % (i, post['short_code']))\n comment_data_of_one_post = comment_query.query_all(COMMENTS_QUERY_HASH_PARAM, {\n \"shortcode\": post['short_code'],\n \"first\": 50,\n }, None)\n for comment in comment_data_of_one_post:\n comment['post_short_code'] = post['short_code']\n comment_data.extend(comment_data_of_one_post)\n logger.info(\"Count of comment_data: %d\" % len(comment_data))\n\n # Save the comments data\n comment_data_df = pd.DataFrame(comment_data)\n comment_data_df.to_excel(comments_out, encoding='utf-8', index=False)\n logger.info(\"Save the comments data to %s.\" % comments_out)", "def get_last_posts(self):\n last_posts = []\n r = requests.get(self.target_url)\n html = BeautifulSoup(r.content, 'html.parser')\n raw_posts = html.findAll(\"div\", {\"class\": \"item\"})\n\n for post in raw_posts:\n title_element = post.find(\"a\", {\"class\": \"item-link\"})\n title = self.text(title_element)\n href = title_element['href']\n description = self.text(post.find(\"div\", {\"class\": \"item-info-container\"}))\n id_post = str(post['data-adid'])\n price = self.text(post.find(\"span\", {\"class\": \"item-price\"}))\n image_element = post.find_all(\"img\")\n image_src = image_element[0]['data-ondemand-img'] if image_element else None\n complete_href = self.crawler_url + href\n description = '\\n'.join([title, description, price, complete_href])\n last_posts.append(Post(id=id_post, href=complete_href, description=description, image=image_src))\n return last_posts", "def get_user_posts(request):\n if request.method == \"POST\":\n token = request.data.get('token')\n post_id = request.data.get('post_id')\n type_ = request.data.get('type')\n\n if Token.objects.filter(key=token).exists():\n token = get_object_or_404(Token, key=token)\n if post_id == -1:\n posts = Post.objects.all().order_by(\"-date\")[:PAGE_OFFSET]\n elif type_ == 'old':\n posts = Post.objects.filter(pk__lt=post_id).order_by(\"-date\")[:PAGE_OFFSET]\n else: # 'new'\n posts = reversed(Post.objects.filter(pk__gt=post_id).order_by(\"date\")[:PAGE_OFFSET])\n\n serializer = PostSerializer(posts, many=True, context={'user_id': token.user_id})\n return Response({\"success\": 29,\n \"post\": serializer.data})\n else:\n return Response({\"error\": 17})", "def notify_posts(shard, post_list, sequence_numbers=None):\n if not post_list:\n return\n\n if isinstance(post_list[0], basestring):\n post_keys = [ndb.Key(models.Post._get_kind(), post_id)\n for post_id in post_list]\n post_list = yield ndb.get_multi_async(post_keys)\n\n if not sequence_numbers:\n sequence_numbers = [None] * len(post_list)\n\n for post, sequence in zip(post_list, sequence_numbers):\n post.sequence = sequence\n\n posts_json = json.dumps({\n 'posts': marshal_posts(shard, post_list),\n })\n\n login_record_list = presence.get_present_users(shard)\n rpc_list = []\n for login_record in login_record_list:\n logging.debug(\n 'Informing shard=%r, user=%r, nickname=%r about messages '\n 'with sequence_numbers=%r', shard, login_record.user_id,\n login_record.nickname, sequence_numbers)\n browser_token = presence.get_token(login_record.user_id)\n rpc_list.append(send_message_async(browser_token, posts_json))\n\n for rpc in rpc_list:\n try:\n yield rpc\n except channel.Error, e:\n # NOTE: When receiving an InvalidChannelKeyError the message may\n # still be available the next time the user connects to 
the channel\n # with that same application key due to buffering in the backends.\n # The dev_appserver mimics this behavior, but it's not reliable in\n # prod.\n logging.warning('Could not send JSON message to user=%r with '\n 'browser_token=%r. %s: %s', login_record.user_id,\n browser_token, e.__class__.__name__, str(e))", "def users_posts():\n\n user_id = session.get('user_id')\n posts = Post.query.outerjoin(Comment, db.and_(Comment.post_id == Post.post_id, \n Comment.active == True)).filter(Post.user_id == user_id,\n Post.active == True).order_by(Post.post_id.desc()).all()\n post_list = []\n for post in posts:\n post_list.append(post.to_dict_for_json())\n\n resp = make_response(jsonify(post_list), 200)\n\n return resp", "def fetch(self):\r\n self.genre=\"Review\"\r\n try:\r\n self.parent_uri = self.currenturi\r\n self.total_posts_count = 0\r\n self.last_timestamp = datetime( 1980,1,1 )\r\n self.max_posts_count = int(tg.config.get(path='Connector',key='silverlight_numresults'))\r\n self.hrefs_info = self.currenturi.split('/')\r\n if '/forums/t/' in self.currenturi:\r\n if not self.__setSoup():\r\n log.info(self.log_msg('Soup not set , Returning False from Fetch'))\r\n return False\r\n self.__getParentPage()\r\n self.__addPosts()\r\n return True\r\n else:\r\n if not self.__setSoup():\r\n log.info(self.log_msg('Soup not set , Returning False from Fetch'))\r\n return False\r\n while True:\r\n if not self.__getThreadPage():\r\n break\r\n try:\r\n self.currenturi = self.soup.find('a',text='Next >').parent['href']\r\n if not self.__setSoup():\r\n break\r\n except:\r\n log.info(self.log_msg('Next Page link not found'))\r\n break\r\n if self.linksOut:\r\n updateSessionInfo('Search', self.session_info_out,self.last_timestamp , None,'ForumThreadsPage', self.task.instance_data.get('update'))\r\n\r\n return True\r\n except:\r\n log.exception(self.log_msg('Exception in fetch'))\r\n return False", "async def fetch_and_parse(self, timeout=10):\n\n headers = {}\n if self.username and self.password:\n creds = f'{self.username}:{self.password}'.encode('utf-8')\n headers['Authorization'] = f'Basic {base64.urlsafe_b64encode(creds)}'\n\n async with aiohttp.ClientSession(headers=headers) as session:\n rsp = await self._fetch(session, timeout)\n\n feed_entries = []\n if rsp:\n data = feedparser.parse(rsp)\n feed_entries = data.entries\n if data.bozo:\n self.log.error(f\"No valid RSS data from feed {self.url}: {data.bozo_exception}\")\n return feed_entries", "async def get_discussion(context, author:str, permlink:str, observer:str=''):\n db = context['db']\n\n author = valid_account(author)\n permlink = valid_permlink(permlink)\n observer = valid_account(observer, allow_empty=True)\n\n sql = \"SELECT * FROM bridge_get_discussion(:author,:permlink,:observer)\"\n rows = await db.query_all(sql, author=author, permlink=permlink, observer=observer)\n if not rows or len(rows) == 0:\n return {}\n root_id = rows[0]['id']\n all_posts = {}\n root_post = _bridge_post_object(rows[0])\n root_post['active_votes'] = await find_votes_impl(db, rows[0]['author'], rows[0]['permlink'], VotesPresentation.BridgeApi)\n root_post = append_statistics_to_post(root_post, rows[0], False)\n root_post['replies'] = []\n all_posts[root_id] = root_post\n\n parent_to_children_id_map = {}\n\n for index in range(1, len(rows)):\n parent_id = rows[index]['parent_id']\n if parent_id not in parent_to_children_id_map:\n parent_to_children_id_map[parent_id] = []\n parent_to_children_id_map[parent_id].append(rows[index]['id'])\n post = 
_bridge_post_object(rows[index])\n post['active_votes'] = await find_votes_impl(db, rows[index]['author'], rows[index]['permlink'], VotesPresentation.BridgeApi)\n post = append_statistics_to_post(post, rows[index], False)\n post['replies'] = []\n all_posts[post['post_id']] = post\n\n for key in parent_to_children_id_map:\n children = parent_to_children_id_map[key]\n post = all_posts[key]\n for child_id in children:\n post['replies'].append(_ref(all_posts[child_id]))\n\n #result has to be in form of dictionary of dictionaries {post_ref: post}\n results = {}\n for key in all_posts:\n post_ref = _ref(all_posts[key])\n results[post_ref] = all_posts[key]\n return results", "def get_posts_for_user(account_pk):\n where = \"WHERE account_pk = ?\"\n values = (account_pk, )\n orders = \"ORDER BY time DESC\"\n return Post.select_many(where, orders, values)", "def get_data_fb(user_id, access_token):\n\n my_user = storage.get(User, user_id)\n my_user.update_attr(\"fb_access_token\", access_token)\n\n r = requests.get('https://graph.facebook.com/me/feed?access_token=' + access_token)\n result = r.json()\n post_dict = {}\n post_list = []\n index = 0\n for posts in result[\"data\"]:\n if index == 10:\n break\n new_post = {}\n\n new_post[\"CrossMe_user_id\"] = user_id\n new_post[\"Post_id_CrossMe\"] = str(uuid.uuid4())\n\n if \"message\" in posts.keys():\n new_post[\"message\"] = posts[\"message\"]\n else:\n new_post[\"message\"] = \"NULL\"\n\n new_post[\"created_time\"] = datetime.strptime(posts[\"created_time\"], '%Y-%m-%dT%H:%M:%S+%f')\n\n new_post[\"source\"] = \"FACEBOOK\"\n\n new_post[\"fb_post_id\"] = posts[\"id\"]\n\n\n URLPOST = 'https://graph.facebook.com/' + posts[\"id\"] + '?fields=object_id&access_token=' + access_token\n post_data = requests.get(URLPOST).json()\n if \"object_id\" in post_data.keys():\n URLIMAGE = 'https://graph.facebook.com/' + post_data[\"object_id\"] + '?fields=images&access_token=' + access_token\n image_data = requests.get(URLIMAGE).json()\n if \"images\" not in image_data.keys():\n continue\n all_images = image_data[\"images\"]\n new_post[\"image_url\"] = all_images[1][\"source\"]\n posts[\"media_type\"] = \"IMAGE\"\n else:\n continue\n posts[\"media_type\"] = \"STATUS\"\n new_post[\"image_url\"] = \"NULL\"\n\n post_list.append(new_post)\n index = index + 1\n\n my_post = Post()\n\n my_post.user_id = new_post[\"CrossMe_user_id\"]\n my_post.creation_date = new_post[\"created_time\"]\n my_post.post_source = new_post[\"source\"]\n my_post.post_type = posts[\"media_type\"]\n my_post.post_text = new_post[\"message\"]\n my_post.media_url = new_post[\"image_url\"]\n my_post.save()\n\n\n post_dict[\"fb_last_post\"] = post_list\n\n return make_response(jsonify(post_dict), 200)", "def list_posts(request):\n if request.method == 'POST':\n category = request.POST.get('category', False)\n posts = Post.objects.select_related('author')\\\n .filter(category=category)\\\n .order_by('-modified')\n # import pdb; pdb.set_trace()\n return render(request, 'posts/index.html',\n {'posts': posts})\n\n posts = Post.objects.select_related('author').order_by('-modified')\n likes = Likes.objects.select_related('post')\n\n return render(request, 'posts/index.html',\n {'posts': posts})", "def get_posts(request):\n posts = Post.objects.order_by(\"created_date\")\n return render(request, \"blogposts.html\", {\"posts\": posts})", "def getRemotePost(post_id):\n servers = Server.objects.all()\n for server in servers:\n if server.username and server.password:\n host = server.hostname\n if not 
host.endswith(\"/\"):\n host = host + \"/\"\n server_api = \"{}posts/{}\".format(host, post_id)\n print('Request:')\n print(server_api)\n try:\n r = requests.get(server_api, auth=(server.username, server.password))\n print(r)\n if r.status_code in [200, 201]:\n return [remotePostCreate(server.hostname, r.json())]\n except Exception as e:\n print(e)\n return None", "def posts_get():\n title_like = request.args.get(\"title_like\")\n body_like = request.args.get(\"body_like\")\n\n posts = session.query(Post)\n if title_like:\n if body_like:\n posts = posts.filter(\n Post.title.contains(title_like), Post.body.contains(body_like))\n else:\n posts = posts.filter(Post.title.contains(title_like))\n posts = posts.all()\n\n data = json.dumps([post.as_dictionary() for post in posts])\n return Response(data, 200, mimetype=\"application/json\")", "def download_images(self):\n # download the json for the thread\n self.download_json()\n\n # open the json file\n with self.file.open('r', encoding=\"utf-8\") as json_file:\n # load into data\n data = json.load(json_file)\n\n # grab the posts from\n posts = data[\"posts\"]\n\n # iterate through posts in the thread\n for post_num in range(len(posts)):\n # grab the current post\n post = posts[post_num]\n\n # try to get these attributes. may throw an error because not\n # all posts or replies have images attached\n try:\n # images should have these attributes\n tim = post[\"tim\"]\n ext = post[\"ext\"]\n width = post[\"w\"]\n height = post[\"h\"]\n desired_size = False\n\n # filename consists of \"tim.ext\"\n image_filename = str(tim) + str(ext)\n\n # set resolution based on bool arguments\n if self.sd:\n self.min_width = 720\n self.min_height = 480\n if self.hd:\n self.min_width = 1280\n self.min_height = 720\n if self.fhd:\n self.min_width = 1920\n self.min_height = 1080\n if self.uhd:\n self.min_width = 3840\n self.min_height = 2160\n\n # check if the image is the desired size\n if (height <= self.max_height and height >= self.min_height\n ) and (width <= self.max_width\n and width >= self.min_width):\n desired_size = True\n\n if desired_size:\n try:\n # request image variables\n image_url = self.images_endpoint + image_filename\n image_res = requests.get(image_url)\n image_content = image_res.content\n\n # local image variables\n image_string = str(self.images_path.absolute()) + \\\n \"\\\\\" + image_filename\n image_file = Path(image_string)\n\n # write to disk\n if self.verbose:\n print(\"Downloading\", image_url, \"to\",\n image_string, \"from thread\",\n self.thread_id, \"with a resolution of\",\n width, \"x\", height)\n with image_file.open(\"wb\") as im_file:\n im_file.write(image_content)\n except KeyboardInterrupt:\n sys.exit(1)\n\n except KeyError:\n pass", "def GET_side_posts(self, *a, **kw):\r\n # Server side cache is also invalidated when new article is posted\r\n return self.render_cached('side-posts', RecentArticles, g.side_posts_max_age)", "def get_posts(subreddits, limit, user_agent=default_user_agent):\n all_posts = []\n\n for subreddit in subreddits:\n print(subreddit)\n data_url = 'https://www.reddit.com/r/{}.json?limit={}'.format(subreddit, limit)\n response_data = requests.get(data_url, headers = {'User-agent': user_agent})\n\n posts = response_data.json()['data']['children']\n\n all_posts.extend(posts)\n\n return all_posts", "def get_posts(db_cursor, page_num):\n posts = []\n db_cursor.execute(\"SELECT * FROM posts ORDER BY time_posted DESC LIMIT ?, ?\", (POSTS_PER_PAGE*(page_num-1), POSTS_PER_PAGE))\n for post in 
db_cursor.fetchall():\n posts.append({\n 'url_id': post[1],\n 'title': post[2],\n 'time_posted': post[4],\n 'category': post[5],\n 'visibility': post[6]\n })\n return posts", "def find_posts(self):\n\n posts = self.soup.find_all(\"div\", class_=\"_401d\")\n print(f'[Scraper] Found {len(posts)} posts.')\n\n for post in posts:\n try: \n text = post.find(\"div\", class_=\"_6-cp\").div.get_text()\n a_tag = post.find('span', class_=\"_6-cm\").find('a')\n link = \"https://www.facebook.com\" + a_tag['href'] \n print(f\"[Scraper] {link}\")\n print(f\"[Scraper] {text}\")\n except:\n print(\"Error occured. Skipped a result.\")", "async def search_posts(self, ctx: commands.Context, *query):\n async with ctx.typing():\n print(query)\n qstr = ' '.join(query)\n search_results = await self.get_posts_by_query(ctx, qstr)\n\n await ctx.send(\n f\"Found {search_results['total']} results:\",\n # files=[\n # await self.get_file_from_post_data(d) for d in search_results['_']['results'][:3]\n # ],\n )\n max = await self.config.max_searchresults()\n\n for data in search_results['_']['results'][:max]:\n # data = await self.get_post_by_id(ctx, postid)\n post_e = await self.post_data_to_embed(data)\n attach = await self.get_file_from_post_data(data)\n await ctx.send(\n data['_'][\"message_content\"] if not data['_'][\"should_embed\"] else None,\n embed=post_e,\n # file=attach,\n )\n\n if search_results['total'] > max:\n await ctx.send(\"To see more results either refine the search or change the sorting order.\")", "def all_posts_list(request):\n #update is_expired in all posts\n update_posts_expiration()\n #put all posts into post\n post = Post.objects.all()\n #create serializer with the posts\n serializer = ViewPostSerializer(post, many=True)\n #return serializer view\n return Response(serializer.data)", "def get_all_posts(self):\n cur = self.conn.cursor()\n\n query = 'SELECT blog.blog_id as id, blog.title as title, ' \\\n 'blog.subtitle as subtitle, ' \\\n 'blog.content as content, blog.date as date, ' \\\n 'author.name as author ' \\\n 'FROM blog, author ' \\\n 'WHERE blog.author_id = author.author_id ' \\\n 'ORDER BY blog_id DESC '\n\n posts = []\n cur.execute(query)\n\n for row in cur.fetchall():\n posts.append(dict(row))\n\n return posts", "def api_get_post(request, post_id):\n\n post = get_object_or_404(Post, id=post_id)\n\n json = serializers.serialize(\"json\", [post], fields=(\n \"pub_time\", \"_text_rendered\", \"title\", \"text\", \"image\",\n \"image_width\", \"image_height\", \"replies\", \"tags\"\n ))\n\n return HttpResponse(content=json)", "def get_posts(self, userid, username):\n dict_json = {}\n x = 0\n outfile_name = \"tweetsFrom\" + username + \".json\"\n posts = api.GetUserTimeline(user_id=userid, count=200)\n text_list = [p.text for p in posts]\n for text in text_list:\n dict_json[x] = text\n x += 1\n with open(outfile_name, \"w\") as outfile:\n json.dump(dict_json, outfile)\n outfile.close()", "def do_fetch(self):\n pass", "async def get_blogs(self) -> List:\n parsed_blogs = []\n GRAPHQL_PAYLOAD[\"variables\"][\"tagSlug\"] = self.tag\n GRAPHQL_PAYLOAD[\"variables\"][\"paging\"][\"to\"] = str(self.start_index)\n current_datetime = int(datetime.timestamp(datetime.now()))\n post_id_url_map = {}\n response = await aiohttp_request(\n request_type=\"POST\", url=MEDIUM_GRAPHQL_URL,\n data=GRAPHQL_PAYLOAD\n )\n blogs = response[\"json\"].get(\"data\", {}).get(\"tagFeed\", {})\n if blogs:\n for blog in blogs.get(\"items\", []):\n post_date = datetime.fromtimestamp(blog[\"post\"][\"firstPublishedAt\"] 
// 1000)\n post_created_on = (current_datetime - (blog[\"post\"][\"firstPublishedAt\"] // 1000)) // (60 * 60)\n parsed_blogs.append(\n {\n # Blog DB Data\n \"post_id\": blog[\"post\"][\"id\"],\n \"title\": blog[\"post\"][\"title\"],\n \"blog_desc\": blog[\"post\"][\"previewContent\"][\"subtitle\"],\n \"blog_data\": \"\",\n \"blog_link\": blog[\"post\"][\"mediumUrl\"],\n \"created_time\": post_date.isoformat(),\n \"read_time\": ceil(blog[\"post\"][\"readingTime\"]),\n \"tags\": self.tag,\n # Author DB Data\n \"author_id\": blog[\"post\"][\"creator\"][\"id\"],\n \"creator\": blog[\"post\"][\"creator\"][\"name\"],\n # Extra Meta\n \"post_created_time\": post_created_on,\n }\n )\n post_id_url_map[blog[\"post\"][\"id\"]] = blog[\"post\"][\"mediumUrl\"]\n\n await bulk_update_to_redis(post_id_url_map)\n push_data_to_db(parsed_blogs)\n\n return parsed_blogs", "async def __loop(self):\n await self.bot.wait_until_ready()\n while not self.bot.is_closed():\n await self.post_data()\n await asyncio.sleep(3600)", "def show_posts():\n\n # set page as req.args['page'] coerced to int, or set as one if none is passed\n page = int(request.args.get('page', 1))\n\n # handle private AND public posts if user is logged in, only public if not\n if CURRENT_USER_KEY in session:\n posts = Post.query.order_by(Post.id.desc()).paginate(\n page=page, per_page=10, error_out=True)\n else:\n posts = Post.query.filter_by(is_private='f').order_by(Post.id.desc()).paginate(\n page=page, per_page=10, error_out=True)\n\n all_posts = [post.serialize() for post in posts.items]\n return jsonify(has_next=posts.has_next, posts=all_posts)", "def view_latest_post(request):\n address = request.POST.get('address')\n rpc_raw = rpcRawProxy(helpers.get_rpc_url())\n\n latest_blog_post = Blog.objects.filter(address_from=address).order_by('-time')[0]\n\n blog_post = helpers.download_blg(rpc_raw, latest_blog_post.key, latest_blog_post.address_from)\n\n return HttpResponse(json.dumps({\n \"status\": \"success\",\n \"data\": blog_post\n }, default=helpers.json_custom_parser), content_type='application/json')", "def cli():\n update_all_posts()\n push_updates()", "def get_contents(\n self, post_ids: List[str], datetime_filter_fn: Optional[Callable[[datetime], bool]] = None\n ) -> List[str]:\n contents = []\n url = f\"http://blog.naver.com/PostView.nhn\"\n params = {\"blogId\": self.naver_id}\n for post_id in post_ids:\n params[\"logNo\"] = post_id\n\n # Get contents of a post\n response = self.session.get(url, params=params)\n\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n # Smart editor 3\n text = soup.select_one(f\"#post-view{post_id} > div > div > div.se-main-container\")\n # Smart editor 2\n if not text:\n text = soup.select_one(\n f\"#post-view{post_id} > div > div > div.se_component_wrap.sect_dsc.__se_component_area\"\n )\n\n if not text:\n text = soup.select_one(f\"#post-view{post_id}\")\n if text:\n text = text.get_text(\"\\n\").replace(\"\\xa0\", \" \") # Space unicode replace\n else:\n print(f\"[Error] cannot select content in {post_id}.\", file=sys.stderr)\n continue\n\n text = re.sub(\"\\s+\", \" \", text).strip()\n if datetime_filter_fn is None:\n contents.append(text)\n continue\n\n date_time = soup.select(\n f\"#post-view{post_id} > div > div > div > div > div > div.blog2_container > span.se_publishDate.pcol2\"\n )\n date_time += soup.select(\"#printPost1 > tr > td.bcc > table > tr > td > p.date.fil5\")\n\n if date_time:\n date_time = date_time[0].get_text()\n post_datetime = datetime.strptime(date_time, \"%Y. %m. %d. 
%H:%M\")\n if not datetime_filter_fn(post_datetime):\n continue\n else:\n print(f\"[Error] cannot select datetime in {post_id}, this post is not filtered\")\n\n contents.append(text)\n\n print(f\"Get contents: {len(contents)} found.\")\n return contents", "def LoadPosts(self):\n result = {}\n for userid in self.User_list:\n file_list = set(os.listdir(f\"{self.home_dir}/{userid}\")) - set(['student.txt','img.jpg'])\n file_list = sorted(list(file_list))\n file_des_dict = {}\n for entirepostname in file_list:\n postname = entirepostname.replace(\".txt\",\"\")\n sp = postname.split(\"-\")\n if sp[0] not in file_des_dict.keys():\n file_des_dict[sp[0]] = {'post':None,'comments':[],'reply':{}}\n if len(sp) == 1:\n file_des_dict[sp[0]]['post'] = f\"{self.home_dir}/{userid}/{entirepostname}\"\n elif len(sp) == 2:\n file_des_dict[sp[0]]['comments'].append(sp[1])\n else:\n if sp[1] not in file_des_dict[sp[0]]['reply'].keys():\n file_des_dict[sp[0]]['reply'][sp[1]] = []\n file_des_dict[sp[0]]['reply'][sp[1]].append(sp[2])\n result[userid] = file_des_dict\n return result", "def load_posts_by_approval_status(self, status):\n return self.load_posts_by_approval_status_id(status.id)", "def collect_fb_posts(sm_account_id, access_token, session, since=None, until=None, days=7, limit=100, cutoff=None):\n graph = facebook.GraphAPI(access_token=access_token, version='2.6')\n first_req = get_feed_request(sm_account_id, since, until, limit=limit, days=days)\n # data = pickle.load(open('sample_json.pkl', 'rb'))\n interactions = set()\n batch_requests = [first_req]\n process_batch(sm_account_id, graph, interactions, batch_requests, session, 0, cutoff)", "def fetch_host_feed(self, host, **args):\n return self.fetch(\"/url\", host=host, **args)", "def fetch_entries(self):\n entries = []\n rss_list = self.__data_adapter.fetch_rss()\n for rss in rss_list:\n rss_href = rss.get('url', None)\n if rss_href is not None:\n feed = feedparser.parse(rss_href)\n [entries.append(FeedDocument(entry.get('title', ''), entry.get('summary', ''))) for entry in feed.get('entries', [])]\n return entries", "def get_blog_posts(self, params=None):\n _url = urljoin(self.base_url, self.API_POSTS)\n return requests.get(_url, params=params)", "async def get_post(self, ctx: commands.Context, postid: int):\n async with ctx.typing():\n data = await self.get_post_by_id(ctx, postid)\n post_e = await self.post_data_to_embed(data)\n attach = await self.get_file_from_post_data(data)\n\n await ctx.send(\n data['_'][\"message_content\"] if not data['_'][\"should_embed\"] else None,\n embed=post_e,\n # file=attach,\n )", "def _get_postings(self):\n raise NotImplementedError" ]
[ "0.655602", "0.65269387", "0.6413515", "0.6018604", "0.5916808", "0.59025943", "0.5787382", "0.57700604", "0.5734009", "0.57019544", "0.5652772", "0.56395626", "0.5616751", "0.5613428", "0.56133306", "0.56015396", "0.5592875", "0.5583471", "0.5548579", "0.551852", "0.55078864", "0.549857", "0.5476955", "0.54635954", "0.5457611", "0.5433103", "0.5420551", "0.5409774", "0.54038525", "0.5379112", "0.5372433", "0.5353822", "0.53516686", "0.5348802", "0.5323983", "0.5321373", "0.5310591", "0.52996725", "0.5297103", "0.52670646", "0.52612936", "0.5250326", "0.5236365", "0.5220147", "0.5213944", "0.5187461", "0.5178892", "0.51738757", "0.5173519", "0.5149293", "0.51479393", "0.51427966", "0.51236755", "0.5120394", "0.5120048", "0.5089355", "0.5089021", "0.50869566", "0.508142", "0.50715077", "0.5065275", "0.5035418", "0.50328505", "0.5022775", "0.5009053", "0.5006215", "0.49969605", "0.49777055", "0.49695796", "0.49683905", "0.4967539", "0.49560425", "0.49536252", "0.49466023", "0.4945589", "0.49353734", "0.49294227", "0.4927992", "0.49269408", "0.49231526", "0.49196532", "0.49146578", "0.49087164", "0.4898178", "0.4893939", "0.48813868", "0.48791716", "0.48773125", "0.4876843", "0.48765835", "0.48759946", "0.48536608", "0.48489922", "0.48392895", "0.48348778", "0.48336524", "0.4830598", "0.48210642", "0.48121423", "0.48077574" ]
0.7330048
0
Creates posts as Pydantic schemas from the post data returned by the VK API.
def posts_as_schemas(posts_from_vk: list[dict]) -> list[Post]:
    posts = []
    for post_from_vk in posts_from_vk:
        try:
            post = Post(
                date=post_from_vk["date"],
                likes=post_from_vk["likes"]["count"],
                text=post_from_vk["text"],
                path=f"wall{post_from_vk['owner_id']}_"
                     f"{post_from_vk['id']}",
                photos=[],
                videos=[],
            )
        except KeyError as exc:
            logger.error("No key %s for post: %s", exc, post_from_vk)
            continue

        # Collect attachments (photos, videos etc.).
        if "attachments" in post_from_vk:
            attachments = post_from_vk["attachments"]
            for attachment in attachments:
                if attachment["type"] == "photo":
                    try:
                        photo = PostPhoto(url="")
                        photo.url = attachment["photo"]["sizes"][-1]["url"]
                        post.photos.append(photo)
                    except KeyError as exc:
                        logger.error("No key %s for photo: %s", exc, post_from_vk)
                elif attachment["type"] == "video":
                    video = PostVideo(first_frame_url="")
                    video_from_vk = attachment["video"]
                    if "first_frame" in video_from_vk:
                        video.first_frame_url = video_from_vk["first_frame"][-1]["url"]
                    elif "image" in video_from_vk:
                        video.first_frame_url = video_from_vk["image"][-1]["url"]
                    else:
                        logger.error("No video image found: %s", post)
                        continue
                    post.videos.append(video)

        posts.append(post)

    return posts
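The Post, PostPhoto and PostVideo schemas (and the module-level logger) are referenced above but not defined in this record. A minimal sketch of what they could look like follows; the field names and types are assumptions inferred only from the attributes the function touches, not the project's actual models.

import logging

from pydantic import BaseModel

logger = logging.getLogger(__name__)


class PostPhoto(BaseModel):
    url: str


class PostVideo(BaseModel):
    first_frame_url: str


class Post(BaseModel):
    date: int          # Unix timestamp as returned by VK (assumed)
    likes: int
    text: str
    path: str          # e.g. "wall<owner_id>_<post_id>"
    photos: list[PostPhoto]
    videos: list[PostVideo]

With models like these, posts_as_schemas(response["items"]) would turn a raw VK wall.get payload into validated Post objects; "items" is the usual key for posts in a wall.get response, though that call site is an assumption here.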
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def posts_post():\n data = request.json\n\n try:\n validate(data, post_schema)\n except ValidationError as error:\n data = {\"message\": error.message}\n return Response(json.dumps(data), 422, mimetype=\"application/json\")\n\n post = Post(title=data[\"title\"], body=data[\"body\"])\n session.add(post)\n session.commit()\n\n data = json.dumps(post.as_dictionary())\n headers = {\"Location\": url_for(\"post_get\", id=post.id)}\n\n return Response(data, 201, headers=headers, mimetype=\"application/json\")", "def remotePostCreate(host, post):\n post = post.get('posts')[0]\n author = remoteAddAuthor(post.get('author'))\n title = post.get('title')\n description = post.get('description')\n contentType = post.get('contentType')\n content = post.get('content')\n published = utc.localize(datetime.strptime(post.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ'))\n visibility = post.get('visibility')\n unlisted = post.get('unlisted')\n id = post.get('id')\n origin = post.get('origin')\n count = post.get('count')\n comments = remoteCommentList(post)\n source = \"{}/api/posts/{}\".format(DOMAIN, post.get('id'))\n\n post_dict = {'author': author, 'title': title, 'description': description,\n 'contentType': contentType, 'content': content, 'published': published,\n 'visibility': visibility, 'unlisted': unlisted, 'id': id,\n 'comments': comments, 'origin': origin, 'count': count,\n 'source': source}\n return post_dict", "def postCreate(post):\n post_list = list()\n comments = commentList(post)\n comment_url = \"{}/api/posts/{}/comments\".format(DOMAIN, post.id)\n visible_to = list()\n visible = post.visibleTo.all()\n if visible:\n for author in visible:\n auth = \"{}/api/author/{}\".format(DOMAIN, author.id)\n visible_to.append(auth)\n\n # visible_to = list(post.visibleTo)\n post_dict = {'author': addAuthor(post.author), 'title': post.title, 'description': post.description,\n 'contentType': post.contentType, 'content': post.content, 'published': post.published,\n 'visibility': post.visibility, 'visibleTo': visible_to, 'unlisted': post.unlisted, 'id': post.id,\n 'comments': comments[:5], 'next': comment_url, 'count': len(comments),\n 'source': \"{}/api/posts/{}\".format(DOMAIN, post.id),\n 'origin': \"{}/api/posts/{}\".format(DOMAIN, post.id)}\n post_list.append(post_dict)\n return post_list", "def marshal_posts(shard, post_list):\n out = []\n for post in post_list:\n post_dict = dict(\n shardId=shard,\n archiveType=models.Post.ARCHIVE_REVERSE_MAPPING[post.archive_type],\n nickname=post.nickname,\n title=post.title,\n body=post.body,\n postTimeMs=models.datetime_to_stamp_ms(post.post_time),\n sequenceId=getattr(post, 'sequence', None),\n newTopicId=post.new_topic,\n postId=post.post_id)\n out.append(post_dict)\n return out", "def get_posts():\n url = app.config['POSTS_ENDPOINT']\n response = requests.get(url, params={})\n if response.status_code == 200:\n return parse_posts(response.json())\n raise RuntimeError('Error in retrieving posts.')", "def send_postings_to_api(raw_postings):\n # Start by extracting the posting information we need from the raw\n # postings.\n\n def _copy(src_dict, src_key, dst_dict, dst_key):\n if src_dict.get(src_key) != None:\n dst_dict[dst_key] = src_dict[src_key]\n\n def _dateToSecs(date_str):\n \"\"\" Convert given timestamp string to number of seconds in unix time.\n \"\"\"\n if date_str not in [\"\", None]:\n timestamp = datetime.datetime.strptime(date_str,\n \"%Y-%m-%dT%H:%M:%SZ\")\n delta = timestamp - datetime.datetime(1970, 1, 1)\n return (delta.days*24*3600) + delta.seconds\n 
else:\n return None\n\n postings = []\n for id,raw_posting in raw_postings:\n posting = {}\n _copy(raw_posting, 'source', posting, 'source')\n _copy(raw_posting, 'category', posting, 'category')\n\n location = {}\n if \"location\" in raw_posting:\n raw_location = raw_posting['location']\n _copy(raw_location, 'latitude', location, 'lat')\n _copy(raw_location, 'longitude', location, 'long')\n _copy(raw_location, 'accuracy', location, 'accuracy')\n _copy(raw_location, 'countryCode', location, 'country')\n _copy(raw_location, 'stateCode', location, 'state')\n _copy(raw_location, 'metroCode', location, 'metro')\n _copy(raw_location, 'regionCode', location, 'region')\n _copy(raw_location, 'countyCode', location, 'county')\n _copy(raw_location, 'cityCode', location, 'city')\n _copy(raw_location, 'localityCode', location, 'locality')\n _copy(raw_location, 'zipCode', location, 'zipcode')\n posting['location'] = location\n\n _copy(raw_posting, 'sourceId', posting, 'external_id')\n _copy(raw_posting, 'sourceUrl', posting, 'external_url')\n _copy(raw_posting, 'heading', posting, 'heading')\n _copy(raw_posting, 'body', posting, 'body')\n _copy(raw_posting, 'html', posting, 'html')\n\n if \"postingTimestamp\" in raw_posting:\n posting['timestamp'] = _dateToSecs(raw_posting['postingTimestamp'])\n if \"expirationTimestamp\" in raw_posting:\n posting['expires'] = _dateToSecs(raw_posting['expirationTimestamp'])\n\n _copy(raw_posting, 'language', posting, 'language')\n _copy(raw_posting, 'price', posting, 'price')\n _copy(raw_posting, 'currency', posting, 'currency')\n\n images = []\n if \"images\" in raw_posting:\n for raw_image in raw_posting['images']:\n image = {}\n _copy(raw_image, 'thumbnail', image, 'thumbnail')\n _copy(raw_image, 'full', image, 'full')\n if len(image) > 0:\n images.append(image)\n posting['images'] = images\n\n annotations = {}\n if \"annotations\" in raw_posting:\n for key,value in raw_posting['annotations'].items():\n annotations[key] = value\n posting['annotations'] = annotations\n\n status = {}\n if \"flags\" in raw_posting:\n flags = raw_posting['flags']\n\n if flags & 1 == 1:\n status['offered'] = True\n elif flags & 2 == 2:\n status['lost'] = True\n elif flags % 4 == 4:\n status['stolen'] = True\n elif flags % 8 == 8:\n status['found'] = True\n posting['status'] = status\n\n _copy(raw_posting, 'immortal', posting, 'immortal')\n\n postings.append(posting)\n\n # Send the postings off to the Posting API.\n\n request = {'postings' : postings}\n\n print \"Sending...\"\n\n response = requests.post(POSTING_URL,\n data=json.dumps(request),\n headers={'content-type' : \"application/json\"})\n\n print \"got response\"\n\n if response.status_code != 200:\n print \"Unexpected response:\" + str(response.status_code)\n print\n print response.text\n return None\n\n if response.headers['content-type'] != \"application/json\":\n print \"Server didn't return JSON data!\"\n print\n print response.text\n return None\n\n response = response.json()\n\n # Check the response to see which postings failed (if any).\n\n num_sent = 0 # initially.\n if response != None:\n if \"responses\" in response:\n posting_errors = response['posting_errors']\n for i in range(len(posting_errors)):\n if posting_errors[i] != None:\n for key in postings[i].keys():\n print \" %s : %s\" % (key, repr(postings[i][key]))\n print \"--> failed, reason = \" + posting_errors[i]\n print\n else:\n num_sent = num_sent + 1\n\n return response.get(\"wait_for\")", "def posts(self, limit=100, all=False):\n source, edge = self.id, 
\"feed\"\n return lazygen(Post, source, edge,\n limit=limit, get_all=all)", "def post(self):\n data = request.json\n return create_new_blog(data=data)", "def serialize_posts_data_v2(influencer, posts, length_limit=30, highlighted_ids=[], **kw):\n from debra import serializers\n from debra import feeds_helpers\n from debra import constants\n\n request = kw.get('request')\n brand = request.visitor[\"base_brand\"] if request else None\n\n posts_data = []\n urls = set()\n posts = list(posts)\n\n for post in posts:\n if post.url in urls:\n continue\n urls.add(post.url)\n\n feed_json = feeds_helpers.get_feed_handler_for_platform(\n get_post_platform(post))\n\n post_data = feed_json(None,\n for_single_post=post,\n length_limit=length_limit\n )\n\n if post_data is None:\n continue\n\n post_data[\"blog_name\"] = serializers.unescape(influencer.blogname if influencer else post.influencer.blogname)\n post_data[\"title\"] = post.title\n post_data[\"platform\"] = get_post_platform(post)\n\n if brand and brand.flag_show_dummy_data:\n post_data['url'] = constants.FAKE_POST_DATA['url']\n post_data['title'] = constants.FAKE_POST_DATA['title']\n\n if post.id in highlighted_ids:\n post_data[\"highlight\"] = True\n if post.create_date:\n post_data[\"create_date\"] = post.create_date.strftime(\"%b. %e, %Y\")\n if influencer:\n post_data['user'] = influencer.feed_stamp\n else:\n post_data['user'] = post.influencer.feed_stamp\n posts_data.append(post_data)\n return posts_data", "def generate_post(self):\n post = {'title': self.generate_title(), 'draft': False}\n for k in ('blog', 'id', 'labels', 'categories', 'draft'):\n if k not in self.header:\n continue\n if k == 'blog':\n post[k] = {'id': self.header[k]}\n else:\n post[k] = self.header[k]\n return post", "def remotePostList(host, posts, public):\n post_list = list()\n posts = posts.get('posts')\n for post in posts:\n author = remoteAddAuthor(post.get('author'))\n title = post.get('title')\n description = post.get('description')\n contentType = post.get('contentType')\n content = post.get('content')\n published = utc.localize(datetime.strptime(post.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ'))\n visibility = post.get('visibility')\n unlisted = post.get('unlisted')\n id = post.get('id')\n origin = post.get('source')\n comments = remoteCommentList(post)\n count = post.get('count')\n next = \"{}/api/posts/{}/comments\".format(DOMAIN, id)\n if host.endswith(\"/\"):\n host = host[:-1]\n source = \"{}/posts/{}\".format(host, post.get('id'))\n\n post_dict = {'author': author, 'title': title, 'description': description,\n 'contentType': contentType, 'content': content, 'published': published,\n 'visibility': visibility, 'unlisted': unlisted, 'id': id,\n 'comments': comments, 'origin': origin,\n 'source': source, 'count': count, 'next': next}\n post_list.append(post_dict)\n return post_list", "def save_posts(self, posts):\n return self.collection.insert_many(map(lambda post: post.serialize(), posts))", "def PostData(title: str, body: str) -> dict:\n post = Posts(title=title, body=body)\n db.session.add(post)\n db.session.commit()\n return {\"status\": 200, \"message\": \"Data Posted successfully\"}", "def post(self, post_id=None):\n\n if post_id:\n abort(400)\n else:\n args = parsers.post_post_parser.parse_args(strict=True)\n\n new_post = Post(args['title'])\n new_post.text = args['text']\n # new_post.user = user\n\n if args['tags']:\n for item in args['tags']:\n tag = Tag.query.filter_by(name=item).first()\n # If the tag already exist, append.\n if tag:\n 
new_post.tags.append(tag)\n # If the tag not exist, create the new one.\n # Will be write into DB with session do.\n else:\n new_tag = Tag(item)\n new_post.tags.append(new_tag)\n db.session.add(new_post)\n db.session.commit()\n return (new_post.id, 201)", "def postList(posts):\n post_list = list()\n for post in posts:\n visible_to = list()\n visible = post.visibleTo.all()\n if visible:\n for author in visible:\n auth = \"{}/api/author/{}\".format(DOMAIN, author.id)\n visible_to.append(auth)\n\n comments = commentList(post)\n comment_url = \"{}/api/posts/{}/comments\".format(DOMAIN, post.id)\n post_dict = {'author': addAuthor(post.author), 'title': post.title, 'description': post.description,\n 'contentType': post.contentType, 'content': post.content, 'published': post.published,\n 'visibility': post.visibility, 'visibleTo': visible_to, 'unlisted': post.unlisted, 'id': post.id,\n 'comments': comments[:5], 'next': comment_url, 'count': len(comments),\n 'origin': \"{}/api/posts/{}\".format(DOMAIN, post.id),\n 'source': \"{}/api/posts/{}\".format(DOMAIN, post.id)}\n post_list.append(post_dict)\n return post_list", "def post_to_object(self, post, remove_id_prefix=False):\n id = post.get('id')\n if not id:\n return {}\n\n post_type = post.get('type')\n status_type = post.get('status_type')\n url = self.post_url(post)\n picture = post.get('picture')\n display_name = None\n message = (post.get('message') or post.get('story') or\n post.get('description') or post.get('name'))\n\n data = post.get('data', {})\n for field in ('object', 'song'):\n obj = data.get(field)\n if obj:\n id = obj.get('id')\n post_type = obj.get('type')\n url = obj.get('url')\n display_name = obj.get('title')\n\n object_type = OBJECT_TYPES.get(post_type)\n author = self.user_to_actor(post.get('from'))\n link = post.get('link', '')\n\n if link.startswith('/gifts/'):\n object_type = 'product'\n if not object_type:\n if picture and not message:\n object_type = 'image'\n else:\n object_type = 'note'\n\n obj = {\n 'id': self.tag_uri(str(id)),\n 'objectType': object_type,\n 'published': util.maybe_iso8601_to_rfc3339(post.get('created_time')),\n 'updated': util.maybe_iso8601_to_rfc3339(post.get('updated_time')),\n 'author': author,\n 'content': message,\n # FB post ids are of the form USERID_POSTID\n 'url': url,\n 'image': {'url': picture},\n 'displayName': display_name,\n 'fb_object_id': post.get('object_id'),\n }\n\n privacy = post.get('privacy', {})\n if isinstance(privacy, dict):\n privacy = privacy.get('value')\n if privacy is not None:\n # privacy value '' means it doesn't have an explicit audience set, so i\n # *think* it inherits from its parent. 
TODO: use that value as opposed to\n # defaulting to public.\n public = privacy.lower() in ('', 'everyone', 'open')\n obj['to'] = [{'objectType': 'group',\n 'alias': '@public' if public else '@private'}]\n\n # tags and likes\n tags = itertools.chain(post.get('to', {}).get('data', []),\n post.get('with_tags', {}).get('data', []),\n *post.get('message_tags', {}).values())\n obj['tags'] = [self.postprocess_object({\n 'objectType': OBJECT_TYPES.get(t.get('type'), 'person'),\n 'id': self.tag_uri(t.get('id')),\n 'url': self.object_url(t.get('id')),\n 'displayName': t.get('name'),\n 'startIndex': t.get('offset'),\n 'length': t.get('length'),\n }) for t in tags]\n\n obj['tags'] += [self.postprocess_object({\n 'id': self.tag_uri('%s_liked_by_%s' % (id, like.get('id'))),\n 'url': url,\n 'objectType': 'activity',\n 'verb': 'like',\n 'object': {'url': url},\n 'author': self.user_to_actor(like),\n 'content': 'likes this.',\n }) for like in post.get('likes', {}).get('data', [])]\n\n # \"See Original\" links\n post_actions = post.get('actions',[])\n see_orig_actions = (act for act in post_actions\n if act.get('name', '').lower() in SEE_ORIGINAL_ACTIONS)\n obj['tags'] += [self.postprocess_object({\n 'objectType': 'article',\n 'url': act.get('link'),\n 'displayName': act.get('name')\n }) for act in see_orig_actions]\n\n # is there an attachment? prefer to represent it as a picture (ie image\n # object), but if not, fall back to a link.\n att = {\n 'url': link if link else url,\n 'image': {'url': picture},\n 'displayName': post.get('name'),\n 'summary': post.get('caption'),\n 'content': post.get('description'),\n }\n\n if (picture and picture.endswith('_s.jpg') and\n (post_type == 'photo' or status_type == 'added_photos')):\n # a picture the user posted. get a larger size.\n att.update({\n 'objectType': 'image',\n 'image': {'url': picture[:-6] + '_o.jpg'},\n })\n obj['attachments'] = [att]\n elif link and not link.startswith('/gifts/'):\n att['objectType'] = 'article'\n obj['attachments'] = [att]\n\n # location\n place = post.get('place')\n if place:\n id = place.get('id')\n obj['location'] = {\n 'displayName': place.get('name'),\n 'id': id,\n 'url': self.object_url(id),\n }\n location = place.get('location', None)\n if isinstance(location, dict):\n lat = location.get('latitude')\n lon = location.get('longitude')\n if lat and lon:\n obj['location'].update({\n 'latitude': lat,\n 'longitude': lon,\n # ISO 6709 location string. 
details: http://en.wikipedia.org/wiki/ISO_6709\n 'position': '%+f%+f/' % (lat, lon),\n })\n elif 'location' in post:\n obj['location'] = {'displayName': post['location']}\n\n # comments go in the replies field, according to the \"Responses for\n # Activity Streams\" extension spec:\n # http://activitystrea.ms/specs/json/replies/1.0/\n comments = post.get('comments', {}).get('data')\n if comments:\n items = [self.comment_to_object(c) for c in comments]\n obj['replies'] = {\n 'items': items,\n 'totalItems': len(items),\n }\n\n return self.postprocess_object(obj)", "def posts_for_feed():\n user_id = session.get('user_id')\n friend_posts = Post.query.join(Friend, db.and_(Post.user_id == Friend.user_2,\n Friend.active == True)).outerjoin(Comment, db.and_(Comment.post_id == Post.post_id,\n Comment.active == True)).filter(Friend.user_1 == user_id,\n Post.active == True).order_by(Post.post_id.desc()).all()\n\n post_list = []\n for post in friend_posts:\n post_list.append(post.to_dict_for_json())\n\n resp = make_response(jsonify(post_list), 200)\n return resp", "def serializePostsData(influencer, posts, length_limit=30, highlight=False):\n from debra import serializers\n\n posts_data = []\n urls = set()\n posts = list(posts)\n dated = []\n undated = []\n for post in posts:\n if post.create_date:\n dated.append(post)\n else:\n undated.append(post)\n\n posts = sorted(dated, key=lambda x: x.create_date)\n posts.reverse()\n posts.extend(undated)\n\n if length_limit:\n length_limit = length_limit\n\n for post in posts:\n if post.url in urls:\n continue\n urls.add(post.url)\n post_data = {}\n post_data[\"post_image\"] = post.post_image\n stripped_content, images = tagStripper(\n post.content, length_limit=length_limit)\n post_data[\"content\"] = stripped_content\n post_data[\"content_images\"] = images\n post_data[\"url\"] = post.url\n post_data[\"blog_name\"] = serializers.unescape(influencer.blogname if influencer else\\\n post.influencer.blogname)\n post_data[\"title\"] = post.title\n post_data[\"platform\"] = get_post_platform(post)\n if highlight:\n post_data[\"highlight\"] = True\n if post.create_date:\n post_data[\"create_date\"] = post.create_date.strftime(\"%b. 
%e, %Y\")\n if not influencer:\n post_data['user'] = post.influencer.feed_stamp\n if post.products_json:\n post_data[\"products\"] = post.get_product_json()\n else:\n post_data[\"products\"] = []\n posts_data.append(post_data)\n return posts_data", "def test_get_all_posts(self):\n self.login_client('test_user', 'testing')\n # hit the API endpoint\n response = self.client.get(\n reverse(\"post-list-create\")\n )\n # fetch the data from db\n expected = Post.objects.all()\n serialized = PostSerializerSchema(expected, many=True)\n self.assertEqual(response.data, serialized.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def save_posts(self):\n logger.info(\"Savings posts to database\")\n records = self.df.to_dict(\"records\")\n\n for record in records:\n Company.objects.get_or_create(name=record[\"company\"])\n\n Post.objects.get_or_create(\n title=record[\"title\"],\n company_id=record[\"company\"],\n defaults={\n \"date_posted\": record[\"date_posted\"],\n \"description\": record[\"description\"],\n \"location\": record[\"location\"],\n \"is_sponsored\": False,\n \"date_added_db\": record[\"date_added_db\"],\n \"source_id\": record[\"source\"],\n \"link\": record[\"link\"],\n },\n )", "def post(self):\n user_id = get_jwt_identity()\n user = find_user(user_id)\n args = post_parser.parse_args()\n\n # check circles\n circles = []\n for circle_id in args['circle_ids']:\n found_circle = find_circle(user, circle_id)\n if not found_circle:\n return {'msg': f'Circle {circle_id} is not found'}, 404\n circles.append(found_circle)\n\n # check reshare\n reshared_from = args['reshared_from']\n reshared_from_post = None\n if reshared_from:\n reshared_from_post = dangerously_get_post(reshared_from)\n if not reshared_from_post:\n return {\"msg\": f\"Post {reshared_from} is not found\"}, 404\n\n # check media\n media_object_names = args['media_object_names']\n if reshared_from and media_object_names:\n return {'msg': \"Reshared post is not allowed to have media\"}, 400\n\n post = create_post(\n user,\n content=args['content'],\n is_public=args['is_public'],\n circles=circles,\n reshareable=args['reshareable'],\n reshared_from=reshared_from_post,\n media_list=check_media_object_names(media_object_names, MaxPostMediaCount),\n mentioned_users=check_mentioned_user_ids(args['mentioned_user_ids']),\n is_update_avatar=False\n )\n if not post:\n return {\"msg\": f\"Not allowed to reshare post {reshared_from}\"}, 403\n return post, 201", "def handler(event, _context):\n model = PostModel()\n post_id = model.create(**json.loads(event['body']))\n return dump_result({'post_id': post_id}, status_code=201)", "def database_post_object(row, truncate_body=0):\n\n paid = row['is_paidout']\n\n post = {}\n post['active'] = json_date(row['active'])\n post['author_rewards'] = row['author_rewards']\n post['id'] = row['id']\n post['author'] = row['author']\n post['permlink'] = row['permlink']\n post['category'] = row['category'] if 'category' in row else 'undefined'\n\n post['title'] = row['title']\n post['body'] = row['body'][0:truncate_body] if truncate_body else row['body']\n post['json_metadata'] = row['json']\n\n post['created'] = json_date(row['created_at'])\n post['last_update'] = json_date(row['updated_at'])\n post['depth'] = row['depth']\n post['children'] = row['children']\n\n post['last_payout'] = json_date(row['last_payout_at'])\n post['cashout_time'] = json_date(row['cashout_time'])\n post['max_cashout_time'] = json_date(None) # ABW: only relevant up to HF17, timestamp::max for all posts later 
(and also all paid)\n\n curator_payout = sbd_amount(row['curator_payout_value'])\n post['curator_payout_value'] = to_nai(_amount(curator_payout))\n post['total_payout_value'] = to_nai(_amount(row['payout'] - curator_payout))\n\n post['reward_weight'] = 10000 # ABW: only relevant between HF12 and HF17 and we don't have access to correct value\n\n post['root_author'] = row['root_author']\n post['root_permlink'] = row['root_permlink']\n\n post['allow_replies'] = row['allow_replies']\n post['allow_votes'] = row['allow_votes']\n post['allow_curation_rewards'] = row['allow_curation_rewards']\n\n post['parent_author'] = row['parent_author']\n post['parent_permlink'] = row['parent_permlink_or_category']\n\n post['beneficiaries'] = row['beneficiaries']\n post['max_accepted_payout'] = to_nai(row['max_accepted_payout'])\n post['percent_hbd'] = row['percent_hbd']\n post['net_votes'] = row['net_votes']\n\n if paid:\n post['total_vote_weight'] = 0\n post['vote_rshares'] = 0\n post['net_rshares'] = 0 # if row['rshares'] > 0 else row['rshares'] ABW: used to be like this but after HF19 cashouts disappear and all give 0\n post['abs_rshares'] = 0\n post['children_abs_rshares'] = 0\n else:\n post['total_vote_weight'] = row['total_vote_weight']\n post['vote_rshares'] = ( row['rshares'] + row['abs_rshares'] ) // 2 # effectively sum of all positive rshares\n post['net_rshares'] = row['rshares']\n post['abs_rshares'] = row['abs_rshares']\n post['children_abs_rshares'] = 0 # TODO - ABW: I'm not sure about that, it is costly and useless (used to be part of mechanism to determine cashout time)\n\n return post", "def post(self):\n data = api.payload\n return data_dao.create(data)", "def post_list(request):\n if request.method == 'GET':\n posts = Post.objects.all()\n serializer = PostSerializer(posts, many=True)\n return JSONResponse(serializer.data)\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = PostSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JSONResponse(serializer.data, status=201)\n return JSONResponse(serializer.errors, status=400)", "def inputPostFormatter(post):\n\n post[\"ttl\"] = from_decimal(post[\"ttl\"])\n post[\"workToProve\"] = from_decimal(post.get(\"workToProve\", 0))\n post[\"priority\"] = from_decimal(post[\"priority\"])\n\n if not is_array(post.get(\"topics\")):\n post[\"topics\"] = [post[\"topics\"]] if post.get(\"topics\") else []\n\n post[\"topics\"] = [topic if is_0x_prefixed(topic) else encode_hex(topic)\n for topic in post[\"topics\"]]\n\n return post", "def testInputPost(self):\n data = {\n \"title\": \"Example Post\",\n \"rent\": 700\n }\n\n response = self.client.post(\"/api/posts\",\n data=json.dumps(data),\n content_type=\"application/json\",\n headers=[(\"Accept\", \"application/json\")]\n )\n\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response.mimetype, \"application/json\")\n self.assertEqual(urlparse(response.headers.get(\"Location\")).path,\n \"/api/posts/1\")\n\n data = json.loads(response.data)\n self.assertEqual(data[\"id\"], 1)\n self.assertEqual(data[\"title\"], \"Example Post\")\n self.assertEqual(data[\"rent\"], 700)\n\n posts = session.query(Input).all()\n self.assertEqual(len(posts), 1)\n\n post = posts[0]\n self.assertEqual(post.title, \"Example Post\")\n self.assertEqual(post.rent, 700)", "def parse_posts(posts_dict):\n return posts_dict['posts']", "def create(self, validated_data):\n import vk_api\n login, password = 'dostoyewski@yandex.ru', 'seleNa'\n vk_session = 
vk_api.VkApi(login, password)\n try:\n vk_session.auth(token_only=True)\n except vk_api.AuthError as error_msg:\n print(error_msg)\n\n vk = vk_session.get_api()\n string = 'ALERT' + '\\n' + 'Описание проблемы: ' + validated_data.get('description') + '\\n' + 'Примерное местоположение: ' + validated_data.get('place') + '\\n' + 'Особые приметы: ' + validated_data.get('custom')\n vk.wall.post(message=string, owner_id=-180054668, from_group=1, lat=validated_data.get('lat'), long=validated_data.get('lon'))\n return Post.objects.create(**validated_data)", "def handle_new_post(post_data, user_agent, remote_addr):\n \n for required in POST_REQUIRED_PARAMS:\n if required not in post_data:\n return None, None\n\n try:\n value = int(string_from_interwebs(post_data.getfirst(\"code\", \"\")))\n except ValueError:\n return None, None\n \n if value != 98098098098:\n return None, None\n\n # not yet safe to use.\n location = post_data.getfirst(\"location\", \"\")\n tags = string_from_interwebs(post_data.getfirst(\"tags\")) \n author = post_data.getfirst(\"author\")\n \n split_tags = [string_from_interwebs(tag).strip().lower() for tag in tags.split(\",\")] # temporary\n \n if len(split_tags) > 3:\n return None, None\n \n author_id = string_from_interwebs(author).strip()\n \n with Connection('localhost', 27017) as connection:\n reply_to = string_from_interwebs(post_data.getfirst(\"reply_to\"))\n \n if not verify_author(author_id, connection):\n return None, None\n\n if not verify_post(reply_to, connection):\n return None, None\n\n # if reply then it's verified.\n # XXX: I need to make a standard object structure for this, so that I don't \n # have to update separate things.\n\n post = {\"viewed\" : 0,\n \"comments\" : 0,\n \"flagged\" : 0,\n \"disliked\" : 0,\n \"enjoyed\" : 0,\n \"num_replies\" : 0,\n \"num_reposts\" : 0,\n \"content-type\" : \"image\", # need to pull this from the mime lookup\n \"file\" : \"placeholder\",\n \"user_agent\" : user_agent,\n \"remote_addr\" : remote_addr,\n \"created\" : datetime.utcnow(),\n \"location\" : string_from_interwebs(location).strip(),\n \"author\" : ObjectId(author_id),\n \"reply_to\" : ObjectId(reply_to),\n \"tags\" : split_tags}\n\n update_post(reply_to, connection)\n\n return post_data.getfirst(\"data\"), post", "def format_posts(posts):\n formatted_posts = []\n\n for post in posts:\n post_data = post['data']\n formatted_post = {\n \"title\": post_data['title'],\n \"post_id\": post_data['id'],\n \"subreddit\": post_data['subreddit'],\n \"score\": post_data['score'],\n \"url\": post_data['url'],\n \"author\": post_data['author'],\n \"permalink\": format_post_permalink(post_data['permalink']),\n \"num_comments\": post_data['num_comments'],\n \"created\": post_data['created'],\n \"body\": post_data['selftext']\n }\n\n formatted_posts.append(formatted_post)\n\n return formatted_posts", "def insert_new_post(post_arg_set):\n api, post_data, acct_data, page_id, config = post_arg_set\n\n try:\n post_id = post_data['id'] if post_data.has_key('id') else None\n\n except Exception as e:\n log.error( e )\n\n else:\n\n # parse date\n if post_data.has_key('created_time') and post_data['created_time'] is not None: \n dt = datetime.strptime(post_data['created_time'], FB_DATE_FORMAT)\n date_time = tz_adj(dt, config)\n time_bucket = round_datetime(date_time, config)\n raw_timestamp = int(date_time.strftime(\"%s\"))\n \n else:\n time_bucket = None\n raw_timestamp = None\n \n # extract message so we can find links within the msg if not in url\n article_urls = [get_fb_link(post_data, 
config, unshorten=True)]\n message = post_data['message'].encode('utf-8') if post_data.has_key('message') else None\n message_urls = get_message_urls(article_urls, message, config)\n\n # detect article links, unshorten and parse\n article_urls = [\n parse_url(unshorten_link(url, config)) \\\n for url in article_urls + message_urls\n if url is not None\n ]\n\n article_urls = [url for url in article_urls if is_article(url, config)]\n\n if article_urls:\n for article_url in set(article_urls):\n\n # sluggify url\n article_slug = sluggify(article_url)\n\n # format data\n post_value = {\n 'article_slug': article_slug,\n 'article_url': article_url,\n 'time_bucket': time_bucket,\n 'fb_post_created': raw_timestamp,\n 'raw_timestamp': raw_timestamp,\n 'fb_raw_link' : get_fb_link(post_data, config=config),\n 'fb_page_id': page_id,\n 'fb_post_id': post_id,\n 'fb_page_likes': acct_data['likes'] if acct_data.has_key('likes') else None,\n 'fb_page_talking_about': acct_data['talking_about_count'] if acct_data.has_key('talking_about_count') else None,\n 'fb_type': post_data['type'] if post_data.has_key('type') else None,\n 'fb_status_type': post_data['status_type'] if post_data.has_key('status_type') else None,\n 'fb_message': message\n }\n \n # always insert insights data\n if is_insights(page_id, config):\n \n log.info( \"INSIGHTS\\tAdding data from %s re: %s\" % (page_id, article_slug) )\n\n # fetch data\n insights_value = get_insights_data(api, page_id, post_id)\n\n # create datasource name\n data_source = \"facebook_insights_%s\" % page_id \n \n # upsert url\n upsert_url(article_url, article_slug, data_source, config)\n\n # insert id\n db.sadd('facebook_post_ids', post_id)\n\n # format time bucket\n current_time_bucket = gen_time_bucket(config)\n insights_value['time_bucket'] = current_time_bucket\n post_value.pop('time_bucket', None)\n \n value = json.dumps({\n data_source : dict(post_value.items() + insights_value.items())\n })\n\n # upload data to redis\n db.zadd(article_slug, current_time_bucket, value) \n \n # only insert new posts\n if not db.sismember('facebook_post_ids', post_id):\n \n log.info( \"FACEBOOK\\tNew post %s\\t%s\" % (post_id, article_url) )\n \n # insert id\n db.sadd('facebook_post_ids', post_id) \n \n # upsert url\n data_source = \"facebook_%s\" % page_id\n upsert_url(article_url, article_slug, data_source, config)\n\n value = json.dumps( {data_source : post_value} )\n\n\n # upload data to redis\n db.zadd(article_slug, time_bucket, value)", "def cleanup_post(post):\n \n post_data = post\n post_data[\"id\"] = str(post[\"_id\"])\n post_data[\"author\"] = str(post[\"author\"])\n post_data[\"created\"] = str(post[\"created\"].ctime())\n del post_data[\"_id\"]\n \n if \"reply_to\" in post:\n post_data[\"reply_to\"] = str(post[\"reply_to\"])\n\n if \"repost_of\" in post:\n post_data[\"repost_of\"] = str(post[\"repost_of\"])\n\n return post_data", "def get_posts():\n\n\tposts = []\n\n\trows = db().select(db.survey.ALL, orderby=~db.survey.created_on)\n\tfor i, r in enumerate(rows):\n\t\t\n\t\t\tt = dict(\n\t\t\t\tuser_email = r.user_email,\n\t\t\t\tuser_name = get_user_name_from_email(r.user_email),\n\t\t\t\tquestion = r.question,\n\t\t\t\tcreated_on = r.created_on,\n\t\t\t\topt1 = r.opt1,\n\t\t\t\topt2 = r.opt2,\n\t\t\t\topt3 = r.opt3,\n\t\t\t\topt4 = r.opt4,\n\t\t\t\tres1 = r.res1,\n\t\t\t\tres2 = r.res2,\n\t\t\t\tres3 = r.res3,\n\t\t\t\tres4 = r.res4,\n\t\t\t\t#created_on_human = humanize.naturaltime(r.created_on),\n\t\t\t\tupdated_on = r.updated_on,\n\t\t\t\t#updated_on_human = 
r.updated_on_human,\n\t\t\t\tid = r.id,\n\t\t\t)\n\t\t\tposts.append(t)\n\n\tlogged_in = auth.user_id is not None\n\temail = None\n\tif logged_in:\n\t\temail = auth.user.email\n\n\treturn response.json(dict(\n\t\tposts=posts,\n\t\tlogged_in=logged_in,\n\t\temail=email,\n\t))", "def deliver_post(data, access=None):\n\n schema = get_post_schema(data)\n return deliver_fields(schema, data, access)", "def create_post():\n\n #Get prompt id\n prompt_id = request.form.get('prompt_id')\n\n # Get post text\n post_text = request.form.get('user_post')\n\n # Create post timestamp\n created_at = datetime.now()\n user_facing_date = created_at.strftime(\"%B %d, %Y\")\n\n # Save post and related data to database\n post = crud.create_post(session['user_id'], prompt_id, post_text, session['lat'], session['lng'], session['user_facing_location'], created_at)\n\n return render_template('post_data.html', post=post, user_facing_date=user_facing_date)", "def _populate_posts(self, channel, url):\n import feedparser\n\n Post = get_model('articles', 'Post')\n Image = get_model('images', 'Image')\n\n parser = feedparser.parse(url)\n\n for entry in parser['entries']:\n # Some entries are incomplete and have only the title, need to\n # ignore these entries.\n if not entry.get('summary'):\n continue\n\n # The title may have only 140 characters\n title = self._truncate_string(entry['title'], 140)\n slug = slugify(title)\n headline = entry['summary']\n\n # Some entries do not have the 'content' field, in this case we\n # get the 'summary' field instead.\n if entry.get('content'):\n content = entry['content'][0]['value']\n else:\n content = entry['summary']\n\n # When we find a entry that already is registered we don't need\n # continue because the following registries already be registered.\n exists = Post.objects.filter(slug=slug).count()\n if exists:\n break\n\n # Check if has some image in the post content.\n # NOTE: For the best user experience we use only the posts that\n # have images.\n image_url = self._get_image_url_in_content(content)\n if image_url:\n main_image = Image.objects.create(\n title=title,\n slug=slug,\n archive_link=image_url,\n published=True,\n user=self._user\n )\n # Generate the 'short_title' based on 'content'\n short_title = re.sub('<[^<]+?>', '', content).encode('utf-8')\n short_title = self._truncate_string(short_title.strip(), 140)\n\n post = Post.objects.create(\n title=title,\n short_title=short_title,\n slug=slug,\n headline=headline,\n content=content,\n channel=channel,\n main_image=main_image,\n show_on_root_channel=True,\n published=True,\n hat='',\n user=self._user\n )", "def create_model(self, ApiId: str, Name: str, Schema: str, ContentType: str = None, Description: str = None) -> Dict:\n pass", "def get_all_posts_json():\n\n posts = [\n {\n \"postId\": post.post_id,\n \"postPrompt\" : crud.get_prompt_by_prompt_id(post.prompt_id),\n \"postText\": post.post_text,\n \"location\": post.user_facing_location,\n \"dateCreated\": post.created_at,\n \"toneQualities\": crud.get_tone_qualities_by_post_id(post.post_id),\n }\n for post in crud.get_post_by_user_id(session['user_id'])\n ]\n\n return jsonify(posts)", "def get(self, request, format=None, limit=20):\n user = User.objects.get(id=self.request.user.id)\n group_post = Post.objects.filter(\n target_type=ContentType.objects.get(\n model='group',\n app_label='group'\n ).id,\n target_id__in=GroupMember.objects.filter(\n user=user\n ).values('group_id')\n )\n event_post = Post.objects.filter(\n 
target_type=ContentType.objects.get(model='event').id,\n target_id__in=EventMember.objects.filter(\n user=user,\n role__gt=0\n ).values('event_id')\n )\n friend_post = Post.objects.filter(\n target_type=ContentType.objects.get(model='user').id,\n target_id__in=Friend.objects.filter(\n from_user=self.request.user.id).values('to_user'),\n user__in=Friend.objects.filter(\n from_user=self.request.user.id\n ).values('to_user')\n ) | Post.objects.filter(\n target_type=ContentType.objects.get(model='user').id,\n target_id=None,\n user__in=Friend.objects.filter(\n from_user=self.request.user.id\n ).values('to_user')\n )\n user_post = Post.objects.filter(\n user=user,\n target_type=ContentType.objects.get(model='user')\n ) | Post.objects.filter(\n target_id=user.id,\n target_type=ContentType.objects.get(model='user')\n )\n post = (group_post | event_post | friend_post | user_post).order_by(\n '-datetime')[:limit]\n response = self.serializer_class(post, many=True)\n\n return Response(response.data)", "def __init__(self, post):\n self.id = int(post[0])\n self.type_id = int(post[1])\n try:\n self.parent_id = int(post[2])\n except:\n self.parent_id = None\n try:\n self.accepted_answer_id = int(post[3])\n except:\n self.accepted_answer_id = None\n self.creation_date = post[4]\n try:\n self.score = post[5]\n except:\n self.score = None\n try:\n self.view_count = post[6]\n except:\n self.view_count = 0\n self.body = post[7]\n try:\n self.owner_user_id = int(post[8])\n except:\n self.owner_user_id = None \n try:\n self.last_editor_user_id = int(post[9])\n except:\n self.last_editor_user_id = None\n try:\n self.last_editor_display_name = post[10]\n except:\n self.last_editor_display_name = None\n self.last_activity_date = post[11]\n self.last_edit_date = post[12]\n self.community_owned_date = post[13]\n self.closed_date = post[14]\n try:\n self.title = post[15]\n except:\n self.title = None\n try:\n self.tags = post[16]\n except:\n self.tags = None\n try:\n self.answer_count = int(post[17])\n except:\n self.answer_count = 0\n try:\n self.comment_count = int(post[18])\n except:\n self.comment_count = 0\n try:\n self.favorite_count = int(post[19])\n except:\n self.favorite_count = 0", "def json_to_post(self, post, tag):\r\n # Get media type (image/video)\r\n mediaType = self.get_value(post, (\"images\", 0, \"type\"))\r\n if mediaType is None:\r\n mediaType = self.get_value(post, (\"type\", ))\r\n mediaType = self.get_media_type(mediaType)\r\n\r\n imageCount = self.get_value(post, ('images_count',))\r\n if imageCount is None:\r\n imageCount = 1\r\n \r\n # Only want 1 image/video\r\n if imageCount > 1:\r\n return None\r\n \r\n # Get media url and size\r\n if mediaType == MediaType.IMAGE:\r\n media = self.get_value(post, ('images', 0, 'id', ))\r\n size = self.get_value(post, ('size', ))\r\n if size is None:\r\n size = self.get_value(post, ('images', 0, 'size', ))\r\n if media is None:\r\n media = self.get_value(post, ('link', ))\r\n elif mediaType == MediaType.VIDEO:\r\n media = self.get_value(post, ('images', 0, 'id', ))\r\n size = self.get_value(post, ('mp4_size', ))\r\n if size is None:\r\n size = self.get_value(post, ('images', 0, 'mp4_size', ))\r\n if media is None:\r\n media = self.get_value(post, ('mp4', ))\r\n else:\r\n self.logger.log(logger.LogLevel.WARNING, 'mediaType is not Image or Video')\r\n return None\r\n\r\n #check if image/video is over max size\r\n if mediaType == MediaType.IMAGE:\r\n if size >= self.setting.maxImageSize:\r\n return None\r\n elif mediaType == MediaType.VIDEO:\r\n if size >= 
self.setting.maxVideoSize:\r\n return None\r\n\r\n postId = self.get_value(post, (\"id\", ))\r\n title = self.get_value(post, (\"title\", ))\r\n views = self.get_value(post, (\"views\", ))\r\n ups = self.get_value(post, (\"ups\", ))\r\n downs = self.get_value(post, (\"downs\", ))\r\n return Post(postId, title, mediaType.value, media, size, tag, views, ups, downs)", "def update_post(prev_data, data, db_conn):\n\n schema = get_post_schema(data)\n post_kind = prev_data['kind']\n if post_kind is 'post' or post_kind is 'proposal':\n data = pick(data, ('body',))\n elif post_kind is 'vote':\n data = pick(data, ('body', 'response',))\n data, errors = update_document(schema, prev_data, data, db_conn)\n if not errors:\n add_post_to_es(data, db_conn)\n return data, errors", "def register_posts(app):\n blog = Blog(app)\n for docname, posts in getattr(app.env, \"ablog_posts\", {}).items():\n for postinfo in posts:\n blog.register(docname, postinfo)", "def post_to_activity(self, post):\n id = None\n if post.get('id'):\n # strip USERID_ prefix if it's there\n post['id'] = post['id'].split('_', 1)[-1]\n id = post['id']\n\n obj = self.post_to_object(post)\n activity = {\n 'verb': VERBS.get(post.get('type', obj.get('objectType')), 'post'),\n 'published': obj.get('published'),\n 'updated': obj.get('updated'),\n 'id': self.tag_uri(id) if id else None,\n 'url': self.post_url(post),\n 'actor': obj.get('author'),\n 'object': obj,\n }\n\n application = post.get('application')\n if application:\n activity['generator'] = {\n 'displayName': application.get('name'),\n 'id': self.tag_uri(application.get('id')),\n }\n return self.postprocess_activity(activity)", "def _CreateSchemas(self) -> None:\n self.schema_objs = dict() # Holds OpenAPI representations of types.\n\n # Add the OpenAPI schemas of protobuf primitive types.\n primitive_type_schemas = {\n primitive_type[\"name\"]: primitive_type[\"schema\"]\n for primitive_type in primitive_types.values()\n }\n self.schema_objs.update(\n cast(Dict[str, Dict[str, str]], primitive_type_schemas))\n # Add the OpenAPI schemas of the statically described RDF types.\n self.schema_objs.update(rdf_type_schemas)\n\n # Holds state of type extraction (white/gray nodes).\n visiting: Set[str] = set()\n self._CreateRouterMethodSchemas(visiting)\n self._CreateFlowSchemas(visiting)", "def tumblr2fields(api_key, blogname):\r\n from time import strftime, localtime\r\n try:\r\n # py3k import\r\n import json\r\n except ImportError:\r\n # py2 import\r\n import simplejson as json\r\n\r\n try:\r\n # py3k import\r\n import urllib.request as urllib_request\r\n except ImportError:\r\n # py2 import\r\n import urllib2 as urllib_request\r\n\r\n def get_tumblr_posts(api_key, blogname, offset=0):\r\n url = \"http://api.tumblr.com/v2/blog/%s.tumblr.com/posts?api_key=%s&offset=%d&filter=raw\" % (blogname, api_key, offset)\r\n request = urllib_request.Request(url)\r\n handle = urllib_request.urlopen(request)\r\n posts = json.loads(handle.read().decode('utf-8'))\r\n return posts.get('response').get('posts')\r\n\r\n offset = 0\r\n posts = get_tumblr_posts(api_key, blogname, offset)\r\n while len(posts) > 0:\r\n for post in posts:\r\n title = post.get('title') or post.get('source_title') or post.get('type').capitalize()\r\n slug = post.get('slug') or slugify(title)\r\n tags = post.get('tags')\r\n timestamp = post.get('timestamp')\r\n date = strftime(\"%Y-%m-%d %H:%M:%S\", localtime(int(timestamp)))\r\n slug = strftime(\"%Y-%m-%d-\", localtime(int(timestamp))) + slug\r\n format = post.get('format')\r\n 
content = post.get('body')\r\n type = post.get('type')\r\n if type == 'photo':\r\n if format == 'markdown':\r\n fmtstr = '![%s](%s)'\r\n else:\r\n fmtstr = '<img alt=\"%s\" src=\"%s\" />'\r\n content = '\\n'.join(fmtstr % (photo.get('caption'), photo.get('original_size').get('url')) for photo in post.get('photos'))\r\n content += '\\n\\n' + post.get('caption')\r\n elif type == 'quote':\r\n if format == 'markdown':\r\n fmtstr = '\\n\\n&mdash; %s'\r\n else:\r\n fmtstr = '<p>&mdash; %s</p>'\r\n content = post.get('text') + fmtstr % post.get('source')\r\n elif type == 'link':\r\n if format == 'markdown':\r\n fmtstr = '[via](%s)\\n\\n'\r\n else:\r\n fmtstr = '<p><a href=\"%s\">via</a></p>\\n'\r\n content = fmtstr % post.get('url') + post.get('description')\r\n elif type == 'audio':\r\n if format == 'markdown':\r\n fmtstr = '[via](%s)\\n\\n'\r\n else:\r\n fmtstr = '<p><a href=\"%s\">via</a></p>\\n'\r\n content = fmtstr % post.get('source_url') + post.get('caption') + post.get('player')\r\n elif type == 'video':\r\n if format == 'markdown':\r\n fmtstr = '[via](%s)\\n\\n'\r\n else:\r\n fmtstr = '<p><a href=\"%s\">via</a></p>\\n'\r\n content = fmtstr % post.get('source_url') + post.get('caption') + '\\n'.join(player.get('embed_code') for player in post.get('player'))\r\n elif type == 'answer':\r\n title = post.get('question')\r\n content = '<p><a href=\"%s\" rel=\"external nofollow\">%s</a>: %s</p>\\n%s' % (post.get('asking_name'), post.get('asking_url'), post.get('question'), post.get('answer'))\r\n\r\n content = content.rstrip() + '\\n'\r\n kind = 'article'\r\n yield (title, content, slug, date, post.get('blog_name'), [type],\r\n tags, kind, format)\r\n\r\n offset += len(posts)\r\n posts = get_tumblr_posts(api_key, blogname, offset)", "def get_data_fb(user_id, access_token):\n\n my_user = storage.get(User, user_id)\n my_user.update_attr(\"fb_access_token\", access_token)\n\n r = requests.get('https://graph.facebook.com/me/feed?access_token=' + access_token)\n result = r.json()\n post_dict = {}\n post_list = []\n index = 0\n for posts in result[\"data\"]:\n if index == 10:\n break\n new_post = {}\n\n new_post[\"CrossMe_user_id\"] = user_id\n new_post[\"Post_id_CrossMe\"] = str(uuid.uuid4())\n\n if \"message\" in posts.keys():\n new_post[\"message\"] = posts[\"message\"]\n else:\n new_post[\"message\"] = \"NULL\"\n\n new_post[\"created_time\"] = datetime.strptime(posts[\"created_time\"], '%Y-%m-%dT%H:%M:%S+%f')\n\n new_post[\"source\"] = \"FACEBOOK\"\n\n new_post[\"fb_post_id\"] = posts[\"id\"]\n\n\n URLPOST = 'https://graph.facebook.com/' + posts[\"id\"] + '?fields=object_id&access_token=' + access_token\n post_data = requests.get(URLPOST).json()\n if \"object_id\" in post_data.keys():\n URLIMAGE = 'https://graph.facebook.com/' + post_data[\"object_id\"] + '?fields=images&access_token=' + access_token\n image_data = requests.get(URLIMAGE).json()\n if \"images\" not in image_data.keys():\n continue\n all_images = image_data[\"images\"]\n new_post[\"image_url\"] = all_images[1][\"source\"]\n posts[\"media_type\"] = \"IMAGE\"\n else:\n continue\n posts[\"media_type\"] = \"STATUS\"\n new_post[\"image_url\"] = \"NULL\"\n\n post_list.append(new_post)\n index = index + 1\n\n my_post = Post()\n\n my_post.user_id = new_post[\"CrossMe_user_id\"]\n my_post.creation_date = new_post[\"created_time\"]\n my_post.post_source = new_post[\"source\"]\n my_post.post_type = posts[\"media_type\"]\n my_post.post_text = new_post[\"message\"]\n my_post.media_url = new_post[\"image_url\"]\n my_post.save()\n\n\n 
post_dict[\"fb_last_post\"] = post_list\n\n return make_response(jsonify(post_dict), 200)", "def create(self, validated_data):\n \"\"\" Create post with a location \"\"\"\n location_data = validated_data.pop('location')\n\n # create a new one or get a old for reference\n this_location = Location.objects.get_or_create(\n **location_data\n )\n\n # pop the photo url's data\n photo_data = validated_data.pop('photo')\n\n # must pop the tags data before it would used to create a post \n tags_data = validated_data.pop('tag')\n # create a instance of this post\n this_post = Post.objects.create(\n location = this_location[0],\n **validated_data)\n\n \"\"\"Associate tag's informatiion to post\"\"\"\n for tag in tags_data:\n this_tag = Tag.objects.get_or_create(name = tag.get('name'))\n print(tag.get('name'))\n print(this_tag)\n # attach this tag to this photos_datapost \n this_post.tag.add(this_tag[0])\n\n \"\"\"Associate the photo url \"\"\"\n for photo in photo_data:\n this_post.photo.create(name = photo.get('name'))\n # return the created post \n this_post.save()\n return this_post", "def __build_schema(meta_data):\n \n # Builds the dictionary that represents the schema.\n temporary_dictionary = {'$schema': None, '$id': None, 'title': None, 'type': None, 'properties': []}\n for x in meta_data:\n temporary_dictionary['properties'].append({\n 'name': x,\n 'type': None,\n 'description': None})\n # Creates a new instance of the schema and inserts the dictionary as a json into the field and returns it.\n returned_schema = Schema()\n returned_schema.data = json.dumps(temporary_dictionary)\n return returned_schema", "def proto_post(self, proto_category):\n return baker.make(\n Post,\n content=(\n \"Aliquip excepteur qui mollit labore nulla et culpa \"\n \"minim et commodo reprehenderit consequat sint.\"\n ),\n categories=proto_category,\n make_m2m=True,\n _create_files=True,\n )", "def post(self):\n data = request.json\n return save_new_post(data=data)", "def test_return_list_of_posts(self):\n self.create_new_user()\n self.create_new_posts()\n response = self.c.get('/wall/',\n content_type=\"application/json\")\n\n assert 200 == response.status_code\n assert 2 == len(response.json()['data']['posts'])\n assert response.json()['data']['posts'][0]['message'].startswith('All animals are equal')\n assert response.json()['data']['posts'][1]['message'].startswith('War is peace')", "def test_posts(self):\n self.resource._request.register_uri(\n 'GET', '/users/dotzero/posts?page=2', 'fixture_post.json')\n\n response = self.resource.posts('dotzero', 2)\n\n self.assertTrue('data' in response)\n self.assertTrue('server_time' in response)", "def _post_model_to_entry(self, redditpost):\n entry = Post()\n entry.post_id = redditpost.id\n entry.author = redditpost.author\n entry.author_premium = redditpost.author_premium\n entry.subreddit_subscribers = redditpost.subreddit_subscribers\n entry.title = redditpost.title\n entry.downs = redditpost.downs\n entry.ups = redditpost.ups\n entry.selftext = redditpost.selftext\n entry.num_comments = redditpost.num_comments\n entry.total_awards_received = redditpost.total_awards_received\n entry.view_count = redditpost.view_count\n entry.permalink = redditpost.permalink\n entry.url = redditpost.url\n entry.created = redditpost.created\n entry.created_utc = redditpost.created_utc\n\n return entry", "def parse_posts(self):\n logger.info(\"Parsing posts\")\n\n self.df.title = self.df.title.str.strip()\n\n spam_companies = [\"Indeed Prime\"]\n self.df = 
self.df[~self.df[\"company\"].isin(spam_companies)]\n self.df = self.df.dropna(subset=[\"company\"])\n self.df = self.df.drop_duplicates(subset=[\"company\", \"date_posted\", \"title\"])", "def update_post_format(post):\n\n post_dict = {\n \"title\": post[1],\n \"genre\": get_genre(post[0]),\n \"content\": post[2],\n \"repeater_link\": get_links(post[3], post[4]),\n }\n \n return post_dict", "def add(self, posts):\n for post in posts:\n self._feed.add(FeedEntry(\n summary=post.summary,\n title=post.title,\n title_type='html',\n url=post.url,\n updated=post.date,\n ))", "def create_schemas():\n\n # TEXT: the field is indexed, analyzed. By default it is not stored.\n # phrase=False does not allow to search for phrases.\n # sortable=True allows to sort the indexed values\n # ID: the file is indexed, without being analyzed.\n # STORED: the file is saved but not indexed.\n\n pub_schema = Schema(\n pubtype=TEXT(stored=True),\n key=STORED,\n author=TEXT(stored=True),\n title=TEXT(stored=True),\n pages=STORED,\n year=TEXT(stored=True),\n journal=STORED,\n volume=STORED,\n number=STORED,\n url=STORED,\n ee=STORED,\n crossref=ID(stored=True),\n )\n\n ven_schema = Schema(\n pubtype=STORED,\n key=ID(stored=True),\n author=STORED,\n title=TEXT(stored=True),\n journal=STORED,\n publisher=TEXT(stored=True),\n url=STORED,\n ee=STORED,\n year=STORED,\n isbn=STORED,\n )\n\n return pub_schema, ven_schema", "def posts_get():\n \n\n # Get and filter the posts from the database\n songs = session.query(models.Song).all()\n \n # Convert the posts to JSON and return a response\n data = json.dumps([song.as_dictionary() for song in songs])\n return Response(data, 200, mimetype=\"application/json\")", "async def _process_create_data(self, data: dict) -> dict:\n return self.SCHEMA(data)", "def post_info():\n\n posts = [\n {\n \"post_id\": post.post_id,\n \"user_id\": post.user_id,\n \"prompt\": crud.get_prompt_by_prompt_id(post.prompt_id),\n \"post_text\": post.post_text,\n \"lat\": post.lat,\n \"lng\": post.lng,\n \"created_at\": post.created_at,\n \"color\": crud.get_max_color_by_post_id(post.post_id),\n }\n for post in Post.query.order_by(desc(Post.created_at)).limit(200)\n ]\n\n return jsonify(posts)", "def update_posts(accounts):\n # print(account.columns)\n for index, post in accounts.iterrows():\n\n # If a post with this URL already exists in database, then continue with next one\n if collection.count_documents({'Posts.URL': post['URL']}, limit=1) != 0:\n print('Post with url ', post['URL'], ' already exists')\n continue\n # Get tags from all posts\n # hashtags = []\n try:\n hashtags = list({tag.strip(\"#\") for tag in post['Description'].split() if tag.startswith(\"#\")})\n except:\n hashtags = []\n # get preprocessed description\n description_without_hashtags, description_preprocessed = preprocess_description(str(post['Description']))\n # update collection with posts\n collection.update_one(\n {\n 'Codename': post['User Name']\n },\n {\n '$push': {\n 'Posts': {'Followers at Posting': post['Followers at Posting'],\n 'Post Created': post['Post Created'],\n 'Post Created Date': post['Post Created Date'],\n 'Post Created Time': post['Post Created Time'],\n 'Type': post['Type'],\n 'Total Interactions': post['Total Interactions'],\n 'Likes': post['Likes'],\n 'Comments': post['Comments'],\n 'Views': post['Views'],\n 'URL': post['URL'],\n 'Link': post['Link'],\n 'Photo': post['Photo'],\n 'Title': post['Title'], # not\n 'Description': post['Description'],\n 'description_without_hashtags': 
description_without_hashtags,\n 'description_preprocessed': description_preprocessed,\n 'Hashtags': hashtags,\n 'Image Text': post['Image Text'],\n 'Sponsor Id': post['Sponsor Id'],\n 'Sponsor Name': post['Sponsor Name'],\n 'Overperforming Score': post['Overperforming Score (weighted — Likes 1x Comments 1x )']\n }\n }\n }\n )", "def proto_post(self, proto_category):\n return baker.make(\n Post,\n content=\"Aute non ex nostrud amet ipsum.\",\n categories=proto_category,\n make_m2m=True,\n _create_files=True,\n )", "def proto_post(self, proto_category):\n return baker.make(\n Post,\n content=\"Aute non ex nostrud amet ipsum.\",\n categories=proto_category,\n make_m2m=True,\n _create_files=True,\n )", "def post_collection():\n\tpost_json = request.get_json()\n\tif not post_json:\n\t\tabort(400)\n\ttitle = post_json['title']\n\tdescription = post_json['description']\n\tcategory = post_json['category']\n\tuser_id = post_json['user_id']\n\n\tif None in [title, description, category, user_id]:\n\t\tabort(400)\n\n\tcollection = models.Collection(\n\t\tuser_id = user_id,\n\t\ttitle = title,\n\t\tdescription = description,\n\t\tcategory = category,\n\t\tpublished = False,\n\t\tpublish_date = None,\n\t\tthumbnail = None,\n\t)\n\tdb.session.add(collection)\n\tdb.session.commit()\n\treturn jsonify({'collection_id':collection.id}), 201", "def post(self):\n data = request.json\n create_entry(data)\n return None, 201", "def posts_get():\n title_like = request.args.get(\"title_like\")\n body_like = request.args.get(\"body_like\")\n\n posts = session.query(Post)\n if title_like:\n if body_like:\n posts = posts.filter(\n Post.title.contains(title_like), Post.body.contains(body_like))\n else:\n posts = posts.filter(Post.title.contains(title_like))\n posts = posts.all()\n\n data = json.dumps([post.as_dictionary() for post in posts])\n return Response(data, 200, mimetype=\"application/json\")", "def process_posts(app, doctree):\n env = app.builder.env\n if not hasattr(env, \"ablog_posts\"):\n env.ablog_posts = {}\n post_nodes = list(doctree.findall(PostNode))\n if not post_nodes:\n return\n post_date_format = app.config[\"post_date_format\"]\n should_auto_orphan = app.config[\"post_auto_orphan\"]\n docname = env.docname\n if should_auto_orphan:\n # mark the post as 'orphan' so that\n # \"document isn't included in any toctree\" warning is not issued\n # We do not simply assign to should_auto_orphan because if auto-orphan\n # is false, we still want to respect the per-post :rst:dir`orphan` setting\n app.env.metadata[docname][\"orphan\"] = True\n blog = Blog(app)\n auto_excerpt = blog.post_auto_excerpt\n multi_post = len(post_nodes) > 1 or blog.post_always_section\n for order, node in enumerate(post_nodes, start=1):\n if node[\"excerpt\"] is None:\n node[\"excerpt\"] = auto_excerpt\n if multi_post:\n # section title, and first few paragraphs of the section of post\n # are used when there are more than 1 posts\n section = node\n while True:\n if isinstance(section, nodes.section):\n break\n section = node.parent\n else:\n section = doctree\n # get updates here, in the section that post belongs to\n # Might there be orphan updates?\n update_dates = _get_update_dates(section, docname, post_date_format)\n # Making sure that post has a title because all post titles\n # are needed when resolving post lists in documents\n title = node[\"title\"] or _get_section_title(section)\n # creating a summary here, before references are resolved\n excerpt = []\n if node.children:\n if node[\"exclude\"]:\n node.replace_self([])\n 
else:\n node.replace_self(node.children)\n for child in node.children:\n excerpt.append(child.deepcopy())\n elif node[\"excerpt\"]:\n count = 0\n for nod in section.findall(nodes.paragraph):\n excerpt.append(nod.deepcopy())\n count += 1\n if count >= (node[\"excerpt\"] or 0):\n break\n node.replace_self([])\n else:\n node.replace_self([])\n nimg = node[\"image\"] or blog.post_auto_image\n if nimg:\n for img, nod in enumerate(section.findall(nodes.image), start=1):\n if img == nimg:\n excerpt.append(nod.deepcopy())\n break\n date = node[\"date\"]\n if date:\n try:\n date = datetime.strptime(date, post_date_format)\n except ValueError:\n if date_parser:\n try:\n date = date_parser(date)\n except ValueError:\n raise ValueError(\"invalid post date in: \" + docname)\n else:\n raise ValueError(\n f\"invalid post date ({date}) in \" + docname + f\". Expected format: {post_date_format}\"\n )\n else:\n date = None\n # if docname ends with `index` use folder name to reference the document\n # a potential problem here is that there may be files/folders with the\n # same name, so issuing a warning when that's the case may be a good idea\n folder, label = os.path.split(docname)\n if label == \"index\":\n folder, label = os.path.split(folder)\n if not label:\n label = slugify(title)\n section_name = \"\"\n if multi_post and section.parent is not doctree:\n section_name = section.attributes[\"ids\"][0]\n label += \"-\" + section_name\n else:\n # create a reference for the post\n # if it is posting the document\n # ! this does not work for sections\n app.env.domains[\"std\"].data[\"labels\"][label] = (docname, label, title)\n app.env.domains[\"std\"].data[\"anonlabels\"][label] = (docname, label)\n if section.parent is doctree:\n section_copy = section[0].deepcopy()\n else:\n section_copy = section.deepcopy()\n # multiple posting may result having post nodes\n for nn in section_copy.findall(PostNode):\n if nn[\"exclude\"]:\n nn.replace_self([])\n else:\n nn.replace_self(node.children)\n postinfo = {\n \"docname\": docname,\n \"section\": section_name,\n \"order\": order,\n \"date\": date,\n \"update\": max(update_dates + [date]),\n \"title\": title,\n \"excerpt\": excerpt,\n \"tags\": node[\"tags\"],\n \"author\": node[\"author\"],\n \"category\": node[\"category\"],\n \"location\": node[\"location\"],\n \"language\": node[\"language\"],\n \"redirect\": node[\"redirect\"],\n \"nocomments\": node[\"nocomments\"],\n \"image\": node[\"image\"],\n \"exclude\": node[\"exclude\"],\n \"external_link\": node[\"external_link\"],\n \"doctree\": section_copy,\n }\n if docname not in env.ablog_posts:\n env.ablog_posts[docname] = []\n env.ablog_posts[docname].append(postinfo)\n # instantiate catalogs and collections here\n # so that references are created and no warnings are issued\n if app.builder.format == \"html\":\n stdlabel = env.domains[\"std\"].data[\"labels\"] # NOQA\n else:\n if hasattr(env, \"intersphinx_inventory\"):\n stdlabel = env.intersphinx_inventory.setdefault(\"std:label\", {}) # NOQA\n baseurl = getattr(env.config, \"blog_baseurl\").rstrip(\"/\") + \"/\" # NOQA\n project, version = env.config.project, str(env.config.version) # NOQA\n for key in [\"tags\", \"author\", \"category\", \"location\", \"language\"]:\n catalog = blog.catalogs[key]\n for label in postinfo[key]:\n coll = catalog[label] # NOQA\n if postinfo[\"date\"]:\n coll = blog.archive[postinfo[\"date\"].year] # NOQA", "def get_posts(self): #return list of posts that are associated with this blog_id\n return 
Post.find_posts_for_blog_id(self.blog_id) #this will return a list of posts objects", "def post(self):\n\n try:\n\n controller = self.controller()\n kwargs = controller.date_time_parser(request.json)\n schema = self.schema(many=False)\n raw_data = controller.create(**kwargs)\n data = schema.dump(raw_data)\n\n return ResponseHandler.render_response(data=data)\n\n except Exception as ex:\n\n return ResponseHandler.render_response(status=ERR, message=traceback.format_exc())", "def parse(self, **kwargs):\n\t\treturn self.create(**kwargs)", "def create_blog_post(user_id):\n \n data = request.get_json()\n\n # Check if the user is in the database\n user = User.query.filter_by(id=user_id).first()\n if not user:\n return jsonify({\"message\": \"user does not exist!\"}), 400\n\n # Create an instance of a HashTable\n ht = hash_table.HashTable(10)\n\n # Create a blog post\n ht.add_key_value(\"title\", data[\"title\"])\n ht.add_key_value(\"body\", data[\"body\"])\n ht.add_key_value(\"date\", now)\n ht.add_key_value(\"user_id\", user_id)\n\n # Add a blog post to the database\n new_blog_post = BlogPost(\n title=ht.get_value(\"title\"),\n body=ht.get_value(\"body\"),\n date=ht.get_value(\"date\"),\n user_id=ht.get_value(\"user_id\"),\n )\n db.session.add(new_blog_post)\n db.session.commit()\n return jsonify({\"message\": \"new blog post created\"}), 200", "def get_user_posts(request):\n if request.method == \"POST\":\n token = request.data.get('token')\n post_id = request.data.get('post_id')\n type_ = request.data.get('type')\n\n if Token.objects.filter(key=token).exists():\n token = get_object_or_404(Token, key=token)\n if post_id == -1:\n posts = Post.objects.all().order_by(\"-date\")[:PAGE_OFFSET]\n elif type_ == 'old':\n posts = Post.objects.filter(pk__lt=post_id).order_by(\"-date\")[:PAGE_OFFSET]\n else: # 'new'\n posts = reversed(Post.objects.filter(pk__gt=post_id).order_by(\"date\")[:PAGE_OFFSET])\n\n serializer = PostSerializer(posts, many=True, context={'user_id': token.user_id})\n return Response({\"success\": 29,\n \"post\": serializer.data})\n else:\n return Response({\"error\": 17})", "def post(self):\n type_model = request.json\n\n type_model = namedtuple(\"Type\", type_model.keys())(*type_model.values())\n type_model = models.Type.Type(\n id=None,\n value=type_model.value,\n description=type_model.description)\n\n repository = TypeRepository(\n FLASK_APP.config[\"DBUSER\"],\n FLASK_APP.config[\"DBPASS\"],\n FLASK_APP.config[\"DBHOST\"],\n FLASK_APP.config[\"DBPORT\"],\n FLASK_APP.config[\"DBNAME\"])\n\n try:\n if not type_model.value:\n raise Exception('value field from type model not defined')\n type_model = repository.create(type_model)\n Logger.Logger.create(FLASK_APP.config[\"ELASTICURL\"],\n 'Informative',\n 'Type sucessfuly created',\n 'post()',\n str(type.__dict__),\n FLASK_APP.config[\"TYPE\"])\n return self.okResponse(\n response=type_model,\n message=\"Type sucessfuly created.\",\n status=201), 200\n except Exception as err:\n Logger.Logger.create(FLASK_APP.config[\"ELASTICURL\"],\n 'Error',\n 'Internal server error',\n 'post()',\n str(err),\n FLASK_APP.config[\"TYPE\"])\n return self.okResponse(\n response=err,\n message=\"Internal server error: \"+str(err),\n status=500)", "def process_posts_and_pages(*, posts, pages, settings):\n for post in posts:\n post.test_attr = 'post'\n for page in pages:\n page.test_attr = 'page'\n return {'posts': posts, 'pages': pages}", "def all_query() -> list:\n data = []\n posts = Posts.query.all()\n for post in posts:\n x = {\n \"title\": 
post.title,\n \"body\": post.body,\n \"timestamp\": post.timestamp,\n \"id\": post.id,\n \"url\": make_url_from_title(post.title),\n }\n data.append(x)\n return data", "async def fetch_posts(self) -> None:\n\n async def fetch_posts_for_offset(offset) -> list:\n logger.info(\n \"(offset %i) Start fetching posts from vk.com/%s...\",\n offset,\n self.vk_domain,\n )\n\n # VK Script code for /execute method.\n vks_code = get_wall_post_template.substitute(\n {\n \"domain\": self.vk_domain,\n \"offset\": offset,\n \"posts_per_portion\": self._posts_per_portion,\n \"execution_times\": self._execution_times,\n }\n )\n params = {\n \"v\": settings.VKAPI_VERSION,\n \"access_token\": settings.VKAPI_TOKEN,\n \"code\": vks_code,\n }\n url = self._url_execute\n\n # Posts fetching.\n resp_json = await vk_asynchronous_request(\n url,\n params,\n domain=self.vk_domain,\n offset=offset,\n )\n\n logger.info(\n \"(offset %i) End fetching posts from vk.com/%s...\",\n offset,\n self.vk_domain,\n )\n\n # Gathered posts handling.\n posts_from_vk = resp_json[\"response\"][\"items\"]\n posts = posts_as_schemas(posts_from_vk)\n del posts_from_vk\n return posts\n\n # Checks and preparations.\n await self._set_total_posts_in_domain()\n if not self._total_posts_in_domain:\n return\n\n # Creating tasks for fetching.\n tasks = []\n posts_per_task = self._posts_per_portion * self._execution_times\n offsets = list(range(0, self._total_posts_in_domain, posts_per_task))\n for offset in offsets:\n tasks.append(asyncio.create_task(fetch_posts_for_offset(offset)))\n\n # Running tasks.\n logger.info(\"Start fetching posts from vk.com/%s...\", self.vk_domain)\n results = await asyncio.gather(*tasks)\n logger.info(\"End fetching posts from vk.com/%s...\", self.vk_domain)\n\n # Flatting results from many tasks into one list.\n self._posts = [post for result in results for post in result]\n\n # Final actions.\n if self.sort_by_likes:\n self._posts = list(sorted(self.posts, key=lambda p: p.likes, reverse=True))\n if self.amount_to_fetch:\n self._posts = self._posts[: self.amount_to_fetch]", "def get_all_posts(self, *fields):\n if fields:\n posts = self.collection.find(projection=fields)\n else:\n posts = self.collection.find()\n\n for post in posts.sort('created_datetime', -1):\n yield BlogPost(\n title=post['title'],\n content=post['content'],\n created_datetime=post['created_datetime']\n )", "def prep_data(data: list):\n book = {\n 'title': data['title'],\n 'authors': [],\n 'categories': []\n }\n try:\n for author in data['authors']:\n author_obj, created = Author.objects.get_or_create(name=author)\n book['authors'].append(author_obj.id)\n except KeyError:\n pass\n try:\n for category in data['categories']:\n category_obj, created = Category.objects.get_or_create(name=category)\n book['categories'].append(category_obj.id)\n except KeyError:\n pass\n book['published_date'] = data.get('publishedDate', None)\n book['average_rating'] = data.get('averageRating', None)\n book['ratings_count'] = data.get('ratingsCount', None)\n try:\n book['thumbnail'] = data['imageLinks']['thumbnail']\n except KeyError:\n book['thumbnail'] = None\n return book", "def test_createPost(self):\n\t\tself.client.force_authenticate(user=User.objects.get(id=1))\n\t\turl = \"/posts/\"\n\t\tdata = {\n\t\t\t'text' : 'Vivaldi',\n\t\t\t'group': 3\n\n\t\t}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tself.assertEqual(response.data[\"id\"], 
4)\n\t\tself.assertEqual(response.data[\"text\"], 'Vivaldi')\n\t\tself.assertEqual(Post.objects.count(), 4)", "def insert_post(data, db_conn):\n\n schema = get_post_schema(data)\n data, errors = insert_document(schema, data, db_conn)\n if not errors:\n add_post_to_es(data, db_conn)\n return data, errors", "def setUpTestData(cls):\n cls.post = PostFactory()", "def post(self, body):\n return self.objects.create(body)", "def create_x_posts(x, subject):\n posts = []\n for counter, value in enumerate(\"abcdefghijklmnopqrstuvwxyz\"):\n posts.append(Post.create(subject=subject, title=(value*10), body=(value*100)))\n posts[counter].save()", "def get_public_post_dict(post, user):\n post_dict = post.get_public_dict()\n post_dict['pubdate'] = post.pubdate.strftime(app.config['POST_DATETIME_FORMAT'])\n post_dict['lastmoddate'] = post.lastmoddate.strftime(app.config['POST_DATETIME_FORMAT'])\n post_dict['username'] = user.username\n\n return post_dict", "def get_posts():\n\n error_on_unauthorized()\n \n posts = Post.query.order_by(Post.id)\n total_num = posts.count()\n\n if total_num == 0:\n return jsonify(total=0, uploads=[])\n\n try:\n count = int(request.args.get('max', total_num))\n page = int(request.args.get('page', 1))\n origin = request.args.get('origin', None)\n\n if count <= 0 or page <= 0:\n raise APIError(422, \"Query parameters out of range\")\n\n if origin is not None:\n posts = posts.filter(User.origin == origin)\n\n begin = (page - 1) * count\n end = min(begin + count, total_num)\n \n return jsonify(total=total_num, posts=[p.to_dict() for p in posts.all()[begin:end]]), 200\n except ValueError:\n raise APIError(422, \"Invalid query parameter\")", "def __init__(self, **kwargs):\n self.postKey = kwargs.get(\"postKey\")\n self.location = kwargs.get(\"location\")\n self.category = kwargs.get(\"category\")\n self.source = kwargs.get(\"source\")\n self.heading = kwargs.get(\"heading\")\n self.body = kwargs.get(\"body\")\n self.latitude = kwargs.get(\"latitude\")\n self.longitude = kwargs.get(\"longitude\")\n self.language = kwargs.get(\"language\")\n self.price = kwargs.get(\"price\")\n self.currency = kwargs.get(\"currency\")\n self.images = kwargs.get(\"images\", [])\n self.externalID = kwargs.get(\"externalID\")\n self.externalURL = kwargs.get(\"externalURL\")\n self.accountName = kwargs.get(\"accountName\")\n self.accountID = kwargs.get(\"accountID\")\n self.timestamp = kwargs.get(\"timestamp\")\n self.expiration = kwargs.get(\"expiration\")\n self.annotations = kwargs.get(\"annotations\", {})\n self.trustedAnnotations = kwargs.get(\"trustedAnnotations\", {})\n self.clickCount = kwargs.get(\"clickCount\")", "def _CreateMessageSchema(\n self,\n descriptor: Descriptor,\n visiting: Set[str],\n ) -> None:\n if self.schema_objs is None: # Check required by mypy.\n raise AssertionError(\"OpenAPI type schemas not initialized.\")\n\n type_name = _GetTypeName(descriptor)\n\n properties = dict()\n visiting.add(type_name)\n\n # Create schemas for the fields' types.\n for field_descriptor in descriptor.fields:\n self._CreateSchema(field_descriptor, visiting)\n field_name = casing.SnakeToCamel(field_descriptor.name)\n\n properties[field_name] = self._GetDescribedSchema(field_descriptor)\n\n visiting.remove(type_name)\n\n self.schema_objs[type_name] = cast(MessageSchema, {\n \"type\": \"object\",\n \"properties\": properties,\n })", "def post_create(faker_obj, profile_obj, tag_list, num=3):\n for i in range(num):\n obj = faker_obj\n title = obj.sentence(nb_words=random.randint(5, 10))\n author = 
User.objects.get(id=profile_obj)\n body = \" \".join(obj.paragraphs(nb=random.randint(8, 20)))\n status = \"published\"\n post = Post.objects.create(title=title, author=author, body=body, status=status)\n post.tags.add(\", \".join(random.sample(tag_list, 1)))\n print(\n \"Created post title:'{}' for user '{}'\".format(post.title, author.username)\n )\n create_comment_list(obj, post)", "def new_post(mkp_form, request):\n newpost = Posts()\n newpost.init()\n newpost.authorid = int(request.user.id)\n newpost.title = mkp_form.cleaned_data['title']\n newpost.name = mkp_form.cleaned_data['short_title'] # 缩略名\n newpost.cover = mkp_form.cleaned_data['cover_url']\n newpost.introduction = mkp_form.cleaned_data['introduction']\n newpost.content = js_resize_img(mkp_form.cleaned_data['content'])\n newpost.status = Status.objects.get(id=2) # id为2是已发布的文章,默认为已发布,后面再改\n tagids = mkp_form.cleaned_data['tags']\n if len(tagids) != 0:\n for tagid in tagids:\n tagid = int(tagid)\n tag = Tags.objects.get(id=tagid)\n newpost.tags.add(tag)\n threadtypeid = mkp_form.cleaned_data['threadtypeid']\n newpost.threadtypeid = ThreadTypes.objects.get(id=threadtypeid)\n if mkp_form.cleaned_data['commentnotshow'] != '':\n newpost.comment_status = False\n else:\n newpost.comment_status = True\n return newpost", "async def create_post(self, community: Community, post_id) -> w_Post:\n post_url = self._api_communities_url + str(community.id) + '/posts/' + str(post_id)\n async with self.web_session.get(post_url, headers=self._headers) as resp:\n if self.check_status(resp.status, post_url):\n data = await resp.json()\n return (create_post_objects([data], community, new=True))[0]", "def setUpTestData(cls):\n cls.post = PostFactory(\n author__first_name='Peter',\n author__last_name='Mustermann',\n title='My test title',\n subtitle='A subtitle for the test post',\n views=10,\n last_viewed=(timezone.now() - datetime.timedelta(days=1)),\n is_active=True,\n activation_date=None\n )", "def outputPostFormatter(post):\n\n post[\"expiry\"] = to_decimal(post[\"expiry\"])\n post[\"sent\"] = to_decimal(post[\"sent\"])\n post[\"ttl\"] = to_decimal(post[\"ttl\"])\n post[\"workProved\"] = to_decimal(post[\"workProved\"])\n\n if not post.get(\"topics\"):\n post[\"topics\"] = []\n\n post[\"topics\"] = [decode_hex(topic) for topic in post[\"topics\"]]\n\n return post", "def post(self):\n title = self.request.get(\"title\")\n body = self.request.get(\"body\")\n\n if title and body:\n\n # create a new Post object and store it in the database\n post = Post(\n title=title,\n body=body\n )\n post.put()\n\n # get the id of the new post, so we can render the post's page (via the permalink)\n id = post.key().id()\n self.redirect(\"/blog/%s\" % id)\n else:\n error = \"we need both a title and a body!\"\n #self.render_form(title, body, error)\n self.render(\"newpost.html\", title, body, error)", "def rebuild_from_yaml(args):\n\n git_checkout_branch('gh-pages')\n\n posts = []\n for fname in glob('_posts/*.html'):\n with codecs.open(fname, 'r', 'utf-8') as f:\n c = f.read()\n # we only want the yaml frontmatter\n start = c.index('---') + 3\n end = c.rindex('---')\n frontmatter = yaml.safe_load(c[start:end])\n\n posts.append(Post(**frontmatter['api_data']['post']))\n\n _write_out(posts, yaml=False, supporting=True)", "def post_import():\n\n validate_request_json(request)\n\n releases = []\n for r in request.json:\n # Get the platform, create if it doesn't exist\n platforms = []\n for p in r['platforms']:\n try:\n query = 
db.session.query(Platform).filter(Platform.name == p)\n platform = query.one()\n except exc.NoResultFound:\n app.logger.info(\"Creating platform {}\".format(p))\n platform = Platform(p)\n db.session.add(platform)\n platforms.append(platform)\n\n release = Release(\n platforms=platforms,\n user=r['user'],\n team=r.get('team'),\n references=json.dumps(r.get('references')),\n )\n\n release.stime = arrow.get(r['stime']) if r.get('stime') else None\n release.ftime = arrow.get(r['ftime']) if r.get('ftime') else None\n if release.ftime and release.stime:\n release.duration = release.ftime - release.stime\n\n notes = r.get('notes')\n if notes:\n for n in notes:\n note = ReleaseNote(release.id, n)\n db.session.add(note)\n\n for p in r['packages']:\n package = Package(\n release_id=release.id,\n name=p['name'],\n version=p['version'],\n )\n\n package.rollback = p.get('rollback')\n package.status = p.get('status')\n package.diff_url = p.get('diff_url')\n\n if p.get('stime'):\n package.stime = arrow.get(p['stime'])\n else:\n package.stime = arrow.get(r['stime'])\n package.ftime = arrow.get(p['ftime']) if p.get('ftime') else None\n if package.stime and package.ftime:\n package.duration = package.ftime - package.stime\n\n db.session.add(package)\n\n db.session.add(release)\n db.session.commit()\n\n releases.append(release.id)\n\n return jsonify({'releases': [str(x) for x in releases]}), 200", "def create_multiple_posts(author, num, ptext = TEXT, visibility = ACL_DEFAULT):\n posts = []\n\n for i in range(num):\n posts.append(Post.objects.create(content = ptext, author = author, visibility=visibility))\n\n return posts", "def post(self):\n s = ScuttlebuttService()\n try:\n feed_dict = simplejson.loads(self.request.body)\n feed = s.CreateFeed(feed_dict)\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(simplejson.dumps(feed.ToDict()))\n except simplejson.JSONDecodeError:\n # HTTP 400 for bad syntax.\n self.response.set_status(\n 400, 'Failed to create source. Invalid JSON: %s' % self.request.body)\n except Exception, e:\n # HTTP 422 for syntactically correct but semantically wrong.\n self.response.set_status(422, 'Error creating source: %s' % e)", "def _get_postings(self):\n raise NotImplementedError" ]
[ "0.6354075", "0.6284115", "0.6081727", "0.59250623", "0.57985914", "0.57881165", "0.56837225", "0.5657414", "0.56563574", "0.56495565", "0.56208336", "0.56018806", "0.55971843", "0.55958605", "0.557369", "0.55589396", "0.5542766", "0.5523133", "0.5506989", "0.5501767", "0.5500599", "0.5497456", "0.54942805", "0.54731107", "0.5471995", "0.5455074", "0.54547936", "0.545357", "0.54466695", "0.5437523", "0.5424089", "0.541084", "0.5406836", "0.5406808", "0.53866774", "0.5374672", "0.5367282", "0.5347335", "0.5343064", "0.5328213", "0.5313596", "0.53093606", "0.52947575", "0.5287844", "0.525698", "0.52549106", "0.5232963", "0.5219186", "0.5218238", "0.5204147", "0.5143977", "0.51396716", "0.5135181", "0.51350695", "0.5133879", "0.5123325", "0.5119715", "0.5104658", "0.5101323", "0.5080733", "0.5071256", "0.506001", "0.5056548", "0.5054338", "0.5054338", "0.50541586", "0.5052365", "0.50431937", "0.5035759", "0.5031493", "0.5022783", "0.5005996", "0.4980719", "0.49755532", "0.49712104", "0.49600905", "0.4957914", "0.49543875", "0.49494565", "0.49461767", "0.49362648", "0.49354625", "0.49340183", "0.49285316", "0.49122196", "0.49053556", "0.48867527", "0.4881209", "0.4868862", "0.48666656", "0.48476252", "0.48455262", "0.4845202", "0.483485", "0.48342332", "0.4820885", "0.48134056", "0.48099184", "0.48059404", "0.48031557" ]
0.7757099
0
Builds Enter Query Sequence up to Job Title
def buildEnter(self): ttk.Label(self, text='Enter accession number(s), gi(s), or FASTA sequence(s)', font=('Arial', '12', 'bold')).grid(row = self.ROW , column = 1, columnspan=4, sticky ='w') self.clear_button = tk.Button(self, text='Clear', font=('Arial', '9', 'underline'),command = (lambda view = self: self.controller.clear_query(view))) self.clear_button.grid(row = self.ROW, column =5, sticky = 'E') ttk.Label(self, text = 'Subrange:', font=('Arial', '12', 'bold', 'underline') ).grid(row = self.ROW, column = 6, columnspan = 2, sticky = 'E') self.ROW += 1 self.query_box = scrolledtext.ScrolledText(self, width = 70, height = 7, wrap=tk.CHAR) self.query_box.grid(row = self.ROW, column = 1, rowspan = 6, columnspan = 5) self.model_vars['textbox'] = self.query_box #Event generated only refers to scrolledtext need a reference to load_query_button self.query_box.bind('<Key>', lambda event, view = self : self.controller.disable_upload_button(event, view)) tk.Label(self, text = 'From:').grid(row = self.ROW, column = 6, sticky = 'E') self.query_from = ttk.Entry(self, textvariable = self.model_vars['from'], font=('Arial', 10), width = 15) self.query_from.grid(row = self.ROW, column = 7, columnspan = 2, sticky = 'W') self.ROW+=2 tk.Label(self, text = 'To:').grid(row = self.ROW, column = 6, sticky = 'E') self.query_to = tk.Entry(self, textvariable = self.model_vars['to'], font=('Arial', 10), width = 15) self.query_to.grid(row = self.ROW, column = 7, columnspan =2 , sticky = 'W') self.ROW+=5 #There are objects that inherit from this one that will need to know this value for genetic code widget self.upload_file_row = self.ROW ttk.Label(self, text ='Or, Upload File:', font=('Arial', 10, 'bold')).grid(row = self.ROW, column=1, sticky = 'E') self.load_query_button = ttk.Button(self, text='Choose File', command = (lambda view = self: self.controller.load_handler(view))) self.load_query_button.grid(row = self.ROW, column = 2) self.load_status = ttk.Label(self, text='No file chosen', font=('Arial', '10')) self.load_status.grid(row = self.ROW , column = 3, columnspan = 7, sticky = 'W')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def seq_query():\n query_type = input(\n '1.Specific fragment\\n'\n '2.Specific Organism\\n'\n '3.Specific gene\\n'\n '4.All\\n'\n '5.All cds\\n'\n )\n organize = input('Organize output?(y/n)\\n')\n if query_type not in ['1', '2', '3', '4', '5']:\n raise ValueError('wrong input!\\n')\n con = sqlite3.connect('./data/DB')\n cur = con.cursor()\n if query_type == '1':\n organism = input('Organism:\\n')\n gene = input('Gene:\\n')\n frag_type = input('Fragment type(gene, cds, rRNA, tRNA, exon, intron, spacer):\\n')\n cur.execute(\n 'SELECT Taxon, Organism, Name, Type, Strand, Sequence FROM main WHERE Name LIKE ? AND Type = ? AND Organism=?',\n ('%' + gene + '%', frag_type, organism))\n result = cur.fetchall()\n elif query_type == '2':\n organism = input('Organism:\\n')\n frag_type = input('Fragment type(gene, cds, rRNA, tRNA, exon, intron, spacer, whole, fragments):\\n')\n if frag_type == 'fragments':\n cur.execute(\n 'SELECT Taxon, Organism, Name, Type, Strand, Sequence, Head FROM main WHERE Organism = ? ORDER BY Head',\n (organism,))\n else:\n cur.execute(\n 'SELECT Taxon, Organism, Name, Type, Strand, Sequence, Head FROM main WHERE Organism LIKE ? AND Type = ? ORDER BY Head',\n ('%' + organism + '%', frag_type))\n result = cur.fetchall()\n elif query_type == '3':\n gene = input('Gene:\\n')\n frag_type = input('Fragment type(gene, cds, rRNA, tRNA, exon, intron, spacer):\\n')\n cur.execute(\n 'SELECT Taxon, Organism, Name, Type, Strand, Sequence FROM main WHERE Name LIKE ? AND Type = ? ORDER BY Taxon',\n ('%' + gene + '%', frag_type))\n result = cur.fetchall()\n elif query_type == '4':\n cur.execute('SELECT Taxon, Organism, Name, Type, Strand, Sequence, Head FROM main ORDER BY Taxon')\n result = cur.fetchall()\n elif query_type == '5':\n cur.execute(\n 'SELECT Taxon, Organism, Name, Type, Strand, Sequence, Head FROM main WHERE type = \"cds\" ORDER BY Taxon')\n result = cur.fetchall()\n\n query_result = []\n for i in result:\n title = '{0}|{1}|{2}|{3}'.format(i[0], i[1], i[2], i[3])\n sequence = MutableSeq(i[5])\n gene = i[2]\n if i[4] == '-1':\n sequence.seq = sequence.reverse_complement()\n record = [title, gene, sequence]\n query_result.append(record)\n\n if organize == 'y':\n if not exists('output'):\n makedirs('output')\n for i in query_result:\n file_name = 'output/{0}.fasta'.format(i[1].replace('/', ''))\n with open(file_name, 'a') as output_file:\n output_file.write('>{0}\\n{1}\\n'.format(i[0], i[2]))\n else:\n output = input('Enter output filename:\\n')\n with open('{0}.fasta'.format(output), 'w') as output_file:\n for i in query_result:\n output_file.write('>{0}\\n{1}\\n'.format(i[0], i[2]))\n\n cur.close()\n con.close()\n print('Done.\\n')", "def test_job_title(self):\n inv_search = 'title:engineer not title:programmer'\n spi_search = 'find job engineer not position programmer'\n self._compare_searches(inv_search, spi_search)", "def get_job_url(what = WHAT, where = WHERE, start = 0):\n\treturn 'https://www.indeed.co.uk/jobs?q=' + what.replace(' ', '+') + '&l=' + where.replace(' ', '+') + '&start=' + str(start)", "def genRunEntryStr(queryId, docId, rank, score, runId):\n return f'{queryId} Q0 {docId} {rank} {score} {runId}'", "def job_title(self, job):\n def _format_num(num):\n if isinstance(num, bool):\n return str(num)\n elif isinstance(num, Real):\n return str(round(num, 2))\n return str(num)\n\n try:\n s = []\n for keys in sorted(self._schema_variables()):\n v = job.statepoint()[keys[0]]\n try:\n for key in keys[1:]:\n v = v[key]\n except KeyError: # Particular key is present in 
overall\n continue # schema, but not this state point.\n else:\n s.append('{}={}'.format('.'.join(keys), _format_num(v)))\n return ' '.join(s)\n except Exception as error:\n logger.debug(\n \"Error while generating job title: '{}'. \"\n \"Returning job-id as fallback.\".format(error))\n return str(job)", "def work(self):\n return \"{0} {1}\".format(super().work()[:-1], \"and start programming.\")", "def get_job_title(self, job_name):\n return ''", "def expand(self, pbegin):\n return (\n pbegin\n | 'Reading %r from the datastore' % self.query >> (\n self.datastoreio.ReadFromDatastore(\n job_utils.get_beam_query_from_ndb_query(self.query)))\n | 'Transforming %r into NDB models' % self.query >> (\n beam.Map(job_utils.get_ndb_model_from_beam_entity))\n )", "def start_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code009 from ChallengeDefinition #\",self.ID, sep='')", "def _get_next_build_sequence_id(self):\n self._build_sequence += 1\n return \"{:0>4}\".format(self._build_sequence)", "def get_stack_overflow_jobs(cursor: sqlite3.Cursor):\n cursor.execute('''DELETE FROM s_jobs''') # Scrub previous results to start over\n url = f\"https://stackoverflow.com/jobs/feed\"\n feed = feedparser.parse(url)\n\n for jobs in feed.entries:\n date = \"(%d/%02d/%02d)\" % (jobs.published_parsed.tm_year, jobs.published_parsed.tm_mon,\n jobs.published_parsed.tm_mday) # Format date entries to be uniform\n title = jobs.title\n location = title[title.rfind(\"(\")+1:title.rfind(\")\")] # Clips location data nested in title field\n\n cursor.execute(f\"\"\"INSERT INTO s_jobs(id, author, link, location, date, summary, title) VALUES\n (?,?,?,?,?,?,?)\"\"\", (jobs.id, jobs.author, jobs.link, location, date, jobs.summary, jobs.title))", "def build_step(self):\n\n pass", "def run_sql_query(self, query_string, tablename=None, queue=None,\n mail=None, text=None, cache=True):\n\n self._existing_tables()\n\n if not queue:\n queue = 'short'\n\n if tablename in self.table_dict.values():\n result = self._request('POST',\n CosmoSim.QUERY_URL,\n auth=(self.username, self.password),\n data={'query': query_string, 'phase': 'run',\n 'queue': queue},\n cache=cache)\n soup = BeautifulSoup(result.content, \"lxml\")\n phase = soup.find(\"uws:phase\").string\n if phase in ['ERROR']:\n warnings.warn(\"No table was generated for job with phase \"\n \"`{0}`\".format(phase))\n gen_tablename = \"{0}\".format(phase)\n else:\n gen_tablename = str(soup.find(id=\"table\").string)\n log.warning(\"Table name {0} is already taken.\"\n .format(tablename))\n warnings.warn(\"Generated table name: {0}\".format(gen_tablename))\n elif tablename is None:\n result = self._request('POST', CosmoSim.QUERY_URL,\n auth=(self.username, self.password),\n data={'query': query_string, 'phase': 'run',\n 'queue': queue},\n cache=cache)\n else:\n result = self._request('POST', CosmoSim.QUERY_URL,\n auth=(self.username, self.password),\n data={'query': query_string,\n 'table': str(tablename),\n 'phase': 'run', 'queue': queue},\n cache=cache)\n self._existing_tables()\n\n soup = BeautifulSoup(result.content, \"lxml\")\n self.current_job = str(soup.find(\"uws:jobref\")[\"id\"])\n warnings.warn(\"Job created: {}\".format(self.current_job))\n\n if mail or text:\n self._initialize_alerting(self.current_job, mail=mail, text=text)\n\n return self.current_job", "def build_step(self):\n pass", "def build_step(self):\n pass", "def format_print_jobs(intent):\n print \"\\nintentName: %s\" %(intent['name'])\n for k,v in 
intent.iteritems():\n if k <> 'name':\n print \"\\t\" + str(k) + \": \" + str(v)", "def job_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"job_name\")", "def enter(self,\r\n ek=None,\r\n et=EMPTYCHAR,\r\n em=None,\r\n query=True,\r\n not_parsing=True,\r\n show=False,\r\n right_at=False,\r\n as_child=False,\r\n ind=Index(0),\r\n re_entering=False,\r\n returnnote=False,\r\n carrying_keys=True,\r\n usedefaultkeys=True,\r\n poetic=False):\r\n\r\n\r\n projects_old = list(self.project)\r\n returnquit_setting = self.defaults.get('returnquiton')\r\n text_fed_in = False\r\n if et:\r\n self.defaults.set('returnquiton',False)\r\n text_fed_in = True\r\n\r\n\r\n\r\n\r\n def no_arrows(x_temp):\r\n\r\n \"\"\"replaces arrow with equal sign---\r\n used for keys with ontological information\r\n \"\"\"\r\n return x_temp.replace(RIGHTNOTE, EQUAL)\r\n\r\n def order_sequence_keys(key):\r\n\r\n \"\"\" arranges the sequence keys in the order that they should\r\n be queried.\r\n \"\"\"\r\n\r\n if QUESTIONMARK not in key:\r\n return 300\r\n elif key not in presets.keymacro_order:\r\n return 200\r\n else:\r\n return presets.keymacro_order.index(key)\r\n\r\n\r\n\r\n def get_keys_from_projects():\r\n\r\n \"\"\" fetches sequence keys from existing projects\r\n \"\"\"\r\n\r\n returnkeys = set()\r\n for project in (self.project*self.suspend_default_keys) + self.temp_projects:\r\n if project in self.default_dict['projects'].get_all_projects():\r\n returnkeys = returnkeys.union(set(self.default_dict['projects']\r\n .get_default_keys(project=project)))\r\n return returnkeys\r\n\r\n def query_keys(keysetobject=None):\r\n\r\n self.tutor.show('KEYWORDS')\r\n\r\n\r\n key_text = input(queries.KEYS)\r\n if self.use_alphabets:\r\n key_text = self.alphabet_manager.interpret(key_text)\r\n\r\n for k_temp in check_hyperlinks(self.default_dict['abbreviations'].undo(key_text).split(COMMA),\r\n display=display,notebook=notebook):\r\n if isinstance(k_temp,str) and len(k_temp) > 0:\r\n if k_temp[0] == DOLLAR:\r\n keysetobject.update(self.default_dict['keymacros'].get_definition(k_temp[1:]))\r\n elif k_temp[0] == PLUS and k_temp[1:] in self.default_dict['projects'].get_all_projects():\r\n # to add a project\r\n self.project.append(k_temp[1:])\r\n elif k_temp[0] == DASH and k_temp[1:] in self.project:\r\n # to remove a project\r\n self.project.pop(self.project.index(k_temp[1:]))\r\n\r\n else:\r\n if k_temp.endswith('.'):\r\n k_temp = self.keyauto.complete(k_temp.rstrip('.'))\r\n keysetobject.add(k_temp)\r\n self.keyauto.add(k_temp)\r\n\r\n def sequence_keys(keysetobject=None):\r\n\r\n \"\"\" Queries the sequence keys with question marks in them\r\n and gets other keys from projects\r\n \"\"\"\r\n\r\n if self.suspended_sequences:\r\n print('SUSPENDED: '+', '.join(self.suspended_sequences))\r\n\r\n for k_temp in sorted(usedefaultkeys*(self.defaults.get('defaultkeys'))\r\n +list(get_keys_from_projects())\r\n +list(keysetobject),\r\n key=lambda x:order_sequence_keys(x)):\r\n if (not et\r\n and ATSIGN in k_temp\r\n and QUESTIONMARK in k_temp) and not k_temp in self.suspended_sequences:\r\n #for sequence keywords with a question mark\r\n satisfied=False\r\n while satisfied==False:\r\n\r\n def remove_final_slash (x):\r\n\r\n if x.endswith('/'):\r\n return x[0:-1]\r\n else:\r\n return x\r\n\r\n\r\n xt_temp = input(k_temp.split(QUESTIONMARK)[0]+self.default_dict['abbreviations'].undo(self.lastsequencevalue.show(k_temp))+QUESTIONMARK)\r\n\r\n\r\n self.lastsequencevalue.change(k_temp,remove_final_slash(xt_temp))\r\n if xt_temp != '/' and 
xt_temp.endswith('/') and xt_temp.count('/')%2 != 0:\r\n self.suspended_sequences.add(k_temp)\r\n self.defaults.set('defaultkeys',\r\n self.defaults.get('defaultkeys')\r\n +[k_temp.split('@')[0]+'@'+xt_temp[0:-1]])\r\n satisfied = True\r\n xt_temp = remove_final_slash(xt_temp)\r\n if xt_temp == '/':\r\n self.suspended_sequences.add(k_temp)\r\n satisfied = True\r\n elif xt_temp == ' ' and self.lastsequencevalue.show(k_temp).strip():\r\n keysetobject.add(k_temp.split(QUESTIONMARK)[0]\r\n +self.default_dict['abbreviations'].undo(self.lastsequencevalue.show(k_temp)))\r\n\r\n satisfied = True\r\n elif xt_temp and not xt_temp.replace('+',''):\r\n keysetobject.add(k_temp.split(QUESTIONMARK)[0]\r\n +self.default_dict['abbreviations'].undo(self.lastsequencevalue.show(k_temp)))\r\n satisfied = True\r\n\r\n\r\n\r\n else:\r\n for x_temp in [y.strip() for y in xt_temp.split(COMMA)]:\r\n x_temp = self.default_dict['abbreviations'].undo(x_temp)\r\n\r\n\r\n if not x_temp.strip():\r\n satisfied = True\r\n elif ATSIGN + POUND + QUESTIONMARK in k_temp: # for date sequences\r\n if (SLASH not in x_temp\r\n and is_date(x_temp)) or (x_temp.count(SLASH)==1\r\n and is_date(x_temp.split(SLASH)[0])\r\n and is_date(x_temp.split(SLASH)[1])):\r\n if SLASH not in x_temp:\r\n keysetobject.add(k_temp.replace(QUESTIONMARK,x_temp))\r\n satisfied = True\r\n elif x_temp.count(SLASH) == 1 and x_temp[-1] != SLASH:\r\n keysetobject.add(k_temp.replace(ATSIGN+POUND+QUESTIONMARK,\r\n 'from'+ATSIGN+POUND\r\n +x_temp.split(SLASH)[0]))\r\n keysetobject.add(k_temp.replace(ATSIGN+POUND+QUESTIONMARK,\r\n 'to'+ATSIGN+POUND\r\n +x_temp.split(SLASH)[1]))\r\n satisfied = True\r\n\r\n elif x_temp.replace(PERIOD,\r\n EMPTYCHAR).replace(DASH,\r\n EMPTYCHAR).replace(SLASH,\r\n EMPTYCHAR).isnumeric():\r\n #for indexes or floating sequences\r\n\r\n if ATSIGN + QUESTIONMARK in k_temp and 'page' not in k_temp: #for floating sequences\r\n if x_temp.count(PERIOD) <= 1 or (x_temp.count(DASH) ==1\r\n and x_temp.count(PERIOD) == 2):\r\n if DASH not in x_temp or (x_temp.count(DASH)==1\r\n and x_temp[0]==DASH):\r\n\r\n\r\n keysetobject.add(k_temp.replace(QUESTIONMARK,x_temp))\r\n satisfied = True\r\n\r\n elif x_temp.count(DASH) == 1 and x_temp[-1] != DASH and x_temp[0] != DASH:\r\n keysetobject.add(k_temp.replace(ATSIGN+QUESTIONMARK,\r\n 'from'+ATSIGN+x_temp.split(DASH)[0]))\r\n keysetobject.add(k_temp.replace(ATSIGN+QUESTIONMARK,\r\n 'to'+ATSIGN+x_temp.split(DASH)[1]))\r\n satisfied = True\r\n\r\n elif ATSIGN + QUESTIONMARK in k_temp and 'page' in k_temp:\r\n\r\n if DASH not in x_temp or (x_temp.count(DASH)==1\r\n and x_temp[0]==DASH):\r\n\r\n keysetobject.add(k_temp.replace(QUESTIONMARK,x_temp))\r\n satisfied = True\r\n\r\n elif x_temp.count(PERIOD) == 0 and x_temp.count(DASH) == 1:\r\n\r\n from_temp, to_temp = x_temp.split(DASH)[0],x_temp.split(DASH)[1]\r\n\r\n for val_temp in range(int(from_temp),int(to_temp)+1):\r\n\r\n keysetobject.add(k_temp.replace(QUESTIONMARK,str(val_temp)))\r\n satisfied = True\r\n\r\n\r\n\r\n elif ATSIGN + UNDERLINE + QUESTIONMARK in k_temp: # for index sequences\r\n if PERIOD+PERIOD not in x_temp\\\r\n and x_temp[0] != PERIOD and x_temp[-1] != PERIOD:\r\n if DASH not in x_temp or (x_temp.count(DASH)==1 and x_temp[0]==DASH):\r\n keysetobject.add(k_temp.replace(QUESTIONMARK,x_temp))\r\n satisfied = True\r\n elif x_temp.count(DASH) == 1 and x_temp[-1] != DASH and x_temp[0] != DASH:\r\n keysetobject.add(k_temp.replace(ATSIGN\r\n +UNDERLINE\r\n +QUESTIONMARK,\r\n 'from'\r\n +ATSIGN\r\n +UNDERLINE\r\n +x_temp.split(DASH)[0]))\r\n 
keysetobject.add(k_temp.replace(ATSIGN\r\n +UNDERLINE\r\n +QUESTIONMARK,\r\n 'to'+ATSIGN\r\n +UNDERLINE\r\n +x_temp.split(DASH)[1]))\r\n satisfied = True\r\n\r\n\r\n else: # for text sequences\r\n\r\n if x_temp.count(DASH) == 2 and DASH+DASH in x_temp and x_temp[-1] != DASH:\r\n keysetobject.add(k_temp.replace(ATSIGN+QUESTIONMARK,\r\n 'from'+ATSIGN+x_temp.split(DASH+DASH)[0]))\r\n keysetobject.add(k_temp.replace(ATSIGN+QUESTIONMARK,\r\n 'to'+ATSIGN+x_temp.split(DASH+DASH)[1]))\r\n satisfied = True\r\n else:\r\n keysetobject.add(k_temp.replace(QUESTIONMARK,x_temp))\r\n satisfied = True\r\n\r\n else:\r\n keysetobject.add(k_temp)\r\n\r\n\r\n def auto_sequence_keys(keysetobject=None):\r\n\r\n \"\"\"adds to the number for the automatic sequence keys\r\n \"\"\"\r\n\r\n if self.project*self.suspend_default_keys + self.temp_projects:\r\n for p_temp in self.project*self.suspend_default_keys + self.temp_projects:\r\n found_temp = False\r\n for x_temp in range(1,10000):\r\n if p_temp + ATSIGN + str(x_temp)+'.0' in self.keys():\r\n found_temp = True\r\n break\r\n if not found_temp:\r\n\r\n self.default_dict['sequences'].query(term1='#TYPE#',\r\n term2=p_temp,\r\n term3=float,\r\n action='set')\r\n self.default_dict['sequences'].query(term1=p_temp,\r\n action='initiate')\r\n\r\n next_temp = float(input('start from?'))\r\n else:\r\n next_temp = self.default_dict['sequences'].query(term1=p_temp,\r\n action='get').next()\r\n\r\n\r\n\r\n\r\n keysetobject.add(p_temp + ATSIGN + str(next_temp))\r\n\r\n #MAIN BODY OF FUNCTION BEGINS\r\n\r\n from_keys = True\r\n keyset = set()\r\n if not ek:\r\n from_keys = False\r\n\r\n\r\n if em is None:\r\n em = {}\r\n oldtext = EMPTYCHAR\r\n oldkeys = set()\r\n if self.entry_buffer: #If last entry was aborted\r\n if input(queries.RESUME_ABORTED_NOTE) in YESTERMS:\r\n #TO resume aborted note ...\r\n oldkeys = set(self.last_keys)\r\n if self.entry_buffer:\r\n oldtext = self.entry_buffer.dump()\r\n self.entry_buffer.clear()\r\n self.last_keys = set()\r\n else:\r\n self.entry_buffer.clear()\r\n\r\n if not from_keys:\r\n print('<<'+nformat.format_keys(usedefaultkeys*(self.defaults.get('defaultkeys')\r\n +list(get_keys_from_projects())))+'>>')\r\n\r\n\r\n\r\n\r\n if not from_keys and self.defaults.get('keysbefore')\\\r\n and not self.defaults.get('fromtext'):\r\n query_keys(keyset)\r\n\r\n\r\n elif from_keys:\r\n keyset = ek\r\n\r\n keyset.update(oldkeys)\r\n self.last_keys = set(keyset)\r\n\r\n if not et and not em and not ek:\r\n if (self.defaults.get('enterhelp')\r\n or self.defaults.get('formattinghelp')):\r\n display.noteprint((labels.ENTRYCOMMANDS,\r\n ENTERSCRIPT*self.defaults.get('enterhelp')\r\n +EOL+FORMATTINGSCRIPT*self.defaults.get('formattinghelp')),\r\n param_width=60,\r\n override=True)\r\n if not self.defaults.get('enterhelp'):\r\n self.tutor.show('ENTERING')\r\n if not self.defaults.get('formattinghelp'):\r\n self.tutor.show('FORMATTING')\r\n\r\n imp_list = []\r\n if et != EMPTYCHAR:\r\n imp_list = et.split(EOL)\r\n\r\n\r\n #split fed-in-text into lines\r\n\r\n poetry = False\r\n lastline = False\r\n editover = False\r\n text = EMPTYCHAR+oldtext\r\n counter = 1\r\n lasttext = EMPTYCHAR\r\n splitting = False\r\n returns_entered = 0\r\n poetrytoggled = poetic\r\n\r\n\r\n if et == EMPTYCHAR:\r\n print(POUND*7+self.defaults.get('size')*UNDERLINE+VERTLINE)\r\n\r\n\r\n # The following block of code enters\r\n # in new text for a note line by line\r\n\r\n while not lastline:\r\n## try:\r\n if imp_list:\r\n #otherwise, pops the next line from list\r\n #of lines from text 
that has been fed in\r\n t_temp = imp_list.pop(0)\r\n\r\n\r\n elif et == EMPTYCHAR:\r\n #asks for input if text has not been fed into the function\r\n t_temp = input('PO '*poetry\r\n +'PR '*(not poetry)\r\n +str(counter)+(4-len(str(counter)))*BLANK)\r\n if self.defaults.get('convertbyline'):\r\n if self.by_line.interpret(t_temp)[0]:\r\n keyset.update(self.by_line.interpret(t_temp)[0])\r\n t_temp = EMPTYCHAR\r\n\r\n else:\r\n lastline = True\r\n t_temp = EMPTYCHAR\r\n\r\n## except: pass\r\n\r\n if t_temp == PERCENTAGE+PERCENTAGE:\r\n # TO THE POETRY MODE WHICH INSERTS\r\n #A HARD RETURN AFTER EACH LINE\r\n poetry = True\r\n counter += 1\r\n text += EOL\r\n\r\n elif t_temp == PERCENTAGE:\r\n poetry = False #THE PROSE MODE\r\n\r\n else:\r\n if len(t_temp)>len(t_temp.lstrip()):\r\n t_temp = VERTLINE*(not poetry)\\\r\n + '_'*(len(t_temp)-len(t_temp.lstrip()))\\\r\n + t_temp.lstrip()\r\n if not poetry: # prosaic text entry mode\r\n\r\n counter += 1\r\n if t_temp == EMPTYCHAR or len(t_temp) < 2:\r\n # to automatically quit if there is a last line\r\n lasttext = text\r\n text += t_temp+EOL\r\n returns_entered += 1\r\n if self.defaults.get('returnquiton')\\\r\n and(len(text) > self.defaults.get('returnquit')\r\n and text[-self.defaults.get('returnquit')]\r\n == EOL*self.defaults.get('returnquit')):\r\n lastline = True\r\n\r\n elif (t_temp[-1] == PERIOD or (t_temp[-1:] == VERTLINE\r\n and (BLANK+t_temp)[-2] != VERTLINE)):\r\n at_temp = t_temp[:-1]+t_temp[-1].replace(VERTLINE, EMPTYCHAR)+EOL\r\n lasttext = text\r\n text += at_temp\r\n #if the entry line ends with |\r\n #or a period, then add a break.\r\n self.entry_buffer.append(at_temp)\r\n elif t_temp[-1] == TILDA: #to discard line\r\n counter -= 1\r\n elif t_temp[-1] == POUND: #to replace last line with new line\r\n text = EMPTYCHAR.join(text.split(EOL)[0:-2])\r\n at_temp = t_temp[:-1]+t_temp[-1].replace(POUND, EMPTYCHAR)+EOL\r\n lasttext = text\r\n text += at_temp\r\n elif t_temp[-1] == ATSIGN: #to replace entered line with new entry\r\n text = EMPTYCHAR.join(text.split(EOL)[0:-2])\r\n at_temp = t_temp[:-1]+t_temp[-1].replace(ATSIGN, EMPTYCHAR)+EOL\r\n lasttext = text\r\n text = lasttext + EOL + at_temp\r\n elif t_temp[-1] == DOLLAR: #to replace entered line with new entry but no EOL\r\n text = EMPTYCHAR.join(text.split(EOL)[0:-2])\r\n at_temp = t_temp[:-1]+t_temp[-1].replace(DOLLAR, EMPTYCHAR)+EOL\r\n lasttext = text\r\n text = lasttext + at_temp\r\n\r\n else:\r\n if len(t_temp) <= 1 or t_temp[-2] != VERTLINE:\r\n at_temp = (t_temp.replace\r\n (string.whitespace[1], BLANK)+BLANK)\r\n lasttext = text\r\n text += at_temp\r\n #if a simple return, then no break\r\n self.entry_buffer.append(at_temp)\r\n elif not text_fed_in and (len(t_temp) >2\r\n and t_temp[-3] == TILDA): #to edit\r\n at_temp = (t_temp[:-2].replace\r\n (string.whitespace[1:], BLANK)+EOL)\r\n lasttext = text\r\n text += at_temp\r\n # if || then finish entry\r\n self.entry_buffer.append(at_temp)\r\n lastline = True\r\n editover = True\r\n\r\n elif not text_fed_in and (len(t_temp) <3\r\n or t_temp[-3] != VERTLINE):\r\n at_temp = (t_temp[:-2].replace\r\n (string.whitespace[1:], BLANK)+EOL)\r\n lasttext = text\r\n text += at_temp\r\n # if || then finish entry\r\n self.entry_buffer.append(at_temp)\r\n lastline = True\r\n\r\n elif not text_fed_in:\r\n at_temp = (t_temp[:-2].replace\r\n (string.whitespace[1:],\r\n BLANK)+EOL)\r\n lasttext = text\r\n text += at_temp\r\n # if || then finish entry\r\n self.entry_buffer.append(at_temp)\r\n lastline = True\r\n splitting = True\r\n\r\n\r\n\r\n if 
poetry:\r\n poetrytoggled = True\r\n counter += 1\r\n if t_temp == EMPTYCHAR or len(t_temp) < 2:\r\n at_temp = t_temp+EOL\r\n lasttext = text\r\n text += at_temp\r\n self.entry_buffer.append(at_temp)\r\n\r\n elif t_temp[-1:] == VERTLINE and (BLANK+t_temp)[-2] != VERTLINE:\r\n at_temp = (t_temp.replace\r\n (string.whitespace[1:], BLANK)[:-1]+BLANK)\r\n lasttext = text\r\n text += at_temp\r\n self.entry_buffer.append(at_temp)\r\n\r\n else:\r\n if len(t_temp) <= 1 or t_temp[-2] != VERTLINE:\r\n at_temp = t_temp+EOL\r\n lasttext = text\r\n text += at_temp\r\n self.entry_buffer.append(at_temp)\r\n elif len(t_temp) <= 2 or t_temp[-3] != VERTLINE :\r\n at_temp = (t_temp[:-2].replace\r\n (string.whitespace[1:],\r\n BLANK)+EOL)\r\n lasttext = text\r\n text += at_temp\r\n self.entry_buffer.append(at_temp)\r\n lastline = True\r\n ##text = text.replace(VERTLINE, EOL+BLANK) WHY IS THIS HERE?\r\n else:\r\n at_temp = (t_temp[:-2].replace\r\n (string.whitespace[1:],\r\n BLANK)+EOL)\r\n lasttext = text\r\n text += at_temp\r\n self.entry_buffer.append(at_temp)\r\n lastline = True\r\n splitting = True\r\n if self.use_alphabets:\r\n text = self.alphabet_manager.interpret(text)\r\n\r\n if len(text) > 1 and text[-2:] == VERTLINE + VERTLINE:\r\n text = text[0:-2]\r\n\r\n if self.abridgedformat:\r\n text = text.replace('/*/*/','/NEW/')\r\n text = text.replace('/*/','/BREAK/')\r\n\r\n text = text.replace('/BREAK/',VERTLINE+'/BREAK/'+VERTLINE)\r\n text = text.replace('/NEW/',VERTLINE+'/NEW/'+VERTLINE)\r\n\r\n text = text.replace(VERTLINE, EOL)\r\n if splitting and '/M/' in text:\r\n text = '/SPLIT/'+EOL+text+EOL+'/ENDSPLIT/'\r\n if self.check_spelling:\r\n text, added = self.speller.checktext(text)\r\n self.default_dict['spelling'].update(added)\r\n\r\n\r\n if editover:\r\n text = textedit_new(text,notebookobject=self)\r\n text = reform_text(text)\r\n text = self.default_dict['abbreviations'].do(text)\r\n text = self.default_dict['macros'].do(text)\r\n self.dd_changed = True\r\n\r\n\r\n## else:\r\n## text = et\r\n\r\n knowledgephrases = [self.default_dict['abbreviations'].undo(x_temp)\r\n for x_temp in extract.extract(text,\r\n LEFTCURLY + LEFTCURLY,\r\n RIGHTCURLY + RIGHTCURLY)]\r\n\r\n # extract knowledge phrases embedded within text\r\n\r\n if query:\r\n\r\n for kp_temp in knowledgephrases:\r\n\r\n interpreted = self.default_dict['generalknowledge'].text_interpret(kp_temp)\r\n\r\n display.noteprint((interpreted[0],interpreted[1]))\r\n\r\n text = text.replace(LEFTCURLY+LEFTCURLY+kp_temp+RIGHTCURLY+RIGHTCURLY,\r\n LEFTCURLY+LEFTCURLY\r\n +interpreted[1].replace('\\n','; ').rstrip(';')\r\n +RIGHTCURLY+RIGHTCURLY)\r\n\r\n\r\n text = text.replace(LEFTCURLY + LEFTCURLY, '@@DCL@@')\r\n text = text.replace(RIGHTCURLY + RIGHTCURLY, '@@DCR@@')\r\n\r\n newkeylist = [self.default_dict['abbreviations'].undo(x_temp)\r\n for x_temp\r\n in extract.extract(text,\r\n LEFTCURLY,\r\n RIGHTCURLY)]\r\n text = text.replace('@@DCL@@',LEFTCURLY\r\n + LEFTCURLY)\r\n text = text.replace('@@DCR@@',RIGHTCURLY\r\n + RIGHTCURLY)\r\n\r\n\r\n #extract keywords embedded within text\r\n\r\n if query:\r\n #if query = True then ask if\r\n #the new keywords extracted from text are to be kept\r\n\r\n for a_temp in newkeylist:\r\n print(newkeylist.index(a_temp), EQUAL, a_temp)\r\n\r\n if newkeylist:\r\n it_temp = input(queries.NEW_KEY_LIST)\r\n if it_temp in NOTERMS:\r\n newkeylist = []\r\n if it_temp in YESTERMS+[BLANK]:\r\n pass\r\n else:\r\n newkeylist = [k_temp for k_temp in newkeylist\r\n if newkeylist.index(k_temp)\r\n in 
rangelist.range_set(it_temp)]\r\n display.noteprint((alerts.ATTENTION,\r\n ', '.join(newkeylist)\r\n + alerts.ADDED_TO_KEYLIST))\r\n\r\n\r\n keyset.update(set(newkeylist))\r\n## print(', '.join(keyset))\r\n #add new kewords to existing set of keywords\r\n\r\n\r\n\r\n\r\n\r\n\r\n if not from_keys and self.defaults.get('keysafter') and not self.defaults.get('fromtext'):\r\n query_keys(keyset)\r\n\r\n\r\n old_fromtext = self.defaults.get('fromtext') # Save old settings\r\n old_mode = self.defaults.get('convertmode')\r\n\r\n if '//' in text and '//' in text:\r\n mode = text.split('//')[1].split('//')[0]\r\n if mode in self.default_dict['convert']:\r\n self.defaults.set('convertmode',mode)\r\n display.noteprint(('MODE',self.defaults.get('convertmode')))\r\n self.defaults.set('fromtext',True)\r\n text = text.replace('//'+mode+'//',EMPTYCHAR)\r\n\r\n if self.defaults.get('fromtext') and not self.defaults.get('convertbyline'):\r\n conv_keys, text = self.default_dict['convert'][self.defaults\r\n .get('convertmode')].interpret(text)\r\n text = reform_text(text)\r\n text = self.default_dict['abbreviations'].do(text)\r\n text = self.default_dict['macros'].do(text)\r\n keyset.update(conv_keys)\r\n\r\n\r\n\r\n\r\n auto_sequence_keys(keysetobject=keyset)\r\n # calls function to add autonomatically numbered sequence keys\r\n sequence_keys(keysetobject=keyset)\r\n # calls function to evaluate sequence keys if they exist\r\n\r\n if (not from_keys\r\n # use old keys if new keys are not to be queried or taken from the text\r\n and not self.defaults.get('keysbefore')\r\n and not self.defaults.get('keysafter')):\r\n keyset.update(oldkeys)\r\n\r\n keyset = {k_temp for k_temp in keyset\r\n if len(k_temp) > 1\r\n and k_temp[-1]\r\n not in [QUESTIONMARK, POUND, ATSIGN, UNDERLINE]}\r\n keyset = modify_keys(keyset, no_arrows, strip=True)\r\n keyset = modify_keys(keyset, self.default_dict['macros'].do)\r\n\r\n\r\n\r\n if em == {}:\r\n if not poetrytoggled:\r\n metatext = {'user': self.defaults.get('user'),\r\n 'size': self.defaults.get('size'),\r\n 'date': [str(datetime.datetime.now())]}\r\n else:\r\n temp_size = max([len(x_temp)+20+(poetic*40) for x_temp in text.split(EOL)])\r\n metatext = {'user': self.defaults.get('user'),\r\n 'size': temp_size,\r\n 'date': [str(datetime.datetime.now())]}\r\n\r\n\r\n else:\r\n metatext = em\r\n if self.autobackup:\r\n self.update(keyset,\r\n self.default_dict['abbreviations'].undo(text),\r\n meta=metatext,\r\n right_at=right_at,\r\n as_child=as_child)\r\n #call autobackup\r\n\r\n if returnnote:\r\n\r\n return Note(keyset,\r\n self.default_dict['abbreviations'].undo(text),\r\n metatext)\r\n\r\n if (not self.defaults.get('overrideextract')\r\n and not_parsing\r\n and extract.embedded_extract(text)[2] > 0):\r\n\r\n #call parsing if there are embedded notes,\r\n #and it is not already in the middle of parsing\r\n\r\n next_index = Index(int(ind))+Index(1)\r\n self.textparse(self.default_dict['abbreviations'].undo(text),\r\n keys=keyset,\r\n newindex=next_index)\r\n index = self.addnew(keyset,\r\n extract.embedded_extract\r\n (self.default_dict['abbreviations'].undo(text),\r\n eliminate=True)[1],\r\n metadata=metatext,\r\n show=True,\r\n right_at=right_at,\r\n as_child=as_child,\r\n re_entering=re_entering,\r\n ind=ind,\r\n carrying_keys=carrying_keys)\r\n\r\n else:\r\n\r\n index = self.addnew(keyset,\r\n self.default_dict['abbreviations'].undo(text),\r\n metadata=metatext,\r\n show=show,\r\n right_at=right_at,\r\n as_child=as_child,\r\n ind=ind,\r\n re_entering=re_entering,\r\n 
carrying_keys=carrying_keys)\r\n\r\n self.entry_buffer.clear()\r\n self.last_keys = set()\r\n # restore old settings\r\n\r\n\r\n self.defaults.set('fromtext',old_fromtext)\r\n self.defaults.set('convertmode',old_mode)\r\n self.defaults.set('returnquiton',returnquit_setting)\r\n if isinstance(projects_old,list):\r\n self.project = projects_old\r\n\r\n\r\n return index", "def next_query(self):\n raise NotImplementedError()", "def q(self):\n self.qTable()", "def create_job(project, description):\n randomnames = open(os.path.join(\"Anemone\", \"templates\", \"namegen.html\")).readlines()\n jobname = (\"Quick.\" +\n random.choice(randomnames)[:-1] + # for some reason choice gives extra space\n random.choice(randomnames)[:-1]) # for some reason choice gives extra space\n\n newjob = Job.create(project=project, name=jobname, description=description)\n newjob.name = newjob.name + \".{0:0=3d}\".format(newjob.id)\n newjob.save()\n return newjob", "def describe_text_translation_job(JobId=None):\n pass", "def print_recreate_jobs_script():\n for job in job_records():\n cmd = job['command']\n when = job['timestamp'].strftime(AT_DATETIME_FORMAT)\n print('echo \"{}\" | at {}'.format(cmd, when))", "def start_job(planner, job_template, domain, problem):\n template_file = open(job_template, 'r')\n job_string = template_file.read()\\\n .replace('$DOMAIN', str(domain))\\\n .replace('$PROBLEM', str(problem))\\\n .replace('$LOWERDOMAIN', str(domain).lower())\\\n .replace('$LOWERPROBLEM', str(problem).lower())\\\n .replace('$PLANNER', planner)\n print(job_string)", "def addTitleQuery(self, titleQuery):\n if not (titleQuery in self.queries[\"ti\"]):\n self.queries[\"ti\"].append(titleQuery)", "def generate_query(self):\n return", "def BlastSeq_part(Query, Subject, OutPath, outname, BlastDir):\n MakeDir(OutPath)\n OutFile=OutPath+'/'+outname\n print (OutPath)\n errlog=open(OutPath+'/_err.log', 'a')\n column_spec='10 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue qcovs btop'\n BLAST=subprocess.Popen([BlastDir, '-query',Query, '-subject',Subject, '-outfmt', column_spec, '-out', OutFile], stderr=errlog)\n BLAST.communicate()\n errlog.close()\n return OutFile", "def main(orcid: str, open_browser: bool, upload: bool, batch_name: Optional[str]):\n lines = get_orcid_quickstatements(orcid)\n print(render_lines(lines, sep=\"\\t\", newline=\"\\n\"))\n print(lines_to_url(lines))\n if open_browser:\n lines_to_new_tab(lines)\n if upload or batch_name is not None:\n client = QuickStatementsClient()\n res = client.post(lines, batch_name=batch_name)\n click.echo(f\"Job posted to {res.batch_url}\")", "def gen_q_stmt(name, query):\n return \"query {} `{}`;\\n\".format(name, query)", "def BlastSeq(Query, Subject, Out, BlastDir):\n print Out\n print Out.split('.')\n if len(Out.split('.'))==1:\n MakeDir(Out)\n OutPath='.'.join(Out.split('.'))\n print (OutPath)\n OutFile=OutPath+'/output.csv'\n errlog=open(OutPath+'/_err.log', 'a')\n else:\n OutFile=Out\n errfile='.'.join( Out.split('.')[:1])+'_err.log'\n errlog=open(errfile, 'a')\n\n\n## column_spec='10 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue btop'\n column_spec='10 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue qcovs btop'\n BLAST=subprocess.Popen([BlastDir, '-query',Query, '-subject',Subject, '-outfmt', column_spec, '-out', OutFile], stderr=errlog)\n BLAST.communicate()\n errlog.close()\n return OutFile", "def auto_sequence_keys(keysetobject=None):\r\n\r\n if 
self.project*self.suspend_default_keys + self.temp_projects:\r\n for p_temp in self.project*self.suspend_default_keys + self.temp_projects:\r\n found_temp = False\r\n for x_temp in range(1,10000):\r\n if p_temp + ATSIGN + str(x_temp)+'.0' in self.keys():\r\n found_temp = True\r\n break\r\n if not found_temp:\r\n\r\n self.default_dict['sequences'].query(term1='#TYPE#',\r\n term2=p_temp,\r\n term3=float,\r\n action='set')\r\n self.default_dict['sequences'].query(term1=p_temp,\r\n action='initiate')\r\n\r\n next_temp = float(input('start from?'))\r\n else:\r\n next_temp = self.default_dict['sequences'].query(term1=p_temp,\r\n action='get').next()\r\n\r\n\r\n\r\n\r\n keysetobject.add(p_temp + ATSIGN + str(next_temp))", "def longquery_poster(\n self, taskname, query, \n createtable=False, tablename=False, estimate=False,\n completequery=False, usedataframe=True\n ):\n if self.casjobtoken = None:\n print('Must provide username and password to', \n 'send a job to CasJobs.')\n return False\n\n header = {\n 'Content-Type': accept,\n 'X-Auth-Token': self.casjobtoken \n }\n # Using createtable and estimate are not necessary,\n # and are not advised, but exist for full functionality:\n if not createtable and not estimate:\n payload = {\n 'Query': query,\n 'Taskname': taskname\n }\n elif not createtable:\n payload = {\n 'Query': query,\n 'Taskname': taskname,\n 'Estimate': int(estimate)\n }\n elif not estimate:\n payload = {\n 'Query': query,\n 'Taskname': taskname,\n 'CreatTable': True,\n 'TableName': tablename\n }\n else:\n payload = {\n 'Query': query,\n 'Taskname': taskname,\n 'CreatTable': True,\n 'TableName': tablename,\n 'Estimate': int(estimate)\n }\n longqueryurl = self.casjobsurl + \n '/contexts/' + context + '/jobs'\n try:\n put = requests.put(\n self.casjobsurl, \n data=payload, headers=header, stream=True\n )\n if put.status_code == 200:\n jobid = str(put.text)\n \"\"\"\n responsetable = post.headers[tablekey]\n if usedataframe:\n # turn response into pandas dataframe\n data = json.dumps(responsetable)\n df = pd.read_json(data, orient='records')\n return df\n else:\n return responsetable\n \"\"\"\n else:\n put.raise_for_status()\n except Exception as e:\n print(str(e))\n # grab the results if 'completequery' is true. 
Because\n # context is static, the 'quickquery_poster' cannot\n # be called\n if completequery:\n # must have created table to download finished query\n if not createtable:\n print('Long query must create table to',\n 'return results')\n return jobid\n else:\n while True:\n check = self.jobstatus_checker(jobid)\n # if the job is done, break loop\n if check['Message'] == 'Query Complete':\n break\n else:\n time.sleep(5)\n sqlquery = 'SELECT * FROM ' + tablename \n tablekey = 'Rows'\n newheader = {\n 'Content-Type': accept, \n 'X-Auth-Token': self.casjobtoken\n }\n\n newpayload = {\n 'Query': sqlquery,\n 'Taskname': 'longquerygrabber'\n }\n quickqueryurl = self.casjobsurl + \n '/contexts/MyDB/query'\n try:\n post = requests.post(\n self.casjobsurl, \n data=newpayload, headers=newheader, stream=True\n )\n if post.status_code == 200: \n responsetable = post.headers[tablekey]\n if usedataframe:\n # turn response into pandas dataframe\n data = json.dumps(responsetable)\n df = pd.read_json(data, orient='records')\n return df\n else:\n return responsetable\n else:\n post.raise_for_status()\n except Exception as e:\n print(str(e))\n else:\n return jobid", "def init_sequential_planning_program() -> str:\n # We reason about the state of the world at particular time steps: [0, t_max]\n seq_encoding = 'time(0..horizon).\\n'\n\n # Predicates evaluate to True or False\n seq_encoding += 'boolean(true).\\n'\n seq_encoding += 'boolean(false).\\n'\n # The contains/2 atom captures this relationship\n seq_encoding += 'contains(X, value(X, B)) :- predicate(X), boolean(B).\\n'\n\n # The initial state is at time t=0\n # The holds/3 atom captures the value of a predicate at a particular timestep t >= 0\n seq_encoding += 'holds(Predicate, Value, 0) :- initialState(Predicate, Value).\\n'\n\n # Closed World Assumption (CWA): Any ground atoms in the initial state which are not explicitly declared True\n # are set to False\n seq_encoding += 'initialState(X, value(X, false)) :- predicate(X), not initialState(X, value(X, true)).\\n'\n\n # The solution to the planning problem is extracted from occurs/2 atoms\n # This is a sequential encoding: only one action may occur at a particular timestep\n # Also, actions may only occur AFTER the initial state.\n seq_encoding += '1 {occurs(Action, T) : action(Action)} 1 :- time(T), T > 0.\\n'\n\n # An action may not occur unless its preconditions are met (i.e., for an action to occur at time t,\n # all applicable predicates must hold the values specified in the precondition at time t-1)\n seq_encoding += (\n ':- occurs(Action, T), precondition(Action, Predicate, Value), '\n 'not holds(Predicate, Value, T - 1).\\n'\n )\n\n # Capture the effects of an action: at time t, the value of a predicate is changed to the one specified in the\n # action's effect as long as the action was valid (see previous statement).\n seq_encoding += (\n 'caused(Predicate, Value, T) :- '\n 'occurs(Action, T), '\n 'effect(Action, Predicate, Value), '\n 'holds(PredicatePre, ValuePre, T - 1) : precondition(Action, PredicatePre, ValuePre).\\n'\n )\n\n # A predicate is considered modified if its value was changed by an action\n seq_encoding += 'modified(Predicate, T) :- caused(Predicate, Value, T).\\n'\n\n # The so-called 'inertia' statements. 
At a particular timestep, the value of a predicate was either:\n # 1) Modified and therefore holds a new value\n seq_encoding += 'holds(Predicate, Value, T) :- caused(Predicate, Value, T).\\n'\n # 2) Was not modified and therefore continues to hold its previous value\n seq_encoding += (\n 'holds(predicate(V), Value, T) :- holds(predicate(V), Value, T - 1), '\n 'not modified(predicate(V), T), time(T).\\n'\n )\n\n # The goal is not met unless the appropriate predicates hold their goal values at the final timestep\n seq_encoding += ':- goal(Predicate, Value), not holds(Predicate, Value, horizon).\\n'\n\n return seq_encoding", "def _run_query(self):", "def _start_query(self) -> PQGen[None]:\n if self._autocommit:\n return\n\n if self.pgconn.transaction_status != TransactionStatus.IDLE:\n return\n\n yield from self._exec_command(self._get_tx_start_command())", "def enter_title():\n valid_data = False\n # used to keep track of the values and change them in other scopes\n input_data = {'title': ''}\n\n while not valid_data:\n input_data['title'] = get_input(\"Title of the task: \")\n if re.match('[\\w]+', input_data['title']):\n valid_data = True\n clean_scr()\n\n return input_data['title']", "def print_jobs():\n for job in job_records():\n command = job['command']\n timestamp = job['timestamp'].isoformat().replace('T', ' ')[:-3]\n print('\\t'.join((job['id'], timestamp, command)))", "def generate_seq(params: List[str]) -> str:\n params = list(params)\n start_index = params.pop(0)\n if not params:\n return f\"{start_index} + row_number() over(order by 1)\"\n expression = f\"{start_index} + row_number() over(order by {','.join(params)})\"\n \n for param in reversed(params):\n expression = f\"nvl({param}, {expression})\"\n\n return expression", "def _get_sql_sequence(self, table_attr):\n template = 'SELECT setval(\\'%s_%s_seq\\', %d, false);\\n'\n return \"\\n\".join([template % (\n table_attr['name'], col_attrs['name'], table_attr['autoIncrement']\n ) for col_name, col_attrs in table_attr['columns'].iteritems() if col_attrs['isPk'] and table_attr['autoIncrement'] ])", "def go_to_sequential(self, sequential_title):\r\n\r\n # Get the index of the item in the sequence\r\n all_items = self.sequence_items\r\n\r\n try:\r\n seq_index = all_items.index(sequential_title)\r\n\r\n except ValueError:\r\n msg = \"Could not find sequential '{0}'. Available sequentials: [{1}]\".format(\r\n sequential_title, \", \".join(all_items)\r\n )\r\n self.warning(msg)\r\n\r\n else:\r\n\r\n # Click on the sequence item at the correct index\r\n # Convert the list index (starts at 0) to a CSS index (starts at 1)\r\n seq_css = \"ol#sequence-list>li:nth-of-type({0})>a\".format(seq_index + 1)\r\n self.q(css=seq_css).first.click()", "def test_very_long_title( self ):\n driver = self.driver\n driver.get(self.base_url + \"/record=b5713050~S6\")\n driver.find_element_by_link_text(\"Request\").click()\n url_obj = urlparse( driver.current_url )\n q_dct = parse_qs( driver.current_url )\n # print( 'q_dct, ```%s```' % pprint.pformat(q_dct) )\n self.assertEqual(\n 'jcbl.aeon.atlas-sys.com',\n url_obj.netloc )\n self.assertEqual(\n ['b5713050'],\n q_dct['ReferenceNumber'] )\n self.assertEqual(\n [\"The English-American his travail by sea and land: or, A new survey of the West-India's [sic], : containing a journall of three thousand and three hundred miles within the main land of America. 
Wher...\"],\n q_dct['ItemTitle'] )\n self.assertEqual(\n ['Gage, Thomas, 1603?-1656'],\n q_dct['ItemAuthor'] )\n self.assertEqual(\n ['London : printed by R. Cotes, and are to be sold by Humphrey Blunden at the Castle in Cornhill, and Thomas Williams at the Bible in Little Britain, 1648'],\n q_dct['ItemPublisher'] )\n self.assertEqual(\n ['1-SIZE D648 .G133e'],\n q_dct['CallNumber'] )\n self.assertEqual(\n ['http://www.archive.org/details/englishamericanh00gage'],\n q_dct['ItemInfo2'] )", "def generate_job_list(params,publisher_id):\n job_list = []\n #since we initiated params['start'] at 0\n total_results = 1 \n while int(params['start']) < total_results:\n client = IndeedClient(publisher = publisher_id)\n search_response = client.search(**params)\n root = ET.fromstring(search_response)\n params['start'] = str(int(params['start'])+25) \n total_results = int(root.find('totalresults').text) \n for job in root.iter('result'):\n jobtitle = job.find('jobtitle').text \n company = job.find('company').text\n city = job.find('city').text\n #state = job.find('state').text\n #country = job.find('country').text\n date = job.find('date').text\n snippet = job.find('snippet').text\n sponsored = job.find('sponsored').text\n url = job.find('url').text\n job = (unicode(jobtitle),unicode(company),unicode(city),unicode(date)[5:16].replace(\" \",\"-\"),unicode(sponsored), unicode(url))\n if job not in job_list:\n job_list.append(job) \n \n job_list.insert(0,(unicode(\"jobtitle\"),unicode(\"company\"),unicode(\"city\"),unicode(\"date\"),unicode(\"sponsored\"), unicode(\"url\"))) #add header \n return job_list", "def main():\n ref_seq = {}\n ent_spe_sero = {}\n tag_dict = {\"Contigs_with_VP1\":\"contigs\", \"P1_sequences\":\"p1\",\n \"VP1_sequences\":\"vp1\", \"5UTR_sequences\":\"5utr\", \"3D_sequences\":\"3d\"}\n args = get_arguments()\n # Load query elements\n print(\"Load resume file\")\n (query_dict, classify_list,\n classify_specie_list, serotype_list) = get_query(args.resume_file,\n args.tag,\n args.incomplete)\n print(\"{} descriptions loaded\".format(len(query_dict)))\n # Load specie association\n if args.ent_serotype_file and args.template_seq_file:\n # Load enterovirus serotype\n print(\"Load enterovirus serotype association\")\n ent_spe_sero = load_spe_sero(args.ent_serotype_file)\n # Load template sequence\n print(\"Load template sequence\")\n ref_seq = get_template_sequence(args.template_seq_file, ent_spe_sero)\n # Grab query sequence in the database\n print(\"Load database sequence\")\n sequence_data = get_sequence(query_dict, args.fasta_file)\n print(\"{} sequences loaded\".format(len(sequence_data)))\n # Write the new fasta file\n print(\"Write the new fasta\")\n write_sequence(args.results, sequence_data, query_dict, classify_list,\n tag_dict[args.tag], ref_seq, ent_spe_sero)\n #print(save_association)\n print(\"Write the itol label\")\n write_itol_label(args.itol_dir, sequence_data, query_dict, classify_list,\n tag_dict[args.tag])\n print(\"Write the itol tree color\")\n write_itol_tree_color(args.itol_dir, sequence_data, query_dict, classify_specie_list, serotype_list,\n tag_dict[args.tag])\n print(\"Done\")", "def buildStarted(sb):", "def get_job_name(self) -> Text:\n return self._job_name", "def sequence(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['sequence']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n label = \"SEQ\"\n for t in ['C','L']:\n run_label = 
label+'_'+t\n t1Mag_label = '{0}1MAG'.format(t)\n t2Mag_label = '{0}2MAG'.format(t)\n t3Mag_label = '{0}3MAG'.format(t)\n t1Ang_label = '{0}1ANG'.format(t)\n t2Ang_label = '{0}2ANG'.format(t)\n t3Ang_label = '{0}3ANG'.format(t)\n distillate_label = \"{0}-ALL\".format(t)\n\n # header\n inigen.emit_run_header(run_label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_1Mag_label = t1Mag_label\n dep_1Mag_name = fields['deps'][0]\n dep_1Mag_uuid = self.uuid_map[t1Mag_label]\n\n dep_2Mag_label = t2Mag_label\n dep_2Mag_name = fields['deps'][1]\n dep_2Mag_uuid = self.uuid_map[t2Mag_label]\n\n dep_3Mag_label = t3Mag_label\n dep_3Mag_name = fields['deps'][2]\n dep_3Mag_uuid = self.uuid_map[t3Mag_label]\n\n dep_1Ang_label = t1Ang_label\n dep_1Ang_name = fields['deps'][3]\n dep_1Ang_uuid = self.uuid_map[t1Ang_label]\n\n dep_2Ang_label = t2Ang_label\n dep_2Ang_name = fields['deps'][4]\n dep_2Ang_uuid = self.uuid_map[t2Ang_label]\n\n dep_3Ang_label = t3Ang_label\n dep_3Ang_name = fields['deps'][5]\n dep_3Ang_uuid = self.uuid_map[t3Ang_label]\n \n deps = [[dep_1Mag_label, dep_1Mag_name, dep_1Mag_uuid],\n [dep_2Mag_label, dep_2Mag_name, dep_2Mag_uuid],\n [dep_3Mag_label, dep_3Mag_name, dep_3Mag_uuid],\n [dep_1Ang_label, dep_1Ang_name, dep_1Ang_uuid],\n [dep_2Ang_label, dep_2Ang_name, dep_2Ang_uuid],\n [dep_3Ang_label, dep_3Ang_name, dep_3Ang_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}\".format(self.location, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"SEQ\"\n params = [[param_section_name, param_section_value], [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map[\"ZER_{0}ANG\".format(t)] = emitted[-9][-36:]\n output_uuid_map[\"ZER_{0}MAG\".format(t)] = emitted[-8][-36:]\n output_uuid_map[\"POS_{0}ANG\".format(t)] = emitted[-7][-36:]\n output_uuid_map[\"POS_{0}MAG\".format(t)] = emitted[-6][-36:]\n output_uuid_map[\"NEG_{0}ANG\".format(t)] = emitted[-5][-36:]\n output_uuid_map[\"NEG_{0}MAG\".format(t)] = emitted[-4][-36:]\n output_uuid_map[\"UNB_{0}NEG\".format(t)] = emitted[-3][-36:]\n output_uuid_map[\"UNB_{0}ZER\".format(t)] = emitted[-2][-36:]\n\n filename = \"{0}/SEQ_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map", "def test_input_type_seq(self, _run_mock):\n hhblits = self.tool(input_type=hhsuite.QueryType.SEQUENCE)\n self.assertEqual(set(hhblits.REQUIRED), set([\"name\", \"sequence\"]))\n hhblits.run({\"sequence\": self.SEQUENCE, \"name\": self.SEQ_NAME})\n self.verify_common(\"hhblits\", hhblits)\n\n _, kw_args = hhblits.tool.call_args\n self.assertIn(\"input\", kw_args[\"options\"])", "def header(self):\n return \"Step {}: {}\".format(\".\".join(str(e) for e in self._id), self.title)", "async def nextlaunch(self, ctx, *args):\n if not can_answer(ctx):\n return\n launches = launchlibrary.Launch.next(api, 1)\n if launches:\n launch = launches[0]\n launchname = launch.name\n launchtime_tz = launch.net\n utc = datetime.now(timezone.utc)\n tz = launchtime_tz.tzname()\n T = chop_microseconds(launchtime_tz - utc)\n launchtime = launchtime_tz.replace(tzinfo=None)\n probability = launch.probability\n if probability == -1:\n probabilitystr = \"not available\"\n else:\n probabilitystr = '{0}%'.format(probability)\n if launch.agency != None:\n embedcolor = discord.Colour(await get_color(launch.agency.id))\n else:\n embedcolor = discord.Colour(5592405)\n embed = 
discord.Embed(title=launchname, colour=embedcolor)\n embed.set_footer(text=\"ID: {0}\".format(launch.id))\n if launch.missions:\n description = launch.missions[0]['description']\n if len(description) > 1000:\n embed.add_field(name=\"T-: {0}\".format(T), value=description[:997]+'...', inline=False)\n else:\n embed.add_field(name=\"T-: {0}\".format(T), value=description, inline=False)\n else:\n embed.add_field(name=\"T-: {0}\".format(T), value=\"No description available.\", inline=False)\n embed.set_thumbnail(url=launch.rocket.image_url)\n if '-t' in args:\n embed.add_field(name=\"Window start\", value=timelink(launch.windowstart), inline=True)\n embed.add_field(name=\"NET\", value=timelink(launch.net), inline=True)\n embed.add_field(name=\"Window end\", value=timelink(launch.windowend), inline=True)\n else:\n embed.add_field(name=\"NET\", value=timelink(launch.net), inline=True)\n embed.add_field(name=\"Max hold time:\", value=launch.windowend - launch.net, inline=True)\n if '-w' in args:\n embed.add_field(name=\"Weather probability\", value=probabilitystr)\n if '-v' in args:\n streamurls = launch.vid_urls\n if streamurls:\n url = '\\n'.join(streamurls)\n else:\n url = \"No video available\"\n embed.add_field(name=\"Video\", value=url, inline=False)\n await ctx.send(embed=embed)", "def createSequence(self,**kwargs):\n members = self.bl.getAllSavedActions() \n entries={}\n\n num = len(self.actionSequence)\n self.baxter.mm.changeMenuTitle(\"%f actions saved: %s\" % (num, str(self.actionSequence)))\n\n for param in members:\n entries[str(param)] = self.chooseBlock\n\n entries[\"Run Sequence\"] = self.runSequence\n entries[\"Reset\"] = self.resetSequence\n self.mm.addGenericMenu(\"sequenceMenu\",self.mm.cur_page,\"Select the action to add to the sequence\", entries)\n self.mm.loadMenu(\"sequenceMenu\")", "def make_query(self):", "def title_n(self):\n self.run_command('title_n')", "def build_dataset_prompt():\n\n print(\"\")\n print(\"Let's start by choosing what features you'd like to look at/explore!\")", "def sequence_ingest(self,sequence):\n\t\tdata=self.data\n\t\t\n\t\tcounter=0\n\n\t\tfor item in data[sequence]:\n\t\t\tdatestring=item['specimenDate']\n\t\t\tdate=fetchdate(datestring)\n\t\t\trow,created=DailyCases.objects.get_or_create(specimenDate=date,areacode=item['areaCode'])\n\t\t\trow.areaname=item['areaName']\n\t\t\trow.dailyLabConfirmedCases=item['dailyLabConfirmedCases']\n\t\t\trow.totalLabConfirmedCases=item['totalLabConfirmedCases']\n\t\t\trow.changeInDailyCases=item['changeInDailyCases']\n\t\t\trow.dailyTotalLabConfirmedCasesRate=item['dailyTotalLabConfirmedCasesRate']\n\t\t\trow.previouslyReportedDailyCases=item['previouslyReportedDailyCases']\n\t\t\trow.previouslyReportedTotalCases=item['previouslyReportedTotalCases']\n\t\t\trow.changeInTotalCases=item['changeInTotalCases']\n\t\t\trow.save()\n\t\t\tcounter+=1\n\t\tlog.info(f'Processed: {counter} rows')", "def multiphase_solve(db, position, phase_count):\n def end_solve():\n \"\"\"\n The method used to end the multiphase solve process, regardless of kociemba usage\n :return: the combined sequences from each phase\n \"\"\"\n total_sequence = []\n for sequence in sequence_list:\n total_sequence.extend(sequence)\n\n print('- Final Sequence: ', end='')\n for end_move in total_sequence:\n print(end_move.name, end=' ')\n print()\n\n return total_sequence\n\n sequence_list = []\n phase_name = ['Zero', 'One', 'Two', 'Three', 'Four']\n cube_list = []\n position_list = [position]\n\n for phase in range(phase_count):\n print('- Phase %s: 
' % phase_name[phase], end='')\n\n looked_up_sequence = multiphase_lookup.lookup_position(db, position_list[phase], phase)\n\n # If the position wasn't found in the table, option for fallback to kociemba\n if len(looked_up_sequence) > 0 and looked_up_sequence[0] == LookupError:\n print()\n kociemba_choice = ''\n while kociemba_choice != 'Y' and kociemba_choice != 'N':\n kociemba_choice = input('Solve with kociemba package? (y/n) ').upper()\n if kociemba_choice == 'Y':\n print('Converting Cube to kociemba notation.', end='')\n # Overwrite all previous sequences, kociemba uses a different method\n sequence_list = [multiphase_lookup.kociemba_fallback(position)]\n\n return end_solve()\n else:\n print('Exiting program, you got as far as this: \\n%s\\n' % Cube(looked_up_sequence[1]))\n transmit_choice = ''\n while transmit_choice != 'Y' and transmit_choice != 'N':\n transmit_choice = input('Transmit to robot anyway? (y/n) ').upper()\n if transmit_choice == 'Y':\n looked_up_sequence = []\n else:\n print('Nice try, bye')\n exit()\n\n sequence_list.append(looked_up_sequence) # List of sequences from each phase\n cube_list.append(Cube(position_list[phase])) # List of Cubes produced by each phase's sequence\n for move in sequence_list[phase]:\n dyn_move(cube_list[phase], move)\n print(move.name, end=' ')\n position_list.append(cube_list[phase].position)\n print()\n return end_solve()", "def main(argv):\r\n\r\n def log_error(s):\r\n sys.stderr.write(s)\r\n sys.stderr.write('\\n')\r\n def log(s):\r\n pass\r\n\r\n global TEXT_INCLS\r\n TEXT_INCLS = []\r\n\r\n command = os.path.split(argv[0])[1]\r\n params = {}\r\n cpt_char = None\r\n comments = False\r\n\r\n #Extract options\r\n try:\r\n opts, args = getopt.getopt(\r\n argv[1:],\r\n \"c:a:t:r:A:V:R:o:vmh\",\r\n [\"title-char=\",\r\n \"table-attributes=\", \"table-header=\", \"row-pattern=\",\r\n \"view-table-attributes=\", \"view-header=\", \"view-row-pattern=\",\r\n \"output=\", \"verbose\", \"comments\", \"help\"])\r\n\r\n infile = args and args[0] or None\r\n outfile = infile and \"%s.asciidoc\" % os.path.splitext(os.path.split(infile)[1])[0] or '-'\r\n\r\n except getopt.GetoptError, err:\r\n log_error(main.__doc__ % locals())\r\n log_error(\"Error: %s\" % err)\r\n return -2\r\n except IndexError, err:\r\n log_error(main.__doc__ % locals())\r\n log_error(\"Error: File not specified.\")\r\n return -2 \r\n\r\n \r\n for o, a in opts:\r\n if o in (\"-c\", \"--title-char\"):\r\n a = a.strip()\r\n if len(a) > 1:\r\n cpt_char = a[0]\r\n params['title_char'] = a[1]\r\n else:\r\n params['title_char'] = a\r\n elif o in (\"-v\", \"--verbose\"):\r\n log = log_error\r\n elif o in (\"-o\", \"--output\"):\r\n outfile = a\r\n elif o in (\"-m\", \"--comments\"):\r\n comments = True\r\n elif o in (\"-h\", \"--help\"):\r\n print main.__doc__ % locals()\r\n return 0\r\n\r\n if outfile=='-':\r\n outfile = None\r\n\r\n if comments:\r\n log(\"Generating SQL COMMENTS from SQL\")\r\n log(\"================================\")\r\n else:\r\n log(\"Generating ASCIIDOC from SQL\")\r\n log(\"============================\")\r\n\r\n try:\r\n # Read SQL\r\n log(\"Reading file %s ...\" % infile)\r\n f = infile and open(infile) or sys.stdin\r\n sql = f.read()\r\n f.close()\r\n\r\n if comments:\r\n ret = objects_to_comments(sql)\r\n else:\r\n ret = TOP_COMMENT\r\n\r\n if cpt_char:\r\n ret += \"\\n\\n%s\\n%s\\n\" % (TABLES_CPT, cpt_char*len(TABLES_CPT))\r\n\r\n # Parse Tables from SQL\r\n log(\"Parsing Tables...\")\r\n ret += tables_to_asciidoc(sql, **params)\r\n\r\n if cpt_char:\r\n # 
Parse Views from SQL\r\n vws = views_to_asciidoc(sql, **params)\r\n log(\"Parsing Views...\")\r\n if vws.strip():\r\n ret += \"\\n\\n%s\\n%s\\n\" % (VIEWS_CPT, cpt_char*len(VIEWS_CPT))\r\n ret += vws\r\n \r\n\r\n # Making title references\r\n ret = asciidoc.make_title_references(ret)\r\n \r\n # Making text inclusions of the Views\r\n for i in range(len(TEXT_INCLS)):\r\n ret = ret.replace(\"INCLUSION_%d\" % i, TEXT_INCLS[i])\r\n\r\n # Write SQL\r\n log(\"Writing file %s ...\" % outfile)\r\n f = outfile and open(outfile, \"w\") or sys.stdout\r\n f.write(ret)\r\n f.close()\r\n\r\n log(\"Done!\")\r\n \r\n except Exception,err:\r\n log_error(\"Error: %s\" % err)\r\n raise\r\n\r\n log(\"\")\r\n return 0", "def SubmitJob(jobid, cntSubmitJobDict, numseq_this_user, g_params): # {{{\n# for each job rstdir, keep three log files,\n# 1.seqs finished, finished_seq log keeps all information, finished_index_log\n# can be very compact to speed up reading, e.g.\n# 1-5 7-9 etc\n# 2.seqs queued remotely , format:\n# index node remote_jobid\n# 3. format of the torun_idx_file\n# origIndex\n gen_logfile = g_params['gen_logfile']\n # gen_errfile = g_params['gen_errfile']\n name_server = g_params['name_server']\n\n webcom.loginfo(\"SubmitJob for %s, numseq_this_user=%d\"%(jobid, numseq_this_user), gen_logfile)\n\n path_static = g_params['path_static']\n path_cache = g_params['path_cache']\n\n path_result = os.path.join(path_static, 'result')\n path_log = os.path.join(path_static, 'log')\n\n rstdir = \"%s/%s\"%(path_result, jobid)\n outpath_result = \"%s/%s\"%(rstdir, jobid)\n if not os.path.exists(outpath_result):\n os.mkdir(outpath_result)\n\n finished_idx_file = \"%s/finished_seqindex.txt\"%(rstdir)\n failed_idx_file = \"%s/failed_seqindex.txt\"%(rstdir)\n remotequeue_idx_file = \"%s/remotequeue_seqindex.txt\"%(rstdir)\n torun_idx_file = \"%s/torun_seqindex.txt\"%(rstdir) # ordered seq index to run\n cnttry_idx_file = \"%s/cntsubmittry_seqindex.txt\"%(rstdir)#index file to keep log of tries\n\n runjob_errfile = \"%s/%s\"%(rstdir, \"runjob.err\")\n runjob_logfile = \"%s/%s\"%(rstdir, \"runjob.log\")\n finished_seq_file = \"%s/finished_seqs.txt\"%(outpath_result)\n query_parafile = \"%s/query.para.txt\"%(rstdir)\n query_para = webcom.LoadJsonFromFile(query_parafile)\n tmpdir = \"%s/tmpdir\"%(rstdir)\n qdinittagfile = \"%s/runjob.qdinit\"%(rstdir)\n failedtagfile = \"%s/%s\"%(rstdir, \"runjob.failed\")\n starttagfile = \"%s/%s\"%(rstdir, \"runjob.start\")\n cache_process_finish_tagfile = \"%s/cache_processed.finish\"%(rstdir)\n fafile = \"%s/query.fa\"%(rstdir)\n split_seq_dir = \"%s/splitaa\"%(tmpdir)\n forceruntagfile = \"%s/forcerun\"%(rstdir)\n lastprocessed_cache_idx_file = \"%s/lastprocessed_cache_idx.txt\"%(rstdir)\n variant_file = \"%s/variants.fa\"%(rstdir)\n\n if os.path.exists(forceruntagfile):\n isForceRun = True\n else:\n isForceRun = False\n\n finished_idx_list = []\n failed_idx_list = [] # [origIndex]\n if os.path.exists(finished_idx_file):\n finished_idx_list = list(set(myfunc.ReadIDList(finished_idx_file)))\n if os.path.exists(failed_idx_file):\n failed_idx_list = list(set(myfunc.ReadIDList(failed_idx_file)))\n\n processed_idx_set = set(finished_idx_list) | set(failed_idx_list)\n\n jobinfofile = \"%s/jobinfo\"%(rstdir)\n jobinfo = \"\"\n if os.path.exists(jobinfofile):\n jobinfo = myfunc.ReadFile(jobinfofile).strip()\n jobinfolist = jobinfo.split(\"\\t\")\n email = \"\"\n if len(jobinfolist) >= 8:\n email = jobinfolist[6]\n method_submission = jobinfolist[7]\n\n # the first time when the this jobid 
is processed, do the following\n # 1. generate a file with sorted seqindex\n # 2. generate splitted sequence files named by the original seqindex\n if not os.path.exists(qdinittagfile): #initialization#{{{\n if not os.path.exists(tmpdir):\n os.mkdir(tmpdir)\n if isForceRun or os.path.exists(cache_process_finish_tagfile):\n isCacheProcessingFinished = True\n else:\n isCacheProcessingFinished = False\n\n # ==== 1.dealing with cached results \n (seqIDList, seqAnnoList, seqList) = myfunc.ReadFasta(fafile)\n if len(seqIDList) <= 0:\n webcom.WriteDateTimeTagFile(failedtagfile, runjob_logfile, runjob_errfile)\n webcom.loginfo(\"Read query seq file failed. Zero sequence read in\", runjob_errfile)\n return 1\n\n if 'DEBUG' in g_params and g_params['DEBUG']:\n msg = \"jobid = %s, isCacheProcessingFinished=%s, MAX_CACHE_PROCESS=%d\"%(\n jobid, str(isCacheProcessingFinished), g_params['MAX_CACHE_PROCESS'])\n webcom.loginfo(msg, gen_logfile)\n\n if not isCacheProcessingFinished:\n finished_idx_set = set(finished_idx_list)\n\n lastprocessed_idx = -1\n if os.path.exists(lastprocessed_cache_idx_file):\n try:\n lastprocessed_idx = int(myfunc.ReadFile(lastprocessed_cache_idx_file))\n except:\n lastprocessed_idx = -1\n\n cnt_processed_cache = 0\n for i in range(lastprocessed_idx+1, len(seqIDList)):\n if i in finished_idx_set:\n continue\n outpath_this_seq = \"%s/%s\"%(outpath_result, \"seq_%d\"%i)\n subfoldername_this_seq = \"seq_%d\"%(i)\n md5_key = hashlib.md5(seqList[i].encode('utf-8')).hexdigest()\n subfoldername = md5_key[:2]\n cachedir = \"%s/%s/%s\"%(path_cache, subfoldername, md5_key)\n zipfile_cache = cachedir + \".zip\"\n\n if os.path.exists(cachedir) or os.path.exists(zipfile_cache):\n if os.path.exists(cachedir):\n try:\n shutil.copytree(cachedir, outpath_this_seq)\n except Exception as e:\n msg = \"Failed to copytree %s -> %s\"%(cachedir, outpath_this_seq)\n webcom.loginfo(\"%s with errmsg=%s\"%(msg, str(e)), runjob_errfile)\n elif os.path.exists(zipfile_cache):\n if os.path.getsize(zipfile_cache) == 0:\n os.remove(zipfile_cache) # remove empty archived result zip file\n else:\n cmd = [\"unzip\", zipfile_cache, \"-d\", outpath_result]\n webcom.RunCmd(cmd, runjob_logfile, runjob_errfile)\n if os.path.exists(outpath_this_seq):\n shutil.rmtree(outpath_this_seq)\n if os.path.exists(os.path.join(outpath_result, md5_key)):\n shutil.move(os.path.join(outpath_result, md5_key), outpath_this_seq)\n\n fafile_this_seq = '%s/seq.fa'%(outpath_this_seq)\n if os.path.exists(outpath_this_seq) and webcom.IsCheckPredictionPassed(outpath_this_seq, name_server):\n myfunc.WriteFile('>%s\\n%s\\n'%(seqAnnoList[i], seqList[i]), fafile_this_seq, 'w', True)\n if not os.path.exists(starttagfile): #write start tagfile\n webcom.WriteDateTimeTagFile(starttagfile, runjob_logfile, runjob_errfile)\n\n info_finish = webcom.GetInfoFinish(name_server, outpath_this_seq,\n i, len(seqList[i]), seqAnnoList[i], source_result=\"cached\", runtime=0.0)\n myfunc.WriteFile(\"\\t\".join(info_finish)+\"\\n\",\n finished_seq_file, \"a\", isFlush=True)\n myfunc.WriteFile(\"%d\\n\"%(i), finished_idx_file, \"a\", True)\n\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"Get result from cache for seq_%d\"%(i), gen_logfile)\n if cnt_processed_cache+1 >= g_params['MAX_CACHE_PROCESS']:\n myfunc.WriteFile(str(i), lastprocessed_cache_idx_file, \"w\", True)\n return 0\n cnt_processed_cache += 1\n\n webcom.WriteDateTimeTagFile(cache_process_finish_tagfile, runjob_logfile, runjob_errfile)\n\n # Regenerate toRunDict\n toRunDict = {}\n for i 
in range(len(seqIDList)):\n if not i in processed_idx_set:\n toRunDict[i] = [seqList[i], 0, seqAnnoList[i].replace('\\t', ' ')]\n\n if name_server == \"topcons2\":\n webcom.ResetToRunDictByScampiSingle(toRunDict, g_params['script_scampi'], tmpdir, runjob_logfile, runjob_errfile)\n sortedlist = sorted(list(toRunDict.items()), key=lambda x:x[1][1], reverse=True)\n\n # Write splitted fasta file and write a torunlist.txt\n if not os.path.exists(split_seq_dir):\n os.mkdir(split_seq_dir)\n\n torun_index_str_list = [str(x[0]) for x in sortedlist]\n if len(torun_index_str_list)>0:\n myfunc.WriteFile(\"\\n\".join(torun_index_str_list)+\"\\n\", torun_idx_file, \"w\", True)\n else:\n myfunc.WriteFile(\"\", torun_idx_file, \"w\", True)\n\n # write cnttry file for each jobs to run\n cntTryDict = {}\n for idx in torun_index_str_list:\n cntTryDict[int(idx)] = 0\n json.dump(cntTryDict, open(cnttry_idx_file, \"w\"))\n\n for item in sortedlist:\n origIndex = item[0]\n seq = item[1][0]\n description = item[1][2]\n seqfile_this_seq = \"%s/%s\"%(split_seq_dir, \"query_%d.fa\"%(origIndex))\n seqcontent = \">%s\\n%s\\n\"%(description, seq)\n myfunc.WriteFile(seqcontent, seqfile_this_seq, \"w\", True)\n # qdinit file is written at the end of initialization, to make sure\n # that initialization is either not started or completed\n webcom.WriteDateTimeTagFile(qdinittagfile, runjob_logfile, runjob_errfile)\n#}}}\n\n\n # 3. try to submit the job \n toRunIndexList = [] # index in str\n processedIndexSet = set([]) #seq index set that are already processed\n submitted_loginfo_list = []\n if os.path.exists(torun_idx_file):\n toRunIndexList = myfunc.ReadIDList(torun_idx_file)\n # unique the list but keep the order\n toRunIndexList = myfunc.uniquelist(toRunIndexList)\n if len(toRunIndexList) > 0:\n iToRun = 0\n numToRun = len(toRunIndexList)\n for node in cntSubmitJobDict:\n if \"DEBUG\" in g_params and g_params['DEBUG']:\n webcom.loginfo(\"Trying to submitjob to the node=%s\\n\"%(str(node)), gen_logfile)\n if iToRun >= numToRun:\n if \"DEBUG\" in g_params and g_params['DEBUG']:\n webcom.loginfo(\"iToRun(%d) >= numToRun(%d). 
Stop SubmitJob for jobid=%s\\n\"%(iToRun, numToRun, jobid), gen_logfile)\n break\n wsdl_url = \"http://%s/pred/api_submitseq/?wsdl\"%(node)\n try:\n myclient = Client(wsdl_url, cache=None, timeout=30)\n except:\n webcom.loginfo(\"Failed to access %s\"%(wsdl_url), gen_logfile)\n continue\n\n if \"DEBUG\" in g_params and g_params['DEBUG']:\n webcom.loginfo(\"iToRun=%d, numToRun=%d\\n\"%(iToRun, numToRun), gen_logfile)\n [cnt, maxnum, queue_method] = cntSubmitJobDict[node]\n cnttry = 0\n while cnt < maxnum and iToRun < numToRun:\n origIndex = int(toRunIndexList[iToRun])\n seqfile_this_seq = \"%s/%s\"%(split_seq_dir, \"query_%d.fa\"%(origIndex))\n # ignore already existing query seq, this is an ugly solution,\n # the generation of torunindexlist has a bug\n outpath_this_seq = \"%s/%s\"%(outpath_result, \"seq_%d\"%origIndex)\n if os.path.exists(outpath_this_seq):\n iToRun += 1\n continue\n\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: cnt (%d) < maxnum (%d) \"\\\n \"and iToRun(%d) < numToRun(%d)\"%(cnt, maxnum, iToRun, numToRun), gen_logfile)\n fastaseq = \"\"\n seqid = \"\"\n seqanno = \"\"\n seq = \"\"\n if not os.path.exists(seqfile_this_seq):\n all_seqfile = \"%s/query.fa\"%(rstdir)\n try:\n (allseqidlist, allannolist, allseqlist) = myfunc.ReadFasta(all_seqfile)\n seqid = allseqidlist[origIndex]\n seqanno = allannolist[origIndex]\n seq = allseqlist[origIndex]\n fastaseq = \">%s\\n%s\\n\" % (seqanno, seq)\n except KeyError:\n pass\n else:\n fastaseq = myfunc.ReadFile(seqfile_this_seq)#seq text in fasta format\n (seqid, seqanno, seq) = myfunc.ReadSingleFasta(seqfile_this_seq)\n\n isSubmitSuccess = False\n if len(seq) > 0:\n query_para['name_software'] = webcom.GetNameSoftware(name_server.lower(), queue_method)\n query_para['queue_method'] = queue_method\n if name_server.lower() == \"pathopred\":\n variant_text = myfunc.ReadFile(variant_file)\n query_para['variants'] = variant_text\n # also include the identifier name as a query parameter\n query_para['identifier_name'] = seqid\n\n para_str = json.dumps(query_para, sort_keys=True)\n jobname = \"\"\n if email not in g_params['vip_user_list']:\n useemail = \"\"\n else:\n useemail = email\n try:\n myfunc.WriteFile(\"\\tSubmitting seq %4d \"%(origIndex),\n gen_logfile, \"a\", True)\n rtValue = myclient.service.submitjob_remote(fastaseq, para_str,\n jobname, useemail, str(numseq_this_user), str(isForceRun))\n except Exception as e:\n webcom.loginfo(\"Failed to run myclient.service.submitjob_remote with errmsg=%s\"%(str(e)), gen_logfile)\n rtValue = []\n pass\n\n cnttry += 1\n if len(rtValue) >= 1:\n strs = rtValue[0]\n if len(strs) >=5:\n remote_jobid = strs[0]\n result_url = strs[1]\n numseq_str = strs[2]\n errinfo = strs[3]\n warninfo = strs[4]\n if remote_jobid != \"None\" and remote_jobid != \"\":\n isSubmitSuccess = True\n epochtime = time.time()\n # 6 fields in the file remotequeue_idx_file\n txt = \"%d\\t%s\\t%s\\t%s\\t%s\\t%f\"%( origIndex,\n node, remote_jobid, seqanno.replace('\\t', ' '), seq,\n epochtime)\n submitted_loginfo_list.append(txt)\n cnttry = 0 #reset cnttry to zero\n else:\n webcom.loginfo(\"bad wsdl return value\", gen_logfile)\n\n if isSubmitSuccess:\n cnt += 1\n myfunc.WriteFile(\" succeeded on node %s\\n\"%(node), gen_logfile, \"a\", True)\n else:\n myfunc.WriteFile(\" failed on node %s\\n\"%(node), gen_logfile, \"a\", True)\n\n if isSubmitSuccess or cnttry >= g_params['MAX_SUBMIT_TRY']:\n iToRun += 1\n processedIndexSet.add(str(origIndex))\n if 'DEBUG' in g_params and g_params['DEBUG']:\n 
webcom.loginfo(\"DEBUG: jobid %s processedIndexSet.add(str(%d))\\n\"%(jobid, origIndex), gen_logfile)\n # update cntSubmitJobDict for this node\n cntSubmitJobDict[node][0] = cnt\n\n # finally, append submitted_loginfo_list to remotequeue_idx_file \n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: len(submitted_loginfo_list)=%d\\n\"%(len(submitted_loginfo_list)), gen_logfile)\n if len(submitted_loginfo_list)>0:\n myfunc.WriteFile(\"\\n\".join(submitted_loginfo_list)+\"\\n\", remotequeue_idx_file, \"a\", True)\n # update torun_idx_file\n newToRunIndexList = []\n for idx in toRunIndexList:\n if not idx in processedIndexSet:\n newToRunIndexList.append(idx)\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: jobid %s, newToRunIndexList=\"%(jobid) + \" \".join( newToRunIndexList), gen_logfile)\n\n if len(newToRunIndexList)>0:\n myfunc.WriteFile(\"\\n\".join(newToRunIndexList)+\"\\n\", torun_idx_file, \"w\", True)\n else:\n myfunc.WriteFile(\"\", torun_idx_file, \"w\", True)\n\n return 0", "def __ask_query(self):\n self.__output = list()\n return input(form('What do you want to search?\\n> '))", "def uploadJobState(self,jobdata):\n\t\tsql = \"INSERT INTO jobresults(jobname,viewname,started,ended,result) VALUES (%s,%s,%s,%s,%s)\"\n\t\tdata = ( jobdata['name'], jobdata['view'], jobdata['start'], jobdata['end'],jobdata['result'] )\n\t\tcsr = self.db.cursor()\n\t\tres = csr.execute(sql,data)\n\t\tprint \"Uploaded a build for %(name)s to the DB\" % jobdata", "def updateJobDB(request,Q={}):\n\tuser = request.user\n\t# Get metadata\n\tresponse = agaveRequestMetadataList(user,Q=Q)\n\t# Add job if not in db\n\tfor metadata in response['result']:\n\t\tvalue = metadata['value']\n\t\tif 'jobName' in value and 'parameters' in value:\n\t\t\tlogger.info('SetName: ' + value['jobName'] + ', Parameters: [' + ', '.join(value['parameters']) + '], Length: ' + str(len(value['parameters'])))\n\t\t\tif len(value['parameters']) == 2: \n\t\t\t\tjobName = value['jobName']\n\t\t\t\tpara1name = value['parameters'][0]\n\t\t\t\tpara2name = value['parameters'][1]\n\t\t\t\tjobsInDB = Job.objects.filter(name=jobName)\n\n\t\t\t\t# Update status if not 'FINISHED'\n\t\t\t\tfor job in jobsInDB:\n\t\t\t\t\tif job.status not in ['FINISHED']:\n\t\t\t\t\t\tjobResponse = agaveRequestJobSearch(user,jobId=job.jobid)\n\t\t\t\t\t\tstatus = jobResponse['result'][0]['status']\n\t\t\t\t\t\tcolor = 'red'\n\t\t\t\t\t\tif status == 'FINISHED':\n\t\t\t\t\t\t\tcolor = 'blue'\n\t\t\t\t\t\telif status not in ['FINISHED','FAILED','STOPPED']: # Running\n\t\t\t\t\t\t\tcolor = 'orange'\n\t\t\t\t\t\t# else failed or stopped (color = 'red')\n\t\t\t\t\t\tjob.status = status\n\t\t\t\t\t\tjob.color = color\n\t\t\t\t\t\tjob.save()\n\n\t\t\t\t# Create new job entries\n\t\t\t\tjobsInDB = [job.jobid for job in Job.objects.filter(name=jobName)]\n\t\t\t\tjobsNotInDB = (set(jobsInDB) ^ set(metadata['associationIds'])) & set(metadata['associationIds'])\n\t\t\t\tfor jobId in jobsNotInDB:\n\t\t\t\t\tjobResponse = agaveRequestJobSearch(user,jobId=jobId)\n\t\t\t\t\tstatus = jobResponse['result'][0]['status']\n\t\t\t\t\tcolor = 'red'\n\t\t\t\t\tif status == 'FINISHED':\n\t\t\t\t\t\tcolor = 'blue'\n\t\t\t\t\telif status == 'RUNNING':\n\t\t\t\t\t\tcolor = 'orange'\n\t\t\t\t\tpara1value = value['paraValues'][jobId][para1name]\n\t\t\t\t\tpara2value = 
value['paraValues'][jobId][para2name]\n\t\t\t\t\tJob(name=jobName,\n\t\t\t\t\t\tjobid=jobId,\n\t\t\t\t\t\tuser=user,\n\t\t\t\t\t\tvalue=8,\n\t\t\t\t\t\tpara1name=para1name,\n\t\t\t\t\t\tpara1value=para1value,\n\t\t\t\t\t\tpara2name=para2name,\n\t\t\t\t\t\tpara2value=para2value,\n\t\t\t\t\t\tstatus=status,\n\t\t\t\t\t\tcolor=color).save()", "def database_script_list(bs_id, command, arguments_list, threads, expe_proc_time,\n attempt=1):\n #works out the table from the command\n if command == 'make_beam':\n table = 'Beamform'\n if command == 'prepsubband':\n table = 'Prepdata'\n elif command == 'realfft':\n table = 'FFT'\n elif command == 'accelsearch':\n table = 'Accel'\n elif command == 'prepfold':\n table = 'Fold'\n con = lite.connect(DB_FILE, timeout = TIMEOUT)\n with con:\n cur = con.cursor()\n for ai, arguments in enumerate(arguments_list):\n cur.execute(\"INSERT OR IGNORE INTO {0} (Rownum, AttemptNum, BSID, Command, Arguments, CPUs, ExpProc) VALUES(?, ?, ?, ?, ?, ?, ?)\".format(table), (ai, attempt, bs_id, command, arguments, threads, expe_proc_time))\n #update expected jobs\n if attempt == 1:\n cur.execute(\"UPDATE PulsarSearch SET {0}JobExp=? WHERE Rownum=?\".format(table), (len(arguments_list),bs_id))\n else:\n cur.execute(\"SELECT {0}JobExp FROM PulsarSearch WHERE Rownum=?\".format(table), (bs_id,))\n table_job_exp = cur.fetchone()[0]\n cur.execute(\"UPDATE PulsarSearch SET {0}JobExp=? WHERE Rownum=?\".format(table), (len(arguments_list) + table_job_exp, bs_id))\n cur.execute(\"SELECT TotalJobExp FROM PulsarSearch WHERE Rownum=?\", (bs_id,))\n search_job_exp = cur.fetchone()[0]\n if search_job_exp is None:\n search_job_exp = 0\n cur.execute(\"UPDATE PulsarSearch SET TotalJobExp=? WHERE Rownum=?\", (len(arguments_list) + search_job_exp, bs_id))\n\n return", "def job_execute(self, row):\n job_id = self.jobsTableWidget.item(row, 0).text()\n self.connection.job_start(job_id)\n self.refresh_jobs()", "def createSequencingRun(self,start_job,instrument_code,version,seq_run_id):\n try:\n con = self.getSFFDatabaseConnection()\n if start_job:\n db_output=con.cursor().callproc('create_sequencing_run',\n [instrument_code, version,\n seq_run_id])\n return db_output[2]\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), str(e))\n return False", "def create_new_entry(dataset: Dict[str, pd.DataFrame], query: str) -> Dict:\n # Create a single dataframe from dictionary of dataframes\n columns = []\n data = pd.DataFrame()\n for _, df in dataset.items():\n if not df.empty:\n columns.extend(df.columns)\n data = pd.concat([data, df], axis=1)\n # In order to account for potentially different index time steps, lets dropNans here.\n # Potentially problematic down the road\n data = data.dropna(axis=0)\n\n # Eval the query to generate new sequence\n # if there is an = in the query, then there will be a new named column\n if \"=\" in query:\n new_column = query.split(\"=\")[0].replace(\" \", \"\")\n if new_column in data.columns:\n query = query.replace(new_column, new_column + \"_duplicate\")\n new_column += \"_duplicate\"\n # Wrap the eval in a syntax error in case the user does something not allowed\n try:\n new_df = data.eval(query)\n except SyntaxError:\n console.print(\n \"[red]Invalid syntax in query. 
Please enter something of the form `newcol=col1 + col2`[/red]\\n\"\n )\n return dataset\n except pd.errors.UndefinedVariableError as e:\n console.print(f\"[red]{e}[/red]\")\n return dataset\n\n # If custom exists in the dictionary, we need to append the current dataframe\n if \"custom\" in dataset:\n dataset[\"custom\"] = pd.concat([dataset[\"custom\"], new_df[[new_column]]])\n else:\n dataset[\"custom\"] = new_df[[new_column]]\n return dataset\n\n # If there is not an equal (namely .eval(colA + colB), the result will be a series\n # and not a dataframe. We can just call this custom_exp\n\n try:\n data = pd.DataFrame(data.eval(query), columns=[\"custom_exp\"])\n dataset[\"custom\"] = data\n except SyntaxError:\n console.print(\n \"Invalid syntax in query. Please enter something of the form `newcol=col1 + col2`\"\n )\n return dataset\n except pd.errors.UndefinedVariableError as e:\n console.print(f\"[red]{e}[/red]\")\n return dataset\n return dataset", "def create_job_detail(company_name, job_title, application_deadline, job_listing_url, state, city, application_listed, salary):\n\n job_detail = JobDetail(company_name = company_name, job_title = job_title, application_deadline = application_deadline, job_listing_url = job_listing_url, state = state , city = city, application_listed = application_listed, salary = salary)\n db.session.add(job_detail)\n db.session.commit()\n\n return job_detail", "def change_job_name(input_lines, job_name):\n job_lines = []\n for i, line in enumerate(input_lines):\n new_line = line\n if '#SBATCH --job-name' in line:\n new_line = '#SBATCH --job-name=%s\\n' % job_name\n job_lines.append(new_line)\n return job_lines", "def enter_long_run_data():\n print(LONG_RUN_TEXT)\n\n distance = float(input(\"How many kilometres do you run in this week?\\n>> \"))\n pace = input(\"What is your long run pace?\\n>> \")\n\n print(long_run(distance, pace))", "def _get_job_id(self) -> str:\n return self.split_name[2][3:]", "def execute_queries():\n fetch_job_listings(engine)\n update_job_listing(engine)", "def setup_cmd_input(multi, sequences, ordering, structure = ''):\n if not multi:\n cmd_input = '+'.join(sequences) + '\\n' + structure\n else:\n n_seqs = len(sequences)\n if ordering == None:\n seq_order = ' '.join([str(i) for i in range(1, n_seqs+1)])\n else:\n seq_order = ' '.join([str(i) for i in ordering])\n cmd_input = str(n_seqs) + '\\n' + ('\\n'.join(sequences)) + '\\n' + seq_order + '\\n' + structure\n return cmd_input.strip()", "def generate_workflow_name(self) -> str:\n pass", "def create_hourly_jobflow_manual(self, script, queue_name_out, queue_name_in, current_folder_name, log_prefix, pig_args):\n # TODO: standardize message packing/unpacking\n pig_command_args = ['/home/hadoop/bin/pig']\n for varname in pig_args.keys():\n pig_command_args.append('-p')\n pig_command_args.append(varname + '=' + pig_args[varname])\n pig_command_args.append(script)\n pig_command = '|'.join(pig_command_args)\n \n queue_message = '\\t'.join([pig_command, queue_name_out, current_folder_name + '|' + log_prefix])\n self.write_message(queue_name_in, queue_message)", "def job_subtitle(self, job):\n return str(job)[:max(8, self._project_min_len_unique_id())]", "def first_execution_from(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"first_execution_from\")", "def start_challenge_code001(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code001 from ChallengeDefinition #\",self.ID, sep='')", "def interleaveblastresults(query, subject):\n # Initialise strings 
to hold the matches, and the final BLAST-formatted string\n matchstring = str()\n blaststring = str()\n # Iterate through the query\n for i, bp in enumerate(query):\n # If the current base in the query is identical to the corresponding base in the reference, append a '|'\n # to the match string, otherwise, append a ' '\n if bp == subject[i]:\n matchstring += '|'\n else:\n matchstring += ' '\n # Set a variable to store the progress through the sequence\n prev = 0\n # Iterate through the query, from start to finish in steps of 60 bp\n for j in range(0, len(query), 60):\n # BLAST results string. The components are: current position (padded to four characters), 'OLC', query\n # sequence, \\n, matches, \\n, 'ref', subject sequence. Repeated until all the sequence data are present.\n \"\"\"\n 0000 OLC ATGAAGAAGATATTTGTAGCGGCTTTATTTGCTTTTGTTTCTGTTAATGCAATGGCAGCT\n ||||||||||| ||| | |||| ||||||||| || ||||||||||||||||||||||||\n ref ATGAAGAAGATGTTTATGGCGGTTTTATTTGCATTAGTTTCTGTTAATGCAATGGCAGCT\n 0060 OLC GATTGTGCAAAAGGTAAAATTGAGTTCTCTAAGTATAATGAGAATGATACATTCACAGTA\n ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||\n ref GATTGTGCAAAAGGTAAAATTGAGTTCTCTAAGTATAATGAGAATGATACATTCACAGTA\n \"\"\"\n blaststring += '{} OLC {}\\n {}\\n ref {}\\n' \\\n .format('{:04d}'.format(j), query[prev:j + 60], matchstring[prev:j + 60], subject[prev:j + 60])\n # Update the progress variable\n prev = j + 60\n # Return the properly formatted string\n return blaststring", "def query(self, message: str):\n return input(message + \" [Press ENTER to continue]\")", "def __startSearch(self):\n # This search uses a pre-existing permutations script\n params = _ClientJobUtils.makeSearchJobParamsDict(options=self._options,\n forRunning=True)\n\n if self._options[\"action\"] == \"dryRun\":\n args = [sys.argv[0], \"--params=%s\" % (json.dumps(params))]\n\n print\n print \"==================================================================\"\n print \"RUNNING PERMUTATIONS INLINE as \\\"DRY RUN\\\"...\"\n print \"==================================================================\"\n jobID = HypersearchWorker.main(args)\n\n else:\n cmdLine = _setUpExports(self._options[\"exports\"])\n # Begin the new search. 
The {JOBID} string is replaced by the actual\n # jobID returned from jobInsert.\n cmdLine += \"$HYPERSEARCH\"\n maxWorkers = self._options[\"maxWorkers\"]\n\n jobID = self.__cjDAO.jobInsert(\n client=\"GRP\",\n cmdLine=cmdLine,\n params=json.dumps(params),\n minimumWorkers=1,\n maximumWorkers=maxWorkers,\n jobType=self.__cjDAO.JOB_TYPE_HS)\n\n cmdLine = \"python -m nupic.swarming.HypersearchWorker\" \\\n \" --jobID=%d\" % (jobID)\n self._launchWorkers(cmdLine, maxWorkers)\n\n searchJob = _HyperSearchJob(jobID)\n\n # Save search ID to file (this is used for report generation)\n self.__saveHyperSearchJobID(\n permWorkDir=self._options[\"permWorkDir\"],\n outputLabel=self._options[\"outputLabel\"],\n hyperSearchJob=searchJob)\n\n if self._options[\"action\"] == \"dryRun\":\n print \"Successfully executed \\\"dry-run\\\" hypersearch, jobID=%d\" % (jobID)\n else:\n print \"Successfully submitted new HyperSearch job, jobID=%d\" % (jobID)\n _emit(Verbosity.DEBUG,\n \"Each worker executing the command line: %s\" % (cmdLine,))\n\n return searchJob", "def sequence_params(self):", "def get_job_query(self):\n context = aq_inner(self.context)\n catalog = getToolByName(context, 'portal_catalog')\n mt = getToolByName(self, 'portal_membership') \n currentUser = mt.getAuthenticatedMember() \n \n if \"Site Administrators\" not in currentUser.getGroups():\n\treturn catalog.searchResults(portal_type= 'SeniorProject.PloneAddOn.job', \t\t\t\t Creator = currentUser.getUserName())\n else: \n return catalog.searchResults(portal_type= 'SeniorProject.PloneAddOn.job')", "def rl():\n q_table = build_q_table(N_STATES, ACTIONS)\n for episode in range(MAX_EPISODES):\n step_counter = 0\n # initial\n S = 0\n is_terminated = False\n update_env(S, episode, step_counter)\n while not is_terminated:\n A = choose_action(S, q_table)\n S_, R = get_env_feedback(S, A)\n q_predict = q_table.ix[S, A]\n if S_ != 'terminated':\n q_target = R + LAMBDA * q_table.iloc[S_,:].max() # iloc: chose the specific columns based on integer\n else:\n q_target = R\n is_terminated = True\n q_table.ix[S, A] += ALPHA * (q_target - q_predict)\n # next_state <- old_state\n S = S_\n update_env(S, episode, step_counter + 1)\n step_counter += 1\n return q_table", "def log_to_shell(index, qid_raw, condition_raw, output_raw, decoded_seqeunce):\n print(\"Sample index\", index)\n print(\"QID: \", qid_raw)\n print(\"CONDITION: \", condition_raw)\n print(\"OUTPUT: \", output_raw,'\\n')\n print(\"Predicted OUTPUT: \", decoded_seqeunce, '\\n\\n')", "def scrape_single_page(what = WHAT, where = WHERE, start = 0):\n\n\t# Initiate new dict to store *all* job data in one query.\n\tpage_job_data = defaultdict(str)\n\n\t# Make Soup with Indeed *job* page.\n\tjob_soup = get_job_soup(what, where, start)\n\n\t# Scrape every job data (expecting 15 results) on Indeed job page.\n\tfor job in job_soup.find_all('div', {'class': 'row'}):\n\n\t\t# Initiate new OrderedDict to store a *single* job data.\n\t\tjob_data = OrderedDict()\n\n\t\t# `epoch`\n\t\tjob_data['epoch'] = time.mktime(EXECTIME.timetuple())\n\n\t\t# `scrping_dt`\n\t\tjob_data['scrping_dt'] = EXECTIME.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\t\t# `ad_id_indeed` : unique Indeed job id ('jk')\n\t\tjk = job.attrs['data-jk']\n\t\tjob_data['ad_id_indeed'] = jk\n\n\t\t# `ad_jobtitle_indeed`\n\t\tfor content in job.find_all('a', {'data-tn-element': 'jobTitle'}):\n\t\t\tjob_data['ad_jobtitle_indeed'] = content.text.lstrip()\n\n\t\t# `ad_cie_indeed`\n\t\t# `ad_jobloc_indeed`\n\t\t# `ad_post_dt_indeed`\n\t\tfor field in [['company', 
'ad_cie_indeed'], ['location', 'ad_jobloc_indeed'], ['date', 'ad_post_dt_indeed']]:\n\t\t\tfor content in job.find_all('span', {'class': field[0]}):\n\t\t\t\tjob_data[field[1]] = content.text.lstrip()\n\n\t\t# Make Soup with Indeed *view job* page by passing the jk value (`ad_id_indeed`).\n\t\tviewjob_page = rqs.get(get_viewjob_url(jk))\n\t\tviewjob_soup = BeautifulSoup(viewjob_page.text, 'lxml')\n\n\t\t# `ad_jobdes_indeed`\n\t\tfor content in viewjob_soup.find_all('span', {'id': 'job_summary'}):\n\t\t\tjob_data['ad_jobdes_indeed'] = content.text\n\n\t\t# `search_ad_url`\n\t\t# `ad_url`\n\t\tfor content in job.find_all('a', {'class': 'turnstileLink'}):\n\t\t\tsearch_ad_url = 'https://www.indeed.co.uk' + content['href']\n\t\t\tjob_data['search_ad_url'] = search_ad_url\n\n\t\t\ttry:\n\t\t\t\tad_url = rqs.get(search_ad_url).url\n\t\t\t\tjob_data['ad_url'] = ad_url\n\t\t\texcept:\n\t\t\t\tad_url = search_ad_url\n\t\t\t\tjob_data['ad_url'] = search_ad_url\n\n\n\t\t# Ad scraper data. Temporarily set to nan.\n\t\t# `ad_jobdate`\n\t\t# `ad_jobtitle`\n\t\t# `ad_jobcie`\n\t\t# `ad_jobdes`\n\t\t# `ad_email`\n\t\tfor item in ['ad_jobdate', 'ad_jobtitle', 'ad_jobcie', 'ad_jobdes', 'ad_email']:\n\t\t\tjob_data[item] = np.nan\n\n\t\t# Append single job data to `all_job_data` using `ad_url` as key.\n\t\tpage_job_data[ad_url] = job_data\n\n\t\tprint('Get {}'.format(ad_url))\n\n\treturn page_job_data", "def start_job(self):\n # POST /jobs/{job_id}/results\n pass", "def ArgsForSqlQuery(parser):\n job_utils.CommonArgs(parser)\n\n parser.add_argument(\n 'query', metavar='QUERY', help='The SQL query to execute.')\n\n parser.add_argument(\n '--job-name',\n help='The unique name to assign to the Cloud Dataflow job.',\n required=True)\n\n parser.add_argument(\n '--region',\n type=arg_parsers.RegexpValidator(r'\\w+-\\w+\\d',\n 'must provide a valid region'),\n help=('Region ID of the job\\'s regional endpoint. 
'\n + dataflow_util.DEFAULT_REGION_MESSAGE),\n required=True)\n\n output_group = parser.add_group(\n required=True, help='The destination(s) for the output of the query.')\n\n concept_parsers.ConceptParser([\n presentation_specs.ResourcePresentationSpec(\n '--bigquery-table',\n concepts.ResourceSpec(\n 'bigquery.tables',\n resource_name='BigQuery table',\n tableId=concepts.ResourceParameterAttributeConfig(\n name='bigquery-table', help_text='The BigQuery table ID.'),\n projectId=concepts.ResourceParameterAttributeConfig(\n name='bigquery-project',\n help_text='The BigQuery project ID.'),\n datasetId=concepts.ResourceParameterAttributeConfig(\n name='bigquery-dataset',\n help_text='The BigQuery dataset ID.')),\n 'The BigQuery table to write query output to.',\n prefixes=False,\n group=output_group),\n presentation_specs.ResourcePresentationSpec(\n '--pubsub-topic',\n concepts.ResourceSpec(\n 'pubsub.projects.topics',\n resource_name='Pub/Sub topic',\n topicsId=concepts.ResourceParameterAttributeConfig(\n name='pubsub-topic', help_text='The Pub/Sub topic ID.'),\n projectsId=concepts.ResourceParameterAttributeConfig(\n name='pubsub-project',\n help_text='The Pub/Sub project ID.')),\n 'The Cloud Pub/Sub topic to write query output to.',\n prefixes=False,\n group=output_group),\n ]).AddToParser(parser)\n\n parser.add_argument(\n '--bigquery-write-disposition',\n help='The behavior of the BigQuery write operation.',\n choices=['write-empty', 'write-truncate', 'write-append'],\n default='write-empty')\n\n parser.add_argument(\n '--pubsub-create-disposition',\n help='The behavior of the Pub/Sub create operation.',\n choices=['create-if-not-found', 'fail-if-not-found'],\n default='create-if-not-found')\n\n parameter_group = parser.add_mutually_exclusive_group()\n\n parameter_group.add_argument(\n '--parameter',\n action='append',\n help='Parameters to pass to a query. Parameters must use the format '\n 'name:type:value, for example min_word_count:INT64:250.')\n\n parameter_group.add_argument(\n '--parameters-file',\n help='Path to a file containing query parameters in JSON format.'\n ' e.g. [{\"parameterType\": {\"type\": \"STRING\"}, \"parameterValue\":'\n ' {\"value\": \"foo\"}, \"name\": \"x\"}, {\"parameterType\": {\"type\":'\n ' \"FLOAT64\"}, \"parameterValue\": {\"value\": \"1.0\"}, \"name\": \"y\"}]')\n\n parser.add_argument(\n '--dry-run',\n action='store_true',\n help='Construct but do not run the SQL pipeline, for smoke testing.')\n\n parser.add_argument(\n '--sql-launcher-template-engine',\n hidden=True,\n help='The template engine to use for the SQL launcher template.',\n choices=['flex', 'dynamic'],\n default='flex')\n\n parser.add_argument(\n '--sql-launcher-template',\n hidden=True,\n help='The full GCS path to a SQL launcher template spec, e.g. '\n 'gs://dataflow-sql-templates-us-west1/cloud_dataflow_sql_launcher_template_20201208_RC00/sql_launcher_flex_template. '\n 'If None is specified, default to the latest release in the region. 
'\n 'Note that older releases are not guaranteed to be compatible.')", "def run_job(\n self, name: str, command: str, afterok: list = None,afternotok: list = None, dry_run: bool = False,\n ) -> int:\n LOG.info(\"Submitting commands %s\", command)\n if afterok:\n LOG.info(\n \"Adding dependencies: %s\", \",\".join([str(dep) for dep in afterok])\n )\n jobid = 1\n if not dry_run:\n jobid = self._jobid\n LOG.info(\"Submitted job %s with job id: %s\", name, jobid)\n return jobid", "def question_new_search():", "def _run_express_job(self, class_name, options=\"\"):\n cmd = \"source {bento_home}/bin/kiji-env.sh; express job {jar} {myclass} --kiji {kiji_uri}\"\n cmd = cmd.format(\n bento_home=self.bento_home,\n jar=os.path.join(self.movie_advisor_home, self.express_jar),\n myclass=class_name,\n kiji_uri=self.kiji_uri,\n ) + \" \" + options\n print(run(cmd))", "def navigate_search_results(self):\n driver = self.driver\n search_results_exhausted = False\n results_page = self.results_page\n delay = 60\n date = get_date_time()\n # css elements to view job pages\n list_element_tag = '/descendant::a[@class=\"job-title-link\"]['\n print_num_search_results(driver, self.keyword, self.location)\n # go to a specific results page number if one is specified\n go_to_specific_results_page(driver, delay, results_page)\n results_page = results_page if results_page > 1 else 1\n\n while not search_results_exhausted:\n for i in range(1,26): # 25 results per page\n # define the css selector for the blue 'View' button for job i\n job_selector = list_element_tag + str(i) + ']'\n if search_suggestion_box_is_present(driver, \n job_selector, i, results_page):\n continue\n # wait for the selector for the next job posting to load.\n # if on last results page, then throw exception as job_selector \n # will not be detected on the page\n if not link_is_present(driver, delay, \n job_selector, i, results_page):\n continue\n robust_wait_for_clickable_element(driver, delay, job_selector)\n extract_transform_load(driver,\n delay,\n job_selector,\n date,\n self.keyword,\n self.location,\n self.filename)\n # attempt to navigate to the next page of search results\n # if the link is not present, then the search results have been \n # exhausted\n try:\n next_results_page(driver, delay)\n print(\"\\n**************************************************\")\n print(\"\\n\\n\\nNavigating to results page {}\" \\\n \"\\n\\n\\n\".format(results_page + 1))\n except ValueError:\n search_results_exhausted = True\n print(\"**************************************************\")\n print(\"\\n\\n\\n\\n\\nSearch results exhausted\\n\\n\\n\\n\\n\")\n else:\n results_page += 1", "def __generate_search_query(self) -> None:\n if self.query_accuracy < 100:\n if self.title is not None and self.title != '' and self.artist is not None and self.artist != '':\n # Use the title and the artist name to find more information about the song.\n query: str = self.title + ' ' + self.artist\n query = re.sub(self.__get_filter_regex(), '', query)\n self.query = query\n # Remove unnecessary information in order to get a simpler query version.\n self.minimal_query = re.sub(r'\\([\\s\\S]+\\)', '', query).strip()\n self.query_accuracy = 100\n return\n if self.query_accuracy < 50:\n # No title nor artist name available, use the filename as search query.\n filename: str = os.path.basename(self.original_path)\n filename = os.path.splitext(filename)[0]\n query: str = filename.lower()\n query = re.sub(self.__get_filter_regex(), '', query)\n query = query.replace('_', ' ')\n query = 
query.strip()\n self.query = query\n self.minimal_query = re.sub(r'\\([\\s\\S]+\\)', '', query).strip()\n self.query_accuracy = 50", "def prep_jid(nocache): # pylint: disable=unused-argument\n #return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid()\n return salt.utils.gen_jid()", "def run():\n print(\"\\n************************************** PARAMERTERS **************************************\\n\")\n print(f'TARGET_GROUP: {PARAM.TARGET_GROUP}\\n')\n print(f'ACQ_FILE: {PARAM.ACQ_FILE}\\n')\n print(f'FINAL_DATA_DIR: {PARAM.FINAL_DATA_DIR}\\n')\n print(f'FAULTY_EMPLOYEES_DIR: {PARAM.FAULTY_EMPLOYEES_DIR}\\n')\n print(f'NONE_MATCHED_DIR: {PARAM.NONE_MATCHED_DIR}\\n')\n print('*****************************************************************************************\\n')\n\n jti = JobTransitionInspector(PARAM.ACQ_FILE)\n jti.exec()", "def run(self):\n\n seq_id = self.seq_id\n seq_str = self.seq_str\n outdir = self.outdir\n seq_count = self.seq_count\n\n log = logging.getLogger('process_sequence_{}'.format(seq_count))\n fh = logging.FileHandler(os.path.join(outdir, 'process.log'))\n fh.setLevel(logging.INFO)\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n log.addHandler(fh)\n\n log.info(\"SEQUENCE %s: %s (%s residues)\",\n seq_count, seq_id, len(seq_str))\n\n char_width = 80\n seq_lines = [seq_str[i:i+char_width]\n for i in range(0, len(seq_str), char_width)]\n for seq_line in seq_lines:\n log.info(\"%s\", seq_line)\n\n log_br(log)\n log.info(\"Searching for template structures ... \")\n\n api1submit = models.SubmitSelectTemplate(\n query_id=seq_id, query_sequence=seq_str)\n\n api1 = managers.CathSelectTemplateManager(\n base_url=self.api1_base,\n submit_data=api1submit,\n api_user=self.api1_user,\n api_password=self.api1_password,\n logger=log,\n )\n\n api1.run()\n\n task_uuid = api1.task_uuid\n\n log_br(log)\n\n # TODO: abstract the following chunk of hard coded URLs\n # to clients / managers / swagger? ...\n\n # swagger_app, swagger_client = api1.api_client.get_swagger()\n # hit_operation_id = 'select-template_hits_read'\n \n # TODO: this is nasty\n # req, resp = swagger_app.op[hit_operation_id](\n # uuid=api1.task_uuid)\n # req.produce('application/json')\n # hits = swagger_client.request((req, resp)).data\n\n api1_base = self.api1_base\n headers = api1.api_client.headers\n\n log.info(\"Getting hit info ...\")\n hits_url = '{api1_base}/api/select-template/{task_uuid}/hits'.format(\n api1_base=api1_base, task_uuid=task_uuid)\n log.info(\"GET %s\", hits_url)\n resp = requests.get(hits_url, headers=headers)\n resp.raise_for_status()\n hits = resp.json()\n log.info(\" ... retrieved %s hits\", len(hits))\n log_br(log)\n\n # hits = managers.GetSelectTemplateHits(task_uuid=api1.task_uuid)\n # hits = api1.funfam_scan_hits()\n\n for hit_count, hit in enumerate(hits, 1):\n\n log_hr(log)\n\n log.info(\"SEQUENCE %s, HIT %s [%s]: FunFam '%s': %s\",\n seq_count, hit_count, hit['query_range'], hit['ff_id'], hit['ff_name'])\n\n log.info(\"Getting template alignments ...\")\n aln_url = '{api1_base}/api/select-template/hit/{hit_uuid}/alignments'.format(\n api1_base=api1_base, hit_uuid=hit['uuid'])\n log.info(\"GET %s\", aln_url)\n resp = requests.get(aln_url, headers=headers)\n resp.raise_for_status()\n alns = resp.json()\n log.info(\" ... retrieved %s template alignments\", len(alns))\n log_br(log)\n\n if not alns:\n log.warning(\"Found no valid template alignments from hit '%s'. 
\" + \\\n \"This is probably due to a lack of non-discontinuous CATH domains \" + \\\n \"in the matching FunFam (skipping modelling step).\", hit['ff_id'])\n continue\n\n log_prefix = 'HIT{}'.format(hit_count)\n aln = alns[0]\n\n log.info(\"%s: Modelling region against template %s, %s (offset %s) ... \",\n log_prefix, aln['pdb_id'], aln['auth_asym_id'], aln['template_seqres_offset'])\n\n log.info(\"%10s %8s: %s\", 'QUERY',\n hit['query_range'],\n aln['target_sequence'], )\n log.info(\"%10s %8s: %s\", '{}, {}'.format(aln['pdb_id'], aln['auth_asym_id']),\n aln['template_seqres_offset'],\n aln['template_sequence'])\n log_br(log)\n\n api2submit = models.SubmitAlignment(\n target_sequence=aln['target_sequence'],\n template_sequence=aln['template_sequence'],\n template_seqres_offset=aln['template_seqres_offset'],\n pdb_id=aln['pdb_id'],\n auth_asym_id=aln['auth_asym_id'],\n )\n\n pdb_out_id = re.sub(r'[\\W]+', '', seq_id)\n\n api2 = managers.SMAlignmentManager(\n base_url=self.api2_base,\n submit_data=api2submit,\n outfile=os.path.join(outdir, \"{}.pdb\".format(pdb_out_id)),\n api_user=self.api2_user,\n api_password=self.api2_password,\n logger=log,\n )\n api2.run()\n log_br(log)", "def _parse_table_to_madx_sequence_part(name: str, length: float, df: pd.DataFrame) -> str:\n # start the sequence definition\n text = \"{}: SEQUENCE, L={};\\n\".format(name, length)\n\n # loop over the table rows\n for _, row in df.iterrows():\n line = \"{:11}, at = {:12.6f};\\n\".format(row[\"name\"], row[\"at\"])\n text += line\n\n # close the sequence definition\n text += \"ENDSEQUENCE;\"\n\n return text", "def scrape_delta_jobs(args):\n today = date.today()\n today = today.strftime(\"%m/%d/%Y\")\n kwargs = parse_args(args)\n\n if kwargs is None:\n return\n\n browser = webdriver.Firefox()\n browser.get(base_url)\n actionChains = ActionChains(browser)\n try:\n link = browser.find_element_by_link_text(\"Search All Jobs\")\n actionChains.move_to_element(link).click(link).perform()\n time.sleep(3)\n html = browser.page_source\n soup = BeautifulSoup(html)\n table = soup.find('table')\n jobs_link = table.findAll('a')[-1]['href']\n browser.execute_script(jobs_link)\n time.sleep(3)\n html = browser.page_source\n soup = BeautifulSoup(html)\n table = soup.find('table')\n rows = table.findAll('tr')\n\n jobs = {}\n titles = []\n for row in rows:\n try:\n location = row.find('td', {'class': 'column-3'}).text\n # modify this line to adjust location\n if 'GA-Atlanta-ATG' in location:\n date_posted = row.find('td', {'class': 'column-5'}).text\n department = row.find('td', {'class': 'column-4'}).text\n title = row.find('td', {'class': 'column-1'})\n link = title.find('a')['href']\n if today in date_posted:\n jobs[title.text] = {\n 'date_posted': date_posted,\n 'department': department,\n }\n titles.append(title.text)\n except:\n pass\n\n formatted_titles = []\n for title in titles:\n formatted_titles.append(title.rstrip().encode('utf-8'))\n\n sentence = \"\"\n for title in formatted_titles:\n sentence = sentence + \\\n \" New Job Posting for Postion: {} \\n\".format(title)\n sender = kwargs['sender']\n receivers = kwargs['recipient']\n\n message = \"\"\"From: {}\n To: {}\n Subject: New Delta Jobs for {}\n\n {}\n\n {}\n \"\"\".format(sender, receivers, today, base_url, sentence)\n if len(sentence) > 0:\n server = smtplib.SMTP('smtp.gmail.com:587')\n server.ehlo()\n server.starttls()\n server.login(sender, kwargs['password'])\n server.sendmail(sender, receivers, message)\n browser.close()\n except:\n browser.close()", "def 
jobid(self):\n return self.get_db('jobid')", "def handle_input():\n\n command = None\n\n while command != \"quit\":\n input_string = raw_input(\"HBA Database> \")\n tokens = input_string.split()\n command = tokens[0]\n args = tokens[1:]\n\n if command == \"student\":\n github = args[0]\n get_student_by_github(github)\n\n elif command == \"new_student\":\n first_name, last_name, github = args # unpack!\n make_new_student(first_name, last_name, github)\n\n elif command == \"project\":\n title = args[0] # unpack!\n get_project_by_title(title)\n\n elif command == \"grade\":\n github = args[0]\n title = args[1]\n get_grade_by_github_title(github, title)\n\n elif command == \"update_grade\":\n github, title, grade = args\n assign_grade(github, title, grade)\n\n elif command == \"new_project\":\n title = args[0]\n description = \" \".join(args[1:-1])\n max_grade = int(args[-1])\n add_new_project(title, description, max_grade)\n elif command == \"get_all_grades\":\n first_name = args[0]\n last_name = args[1]\n get_all_grades(first_name, last_name)", "def biginputterm_imp (self,lastup,stackobject=None,series_enter=EMPTYCHAR):\r\n\r\n\r\n rawbig = ''\r\n def add_mark (index):\r\n\r\n if str(index) in self.default_dict['marked']:\r\n return POUND\r\n return EMPTYCHAR\r\n\r\n while True:\r\n\r\n if command_stack.size() == 0:\r\n temp_insert = EMPTYCHAR\r\n\r\n if self.project:\r\n temp_insert = SLASH\r\n## print('<<'+nformat.format_keys(self.default_dict['defaultkeys'])+'>>')\r\n self.tutor.show('INITIATE')\r\n if series_enter:\r\n self.tutor.show('CONESCAPE')\r\n\r\n manyinputterm = input(self.using_shelf*'*'+self.using_database*'DB'+notebookname\r\n +temp_insert\r\n +UNDERLINE.join(self.project)\r\n +COLON+index_reduce(str(lastup))\r\n +BLANK+add_mark(lastup)+\r\n self.parent\r\n +BLANK+{EMPTYCHAR:EMPTYCHAR,\r\n PLUS:'[+]',\r\n PLUS+PLUS:'[++]',\r\n PLUS+PLUS+PLUS:'[+++]'}[series_enter]+BLANK)\r\n rawbig = manyinputterm\r\n if self.use_alphabets:\r\n\r\n manyinputterm = self.alphabet_manager.interpret(manyinputterm)\r\n\r\n if self.apply_abr_inp:\r\n manyinputterm = self.default_dict['abbreviations'].undo(manyinputterm)\r\n manyinputterm = self.default_dict['commands'].do(manyinputterm, lchar=EMPTYCHAR)\r\n print('<'+manyinputterm+'>')\r\n if STAR + STAR in manyinputterm and manyinputterm.split(STAR+STAR)[1].isnumeric():\r\n manyinputterm, mult_temp = manyinputterm.split(STAR+STAR)[0], int(manyinputterm.split(STAR+STAR)[1])\r\n manyinputterm = ((manyinputterm + SLASH + SLASH) * mult_temp)[:-2]\r\n\r\n if manyinputterm[:1] == ATSIGN: #for a macro\r\n\r\n manyinputterm = manyinputterm[1:]\r\n\r\n firstindex = str(self.iterator.first())\r\n lastindex = str(self.iterator.last())\r\n backupname = self.filename + str(datetime.datetime.now()).split(BLANK)[0]\r\n\r\n questionlist = extract.extract(manyinputterm,LEFTBRACKET,RIGHTBRACKET)\r\n asked = set()\r\n for question in questionlist:\r\n if question not in asked:\r\n answer = input(question)\r\n asked.add(question)\r\n\r\n manyinputterm = manyinputterm.replace(LEFTBRACKET+question+RIGHTBRACKET,answer)\r\n manyinputterm = manyinputterm.replace('FIRST',\r\n firstindex)\r\n manyinputterm = manyinputterm.replace('LAST',\r\n lastindex)\r\n manyinputterm = manyinputterm.replace('FILE',\r\n self.filename)\r\n manyinputterm = manyinputterm.replace('BACKUP',\r\n backupname)\r\n manyinputterm = manyinputterm.replace('NOW',\r\n POUND+str(datetime.datetime.now()).split(BLANK)[0])\r\n\r\n\r\n\r\n manyinputterm = manyinputterm.split(SLASH+SLASH) ## split into 
commands\r\n\r\n rootcommand = EMPTYCHAR\r\n afterroot = EMPTYCHAR\r\n filledinputlist = []\r\n for t_temp in manyinputterm:\r\n if COLON in t_temp:\r\n if t_temp.split(COLON)[0]:\r\n rootcommand = t_temp.split(COLON)[0]\r\n if t_temp.split(COLON)[1]:\r\n afterroot = COLON.join(t_temp.split(COLON)[1:])\r\n t_temp = rootcommand + COLON + afterroot\r\n\r\n filledinputlist += [t_temp]\r\n\r\n\r\n\r\n for t_temp in reversed(filledinputlist):\r\n\r\n command_stack.add(t_temp)\r\n biginputterm = command_stack.pop()\r\n if biginputterm == False:\r\n biginputterm = EMPTYCHAR\r\n\r\n if biginputterm in [LEFTBRACKET]:\r\n\r\n self.default_dict['marked'].add(str(lastup))\r\n elif biginputterm in [RIGHTBRACKET]:\r\n self.default_dict['marked'].discard(str(lastup))\r\n\r\n else:\r\n break\r\n\r\n continuelooping = True\r\n close_notebook = False\r\n\r\n # to retrieve search result\r\n\r\n for rep_temp in range(0,biginputterm.count('{{')):\r\n if '{{' in biginputterm and '}}' in biginputterm:\r\n\r\n for x_temp in range(0,biginputterm.count('{{')):\r\n\r\n\r\n n_temp = biginputterm.split('{{')[1].split('}}')[0]\r\n\r\n\r\n\r\n\r\n if n_temp and n_temp[0] == POUND:\r\n\r\n n_temp = n_temp[1:]\r\n\r\n temp_list = eval(LEFTBRACKET + n_temp + RIGHTBRACKET)\r\n temp_list = [str(x_temp) for x_temp in temp_list]\r\n temp_listterm = ','.join(temp_list)\r\n\r\n if temp_listterm:\r\n biginputterm = biginputterm.replace('{{'+POUND+n_temp+'}}',temp_listterm)\r\n\r\n elif n_temp.isnumeric():\r\n biginputterm = biginputterm.replace('{{'+n_temp+'}}',\r\n rangelist.range_find([Index(a_temp)\r\n for a_temp\r\n in self.searchlog[-(int(n_temp.strip()))][1]\r\n if a_temp!=0]).replace(LONGDASH,SLASH))\r\n elif n_temp.isupper() and n_temp in self.variables:\r\n\r\n biginputterm = biginputterm.replace('{{'+n_temp+'}}',self.variables[n_temp])\r\n## rangelist.range_find([Index(a_temp)\r\n## for a_temp\r\n## in self.variables[n_temp]]).replace(LONGDASH,SLASH))\r\n\r\n elif n_temp and n_temp[0] == ATSIGN:\r\n n_temp = n_temp[1:]\r\n try:\r\n textfile = file_access.get_text_file(n_temp)\r\n biginputterm = biginputterm.replace('{{'+ATSIGN+n_temp+'}}',textfile)\r\n except:\r\n display.noteprint((alerts.ATTENTION,labels.FILE_ERROR))\r\n\r\n\r\n\r\n\r\n # to send result to next command\r\n if '=>' in biginputterm:\r\n\r\n\r\n self.next_term = '=>'.join(biginputterm.split('=>')[1:])\r\n print(self.next_term)\r\n biginputterm = biginputterm.split('=>')[0]\r\n self.last_term = biginputterm\r\n\r\n # to retieve last index\r\n if '[/]' in biginputterm:\r\n biginputterm = biginputterm.replace('[/]',\r\n str(lastup))\r\n # to retrieve marked results\r\n if '[?]' in biginputterm:\r\n biginputterm = biginputterm.replace('[?]',\r\n rangelist.range_find([Index(a_temp)\r\n for a_temp\r\n in self.default_dict['marked']\r\n if a_temp in str(a_temp)\r\n in self.indexes()]).replace(LONGDASH,SLASH))\r\n if '[*]' in biginputterm:\r\n biginputterm = biginputterm.replace('[*]',\r\n rangelist.range_find([a_temp\r\n for a_temp\r\n in self.default_dict['flipbook']]).replace(LONGDASH,SLASH))\r\n\r\n if '[%' in biginputterm and ']' in biginputterm and '[%]' not in biginputterm:\r\n projectname = biginputterm.split('[%')[1].split(']')[0]\r\n print(projectname)\r\n if projectname in self.default_dict['projects'].get_all_projects():\r\n biginputterm = biginputterm.replace('[%'+projectname+']', rangelist.range_find([a_temp\r\n for a_temp\r\n in transpose_keys(self.default_dict['projects'].\r\n get_all_indexes(project=projectname),\r\n surround=False,\r\n 
notebook=notebook).replace(LONGDASH,SLASH)]))\r\n\r\n\r\n return biginputterm,continuelooping,close_notebook,rawbig", "def resolve_step_name(job_definition: dict[str, Any], start_line: int, end_line: int) -> str:\n if not job_definition:\n return \"\"\n for idx, step in enumerate([step for step in job_definition.get('steps') or [] if step]):\n if isinstance(step, str):\n return f\"[{idx + 1}]({step})\"\n elif isinstance(step, dict):\n if step[START_LINE] <= start_line <= end_line <= step[END_LINE]:\n name = step.get('name')\n return f\"[{idx + 1}]({name})\" if name else f\"[{idx + 1}]\"\n return \"\"", "def generateTaskName(self):\n brokenComponent = ['head','hand','leg','body','hand','leg']\n for component in brokenComponent:\n self.enqueue(Task(component))" ]
[ "0.57784486", "0.5282127", "0.51783377", "0.5130702", "0.512259", "0.50177485", "0.49613672", "0.49327144", "0.48891747", "0.48707858", "0.4861328", "0.4850734", "0.48490104", "0.48305196", "0.48305196", "0.47956473", "0.4788198", "0.47836232", "0.47815114", "0.4765019", "0.47635615", "0.475893", "0.47450316", "0.473916", "0.47357854", "0.4677404", "0.4674963", "0.46550408", "0.4647681", "0.4638643", "0.46259668", "0.46106437", "0.46104336", "0.4597499", "0.4591411", "0.45908213", "0.45885068", "0.4586711", "0.45859987", "0.45850533", "0.45806372", "0.45753402", "0.45704097", "0.4561474", "0.45400155", "0.45310876", "0.45128018", "0.4510477", "0.45053783", "0.45002562", "0.44941765", "0.4486789", "0.44836324", "0.44835845", "0.44834983", "0.44806677", "0.44774038", "0.44728127", "0.4471642", "0.4470437", "0.44701314", "0.44612682", "0.44612017", "0.44589216", "0.4457989", "0.44568586", "0.44520855", "0.44505095", "0.44491714", "0.44478706", "0.44437498", "0.4438444", "0.4436349", "0.44291568", "0.44288465", "0.44269562", "0.44269478", "0.4426227", "0.44214028", "0.44208544", "0.44143823", "0.44096488", "0.44084787", "0.4399222", "0.43988374", "0.43923318", "0.4388253", "0.43880674", "0.43869078", "0.4384496", "0.43816775", "0.4378176", "0.43769854", "0.43727106", "0.4372301", "0.4370059", "0.43650118", "0.4362949", "0.4360499", "0.4357061" ]
0.44272318
75
'w' means way (type str). 'r' will output a float between rangestart and rangeend; 'ri' will output an int between rangestart and rangeend.
import random


def r(w, rangestart, rangeend):
    # 'r': print a float between rangestart and rangeend.
    if w == 'r':
        print(random.uniform(rangestart, rangeend))
    # 'ri': print an int between rangestart and rangeend (inclusive).
    if w == 'ri':
        print(random.randint(rangestart, rangeend))
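A minimal usage sketch of the helper defined above; it assumes that definition (and its random import) is in scope, and the printed values are illustrative only.

r('r', 1, 5)    # prints a float between 1 and 5
r('ri', 1, 5)   # prints an int between 1 and 5 (inclusive)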
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_range(self):\r\n\t\tif self.battery_size == 70:\r\n\t\t\trange = 240\r\n\t\telif self.battery_size == 85:\r\n\t\t\trange = 270\r\n\t\t\t\r\n\t\tmessage = \"This car can go approx. \" + str(range)\r\n\t\tmessage += \" miles on a full charge.\"\r\n\t\tprint(message)", "def get_range(self):\n if self.battery_size == 70:\n range = 240\n elif self.battery_size == 85:\n range = 270\n\n message = \"this car can go approximately \"+ str(range)\n message += \" miles on a full charge.\"\n print(message)", "def _convert_range_boundary(boundary, test_value):\n if _is_string(boundary):\n if boundary.lower() == 'min':\n boundary = test_value - 1\n elif boundary.lower() == 'max':\n boundary = test_value + 1\n else:\n raise error.CommandDescriptionError('Invalid range boundary constant; must be \"min\", \"max\" or integer value')\n \n return boundary", "def get_range(self):\n if self.battery_size == 70:\n r = 240\n elif self.battery_size == 85:\n r = 270\n\n message = \"This car can go approximately \" + str(r)\n message += \" miles on a full charge.\"\n print(message)", "def get_range(self, range, last):\n found = re.match('\\s*([+-]?)([0-9]+)(%?)(\\s*,\\s*([+-]?)([0-9]+)(%?))?\\s*', str(range))\n if not found:\n raise ConfigError('Failed to parse range \"%s\" in \"__size\"!' % str(range))\n (s1, n1, p1, v2, s2, n2, p2) = found.groups()\n if v2:\n d1 = last * int(n1) / 100.0 if p1 else int(n1)\n d2 = last * int(n2) / 100.0 if p2 else int(n2)\n m = last + d1 if s1 == '+' else last - d1 if s1 == '-' or p1 else d1\n M = last - d2 if s2 == '-' else last + d2 if s2 == '+' or p2 else d2\n else:\n d = last * int(n1) / 100.0 if p1 else int(n1)\n m = last - d\n M = last + d\n return m, M", "def get_range(self):\n if self.battery_size == 70:\n range = 240\n elif self.battery_size == 85:\n range = 270\n message = \"This car can go approximately \" + str(range)\n message += \" miles on a full charge.\"\n print(message)", "def get_range(self):\n if self.battery_size == 70:\n range = 240\n elif self.battery_size == 85:\n range = 270\n \n message = \"This car can go approximately \" + str(range)\n message += \" miles on a full charge.\"\n print(message)", "def wl_to_str(w: float) -> str:\n return f\"{int(np.floor(w*100))}\"", "def between(minl:int, maxl:int) -> str:\n return f\"{{{minl},{maxl}}}\"", "def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f\"This car can go about {range} miles on a full charge\")", "def sight_range(self) -> Union[int, float]:\n return self.type_data.proto.sight_range", "def sight_range(self) -> Union[int, float]:\n return self.type_data.proto.sight_range", "def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f'This car can go about {range} miles on a full charge.')", "def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f\"This car can go about {range} miles on a full charge.\")", "def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n\n print(f\"This car can go to about {range} miles on a full charge.\")", "def get_range(self):\n if self.battery_size == 40:\n range = 150\n elif self.battery_size == 65:\n range = 225\n print(f\"This car can go about {range} miles on a full charge.\")", "def get_range(self):\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n \n print(f\"This 
car can go about {range} miles on a full charge.\")", "def find_boundaries(s, w):\n ind = w.i\n # handling height\n if ind + 2 < len(s) and s[ind + 1].text == \"'\" and s[ind + 2].like_num:\n return ind, ind + 3\n if ind - 2 >= 0 and s[ind - 1].text == \"'\" and s[ind - 2].like_num:\n return ind - 2, ind + 1\n\n # forward\n if s[ind].ent_iob == 2:\n return ind, ind + 1\n if ind != len(s) - 1:\n i = ind + 1\n while s[i].ent_iob == 1 and (s[i].pos_ == 'NUM' or s[i].like_num or\n (i+1 < len(s) and (s[i+1].pos_ == 'NUM' or s[i+1].like_num))):\n i += 1\n if i == len(s):\n break\n if s[i - 1].pos_ == 'NUM' or s[i - 1].like_num or s[i - 1].lemma_ in ['one']:\n end = i\n else:\n end = i - 1\n else:\n end = ind + 1\n\n # backward\n if s[ind].ent_iob == 3:\n return ind, end\n i = ind - 1\n while s[i].ent_iob != 2 and (s[i].pos_ == 'NUM' or s[i].like_num or s[i-1].pos_ == 'NUM' or s[i-1].like_num):\n i -= 1\n if i == -1:\n break\n i += 1\n if s[i].pos_ != 'NUM' and not s[i].like_num:\n i += 1\n return i, end", "def convertToWindDirection(wb):\n if wb >= 0 and wb < 11.25:\n return \"N\"\n elif wb >= 11.25 and wb < 33.75:\n return \"NNE\"\n elif wb >= 33.75 and wb < 56.25:\n return \"NE\"\n elif wb >= 56.25 and wb < 78.75:\n return \"ENE\"\n elif wb >= 78.75 and wb < 101.25:\n return \"E\"\n elif wb >= 101.25 and wb < 123.75:\n return \"ESE\"\n elif wb >= 123.75 and wb < 146.25:\n return \"SE\"\n elif wb >= 146.25 and wb < 168.75:\n return \"SSE\"\n elif wb >= 168.75 and wb < 191.25:\n return \"S\"\n elif wb >= 191.25 and wb < 213.75:\n return \"SSW\"\n elif wb >= 213.75 and wb < 236.25:\n return \"SW\"\n elif wb >= 236.25 and wb < 258.75:\n return \"WSW\"\n elif wb >= 258.75 and wb < 281.25:\n return \"W\"\n elif wb >= 281.25 and wb < 303.75:\n return \"WNW\"\n elif wb >= 303.75 and wb < 326.25:\n return \"NW\"\n elif wb >= 326.25 and wb < 348.75:\n return \"NNW\"\n elif wb >= 348.75 and wb < 360:\n return \"N\"\n else:\n return \"NA\"", "def interval(entry):\n string = entry.lower()\n number1 = ''\n number2 = ''\n second = False\n for c in entry:\n if c.isdigit() or c == '.':\n if second == False:\n number1 += c\n else:\n number2 += c\n elif not c.isdigit() and len(number1) > 0:\n second = True\n if number1 and number2:\n leftover = re.sub(number1, '', string).strip().replace(\" \", \"\")\n leftover = re.sub(number2, '', leftover).strip().replace(\" \", \"\")\n numbers = (number1, number2)\n if leftover:\n op = leftover\n else:\n op = None\n\n if op:\n if op == '--':\n ops = (OPERATORS['>='], OPERATORS['<='])\n else:\n ops = (OPERATORS['>'], OPERATORS['<'])\n return ops, numbers\n return (None, None), (None, None)", "def ParseRange(s):\n t = [int(x) for x in s.split('-')]\n return 1.0 * sum(t) / len(t)", "def get_display_trange(self) -> float:\n return float(self.query(':timebase:range?'))", "def redshift_range_type(s):\n try:\n return tuple(map(float, s.split(',')))\n except:\n raise TypeError(\"redshift range must be zmin,zmax\")", "def _translate_range(self, len_, start, end):\n start = int(start)\n end = int(end)\n if start < 0:\n start += len_\n start = max(0, min(start, len_))\n if end < 0:\n end += len_\n end = max(-1, min(end, len_ - 1))\n return start, end", "def sanitizeFloatFromKeyboard(s,range_start=0,range_end=0):\n try:\n\tx = float(s)\n except ValueError:\n\terr = 1\n\treturn err,0\n\n if (x >= range_start) and (x <= range_end):\n\terr = 0\n return err,x\n else:\n\terr = 1\n\treturn err,x", "def check_range(number: object, min_r: float, max_r: float, name: str = \"\") -> float:\n if 
not isinstance(number, (float, int)):\n raise FFmpegNormalizeError(f\"{name} must be an int or float\")\n if number < min_r or number > max_r:\n raise FFmpegNormalizeError(f\"{name} must be within [{min_r},{max_r}]\")\n return number", "def define_range():\n\n def_range = {'lt': [0.0, 24.0],\n 'lon': [0.0, 360.0],\n 'angle': [0.0, 2.0 * np.pi]}\n\n return def_range", "def range(self):\n\n return time_stat(self, stat=\"range\")", "def _get_sight_range(self):\n raise NotImplementedError", "def converter(letter, start_value, end_value):\n if letter == \"F\":\n return start_value, (start_value + end_value - 1)/2\n elif letter == \"B\":\n return (start_value + end_value + 1)/2, end_value", "def compare_with_bounds(first, second):\n result = compare_sentences(first, second)\n if result <= LOWER_BOUND or result >= UPPER_BOUND:\n result = 0\n return result", "def getSliderRange(*args):\n\n #get timeslider range start\n startF = cmds.playbackOptions(query=True, min=True)\n endF = cmds.playbackOptions(query=True, max=True)\n return(startF, endF)", "def learning_rate_range():\n # Lower and upper bounds\n #######\n lower_bound = 0.1 \n upper_bound = 1e-6\n #######\n return lower_bound, upper_bound", "def ind_pos(position, ind, current_geno, chr_starts, chr_ends):\n ind_starts = chr_starts[ind]\n ind_ends = chr_ends[ind]\n #print [position, ind, current_geno, ind_starts, ind_ends]\n in_interval = False\n for interval in range(len(ind_starts)):\n if position > int(ind_starts[interval]) and position < int(ind_ends[interval]):\n in_interval = True\n break\n if in_interval:\n return(current_geno)\n else:\n return(\"./.\")", "def range_params(self, ran, kw):\n specs = {\"range\": (SchemaNode(\"value\"),\n SchemaNode(\"param\").set_attr(\"name\",\"minInclusive\"),\n SchemaNode(\"param\").set_attr(\"name\",\"maxInclusive\")),\n \"length\": (SchemaNode(\"param\").set_attr(\"name\",\"length\"),\n SchemaNode(\"param\").set_attr(\"name\",\"minLength\"),\n SchemaNode(\"param\").set_attr(\"name\",\"maxLength\"))}\n (exact, min_, max_) = specs[kw]\n if (len(ran) == 1 or ran[0] == ran[1]) and ran[0][0] != \"m\":\n elem = exact\n elem.text = ran[0]\n return [elem]\n res = []\n if ran[0][0] != \"m\":\n elem = min_\n elem.text = ran[0]\n res.append(elem)\n if ran[1][0] != \"m\":\n elem = max_\n elem.text = ran[1]\n res.append(elem)\n return res", "def range(self) -> ty.Tuple[float, float]:\r\n ...", "def high_and_low(numbers: str) -> str:\n return f'{min(map(int, numbers.split()))} {max(map(int, numbers.split()))}'", "def converttolbs( self, wkg ):\n if self.debug == 1:\n print \"wkg\",wkg\n wlb = 0\n if ( wkg > 0 ):\n wlb = wkg * 2.20462\n if self.debug == 1:\n print \"wlb\",wlb\n return wlb\n else:\n return 0", "def get_range(self):\n if self.battery_size == 75:\n car_range = 260\n elif self.battery_size == 100:\n car_range = 315\n \n print(f\"This car can run for {car_range} miles.\")", "def find_range_from_cons_pos(my_pos, gpcr_pdb):\n (ext_range,chain)=gpcr_pdb[my_pos]\n pos_range=str(ext_range)\n #pos_range=ext_range+\"-\"+ext_range\n return pos_range", "def get_strand_state(w, c):\n if (w is None) or (c is None) or (w + c == 0):\n return (0, 0)\n r = w / (w + c)\n if r < 0.2:\n return (0, 2)\n elif r > 0.8:\n return (2, 0)\n else:\n return (1, 1)", "def REC_L_STRAIGHT():\n return 11", "def location_bounds(glimpse_w, input_w):\n offset = float(glimpse_w) / input_w\n lower = (-1 + offset)\n upper = (1 - offset)\n\n assert lower >= -1 and lower <= 1, 'lower must be in (-1,1), is {}'.format(lower)\n assert upper >= 
-1 and upper <= 1, 'upper must be in (-1,1), is {}'.format(upper)\n\n return lower, upper", "def range_weights(rangemap, rr, pulselength, db=False):\n\n dr = rangemap - rr\n fr = 1. - np.abs(dr) / pulselength\n\n ind0 = fr < 0\n fr[ind0] = 0.\n fr[~ind0] = fr[~ind0] * (rr / rangemap[~ind0]) ** 4\n\n if db:\n fr = 10. * np.log10(fr)\n\n return fr", "def m_to_range(self, data):\n return (data - self._min_range_m) / self._total_range", "def range_validator_advice(validator_args):\n \n a_type, lb, ub, allow_none, error_msg = validator_args\n if lb == None and ub == None:\n return \"\"\n adv_str = 'x'\n if lb != None:\n adv_str = str(lb) + ' <= ' + adv_str\n if ub != None:\n adv_str += ' <= ' + str(ub)\n if allow_none:\n adv_str += ', None'\n return ' {' + adv_str + '}'", "def range_function(num, start_range, end_range):\n if num > start_range and num < end_range:\n print(num, \"is in the range.\\n\")\n elif num < start_range or num > end_range:\n print(num, \"is not in the range.\\n\")", "def in_valid_range(self, string):\n fret_number = self.cursor.get_frets()[string]\n return (\n (self.min_x <= fret_number <= self.max_x) or\n (self.allow_open and fret_number == self.guitar.min_fret)\n )", "def range(self):\n return self._upper - self._lower", "def wl_to_int(w: float) -> int:\n return int(np.floo(w * 100))", "def detect_range(self) -> Union[int, float]:\n return self.proto.detect_range", "def get_bounds():\n return [0.00], [1.00]", "def parse_run_range(self, run_range_str):\r\n\r\n assert isinstance(run_range_str, str)\r\n if not \"-\" in run_range_str:\r\n return None\r\n\r\n # split <>-<>\r\n (str_min, str_max) = run_range_str.split(\"-\")\r\n run_min_set = False\r\n run_max_set = False\r\n\r\n # parse run min\r\n try:\r\n run_min = int(str_min)\r\n run_min_set = True\r\n except ValueError:\r\n run_min = 0\r\n\r\n # parse run max\r\n try:\r\n run_max = int(str_max)\r\n run_max_set = True\r\n except ValueError:\r\n run_max = INFINITE_RUN\r\n\r\n return run_min, run_max, run_min_set, run_max_set", "def count_convert_wavelength_range(self):\n mini = ct.c_float()\n maxi = ct.c_float()\n self.lib.GetCountConvertWavelengthRange(ct.pointer(mini),\n ct.pointer(maxi))\n return (mini.value, maxi.value)", "def test_get_visual_range__scan__character(self, coord, expected):\n mapstr = self.map.get_visual_range(coord, dist=4, mode=\"scan\", character=\"@\")\n self.assertEqual(expected, mapstr.replace(\"||\", \"|\"))", "def get_range(self):\n if self.size == 75:\n return 260\n elif self.size == 100:\n return 315", "def relative_range(self):\n self.calculate_relative_mags()\n string = '{:.0f}-{:.0f}Hz: {:.5f}'\n s_ind = self.get_bin(self.s_freq)\n e_ind = self.get_bin(self.e_freq)\n lst = self.rel_mags[s_ind:e_ind+1]\n return sum(lst)/len(lst)", "def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to", "def get_h_score(start, end):\n #uses a heuristic function\n #return 0 #used if you want Djikstras algorithm\n return (abs(end[0]-start[0])+abs(end[1]-start[1])) * 10", "def rangeB(self):\r\n if self._range_B is not None:\r\n return round(self._range_B,2)\r\n else:\r\n return self._range_B", "def sanitizeIntFromKeyboard(s,range_start=0,range_end=0):\n try:\n\tx = int(s)\n except ValueError:\n\terr = 1\n\treturn err,0\n\n if (x >= range_start) and (x <= range_end):\n\terr = 0\n return err,x\n else:\n\terr = 1\n\treturn err,x", "def fix_range(l_left, l_right, over_value=0):\n\n def innner_fix_range(v):\n try:\n return v if l_right > float(v) > l_left else over_value\n except 
ValueError:\n return over_value\n\n return innner_fix_range", "def within_boundaries(move):\n if move == ord('w') and ZERO_BASE_PLYR_POS in range(0, 10):\n return False\n elif move == ord('s') and ZERO_BASE_PLYR_POS in range(90, 100):\n return False\n elif move == ord('a') and ZERO_BASE_PLYR_POS in range(0, 91, 10):\n return False\n elif move == ord('d') and ZERO_BASE_PLYR_POS in range(9, 100, 10): \n return False\n else:\n return True", "def test_get_range(self):\n pass", "def computerange(lyrindex):\n for i in range(len(lyrindex)):\n if i != len(lyrindex) - 1:\n if lyrindex[i][0].find('.') > 0: # special case where inventory files have two records\n lyrindex[i].append( 'range=%s-%s' %( lyrindex[i][1], int(lyrindex[i+2][1]) - 1) )\n else:\n lyrindex[i].append( 'range=%s-%s' %( lyrindex[i][1], int(lyrindex[i+1][1]) - 1) )\n else:\n lyrindex[-1].append( 'range=%s' % ( lyrindex[-1][1] ) ) \n return lyrindex", "def __str__(self):\n\t\treturn \"{min} ~ {max}\".format(min=str(self.min), max=str(self.max))", "def convert_room_type_input(input_str):\n if input_str[-1] == '+':\n return (float(input_str[:-1]), operator.ge)\n else:\n return (float(input_str), operator.eq)", "def check_range(value, value_type):\n\n if value_type == V_LAT:\n if (value < -90) or (value > 90):\n result = NOT_VALID\n else:\n result = value\n elif value_type == V_LON:\n if value < -180 or value > 180:\n result = NOT_VALID\n else:\n result = value\n return result", "def lower_bound(self) -> float:\n ...", "def in_range(a :int, range :str):\n range = range.strip()\n left, right = range[0], range[-1]\n range = range[1:-1].split(',')\n \n brac = ['[', ']', '(', ')']\n assert left in brac and right in brac and len(range) == 2\n\n range = [int(i) for i in range]\n b, c = range\n\n a %= 1<<__m__\n b %= 1<<__m__\n c %= 1<<__m__\n\n if left == '[' and right == ']':\n return ( a>=b and a<=c ) if b <= c else ( a>=b or a<=c )\n if left == '[' and right == ')': \n return ( a>=b and a<c ) if b <= c else ( a>=b or a<c )\n if left == '(' and right == ')': \n return ( a>b and a<c ) if b <= c else ( a>b or a<c )\n if left == '(' and right == ']': \n return ( a>b and a<=c ) if b <= c else ( a>b or a<=c )", "def time_range(self):\n tr_str = self['Time Range']\n val, unit = tr_str.split(' ')\n val = float(val)\n unit_multiplier = {'ps': 1e-12, 'ns': 1e-9, 'us': '1e-6'}[unit]\n return val*unit_multiplier", "def REC_S_STRAIGHT():\n return 10", "def calculate_sight_range(self):\n if not self.check_map_obstacle_has_sight():\n return 0\n else:\n return self.map_obstacle.sight_range", "def get_port_operator(port_low, port_high):\n\n if ((port_low) and (port_high)):\n return \"range\"\n elif((port_low) or (port_high)):\n return \"eq\"\n else:\n return None", "def _is_range_boundary(boundary):\n return (isinstance(boundary, numbers.Integral) or\n (_is_string(boundary) and (boundary.lower() in ('min','max'))))", "def within_percent_interval(interval_str: str) -> float:\n interval = float(interval_str)\n if interval < 0 or interval > 1:\n raise ArgumentTypeError(\"Input given is out of bounds!\")\n\n return interval", "def get_range(self):\r\n\r\n if self.size == 70:\r\n return 240\r\n elif self.size == 85:\r\n return 270", "def get_range(value):\n\n raw = value\n\n # If we find a '@' at the beginning of the range, we should invert\n # the match.\n\n invert = False\n\n if value.find('@') == 0:\n invert = True\n value = value.lstrip('@')\n\n # The : separates a max/min range. If it exists, there is at least\n # a minimum. 
We'll start our ranges at zero and infinity so we don't\n # have to worry about complex testing logic.\n\n bottom = 0\n top = float('infinity')\n\n if value.find(':') > 0:\n (bottom, top) = value.split(':')\n if top == '':\n top = float('infinity')\n else:\n top = float(top)\n\n if bottom == '':\n bottom = 0\n elif bottom == '~':\n bottom = -float('infinity')\n else:\n bottom = float(bottom)\n else:\n top = float(value)\n\n return (bottom, top, invert, raw)", "def _parse_run_range(run_range_str):\r\n\r\n if run_range_str is None:\r\n return None, None\r\n\r\n run_range_str = str(run_range_str).strip()\r\n if not run_range_str:\r\n return None, None\r\n\r\n assert isinstance(run_range_str, str)\r\n\r\n # Have run-range?\r\n if '-' in run_range_str:\r\n tokens = [t.strip() for t in run_range_str.split(\"-\")]\r\n try:\r\n run_from = int(tokens[0])\r\n except (ValueError, KeyError):\r\n return None, None\r\n\r\n try:\r\n run_to = int(tokens[1])\r\n except (ValueError, KeyError):\r\n return run_from, None\r\n\r\n return (run_from, run_to) if run_from <= run_to else (run_to, run_from)\r\n\r\n # Have run number?\r\n if run_range_str.isdigit():\r\n return int(run_range_str), None\r\n\r\n # Default return is index\r\n return None, None", "def query_range(tree, start_y, start_x, end_y, end_x):\n res = 0\n start_y -= 1\n\n while end_y > start_y:\n res += bit.query_range(tree[end_y], start_x, end_x)\n end_y -= (end_y & -end_y)\n\n while start_y > end_y:\n res -= bit.query_range(tree[start_y], start_x, end_x)\n start_y -= (start_y & -start_y)\n\n return res", "def simplebounds(cls, val, lower, upper):\n if val < lower:\n val = lower\n if val > upper:\n val = upper\n return val", "def get_bounds():\n lower_bound = 0\n upper_bound = input(\"Please enter a whole number: \")\n domain = [lower_bound, upper_bound]\n return domain", "def getRangeInches(self) -> float:\n ...", "def test_get_visual_range__scan(self, coord, expectstr, expectlst):\n mapstr = self.map.get_visual_range(coord, dist=1, mode=\"scan\", character=None)\n maplst = self.map.get_visual_range(\n coord, dist=1, mode=\"scan\", return_str=False, character=None\n )\n maplst = [[part.replace(\"||\", \"|\") for part in partlst] for partlst in maplst]\n self.assertEqual(expectstr, mapstr.replace(\"||\", \"|\"))\n self.assertEqual(expectlst, maplst[::-1])", "def get_valid_fret_range(history, *, dist_range, guitar):\n min_w = guitar.max_fret\n max_w = guitar.min_fret\n for fret in history:\n min_w = min(min_w, fret)\n max_w = max(max_w, fret)\n min_x = max(max_w - dist_range, guitar.min_fret)\n max_x = min(min_w + dist_range, guitar.max_fret)\n return min_x, max_x", "def expand_range(txt, range_operator='~'):\n if range_operator not in txt:\n return txt\n\n result = []\n index_pattern = r'(\\D*)(\\d+)'\n pair = txt.split(range_operator)\n result.append(pair[0])\n\n # Find start/end points\n match1 = re.search(index_pattern, pair[0])\n match2 = re.search(index_pattern, pair[1])\n start = int(match1.group(2))\n end = int(match2.group(2))\n label = match1.group(1) if match1.group(1) != match1.group(2) else ''\n result.extend([str(label) + str(i) for i in range(start + 1, end + 1)])\n return result", "def get_time_range(vid_folder_string):\n parts = vid_folder_string.split(\"_\")\n tc_start = -1.0\n tc_end = -1.0\n if len(parts) == 3:\n # segment is single frame\n tc_start = parts[2]\n tc_end = parts[2]\n pass\n elif len(parts) == 4:\n # segment is multiframe\n tc_start = parts[2]\n tc_end = parts[3]\n else:\n print(\"Invalid Segment: \" + 
vid_folder_string)\n return float(tc_start), float(tc_end)", "def convert_range(option, opt_str, value, parser):\n # Preserve the original option string for print output.\n\n parser.values.ensure_value(\"raw_%s\" % option.dest, value)\n\n # Place the max and min into a single entry for each found\n # threshold. This lets a user see all possible ranges passed by\n # the user, and select a set based on specification order.\n\n for part in value.split(','):\n parser.values.ensure_value(option.dest, []).append(get_range(part))", "def Interval(caseAttrib, queryValue, max, min, weight):\n try:\n queryValue = float(queryValue)\n # build query string\n queryFnc = {\n \"script_score\": {\n \"query\": {\n \"exists\": {\n \"field\": caseAttrib\n }\n },\n \"script\": {\n \"params\": {\n \"attrib\": caseAttrib,\n \"queryValue\": queryValue,\n \"max\": max,\n \"min\": min,\n \"weight\": weight\n },\n \"source\": \"(1 - (float)( Math.abs(params.queryValue - doc[params.attrib].value) / ((float)Math.max(params.max,params.queryValue) - (float)Math.min(params.min,params.queryValue)) )) * params.weight\"\n },\n \"_name\": caseAttrib\n }\n }\n return queryFnc\n\n except ValueError:\n print(\"Interval() is only applicable to numbers\")", "def make_sure_between(val, start=None, end=None):\n if start is not None:\n if val < start:\n return start\n if end is not None:\n if val > end:\n return end\n return val", "def _parse_query_range_info(self, startw=None, stopw=None, start=None, stop=None, intt=None):\n try:\n if start > self.duration:\n start = start\n relstart = start - self.start_time\n else:\n relstart = start\n start = self.start_time + start\n\n qstart = int(relstart * self.TICKS_PER_SEC)\n\n if start <= 0 or relstart > self.duration:\n raise TypeError\n\n except TypeError:\n start = self.start_time\n relstart = 0\n qstart = None\n\n if intt is not None:\n stop = relstart + intt\n\n try:\n if stop >= self.duration:\n stop -= self.start_time\n if stop < 0 or stop > self.duration:\n raise TypeError\n relstop = stop\n stop += self.start_time\n qstop = int(relstop * self.TICKS_PER_SEC)\n\n except TypeError:\n stop = self.duration + self.start_time\n relstop = self.duration\n qstop = None\n\n if startw is None:\n qminw = None\n else:\n startw = u.Quantity(startw, u.nm).value\n qminw = None if startw <= self.nominal_wavelength_bins[0] / 10 else startw\n\n if stopw is None:\n qmaxw = None\n else:\n stopw = u.Quantity(stopw, u.nm).value\n qmaxw = None if stopw >= self.nominal_wavelength_bins[-1] * 10 else stopw\n\n return dict(start=start, stop=stop, relstart=relstart, relstop=relstop, duration=relstop - relstart,\n qstart=qstart, qstop=qstop, minw=startw, maxw=stopw, qminw=qminw, qmaxw=qmaxw)", "def dR_tagger(dR):\n\tif dR == 0.04:\n return 5\n elif dR == 0.06:\n return 9\n elif dR == 0.08:\n return 13\n elif dR == 0.1:\n return 17\n elif dR == 0.16:\n return 21\n else:\n print \"invalid dR-input\"\n return False", "def parse_range(option):\n return {\"range\": timedelta(days=option)}", "def quintil_rent(x,p,d):\n \n if x <= d[p][0.20]:\n return 'Q1'\n elif x <= d[p][0.4]:\n return 'Q2'\n elif x <= d[p][0.6]: \n return 'Q3'\n elif x <= d[p][0.8]:\n return 'Q4'\n else:\n return 'Q5'", "def find_best_point(self, start_i, end_i, ranges):\n max_val = 0\n target = start_i\n for i in range(start_i, end_i):\n if ranges[i] > max_val:\n target = i\n max_val = ranges[i]\n \n angle = -(540-target)*3\n return float(angle)/1080, target", "def _format_range(x_range):\n\n try:\n x1, x2 = x_range\n except (TypeError, 
ValueError):\n raise NNDCInputError(f'Range keyword arg must have two elements: \"{x_range}\"')\n try:\n if np.isfinite(x1):\n x1 = f\"{x1}\"\n else:\n x1 = \"\"\n except TypeError:\n x1 = \"\"\n try:\n if np.isfinite(x2):\n x2 = f\"{x2}\"\n else:\n x2 = \"\"\n except TypeError:\n x2 = \"\"\n return x1, x2", "def high_and_low_classic(numbers: str) -> str:\n min_num = max_num = int(numbers[0])\n for number in map(int, numbers.split()): # type: int\n if number < min_num:\n min_num = number\n if number > max_num:\n max_num = number\n return f'{min_num} {max_num}'", "def test_float_range():\n assert 0.5 == float_range('0.5')", "def test_get_visual_range__scan__character(self, coord, expectstr, expectlst):\n mapstr = self.map.get_visual_range(coord, dist=1, mode=\"scan\", character=\"@\")\n maplst = self.map.get_visual_range(\n coord, dist=1, mode=\"scan\", return_str=False, character=\"@\"\n )\n maplst = [[part.replace(\"||\", \"|\") for part in partlst] for partlst in maplst]\n self.assertEqual(expectstr, mapstr.replace(\"||\", \"|\"))\n self.assertEqual(expectlst, maplst[::-1]) # flip y-axis for print", "def get_range(self, rel_name):\n return self._declaration[rel_name].range_type" ]
[ "0.59787583", "0.59501344", "0.5829102", "0.5819392", "0.5818934", "0.57981986", "0.5792976", "0.5778558", "0.5731891", "0.57078105", "0.56870514", "0.56870514", "0.56675357", "0.5645643", "0.56384957", "0.5616127", "0.55904025", "0.5577517", "0.5560445", "0.5550442", "0.55366576", "0.5533957", "0.5514583", "0.5513986", "0.54857695", "0.54596347", "0.5447091", "0.54437906", "0.54008627", "0.5376879", "0.53633064", "0.535681", "0.53565824", "0.53550553", "0.5347964", "0.5274138", "0.5238101", "0.5231078", "0.5222449", "0.5222009", "0.521648", "0.5205999", "0.5205157", "0.51787794", "0.5178218", "0.51712906", "0.51707727", "0.515249", "0.5150333", "0.5144486", "0.51388574", "0.51339376", "0.51323736", "0.5131622", "0.5124232", "0.511947", "0.5119296", "0.5112167", "0.50931966", "0.5087497", "0.5087265", "0.50831676", "0.50778514", "0.5075457", "0.5070571", "0.5069505", "0.50663334", "0.50548697", "0.50536275", "0.50467265", "0.50464404", "0.5040553", "0.5033327", "0.5028526", "0.5025902", "0.5017911", "0.5017436", "0.50051886", "0.50031865", "0.50000846", "0.4986213", "0.49855876", "0.49799693", "0.497512", "0.4972322", "0.4959956", "0.49596187", "0.49562132", "0.49521872", "0.49493673", "0.49483863", "0.4934048", "0.49326575", "0.49325943", "0.49291942", "0.49280557", "0.49248052", "0.49244344", "0.49237156", "0.4923607" ]
0.55370027
20
Initializes the model parameters.
def __init__(self, hparams, batch_size=None, num_classes=None,
             summary_dir=None, verbose=False):
    self._model = None
    self._hparams = hparams
    self._verbose = verbose
    self._batch_size = batch_size
    self._num_classes = num_classes
    self._summary_dir = summary_dir
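A hypothetical instantiation sketch; the wrapper class name and the hparams contents are assumptions, since only the constructor appears above.

model_wrapper = ModelWrapper(          # ModelWrapper is a placeholder name
    hparams={"learning_rate": 1e-3},   # illustrative hyperparameters
    batch_size=32,
    num_classes=10,
    summary_dir="/tmp/summaries",
    verbose=True,
)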
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, **kwargs):\n super(Model, self).__init__(**kwargs)\n self._params = self.find_params()", "def _initialize_model_params(self):\n\n if 'model' not in self._raw_data_dict:\n raise Error('The \"model\" key is not found in the configuration file. Looks like the parsed file is not '\n 'Object Detection API model configuration file.')\n params = list(self._raw_data_dict['model'].values())[0]\n for rule in mapping_rules:\n self._update_param_using_rule(params, rule)", "def initialize_model(self):\n pass", "def initialize(self):\n for key in self.parameter_dict:\n self.models[key] = self._create_model(key)", "def init_model(self):\n pass", "def __init__(self, param_dictionary):\n\n BaseModel.__init__(self)\n\n # set starting compartment values\n self.set_compartment(\"susceptible\",\n param_dictionary[\"population\"] - param_dictionary[\"start_infectious\"])\n self.set_compartment(\"infectious\", param_dictionary[\"start_infectious\"])\n self.set_compartment(\"immune\", 0.)\n\n # set model parameters\n self.set_param(\"infection_beta\",\n param_dictionary[\"r0\"]\n / (param_dictionary[\"duration_infectious\"] * param_dictionary[\"population\"]))\n self.set_param(\"infection_rate_recover\", 1. / param_dictionary[\"duration_infectious\"])", "def initialise_parameters(self):\n # Weights\n init = select_w_init(self.w_init)\n if self.w_gain:\n gain = nn.init.calculate_gain('relu')\n init(self.relations, gain=gain)\n else:\n init(self.relations)\n\n # Biases\n if self.b_init:\n init = select_b_init(self.b_init)\n init(self.sbias)\n init(self.pbias)\n init(self.obias)", "def _init_model_params(self):\n super()._init_model_params()\n\n if 'e' in self.init_params:\n if self.init_type == 'uniform':\n if self.nr_no_train_de == 0:\n self.B = [\n np.full(\n (self.n_states, self.n_features[i]), 1.0 / self.n_features[i])\n for i in range(self.n_emissions)\n ]\n else:\n check_if_attributes_set(self, attr='e')\n else:\n if self.nr_no_train_de == 0:\n self.B = [\n np.random.rand(self.n_states, self.n_features[i])\n for i in range(self.n_emissions)\n ]\n for i in range(self.n_emissions):\n normalise(self.B[i], axis=1)\n\n else:\n check_if_attributes_set(self, attr='e')", "def __init__(self, param_dictionary):\n\n BaseModel.__init__(self)\n\n # set starting compartment values\n self.set_compartment(\"susceptible\",\n param_dictionary[\"population\"] - param_dictionary[\"start_infectious\"])\n self.set_compartment(\"preinfectious\", 0.)\n self.set_compartment(\"infectious\", param_dictionary[\"start_infectious\"])\n self.set_compartment(\"immune\", 0.)\n\n # set model parameters\n self.set_param(\"infection_beta\",\n param_dictionary[\"r0\"]\n / (param_dictionary[\"duration_infectious\"] * param_dictionary[\"population\"]))\n self.set_param(\"infection_rate_progress\", 1. / param_dictionary[\"duration_preinfectious\"])\n self.set_param(\"infection_rate_recover\", 1. 
/ param_dictionary[\"duration_infectious\"])", "def initialize(self, model):\n pass", "def init_params(self):\n self.clear()\n self._init_load_data()\n self._init_net_delay_data()", "def __init__(self, **kwargs):\n super(Model, self).__init__(**kwargs)", "def init_params(self):\n self.params = Parameters()\n self.params.add('qoff', self.qoff, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('yscale', self.yscale, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('int_bg', self.int_bg, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('Rc', self.Rc, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('sur_den', self.sur_den, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('ion_depth', self.ion_depth, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)", "def init_params(self):\n blah", "def __init__(self):\n logger.debug('Initializing %s model.' % self.__class__.__name__)\n self.dependent_attributes = ['_alpha',\n '_log_like',\n '_gradient','_K',\n '_log_det']\n self._previous_parameters = None # previous parameters from last call\n self.grad_method = None # could be {'finite_difference','adjoint'}\n self.noise_var_constraint = '+ve' # Gaussian noise variance constraint\n return", "def __init__(self, **kwargs):\n super(Model, self).__init__(**kwargs)\n\n for (key, value) in kwargs.iteritems():\n # use setattr so that validation is triggered\n setattr(self, key, value)", "def init(self, parameters):\n pass", "def _initialize_parameters(self):\n self.ent_emb = tf.get_variable('ent_emb', shape=[len(self.ent_to_idx), self.k * 2],\n initializer=self.initializer)\n self.rel_emb = tf.get_variable('rel_emb', shape=[len(self.rel_to_idx), self.k * 2],\n initializer=self.initializer)", "def _initialize_parameters(self):\n self.ent_emb = tf.get_variable('ent_emb', shape=[len(self.ent_to_idx), self.k],\n initializer=self.initializer)\n self.rel_emb = tf.get_variable('rel_emb', shape=[len(self.rel_to_idx), self.k],\n initializer=self.initializer)", "def init_params(self, parameters):\r\n max_epoch = parameters['num_epoch']\r\n momentum_rate = parameters['momentum']\r\n loss = parameters['loss_function']\r\n accuracy = parameters['accuracy']\r\n regularization = parameters['regularization']\r\n batch_size = parameters['batch_size']\r\n optimizer = parameters['optimizer'] if parameters['optimizer'] is not None else 'batch'\r\n self.__init__(max_epoch, optimizer, loss, accuracy, momentum_rate, regularization, batch_size)", "def __init__( self, parameters={} ):\n self.params = {}", "def params_init(self) -> None:\n # Initialize weights and biases with uniform distribution.\n nn.init.uniform_(self.emb.weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_e2h[1].weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_e2h[1].bias, self.init_lower, self.init_upper)\n for lyr in range(self.n_lyr):\n self.stack_rnn[2 * lyr].params_init()\n nn.init.uniform_(self.fc_h2e[0].weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_h2e[0].bias, self.init_lower, self.init_upper)", "def init_paramters(self):\r\n carb_bg_ratio = 5.0\r\n time_to_breakdown = 45.0\r\n insulin_bg_ratio = 50.0\r\n time_to_peak = 45.0\r\n basal_rate = 0.0\r\n digestion_speed = 1.0\r\n activation_speed = 1.0\r\n\r\n # set state to initial\r\n self.S = [self.carb_bg_ratio, self.time_to_breakdown,\r\n self.insulin_bg_ratio, self.time_to_peak,\r\n self.basal_rate, 
self.digestion_speed,\r\n self.activation_speed]", "def initializeParameters(self):\n\n self.params[2].value = False\n self.params[3].enabled = False\n self.params[7].value = True\n self.params[7].enabled = False\n self.params[8].value = None\n self.params[8].enabled = False", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def initialize_params(self, params):\n pass", "def _setupModel(self, parameters):\r\n ModelFitterCore.setupModel(self.roadrunnerModel, parameters,\r\n logger=self.logger)", "def __init__(self):\n self._params = None", "def init_params(self):\n self.conv = Conv(self.conv_layers[0][-1], self.out_channels, padding=self.padding,stride=self.stride)\n self.W = torch.randn(self.num_labels, self.cout_numel, requires_grad=True)\n self.T = torch.randn(self.num_labels, self.num_labels, requires_grad=True)", "def __init__(self):\n\n # The object is already initialised.\n if self._initialised: return\n\n # Execute the base class __init__ method.\n Param_list.__init__(self)\n\n # Add the model variables.\n self._add_model_info()\n\n # Add the base data.\n self._add_align_data()\n\n # Add the parameters of all models.\n self._add(\n 'pivot_x',\n scope = 'global',\n units = 'Angstrom',\n desc = 'The pivot point position x coordinate',\n py_type = float,\n set = 'params',\n scaling = 1e2,\n grid_lower = pivot_x_lower,\n grid_upper = pivot_x_upper,\n err = True,\n sim = True\n )\n self._add(\n 'pivot_y',\n scope = 'global',\n units = 'Angstrom',\n desc = 'The pivot point position y coordinate',\n py_type = float,\n set = 'params',\n scaling = 1e2,\n grid_lower = pivot_y_lower,\n grid_upper = pivot_y_upper,\n err = True,\n sim = True\n )\n self._add(\n 'pivot_z',\n scope = 'global',\n units = 'Angstrom',\n desc = 'The pivot point position z coordinate',\n py_type = float,\n set = 'params',\n scaling = 1e2,\n grid_lower = pivot_z_lower,\n grid_upper = pivot_z_upper,\n err = True,\n sim = True\n )\n self._add(\n 'ave_pos_x',\n scope = 'global',\n units = 'Angstrom',\n desc = 'The average position x translation',\n py_type = float,\n set = 'params',\n grid_lower = -5,\n grid_upper = 5,\n err = True,\n sim = True\n )\n self._add(\n 'ave_pos_y',\n scope = 'global',\n units = 'Angstrom',\n desc = 'The average position y translation',\n py_type = float,\n set = 'params',\n grid_lower = -5,\n grid_upper = 5,\n err = True,\n sim = True\n )\n self._add(\n 'ave_pos_z',\n scope = 'global',\n units = 'Angstrom',\n desc = 'The average position z translation',\n py_type = float,\n set = 'params',\n grid_lower = -5,\n grid_upper = 5,\n err = True,\n sim = True\n )\n self._add(\n 'ave_pos_alpha',\n scope = 'global',\n units = 'rad',\n desc = 'The average position alpha Euler angle',\n py_type = float,\n set = 'params',\n grid_lower = 0.0,\n grid_upper = angle_upper_excluding_bound,\n err = True,\n sim = True\n )\n self._add(\n 'ave_pos_beta',\n scope = 'global',\n units = 'rad',\n desc = 'The average position beta Euler angle',\n py_type = float,\n set = 'params',\n grid_lower = 0.0,\n grid_upper = pi,\n err = True,\n sim = True\n )\n self._add(\n 'ave_pos_gamma',\n scope = 'global',\n units = 'rad',\n desc = 'The average position gamma Euler angle',\n py_type = float,\n set = 'params',\n grid_lower = 0.0,\n grid_upper = angle_upper_excluding_bound,\n err = True,\n sim = True\n )\n self._add(\n 'eigen_alpha',\n scope = 'global',\n units = 'rad',\n desc = 'The Eigenframe alpha 
Euler angle',\n py_type = float,\n set = 'params',\n grid_lower = 0.0,\n grid_upper = angle_upper_excluding_bound,\n err = True,\n sim = True\n )\n self._add(\n 'eigen_beta',\n scope = 'global',\n units = 'rad',\n desc = 'The Eigenframe beta Euler angle',\n py_type = float,\n set = 'params',\n grid_lower = 0.0,\n grid_upper = pi,\n err = True,\n sim = True\n )\n self._add(\n 'eigen_gamma',\n scope = 'global',\n units = 'rad',\n desc = 'The Eigenframe gamma Euler angle',\n py_type = float,\n set = 'params',\n grid_lower = 0.0,\n grid_upper = angle_upper_excluding_bound,\n err = True,\n sim = True\n )\n self._add(\n 'axis_theta',\n scope = 'global',\n units = 'rad',\n desc = 'The cone axis polar angle (for the isotropic cone model)',\n py_type = float,\n set = 'params',\n grid_lower = 0.0,\n grid_upper = pi,\n err = True,\n sim = True\n )\n self._add(\n 'axis_phi',\n scope = 'global',\n units = 'rad',\n desc = 'The cone axis azimuthal angle (for the isotropic cone model)',\n py_type = float,\n set = 'params',\n grid_lower = 0.0,\n grid_upper = angle_upper_excluding_bound,\n err = True,\n sim = True\n )\n self._add(\n 'axis_alpha',\n scope = 'global',\n units = 'rad',\n desc = 'The rotor axis alpha angle (the rotation angle out of the xy plane)',\n py_type = float,\n set = 'params',\n grid_lower = -pi,\n grid_upper = axis_alpha_upper,\n err = True,\n sim = True\n )\n self._add(\n 'cone_theta_x',\n scope = 'global',\n units = 'rad',\n desc = 'The pseudo-ellipse cone opening half-angle for the x-axis',\n py_type = float,\n set = 'params',\n grid_lower = cone_angle_lower,\n grid_upper = cone_angle_upper,\n err = True,\n sim = True\n )\n self._add(\n 'cone_theta_y',\n scope = 'global',\n units = 'rad',\n desc = 'The pseudo-ellipse cone opening half-angle for the y-axis',\n py_type = float,\n set = 'params',\n grid_lower = cone_angle_lower,\n grid_upper = cone_angle_upper,\n err = True,\n sim = True\n )\n self._add(\n 'cone_theta',\n scope = 'global',\n units = 'rad',\n desc = 'The isotropic cone opening half-angle',\n py_type = float,\n set = 'params',\n grid_lower = cone_angle_lower,\n grid_upper = cone_angle_upper,\n err = True,\n sim = True\n )\n self._add(\n 'cone_s1',\n scope = 'global',\n units = '',\n desc = 'The isotropic cone order parameter',\n py_type = float,\n set = 'params',\n grid_lower = -0.125,\n grid_upper = 1.0,\n err = True,\n sim = True\n )\n self._add(\n 'cone_sigma_max',\n scope = 'global',\n units = 'rad',\n desc = 'The torsion angle',\n py_type = float,\n set = 'params',\n grid_lower = cone_angle_lower,\n grid_upper = cone_angle_upper,\n err = True,\n sim = True\n )\n\n # Add minimisation structures.\n self._add_min_data(min_stats_global=True)\n\n # Set up the user function documentation.\n self._set_uf_title(\"Frame order parameters\")\n self._uf_param_table(label=\"table: frame order parameters\", caption=\"Frame order parameters.\", scope='global')\n self._uf_param_table(label=\"table: frame order parameter value setting with defaults\", caption=\"Frame order parameter value setting.\", scope='global', default=True)", "def __init__(self, parameter_dictionary):\n super().__init__(parameter_dictionary)\n\n self.model_string = \"gauss\"\n model_dictionary = self._get_model_dict(__class__.default_parameters)\n\n # wake expansion parameters\n self.ka = model_dictionary[\"ka\"]\n self.kb = model_dictionary[\"kb\"]\n\n # near wake / far wake boundary parameters\n self.alpha = model_dictionary[\"alpha\"]\n self.beta = model_dictionary[\"beta\"]\n\n # GCH Parameters\n 
self.calculate_VW_velocities = model_dictionary[\"calculate_VW_velocities\"]\n self.use_yaw_added_recovery = model_dictionary[\"use_yaw_added_recovery\"]\n self.eps_gain = model_dictionary[\"eps_gain\"]", "def __init__(self, name, config):\n super(Model, self).__init__()\n # set all config values as attributes on the model for ease of access\n self.config = config\n for key in config.keys():\n setattr(self, key, config[key])\n # override the name with the run number appended name\n self.name = name\n self.param_groups = []", "def initialize(self):\n LOG.info(\"Initializing Model.\")\n self.model = self.convert(df=self.training_df)\n if self.bootstraps is not None:\n LOG.info(\"Bootstrapping Data.\")\n self.bootstrap_data()", "def _initialize_model(rngs):\n init_model_state, init_params = model_def.init(\n rngs, *dummy_input, train=False, debug=False).pop('params')\n # Set bias in the head to low value, such that loss is small initially.\n if config.get('init_head_bias', None) is not None:\n init_params = flax.core.unfreeze(init_params)\n init_params['output_projection'] = optimizers.tree_map_with_names(\n lambda p: jnp.full_like(p, config.init_head_bias),\n init_params['output_projection'],\n match_name_fn=lambda name: 'bias' in name)\n init_params = flax.core.freeze(init_params)\n return init_params, init_model_state", "def set_params(self):\r\n pass", "def initialize_model_params():\n beta_0 = np.array([0., 0.])\n mu_0 = 0.\n return beta_0, mu_0", "def initialize(self):\n self.initialize_edges()\n self.initialize_prob()\n self.initialize_total_input_dict()\n\n self.initialize_fpmusigv_dict()", "def init_attrs(self):\n raise NotImplementedError", "def __init__(self, **kwargs):\n\n ## Model names to be loaded\n self.names = []\n # Apply passed keyword arguments to the Request object.\n super(ObjectDetectionLoadModels.Request, self).__init__(**kwargs)", "def __init__(self, initial_params, save_name=\"model_param.joblib\"):\n super().__init__()\n self.initial_params = initial_params\n self.save_name = save_name", "def initialize(self, **kwargs):", "def setup_parameters(self):\n structure = self.ctx.structure_initial_primitive\n ecutwfc = []\n ecutrho = []\n\n for kind in structure.get_kind_names():\n try:\n dual = self.ctx.protocol['pseudo_data'][kind]['dual']\n cutoff = self.ctx.protocol['pseudo_data'][kind]['cutoff']\n cutrho = dual * cutoff\n ecutwfc.append(cutoff)\n ecutrho.append(cutrho)\n except KeyError as exception:\n self.abort_nowait('failed to retrieve the cutoff or dual factor for {}'.format(kind))\n\n natoms = len(structure.sites)\n conv_thr = self.ctx.protocol['convergence_threshold'] * natoms\n\n self.ctx.inputs['parameters'] = {\n 'CONTROL': {\n 'restart_mode': 'from_scratch',\n 'tstress': self.ctx.protocol['tstress'],\n },\n 'SYSTEM': {\n 'ecutwfc': max(ecutwfc),\n 'ecutrho': max(ecutrho),\n 'smearing': self.ctx.protocol['smearing'],\n 'degauss': self.ctx.protocol['degauss'],\n 'occupations': self.ctx.protocol['occupations'],\n },\n 'ELECTRONS': {\n 'conv_thr': conv_thr,\n }\n }", "def __init__(self, model):\n self._model = model", "def reset_parameters(self):\n logger.info('===== Initialize %s =====' % self.__class__.__name__)\n nn.init.normal_(self.embed.weight, mean=0.0, std=self.d_model ** -0.5)\n nn.init.constant_(self.embed.weight[self.pad], 0)\n if self.output is not None and not self.tie_embedding:\n nn.init.xavier_uniform_(self.output.weight)\n nn.init.constant_(self.output.bias, 0.0)", "def _set_init_param_dict(self):\n\n self.param_dict = {}\n\n try:\n 
suppress_warning = self._suppress_repeated_param_warning\n except AttributeError:\n suppress_warning = False\n msg = (\"\\n\\nThe param_dict key %s appears in more than one component model.\\n\"\n \"This is permissible, but if you are seeing this message you should be sure you \"\n \"understand it.\\nIn particular, double-check that this parameter does not have \"\n \"conflicting meanings across components.\\n\"\n \"\\nIf you do not wish to see this message every time you instantiate, \\n\"\n \"simply attach a _suppress_repeated_param_warning attribute \\n\"\n \"to any of your component models and set this variable to ``True``.\\n\")\n\n for component_model in self.model_dictionary.values():\n\n if not hasattr(component_model, 'param_dict'):\n component_model.param_dict = {}\n intersection = set(self.param_dict) & set(component_model.param_dict)\n if intersection != set():\n for key in intersection:\n if suppress_warning is False:\n warn(msg % key)\n\n for key, value in component_model.param_dict.iteritems():\n self.param_dict[key] = value\n\n self._init_param_dict = copy(self.param_dict)", "def __init__(self):\n self.model = None", "def __init__(self):\n self.model = None", "def __init__(self, parameters: dict) -> None:\n self.model_list = parameters[\"model_list\"]\n self.plot_name = parameters[\"plot_name\"]\n self.save_path = parameters[\"save_path\"]", "def init_parameters(self):\n stdv = 1. / math.sqrt(self.weight.data.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)", "def initialize(self) -> None:\n self.model = load(self.path)", "def set_load_model_parameters(self):\n\n self.controller.set_new_model_test_input_path(self.test_input.get())\n self.controller.set_new_model_results_input_path(self.results_input.get())\n self.controller.set_new_model_running(False)", "def __init__(self, controlparams, schizparams):\n self.controlparams = controlparams\n self.schizparams = schizparams\n super(BeemanNML2Model, self).__init__(\n controlparams=controlparams, schizparams=schizparams\n )", "def __init__( self, parameters={} ):\n self.params = {}\n self.reset(parameters)", "def _setup_params(self) -> None:\n self.i = 0 # Year\n self.ela = self.ela_start # Equilibrium line altitude\n self.steady_state = False # Control variable for steady state\n self.fracd8_mode = \"limited\" # Mode of the fracd8 algorithm", "def __init__(self):\n # Number of examples per epoch of training data.\n self.num_examples_per_epoch = None \n\n # Optimizer for training the model.\n self.optimizer = \"SGD\" #default \"SGD\"\n\n # Learning rate for the initial phase of training.\n self.initial_learning_rate = 2.0 # default 2.0\n self.learning_rate_decay_factor = 0.8\n self.num_epochs_per_decay = 4 #default 8\n\n # If not None, clip gradients to this value.\n self.clip_gradients = 5.0\n\n # How many model checkpoints to keep.\n self.max_checkpoints_to_keep = 2", "def init_parameters(self):\n # Create the weights and biases\n for i in range(1, len(self.layer_dimensions)):\n # Initialization from He et al.\n mu = 0\n var = 2 / self.layer_dimensions[i]\n sigma = np.sqrt(var)\n weight_shape = (self.layer_dimensions[i - 1], self.layer_dimensions[i])\n weight = np.random.normal(loc=mu, scale=sigma, size=weight_shape)\n bias = np.zeros((self.layer_dimensions[i], ))\n\n # Saving in the parameters dict\n layer_weight = \"w_\" + str(i)\n self._parameters[layer_weight] = weight\n layer_b = \"b_\" + str(i)\n self._parameters[layer_b] = bias", "def 
set_default_parameters(self):\n super().set_default_parameters()", "def __init__(self, **kwds ):\n super(Model, self).__init__()\n self.__key = None \n for name, value in kwds.items():\n self[name] = value", "def set_params(self):\n raise NotImplementedError", "def init(self):\n inputs = self.inputs()\n outputs = self.outputs(inputs)\n self.model = tf.keras.Model(inputs=inputs, outputs=outputs)\n self.model.compile(optimizer=self.optimizer() or self.config.get('optimizer'),\n loss=self.loss() or None,\n metrics=self.metrics() or None,\n loss_weights=self.loss_weights() or None,\n weighted_metrics=self.weighted_metrics() or None,\n target_tensors=self.target_tensors() or None)\n if self.config.get('debug'):\n self.model.summary()", "def set_initial_values(self):\n\n pass", "def create_initial_parameters(self):\n update_nested_dictionary(\n self.settings,\n {self.highest_lookup: {\n self.highest_sublookup: self.kw\n }})", "def initializeParameters(self):\r\n\t\tself.input_raster.enabled = True\r\n\t\tself.approach.enabled = True\r\n\t\tself.predefined_pattern.enabled = False\r\n\t\tself.predefined_pattern.value = 'Mexican Hat wavelet'\r\n\t\tself.pattern_workspace.enabled = False\r\n\t\tself.point_matrix_size.enabled = False\r\n\t\tself.point_matrix_size.value = 3\r\n\t\tself.point_vectors.enabled = False\r\n\t\tself.mapping_field.enabled = False\r\n\t\tself.move_to_max.enabled = False\r\n\t\tself.move_to_max_distance.enabled = False\r\n\t\tself.move_to_max_distance.value = 3\r\n\t\tself.mh_iteration.enabled = False\r\n\t\tself.mh_dil_val.enabled = False\r\n\t\tself.mh_dil_val.value = 1\r\n\t\tself.mh_dil_start.value = 0.01\r\n\t\tself.mh_dil_stop.value = 1\r\n\t\tself.mh_dil_step.value = 0.1\r\n\t\tself.mh_dil_start.enabled = False\r\n\t\tself.mh_dil_stop.enabled = False\r\n\t\tself.mh_dil_step.enabled = False\r\n\t\tself.transform.enabled = False\r\n\t\tself.size_of_the_cell.enabled = False\r\n\t\tself.size_of_the_cell.value = 1\r\n\t\tself.output_sim_matrix.enabled = False\r\n\t\tself.output_table.enabled = False\r\n\t\tself.output_raster_workspace.enabled = False", "def initialize(self, **kwargs):\n\n # Defining the configuration object\n self.config = kwargs.get('config')", "def initialize(self, **kwargs: Any) -> None:\n pass", "def __init__(self):\n self.model = None\n self.joined_datasets = None\n self.id_col = None\n self.val_col = None\n self.pop_col = None\n self.total_population_per_unit = None\n self.centroids_of_areal_data = None\n self.prepared_data = None\n self.unknown_area_id = None\n\n # Parameters\n self.lags = None\n self.step = None\n self.min_no_of_observations = None\n self.max_search_radius = None", "def __init__(self, config):\n self.model = None\n self.config = config\n self.batch_size = config.get('batch_size')\n self.epochs = config.get('epochs')\n self.steps_per_epoch = config.get('steps_per_epoch')\n self.validation_steps = config.get('validation_steps')\n self.distributed = config.get('distributed', False)\n \n # init model\n self.init()", "def initialize_model(self, config_param_vals = None):\n self._is_initialized = True\n\n self.fmu.instantiate()\n self.fmu.reset()\n self.fmu.setupExperiment(startTime=self.start_time)\n if config_param_vals is not None:\n self._apply_config(config_param_vals)\n self.fmu.enterInitializationMode()\n self.fmu.exitInitializationMode()\n\n return", "def init(self, rng_key, num_warmup, init_params, model_args, model_kwargs):\n raise NotImplementedError", "def Params(cls):\n p = super().Params()\n p.Define('train_task', None, 
'Underlying task')\n p.Define('decode_task', None, 'Underlying task')\n p.Define('train_dataset_name', None, '')\n p.Define('decode_dataset_name', None, '')\n p.Define('train_steps_per_loop', 0, '')\n p.Define('decode_steps_per_loop', 0, '')\n return p", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def initialize(self):\n self.conv1.reset_parameters()\n self.conv2.reset_parameters()", "def _initialize_model(rngs):\n init_model_state, init_params = nn.init(\n fn=init_fn, module=model_def)(rngs).pop('params')\n # Set bias in the head to low value, such that loss is small initially.\n if (config.get('init_head_bias', None) is not None and\n 'output_projection' in init_params):\n init_params = flax.core.unfreeze(init_params)\n init_params['output_projection'] = optimizers.tree_map_with_names(\n lambda p: jnp.full_like(p, config.init_head_bias),\n init_params['output_projection'],\n match_name_fn=lambda name: 'bias' in name)\n init_params = flax.core.freeze(init_params)\n return init_params, init_model_state", "def __init__(self, params):\r\n self.Params.update(params)\r\n self._tracked_properties.extend(\r\n ['Application', 'Algorithm', 'Citation'])", "def __init__(self, params):\r\n self.Params = params", "def __init__(self, params):\r\n self.Params = params", "def __init__(self, params):\r\n self.Params = params", "def __init__(self, params):\r\n self.Params = params", "def __init__(self, params):\r\n self.Params = params", "def __init__(self, params):\r\n self.Params = params", "def __init__(self):\n\n super().__init__()\n\n self._model = None # type: StateSpaceModel\n self._kernel = None # type: Distribution", "def setParameters(self):\n\n # Set the parameters\n self.taux = 24.2\n self.mu = 0.23\n self.G = 33.75\n self.alpha_0 = 0.05\n self.delta = 0.0075\n self.p = 0.50\n self.I0 = 9500.0\n self.kparam = 0.55", "def __init__(self, params):\n defaults = {}\n super(Regralizer, self).__init__(params, defaults)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def init(self, parameters, agent_parameters):\n pass", "def __init__(self, **parameters):\n self.parameters = parameters", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['method'] = self.method\n paramDict['dimension'] = self.dimension\n paramDict['rank'] = self.rank\n paramDict['mu'] = self.mu\n paramDict['covariance'] = self.covariance\n return paramDict", "def __init__(self, config):\n super().__init__()\n self.model_list = []\n self.model_name_list = []\n for key in config[\"Models\"]:\n model_config = config[\"Models\"][key]\n freeze_params = False\n pretrained = None\n if \"freeze_params\" in model_config:\n freeze_params = model_config.pop(\"freeze_params\")\n if \"pretrained\" in model_config:\n pretrained = model_config.pop(\"pretrained\")\n model = BaseModel(model_config)\n if pretrained is not None:\n load_pretrained_params(model, pretrained)\n if freeze_params:\n for param in model.parameters():\n param.trainable = False\n self.model_list.append(self.add_sublayer(key, model))\n self.model_name_list.append(key)", "def 
_default_params(self) -> Dict[str, Any]:\n normal_params = {\n \"temperature\": self.temperature,\n \"max_tokens\": self.max_tokens,\n \"top_p\": self.top_p,\n \"frequency_penalty\": self.frequency_penalty,\n \"presence_penalty\": self.presence_penalty,\n \"n\": self.n,\n # \"best_of\": self.best_of,\n \"request_timeout\": self.request_timeout,\n \"logit_bias\": self.logit_bias,\n }\n return {**normal_params, **self.model_kwargs}", "def define_parameters(self):\n self.weight_matrix = torch.nn.Parameter(torch.Tensor(self.in_channels, self.out_channels))\n self.bias = torch.nn.Parameter(torch.Tensor(self.out_channels))", "def define_parameters(self):\n self.weight_matrix = torch.nn.Parameter(torch.Tensor(self.in_channels, self.out_channels))\n self.bias = torch.nn.Parameter(torch.Tensor(self.out_channels))", "def __init__(self, num_models: int, num_classes: int):\n self.nun_models = num_models\n self.num_classes = num_classes\n self.model: keras.Model = self.init_model()", "def prepare_model(self, **kwargs):\n pass", "def __init__(self):\n self.scaler = None\n self.model = None\n self.encoder = {}\n\n self._load_model()\n return" ]
[ "0.8004957", "0.76819557", "0.7654317", "0.75942737", "0.7463661", "0.7323001", "0.7311815", "0.7258441", "0.7244055", "0.72392327", "0.7186368", "0.71451765", "0.709025", "0.7045288", "0.7019865", "0.7001315", "0.69580907", "0.69482946", "0.69447833", "0.68896854", "0.68411654", "0.6813034", "0.679329", "0.67905813", "0.6785132", "0.67707014", "0.6748236", "0.6698412", "0.6697852", "0.66753745", "0.66676307", "0.66569585", "0.6649168", "0.6633604", "0.6627908", "0.6612606", "0.6609555", "0.66081893", "0.6605977", "0.6600568", "0.6574636", "0.6516159", "0.650923", "0.64701325", "0.6465385", "0.6457398", "0.6457398", "0.6454762", "0.64467376", "0.6446651", "0.644438", "0.64366525", "0.64333546", "0.64236563", "0.64190716", "0.6400577", "0.63990426", "0.63957286", "0.6394604", "0.63941246", "0.6388647", "0.6386515", "0.63748527", "0.63732743", "0.6363901", "0.63595366", "0.6345623", "0.63450986", "0.6339371", "0.63301885", "0.63263774", "0.63263774", "0.63263774", "0.63263774", "0.63227314", "0.6318074", "0.63148797", "0.6313992", "0.6313992", "0.6313992", "0.6313992", "0.6313992", "0.6313992", "0.6312352", "0.63121694", "0.63088036", "0.6306286", "0.6306286", "0.6306286", "0.6306286", "0.6306286", "0.63057095", "0.6303333", "0.6300902", "0.6298246", "0.6297175", "0.62967694", "0.62967694", "0.62925476", "0.62884974", "0.62865967" ]
0.0
-1
Builds an HParam object with default hyperparameters.
def default_hparams(): raise NotImplementedError('Not implemented')
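For reference only (not part of the original record, whose document field is the unimplemented stub above): a minimal sketch of what a filled-in version could look like, assuming the TF 1.x `tf.contrib.training.HParams` API that the implementations listed among the negatives below also use. The specific hyperparameter names and values are illustrative assumptions, not taken from the source.

import tensorflow as tf

def default_hparams():
    """Builds an HParam object with default hyperparameters (illustrative sketch)."""
    # All names/values below are assumed defaults chosen for the example;
    # a real model would define its own fields here.
    return tf.contrib.training.HParams(
        batch_size=32,
        learning_rate=0.001,
        num_hidden_layers=2,
        dropout_rate=0.2,
    )

# Usage sketch: override a default from a command-line style string.
# hparams = default_hparams()
# hparams.parse('learning_rate=0.01,batch_size=64')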
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def default_hparams():\n\n model_hparams = hparams.ModelHparams(\n model_name='imagenet_resnet_50',\n model_init='kaiming_normal',\n batchnorm_init='uniform',\n )\n\n dataset_hparams = hparams.DatasetHparams(\n dataset_name='imagenet',\n batch_size=1024,\n )\n\n training_hparams = hparams.TrainingHparams(\n optimizer_name='sgd',\n momentum=0.9,\n milestone_steps='30ep,60ep,80ep',\n lr=0.4,\n gamma=0.1,\n weight_decay=1e-4,\n training_steps='90ep',\n warmup_steps='5ep',\n )\n\n pruning_hparams = sparse_global.PruningHparams(\n pruning_strategy='sparse_global',\n pruning_fraction=0.2\n )\n\n return LotteryDesc(model_hparams, dataset_hparams, training_hparams, pruning_hparams)", "def get_default_hparams():\n hparams_map = base_model.get_default_hparams().values()\n hparams_map.update({\n 'conditional': True,\n 'dec_rnn_size': [512], # Decoder RNN: number of units per layer.\n 'dec_rnn_attn_len': 0, # Decoder RNN: length of attention vector.\n 'enc_rnn_size': [256], # Encoder RNN: number of units per layer per dir.\n 'dropout_keep_prob': 1.0, # Probability all dropout keep.\n 'sampling_schedule': 'constant', # constant, exponential, inverse_sigmoid\n 'sampling_rate': 0.0, # Interpretation is based on `sampling_schedule`.\n })\n return tf.contrib.training.HParams(**hparams_map)", "def default_hparams():\n return tf.contrib.training.HParams(\n decay_rate=0.96,\n decay_steps=2000,\n leaky=False,\n learning_rate=0.001,\n # loss_type=[sigmoid, softmax, margin]\n loss_type='margin',\n # mask_type=[none, label, norm, routing, weighted-routing]\n mask_type='weighted-routing',\n balance_factor=0.005,\n num_prime_capsules=32,\n num_latent_capsules=16,\n num_latent_atoms=16,\n padding='VALID',\n remake=True,\n routing=3,\n verbose=True,\n unsupervised=True,\n ema_decay=0.99,\n boost_step=50,\n boost_factor=0.1,\n target_min_freq=0.03,\n target_max_freq=0.12,\n boosting=True\n )", "def _starting_hparams():\n hparams = contrib_training.HParams()\n hparams.add_hparam('batch_style', 'bucket')\n hparams.add_hparam('gradient_clipping_decay', 0.9999)\n hparams.add_hparam('learning_rate', 0.0005)\n hparams.add_hparam('lr_decay_rate', .997)\n hparams.add_hparam('lr_decay_steps', 1000)\n hparams.add_hparam('lr_warmup_steps', 3000)\n hparams.add_hparam('model_type', 'cnn')\n hparams.add_hparam('resnet_bottleneck_factor', 0.5)\n hparams.add_hparam('decision_threshold', 0.5)\n hparams.add_hparam('denominator_power', 1.0) # Standard mean-pooling.\n return hparams", "def make_default_hyperparameters(dim):\n return numpy.ones(dim + 1)", "def default_hparams():\n return {\n \"value\": 0.,\n \"name\": \"constant_connector\"\n }", "def create_or_load_hparams(default_hparams, hparams_path):\n hparams = utils.maybe_parse_standard_hparams(default_hparams, hparams_path)\n hparams = extend_hparams(hparams)\n # Print HParams\n utils.print_hparams(hparams)\n return hparams", "def default_hparams():\n hparams = DatasetBase.default_hparams()\n hparams.update({\n \"transforms\": None,\n \"processed_csv\": None,\n \"mode\": None,\n \"batch_size\": 1,\n \"shuffle\": False,\n \"shuffle_buffer_size\": 32,\n \"input_channel\": \"RGB\"\n })\n return hparams", "def test_hparams(self):\n\n inputs = tf.placeholder(dtype=tf.int32, shape=[None, None])\n\n # case 1: set \"pretrained_mode_name\" by constructor argument\n encoder = XLNetEncoder(pretrained_model_name=\"xlnet-large-cased\",\n hparams={})\n encoder(inputs)\n self.assertEqual(len(encoder.attn_layers), 24)\n self.assertEqual(len(encoder.ff_layers), 24)\n\n # case 2: set 
\"pretrained_mode_name\" by hparams\n hparams = {\n \"pretrained_model_name\": \"xlnet-base-cased\"\n }\n encoder = XLNetEncoder(hparams=hparams)\n encoder(inputs)\n self.assertEqual(len(encoder.attn_layers), 12)\n self.assertEqual(len(encoder.ff_layers), 12)\n\n # case 3: set to None in both hparams and constructor argument\n # load no pre-trained model\n hparams = {\n \"pretrained_model_name\": None,\n \"num_layers\": 16\n }\n encoder = XLNetEncoder(hparams=hparams)\n encoder(inputs)\n self.assertEqual(len(encoder.attn_layers), 16)\n self.assertEqual(len(encoder.ff_layers), 16)\n\n # case 4: using default hparams\n encoder = XLNetEncoder()\n encoder(inputs)\n self.assertEqual(len(encoder.attn_layers), 12)\n self.assertEqual(len(encoder.ff_layers), 12)", "def init_parameters(obj, hyperparameters):\n # Initialize Global Configuration Parameter\n params = hyperparameters['global']\n setattr(obj, 'param', params)\n\n # Initialize Attributes (Pre-Checked Parameters)\n setattr(obj, 'learning_rate', params['learning_rate'])\n setattr(obj, 'loss', params['loss'])\n setattr(obj, 'max_iter', params['max_iter'])\n\n if params['loss'] == 'least_squares':\n setattr(obj, 'num_classes', 1)\n elif params['loss'] in ['binary_crossentropy', 'categorical_crossentropy', 'auto']:\n setattr(obj, 'num_classes', params['num_classes'])\n\n # Initialize Attributes (Optional Values - Based on Default Parameters)\n if 'l2_regularization' not in params or params['l2_regularization'] is None:\n setattr(obj, 'l2_regularization', 0)\n else:\n setattr(obj, 'l2_regularization', params['l2_regularization'])\n\n if 'max_bins' not in params:\n setattr(obj, 'max_bins', 255)\n else:\n setattr(obj, 'max_bins', params['max_bins'])\n\n if 'max_depth' not in params or params['max_depth'] is None:\n setattr(obj, 'max_depth', None)\n else:\n setattr(obj, 'max_depth', params['max_depth'])\n\n if 'max_leaf_nodes' not in params or params['max_leaf_nodes'] is None:\n setattr(obj, 'max_leaf_nodes', 31)\n else:\n setattr(obj, 'max_leaf_nodes', params['max_leaf_nodes'])\n\n if 'min_samples_leaf' not in params or params['min_samples_leaf'] is None:\n setattr(obj, 'min_samples_leaf', 20)\n else:\n setattr(obj, 'min_samples_leaf', params['min_samples_leaf'])\n\n if 'random_state' in params:\n setattr(obj, 'random_state', params['random_state'])\n else:\n setattr(obj, 'random_state', None)\n\n if 'scoring' in params:\n setattr(obj, 'scoring', params['scoring'])\n else:\n setattr(obj, 'scoring', None)\n\n if 'verbose' not in params or params['verbose'] is None:\n setattr(obj, 'verbose', False)\n else:\n setattr(obj, 'verbose', True)\n\n return obj", "def create_hparams(hparam_string=None):\n hparams = tf.contrib.training.HParams(\n # The name of the architecture to use.\n arch='resnet',\n lrelu_leakiness=0.2,\n batch_norm_decay=0.9,\n weight_decay=1e-5,\n normal_init_std=0.02,\n generator_kernel_size=3,\n discriminator_kernel_size=3,\n\n # Stop training after this many examples are processed\n # If none, train indefinitely\n num_training_examples=0,\n\n # Apply data augmentation to datasets\n # Applies only in training job\n augment_source_images=False,\n augment_target_images=False,\n\n # Discriminator\n # Number of filters in first layer of discriminator\n num_discriminator_filters=64,\n discriminator_conv_block_size=1, # How many convs to have at each size\n discriminator_filter_factor=2.0, # Multiply # filters by this each layer\n # Add gaussian noise with this stddev to every hidden layer of D\n discriminator_noise_stddev=0.2, # lmetz: 
Start seeing results at >= 0.1\n # If true, add this gaussian noise to input images to D as well\n discriminator_image_noise=False,\n discriminator_first_stride=1, # Stride in first conv of discriminator\n discriminator_do_pooling=False, # If true, replace stride 2 with avg pool\n discriminator_dropout_keep_prob=0.9, # keep probability for dropout\n\n # DCGAN Generator\n # Number of filters in generator decoder last layer (repeatedly halved\n # from 1st layer)\n num_decoder_filters=64,\n # Number of filters in generator encoder 1st layer (repeatedly doubled\n # after 1st layer)\n num_encoder_filters=64,\n\n # This is the shape to which the noise vector is projected (if we're\n # transferring from noise).\n # Write this way instead of [4, 4, 64] for hparam search flexibility\n projection_shape_size=4,\n projection_shape_channels=64,\n\n # Indicates the method by which we enlarge the spatial representation\n # of an image. Possible values include:\n # - resize_conv: Performs a nearest neighbor resize followed by a conv.\n # - conv2d_transpose: Performs a conv2d_transpose.\n upsample_method='resize_conv',\n\n # Visualization\n summary_steps=500, # Output image summary every N steps\n\n ###################################\n # Task Classifier Hyperparameters #\n ###################################\n\n # Which task-specific prediction tower to use. Possible choices are:\n # none: No task tower.\n # doubling_pose_estimator: classifier + quaternion regressor.\n # [conv + pool]* + FC\n # Classifiers used in DSN paper:\n # gtsrb: Classifier used for GTSRB\n # svhn: Classifier used for SVHN\n # mnist: Classifier used for MNIST\n # pose_mini: Classifier + regressor used for pose_mini\n task_tower='doubling_pose_estimator',\n weight_decay_task_classifier=1e-5,\n source_task_loss_weight=1.0,\n transferred_task_loss_weight=1.0,\n\n # Number of private layers in doubling_pose_estimator task tower\n num_private_layers=2,\n\n # The weight for the log quaternion loss we use for source and transferred\n # samples of the cropped_linemod dataset.\n # In the DSN work, 1/8 of the classifier weight worked well for our log\n # quaternion loss\n source_pose_weight=0.125 * 2.0,\n transferred_pose_weight=0.125 * 1.0,\n\n # If set to True, the style transfer network also attempts to change its\n # weights to maximize the performance of the task tower. If set to False,\n # then the style transfer network only attempts to change its weights to\n # make the transferred images more likely according to the domain\n # classifier.\n task_tower_in_g_step=True,\n task_loss_in_g_weight=1.0, # Weight of task loss in G\n\n #########################################\n # 'simple` generator arch model hparams #\n #########################################\n simple_num_conv_layers=1,\n simple_conv_filters=8,\n\n #########################\n # Resnet Hyperparameters#\n #########################\n resnet_blocks=6, # Number of resnet blocks\n resnet_filters=64, # Number of filters per conv in resnet blocks\n # If true, add original input back to result of convolutions inside the\n # resnet arch. If false, it turns into a simple stack of conv/relu/BN\n # layers.\n resnet_residuals=True,\n\n #######################################\n # The residual / interpretable model. 
#\n #######################################\n res_int_blocks=2, # The number of residual blocks.\n res_int_convs=2, # The number of conv calls inside each block.\n res_int_filters=64, # The number of filters used by each convolution.\n\n ####################\n # Latent variables #\n ####################\n # if true, then generate random noise and project to input for generator\n noise_channel=True,\n # The number of dimensions in the input noise vector.\n noise_dims=10,\n\n # If true, then one hot encode source image class and project as an\n # additional channel for the input to generator. This gives the generator\n # access to the class, which may help generation performance.\n condition_on_source_class=False,\n\n ########################\n # Loss Hyperparameters #\n ########################\n domain_loss_weight=1.0,\n style_transfer_loss_weight=1.0,\n\n ########################################################################\n # Encourages the transferred images to be similar to the source images #\n # using a configurable metric. #\n ########################################################################\n\n # The weight of the loss function encouraging the source and transferred\n # images to be similar. If set to 0, then the loss function is not used.\n transferred_similarity_loss_weight=0.0,\n\n # The type of loss used to encourage transferred and source image\n # similarity. Valid values include:\n # mpse: Mean Pairwise Squared Error\n # mse: Mean Squared Error\n # hinged_mse: Computes the mean squared error using squared differences\n # greater than hparams.transferred_similarity_max_diff\n # hinged_mae: Computes the mean absolute error using absolute\n # differences greater than hparams.transferred_similarity_max_diff.\n transferred_similarity_loss='mpse',\n\n # The maximum allowable difference between the source and target images.\n # This value is used, in effect, to produce a hinge loss. 
Note that the\n # range of values should be between 0 and 1.\n transferred_similarity_max_diff=0.4,\n\n ################################\n # Optimization Hyperparameters #\n ################################\n learning_rate=0.001,\n batch_size=32,\n lr_decay_steps=20000,\n lr_decay_rate=0.95,\n\n # Recomendation from the DCGAN paper:\n adam_beta1=0.5,\n clip_gradient_norm=5.0,\n\n # The number of times we run the discriminator train_op in a row.\n discriminator_steps=1,\n\n # The number of times we run the generator train_op in a row.\n generator_steps=1)\n\n if hparam_string:\n tf.logging.info('Parsing command line hparams: %s', hparam_string)\n hparams.parse(hparam_string)\n\n tf.logging.info('Final parsed hparams: %s', hparams.values())\n return hparams", "def default_hparams():\n return tf.contrib.training.HParams(\n batch_size=5,\n learning_rate=0.0003,\n loss_type='sse', # sum square error (only option is sse)\n nonlinearity='tanh', # tanh or sigmoid\n filters=1024,\n bias_neurons=0, # add this many 'active' bias neurons\n bias=False, # include a bias value (to be trained)\n use_batch_transformer=True, #\n bt_presentation_repeat=2, # number of times the total sequence of repeats with blanks, is repeated\n bt_sample_repeat=6, # number of repeats of each original sample (1 = identical to input)\n bt_blank_repeat=4, # number of zero samples between each original sample\n bt_amplify_factor=20, # amplify input by this amount\n bt_degrade=True, # randomly select a sample from batch, degrade and append it & non-degraded sample\n bt_degrade_repeat=6,\n bt_degrade_value=0.0, # when degrading, set pixel to this value\n bt_degrade_factor=0.5, # what proportion of bits to knockout\n bt_degrade_type='random', # options: 'random' = randomly degrade,\n # 'vertical' = degrade a random half along vertical symmetry,\n # 'horizontal' = same but horizontal symmetry\n input_sparsity=0.5,\n max_outputs=3\n )", "def build_hparams(FLAGS):\n hparams = add_model_parameters(hyperparameters.params, FLAGS)\n hparams.training = True\n if FLAGS.hparams:\n hparams.parse(FLAGS.hparams)\n if FLAGS.eval_model:\n hparams.summary_frequency = 1\n hparams.test_frequency = 1\n hparams.save_frequency = 5\n hparams.training = False\n\n hparams.sdr_frequency = hparams.test_frequency * constants.AVG_SDR_ON_N_BATCHES\n # See STFT scipy doc\n hparams.waveform_size = (hparams.ntimebins - 1) * constants.ndiff\n\n return hparams", "def default_hparams():\n return {\n \"activation_fn\": \"tensorflow.identity\",\n \"name\": \"reparameterized_stochastic_connector\"\n }", "def default_hparams():\n return {\n 'initializer': None,\n 'num_heads': 8,\n 'output_dim': 512,\n 'num_units': 512,\n 'dropout_rate': 0.1,\n 'use_bias': False,\n 'name': 'multihead_attention_rpr',\n 'is_decoder': False,\n 'relative_attention_num_buckets': 32\n }", "def default_optimization_hparams() -> Dict[str, Any]:\n return {\n \"optimizer\": {\n \"type\": \"Adam\",\n \"kwargs\": {\n \"lr\": 0.001\n }\n },\n \"learning_rate_decay\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"gradient_clip\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"gradient_noise_scale\": None,\n # TODO(zhiting): allow module-level control of gradient_multipliers\n \"name\": None\n }", "def overwrite_hyperparams(self):\n try:\n default_hyperparams = self.hyperparams\n for key in default_hyperparams:\n try:\n flag = self.FLAGS[key]\n param_value = flag.value\n if param_value is not None:\n self.hyperparams[key] = param_value\n except:\n pass\n except:\n pass", "def _default_params(self) -> 
Dict[str, Any]:\n normal_params = {\n \"temperature\": self.temperature,\n \"max_tokens\": self.max_tokens,\n \"top_p\": self.top_p,\n \"frequency_penalty\": self.frequency_penalty,\n \"presence_penalty\": self.presence_penalty,\n \"n\": self.n,\n # \"best_of\": self.best_of,\n \"request_timeout\": self.request_timeout,\n \"logit_bias\": self.logit_bias,\n }\n return {**normal_params, **self.model_kwargs}", "def __init__(self, defaults={}, data=None):\n\n super().__init__(\n defaults={**OptimizationParameters.parameters, **defaults}, data=data\n )", "def _init_hyperparam(self, **p_par):\r\n \r\n try:\r\n p_input_size = self._input_space.get_num_dim()\r\n p_output_size = self._output_space.get_num_dim()\r\n except:\r\n raise ParamError('Input size and/or output size of the network are not defined.')\r\n \r\n if 'p_update_rate' not in p_par:\r\n p_par['p_update_rate'] = 1\r\n elif p_par.get('p_update_rate') < 1:\r\n raise ParamError(\"p_update_rate must be equal or higher than 1.\")\r\n \r\n if 'p_num_hidden_layers' not in p_par:\r\n raise ParamError(\"p_num_hidden_layers is not defined.\")\r\n \r\n if 'p_output_activation_fct' not in p_par:\r\n p_par['p_output_activation_fct'] = None\r\n \r\n if 'p_optimizer' not in p_par:\r\n raise ParamError(\"p_optimizer is not defined.\")\r\n \r\n if 'p_loss_fct' not in p_par:\r\n raise ParamError(\"p_loss_fct is not defined.\")\r\n\r\n if 'p_test_data' not in p_par:\r\n p_par['p_test_data'] = 0.3\r\n\r\n if 'p_batch_size' not in p_par:\r\n p_par['p_batch_size'] = 100\r\n\r\n if 'p_seed_buffer' not in p_par:\r\n p_par['p_seed_buffer'] = 1\r\n\r\n if 'p_learning_rate' not in p_par:\r\n p_par['p_learning_rate'] = 3e-4\r\n \r\n if 'p_hidden_size' not in p_par:\r\n raise ParamError(\"p_hidden_size is not defined.\")\r\n try:\r\n if len(p_par['p_hidden_size']) != p_par['p_num_hidden_layers']:\r\n raise ParamError(\"length of p_hidden_size list must be equal to p_num_hidden_layers or an integer.\")\r\n except:\r\n p_par['p_hidden_size'] = [int(p_par['p_hidden_size'])] * int(p_par['p_num_hidden_layers'])\r\n \r\n if 'p_activation_fct' not in p_par:\r\n raise ParamError(\"p_activation_fct is not defined.\")\r\n try:\r\n if len(p_par['p_activation_fct']) != p_par['p_num_hidden_layers']:\r\n raise ParamError(\"length of p_activation_fct list must be equal to p_num_hidden_layers or a single activation function.\")\r\n except:\r\n if isinstance(p_par['p_activation_fct'], list):\r\n raise ParamError(\"length of p_activation_fct list must be equal to p_num_hidden_layers or a single activation function.\")\r\n else:\r\n p_par['p_activation_fct'] = [p_par['p_activation_fct']] * int(p_par['p_num_hidden_layers'])\r\n \r\n if 'p_weight_bias_init' not in p_par:\r\n p_par['p_weight_bias_init'] = True\r\n \r\n if p_par['p_weight_bias_init']:\r\n if 'p_weight_init' not in p_par:\r\n p_par['p_weight_init'] = torch.nn.init.orthogonal_\r\n \r\n if 'p_bias_init' not in p_par:\r\n p_par['p_bias_init'] = lambda x: torch.nn.init.constant_(x, 0)\r\n \r\n if 'p_gain_init' not in p_par:\r\n p_par['p_gain_init'] = np.sqrt(2)\r\n \r\n self._hyperparam_space.add_dim(HyperParam('p_input_size','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_output_size','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_update_rate','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_num_hidden_layers','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_hidden_size','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_activation_fct'))\r\n 
self._hyperparam_space.add_dim(HyperParam('p_output_activation_fct'))\r\n self._hyperparam_space.add_dim(HyperParam('p_optimizer'))\r\n self._hyperparam_space.add_dim(HyperParam('p_loss_fct'))\r\n self._hyperparam_space.add_dim(HyperParam('p_test_data'))\r\n self._hyperparam_space.add_dim(HyperParam('p_batch_size'))\r\n self._hyperparam_space.add_dim(HyperParam('p_seed_buffer'))\r\n self._hyperparam_space.add_dim(HyperParam('p_learning_rate'))\r\n self._hyperparam_space.add_dim(HyperParam('p_weight_bias_init'))\r\n self._hyperparam_space.add_dim(HyperParam('p_weight_init'))\r\n self._hyperparam_space.add_dim(HyperParam('p_bias_init'))\r\n self._hyperparam_space.add_dim(HyperParam('p_gain_init'))\r\n self._hyperparam_tuple = HyperParamTuple(self._hyperparam_space)\r\n \r\n ids_ = self.get_hyperparam().get_dim_ids()\r\n self.get_hyperparam().set_value(ids_[0], p_input_size)\r\n self.get_hyperparam().set_value(ids_[1], p_output_size)\r\n self.get_hyperparam().set_value(ids_[2], p_par['p_update_rate'])\r\n self.get_hyperparam().set_value(ids_[3], p_par['p_num_hidden_layers'])\r\n self.get_hyperparam().set_value(ids_[4], p_par['p_hidden_size'])\r\n self.get_hyperparam().set_value(ids_[5], p_par['p_activation_fct'])\r\n self.get_hyperparam().set_value(ids_[6], p_par['p_output_activation_fct'])\r\n self.get_hyperparam().set_value(ids_[7], p_par['p_optimizer'])\r\n self.get_hyperparam().set_value(ids_[8], p_par['p_loss_fct'])\r\n self.get_hyperparam().set_value(ids_[9], p_par['p_test_data'])\r\n self.get_hyperparam().set_value(ids_[10], p_par['p_batch_size'])\r\n self.get_hyperparam().set_value(ids_[11], p_par['p_seed_buffer'])\r\n self.get_hyperparam().set_value(ids_[12], p_par['p_learning_rate'])\r\n self.get_hyperparam().set_value(ids_[13], p_par['p_weight_bias_init'])\r\n self.get_hyperparam().set_value(ids_[14], p_par['p_weight_init'])\r\n self.get_hyperparam().set_value(ids_[15], p_par['p_bias_init'])\r\n self.get_hyperparam().set_value(ids_[16], p_par['p_gain_init'])", "def __init__(self, **kwargs):\n # Register the hyperparameters and their type in _hparam_types.\n # _hparam_types maps the parameter name to a tuple (type, bool).\n # The type value is the type of the parameter for scalar hyperparameters,\n # or the type of the list elements for multidimensional hyperparameters.\n # The bool value is True if the value is a list, False otherwise.\n self._hparam_types = {}\n for name, value in six.iteritems(kwargs):\n self.add_hparam(name, value)", "def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):\n\n self.params = {}\n self.params['W1'] = weight_init_std * \\\n np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = weight_init_std * \\\n np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)", "def default_hparams():\n return {\n \"activation_fn\": \"tensorflow.identity\",\n \"name\": \"stochastic_connector\"\n }", "def add_default_params(self, params):\n params['key'] = self.key\n params['format'] = self.format\n #params['unique_id'] = generate_unique_id()\n return params", "def default_opts():\n return tf.contrib.training.HParams(\n num_repeats=1,\n superclass=False,\n class_proportion=1.0,\n invert_images=False,\n min_val=0, # set any 0 in the input image, to this new min_val. 
---> if >0, then don't do anything\n train_classes=['5', '6', '7', '8', '9'],\n test_classes=['5', '6', '7', '8', '9'],\n degrade_type='vertical', # vertical, horizontal or random: the model completes image degraded by this method\n degrade_step='hidden', # 'test' (apply at gen of test set), or 'input', 'hidden', 'none' (applied in graph)\n completion_gain=1.0,\n train_recurse=False,\n test_recurse=False,\n recurse_iterations=5, # if >1, then PC is recursive (only supported for Hopfield i.e. no recursion on training)\n rsummary_batches=2,\n input_mode={\n \"train_first\": \"complete\",\n \"train_inference\": \"complete\",\n \"test_first\": \"complete\",\n \"test_inference\": \"complete\"\n },\n evaluate=True,\n train=True,\n visualise_vc=False,\n visualise_dg_at_vc=False,\n visualise_pc_at_dg=False,\n visualise_pc_at_vc=False,\n evaluate_mode='simple' # simple = calc compl. of pc use pattern_completion_workflow,\n # expA_isolate_view = test completion and visualise at each stage\n # expA_isolate = test completion and range of tests to isolate performance of components\n )", "def default_hparams():\n return {\n \"activation_fn\": \"identity\",\n \"name\": \"mlp_connector\"\n }", "def create_hparams(experiment):\n hparams = {}\n\n # General parameters.\n hparams['batch_size'] = 64\n hparams['eval_batch_size'] = 64\n hparams['learning_rate_warmup_steps'] = 2000\n hparams['learning_rate_constant'] = 1\n hparams['learning_rate'] = 0.001\n hparams['train_epoches'] = 200\n hparams['steps_per_epoch'] = 30\n hparams['train_steps'] = 1000 * 1000\n hparams['eval_steps'] = 100\n hparams['caption_optimizer'] = 't2t'\n hparams['clip_norm'] = 5.0\n hparams['train_files'] = ''\n hparams['eval_files'] = ''\n hparams['train_buffer_size'] = 2000\n hparams['eval_buffer_size'] = 500\n hparams['train_pixel_encoder'] = True\n hparams['debug'] = False\n hparams['distribution_strategy'] = 'mirrored'\n\n # Embedding parameters.\n hparams['embedding_file'] = ''\n hparams['word_vocab_path'] = ''\n hparams['glove_trainable'] = True\n hparams['vocab_size'] = 10000\n\n # View hierarchy encoder parameters.\n hparams['max_pixel_pos'] = 100\n hparams['max_dom_pos'] = 500\n hparams['screen_encoder'] = 'pixel_transformer'\n hparams['screen_embedding_feature'] = ['text', 'type', 'pos', 'click', 'dom']\n hparams['obj_text_aggregation'] = 'max'\n hparams['synthetic_screen_noise'] = 0.\n\n # General parameters.\n hparams['num_hidden_layers'] = 2\n hparams['hidden_size'] = 2\n hparams['filter_size'] = 2\n hparams['num_heads'] = 2\n hparams['dropout'] = 0.2\n hparams['layer_prepostprocess_dropout'] = 0.2\n hparams['attention_dropout'] = 0.2\n hparams['relu_dropout'] = 0.2\n\n transformer_hparams = model_params.BASE_PARAMS\n\n # Add parameters from transformer model.\n hparams.update(transformer_hparams)\n\n # Rewrite all the parameters from command-line flags.\n config = screen2words_experiment_config.experiments[experiment]\n hparams.update(config)\n\n return hparams", "def _add_parameter_default(self, msg_param):\n default_types = msg_param.default_types\n while default_types: # iterate over each bit\n def_type = default_types & (~default_types+1)\n default_types ^= def_type\n def_type -= 1\n if def_type not in self._default_parameters:\n self._default_parameters[def_type] = {}\n self._default_parameters[def_type][msg_param.key] = msg_param.value", "def default_hparams():\n params = {\n \"labels_index_map_store_path\": \"/tmp/shabda/\"\n }\n return params", "def parameters_default(cls):\n return 
cls._Parameters.__new__.__defaults__", "def create_object_parameter_from_default(obj, default):\n values = []\n if default.enum:\n for v in DefaultParameterVl.objects.filter(parameter=default).all():\n values.append({'value' : v.value,\n 'caption' : v.caption})\n return create_object_parameter(obj, 'user', False,\n tp = default.tp,\n name=default.name,\n descr=default.descr,\n values=values)", "def set_default_parameters(self):\n super().set_default_parameters()", "def create_hparams(hparams_string=None, verbose=False):\n\n hparams = tf.contrib.training.HParams(\n ################################\n # Experiment Parameters #\n ################################\n epochs=1000,\n iters_per_checkpoint=1000,\n iters_per_validation=1000,\n seed=1234,\n dynamic_loss_scaling=True,\n fp16_run=False,\n distributed_run=False,\n dist_backend=\"nccl\",\n dist_url=\"tcp://127.0.0.1:54321\",\n cudnn_enabled=True,\n cudnn_benchmark=False,\n ignore_layers=[\"none-N/A\"],\n frozen_modules=[\"none-N/A\"], # only the module names are required e.g: \"encoder.\" will freeze all parameters INSIDE the encoder recursively\n print_layer_names_during_startup=True,\n \n ################################\n # Data Parameters #\n ################################\n check_files=1, # check all files exist, aren't corrupted, have text, good length, and other stuff before training.\n # This can take a little as it has to simulate an entire EPOCH of dataloading.\n speakerlist='/media/cookie/Samsung 860 QVO/ClipperDatasetV2/filelists/speaker_ids.txt', # lets the checkpoints include speaker names.\n dict_path='../../dict/merged.dict.txt',\n use_saved_speakers=True,# use the speaker lookups saved inside the model instead of generating again\n numeric_speaker_ids=False, # sort speaker_ids in filelist numerically, rather than alphabetically.\n # e.g:\n # [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] -> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n # instead of,\n # [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] -> [0, 1, 10, 2, 3, 4, 5, 6, 7, 8, 9]\n # Mellotron repo has this off by default, but ON makes the most logical sense to me.\n raw_speaker_ids=False, # use the speaker IDs found in filelists for the internal IDs. Values greater than n_speakers will crash (as intended).\n # This will disable sorting the ids\n training_files=\"/media/cookie/Samsung PM961/TwiBot/CookiePPPTTS/CookieTTS/_2_ttm/tacotron2/EncDurFilelist/map_train.txt\",\n validation_files=\"/media/cookie/Samsung PM961/TwiBot/CookiePPPTTS/CookieTTS/_2_ttm/tacotron2/EncDurFilelist/map_val.txt\",\n text_cleaners=['basic_cleaners'],\n \n ################################\n # Audio Parameters #\n ################################\n max_wav_value=32768.0,\n sampling_rate=48000,\n filter_length=2400,\n hop_length=600,\n win_length=2400,\n n_mel_channels=160,\n mel_fmin=0.0,\n mel_fmax=16000.0,\n \n ################################\n # Model Parameters #\n ################################\n n_symbols=len(symbols),\n symbols_embedding_dim=512,\n \n # (Encoder) Encoder parameters\n encoder_speaker_embed_dim=64, # speaker_embedding before encoder\n encoder_concat_speaker_embed='before_conv', # concat before encoder convs, or just before the LSTM inside decode. 
Options 'before_conv','before_lstm'\n encoder_kernel_size=5,\n encoder_n_convolutions=3,\n encoder_conv_hidden_dim=512,\n encoder_LSTM_dim=768,\n \n # (SylpsNet) Predicts speaking speed\n sylpsnet_layer_dims = [32, 32],# width of each layer, LeakyReLU() is used between hiddens\n \n # (EmotionNet) Semi-supervised VAE/Classifier\n emotion_classes = ['neutral','anxious','happy','annoyed','sad','confused','smug','angry','whispering','shouting','sarcastic','amused','surprised','singing','fear','serious'],\n emotionnet_latent_dim=32,# unsupervised Latent Dim\n emotionnet_encoder_outputs_dropout=0.0,# Encoder Outputs Dropout\n emotionnet_RNN_dim=128, # GRU dim to summarise Encoder Outputs\n emotionnet_classifier_layer_dropout=0.25, # Dropout ref, speaker and summarised Encoder outputs.\n # Which are used to predict zs and zu\n \n # (EmotionNet) Reference encoder\n emotionnet_ref_enc_convs=[32, 32, 64, 64, 128, 128],\n emotionnet_ref_enc_rnn_dim=64, # GRU dim to summarise RefSpec Conv Outputs\n emotionnet_ref_enc_use_bias=False,\n emotionnet_ref_enc_droprate=0.3, # Dropout for Reference Spectrogram Encoder Conv Layers\n \n # (AuxEmotionNet)\n auxemotionnet_layer_dims=[256,],# width of each layer, LeakyReLU() is used between hiddens\n # input is TorchMoji hidden, outputs to classifier layer and zu param predictor\n auxemotionnet_encoder_outputs_dropout=0.0,# Encoder Outputs Dropout\n auxemotionnet_RNN_dim=128, # GRU dim to summarise Encoder outputs\n auxemotionnet_classifier_layer_dropout=0.25, # Dropout ref, speaker and summarised Encoder outputs.\n # Which are used to predict zs and zu params\n \n # (AuxEmotionNet) TorchMoji\n torchMoji_attDim=2304,# published model uses 2304\n \n # (Speaker) Speaker embedding\n n_speakers=512, # maximum number of speakers the model can support.\n speaker_embedding_dim=256, # speaker embedding size # 128 baseline\n \n # (Decoder/Encoder) Bottleneck parameters\n # The outputs from the encoder, speaker, emotionnet and sylpsnet need to be mixed.\n # By default the information is mixed by the DecoderRNN, but this is repeated every spectrogram frame so likely wastes a massive amount of compute performing the same operations repeatedly.\n # Thus, this memory bottleneck can be used to mix the above mentioned outputs into a more compressed representation before decoding, allowing the DecoderRNN to be made smaller and more effective.\n use_memory_bottleneck=True,# False baseline\n memory_bottleneck_dim=512,# new memory size. 
512 would be equivalent to the original Tacotron2.\n memory_bottleneck_bias=False,\n \n # (Duration Predictor) parameters\n len_pred_filter_size=512,\n len_pred_kernel_size=3,\n len_pred_dropout=0.2,\n len_pred_n_layers=3,\n \n # (Decoder) parameters\n z_dim = 128,\n gblock_kernel_size = 3,\n in_channels = 512,\n decoder_dims = [768, 768, 768, 384, 384, 384, 256, 192, 192],\n decoder_scales = [1 , 1 , 1 , 2 , 2 , 2 , 3 , 5 , 5 ],# upsample from 12.5ms hop_length features\n # 80, 80, 80, 160, 320, 640,1920,9600,48000 Hz\n dilations = [1,2,4,8], # dilations of each layer in each block.\n \n # (Destriminator(s)) parameters\n d_dilations = [1, 2],\n descriminator_base_window = 600, # scaled by in_channels for each descriminator.\n descriminator_configs = [\n # Using Conditional Features\n [\n 1,# in_channels\n [128, 128, 128, 256, 256, 384, 512, 512, 512], # dims\n [ 0, 0, 0, 0, 0, 0, 1, 0, 0], # use_cond\n [ 5, 5, 3, 2, 2, 2, 1, 1, 1], # scales\n ], [\n 2,# in_channels\n [128, 128, 128, 256, 256, 512, 512, 512], # dims\n [ 0, 0, 0, 0, 0, 1, 0, 0], # use_cond\n [ 5, 5, 3, 2, 2, 1, 1, 1], # scales\n ], [\n 4,# in_channels\n [128, 128, 128, 256, 512, 512, 512], # dims\n [ 0, 0, 0, 0, 1, 0, 0], # use_cond\n [ 5, 5, 3, 2, 1, 1, 1], # scales\n ], [\n 8,# in_channels\n [128, 128, 256, 512, 512, 512], # dims\n [ 0, 0, 0, 1, 0, 0], # use_cond\n [ 5, 5, 3, 1, 1, 1], # scales\n ], [\n 15,# in_channels\n [128, 256, 256, 384, 512, 512, 512], # dims\n [ 0, 0, 0, 0, 1, 0, 0], # use_cond\n [ 5, 2, 2, 2, 1, 1, 1], # scales\n ], [\n 30,# in_channels\n [128, 256, 384, 512, 512, 512], # dims\n [ 0, 0, 0, 1, 0, 0], # use_cond\n [ 5, 2, 2, 1, 1, 1], # scales\n ],\n # Without Features\n [\n 8,# in_channels\n [128, 128, 256, 512, 512, 512], # dims\n [ 0, 0, 0, 0, 0, 0], # use_cond\n [ 5, 5, 3, 1, 1, 1], # scales\n ], [\n 8,# in_channels\n [128, 128, 256, 512, 512, 512], # dims\n [ 0, 0, 0, 0, 0, 0], # use_cond\n [ 5, 5, 3, 1, 1, 1], # scales\n ], [\n 8,# in_channels\n [128, 128, 256, 512, 512, 512], # dims\n [ 0, 0, 0, 0, 0, 0], # use_cond\n [ 5, 5, 3, 1, 1, 1], # scales\n ], [\n 150,# in_channels\n [256, 384, 512, 512, 512], # dims\n [ 0, 0, 0, 0, 0], # use_cond\n [ 2, 2, 1, 1, 1], # scales\n ],\n ],\n ################################\n # Optimization Hyperparameters #\n ################################\n weight_decay=1e-6,\n batch_size=4, # controls num of files processed in parallel per GPU\n val_batch_size=4, # for more precise comparisons between models, constant batch_size is useful\n segment_length=96000,\n ################################\n # Loss Weights/Scalars #\n ################################\n duration_predictor_weight = 1.0,\n )\n\n if hparams_string:\n tf.compat.v1.logging.info('Parsing command line hparams: %s', hparams_string)\n hparams.parse(hparams_string)\n\n if verbose:\n tf.compat.v1.logging.info('Final parsed hparams: %s', hparams.values())\n\n return hparams", "def _default_params(self) -> dict[str, Any]:\n return {\n \"max_tokens\": self.max_tokens,\n \"temperature\": self.temperature,\n \"top_p\": self.top_p,\n \"logprobs\": self.logprobs,\n \"echo\": self.echo,\n \"stop_sequences\": self.stop_sequences,\n \"repeat_penalty\": self.repeat_penalty,\n \"top_k\": self.top_k,\n \"n_threads\": self.n_threads,\n \"n_ctx\": self.n_ctx,\n \"n_gpu_layers\": self.n_gpu_layers,\n \"n_gqa\": self.n_gqa if self.n_gqa else None,\n \"n_parts\": self.n_parts,\n \"seed\": self.seed,\n \"f16_kv\": self.f16_kv,\n \"logits_all\": self.logits_all,\n \"vocab_only\": self.vocab_only,\n \"use_mlock\": 
self.use_mlock,\n \"n_batch\": self.n_batch,\n \"last_n_tokens_size\": self.last_n_tokens_size,\n \"streaming\": self.streaming,\n }", "def default_hparams():\n return {\n \"name\": \"forward_connector\"\n }", "def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [4096, 4096, 10]\n activation = 'relu'\n lr = 1e-7\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params", "def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [4096, 4096, 10]\n activation = 'relu'\n lr = 5e-4\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params", "def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [4096, 4096, 10]\n activation = 'relu'\n lr = 5e-4\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params", "def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [4096, 4096, 10]\n activation = 'relu'\n lr = 0.3\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params", "def __init__(self, parameter_dictionary):\n super().__init__(parameter_dictionary)\n\n self.model_string = \"gauss\"\n model_dictionary = self._get_model_dict(__class__.default_parameters)\n\n # wake expansion parameters\n self.ka = model_dictionary[\"ka\"]\n self.kb = model_dictionary[\"kb\"]\n\n # near wake / far wake boundary parameters\n self.alpha = model_dictionary[\"alpha\"]\n self.beta = model_dictionary[\"beta\"]\n\n # GCH Parameters\n self.calculate_VW_velocities = model_dictionary[\"calculate_VW_velocities\"]\n self.use_yaw_added_recovery = model_dictionary[\"use_yaw_added_recovery\"]\n self.eps_gain = model_dictionary[\"eps_gain\"]", "def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [10]\n activation = 'relu'\n lr = 5e-4\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params", "def Params(cls):\n p = hyperparams.InstantiableParams(cls)\n p.Define('task', None, 'Underlying task')\n p.Define('logdir', None, 'Log directory')\n p.Define('num_splits_per_client', None, '')\n p.Define('steps_per_loop', None, 'Number of steps to run.')\n 
p.Define('dataset_name', None,\n 'Dataset the program is operating on, eg: \"Test\"')\n p.Define('name', 'base_program', 'Program name.')\n p.Define('task_name', None,\n 'If multi-task, what the high-level task name is')\n p.Define('num_threads', 1, 'Number of threads in multiprocessing pool.')\n p.Define('spmd', False, 'Whether program is running under SPMD mode.')\n p.Define('write_train_input_stats', False,\n 'Whether to write input data stats during training.')\n p.Define('max_metrics', 256, 'Overrides TpuEvalMetrics.max_metrics')\n p.Define('ml_perf', None, 'MLPerf config')\n return p", "def _default_parameters():\n\n return {\n 'opt': 'adadelta',\n 'activation_function': 'softmax',\n 'lr': 0.0001,\n 'decay': 1e-6,\n 'loss': 'categorical_crossentropy',\n 'batch_size': 32,\n 'nb_epoch': 20,\n 'shuffle': True,\n 'momentum': 0.9,\n 'nesterov': True,\n 'rho': 0.95,\n 'epsilon': 1e-08,\n 'beta_1': 0.9,\n 'beta_2': 0.999,\n 'horizontal_flip': False,\n 'im_size': 240,#256,\n 'dense_layer': 1024,\n 'nb_classes': 10,\n 'nb_channels': 3,\n 'dropout': 0.5,\n 'metrics': ['accuracy'],\n 'volume': None,\n 'input_size': 25,\n 'temporal': False,\n 'input_dim': 512,\n 'nb_frames': 60,\n 'stride': 16,\n 'nb_hidden':512,\n 'lstm': False\n\n }", "def _build_param_dict(self):\n self._build_common_param_dict()\n\n self._param_dict.add(Parameter.NUM_AVG_SAMPLES,\n r'ScansToAverage>([\\d]+)</ScansToAverage>',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Scans to Average\",\n description=\"Number of samples to average (must be even)\",\n range=INT16,\n startup_param=True,\n direct_access=False,\n default_value=4,\n visibility=ParameterDictVisibility.READ_WRITE)\n self._param_dict.add(Parameter.MIN_COND_FREQ,\n r'MinimumCondFreq>([\\d]+)</MinimumCondFreq',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Minimum Conductivity Frequency\",\n range=INT16,\n description=\"Minimum conductivity frequency to enable pump turn-on.\",\n startup_param=True,\n direct_access=False,\n default_value=500,\n units=Units.HERTZ,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.PUMP_DELAY,\n r'PumpDelay>([\\d]+)</PumpDelay',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Pump Delay\",\n range=INT16,\n description=\"Time to wait after minimum conductivity frequency is reached before turning pump on.\",\n startup_param=True,\n direct_access=False,\n default_value=60,\n units=Units.SECOND,\n visibility=ParameterDictVisibility.READ_WRITE)\n self._param_dict.add(Parameter.AUTO_RUN,\n r'AutoRun>(.*)</AutoRun',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Auto Run\",\n description=\"Enable automatic logging when power is applied: (true | false).\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=False,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.IGNORE_SWITCH,\n r'IgnoreSwitch>(.*)</IgnoreSwitch',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Ignore Switch\",\n description=\"Disable magnetic switch position for starting or stopping logging: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n 
visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.OPTODE,\n r'OPTODE>(.*)</OPTODE',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Optode Attached\",\n description=\"Enable optode: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n self._param_dict.add(Parameter.VOLT1,\n r'ExtVolt1>(.*)</ExtVolt1',\n lambda match: True if match.group(1) == 'yes' else False,\n self._true_false_to_string,\n type=ParameterDictType.BOOL,\n display_name=\"Volt 1\",\n description=\"Enable external voltage 1: (true | false)\",\n range={'True': True, 'False': False},\n startup_param=True,\n direct_access=True,\n default_value=True,\n visibility=ParameterDictVisibility.IMMUTABLE)\n\n self._build_ctd_specific_params()", "def default_feature_hp_kernel_config(defn):\n defn = _validate_definition(defn)\n\n # hyperparams\n hparams = {}\n for i, hp in enumerate(defn.hyperpriors()):\n if not hp:\n continue\n # XXX(stephentu): we are arbitrarily picking w=0.1\n hparams[i] = {k: (fn, 0.1) for k, fn in hp.iteritems()}\n\n if not hparams:\n return []\n else:\n return [('slice_feature_hp', {'hparams': hparams})]", "def __init__(self,default=(0,0),length=None,**params):\n if length is None:\n self.length = len(default)\n else:\n self.length = length\n \n self._check(default)\n Parameter.__init__(self,default=default,**params)", "def __init__(self, input_size, hidden_size, output_size, std=1e-4):\n self.params = {}\n self.params['W1'] = std * np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = std * np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)", "def Params(cls):\n p = hyperparams.InstantiableParams(cls)\n\n p.Define('task_dict', None, 'dataset_name -> task params')\n p.Define('task_name', None, 'High level task name')\n p.Define('logdir', None, 'Log directory')\n p.Define('train_program', None, 'Train program params')\n p.Define('train_executions_per_eval', 1, '')\n p.Define('dataset_names', [], 'List of all dataset names.')\n p.Define('num_splits_per_client', None, '')\n\n p.Define('ml_perf', hyperparams.Params(), 'MlPerf configuration.')\n\n mlp = p.ml_perf\n mlp.Define('benchmark_name', None, 'Benchmark name for compliance log.')\n mlp.Define('decoder_metric_name', None,\n 'Name of the decoder metric to report for compliance log.')\n mlp.Define('decoder_metric_success_threshold', None,\n 'Benchmark run must exceed this value to succeed.')\n mlp.Define('max_steps_to_train', None,\n 'Maximum number of steps to reach target accuracy')\n mlp.Define('steps_per_epoch', None, 'Number of training steps per epoch.')\n mlp.Define('global_batch_size', None, 'Global batch size.')\n mlp.Define('max_sequence_length', None, 'Maximum sequence length.')\n mlp.Define('optimizer_name', None, 'Optimizer used.')\n mlp.Define('base_learning_rate', None, 'Base learning rate.')\n mlp.Define('warmup_steps', None, 'Number of warm-up steps.')\n\n return p", "def model_config(**overrides):\n config = base_model_config()\n _override(config, overrides)\n return tf.contrib.training.HParams(**config)", "def default_parameters(name):\n prm = Parameters(name)\n\n prm.add('venous_compliance', float())\n prm.add('arterial_compliance', float())\n\n prm.add('venous_resistance', float())\n prm.add('arterial_resistance', float())\n 
prm.add('peripheral_resistance', float())\n\n prm.add('venous_resting_volume', float())\n prm.add('arterial_resting_volume', float())\n\n return prm", "def default_params():\n params = {}\n params['dataset'] = 'adult'\n params['engines'] = ['MD','RDA']\n params['iters'] = 10000\n params['epsilon'] = 1.0\n params['delta'] = 0.0\n params['bounded'] = True\n params['frequency'] = 1\n params['seed'] = 0\n params['save'] = None\n params['load'] = None\n params['plot'] = None\n\n return params", "def _build_param_dict(self):\n # Add parameter handlers to parameter dict.\n self._param_dict = ProtocolParameterDict()\n \n self._param_dict.add(Parameter.CYCLE_TIME,\n r'(\\d+)\\s+= Cycle Time \\(.*\\)\\r\\n(0|1)\\s+= Minutes or Seconds Cycle Time',\n lambda match : self._to_seconds(int(match.group(1)),\n int(match.group(2))),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_WRITE,\n startup_param=True,\n direct_access=False,\n default_value=20,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.CHANGE_PARAM,\n submenu_write=[[\"1\", Prompt.CYCLE_TIME_PROMPT]])\n \n self._param_dict.add(Parameter.VERBOSE,\n r'', # Write-only, so does it really matter?\n lambda match : None,\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=True,\n init_value=1,\n menu_path_write=SubMenu.CHANGE_PARAM,\n submenu_write=[[\"2\", Prompt.VERBOSE_PROMPT]])\n \n self._param_dict.add(Parameter.METADATA_POWERUP,\n r'(0|1)\\s+= Metadata Print Status on Power up',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=True,\n init_value=0,\n menu_path_write=SubMenu.CHANGE_PARAM,\n submenu_write=[[\"3\", Prompt.METADATA_PROMPT]])\n\n self._param_dict.add(Parameter.METADATA_RESTART,\n r'(0|1)\\s+= Metadata Print Status on Restart Data Collection',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=True,\n init_value=0,\n menu_path_write=SubMenu.CHANGE_PARAM,\n submenu_write=[[\"4\", Prompt.METADATA_PROMPT]])\n \n self._param_dict.add(Parameter.RES_SENSOR_POWER,\n r'(0|1)\\s+= Res Power Status',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=False,\n init_value=1,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"1\"]])\n\n self._param_dict.add(Parameter.INST_AMP_POWER,\n r'(0|1)\\s+= Thermocouple & Hydrogen Amp Power Status',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=False,\n init_value=1,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"2\"]])\n\n self._param_dict.add(Parameter.EH_ISOLATION_AMP_POWER,\n r'(0|1)\\s+= eh Amp Power Status',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=False,\n init_value=1,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"3\"]])\n \n self._param_dict.add(Parameter.HYDROGEN_POWER,\n r'(0|1)\\s+= Hydrogen Sensor Power Status',\n lambda match : int(match.group(1)),\n self._int_to_string,\n 
visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=False,\n init_value=1,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"4\"]])\n \n self._param_dict.add(Parameter.REFERENCE_TEMP_POWER,\n r'(0|1)\\s+= Reference Temperature Power Status',\n lambda match : int(match.group(1)),\n self._int_to_string,\n visibility=ParameterDictVisibility.READ_ONLY,\n startup_param=True,\n direct_access=False,\n init_value=1,\n menu_path_read=SubMenu.SHOW_PARAM,\n submenu_read=[],\n menu_path_write=SubMenu.SENSOR_POWER,\n submenu_write=[[\"5\"]])", "def get_default_params() -> Dict:\n default_params = {\n \"n_estimators\": {\n \"default_value\": 100,\n \"description\": \"Number of gradient boosted trees. \"\n \"Equivalent to number of boosting rounds.\",\n \"type\": \"int\"\n },\n \"max_depth\": {\n \"default_value\": 6,\n \"description\": \"Maximum tree depth for base learners.\",\n \"type\": \"int\"\n },\n \"learning_rate\": {\n \"default_value\": 0.3,\n \"description\": \"Boosting learning rate (xgb's 'eta')\",\n \"type\": \"float\"\n },\n \"verbosity\": {\n \"default_value\": 1,\n \"description\": \"The degree of verbosity. Valid values are 0 (silent) - 3 (debug).\",\n \"type\": [0, 1, 2, 3]\n },\n \"booster\": {\n \"default_value\": \"gbtree\",\n \"description\": \"Specify which booster to use: gbtree, gblinear or dart.\",\n \"type\": ['gbtree', 'gblinear', 'dart']\n },\n \"tree_method\": {\n \"default_value\": \"auto\",\n \"description\":\n '''\n Specify which tree method to use. Default to auto. If this parameter\n is set to default, XGBoost will choose the most conservative option\n available. It's recommended to study this option from parameters\n document.\n ''',\n \"type\": [\"auto\", \"exact\", \"approx\", \"hist\", \"gpu_hist\"]\n },\n \"n_jobs\": {\n \"default_value\": 1,\n \"description\": '''\n Number of parallel threads used to run xgboost. When used with other Scikit-Learn\n algorithms like grid search, you may choose which algorithm to parallelize and\n balance the threads. 
Creating thread contention will significantly slow dowm both\n algorithms.\n ''',\n \"type\": \"int\"\n },\n \"gamma\": {\n \"default_value\": 0.0,\n \"description\": \"Minimum loss reduction required to make a further \"\n \"partition on a leaf node of the tree.\",\n \"type\": \"float\"\n },\n \"min_child_weight\": {\n \"default_value\": 1.0,\n \"description\": \"Minimum loss reduction required to make a further \"\n \"partition on a leaf node of the tree.\",\n \"type\": \"float\"\n },\n \"max_delta_step\": {\n \"default_value\": 0.0,\n \"description\": \"Maximum delta step we allow each tree's weight estimation to be.\",\n \"type\": \"float\"\n },\n \"subsample\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of the training instance.\",\n \"type\": \"float\"\n },\n \"colsample_bytree\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of columns when constructing each tree.\",\n \"type\": \"float\"\n },\n \"colsample_bylevel\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of columns for each level.\",\n \"type\": \"float\"\n },\n \"colsample_bynode\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of columns for each split.\",\n \"type\": \"float\"\n },\n \"reg_alpha\": {\n \"default_value\": 0.0,\n \"description\": \"L1 regularization term on weights\",\n \"type\": \"float\"\n },\n \"reg_lambda\": {\n \"default_value\": 0.0,\n \"description\": \"L2 regularization term on weights\",\n \"type\": \"float\"\n },\n \"scale_pos_weight\": {\n \"default_value\": 1.0,\n \"description\": \"Balancing of positive and negative weights.\",\n \"type\": \"float\"\n },\n \"random_state\": {\n \"default_value\": 0,\n \"description\": \"Random number seed.\",\n \"type\": \"int\"\n },\n \"base_score\": {\n \"default_value\": 0.5,\n \"description\": \"The initial prediction score of all instances, global bias.\",\n \"type\": \"float\"\n },\n # \"missing\": {\n # \"default_value\": None,\n # \"description\": \"Value in the data which needs to be present as a missing value.\",\n # \"type\": \"float\"\n # },\n \"num_parallel_tree\": {\n \"default_value\": 1,\n \"description\": \"Used for boosting random forest.\",\n \"type\": \"int\"\n },\n # \"monotone_constraints\": {\n # \"default_value\": \"(0,0)\",\n # \"description\": \" Constraint of variable monotonicity. \"\n # \"See tutorial for more information.\",\n # \"type\": \"str\"\n # },\n # \"interaction_constraints\": {\n # \"default_value\": None,\n # \"description\": '''\n # Constraints for interaction representing permitted interactions. The\n # constraints must be specified in the form of a nest list, e.g. [[0, 1],\n # [2, 3, 4]], where each inner list is a group of indices of features\n # that are allowed to interact with each other. See tutorial for more\n # information\n # ''',\n # \"type\": \"str\"\n # },\n \"importance_type\": {\n \"default_value\": \"gain\",\n \"description\": '''\n The feature importance type for the feature_importances. 
property:\n either \"gain\", \"weight\", \"cover\", \"total_gain\" or \"total_cover\".\n ''',\n \"type\": [\"gain\", \"weight\", \"cover\", \"total_gain\", \"total_cover\"]\n }\n }\n\n return default_params", "def Params(cls):\n p = hyperparams.InstantiableParams(cls)\n p.Define('task_dict', None, 'dataset_name -> task params')\n p.Define('task_name', None, 'High level task name')\n p.Define('logdir', None, 'Log directory')\n p.Define('train_program', None, 'Train program params')\n p.Define('train_executions_per_eval', 1, '')\n p.Define('eval_programs', [], 'List of eval program params.')\n p.Define('num_splits_per_client', None, '')\n p.Define('dataset_names', [], 'List of all dataset names.')\n p.Define('emails', [], 'List of emails to send metrics.')\n p.Define('summary_exporter', None, 'The summary exporter Params.')\n p.Define('async_postprocess', True,\n 'whether to CPU postprocess asynchronously with TPU train')\n p.Define(\n 'checkpoint_to_load', None,\n 'If set, the program will initially load from this checkpoint, '\n 'ignoring train_dir. Typically used for oneoff decode.')\n\n # TODO(blee): Clean these up.\n p.Define('ml_perf', hyperparams.Params(), 'MlPerf configuration.')\n mlp = p.ml_perf\n mlp.Define('submission_metadata', None,\n 'A dictionary of static submission metadata')\n mlp.Define('benchmark_name', None, 'Benchmark name for compliance log.')\n mlp.Define('steps_per_epoch', None, 'Number of training steps per epoch.')\n mlp.Define('decoder_metric_name', None,\n 'Name of the decoder metric to report for compliance log.')\n mlp.Define('decoder_metric_success_threshold', None,\n 'Benchmark run must exceed this value to succeed.')\n mlp.Define('max_steps_to_train', None,\n 'Maximum number of steps to reach target accuracy')\n return p", "def __init__(self, defaults={}, data=None):\n\n super().__init__(\n defaults={\n **EnergyParameters.parameters,\n **EnergyParameters.output,\n **defaults,\n },\n data=data,\n )", "def get_default_model_params(self):\n\n model_params = {\n 'dropout_rate': 0.3,\n 'hidden_layer_size': 160,\n 'learning_rate': 0.01,\n 'minibatch_size': 64,\n 'max_gradient_norm': 0.01,\n 'num_heads': 1,\n 'stack_size': 1\n }\n\n return model_params", "def init(self, cr):\n param_obj = self.pool.get('ir.config_parameter')\n for key, func in _default_parameters.iteritems():\n ids = param_obj.search(cr, 1, [('key', '=', key)])\n if not ids:\n param_obj.set_param(cr, 1, key, func())", "def __init__(self, defaults={}, data=None):\n\n super().__init__(\n defaults={**ThermodynamicsParameters.parameters, **defaults}, data=data\n )", "def create_hparams(hparams_string=None, verbose=False):\n\n hparams = tf.contrib.training.HParams(\n ################################\n # Experiment Parameters #\n ################################\n epochs=1000,\n iters_per_checkpoint=1000,\n iters_per_validation=1000,\n seed=1234,\n dynamic_loss_scaling=True,\n fp16_run=False,\n distributed_run=False,\n dist_backend=\"nccl\",\n dist_url=\"tcp://127.0.0.1:54321\",\n cudnn_enabled=True,\n cudnn_benchmark=False,\n #ignore_layers=[\"decoder.attention_layer.F.2.weight\", \"decoder.attention_layer.F.2.bias\",\"decoder.attention_layer.F.0.linear_layer.weight\",\"decoder.attention_layer.F.0.linear_layer.bias\"],\n 
ignore_layers=[\"encoder.lstm.weight_ih_l0\",\"encoder.lstm.weight_hh_l0\",\"encoder.lstm.bias_ih_l0\",\"encoder.lstm.bias_hh_l0\",\"encoder.lstm.weight_ih_l0_reverse\",\"encoder.lstm.weight_hh_l0_reverse\",\"encoder.lstm.bias_ih_l0_reverse\",\"encoder.lstm.bias_hh_l0_reverse\",\"decoder.attention_rnn.weight_ih\",\"decoder.attention_rnn.weight_hh\",\"decoder.attention_rnn.bias_ih\",\"decoder.attention_rnn.bias_hh\",\"decoder.attention_layer.query_layer.linear_layer.weight\",\"decoder.attention_layer.memory_layer.linear_layer.weight\",\"decoder.decoder_rnn.weight_ih\",\"decoder.linear_projection.linear_layer.weight\",\"decoder.gate_layer.linear_layer.weight\"],\n \n ################################\n # Data Parameters #\n ################################\n load_mel_from_disk=True,\n training_files='/media/cookie/Samsung 860 QVO/ClipperDatasetV2/filelists/mel_train_taca2_merged.txt',\n validation_files='/media/cookie/Samsung 860 QVO/ClipperDatasetV2/filelists/mel_validation_taca2_merged.txt',\n text_cleaners=['english_cleaners'],\n \n ################################\n # Audio Parameters #\n ################################\n max_wav_value=32768.0,\n sampling_rate=48000,\n filter_length=2400,\n hop_length=600,\n win_length=2400,\n n_mel_channels=160,\n mel_fmin=0.0,\n mel_fmax=16000.0,\n \n ################################\n # Model Parameters #\n ################################\n n_symbols=len(symbols),\n symbols_embedding_dim=512,\n \n # Gate\n gate_threshold=0.5,\n mask_gate_loss=False, # False = Vanilla Nvidia Tacotron2\n # masking the gate after the end of the clip will make the model never see the gate loss after the end of the clip. # TODO, explain this better # TODO, figure out why this is useful. # TODO, figure out why I added this\n # false would punish the model for trying to end the clip before it's ready, but barely punish the model for just forgetting to end the clip.\n # True will also help with badly trimmed audio.\n gate_positive_weight=10, # how much more valuable 1 positive frame is to 1 zero frame. 80 Frames per seconds, therefore values around 20 are fine.\n \n # Synthesis/Inference Related\n max_decoder_steps=3000,\n low_vram_inference=False, # doesn't save alignment and gate information, frees up some vram, especially for large input sequences.\n \n # Teacher-forcing Config\n p_teacher_forcing=1.00, # 1.00 baseline\n teacher_force_till=20, # int, number of starting frames with teacher_forcing at 100%, helps with clips that have challenging starting conditions i.e breathing before the text begins.\n val_p_teacher_forcing=0.80,\n val_teacher_force_till=20,\n \n # (Encoder) Encoder parameters\n encoder_speaker_embed_dim=256, # speaker_embedding before encoder\n encoder_concat_speaker_embed='inside', # concat before encoder convs, or just before the LSTM inside decode. 
Options 'before','inside'\n encoder_kernel_size=5,\n encoder_n_convolutions=3,\n encoder_embedding_dim=768, # = symbols_embedding_dim + encoder_speaker_embed_dim\n \n # (Decoder) Decoder parameters\n start_token = \"\",#\"☺\"\n stop_token = \"\",#\"␤\"\n hide_startstop_tokens=False, # remove first/last encoder output, *should* remove start and stop tokens from the decocer assuming the tokens are used.\n n_frames_per_step=1, # currently only 1 is supported\n context_frames=1, # TODO TODO TODO TODO TODO\n \n # (Decoder) Prenet\n prenet_dim=256, # 256 baseline\n prenet_layers=2, # 2 baseline\n prenet_batchnorm=False, # False baseline\n p_prenet_dropout=0.5, # 0.5 baseline\n \n # (Decoder) AttentionRNN\n attention_rnn_dim=1280, # 1024 baseline\n AttRNN_extra_decoder_input=True,# False baselinee\n AttRNN_hidden_dropout_type='zoneout',# options ('dropout','zoneout')\n p_AttRNN_hidden_dropout=0.10, # 0.1 baseline\n p_AttRNN_cell_dropout=0.00, # 0.0 baseline\n \n # (Decoder) AttentionRNN Speaker embedding\n n_speakers=512,\n speaker_embedding_dim=256, # speaker embedding size # 128 baseline\n \n # (Decoder) DecoderRNN\n decoder_rnn_dim=1024, # 1024 baseline\n extra_projection=False, # another linear between decoder_rnn and the linear projection layer (hopefully helps with high sampling rates and hopefully doesn't help decoder_rnn overfit)\n DecRNN_hidden_dropout_type='zoneout',# options ('dropout','zoneout')\n p_DecRNN_hidden_dropout=0.1, # 0.1 baseline\n p_DecRNN_cell_dropout=0.00, # 0.0 baseline\n \n # (Decoder) Attention parameters\n attention_type=0,\n # 0 -> Location-Based Attention (Vanilla Tacotron2)\n # 1 -> GMMAttention (Multiheaded Long-form Synthesis)\n attention_dim=128, # 128 Layer baseline\n \n # (Decoder) Attention Type 0 Parameters\n attention_location_n_filters=32, # 32 baseline\n attention_location_kernel_size=31, # 31 baseline\n \n # (Decoder) Attention Type 1 Parameters\n num_att_mixtures=1,# 5 baseline\n attention_layers=1,# 1 baseline\n delta_offset=0, # 0 baseline, values around 0.005 will push the model forwards. 
Since we're using the sigmoid function caution is suggested.\n delta_min_limit=0, # 0 baseline, values around 0.010 will force the model to move forward, in this example, the model cannot spend more than 100 steps on the same encoder output.\n lin_bias=False, # I need to figure out what that layer is called.\n initial_gain='relu', # initial weight distribution 'tanh','relu','sigmoid','linear'\n normalize_attention_input=True, # False baseline\n normalize_AttRNN_output=False, # True baseline\n \n # (Postnet) Mel-post processing network parameters\n postnet_embedding_dim=512,\n postnet_kernel_size=5,\n postnet_n_convolutions=5,\n \n # (GST) Reference encoder\n with_gst=True,\n ref_enc_filters=[32, 32, 64, 64, 128, 128],\n ref_enc_size=[3, 3],\n ref_enc_strides=[2, 2],\n ref_enc_pad=[1, 1],\n ref_enc_gru_size=128,\n \n # (GST) Multi-headed Attention Layer\n gstAtt_dim=128,\n num_heads=8,\n \n # (GST) Style Token Layer\n token_num=5, # acts as the information bottleneck.\n token_activation_func='tanh', # default 'softmax', options 'softmax','sigmoid','tanh','absolute'\n token_embedding_size=256, # token embedding size\n \n # (GST) TorchMoji\n torchMoji_attDim=2304,# pretrained model uses 2304\n torchMoji_linear=False,# load/save text infer linear layer.\n torchMoji_training=False,# switch GST to torchMoji mode\n \n # (GST) Drop Style Tokens\n p_drop_tokens=0.4, # Nudge the decoder to infer style without GST's input\n drop_tokens_mode='speaker_embedding',#Options: ('zeros','halfs','embedding','speaker_embedding') # Replaces style_tokens with either a scaler or an embedding, or a speaker_dependant embedding\n \n ################################\n # Optimization Hyperparameters #\n ################################\n use_saved_learning_rate=False,\n learning_rate=0.1e-5,\n weight_decay=1e-6,\n grad_clip_thresh=1.0,\n batch_size=56, # 32*3 = 0.377 val loss, # 2 = 0.71 val loss\n val_batch_size=56, # for more precise comparisons between models, constant batch_size is useful\n mask_padding=True, # set model's padded outputs to padded values\n \n # DFR (Drop Frame Rate)\n global_mean_npy='global_mean.npy',\n drop_frame_rate=0.25,\n \n ##################################\n # MMI options #\n ##################################\n use_mmi=False,#depreciated\n use_gaf=True,#depreciated\n max_gaf=0.01,#depreciated\n )\n\n if hparams_string:\n tf.compat.v1.logging.info('Parsing command line hparams: %s', hparams_string)\n hparams.parse(hparams_string)\n\n if verbose:\n tf.compat.v1.logging.info('Final parsed hparams: %s', hparams.values())\n\n return hparams", "def set_hyperparams(self, params):", "def _init_parameter(self):\n abs_val = np.sqrt(2.0 / (self.num_h + self.num_v))\n W = tf.get_variable('weights', shape=(self.num_v, self.num_h),\n initializer=tf.random_uniform_initializer(minval=-abs_val, maxval=abs_val))\n a = tf.get_variable('visible_bias', shape=(self.num_v), initializer=tf.zeros_initializer())\n b = tf.get_variable('hidden_bias', shape=(self.num_h), initializer=tf.zeros_initializer())\n return W, a, b", "def hyperparams():\n H = 6\n return Munch(N=500, H=H, D=(H // 2) ** 2, batch_size=10, precision=to.float32)", "def create_initial_parameters(self):\n update_nested_dictionary(\n self.settings,\n {self.highest_lookup: {\n self.highest_sublookup: self.kw\n }})", "def Params(cls):\n return hyperparams.InstantiableParams(cls)", "def default_parameters(name):\n prm = Parameters(name)\n\n prm.add('total_volume', 5000.0) # Not important for non-closed loop. 
Included for compatibility.\n\n prm.add('venous_pressure', float())\n\n prm.add('arterial_compliance', float())\n\n prm.add('venous_resistance', float())\n prm.add('arterial_resistance', float())\n prm.add('peripheral_resistance', float())\n\n return prm", "def test_defaults(self):\n params = DefaultsInterface()\n # make sure from_param_server can be called repeatedly\n params.from_param_server()\n\n self.assertEqual(params.verbosity_param_w_default, 'info')\n\n self.assertEqual(params.int_param_w_default, 1)\n self.assertAlmostEqual(params.double_param_w_default, 1.1)\n self.assertEqual(params.str_param_w_default, \"Hello World\")\n self.assertEqual(params.bool_param_w_default, True)\n self.assertEqual(params.long_param_w_default_int, 1)\n self.assertEqual(params.long_param_w_default_int_str, -1)\n self.assertEqual(params.long_param_w_default_long_string, 9223372036854775807)\n\n self.assertEqual(params.vector_int_param_w_default, [1, 2, 3])\n self.assertEqual(params.vector_double_param_w_default, [1.1, 1.2, 1.3])\n self.assertEqual(params.vector_string_param_w_default, [\"Hello\", \"World\"])\n\n self.assertEqual(params.map_param_w_default, {\"Hello\": \"World\"})\n self.assertEqual(params.enum_int_param_w_default, 1)\n self.assertEqual(params.enum_str_param_w_default, \"One\")", "def create_hyper_parameter_tuning_job(HyperParameterTuningJobName=None, HyperParameterTuningJobConfig=None, TrainingJobDefinition=None, WarmStartConfig=None, Tags=None):\n pass", "def init_hyperparameters():\n alpha = .8\n alpha2 = 1\n\n return alpha, alpha2", "def init_params(self):\n self.params = Parameters()\n self.params.add('qoff', self.qoff, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('yscale', self.yscale, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('int_bg', self.int_bg, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('Rc', self.Rc, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('sur_den', self.sur_den, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('ion_depth', self.ion_depth, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)", "def __init__(self, hparams, batch_size=None, num_classes=None,\n summary_dir=None, verbose=False):\n self._model = None\n self._hparams = hparams\n self._verbose = verbose\n self._batch_size = batch_size\n self._num_classes = num_classes\n self._summary_dir = summary_dir", "def __init__( self, parameters={} ):\n self.params = {}", "def create(\n cls,\n param_name: str,\n rule_dicts: List[platform_parameter_domain.PlatformParameterRuleDict],\n rule_schema_version: int,\n default_value: platform_parameter_domain.PlatformDataTypes\n ) -> PlatformParameterModel:\n return cls(\n id=param_name,\n rules=rule_dicts,\n rule_schema_version=rule_schema_version,\n default_value=default_value)", "def getDefaultParams():\n defpar = [\n # coordinate system\n ['crd_sys', \"'sph'\", 'Coordinate system'],\n ['nx', '[60, 40, 30]', 'Number of grid points in the first dimension'],\n ['xbound', '[0.1*au, 30.*au, 110.*au, 250.*au]', 'Number of radial grid points'],\n ['ny', '[10,30, 30, 10]',\n 'Number of grid points in the second dimension'],\n ['ybound', '[0.1, pi/6., pi/2., 5.*pi/6., 3.04]',\n 'Number of radial grid points'],\n ['nz', '[361]', 'Number of grid points in the third dimension'],\n ['zbound', '[0., 2.0*pi]', 'Number of radial grid points'],\n # star related\n ['tstar', '[3900.0]', 'Temperature of star'],\n ['mstar', '[1.0*ms]', 'Mass of the 
star(s)'],\n ['rstar', '[2.5*rs]', 'Radius of star'],\n # gas density \n ['Rin', '[0.1*au, 80*au]', 'inner bounding edge'],\n ['Rin_w', '[0, 1*au]', 'gaussian taper before inner edge'], \n ['Rout', '[30*au, 120*au]', 'outer bounding edge'],\n ['Rout_w', '[1*au, 1*au]', 'gaussian taper after outer edge'], \n ['sigp', '[-1.0, -1.5]', 'power-law surface density'],\n ['sig0', '[1e2, 1e1]', 'surface density at Rin in g/cm^2'], \n ['ring_r', '[50*au]', 'location of gaussian ring'], \n ['ring_win', '[5*au]', 'width of gaussian ring in inner radius'],\n ['ring_wout', '[5*au]', 'width of gaussian ring in outer radius'], \n ['ring_a', '[1e2]', 'surface density at center of ring in g/cm^2]'], \n ['cutgdens', '1e-30', 'cut for density'], \n ['Rt', '100*au', 'radius for scale height'], \n ['Ht', '10*au', 'scale height'], \n ['qheight', '1.25', 'height power-law'], \n # gas species\n ['gasspec_mol_name', \"['12co']\", 'name of molecule'],\n ['gasspec_mol_abun', '[5e-5]', 'mass abundance '],\n ['gasspec_mol_dbase_type', \"['leiden']\", ''],\n ['gasspec_mol_freezeout_dfact', '[1e-3]',\n 'Factor by which the molecular abundance should be decreased in the freeze-out zone'],\n ['mol_freeze_Ht', '[24*au]', 'Height at Rt, with index=qheight, for freeze out to happen'],\n ['mol_freeze_del_hfrac', '0.2', 'Gaussian taper for freeze-out. del H = h * hfrac'],\n ['mol_snowR', '[20*au]', 'Radius when freeze out begins to happen'],\n # dust density\n # flat power-law parts\n ['dRin', '[0.1*au, 80*au]', 'inner bounding edge'],\n ['dRin_w', '[0, 1*au]', 'gaussian taper before inner edge'], \n ['dRout', '[30*au, 120*au]', 'outer bounding edge'],\n ['dRout_w', '[1*au, 1*au]', 'gaussian taper after outer edge'], \n ['dsigp', '[-1.0, -1.5]', 'power-law surface density'],\n ['dsig0', '[1e2, 1e1]', 'surface density at Rin'],\n # Lynden-Bell parts\n ['dLB_Rin', '[0.1*au]', 'inner bounding radius'], \n ['dLB_Rsig', '[30*au]', 'charcteristic radius'],\n ['dLB_sigp', '[-1.0]', 'power-law exponent. 
Careful, the sign is different from the usual function by a negative sign for consistency with flat power-law'], \n ['dLB_sig0', '[1e2]', 'surface density'], \n # ring parts\n ['dring_r', '[50*au]', 'location of gaussian ring'],\n ['dring_win', '[5*au]', 'width of gaussian ring in inner radius'],\n ['dring_wout', '[5*au]', 'width of gaussian ring in outer radius'], \n ['dring_a', '[1e2]', 'surface density at center of ring in g/cm^2]'],\n ['cutddens', '1e-30', 'cut for dust density'],\n ['dRt', '[100*au]', 'radius for scale height for each grain size'], \n ['dHt', '[10*au]', 'scale height for each grain size'], \n ['dqheight', '[1.25]', 'scale height power-law for dust'], \n # temperature\n ['T0mid', '50', 'mid plane temperature at Rt'],\n ['T0atm', '50', 'atmosphere temperature at Rt'],\n ['zqratio', '3', 'factor of Ht of where temperature transition occurs'],\n ['qmid', '-0.5', 'midplane temperature exponent'],\n ['qatm', '-0.5', 'atmosphere temperature exponent'],\n ['hdel', '2', 'temperature transition exponent '],\n ['cuttemp', '10', 'temperature cut'], \n # alignment\n ['altype', \"'toroidal'\", 'alignment type']\n ]\n\n return defpar", "def set_hyperparams(use_defaults):\n if use_defaults:\n n_neurons, n_hidden, n_steps, k_prob = default_hyperparams()\n return n_neurons, n_hidden, n_steps, k_prob\n\n print (\"Select number of neurons in recurrent layer (default \" +\n \"100):\")\n n_neurons = int(input())\n print (\"Select number of hidden neurons in fully connected \" +\n \"layer (default 100):\")\n n_hidden = int(input())\n print (\"Select n_steps; the max number of words to be read \" +\n \"from each abstract (default 50):\")\n n_steps = int(input())\n print (\"Select k_prob; the dropout probability (default 0.5):\")\n k_prob = float(input())\n\n return n_neurons, n_hidden, n_steps, k_prob", "def add_hparam(self, name, value):\n # Keys in kwargs are unique, but 'name' could be the name of a pre-existing\n # attribute of this object.\n if getattr(self, name, None) is not None:\n raise ValueError('Hyperparameter name is reserved: %s' % name)\n if isinstance(value, (list, tuple)):\n if not value:\n raise ValueError('Multi-valued hyperparameters cannot be empty: %s' %\n name)\n self._hparam_types[name] = (type(value[0]), True)\n else:\n self._hparam_types[name] = (type(value), False)\n setattr(self, name, value)", "def create_hparams(hparams_string=None, verbose=False):\n\n hparams = tf.contrib.training.HParams(\n ################################\n # Experiment Parameters #\n ################################\n epochs=5000,\n check_by=\"epoch\", # 'epoch' or 'iter'\n iters_per_checkpoint=1000,\n epochs_per_checkpoint=2,\n shuffle_audiopaths=True,\n shuffle_batches=True,\n shuffle_samples=False, # exclusive with shuffle_audiopaths and shuffle_batches\n permute_opt='rand', # 'rand', 'semi-sort', 'bucket', etc.\n local_rand_factor=0.1, # used when permute_opt == 'semi-sort'\n pre_batching=True, # pre batch data, so batch_size is 1 in DataLoader\n prep_trainset_per_epoch=False,\n seed=1234,\n dynamic_loss_scaling=True,\n fp16_run=False,\n distributed_run=False,\n dist_backend=\"nccl\",\n dist_url=\"tcp://localhost:54321\",\n cudnn_enabled=True,\n cudnn_benchmark=True,\n ignore_layers=['embedding.weight'],\n\n ################################\n # Data Parameters #\n ################################\n load_mel_from_disk=False, # if true, 1st element in the filelist should be mel\n mel_data_type='numpy', # 'numpy' or 'torch'\n training_files='filelists/soe/3x/soe_wav-emo_v0_train_3x.txt',\n 
validation_files='filelists/soe/3x/soe_wav-emo_v0_valid_3x.txt',\n filelist_cols=['audiopath','emoembpath','text','dur','speaker','emotion'],\n text_cleaners=['english_cleaners'], # english_cleaners, korean_cleaners\n\n ################################\n # Emotion Embedding Parameters #\n ################################\n include_emo_emb=True, # check filelist and ensure include emo if True\n load_emo_from_disk=True, # currently only support True (ignored if include_emo_emb is False)\n emo_emb_dim=64, # dim of the offline emotion embedding\n\n ################################\n # Audio Parameters #\n ################################\n max_wav_value=32768.0,\n sampling_rate=22050,\n override_sample_size=True, # override filter_length,hop_length,win_length\n hop_time=12.5, # in milliseconds\n win_time=50.0, # in milliseconds\n filter_length=1024,\n hop_length=256, # number audio of frames between stft colmns, default win_length/4\n win_length=1024, # win_length int <= n_ftt: fft window size (frequency domain), defaults to win_length = n_fft\n n_mel_channels=80,\n mel_fmin=0.0,\n mel_fmax=11025.0,\n\n ################################\n # Model Parameters #\n ################################\n n_symbols=len(symbols), # set 80 for korean_cleaners. set 65 for english_cleaners\n symbols_embedding_dim=512,\n use_vae=True,\n vae_input_type='emo', # mel (default) or emo\n embedding_variation=0,\n label_type='one-hot', # 'one-hot' (default) or 'id'\n\n # Transcript encoder parameters\n encoder_kernel_size=5,\n encoder_n_convolutions=3,\n encoder_embedding_dim=512,\n\n # Speaker embedding parameters\n n_speakers=1,\n speaker_embedding_dim=16, # currently for speaker labeling embdding\n\n # Emotion Label parameters\n n_emotions=4, # number of emotion labels\n emotion_embedding_dim=64, # currently for emotion label embedding, 16 (original) or 64\n\n # reference encoder\n E=512,\n ref_enc_filters=[32, 32, 64, 64, 128, 128],\n ref_enc_size=[3, 3],\n ref_enc_strides=[2, 2],\n ref_enc_pad=[1, 1],\n ref_enc_gru_size=512//2,\n\n z_latent_dim=32,\n anneal_function='logistic',\n anneal_k=0.0025, # the smaller the faster increasing\n anneal_x0=10000,\n anneal_upper=0.2,\n anneal_lag=50000,\n anneal_constant=0.001,\n\n # Prosody embedding parameters\n prosody_n_convolutions=6,\n prosody_conv_dim_in=[1, 32, 32, 64, 64, 128],\n prosody_conv_dim_out=[32, 32, 64, 64, 128, 128],\n prosody_conv_kernel=3,\n prosody_conv_stride=2,\n prosody_embedding_dim=128,\n\n # Decoder parameters\n n_frames_per_step=1, # currently only 1 is supported\n decoder_rnn_dim=1024,\n prenet_dim=256,\n max_decoder_steps=1000,\n gate_threshold=0.5,\n p_attention_dropout=0.1,\n p_decoder_dropout=0.1,\n\n # Attention parameters\n attention_rnn_dim=1024,\n attention_dim=128,\n\n # Location Layer parameters\n attention_location_n_filters=32,\n attention_location_kernel_size=31,\n\n # Mel-post processing network parameters\n postnet_embedding_dim=512,\n postnet_kernel_size=5,\n postnet_n_convolutions=5,\n\n ################################\n # Optimization Hyperparameters #\n ################################\n use_saved_learning_rate=False,\n learning_rate=1e-3,\n weight_decay=1e-6,\n grad_clip_thresh=1.0,\n batch_size=32,\n mask_padding=True # set model's padded outputs to padded values\n )\n\n if hparams_string:\n tf.logging.info('Parsing command line hparams: %s', hparams_string)\n hparams.parse(hparams_string)\n\n if verbose:\n tf.logging.info('Final parsed hparams: %s', hparams.values())\n\n return hparams", "def get_hparams():\n hparams = 
registry.get_registered_hparams_set(FLAGS.hparams_set)\n hparams.add_hparam(\"inputs_vocab_size\", FLAGS.inputs_vocab_size) \n hparams.add_hparam(\"targets_vocab_size\", FLAGS.targets_vocab_size) \n hparams.parse(FLAGS.hparams)\n return hparams", "def get_hyper_params(**kwargs):\n hyper_params = {\n \"anchor_ratios\": [0.5, 1, 2],\n \"anchor_scales\": [16, 32, 64, 128, 256],\n \"stride\": 32,\n \"nms_topn\": 300,\n \"total_pos_bboxes\": 64,\n \"total_neg_bboxes\": 64,\n \"pooling_size\": (7, 7),\n }\n for key, value in kwargs.items():\n if key in hyper_params and value:\n hyper_params[key] = value\n #\n hyper_params[\"anchor_count\"] = len(hyper_params[\"anchor_ratios\"]) * len(hyper_params[\"anchor_scales\"])\n return hyper_params", "def _default_parameters(cls) -> Options:\n params = super()._default_parameters()\n params.main_axes = None\n params.i_means = None\n params.q_means = None\n params.scales = None\n\n return params", "def test_get_params():\n\n kwargs = {\n 'population_size': 500,\n 'generations': 1000,\n 'verbosity': 1\n }\n\n tpot_obj = TPOTClassifier(**kwargs)\n\n # Get default parameters of TPOT and merge with our specified parameters\n initializer = inspect.getargspec(TPOTBase.__init__)\n default_kwargs = dict(zip(initializer.args[1:], initializer.defaults))\n default_kwargs.update(kwargs)\n\n assert tpot_obj.get_params() == default_kwargs", "def get_parameters(self):\n params = {}\n for p in self.DEFAULT_VALUES.keys():\n params[p] = getattr(self, p)\n return params", "def getDefaultParameterValues(self):\r\n dct = {}\r\n self.initializeRoadRunnerModel()\r\n self.roadrunnerModel.reset()\r\n for parameterName in self.parametersToFit:\r\n dct[parameterName] = self.roadrunnerModel.model[parameterName]\r\n return dct", "def get_hyperparams(self):", "def default_parameters():\n prm = Parameters('windkessel_model')\n\n prm.add('total_volume', float())\n\n prm.add('venous_compliance', float())\n prm.add('arterial_compliance', float())\n\n prm.add('venous_resistance', float())\n prm.add('arterial_resistance', float())\n prm.add('peripheral_resistance', float())\n\n prm.add('venous_resting_volume', float())\n prm.add('arterial_resting_volume', float())\n\n return prm", "def set(self, hyperparam: str, info: Union[int, float, Sequence]):\n # Determine hyperparameter type by info type\n if isinstance(info, int):\n self.hyperparams[hyperparam] = ('int', info)\n elif isinstance(info, float):\n self.hyperparams[hyperparam] = ('float', info)\n elif isinstance(info, Sequence):\n default_value = info[0]\n object_list = info[1]\n # Index of the default value in the object list\n default_idx = object_list.index(default_value)\n self.hyperparams[hyperparam] = ('object', default_idx, object_list)\n elif isinstance(info, bool):\n # Treat like an object, but handle this common special case for\n # simplicity\n self.hyperparams[hyperparam] = ('object', info, [False, True])\n\n pass", "def requires_hparams(self):\n return None", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def default_hparams():\n hparams = TextDataBase.default_hparams()\n hparams[\"name\"] = \"paired_text_data\"\n hparams.update(_default_paired_text_dataset_hparams())\n return hparams", "def get_default_config(self):\n \n config = {}\n \n # default z_0_hat, zeros, flexible\n 
config['z_0_hat_option'] = 'flexible'\n config['initial_z_0_hat'] = np.zeros(self.dimension)\n \n # default P_0_hat, identity times a small scalar, flexible\n config['P_0_hat_option'] = 'flexible'\n config['initial_P_0_hat'] = 0.1 * np.eye(self.dimension)\n \n # default A, identity, flexible\n config['AB_option'] = 'flexible'\n config['initial_A'] = np.eye(self.dimension)\n config['initial_B'] = np.zeros((self.dimension, self.control_dimension))\n \n # default Q, identity times a small scalar, flexible\n config['Q_option'] = 'flexible'\n config['initial_Q'] = 0.1 * np.eye(self.dimension)\n \n # default R, identity times a small scalar, flexible\n config['R_option'] = 'flexible'\n config['initial_R'] = 0.1 * np.eye(self.dimension)\n \n # default stopping criteria, threshold 1e-5, num_iterations 1000\n # stop whenever either of the two critieria is reached\n config['threshold'] = 1e-5\n config['num_iterations'] = 1000\n\n return config", "def construct_params(self):\n\n return {\"expand\": self.get_expand()}", "def default_parameters():\n prm = Parameters('lvad_model')\n\n prm.add('lvad_volume', 66.0)\n\n prm.add('alpha_slope', 0.0091)\n prm.add('alpha_intercept', 1.4)\n\n prm.add('beta_slope', -0.19)\n prm.add('beta_intercept', -1.9)\n\n prm.add('frequency', float())\n\n return prm", "def load_hyperparams():\n #Load halo data (encoding='latin1' for Python3)\n with open('../Data/halo_data.pkl', 'rb') as halo_input:\n halo_data = pickle.load(halo_input, encoding='latin1')\n\n #Load interpolator\n with open('../Data/interpolator.pkl', 'rb') as interp:\n vpeak_Mr_interp = pickle.load(interp, encoding='latin1')\n\n #Cosmological params\n cosmo_params = {}\n cosmo_params['omega_b'] = 0.0 \n cosmo_params['omega_m'] = 0.286\n cosmo_params['h'] = 0.7\n\n #hyperparameters\n hparams = {}\n hparams['mpeak_cut'] = 10**7\n hparams['vpeak_cut'] = 10.\n hparams['vmax_cut'] = 9.\n hparams['orphan_radii_cut'] = 300.\n hparams['chi'] = 1.\n hparams['R0'] = 10.0\n hparams['gamma_r'] = 0.0\n hparams['beta'] = 0.\n hparams['O'] = 1.\n hparams['n_realizations'] = 5\n\n #prior hyperparameters\n prior_hparams = {}\n prior_hparams['alpha'] = np.array([-2.,-1.1])\n prior_hparams['sigma_M'] = np.array([0.,2.])\n prior_hparams['M50'] = np.array([7.35,10.85])\n prior_hparams['sigma_mpeak'] = np.array([1e-5,1.])\n prior_hparams['B'] = np.array([1e-5,3.])\n prior_hparams['A'] = np.array([10.,500.])\n prior_hparams['sigma_r'] = np.array([1e-5,2.])\n prior_hparams['n'] = np.array([0.,2.])\n prior_hparams['Mhm'] = np.array([5.,9.])\n\n #Orphan hyperparameters\n orphan_params = {}\n orphan_params['eps'] = 0.01 \n orphan_params['df'] = 1\n\n #Simulation and LMC indices\n sim_indices = {}\n sim_indices['host'] = [0,1]\n sim_indices['LMC'] = [0,0]\n\n return hparams, prior_hparams, cosmo_params, orphan_params, halo_data, sim_indices, vpeak_Mr_interp", "def initialize(self):\n params = {}\n for i in range(1, len(self.layer_dimensions)):\n params['b_' + str(i)] = np.ones((self.layer_dimensions[i], 1))\n if self.he_initialization:\n params['W_' + str(i)] = np.random.randn(self.layer_dimensions[i],\n self.layer_dimensions[i - 1]) * np.sqrt(\n 2 / self.layer_dimensions[i - 1])\n else:\n params['W_' + str(i)] = np.random.rand(self.layer_dimensions[i], self.layer_dimensions[i - 1]) - 0.5\n return params", "def build(self):\n return self.hyperparams.items()", "def training_config(**overrides):\n config = base_training_config()\n _override(config, overrides)\n return tf.contrib.training.HParams(**config)", "def _extract_params(self, 
kwargs, hyperparameters):\n init_params = dict()\n fit_params = dict()\n produce_params = dict()\n\n for name, param in hyperparameters.get('fixed', dict()).items():\n if name in kwargs:\n value = kwargs.pop(name)\n\n elif 'default' in param:\n value = param['default']\n\n else:\n raise TypeError(\"{} required argument '{}' not found\".format(self.name, name))\n\n init_params[name] = value\n\n for name, param in hyperparameters.get('tunable', dict()).items():\n if name in kwargs:\n init_params[name] = kwargs.pop(name)\n\n if not isinstance(self.fit_args, str):\n fit_args = [arg['name'] for arg in self.fit_args]\n else:\n fit_args = []\n\n if not isinstance(self.produce_args, str):\n produce_args = [arg['name'] for arg in self.produce_args]\n else:\n produce_args = []\n\n for name in list(kwargs.keys()):\n if name in fit_args:\n fit_params[name] = kwargs.pop(name)\n\n elif name in produce_args:\n produce_params[name] = kwargs.pop(name)\n\n if kwargs:\n error = \"Unexpected hyperparameters '{}'\".format(', '.join(kwargs.keys()))\n raise TypeError(error)\n\n return init_params, fit_params, produce_params", "def _use_default_params(self):\n self.params = {\n # Desktop window params\n 'pos': (100, 100),\n 'lock_pos': False,\n # Font params\n 'default_font': 'Sans 9',\n # Lessons colors\n 'lecture_color': '#009566660000',\n 'laboratory_color': '#987600000000',\n 'practice_color': '#188820eda89b',\n 'non_color': '#0000849acdf4',\n 'day_color': '#000000000000',\n # Window style\n 'full_transparent': True,\n 'window_color': '#5ad65ad65ad6',\n 'transparent_percent': 50.0,\n # View schedule settings\n 'view_sch': [True, True, True, True, True]\n }\n self.save_params()", "def _get_default_pipeline_params(\n project: str,\n location: str,\n root_dir: str,\n target_column: str,\n prediction_type: str,\n optimization_objective: str,\n transformations: str,\n train_budget_milli_node_hours: float,\n stage_1_num_parallel_trials: Optional[int] = None,\n stage_2_num_parallel_trials: Optional[int] = None,\n stage_2_num_selected_trials: Optional[int] = None,\n data_source_csv_filenames: Optional[str] = None,\n data_source_bigquery_table_path: Optional[str] = None,\n predefined_split_key: Optional[str] = None,\n timestamp_split_key: Optional[str] = None,\n stratified_split_key: Optional[str] = None,\n training_fraction: Optional[float] = None,\n validation_fraction: Optional[float] = None,\n test_fraction: Optional[float] = None,\n weight_column: Optional[float] = None,\n study_spec_parameters_override: Optional[List[Dict[str, Any]]] = None,\n optimization_objective_recall_value: Optional[float] = None,\n optimization_objective_precision_value: Optional[float] = None,\n stage_1_tuner_worker_pool_specs_override: Optional[Dict[str, Any]] = None,\n cv_trainer_worker_pool_specs_override: Optional[Dict[str, Any]] = None,\n export_additional_model_without_custom_ops: bool = False,\n stats_and_example_gen_dataflow_machine_type: Optional[str] = None,\n stats_and_example_gen_dataflow_max_num_workers: Optional[int] = None,\n stats_and_example_gen_dataflow_disk_size_gb: Optional[int] = None,\n transform_dataflow_machine_type: Optional[str] = None,\n transform_dataflow_max_num_workers: Optional[int] = None,\n transform_dataflow_disk_size_gb: Optional[int] = None,\n dataflow_subnetwork: Optional[str] = None,\n dataflow_use_public_ips: bool = True,\n encryption_spec_key_name: Optional[str] = None,\n additional_experiments: Optional[Dict[str, Any]] = None,\n dataflow_service_account: Optional[str] = None,\n 
max_selected_features: Optional[int] = None,\n apply_feature_selection_tuning: bool = False,\n run_evaluation: bool = True,\n evaluation_batch_predict_machine_type: Optional[str] = None,\n evaluation_batch_predict_starting_replica_count: Optional[int] = None,\n evaluation_batch_predict_max_replica_count: Optional[int] = None,\n evaluation_batch_explain_machine_type: Optional[str] = None,\n evaluation_batch_explain_starting_replica_count: Optional[int] = None,\n evaluation_batch_explain_max_replica_count: Optional[int] = None,\n evaluation_dataflow_machine_type: Optional[str] = None,\n evaluation_dataflow_starting_num_workers: Optional[int] = None,\n evaluation_dataflow_max_num_workers: Optional[int] = None,\n evaluation_dataflow_disk_size_gb: Optional[int] = None,\n run_distillation: bool = False,\n distill_batch_predict_machine_type: Optional[str] = None,\n distill_batch_predict_starting_replica_count: Optional[int] = None,\n distill_batch_predict_max_replica_count: Optional[int] = None,\n stage_1_tuning_result_artifact_uri: Optional[str] = None,\n quantiles: Optional[List[float]] = None,\n enable_probabilistic_inference: bool = False,\n num_selected_features: Optional[int] = None,\n model_display_name: str = '',\n model_description: str = '',\n) -> Dict[str, Any]:\n if not study_spec_parameters_override:\n study_spec_parameters_override = []\n if not stage_1_tuner_worker_pool_specs_override:\n stage_1_tuner_worker_pool_specs_override = []\n if not cv_trainer_worker_pool_specs_override:\n cv_trainer_worker_pool_specs_override = []\n if not quantiles:\n quantiles = []\n\n parameter_values = {}\n parameters = {\n 'project': project,\n 'location': location,\n 'root_dir': root_dir,\n 'target_column': target_column,\n 'prediction_type': prediction_type,\n 'data_source_csv_filenames': data_source_csv_filenames,\n 'data_source_bigquery_table_path': data_source_bigquery_table_path,\n 'predefined_split_key': predefined_split_key,\n 'timestamp_split_key': timestamp_split_key,\n 'stratified_split_key': stratified_split_key,\n 'training_fraction': training_fraction,\n 'validation_fraction': validation_fraction,\n 'test_fraction': test_fraction,\n 'optimization_objective': optimization_objective,\n 'train_budget_milli_node_hours': train_budget_milli_node_hours,\n 'stage_1_num_parallel_trials': stage_1_num_parallel_trials,\n 'stage_2_num_parallel_trials': stage_2_num_parallel_trials,\n 'stage_2_num_selected_trials': stage_2_num_selected_trials,\n 'weight_column': weight_column,\n 'optimization_objective_recall_value': (\n optimization_objective_recall_value\n ),\n 'optimization_objective_precision_value': (\n optimization_objective_precision_value\n ),\n 'study_spec_parameters_override': study_spec_parameters_override,\n 'stage_1_tuner_worker_pool_specs_override': (\n stage_1_tuner_worker_pool_specs_override\n ),\n 'cv_trainer_worker_pool_specs_override': (\n cv_trainer_worker_pool_specs_override\n ),\n 'export_additional_model_without_custom_ops': (\n export_additional_model_without_custom_ops\n ),\n 'dataflow_subnetwork': dataflow_subnetwork,\n 'dataflow_use_public_ips': dataflow_use_public_ips,\n 'dataflow_service_account': dataflow_service_account,\n 'encryption_spec_key_name': encryption_spec_key_name,\n 'max_selected_features': max_selected_features,\n 'stage_1_tuning_result_artifact_uri': stage_1_tuning_result_artifact_uri,\n 'quantiles': quantiles,\n 'enable_probabilistic_inference': enable_probabilistic_inference,\n 'model_display_name': model_display_name,\n 'model_description': 
model_description,\n }\n parameter_values.update(\n {param: value for param, value in parameters.items() if value is not None}\n )\n\n if run_evaluation:\n eval_parameters = {\n 'evaluation_batch_predict_machine_type': (\n evaluation_batch_predict_machine_type\n ),\n 'evaluation_batch_predict_starting_replica_count': (\n evaluation_batch_predict_starting_replica_count\n ),\n 'evaluation_batch_predict_max_replica_count': (\n evaluation_batch_predict_max_replica_count\n ),\n 'evaluation_batch_explain_machine_type': (\n evaluation_batch_explain_machine_type\n ),\n 'evaluation_batch_explain_starting_replica_count': (\n evaluation_batch_explain_starting_replica_count\n ),\n 'evaluation_batch_explain_max_replica_count': (\n evaluation_batch_explain_max_replica_count\n ),\n 'evaluation_dataflow_machine_type': evaluation_dataflow_machine_type,\n 'evaluation_dataflow_starting_num_workers': (\n evaluation_dataflow_starting_num_workers\n ),\n 'evaluation_dataflow_max_num_workers': (\n evaluation_dataflow_max_num_workers\n ),\n 'evaluation_dataflow_disk_size_gb': evaluation_dataflow_disk_size_gb,\n 'run_evaluation': run_evaluation,\n }\n parameter_values.update(\n {\n param: value\n for param, value in eval_parameters.items()\n if value is not None\n }\n )\n\n # V1 pipeline without FTE\n if num_selected_features is None:\n if not additional_experiments:\n additional_experiments = {}\n\n parameters = {\n 'transformations': transformations,\n 'stats_and_example_gen_dataflow_machine_type': (\n stats_and_example_gen_dataflow_machine_type\n ),\n 'stats_and_example_gen_dataflow_max_num_workers': (\n stats_and_example_gen_dataflow_max_num_workers\n ),\n 'stats_and_example_gen_dataflow_disk_size_gb': (\n stats_and_example_gen_dataflow_disk_size_gb\n ),\n 'transform_dataflow_machine_type': transform_dataflow_machine_type,\n 'transform_dataflow_max_num_workers': (\n transform_dataflow_max_num_workers\n ),\n 'transform_dataflow_disk_size_gb': transform_dataflow_disk_size_gb,\n 'additional_experiments': additional_experiments,\n }\n parameter_values.update(\n {\n param: value\n for param, value in parameters.items()\n if value is not None\n }\n )\n\n if apply_feature_selection_tuning:\n parameter_values.update({\n 'apply_feature_selection_tuning': apply_feature_selection_tuning,\n })\n\n if run_distillation:\n distillation_parameters = {\n 'distill_batch_predict_machine_type': (\n distill_batch_predict_machine_type\n ),\n 'distill_batch_predict_starting_replica_count': (\n distill_batch_predict_starting_replica_count\n ),\n 'distill_batch_predict_max_replica_count': (\n distill_batch_predict_max_replica_count\n ),\n 'run_distillation': run_distillation,\n }\n parameter_values.update(\n {\n param: value\n for param, value in distillation_parameters.items()\n if value is not None\n }\n )\n\n # V2 pipeline (with FTE)\n else:\n if run_distillation:\n raise ValueError(\n 'Distillation is currently not supported'\n ' when num_selected_features is specified.'\n )\n\n parameters = {\n 'num_selected_features': num_selected_features,\n 'dataset_level_custom_transformation_definitions': [],\n 'dataset_level_transformations': [],\n 'tf_auto_transform_features': {},\n 'tf_custom_transformation_definitions': [],\n 'legacy_transformations_path': transformations,\n 'feature_transform_engine_dataflow_machine_type': (\n transform_dataflow_machine_type\n ),\n 'feature_transform_engine_dataflow_max_num_workers': (\n transform_dataflow_max_num_workers\n ),\n 'feature_transform_engine_dataflow_disk_size_gb': (\n 
transform_dataflow_disk_size_gb\n ),\n }\n parameter_values.update(\n {\n param: value\n for param, value in parameters.items()\n if value is not None\n }\n )\n\n return parameter_values", "def parameters(self):\n return self._default_params" ]
[ "0.70342475", "0.7031786", "0.6892825", "0.67746496", "0.6762069", "0.6687497", "0.66754377", "0.6643588", "0.6621793", "0.65395397", "0.6534683", "0.6510586", "0.64588934", "0.6378265", "0.63729376", "0.63572174", "0.62473166", "0.62324756", "0.6184058", "0.61717397", "0.61234725", "0.61059225", "0.6093912", "0.6079184", "0.60525495", "0.60291696", "0.601052", "0.59988946", "0.5989664", "0.59542036", "0.5938891", "0.5929288", "0.58936787", "0.5890589", "0.5876371", "0.58709234", "0.58669096", "0.58669096", "0.5857602", "0.5856277", "0.5853329", "0.5833257", "0.58192515", "0.580576", "0.5781383", "0.5771903", "0.576837", "0.5768136", "0.5757119", "0.5733797", "0.5728282", "0.5727666", "0.5726656", "0.57239157", "0.57072043", "0.5706374", "0.5702283", "0.57008135", "0.5697934", "0.56928515", "0.5655827", "0.563885", "0.5631389", "0.5631172", "0.5625887", "0.56160295", "0.5606778", "0.5605747", "0.56025386", "0.557682", "0.5553729", "0.55462736", "0.5545577", "0.5545052", "0.5536981", "0.5523924", "0.5506959", "0.5505933", "0.5504317", "0.54888755", "0.5484615", "0.5473096", "0.5469379", "0.54601866", "0.54514223", "0.5445145", "0.54405504", "0.54405504", "0.54296815", "0.5423749", "0.5420251", "0.5405144", "0.537942", "0.53637475", "0.5362221", "0.5362089", "0.5350187", "0.53495497", "0.53442395", "0.53433925" ]
0.74614435
0
Setup the model with specified hyperparameters and train the model.
def train(self, features, labels, seed=None):
    raise NotImplementedError('Not implemented')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_model(self, *args, **kwargs):\n raise NotImplementedError", "def _setupModel(self, parameters):\r\n ModelFitterCore.setupModel(self.roadrunnerModel, parameters,\r\n logger=self.logger)", "def train_model(self, *args, **kwargs):\n self.model.train(self.training, *args, **kwargs)", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def _setup_training(self, params, **kwargs):\n model_params = params.permute_training_on_top().model\n\n model_kwargs = {**model_params.fixed, **model_params.variable}\n\n model = self.model_cls(**model_kwargs)\n\n training_params = params.permute_training_on_top().training\n losses = training_params.nested_get(\"losses\")\n optimizer_cls = training_params.nested_get(\"optimizer_cls\")\n optimizer_params = training_params.nested_get(\"optimizer_params\")\n train_metrics = training_params.nested_get(\"train_metrics\", {})\n lr_scheduler_cls = training_params.nested_get(\"lr_sched_cls\", None)\n lr_scheduler_params = training_params.nested_get(\"lr_sched_params\",\n {})\n val_metrics = training_params.nested_get(\"val_metrics\", {})\n\n # necessary for resuming training from a given path\n save_path = kwargs.pop(\"save_path\", os.path.join(\n self.save_path,\n \"checkpoints\",\n \"run_%02d\" % self._run))\n\n return self.trainer_cls(\n network=model,\n save_path=save_path,\n losses=losses,\n key_mapping=self.key_mapping,\n optimizer_cls=optimizer_cls,\n optimizer_params=optimizer_params,\n train_metrics=train_metrics,\n val_metrics=val_metrics,\n lr_scheduler_cls=lr_scheduler_cls,\n lr_scheduler_params=lr_scheduler_params,\n optim_fn=self._optim_builder,\n save_freq=self.checkpoint_freq,\n **kwargs\n )", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def _set_model(self):\n print(\"Setting up model...\")\n # Encoder\n inputs = Input(batch_shape=(None,) + self.input_shape)\n\n baseEncoder = self.createEncoder(inputs)\n baseEncoder = Dropout(self.drop)(baseEncoder)\n\n # Instantiate encoder layers\n Q_z_mean = Dense(self.latent_dim)\n Q_z_log_var = Dense(self.latent_dim)\n\n # Parameters for continous latent distribution\n z_mean = Q_z_mean(baseEncoder)\n z_log_var = Q_z_log_var(baseEncoder)\n self.encoder =Model(inputs, z_mean)\n\n # Sample from latent distributions\n\n encoding = Lambda(self._sampling_normal, output_shape=(self.latent_dim,))([z_mean, z_log_var])\n \n G_0 = Dense(8*self.kernel_init)(encoding)\n G_0 = Dropout(self.drop)(G_0)\n baseDecoder = self.createDecoder(G_0)\n\n self.model =Model(inputs, baseDecoder)\n # Store latent distribution parameters\n self.z_mean = z_mean\n self.z_log_var = z_log_var\n\n\n # Compile models\n #self.opt = RMSprop()\n self.model.compile(optimizer=self.opt, loss=self._vae_loss)\n self.model.summary()\n print(\"Completed model setup.\")", "def set_train(self):\n self.model.train()", "def train(self):\n # setup model\n self.createModel()\n self.setGenerators()\n self.buildCallbacks()\n self.printParameters()\n \n # train model\n _ = self.model.fit_generator(\n generator = self.trainGen,\n validation_data = self.validateGen,\n steps_per_epoch = self.steps_per_epoch,\n validation_steps = self.validation_steps,\n epochs = self.epochs,\n use_multiprocessing = True,\n callbacks = self.callbacks)\n # clear save paths to avoid overwriting accidentaly\n self.saveName = None", "def model_setup(params):\n n_classes = len(classes_config.training_ids)\n if general_config.model_id == constants.ssdlite:\n model = SSDLite.SSD_Head(n_classes=n_classes, 
k_list=anchor_config.k_list)\n elif general_config.model_id == constants.ssd:\n model = resnet_ssd.SSD300(n_classes=n_classes)\n elif general_config.model_id == constants.ssd_modified:\n model = SSDLite.SSD_Head(n_classes=n_classes, k_list=anchor_config.k_list,\n out_channels=params.out_channels, width_mult=params.width_mult)\n model.to(general_config.device)\n\n return model", "def train_model(dataset):\n\n # clear the session so that we can train more than one model\n K.clear_session()\n\n # initialize the model\n model = initalizer.init_nn()\n\n # fit the model\n model.fit(dataset, epochs=40)\n\n return model", "def model(self, hyperparams, test_mode=False):\n run_doc = OrderedDict() # Document important hyperparameters\n run_start_time = time.time()\n run_id = str(uuid4())\n # TODO: Not ideal: Loads from memory every time. Use generator?\n train_data, train_targets, test_data, test_targets = \\\n self.data_loader(dataset=hyperparams['dataset'], size=hyperparams['dataset_size'])\n run_doc['dataset'] = hyperparams['dataset']\n run_doc['data_size'] = len(train_targets)\n # Visualization tools\n if config.INPUT_DEBUG:\n image_analysis(image=train_data[0, :, :, :], label=train_targets[0, :])\n # Input shape comes from image shape\n img_width = train_data[0].shape[0]\n img_height = train_data[0].shape[1]\n num_channels = train_data[0].shape[2]\n input_shape = (img_width, img_height, num_channels)\n run_doc['input_shape'] = '(%d, %d, %d)' % input_shape\n input_tensor = Input(shape=input_shape, dtype='float32', name='input_image')\n try: # Model creation is in separate file\n x, run_doc = custom_model(input_tensor, params=hyperparams, run_doc=run_doc)\n except ValueError as e:\n if not test_mode: # If not testing, ignore error causing models\n return {'loss': 100, 'status': STATUS_OK}\n else:\n raise e\n # Final layer classifies into 4 possible actions\n output = layers.Dense(4, activation='softmax')(x)\n # File names for the model and logs\n log_file = os.path.join(self._logs_dir, run_id)\n model_file = os.path.join(self._models_dir, run_id + '.h5')\n # Add some callbacks so we can track progress using Tensorboard\n callbacks = [keras.callbacks.EarlyStopping('val_loss', patience=config.TRAIN_PATIENCE, mode=\"min\")]\n if not test_mode: # Don't save models/logs if in testing mode\n callbacks += [keras.callbacks.TensorBoard(log_dir=log_file),\n keras.callbacks.ModelCheckpoint(model_file, save_best_only=True)]\n # Choice of optimizer and optimization parameters\n if hyperparams['optimizer'] == 'sgd':\n optimizer = optimizers.SGD(lr=hyperparams[\"learning_rate\"],\n decay=hyperparams[\"decay\"],\n clipnorm=hyperparams[\"clipnorm\"])\n elif hyperparams['optimizer'] == 'rmsprop':\n optimizer = optimizers.RMSprop(lr=hyperparams[\"learning_rate\"],\n decay=hyperparams[\"decay\"],\n clipnorm=hyperparams[\"clipnorm\"])\n elif hyperparams['optimizer'] == 'nadam':\n optimizer = optimizers.Nadam(lr=hyperparams[\"learning_rate\"],\n schedule_decay=hyperparams[\"decay\"],\n clipnorm=hyperparams[\"clipnorm\"])\n elif hyperparams['optimizer'] == 'adam':\n optimizer = optimizers.Adam(lr=hyperparams[\"learning_rate\"],\n decay=hyperparams[\"decay\"],\n clipnorm=hyperparams[\"clipnorm\"])\n # Save optimizer parameters to run doc\n run_doc['optimizer'] = hyperparams['optimizer']\n run_doc['opt_learning_rate'] = hyperparams[\"learning_rate\"]\n run_doc['opt_decay'] = hyperparams[\"decay\"]\n run_doc['opt_clipnorm'] = hyperparams[\"clipnorm\"]\n # Create and compile the model\n model = Model(input_tensor, output)\n 
model.compile(loss='categorical_crossentropy',\n optimizer=optimizer,\n metrics=['accuracy'])\n # Print out model summary and store inside run documentation as list of strings\n model.summary()\n run_doc['model_summary'] = []\n model.summary(print_fn=(lambda a: run_doc['model_summary'].append(a)))\n # Fit the model to the datasets\n self.log.info(\"Fitting model (eval %d of %d) ...\" % (self._eval_idx + 1, self._max_eval))\n self._eval_idx += 1\n model.fit(x=train_data, y=train_targets,\n batch_size=hyperparams['batch_size'],\n epochs=hyperparams['epochs'],\n validation_data=(test_data, test_targets),\n callbacks=callbacks,\n verbose=1)\n val_loss, val_acc = model.evaluate(x=test_data, y=test_targets, verbose=2)\n self.log.info(\" .... Completed!\")\n self.log.info(\" -- Evaluation time %ds\" % (time.time() - run_start_time))\n self.log.info(\" -- Total time %ds\" % (time.time() - self._start_time))\n # Save training parameters to run doc\n run_doc['batch_size'] = hyperparams['batch_size']\n run_doc['epochs'] = hyperparams['epochs']\n run_doc['val_loss'] = val_loss\n run_doc['val_acc'] = val_acc\n # Results are used to pick best pirate\n self._results[run_id] = val_loss\n # Save run_doc to pickle file in model directory\n run_doc_file_name = run_id + '.pickle'\n if not test_mode: # Don't save docs if in testing mode\n with open(os.path.join(self._models_dir, run_doc_file_name), 'wb') as f:\n pickle.dump(run_doc, f)\n self.log.info('Run Dictionary %s' % str(run_doc))\n # Delete the session to prevent GPU memory from getting full\n keras.backend.clear_session()\n # Optimizer minimizes validation loss\n return {'loss': val_loss, 'status': STATUS_OK}", "def _setup_model(self) -> torch.nn.Sequential:\r\n\r\n # setting up model\r\n ids_ = self.get_hyperparam().get_dim_ids()\r\n if self.get_hyperparam().get_value(ids_[13]):\r\n init_ = lambda mod: self._default_weight_bias_init(mod,\r\n self.get_hyperparam().get_value(ids_[14]),\r\n self.get_hyperparam().get_value(ids_[15]),\r\n self.get_hyperparam().get_value(ids_[16]))\r\n\r\n modules = []\r\n for hd in range(int(self.get_hyperparam().get_value(ids_[3]))+1):\r\n if hd == 0:\r\n act_input_size = self.get_hyperparam().get_value(ids_[0])\r\n output_size = self.get_hyperparam().get_value(ids_[4])[hd]\r\n act_fct = self.get_hyperparam().get_value(ids_[5])[hd]()\r\n elif hd == self.get_hyperparam().get_value(ids_[3]):\r\n act_input_size = self.get_hyperparam().get_value(ids_[4])[hd-1]\r\n output_size = self.get_hyperparam().get_value(ids_[1])\r\n act_fct = self.get_hyperparam().get_value(ids_[6])()\r\n else:\r\n act_input_size = self.get_hyperparam().get_value(ids_[4])[hd-1]\r\n output_size = self.get_hyperparam().get_value(ids_[4])[hd]\r\n act_fct = self.get_hyperparam().get_value(ids_[5])[hd]()\r\n \r\n if self.get_hyperparam().get_value(ids_[13]):\r\n modules.append(init_(torch.nn.Linear(int(act_input_size), int(output_size))))\r\n else:\r\n modules.append(torch.nn.Linear(int(act_input_size), int(output_size)))\r\n modules.append(act_fct)\r\n\r\n model = torch.nn.Sequential(*modules)\r\n \r\n # add process to the model\r\n try:\r\n model = self._add_init(model)\r\n except:\r\n pass \r\n \r\n self._loss_fct = self.get_hyperparam().get_value(ids_[8])()\r\n self._optimizer = self.get_hyperparam().get_value(ids_[7])(model.parameters(), lr=self.get_hyperparam().get_value(ids_[12]))\r\n self._sampling_seed = self.get_hyperparam().get_value(ids_[11])\r\n \r\n return model", "def training(self):\n self.model.fit(self.train_x, self.train_y)", "def 
trainModel( self, featureTrain, classTrain):", "def train(self, current_hyper_params):\n train_loss = 0\n train_n_iter = 0\n # Set model to train mode\n self.model.train()\n # Iterate over train data\n print(\"Iterating over training data...\")\n for i, batch in enumerate(tqdm(self.train_loader)):\n loss = self._train_batch(batch)\n # Statistics\n train_loss += loss.item()\n train_n_iter += 1\n self.stats.train_loss_history.append(train_loss / train_n_iter)", "def _train_model(self):\n raise NotImplementedError()", "def __setup_model(self, **kwargs):\n self.model_architecture = kwargs['model_architecture'].upper()\n self.model = Classifier.IMAGENET_MODELS[self.model_architecture](\n pretrained=True\n )\n\n if 'input_size' in kwargs: # Loading from a checkpoint\n self.input_size = kwargs['input_size']\n self.model.current_epoch = kwargs['current_epoch']\n\n else: # No checkpoint, will be creating a new classifier for the model\n # The number of features coming from the feature detector CNN\n if 'ALEXNET' in self.model_architecture:\n self.input_size = self.model.classifier[1].in_features\n elif 'VGG' in self.model_architecture:\n self.input_size = self.model.classifier[0].in_features\n elif 'DENSENET' in self.model_architecture:\n self.input_size = self.model.classifier.in_features\n\n # Freeze the feature detector parameters to prevent backpropagating\n # through them.\n for param in self.model.parameters():\n param.requires_grad = False\n\n self.model.current_epoch = 1\n\n self.output_size = kwargs['output_size']\n self.hidden_layers = kwargs['hidden_layers']\n self.learn_rate = kwargs['learn_rate']\n self.drop_p = kwargs['drop_p']\n\n self.model.class_to_idx = kwargs['class_to_idx']\n self.model.classifier = Network(self.input_size,\n self.output_size,\n self.hidden_layers,\n self.drop_p)\n\n if 'model_state_dict' in kwargs: # load the state from checkpoint\n self.model.load_state_dict(kwargs['model_state_dict'])\n\n self.criterion = nn.NLLLoss()\n self.optimizer = optim.Adam(self.model.classifier.parameters(),\n lr=self.learn_rate)\n\n if 'optimizer_state_dict' in kwargs: # load the state from checkpoint\n self.optimizer.load_state_dict(kwargs['optimizer_state_dict'])", "def _set_train(self):\n\n if not self.model.__dict__['training']:\n self.model.train()", "def train(self, model, args):\n if model == self.WORD_DET_RFC:\n return self.train_rfc(args)\n elif model == self.REGRESSION_PARAMS:\n return self.train_bb_reg(args)\n else:\n raise Exception('No model %s exists to train' % model)", "def train(self, model_type, params=None):\n Model = load_model_class(model_type)\n self.model_type = model_type\n X, y = self.task.make_dataset()\n self.final_data = X.copy()\n # Save preds\n preds = np.zeros_like(y.values).astype(np.float)\n with TMPFolder():\n N = len(X)\n n = N // self.cv\n # Assign a fold to each sample\n folds = np.random.permutation(np.repeat(np.arange(self.cv), n+1)[:N])\n if self.cv == 1:\n folds[:] = 1\n folds[np.random.permutation(np.arange(N))[:int(round(0.25 * N))]] = 0\n # Iterate over folds\n for k in range(self.cv):\n print(\"Fold\", k)\n # Create model\n model = Model()\n if params is not None:\n model.set_hp(params)\n # Create sub-dataset\n X_train = X[folds != k]\n y_train = y[folds != k]\n X_test = X[folds == k]\n y_test = y[folds == k]\n # Train the model\n model.train(X_train, y_train)\n # Make predictions on test samples\n y_pred = model.predict(X_test)\n # Save the predictions\n preds[folds == k] = y_pred\n self.model_save.append(model)\n # Save folds\n 
self.folds = folds\n self.is_trained = True\n self.preds = preds\n self.true_labels = y", "def set_up_train(path_model_id='', config_names=['config.gin'], bindings=[]):\n # inject config\n utils_params.inject_gin(config_names, path_model_id=path_model_id,\n bindings=bindings) # bindings=['train_and_eval.n_epochs = 3','train_and_eval.save_period = 1']\n\n # generate folder structures\n run_paths = utils_params.gen_run_folder(path_model_id=path_model_id)\n\n # set loggers\n utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)\n\n # Define input pipeline depending on the type of training\n logging.info('Setup input pipeline...')\n train_ds, train_ds_info = gen_pipeline_train_baseline()\n eval_ds, eval_ds_info = gen_pipeline_eval_baseline()\n test_ds, test_info = gen_pipeline_test_baseline()\n\n # Define model\n logging.info(\"Setup model...\")\n model = model_fn.gen_model(n_classes=train_ds_info.features['label'].num_classes)\n\n # Train and eval\n logging.info('Start training...')\n results = train_baseline.train_and_eval_baseline(model, train_ds, train_ds_info, eval_ds, test_ds, run_paths)\n\n return results", "def setup(self):\n print(\"setup\")\n \n self.modelToUse = 1\n if self.train:\n print(\"train\")\n else:\n print(\"no train\")\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.envSize = 17\n \n #init model\n if self.train or not os.path.isfile(\"my-saved-model.pt\"):\n self.logger.info(\"Setting up model from scratch.\")\n if self.modelToUse == 0:\n self.policy_net = Model_global_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_global_view(self.envSize, self.envSize, 6).to(device)\n elif self.modelToUse == 1:\n self.policy_net = Model_local_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_local_view(self.envSize, self.envSize, 6).to(device)\n else:\n self.policy_net = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model.load_state_dict(self.policy_net.state_dict())\n self.model.eval()\n else:\n self.logger.info(\"Loading model from saved state.\")\n with open(\"my-saved-model.pt\", \"rb\") as file:\n if self.modelToUse == 0:\n self.model = Model_global_view(self.envSize, self.envSize, 6)\n elif self.modelToUse == 1:\n self.model = Model_local_view(self.envSize, self.envSize, 6)\n else:\n self.model = Model_combined_view(self.envSize, self.envSize, 6)\n if torch.cuda.is_available():\n self.model.load_state_dict(torch.load(file))\n self.model.to(device)\n else:\n self.model.load_state_dict(torch.load(file, map_location=device))", "def initiate(self):\n # if self.opt.checkpoint_encoder:\n # self.load(self.opt.checkpoint_encoder, self.opt.checkpoint_decoder)\n # else:\n # start fresh.\n self.model = Transformer(\n self.opt.src_vocab_size,\n self.opt.tgt_vocab_size,\n self.opt.max_token_seq_len,\n tgt_emb_prj_weight_sharing=self.opt.proj_share_weight,\n emb_src_tgt_weight_sharing=self.opt.embs_share_weight,\n d_k=self.opt.d_k,\n d_v=self.opt.d_v,\n d_model=self.opt.d_model,\n d_word_vec=self.opt.d_word_vec,\n d_inner=self.opt.d_inner_hid,\n n_layers=self.opt.layers,\n n_head=self.opt.n_head,\n dropout=self.opt.dropout).to(self.device)\n \n for p in self.model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)", "def _make_model(self):\n self._model = tf.estimator.Estimator(model_fn=self.model_fn,\n model_dir=self.model_dir,\n config=self._config,\n params=self._params,\n )", "def train(self, 
X_train, y_train):\n self.model.fit(X_train, y_train)", "def setup_training(model, train_loader, valid_loader, hps):\r\n\r\n train_dir = os.path.join(hps.save_root, \"train\")\r\n if not os.path.exists(train_dir): os.makedirs(train_dir)\r\n\r\n if hps.restore_model != 'None':\r\n logger.info(\"[INFO] Restoring %s for training...\", hps.restore_model)\r\n bestmodel_file = os.path.join(train_dir, hps.restore_model)\r\n loader = ModelLoader()\r\n loader.load_pytorch(model, bestmodel_file)\r\n else:\r\n logger.info(\"[INFO] Create new model for training...\")\r\n\r\n run_training(model, train_loader, valid_loader, hps) # this is an infinite loop until interrupted\r", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def train(model_name, batch_size, steps_per_epoch, epochs, validation_steps, \n model_file=None, save_path=None):\n \n print(\"- Loading configuration...\")\n if model_name in models_default_params:\n default_params = models_default_params[model_name]\n else:\n print(\"Error: the model '{}' has not been implemented\".format(model_name))\n return\n custom_objects = default_params['custom_objects']\n patch_size = default_params['patch_size']\n if save_path is None:\n save_path = default_params['default_path']\n if os.path.isfile(save_path):\n print(\"Warning: {} is an existing file and will be overwritten.\".format(save_path))\n print(\"- Configuration loaded.\")\n \n print(\"- Loading datasets...\")\n train_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Training/RGB/\",\n y_directory = \"datasets/Potsdam/Training/Labels/\",\n patch_size = patch_size)\n val_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Validation/RGB/\",\n y_directory = \"datasets/Potsdam/Validation/Labels/\",\n patch_size = patch_size)\n print(\"- Data loaded.\")\n \n print(\"- Initialising model...\")\n if(model_file is not None): # Further train existing model\n model = keras.models.load_model(model_file, custom_objects=custom_objects)\n else: # Create new model\n if model_name == 'fcn':\n model = fcn.make_fcn_resnet((patch_size, patch_size, channels), nb_labels, \n use_pretraining=False, freeze_base=False)\n elif model_name == 'pspnet':\n model = pspnet.build_pspnet(nb_classes=nb_labels, resnet_layers=50,\n input_shape=patch_size)\n elif model_name == 'mobilenetv2':\n model = mobilenetv2.MobileNetv2((patch_size, patch_size, channels), nb_labels) \n\n model.compile(\n optimizer = optimizers.Adam(lr = 0.00001),\n loss = losses.categorical_crossentropy,\n metrics = [metrics.categorical_accuracy]) \n model.summary() \n print(\"- Model initialised.\")\n \n tensorboard = callbacks.TensorBoard(log_dir='./logs')\n csv_logger = callbacks.CSVLogger('logs/training.csv')\n checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n save_best_only=True)\n \n print(\"- Starting training.\")\n model.fit_generator(\n generator=train_gen,\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n validation_data=val_gen,\n validation_steps=validation_steps,\n # callbacks=[checkpoint, csv_logger]\n )\n print(\"- Training complete.\")\n \n model.save(save_path)\n print(\"- Model saved to {}\".format(save_path))", "def train(self, **kwargs):\n self.solver.train(**kwargs)", "def train_small_test_version(self, hyperparams_dict):\n trainer = ModelTrainer(self.dataloaders, hyperparams_dict,\n self.wv_wrapper, self.path)\n model, losses, accuracies = trainer.train(epochs=3)\n return model, losses, 
accuracies", "def train(self, X_train, y_train):\n\n self.model_pipeline.fit(X_train, y_train)", "def train(self, data, option, param_map):\n if option == \"lr\":\n md = self.logistic_regression(elastic_param=param_map[\"elastic_param\"],\n reg_param=param_map[\"reg_param\"],\n family=param_map[\"family\"])\n elif option == \"rf\":\n md = self.random_forest(max_depth=param_map[\"max_depth\"],\n max_num_tree=param_map[\"max_num_tree\"])\n elif option == \"gbdt\":\n md = self.gbdt(max_depth=param_map[\"max_depth\"],\n max_bins=param_map[\"max_bins\"])\n else:\n raise ValueError(\"ERROR | model %s does not support yet\" % option)\n\n self.model = md.fit(data)\n return self.model", "def train():\n import training\n\n # Ensure output directories exist\n os.makedirs(os.path.dirname(cfg.scaler_path), exist_ok=True)\n os.makedirs(cfg.model_path, exist_ok=True)\n os.makedirs(cfg.log_path, exist_ok=True)\n\n # Load (standardized) input data and target values\n tr_x, tr_y, _ = _load_data(cfg.training_set, is_training=True)\n val_x, val_y, _ = _load_data(cfg.validation_set)\n\n # Try to create reproducible results\n np.random.seed(cfg.initial_seed)\n\n # Save free parameters to disk\n utils.log_parameters(cfg.training, os.path.join(cfg.model_path,\n 'parameters.json'))\n\n training.train(tr_x, tr_y, val_x, val_y)", "def train_model(self):\r\n alpha, accuracy_rate = self.select_model()\r\n # Initialize logistic regression with alpha(learning rate)\r\n lg = logisticregression(C=alpha)\r\n # Train the model.\r\n lg.fit(self.training_data, self.target_data)\r\n # Save the trained model as .pkl file.\r\n joblib.dump(value=lg, filename=self.intention_id+'.pkl', compress=1)\r\n print \"Estimated Parameters of Logistic Regression\"\r\n # Estimated parameters of logistic regression.\r\n print lg.get_params()", "def set_train(self):\n for m in self.models.values():\n m.train()", "def __init__(self,\n verbosity=1,\n model=None,\n path=None,\n prefix=None,\n **kwargs):\n if K.BACKEND == 'tensorflow' and tf is not None:\n min_version = tf.__version__.split(\".\")[1]\n maj_version = tf.__version__.split(\".\")[0]\n if maj_version in [\"2\"] and min_version in [\"3\", \"4\"]:\n raise NotImplementedError(f\"\"\"\n Not implemented due to a bug in tensorflow as shown here https://github.com/tensorflow/tensorflow/issues/44646\n You can use functional API instead by using\n from ai4water.functional import Model\n instead of \n from ai4water import Model\n Or change the tensorflow version. Current version is {tf.__version__}. 
\n \"\"\")\n\n tf_kwargs = {}\n for arg in ['inputs', 'outputs']:\n if arg in kwargs:\n tf_kwargs[arg] = kwargs[arg]\n\n self._go_up = False\n\n MODEL.__init__(self, **tf_kwargs)\n\n self._go_up = True\n BaseModel.__init__(self,\n prefix=prefix,\n path=path,\n verbosity=verbosity,\n model=model,\n **kwargs)\n\n self.config['backend'] = K.BACKEND\n\n if torch is not None:\n from .models._torch import Learner\n self.torch_learner = Learner(\n model=self,\n batch_size=self.config['batch_size'],\n num_epochs=self.config['epochs'],\n shuffle=self.config['shuffle'],\n to_monitor=self.config['monitor'],\n patience=self.config['patience'],\n path=self.path,\n use_cuda=False,\n wandb_config=self.config['wandb_config'],\n verbosity=self.verbosity\n )\n\n if self.category == \"DL\":\n self.initialize_layers(self.config['model']['layers'])\n\n if K.BACKEND == 'tensorflow':\n outs = self.call(self._input_lyrs(), run_call=False)\n setattr(self, 'output_lyrs', outs)\n self._go_up = False # do not reinitiate BaseModel and other upper classes\n\n maj_ver = int(tf.__version__.split('.')[0])\n min_ver = int(tf.__version__.split('.')[1][0])\n # in tf versions >= 2.5, we don't need to specify inputs and outputs as keyword arguments\n if maj_ver>1 and min_ver>=5:\n MODEL.__init__(self, self._input_lyrs(), self.output_lyrs)\n else:\n MODEL.__init__(self, inputs=self._input_lyrs(), outputs=self.output_lyrs)\n\n self.build(self._get_dummy_input_shape()) # will initialize ML models or build NNs", "def init(self):\n inputs = self.inputs()\n outputs = self.outputs(inputs)\n self.model = tf.keras.Model(inputs=inputs, outputs=outputs)\n self.model.compile(optimizer=self.optimizer() or self.config.get('optimizer'),\n loss=self.loss() or None,\n metrics=self.metrics() or None,\n loss_weights=self.loss_weights() or None,\n weighted_metrics=self.weighted_metrics() or None,\n target_tensors=self.target_tensors() or None)\n if self.config.get('debug'):\n self.model.summary()", "def train_model(model, x_train, y_train, x_test, y_test,\n epochs=None, batch_size=None):\n\n # Training\n if batch_size is None:\n batch_size = 128\n if epochs is None:\n epochs = 20\n\n print('x_train shape:', x_train.shape)\n print('x_test shape:', x_test.shape)\n\n print('Train...')\n model.fit(x_train,\n y_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=(x_test, y_test),\n use_multiprocessing=True)", "def setup_training(model, batcher):\r\n train_dir = os.path.join(FLAGS.log_root, \"train\")\r\n if FLAGS.finetune:\r\n if not os.path.exists(train_dir):\r\n print (util.bcolors.OKGREEN + 'Copying See et al. 
pre-trained model (%s) to (%s) to be fine-tuned' % (os.path.join(FLAGS.pretrained_path, 'train'), train_dir) + util.bcolors.ENDC)\r\n os.makedirs(train_dir)\r\n files = glob.glob(os.path.join(os.path.join(FLAGS.pretrained_path, 'train'), \"*model*\"))\r\n files.extend(glob.glob(os.path.join(os.path.join(FLAGS.pretrained_path, 'train'), \"*checkpoint*\")))\r\n for file in files:\r\n if os.path.isfile(file):\r\n shutil.copy2(file, train_dir)\r\n if not os.path.exists(train_dir): os.makedirs(train_dir)\r\n\r\n model.build_graph() # build the graph\r\n if FLAGS.convert_to_coverage_model:\r\n assert FLAGS.coverage, \"To convert your non-coverage model to a coverage model, run with convert_to_coverage_model=True and coverage=True\"\r\n convert_to_coverage_model()\r\n if FLAGS.restore_best_model:\r\n restore_best_model()\r\n saver = tf.train.Saver(max_to_keep=3) # keep 3 checkpoints at a time\r\n\r\n sv = tf.train.Supervisor(logdir=train_dir,\r\n is_chief=True,\r\n saver=saver,\r\n summary_op=None,\r\n save_summaries_secs=60, # save summaries for tensorboard every 60 secs\r\n save_model_secs=60, # checkpoint every 60 secs\r\n global_step=model.global_step)\r\n summary_writer = sv.summary_writer\r\n logging.info(\"Preparing or waiting for session...\")\r\n sess_context_manager = sv.prepare_or_wait_for_session(config=util.get_config())\r\n logging.info(\"Created session.\")\r\n try:\r\n run_training(model, batcher, sess_context_manager, sv, summary_writer) # this is an infinite loop until interrupted\r\n except KeyboardInterrupt:\r\n logging.info(\"Caught keyboard interrupt on worker. Stopping supervisor...\")\r\n sv.stop()", "def _train_model(self):\n self.experiment = EpisodicExperiment(self.task, self.agent)\n n_epochs = int(self.rl_params.n_training_episodes / self.rl_params.n_episodes_per_epoch)\n logger.debug(\"Fitting user model over {} epochs, each {} episodes, total {} episodes.\"\n .format(n_epochs, self.rl_params.n_episodes_per_epoch, n_epochs*self.rl_params.n_episodes_per_epoch))\n for i in range(n_epochs):\n logger.debug(\"RL epoch {}\".format(i))\n self.experiment.doEpisodes(self.rl_params.n_episodes_per_epoch)\n self.agent.learn()\n self.agent.reset() # reset buffers", "def setup_training(args: argparse.Namespace) -> None:\n # 1. Read hyperparameters from file\n hp = HParams.from_yaml(args.path_config)\n # check if GPU available and add it to parameters\n hp[\"device\"] = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n # 2. 
Create extension of the architecture of the model and timestamp for this run (use to\n # identify folders and files created for this run)\n # format: f(params_file)_t(n_tiers)_l(n_layers)_hd(hidden_size)_gmm(gmm_size).\n extension_architecture = f\"d{hp.name}_t{hp.network.n_tiers}_\" \\\n f\"l{'.'.join(map(str, hp.network.layers))}_\" \\\n f\"hd{hp.network.hidden_size}_gmm{hp.network.gmm_size}\"\n timestamp = f\"{datetime.now().strftime('%Y%m%d-%H%M%S')}\"\n\n # 3 Create directories for saving logs and model weights if they do not exist\n # 3.1 Create model weights directory for this run (the same directory will be used for different\n # runs of a model with same architecture and the difference will be in the file stored)\n hp[\"training\"][\"dir_chkpt\"] = hp.training.dir_chkpt + extension_architecture\n Path(hp.training.dir_chkpt).mkdir(parents=True, exist_ok=True)\n # 3.2 Create general log directory for this run (the same directory will be used for different\n # runs of a model with same architecture and the difference will be in the file stored)\n hp[\"logging\"][\"dir_log\"] = hp.logging.dir_log + extension_architecture\n Path(hp.logging.dir_log).mkdir(parents=True, exist_ok=True)\n\n # 4. Setup general logging (it will use the folder previously created and the filename will be:\n tier = str(args.tier) if args.tier is not None else 'ALL'\n filename = f\"{hp.logging.dir_log}/tier{tier}_{timestamp}\"\n logging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s - %(levelname)s - %(message)s',\n handlers=[\n logging.FileHandler(filename=filename), # handler to save the log to a file\n logging.StreamHandler() # handler to output the log to the terminal\n ])\n logger = logging.getLogger()\n\n # 5. Show device that will be used for training: CPU or GPU\n logger.info(f\"Device for training: {hp.device}\")\n\n # 6. 
Start training of the model (or a single tier, depending on args)\n train_model(args, hp, extension_architecture, timestamp, logger)", "def train(self):\n self.log(f\"{self.cur_file_path}\\t\\tInfo: train method invoked!\")\n self.log(f\"{self.cur_file_path}\\t\\tInfo: training {self.model.__class__.__name__} model!\")\n\n self.model.fit(self.trainX, self.trainY)", "def train_and_eval(self):\n self.__create_indexes()\n model = None\n model = None\n if self.model == 'OMult':\n model = OMult(self.kwargs)\n elif self.model == 'ConvO':\n model = ConvO(self.kwargs)\n elif self.model == 'QMult':\n model = QMult(self.kwargs)\n elif self.model == 'ConvQ':\n model = ConvQ(self.kwargs)\n elif self.model == 'OMultBatch':\n model = OMultBatch(self.kwargs)\n elif self.model == 'ConvOBatch':\n model = ConvOBatch(self.kwargs)\n elif self.model == 'QMultBatch':\n model = QMultBatch(self.kwargs)\n elif self.model == 'ConvQBatch':\n model = ConvQBatch(self.kwargs)\n else:\n print(self.model, ' is not valid name')\n raise ValueError\n\n self.train(model)\n self.eval(model)", "def prepare_model(model_hyperparameters=None, load_weights=False, weight_directory=None, verbose=False):\r\n if model_hyperparameters is None:\r\n raise ValueError(\"Hyperparameters must be given to prepare a model.\")\r\n model_to_train = build_character_cnn(model_hyperparameters, verbose=verbose)\r\n optimizer = tf.keras.optimizers.SGD(lr=0.01, decay=1e-5, momentum=0.9, nesterov=True)\r\n model_to_train.compile(optimizer=optimizer, loss=loss)\r\n if verbose:\r\n model_to_train.summary()\r\n if load_weights:\r\n if weight_directory is not None:\r\n model_to_train.load_weights(tf.train.latest_checkpoint(weight_directory))\r\n else:\r\n print(\"Error with directory provided to load weights from. \\\r\n Continuing with no weights loaded.\")\r\n return model_to_train", "def __init__(self, config, data_loader, layer_hyperparams):\n self.config = config\n self.layer_hyperparams = layer_hyperparams\n\n if config.is_train:\n self.train_loader = data_loader[0]\n self.valid_loader = data_loader[1]\n self.num_train = len(self.train_loader.dataset)\n self.num_valid = self.valid_loader.dataset.trials\n else:\n if config.get_embedding:\n self.test_embedding_loader = data_loader\n self.n_embeddings = config.n_embeddings\n else:\n self.test_loader = data_loader\n self.num_test = self.test_loader.dataset.trials\n\n if config.use_batch_norm:\n self.model = SiameseNetWithBN()\n else:\n self.model = SiameseNet()\n \n if config.use_gpu:\n self.model.cuda()\n\n # model params\n self.num_params = sum(\n [p.data.nelement() for p in self.model.parameters()]\n )\n self.num_model = get_num_model(config)\n self.num_layers = len(list(self.model.children()))\n\n print('[*] Number of model parameters: {:,}'.format(self.num_params))\n\n # path params\n self.ckpt_dir = os.path.join(config.ckpt_dir, self.num_model)\n self.logs_dir = os.path.join(config.logs_dir, self.num_model)\n\n # misc params\n self.resume = config.resume\n self.use_gpu = config.use_gpu\n self.dtype = (\n torch.cuda.FloatTensor if self.use_gpu else torch.FloatTensor\n )\n\n # optimization params\n self.best = config.best\n self.best_valid_acc = 0.\n self.epochs = config.epochs\n self.start_epoch = 0\n self.lr_patience = config.lr_patience\n self.train_patience = config.train_patience\n self.counter = 0\n\n # grab layer-wise hyperparams\n self.init_lrs = self.layer_hyperparams['layer_init_lrs']\n self.init_momentums = [config.init_momentum]*self.num_layers\n self.end_momentums = 
self.layer_hyperparams['layer_end_momentums']\n self.l2_regs = self.layer_hyperparams['layer_l2_regs']\n\n # compute temper rate for momentum\n if self.epochs == 1:\n f = lambda max, min: min\n else:\n f = lambda max, min: (max - min) / (self.epochs-1)\n self.momentum_temper_rates = [\n f(x, y) for x, y in zip(self.end_momentums, self.init_momentums)\n ]\n\n # set global learning rates and momentums\n self.lrs = self.init_lrs\n self.momentums = self.init_momentums\n\n # # initialize optimizer\n # optim_dict = []\n # for i, layer in enumerate(self.model.children()):\n # group = {}\n # group['params'] = layer.parameters()\n # group['lr'] = self.lrs[i]\n # group['momentum'] = self.momentums[i]\n # group['weight_decay'] = self.l2_regs[i]\n # optim_dict.append(group)\n # self.optimizer = optim.SGD(optim_dict)\n # self.optimizer = optim.SGD(\n # self.model.parameters(), lr=1e-3, momentum=0.9, weight_decay=4e-4,\n # )\n self.optimizer = optim.Adam(\n self.model.parameters(), lr=3e-4, weight_decay=6e-5,\n )\n\n # # learning rate scheduler\n # self.scheduler = StepLR(\n # self.optimizer, step_size=self.lr_patience, gamma=0.99,\n # )\n self.debug = dict()", "def train_model(self):\n self.logger.info('Loading the data...')\n train_data = self.load_data(split=\"train\")\n dev_data = self.load_data(split=\"dev\")\n self.config.best_model = os.path.join(self.config.output_dir,\"best_model\")\n self.logger.info('Training the model, outputdir=%s...,best_model=%s' % (self.config.output_dir,self.config.best_model))\n\n train_params = {\n \"overwrite_output_dir\" : True,\n \"reprocess_input_data\": True,\n \"learning_rate\" : self.config.learning_rate,\n \"num_train_epochs\" : self.config.num_train_epochs,\n \"train_batch_size\" : self.config.train_batch_size,\n \"eval_batch_size\" : self.config.eval_batch_size,\n \"gradient_accumulation_steps\": self.config.gradient_accumulation_steps,\n \"use_early_stopping\" : self.config.early_stopping,\n \"fp16\" : False,\n \"classification_report\" : True,\n \"evaluate_during_training\" : True,\n \"evaluate_during_training_verbose\" : True,\n \"best_model_dir\": self.config.best_model,\n \"save_model_every_epoch\" : self.config.save_model_every_epoch,\n \"save_steps\" : self.config.save_steps,\n \"save_optimizer_and_scheduler\" : self.config.save_optimizer_and_scheduler,\n \"save_best_model\": True,\n }\n\n ## train the model \n self.model.train_model(\n train_data,\n eval_data=dev_data,\n output_dir=self.config.output_dir,\n show_running_loss=False,\n args=train_params,\n )\n\n ## backing up the config and create pointer to best model \n with open(os.path.join(self.config.best_model,\"trainer_config.json\"),'w') as mconfig:\n mconfig.write(json.dumps(self.config.__dict__))\n self.config.existing_model = self.config.best_model", "def setup_model(self):\r\n\r\n logging.info(\"Setup the models.\")\r\n\r\n logging.info(\"{} model\".format(self.base_network_name))\r\n if self.base_network_name.lower().startswith(\"resnet\"):\r\n base_model, classifier = getattr(setops_models, self.base_network_name)(\r\n num_classes=80,\r\n avgpool_kernel=self.avgpool_kernel\r\n )\r\n else:\r\n base_model = getattr(setops_models, self.base_network_name)()\r\n classifier = getattr(setops_models, self.classifier_name)(num_classes=80)\r\n\r\n if self.init_inception:\r\n logging.info(\"Initialize inception model using Amit's networks.\")\r\n\r\n checkpoint = torch.load(self.resume_path)\r\n\r\n base_model = Inception3(aux_logits=False, transform_input=True)\r\n base_model.load_state_dict(\r\n 
{k: v for k, v in checkpoint[\"state_dict\"].items() if k in base_model.state_dict()}\r\n )\r\n classifier.load_state_dict(\r\n {k: v for k, v in checkpoint[\"state_dict\"].items() if k in classifier.state_dict()}\r\n )\r\n\r\n setops_model_cls = getattr(setops_models, self.sets_network_name)\r\n setops_model = setops_model_cls(\r\n input_dim=2048,\r\n S_latent_dim=self.ops_latent_dim, S_layers_num=self.ops_layer_num,\r\n I_latent_dim=self.ops_latent_dim, I_layers_num=self.ops_layer_num,\r\n U_latent_dim=self.ops_latent_dim, U_layers_num=self.ops_layer_num,\r\n block_cls_name=self.sets_block_name, basic_block_cls_name=self.sets_basic_block_name,\r\n dropout_ratio=self.setops_dropout,\r\n )\r\n\r\n if self.resume_path:\r\n logging.info(\"Resuming the models.\")\r\n models_path = Path(self.resume_path)\r\n if self.base_network_name.lower().startswith(\"resnet\"):\r\n base_model.load_state_dict(\r\n torch.load(sorted(models_path.glob(\"networks_base_model_{}*.pth\".format(self.resume_epoch)))[-1])\r\n )\r\n classifier.load_state_dict(\r\n torch.load(sorted(models_path.glob(\"networks_classifier_{}*.pth\".format(self.resume_epoch)))[-1])\r\n )\r\n\r\n setops_models_paths = sorted(models_path.glob(\"networks_setops_model_{}*.pth\".format(self.resume_epoch)))\r\n if len(setops_models_paths) > 0:\r\n setops_model.load_state_dict(\r\n torch.load(setops_models_paths[-1]).state_dict()\r\n )\r\n\r\n return base_model, classifier, setops_model", "def fit(self):\n \n # Open an existing model and get the training & test dataset and targets\n train_test_df, target_df = self._get_model_and_data(target=True, set_feature_def=True)\n \n # Check that the estimator is an supervised ML algorithm\n if self.model.estimator_type not in [\"classifier\", \"regressor\"]:\n err = \"Incorrect usage. 
The estimator specified is not a known classifier or regressor: {0}\".format(self.model.estimator)\n raise Exception(err)\n \n # Check which validation strategy is to be used, if any\n # For an explanation of cross validation in scikit-learn see: http://scikit-learn.org/stable/modules/cross_validation.html#multimetric-cross-validation\n if self.model.time_series_split > 0:\n self.model.validation = \"timeseries\"\n # Set up cross validation to be performed using TimeSeriesSplit\n self.model.cv = TimeSeriesSplit(n_splits=self.model.time_series_split, max_train_size=self.model.max_train_size)\n elif self.model.cv > 0:\n self.model.validation = \"k-fold\"\n elif self.model.test_size > 0:\n self.model.validation = \"hold-out\"\n else:\n self.model.validation = \"external\"\n\n if self.model.validation == \"hold-out\": \n # Split the data into training and testing subsets\n self.X_train, self.X_test, self.y_train, self.y_test = \\\n train_test_split(train_test_df, target_df, test_size=self.model.test_size, random_state=self.model.random_state)\n else:\n self.X_train = train_test_df\n self.y_train = target_df\n \n # Add the training and test data to the model if required\n if self.model.retain_data:\n self.model.X_train = self.X_train\n self.model.y_train = self.y_train\n \n try:\n self.model.X_test = self.X_test\n self.model.y_test = self.y_test\n except AttributeError:\n pass\n \n # Scale the targets and increase stationarity if required\n if self.model.scale_target or self.model.make_stationary:\n # Set up the target transformer\n self.model.target_transformer = TargetTransformer(scale=self.model.scale_target, make_stationary=self.model.make_stationary, stationarity_lags=self.model.stationarity_lags,\\\n missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)\n\n # Fit the transformer to the training targets\n self.model.target_transformer = self.model.target_transformer.fit(self.y_train)\n\n # Apply the transformer to the training targets\n self.y_train = self.model.target_transformer.transform(self.y_train)\n # Drop samples where the target cannot be transformed due to insufficient lags\n self.X_train = self.X_train.iloc[len(self.X_train)-len(self.y_train):] \n \n # Add lag observations to the samples if required\n if self.model.lags or self.model.lag_target:\n # Check if the current sample will be included as an input, or whether we only use lag observations for predictions\n extrapolate = 1 if self.model.current_sample_as_input else 0\n # Add the lag observations\n self.X_train = self._add_lags(self.X_train, self.y_train, extrapolate=extrapolate, update_features_df=True)\n # Drop targets for samples which were dropped due to null values after adding lags.\n if len(self.y_train) > len(self.X_train):\n self.y_train = self.y_train.iloc[len(self.y_train)-len(self.X_train):]\n\n # If this is a Keras estimator, we require the preprocessing to return a data frame instead of a numpy array\n prep_return = 'df' if self.model.using_keras else 'np'\n\n # Construct the preprocessor\n prep = Preprocessor(self.model.features_df, return_type=prep_return, scale_hashed=self.model.scale_hashed, scale_vectors=self.model.scale_vectors,\\\n missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)\n\n # Setup a list to store steps for the sklearn pipeline\n pipe_steps = [('preprocessor', prep)]\n\n if self.model.dim_reduction:\n # Construct the dimensionality reduction object\n reduction = 
self.decomposers[self.model.reduction](**self.model.dim_reduction_args)\n \n # Include dimensionality reduction in the pipeline steps\n pipe_steps.append(('reduction', reduction))\n self.model.estimation_step = 2\n else:\n self.model.estimation_step = 1 \n\n # If this is a Keras estimator, update the input shape and reshape the data if required\n if self.model.using_keras:\n # Update the input shape based on the final number of features after preprocessing\n self._keras_update_shape(prep)\n\n # Add the Keras build function, architecture and prediction_periods to the estimator keyword arguments\n self.model.estimator_kwargs['build_fn'] = self._keras_build_fn\n self.model.estimator_kwargs['architecture'] = self.model.architecture\n self.model.estimator_kwargs['prediction_periods'] = self.model.prediction_periods\n\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(10)\n \n # Check than an identifier has been provided for sorting data if this is a sequence prediction problem\n if self.model.lags or len(self.model.first_layer_kwargs[\"input_shape\"]) > 1:\n assert len(self.model.original_features_df[self.model.original_features_df['variable_type'].isin([\"identifier\"])]) == 1, \\\n \"An identifier is mandatory when using lags or with sequence prediction problems. Define this field in your feature definitions.\"\n\n # Cater for multi-step predictions\n if self.model.prediction_periods > 1:\n # Transform y to a vector of values equal to prediction_periods\n self.y_train = utils.vectorize_array(self.y_train, steps=self.model.prediction_periods)\n # Drop values from x for which we don't have sufficient y values\n self.X_train = self.X_train.iloc[:-len(self.X_train)+len(self.y_train)]\n\n # Add a pipeline step to update the input shape and reshape the data if required\n # This transform will also add lag observations if specified through the lags parameter\n # If lag_target is True, an additional feature will be created for each sample using the previous value of y \n reshape = Reshaper(first_layer_kwargs=self.model.first_layer_kwargs, logfile=self.logfile)\n pipe_steps.append(('reshape', reshape))\n self.model.estimation_step += self.model.estimation_step\n\n # Avoid tensorflow error for keras models\n # https://github.com/tensorflow/tensorflow/issues/14356\n # https://stackoverflow.com/questions/40785224/tensorflow-cannot-interpret-feed-dict-key-as-tensor\n kerasbackend.clear_session()\n \n # Try assuming the pipeline involves a grid search\n try:\n # Construct an estimator\n estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)\n\n # Prepare the grid search using the previously set parameter grid\n grid_search = GridSearchCV(estimator=estimator, param_grid=self.model.param_grid, **self.model.grid_search_args)\n \n # Add grid search to the pipeline steps\n pipe_steps.append(('grid_search', grid_search))\n\n # Construct the sklearn pipeline using the list of steps\n self.model.pipe = Pipeline(pipe_steps)\n\n if self.model.validation in [\"k-fold\", \"timeseries\"]:\n # Perform K-fold cross validation\n self._cross_validate()\n\n # Fit the training data to the pipeline\n if self.model.using_keras:\n # https://stackoverflow.com/questions/54652536/keras-tensorflow-backend-error-tensor-input-10-specified-in-either-feed-de\n session = tf.Session()\n kerasbackend.set_session(session)\n with session.as_default():\n with session.graph.as_default():\n sys.stdout.write(\"\\nMODEL: {}, INPUT SHAPE: 
{}\\n\\n\".format(self.model.name, self.model.first_layer_kwargs['input_shape']))\n y = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n self.model.pipe.fit(self.X_train, y)\n else:\n self.model.pipe.fit(self.X_train, self.y_train.values.ravel())\n\n # Get the best parameters and the cross validation results\n grid_search = self.model.pipe.named_steps['grid_search']\n self.model.best_params = grid_search.best_params_\n self.model.cv_results = grid_search.cv_results_\n\n # Get the best estimator to add to the final pipeline\n estimator = grid_search.best_estimator_\n\n # Update the pipeline with the best estimator\n self.model.pipe.steps[self.model.estimation_step] = ('estimator', estimator)\n\n except AttributeError:\n # Construct an estimator\n estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)\n\n # Add the estimator to the pipeline steps\n pipe_steps.append(('estimator', estimator))\n\n # Construct the sklearn pipeline using the list of steps\n self.model.pipe = Pipeline(pipe_steps)\n\n if self.model.validation in [\"k-fold\", \"timeseries\"]:\n # Perform K-fold cross validation\n self._cross_validate()\n\n # Fit the training data to the pipeline\n if self.model.using_keras:\n # https://stackoverflow.com/questions/54652536/keras-tensorflow-backend-error-tensor-input-10-specified-in-either-feed-de\n session = tf.Session()\n kerasbackend.set_session(session)\n with session.as_default():\n with session.graph.as_default():\n sys.stdout.write(\"\\nMODEL: {}, INPUT SHAPE: {}\\n\\n\".format(self.model.name, self.model.first_layer_kwargs['input_shape']))\n y = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n self.model.pipe.fit(self.X_train, y)\n else:\n self.model.pipe.fit(self.X_train, self.y_train.values.ravel())\n \n if self.model.validation == \"hold-out\": \n # Evaluate the model using the test data \n self.calculate_metrics(caller=\"internal\")\n \n if self.model.calc_feature_importances:\n # Select the dataset for calculating importances\n if self.model.validation == \"hold-out\":\n X = self.X_test\n y = self.y_test # Already a numpy array after calculate_metrics\n else:\n X = self.X_train\n y = self.y_train.values.ravel()\n \n # Calculate model agnostic feature importances\n self._calc_importances(X = X, y = y)\n\n # Persist the model to disk\n self.model = self.model.save(self.model.name, self.path, overwrite=self.model.overwrite, compress=self.model.compress)\n \n # Update the cache to keep this model in memory\n self._update_cache()\n \n # Prepare the output\n if self.model.validation != \"external\": \n message = [[self.model.name, 'Model successfully trained, tested and saved to disk.',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp)),\\\n \"{0} model has a score of {1:.3f} against the test data.\"\\\n .format(self.model.estimator, self.model.score), self.model.score]]\n else:\n message = [[self.model.name, 'Model successfully trained and saved to disk.',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp)),\\\n \"{0} model score unknown as test_size was <= 0.\"\\\n .format(self.model.estimator), np.NaN]]\n \n self.response = pd.DataFrame(message, columns=['model_name', 'result', 'time_stamp', 'score_result', 'score'])\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"fit\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n 
self._print_log(4)\n \n # Finally send the response\n return self.response", "def __init__(self, config):\n self.model = None\n self.config = config\n self.batch_size = config.get('batch_size')\n self.epochs = config.get('epochs')\n self.steps_per_epoch = config.get('steps_per_epoch')\n self.validation_steps = config.get('validation_steps')\n self.distributed = config.get('distributed', False)\n \n # init model\n self.init()", "def train_model(self) -> Model:\n run = self.submit_experiment_run(wait_for_completion=self.wait_for_completion)\n model = run.register_model(\n model_name=self.model_name, model_path=self.model_path\n )\n return model", "def __train_model(self):\n logger.info(\"Training the ALS model...\")\n self.model = ALS.train(self.ratings_RDD, self.rank, seed=self.seed,\n iterations=self.iterations, lambda_=self.regularization_parameter)\n logger.info(\"ALS model built!\")", "def __train_model(self):\n logger.info(\"Training the ALS model...\")\n self.model = ALS.train(self.ratings_RDD, self.rank, seed=self.seed,\n iterations=self.iterations, lambda_=self.regularization_parameter)\n logger.info(\"ALS model built!\")", "def fit(self, model_name, **model_params):\n model = self.model_dict[model_name]\n model.set_params(**model_params)\n self.model = model.fit(\n self.data.loc[:, self.selected_features_], self.data.loc[:, self.target_name])", "def train_model(self):\n if not self.is_exist(self.path_model_directory):\n # Then create the parent folder\n os.makedirs(self.path_model_directory)\n\n # Create a meta-data pickle for the model\n self.create_meta_data_pickle()\n\n # Necessary meta-data file must be created before starting the training. Check if the file exists\n if self.is_exist(self.path_model_metadata):\n\n # We do not need to train a model if there is already a best model for the same training exist\n try:\n self.model = load_model(self.path_best_model)\n return\n except:\n self.log_event('There is no best trained model found in the parent folder. Going with the training...')\n\n # Load the model meta-data\n self.load_model_metadata()\n self.encoding_vector_size = self.number_of_distinct_items\n\n # Iterate trough the split data for the training\n for split_number in range(self.k_split):\n split_path = f'split_{str(split_number)}/'\n split_directory = self.path_model_directory + split_path\n\n # Check the split directory is already created. 
If it is, then we can directly start the training by using the existing data\n if self.is_exist(split_directory):\n try:\n self.load_best_tuned_model(split_number)\n except (IndexError, FileNotFoundError):\n self.load_fold_k_data_and_fit(split_number=int(split_number))\n\n else:\n # Create a folder for the split data and prepare the data for the training\n os.makedirs(split_directory)\n\n # Create an array which will contain train features-labels and test features-labels\n train_array = np.full(4, fill_value=self.mask_value, dtype=object)\n train_index = 0\n for position, split_name in enumerate(['train_split_', 'test_split_']):\n training_features_directory = split_directory + f'{split_name}{str(split_number)}_all_training_features.data'\n training_targets_directory = split_directory + f'{split_name}{str(split_number)}_all_training_targets.data'\n fold_directory = self.path_shared_folds + f'{split_name}{str(split_number)}.fold'\n\n self.process_training_data(fold_directory=fold_directory)\n\n self.save_data_to_disk(data_to_save=self.all_features, path_to_save=training_features_directory)\n train_array[train_index] = self.all_features\n train_index += 1\n self.all_features = None # Memory Management\n\n self.save_data_to_disk(data_to_save=self.all_targets, path_to_save=training_targets_directory)\n train_array[train_index] = self.all_targets\n train_index += 1\n self.all_targets = None # Memory Management\n\n # Assign the input data to respective variables for the training\n self.train_features = train_array[0]\n self.train_targets = train_array[1]\n self.test_features = train_array[2]\n self.test_targets = train_array[3]\n del train_array\n\n self.start_hyper_parameter_tuning(split_number)\n\n self.retrieve_best_model(metric=self.hyper_parameters['metric'])", "def setup(self):\n self.model.monitor = myMonitor.get_monitor(self.model)\n self.model.monitor.time_budget_exceeded = False\n if self.algorithm is not None:\n self.algorithm.setup(model=self.model, dataset=self.dataset)\n self.setup_extensions()\n\n # Model.modify_updates is used by the training algorithm to\n # enforce constraints after each step of learning. Here we\n # make sure the constraints are enforced from the start.\n self.model.enforce_constraints()", "def train_model(args: argparse.Namespace, hp: HParams, extension_architecture: str, timestamp: str,\n logger: logging.Logger) -> None:\n # 1. Check if we have to train a single tier or a complete model (with several tiers)\n if args.tier is not None:\n # 1.1 Argument tier was defined. Only that tier will be trained.\n logging.info(f\"Training single tier of the model: Tier {args.tier}\")\n\n # 2. Setup tensorboard logging\n # 2.1 Create tensorboard logs directory (tensorboard requires a different folder for each\n # run of the model, in this case every run to train a tier) so we add the extension of the\n # network's architecture of this run and the timestamp to identify it completely\n tensorboard_dir = f\"{hp.logging.dir_log_tensorboard}{extension_architecture}_\" \\\n f\"{timestamp}_tier{args.tier}\"\n Path(tensorboard_dir).mkdir(parents=True, exist_ok=True)\n # 2.2 Create tensorboard writer\n tensorboardwriter = TensorboardWriter(hp, tensorboard_dir)\n\n # 3. Start training of the tier\n train_tier(args, hp, args.tier, extension_architecture, timestamp, tensorboardwriter,\n logger)\n\n tensorboardwriter.close()\n\n else:\n # 1.2 Argument tier was not defined. 
Train all tiers of the model.\n logging.info(\"Training all tiers of the model\")\n\n for tier in range(1, hp.network.n_tiers + 1):\n # 2. Setup tensorboard logging (one for every tier)\n # 2.1 Create tensorboard logs directory (tensorboard requires a different folder for\n # each run of the model, in this case every run to train a tier) so we add the extension\n # of the network's architecture of this run and the timestamp to identify it completely\n tensorboard_dir = hp.logging.dir_log_tensorboard + extension_architecture \\\n + f\"_{timestamp}_tier{tier}\"\n Path(tensorboard_dir).mkdir(parents=True, exist_ok=True)\n # 2.2 Create tensorboard writer\n tensorboardwriter = TensorboardWriter(hp, tensorboard_dir)\n\n # 3. Start training of the tier\n train_tier(args, hp, tier, extension_architecture, timestamp, tensorboardwriter, logger)\n\n tensorboardwriter.close()\n del tensorboardwriter", "def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()", "def train(self, dataset, model_dir):\n raise NotImplementedError", "def train_model():\n return model.fit(train_images, train_labels, epochs=10, validation_data=(test_images, test_labels), shuffle='True')", "def init_training(self):\n\n if not os.path.exists(self._model_root_path):\n os.makedirs(self._model_root_path)\n\n # Only initialize once!\n if self._model is None:\n self._model = TrainableAimbotModel(self._config, self._fov,\n os.path.join(self._model_root_path, 'aimbot_model.tf'))\n\n if not os.path.isfile(self._train_data_tfrecord_path) and not os.path.isfile(self._test_data_tfrecord_path):\n # Only create if not existing\n images_labels = _get_annotations_and_images(self._image_path)\n images_labels_train, images_labels_test = train_test_split(images_labels, shuffle=True, test_size=0.20)\n\n self._model.create_tfrecords(self._train_data_tfrecord_path, images_labels_train)\n self._model.create_tfrecords(self._test_data_tfrecord_path, images_labels_test)\n\n self._train_data_set = self._model.create_dataset(self._train_data_tfrecord_path, augment=True, shuffle=True)\n self._test_data_set = self._model.create_dataset(self._train_data_tfrecord_path)", "def fit_model(self):\n logger.info('Fitting model')\n if self.traj_dict is None:\n self.traj_dict = self.get_traj_dict()\n self.model.fit(self.traj_dict.values())", "def keras_setup(self):\n\n # Interpret the request data based on the expected row and column structure\n row_template = ['strData', 'numData', 'strData', 'strData', 'strData']\n col_headers = ['model_name', 'sort_order', 'layer_type', 'args', 'kwargs']\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # Create a model that can be persisted to disk\n self.model = PersistentModel()\n \n # Get the model name from the request dataframe\n self.model.name = self.request_df.loc[0, 'model_name']\n \n # Get the model from cache or disk\n self._get_model()\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(3)\n\n # Set a flag which will let us know that this is a Keras model\n self.model.using_keras = True\n\n # Sort the layers, drop unnecessart columns and save the model architecture to a new data frame\n architecture = self.request_df.sort_values(by=['sort_order']).reset_index(drop=True).drop(labels=['model_name', 'sort_order'], axis = 1)\n\n # Convert args to a list and kwargs to a dictionary\n architecture['args'] = 
architecture['args'].apply(utils.get_args_by_type)\n architecture['kwargs'] = architecture['kwargs'].apply(utils.get_kwargs).apply(utils.get_kwargs_by_type) \n\n # Add the architecture to the model\n self.model.architecture = architecture\n\n # Add the first layer's kwargs as a property for easy reference\n self.model.first_layer_kwargs = self.model.architecture.iloc[0, 2]\n\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(10)\n\n # Persist the model to disk\n self.model = self.model.save(self.model.name, self.path, overwrite=self.model.overwrite, compress=self.model.compress)\n \n # Update the cache to keep this model in memory\n self._update_cache()\n \n # Prepare the output\n message = [[self.model.name, 'Keras model architecture saved to disk',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp))]]\n self.response = pd.DataFrame(message, columns=['model_name', 'result', 'time_stamp'])\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"setup\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response", "def set_load_model_parameters(self):\n\n self.controller.set_new_model_test_input_path(self.test_input.get())\n self.controller.set_new_model_results_input_path(self.results_input.get())\n self.controller.set_new_model_running(False)", "def train_model(model,\n dataset_info,\n steps_per_epoch,\n args):\n if args.mode not in ['train', 'finetune']:\n raise ValueError(\"train_model() called when in %s mode\" % args.mode)\n\n dataset_info, model_info = fill_info_dicts(dataset_info, args)\n\n train_batches = {name: model_info[name]['train_batch']\n for name in model_info}\n\n additional_encoder_kwargs = dict()\n\n for dataset_name in model_info:\n additional_encoder_kwargs[dataset_name] = dict()\n\n with open(args.encoder_config_file, 'r') as f:\n encoders = json.load(f)\n extract_fn = encoders[args.architecture][dataset_name]['extract_fn']\n embed_fn = encoders[args.architecture][dataset_name]['embed_fn']\n\n if embed_fn in ['embed_sequence', 'pretrained']:\n # TODO text_field_name ?\n if 'input_key' == 'weights':\n additional_encoder_kwargs[dataset_name]['weights'] = \\\n train_batches[\n dataset_name]['text_weights']\n elif embed_fn == 'pretrained':\n additional_encoder_kwargs[dataset_name]['is_training'] = True\n elif embed_fn == 'tokenized_embed':\n additional_encoder_kwargs[dataset_name][\n 'precompute_path'] = args.precompute_path\n else:\n pass\n\n if extract_fn == \"serial_lbirnn\":\n additional_encoder_kwargs[dataset_name]['is_training'] = True\n if args.experiment_name == \"RUDER_NAACL_18\":\n # use last token of last sequence as feature representation\n indices = train_batches[dataset_name][\n 'seq2_length']\n ones = tf.ones([tf.shape(indices)[0]], dtype=tf.int64)\n # last token is at pos. 
length-1\n indices = tf.subtract(indices, ones)\n additional_encoder_kwargs[dataset_name]['indices'] = indices\n elif extract_fn == \"lbirnn\":\n additional_encoder_kwargs[dataset_name]['is_training'] = True\n elif extract_fn == \"serial_lbirnn_stock\":\n additional_encoder_kwargs[dataset_name]['is_training'] = True\n elif extract_fn == \"dan\":\n additional_encoder_kwargs[dataset_name]['is_training'] = True\n else:\n pass\n\n # losses = model.get_multi_task_loss(train_batches,\n # is_training=True,\n # additional_encoder_kwargs=additional_encoder_kwargs)\n\n # TODO multi loss\n losses = dict()\n for dataset in args.datasets:\n # import pdb\n # sess = tf.Session()\n # sess.run(model_info[dataset_name]['train_iter'].initializer)\n # batch = model_info[dataset_name]['train_batch']\n losses[dataset] = model.get_loss(train_batches[dataset],\n dataset,\n dataset,\n additional_encoder_kwargs=additional_encoder_kwargs,\n # sequence in train mode\n is_training=True)\n\n # import pdb\n # sess.run(model_info[dataset_name]['pred_iter'].initializer)\n # batch = model_info[dataset_name]['pred_batch']\n # text, weights = sess.run([batch['text'], batch['text_weights']])\n # pdb.set_trace()\n\n # # see if dropout and is_training working\n # # by checking train loss with different is_training the same\n\n # Done building compute graph; set up training ops.\n\n # Training ops\n global_step_tensor = tf.train.get_or_create_global_step()\n\n train_ops = dict()\n optim = tf.train.RMSPropOptimizer(learning_rate=args.lr0)\n for dataset_name in model_info:\n # tvars, grads = get_var_grads(losses[dataset_name])\n # train_ops[dataset_name] = get_train_op(tvars, grads, lr, args.max_grad_norm,\n # global_step_tensor, args.optimizer, name='train_op_{}'.format(dataset_name))\n train_ops[dataset_name] = optim.minimize(losses[dataset_name],\n global_step=global_step_tensor)\n\n # tvars, grads = get_var_grads(loss)\n # train_op = get_train_op(tvars, grads, lr, args.max_grad_norm,\n # global_step_tensor, args.optimizer, name='train_op')\n init_ops = [tf.global_variables_initializer(),\n tf.local_variables_initializer()]\n config = get_proto_config(args)\n\n # Get training objective. The inputs are:\n # 1. 
A dict of { dataset_key: dataset_iterator }\n #\n\n fill_eval_loss_op(args, model, dataset_info, model_info)\n fill_pred_op_info(dataset_info, model, args, model_info)\n fill_topic_op(args, model_info)\n\n print(\"All the variables after defining valid/test accuracy:\")\n all_variables = tf.global_variables()\n trainable_variables = tf.trainable_variables()\n total_trainable_parameters = 0\n for var in all_variables:\n if var in trainable_variables:\n print('(t) {}'.format(var))\n shape = var.get_shape()\n var_params = 1\n for dim in shape:\n var_params *= dim.value\n total_trainable_parameters += var_params\n else:\n print('( ) {}'.format(var))\n\n print(\"Total trainable parameters in this model={}\\n\\n\\n\".format(\n total_trainable_parameters))\n\n # # Add ops to save and restore all the variables.\n\n # latest checkpoint\n # saves every several steps\n # automatically done by tf.train.SingularMonitorSession with\n # tf.train.CheckpoinSaverHook\n\n # TODO load from some checkpoint dif at the beginning(?)\n saver_hook = tf.train.CheckpointSaverHook(\n checkpoint_dir=os.path.join(args.checkpoint_dir, 'latest'),\n save_steps=100)\n\n # saved model builders for each model\n # builders = init_builders(args, model_info)\n\n saver = tf.train.Saver(max_to_keep=100)\n\n with tf.train.SingularMonitoredSession(hooks=[saver_hook],\n config=config) as sess:\n\n if args.mode == 'train':\n sess.run(init_ops)\n\n else:\n assert len(args.datasets) == 1\n checkpoint_path_load = model_info[args.datasets[0]][\n 'checkpoint_path_load']\n saver.restore(sess, checkpoint_path_load)\n\n if args.summaries_dir:\n train_file_writer = tf.summary.FileWriter(\n os.path.join(args.summaries_dir, 'train'), graph=sess.graph)\n valid_file_writer = tf.summary.FileWriter(\n os.path.join(args.summaries_dir, 'valid'), graph=sess.graph)\n\n best_eval_performance = dict()\n for dataset_name in model_info:\n _train_init_op = model_info[dataset_name]['train_init_op']\n _valid_init_op = model_info[dataset_name]['valid_init_op']\n\n sess.run([_train_init_op, _valid_init_op])\n\n init_value = float('-inf')\n if args.tuning_metric == 'MAE_MACRO':\n init_value = float('inf')\n best_eval_performance[dataset_name] = {\"epoch\": -1,\n args.tuning_metric: init_value,\n \"performance\": None\n }\n\n best_total_tuning_metric = init_value\n best_tuning_metric_epoch = -1\n\n main_task_dev_tuning_metric = []\n stopping_criterion_reached = False\n early_stopping_dev_results = \"\"\n\n # Do training\n make_dir(os.path.dirname(args.log_file))\n with open(args.log_file, 'a') as f:\n f.write('VALIDATION RESULTS\\n')\n for epoch in xrange(1, args.num_train_epochs + 1):\n\n start_time = time()\n\n total_tuning_metric = 0.0\n\n # Take steps_per_epoch gradient steps\n total_loss = 0\n num_iter = 0\n # for _ in tqdm(xrange(steps_per_epoch)):\n # step, loss_v, _ = sess.run(\n # [global_step_tensor, loss, train_op])\n # num_iter += 1\n # total_loss += loss_v\n #\n # # loss_v is sum over a batch from each dataset of the average loss *per\n # # training example*\n # assert num_iter > 0\n #\n # average loss per batch (which is in turn averaged across examples)\n # train_loss = float(total_loss) / float(num_iter)\n\n for _ in tqdm(xrange(steps_per_epoch)):\n for (dataset_name, alpha) in zip(*[args.datasets, args.alphas]):\n loss_v, _ = sess.run(\n [losses[dataset_name], train_ops[dataset_name]])\n total_loss += alpha * loss_v\n step = sess.run(global_step_tensor)\n num_iter += 1\n assert num_iter > 0\n\n train_loss = float(total_loss) / float(num_iter)\n\n 
if args.summaries_dir:\n train_loss_summary = tf.Summary(\n value=[\n tf.Summary.Value(tag=\"loss\", simple_value=train_loss)])\n train_file_writer.add_summary(\n train_loss_summary, global_step=step)\n\n # Evaluate held-out tuning metric\n # if not args.test: # Validation mode\n # Get performance metrics on each dataset\n for dataset_name in args.datasets:\n _pred_op = model_info[dataset_name]['valid_pred_op']\n _eval_labels = model_info[dataset_name]['valid_batch'][\n args.label_key]\n _eval_iter = model_info[dataset_name]['valid_iter']\n _get_topic_op = model_info[dataset_name]['valid_topic_op']\n _loss_op = model_info[dataset_name]['valid_loss_op']\n _metrics = compute_held_out_performance(sess,\n _pred_op,\n _eval_labels,\n _eval_iter,\n metrics=dataset_info[\n dataset_name][\n 'metrics'],\n labels=dataset_info[\n dataset_name][\n 'labels'],\n args=args,\n get_topic_op=_get_topic_op,\n topic_path=dataset_info[\n dataset_name][\n 'topic_path'],\n eval_loss_op=_loss_op)\n model_info[dataset_name]['valid_metrics'] = _metrics\n\n end_time = time()\n elapsed = end_time - start_time\n\n # Manually compute the validation loss since each dataset is iterated through once\n # in a serial manner and not \"in parallel\" (i.e., a batch from each)\n valid_loss = 0.0\n for (dataset_name, alpha) in zip(*[args.datasets, args.alphas]):\n valid_loss += float(alpha) * \\\n model_info[dataset_name]['valid_metrics'][\n 'eval_loss']\n\n main_task_tuning_metric = model_info[args.datasets[0]\n ]['valid_metrics'][args.tuning_metric]\n\n if args.summaries_dir:\n valid_loss_summary = tf.Summary(\n value=[\n tf.Summary.Value(tag=\"loss\", simple_value=valid_loss)])\n valid_file_writer.add_summary(\n valid_loss_summary, global_step=step)\n valid_main_task_tuning_metric_summary = tf.Summary(value=[\n tf.Summary.Value(tag=\"main-task-\" + args.tuning_metric,\n simple_value=main_task_tuning_metric)])\n valid_file_writer.add_summary(\n valid_main_task_tuning_metric_summary,\n global_step=step)\n\n if (\n main_task_tuning_metric >= args.early_stopping_acc_threshold) and (\n len(main_task_dev_tuning_metric) >= args.patience) and (\n main_task_tuning_metric < main_task_dev_tuning_metric[\n -args.patience]):\n print(\n \"Stopping early at epoch {} (patience={}, early stopping acc threshold={})\".format(\n epoch, args.patience,\n args.early_stopping_acc_threshold))\n stopping_criterion_reached = True\n\n main_task_dev_tuning_metric.append(main_task_tuning_metric)\n\n if args.reporting_metric != \"Acc\":\n main_task_performance = \\\n model_info[args.datasets[0]]['valid_metrics'][\n args.reporting_metric]\n if args.summaries_dir:\n valid_main_task_performance_summary = tf.Summary(value=[\n tf.Summary.Value(\n tag=\"main-task-{}\".format(args.reporting_metric),\n simple_value=main_task_performance)])\n valid_file_writer.add_summary(\n valid_main_task_performance_summary,\n global_step=step)\n\n # Log performance(s)\n str_ = '[epoch=%d/%d step=%d (%d s)] train_loss=%s valid_loss=%s (per batch)' % (\n epoch, args.num_train_epochs, np.asscalar(step), elapsed,\n train_loss, valid_loss)\n\n for dataset_name in args.datasets:\n _num_eval_total = model_info[dataset_name]['valid_metrics'][\n 'ntotal']\n # TODO use other metric here for tuning\n _eval_tuning_metric = model_info[dataset_name]['valid_metrics'][\n args.tuning_metric]\n # _eval_align_acc = model_info[dataset_name]['valid_metrics'][\n # 'aligned_accuracy']\n\n str_ += '\\n(%s) ' % dataset_name\n for m, s in model_info[dataset_name]['valid_metrics'].items():\n if 
(dataset_name == args.datasets[0]) and (\n m == args.reporting_metric): # main task\n str_ += '**%s=%f** ' % (m, s)\n elif m == args.tuning_metric:\n str_ += '*%s=%f* ' % (m, s)\n elif m == 'Confusion_Matrix':\n pass\n else:\n str_ += '%s=%f ' % (m, s)\n if 'Confusion_Matrix' in model_info[dataset_name][\n 'valid_metrics']:\n str_ += 'Confusion_Matrix:\\n'\n str_ += '\\n'.join(' '.join('%4d' % x for x in y) for y in\n model_info[dataset_name]['valid_metrics'][\n 'Confusion_Matrix'])\n\n # Track best-performing epoch for each dataset\n # use the newest best epoch for test\n if _eval_tuning_metric >= best_eval_performance[dataset_name][\n args.tuning_metric]:\n best_eval_performance[dataset_name][args.tuning_metric] = \\\n _eval_tuning_metric\n best_eval_performance[dataset_name][\"performance\"] = \\\n model_info[dataset_name]['valid_metrics'].copy()\n best_eval_performance[dataset_name][\"epoch\"] = epoch\n # save best model\n saver.save(sess.raw_session(),\n model_info[dataset_name]['checkpoint_path'])\n\n # # test\n # saver.save(sess.raw_session(), checkpoint_path)\n\n total_tuning_metric += _eval_tuning_metric\n\n # Track best-performing epoch for collection of datasets\n\n if total_tuning_metric >= best_total_tuning_metric:\n best_total_tuning_metric = total_tuning_metric\n best_tuning_metric_epoch = epoch\n best_epoch_results = str_\n if len(args.datasets) > 1:\n saver.save(sess.raw_session(),\n os.path.join(args.checkpoint_dir, 'MULT',\n 'model'))\n\n logging.info(str_)\n\n # Log dev results in a file\n with open(args.log_file, 'a') as f:\n f.write(str_ + '\\n')\n\n if stopping_criterion_reached:\n saver.save(sess.raw_session(),\n os.path.join(args.checkpoint_dir, 'early-stopping',\n 'model'))\n\n early_stopping_dev_results = str_\n # with open(args.log_file, 'a') as f:\n # f.write('\\nSTOPPED EARLY AFTER {} EPOCHS\\n'.format(epoch))\n # f.write(str_ + '\\n')\n break\n\n print(best_eval_performance)\n print(\n 'Best total {}: {} at epoch {}'.format(\n args.tuning_metric,\n best_total_tuning_metric,\n best_tuning_metric_epoch))\n print(best_epoch_results)\n\n with open(args.log_file, 'a') as f:\n # f.write(best_eval_acc + '\\n')\n # f.write('Best total accuracy: {} at epoch {}'.format(best_total_acc,\n # best_total_acc_epoch))\n f.write('\\nBest single-epoch performance across all datasets\\n')\n f.write(best_epoch_results + '\\n\\n')\n\n # Write (add) the result to a common report file\n with open(args.log_file, 'a') as f:\n for dataset in best_eval_performance.keys():\n f.write(str(dataset))\n f.write(\" \")\n f.write(\"\\n\")\n for dataset, values in best_eval_performance.items():\n f.write(\n 'Metrics on highest-{} epoch for dataset {}: {}\\n'.format(\n args.tuning_metric, dataset, values))\n\n f.write('Best total {}: {} at epoch {}\\n\\n'.format(\n args.tuning_metric, best_total_tuning_metric,\n best_tuning_metric_epoch))\n if stopping_criterion_reached:\n f.write('STOPPED EARLY AFTER {} EPOCHS\\n'.format(epoch))\n f.write(early_stopping_dev_results + '\\n\\n')\n\n if args.summaries_dir:\n train_file_writer.close()\n valid_file_writer.close()", "def build_model(self):\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n\n if self.config.optimizer == 'sgd':\n self.optimizer = tf.keras.optimizers.SGD(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'rms':\n self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adam':\n self.optimizer = 
tf.keras.optimizers.Adam(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adagrad':\n self.optimizer = tf.keras.optimizers.Adagrad(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adadelta':\n self.optimizer = tf.keras.optimizers.Adadelta(learning_rate=self.config.learning_rate)\n else:\n raise NotImplementedError(\"No support for %s optimizer\" % self.config.optimizer)\n \n if self.config.optimizer in ['rms', 'adagrad', 'adadelta']:\n with tf.device('cpu:0'):\n self.model.def_parameters()\n else:\n self.model.def_parameters()\n\n self.config.summary()\n self.config.summary_hyperparameter(self.model.model_name)", "def train(self, trnM, trnL):\n print 'Training ...'\n self.clf.fit(trnM, trnL)", "def __init__(self, conf):\n self.model_conf = conf[\"model\"]\n self.epochs = self.model_conf.getint(\"n_epochs\")\n self.epoch = self.model_conf.getint(\"epoch_start\")\n self.batch_size = self.model_conf.getint(\"batch_size\")\n self.criterion = nn.CrossEntropyLoss()\n self.device = torch.device(self.model_conf.get('device'))\n #self.model = (\n # eval(self.model_conf.get('name'))(self.model_conf).to(self.device)\n #)\n self.model = nn.DataParallel(\n eval(self.model_conf.get('name'))(self.model_conf).to(self.device)\n )\n total_params = sum(p.numel() for p in self.model.parameters())\n print(\"Created model {}: {} parameters\"\n .format(self.model_conf.get('name'), total_params))\n if self.model_conf.get(\"optim\") == 'SGD':\n self.optimizer = optim.SGD(\n self.model.parameters(),\n lr=self.model_conf.getfloat(\"learning_rate\"),\n momentum=self.model_conf.getfloat(\"momentum\"),\n weight_decay=self.model_conf.getfloat(\"weight_decay\"))\n elif self.model_conf.get(\"optim\") == 'Adam':\n self.optimizer = optim.Adam(\n self.model.parameters(),\n lr=self.model_conf.getfloat(\"learning_rate\"),\n betas=json.loads(self.model_conf.get(\"betas\")))\n else:\n raise ValueError('Only SGD is supported')\n\n if self.model_conf.get(\"checkpoint\") is not None:\n self.load_checkpoint(self.model_conf.get(\"checkpoint\"))\n\n self.checkpoints_path = conf.get(\"paths\", \"checkpoints\")\n self.results_path = conf.get(\"paths\", \"results\")\n self.best_accuracy = 0\n self.train_size = None\n self.valid_size = None\n self.iteration_print_freq = conf.getint(\"log\", \"iteration_print_freq\")", "def train_model(config: dict, load_weights=None, resume_epoch=None):\n # Set GPU memory optimization\n try:\n physical_devices = tf.config.list_physical_devices(\"GPU\")\n for index in range(len(physical_devices)):\n try:\n tf.config.experimental.set_memory_growth(physical_devices[index], True)\n\n except Exception as err:\n print(\"[WARN]: Failed to set memory growth for {0}\".format(physical_devices[index]))\n print(\"[WARN]: Error\", err, \" .Skipping memory optimization\")\n\n except Exception as err:\n print(\"[WARN]: memory optimization failed. Error:\", err, \" . 
Skipping!\")\n\n # Set up random states\n np.random.seed(100)\n random.seed(100)\n tf.random.set_seed(100)\n\n # Get the required configurations\n no_of_epochs = config[\"no_of_epochs\"]\n steps_per_epoch = config[\"steps_per_epoch\"] if config[\"steps_per_epoch\"] > 0 else None\n\n train_batch_size = config[\"train_batch_size\"]\n val_batch_size = config[\"val_batch_size\"]\n test_batch_size = config[\"test_batch_size\"]\n\n model_name = config[\"model_name\"]\n\n # Create the dataset\n dataset_path = config[\"dataset_path\"]\n dataset_family = config[\"dataset_family\"]\n\n # get width and height\n image_width = config[\"image_width\"]\n image_height = config[\"image_height\"]\n initial_width = config[\"initial_width\"]\n initial_height = config[\"initial_height\"]\n\n model_name = model_name + \"_\" + dataset_family\n\n print(\"[INFO]: Using Configuration: \\n\", config)\n\n # Set up environments\n folders = [\n \"./outputs/model_save/{0}/checkpoints/\".format(model_name),\n \"./outputs/output_logs/{0}/csv_log/\".format(model_name),\n \"./outputs/model_save/{0}/best_model/\".format(model_name),\n \"./outputs/model_save/{0}/saved_model/\".format(model_name),\n \"./outputs/output_logs/{0}/graphs/\".format(model_name)\n ]\n print(\"[INFO]: Setting up folders: \")\n os_utilities.make_directories(paths=folders)\n\n # Load the dataset\n print(\"[INFO]: Loading dataset\")\n if dataset_family == \"CVC-ClinicDB\":\n X, y = du.get_cvc_clinic_datapath(dataset_path)\n elif dataset_family == \"Kvasir-Seg\":\n X, y = du.get_kvasir_seg_datapath(dataset_path)\n else:\n print(\"[ERROR]: {0} dataset family is unrecognized or not supported!\".format(dataset_family))\n raise NotImplementedError\n\n X_train, X_sided, y_train, y_sided = train_test_split(X, y, random_state=100, test_size=0.2)\n X_val, X_test, y_val, y_test = train_test_split(X_sided, y_sided, random_state=100, test_size=0.5)\n\n print(\"[INFO]: Training set size: \", len(X_train))\n print(\"[INFO]: Validation set size: \", len(X_val))\n print(\"[INFO]: Testing set size: \", len(X_test))\n\n print(\"[INFO]: Loading Training set\")\n train_datagen = du.DataGenerator(X_train,\n y_train,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=train_batch_size,\n dataset_family=dataset_family,\n initial_size=(initial_width, initial_height),\n aug_config_path=\"./augmentation_config.yaml\",\n shuffle=True)\n\n print(\"[INFO]: Loading Validation set\")\n val_datagen = du.DataGenerator(X_val,\n y_val,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=val_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Setting tf.data pipeline\")\n train_steps = len(train_datagen) if steps_per_epoch is None else int(steps_per_epoch)\n val_steps = len(val_datagen)\n\n train_dataset = train_datagen.get_tf_data()\n val_dataset = val_datagen.get_tf_data()\n\n # Get the model, loss and metrics\n print(\"[INFO]: Building the model - {0}\".format(model_name))\n model = models.ModelSelector(config)\n\n # Load the weights if available\n if load_weights is not None:\n print(\"[INFO]: Load the weights from {0}\".format(load_weights))\n model.load_weights(load_weights)\n\n # Setup Callbacks\n print(\"[INFO]: Setting up training Callbacks and Optimizers. 
Its almost done\")\n resume_epoch = 0 if resume_epoch is None else resume_epoch\n overwrite = True if resume_epoch == 0 else False\n\n train_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/train_log.csv\".format(model_name),\n overwrite=overwrite)\n valid_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/valid_log.csv\".format(model_name),\n overwrite=overwrite)\n lr_reducer = callbacks.ReduceLROnPlateau(learning_rate=float(config[\"learning_rate\"]),\n patience=4,\n decay_rate=1E-1,\n delta=0.0001,\n min_lr=1E-7,\n mode=\"min\")\n\n model_save_path = \"./outputs/model_save/{0}/\".format(model_name)\n\n # Setup Optimizer\n optimizer = tf.keras.optimizers.Adam(learning_rate=float(config[\"learning_rate\"]))\n\n # Check for monitor\n monitor_variable = 100.0 # Initialize a start max\n\n print(\"[INFO]: Setting up metrics\")\n loss_avg = tf.keras.metrics.Mean(name=\"loss\")\n f1_score_metric = tf.keras.metrics.Mean(name=\"f1_score\")\n iou_coe_metric = tf.keras.metrics.Mean(name=\"iou_coe\")\n dice_coe_metric = tf.keras.metrics.Mean(name=\"dice_coe\")\n\n # Set up a custom train loop\n print(\"[INFO]: Beginning training loops\")\n # Iterate epoch wise\n for epoch in range(resume_epoch, no_of_epochs):\n\n print(\"Training {0}/{1}\".format(epoch + 1, no_of_epochs))\n\n try:\n # Training-loop == using batches\n train_loss, train_f1_score, train_iou, train_dice = train(config[\"model_name\"],\n model,\n train_dataset,\n train_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=optimizer,\n epoch=epoch + 1,\n total_epochs=no_of_epochs)\n\n train_tracker = {\n \"train_loss\": [train_loss],\n \"train_f1_score\": [train_f1_score],\n \"train_iou_coe\": [train_iou],\n \"train_dice_coe\": [train_dice]\n }\n\n # Validation loop == using batches\n val_loss, val_f1_score, val_iou, val_dice = train(config[\"model_name\"],\n model,\n val_dataset,\n val_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n val_tracker = {\n \"val_loss\": [val_loss],\n \"val_f1_score\": [val_f1_score],\n \"val_iou_coe\": [val_iou],\n \"val_dice_coe\": [val_dice]\n }\n\n model.save_weights(model_save_path + \"checkpoints/{0}_ckpt.h5\".format(model_name))\n print(\"[INFO]: Epoch {0}/{1} - \\nTrain evaluation: {2}, \\nValidation evaluation: {3}\".\n format(epoch + 1, no_of_epochs, train_tracker, val_tracker))\n train_csv_logger.log(train_tracker)\n valid_csv_logger.log(val_tracker)\n\n # Save the best model\n if monitor_variable > val_loss:\n monitor_variable = val_loss\n model.save_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n # LR Reduce\n lr_reducer.check_lr(monitor_variable=val_loss, optimizer=optimizer)\n\n except KeyboardInterrupt:\n print(\"[INFO]: Interrupted Training. Trying to save model\")\n model.save_weights(model_save_path + \"{0}_{1}_interrupted.h5\".format(model_name, epoch + 1))\n print(\"[INFO]: Attempting to run test on the best model so far!\")\n break\n\n except Exception as err:\n print(\"[ERROR]: Unexpected Critical Error: \", err)\n print(\"[ERROR]: Trying to save the weights\")\n model.save_weights(model_save_path + \"{0}_{1}_critical.h5\".format(model_name, epoch + 1))\n traceback.print_exc()\n sys.exit(2)\n\n print(\"[INFO]: Training completed. 
Saving model\")\n model.save_weights(model_save_path + \"saved_model/{0}.h5\".format(model_name))\n\n print(\"[INFO]: Testing model\")\n print(\"[INFO]: Loading Best saved model:\")\n model.load_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n print(\"[INFO]: Loading the test set\")\n test_datagen = du.DataGenerator(X_test,\n y_test,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=test_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Loading TF data\")\n test_dataset = test_datagen.get_tf_data()\n test_steps = len(test_datagen)\n\n print(\"[INFO]: Testing Initiated\")\n test_loss, test_f1_score, test_iou, test_dice = train(config[\"model_name\"],\n model,\n test_dataset,\n test_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n test_tracker = {\n \"test_loss\": [test_loss],\n \"test_f1_score\": [test_f1_score],\n \"test_iou_coe\": [test_iou],\n \"test_dice_coe\": [test_dice]\n }\n print(\"[INFO]: Test Results: \\n\", test_tracker)\n with open(\"./outputs/output_logs/{0}/test_results.txt\".format(model_name), mode=\"w\") as f:\n f.write(\"Dumped Test Results for the model {0}\\n\".format(model_name))\n for k, v in test_tracker.items():\n f.write(\"{0} => {1}\\n\".format(k, v))\n\n print(\"[INFO]: Closing operations\")", "def initialize_model(model_name, num_classes, feature_extract, verbose=False):\n\n model_ft = None\n\n if model_name == \"resnet\":\n \"\"\" Resnet18\n \"\"\"\n model_ft = models.resnet18(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"alexnet\":\n \"\"\" Alexnet\n \"\"\"\n model_ft = models.alexnet(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"vgg\":\n \"\"\" VGG11_bn\n \"\"\"\n model_ft = models.vgg11_bn(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"squeezenet\":\n \"\"\" Squeezenet\n \"\"\"\n with warnings.catch_warnings(): # temporarily suppress warnings about deprecated functions\n warnings.simplefilter(\"ignore\")\n model_ft = models.squeezenet1_0(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1, 1), stride=(1, 1))\n model_ft.num_classes = num_classes\n\n elif model_name == \"densenet\":\n \"\"\" Densenet\n \"\"\"\n model_ft = models.densenet121(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier.in_features\n model_ft.classifier = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"inception\":\n \"\"\" Inception v3\n Be careful, expects (299,299) sized images and has auxiliary output\n \"\"\"\n model_ft = models.inception_v3(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n # Handle the auxilary net\n num_ftrs = model_ft.AuxLogits.fc.in_features\n model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)\n # Handle the primary net\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, 
num_classes)\n\n else: # Unreachable\n exit()\n\n # Gather the parameters to be optimized\n params_to_update = list(filter(lambda p: p.requires_grad, model_ft.parameters()))\n\n # Print model info\n if verbose:\n print()\n print(model_ft)\n print()\n print(\"Params to learn:\")\n for name, param in model_ft.named_parameters():\n if param.requires_grad:\n print('\\t', name)\n\n return model_ft, params_to_update", "def train_network(self, batch_size, epochs):\n\n if self.eq_train: self.model.fit([self.X_train_high_level, self.X_train_low_level], self.y_train, epochs=epochs, batch_size=batch_size, sample_weight=self.train_weights_eq) \n else: self.model.fit([self.X_train_high_level, self.X_train_low_level], self.y_train, epochs=epochs, batch_size=batch_size, sample_weight=self.train_weights)", "def initialize_setup(self, init_lr):\n param_list = []\n for name, param in self.model.named_parameters():\n if param.requires_grad:\n param_list.append(param)\n\n self.optimizer = torch.optim.AdamW(\n param_list, lr=init_lr, eps=1e-6)\n\n self.optim_scheduler = get_linear_schedule_with_warmup(\n self.optimizer, num_warmup_steps=0,\n num_training_steps=len(self.train_examples) * self.max_epochs)\n\n if not path.exists(self.model_path):\n torch.manual_seed(self.seed)\n np.random.seed(self.seed)\n # Try to initialize the mention model part\n if path.exists(self.pretrained_mention_model):\n print(\"Found pretrained model!!\")\n checkpoint = torch.load(self.pretrained_mention_model)\n self.model.load_state_dict(checkpoint['model'], strict=False)\n else:\n logger.info('Loading previous model: %s' % self.model_path)\n # Load model\n self.load_model(self.model_path)", "def train(self) -> None:\n\n # Check if in the saved model path there is already a trained model\n if self.config.TRN_HYPERP[\"save_path\"]:\n if tf.saved_model.contains_saved_model(self.config.TRN_HYPERP[\"save_path\"]):\n print(\"INFO: An existing saved model will be used for inference\\n\")\n else:\n params = {**self.config.TRN_HYPERP, **self.config.DATASET_HYPERP}\n trainer = Trainer(**params)\n\n print(f\"INFO: Starting training ... \\n\")\n start_time = time.time()\n trainer.train()\n print(f\"\\nINFO: Training completed in {round((time.time() - start_time)/60, 2)} minutes.\\n\")\n\n # Instantiate the saved translator for inference\n saved_path = self.config.TRN_HYPERP[\"save_path\"]\n self.saved_translator = tf.saved_model.load(saved_path)\n else:\n print(\"INFO: Path to save model wasn't provided in config file. Can't train the model\\n\")", "def initialize(self):\n LOG.info(\"Initializing Model.\")\n self.model = self.convert(df=self.training_df)\n if self.bootstraps is not None:\n LOG.info(\"Bootstrapping Data.\")\n self.bootstrap_data()", "def _setup(self, config: dict):\n\n #This is important! 
When this is declared outside, the tune can give errors.\n import tensorflow as tf\n\n ## CONFIG\n batch_size = 30\n\n self.train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size)\n self.val_ds = tf.data.Dataset.from_tensor_slices((x_val, y_val)).batch(batch_size)\n\n self.model = Model1DCNN(num_classes=num_classes, dilations=config[\"num_dilations\"], filter_size=config[\"filter_size\"])\n self.optimizer = tf.keras.optimizers.Adam(lr=config[\"lr\"])\n self.train_loss = tf.keras.metrics.Mean(name=\"train_loss\")\n self.val_loss = tf.keras.metrics.Mean(name=\"val_loss\")\n self.train_acc = tf.keras.metrics.SparseCategoricalAccuracy()\n self.val_acc = tf.keras.metrics.SparseCategoricalAccuracy()\n self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)\n\n @tf.function\n def train_step(x, y):\n \"\"\"\n does a single training step with the provided batch and updates the weights corresponding to the\n loss defined in the self.loss_object function\n\n :param batch:\n \"\"\"\n with tf.GradientTape() as tape:\n predictions = self.model(x)\n loss = self.loss_object(y, predictions)\n gradients = tape.gradient(loss, self.model.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))\n\n self.train_loss(loss)\n self.train_acc(y, predictions)\n\n @tf.function\n def val_step(x, y):\n \"\"\"\n does a single validation step with the provided batch\n\n :param batch:\n \"\"\"\n predictions = self.model(x)\n loss = self.loss_object(y, predictions)\n\n self.val_loss(loss)\n self.val_acc(y, predictions)\n\n self.tf_train_step = train_step\n self.tf_val_step = val_step", "def TrainStudent(self, model_name, teacher_model_name, **kwargs):\n batch_size = kwargs.pop(\"batch_size\", 64)\n model_save_path = kwargs.pop('model_save_path', \"./checkpoints/student/\")\n teacher_model_path = kwargs.pop(\"teacher_model_path\", \"./checkpoints/teacher/\")\n temp = kwargs.pop(\"temp\", 10)\n num_epoch = kwargs.pop(\"num_epoch\", 20)\n basic_learning_rate = kwargs.pop(\"basic_learning_rate\", 5e-4)\n record_save_path = kwargs.pop(\"record_save_path\", \"./records/student\")\n is_dev = kwargs.pop(\"dev_mode\", False)\n learning_rate_decay = kwargs.pop(\"learning_rate_decay\", 0.01)\n reg_scale = kwargs.pop(\"reg_scale\", 1e-1)\n soft_target_scale = kwargs.pop(\"soft_target_scale\", 1)\n verbose = kwargs.pop(\"verbose\", False)\n\n # Do some check\n if not os.path.exists(teacher_model_path):\n raise RuntimeError(\"Cannot find pretrained teacher model in '{}'\".format(teacher_model_path))\n if not os.path.exists(model_save_path):\n os.makedirs(model_save_path)\n if not os.path.exists(record_save_path):\n os.makedirs(record_save_path)\n\n model_save_path = os.path.join(model_save_path, \"{}.ckpt\".format(model_name))\n teacher_model_path = os.path.join(teacher_model_path, \"{}.ckpt\".format(teacher_model_name))\n\n tf.reset_default_graph()\n \n # Get training dataset\n if is_dev:\n train_data, train_label = self.data_manager.dev_data, self.data_manager.dev_label\n else:\n train_data, train_label = self.data_manager.train_data, self.data_manager.train_label\n \n num_train_data = train_data.shape[0]\n\n # The input of model\n X = tf.placeholder(train_data.dtype, [None]+list(train_data.shape[1:]), name=\"input_data\")\n y = tf.placeholder(train_label.dtype, [None]+list(train_label.shape[1:]), name=\"input_label\")\n is_train = tf.placeholder(tf.bool, name=\"is_train\")\n \n dataset = tf.data.Dataset.from_tensor_slices((X, y))\n 
dataset = dataset.shuffle(buffer_size=8000)\n batched_dataset = dataset.batch(batch_size)\n\n iterator = batched_dataset.make_initializable_iterator()\n batch_data, batch_label = iterator.get_next()\n\n # Get the teacher and student model\n regularizer = tf.contrib.layers.l2_regularizer(scale=reg_scale)\n with tf.variable_scope('student_model', regularizer=regularizer):\n logits, probs = self.student_model(batch_data, is_train=is_train)\n\n with tf.variable_scope('teacher_model'):\n teacher_logits, teacher_probs = self.teacher_model(batch_data, is_train=False, trainable=False, temp=temp)\n\n result = tf.argmax(logits, axis=1)\n correct_num = tf.reduce_sum(tf.cast(tf.equal(result, tf.argmax(batch_label, axis=1)), tf.float32))\n\n teacher_variabels = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"teacher_model\")\n student_variabels = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"student_model\")\n teacher_loader = tf.train.Saver(teacher_variabels)\n student_saver = tf.train.Saver(student_variabels)\n \n # Training part\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=batch_label, name=\"hard_loss\"))\n reg_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, 'teacher_model'))\n loss += reg_loss\n soft_target_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=teacher_probs, name=\"soft_loss\"))\n loss += soft_target_scale * soft_target_loss\n \n global_step = tf.get_variable('global_step', initializer=0.0, trainable=False)\n learning_rate = tf.train.natural_exp_decay(\n basic_learning_rate, global_step,\n decay_rate=learning_rate_decay,\n name='learning_rate', decay_steps=1\n )\n \n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss)\n global_step_add = tf.assign_add(global_step, 1)\n\n train_acc_hist = []\n val_acc_hist = []\n train_loss_hist = []\n best_acc = 0.0\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n teacher_loader.restore(sess, teacher_model_path)\n for i in range(num_epoch):\n sess.run(iterator.initializer, feed_dict={X:train_data, y:train_label})\n cnt = 0\n total_correct_cnt = 0\n total_loss, acc = 0.0, 0.0\n while True:\n try:\n curr_loss, train, right_num, curr_result = sess.run(\n [loss, train_op, correct_num, result],\n feed_dict={is_train: True}\n )\n total_correct_cnt += right_num\n total_loss += curr_loss\n cnt += 1\n except tf.errors.OutOfRangeError:\n acc = total_correct_cnt * 1.0 / num_train_data\n last_loss = total_loss / cnt \n if verbose:\n div = \"===========================\"\n print(\"{}\\nEpoch {}/{}\\t\\tloss: {}\\t\\tacc: {}\".format(div, i+1, num_epoch, last_loss, acc))\n train_acc_hist.append(acc)\n train_loss_hist.append(last_loss)\n sess.run([global_step_add])\n if verbose:\n last_global_step, last_learning_rate = sess.run([global_step, learning_rate])\n print(\"learning_rate: {}\".format(last_learning_rate))\n break\n \n # Validation\n sess.run(iterator.initializer, feed_dict={X:self.data_manager.val_data, y:self.data_manager.val_label})\n acc = 0.0\n total_correct_cnt = 0\n while True:\n try:\n right_num = sess.run([correct_num], feed_dict={is_train:False})\n total_correct_cnt += right_num[0]\n except tf.errors.OutOfRangeError:\n acc = total_correct_cnt * 1.0 / self.data_manager.val_data.shape[0]\n if verbose:\n print(\"Validation acc: {}\".format(acc))\n val_acc_hist.append(acc)\n if acc > best_acc:\n best_acc = acc\n student_saver.save(sess, 
model_save_path)\n break\n # Write train process record\n self._writeRecord(record_save_path, \"{}_train_accuracy\".format(model_name), train_acc_hist)\n self._writeRecord(record_save_path, \"{}_validation_accuracy\".format(model_name), val_acc_hist)\n self._writeRecord(record_save_path, \"{}_train_loss\".format(model_name), train_loss_hist)\n if verbose:\n print(\"Finish Training Student Model! The Best Validation Accuracy is: {}\".format(best_acc))", "def _train_model(\n self,\n dataset: DatasetEntity,\n ):\n logger.info(\"init data cfg.\")\n self._data_cfg = ConfigDict(data=ConfigDict())\n\n for cfg_key, subset in zip(\n [\"train\", \"val\", \"unlabeled\"],\n [Subset.TRAINING, Subset.VALIDATION, Subset.UNLABELED],\n ):\n subset = get_dataset(dataset, subset)\n if subset and self._data_cfg is not None:\n self._data_cfg.data[cfg_key] = ConfigDict(\n otx_dataset=subset,\n labels=self._labels,\n )\n\n self._is_training = True\n\n self._init_task()\n\n cfg = self.configure(True, None)\n logger.info(\"train!\")\n\n timestamp = time.strftime(\"%Y%m%d_%H%M%S\", time.localtime())\n\n # Environment\n logger.info(f\"cfg.gpu_ids = {cfg.gpu_ids}, distributed = {cfg.distributed}\")\n env_info_dict = collect_env()\n env_info = \"\\n\".join([(f\"{k}: {v}\") for k, v in env_info_dict.items()])\n dash_line = \"-\" * 60 + \"\\n\"\n logger.info(f\"Environment info:\\n{dash_line}{env_info}\\n{dash_line}\")\n\n # Data\n datasets = [build_dataset(cfg.data.train)]\n\n if self._train_type == TrainType.Semisupervised:\n # forward the knowledge of num iters per epoch to model for filter loss\n bs_per_gpu = cfg.data.train_dataloader[\"samples_per_gpu\"]\n actual_bs = bs_per_gpu * torch.distributed.get_world_size() if cfg.distributed else bs_per_gpu\n cfg.model.num_iters_per_epoch = math.ceil(len(datasets[0]) / actual_bs)\n\n # FIXME: Currently segmentor does not support multi batch evaluation.\n # For the Self-SL case, there is no val data. 
So, need to check the\n\n if \"val\" in cfg.data and \"val_dataloader\" in cfg.data:\n cfg.data.val_dataloader[\"samples_per_gpu\"] = 1\n\n # Target classes\n if \"task_adapt\" in cfg:\n target_classes = cfg.task_adapt.final\n else:\n target_classes = datasets[0].CLASSES\n\n # Metadata\n meta = dict()\n meta[\"env_info\"] = env_info\n meta[\"seed\"] = cfg.seed\n meta[\"exp_name\"] = cfg.work_dir\n if cfg.checkpoint_config is not None:\n cfg.checkpoint_config.meta = dict(\n mmseg_version=__version__ + get_git_hash()[:7],\n CLASSES=target_classes,\n )\n\n # Model\n model = self.build_model(cfg, fp16=cfg.get(\"fp16\", False), is_training=self._is_training)\n model.train()\n model.CLASSES = target_classes\n\n if cfg.distributed:\n convert_sync_batchnorm(model)\n\n validate = bool(cfg.data.get(\"val\", None))\n\n if self._hyperparams.learning_parameters.auto_adapt_batch_size != BatchSizeAdaptType.NONE:\n train_func = partial(train_segmentor, meta=deepcopy(meta), model=deepcopy(model), distributed=False)\n adapt_batch_size(\n train_func,\n cfg,\n datasets,\n isinstance(self, NNCFBaseTask), # nncf needs eval hooks\n not_increase=(self._hyperparams.learning_parameters.auto_adapt_batch_size == BatchSizeAdaptType.SAFE),\n )\n\n train_segmentor(\n model,\n datasets,\n cfg,\n distributed=cfg.distributed,\n validate=validate,\n timestamp=timestamp,\n meta=meta,\n )\n\n # Save outputs\n output_ckpt_path = os.path.join(cfg.work_dir, \"latest.pth\")\n best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, \"best_mDice_*.pth\"))\n if len(best_ckpt_path) > 0:\n output_ckpt_path = best_ckpt_path[0]\n best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, \"best_mIoU_*.pth\"))\n if len(best_ckpt_path) > 0:\n output_ckpt_path = best_ckpt_path[0]\n return dict(\n final_ckpt=output_ckpt_path,\n )", "def __init__(self, model_type, model_cfg, training_cfg):\n super().__init__()\n self.save_hyperparameters()\n\n self.model_cfg = model_cfg\n self.training_cfg = training_cfg\n \n if model_type == \"ConvLSTM\":\n self.model = Conv_LSTM(input_dim=self.model_cfg[\"input_channels\"],\n output_dim=self.model_cfg[\"output_channels\"],\n hidden_dims=self.model_cfg[\"hidden_channels\"],\n big_mem=self.model_cfg[\"big_mem\"],\n num_layers=self.model_cfg[\"n_layers\"],\n kernel_size=self.model_cfg[\"kernel\"],\n memory_kernel_size=self.model_cfg[\"memory_kernel\"],\n dilation_rate=self.model_cfg[\"dilation_rate\"],\n baseline=self.training_cfg[\"baseline\"],\n layer_norm_flag=self.model_cfg[\"layer_norm\"],\n img_width=self.model_cfg[\"img_width\"],\n img_height=self.model_cfg[\"img_height\"],\n peephole=self.model_cfg[\"peephole\"])\n elif model_type == \"AutoencLSTM\":\n self.model = AutoencLSTM(input_dim=self.model_cfg[\"input_channels\"],\n output_dim=self.model_cfg[\"output_channels\"],\n hidden_dims=self.model_cfg[\"hidden_channels\"],\n big_mem=self.model_cfg[\"big_mem\"],\n num_layers=self.model_cfg[\"n_layers\"],\n kernel_size=self.model_cfg[\"kernel\"],\n memory_kernel_size=self.model_cfg[\"memory_kernel\"],\n dilation_rate=self.model_cfg[\"dilation_rate\"],\n baseline=self.training_cfg[\"baseline\"],\n layer_norm_flag=self.model_cfg[\"layer_norm\"],\n img_width=self.model_cfg[\"img_width\"],\n img_height=self.model_cfg[\"img_height\"],\n peephole=self.model_cfg[\"peephole\"])\n elif model_type == \"ConvTransformer\":\n self.model = ENS_Conv_Transformer(num_hidden=self.model_cfg[\"num_hidden\"],\n output_dim=self.model_cfg[\"output_channels\"],\n depth=self.model_cfg[\"depth\"],\n 
dilation_rate=self.model_cfg[\"dilation_rate\"],\n num_conv_layers=self.model_cfg[\"num_conv_layers\"],\n kernel_size=self.model_cfg[\"kernel_size\"],\n img_width=self.model_cfg[\"img_width\"],\n non_pred_channels=self.model_cfg[\"non_pred_channels\"],\n num_layers_query_feat=self.model_cfg[\"num_layers_query_feat\"],\n in_channels=self.model_cfg[\"in_channels\"],\n baseline=self.training_cfg[\"baseline\"])\n self.baseline = self.training_cfg[\"baseline\"]\n self.future_training = self.training_cfg[\"future_training\"]\n self.learning_rate = self.training_cfg[\"start_learn_rate\"]\n self.training_loss = get_loss_from_name(self.training_cfg[\"training_loss\"])\n self.test_loss = get_loss_from_name(self.training_cfg[\"test_loss\"])", "def __init__(self):\n \n self.model = Net()\n\n if torch.cuda.is_available():\n map_location=torch.device('cuda')\n else:\n map_location=torch.device('cpu')\n\n # load parameters\n self.model.load_state_dict(torch.load('model.pt',\n map_location=map_location)) \n \n if torch.cuda.is_available():\n self.model.cuda()\n else:\n self.model.cpu()\n \n self.model.eval()", "def __init__(self,seed=None,batch_size=60, use_earlyStopping=False, loaded_model=None):\n self.batch_size = batch_size\n self.pruning_rates = PRUNING_PERCENTAGES\n self.early_stopping = use_earlyStopping\n\n if loaded_model == None:\n self.model = keras.Sequential()\n self.model.add(keras.layers.Flatten(input_shape=(28, 28)))\n for layer in LAYERS:\n (units, activation) = LAYERS[layer]\n self.model.add(keras.layers.Dense(units, activation=activation, kernel_initializer=tf.keras.initializers.glorot_normal(seed=None)))\n \n \n self.model.compile(optimizer=OPTIMIZER_FC,\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\n self.weights_init = self.get_weights()\n \n else:\n self.model = loaded_model\n\n if use_earlyStopping:\n self.es = EarlyStopping(monitor='val_loss', patience=10)", "def setupModel(cls, roadrunner, parameters, logger=Logger()):\r\n pp = parameters.valuesdict()\r\n for parameter in pp.keys():\r\n try:\r\n roadrunner.model[parameter] = pp[parameter]\r\n except Exception as err:\r\n msg = \"_modelFitterCore.setupModel: Could not set value for %s\" \\\r\n % parameter\r\n logger.error(msg, err)", "def __init__(\n self,\n hparams: argparse.Namespace,\n num_labels=None,\n mode=\"base\",\n config=None,\n tokenizer=None,\n model=None,\n **config_kwargs\n ):\n super().__init__()\n # TODO: move to self.save_hyperparameters()\n # self.save_hyperparameters()\n # can also expand arguments into trainer signature for easier reading\n\n self.save_hyperparameters(hparams)\n self.step_count = 0\n self.output_dir = Path(self.hparams.output_dir)\n cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None\n if config is None:\n self.config = AutoConfig.from_pretrained(\n self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,\n **({\"num_labels\": num_labels} if num_labels is not None else {}),\n cache_dir=cache_dir,\n **config_kwargs,\n )\n else:\n self.config: BartConfig = config\n\n extra_model_params = (\"encoder_layerdrop\", \"decoder_layerdrop\", \"dropout\", \"attention_dropout\")\n for p in extra_model_params:\n if getattr(self.hparams, p, None):\n assert hasattr(self.config, p), f\"model config doesn't have a `{p}` attribute\"\n setattr(self.config, p, getattr(self.hparams, p))\n\n if tokenizer is None:\n self.tokenizer = AutoTokenizer.from_pretrained(\n self.hparams.tokenizer_name if 
self.hparams.tokenizer_name else self.hparams.model_name_or_path,\n cache_dir=cache_dir,\n )\n else:\n self.tokenizer: BartTokenizer = tokenizer\n # self.model_type = MODEL_MODES[mode]\n if model is None:\n self.model = self.model_type.from_pretrained(\n self.hparams.model_name_or_path,\n from_tf=bool(\".ckpt\" in self.hparams.model_name_or_path),\n config=self.config,\n cache_dir=cache_dir,\n )\n else:\n self.model = model", "def _set_model(self):\n print(\"Setting up model...\")\n # Encoder\n inputs = Input(batch_shape=(None,) + self.input_shape)\n self.inputs=inputs\n # Instantiate encoder layers \n for i in range(len(self.filters)):\n if i==0:\n Q = Conv2D(self.filters[i], (self.KernelDim[i], self.KernelDim[i]), \n strides=(self.strides[i], self.strides[i]),padding='same')(inputs)\n Q = BatchNormalization()(Q)\n Q = Activation('relu')(Q)\n else:\n Q = Conv2D(self.filters[i], (self.KernelDim[i], self.KernelDim[i]), padding='same',\n strides=(self.strides[i], self.strides[i]))(Q) \n Q = BatchNormalization()(Q)\n Q = Activation('relu')(Q) \n \n Q_4 = Flatten()\n Q_5 = Dense(self.hidden_dim)\n Q_50= BatchNormalization()\n Q_51=Activation('relu')\n Q_6 = Dropout(self.dropout)\n Q_z_mean = Dense(self.latent_dim)\n Q_z_log_var = Dense(self.latent_dim)\n\n # Set up encoder\n flat = Q_4(Q)\n db = Q_5(flat)\n da = Q_50(db)\n dp = Q_51(da)\n hidden= Q_6(dp)\n\n # Parameters for continous latent distribution\n z_mean = Q_z_mean(hidden)\n z_log_var = Q_z_log_var(hidden)\n self.encoder =Model(inputs, z_mean)\n\n # Sample from latent distributions\n encoding = Lambda(self._sampling_normal, output_shape=(self.latent_dim,))([z_mean, z_log_var])\n self.encoding = encoding\n # Generator\n # Instantiate generator layers to be able to sample from latent\n # distribution later\n out_shape = (int(np.ceil(self.input_shape[0] / np.prod(self.strides) )), int(np.ceil(self.input_shape[1] / np.prod(self.strides))), self.filters[-1])\n \n G_0 = Dense(self.hidden_dim)\n G_00= BatchNormalization()\n G_01= Activation('relu')\n G_d = Dropout(self.dropout)\n G_1 = Dense(np.prod(out_shape))\n G_10= BatchNormalization()\n G_11= Activation('relu')\n G_2 = Reshape(out_shape)\n G=[]\n for i in range(len(self.filters)):\n if i==0:\n G_ = Conv2DTranspose(self.filters[-1], (self.KernelDim[-1], self.KernelDim[-1]), \n strides=(self.strides[-1], self.strides[-1]),padding='same')\n G.append(G_)\n G_ = BatchNormalization()\n G.append(G_)\n G_ = Activation('relu')\n G.append(G_) \n else:\n G_ = Conv2DTranspose(self.filters[-i-1], (self.KernelDim[-i-1], self.KernelDim[-i-1]), padding='same',\n strides=(self.strides[-i-1], self.strides[-i-1]))\n G.append(G_)\n G_ = BatchNormalization()\n G.append(G_)\n G_ = Activation('relu')\n G.append(G_) \n \n G_5_= BilinearUpsampling(output_size=(self.input_shape[0], self.input_shape[1]))\n G_6 = Conv2D(self.input_shape[2], (2, 2), padding='same',\n strides=(1, 1), activation=self.act, name='generated')\n # Apply generator layers\n x = G_0(encoding)\n x = G_00(x)\n x = G_01(x)\n x = G_d(x)\n x = G_1(x)\n x = G_10(x) \n x = G_11(x)\n x = G_2(x)\n \n for i in range(len(G)):\n x = G[i](x)\n \n x = G_5_(x)\n generated = G_6(x)\n self.model =Model(inputs, generated)\n # Set up generator\n inputs_G = Input(batch_shape=(None, self.latent_dim))\n x = G_0(inputs_G)\n x = G_00(x)\n x = G_01(x)\n x = G_d(x) \n x = G_1(x)\n x = G_10(x) \n x = G_11(x) \n x = G_2(x)\n \n for i in range(len(G)):\n x = G[i](x)\n \n x = G_5_(x)\n generated_G = G_6(x)\n self.generator = Model(inputs_G, generated_G)\n\n # Store 
latent distribution parameters\n self.z_mean = z_mean\n self.z_log_var = z_log_var\n\n # Compile models\n #self.opt = RMSprop()\n self.model.compile(optimizer=self.opt, loss=self._vae_loss)\n # Loss and optimizer do not matter here as we do not train these models\n self.generator.compile(optimizer=self.opt, loss='mse')\n self.model.summary()\n print(\"Completed model setup.\")", "def train(self, x_data, y_data):\n for model in self.list_of_models:\n model.fit(x_data, y_data)\n self.trained_models.append(model)", "def __init__(self, x_train, model):\n self.x_train = x_train\n self.model = model", "def TrainTeacher(self, model_name, **kwargs):\n batch_size = kwargs.pop(\"batch_size\", 64)\n model_save_path = kwargs.pop('model_save_path', \"./checkpoints/teacher/\")\n num_epoch = kwargs.pop(\"num_epoch\", 10)\n basic_learning_rate = kwargs.pop(\"basic_learning_rate\", 5e-4)\n record_save_path = kwargs.pop(\"record_save_path\", \"./records/teacher\")\n is_dev = kwargs.pop(\"dev_mode\", False)\n learning_rate_decay = kwargs.pop(\"learning_rate_decay\", 0.01)\n reg_scale = kwargs.pop(\"reg_scale\", 1e-1)\n verbose = kwargs.pop(\"verbose\", False)\n\n # Do some check\n if not os.path.exists(model_save_path):\n os.makedirs(model_save_path)\n if not os.path.exists(record_save_path):\n os.makedirs(record_save_path)\n model_save_path = os.path.join(model_save_path, \"{}.ckpt\".format(model_name))\n \n tf.reset_default_graph()\n \n # Get training dataset\n if is_dev:\n train_data, train_label = self.data_manager.dev_data, self.data_manager.dev_label\n else:\n train_data, train_label = self.data_manager.train_data, self.data_manager.train_label\n \n num_train_data = train_data.shape[0]\n\n # The input of teacher model\n X = tf.placeholder(train_data.dtype, [None]+list(train_data.shape[1:]), name=\"input_data\")\n y = tf.placeholder(train_label.dtype, [None]+list(train_label.shape[1:]), name=\"input_label\")\n is_train = tf.placeholder(tf.bool, name=\"is_train\")\n \n dataset = tf.data.Dataset.from_tensor_slices((X, y))\n dataset = dataset.shuffle(buffer_size=8000)\n batched_dataset = dataset.batch(batch_size)\n\n iterator = batched_dataset.make_initializable_iterator()\n batch_data, batch_label = iterator.get_next()\n\n # Get the teacher model\n #logits, probs = self.teacher_models[self.dataset_name](batch_data, is_train=is_train, reg_scale=reg_scale)\n regularizer = tf.contrib.layers.l2_regularizer(scale=reg_scale)\n with tf.variable_scope('teacher_model', regularizer=regularizer):\n logits, probs = self.teacher_model(batch_data, is_train=is_train)\n result = tf.argmax(logits, axis=1)\n correct_num = tf.reduce_sum(tf.cast(tf.equal(result, tf.argmax(batch_label, axis=1)), tf.float32))\n saver = tf.train.Saver()\n\n # Training part\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=batch_label, name=\"cross_entropy_loss\"))\n reg_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, 'teacher_model'))\n loss += reg_loss\n global_step = tf.get_variable('global_step', initializer=0.0, trainable=False)\n learning_rate = tf.train.natural_exp_decay(\n basic_learning_rate, global_step,\n decay_rate=learning_rate_decay,\n name='learning_rate', decay_steps=1\n )\n \n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss)\n global_step_add = tf.assign_add(global_step, 1)\n\n train_acc_hist = []\n val_acc_hist = []\n train_loss_hist = []\n best_acc = 0.0\n\n with tf.Session() as sess:\n 
sess.run(tf.global_variables_initializer())\n for i in range(num_epoch):\n sess.run(iterator.initializer, feed_dict={X:train_data, y:train_label})\n cnt = 0\n total_correct_cnt = 0\n total_loss, acc = 0.0, 0.0\n while True:\n try:\n curr_loss, train, right_num, curr_result = sess.run(\n [loss, train_op, correct_num, result],\n feed_dict={is_train: True}\n )\n total_correct_cnt += right_num\n total_loss += curr_loss\n cnt += 1\n except tf.errors.OutOfRangeError:\n acc = total_correct_cnt * 1.0 / num_train_data\n last_loss = total_loss / cnt \n if verbose:\n div = \"===========================\"\n print(\"{}\\nEpoch {}/{}\\t\\tloss: {}\\t\\tacc: {}\".format(div, i+1, num_epoch, last_loss, acc))\n train_acc_hist.append(acc)\n train_loss_hist.append(last_loss)\n sess.run([global_step_add])\n if verbose:\n last_global_step, last_learning_rate = sess.run([global_step, learning_rate])\n print(\"learning_rate: {}\".format(last_learning_rate))\n break\n \n # Validation\n sess.run(iterator.initializer, feed_dict={X:self.data_manager.val_data, y:self.data_manager.val_label})\n acc = 0.0\n total_correct_cnt = 0\n while True:\n try:\n right_num = sess.run([correct_num], feed_dict={is_train:False})\n total_correct_cnt += right_num[0]\n except tf.errors.OutOfRangeError:\n acc = total_correct_cnt * 1.0 / self.data_manager.val_data.shape[0]\n if verbose:\n print(\"Validation acc: {}\".format(acc))\n val_acc_hist.append(acc)\n if acc > best_acc:\n best_acc = acc\n saver.save(sess, model_save_path)\n break\n # Write train process record\n self._writeRecord(record_save_path, \"{}_train_accuracy\".format(model_name), train_acc_hist)\n self._writeRecord(record_save_path, \"{}_validation_accuracy\".format(model_name), val_acc_hist)\n self._writeRecord(record_save_path, \"{}_train_loss\".format(model_name), train_loss_hist)\n if verbose:\n print(\"Finish Training Teacher Model! 
The Best Validation Accuracy is: {}\".format(best_acc))", "def setup_model(self, weights_name='Weights', bias_name='Bias'):\n tf.reset_default_graph()\n\n self.global_step = tf.get_variable(\n 'global_step', [],\n dtype=tf.int64, trainable=False,\n initializer=tf.constant_initializer(0))\n\n # Model parameters and placeholders.\n x = tf.placeholder(tf.float32, name='x')\n y = tf.placeholder(tf.float32, name='y')\n W = tf.get_variable(weights_name, [1], dtype=tf.float32)\n b = tf.get_variable(bias_name, [1], dtype=tf.float32)\n\n # Model output, loss and optimizer.\n linear_model = W * x + b\n loss = tf.reduce_sum(tf.square(linear_model - y))\n optimizer_base = tf.train.GradientDescentOptimizer(0.01)\n\n # Model train op.\n optimizer = optimizer_base.minimize(\n loss, global_step=self.global_step)\n\n # Train targets.\n self.train_targets = {'loss': loss,\n 'optimizer': optimizer}", "def setUp(self):\n self.X_train, self.y_train = load_data(\"../data/traindata.mat.tar.gz\")\n self.nn = NN_hwr([len(self.X_train[0]), 50, 10])", "def __init__(self, model, check_point, model_name, **kwargs):\n self.model = model\n self.model_name = model_name\n self.check_point = check_point\n self.num_epochs = kwargs.pop('num_epochs', 10)\n self.batch_size = kwargs.pop('batch_size', 128)\n self.learning_rate = kwargs.pop('learning_rate', 1e-4)\n self.model = nn.DataParallel(self.model)\n self.optimizer = optim.Adam(\n model.parameters(),\n lr=self.learning_rate, weight_decay=1e-6)\n self.scheduler = lr_scheduler.StepLR(self.optimizer, step_size=200, gamma=0.5)\n self.loss_fn = kwargs.pop('loss_fn', nn.MSELoss())\n self.fine_tune = kwargs.pop('fine_tune', False)\n self.verbose = kwargs.pop('verbose', False)\n self.print_every = kwargs.pop('print_every', 10)\n\n self._reset()", "def set_model(self, model):\r\n self.model = model.model\r\n with context.eager_mode():\r\n self._close_writers()\r\n if self.write_graph:\r\n with self._get_writer(self._train_run_name).as_default():\r\n with summary_ops_v2.always_record_summaries():\r\n if not self.model.run_eagerly:\r\n summary_ops_v2.graph(K.get_graph(), step=0)\r\n\r\n summary_writable = (\r\n self.model._is_graph_network or # pylint: disable=protected-access\r\n self.model.__class__.__name__ == 'Sequential') # pylint: disable=protected-access\r\n if summary_writable:\r\n summary_ops_v2.keras_model('keras', self.model, step=0)\r\n\r\n if self.embeddings_freq:\r\n self._configure_embeddings()", "def train(hparams, summary_dir, num_gpus, model_type, max_steps, save_step,\n data_dir, num_targets, dataset, validate, seed, shuffled, shift,\n pad, batch_size=128):\n summary_dir += '/train/'\n with tf.Graph().as_default():\n # Build model\n features = get_features('train', batch_size, num_gpus, data_dir,\n num_targets, dataset, validate, evaluate=False,\n seed=seed, shuffled=shuffled, shift=shift,\n pad=pad)\n model = models[model_type](hparams)\n result, _ = model.multi_gpu(features, num_gpus)\n # Print stats\n param_stats = tf.profiler.profile(\n tf.get_default_graph(),\n options=tf.contrib.tfprof.model_analyzer.\n TRAINABLE_VARS_PARAMS_STAT_OPTIONS)\n sys.stdout.write('total_params: %d\\n' % param_stats.total_parameters)\n writer = tf.summary.FileWriter(summary_dir)\n run_experiment(load_training, summary_dir, writer, train_experiment,\n model, result, max_steps, save_step)\n writer.close()", "def train_model(config, environ, train_data, test_data, trainval_data=None):\n np.random.seed(0)\n if not hasattr(config, \"seed\"):\n tf.set_random_seed(1234)\n 
log.info(\"Setting tensorflow random seed={:d}\".format(1234))\n else:\n log.info(\"Setting tensorflow random seed={:d}\".format(config.seed))\n tf.set_random_seed(config.seed)\n if environ.verbose:\n verbose_level = 0\n else:\n verbose_level = 2\n\n if trainval_data is None:\n trainval_data = train_data\n\n log.info(\"Environment: {}\".format(environ.__dict__))\n log.info(\"Config: {}\".format(config.__dict__))\n\n save_folder = os.path.join(environ.save_folder, environ.exp_id)\n logs_folder = os.path.join(environ.logs_folder, environ.exp_id)\n with log.verbose_level(verbose_level):\n exp_logger = ExperimentLogger(logs_folder)\n\n if not hasattr(config, \"seed\"):\n data_seed = 0\n else:\n data_seed = config.seed\n\n # Gets data iterators.\n train_iter = get_iter(\n train_data,\n batch_size=config.batch_size,\n shuffle=True,\n cycle=True,\n prefetch=config.prefetch,\n seed=data_seed,\n num_worker=25,\n queue_size=500)\n trainval_iter = get_iter(\n train_data,\n batch_size=config.batch_size,\n shuffle=True,\n cycle=True,\n prefetch=config.prefetch,\n num_worker=10,\n queue_size=200)\n test_iter = get_iter(\n test_data,\n batch_size=config.batch_size,\n shuffle=False,\n cycle=False,\n prefetch=config.prefetch,\n num_worker=10,\n queue_size=200)\n\n # Builds models.\n log.info(\"Building models\")\n with tf.name_scope(\"Train\"):\n with tf.variable_scope(\"Model\", reuse=None):\n with tf.device(environ.device):\n if config.model.startswith(\"resnet\"):\n m = ResNetModel(config, is_training=True)\n else:\n m = CNNModel(config, is_training=True)\n\n with tf.name_scope(\"Valid\"):\n with tf.variable_scope(\"Model\", reuse=True):\n with tf.device(environ.device):\n if config.model.startswith(\"resnet\"):\n mvalid = ResNetModel(config, is_training=False)\n else:\n mvalid = CNNModel(config, is_training=False)\n\n # Initializes variables.\n with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:\n saver = tf.train.Saver()\n sess.run(tf.global_variables_initializer())\n\n def train_step():\n \"\"\"Train step.\"\"\"\n batch = train_iter.next()\n feed_data = {m.input: batch[\"img\"], m.label: batch[\"label\"]}\n cost, ce, _ = sess.run([m.cost, m.cross_ent, m.train_op],\n feed_dict=feed_data)\n return ce\n\n def evaluate(data_iter, nbatches):\n \"\"\"Runs evaluation.\"\"\"\n num_correct = 0.0\n count = 0\n if nbatches == -1:\n iter_ = data_iter\n else:\n iter_ = range(nbatches)\n\n for bb in iter_:\n if nbatches == -1:\n batch = bb\n else:\n batch = data_iter.next()\n feed_data = {mvalid.input: batch[\"img\"]}\n y = sess.run(mvalid.output, feed_dict=feed_data)\n pred_label = np.argmax(y, axis=1)\n num_correct += np.sum(\n np.equal(pred_label, batch[\"label\"]).astype(float))\n count += pred_label.size\n acc = (num_correct / count)\n return acc\n\n def save():\n \"\"\"Snapshots a model.\"\"\"\n if not os.path.isdir(save_folder):\n os.makedirs(save_folder)\n config_file = os.path.join(save_folder, \"conf.json\")\n environ_file = os.path.join(save_folder, \"env.json\")\n with open(config_file, \"w\") as f:\n f.write(config.to_json())\n with open(environ_file, \"w\") as f:\n f.write(environ.to_json())\n log.info(\"Saving to {}\".format(save_folder))\n saver.save(\n sess,\n os.path.join(save_folder, \"model.ckpt\"),\n global_step=m.global_step)\n\n def train():\n \"\"\"Train loop.\"\"\"\n lr = config.base_learn_rate\n lr_decay_steps = config.lr_decay_steps\n max_train_iter = config.max_train_iter\n m.assign_lr(sess, lr)\n\n if environ.verbose:\n loop = range(max_train_iter)\n else:\n loop 
= pb.get(max_train_iter)\n\n for niter in loop:\n # decrease learning rate\n if len(lr_decay_steps) > 0:\n if (niter + 1) == lr_decay_steps[0]:\n lr *= 0.1\n m.assign_lr(sess, lr)\n lr_decay_steps.pop(0)\n ce = train_step()\n if (niter + 1) % config.disp_iter == 0 or niter == 0:\n exp_logger.log_train_ce(niter, ce)\n if (niter + 1) % config.valid_iter == 0 or niter == 0:\n acc = evaluate(trainval_iter, 10)\n exp_logger.log_train_acc(niter, acc)\n test_iter.reset()\n acc = evaluate(test_iter, -1)\n log.info(\"Experment ID {}\".format(environ.exp_id))\n exp_logger.log_valid_acc(niter, acc)\n if (niter + 1) % config.save_iter == 0:\n save()\n test_iter.reset()\n acc = evaluate(test_iter, -1)\n return acc\n\n acc = train()\n return acc", "def main():\n setup_keras()\n\n args = parse()\n\n train_settings = common.load_settings(args.settings_path, default_conf_name='train.yml')\n train_settings['store'] = args.store\n\n feature_settings = common.load_settings(args.settings_path, default_conf_name='feature.yml')\n model_settings = common.load_settings(args.settings_path, default_conf_name=train_settings['model_conf'])\n\n train_df, val_df = load_training_data(dict(train_settings, **feature_settings))\n assert train_df.shape[0] > val_df.shape[0] * 4.5, f'training data {train_df.shape[0]} should be much larger than validation {val_df.shape[0]}'\n\n sample_featurizer = AudioFeature(feature_settings)\n\n if args.load_name:\n model_name = args.load_name\n print('Loading existing model', model_name)\n m = keras.models.load_model(model_name)\n else:\n t = datetime.datetime.now().strftime('%Y%m%d-%H%M')\n model_name = f\"model-{model_settings['model']}_hop{feature_settings['hop_length']}_{t}\"\n m = models.build(dict(model_settings, **feature_settings))\n m.summary()\n\n output_dir = os.path.join(args.model_store, model_name)\n\n print(f\"Training model: '{model_name}'\", json.dumps(train_settings, indent=1))\n\n combined_settings = dict(train_settings, **model_settings, **feature_settings)\n\n h = train_model(output_dir, train_df, val_df,\n model=m,\n sample_featurizer=sample_featurizer,\n settings=combined_settings)", "def setup_model(self,\n model_weights_path: Optional[str] = None,\n model_def_path: Optional[str] = None) -> None:\n if self.model is not None:\n self.model.to(self.device)\n return\n\n self._onnx_mode = (model_weights_path is not None\n and model_weights_path.lower().endswith('.onnx'))\n if self._onnx_mode:\n model = self.load_onnx_model(model_weights_path)\n else:\n model = self.build_model(model_def_path)\n\n if self.cfg.model.external_def is not None:\n # this model will have 1 extra output classes that we will ignore\n self.model = TorchVisionODAdapter(model, ignored_output_inds=[0])\n else:\n # this model will have 2 extra output classes that we will ignore\n num_classes = self.cfg.data.num_classes\n self.model = TorchVisionODAdapter(\n model, ignored_output_inds=[0, num_classes + 1])\n\n if not self._onnx_mode:\n self.model.to(self.device)\n self.load_init_weights(model_weights_path)", "def initialize_trainer(self):\n self.initialize_matrices()\n self.initialize_model()\n self.initialize_optimizers()\n return self", "def init_learner(self,**kwargs):\r\n \r\n if self.learn_type == 'nn':\r\n #initialize neural network\r\n shape = kwargs[\"shape\"]\r\n #initialize input layer\r\n model = Sequential() \r\n #add hidden layers\r\n for i in range(len(shape)):\r\n if i == 0:\r\n nb_input = self.size\r\n else:\r\n nb_input = shape[i -1]\r\n nb_output = shape[i]\r\n 
model.add(Dense(nb_input,nb_output,init=\"he_normal\",\r\n activation = \"tanh\"))\r\n model.add(Dropout(.5))\r\n model.add(Dense(shape[-1],1,init = \"he_normal\",\r\n activation = \"linear\"))\r\n model.compile(loss = 'mean_squared_error',optimizer = 'rmsprop')\r\n self.learner = model\r\n \r\n elif self.learn_type == 'linear':\r\n #initialize parameter\r\n self.learner = Linear(self.size,**kwargs)", "def prepare_model_optimizer_and_scheduler(self):\n\n ###################################################################\n # MODEL PREPARATION\n # -----------------\n # - step 1: Initialize a random model from config\n # - step 2: Load model weights from checkpoint if any\n # - step 3: Move model to device (GPU)\n ###################################################################\n\n # Initialize a random model according to a specific config:\n # NOTE: here we load from a physical path instead of using a keyword\n # as compute nodes may not allow downloading from online hubs\n if self.is_character_bert:\n model_config = CharacterBertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'character-bert'))\n model = CharacterBertForPreTraining(model_config)\n else:\n model_config = BertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'bert-base-uncased'))\n model = BertForPreTraining(model_config)\n if self.is_main_process:\n logging.info(\n \"Initialized %s using Config:\\n%s\",\n \"CharacterBERT\" if self.is_character_bert else \"BERT\",\n model_config\n )\n\n # Load checkpoint if any:\n if not self.resume_pretraining:\n # CASE: no checkpoint -> training from scratch\n self.global_step = 0\n if self.is_main_process:\n logging.info(\"Pre-training from scratch (good luck!)\")\n else:\n if self.init_checkpoint:\n # CASE: load checkpoint from direct path\n self.global_step = 0\n init_checkpoint = self.init_checkpoint\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from specific checkpoint `%s`\",\n init_checkpoint\n )\n else:\n # CASE: load checkpoint from resume_step\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from step `%s`. \"\n \"Looking inside `output_directory` for checkpoints...\",\n self.resume_step\n )\n\n if self.resume_step == -1:\n # CASE: resume_step == -1, load latest checkpoint\n model_names = [\n fname\n for fname in os.listdir(self.output_directory)\n if fname.endswith(\".pt\")]\n assert model_names, \"Could not find any checkpoints to resume from.\"\n self.resume_step = max([\n int(x.split('.pt')[0].split('_')[1].strip())\n for x in model_names]) # TODO: find a better way for this\n if self.is_main_process:\n logging.info(\n \"Resuming from latest checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n else:\n # CASE: resume_step == X, load checkpoint: `ckpt_X.pt`\n if self.is_main_process:\n logging.info(\n \"Resuming from checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n self.global_step = self.resume_step\n init_checkpoint = os.path.join(\n self.output_directory, f\"ckpt_{self.resume_step}.pt\")\n\n # Load the actual checkpoint file\n self.checkpoint = torch.load(\n init_checkpoint, map_location=\"cpu\"\n )\n\n # NOTE: Keeping these lines below as a reminder that re-training on\n # a different domain with CharacterBERT requires changing the\n # output layer with a topK tokens matrix from the new domain.\n\n # # Case where we would retrain a general_domain CharacterBERT\n # # on the medical domain. 
Don't use the general domain output layer:\n # if self.is_medical_domain and self.is_character_bert and (not self.phase2):\n # model.load_state_dict(\n # {\n # k: v for (k, v) in self.checkpoint['model'].items()\n # # Don't load output matrix from general domain model\n # if not k.startswith('cls.predictions') # ignoring the old output layer\n # },\n # strict=False)\n # if self.is_main_process:\n # logging.warning(\n # \"Loaded model weights from `%s`, \"\n # \"but ignored the `cls.predictions` module.\",\n # init_checkpoint)\n\n # # General case: load weights from checkpoint\n # else:\n # model.load_state_dict(self.checkpoint['model'], strict=True)\n # if self.is_main_process:\n # logging.info('Loaded model weights from `%s`',\n # init_checkpoint)\n\n # General case: load weights from checkpoint\n model.load_state_dict(self.checkpoint['model'], strict=True)\n if self.is_main_process:\n logging.info('Loaded model weights from `%s`', init_checkpoint)\n\n # Deduce previous steps from phase1 when in phase2\n if self.phase2 and not self.init_checkpoint:\n self.global_step -= self.phase1_end_step\n\n if self.is_main_process:\n logging.info(\"Training will start at global_step=%s\", self.global_step)\n\n # Move model to GPU:\n model.to(self.device)\n if self.is_main_process:\n logging.info(\"Model was moved to device: %s\", self.device)\n\n ###################################################################\n # OPTIMIZER / SCHEDULER PREPARATION\n # ---------------------------------\n # - step 1: Define the optimizer (FusedLAMB w/ some weight decay)\n # - step 2: Define the learning rate scheduler (PolyWarmUpScheduler)\n ###################################################################\n\n # Initialize an optimizer:\n no_decay = ['bias', 'gamma', 'beta', 'LayerNorm'] # no weight decay\n optimizer_grouped_parameters = [\n {\n 'params': [\n param for name, param in model.named_parameters()\n if not any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.01\n },\n {\n 'params': [\n param for name, param in model.named_parameters()\n if any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.0\n }\n ]\n optimizer = FusedLAMB(\n optimizer_grouped_parameters, lr=self.learning_rate)\n if self.is_main_process:\n logging.info(\"Using optimizer: %s\", optimizer)\n\n # Initialize a learning rate scheduler:\n self.lr_scheduler = PolyWarmUpScheduler(\n optimizer,\n warmup=self.warmup_proportion,\n total_steps=self.total_steps\n )\n if self.is_main_process:\n logging.info(\"Using scheduler: %s\", self.lr_scheduler)\n\n ###################################################################\n # OTHER PREPARATION STEPS\n # -----------------------\n # - step 1: Set up Mixed Precision training (fp16) if required\n # - step 2: Load optimizer stat from checkpoint if any\n # - step 2: Set up DataParallel\n ###################################################################\n\n # Set up fp16:\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Setting up `Almost FP16` Mixed Precision...\")\n if self.loss_scale == 0:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=\"dynamic\")\n else:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=self.loss_scale)\n amp._amp_state.loss_scalers[0]._loss_scale = 2**20\n\n # Load optimizer state from checkpoint\n if self.resume_pretraining:\n if self.is_main_process:\n logging.info(\"Loading optimizer state from checkpoint...\")\n if self.phase2 or self.init_checkpoint:\n keys = 
list(self.checkpoint['optimizer']['state'].keys())\n # Override hyperparameters from previous self.checkpoint\n for key in keys:\n self.checkpoint['optimizer']['state'][key]['step'] = self.global_step\n for i, _ in enumerate(self.checkpoint['optimizer']['param_groups']):\n self.checkpoint['optimizer']['param_groups'][i]['step'] = self.global_step\n self.checkpoint['optimizer']['param_groups'][i]['t_total'] = self.total_steps\n self.checkpoint['optimizer']['param_groups'][i]['warmup'] = self.warmup_proportion\n self.checkpoint['optimizer']['param_groups'][i]['lr'] = self.learning_rate\n if self.is_main_process:\n logging.info(\"Overwrote the following parameters with new values:\")\n logging.info(\"* step: %s\", self.global_step)\n logging.info(\"* t_total: %s\", self.total_steps)\n logging.info(\"* warmup: %s\", self.warmup_proportion)\n logging.info(\"* lr: %s\", self.learning_rate)\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n # Restore AMP master parameters\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Restoring AMP master parameters (optimizer)...\")\n optimizer._lazy_init_maybe_master_weights()\n optimizer._amp_stash.lazy_init_called = True\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n for param, saved_param in zip(amp.master_params(optimizer), self.checkpoint['master params']):\n param.data.copy_(saved_param.data)\n\n # Distribute model\n if self.training_is_distributed:\n if not self.allreduce_post_accumulation:\n model = DistributedDataParallel(\n model,\n message_size=250000000,\n gradient_predivide_factor=\\\n torch.distributed.get_world_size()\n )\n else:\n flat_dist_call(\n [param.data for param in model.parameters()],\n torch.distributed.broadcast,\n (0,)\n )\n elif self.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Set the values of self.model and self.optimizer\n self.model = model\n self.optimizer = optimizer", "def train_model(x_data, y_data, model_type):\n # def lr model object\n clr = None\n try:\n clr = model_list[model_type]()\n except Exception as e:\n print(e)\n # fit model\n clr.fit(x_data, y_data)\n # save model in pkl file\n try:\n joblib.dump(clr, \"model/\" + model_type + \".pkl\")\n except Exception as e:\n print(e)\n return clr", "def train(self, X, y):\n self.model.fit(X, y)", "def set_model(self, model: tf.keras.Model):\n self.model = model", "def _train(args, pretrain_args):\n start_time = time.time()\n print('Training', ', '.join(args.speakers), '...')\n\n # randomly sample validation set monte_carlo_cv_num times\n for num in range(args.monte_carlo_cv_num):\n # get seed used to sub-sample validation dataset (use 42 for 1st run)\n seed = utils.get_seed(num)\n\n # get train/valid/test data and convert to sequences\n train_data, valid_data, test_data, id_to_word = data_reader.get_data(\n args, seed=seed)\n # set configurations/hyperparameters for model\n config, test_config = utils.set_config(args, id_to_word)\n\n # initialize word embeddings\n init_embed = utils.init_embedding(id_to_word, dim=args.embed_size,\n init_scale=args.init_scale,\n embed_path=args.embed_path)\n\n with tf.Graph().as_default():\n # initializer used to initialize TensorFlow variables\n initializer = tf.random_uniform_initializer(-config['init_scale'],\n config['init_scale'])\n # create Train model\n with tf.name_scope('Train'):\n with tf.variable_scope('Model', reuse=None,\n initializer=initializer):\n m_train = model.Model(args, is_training=True, config=config,\n init_embed=init_embed, name='Train')\n m_train.build_graph()\n\n # 
create Valid model\n with tf.name_scope('Valid'):\n with tf.variable_scope('Model', reuse=True,\n initializer=initializer):\n m_valid = model.Model(args, is_training=False, config=config,\n init_embed=init_embed, name='Valid')\n m_valid.build_graph()\n\n # create Test model\n with tf.name_scope('Test'):\n with tf.variable_scope('Model', reuse=True,\n initializer=initializer):\n m_test = model.Model(args, is_training=False, config=test_config,\n init_embed=init_embed, name='Test')\n m_test.build_graph()\n\n # create summaries to be viewed in TensorBoard\n tb_summaries = utils.TensorBoardSummaries()\n tb_summaries.create_ops()\n\n init = tf.global_variables_initializer()\n\n # if pretrained, must create dict to initialize TF Saver\n if bool(pretrain_args):\n # get trainable variables and convert to dict for Saver\n reuse_vars = tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES)\n reuse_vars_dict = dict(\n [(var.op.name, var) for var in reuse_vars])\n # create saver for TF session (see function for addl details)\n saver = utils.create_tf_saver(args, pretrain_args,\n reuse_vars_dict)\n else:\n saver = tf.train.Saver()\n\n # ppls dict has perplexities that are stored in results database\n ppls = {}\n ppls, _ = _update_ppls(ppls, initialize=True)\n\n with tf.Session() as sess:\n sess.run(init)\n\n if args.load_path != '':\n print('Restoring model...')\n saver.restore(sess, args.load_path)\n\n for epoch in range(config['max_epoch']):\n print('Epoch: {0} Learning rate: {1:.3f}\\n'.format(\n epoch + 1, sess.run(m_train.lr)))\n for i, speaker in enumerate(args.speakers):\n print('Training {0} ...'.format(speaker))\n\n # run epoch on training data\n train_perplexity = _run_epoch(sess, m_train, args, train_data,\n i, tb_summaries, id_to_word,\n train_op=m_train.train_op,\n verbose=True)\n print('Epoch: {0} Train Perplexity: {1:.3f}'.format(\n epoch + 1, train_perplexity))\n ppls, _ = _update_ppls(ppls, epoch=epoch+1,\n speaker=speaker,\n ppl=train_perplexity,\n dataset='train')\n\n print('Validating...')\n # run epoch on validation data\n valid_perplexity = _run_epoch(sess, m_valid, args,\n valid_data, i, tb_summaries,\n id_to_word, verbose=True)\n print('Epoch: {0} Valid Perplexity: {1:.3f}'.format(\n epoch + 1, valid_perplexity))\n ppls, improved = _update_ppls(ppls, epoch=epoch+1,\n speaker=speaker,\n ppl=valid_perplexity,\n dataset='valid')\n\n if improved:\n # save model if valid ppl is lower than current\n # best valid ppl\n if args.save_path != '':\n print('Saving model to {0}.'.format(\n args.save_path))\n saver.save(sess, args.save_path)\n\n for i, speaker in enumerate(args.speakers):\n print('Testing {0} ...'.format(speaker))\n print('Restoring best model for testing...')\n saver.restore(sess, args.save_path)\n # run model on test data\n test_perplexity = _run_epoch(sess, m_test, args, test_data, i)\n ppls['test_ppl_' + speaker] = test_perplexity\n print('Test Perplexity: {0:.3f}'.format(test_perplexity))\n\n if args.insert_db == 'True':\n # write params/config/results to sql database\n results_db.insert_results(args, config, start_time, ppls)" ]
[ "0.7335617", "0.72743934", "0.7272043", "0.71031904", "0.70921487", "0.70791066", "0.70587295", "0.70124644", "0.6923429", "0.69174415", "0.68890333", "0.68263906", "0.6823547", "0.68021375", "0.6764099", "0.6710221", "0.6695851", "0.6694311", "0.66900265", "0.6688204", "0.6682642", "0.66538507", "0.6640513", "0.6631236", "0.6593", "0.659288", "0.657659", "0.657592", "0.6571553", "0.6555436", "0.654236", "0.65420485", "0.65270555", "0.65180904", "0.65098375", "0.64922726", "0.64798903", "0.6463414", "0.6454817", "0.64488685", "0.64413595", "0.6441092", "0.6435093", "0.64339375", "0.6432557", "0.6431338", "0.64305174", "0.6397281", "0.63830745", "0.6376061", "0.6369252", "0.63584137", "0.63584137", "0.6355381", "0.6352656", "0.6352104", "0.63340086", "0.63322246", "0.63146687", "0.6309463", "0.630773", "0.6303671", "0.62971944", "0.62675226", "0.62671125", "0.62660855", "0.62562144", "0.6250814", "0.6245411", "0.6241749", "0.62407374", "0.62387955", "0.6236839", "0.6234209", "0.6232493", "0.6229885", "0.62261254", "0.62164783", "0.62159336", "0.62151873", "0.62149584", "0.62125444", "0.6211934", "0.62100476", "0.6208597", "0.6207281", "0.6206027", "0.6204415", "0.62035865", "0.62020564", "0.6201957", "0.6200927", "0.61987597", "0.6182724", "0.61823785", "0.6175718", "0.6175086", "0.61708975", "0.617064", "0.6169045", "0.61630875" ]
0.0
-1
Evaluates the trained model using the specified features and labels.
def evaluate(self, features, labels): raise NotImplementedError('Not implemented')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate(model, eval_data, num_labels): \n # Turn on the evaluation state to ignore dropouts\n model.eval()\n results = [predict(model, x) for x, y in eval_data]\n f1_score, accuracy = get_metrics(np.array([y for x, y in eval_data]), results, num_labels)\n return f1_score, accuracy", "def model_fn(features, labels, mode, params):\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = ClassifierModel.create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = bert.optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n f1_score = tf.contrib.metrics.f1_score(\n label_ids,\n predicted_labels)\n auc = tf.metrics.auc(\n label_ids,\n predicted_labels)\n recall = tf.metrics.recall(\n label_ids,\n predicted_labels)\n precision = tf.metrics.precision(\n label_ids,\n predicted_labels)\n true_pos = tf.metrics.true_positives(\n label_ids,\n predicted_labels)\n true_neg = tf.metrics.true_negatives(\n label_ids,\n predicted_labels)\n false_pos = tf.metrics.false_positives(\n label_ids,\n predicted_labels)\n false_neg = tf.metrics.false_negatives(\n label_ids,\n predicted_labels)\n\n return {\n \"eval_accuracy\": accuracy,\n \"f1_score\": f1_score,\n \"auc\": auc,\n \"precision\": precision,\n \"recall\": recall,\n \"true_positives\": true_pos,\n \"true_negatives\": true_neg,\n \"false_positives\": false_pos,\n \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = ClassifierModel.create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels,\n log_probs) = create_model(bert_model_hub, is_predicting,\n input_ids, input_mask, segment_ids,\n label_ids, num_labels)\n\n train_op = bert.optimization.create_optimizer(loss,\n learning_rate,\n num_train_steps,\n num_warmup_steps,\n use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n f1_score = tf.contrib.metrics.f1_score(label_ids,\n predicted_labels)\n auc = tf.metrics.auc(label_ids, predicted_labels)\n recall = tf.metrics.recall(label_ids, predicted_labels)\n precision = tf.metrics.precision(label_ids, predicted_labels)\n true_pos = tf.metrics.true_positives(label_ids,\n predicted_labels)\n true_neg = 
tf.metrics.true_negatives(label_ids,\n predicted_labels)\n false_pos = tf.metrics.false_positives(label_ids,\n predicted_labels)\n false_neg = tf.metrics.false_negatives(label_ids,\n predicted_labels)\n return {\n \"eval_accuracy\": accuracy,\n \"f1_score\": f1_score,\n \"auc\": auc,\n \"precision\": precision,\n \"recall\": recall,\n \"true_positives\": true_pos,\n \"true_negatives\": true_neg,\n \"false_positives\": false_pos,\n \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels,\n log_probs) = create_model(bert_model_hub, is_predicting,\n input_ids, input_mask, segment_ids,\n label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)", "def train(self, features, labels):\n pass", "def evaluate_model(self, test_data, test_labels,verbose=2):\n test_loss, test_acc = self.model.evaluate(test_data, test_labels, verbose=verbose)\n return test_loss, test_acc", "def evaluate(self, eval_data, eval_labels, eval_input_fn=\"default\"):\n # Validations:\n # If it is of type str, make sure is a valid\n if isinstance(eval_input_fn, str):\n # We use a list in case we want to extend in the future.\n if eval_input_fn in [\"default\"]:\n if eval_input_fn == \"default\":\n # pylint: disable=no-member\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": eval_data},\n y=eval_labels,\n num_epochs=1,\n shuffle=False\n )\n\n eval_res = self.classifier.evaluate(input_fn=eval_input_fn)\n return eval_res", "def evaluate(inputs, labels):\n # Your code here.\n _, probs = forward(inputs)\n preds = predict(probs)\n trues = np.argmax(labels, axis=1)\n return np.mean(preds == trues)", "def evaluate(self, test_data, test_labels):\n raise NotImplementedError", "def evaluate_model(\n self,\n val_loader,\n additional_gpu=None,\n metrics=None,\n inputs_key=\"image\",\n labels_key=\"label\"\n ):\n # predict on the validation set\n all_preds = []\n all_labels = []\n\n self.model.eval()\n\n if additional_gpu is not None:\n device = additional_gpu\n else:\n device = self.device\n\n with torch.no_grad():\n for i, data in enumerate(val_loader):\n inputs, labels = data[inputs_key], data[labels_key]\n inputs = inputs.to(device)\n labels = labels.to(device)\n # forward + backward + optimize\n outputs = self.model(inputs)\n # run inference\n all_preds, all_labels = predict(\n outputs,\n labels,\n all_preds,\n all_labels,\n self.prediction_type,\n self.criterion,\n class_threshold=self.class_threshold\n )\n\n # compute confusion matrix\n cm = confusion_matrix(all_labels, all_preds)\n plt.imshow(cm, interpolation=\"nearest\", cmap=plt.cm.Blues)\n\n # Visualize the confusion matrix\n classes = [\"control\", \"patient\"]\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = \"d\"\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(\n j,\n i,\n format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\",\n )\n plt.title(\"Confusion Matrix\")\n plt.ylabel(\"True label\")\n plt.xlabel(\"Predicted label\")\n plt.show()\n\n # print metrics\n if 
metrics is not None:\n for metric in metrics:\n if isinstance(all_preds[0], list):\n print(\"{}: {}\".format(metric.__name__, np.mean([metric(labels, preds) for preds,labels in zip(all_preds, all_labels)])))\n else:\n print(\"{}: {}\".format(metric.__name__, metric(all_labels, all_preds)))\n\n\n self.model.train()", "def train(self, features, labels):\n self._clf.fit(features, labels)", "def train(self, features, labels):\n self._clf.fit(features, labels)", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.compat.v1.metrics.accuracy(label_ids, predicted_labels)\n #f1_score = tf.contrib.metrics.f1_score(\n # label_ids,\n # predicted_labels)\n #auc = tf.metrics.auc(\n # label_ids,\n # predicted_labels)\n #recall = tf.metrics.recall(\n # label_ids,\n # predicted_labels)\n #precision = tf.metrics.precision(\n # label_ids,\n # predicted_labels)\n #true_pos = tf.metrics.true_positives(\n # label_ids,\n # predicted_labels)\n #true_neg = tf.metrics.true_negatives(\n # label_ids,\n # predicted_labels)\n #false_pos = tf.metrics.false_positives(\n # label_ids,\n # predicted_labels)\n #false_neg = tf.metrics.false_negatives(\n # label_ids,\n # predicted_labels)\n return {\n \"eval_accuracy\": accuracy,\n # \"f1_score\": f1_score,\n #\"auc\": auc,\n # \"precision\": precision,\n # \"recall\": recall,\n # \"true_positives\": true_pos,\n # \"true_negatives\": true_neg,\n # \"false_positives\": false_pos,\n # \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)", "def evaluate(train, train_labels, test, test_labels):\n \n # Use the same model for each training set for now\n model = RandomForestClassifier(n_estimators = 100, \n random_state = 50, n_jobs = -1)\n \n train = train.replace({np.inf: np.nan, -np.inf: np.nan})\n test = test.replace({np.inf: np.nan, -np.inf:np.nan})\n \n feature_names = list(train.columns)\n \n # Impute the missing values\n imputer = Imputer(strategy = 'median', axis = 1)\n train = imputer.fit_transform(train)\n test = imputer.transform(test)\n \n cv_score = 1 * cross_val_score(model, train, train_labels, \n scoring = \"f1\", \n cv = 5)\n \n # Fit on the training data and make predictions\n model.fit(train, train_labels)\n preds = model.predict(test)\n \n # Calculate the performance\n f1 = f1_score(test_labels, preds)\n print('5-fold CV F1: {:.2f} with std: {:.2f}'.format(cv_score.mean(),cv_score.std()))\n print('Test F1: 
{:.2f}.'.format(f1))\n \n feature_importances = pd.DataFrame({'feature': feature_names, \n 'importance': model.feature_importances_})\n \n return preds, feature_importances", "def compute_eval_dict(features, labels):\r\n # For evaling on train data, it is necessary to check whether groundtruth\r\n # must be unpadded.\r\n boxes_shape = (\r\n labels[fields.InputDataFields.groundtruth_boxes].get_shape().as_list())\r\n unpad_groundtruth_tensors = boxes_shape[1] is not None and not use_tpu\r\n labels = model_lib.unstack_batch(\r\n labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)\r\n\r\n losses_dict, prediction_dict = _compute_losses_and_predictions_dicts(\r\n detection_model, features, labels, add_regularization_loss)\r\n\r\n def postprocess_wrapper(args):\r\n return detection_model.postprocess(args[0], args[1])\r\n\r\n # TODO(kaftan): Depending on how postprocessing will work for TPUS w/\r\n ## TPUStrategy, may be good to move wrapping to a utility method\r\n if use_tpu and postprocess_on_cpu:\r\n detections = contrib_tpu.outside_compilation(\r\n postprocess_wrapper,\r\n (prediction_dict, features[fields.InputDataFields.true_image_shape]))\r\n else:\r\n detections = postprocess_wrapper(\r\n (prediction_dict, features[fields.InputDataFields.true_image_shape]))\r\n\r\n class_agnostic = (\r\n fields.DetectionResultFields.detection_classes not in detections)\r\n # TODO(kaftan) (or anyone): move `_prepare_groundtruth_for_eval to eval_util\r\n ## and call this from there.\r\n groundtruth = model_lib._prepare_groundtruth_for_eval( # pylint: disable=protected-access\r\n detection_model, class_agnostic, eval_input_config.max_number_of_boxes)\r\n use_original_images = fields.InputDataFields.original_image in features\r\n if use_original_images:\r\n eval_images = features[fields.InputDataFields.original_image]\r\n true_image_shapes = tf.slice(\r\n features[fields.InputDataFields.true_image_shape], [0, 0], [-1, 3])\r\n original_image_spatial_shapes = features[\r\n fields.InputDataFields.original_image_spatial_shape]\r\n else:\r\n eval_images = features[fields.InputDataFields.image]\r\n true_image_shapes = None\r\n original_image_spatial_shapes = None\r\n\r\n eval_dict = eval_util.result_dict_for_batched_example(\r\n eval_images,\r\n features[inputs.HASH_KEY],\r\n detections,\r\n groundtruth,\r\n class_agnostic=class_agnostic,\r\n scale_to_absolute=True,\r\n original_image_spatial_shapes=original_image_spatial_shapes,\r\n true_image_shapes=true_image_shapes)\r\n\r\n return eval_dict, losses_dict, class_agnostic", "def evaluate(inputs, labels):\n _, probs = forward(inputs)\n preds = predict(probs)\n trues = np.argmax(labels, axis=0)\n return np.mean(preds == trues)", "def evaluate(K, labels, model):\n K = sparse.hstack((1+np.arange(len(labels))[:,None], K)).A\n pred_labels, accuracy, _ = svm_predict(labels, K, model)\n return accuracy[0]", "def evaluate(data_loader, model, device):\n\n\tmodel.eval()\n\ttotal_num_examples = 0\n\ttotal_error = 0\n\tfor idx, batch in enumerate(data_loader):\n\t\tquestion_feature_vec = batch['feature_vec'].to(device)\n\t\tquestion_len = batch['len'].to(device)\n\t\tlabels = batch['labels'].to(device)\n\n\t\t####Your code here ---\n\n\t\t# get the output from the model\n\t\tlogits = model(question_feature_vec, question_len)\n\n\t\t# get error, num_examples using accuracy_fn defined previously\n\t\terror, num_examples = accuracy_fn(logits, labels)\n\n\t\t# update total_error and total_num_examples\n\t\ttotal_error += error\n\t\ttotal_num_examples += 
num_examples\n\n\taccuracy = 1 - total_error / total_num_examples\n\treturn accuracy", "def evaluate(\n config, feature_table, label_table,\n model_paths, model_summaries,\n save_preds_to_db=False, save_prefix='',\n discard_columns=[], log_dir='./results/'):\n\n # Create log directory if not exists\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n # Get feature and label data\n X, y = get_data(feature_table, label_table, discard_columns=discard_columns)\n labeled_indices = np.logical_or(y == 0, y == 1)\n\n # Evaluate models\n metrics_str = [s.rsplit('.', 1) for s in config['eval_config']['metrics']]\n metrics = [getattr(importlib.import_module(m), c) for (m, c) in metrics_str]\n k_values = config['eval_config']['k']\n results = evaluate_multiprocessing(\n model_paths, save_preds_to_db, save_prefix,\n X, y, labeled_indices, metrics, k_values)\n\n # Convert results to dataframe table\n results_columns = [f'{metric.__name__}_at_{k}' for metric in metrics for k in k_values]\n results = pd.DataFrame({\n **pd.DataFrame(model_summaries),\n 'model_path': model_paths,\n 'num_labeled_rows': [int(labeled_indices.sum())] * len(model_paths),\n **pd.DataFrame(np.array(results).round(4), columns=results_columns),\n })\n\n # Save results to csv file\n experiment_name = config['experiment_name']\n results_path = Path(log_dir) / f'{experiment_name}_results.csv'\n results.to_csv(results_path)\n\n return results", "def evaluate(self, train_set=\"train_set\", test_set=\"test_set\", targets=\"targets\", k=10):\n\n test_set = self.cache.fetch(test_set) if isinstance(test_set, str) else test_set\n\n # Predict\n preds = self.run(dataset=train_set, targets=targets, k=k)\n\n # Evaluate model\n print(\"evaluating model ...\")\n score = evaluate(preds, test_set)\n print(\"MAP@{}: {:.5f}\\n\".format(k, score))\n\n return score", "def eval(self, test_docs, test_labels):\n assert len(test_docs)==len(test_labels)\n preds = [] # predicted labels\n for doc,y_gold in zip(test_docs,test_labels):\n y_pred = self.predict(doc)\n preds.append(y_pred)\n ev = Eval(test_labels, preds)\n return ev.accuracy()", "def eval_input_fn(features, labels, batch_size):\n\tfeatures=dict(features)\n\tif labels is None:\n\t\t# No labels, use only features.\n\t\tinputs = features\n\telse:\n\t\tinputs = (features, labels)\n\n\t# Convert the inputs to a Dataset.\n\tdataset = tensorflow.data.Dataset.from_tensor_slices(inputs)\n\n\t# Batch the examples\n\tassert batch_size is not None, \"batch_size must not be None\"\n\tdataset = dataset.batch(batch_size)\n\t\n\tversion_full = tensorflow.__version__\n\tx, version, y = version_full.split('.')\n\tprint('Versionfull: ' + version_full)\n\tprint('Version: ' + version)\n\t\n\tif version >= '5':\n\t\t# Return the dataset.\n\t\treturn dataset\n\telse:\n\t\treturn dataset.make_one_shot_iterator().get_next() #for 1.4", "def model_evaluate(self, test):\n features = {name: np.array(value) for name, value in test.items()}\n labels = {name: features.pop(name) for name in self.label_names}\n metrics = self.model.evaluate(x=features, y=labels, batch_size=5)\n return metrics", "def test(classifier, data, labels):\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": data},\n y=labels,\n num_epochs=1,\n shuffle=False)\n eval_results = classifier.evaluate(input_fn=eval_input_fn)\n eval_results[\"F-Score\"] = 2 * eval_results[\"precision\"] * eval_results[\"recall\"] / (eval_results[\"precision\"] + eval_results[\"recall\"])\n# print(eval_results)\n return eval_results", "def evaluate_model(model, 
X_test, Y_test, category_names):\n\n print(\"Testing Performance\")\n print(classification_report(Y_test, model.predict(X_test), target_names=category_names))\n\n #Todo cat names", "def test_model(valid_features, valid_labels):\n\n #valid_features, valid_labels = load_preprocess_testset()\n loaded_graph = tf.Graph()\n \n with tf.Session(graph=loaded_graph) as sess:\n # Load model\n loader = tf.train.import_meta_graph(SAVE_MODEL_PATH + '.meta')\n loader.restore(sess, SAVE_MODEL_PATH)\n\n # Get Tensors from loaded model\n loaded_x = loaded_graph.get_tensor_by_name('x:0')\n loaded_y = loaded_graph.get_tensor_by_name('y:0')\n loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')\n loaded_logits = loaded_graph.get_tensor_by_name('logits:0')\n loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')\n \n\n acc = sess.run(\n loaded_acc,\n feed_dict={loaded_x: valid_features, loaded_y: valid_labels, loaded_keep_prob: 1.0})\n\n return acc", "def evaluate(X_test, y_test):\n # batch size is 16 for evaluation\n batch_size = 16\n\n # Load Model\n model = load_model('model/model.h5')\n return model.evaluate(X_test, y_test, batch_size, verbose = 1)", "def evaluate(model, g, val_nid, device):\n model.eval()\n nfeat = g.ndata['features']\n labels = g.ndata['labels']\n with th.no_grad():\n pred = model.module.inference(g, nfeat, device, args.batch_size, args.num_workers)\n model.train()\n test_acc = Accuracy()\n return test_acc(th.softmax(pred[val_nid], -1), labels[val_nid].to(pred.device))", "def evaluate_from_featurizations(self, sess, featurizations, y):\n feed_dict = {self.featurizations: featurizations, self.y: y}\n loss, acc = sess.run([self.loss, self.accuracy], feed_dict = feed_dict)\n self.logger.info(\"Model was evaluated from featurizations\")\n return loss, acc", "def eval_input_fn(features, labels, batch_size):\n features = dict(features)\n if labels is None:\n # No labels, use only features. 
(in prediction)\n inputs = features\n else:\n inputs = (features, labels)\n print(inputs)\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset", "def eval_input_fn(features, labels, batch_size):\n #features=dict(features)\n features = dataframetodict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset", "def assess_model(model, test_data, label):\n return model.score(test_data,label)", "def evaluate_model(model, X_test, y_test, category_names):\n y_pred = model.predict(X_test)\n labels = np.unique(y_pred)\n print(labels)\n #print out score for each class and mean scores, including precision, recall, f1 score\n print(classification_report(y_test.values, y_pred, target_names=category_names.values))", "def evaluate_model(model, train_input, train_target, test_input, test_target, loss, save_plot, mname=None):\n # Evalute Model in train set\n epochs_number = len(loss)\n output = model.forward(train_input)\n train_loss = model.compute_loss(output, train_target).item()\n train_error = compute_number_error(output, train_target).item()\n\n print(\"\\nTraining Loss: \", train_loss)\n print(\"Training Number of errors: \", train_error)\n\n id_class_train = output.argmax(dim=1)\n if save_plot:\n plot_result(train_input, train_target, id_class_train, fname=mname)\n plot_loss(range(0, epochs_number), loss, fname=mname)\n\n # Deactivate dropout to test models\n model.enable_dropout(False)\n \n # Evaluate Model in test set\n output = model.forward(test_input)\n test_loss = model.compute_loss(output, test_target).item()\n test_error = compute_number_error(output, test_target).item()\n\n print(\"\\nTest Loss: \", test_loss)\n print(\"Test Number of errors: \", test_error)\n\n\n id_class_test = output.argmax(dim=1)\n if save_plot:\n plot_result(test_input, test_target, id_class_test, train=False, fname=mname)\n \n return [train_loss, train_error, test_loss, test_error]", "def evaluate(self, eval_data, eval_labels, eval_input_fn):\n raise NotImplementedError(\"Method must be implemented by subclass\")", "def evaluate_model(model,test_inputs,test_labels,model_mode):\n\n if model_mode == \"classification\":\n y_pred = model.predict(test_inputs)\n print(\"Accuracy score: \", accuracy_score(test_labels, y_pred))\n #print(\"F1 score: \", f1_score(test_labels,y_pred, average='weighted'))\n\n conf_mx = confusion_matrix(test_labels, y_pred)\n #print(conf_mx)\n plt.matshow(conf_mx, cmap = plt.cm.jet)\n plt.show()\n\n if model_mode == \"regression\":\n y_pred = model.predict(test_inputs)\n print(\"Mean absolute error: \", mean_absolute_error(test_labels, y_pred))", "def train(self, features, labels):\n self.train_features = features\n self.train_labels = labels\n #raise NotImplementedError", "def evaluate(model, test_files):\n print(\"Running predictions.\")\n models = load_model(model)\n predictions = predict(models, test_files)\n\n # # write predictions to file\n # write_predictions(\"evaluate_out.json\",predictions)\n evaluate_individual(predictions, test_files, models)\n 
evaluate_overall(predictions)", "def eval_input_fn(features, labels, batch_size):\n if labels is None:\n # No labels, use only features.\n inputs = dict(features)\n else:\n inputs = (dict(features), labels)\n # Convert inputs to a tf.dataset object.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n # Return the read end of the pipeline.\n return dataset.make_one_shot_iterator().get_next()", "def evaluate_model(model, X_test, Y_test, category_names): \n # predict on the X_test\n y_pred = model.predict(X_test)\n \n # build classification report on every column\n performances = []\n for i in range(len(category_names)):\n performances.append([f1_score(Y_test.iloc[:, i].values, y_pred[:, i], average='micro'),\n precision_score(Y_test.iloc[:, i].values, y_pred[:, i], average='micro'),\n recall_score(Y_test.iloc[:, i].values, y_pred[:, i], average='micro')])\n # build dataframe\n performances = pd.DataFrame(performances, columns=['f1 score', 'precision', 'recall'],\n index = category_names) \n return performances", "def train(self, features, labels, seed=None):\n raise NotImplementedError('Not implemented')", "def evaluate_model(model, X_test, Y_test, category_names): \n \n Y_pred = model.predict(X_test)\n print(classification_report(Y_test, Y_pred))\n display_results(Y_test, Y_pred)", "def clf_eval():\n y_true = np.random.randint(2, size=10000)\n y_pred = np.clip(np.random.normal(0.25, 0.3, size=y_true.shape) + y_true * 0.5, 0, 1)\n\n model_eval = ClassificationEvaluation(\n y_true=y_true,\n y_pred=y_pred,\n class_names=['a', 'b'],\n model_name='foo',\n )\n return model_eval", "def train_and_eval(self):\n self.__create_indexes()\n model = None\n model = None\n if self.model == 'OMult':\n model = OMult(self.kwargs)\n elif self.model == 'ConvO':\n model = ConvO(self.kwargs)\n elif self.model == 'QMult':\n model = QMult(self.kwargs)\n elif self.model == 'ConvQ':\n model = ConvQ(self.kwargs)\n elif self.model == 'OMultBatch':\n model = OMultBatch(self.kwargs)\n elif self.model == 'ConvOBatch':\n model = ConvOBatch(self.kwargs)\n elif self.model == 'QMultBatch':\n model = QMultBatch(self.kwargs)\n elif self.model == 'ConvQBatch':\n model = ConvQBatch(self.kwargs)\n else:\n print(self.model, ' is not valid name')\n raise ValueError\n\n self.train(model)\n self.eval(model)", "def evaluate_model(model, X_test, Y_test, category_names):\n\n y_pred = model.predict(X_test)\n Y_test_as_array = np.array(Y_test)\n for i in range(len(category_names)):\n print(\"{} accuracy {} precision {} recall {} f1 {}\".format(\n category_names[i],\n (y_pred[:, i] == Y_test_as_array[:, i]).mean(), # accuracy\n precision_score(Y_test_as_array[:, i], y_pred[:, i], average=None), # precision\n recall_score(Y_test_as_array[:, i], y_pred[:, i], average=None), # recall\n f1_score(Y_test_as_array[:, i], y_pred[:, i], average=None) # f1\n ))\n print(\"mean accuracy {}\".format((y_pred == Y_test_as_array).mean().mean()))", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n return classification_report(Y_test, y_pred, target_names = category_names)", "def evaluate(model, criterion, X, labels, batch_size=None):\n loss = 0\n error = 0\n\n # In case the data is small enough to not require any batch computation we \n # implement : batch size = data set size\n if batch_size == None:\n batch_size = X.size(0)\n \n for i in range(0, X.size(0), batch_size):\n \n # 
Select batch\n x = X[i:i+batch_size, :]\n lab = labels[i:i+batch_size] \n \n # Compute output to obtain loss\n output = model.forward(x)\n loss += criterion.forward(output, lab.long())\n\n # Compute predictions to obtain errors\n pred = convert_to_predictions(output)\n error += (pred != lab).sum()\n\n # Average loss over batches and error over samples\n loss = loss/(len(X)/batch_size)\n error = error/len(X)\n\n return loss, error", "def evaluate_model(model, X_test, Y_test, category_names):\n\n # Predict labels using model\n y_pred1 = model.predict(X_test)\n\n # Generate accuracy report\n accuracy = [[(y_pred1[:, i] == Y_test.values[:, i]).mean(),\n *precision_recall_fscore_support(\n Y_test.values[:, i], y_pred1[:, i], average='weighted')]\n for i in range(y_pred1.shape[1])]\n accuracy = np.array(accuracy)[:, :-1]\n accuracy = (accuracy * 10000).astype(int) / 100\n scores1= pd.DataFrame( data=accuracy, index=list(Y_test), columns=['Accuracy', 'Precision', 'Recall', 'F-score'])\n print(scores1)\n return scores1", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n print(classification_report(Y_test, y_pred, target_names=category_names))\n pass", "def model_fn(self, features, labels, mode):\n col_count, row_count = self.metadata_.get_matrix_size(0)\n sequence_size = self.metadata_.get_sequence_size()\n output_dim = self.metadata_.get_output_size()\n\n # Construct a neural network with 0 hidden layer\n input_layer = tf.reshape(features[\"x\"],\n [-1, sequence_size*row_count*col_count])\n\n # Replace missing values by 0\n input_layer = tf.where(tf.is_nan(input_layer),\n tf.zeros_like(input_layer), input_layer)\n\n input_layer = tf.layers.dense(inputs=input_layer, units=64, activation=tf.nn.relu)\n input_layer = tf.layers.dense(inputs=input_layer, units=128, activation=tf.nn.relu)\n input_layer = tf.layers.dropout(inputs=input_layer, rate=0.15, training=mode == tf.estimator.ModeKeys.TRAIN)\n input_layer = tf.layers.dense(inputs=input_layer, units=64, activation=tf.nn.relu)\n input_layer = tf.layers.dropout(inputs=input_layer, rate=0.15, training=mode == tf.estimator.ModeKeys.TRAIN)\n\n logits = tf.layers.dense(inputs=hidden_layer, units=output_dim)\n sigmoid_tensor = tf.nn.sigmoid(logits, name=\"sigmoid_tensor\")\n\n predictions = {\n # Generate predictions (for PREDICT and EVAL mode)\n \"classes\": tf.argmax(input=logits, axis=1),\n # \"classes\": binary_predictions,\n # Add `sigmoid_tensor` to the graph. 
It is used for PREDICT and by the\n # `logging_hook`.\n \"probabilities\": sigmoid_tensor\n }\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # Calculate Loss (for both TRAIN and EVAL modes)\n # For multi-label classification, a correct loss is sigmoid cross entropy\n loss = sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)\n\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.AdamOptimizer()\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n # Add evaluation metrics (for EVAL mode)\n assert mode == tf.estimator.ModeKeys.EVAL\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels, predictions=predictions[\"classes\"])}\n return tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)", "def train_and_eval(model_dir, model_type, train_steps, train_data, test_data, train_embeddings_file_name, test_embeddings_file_name, positive_labels, combination_method, method):\n \n index_map, weights = wvd.load(train_embeddings_file_name)\n #Get positive labels\n positive_labels = positive_labels.split(',')\n \n print(\"reading data...\")\n train_file_name = train_data \n df_train = pd.read_table(train_file_name, dtype={'node1':str, 'node2':str})\n df_train = df_train.sample(frac=1)\n\n # remove NaN elements\n df_train = df_train.dropna(how='any', axis=0)\n \n df_train[LABEL_COLUMN] = (\n df_train[\"label\"].apply(lambda x: label_func(x, positive_labels))).astype(int)\n\n model_dir = tempfile.mkdtemp() if not model_dir else model_dir\n print(\"model directory = %s\" % model_dir)\n \n train_x, _, train_y, _ = get_input(df_train, weights, index_map, combination_method)\n \n print(\"\\nBuilding model...\")\n m = build_estimator(model_dir, model_type, weights, index_map, combination_method)\n \n print(\"\\nTraining model...\")\n if model_type == \"regressor\":\n m.fit(train_x, train_y, n_epoch=train_steps, show_metric=True, snapshot_epoch=False)\n \n print(\"\\nTesting model...\")\n index_map, weights = wvd.load(test_embeddings_file_name)\n \n print(\"reading data...\")\n test_file_name = test_data\n df_test = pd.read_table(test_file_name, dtype={'node1':str, 'node2':str})\n df_test = df_test.sample(frac=1)\n\n # remove NaN elements\n df_test = df_test.dropna(how='any', axis=0)\n \n df_test[LABEL_COLUMN] = (\n df_test[\"label\"].apply(lambda x: label_func(x, positive_labels))).astype(int)\n \n if model_type == \"regressor\":\n test_x, test_original_y, test_index_y, test_original_x = get_input(df_test, weights, index_map, combination_method, data_purpose='test')\n node_sets = get_node_sets(test_original_x, test_original_y)\n \n print(\"\\nPredicting:\")\n model_predictions = m.predict(test_x)\n model_predictions = list(model_predictions)\n #Covert back to 1 and 0\n predictions = []\n model_predictions_probs = []\n for prediction in model_predictions:\n predictions.append(prediction[1]) #non-thresholded value of positve class\n model_predictions_probs.append(prediction[1])\n \n k = int(len([i for i in test_original_y if i == 1]) * 0.3)\n do_evaluations([x for x in test_original_x], [y for y in test_original_y], [p for p in predictions], k, node_sets, \n positive_labels, model=m, weights=weights, index_map=index_map, combination_method=combination_method)\n #Uncomment to log ranked links\n 
#log_predictions([x for x in test_original_x], [y for y in test_original_y], [p for p in predictions], k, node_sets, \n # positive_labels, model=m, weights=weights, index_map=index_map, combination_method=combination_method,\n # outfilename=combination_method, method=method)", "def evaluate(model, optimizer, loss_function, loader, device, labels, log_every_n=10):\n\n model.eval()\n\n batch_wise_true_labels = []\n batch_wise_predictions = []\n\n loss_history = []\n running_loss = 0.\n running_loss_history = []\n\n with torch.no_grad(): # Disable gradient computation - required only during training\n for i, batch in tqdm(enumerate(loader)):\n\n logits = model(batch[0].to(device), batch[1]).squeeze()\n loss = loss_function(logits, batch[2].to(device))\n loss_history.append(loss.item())\n\n running_loss += (loss_history[-1] - running_loss) / (i + 1) # Compute rolling average\n\n running_loss_history.append(running_loss)\n\n predictions = torch.sigmoid(logits)\n\n batch_wise_true_labels.append(batch[2].view(-1).tolist())\n batch_wise_predictions.append(predictions.view(-1).tolist())\n\n # flatten the list of predictions using itertools\n all_true_labels = list(chain.from_iterable(batch_wise_true_labels))\n all_predictions = list(chain.from_iterable(batch_wise_predictions))\n all_predictions = [1 if p > 0.5 else 0 for p in all_predictions]\n\n\n print(\"Evaluation Loss: \", running_loss)\n # Now we can generate a classification report\n print(\"Classification report after epoch:\")\n print(f1_score(all_true_labels, all_predictions, average='micro'))\n print(classification_report(all_true_labels, all_predictions, labels=labels))\n\n return loss_history, running_loss_history", "def predict(self, test_set, test_labels):\n\n with tf.Session() as self.tf_session:\n self.tf_saver.restore(self.tf_session, self.models_dir + self.model_name)\n return self.accuracy.eval({self.input_data: test_set, self.input_labels: test_labels})", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n print(classification_report(Y_test, y_pred, target_names=Y_test.keys()))", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred_grid = model.predict(X_test)\n print(\n classification_report(Y_test.values, y_pred_grid, target_names=category_names)\n )", "def main():\n data = load_data()\n analyze_features(data['full_features'])\n model = train(data)\n\n with open('model.pickle', 'wb') as f:\n pickle.dump(model, f)\n evaluate(model, data)", "def evaluate_model(model, X_test, Y_test, category_names):\n \n Y_pred = model.predict(X_test)\n \n print(classification_report(Y_test.values, Y_pred, target_names=category_names))", "def evaluate_model(model, ds_valid):\n print(\"-- Evaluate Model:\")\n for features, labels in ds_valid:\n valid_step(model, features, labels)\n logs = \"\\nValid Loss: {}, Valid Accuracy: {}\"\n tf.print(tf.strings.format(logs, (valid_loss.result(), valid_metric.result())))\n valid_loss.reset_states()\n train_metric.reset_states()\n valid_metric.reset_states()", "def evaluate(true_labels, predicted_labels):\n accuracy = np.round(metrics.accuracy_score(true_labels, predicted_labels), \n 2)\n precision = np.round(metrics.precision_score(true_labels, predicted_labels, \n average='weighted'), 2)\n recall = np.round(metrics.recall_score(true_labels, predicted_labels,\n average='weighted'), 2)\n f1 = np.round(metrics.f1_score(true_labels, predicted_labels, \n average='weighted'), 2)\n \n return accuracy, precision, recall, f1", "def model(\n self, features: 
Dict[str, tf.Tensor], labels: tf.Tensor, mode: str\n ) -> tf.Tensor:\n # set flag if the model is currently training\n is_training = mode == tf.estimator.ModeKeys.TRAIN\n\n # get input data\n image = features[\"input\"]\n # initialise model architecture\n logits = _create_model(image, self.config[\"keep_prob\"], is_training)\n\n # define model predictions\n predictions = {\n \"class\": tf.argmax(input=logits, axis=1),\n \"probabilities\": tf.nn.softmax(logits),\n }\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n # define what to output during serving\n export_outputs = {\n \"labels\": tf.estimator.export.PredictOutput(\n ## {\"id\": features[\"id\"], \"label\": predictions[\"class\"]}\n {\"label\": predictions[\"class\"]}\n )\n }\n return tf.estimator.EstimatorSpec(\n mode, predictions=predictions, export_outputs=export_outputs\n )\n\n # calculate loss\n labels = tf.reshape(labels, [-1, 1])\n loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n\n # add summaries for tensorboard\n tf.summary.scalar(\"loss\", loss)\n tf.summary.image(\"input\", tf.reshape(image, [-1, 28, 28, 1]))\n\n if mode == tf.estimator.ModeKeys.EVAL:\n # create a evaluation metric\n summaries_dict = {\n \"val_accuracy\": tf.metrics.accuracy(\n labels, predictions=predictions[\"class\"]\n )\n }\n return tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, eval_metric_ops=summaries_dict\n )\n\n # assert only reach this point during training mode\n assert mode == tf.estimator.ModeKeys.TRAIN\n\n # collect operations which need updating before back-prob e.g. Batch norm\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n # create learning rate variable for hyper-parameter tuning\n lr = tf.Variable(\n initial_value=self.config[\"learning_rate\"], name=\"learning-rate\"\n )\n\n # initialise optimiser\n optimizer = tf.train.AdamOptimizer(lr)\n\n # Do these operations after updating the extra ops due to BatchNorm\n with tf.control_dependencies(extra_update_ops):\n train_op = optimizer.minimize(\n loss,\n global_step=tf.train.get_global_step(),\n colocate_gradients_with_ops=True,\n )\n\n return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)", "def evaluate(parser):\n required_args = (\n 'train_tfrecord',\n 'valid_tfrecord',\n 'predicted_data',\n 'actual_data',\n )\n cli_args = add_all_args(parser, EVALUATION, *required_args)\n evaluator = Evaluator(\n input_shape=cli_args.input_shape,\n model_configuration=cli_args.model_cfg,\n train_tf_record=cli_args.train_tfrecord,\n valid_tf_record=cli_args.valid_tfrecord,\n classes_file=cli_args.classes,\n max_boxes=cli_args.max_boxes,\n iou_threshold=cli_args.iou_threshold,\n score_threshold=cli_args.score_threshold,\n )\n predicted = pd.read_csv(cli_args.predicted_data)\n actual = pd.read_csv(cli_args.actual_data)\n evaluator.calculate_map(\n prediction_data=predicted,\n actual_data=actual,\n min_overlaps=cli_args.min_overlaps,\n display_stats=cli_args.display_stats,\n save_figs=cli_args.save_figs,\n plot_results=cli_args.plot_stats,\n )", "def evaluate_model(model, X_test, y_test):\n # run prediction with test data\n y_pred = model.predict(X_test)\n\n # print precision, recall and f1-score\n i = 0\n for col in y_test:\n print('Evaluation for \"{}\": \\n {} \\n\\n'.format(col, classification_report(y_test[col], y_pred[:,i])))\n i += 1", "def evaluate_model(model, X_test, Y_test, category_names):\n \n y_preds = model.predict(X_test)\n predictions = pd.DataFrame(data=y_preds, columns=Y_test.columns, index=Y_test.index)\n for col in 
Y_test.columns:\n print(classification_report(predictions[col],Y_test[col]))", "def evaluate_model(model, X_test, Y_test, category_names):\n\n logging.info(\"run evaluate_model\")\n\n # find current foler path for savings\n folder_path = os.path.dirname(__file__)\n\n # predict outputs on test data\n Y_pred = model.predict(X_test)\n\n # create classification report with precision, recall, and F1 score for each categories\n clf_report_df = pd.DataFrame(classification_report(Y_test, Y_pred,\n target_names=category_names, output_dict=True)).T\n clf_report_df.to_markdown(buf=os.path.join(folder_path,'test','classification_report.md'), mode='w')\n\n # calculate confusion matrix for each categories and save corresponding heatmap plots\n conf_matrix_df = multilabel_confusion_matrix(Y_test, Y_pred)\n plot_confusion_matrix(conf_matrix_df, category_names,\n os.path.join(folder_path,'test','confusion_matrix.png'))", "def evaluate_model(model, X_test, Y_test, category_names):\n \n \n yPredictorTest = model.predict(X_test)\n \n for idx, col in enumerate(Y_test):\n print(col, classification_report(Y_test[col], yPredictorTest[:, idx]))", "def evaluate(self, sess, data_gen):\n pred_labels = np.array([], dtype=np.intp)\n labels = np.array([], dtype=np.intp)\n for inputs, seq_length, batch_labels in data_gen:\n feed_dict = self.create_feed_dict(inputs, seq_length, batch_labels,\n self.config.dropout)\n pred = sess.run(self.pred, feed_dict)\n pred = softmax(pred, -1)\n pred = binarize(pred[:, 1], self.threshold)\n\n pred_labels = np.concatenate((pred_labels, pred))\n labels = np.concatenate((labels, batch_labels))\n\n score = f1_score(labels, pred_labels)\n\n return score, pred_labels", "def evaluate(self,\n model,\n x=None,\n y=None,\n batch_size=None,\n verbose=1,\n sample_weight=None,\n steps=None,\n callbacks=None,\n **kwargs):\n raise NotImplementedError()", "def train(self, labeled_featuresets):\n\n\t\t# generate training file from labeled_featuresets\n\t\tself.__text_converter = Text2svmConverter(self.__converter_arguments)\n\t\tself.__convert_labeled_featuresets(labeled_featuresets, self.__svmFilePath)\n\n\t\t# train the model\n\t\tself.__model = train_converted_text(self.__svmFilePath, self.__text_converter, grid_arguments=self.__grid_arguments, feature_arguments=self.__feature_arguments, train_arguments=self.__liblinear_arguments)", "def predict_and_eval_in_val(self, sess, tst_reader, metrics):\n raise NotImplementedError(\"\"\"please customize predict_and_eval_in_val\"\"\")", "def evaluate(self, data, labels, batch_size=32, max_seq_len=128):\n test_dataloader = setup_dataloader(data, labels, max_seq_len, batch_size)\n accuracy = 0\n \n for batch in tqdm(test_dataloader, desc=\"Iteration\"):\n with torch.no_grad():\n labels = batch[\"labels\"]\n batch = {k: t.to(self.device) for k, t in batch.items() if k != \"labels\"}\n outputs = self.model(**batch)\n logits = outputs[0]\n accuracy += calculate_accuracy(logits, labels)\n \n batch = {k: t.detach().cpu() for k, t in batch.items()}\n del batch\n torch.cuda.empty_cache()\n\n accuracy = accuracy / len(test_dataloader)\n return accuracy", "def evaluate_model(model, instances, labels):\n prediction = model.predict(np.array(instances))\n\n prediction_inv = inverse_normalization(prediction)\n labels_inv = inverse_normalization(labels)\n mse = mean_squared_error(labels_inv, prediction_inv)\n\n return mse", "def evaluate_model(model, X_test, Y_test, category_names):\n\n #predict on test data\n y_pred = model.predict(X_test)\n y_pred_pd = pd.DataFrame(y_pred, 
columns = category_names)\n\n print(\"\\nBest Parameters:\", model.best_params_)\n\n for column in category_names:\n print('--------------------------------------------------------\\n')\n print(str(column))\n print(classification_report(Y_test[column], y_pred_pd[column]))", "def evaluate_model(estimator: es.Estimator, speech_labels: List[str], entries, input_fn_eval) -> Dict[str, float]:\n # Get predictions\n predictions = estimator.predict(input_fn=input_fn_eval)\n\n # Get probabilities of each predicted class\n probs = [pred[\"probabilities\"] for pred in predictions]\n num_of_examples = len(probs)\n targets = [entry[1] for entry in entries] # The ground truth transcript\n\n total_wer, total_cer = 0., 0.\n greedy_decoder = decoder.DeepSpeechDecoder(speech_labels, blank_index=28)\n for prob, target in zip(probs, targets):\n decode = greedy_decoder.decode(prob)\n total_cer += greedy_decoder.cer(decode, target)\n total_wer += greedy_decoder.wer(decode, target)\n\n total_cer /= num_of_examples\n total_wer /= num_of_examples\n global_step = estimator.get_variable_value(tf.GraphKeys.GLOBAL_STEP)\n eval_results = {\n _WER_KEY: total_wer,\n _CER_KEY: total_cer,\n tf.GraphKeys.GLOBAL_STEP: global_step\n }\n return eval_results", "def evaluate(self, ts_loader=None):\n # start evaluation of the model\n self.tr_model.eval()\n samples, correct = 0, 0\n \n # check if a dataloader was provided for evaluation\n loader = self.ts_loader if not ts_loader else ts_loader\n \n with torch.no_grad():\n for x, y in loader:\n \n x, y = x.to(device), y.to(device)\n \n y_ = self.tr_model(x)\n _, predicted = torch.max(y_.detach(), 1)\n \n samples += y.shape[0]\n correct += (predicted == y).sum().item()\n \n # return evaluation statistics\n return {\"accuracy\" : correct/samples}", "def model_fn(features, labels, mode):\n\n # Build a Graph that computes predictions from the inference model.\n logits = inference(features, args.hidden1, args.hidden2)\n\n tensors = {}\n # Add to the Graph the Ops for loss calculation.\n if mode == ModeKeys.INFER:\n softmax = tf.nn.softmax(logits)\n tensors['digit'] = tf.argmax(softmax, 1)\n loss_op = None\n else:\n loss_op = loss(logits, labels)\n tensors['loss'] = loss_op\n tf.scalar_summary('loss', loss_op)\n\n # Add to the Graph the Ops for accuracy calculation.\n if mode == ModeKeys.EVAL:\n accuracy_op = evaluation(logits, labels)\n tensors['accuracy'] = accuracy_op\n tf.scalar_summary('training/hptuning/metric', accuracy_op)\n\n # Add to the Graph the Ops that calculate and apply gradients.\n if mode == ModeKeys.TRAIN:\n global_step = framework.get_global_step()\n # Create the gradient descent optimizer with the given learning rate.\n optimizer = tf.train.GradientDescentOptimizer(args.learning_rate)\n # Create a variable to track the global step.\n # Use the optimizer to apply the gradients that minimize the loss\n # (and also increment the global step counter) as a single training step.\n train_op = optimizer.minimize(loss_op, global_step=global_step)\n # Add streaming means.\n else:\n train_op = None\n\n return tensors, loss_op, train_op", "def evaluate(clf, dataset, feature_list, features, labels, num_iter, params):\n\n features_train, features_test, labels_train, labels_test = \\\n train_test_split(features, labels, test_size=0.3, random_state=42)\n\n\n\n precision_values = []\n recall_values = []\n accuracy_values = []\n print clf\n for i in xrange(0, num_iter):\n #print params\n clf = GridSearchCV(clf, params)\n clf.fit(features_train, labels_train)\n print 
'*****************************'\n print clf.best_estimator_\n print clf.best_params_\n\n clf = clf.best_estimator_\n #test_classifier(clf, dataset, feature_list)\n pred = clf.predict(features_test)\n precision_values.append(precision_score(labels_test, pred))\n recall_values.append(recall_score(labels_test, pred))\n accuracy_values.append(accuracy_score(labels_test, pred))\n print 'Recall score: ', mean(recall_values)\n print 'Precision score: ', mean(precision_values)\n print 'Accuracy score: ' , mean(accuracy_values)", "def evaluate_model(model, testset):\n\n # Sort data by top level label to ease inspection\n testset = testset.sort_using_layer(-1, reverse=True)\n\n # Feed the samples to the model to obtain each layers' activations\n v = testset.get_layer(0)\n hs = model.transform(v)[1:]\n\n # Read model weights\n ws = [params['w'] for params in model.parameters]\n del params\n\n # Take the (hidden) labels from the data set\n ls = testset.get_layers()[1:]\n\n # In each layer, reorder and invert neurons to match best with the labels\n for i in range(len(ls)):\n hs[i], ws[i] = align_with_labels(ls[i], hs[i], ws[i])\n del i\n\n # Measure correlations, etcetera\n metrics = compare(ls, hs)\n\n # Simply return a dict with all used variables\n return locals()", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n for x in range(0, len(category_names)):\n print(category_names[x])\n print(classification_report(Y_test[:,x], y_pred[:,x]))\n print(\"Accuracy: \" + str(accuracy_score(Y_test[:, x], y_pred[:, x])))", "def evaluate_model(model, X_test, y_test, category_names):\n # Predict for test set\n y_pred = model.predict(X_test)\n \n print(\"**** Scores for each category *****\\n\")\n for i in range(36):\n print(\"Scores for '{}':\".format(category_names[i]))\n print(classification_report(y_test.values[:,i], y_pred[:,i]))", "def eval_input_fn(features, labels, batch_size):\n features=dict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset", "def eval_input_fn(features, labels, batch_size):\n features=dict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset", "def eval_input_fn(features, labels, batch_size):\n features=dict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset", "def evaluate(model, iterator, f, ner_label, verbose = False):\n # set model to evaluation mode\n model.eval()\n\n y_true = []\n y_pred = []\n Words, Is_heads, Tags, Y, Y_hat = [], [], [], [], []\n with torch.no_grad():\n for i, batch in enumerate(iterator):\n words, input_ids, is_heads, tags, 
input_tags, entity_label, seqlens = batch\n\n _, _, y_hat = model(input_ids, input_tags, entity_label) # y_hat: (N, T)\n\n Words.extend(words)\n Is_heads.extend(is_heads)\n Tags.extend(tags)\n Y.extend(input_tags.numpy().tolist())\n Y_hat.extend(y_hat.cpu().numpy().tolist())\n ## gets results and save\n with open(\"temp\", 'w') as fout:\n for words, is_heads, tags, y_hat in zip(Words, Is_heads, Tags, Y_hat):\n y_hat = [hat for head, hat in zip(is_heads, y_hat) if head == 1]\n preds = [ner_label.idx2tag[hat] for hat in y_hat]\n if len(preds[1:-1]) > 0:\n y_pred.append(preds[1:-1])\n if len(tags.split()[1:-1]) > 0:\n y_true.append(tags.split()[1:-1])\n assert len(preds) == len(words.split()) == len(tags.split())\n for w, t, p in zip(words.split()[1:-1], tags.split()[1:-1], preds[1:-1]):\n fout.write(f\"{w} {t} {p}\\n\")\n fout.write(\"\\n\")\n\n assert len(y_pred) == len(y_true)\n\n # logging loss, f1 and report\n p, r, f1 = f1_score(y_true, y_pred)\n\n # metrics_str = \"; \".join(\"{}: {:05.2f}\".format(k, v) for k, v in metrics.items())\n # logging.info(\"- {} metrics: \".format(mark) + metrics_str)\n #\n # if verbose:\n # report = classification_report(true_tags, pred_tags)\n # logging.info(report)\n\n final = f + \".P%.4f_R%.4f_F%.4f\" %(p, r, f1)\n with open(final, 'w') as fout:\n result = open(\"temp\", \"r\").read()\n fout.write(f\"{result}\\n\")\n\n fout.write(f\"precision={p}\\n\")\n fout.write(f\"recall={r}\\n\")\n fout.write(f\"f1={f1}\\n\")\n if verbose:\n report = classification_report(y_true, y_pred)\n print(report)\n\n os.remove(\"temp\")\n\n print(\"precision=%.2f\"%p)\n print(\"recall=%.2f\"%r)\n print(\"f1=%.2f\"%f1)\n return p, r, f1", "def eval_input_fn(features, labels, batch_size):\n features = dict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset", "def evaluate(self, inputs: ByteTensor, targets: IntTensor, unused) -> float:\n assert isinstance(inputs, ByteTensor)\n assert inputs.shape[1] == self.feature_count\n assert isinstance(targets, IntTensor)\n assert targets.shape == (inputs.shape[0], )\n\n errors = 0\n examples = targets.shape[0]\n for i in range(examples):\n input = inputs[i]\n prediction = self.predict(input)\n if prediction[0] != targets[i].long():\n errors += 1\n accuracy = (examples - errors) / examples\n return accuracy", "def train_models(self, clf, silent, feature_names=None, target_names=None, live=False):\n X_train, X_test, y_train, y_test = self.X_train, self.X_test, self.y_train, self.y_test\n t0 = time()\n clf.fit(X_train, y_train)\n train_time = time() - t0\n pred = clf.predict(X_test)\n test_time = time() - t0\n accuracy = metrics.accuracy_score(y_test, pred)\n fbeta = metrics.fbeta_score(y_test, pred,1,labels=self.dataset['label'].unique(),average='weighted')\n name = clf.name[0]\n if False:\n score_stats = f'Model : {name} | Score : {accuracy} | F-beta : {fbeta}'\n print(score_stats)\n\n if self.best_score_ledger[name][0] < accuracy:\n last = self.best_score_ledger[name][0]\n print(name)\n self.best_score_ledger[name] = [accuracy,fbeta]\n score_stats = f'Model : {name} | Score : {accuracy} | F-beta : {fbeta}'\n print(self.stemmer, ' ', self.transform)\n print(score_stats)\n\n if accuracy > 
self.best_models[name] and last != 0.0 and self.tuning_depth in ['normal','maximal']:\n new_model,score = self.hyperparameter_tuning(name,clf)\n if score > accuracy:\n self.best_score_ledger[name][0] = score\n clf = new_model\n dump(clf, os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}'))\n\n\n\n if not silent:\n if hasattr(clf, 'coef_'):\n print(\"dimensionality: %d\" % clf.coef_.shape[1])\n print(\"density: %f\" % density(clf.coef_))\n\n if True and feature_names is not None:\n print(\"top 10 keywords per class:\")\n for i, label in enumerate(target_names):\n top10 = np.argsort(clf.coef_[i])[-10:]\n print(trim(\"%s: %s\" % (label, \" \".join(feature_names[top10]))))\n print()\n\n if True:\n print(\"classification report:\")\n print(metrics.classification_report(y_test, pred,\n target_names=target_names))\n\n if True:\n print(\"confusion matrix:\")\n print(metrics.confusion_matrix(y_test, pred))\n # if no model exists for the current settings, create one by default. Prevents issues if models are deleted.\n elif not os.path.exists(\n os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}')):\n dump(clf, os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}'))\n clf_descr = str(clf).split('(')[0]\n return clf_descr, accuracy, train_time, test_time", "def eval_model(model, x_test, y_test, batch_size=None):\n if batch_size is None:\n batch_size = 128\n\n loss, acc = model.evaluate(x_test, y_test, batch_size=batch_size)\n confusion_matrix_model(model, y_test, x_test)\n return loss, acc", "def build_and_evaluate(\n X, y, classifier=SGDClassifier,\n verbose=True, ngram_range=(1,1), test_size=0.2, max_features=None\n ):\n\n def build(classifier, X, y=None, ngram_range=(1,1), max_features=None):\n \"\"\"\n Inner build function that builds a single model.\n \"\"\"\n if isinstance(classifier, type):\n classifier = classifier()\n\n model = Pipeline([\n ('vectorizer', TfidfVectorizer(\n ngram_range=ngram_range,\n stop_words='english',\n max_features=max_features\n )),\n ('classifier', classifier),\n ])\n\n model.fit(X, y)\n return model\n\n # Label encode the targets\n labels = LabelEncoder()\n y = labels.fit_transform(y)\n\n # Begin evaluation\n if verbose: print(\"Building for evaluation\")\n X_train, X_test, y_train, y_test = tts(X, y, test_size=test_size)\n \n model = build(classifier, \n X_train, \n y_train, \n ngram_range=ngram_range, \n max_features=max_features\n )\n\n model.labels_ = labels\n\n if verbose: print(\"Classification Report:\\n\")\n\n y_pred = model.predict(X_test)\n print(clsr(y_test, y_pred, target_names=labels.classes_))\n print(confusion_matrix(y_test, y_pred))\n\n return model", "def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def predict(self, eval_features):\n\t\tinput_ids = torch.tensor(eval_features.input_ids, dtype=torch.long).to(self.device).unsqueeze(0)\n\t\tinput_mask = torch.tensor(eval_features.input_mask, dtype=torch.long).to(self.device).unsqueeze(0)\n\t\tsegment_ids = torch.tensor(eval_features.segment_ids, dtype=torch.long).to(self.device).unsqueeze(0)\n\t\t\n\t\twith torch.no_grad():\n\t\t\tlogits = self.model(input_ids, segment_ids, input_mask)\n\t\t\tlogits = logits.to(\"cpu\")\n\t\t\tsoftmax_logits = F.softmax(logits[0], dim=0).numpy()\n\t\t\tprint(\"softmax score : \", softmax_logits)\n# 
final_logits = list(zip(list(map(lambda x : self.reverse_label_map[np.ravel(np.where(softmax_logits==x))[0]], softmax_logits )), softmax_logits))\n\t\tpred = np.argmax(softmax_logits)\n\t\tprob = np.max(softmax_logits)\n\t\t\n\t\treturn pred , prob", "def evaluate(model, iterations, use_cuda=False):\n\n logger.debug(\"Allocating input and target tensors on GPU : %r\", use_cuda)\n\n # create the instance of data loader\n data_loader = DataLoaderMnist(cuda=use_cuda, seed=1, shuffle=False, train_batch_size=64, test_batch_size=100)\n\n model.eval()\n total = 0\n correct = 0\n current_iterations = 0\n\n with torch.no_grad():\n for inputs, labels in data_loader.test_loader:\n inputs, labels = inputs.to(data_loader.device), labels.to(data_loader.device)\n output = model(inputs)\n current_iterations += 1\n _, predicted = torch.max(output.data, dim=1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n if iterations is not None:\n if current_iterations >= iterations:\n break\n\n accuracy = correct / total\n return accuracy", "def eval_input_fn(features, labels, batch_size):\n features = dict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n return dataset", "def evaluate(self, predicted_df):\n logging.info(\"Evaluating model: {}\".format(self.model_type))\n y_true = predicted_df[\"user_label\"].as_matrix()\n y_pred = predicted_df[\"label\"].as_matrix()\n\n scores_cols = [col for col in predicted_df.columns if col.startswith(\"scores_\")]\n print(\"scores_cols: {}\".format(scores_cols))\n\n y_pred_scores = predicted_df[scores_cols].copy().fillna(value=0).as_matrix()\n print(\"predicted scores: {}\".format(y_pred_scores))\n y_true_scores = []\n for lab in predicted_df[\"user_label\"]:\n trues = [0 for _ in range(len(scores_cols))]\n if \"scores_\"+lab in scores_cols:\n trues[scores_cols.index(\"scores_\"+lab)] = 1\n y_true_scores.append(trues)\n print(\"true scores: {}\".format(y_true_scores))\n y_true_scores = np.array(y_true_scores)\n\n performance = {\"model\": self.model_type, \"description\": self.description}\n if 'categorical_accuracy' in self.metrics:\n logging.info(\"Calculating categorical accuracy for {}\".format(self))\n performance['categorical_accuracy'] = sklearn.metrics.accuracy_score(y_true,\n y_pred) # np.mean(y_pred == y_true)\n if 'fmeasure' in self.metrics:\n logging.info(\"Calculating fmeasure for {}\".format(self))\n performance['fmeasure'] = sklearn.metrics.f1_score(y_true, y_pred, average=self.metrics_average)\n if 'MRR' in self.metrics:\n logging.info(\"Calculating MRR for {}\".format(self))\n performance['MRR'] = sklearn.metrics.label_ranking_average_precision_score(y_true_scores, y_pred_scores)\n logging.info(\"Calculated performance: {}\".format(performance))\n print(performance)\n return pd.DataFrame(performance, index=[0])", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n # print the metrics\n for i, col in enumerate(category_names):\n print('{} category metrics: '.format(col))\n print(classification_report(Y_test.iloc[:,i], y_pred[:,i]))", "def trainAndEvaluate(trainDataFile, devDataFile, classifier, average):\n\n ids, instances, labels, features, classes = readArffFile(trainDataFile)\n\n startTime = 
time.time()\n\n classifier = classifier.lower()\n if classifier == \"svc\" or classifier == \"svm\":\n print(\"Using SVM\")\n clf = LinearSVC()\n elif classifier == \"nb\":\n print(\"Using Naive Bayes\")\n clf = MultinomialNB()\n elif classifier.lower() == \"nbboost\" or classifier.lower() == \"nbboosted\":\n print(\"Using Boosted Naive Bayes\")\n clf = MultinomialNB()\n clf = AdaBoostClassifier(clf)\n elif classifier == \"1r\":\n print(\"Sorry, 1R / LinearRegression isn't working right now\")\n exit()\n clf = LinearRegression(copy_X=False,fit_intercept=True, normalize=False)\n elif classifier == \"0r\":\n print(\"Using 0R\")\n from collections import Counter\n mostCommonTrainingClass = Counter(labels).most_common(1)[0][0]\n else:\n print(\"Invalid classifier choice.\")\n return\n\n print(\"Training the model\")\n\n if classifier != \"0r\":\n clf.fit(instances, labels)\n\n timeForTrain = time.time() - startTime\n numTrainInstances = len(instances)\n\n \"\"\"\n Testing and evaluating the model\n \"\"\"\n\n # Throw away the features and classes, we've already read them in.\n ids, instances, labels, _, _ = readArffFile(devDataFile)\n\n startTime = time.time()\n\n print(\"Testing the model\")\n numCorrect = 0\n numWrong = 0\n lenInstances = len(instances)\n predicted = []\n for i in range(lenInstances):\n # Status update of how it's going.\n if i % 1000 == 0:\n print(\"\\r\" + str(i).zfill(len(str(lenInstances))) + \"/\" + str(lenInstances) + \" \", end=\"\")\n instance = instances[i]\n label = labels[i]\n\n if classifier == \"0r\":\n res = mostCommonTrainingClass\n else:\n res = predictPrint(clf, instance)\n predicted.append(res)\n # print(\"-- Predicted label: {} || Correct label: {} --\". format(res, label))\n if res == label:\n numCorrect += 1\n else:\n numWrong += 1\n print()\n\n timeForTest = time.time() - startTime\n\n predicted = np.array(predicted)\n outName = outputFileName + classifier.upper() + dataSet + \".csv\"\n writeOutput(ids, predicted, outName)\n numDevInstances = len(instances)\n\n\n \"\"\"\n Printing various evaluation metrics.\n \"\"\"\n # report = classification_report(labels, predicted, target_names=classes)\n report = parameterizableReport(labels, predicted, beta=0.5, target_names=classes, averageType=average)\n print(report)\n print()\n # print(classification_report(labels, predicted, target_names=classes))\n\n \"\"\"\n print(\"Number of training instances: {}\".format(numTrainInstances))\n print(\"Number of dev instances: {}\".format(numDevInstances))\n print()\n\n print(\"Number of correct classifications: {}\".format(numCorrect))\n print(\"Number of wrong classifications: {}\".format(numWrong))\n print(\"Percentage of correct classifications: {0:.2f}%\".format(numCorrect*100/(numCorrect+numWrong)))\n print()\n \"\"\"\n\n print(\"Time taken to train the model: {0:.2f} sec\".format(timeForTrain))\n print(\"Time taken to test the model: {0:.2f} sec\".format(timeForTest))\n print()\n\n confMatrix = confusion_matrix(labels, predicted)\n if classifier == \"nb\":\n title = \"Naive Bayes\"\n elif classifier == \"svm\" or classifier == \"svc\":\n title = \"Support Vector Machine\"\n title += \" \" + dataSet\n plot_confusion_matrix(confMatrix, classes, title=title, normalize=True)", "def evaluate_model(model_name, y_true, y_pred):\n\n # Calculate performance metrics\n rmse_eval = evaluate_rmse(y_true, y_pred)\n mae_eval = evaluate_mae(y_true, y_pred) \n r2_eval = evaluate_r2(y_true, y_pred)\n\n # Print results\n print_evaluation(model_name, mae_eval, rmse_eval, r2_eval)", 
"def evaluate(self, prediction_fn):\n pass", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\t\ttf.logging.info(\"*** Features ***\")\n\t\tfor name in sorted(features.keys()):\n\t\t\ttf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n\t\tis_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n\t\tinput_ids=[]\n\t\tinput_mask=[]\n\t\tsegment_ids=[]\n\t\tmask_lm_info = []\n\t\tif is_training:\n\t\t\tinput_ids = [features[\"rewrite_query_ids\"], features[\"doc0_ids\"], features[\"doc1_ids\"], features[\"raw_query_ids\"]]\n\t\t\tinput_mask = [features[\"rewrite_query_mask\"], features[\"doc0_mask\"], features[\"doc1_mask\"], features[\"raw_query_mask\"]]\n\t\t\tsegment_ids = [features[\"rewrite_query_segment_ids\"], features[\"doc0_segment_ids\"], features[\"doc1_segment_ids\"], features[\"raw_query_segment_ids\"]]\n\t\t\teffective_mask = [features[\"effective_rewrite_query_mask\"], features[\"effective_doc0_mask\"], features[\"effective_doc1_mask\"], features[\"effective_raw_query_mask\"]]\n\t\telif is_eval:\n\t\t\tinput_ids = [features[\"query_ids\"], features[\"docx_ids\"], 0, features[\"query_ids\"]]\n\t\t\tinput_mask = [features[\"query_mask\"], features[\"docx_mask\"], 0, features[\"query_mask\"]]\n\t\t\tsegment_ids = [features[\"query_segment_ids\"], features[\"docx_segment_ids\"], 0, features[\"query_segment_ids\"]]\n\t\t\teffective_mask = [features[\"effective_query_mask\"], features[\"effective_docx_mask\"], 0, features[\"effective_query_mask\"]]\n\t\telif is_output:\n\t\t\tinput_ids=[features[\"input_ids\"], features[\"input_ids\"], features[\"input_ids\"], features[\"input_ids\"]]\n\t\t\tinput_mask = [features[\"input_mask\"], features[\"input_mask\"], features[\"input_mask\"], features[\"input_mask\"]]\n\t\t\tsegment_ids = [features[\"segment_ids\"], features[\"segment_ids\"], features[\"segment_ids\"], features[\"segment_ids\"]]\n\t\t\teffective_mask = [features[\"effective_input_mask\"], features[\"effective_input_mask\"], features[\"effective_input_mask\"], features[\"effective_input_mask\"]]\n\n\n\n\t\tlabel = features[\"label\"]\n\n\n\t\ttf.logging.info(\"Create model\")\n\t\tif (is_training) or (is_eval):\n\t\t\t(total_loss, score, doc_length) = create_model(\n\t\t\t\tbert_config, is_training, is_eval, is_output, input_ids, input_mask, segment_ids, effective_mask, label, use_one_hot_embeddings,\n\t\t\t\tcolbert_dim, dotbert_dim, max_q_len, max_p_len, doc_type, loss, kd_source, train_model, eval_model)\n\t\telif is_output:\n\t\t\t(pooling_emb, emb, doc_length) = create_model(\n\t\t\t\tbert_config, is_training, is_eval, is_output, input_ids, input_mask, segment_ids, effective_mask, label, use_one_hot_embeddings,\n\t\t\t\tcolbert_dim, dotbert_dim, max_q_len, max_p_len, doc_type, loss, kd_source, train_model, eval_model)\n\n\t\ttf.logging.info(\"Finish create model\")\n\t\ttvars = tf.trainable_variables()\n\n\t\tscaffold_fn = None\n\t\tif init_checkpoint:\n\t\t\t(assignment_map, initialized_variable_names)= modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n\t\t\t(assignment_map1, initialized_variable_names1) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint, 'Student/', 'query_reformulator/')\n\t\t\tassignment_maps = [assignment_map, assignment_map1]\n\t\t\tinitialized_variable_names.update(initialized_variable_names1)\n\n\t\t\ttf.logging.info(\"**** Assignment Map ****\")\n\t\t\tif use_tpu:\n\t\t\t\tdef tpu_scaffold():\n\t\t\t\t\tfor assignment_map in assignment_maps:\n\t\t\t\t\t 
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\t\t\t\t\treturn tf.train.Scaffold()\n\n\t\t\t\tscaffold_fn = tpu_scaffold\n\t\t\telse:\n\t\t\t\ttf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\t\ttf.logging.info(\"**** Trainable Variables ****\")\n\n\t\tfor var in tvars:\n\t\t\tinit_string = \"\"\n\t\t\tif var.name in initialized_variable_names:\n\t\t\t\tinit_string = \", *INIT_FROM_CKPT*\"\n\t\t\ttf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n\t\t\t\t\t\t\tinit_string)\n\n\t\toutput_spec = None\n\t\tif mode == tf.estimator.ModeKeys.TRAIN:\n\t\t\ttrain_op = optimization.create_optimizer(\n\t\t\t\t\t\ttotal_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, train_model)\n\n\t\t\toutput_spec = tf.contrib.tpu.TPUEstimatorSpec(\n\t\t\t\t\t\tmode=mode,\n\t\t\t\t\t\tloss=total_loss,\n\t\t\t\t\t\ttrain_op=train_op,\n\t\t\t\t\t\tscaffold_fn=scaffold_fn)\n\n\t\telif mode == tf.estimator.ModeKeys.PREDICT:\n\t\t\tif is_output:\n\t\t\t\toutput_spec = tf.contrib.tpu.TPUEstimatorSpec(\n\t\t\t\t\t\t\t\tmode=mode,\n\t\t\t\t\t\t\t\tpredictions={\n\t\t\t\t\t\t\t\t\t\"docid\": features['docid'],\n\t\t\t\t\t\t\t\t\t\"pooling_emb\":pooling_emb,\n\t\t\t\t\t\t\t\t\t\"emb\":emb,\n\t\t\t\t\t\t\t\t\t\"doc_length\":doc_length,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tscaffold_fn=scaffold_fn)\n\t\t\telif is_eval:\n\t\t\t\toutput_spec = tf.contrib.tpu.TPUEstimatorSpec(\n\t\t\t\t\t\t\t\tmode=mode,\n\t\t\t\t\t\t\t\tpredictions={\n\t\t\t\t\t\t\t\t\t\"log_probs\": score,\n\t\t\t\t\t\t\t\t\t\"label_ids\": label,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tscaffold_fn=scaffold_fn)\n\n\t\telse:\n\t\t\traise ValueError(\n\t\t\t\t\t\"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n\t\treturn output_spec", "def evaluate_single_model(\n model_path, model_index, save_preds_to_db, save_prefix,\n metrics, k_values, X, y, labeled_indices):\n\n # Load saved model\n with open(model_path, 'rb') as file:\n model = pickle.load(file)\n\n # Get predictions\n pred_table_name = f'{save_prefix}_model_{model_index}' if save_preds_to_db else None\n y_preds, probs = get_predictions(model, X, k_values=k_values, pred_table_name=pred_table_name)\n\n # Filter labels\n y_preds_filtered = y_preds[labeled_indices]\n y_filtered = y.to_numpy(copy=True)[labeled_indices]\n\n # Calculate metrics for each k value\n model_results = np.zeros((len(metrics), len(k_values)))\n for i, metric in enumerate(metrics):\n for j in range(len(k_values)):\n model_results[i, j] = metric(y_filtered, y_preds_filtered[:, j])\n\n return model_index, model_results", "def evaluate_model(model, X_test, Y_test, category_names):\n Y_prediction = model.predict(X_test)\n Y_prediction_df = pd.DataFrame(Y_prediction, columns=category_names)\n \n for col in category_names:\n print(f\"category:{col}\")\n print(classification_report(Y_test[col], Y_prediction_df[col]))\n print('------------------------------------------------------')\n \n accuracy = np.mean(Y_prediction == Y_test.values)\n print(f\"Accuracy: {accuracy:.2%}\")", "def evaluate_model(pipeline = None, reader = sick_dev_reader, features = None, file_name = \"\", load_vec = None):\n if reader == sick_dev_reader:\n reader_name = 'Dev'\n elif reader == sick_train_reader:\n reader_name = 'Train + Dev'\n elif reader == sick_test_reader:\n reader_name = 'Test'\n else:\n reader_name = 'Train'\n\n if len(pipeline.steps) == 2: #Only have a vectorizer and a classifier step in pipeline\n dict_vectorizer = pipeline.steps[0][1]\n print reader_name + ' Feature Set Size: ', 
len(dict_vectorizer.feature_names_)\n else:\n feature_selector = pipeline.steps[1][1] #Extracts the dictVectorizer from the pipeline object (assumes feature vectorizer is first transform applied)\n print reader_name + ' Feature Set Size: ', len(feature_selector.get_support(True))\n\n prettyColor = color.RED\n if reader == 'sick_dev_reader':\n reader = sick_dev_reader\n file_name += \".dev\"\n elif reader == 'sick_train_dev_reader':\n reader = sick_train_dev_reader\n file_name += \".train_dev\"\n elif reader == 'sick_train_reader':\n reader = sick_train_reader\n file_name += \".train\"\n prettyColor = color.CYAN\n else:\n reader = sick_test_reader\n file_name += \".test\"\n feat_vec, gold_labels = obtain_vectors(file_name, load_vec, reader, features)\n \n predicted_labels = pipeline.predict(feat_vec)\n prettyPrint( metrics.classification_report(gold_labels, predicted_labels, digits = 5), prettyColor)" ]
[ "0.71969485", "0.6895252", "0.68876994", "0.6868299", "0.6828266", "0.68280625", "0.6817962", "0.6815478", "0.6739548", "0.66729695", "0.66729695", "0.6669808", "0.6641863", "0.65919566", "0.6583201", "0.6532106", "0.6510232", "0.6506622", "0.65015924", "0.64879614", "0.64562", "0.6448343", "0.64396787", "0.6437344", "0.64362913", "0.64341515", "0.64323837", "0.6425257", "0.64235103", "0.6390746", "0.63819015", "0.63613623", "0.63522184", "0.6350844", "0.634878", "0.63375765", "0.6320662", "0.6318716", "0.63176435", "0.63156325", "0.63105434", "0.6281513", "0.6279614", "0.62722325", "0.62522674", "0.6251422", "0.62481236", "0.62425214", "0.6239724", "0.6238509", "0.6232047", "0.622613", "0.6216391", "0.6214609", "0.621425", "0.62010896", "0.61998236", "0.61994535", "0.6196167", "0.61859596", "0.61828184", "0.618011", "0.6175601", "0.61739933", "0.6167836", "0.6164997", "0.61610615", "0.61604106", "0.615722", "0.61541593", "0.6146413", "0.6141441", "0.6134543", "0.613218", "0.6125908", "0.61256963", "0.6123158", "0.61229306", "0.611824", "0.611824", "0.611824", "0.61145586", "0.6110489", "0.61091375", "0.61008966", "0.6093169", "0.60929275", "0.60810673", "0.60793513", "0.60761505", "0.60756636", "0.6073418", "0.60732996", "0.6072305", "0.6071904", "0.6070188", "0.6068693", "0.6068668", "0.60678816", "0.60653776" ]
0.8275948
0
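
For orientation, the negative passages in the record above repeatedly show the same per-category evaluation pattern: a multi-output classifier scored column by column with classification_report. A minimal, self-contained sketch of that pattern follows, using a hypothetical generated dataset and estimator rather than anything taken from the records themselves.

from sklearn.datasets import make_multilabel_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report

# Hypothetical multi-label data standing in for the test sets used in the snippets above.
X, Y = make_multilabel_classification(n_samples=200, n_classes=3, random_state=0)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=0)

# Any multi-output-capable estimator works here; a random forest is just an example.
model = RandomForestClassifier(random_state=0).fit(X_train, Y_train)
Y_pred = model.predict(X_test)

# Score each output column separately, as the quoted evaluate_model snippets do.
category_names = ["category_%d" % i for i in range(Y.shape[1])]
for i, name in enumerate(category_names):
    print(name)
    print(classification_report(Y_test[:, i], Y_pred[:, i]))
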
Simple wrapper around sklearn's learning curve module
def learning_curve(self, features, labels): return learning_curve(self._model, features, labels)
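
The one-line wrapper in the document above appears to delegate directly to scikit-learn's learning_curve. For orientation, a minimal sketch of the underlying call, assuming a hypothetical estimator and the built-in iris data in place of self._model and the caller's features/labels.

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import learning_curve

features, labels = load_iris(return_X_y=True)  # hypothetical stand-ins for the caller's data

# The wrapper's single call: learning_curve(self._model, features, labels).
train_sizes, train_scores, test_scores = learning_curve(
    LogisticRegression(max_iter=1000),  # hypothetical stand-in for self._model
    features,
    labels,
    cv=5,
    train_sizes=np.linspace(0.1, 1.0, 5),
)

print("train sizes:", train_sizes)
print("mean train score:", train_scores.mean(axis=1))
print("mean validation score:", test_scores.mean(axis=1))
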
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self,features,y):\r\n \r\n if self.learn_type == \"nn\":\r\n #generate supervised dataset\r\n return(self.learner.train_on_batch(features,y))\r\n elif self.learn_type == \"linear\":\r\n grad = 0\r\n n = len(features)\r\n for i in range(n):\r\n #sum over the instances to get an estimate of the gradient\r\n print((y[i] - self.learner.activate(features[i])))\r\n grad -= (y[i] - self.learner.activate(features[i])) * \\\r\n self.learner.grad(features[i])\r\n grad /= n\r\n #update paramter\r\n param = np.copy(self.learner.param)\r\n self.learner.param = param - self.alpha * grad\r\n #print(self.learner.param)\r", "def learn(self, Xtrain, ytrain):", "def linear_regression(features, values):\n clf = SGDRegressor(n_iter=100)\n clf.fit(features,values)\n print(clf.score(features,values))\n intercept = clf.intercept_ \n params = clf.coef_\n \n return intercept, params", "def __init__(self, data, target, target_names, alpha=.0001, n_iter=100, penalty='l2', preprocess=False):\n super().__init__(data, target, target_names, sklearn.linear_model.SGDClassifier(alpha=alpha, n_iter=n_iter,\n penalty=penalty), preprocess=preprocess)", "def learnHyperLinear(X, y, xTest, yTest, penalty, scoring, transformedData,jobs=1):\n #Check if test is empty, if it is, don't refit and predict data\n testAvailable = np.size(xTest,0)!=0\n\n # Parameters selection\n #====================\n cRange = np.logspace(-5,1,3)\n parameters = {'C': cRange}\n\n if penalty=='l1':\n dual=False\n else:\n dual=True\n\n #Creating Model and begin classification\n #=======================================\n classif = svm.LinearSVC(penalty=penalty, class_weight=CLASS_WEIGHT, dual=dual)\n clf = grid_search.GridSearchCV(classif, parameters, scoring=scoring, cv=5, n_jobs=jobs, verbose=3, refit=testAvailable)\n print(\"Begin\\n...\")\n clf.fit(X,y)\n\n \n #Get results, print and write them into a file\n #============================================\n print(clf.best_params_, clf.best_score_)\n\n if testAvailable:\n scores = testModel(clf.best_estimator_,X,y,xTest,yTest,penalty)\n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,'Linear',\\\n penalty,scoring, transformedData, scores=scores)\n else:\n print(\"No test, don't predict data\")\n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,'Linear',\\\n penalty,scoring, transformedData, scores=None)", "def get_learning_curve(estimator, X, y, ylim=None, cv=None, n_jobs=4, train_sizes=np.linspace(.125, 1.0, 8)):\n if ylim is not None:\n plt.ylim(*ylim)\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n\n return train_sizes, train_scores_mean, test_scores_mean, train_scores_std, test_scores_std", "def sklearn(experiment, method, prediction_threshold=0.5, **kwargs):\n experiment['method'] = method\n experiment['prediction_threshold'] = prediction_threshold\n X_train = experiment['X_train']\n X_test = experiment['X_test']\n y_train = experiment['y_train']\n\n\n classifier = None\n if method == 0:\n # k-Nearest Neighbors\n classifier = KNeighborsClassifier(**kwargs)\n elif method == 1:\n # Logistic Regression\n classifier = LogisticRegression(**kwargs)\n elif method == 2:\n # Random Forest\n classifier = RandomForestClassifier(**kwargs)\n elif method == 3:\n # Support Vector Classifier\n 
classifier = SVC(kernel = 'rbf') # kernel = linear, poly, rbf, sigmoid\n elif method == 4:\n # Gaussian Naive Bayes\n classifier = GaussianNB(**kwargs)\n elif method == 5:\n # Decision Trees\n classifier = DecisionTreeClassifier(**kwargs)\n elif method == 6:\n # AdaBoost Classifier\n classifier = AdaBoostClassifier(**kwargs)\n elif method == 7:\n # Gradient Boosting Classifier\n classifier = GradientBoostingClassifier(**kwargs)\n elif method == 8:\n # Neural Network Classifier\n classifier = MLPClassifier(**kwargs)\n # classifier = MLPClassifier(hidden_layer_sizes=(10, 5))\n else:\n print('Invalid method!')\n\n classifier.fit(X_train, np.ravel(y_train))\n\n # output probability of prediction, use threshold to pick class\n y_train_probabilities = classifier.predict_proba(X_train)\n y_test_probabilities = classifier.predict_proba(X_test)\n\n\n y_test = experiment['y_test']\n\n FPR, TPR, prediction_threshold = roc_curve(y_test, y_test_probabilities[:, 1], pos_label=1)\n\n N_roc = np.shape(FPR)[0]\n best_d = 10\n best_i = 0\n d = np.ones((N_roc, 1))\n for i in range(N_roc):\n d[i] = np.sqrt((1 - TPR[i]) ** 2 + FPR[i] ** 2)\n if best_d > d[i]:\n best_d = d[i]\n best_i = i\n\n threshold = prediction_threshold[best_i]\n # auc2 = roc_auc_score(y_test, y_test_probabilities[:, 1])\n y_train_prediction = (y_train_probabilities[:, 1] >= threshold) * 1\n y_test_prediction = (y_test_probabilities[:, 1] >= threshold) * 1\n\n experiment['FPR'] = FPR\n experiment['TPR'] = TPR\n experiment['y_test_probabilities'] = y_test_probabilities\n experiment['y_train_probabilities'] = y_train_probabilities\n experiment['y_test_prediction'] = y_test_prediction\n experiment['y_train_prediction'] = y_train_prediction\n\n return experiment", "def test_learning_curves():\n\n p = pipeline.Pipeline(\n FX_TRAIN,\n FX_TEST,\n FX_LOOKUP,\n RESULTS_DIR\n )\n\n data = p.learning_curves()", "def __init__(self):\n super().__init__()\n import sklearn\n import sklearn.linear_model\n self.model = sklearn.linear_model.LogisticRegression", "def train(self, X, y):", "def sklearn_train() -> None:\n cross_validate(args=SklearnTrainArgs().parse_args(), train_func=run_sklearn)", "def __init__(self, estimator = LogisticRegression()): \n\t self.estimator = estimator", "def learningCurve(X, y, Xval, yval, Lambda):\n\n # Number of training examples\n m, _ = X.shape\n\n # You need to return these values correctly\n error_train = np.zeros(m)\n error_val = np.zeros(m)\n\n for i in range(m):\n theta = trainLinearReg(X[:i + 1], y[:i + 1], Lambda)\n error_train[i], _ = linearRegCostFunction(X[:i + 1], y[:i + 1], theta, 0)\n error_val[i], _ = linearRegCostFunction(Xval, yval, theta, 0)\n \n return error_train, error_val", "def __init__(self, reg_penalty='l2', reg_inv=1.0, k_fold=5, random_state=0):\n print(\"Initialize model Logistic Regression\")\n self.reg_penalty = reg_penalty\n self.reg_inv = reg_inv\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.linear_model.LogisticRegression(penalty=self.reg_penalty,\n C=self.reg_inv,\n max_iter=1000, \n random_state=self.random_state)", "def __call__(self, y, pred, sample_weight=None):", "def plot_learning_curve(model, X_train, X_test, y_train, y_test):\n\n m, train_scores, valid_scores = learning_curve(estimator = model, \n X = X_train, y = y_train.ravel(), train_sizes = np.linspace(0.1,1.0, 80))\n\n train_cv_err = np.mean(train_scores, axis=1)\n test_cv_err = np.mean(valid_scores, axis=1)\n tr, = plt.plot(m, train_cv_err)\n ts, = plt.plot(m, test_cv_err)\n plt.legend((tr, 
ts), ('training error', 'test error'), loc = 'best')\n plt.title('Learning Curve')\n plt.xlabel('Data Points')\n plt.ylabel('Accuracy')", "def plot_learning_curve(ax, estimator, X, y, ylim=None, cv=None,\n n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5)):\n ax.set_xlabel(\"Training examples\")\n ax.set_ylabel(\"F1 score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes, scoring='f1_macro')\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n ax.grid()\n\n ax.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n ax.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n ax.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n ax.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n ax.legend(loc=\"best\")\n\n if ylim is not None:\n ax.set_ylim(*ylim)\n return ax", "def train_logisticRegression(data: np.array, labels: np.array)->None:\n\n n_examples = np.size(data, 0)\n n_features = np.size(data, 1)\n n_categories = np.size(labels, 1)\n\n data = np.hstack((np.ones((n_examples, 1)), data))\n\n print(data[0:5, :])\n\n X_train, X_test, y_train, y_test, idx_test = split_data(data, labels, 0.7)\n\n convergence_goal = 1e-3\n learning_rate = 0.01\n\n theta = np.random.uniform(size=((n_features+1, n_categories)))\n\n for i in range(n_categories):\n\n cost_var = 1\n\n previous_cost = 1e6\n iterations = 0\n cost_to_plot = []\n\n while cost_var > convergence_goal:\n iterations += 1\n cost, grad = costFunction(X_train, y_train[:, i], theta[:, i])\n theta[:, i] = update_theta(theta[:, i], grad, learning_rate)\n cost_var = previous_cost - cost\n previous_cost = cost\n if iterations == 1: cost_var = 1\n cost_to_plot.append(cost)\n # print(cost)\n\n plt.plot(range(iterations), cost_to_plot, 'g-', label = 'cost')\n plt.xlabel('iterations')\n plt.ylabel('cost')\n # plt.show()\n\n predictions = lrPredict(theta, X_test)\n\n print(predictions[0:5, :])\n print(y_test[0:5, :])\n\n accuracy = np.mean([p == l for p, l in zip(predictions, y_test)])\n print(\"Accuracy = {}\".format(accuracy))\n\n pass", "def mlr(df, exp_vars, resp_var, \n method='ols', \n fit_intercept=True,\n kcv=3,\n normalize=False):\n from sklearn import cross_validation\n from sklearn.linear_model import LinearRegression, RidgeCV\n from sklearn.linear_model import LassoCV, ElasticNetCV\n from sklearn.metrics import r2_score\n from sklearn.utils import resample\n import matplotlib.pyplot as plt\n import seaborn as sn\n import pandas as pd\n import numpy as np\n \n # Separate data\n X = df[exp_vars]\n y = df[resp_var]\n \n # Setup model\n if method == 'ols':\n model = LinearRegression(fit_intercept=fit_intercept, \n normalize=normalize)\n elif method == 'lasso':\n model = LassoCV(fit_intercept=fit_intercept, \n normalize=normalize, \n max_iter=10000,\n cv=kcv)\n elif method == 'ridge':\n model = RidgeCV(fit_intercept=fit_intercept, \n normalize=normalize, \n alphas=np.logspace(-10, 10, 21))\n elif method == 'el-net':\n model = ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1],\n fit_intercept=fit_intercept, \n normalize=normalize,\n cv=kcv)\n else:\n raise ValueError('\"method\" parameter must be in [\"ols\", 
\"lasso\", \"ridge\", \"el-net\"]')\n \n # k-fold cross validation\n #cv_scores = cross_validation.cross_val_score(model, X, y, cv=kcv, scoring='r2')\n #print 'Mean r2 from %s-fold CV: %.3f\\n' % (kcv, cv_scores.mean())\n \n # Train model on full dataset\n model.fit(X, y)\n \n # Get y-hat\n y_pred = model.predict(X)\n \n # r2 based on calibration data\n r2 = r2_score(y, y_pred)\n print 'r2:', r2\n print ''\n \n # Summary of model\n print model\n print ''\n \n if method == 'lasso':\n print 'Lasso alpha:', model.alpha_\n print ''\n elif method == 'ridge':\n print 'Ridge alpha:', model.alpha_\n print ''\n elif method == 'el-net':\n print 'Elastic net alpha:', model.alpha_ \n print 'Elastic net L1 ratio:', model.l1_ratio_ \n print ''\n else: # OLS\n pass\n \n # Plot\n fig = plt.figure(figsize=(15,15))\n \n # Paired points for each site\n ax1 = plt.subplot2grid((2,2), (0,0), colspan=2)\n ax1.plot(range(0, len(X.index)), y, 'ro', label='Observed')\n ax1.plot(range(0, len(X.index)), y_pred, 'b^', label='Modelled')\n \n ax1.set_xticks(range(0, len(X.index)))\n ax1.set_xticklabels(X.index, rotation=90, fontsize=12)\n ax1.set_xlim(0, len(X.index)-1)\n \n ax1.set_xlabel('Site code', fontsize=16)\n ax1.set_ylabel(resp_var)\n ax1.set_title('Points paired for each location', fontsize=20)\n ax1.legend(loc='best', fontsize=16)\n \n # Modelled versus observed\n ax2 = plt.subplot2grid((2,2), (1,0), colspan=1)\n ax2.plot(y, y_pred, 'ro')\n ax2.set_xlabel('Observed', fontsize=16)\n ax2.set_ylabel('Modelled', fontsize=16)\n ax2.set_title('Modelled versus observed', fontsize=20)\n \n # Hist of residuals\n ax3 = plt.subplot2grid((2,2), (1,1), colspan=1)\n sn.distplot(y - y_pred, kde=True, ax=ax3)\n ax3.set_title('Histogram of residuals', fontsize=20)\n \n plt.tight_layout()\n \n # Get param estimates\n params = pd.Series(model.coef_, index=X.columns)\n\n # Estimate confidence using bootstrap\n # i.e. what is the std. dev. of the estimates for each parameter\n # based on 1000 resamplings\n err = np.std([model.fit(*resample(X, y)).coef_ for i in range(1000)], \n axis=0)\n\n # Build df\n res = pd.DataFrame({'effect':params,\n 'error':2*err})\n\n # Rough indicator of significance: are the estimated values more than\n # 2 std. devs. from 0 (~95% CI?). NB: this assumnes the \"marginal posterior\" \n # is normal, which I haven't tested for and which quite possibly isn't true\n # - use with care! 
\n res['signif'] = np.abs(res['effect']) > res['error']\n \n return res", "def plot_learning_curve(X, y, maxdepth, estimator, plt):\n # create cv training and test scores for various training set sizes\n train_sizes, train_scores, test_scores = learning_curve(estimator,\n X, # feature matrix\n y, # target vector\n cv=10, # number of folds in cross-validation\n scoring='neg_mean_squared_error', # metric\n n_jobs=-1, # use all computer cores,\n train_sizes=np.linspace(0.01, 1.0, 30) # 30 different sizes of the training set\n )\n # create means and standart deviations of training set scores\n train_mean = np.mean(train_scores, axis=1)\n train_std = np.std(train_scores, axis=1)\n\n # create means and standart deviations of test set scores\n test_mean = np.mean(test_scores, axis=1)\n test_std = np.std(test_scores, axis=1)\n\n # draw lines\n plt.plot(train_sizes, train_mean, '--', color='#111111', label=\"Training score\")\n plt.plot(train_sizes, test_mean, color='#111111', label=\"Cross-validation score\")\n\n # draw bands\n plt.fill_between(train_sizes, train_mean - train_std, train_mean + train_std, color=\"#DDDDDD\")\n plt.fill_between(train_sizes, test_mean - test_std, test_mean + test_std, color=\"#f4d0d7\")\n \n # create plot \n plt.title(\"Learning curve\")\n plt.xlabel(\"Training set size\", fontsize=18)\n plt.ylabel(\"mse\", fontsize=18)\n plt.legend(loc=\"best\")\n plt.tight_layout()", "def linear_regression_sklearn(data):\n# Split the data into training/testing sets\n dataset = np.array(data)\n\n X_train = dataset[:,0].reshape(-1,1)\n y_train = dataset[:,1]\n\n# Create linear regression object\n regr = linear_model.LinearRegression()\n\n# Train the model using the training sets\n regr.fit(X_train, y_train)\n\n return (regr.coef_[0], regr.intercept_)", "def do_scikit_learn_regression(data, verbose = False):\n \n \n regr = linear_model.LinearRegression()\n\n x = data['c'].values.reshape(100,1)\n y = data['f'].values.reshape(100,1)\n \n regr.fit(x, y)\n \n if verbose:\n\n string = '\\n'.join((\n f'Coefficient of {regr.coef_[0][0]} compared to actual {9/5}',\n f'Intercept of {regr.intercept_[0]} compared to actual {32}'\n ))\n\n print (string)\n\n return regr.coef_[0][0], regr.intercept_[0]", "def fit(self, X):", "def stability_lasso(x, y, **kwargs):\n rl = RandomizedLasso()\n if 'param' in kwargs:\n rl.set_params(**kwargs['param'])\n rl.fit(x, y)\n return rl.get_support()", "def nnRegression(data):", "def ex_2_b(x_train, y_train, x_test, y_test):\n ###########\n ## TODO:\n ## Train SVMs with polynomial kernels for different values of the degree\n ## (Remember to set the 'coef0' parameter to 1)\n ## and plot the variation of the training and test scores with polynomial degree using 'plot_score_vs_degree' func.\n ## Plot the decision boundary and support vectors for the best value of degree\n ## using 'plot_svm_decision_boundary' function\n ###########\n degrees = range(1, 21)\n\n test_scores = np.array([])\n train_scores = np.array([])\n best_svm = None\n best_test_score = 0\n\n for deg in degrees:\n clf = svm.SVC(kernel='poly', degree=deg, coef0=1)\n clf.fit(x_train, y_train)\n\n test_score = clf.score(x_test, y_test)\n\n if test_score > best_test_score:\n best_test_score = test_score\n best_svm = clf\n\n test_scores = np.append(test_scores, test_score)\n train_scores = np.append(train_scores, clf.score(x_train, y_train))\n\n plot_score_vs_degree(train_scores, test_scores, degrees)\n\n plot_svm_decision_boundary(clf, x_train, y_train, x_test, y_test)", "def mylinearsvm(lambdat, 
eta_init, maxiter, X, y):\n d = np.size(X, 1)\n beta_init = np.zeros(d)\n theta_init = np.zeros(d)\n betas, objs = fast_grad(beta_init, theta_init, lambdat, eta_init, maxiter,X=X,y=y)\n return betas, objs", "def train(features, targets, weights, bias):\n # see gradient_descent for explanation\n epochs = 100\n learning_rate = 0.1\n\n picture_nb = 2\n\n # Print current accuracy. How many people have been classified as sick/healthy correctly?\n predictions = predict(features, weights, bias)\n print(\"Accuracy: \", np.mean(predictions == targets))\n\n for epoch in range(epochs):\n if epoch % 10 == 0:\n # get normalized scores\n predictions = activation(pre_activation(features, weights, bias))\n # compare with targets to see how bad our algorithm is\n print(\"Cost = %s\" % cost(predictions, targets))\n # Replot graph. Check in create_dataset for explanation of parameters\n if picture_nb == 2:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='red')\n elif picture_nb == 11:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='green')\n else:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='orange')\n picture_nb+=1\n\n # Initialize gradients\n # weights_gradients is 2D array with 2 values\n weights_gradients = np.zeros(weights.shape)\n bias_gradient = 0\n # Go through each row\n for feature, target in zip(features, targets):\n # Compute prediction\n z = pre_activation(feature, weights, bias)\n # Get normalized score\n y = activation(z)\n # Update gradients based on formulas established before. Look at gradient_descent to understand what we\n # are doing. Also, the formulas are below, just before the call of the function train.\n weights_gradients += (y - target) * derivative_activation(z) * feature\n # no multiplication of feature because it does not depend on some coordinates.\n bias_gradient += (y - target) * derivative_activation(z)\n\n # Update variables. These are the lines that result the cost to get reduced.\n weights = weights - learning_rate * weights_gradients\n bias = bias - learning_rate * bias_gradient\n\n # Print final accuracy. 
How many people have been classified as sick/healthy correctly?\n predictions = predict(features, weights, bias)\n print(\"Accuracy: \", np.mean(predictions == targets))\n\n plt.scatter(features[:, 0], features[:, 1], s=40, c=targets, cmap=plt.cm.Spectral)\n plt.savefig(\"DataPointsLineEvolution.png\")\n # legend for understanding\n plt.legend(['Original division', 'New division', 'New division', 'New division', 'New division', 'New division',\n 'New division', 'New division', 'New division', 'Final division'], loc='upper left')\n # save picture of data points drawn.\n plt.savefig(\"DataPointsLineEvolutionLegend.png\")", "def train_model(x_tra, y_tra):\n\n clf1 = AdaBoostClassifier(n_estimators=300, random_state=1)\n clf1.fit(x_tra, y_tra)\n return clf1", "def __init__(self, reg_penalty='l2', reg=0.001, k_fold=5, random_state=0):\n print(\"Initialize model Perceptron\")\n self.reg_penalty = reg_penalty\n self.reg = reg\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.linear_model.Perceptron(penalty=reg_penalty,\n alpha=self.reg,\n max_iter=1000,\n random_state=self.random_state)", "def sklearn_model(train_data):\n X, y = train_data\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n model = LogisticRegression(\n multi_class=\"multinomial\", solver=\"lbfgs\", max_iter=1000\n )\n model.fit(X, y)\n return model", "def fit(self, X, y):", "def fit(self, X, y):", "def fit(self, X, y):", "def train(self, X, y):\n lagrange_multipliers = self._compute_multipliers(X, y)\n return self._construct_predictor(X, y, lagrange_multipliers)", "def learn(self):\n raise NotImplementedError", "def ann_learning_curve(trainx, trainy, testx, testy, n_hidden=[5, 3],\n\t\t\t\t\t\tn_iter=5, cv=5, train_sizes=np.linspace(.1, 1.0, 10)):\n\n\tcv_train_scores = [[0] * len(train_sizes)]\n\tcv_test_scores = [[0] * len(train_sizes)]\n\tfor c in range(cv):\n\t\ttrain_scores = []\n\t\ttest_scores = []\n\t\tfor ts in train_sizes:\n\t\t\tn_examples = int(round(len(trainx) * ts))\n\t\t\trows = random.sample(range(len(trainx)), n_examples)\n\t\t\tsubx = trainx.iloc[rows, ]\n\t\t\tsuby = trainy.iloc[rows, ]\n\t\t\tstart = time.time()\n\t\t\ta, b = test_ann2(subx, suby, testx, testy,\n\t\t\t\t\t\t\t\tn_hidden=n_hidden, n_iter=n_iter)\n\t\t\tprint(\"training time: {} secs\".format(time.time() - start))\n\t\t\tcurrent_train_score = a\n\t\t\tcurrent_test_score = b\n\t\t\ttrain_scores.append(current_train_score)\n\t\t\ttest_scores.append(current_test_score)\n\t\tcv_train_scores.append(train_scores)\n\t\tcv_test_scores.append(test_scores)\n\taverage_train_scores = [sum(i) / cv for i in zip(*cv_train_scores)]\n\taverage_test_scores = [sum(i) / cv for i in zip(*cv_test_scores)]\n\treturn train_sizes, average_train_scores, average_test_scores", "def learn(self):\n Xt = np.append(np.ones((self.X.shape[0], 1)), self.X, axis=1)\n Yt = self.Y * 2 - 1\n\n w = np.ones(Xt.shape[1]) # avoiding random init, for debugging\n lw = [[] for k in range(len(w))]\n \n for iter in range(self.max_steps):\n P = Yt * np.dot(Xt, w)\n M = np.where(P <= 0)[0] # indices of misclassified datapoints\n\n if len(M) == 0: \n self.logger.debug(\"Found linearly separable hyperplane!\")\n break\n\n if self.is_stochastic:\n # just pick one randomly from M\n M = [M[random.randint(0, len(M)-1)]]\n\n grad = -1 * np.sum((Yt[M] * Xt[M].T), axis=1) / len(M)\n\n if self.reg_constant > 0:\n grad += self.reg_constant * w\n \n eta = self.step_size * 10000 / (10000 + iter)\n \n w = w - grad * eta\n \n if iter % 100 == 0:\n for k in 
range(len(w)):\n lw[k].append(w[k])\n \n if iter % 1000 == 0:\n self.logger.debug(\"Iter %s:\\t %f %f %f\" %(iter, w[0], w[1], w[2]))\n \n self.logger.debug(\"Iterations: %s\" %(iter))\n\n# x_range = range(len(lw[0]))\n# fig = plt.figure()\n# ax1 = fig.add_subplot(111) \n# for j, lwn in enumerate(lw):\n# if j % 3 >= 2: # plot an arbitrary subset of features\n# a = w[j]\n# ax1.plot(x_range, [(x-a) for x in lwn], label=str(j))\n# \n# plt.xlabel(\"Iteration\")\n# plt.ylabel(\"Feature weight\")\n# plt.show()\n \n #self.logger.debug(\"%s\" % np.array2string(w, precision=2, separator=','))\n \n self.w = w", "def to_sklearn(self):\n import sklearn.pipeline as skp\n\n steps = []\n for step in self.steps:\n steps += [(step[0], step[1].to_sklearn())]\n return skp.Pipeline(steps)", "def orig_sklearn_fork_bench(\n dat: Dict[str, Union[np.ndarray, sps.spmatrix]],\n distribution: str,\n alpha: float,\n l1_ratio: float,\n iterations: int,\n cv: bool,\n reg_multiplier: Optional[float] = True,\n **kwargs,\n):\n if cv:\n raise ValueError(\"original sklearn fork does not support cross-validation\")\n result: Dict[str, Any] = {}\n\n X = dat[\"X\"]\n\n if X.shape[0] > 100000 and not isinstance(X, (np.ndarray, pd.DataFrame)):\n warnings.warn(\n \"original sklearn fork is too slow on sparse data sets with more than \"\n \"100,000 rows. Skipping.\"\n )\n return result\n\n fit_args = dict(X=X, y=dat[\"y\"])\n if \"sample_weight\" in dat.keys():\n fit_args.update({\"sample_weight\": dat[\"sample_weight\"]})\n if \"offset\" in dat.keys():\n warnings.warn(\"Original sklearn_fork does not support offsets.\")\n return result\n\n family = distribution\n if family == \"gaussian\":\n family = \"normal\"\n elif \"tweedie\" in family:\n tweedie_p = float(family.split(\"-p=\")[1])\n family = TweedieDistribution(tweedie_p) # type: ignore\n\n model_args = dict(\n family=family,\n alpha=alpha if reg_multiplier is None else alpha * reg_multiplier,\n l1_ratio=l1_ratio,\n max_iter=150,\n random_state=random_seed,\n copy_X=False,\n selection=\"cyclic\",\n tol=benchmark_convergence_tolerance,\n )\n\n try:\n result[\"runtime\"], m = runtime(_build_and_fit, iterations, model_args, fit_args)\n except ValueError as e:\n warnings.warn(f\"Problem failed with this error: {e}\")\n return result\n\n result[\"intercept\"] = m.intercept_\n result[\"coef\"] = m.coef_\n result[\"n_iter\"] = m.n_iter_\n\n return result", "def train(self, trainingData, trainingLabels, validationData, validationLabels ):\n import sklearn\n from sklearn import svm\n\n \"*** YOUR CODE HERE ***\"\n self.sklearn_classifier = svm.SVC(C=2, gamma=0.025, decision_function_shape='ovo', tol=0.015)\n self.sklearn_classifier.fit(trainingData, trainingLabels)", "def __init__(self, lr, eps=1e-6):\n LearningRate.__init__(self, lr)\n\n self.epsilon = eps\n self.parameters = []", "def train(self, X, y):\n pass", "def __init__(self, train, validation=None, initial_weight=None,\n loss_function_name='logistic',\n calculate_weight='gradient',\n regularizer=None, regularizer_p=None):\n # Initialize the super class with given data.\n # Transform the y into {0,1}\n y, tx = train\n y[np.where(y < 0)] = 0\n train = (y, tx)\n if validation:\n val_y, val_tx = validation\n val_y[np.where(val_y < 0)] = 0\n validation = (val_y, val_tx)\n super(LogisticRegression, self).__init__(train, validation,\n initial_weight=initial_weight,\n loss_function_name=loss_function_name,\n cal_weight=calculate_weight,\n regularizer=regularizer,\n regularizer_p=regularizer_p)\n # Set predicted label\n 
self.pred_label = [-1, 1]", "def refit_simple(x_train: np.ndarray, y: np.ndarray, interp: bool = True,\n p_val: float = 0.05, x_val: Optional[np.ndarray] = None, y_val: Optional[np.ndarray] = None\n ) -> Tuple[np.ndarray, float, np.ndarray, np.ndarray, np.ndarray]:\n sl_ok = np.ones(x_train.shape[1], dtype=bool)\n\n n = -1\n\n while True:\n n += 1\n assert sl_ok.sum() > 0, 'No features left to fit on iter'.format(n)\n\n logger.info('Iter {0} of final refit starts with {1} features'.format(n, sl_ok.sum()))\n\n x_train_ = x_train[:, sl_ok]\n # индексы в исходном массиве\n ok_idx = np.arange(x_train.shape[1])[sl_ok]\n\n clf = LogisticRegression(penalty='none', solver='lbfgs', warm_start=False,\n intercept_scaling=1)\n clf.fit(x_train_, y)\n\n # check negative coefs here if interp\n sl_pos_coef = np.zeros((x_train_.shape[1],), dtype=np.bool)\n if interp:\n sl_pos_coef = clf.coef_[0] >= 0\n\n # если хотя бы один неотрицательный - убирай самый большой и по новой\n if sl_pos_coef.sum() > 0:\n max_coef_idx = clf.coef_[0].argmax()\n sl_ok[ok_idx[max_coef_idx]] = False\n continue\n\n # если прошли все отрицательные смотрим на pvalue\n p_vals, b_var = calc_p_val(x_train_, clf.coef_[0], clf.intercept_[0])\n # без интерсепта\n p_vals_f = p_vals[:-1]\n\n model_p_vals = p_vals.copy()\n model_b_var = b_var.copy\n\n # если хотя бы один больше p_val - дропай самый большой и погнали по новой\n if p_vals_f.max() > p_val:\n max_p_val_idx = p_vals_f.argmax()\n sl_ok[ok_idx[max_p_val_idx]] = False\n continue\n\n if x_val is not None:\n # то же самое на валидационной выборке\n logger.info('Validation data checks')\n x_val_ = x_val[:, sl_ok]\n\n p_vals, b_var = calc_p_val_on_valid(x_val_, y_val)\n p_vals_f = p_vals[:-1]\n\n # если хотя бы один больше p_val - дропай самый большой и погнали по новой\n if p_vals_f.max() > p_val:\n max_p_val_idx = p_vals_f.argmax()\n sl_ok[ok_idx[max_p_val_idx]] = False\n continue\n\n weights = cast(np.ndarray, clf.coef_[0])\n intercept = cast(float, clf.intercept_[0])\n\n return weights, intercept, sl_ok, cast(np.ndarray, model_p_vals), cast(np.ndarray, model_b_var)", "def fit(self, trainingFeatures, trainingTargets):\r\n\r\n self._fitCalled = True\r\n self.pp = self.ppC(trainingFeatures)\r\n preProcTrainingFeatures = self.pp.preProc(trainingFeatures)\r\n\r\n\r\n \"\"\" \r\n Implement the linear regression learning below.\r\n\r\n Hint: w = X\\b\r\n where w is the weight vector to be learned, \r\n X is the matrix that should be built from the data and the bias terms \r\n and b is the trainingTarget vector\r\n \\ operation corresponds to multiplying the pseudo-inverse of X with b (very matlab-like)\r\n\r\n Look at numpy linalg methods!\r\n\r\n The preprocessing call has been handled for you.\r\n \"\"\"\r\n\r\n X = add_ones(preProcTrainingFeatures)\r\n X_pinv = np.linalg.pinv(X)\r\n b = trainingTargets\r\n W = X_pinv.dot(b)\r\n self.w = W", "def _fit(self, X, y):\n\n if self.fit_intercept:\n X = add_intercept(X)\n\n self.scaler = StandardScaler()\n if self.fit_intercept:\n X[:,1:] = self.scaler.fit(X[:,1:]).transform(X[:,1:])\n else:\n X = self.scaler.fit(X).transform(X)\n\n # note: input y is always shape of (n,c)\n # even if it's binary classification, it's (n,2) not (n,)\n # see implementation of bareml.base.Classifier\n if y.shape[1] == 2: # binary classification\n y = y[:,1]\n self.activation = sigmoid\n else:\n self.activation = softmax\n\n # function to calculate gradient of loss function w.r.t. 
w\n def gradient(X, y, w):\n # X.T is a (d,n) array\n # (X @ w - y) is a (n,c) array if multi-class\n # a (n,) array if binary\n # w & penalty is a (d,c) array if multi-class\n # a (d,) array if binary\n # X.T @ (X @ w - y) + self.alpha * w is a (d,c) array if multi-class\n # a (d,) array if binary\n if self.fit_intercept:\n penalty = np.insert(w[1:], 0, 0, axis=0) # no penalise intercept\n else:\n penalty = w\n return self.C * X.T @ (self.activation(X @ w) - y) + penalty\n\n # initialise optimiser\n opt = GradientDescent(\n gradient=gradient, max_iter=self.max_iter,\n tol=self.tol, lr=self.lr)\n \n # optimise\n self.w = opt.solve(X, y)\n\n return self", "def fit(self, X, y=None):\n # default to QuicGraphicalLassoCV\n estimator = self.estimator or QuicGraphicalLassoCV()\n\n self.lam_ = None\n self.estimator_ = None\n\n X = check_array(X, ensure_min_features=2, estimator=self)\n X = as_float_array(X, copy=False, force_all_finite=False)\n\n n_samples_, n_features_ = X.shape\n \n # perform first estimate\n estimator.fit(X)\n\n if self.method == \"binary\":\n # generate weights\n self.lam_ = self._binary_weights(estimator)\n\n # perform second step adaptive estimate\n self.estimator_ = QuicGraphicalLasso(\n lam=self.lam_ * estimator.lam_,\n mode=\"default\",\n init_method=\"cov\",\n auto_scale=False,\n )\n self.estimator_.fit(X)\n\n elif self.method == \"inverse_squared\":\n self.lam_ = self._inverse_squared_weights(estimator)\n\n # perform second step adaptive estimate\n self.estimator_ = QuicGraphicalLassoCV(\n lam=self.lam_ * self.estimator.lam_, auto_scale=False\n )\n self.estimator_.fit(X)\n\n elif self.method == \"inverse\":\n self.lam_ = self._inverse_weights(estimator)\n\n # perform second step adaptive estimate\n self.estimator_ = QuicGraphicalLassoCV(\n lam=self.lam_ * estimator.lam_, auto_scale=False\n )\n self.estimator_.fit(X)\n\n else:\n raise NotImplementedError(\n (\n \"Only method='binary', 'inverse_squared', or\",\n \"'inverse' have been implemented.\",\n )\n )\n\n self.is_fitted_ = True\n self.n_features_in_ = X.shape[1]\n return self", "def train(self, *args, **kwargs):\n raise NotImplementedError", "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):\n\tfig = plt.figure()\n\tplt.title(title)\n\tif ylim is not None:\n\t\tplt.ylim(ylim)\n\tplt.xlabel(\"Training example\")\n\tplt.ylabel(\"Score\")\n\ttrain_sizes, train_scores, test_scores = learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs,\n\t train_sizes=train_sizes)\n\ttrain_scores_mean = np.mean(train_scores, axis=1)\n\ttrain_scores_std = np.std(train_scores, axis=1)\n\ttest_scores_mean = np.mean(test_scores, axis=1)\n\ttest_scores_std = np.std(test_scores, axis=1)\n\tplt.grid()\n\n\tplt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1,\n\t color='r')\n\tplt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1,\n\t color='g')\n\tplt.plot(train_sizes, train_scores_mean, 'o-', color='r', label='Training score')\n\tplt.plot(train_sizes, test_scores_mean, 'o-', color='g', label='Cross-validation score')\n\n\tplt.legend(loc='best')\n\treturn plt", "def __init__(self, clf, y_real, y_pred, y_proba):\n self.clf = clf\n self.y_real = y_real\n self.y_pred = y_pred\n self.y_proba = y_proba", "def scikit_learn_classifier_comparison_example():\n\n # Code source: Gael Varoqueux\n # Andreas Mueller\n # Modified for Documentation merge by Jaques Grobler\n 
# Modified to serve as a MinCq example by Jean-Francis Roy\n # License: BSD 3 clause\n\n h = .02 # step size in the mesh\n\n names = [\"Linear SVM\", \"RBF SVM\", \"AdaBoost\", \"Linear MinCq\", \"RBF MinCq\", \"Stumps MinCq\"]\n classifiers = [\n SVC(kernel=\"linear\", C=0.025),\n SVC(gamma=2, C=1),\n AdaBoostClassifier(),\n MinCqLearner(mu=0.01, voters_type=\"kernel\", kernel=\"linear\"),\n MinCqLearner(mu=0.01, voters_type=\"kernel\", kernel=\"rbf\", gamma=2),\n MinCqLearner(mu=0.01, voters_type=\"stumps\"),\n ]\n\n X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,\n random_state=1, n_clusters_per_class=1)\n\n rng = np.random.RandomState(2)\n X += 2 * rng.uniform(size=X.shape)\n linearly_separable = (X, y)\n\n datasets = [make_moons(noise=0.3, random_state=0),\n make_circles(noise=0.2, factor=0.5, random_state=1),\n linearly_separable\n ]\n\n figure = pl.figure(figsize=(27, 9))\n i = 1\n # iterate over datasets\n for ds in datasets:\n # preprocess dataset, split into training and test part\n X, y = ds\n y[y == 0] = -1\n X = StandardScaler().fit_transform(X)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)\n\n x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n\n # just plot the dataset first\n cm = pl.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n ax = pl.subplot(len(datasets), len(classifiers) + 1, i)\n # Plot the training points\n ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)\n # and testing points\n ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n i += 1\n\n # iterate over classifiers\n for name, clf in zip(names, classifiers):\n ax = pl.subplot(len(datasets), len(classifiers) + 1, i)\n clf.fit(X_train, y_train)\n score = clf.score(X_test, y_test)\n\n # Plot the decision boundary. 
For that, we will assign a color to each\n # point in the mesh [x_min, m_max]x[y_min, y_max].\n if hasattr(clf, \"decision_function\"):\n Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])\n else:\n Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]\n\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n\n # Plot also the training points\n ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)\n # and testing points\n ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,\n alpha=0.6)\n\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n ax.set_title(name)\n ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),\n size=15, horizontalalignment='right')\n i += 1\n\n figure.subplots_adjust(left=.02, right=.98)\n pl.show()", "def __init__(self, estimator, **kwargs):\n super(LogisticRegression, self).__init__(\n estimator, **kwargs)\n\n self.estimator = estimator", "def log_reg(x_train, y_train):\n\n log_reg_classifier = LogisticRegression(max_iter=1000, solver='lbfgs')\n log_reg_classifier.fit(x_train, y_train)\n return log_reg_classifier\n\n # log_reg_classifier.fit(x_train, y_train)", "def learn(instance: machine_learning.StationMachineLearning):\n instance.learn()", "def linreg_scikit(X, y):\n lr = linear_model.LinearRegression()\n lr.fit(X, y)\n theta = lr.coef_.tolist()[1:]\n theta.insert(0, lr.intercept_)\n return theta", "def __init__(self, train_data, validation=None, initial_weight=None,\n loss_function_name='mse', cal_weight='gradient',\n regularizer=None, regularizer_p=None):\n self.train_x = train_data[1]\n self.train_y = train_data[0]\n\n self.set_valid(validation)\n\n ''' Define the progress of history here '''\n self.losses = []\n self.iterations = 0\n self.weights = []\n self.misclass_rate = []\n\n ''' Define loss, weight calculation, regularizer '''\n self.loss_function = get_loss_function(loss_function_name)\n self.loss_function_name = loss_function_name\n self.calculate_weight = cal_weight\n self.regularizer = Regularizer.get_regularizer(regularizer, regularizer_p)\n self.regularizer_p = regularizer_p\n\n # Asserting degree\n if len(self.train_x.shape) > 1:\n degree = self.train_x.shape[1]\n else:\n degree = 1\n\n # Initialize the weight for linear model.\n if initial_weight is not None:\n self.weights.append(initial_weight)\n else:\n self.weights.append(np.random.rand(degree))", "def svm_train(X, y, b, alpha, n_samples, n_features, learner, loop, eta,\n max_iter=100, step_probability=0.5):\n from pysofia import _sofia_ml\n if isinstance(X, six.string_types):\n if n_features is None:\n # the default in sofia-ml TODO: parse file to see\n n_features = 2**17\n w = _sofia_ml.train(X, n_features, alpha, max_iter, False,\n learner.value, loop.value, eta.value,\n step_probability)\n elif isinstance(X, np.ndarray):\n if n_features is None:\n n_features = X.shape[1]\n\n if n_samples is None:\n n_samples = X.shape[0]\n\n w = _sofia_ml.train_fast(np.float64(X), np.float64(y), n_samples,\n n_features, alpha, max_iter, False,\n learner.value, loop.value, eta.value,\n step_probability)\n else:\n if n_features is None:\n n_features = X.shape[1]\n\n with tempfile.NamedTemporaryFile() as f:\n datasets.dump_svmlight_file(X, y, f.name, query_id=b)\n w = _sofia_ml.train(f.name, n_features, alpha, max_iter, False,\n learner.value, loop.value, eta.value,\n step_probability)\n return w", "def learn(self):\n raise NotImplementedError()", 
"def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=1, train_sizes=np.linspace(.1, 1, 10)):\n font = {'family': 'Droid Sans',\n 'weight': 'normal',\n 'size': 14}\n plt.rc('font', **font)\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(u\"Объем тренировочной выборки\")\n plt.ylabel(u\"Точность\")\n train_sizes, train_scores, test_scores = learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",label=u\"Точность обучения\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",label=u\"Точность тестирования\")\n\n plt.legend(loc=\"best\")\n print('train scores')\n print(train_scores)\n print('test scores')\n print(train_scores)\n return plt", "def __init__(self):\n super().__init__()\n import sklearn\n import sklearn.svm\n self.model = sklearn.svm.LinearSVR", "def __init__(self, initialLearnRate):\n self.initialLearnRate = initialLearnRate", "def learn(self, D, **kwargs):\n pass", "def train_model_and_score(X,y_train):\n scaler = MinMaxScaler()\n X_scaled = scaler.fit_transform(X)\n\n #chose model\n # model = RandomForestClassifier()\n model = GradientBoostingClassifier()\n\n #split train/test\n x_train,x_test,y_train,y_test = train_test_split(X_scaled,y_train,test_size=0.33,random_state =42)\n\n #train\n model.fit(x_train,y_train)\n\n #evaluation\n sc = model.score(x_test,y_test), model.score(x_train,y_train)\n\n print(sc)\n\n return model,sc", "def draw_learning_curve(self, estimator, cvs=5, n_jobs=5, space_size=10,\n **kwargs):\n train_sizes, train_scores, test_scores = learning_curve(\n estimator=estimator, X=self.x_matrix, y=self.y_vector,\n train_sizes=numpy.linspace(.1, 1., space_size), cv=cvs,\n n_jobs=n_jobs, **kwargs\n )\n\n train_scores_mean = numpy.mean(train_scores, axis=1)\n train_scores_std = numpy.std(train_scores, axis=1)\n test_scores_mean = numpy.mean(test_scores, axis=1)\n test_scores_std = numpy.std(test_scores, axis=1)\n\n fig, ax_learning = pyplot.subplots(figsize=(10, 10))\n\n ax_learning.fill_between(\n train_sizes,\n train_scores_mean + train_scores_std,\n train_scores_mean - train_scores_std,\n alpha=0.1\n )\n ax_learning.plot(\n train_sizes, train_scores_mean, color='r',\n label='Training score'\n )\n\n ax_learning.fill_between(\n train_sizes,\n test_scores_mean + test_scores_std,\n test_scores_mean - test_scores_std,\n alpha=0.1\n )\n ax_learning.plot(\n train_sizes, test_scores_mean, color='g',\n label='Cross-validation score'\n )\n\n ax_learning.set(\n title='Learning curve',\n xlabel='Training examples', ylabel='Score'\n )\n ax_learning.legend(loc='best')\n\n self.learning_line = (fig, ax_learning)", "def learn(self):\n pass", "def learn(self):\n pass", "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, 
test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n\n print(train_scores_mean, test_scores_mean)\n return plt", "def train_model(X_train, y_train):\n rgs = linear_model.Lasso(alpha=0.1)\n rgs.fit(X_train, y_train)\n return rgs", "def run_learning_curve_experiment(X, y, dataset_name, train_ratio=0.7):\n gkr_cv, tau_opt, s2_opt = pick_optimal_params_using_cv(X, y)\n K, K_mmd = create_krr_mmd_kernel_matrices(X, s2_opt)\n n = K.shape[0]\n train_indices, test_indices = train_test_indices(\n n, train_ratio=train_ratio)\n\n learning_curves_mc_train, learning_curves_mc_test = sample_mc_learning_curves_train_test(\n K, y, train_indices, test_indices, tau_opt, num_trajectories=10)\n\n K_train = K[np.ix_(train_indices, train_indices)]\n fw = alg.FrankWolfe(K_train)\n fw.run_frank_wolfe()\n learning_curve_fw_train, learning_curve_fw_test = calculate_learning_curves_train_test(K, y,\n train_indices,\n test_indices,\n fw.sampled_order,\n tau_opt)\n\n save_dir = 'learning_curves-{}'.format(dataset_name)\n save_dir = Path(data_experiments_dir) / save_dir\n print(save_dir)\n save_dir.mkdir(parents=True, exist_ok=False)\n\n # Save all of the learning curves\n np.save(save_dir / 'learning_curve_fw_train', learning_curve_fw_train)\n np.save(save_dir / 'learning_curve_fw_test', learning_curve_fw_test)\n np.save(save_dir / 'learning_curves_mc_train', learning_curves_mc_train)\n np.save(save_dir / 'learning_curves_mc_test', learning_curves_mc_test)\n\n # Save json file of interesting information for this particular run\n euclidean_dist_q05 = kernel_quantile_heuristic(X, q=0.05)\n euclidean_dist_q95 = kernel_quantile_heuristic(X, q=0.95)\n\n param_config = {\n 'n': X.shape[0],\n 'd': X.shape[1],\n 'tau_opt_KRR': tau_opt,\n 's2_opt_KRR': s2_opt,\n 'train_ratio': train_ratio,\n 'euclidean_dist_q05': euclidean_dist_q05,\n 'euclidean_dist_q95': euclidean_dist_q95,\n 'time_created': str(datetime.now().strftime('%Y-%m-%d_%H:%M:%S'))\n }\n\n with open(save_dir / 'experiment_config.json', 'w') as json_file:\n json.dump(param_config, json_file)", "def __call__(self, y_true: np.ndarray, y_pred: np.ndarray) -> float:", "def ex_2_b(x_train, y_train, x_test, y_test):\n ###########\n ## TODO:\n ## Train SVMs with polynomial kernels for different values of the degree\n ## (Remember to set the 'coef0' parameter to 1)\n ## and plot the variation of the test and training scores with polynomial degree using 'plot_score_vs_degree' func.\n ## Plot the decision boundary and support vectors for the best value of degree\n ## using 'plot_svm_decision_boundary' function\n ###########\n\n degrees = range(1, 21)\n machines = [svm.SVC(kernel='poly', degree=d, coef0=1.0) for d in degrees]\n\n for machine in machines:\n machine.fit(x_train, y_train)\n\n trainScores = [machine.score(x_train, y_train) for machine in 
machines]\n testScores = [machine.score(x_test, y_test) for machine in machines]\n\n plot_score_vs_degree(trainScores, testScores, degrees)\n\n bestDegree = testScores.index(max(testScores))\n print('Score of best polynomial degree ({}): {}'.format(bestDegree + 1, testScores[bestDegree]))\n plot_svm_decision_boundary(machines[bestDegree], x_train, y_train, x_test, y_test)", "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5)):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt", "def fit(self, x, y):\n def initiate_theta(dim):\n self.theta = np.zeros(dim)\n # print('self.theta initiated is {}'.format(self.theta))\n \n def implement_sigmoid(x):\n if self.theta is None:\n initiate_theta(x.shape[1])\n z = np.matmul(np.transpose(self.theta), np.transpose(x))\n return 1/(np.ones(x.shape[0]) + np.exp(-z))\n \n def implement_partial_loss(x, y):\n return -np.matmul(np.transpose(y - implement_sigmoid(x)), x)/x.shape[0]\n \n def implement_transposed_hess(x):\n sigmoid_hadamard = implement_sigmoid(x) * (np.ones(x.shape[0]) - implement_sigmoid(x))\n hess2 = np.diag(sigmoid_hadamard)\n hess = np.matmul(hess2,x)\n hess = np.matmul(np.transpose(x),hess)/x.shape[0]\n hess_inverse = np.linalg.inv(hess)\n return hess_inverse\n \n def train(x, y):\n count = 0\n if self.theta is None:\n initiate_theta(x.shape[1])\n while count < self.max_iter:\n if self.verbose:\n loss_y1 = np.matmul(np.transpose(y), np.log(implement_sigmoid(x)))\n loss_y0 = np.matmul(np.transpose(np.ones(x.shape[0]) - y), np.log(np.ones(x.shape[0]) - implement_sigmoid(x)))\n loss = -(loss_y1 + loss_y0 )/x.shape[0]\n print('Average empirical loss for step {} is {}'.format(count, loss))\n delta = np.matmul(implement_transposed_hess(x), implement_partial_loss(x, y))\n new_theta = self.theta - delta * self.step_size\n delta_theta = np.linalg.norm(new_theta - self.theta)\n # print('delta is {}'.format(delta_theta))\n if delta_theta < self.eps:\n return new_theta\n else:\n self.theta = new_theta\n count += 1\n return self.theta\n \n return train(x, y)", "def train_classifier(X, y, Cs=10):\n cls = LogisticRegressionCV(Cs=Cs, random_state=0, solver='lbfgs', max_iter=10000)\n cls.fit(X, y)\n return cls", "def test_compatibility_with_sklearn(self) -> type(None):\n check_estimator(StackingRegressor)", "def decision_function(alphas, target, kernel, X_train, x_test, b):\n\n result = (alphas * target) @ kernel(X_train, x_test) - b\n return result", "def __init__(self, x=None, y=None, theta=None):\r\n\r\n # ------------- Hyperparameters ------------------ #\r\n\r\n self.LEARNING_RATE = 0.01 # 
The learning rate.\r\n self.CONVERGENCE_MARGIN = 0.001 # The convergence criterion.\r\n self.MAX_ITERATIONS = 1 # Maximal number of passes through the datapoints in stochastic gradient descent.\r\n self.MINIBATCH_SIZE = 1000 # Minibatch size (only for minibatch gradient descent)\r\n\r\n # ----------------------------------------------------------------------\r\n\r\n if not any([x, y, theta]) or all([x, y, theta]):\r\n raise Exception('You have to either give x and y or theta')\r\n\r\n if theta:\r\n self.FEATURES = len(theta)\r\n self.theta = theta\r\n\r\n elif x and y:\r\n # Number of datapoints.\r\n self.DATAPOINTS = len(x)\r\n\r\n # Number of features.\r\n self.FEATURES = len(x[0]) + 1\r\n\r\n # Encoding of the data points (as a DATAPOINTS x FEATURES size array).\r\n self.x = np.concatenate((np.ones((self.DATAPOINTS, 1)), np.array(x)), axis=1)\r\n\r\n # Correct labels for the datapoints.\r\n self.y = np.array(y)\r\n\r\n # The weights we want to learn in the training phase.\r\n self.theta = np.random.uniform(-1, 1, self.FEATURES)\r\n\r\n # The current gradient.\r\n self.gradient = np.zeros(self.FEATURES)\r\n\r\n self.training_iteration = 0", "def learn(self, p: np.ndarray):\n target = self.target(p)\n self.set_inputs(p)\n self.set_transformation(p)\n self.set_outputs(target)\n self.set_output_transformation(target)\n self.activation(clamps = ['input', 'transformation', 'output', 'output_transformation'])", "def calculate_learning_curves_train_test(K, y, train_indices, test_indices, sampled_order_train,\n tau, stop_t=None):\n gaussian_kr = GaussianKernelRidgeRegression(\n tau=tau, s2=None, precompute_K=True)\n\n # Index K differently depending on what we do.\n # When predicting, we need the kernel matrix to be\n # K_mn, where m indexes the set to predict over and\n # n indexes the set we train over\n K_train = K[np.ix_(train_indices, train_indices)]\n K_test = K[np.ix_(test_indices, test_indices)]\n K_test_train = K[np.ix_(test_indices, train_indices)]\n K_sampled_train = K_train[np.ix_(sampled_order_train, sampled_order_train)]\n\n y_train = y[train_indices]\n y_test = y[test_indices]\n y_sampled_train = y_train[sampled_order_train]\n\n n_train = K_train.shape[0]\n n_test = K_test.shape[0]\n\n if stop_t is None:\n stop_t = n_train\n\n learning_curve_train = np.zeros(stop_t)\n learning_curve_test = np.zeros(stop_t)\n\n for t in range(stop_t):\n K_sampled_train_t = K_sampled_train[0:t+1, 0:t+1]\n gaussian_kr.fit(X=K_sampled_train_t, y=y_sampled_train[:t+1])\n\n # Predict for train set\n K_xn_train = K_train[np.ix_(\n np.arange(n_train), sampled_order_train[:t+1])]\n y_train_ = gaussian_kr.predict(K_xn_train)\n learning_curve_train[t] = mean_squared_error(y_train, y_train_)\n\n # Then test set\n K_xn_test = K_test_train[np.ix_(\n np.arange(n_test), sampled_order_train[:t+1])]\n y_test_ = gaussian_kr.predict(K_xn_test)\n learning_curve_test[t] = mean_squared_error(y_test, y_test_)\n\n return learning_curve_train, learning_curve_test", "def fit(self, X, y):\r\n self.X = X\r\n self.y = y\r\n\r\n self.arr_weight = []\r\n self.pred_fi = []\r\n self.arr_alpha = []\r\n wt = np.ones(len(y))/len(y)\r\n weights = np.copy(wt)\r\n self.initial_weights = np.copy(wt)\r\n gr_truth = np.array(y)\r\n self.arr_weight.append(np.copy(wt))\r\n\r\n for i in range(self.n_estimators):\r\n self.base_estimator.fit(X,y,self.arr_weight[i])\r\n y_pred = self.base_estimator.predict(X)\r\n\r\n predicted = np.array(y_pred)\r\n t = np.nonzero(predicted-gr_truth)\r\n error = sum(weights[(t[0])])\r\n error /= sum(weights)\r\n 
alpha_m = 0.5*np.log((1-error)/error) # Calculation of error\r\n self.arr_alpha.append(alpha_m)\r\n weights[np.nonzero(predicted - gr_truth)[0]]*=np.exp(alpha_m)\r\n factor = np.exp(-alpha_m)\r\n weights[np.delete(np.arange(len(y)),(np.nonzero(predicted - gr_truth)[0]))] = weights[np.delete(np.arange(len(y)),(np.nonzero(predicted - gr_truth)[0]))]*factor\r\n norm = sum(weights)\r\n weights = weights/norm # normalization\r\n\r\n self.clfs.append(copy(self.base_estimator))\r\n self.pred_fi.append(copy(predicted))\r\n self.arr_weight.append(copy(weights))", "def linear_regresstion_action(X_train=\"not defined\", X_test=\"not defined\", y_train=\"not defined\", y_test=\"not defined\",\r\n input_data=\"not defined\"):\r\n if \"not defined\" in [X_train, X_test, y_train, y_test]:\r\n X_train, X_test, y_train, y_test = splitting.splitting_data()\r\n\r\n if input_data == \"not defined\":\r\n raise ValueError(\"please provide input data\")\r\n\r\n linreg = LinearRegression()\r\n grid = {\r\n \"normalize\": [\"True\", \"False\"],\r\n }\r\n\r\n model = RandomizedSearchCV(linreg, grid, random_state=1007486)\r\n model.fit(X_train, y_train)\r\n y_pred = model.predict(X_test)\r\n predicted_units = model.predict(input_data)\r\n\r\n # assert score > 0.6, \"fuck this model is too bad!!!\"\r\n return y_pred, predicted_units", "def lr(self):\n pass", "def fit(self, X, y):\n return self # as required by sklearn.", "def train(self, X, y, learning_rate=1e-3, reg=1e-5, num_iters=100,\n batch_size=200, verbose=False):\n num_train, dim = X.shape\n num_classes = np.max(y) + 1 # assume y takes values 0...K-1 where K is number of classes\n if self.W is None:\n self.W = np.random.randn(dim, num_classes) * 0.001\n\n # Run stochastic gradient descent to optimize W\n loss_history = []\n for it in xrange(num_iters):\n batch_ind = np.random.choice(X.shape[0],batch_size, replace=True)\n X_batch = X[batch_ind]\n y_batch = y[batch_ind]\n\n # Step One: Implement Stochastic\n #########################################################################\n # Sample batch_size elements from the training data and their #\n # corresponding labels to use in this round of gradient descent. #\n # Store the data in X_batch and their corresponding labels in #\n # y_batch; after sampling X_batch should have shape (batch_size, dim) #\n # and y_batch should have shape (batch_size,) #\n # #\n # Hint: Use np.random.choice to generate indices. Sampling with #\n # replacement is faster than sampling without replacement. #\n #########################################################################\n\n # Step Two: Implement Gradient\n # Simply call self.loss (which calls svm_loss_vectorized) to evaluate loss and gradient\n loss, dW = self.loss(X_batch,y_batch,reg)\n loss_history.append(loss)\n\n # Step Three: Implement Descent\n # Simply update the weights using the gradient and the learning rate. 
#\n self.W -= dW*learning_rate\n\n if verbose and it % 100 == 0:\n print('iteration %d / %d: loss %f' % (it, num_iters, loss))\n\n return loss_history", "def test_score(coefs, intercept, method):\n X, y = _create_dataset(coefs, intercept, noise=1.0)\n lad = LADRegression(method=method)\n lad.fit(X, y)\n assert lad.score(X, y) > 0.9", "def __init__(self, estimator, target_language='java',\n target_method='predict', **kwargs):\n super(SVC, self).__init__(estimator, target_language=target_language,\n target_method=target_method, **kwargs)\n self.estimator = estimator", "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5)):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt", "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5)):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt", "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5)):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - 
test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt", "def smooth(x, y, PLOT=False, **kwargs):\n assert len(x) == len(y)\n x = np.array(x)\n y = np.array(y)\n\n # do a grid search to find the optimal polynomial degree\n model = GridSearchCV(\n # use a linear model with polynomial features\n sklearn.pipeline.Pipeline([\n ('poly', sklearn.preprocessing.PolynomialFeatures(degree=3)),\n ('linear', sklearn.linear_model.LinearRegression())\n ]),\n cv=kwargs.get('cv', min(5, len(x))),\n param_grid={\n 'poly__degree': np.arange(\n kwargs.get('min_degree', 3),\n kwargs.get('max_degree', 14),\n 2\n )\n }\n )\n x = x.reshape(-1, 1)\n model.fit(x, y)\n\n def predict(_x):\n return model.predict(np.array(_x).reshape(-1, 1))\n\n if PLOT:\n yhat = predict(x)\n fig, ax = plt.subplots(1, 1)\n ax.plot(x, yhat, 'k--')\n ax.plot(x, y, 'ko')\n ax.set(xlabel='x', ylabel='y')\n sns.despine()\n return predict, fig, ax\n if kwargs.get('PRINT'):\n print(model)\n return predict", "def train_linear_SVM(X_train_input, y_train_input, C=1):\r\n from sklearn.svm import SVC\r\n svc_clf = SVC(kernel='linear', probability=True, C=C)\r\n svc_clf.fit(X_train_input, y_train_input)\r\n return svc_clf", "def plot_learning_curve(self, estimators, title, labels, colors, X, y, \n ylim=None, cv=None, n_jobs=1, train_sizes=np.linspace(.1, 1.0, 10)):\n plt.figure(figsize = (6,8))\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Accuracy\")\n for i in range(len(estimators)): \n estimator = estimators[i]\n line_label = labels[i]\n line_color = colors[i]\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=line_color[0])\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=line_color[1])\n plt.plot(train_sizes, train_scores_mean, \"o-\", color=line_color[0],\n label=line_label[0])#\"Training accuracy\"\n plt.plot(train_sizes, test_scores_mean, \"o-\", color=line_color[1],\n label=line_label[1]) #\"Cross-validation accuracy\"\n\n plt.grid() \n plt.legend(bbox_to_anchor=(0., 1.05, 1., .105),loc=3, mode=\"expand\",\n borderaxespad=0.)\n plt.savefig(\"../Figures/\" + title + \".png\", bbox_inches=\"tight\")\n return plt", "def train(self, loss_function='logistic',\n lr=0.1, decay=0.5, max_iters=3000, batch_size=128, **kwargs):\n return super(LogisticRegression, self).train('sgd', loss_function,\n lr=lr,\n decay=decay, max_iters=max_iters,\n batch_size=batch_size, **kwargs)", "def train(self, X, y, lr, epoch, method='adam', quit=1e-4):\n if len(y.shape) == 1:\n y = y.reshape((-1, 1))\n if not (0 < lr < 1):\n raise self.ANNException('learning rate cannot be negative or exceeds 1')\n if epoch <= 0:\n raise self.ANNException('epoch must be postitive integer')\n if method == 'gd':\n for _ in range(epoch):\n nodes = self._forward(X)\n self._backpropagate(y, nodes, lr)\n 
elif method == 'adam':\n alpha = 0.1\n beta1 = 0.5\n beta2 = 0.999\n epsilon = 1e-8\n mt = np.zeros(shape=self.weight.shape)\n vt = np.zeros(shape=self.weight.shape)\n before_err = self._energy(X, y)\n for t in range(1, epoch+1):\n nodes = self._forward(X)\n gt = self._backpropagate(y, nodes, alpha, ret=True)\n mt = beta1*mt + (1-beta1)*gt\n vt = beta2*vt + (1-beta2)*gt**2\n mthat = mt / (1-np.power(beta1, t))\n vthat = vt / (1-np.power(beta2, t))\n self.weight -= alpha * mthat/(np.sqrt(vthat)+epsilon)\n after_err = self._energy(X, y)\n if 0 < after_err-before_err < quit:\n return\n else:\n before_err = after_err\n else:\n raise self.ANNException('only gd and adam optimizer are supported')", "def fit_score(estimator, train_data, test_data):\n estimator.fit(*train_data)\n return estimator.score(*test_data)", "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n plt.show()\n # return plt", "def train_regressor_age_estimator(x, y, **kwargs):\n\n verbose = kwargs['verbose'] if 'verbose' in kwargs else 0\n verboseprint = print if verbose else lambda *a, **k: None\n verbose -= 1 if verbose > 0 else 0\n\n n_jobs = kwargs['n_jobs'] if 'n_jobs' in kwargs else -1\n n_folds = kwargs['n_folds'] if 'n_folds' in kwargs else 10\n\n # ---------------------------------------------------------------------------------\n # TRAIN AND VALIDATION\n # ---------------------------------------------------------------------------------\n verboseprint('Training Age Regressor ...')\n t = time.time()\n model, param, mae, std = reg_group_lopo(x=x,\n y=y,\n ind=[],\n n_jobs=n_jobs,\n n_folds=n_folds,\n verbose=verbose,\n rang=[min(y), max(y)],\n gamma=[0.001, 0.01, 0.1, 1],\n c=[0.001, 0.01, 0.1, 1])\n # gamma=np.array(range(1, 100, 5))/100.0,\n # c=np.array(range(1, 100, 5))/100.0)\n t = time.time() - t\n verboseprint(u'Best MAE: {0} \\u00B1 {1} - Best Params: {2} - Time: {3}'.format(mae, std, param, t))\n\n return {'model': model, 'param': param, 'score': mae, 'std': std}", "def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std 
= np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt", "def calculate_learning_curve(K, y, sampled_order, tau, stop_t=None):\n gaussian_kr = GaussianKernelRidgeRegression(\n tau=tau, s2=None, precompute_K=True)\n\n n = K.shape[0]\n if stop_t is None:\n stop_t = n\n\n all_indices = np.arange(n)\n K_sampled = K[np.ix_(sampled_order, sampled_order)]\n\n learning_curve = np.zeros(stop_t)\n for t in range(stop_t):\n K_sampled_t = K_sampled[0:t+1, 0:t+1]\n gaussian_kr.fit(X=K_sampled_t, y=y[sampled_order[:t+1]])\n # NB: the sampled_order index the train set\n # and all indices the set we are predicting\n K_xn = K[np.ix_(all_indices, sampled_order[:t+1])]\n y_ = gaussian_kr.predict(K_xn)\n learning_curve[t] = mean_squared_error(y, y_)\n\n return learning_curve" ]
[ "0.64221656", "0.63034314", "0.6246641", "0.6199103", "0.61902195", "0.6133346", "0.6075857", "0.6074158", "0.6016552", "0.5998704", "0.59975034", "0.59883845", "0.59740204", "0.59331024", "0.5884026", "0.58812094", "0.5867041", "0.58130425", "0.5809215", "0.5798471", "0.5792855", "0.5756564", "0.57458466", "0.57367617", "0.57365996", "0.5735482", "0.5725889", "0.5722918", "0.5716548", "0.5710387", "0.56805813", "0.5653359", "0.5653359", "0.5653359", "0.5650313", "0.5643706", "0.5631886", "0.562707", "0.5621232", "0.56100553", "0.5605829", "0.5602816", "0.5596639", "0.558762", "0.5584459", "0.5582768", "0.55817634", "0.55764866", "0.557345", "0.5567197", "0.55655664", "0.55510664", "0.55436146", "0.5540171", "0.5538087", "0.5527264", "0.55245936", "0.55233514", "0.55169874", "0.5514286", "0.55087954", "0.54888153", "0.54864794", "0.5464647", "0.5462513", "0.5461729", "0.5461729", "0.5459122", "0.545722", "0.54551035", "0.54521716", "0.5450652", "0.5449633", "0.5443429", "0.5441989", "0.5439341", "0.54356635", "0.5432149", "0.54298115", "0.5428097", "0.54278207", "0.54270214", "0.54229736", "0.5418387", "0.54182243", "0.54172486", "0.54152423", "0.5413665", "0.5413665", "0.5413665", "0.54133946", "0.5412259", "0.54085964", "0.5405625", "0.54042727", "0.54023725", "0.5401986", "0.54019725", "0.5398464", "0.53975064" ]
0.70836437
0
Create an agent membership
def create_agent_membership(self, context, agent_membership): am = agent_membership['agent_membership'] with context.session.begin(subtransactions=True): am_db = AgentMembership(id=am['id'], ip_address=am['ip_address']) context.session.add(am_db) return self._make_agent_membership_dict(am_db)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_member(self, body=None):\r\n return self.post(self.members_path, body=body)", "def add_member():\n client = RequestManager()\n client.set_method(\"POST\")\n client.set_endpoint(\"/accounts/{0}/memberships\".format(CONFIG_DATA['account_id']))\n body = {\"person_id\": CONFIG_DATA['member_id']}\n client.set_body(json.dumps(body))\n client.execute_request()", "def create_member(org_id, group_id, target_group_ids, sex, first_name, last_name, title_name, email):\n user = get_user_by_email(email)\n # --- falls e-mail schon existiert wird nichts unternommen\n if user != None:\n if org_id > 0: # nur bei Schulen wird die Schulnummer vorangestellt\n prefix = '%i_' % org_id\n else:\n prefix = ''\n user = User()\n username = get_username(prefix, first_name, last_name)\n user.username = username\n user.sex = sex\n user.first_name = first_name\n user.last_name = last_name\n user.email = email\n user.title = title_name\n user.is_staff = False\n user.is_active = True\n user.is_superuser = False\n user.date_joined = datetime.datetime.now()\n password = generate_passwd()\n user.set_password(password)\n user.save()\n set_user_org(org_id, user)\n send_password(email, username, password)\n set_user_group(user, get_group_by_id(group_id))\n for group in target_group_ids:\n set_user_group(user, get_group_by_id(group))\n transaction.commit()", "def create_member(actioncluster, user):\n membership, is_new = (ActionClusterMembership.objects\n .get_or_create(actioncluster=actioncluster, user=user))\n return membership if is_new else None", "def test_create_member(self):\r\n resource = 'member'\r\n cmd = member.CreateMember(test_cli20.MyApp(sys.stdout), None)\r\n address = '10.0.0.1'\r\n port = '8080'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n pool_id = 'pool-id'\r\n args = ['--address', address, '--protocol-port', port,\r\n '--tenant-id', tenant_id, pool_id]\r\n position_names = ['address', 'protocol_port', 'tenant_id', 'pool_id',\r\n 'admin_state_up']\r\n position_values = [address, port, tenant_id, pool_id, True]\r\n self._test_create_resource(resource, cmd, None, my_id, args,\r\n position_names, position_values,\r\n admin_state_up=None)", "def create_memberships_project():\n client = RequestManager()\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects/{0}/memberships\".format(STORED_ID['project_id']))\n body = {\"person_id\": CONFIG_DATA['member_id'], \"role\": 'member'}\n client.set_body(json.dumps(body))\n client.execute_request()", "async def command_create(self, context):\n # await self._create_new_role(context, name, target=GROUP_CATEGORY_NAME)\n print('main create')", "def create_member(self, context, member):\n LOG.info(\"Received request 'Create Member' for Pool:%(pool_id)s \",\n {'pool_id': member['pool_id']})\n arg_dict = {'context': context,\n lb_const.MEMBER: member,\n }\n self._send_event(lb_const.EVENT_CREATE_MEMBER_V2, arg_dict,\n serialize=True,\n binding_key=member[lb_const.POOL]['loadbalancer_id'],\n key=member['id'])", "def create_creator_member(sender, **kwargs):\n if kwargs.get('created'):\n league = kwargs['instance']\n league.members.create(user=league.creator,\n fb_uid=league.creator.fb_uid,\n status='creator')", "def _create_member(self, **kwargs):\n category_name = kwargs.pop('category_name', Category.ACTIVE)\n params = {\n 'category': Category.objects.get(name=category_name),\n 'first_payment_month': 8,\n 'first_payment_year': 2015,\n 'has_student_certificate': False,\n 'has_subscription_letter': True,\n 'has_collaborator_acceptance': False,\n }\n params 
= {k: kwargs.pop(k, v) for k, v in params.items()}\n member = Member.objects.create(**params)\n\n # create the related person\n params = {\n 'membership': member,\n 'nickname': 'test-nick',\n 'picture': 'fake-pic',\n }\n params = {k: kwargs.pop(k, v) for k, v in params.items()}\n Person.objects.create(**params)\n\n assert not kwargs, kwargs # would indicate a misuse of the parameters\n return member", "def create(self, identity, data=None, record=None, **kwargs):\n if system_process in identity.provides:\n return\n\n member = {\n \"type\": \"user\",\n \"id\": str(identity.id),\n }\n self.service.members.add(\n # the user is not yet owner of the community (is being added)\n # therefore we cannot use `identity`\n system_identity,\n record.id,\n {\"members\": [member], \"role\": current_roles.owner_role.name},\n uow=self.uow,\n )\n\n # Invalidate the membership cache\n on_user_membership_change(identity=identity)", "def create(self, name, login, password, email, address=\"\", vat=\"\", jobguid=\"\", executionparams=None):", "def createMentor(self, org):\n self.createProfile()\n self.profile.mentor_for = [org.key()]\n self.profile.put()", "def createAgent(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_alert_create_for_site_members(self):\n pass", "def make_agent(agent_id, **kwargs):\n return agent_register[agent_id](**kwargs)", "def test_agent_creation():\n agent = AgentFactory()\n agent.name = 'agent test name'\n agent.save()\n assert agent.name == 'agent test name'", "def create_member(net_id):\n #TODO put this exception handling in to the presentation layer\n #if ' ' in net_id or '@' in net_id:\n # raise Exception('Only enter the first portion of the net id => jmrolf@iastate.edu - jmrolf')\n student_html = Info_IaState_Scraper.get_raw_html(net_id)\n student_data = Info_IaState_Scraper.parse_student_data(student_html)\n connection = get_connection()\n cursor = connection.cursor()\n sql_string = \"INSERT INTO Member VALUES('\"+net_id+\"', '\"+net_id+\"@iastate.edu', \" \\\n \"'\"+student_data['classification']+\"', '\" + student_data['major']+\"', \" \\\n \"'\"+student_data['name']+\"', 0)\"\n cursor.execute(sql_string)\n connection.commit()", "def create(self, data):\n curso = self.context['curso']\n invitation = self.context['invitation']\n user = data['user']\n\n now = timezone.now()\n\n # studen creation\n member = Rol.objects.create(\n user=user,\n profile=user.profile,\n curso=curso,\n invited_by=invitation.issued_by\n )\n\n # Update Invitation\n invitation.used_by = user\n invitation.used = True\n invitation.used_at = now\n invitation.save()\n\n # Update issuer data\n issuer = Rol.objects.get(user=invitation.issued_by, curso=curso)\n issuer.remaining_invitations -= 1\n issuer.save()\n\n return member", "def main_role_create(\n client: CitusCloudMgmt,\n **opts: tp.Any\n) -> None:\n\n role_id = client.create_role(opts[\"formation\"], opts[\"name\"])\n logger.info(f\"Created new role \\\"name\\\" with id=\\\"{role_id}\\\"\")\n click.echo(role_id)", "def test_ipam_roles_create(self):\n pass", "async def create(self, ctx):\n\n # get the model data for the role assigner object\n data = await self.get_objects(\n model=RoleAssigner, filter={\"bot__name\": str(self.bot_name)}\n )\n\n # role assigner object\n data = data[0]\n\n message = await ctx.send(\"_ _\", embed=self.create_message_embed(data))\n\n data.message.uid = message.id\n data.message.cuid = 
message.channel.id\n\n self.message_id = data.message.uid\n\n await self.update_reactions(message, data)\n\n await self.update_objects(model_instance=data)", "def create_individual(self):\n pass", "def create_members(self, accounts_info):\n detector_id = self.list_detector()\n if detector_id:\n try:\n response = self.client.create_members(\n AccountDetails=accounts_info,\n DetectorId=detector_id\n )\n for result in response['UnprocessedAccounts']:\n print(result)\n return True\n except ClientError as e:\n print(e.response['Error']['Code'])\n return False", "async def post(self):\r\n data = await self.request.json()\r\n register_date = data[\"register_date\"]\r\n ip_address = data[\"ip_address\"]\r\n try:\r\n Agent.create(register_date=register_date, ip_address=ip_address)\r\n response_obj = {\"status\": \"success\"}\r\n return web.Response(text=str(response_obj), status=201)\r\n except Exception as exception:\r\n response_obj = {\"status\": \"failed\", \"reason\": exception}\r\n error_message = str(exception)\r\n logger.error(error_message)\r\n return web.Response(text=str(response_obj), status=500)", "def create(self, name, username, emailaddress, maxMemoryCapacity=-1.0, maxVDiskCapacity=-1, maxCPUCapacity=-1, maxNetworkPeerTransfer=-1, maxNumPublicIP=-1, sendAccessEmails=True, **kwargs):\n #put your code here to implement this method\n raise NotImplementedError (\"not implemented method create\")", "def _register_agent(self, agent, agent_avatar: AgentBody):\n\n # Random seed for agent between 1 and 10000000, might need to be adjusted still\n agent_seed = self.__rnd_gen.randint(1, 1000000)\n\n # check if the agent can be succesfully placed at that location\n self.__validate_obj_placement(agent_avatar)\n\n # Add agent to registered agents\n self.__registered_agents[agent_avatar.obj_id] = agent_avatar\n\n if self.__verbose:\n print(f\"@{os.path.basename(__file__)}: Created agent with id {agent_avatar.obj_id}.\")\n\n # Get all properties from the agent avatar\n avatar_props = agent_avatar.properties\n\n if agent_avatar.is_human_agent is False:\n agent._factory_initialise(agent_name=agent_avatar.obj_name,\n agent_id=agent_avatar.obj_id,\n action_set=agent_avatar.action_set,\n sense_capability=agent_avatar.sense_capability,\n agent_properties=avatar_props,\n customizable_properties=agent_avatar.customizable_properties,\n callback_is_action_possible=self.__check_action_is_possible,\n rnd_seed=agent_seed)\n else: # if the agent is a human agent, we also assign its user input action map\n agent._factory_initialise(agent_name=agent_avatar.obj_name,\n agent_id=agent_avatar.obj_id,\n action_set=agent_avatar.action_set,\n sense_capability=agent_avatar.sense_capability,\n agent_properties=avatar_props,\n customizable_properties=agent_avatar.customizable_properties,\n callback_is_action_possible=self.__check_action_is_possible,\n rnd_seed=agent_seed,\n key_action_map=agent_avatar.properties[\"key_action_map\"])\n\n return agent_avatar.obj_id", "async def addFreeAgentRole(self, ctx, tier, role : discord.Role):\n server_dict = self.get_server_dict(ctx)\n free_agent_dict = server_dict.setdefault(\"Free agent roles\", {})\n \n try:\n free_agent_dict[tier] = role.id\n self.save_data()\n await self.bot.say(\"Franchise role for {0} = {1}\".format(tier, role.mention))\n except IndexError:\n await self.bot.say(\":x: Error adding info to the free agent role dictionary\")", "def do_user_create():\n target = User(\n request.form['gender'],\n request.form['first_name'],\n request.form['name'],\n 
request.form['mail'],\n request.form['meter_id'],\n request.form['group_id'],\n secrets.token_hex(33))\n target.set_role(request.form['role'])\n target.nick = request.form['nick']\n db.session.add(target)\n db.session.commit()\n return user_list(\"Created user \" + target.name)", "def test_create_owner(self):\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n\n url = reverse(\n 'projectroles:api_invite_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'email': INVITE_USER_EMAIL,\n 'role': PROJECT_ROLE_OWNER,\n 'message': INVITE_MESSAGE,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n self.assertEqual(len(mail.outbox), 0)", "async def createRole(self, ctx):\n await self.deleteRole(ctx=ctx, reason=\"Début de partie.\")\n await ctx.guild.create_role(name=self.categoryName)\n await asyncio.sleep(1)\n self.roleForPlayer = discord.utils.get(ctx.guild.roles, name=self.categoryName)\n print(\"Role created.\")\n member = await ctx.guild.fetch_member(bot.user.id)\n await member.add_roles(self.roleForPlayer, reason=\"Début de partie.\")\n for member in ctx.author.voice.channel.members:\n await member.add_roles(self.roleForPlayer, reason=\"Début de partie.\")", "def create_investor(sender, **kwargs):\n u = kwargs[\"instance\"]\n try:\n \n if not InvestorProfile.objects.filter(username=u.username):\n inv = InvestorProfile(username=u.username,user=u)\n inv.save()\n g = DjangoGroup.objects.get(name='Investors') \n g.user_set.add(u)\n except Exception as e:\n print e", "def create(ctx):\n pass", "def create():", "def create():", "def create_role():\n headers = {\"X-Vault-Token\": args.x_vault_token}\n data = {\n \"bound_service_account_names\": args.k8s_service_account,\n \"bound_service_account_namespaces\": args.k8s_namespace,\n \"policies\": args.vault_policies.split(','),\n \"ttl\": args.vault_role_ttl\n }\n url = \"{0}/auth/{1}/role/{2}\".format(args.vault_url, args.k8s_cluster_name, args.k8s_namespace)\n print 'Creating role {0} for {1} with policies {2} and ttl {3}'.format(args.k8s_namespace,\n args.k8s_cluster_name,\n args.vault_policies,\n args.vault_role_ttl)\n send_post(url=url, data=data, headers=headers)", "def test_create_member_all_params(self):\r\n resource = 'member'\r\n cmd = member.CreateMember(test_cli20.MyApp(sys.stdout), None)\r\n address = '10.0.0.1'\r\n admin_state_up = False\r\n port = '8080'\r\n weight = '1'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n pool_id = 'pool-id'\r\n args = ['--address', address, '--admin-state-down',\r\n '--protocol-port', port, '--weight', weight,\r\n '--tenant-id', tenant_id, pool_id]\r\n position_names = [\r\n 'address', 'admin_state_up', 'protocol_port', 'weight',\r\n 'tenant_id', 'pool_id'\r\n ]\r\n position_values = [address, admin_state_up, port, weight,\r\n tenant_id, pool_id]\r\n self._test_create_resource(resource, cmd, None, my_id, args,\r\n position_names, position_values,\r\n admin_state_up=None)", "def create_member(self, email, name, password, **properties):\n\t\tproperties.update({\"email\":email, \"name\":name, \"password\":password})\n\t\tresponse = self.client.post(self._endpoint + \"/member\",content=properties)\n\t\treturn Member(\n\t\t\tresponse.json['member_id'],\n\t\t\tself.user_id,\n\t\t\tself.site_id,\n\t\t\tdata=response.json\n\t\t)", "def onUserCreation(event):\n\n client = 
getUtility(IAdminClient)\n xmpp_users = getUtility(IXMPPUsers)\n storage = getUtility(IPubSubStorage)\n principal = event.principal\n mtool = getToolByName(principal, 'portal_membership')\n\n principal_id = principal.getUserId()\n principal_jid = xmpp_users.getUserJID(principal_id)\n members_jids = [xmpp_users.getUserJID(member.getUserId())\n for member in mtool.listMembers()]\n pass_storage = getUtility(IXMPPPasswordStorage)\n principal_pass = pass_storage.set(principal_id)\n\n storage.leaf_nodes.append(principal_id)\n storage.node_items[principal_id] = []\n storage.collections['people'].append(principal_id)\n storage.publishers[principal_id] = [principal_id]\n\n d = setupPrincipal(client, principal_jid, principal_pass, members_jids)\n return d", "def create_auth_token(sender, instance=None, created=False, **kwargs):\n\n if created:\n # Generate API token for user.\n api_token = Token.objects.create(user=instance)\n\n # Only create agent using username and API token for non-admin users.\n if instance.is_superuser is False:\n Agent.objects.create(scan_agent=instance, api_token=api_token)", "def test_add_team_member(self):\n pass", "def create_person(self):", "def test_teams_invite_member(self):\n pass", "def create(user):\n return Member(key_name=user.user_id(), user=user)", "def createMember(self):\n return _libsbml.Group_createMember(self)", "def create_role(name, arn):\n\tsession = get_session()\n\tresponse = session.post(\"{url}/api/roles\".format(url=get_registry_url()), json={\"name\": name, \"arn\": arn})\n\treturn response.json()", "def add_member(self, request, pk):\n farm = self.get_object()\n user = request.data.get('user')\n farm.add_member(user)\n return Response({}, status=status.HTTP_202_ACCEPTED)", "def subject_member_create(context, values, session=None):\n memb_ref = models.SubjectMember()\n _subject_member_update(context, memb_ref, values, session=session)\n return _subject_member_format(memb_ref)", "def create_members(N):\n for _ in range(N):\n name = fake.name()\n phone = fake.phone_number()\n email = fake.email()\n address = fake.address()\n Member.objects.create(\n name=name,phone=phone,\n email=email,address=address\n )", "def test_create_delegate_limit(self):\n # Create new user and grant delegate role\n new_user = self.make_user('new_user')\n self.make_assignment(self.project, new_user, self.role_delegate)\n url = reverse(\n 'projectroles:api_role_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'role': PROJECT_ROLE_DELEGATE,\n 'user': str(self.assign_user.sodar_uuid),\n }\n # NOTE: Post as owner\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def create(*args):", "def create_user(self):\n unique_id = str(uuid.uuid4())\n new_user_properties = {\n \"name\": self.name,\n \"mission_statement\": self.mission_statement,\n \"unique_id\": unique_id,\n \"email\": self.email.lower(),\n \"is_mentor\": True,\n \"is_tutor\": True,\n \"is_visible\": True,\n \"is_available_for_in_person\": True,\n \"is_admin\": True}\n new_user_node = Node.cast(AgoraLabel.USER, new_user_properties)\n try:\n self.graph_db.create(new_user_node)\n except:\n pass\n return new_user_node", "def _add(self, signup_form_id, group_ids):\n path = '/members/add'\n data = self.extract()\n if group_ids:\n data['group_ids'] = group_ids\n if signup_form_id:\n data['signup_form_id'] = signup_form_id\n\n outcome = self.account.adapter.post(path, data)\n self['member_status_id'] = 
outcome['status']\n if outcome['added']:\n self['member_id'] = outcome['member_id']", "def create_member(self, vestorly_auth, member, **kwargs):\n \n # verify the required parameter 'vestorly_auth' is set\n if vestorly_auth is None:\n raise ValueError(\"Missing the required parameter `vestorly_auth` when calling `create_member`\")\n \n # verify the required parameter 'member' is set\n if member is None:\n raise ValueError(\"Missing the required parameter `member` when calling `create_member`\")\n \n all_params = ['vestorly_auth', 'member']\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method create_member\" % key)\n params[key] = val\n del params['kwargs']\n\n resource_path = '/members'.replace('{format}', 'json')\n method = 'POST'\n\n path_params = remove_none(dict())\n query_params = remove_none(dict(vestorly_auth=params.get('vestorly_auth')))\n header_params = remove_none(dict())\n form_params = remove_none(dict())\n files = remove_none(dict())\n body_params = params.get('member')\n\n # HTTP header `Accept`\n header_params['Accept'] = ApiClient.select_header_accept([])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = ApiClient.select_header_content_type([])\n\n response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,\n body=body_params, post_params=form_params, files=files,\n response='Memberresponse')\n \n return response", "def test_create_existing_user(self):\n user = self.make_user('new_user')\n user.email = INVITE_USER_EMAIL\n user.save()\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n\n url = reverse(\n 'projectroles:api_invite_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'email': INVITE_USER_EMAIL,\n 'role': PROJECT_ROLE_CONTRIBUTOR,\n 'message': INVITE_MESSAGE,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n self.assertEqual(len(mail.outbox), 0)", "def register_message():\n\n logger.info('Nos registramos')\n\n gr = registerAgent(VendedorAgent, DirectoryAgent, VendedorAgent.uri, getMessageCount())\n return gr", "def create_organization_members(\n self, id: str, body: dict[str, Any]\n ) -> dict[str, Any]:\n\n return self.client.post(self._url(id, \"members\"), data=body)", "def test_manage_agent(pa_instance):\n wrapper, agent_uuid = pa_instance\n publickey, secretkey = get_new_keypair()\n\n agent = wrapper.build_agent(\n serverkey=wrapper.publickey, publickey=publickey, secretkey=secretkey)\n peers = agent.vip.peerlist().get(timeout=2)\n assert VOLTTRON_CENTRAL_PLATFORM in peers\n\n # Make a call to manage which should return to us the publickey of the\n # platform.agent on the instance.\n papublickey = agent.vip.rpc.call(\n VOLTTRON_CENTRAL_PLATFORM, 'manage', wrapper.vip_address,\n wrapper.publickey, agent.core.publickey).get(timeout=2)\n assert papublickey", "def create(self, *args, **kwargs):\n pass", "def func(self):\n caller = self.caller\n\n if not self.args:\n caller.msg(\"Usage: +createNPC <name>\")\n return\n if not caller.location:\n # May not create an NPC when OOC\n caller.msg(\"You must have a location to create an NPC.\")\n return\n # Make the name always start with capital 
letter\n name = self.args.strip().capitalize()\n # Create an NPC in caller's location\n npc = create_object(\"characters.Character\",\n key=name,\n location=caller.location,\n locks=f\"edit:id({caller.id}) and perm(Builders);call:false()\")\n # Announce\n message = \"%s created the NPC '%s'.\"\n caller.msg(message % (\"You\", name))\n caller.location.msg_contents(message % (caller.key, name),\n exclude=caller)", "def _create_nsem_user():\n users = User.objects.filter(username=settings.CWWED_NSEM_USER)\n if users.exists():\n user = users[0]\n else:\n user = User.objects.create_user(settings.CWWED_NSEM_USER, password=settings.CWWED_NSEM_PASSWORD)\n group, _ = Group.objects.get_or_create(name=settings.CWWED_NSEM_GROUP)\n perm_names = [\n 'add_{}'.format(NsemPsa._meta.model_name),\n 'add_{}'.format(NamedStormCoveredDataSnapshot._meta.model_name),\n ]\n perms = Permission.objects.filter(codename__in=perm_names)\n # set permission\n user.user_permissions.set(list(perms))\n group.permissions.set(list(perms))\n # add user to group\n group.user_set.add(user)", "def create(self, username, password, email):\n pass", "def test_create_delegate(self):\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n\n url = reverse(\n 'projectroles:api_invite_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'email': INVITE_USER_EMAIL,\n 'role': PROJECT_ROLE_DELEGATE,\n 'message': INVITE_MESSAGE,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 1\n )\n invite = ProjectInvite.objects.first()\n self.assertEqual(invite.role, self.role_delegate)\n self.assertEqual(len(mail.outbox), 1)", "def test_add_role_to_project_member(self):\n pass", "def create_candidate(self, data, header):\n return self.client.post(\n path='/api/v2/office/1/register/', data=json.dumps(data), content_type='application/json', headers=header)", "def testInviteCreatesUser(self):\r\n me = User()\r\n me.username = u'me'\r\n me.email = u'me.com'\r\n me.invite_ct = 2\r\n you = me.invite(u'you.com')\r\n\r\n self.assertEqual(\r\n 'you.com',\r\n you.username,\r\n 'The email should be the username')\r\n self.assertEqual(\r\n 'you.com',\r\n you.email,\r\n 'The email should be the email')\r\n self.assertTrue(\r\n len(you.api_key),\r\n 'The api key should be generated for the user')\r\n self.assertFalse(\r\n you.activated,\r\n 'The new user should not be activated')\r\n self.assertEqual(\r\n 1,\r\n me.invite_ct,\r\n 'My invite count should be deprecated')", "def create_candidate(self, username: str) -> int:", "def assign_team_member_to_customer_room(room, **room_args):\n\n logging.log(logging.INFO, 'assign_team_member_to_customer_room')\n\n # setup Spark API connection using SPARK_TOKEN\n spark_api = ciscosparkapi.CiscoSparkAPI(access_token=config.SPARK_TOKEN)\n\n person_ids_in_room = []\n logging.log(logging.INFO, room)\n\n # Who is already in the room\n for membership in spark_api.memberships.list(roomId=room.id):\n person_ids_in_room.append(membership.personId)\n\n available_people = []\n # Loop over all members of team\n for membership in spark_api.team_memberships.list(teamId=config.SPARK_AGENT_TEAM_ID):\n # skip our self\n if membership.personEmail == config.SPARK_CUSTOMER_PROXY_EMAIL:\n continue\n\n # Skip people already in room\n if membership.personId in person_ids_in_room:\n continue\n\n # Get the person object for 
current membership\n person = spark_api.people.get(membership.personId)\n\n # NOTE: Status is only shown to users in the same domain!\n if person.status == 'active':\n available_people.append(person)\n\n logging.log(logging.INFO, 'Available people %s' % available_people)\n # If no agents are active, we cant assign one\n if not available_people:\n return False\n\n # Get the most idle, available user\n if config.SPARK_TASK_ASSIGN_MOST_IDLE_ACTIVE:\n available_people.sort(key=lambda agent: agent.lastActivity, reverse=True)\n else:\n # Random agent\n random.shuffle(available_people)\n\n agent_to_assign = available_people.pop()\n\n logging.log(logging.INFO, 'Assiging %s' % agent_to_assign)\n # assign agent to room\n membership = spark_api.memberships.create(room.id, personId=agent_to_assign.id)\n\n if membership:\n # indicate success\n return True\n\n # in case of failure assigning agent to room\n return False", "def create_vip(self, context, vip, netinfo):\n LOG.info(_(\"Agent received create_vip\"))\n self.driver.create_vip(vip, netinfo)", "def create(self, username, password):\n pass", "def test_add_user_existing_with_role(self):\n\n project = fake_clients.FakeProject(name=\"test_project\")\n\n user = fake_clients.FakeUser(\n name=\"test@example.com\", password=\"123\", email=\"test@example.com\"\n )\n\n assignment = fake_clients.FakeRoleAssignment(\n scope={\"project\": {\"id\": project.id}},\n role_name=\"member\",\n user={\"id\": user.id},\n )\n\n setup_identity_cache(\n projects=[project], users=[user], role_assignments=[assignment]\n )\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"test@example.com\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"email\": \"test@example.com\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n\n tasks = Task.objects.all()\n self.assertEqual(1, len(tasks))\n self.assertTrue(tasks[0].completed)", "async def post(self):\r\n\r\n data = await self.request.json()\r\n agent_uuid = data.get(\"agent_uuid\")\r\n agent = Agent.get(Agent.uuid == agent_uuid)\r\n if not agent:\r\n response_obj = {\"status\": \"failed\", \"reason\": \"agent not present\"}\r\n logger.info(\"agent not present\")\r\n return web.Response(text=str(response_obj), status=404)\r\n try:\r\n System.create(agent_uuid=agent)\r\n logger.info(\"System created successfully!!!\")\r\n return web.Response(text=\"Successful\", status=201)\r\n except Exception as ex:\r\n response_obj = {\"status\": \"failed\", \"reason\": \"agent not added\"}\r\n error_message = str(ex)\r\n logger.error(error_message)\r\n return web.Response(text=str(response_obj), status=500)", "async def createrole(self, ctx, role: str):\n if role.lower() == \"muted\" or role.lower() == \"punished\":\n return await ctx.send(\"Can not create this roles.\")\n \"\"\"Create a new role\"\"\"\n role = await ctx.guild.create_role(name=role)\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Role *{role}* has been created!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "def create(self):\n logging.warning(\n \"IRC back-end does not support explicit creation, joining room \"\n \"instead to ensure it 
exists.\"\n )\n self.join()", "def add_person(room_id, person=None, isModerator='false'):\n\n url = 'https://api.ciscospark.com/v1/memberships'\n headers = {'Authorization': 'Bearer '+context.get('spark.CISCO_SPARK_PLUMBERY_BOT')}\n payload = {'roomId': room_id,\n 'personEmail': person,\n 'isModerator': isModerator }\n response = requests.post(url=url, headers=headers, data=payload)\n\n if response.status_code != 200:\n print(response.json())\n raise Exception(\"Received error code {}\".format(response.status_code))", "def createRole():\n if hasRole(): return False\n conn = iamConn()\n role = getArgs().role_name\n conn.create_role(role, assume_role_policy.strip().format(accountId()))\n conn.put_role_policy(role, 'Admin', admin_policy.strip())\n print(\"Role created:\", role)\n return True", "def create_member(username, password, name, program, email, club_rep=False):\n\n # check username format\n if not username or not re.match(cfg['username_regex'], username):\n raise InvalidArgument(\"username\", username, \"expected format %s\" % repr(cfg['username_regex']))\n\n # check password length\n if not password or len(password) < cfg['min_password_length']:\n raise InvalidArgument(\"password\", \"<hidden>\", \"too short (minimum %d characters)\" % cfg['min_password_length'])\n\n try:\n request = ceo_pb2.AddUser()\n request.username = username\n request.password = password\n request.realname = name\n request.program = program\n request.email = email\n\n if club_rep:\n request.type = ceo_pb2.AddUser.CLUB_REP\n else:\n request.type = ceo_pb2.AddUser.MEMBER\n\n out = remote.run_remote('adduser', request.SerializeToString())\n\n response = ceo_pb2.AddUserResponse()\n response.ParseFromString(out)\n\n if any(message.status != 0 for message in response.messages):\n raise MemberException('\\n'.join(message.message for message in response.messages))\n\n except remote.RemoteException, e:\n raise MemberException(e)\n except OSError, e:\n raise MemberException(e)", "async def create_new_role(request):\n required_fields = [\"name\", \"administrators\", \"owners\"]\n utils.validate_fields(required_fields, request.json)\n\n conn = await create_connection()\n response = await roles_query.roles_search_duplicate(conn, request.json.get(\"name\"))\n if not response:\n txn_key, txn_user_id = await utils.get_transactor_key(request)\n role_id = str(uuid4())\n batch_list = Role().batch_list(\n signer_keypair=txn_key,\n signer_user_id=txn_user_id,\n name=request.json.get(\"name\"),\n role_id=role_id,\n metadata=request.json.get(\"metadata\"),\n admins=request.json.get(\"administrators\"),\n owners=request.json.get(\"owners\"),\n description=request.json.get(\"description\"),\n )\n await utils.send(\n request.app.config.VAL_CONN, batch_list, request.app.config.TIMEOUT\n )\n return create_role_response(request, role_id)\n raise ApiBadRequest(\n \"Error: could not create this role because role name has been taken or already exists\"\n )", "def create_team_action(request):\n # Create the team.\n now = datetime.utcnow()\n user_id = request.context.user_id\n user = load_user(request.db, user_id)\n # Select a round based on the user's badges.\n round_ids = find_round_ids_with_badges(request.db, user['badges'], now)\n if len(round_ids) == 0:\n # The user does not have access to any open round.\n raise ApiError('not qualified for any open round')\n if len(round_ids) > 1:\n # XXX The case where a user has badges for multiple open rounds\n # is currently handled by picking the first one, which is the\n # one that has the greatest 
id. This is unsatisfactory.\n pass\n round_id = round_ids[0]\n round_ = load_round(request.db, round_id, now)\n if not round_['is_registration_open']:\n raise ApiError('registration is closed')\n # Create the team.\n team_id = create_user_team(request.db, user_id, now)\n # Create a participation.\n create_participation(request.db, team_id, round_id, now=now)\n # Ensure the user gets team credentials.\n reset_user_principals(request)\n return {'success': True}", "def test_create_delegate_limit(self):\n del_user = self.make_user('delegate')\n self.make_assignment(self.project, del_user, self.role_delegate)\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n\n url = reverse(\n 'projectroles:api_invite_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'email': INVITE_USER_EMAIL,\n 'role': PROJECT_ROLE_DELEGATE,\n 'message': INVITE_MESSAGE,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n self.assertEqual(len(mail.outbox), 0)", "def create_agents(agents, start_params=None, **kwargs):\n if isinstance(agents, str):\n if (start_params is not None) and (\"save_history\" in start_params):\n save_history = start_params[\"save_history\"]\n else:\n save_history = False\n return Agent(strategy=agents, save_history=save_history, **kwargs)\n return AgentGroup(agents=agents, start_params=start_params)", "def test_create_actor(self):\n # create a new actor with proper credentials\n res = self.client().post('/actors', headers={\n 'Authorization': \"Bearer {}\".format(self.casting_director_token)\n }, json=self.VALID_NEW_ACTOR)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 201)\n self.assertTrue(data[\"success\"])\n self.assertIn('created', data)", "def create_instant_invite(_) -> int:\n return 1 << 0", "def create_instant_invite(_) -> int:\n return 1 << 0", "def create(self):\n\n pass", "def form_agents(n, r, a, b, agents):\n for a_ind, b_ind in izip(a, b):\n #util_fn = random.choice([util.Linear, util.CES, util.CobbDouglas])\n #util_fn = util_fn.rand(p, n)\n #util_fn = util.Linear.rand(n, r, a_ind)\n\n agent = Agent.rand(util.Linear, n, r, a_ind, b_ind)\n agents.add(agent)", "def createGroup(listOfPerson):\n atk=Department()\n atk.members=listOfPerson\n return atk", "def register_message():\n\n logger.info('Nos registramos')\n\n gr = register_agent(AgenteAlojamientosExternoAmadeus, DirectoryAgent, AgenteAlojamientosExternoAmadeus.uri, get_count())\n return gr", "def spawn_agents(self, num_agents):\n for x in range(num_agents):\n self.agents.append(P.Person(self, personality = Personality.Personality,\n friends_affinity = self.friends_affinity, enemies_affinity = self.enemies_affinity))\n # Determine if this agent will be online at the start of the simulation\n if random.random() < self.probability_initially_online:\n self.agents[x].online = True\n self.online_agents.append(self.agents[x])\n\n # connect some users to internet and seed some friends just by random for now\n for agent in self.online_agents:\n self.initial_connect_friend(agent)", "def level1AI(self, values):\n AI_server = AgentServer.get()\n values['e']['agent'] = AI_server.newAgent(2)\n #values['r']['agent'] = AI_server.newAgent(2)\n values['r']['agent'] = AI_server.newFakeAgent()\n values['j']['agent'] = AI_server.newFakeAgent()", "def test_create_role_existing(self):\n 
self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 1\n )\n\n url = reverse(\n 'projectroles:api_role_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'role': PROJECT_ROLE_CONTRIBUTOR,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 2\n )\n post_data = {\n 'role': PROJECT_ROLE_GUEST,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(\n RoleAssignment.objects.filter(project=self.project).count(), 2\n )", "def create():\n pass", "def create_manager(self, username, tenancy):\n raise NotImplementedError", "def add_member(self, request, **kwargs):\n valid_user = Member.objects.filter(group=self.get_object(), user=self.request.user).values('role_type').first()\n if valid_user['role_type'] == 'member':\n return Response({'message': 'You have no right to perform this action'}, status=status.HTTP_403_FORBIDDEN)\n if request.data.get('phone') is None:\n return Response({'message': 'Phone number not provided'}, status=status.HTTP_400_BAD_REQUEST)\n if request.data.get('role') is None:\n return Response({'message': 'Role is required'}, status=status.HTTP_400_BAD_REQUEST)\n if request.data.get('display_name') is None:\n return Response({'message': 'Name is required'}, status=status.HTTP_400_BAD_REQUEST)\n req_user = request.data.get('phone')\n user_data = User.objects.get(phone=req_user)\n if user_data is None:\n return Response({'message': 'User with this number is not registered'}, status=status.HTTP_404_NOT_FOUND)\n group = self.get_object()\n if group.members.filter(user=user_data).count() != 0:\n return Response({'message': 'User is already member of this group'}, status=status.HTTP_400_BAD_REQUEST)\n member_role = request.data.get('role')\n new_member_data = Member.objects.create(group=group, user=user_data,role_type=member_role, display_name=request.data.get('display_name'))\n new_member_data.save()\n serializer_data = MemberSerializer(new_member_data)\n return Response(serializer_data.data)", "def add_member(data):\n print(\"Adding: %s \" % data)\n conn = create_connection(db_location)\n sql = \"INSERT INTO members(member_uid, name, email, badge_id, new) VALUES({}, \\\"{}\\\", \\\"{}\\\", \\\"{}\\\", \\\"{}\\\");\".format(\n data['id'], data['forename'] + \" \" + data['surname'], data['email'], data['badge_id'], data['new'])\n execute_sql(conn, sql)\n return", "def test_create_owner(self):\n url = reverse(\n 'projectroles:api_role_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'role': PROJECT_ROLE_OWNER,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def test_add_member_to_group(client):\n group = client.add_members_to_group(TEAM_ID, GROUP_ID, 35555)\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert 35555 in group.members", "def create_agentless(host, client, parsed_args):\n info = {\"type\": \"CbwRam::RemoteAccess::Ssh::WithPassword\",\n \"address\": host.target,\n \"port\": \"22\",\n \"node_id\": host.node_id,\n \"login\": SSH_LOGIN,\n \"password\": SSH_PASSWORD\n }\n if 
parsed_args.i is True:\n create = client.create_remote_access(info)\n if create is False:\n ERRORS.append(host)\n else:\n NEW.append(create)\n else:\n NEW.append(host.target)", "def __init__(__self__,\n resource_name: str,\n args: MembershipArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def create_api_agent(baseurl, userid):\n c = act.Act(baseurl, userid, log_level=\"info\")\n return c" ]
[ "0.6627206", "0.63848805", "0.6290456", "0.6268151", "0.62504977", "0.624233", "0.6192578", "0.59588885", "0.5937359", "0.59215134", "0.589374", "0.5893678", "0.58593506", "0.58082986", "0.58055055", "0.5803748", "0.580091", "0.5796165", "0.5793468", "0.57622015", "0.57265323", "0.5709941", "0.56969666", "0.56872076", "0.5662437", "0.56577617", "0.5644747", "0.563892", "0.5636193", "0.562371", "0.55962926", "0.55842656", "0.5579983", "0.55791533", "0.55791533", "0.5576866", "0.5556784", "0.5549237", "0.55489457", "0.5545106", "0.55446106", "0.55237854", "0.55221516", "0.5520461", "0.55178386", "0.55056965", "0.5500702", "0.54902285", "0.5488228", "0.548657", "0.54661506", "0.54650617", "0.54505646", "0.54331166", "0.54312724", "0.54296076", "0.54244715", "0.5407203", "0.53955865", "0.539427", "0.5392754", "0.53910786", "0.53894", "0.5387197", "0.53855556", "0.5384944", "0.5384181", "0.53798944", "0.5377667", "0.5375356", "0.5374786", "0.5373485", "0.5371409", "0.53512025", "0.5349103", "0.5325379", "0.53204256", "0.5292302", "0.5290944", "0.5290693", "0.52882", "0.5285255", "0.5280813", "0.5280813", "0.5275395", "0.52721995", "0.5271657", "0.52705616", "0.5264138", "0.5262748", "0.5262359", "0.52616155", "0.52567005", "0.5256674", "0.5253172", "0.52486604", "0.5248133", "0.5239061", "0.5237492", "0.523544" ]
0.7037421
0
Test load class works correctly and raises right exceptions.
def test_load_class(): full_classname = 'collections.namedtuple' cls_ = load_class(full_classname) assert cls_ is collections.namedtuple with pytest.raises(ValueError): full_classname = 'collections.Foobar' load_class(full_classname) with pytest.raises(ImportError): full_classname = 'barfoo.Foobar' load_class(full_classname)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_load_testcase(self):\n tests = self.loader.load(\"tests.sampletest.hellotest.HelloTest\")\n self.assertEqual(len(tests), 1)\n from tests.sampletest.hellotest import HelloTest\n\n self.assertEqual(type(tests[0]), HelloTest)", "def test_load_testcase_in_module(self):\n tests = self.loader.load(\"tests.sampletest.InitTest\")\n self.assertEqual(len(tests), 1)\n from tests.sampletest import InitTest\n\n self.assertEqual(type(tests[0]), InitTest)", "def test_load_model_method_with_wrong_class_path(self):\n # arrange\n # instantiating the model manager class\n model_manager = ModelManager()\n\n # act\n # adding the model\n exception_raised = False\n exception_message = None\n # accessing the MLModelMock model object\n try:\n model_manager.load_model(\"sdf.sdf.sdf\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"No module named 'sdf'\")", "def test_class_errored(self, cls, exception):", "def _loadClass(self, loader):\r\n raise NotImplementedError(\"The method 'loadClass' has to \"\r\n 'be implemented.')", "def test_class_started(self, cls):", "def test_loader(cls):\r\n return _test_loader_factory(cls)", "def test_initialization(self):\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n self.assertEqual(test_node.name, f'{self.TEST_PKG}.{self.TEST_CLS}')\n self.assertEqual(test_node.package, self.TEST_PKG)\n self.assertEqual(test_node.class_name, self.TEST_CLS)", "def _load(self):\n raise NotImplementedError()", "def test_import_string_missing_class_or_attribute(self):\n valid_module = 'ttgn.pokedex'\n invalid_class = 'NonexistentClass'\n with pytest.raises(ImportError) as error:\n utils.import_string('{}.{}'.format(valid_module, invalid_class))\n assert 'Module {} has no class or attribute {}'.format(\n valid_module, invalid_class) == str(error.value)", "def test_custom_class_fail_import(self):\n conf = Configuration(Path(self.conf_dir, \"custom_class_doesnt_exists.yaml\"))\n self.test_survey = Survey.objects.get(name=\"Test survëy\")\n fail_import = str(Survey2Tex(self.test_survey, conf))\n should_contain = [\n \"could not render\",\n \"not a standard type\",\n \"importable valid Question2Tex child class\",\n \"'raw'\",\n \"'sankey'\",\n \"'pie'\",\n \"'cloud'\",\n \"'square'\",\n \"'polar'\",\n ]\n for text in should_contain:\n self.assertIn(text, fail_import)", "def test_load_model_method(self):\n # arrange\n # instantiating the model manager class\n model_manager = ModelManager()\n\n # adding the model\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n model_object = None\n # accessing the MLModelMock model object\n try:\n model_object = model_manager.get_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised = True\n print_tb(e)\n\n # assert\n self.assertFalse(exception_raised)\n self.assertTrue(model_object is not None)", "def test_lint(self):\n l = self.l\n l.loadTestsFromTestCase\n l.loadTestsFromModule\n l.loadTestsFromName\n l.loadTestsFromNames", "def test_load(self):\n command = constituencies.Command()\n command.handle('load', silent=True)", "def test_load_fail():\n parameters = {'path': 'foo.bar'}\n\n images.load(parameters)", "def setUpClass(cls):\n # check for python3\n cls.assertGreaterEqual(cls, sys.version_info[0], 3)\n # This will import the module to be tested\n cls.module = importlib.import_module(PKG_NAME)", "def load(self):\n\n raise 
NotImplementedError", "def test_load(self):\n (spec, check) = bundylogging.load()\n # It returns the checking function\n self.assertEqual(check, bundylogging.check)\n # The plugin stores it's spec\n self.assertEqual(spec, bundylogging.spec)", "def test_constructor(self):\n pass", "def test_instantiates_badgr_lite_class(self):\n badgr = self.get_badgr_setup()\n self.assertIsInstance(badgr, BadgrLite)", "def setUpClass(self):\n self.repo = Repository(\"https://github.com/qcoumes/gitload_test.git\")\n self.repo.get_repo()\n self.loader = self.repo.load_pltp(\"/PLTP/test.pltp\")", "def setUpClass(cls):\n cls.assertGreaterEqual(cls, sys.version_info[0], 3)\n cls.module = importlib.import_module(PKG_NAME)", "def test_bad_class(self):\n\n mock_entry_badclass = mock.create_autospec(EntryPoint)\n mock_entry_badclass.name = \"BadClass\"\n mock_entry_badclass.load = self.returnbadclass\n\n with pytest.warns(AstropyUserWarning, match=r\".*BadClass.*\"):\n populate_entry_points([mock_entry_badclass])", "def __init__(self):\n if DynamicImporter._instance is not None:\n raise Exception(\"DynamicImporter instance already exists!\")\n DynamicImporter._instance = self\n\n current_path = Path(__file__).parent\n test_path = current_path / \"testdata\"\n files = test_path.rglob(\"*.py\")\n\n for file in files:\n\n if file.name in [\"__init__.py\", \"test_module.py\", \"test_registry.py\", \"connections.py\"]:\n continue\n\n name = file.stem\n module = import_module(f\"testdata.{name}\")\n class_title = f\"{name.title()}Test\"\n\n try:\n _class = getattr(module, class_title) # get the class\n self.class_list[class_title] = _class # add the class to the class list\n except AttributeError: # don't throw exceptions for files that don't have a test\n continue", "def test_loads(self, game=\"SuperMarioKart-Snes\"):\n with self.assertRaises(NameError):\n retro.make(game=game)", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def test_can_instantiate(self):\n\n exc_thrown = False\n\n try:\n self.klass(*self.instantiate_args)\n except Exception:\n exc_thrown = True\n\n self.assertFalse(exc_thrown)", "def __init__(self, module_name,class_name):\n\n try:\n self.module = importlib.import_module(module_name)\n self.get_class_object = getattr(self.module,class_name)\n \n except:\n print(\"Failed to import the module {} from {}\".format(class_name,module_name))", "def __init__(self):\n self.load()", "def test_load_experiment(self):\n exp = Experiment(self.epath,\n normalization='ch0',\n auto_alignment=False)\n self.assertTrue(isinstance(exp, Experiment))", "def test_instantiate_non_existent_class(self):\n # create test configs\n test_configs = [\n {\"_target_\": \"collections.NonExistentClass\"},\n {\"_target_\": \"collections.OtherNonExistentClass\", \"a\": 1, \"b\": 2}\n ]\n\n # check that instantiate raises AttributeError for each test config\n for test_conf in test_configs:\n self.assertRaises(AttributeError, instantiate, test_conf)", "def setUpClass(cls):\n cls.run_mgr = runner(['start', 'execute'], ['stop'])\n cls.load_mgr = loader(verbose=False, recursive=True)\n cls.load_mgr.set_addon_dirs(['./data'])\n cls.load_mgr.load_addons()\n cls.cli_inst = cls.load_mgr.get_instance('CommandLineAddon')\n cls.fileio_inst = cls.load_mgr.get_instance('FileIOAddon')", "def test_import_fails_with_no_modules(self):\n with self.assertRaises(ValueError):\n LazyImportTester([])", "def test_motorcycle(self):\n try:\n self.test = oop1.Motorcycle()\n self.assertIsInstance(self.test, 
oop1.Motorcycle)\n print(\"\\nPASS : Class Exists\\n\")\n except NameError as e:\n print(e)", "def test_load_invalid_file(self):\n with self.assertRaises(Exception):\n track = Track.from_filename(__file__)", "def setup_class(cls):\n # ns.assert_true(False, \"setup_class run\")\n print('setup_class\\n')", "def setUpClass(self):\n super(TestExpedition, self).setUpClass()", "def test_compute_glycemic_load(self):\n pass", "def load(self):", "def test_required_methods(self):\n\n required_methods = ('__init__', 'load')\n\n for method in required_methods:\n self.assertIn(method, dir(DatasetLoader_Jakob2019))", "def test_class_dependencies(self):\n dependencies = get_class_dependencies(ParentBlock)\n self.assertIsNotNone(dependencies)\n self.assertIn(\"nio\", dependencies)\n self.assertEqual(dependencies[\"nio\"][1], \"2.0.1\")", "def test_twice_dependent_object_import(self):\n pass", "def setup_class(klass):", "def setup_class(klass):", "def test_load_pltp(self):\n self.assertIsInstance(self.repo.load_pltp(\"/PLTP/test.pltp\"), PLTP_Loader)", "def test_module(self):\n pass", "def setUpClass(self):", "def test_load_model(base_bertopic):\n base_bertopic.save(\"test\")\n loaded_bertopic = BERTopic.load(\"test\")\n assert type(base_bertopic) == type(loaded_bertopic)", "def load(self):\n self._really_load()", "def force_load(self):\n pass", "def test_loaders():\n\n tempdir = tempfile.mkdtemp()\n\n loader = \"\"\"\nfrom mindbender import api\n\nclass DemoLoader(api.Loader):\n def process(self, asset, subset, version, representation):\n pass\n\n\"\"\"\n\n with open(os.path.join(tempdir, \"my_loader.py\"), \"w\") as f:\n f.write(loader)\n\n try:\n pipeline.register_loaders_path(tempdir)\n loaders = pipeline.discover_loaders()\n\n assert \"DemoLoader\" in list(\n L.__name__ for L in loaders\n ), \"Loader not found in %s\" % \", \".join(\n l.__name__ for l in loaders)\n\n finally:\n shutil.rmtree(tempdir)", "def test_CL04_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. 
import CL04 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL04\", test]", "def test_load_file(self):\n self.assertTrue(os.path.exists(MEDIA_ROOT+\"/pl_test1_\"+self.loader.version))\n self.assertTrue(os.path.exists(MEDIA_ROOT+\"/pl_test2_\"+self.loader.version))", "def test_constructor_missing_config():\n with pytest.raises(TypeError):\n Unpacker()", "def test_ground_vehicle(self):\n try:\n self.test = oop1.GroundVehicle()\n self.assertIsInstance(self.test, oop1.GroundVehicle)\n print(\"\\nPASS : GroundVehicle Class Exists\\n\")\n except NameError as e:\n print(e)", "def load(self):\n return True", "def setUpClass(cls):\n super(BaseTest, cls).setUpClass()\n try:\n cls.configure()\n cls.execute()\n except:\n cls.annihilate()\n raise", "def testImport(self):\n success = False\n try:\n from cutlass import DiseaseMeta\n success = True\n except:\n pass\n\n self.failUnless(success)\n self.failIf(DiseaseMeta is None)", "def do_load(self, name):\n try:\n self.runner.run()\n\n except():\n print('Loading failed')", "def load(self, *args, **kwargs):\n pass", "def test_exception(self):\n self.assertRaises(TypeError, lambda: self.init_model())", "def __init__(self):\n ScriptedLoadableModuleLogic.__init__(self)", "def test_entities__Entity__getClass__2():\n e = Entity(None, IDummy, None)\n with pytest.raises(ValueError):\n e.getClass()", "def test_exceptions_init_valid():\n exceptions = Exceptions(os.path.join(os.path.dirname(__file__),\n 'valid_exceptions.yaml'))\n assert exceptions.exceptions", "def load(path):\n pass", "def setUpClass(cls):\r\n pass", "def test_instantiate_non_existent_module(self):\n # create test configs\n test_configs = [\n {\"_target_\": \"non_existent_module.some_class\"},\n {\"_target_\": \"another_non_existent_module.some_class\", \"a\": 1, \"b\": 2}\n ]\n\n # check that instantiate raises ModuleNotFoundError for each test config\n for test_conf in test_configs:\n self.assertRaises(ModuleNotFoundError, instantiate, test_conf)", "def test_modules(self):\n for mod in self.expected_modules:\n try:\n __import__(mod)\n except ImportError:\n raise", "def test_pickle_file_not_found(self):\n self.assertRaises(IOError, self.plugin.load_data)", "def test_load_non_existant_protocol():\n Protocol.load(path(__file__).parent /\n path('protocols') /\n path('no protocol'))", "def test_not_loaded(person):\n with pytest.raises(KeyError):\n person.load(-1)\n\n assert person.loaded is False", "def test_car(self):\n try:\n self.test = oop1.Car()\n self.assertIsInstance(self.test, oop1.Car)\n print(\"\\nPASS : Car Class Exists\\n\")\n except NameError as e:\n print(e)", "def test_manual_load_lecture(lecture_class, course, valid_datetime):\n id = lecture_class.create_lecture(course, valid_datetime)\n assert id != None\n assert lecture_class.load_lecture(id)\n assert lecture_class.course == course\n assert lecture_class.time == valid_datetime", "def test_LC18_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. 
import LC18 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.LC18\", test]", "def test_flight_vehicle(self):\n try:\n self.test = oop1.FlightVehicle()\n self.assertIsInstance(self.test, oop1.FlightVehicle)\n print(\"\\nPASS : FlightVehicle Class Exists\\n\")\n except NameError as e:\n print(e)", "def test_init_client(self):\n # TODO: dynamically importing dependancies from the file tested\n self.assertIn(\n \"describe_trusted_advisor_check_result\", dir(self.subclass.client)\n )", "def test_load_object_from_string():\n tests = (\n (\"string.Template\", string.Template),\n (\"os.path.basename\", os.path.basename),\n (\"string.ascii_letters\", string.ascii_letters)\n )\n for test in tests:\n assert load_object_from_string(test[0]) is test[1]", "def load(self):\n raise NotImplementedError()", "def load(self):\n raise NotImplementedError()", "def test_loading_document(self):", "def __init_on_load__(self):", "def __init__(self, name, dir='.'):\n try:\n full_name = os.path.join(dir, name + '.task1')\n self.load(full_name)\n except Exception as e:\n print('Failed to load \"{}\"'.format(full_name))\n print(e)", "def _load(self, directory):\n pass", "def test_airplane(self):\n try:\n self.test = oop1.Airplane()\n self.assertIsInstance(self.test, oop1.Airplane)\n print(\"\\nPASS : Airplane Class Exists\\n\")\n except NameError as e:\n print(e)", "def load_module(cls, *args, **kwargs): # real signature unknown\n pass", "def load_module(cls, *args, **kwargs): # real signature unknown\n pass", "def load_module(cls, *args, **kwargs): # real signature unknown\n pass", "def test_load_context_raise_typeerror(self, mock_yaml, _):\n mock_yaml.side_effect = TypeError()\n with patch(\"builtins.open\", mock_open(read_data=\"\")):\n with self.assertRaises(TypeError) as exception:\n ctx_obj = CtxCore()\n self.assertEqual(exception.exception, TypeError())", "def setUpClass(cls):\n pass", "def setUpClass(cls):\n pass", "def setUpClass(cls):\n pass", "def setUpClass(cls):\n pass", "def test_loader_loads_from_file():\n base_json = 'tests/test_json.json'\n json_test = {\"foo\": \"bar\"}\n assert whenzat.loader(base_json) == json_test", "def setUpClass(cls):\n cls.inst = storage", "def test_handling_wrong_locate_module_implementation(method):\n loader = WrongEnamlImporter()\n with pytest.raises(ImportError):\n getattr(loader, method)('module_name')", "def load(self):\n raise NotImplementedError", "def load(self):\n raise NotImplementedError" ]
[ "0.7552986", "0.7182643", "0.70636034", "0.686629", "0.6754328", "0.67336494", "0.6622316", "0.6577228", "0.65132713", "0.6475406", "0.6446178", "0.6431292", "0.6425918", "0.63992125", "0.6388809", "0.6313314", "0.6301513", "0.62873274", "0.6244719", "0.62361884", "0.6233843", "0.62248665", "0.62203985", "0.6209335", "0.62069184", "0.61509365", "0.61509365", "0.61509365", "0.61509365", "0.61487836", "0.6135182", "0.6132403", "0.61129355", "0.61085624", "0.6099537", "0.6093693", "0.6081687", "0.60717964", "0.6064302", "0.60622615", "0.6058457", "0.6051133", "0.6050739", "0.60495484", "0.60254574", "0.6023711", "0.6023711", "0.6011911", "0.59993434", "0.59969205", "0.5995415", "0.5990845", "0.5990416", "0.5985541", "0.59809303", "0.5956248", "0.5949697", "0.5946842", "0.5934161", "0.5932122", "0.5926815", "0.59230775", "0.5921033", "0.59163254", "0.5915269", "0.59116924", "0.59111786", "0.5907537", "0.5899964", "0.589704", "0.5895121", "0.58935416", "0.58919907", "0.58900636", "0.5886782", "0.58833706", "0.5882376", "0.5875959", "0.5875154", "0.58665735", "0.5864206", "0.5864206", "0.5862036", "0.58541036", "0.5852985", "0.58492786", "0.58482546", "0.5834873", "0.5834873", "0.5834873", "0.583459", "0.5829363", "0.5829363", "0.5829363", "0.5829363", "0.58287907", "0.5825235", "0.58210534", "0.5819565", "0.5819565" ]
0.72938436
1
irq_handler contains the code you want to execute when the interrupt occurs. Define your own callback function here by rewriting the code. We make an LED flash in this example.
def irq_handler(): # open an LED session with LEDs() as LED: # specify the LED which you want to control led = Led.LED1 # specify the LED status led_on_off = True # writes values 10 times, which makes LED1 flash for 3 seconds for x in range(0, 10): # turn LED0 on or off LED.write(led, led_on_off) # add a short delay time.sleep(0.3) # if the LED is on, set the parameter to off # if the LED is off, set the parameter to on led_on_off = not led_on_off
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def irq(self, handler: Callable, trigger: int, hard: bool = False) -> Callable:", "def enable_irq(state:int):", "def extirq_cbf(task):\n try:\n if not execute_LM_function_Core(task.split(' ')):\n console_write(\"[IRQ] EXTIRQ execute_LM_function_Core error: {}\".format(task))\n except Exception as e:\n console_write(\"[IRQ] EVENTIRQ callback: {} error: {}\".format(task, e))", "def disable_irq() -> int:", "def enable_irq(state: bool = True, /) -> None:", "def enableInterrupt():\n console_write(\"[IRQ] TIMIRQ SETUP: {} SEQ: {}\".format(cfgget(\"timirq\"), cfgget(\"timirqseq\")))\n console_write(\"|- [IRQ] TIMIRQ CBF:{}\".format(cfgget('timirqcbf')))\n if cfgget(\"timirq\") and cfgget('timirqcbf').lower() != 'n/a':\n from machine import Timer\n # INIT TIMER IRQ with callback function wrapper\n timer = Timer(0)\n timer.init(period=int(cfgget(\"timirqseq\")), mode=Timer.PERIODIC,\n callback=lambda timer: timirq_cbfs(cfgget('timirqcbf')))", "def set_on_interrupt_callback(self, callback):\n self.__interrupt_callback = callback", "def interrupt_kernel(self, kernel_id):", "def interrupt_kernel(self):", "def test_interrupt(self):\n with patch('RPi.GPIO.setmode') as mock_setmode:\n gpio = GPIODevice()\n with patch('RPi.GPIO.setup') as mock_setup:\n with patch('RPi.GPIO.add_event_detection') as mock_detection:\n with patch('RPi.GPIO.add_event_callback') as mock_callback:\n gpio.interrupt(self._callback, 0)\n with patch('RPi.GPIO.cleanup') as mock_cleanup:\n gpio.close()\n mock_detection.called_once_with(0, GPIO.BOTH)\n mock_callback.called_once_with(0, self._callback)", "def stopCallback (self):\n GPIO.remove_event_detect (self.IRQ_PIN)\n self.hasCallback = False", "def handle(req):\n\n gpio.output(26, gpio.HIGH)\n time.sleep(0.2)\n gpio.output(26, gpio.LOW)\n\n return req", "def imu_fth_isr(gpio, level, tick):\n isr_time = time.time()\n\n # Sometimes INT1 can trigger again as the FIFO is being read and filled\n # back up at the same time. If the time since the last tick is less than\n # 0.1s then exit the ISR.\n global last_tick\n MIN_TICK_DIFF_US = 10**5 \n tick_diff = pigpio.tickDiff(last_tick, tick)\n print(f\"Time since last tick {tick_diff / 10**6} seconds\")\n if tick_diff < MIN_TICK_DIFF_US:\n return\n\n global fifo_start\n print(f\"Interrupt at {isr_time}\")\n print(f\"FIFO fill time: {isr_time - fifo_start:4.03f} seconds\")\n fifo_start = isr_time\n\n # Read FIFO status\n status1 = imu._fifo_status1\n status2 = imu._fifo_status2\n status3 = imu._fifo_status3\n status4 = imu._fifo_status4\n\n # Number of unread words (16 bits) \n unread_words = ((status2 & 0x0F) << 8) + status1\n print(f\"Words in FIFO: {unread_words}\")\n\n # Pattern index\n # In our case, the accelerometer and gyroscope data rates are equal, so the\n # pattern is in [0:5] where\n # 0 -> Gx\n # 1 -> Gy\n # 2 -> Gz\n # 3 -> Ax\n # 4 -> Ay\n # 5 -> Az\n pattern_index = (status4 << 8) + status3\n print(f\"Index of next reading: {pattern_index}\")\n\n # Read in multiples of 6, the number of readings from Gx to Az\n BYTES_PER_WORD = 2\n WORDS_PER_PATTERN = 6\n words_to_read = unread_words // WORDS_PER_PATTERN * WORDS_PER_PATTERN\n buffer_size = words_to_read * BYTES_PER_WORD\n buffer = bytearray(buffer_size)\n FIFO_DATA_OUT_L = bytearray(b'\\x3E')\n\n # Read FIFO data into buffer\n start_time = time.time()\n imu.i2c_device.write_then_readinto(FIFO_DATA_OUT_L, buffer)\n end_time = time.time()\n total_read_time = end_time - start_time\n print(f\"{buffer_size} bytes read in {total_read_time:.6f} seconds. 
{buffer_size/total_read_time:.0f} bytes/s\")\n\n # Read FIFO status\n status1 = imu._fifo_status1\n status2 = imu._fifo_status2\n status3 = imu._fifo_status3\n status4 = imu._fifo_status4\n unread_words = ((status2 & 0x0F) << 8) + status1\n print(f\"Words in FIFO: {unread_words}\")\n pattern_index = (status4 << 8) + status3\n print(f\"Index of next reading: {pattern_index}\")\n\n last_tick = tick\n\n # Print data\n PREVIEW_BYTES = 12\n print(f\"buffer = {buffer[:PREVIEW_BYTES].hex()} ... {buffer[-PREVIEW_BYTES:].hex()} | Len: {len(buffer)}\")\n data = [parse_fifo_data(buffer[i:i+2]) for i in range(0, len(buffer), 2)]\n print(f\"data = [{', '.join(map(str, data[:PREVIEW_BYTES]))}, ..., {', '.join(map(str, data[-PREVIEW_BYTES:]))}] | Len: {len(data)}\")\n\n print()", "def signal_handler(sig, frame):\r\n print('You pressed Control+C')\r\n led.off()\r\n sys.exit(0)", "def add_button_callback(self, button, function, event=BUTTON_DOWN, threaded=True):\n\t\tif event == LCD.BUTTON_DOWN:\n\t\t\tedge = 'falling'\n\t\telif event == LCD.BUTTON_UP:\n\t\t\tedge = 'rising'\n\t\telif event == LCD.BUTTON_EITHER:\n\t\t\tedge = 'both'\n\t\tRPIO.add_interrupt_callback(button, function, edge, RPIO.PUD_UP, threaded, 20)", "def startCallback (self):\n if self.hasCallback:\n return\n # set up IRQ interrupt function. GPIO.setmode should alreay have been called\n GPIO.setup(self.IRQ_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n GPIO.add_event_detect (self.IRQ_PIN, GPIO.FALLING)\n GPIO.add_event_callback (self.IRQ_PIN, AHF_LickDetectorCallback) \n self.hasCallack = True\n # state of touches from one invocation to next, used in callback to separate touches from untouches\n self.prevTouches = self.mpr121.touched()", "def dummy_callback_handler(self, ret):\n pass", "def __init__(self, pin_num, button_func):\n self.pin = Pin(pin_num, Pin.IN)\n print(pin_num, button_func)\n self.pin.irq(trigger=Pin.IRQ_RISING, handler=button_func)", "def interrupt(func):\n def do_stuff(*args, **kwargs):\n App.get_running_app().controller.interrupt(restart=True)\n return func(*args, **kwargs)\n return do_stuff", "def interrupt_handler(self, signo, frame):\n log.debug(\"interrupting run\")\n self._keep_running = False", "def keyboard_interrupt_handler(interrupt_signal, frame):\n print(\"Scanning finished\")\n print(\"KeyboardInterrupt ID: {} {} has been caught.\".format(interrupt_signal, frame))\n exit(1)", "def keyboard_interrupt_handler(interrupt_signal, frame):\n print(\"Scanning finished\")\n print(\"KeyboardInterrupt ID: {} {} has been caught.\".format(interrupt_signal, frame))\n exit(1)", "def switch(ind, status):\n print(\"Switching :\", ind, \">>\", status == 'on')\n GPIO.output(ind, status == 'on')", "def led(path, tags, args, source):\n\toscProg = args[0]\n\t#pinValue = args[1]\t\n\t#action = args[2]\n\tprint oscProg\n\t#print pinValue\n\t#print action\n\t\n\t#check if first argument is a pin value\n\tif oscProg in gpioPins.keys():\n\t\tpinValue = args[1]\n\t\taction = args[2]\n\t\t#search gpioPins dict for pin value. 
Exit when found\n\t\tfor dictColor,gpioPin in gpioPins.iteritems():\n\t\t\tif oscProg == dictColor:\n\t\t\t\tbreak\n\t\t#set the pin color\n\t\tif action == 'solid':\n\t\t\tcLED.setPinValue(gpioPin,pinValue)\n\t\telif action == 'flashFade':\n\t\t\tt = threading.Thread(target=ef.ledFlashFade,args=(gpioPin,pinValue,0.01))\n\t\t\tt.start()\n\t\t\tt.join\n\t\telif action ==\"flash\":\n\t\t\tef.flash(gpioPin,0.1)\n\t\telif action ==\"contFlash\":\n\t\t\tef.flash(gpioPin,0.1)\n\t\telse:\n\t\t\t#not a valid option\n\t\t\tpass\t\t\t\n\t\t\n\t#Turn all LEDs on\n\telif oscProg == 'allOn':\n\t\tcLED.setColor(1,[1,1,1])\n\t\tcLED.setColor(2,[1,1,1])\n\t#Turn all LEDs off\n\telif oscProg == 'allOff':\n\t\tcLED.allOff()\n\telse:\n\t\tpass", "def callback(self):\n try:\n function()\n finally:\n main_loop.remove_handler(handler[0])", "def _signal_handler(*_: typing.Any) -> None:\n shutdown_event.set()", "def shutdown_callback():\n pass", "def on_switch(self, callback):\n self._switch_callback = callback", "def gpio_edge_listener(port):\n self.schedule_update_ha_state(True)", "def event11512210():\n header(11512210, 0)\n knight, is_active = define_args('ii')\n if_event_flag_on(1, is_active)\n if_entity_health_less_than_or_equal(1, knight, 0.1)\n if_condition_true(0, 1)\n chr.disable_gravity(knight)\n chr.disable_collision(knight)\n chr.disable_ai(knight)\n chr.replan_ai(knight)\n wait(2.5)\n # Skipping the fade-out, they just get obliterated usually.\n anim.force_animation(knight, 1201, do_not_wait_for_transition=True, wait_for_completion=True)\n chr.enable_ai(knight)\n chr.disable(knight)\n chr.enable_gravity(knight)\n chr.enable_collision(knight)\n chr.set_special_effect(knight, 3231)\n flag.disable(is_active)\n restart()", "def sigint_handler(signal, frame):\n rclpy.shutdown()\n if prev_sigint_handler is not None:\n prev_sigint_handler(signal)", "def _callback(self, channel, callback):\n # warte eine zufaellige Zeit\n sleep(randint(5, 10))\n # falls das Programm noch laueft,\n # der channel noch nicht gecleart wurde\n # und der Channel noch nicht entfernt wurde\n # (zum Beispiel durch einen Wechsel des Channels zu einem Ausgang)\n # rufe die Callbackfunktion auf\n if self.mode is not None and channel in self.channels and channel in self.events:\n callback(channel)", "def timirq_cbfs(tasks):\n try:\n # Execute CBF from cached config\n for cmd in (cmd.strip().split(' ') for cmd in tasks.split(';')):\n if not execute_LM_function_Core(cmd):\n console_write(\"[IRQ] TIMIRQ execute_LM_function_Core error: {}\".format(tasks))\n except Exception as e:\n console_write(\"[IRQ] TIMIRQ callback: {} error: {}\".format(tasks, e))", "def sigint_handler(*dummy):\n print \"Received SIGINT. 
Stopping everything.\"\n executor.Stop()\n server.Stop()", "def lambda_handler(event, context):\n try:\n aq = Aquifer()\n aq.run()\n\n return \"Completed\"\n\n except (Exception, KeyboardInterrupt) as e:\n return \"Error occurred\"", "def run_handler(self, handler):\n self.last_activity = time.time()\n const_name = handler.upper()\n try:\n const_value = getattr(cwiid, const_name)\n if self.wm.state['buttons'] == const_value:\n self.exec_script(handler)\n except AttributeError:\n return 0", "def _handle_interrupts(signal_number, current_stack_frame):\n print(\" Interrupted!\\n\", file=sys.stderr)\n _display_help()\n sys.exit(0)", "def create_callback(output_element,retfunc):\n def callback(*input_values):\n return retfunc(*input_values)\n return callback", "def receive_interrupt_request(self, _: EmptyMsg):\n self.renderer.interrupt()", "async def blink(my_board, pin):\n\n # set the pin mode\n await my_board.set_pin_mode_digital_output(pin)\n\n # toggle the pin 4 times and exit\n for x in range(4):\n print('ON')\n await my_board.digital_write(pin, 1)\n await asyncio.sleep(1)\n print('OFF')\n await my_board.digital_write(pin, 0)\n await asyncio.sleep(1)", "def _Iregfunc_handler(self,L,C,y) :\n\t\tif self.name == \"AR\" : \n\t\t\treturn self.regfunc(C,y)\n\t\telse :\n\t\t\tself.logger.error(\"Unknown name %r\"%(self.name))\n\t\t\tsys.exit(1)", "def event_main_on(self, ioname, iovalue):\n # Switch on/off output O_1\n self.rpi.io.main_relay.value = True\n self.main_state = True\n self.system_on_time = time.time()\n self.trigger = self.trigger_system_on_trigger", "def add_callback(self, callback, when='sensing'):\n if when == 'both':\n self.pin.add_edge_callback(callback, 'both')\n else:\n on_sensing = True if when == 'sensing' else False\n self.pin.add_edge_callback(\n callback, 'rising' if self.pinishigh ^ on_sensing else 'falling')", "def do_red(self,command):\n if \"on\" in command:\n print 'Red ON'\n GPIO.output(7,GPIO.HIGH)\n elif \"off\" in command:\n print 'Red OFF'\n GPIO.output(7,GPIO.LOW)\n elif \"flash\" in command:\n print 'Flashing green'\n FlashPin(pin=7,count=5,delay=0.1)\n else:\n print \"ERROR! 
MF!\"", "def log_handler(self, handler):\n if not self.opened():\n handler = handler or util.noop\n self._log_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler)\n self._dll.JLINKARM_EnableLog(self._log_handler)", "def warning_handler(self, handler):\n if not self.opened():\n handler = handler or util.noop\n self._warning_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler)\n self._dll.JLINKARM_SetWarnOutHandler(self._warning_handler)", "def keyboard_interrupt_handler(sig: int, _: object) -> None:\n logger.warning(f'KeyboardInterrupt (id: {sig}) has been caught...')\n logger.info('Terminating the session gracefully...')\n ray.shutdown()\n minio_leftovers = glob('*.part.minio')\n for leftover in minio_leftovers:\n Path(leftover).unlink()\n sys.exit(1)", "def _handle_interrupts(signal_number, current_stack_frame):\n print(\" Interrupted!\\n\", file=sys.stderr)\n _display_help()\n sys.exit(1) # no match", "def lambda_handler(event, context):\n return dispatch(event)", "def ControlLights(state):\n for led in (RED,YELLOW,GREEN):\n GPIO.output(LED[led],state[led])\n time.sleep(FLASH_TIME)", "def sig_handler(sig, frame):\n if sig == signal.SIGINT:\n stop_ros()\n print(\"Shutting down...\")\n sys.exit(0)", "def set_rx_callback(self, cb_function):\n self.serial_received = cb_function", "def sigterm_handler(signal, frame):\n GPIO.cleanup()\n print('WARN : %s Received Kill' % PROG_NAME)\n print('INFO : Performed GPIO.cleanup. Bye ...')\n sys.exit(0)", "def start_ispy_action_listener(self,on_ispy_action_received):\n self.ispy_to_ros__action_subs = rospy.Subscriber(ISPY_GAME_TO_ROS_ACTION_TOPIC, iSpyAction, on_ispy_action_received) \n print(\"action listener\")\n rospy.spin()", "def __atexit_handler():\n global _iom_shutdown\n _iom_shutdown = True\n clear_IOM()", "def lambda_handler(event, context):\n return", "def lambda_handler(event, context):\n return", "def register_for_status_requests(self, handler):\n # noinspection PyUnusedLocal\n def wrapped_handler(signal_num, frame):\n handler()\n signal.signal(signal.SIGINT, wrapped_handler)", "def signal_handler(signal, _): \n import signal\n import sys\n from time import localtime, strftime\n signal.signal(signal.SIGINT, original_sigint)\n thetime = strftime(\"%H:%M:%S\", localtime())\n INPUTFUNC('\\n\\n%s: Paused. 
Press any key to resume, or ctrl+c to quit.\\n' % thetime)\n time = strftime(\"%H:%M:%S\", localtime())\n print('%s: Interrogation resumed.\\n' % time)\n signal.signal(signal.SIGINT, signal_handler)", "def analogueTriggerChangeHandler(val):\n print(\"Analogue Trigger Value Changed: {}\".format(val) )", "def set_interrupt_trigger(ft_handle: FtHandle, trigger: GpioTrigger) -> None:\n result: Ft4222Status = _set_interrupt_trigger(\n ft_handle, trigger.value)\n\n if result != Ft4222Status.OK:\n raise Ft4222Exception(result)", "def error_handler(self, handler):\n if not self.opened():\n handler = handler or util.noop\n self._error_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler)\n self._dll.JLINKARM_SetErrorOutHandler(self._error_handler)", "def onAction(*args):", "def onAction(*args):", "def onAction(*args):", "def onAction(*args):", "def on_press(self, callback):\n GPIO.remove_event_detect(self.channel)\n if callback is not None:\n self.callback = callback\n\n # Martijn van der Sar:\n # I replaced original the debouncing solution by GPIO's built in option 'bouncetime' since it works better\n GPIO.add_event_detect(self.channel, self.polarity, callback=self.callback, bouncetime=self.debounce_time)\n\n # Original function call:\n # GPIO.add_event_detect(self.channel, self.polarity, callback=self._debounce_and_callback)", "def wait_for_interrupts(self, wait_time = 1):\n raise AssertionError(\"wait_for_interrupts function i not implemented\")", "def run(self):\n\t\twhile not self.interrupt:\n\t\t\tfunc = self.callbackQueue.get()\n\t\t\tfunc()\n\t\tself.stop_elevator()", "def _on_message(ws, msg, turn_handler):\n\n def x():\n parsed = json.loads(msg)\n player = parsed['player']\n actions = parsed['actions']\n state = parsed['state']\n\n action = turn_handler(player, actions, state)\n response = {\"action\":action}\n\n ws.send(json.dumps(response))\n\n _thread.start_new_thread(x, ())", "def display(board, leds, delay=0.05, flashdelay=0.05):\n global i\n delay = float(delay)\n flashdelay = float(flashdelay)\n img = np.tile([i, 255, 255], board.shape).astype(np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)\n leds.draw(img, delay=delay)\n img = np.tile([0, 0, 0], board.shape).astype(np.uint8)\n if flashdelay > 0:\n leds.draw(img, delay=flashdelay)\n i += 5\n if i > 255:\n i = 0", "def processInterrupt(uPid):\n try:\n # pylint: disable=no-member\n win32console.GenerateConsoleCtrlEvent(win32con.CTRL_BREAK_EVENT, uPid);\n #GenerateConsoleCtrlEvent = ctypes.windll.kernel32.GenerateConsoleCtrlEvent\n #rc = GenerateConsoleCtrlEvent(1, uPid);\n #reporter.log('GenerateConsoleCtrlEvent -> %s' % (rc,));\n fRc = True;\n except:\n reporter.logXcpt('uPid=%s' % (uPid,));\n fRc = False;\n return fRc;", "def on_interrupt(self, *args) -> None: #pylint: disable=unused-argument\r\n if not self.stop_requested:\r\n self.stop_requested = True\r\n self.logger.critical('SIGINT detected - will stop at the end of the current evolution')\r\n else:\r\n stop_from = time.time() - 5000\r\n if self.last_stop_request > stop_from:\r\n raise KeyboardInterrupt\r\n else:\r\n self.last_stop_request = time.time()\r\n self.logger.critical('SIGINT suppressed - repeat within 5 seconds to sigterm')", "def SIGINT_handler(signal, frame):\n exit(2)", "def signal_handler(self, signum, frame):\n if signum == signal.SIGINT:\n self.terminate = True\n elif signum == signal.SIGALRM:\n self.button_handler(self.BUTTON_PIN)", "def default_int_handler(*more): # real signature unknown; restored from __doc__\n pass", "def signal_handler(self, sig, 
frame):\n GPIO.cleanup()\n sys.exit(0)", "def turn_on(self):\n self._interrupt_flash()\n if not self.on:\n GPIO.output(self.pin, GPIO.HIGH)\n self.on = True", "def Handler(self, *events: str, colon: bool = False,\n ircv3: bool = False) -> Callable:\n ...", "def lambda_handler(event, context):\n LOGGER.debug(\n \"Processing event: %s\",\n json.dumps(event, indent=2)\n if LOGGER.isEnabledFor(logging.DEBUG)\n else \"--data-hidden--\",\n )\n sfn_client = boto3.client(\"stepfunctions\")\n s3_resource = boto3.resource(\"s3\")\n\n all_accounts = get_all_accounts()\n account_file = get_file_from_s3(get_details_from_event(event), s3_resource)\n\n processed_account_list = process_account_list(\n all_accounts=all_accounts,\n accounts_in_file=account_file.get(\"content\", {}).get(\"accounts\", []),\n )\n\n if processed_account_list:\n start_executions(\n sfn_client,\n processed_account_list,\n codepipeline_execution_id=account_file.get(\"execution_id\"),\n request_id=context.aws_request_id,\n )\n return event", "def on_rfid(self):\n if self.active:\n self.sm.on_state_event(self.events.RFID)", "def connection_handler(to_wrap: Callable[[Arduino], None],\n port: str = '/dev/ttyACM0',\n baud_rate: int = 9600,\n timeout: int = 5):\n arduino = Arduino(port, baud_rate, timeout)\n arduino.connect()\n time.sleep(2)\n\n to_wrap(arduino)\n\n arduino.disconnect()", "def lambda_handler(event, context):\n logging.info('Starting function with context=%s and event=%s', context, event)\n holiday_schedule = trash.holidayschedule()\n old_holiday_schedule = trash_service.list()['data']\n old_holidays = [old_holiday['name'] for old_holiday in old_holiday_schedule]\n logging.info('Updating holiday schedule with schedule=%s', holiday_schedule)\n update_schedule(old_holidays, holiday_schedule)", "def release_iden_progress_changed_callback(self, callback=None):\r\n return self._arm.release_iden_progress_changed_callback(callback=callback)", "def handler(event, ctx):\n logger.info(event)\n job_id = None\n try:\n job_id = event['CodePipeline.job']['id']\n job_data = event['CodePipeline.job']['data']\n if len(job_data.get('outputArtifacts', [])) > 1:\n raise ValueError(\"Maximum number of output Artifacts is 1\")\n\n params = PipelineUserParameters(job_data, ctx)\n in_artifacts = load_pipeline_artifacts(job_data.get('inputArtifacts', []), params.Region)\n\n if params.ActionMode == 'CREATE_UPDATE':\n create_update_stack_handler(job_id, job_data, params, in_artifacts)\n elif params.ActionMode == 'DELETE_ONLY':\n delete_stack_handler(job_id, job_data, params)\n elif params.ActionMode == 'REPLACE_ON_FAILURE':\n replace_stack_handler(job_id)\n elif params.ActionMode == 'CHANGE_SET_REPLACE':\n create_replace_change_set_handler(job_id, job_data, params, in_artifacts)\n elif params.ActionMode == 'CHANGE_SET_EXECUTE':\n execute_change_set_handler(job_id, job_data, params)\n else:\n raise ValueError(\"Unknown operation mode requested: {}\".format(params.ActionMode))\n\n except Exception as e:\n logger.error('Function failed due to exception. 
{}'.format(e))\n traceback.print_exc()\n put_job_failure(job_id, 'Function exception: ' + str(e))\n\n logger.debug('Function complete.')\n return \"Complete.\"", "def main():\n # Initialize variables.\n global sensor\n global identified\n global finished\n\n upload_data_deadline = time.time()\n was_btn_pressed = is_button_pressed()\n upload_immediately = False\n\n print(\" +---------------------------------------+\")\n print(\" | End-to-End IoT Tank Monitoring Sample |\")\n print(\" +---------------------------------------+\\n\")\n\n # Instantiate the HDC1080 peripheral.\n try:\n sensor = HDC1080(I2C(1))\n except AssertionError:\n pass\n\n # Configure the Bluetooth advertisement.\n config_advertisement()\n\n # Register relay callback to handle incoming relay packets.\n relay.callback(relay_frame_callback)\n\n # Set the LED pin initial value to off (0).\n led_pin.off()\n\n # Wait until the device is connected to DRM.\n wait_drm_connection()\n\n # Start the main application loop.\n while True:\n # Sleep 100 ms.\n time.sleep_ms(100)\n\n # Check if the button was pressed.\n button_pressed = is_button_pressed()\n if not was_btn_pressed and button_pressed:\n toggle_valve()\n upload_immediately = True\n was_btn_pressed = button_pressed\n\n # Blink identification LED if necessary.\n if identified:\n if finished:\n identified = False\n finished = False\n else:\n led_pin.value(not led_pin.value())\n\n # Check if there is any DRM request to process.\n request = cloud.device_request_receive()\n upload_immediately |= process_drm_request(request)\n\n # Upload sensor values to DRM.\n if time.time() >= upload_data_deadline or upload_immediately:\n upload_sensor_data()\n upload_data_deadline = time.time() + DEFAULT_REPORT_INTERVAL\n upload_immediately = False", "def _print_callback(sample):\n\n print sample", "def triangleBtnHandler(val):\n if val == 1 :\n print(\"Triangle button pressed\")\n else:\n print(\"Triangle button released\")", "def interrupt(self):\n raise NotImplementedError", "def led_on(args):\n _check_mcu()\n led_on_bytes = CMD_MODULE_ID_LEDS | 0x00\n i2c.write_bytes_to_address(MCU_MOUTH_ADDRESS, led_on_bytes)", "def fire():\n print(\"FIRING\")\n GPIO.output(PIN1, 0)\n GPIO.output(PIN2, 0)\n GPIO.output(TRIGGER, 1)", "def test_handle_abort_when_not_idle(self):\n mock_status_callback = MagicMock()\n mock_firmware_handler = self.MockFirmwareHandler()\n firmware_update = OSFirmwareUpdate(\n mock_firmware_handler, mock_status_callback\n )\n firmware_update.logger.setLevel(logging.CRITICAL)\n expected_status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.ABORTED\n )\n firmware_update.current_status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.INSTALLING\n )\n firmware_update.handle_abort()\n\n firmware_update.status_callback.assert_called_once_with(\n expected_status\n )", "def delete_button_callback(self, button):\n\t\tRPIO.del_interrupt_callback(button)", "def interr(self, *args):\n return _ida_hexrays.Hexrays_Hooks_interr(self, *args)", "def callback(fun):\n return ffi.callback(_callback_type, fun)", "def lambda_handler(func):\n logger = logging.getLogger(__name__)\n\n @wraps(func)\n def func_wrapper(event, context):\n \"\"\"\n This is what's invoked via lambda.\n\n Args:\n event (dict): AWS Lambda event\n context (LambdaContext): AWS Lambda context\n\n .. 
_AWS Lambda python programming model:\n http://docs.aws.amazon.com/lambda/latest/dg/python-programming-model-handler-types.html\n \"\"\"\n req = Request(event, context)\n\n try:\n resp = func(req)\n\n if not isinstance(resp, Response):\n message = (\n 'Invalid return value from handler. '\n 'It should be either Response or Exception'\n )\n raise TypeError(message)\n except ServerlessError as e:\n status_code = e.status_code\n message = e.message if e.message else e.__class__.__name__\n\n resp = to_error_response(message, e.errors, status_code)\n except Exception as e: # pylint: disable=W0703\n logger.exception(e)\n status_code = 500\n message = 'InternalServerError'\n errors = tuple()\n\n resp = to_error_response(message, errors, status_code)\n return resp.to_lambda_output()\n return func_wrapper", "def signal_handler(self, signal, frame):\n logger.error(\"Received Signal to Terminate\")\n self.set_lcd_brightness(self.DIM_SHUT)\n ui.runui = False\n # sys.exit(0)", "def int_handle_switch(self,pin):\n\t\t#press_start = clock()\n\t\tpress_start = datetime.now()\n\t\tpress_time = 0 #datetime.now() - datetime.now()\t# wtf?\n\t\t\n\t\t# debounce\n\t\t#if 'debounce' in self.pins_config[pin]:\n\t\t#\tdebounce = self.pins_config[pin]['debounce'] / 1000\n\t\t#\tprint \"DEBUG: sleeping: {0}\".format(debounce)\n\t\t#\tsleep(debounce)\n\t\t#\t\n\t\tsleep(0.02)\n\t\tif not self.gpio.input(pin) == self.pins_config[pin]['gpio_on']:\n\t\t\treturn None\n\t\t\n\t\t#print \"DEBUG: self.int_handle_switch! for pin: {0}\".format(pin)\n\t\tself.__mode_reset()\t\t\t\t\t\t\t\t\t# Keep resetting as long as the mode is being used\n\t\t# TODO, check if possible to only reset affected timer: self.ms_all[fun['mode_cycle']].\n\t\t\n\t\t# check wheather we have short and/or long press functions and multi-press functions\n\t\tif self.pins_config[pin]['has_short'] and not self.pins_config[pin]['has_long'] and not self.pins_config[pin]['has_multi']:\n\t\t\t\"\"\" Only a SHORT function, no long press functions, no multi-button, go ahead and execute \"\"\"\n\t\t\tself.__printer(\"Executing short function, as it is the only option\") # LL_DEBUG #TODO\n\t\t\t\n\t\t\t# execute, checking mode\n\t\t\tfor ix, fun in enumerate(self.pins_config[pin]['functions']):\n\t\t\t\tif 'mode' in fun:\n\t\t\t\t\tif fun['mode'] in self.activemodes():\n\t\t\t\t\t\tself.__exec_function_by_code(fun['function'])\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint \"DEBUG mode mismatch\"\n\t\t\t\telse:\n\t\t\t\t\tif 'mode_select' in fun and 'mode_cycle' in fun:\n\t\t\t\t\t\tself.ms_all[fun['mode_cycle']].next()\n\t\t\t\t\tself.__exec_function_by_code(fun['function'])\n\t\t\t\t\n\t\t\treturn\n\n\t\tif (self.pins_config[pin]['has_long'] or self.pins_config[pin]['has_short']) and not self.pins_config[pin]['has_multi']:\n\t\t\t\"\"\" LONG + possible short press functions, no multi-buttons, go ahead and execute, if pressed long enough \"\"\"\n\t\t\tself.__printer(\"Button pressed (pin {0}), waiting for button to be released....\".format(pin))\n\t\t\tpressed = True\n\t\t\twhile True: #pressed == True or press_time >= self.long_press_ms:\n\t\t\t\tstate = self.gpio.input(pin)\n\t\t\t\tif state != self.pins_config[pin]['gpio_on']:\n\t\t\t\t\tpressed = False\n\t\t\t\t\tbreak\n\t\t\t\tif press_time >= self.long_press_ms:\n\t\t\t\t\tprint \"TIMEOUT\"\n\t\t\t\t\tbreak\n\t\t\t\t#press_time = (clock()-press_start)*1000\n\t\t\t\tdelta = datetime.now() - press_start\n\t\t\t\tpress_time = int(delta.total_seconds() * 1000)\n\t\t\t\tsleep(0.005)\n\t\t\t\t\n\t\t\tif press_time >= 
self.long_press_ms and self.pins_config[pin]['has_long']:\n\t\t\t\tself.__printer(\"Button was pressed for {0}ms (threshold={1}). Executing long function.\".format(press_time,self.long_press_ms))\t# TODO: LL_DEBUG\n\t\t\t\t\n\t\t\t\t# execute, checking mode\n\t\t\t\tfor ix, fun in enumerate(self.pins_config[pin]['functions']):\n\t\t\t\t\tif fun['press_type'] == 'long':\n\t\t\t\t\t\tif 'mode' in fun:\n\t\t\t\t\t\t\tif fun['mode'] in self.activemodes():\n\t\t\t\t\t\t\t\tself.__exec_function_by_code(fun['function'])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tprint \"DEBUG mode mismatch\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif 'mode_select' in fun and 'mode_cycle' in fun:\n\t\t\t\t\t\t\t\tself.ms_all[fun['mode_cycle']].next()\n\t\t\t\t\t\t\tself.__exec_function_by_code(fun['function'])\t\t\t\n\t\t\t\t\n\t\t\telif press_time > 0 and press_time < self.long_press_ms and self.pins_config[pin]['has_short']:\n\t\t\t\tself.__printer(\"Button was pressed for {0}ms (threshold={1}). Executing short function.\".format(press_time,self.long_press_ms))\t# TODO: LL_DEBUG\n\t\t\t\t\n\t\t\t\t# execute, checking mode\n\t\t\t\tfor ix, fun in enumerate(self.pins_config[pin]['functions']):\n\t\t\t\t\tif fun['press_type'] == 'short':\n\t\t\t\t\t\tif 'mode' in fun:\n\t\t\t\t\t\t\tif fun['mode'] in self.activemodes():\n\t\t\t\t\t\t\t\tself.__exec_function_by_code(fun['function'])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tprint \"DEBUG mode mismatch\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif 'mode_select' in fun and 'mode_cycle' in fun:\n\t\t\t\t\t\t\t\tself.ms_all[fun['mode_cycle']].next()\n\t\t\t\t\t\t\tself.__exec_function_by_code(fun['function'])\n\n\t\t\telse:\n\t\t\t\tprint \"No Match!\"\n\t\t\t\t\n\t\t\treturn\n\t\t\t\n\t\t# check wheather we have short and/or long press functions and multi-press functions\n\t\tif self.pins_config[pin]['has_multi']:\n\t\t\t\"\"\" There are multi-button combinations possible. 
The function pin list is sorted with highest button counts first.\n\t\t\t\tLooping from top to bottom we will check if any of these are valid.\t\"\"\"\n\t\t\tprint \"checking multi-button...\"\n\t\t\tmatched_short_press_function_code = None\n\t\t\tmatched_long_press_function_code = None\n\t\t\tfor function in self.pins_config[pin]['functions']:\n\t\t\t\n\t\t\t\tif 'mode' in function and function['mode'] in self.activemodes():\n\t\t\t\n\t\t\t\t\tmulti_match = True\n\t\t\t\t\tfor multi_pin in function['multi']:\n\t\t\t\t\t\tif not self.gpio.input(multi_pin) == self.pins_config[pin]['gpio_on']:\n\t\t\t\t\t\t\tmulti_match = False\n\t\t\t\t\tif multi_match == True:\n\t\t\t\t\t\tif function['press_type'] == 'short_press':\n\t\t\t\t\t\t\tmatched_short_press_function_code = function['function']\n\t\t\t\t\t\telif function['press_type'] == 'long_press':\n\t\t\t\t\t\t\tmatched_long_press_function_code = function['function']\n\t\t\t\t\t\n\t\t\tself.__printer(\"Waiting for button to be released....\")\n\t\t\tpressed = True\n\t\t\twhile pressed == True or press_time >= self.long_press_ms:\n\t\t\t\tstate = self.gpio.input(pin)\n\t\t\t\tif state != self.pins_config[pin]['gpio_on']:\n\t\t\t\t\tprint \"RELEASED!\"\n\t\t\t\t\tpressed = False\n\t\t\t\t\tbreak\n\t\t\t\t#press_time = clock()-press_start\n\t\t\t\tdelta = datetime.now() - press_start\n\t\t\t\tpress_time = int(delta.total_seconds() * 1000)\n\t\t\t\tsleep(0.01)\n\t\t\t\t\t\n\t\t\tprint \"....done\"\n\t\t\tprint \"switch was pressed for {0} ms\".format(press_time)\n\t\t\t\n\t#\t\t\tif self.pins_config[pin]['has_long'] and not self.pins_config[pin]['has_short']:\n\t#\t\t\t\tprint \"EXECUTING THE LONG FUNCTION (only long)\"\n\t\t\tif press_timemiliseconds >= self.long_press_ms and self.pins_config[pin]['has_long'] and matched_long_press_function_code is not None:\n\t\t\t\tprint \"EXECUTING THE LONG FUNCTION (long enough pressed)\"\n\t\t\telif press_timemiliseconds < self.long_press_ms and self.pins_config[pin]['has_short'] and matched_short_press_function_code is not None:\n\t\t\t\tprint \"EXECUTING THE SHORT FUNCTION (not long enough pressed)\"\n\t\t\telse:\n\t\t\t\tprint \"No Match!\"\n\t\t\t\t\n\t\t# feedback in case of no attached function\n\t\tself.__printer(\"Switch. Pin: {0}\".format(pin),level=LL_DEBUG)", "def catch_keyboard_interrupt() -> Callable:\n return signal.signal(signal.SIGINT, keyboard_interrupt_handler)", "def when_pressed(self, button, func, *args):\n\n self.hardware_interfaces[self._gpio].set_pin_event(self._b_names[button],\n func,\n *args)" ]
[ "0.6793415", "0.59784335", "0.5772505", "0.5751147", "0.569456", "0.56923246", "0.55878675", "0.5526748", "0.53978044", "0.50646555", "0.4927286", "0.4895538", "0.47954524", "0.4737531", "0.4658877", "0.4636048", "0.46294534", "0.46239135", "0.45867148", "0.45833647", "0.45762804", "0.45762804", "0.4533775", "0.45126545", "0.44978258", "0.4471549", "0.44616014", "0.44111654", "0.44101328", "0.4403133", "0.43907022", "0.4378229", "0.43129516", "0.4311412", "0.4303357", "0.4299316", "0.4296834", "0.42960814", "0.42933106", "0.4274331", "0.42728165", "0.42575926", "0.42538106", "0.42527342", "0.42497033", "0.42390162", "0.423064", "0.42174163", "0.42142186", "0.420191", "0.42005396", "0.4194973", "0.41896453", "0.41807494", "0.4180742", "0.41631567", "0.41631567", "0.41446868", "0.41400477", "0.41370025", "0.41308585", "0.41285986", "0.41213706", "0.41213706", "0.41213706", "0.41213706", "0.41169766", "0.4110857", "0.41099322", "0.4108558", "0.4107401", "0.41053477", "0.41023424", "0.40972808", "0.40941608", "0.40926713", "0.40908167", "0.40839887", "0.40787518", "0.40781894", "0.40708", "0.40630898", "0.4062767", "0.40622053", "0.40568158", "0.4056229", "0.40534016", "0.40519083", "0.40509242", "0.4049034", "0.40430516", "0.40409195", "0.40390885", "0.40374237", "0.4034268", "0.4027947", "0.40269122", "0.40267885", "0.4014921", "0.40148035" ]
0.72954494
0
Creates a newly initialised datasource with the specified training or test data. If _load_test_data and _load_training_data are implemented, they are called as well, and saving/loading is handled there.
def __init__(self, _expected_d_input=None, shuffled=False, _training_data=None, _test_data=None): self._training_data = _training_data self._test_data = _test_data self._num_training_samples = None self._num_test_samples = None self._available_training_lengths = [] self._available_test_lengths = [] self._training_data_path = os.path.join(type(self).__name__, "training_data.npy") if not os.path.isdir(type(self).__name__): os.mkdir(type(self).__name__) self._expected_d_input = _expected_d_input if self._training_data is None: if os.path.isfile(self._training_data_path): self._training_data = np.load(self._training_data_path).item() for _, value in self._training_data.items(): print(value[0].shape) print(_expected_d_input) if value[0].shape[2] != _expected_d_input: self._training_data = None break self._test_data_path = os.path.join(type(self).__name__, "testing_data.npy") if self._test_data is None: if os.path.isfile(self._test_data_path): self._test_data = np.load(self._test_data_path).item() for _, value in self._test_data.items(): if value[0].shape[2] != _expected_d_input: self._test_data = None break if self._test_data is None: self._load_test_data() if self._training_data is None: self._load_training_data() if shuffled: print("Shuffling not supported at this point!") self.current_index = {} for key, _ in self._training_data.items(): self.current_index[key] = 0 self._initialise_available_training_lengths() self._initialise_available_test_lengths() self._swapped_test_data = None self._swapped_training_data = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader", "def _init_train_loader(self):\n # Choose the right dataset type\n if self.config_args[\"num_members\"] > 1:\n class_dataset_wrapper = dataset_wrapper.MixMoDataset\n else:\n class_dataset_wrapper = dataset_wrapper.MSDADataset\n\n # Load augmentations\n self.traindatasetwrapper = class_dataset_wrapper(\n dataset=self.train_dataset,\n num_classes=int(self.config_args[\"data\"][\"num_classes\"]),\n num_members=self.config_args[\"num_members\"],\n dict_config=self.config_args[\"training\"][\"dataset_wrapper\"],\n properties=self.properties\n )\n\n # Build standard sampler\n _train_sampler = torch.utils.data.sampler.RandomSampler(\n data_source=self.traindatasetwrapper, ## only needed for its length\n num_samples=None,\n replacement=False,\n )\n\n # Wrap it with the repeating sampler used for multi-input models\n batch_sampler = batch_repetition_sampler.BatchRepetitionSampler(\n sampler=_train_sampler,\n batch_size=self.batch_size,\n num_members=self.config_args[\"num_members\"],\n drop_last=True,\n config_batch_sampler=self.config_args[\"training\"][\"batch_sampler\"]\n )\n\n self.train_loader = torch.utils.data.DataLoader(\n self.traindatasetwrapper,\n batch_sampler=batch_sampler,\n num_workers=self.num_workers,\n batch_size=1,\n shuffle=False,\n sampler=None,\n drop_last=False,\n pin_memory=True,\n )", "def build_training_data_loader(self) -> DataLoader:\n pass", "def test_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_test, **self.dl_kwargs)", "def initialise_dataset_loader(\n self, data_param=None, task_param=None, data_partitioner=None):\n raise NotImplementedError", "def test_dataloader(self) -> data.DataLoader:\n return data.DataLoader(dataset=self.datasets['test'], batch_size=self.batch_size,\n num_workers=self.num_workers, shuffle=False, pin_memory=False)", "def initialize_dataloaders(\n self, X: Union[np.ndarray, pd.DataFrame], y: Union[np.ndarray, np.array]\n ):\n training_design_matrix, training_targets_array, validation_design_matrix, validation_targets_array = self.generate_training_validation_split(\n X, y\n )\n training_dataloader_kwargs = {\n \"design_matrix\": training_design_matrix,\n \"targets_array\": training_targets_array,\n \"data_type\": self.data_type,\n \"batch_size\": self.batch_size,\n \"shuffle\": self.shuffle_training_examples,\n }\n validation_dataloader_kwargs = {\n \"design_matrix\": validation_design_matrix,\n \"targets_array\": validation_targets_array,\n \"data_type\": self.data_type,\n \"batch_size\": self.batch_size,\n \"shuffle\": False,\n }\n self.training_dataloader = self.generate_dataloader(**training_dataloader_kwargs)\n self.validation_dataloader = self.generate_dataloader(**validation_dataloader_kwargs)", "def create_test_dataloader(configs):\n\n test_dataset = KittiDataset(configs, mode='test', lidar_aug=None, hflip_prob=0., num_samples=configs.num_samples)\n test_sampler = None\n if configs.distributed:\n test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)\n test_dataloader = DataLoader(test_dataset, batch_size=configs.batch_size, shuffle=False,\n pin_memory=configs.pin_memory, num_workers=configs.num_workers, sampler=test_sampler)\n\n return test_dataloader", "def 
_create_data_loader(self, data, **kwargs):\n if data is None:\n return None\n\n # Set DataLoader config\n # NOTE: Not applicable if data is already a DataLoader\n config = {\n **self.config[\"train_config\"][\"data_loader_config\"],\n **kwargs,\n \"pin_memory\": self.config[\"device\"] != \"cpu\",\n }\n # Return data as DataLoader\n if isinstance(data, DataLoader):\n return data\n elif isinstance(data, Dataset):\n return DataLoader(data, **config)\n elif isinstance(data, (tuple, list)):\n return DataLoader(self._create_dataset(*data), **config)\n else:\n raise ValueError(\"Input data type not recognized.\")", "def test_dataloader(self) -> DataLoader:\n return DataLoader(\n self.test_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=False,\n num_workers=multiprocessing.cpu_count(),\n )", "def init_data(dataset_config: dict):\n # train and dev will be in random order, test may be ordered according to labels\n if dataset_config[\"name\"] == \"CoLA\":\n train, dev, test, num_classes = load_cola(dataset_config)\n elif dataset_config[\"name\"] == \"AGNews\":\n train, dev, test, num_classes = load_ag_news(dataset_config)\n elif dataset_config[\"name\"] == \"DBPedia\":\n train, dev, test, num_classes = load_dbpedia(dataset_config)\n elif dataset_config[\"name\"] == \"YRF\":\n train, dev, test, num_classes = load_yrf(dataset_config)\n else:\n raise NameError(f\"Dataset {dataset_config['name']} not implemented.\")\n # etc.\n\n # shrink size if debugging\n if dataset_config[\"debug\"]:\n # choose a random subset using huggingface select function\n train = train.select(random.sample(range(len(train)), k=200))\n dev = dev.select(random.sample(range(len(dev)), k=40))\n test = test.select(random.sample(range(len(test)), k=200))\n\n # create class imbalance\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"pool_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"pool_balance\"] == \"imbalanced\":\n train = train.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"pool_balance = {dataset_config['pool_balance']} not allowed\")\n\n if dataset_config[\"dev_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"dev_balance\"] == \"imbalanced\":\n dev = dev.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"dev_balance = {dataset_config['dev_balance']} not allowed\")\n\n # get seed labelled pool indices (using the same seed data every time)\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"seed_balance\"] == \"balanced\":\n # this is random (will have some variance vs pool)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"]\n )\n elif dataset_config[\"seed_balance\"] == \"stratified\":\n # this is the same as the underlying train set (which may be unbalanced)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"],\n stratify=train['label']\n )\n elif dataset_config[\"seed_balance\"] == \"imbalanced\":\n # artificially sample an imbalanced seed set from the pool\n unlabelled_pool_idx, labelled_pool_idx = create_imbalanced_seed(\n train,\n num_classes,\n dataset_config[\"seed_size\"],\n dataset_config['imbalance_prop'],\n 
dataset_config['imbalance_cls']\n )\n else:\n raise NameError(f\"seed_balance = {dataset_config['seed_balance']} not allowed\")\n\n return train, dev, test, num_classes, labelled_pool_idx, unlabelled_pool_idx", "def train_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_train, **self.dl_kwargs)", "def load_training_data(config):\n # Load data\n LOGGER.info(\"Loading training data.\")\n train_x = load_data(config['data_source'], config['train_x_filename'])\n train_y = load_data(config['data_source'], config['train_y_filename'])\n val_x = load_data(config['data_source'], config['val_x_filename'])\n val_y = load_data(config['data_source'], config['val_y_filename'])\n LOGGER.info(\"Training data size: %d\", len(train_x))\n LOGGER.info(\"Validation data size: %d\", len(val_x))\n\n # Build datasets and create iterators\n LOGGER.info(\"Building dataset.\")\n train_dataset = get_dataset(\n train_x, train_y, config['batch_size'], config['data_shape'],\n config['n_classes'], True)\n val_dataset = get_dataset(\n val_x, val_y, config['batch_size'], config['data_shape'],\n config['n_classes'])\n\n return train_dataset, val_dataset, len(val_x)", "def create_train_dataloader(configs):\n train_lidar_aug = OneOf([\n Random_Rotation(limit_angle=np.pi / 4, p=1.0),\n Random_Scaling(scaling_range=(0.95, 1.05), p=1.0),\n ], p=0.66)\n train_dataset = KittiDataset(configs, mode='train', lidar_aug=train_lidar_aug, hflip_prob=configs.hflip_prob,\n num_samples=configs.num_samples)\n train_sampler = None\n if configs.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, batch_size=configs.batch_size, shuffle=(train_sampler is None),\n pin_memory=configs.pin_memory, num_workers=configs.num_workers, sampler=train_sampler)\n\n return train_dataloader, train_sampler", "def test_dataloader(self, batch_size: Optional[int] = None) -> DataLoader:\n if self.test is not None:\n dataset = TabularDataset(\n task=self.config.task,\n data=self.test,\n categorical_cols=self.config.categorical_cols,\n continuous_cols=self.config.continuous_cols,\n embed_categorical=(not self.do_leave_one_out_encoder()),\n target=self.target,\n )\n return DataLoader(\n dataset,\n batch_size if batch_size is not None else self.batch_size,\n shuffle=False,\n num_workers=self.config.num_workers,\n pin_memory=self.config.pin_memory,\n )", "def setup(self):\n # TODO check if need both dataset together\n self.train_dataset = ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)", "def train_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def prepare_data(self):\n try:\n self.train_dataset = self.datasets['train']\n self.val_dataset = self.datasets['val']\n try:\n self.test_dataset = self.datasets['test']\n except:\n pass\n except Exception as e:\n print('Data was not succesfully prepared:', e)", "def prepare_dataloader(opt, dataobj):\n\n def load_data(name):\n with open(name, 'rb') as f:\n data = pickle.load(f)\n num_types = 1 # There is no event type prediction, hence using a dummy value, this will basically be 
a constant value field\n return data, num_types\n\n print('[Info] Loading train data...')\n train_data, num_types = load_data(opt.data + 'train_ny.pkl')\n print('[Info] Loading dev data...')\n val_data, _ = load_data(opt.data + 'val_ny.pkl')\n print('[Info] Loading test data...')\n test_data, _ = load_data(opt.data + 'test_ny.pkl')\n\n trainloader = get_dataloader(train_data, opt.batch_size, shuffle=True)\n validationloader = get_dataloader(val_data, opt.batch_size, shuffle=True)\n testloader = get_dataloader(test_data, opt.batch_size, shuffle=False)\n return trainloader, validationloader, testloader, num_types", "def test_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def test_setup(self, test_data: list=None):\n print(\"[dataset]: using test setup ...\")\n self.vocabulary = [\"empty\"]\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=\"bert\", test=True)\n return", "def train(self, training_data, cfg, **kwargs):\n pass", "def _load_test_data(self):\n\n self.test_loader = data.Test_loader(self.N_max, self.n_per_conn,\n self.data_path, self.device)\n\n self.test_loader.load_data()\n\n # load mean and std from train\n scc_mean_std = np.loadtxt(\n os.path.join(self.data_path, 'scc_mean_std.csv'), delimiter=',')\n self.mean = torch.Tensor(scc_mean_std[0])\n self.std = torch.Tensor(scc_mean_std[1])", "def load_training_data(\n self,\n train_data_file=\"datasets/train_data.json\",\n test_data_file=\"datasets/test_data.json\",\n ):\n train_data = pd.read_json(train_data_file)\n test_data = pd.read_json(test_data_file)\n return train_data, test_data", "def _load_training_data(self):\n self._save_training_data()", "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def to_DataLoader(self, **kwargs):\r\n return DataLoader(self, **kwargs)", "def generate_train_test(self):\n x, y = self.read_data()\n x_train, y_train, x_test, y_test = self.sample_data(x, y)\n self.train = (x_train, y_train)\n self.test = (x_test, y_test)", "def get_dataloader(data_folder, model_name, data_name, size=\"default\"):\n training_set = None\n validation_set = None\n\n if model_name == \"Howe_Patterson\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition.pkl')))\n\n elif size == \"small\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_small.pkl'))\n elif size == \"tiny\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_tiny.pkl'))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition.pkl'))\n\n\n if data_name == \"SHHS\":\n training_set = 
Dataset_full_SHHS(partition['train'], data_folder)\n validation_set = Dataset_full_SHHS(partition['validation'], data_folder)\n elif data_name == \"snooze\":\n training_set = Dataset_full(partition['train'], data_folder)\n validation_set = Dataset_full(partition['validation'], data_folder)\n elif data_name == \"philips\":\n training_set = Dataset_Philips_full(partition['train'], data_folder)\n validation_set = Dataset_Philips_full(partition['validation'], data_folder)\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n training_set = ConcatDataset(\n Dataset_full(partition[0]['train'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['train'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n validation_set = ConcatDataset(\n Dataset_full(partition[0]['validation'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['validation'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n elif model_name == \"Deep_Sleep\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition.pkl')))\n\n elif size == \"small\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_small.pkl'))\n elif size == \"tiny\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_tiny.pkl'))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition.pkl'))\n\n if data_name == \"SHHS\":\n training_set = Dataset_full_SHHS(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_full_SHHS(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"snooze\":\n training_set = Dataset_full(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_full(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"philips\":\n training_set = Dataset_Philips_full(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_Philips_full(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n # TODO combined dataset https://discuss.pytorch.org/t/train-simultaneously-on-two-datasets/649/17\n training_set = ConcatDataset(\n Dataset_full(partition[0]['train'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['train'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n validation_set = ConcatDataset(\n Dataset_full(partition[0]['validation'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['validation'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, 
down_sample_annotation=False))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n elif model_name == \"ConvNet_IID\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition_IID_windows.pkl')))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition_IID_windows.pkl'))\n if data_name == \"SHHS\":\n training_set = Dataset_IID_window_SHHS(partition['train'], data_folder)\n validation_set = Dataset_IID_window_SHHS(partition['validation'], data_folder)\n elif data_name == \"snooze\":\n training_set = Dataset_IID_window(partition['train'], data_folder)\n validation_set = Dataset_IID_window(partition['validation'], data_folder)\n elif data_name == \"philips\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n training_set = ConcatDataset(\n Dataset_IID_window(partition[0]['train'], data_folder[0]),\n Dataset_IID_window_SHHS(partition[1]['train'], data_folder[1]))\n validation_set = ConcatDataset(\n Dataset_IID_window(partition[0]['validation'], data_folder[0]),\n Dataset_IID_window_SHHS(partition[1]['validation'], data_folder[1]))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n\n else:\n print(\"{} wrong model for dataloader\".format(model_name))\n exit()\n\n return training_set, validation_set", "def test_dataset(self):\n self.assertIsInstance(self.dataset, LazyDataset)\n\n # Not loaded\n self.assertIsNone(self.dataset._training)\n self.assertIsNone(self.dataset._testing)\n self.assertIsNone(self.dataset._validation)\n self.assertFalse(self.dataset._loaded)\n self.assertFalse(self.dataset._loaded_validation)\n\n # Load\n try:\n self.dataset._load()\n except (EOFError, IOError):\n self.skipTest('Problem with connection. 
Try this test again later.')\n\n self.assertIsInstance(self.dataset.training, TriplesFactory)\n self.assertIsInstance(self.dataset.testing, TriplesFactory)\n self.assertTrue(self.dataset._loaded)\n\n if self.autoloaded_validation:\n self.assertTrue(self.dataset._loaded_validation)\n else:\n self.assertFalse(self.dataset._loaded_validation)\n self.dataset._load_validation()\n\n self.assertIsInstance(self.dataset.validation, TriplesFactory)\n\n self.assertIsNotNone(self.dataset._training)\n self.assertIsNotNone(self.dataset._testing)\n self.assertIsNotNone(self.dataset._validation)\n self.assertTrue(self.dataset._loaded)\n self.assertTrue(self.dataset._loaded_validation)\n\n self.assertEqual(self.dataset.num_entities, self.exp_num_entities)\n self.assertEqual(self.dataset.num_relations, self.exp_num_relations)\n\n num_triples = sum(\n triples_factory.num_triples for\n triples_factory in (self.dataset._training, self.dataset._testing, self.dataset._validation)\n )\n if self.exp_num_triples_tolerance is None:\n self.assertEqual(self.exp_num_triples, num_triples)\n else:\n self.assertAlmostEqual(self.exp_num_triples, num_triples, delta=self.exp_num_triples_tolerance)\n\n # Test caching\n start = timeit.default_timer()\n _ = self.dataset.training\n end = timeit.default_timer()\n # assert (end - start) < 1.0e-02\n self.assertAlmostEqual(start, end, delta=1.0e-02, msg='Caching should have made this operation fast')", "def init_loaders(self, *args, **kwargs):\n\n # Convert the data to Dataset\n dataset_dict = self.init_datasets(*args, **kwargs)\n\n # If the Dataset implements collate_fn, that is used. Otherwise, default_collate is used\n if hasattr(dataset_dict[\"train\"], \"collate_fn\") and callable(\n getattr(dataset_dict[\"train\"], \"collate_fn\")\n ):\n collate_fn = dataset_dict[\"train\"].collate_fn\n else:\n collate_fn = default_collate\n\n # If 'iters_per_epoch' is defined, then a fixed number of random sample batches from the training set\n # are drawn per epoch.\n # Otherwise, an epoch is defined by a full run through all of the data in the dataloader.\n #\n if self.config_dict.get(\"iters_per_epoch\") is not None:\n num_samples = (\n self.config_dict[\"iters_per_epoch\"] * self.config_dict[\"batch_size\"]\n )\n loaders_dict = {}\n for key in dataset_dict.keys():\n if key == \"train\":\n loaders_dict[key] = DataLoader(\n dataset_dict[key],\n batch_sampler=BatchSampler(\n RandomSampler(\n dataset_dict[key],\n replacement=True,\n num_samples=num_samples,\n ),\n batch_size=self.config_dict[\"batch_size\"],\n drop_last=False,\n ),\n collate_fn=collate_fn,\n )\n else:\n loaders_dict[key] = DataLoader(\n dataset_dict[key],\n batch_size=self.config_dict[\"batch_size\"],\n collate_fn=collate_fn,\n )\n else:\n loaders_dict = {\n key: DataLoader(\n dataset_dict[key],\n batch_size=self.config_dict[\"batch_size\"],\n collate_fn=collate_fn,\n )\n for key in data_dict.keys()\n }\n\n return loaders_dict", "def create_dataset(opt):\n\tdata_loader = CustomDatasetDataLoader(opt)\n\tdataset = data_loader.load_data()\n\treturn dataset", "def make_loader(dataset, train_batch_size, validation_split=0.2):\n # number of samples in train and test set\n train_len = int(len(dataset) * (1 - validation_split))\n test_len = len(dataset) - train_len\n train_set, test_set = torch.utils.data.random_split(dataset, [train_len, test_len])\n # create train_loader\n train_loader = torch.utils.data.DataLoader(\n train_set, batch_size=train_batch_size, shuffle=True,\n )\n # create test_loader\n test_loader = 
torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False,)\n return train_loader, test_loader", "def train_online(\n self,\n dataset: Union[str, dict, pd.DataFrame],\n training_set_metadata: Union[str, dict] = None,\n data_format: str = \"auto\",\n random_seed: int = default_random_seed,\n ) -> None:\n training_set_metadata = training_set_metadata or self.training_set_metadata\n preprocessing_params = get_preprocessing_params(self.config_obj)\n\n with provision_preprocessing_workers(self.backend):\n # TODO (Connor): Refactor to use self.config_obj\n training_dataset, _, _, training_set_metadata = preprocess_for_training(\n self.config_obj.to_dict(),\n training_set=dataset,\n training_set_metadata=training_set_metadata,\n data_format=data_format,\n skip_save_processed_input=True,\n preprocessing_params=preprocessing_params,\n backend=self.backend,\n random_seed=random_seed,\n callbacks=self.callbacks,\n )\n\n if not self.training_set_metadata:\n self.training_set_metadata = training_set_metadata\n\n if not self.model:\n update_config_with_metadata(self.config_obj, training_set_metadata)\n self.model = LudwigModel.create_model(self.config_obj, random_seed=random_seed)\n # update config with properties determined during model instantiation\n update_config_with_model(self.config_obj, self.model)\n set_saved_weights_in_checkpoint_flag(self.config_obj)\n\n if not self._online_trainer:\n self._online_trainer = self.backend.create_trainer(\n config=self.config_obj.trainer, model=self.model, random_seed=random_seed\n )\n\n self._tune_batch_size(self._online_trainer, dataset, random_seed=random_seed)\n\n self.model = self._online_trainer.train_online(training_dataset)", "def setUpTestData(cls):\n call_command('loaddata', 'db.json', verbosity=0)", "def train(train_dataset: torch.utils.data.Dataset, test_dataset: torch.utils.data.Dataset,\n training_config: dict = train_config, global_config: dict = global_config):\n\n for path in global_config.values():\n create_dirs(path)\n\n # wrap datasets with Dataloader classes\n train_loader = torch.utils.data.DataLoader(train_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n test_loader = torch.utils.data.DataLoader(test_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n\n # model name & paths\n name = \"_\".join([train_config[\"DATE\"], train_config[\"SESSION_NAME\"]])\n modelpath = os.path.join(global_config[\"WEIGHT_DIR\"], name)\n\n # instantiate model\n model = training_config[\"MODEL\"](**training_config[\"MODEL_CONFIG\"])\n\n optimizer = training_config[\"OPTIMIZER\"](model.parameters(),\n **training_config[\"OPTIMIZER_CONFIG\"])\n\n # set up ignite engine\n training_config[\"METRICS\"].update({\"loss\" : Loss(training_config[\"LOSS\"])})\n trainer = create_supervised_trainer(model=model, optimizer=optimizer,\n loss_fn=training_config[\"LOSS\"],\n device=training_config[\"DEVICE\"])\n evaluator = create_supervised_evaluator(model,\n metrics=training_config[\"METRICS\"],\n device=training_config[\"DEVICE\"])\n\n\n # tensorboardX setup\n log_dir = os.path.join(global_config[\"LOG_DIR\"], \"tensorboardx\", name)\n create_dirs(log_dir)\n writer = SummaryWriter(logdir=log_dir)\n\n # log using the logging tool\n logger = log.Log(training_config, run_name=train_config['SESSION_NAME'])\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def log_training(engine):\n iteration = (engine.state.iteration - 1) % len(train_loader) + 1\n writer.add_scalar(\"training/loss\", engine.state.output, engine.state.iteration)\n if iteration % 4 == 0:\n 
print(\"\\repoch[{}] iteration[{}/{}] loss: {:.2f} \".format(engine.state.epoch,\n iteration, len(train_loader),\n engine.state.output), end=\"\")\n\n # generic evaluation function\n def evaluate(engine, loader):\n evaluator.run(loader)\n metrics = evaluator.state.metrics\n return metrics\n\n # training data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_training_results(engine):\n print(\"\\ntraining results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, train_loader)\n print(metrics)\n for key, value in metrics.items():\n logger.log_metric(key, value)\n writer.add_scalar(\"training/avg_{}\".format(key), value, engine.state.epoch)\n\n # test data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(engine):\n print(\"test results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, test_loader)\n print(metrics)\n for key, value in metrics.items():\n writer.add_scalar(\"validation/avg_{}\".format(key), value, engine.state.epoch)\n\n # model checkpointing\n @trainer.on(Events.EPOCH_COMPLETED)\n def model_checkpoint(engine):\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Checkpoint saved to {}\".format(modelpath + \".pth\"))\n\n # training iteration\n try:\n trainer.run(train_loader, max_epochs=training_config[\"EPOCHS\"])\n except KeyboardInterrupt:\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Model saved to {}\".format(modelpath + \".pth\"))\n raise KeyboardInterrupt\n\n # write weights\n torch.save(model.state_dict(), modelpath + \".pth\")\n\n # write csv log file\n log_content = training_config.copy()\n evaluator.run(test_loader)\n log_content[\"VAL_METRICS\"] = evaluator.state.metrics\n log_path = os.path.join(global_config[\"LOG_DIR\"], training_config[\"LOGFILE\"])\n write_log(log_path, log_content)\n\n logger.end_run()\n \n return evaluator.state.metrics[\"training/avg_loss\"]", "def __init__(self, dataset, batch_size, n_threads=4,\n\t ten_crop=False, data_path='/home/dataset/', logger=None):\n\t\tself.dataset = dataset\n\t\tself.batch_size = batch_size\n\t\tself.n_threads = n_threads\n\t\tself.ten_crop = ten_crop\n\t\tself.data_path = data_path\n\t\tself.logger = logger\n\t\tself.dataset_root = data_path\n\t\t\n\t\tself.logger.info(\"|===>Creating data loader for \" + self.dataset)\n\t\t\n\t\tif self.dataset in [\"cifar100\"]:\n\t\t\tself.train_loader, self.test_loader = self.cifar(\n\t\t\t\tdataset=self.dataset)\n\n\t\telif self.dataset in [\"cifar10\"]:\n\t\t\tself.train_loader, self.test_loader = self.cifar(\n dataset=self.dataset)\n\t\t\n\t\telif self.dataset in [\"imagenet\"]:\n\t\t\tself.train_loader, self.test_loader = self.imagenet(\n\t\t\t\tdataset=self.dataset)\n\t\telse:\n\t\t\tassert False, \"invalid data set\"", "def from_datasets(\n cls,\n train_dataset: Optional[Union[Dataset, Sequence[Dataset], Mapping[str, Dataset]]] = None,\n val_dataset: Optional[Union[Dataset, Sequence[Dataset]]] = None,\n test_dataset: Optional[Union[Dataset, Sequence[Dataset]]] = None,\n batch_size: int = 1,\n num_workers: int = 0,\n ):\n\n def dataloader(ds: Dataset, shuffle: bool = False) -> DataLoader:\n shuffle &= not isinstance(ds, IterableDataset)\n return DataLoader(ds, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=True)\n\n def train_dataloader():\n if isinstance(train_dataset, Mapping):\n return {key: dataloader(ds, shuffle=True) for key, ds in train_dataset.items()}\n if isinstance(train_dataset, Sequence):\n return [dataloader(ds, shuffle=True) 
for ds in train_dataset]\n return dataloader(train_dataset, shuffle=True)\n\n def val_dataloader():\n if isinstance(val_dataset, Sequence):\n return [dataloader(ds) for ds in val_dataset]\n return dataloader(val_dataset)\n\n def test_dataloader():\n if isinstance(test_dataset, Sequence):\n return [dataloader(ds) for ds in test_dataset]\n return dataloader(test_dataset)\n\n datamodule = cls()\n if train_dataset is not None:\n datamodule.train_dataloader = train_dataloader\n if val_dataset is not None:\n datamodule.val_dataloader = val_dataloader\n if test_dataset is not None:\n datamodule.test_dataloader = test_dataloader\n return datamodule", "def train(\n self, training_data: Dataset, validation_data: Optional[Dataset] = None\n ) -> Predictor:\n raise NotImplementedError", "def train_dataloader(self) -> DataLoader:\n return DataLoader(\n self.train_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=True,\n num_workers=multiprocessing.cpu_count(),\n )", "def get_dataloaders(args):\n if args.dataset == 'heat':\n dataset_class = heat.HeatDiffusionDataset\n else:\n raise ValueError(f'Unknown dataset {args.dataset}')\n train_dataset = dataset_class(\n dataset_class.get_train_path(args.data_path), args, train=True)\n if args.dist:\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset)\n else:\n train_sampler = torch.utils.data.RandomSampler(train_dataset)\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, num_workers=args.workers,\n sampler=train_sampler, pin_memory=True, drop_last=args.drop_last)\n if not args.no_eval:\n validation_dataset = dataset_class(\n dataset_class.get_validation_path(args.data_path), args, train=False)\n if args.dist:\n validation_sampler = torch.utils.data.distributed.DistributedSampler(\n validation_dataset, shuffle=False)\n else:\n validation_sampler = torch.utils.data.SequentialSampler(\n validation_dataset)\n validation_loader = torch.utils.data.DataLoader(\n validation_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=validation_sampler,\n pin_memory=True, drop_last=args.drop_last)\n\n test_dataset = dataset_class(\n dataset_class.get_test_path(args.data_path), args, train=False)\n if args.dist:\n test_sampler = torch.utils.data.distributed.DistributedSampler(\n test_dataset, shuffle=False)\n else:\n test_sampler = torch.utils.data.SequentialSampler(\n test_dataset)\n test_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=test_sampler,\n pin_memory=True, drop_last=args.drop_last)\n else:\n validation_loader = None\n test_loader = None\n\n # Update the data shape if needed.\n if args.data_shape is None:\n args.data_shape = train_dataset.get_shape()\n if args.data_target_shape is None:\n args.data_target_shape = train_dataset.get_target_shape()\n\n return train_loader, validation_loader, test_loader", "def prepare_data(self):\n # Set up the path\n self.path_target_train = os.path.join(self.data_dir, self.train_path_file_target + \".pkl\")\n self.path_target_test = os.path.join(self.data_dir, self.test_path_file_target + \".pkl\")\n\n if not os.path.exists(self.path_target_train) or not os.path.exists(self.path_target_test):\n # Create vocabularies of the appropriate sizes.\n self.create_vocabulary(self.train_path_file)\n\n # Create token ids for the training data.\n input_train_path = self.train_path_file\n target_train_path = self.train_path_file_target\n train_input, train_input_length, train_labels 
= self.data_to_token_ids(input_train_path, target_train_path)\n\n # Create token ids for the validation data.\n input_test_path = self.test_path_file\n target_test_path = self.test_path_file_target\n test_input, test_input_length, _ = self.data_to_token_ids(input_test_path, target_test_path, train=False)\n\n # Collect data into a list\n training_data = [train_input, train_input_length, train_labels]\n test_data = [test_input, test_input_length]\n\n # Save all the data\n with open(self.path_target_train, 'wb') as f:\n pickle.dump(training_data,f)\n with open(self.path_target_test, 'wb') as f:\n pickle.dump(test_data, f)\n else:\n # Load data\n with open(self.path_target_train, 'rb') as f:\n training_data = pickle.load(f)\n with open(self.path_target_test, 'rb') as f:\n test_data = pickle.load(f)\n\n # Initialize vocabulary\n self.initialize_vocabulary()\n\n # Convert list into a numpy array - train data\n train_input = pd.DataFrame(training_data[0]).fillna(value=0).astype(int).values\n train_length_input = np.array(training_data[1], dtype=int)\n train_labels = np.array(training_data[2], dtype=int)\n\n # Convert list into a numpy array - test data\n test_input = pd.DataFrame(test_data[0]).fillna(value=0).astype(int).values\n test_length_input = pd.DataFrame(test_data[1]).fillna(value=0).astype(int).values\n\n # Printing maximum length\n print(\"Shape of the input training matrix {}\".format(str(train_input.shape)))\n print(\"Shape of the input test matrix {}\".format(str(test_input.shape)))\n\n # Copy the files\n self.copy_files()\n\n # Return output\n return train_input, train_length_input, train_labels, test_input, test_length_input", "def __init__(\n self,\n data_dir,\n which_set='train',\n which_year='09',\n fraction=1,\n use_plus_minus_feats=False,\n use_compressed_sensing=False,\n batch_size=100,\n max_num_batches=-1,\n shuffle_order=True,\n rng=None,\n data=None):\n expanded_data_dir = os.path.expanduser(data_dir)\n data_path = os.path.join(\n expanded_data_dir, 'assist{0}-{1}'.format(which_year, which_set))\n self._validate_inputs(which_set, which_year, data_path)\n self.which_set = which_set\n self.which_year = which_year\n self.data_dir = expanded_data_dir\n self.num_classes = 2\n self.fraction = fraction\n self.use_plus_minus_feats = use_plus_minus_feats\n self.use_compressed_sensing = use_compressed_sensing\n\n if data:\n inputs, targets, self.target_ids = data['inputs'], \\\n data['targets'], data['target_ids']\n self.max_num_ans, self.max_prob_set_id = data['max_num_ans'],\\\n data['max_prob_set_id']\n self.encoding_dim = data['encoding_dim']\n else:\n inputs, targets = self.load_data(data_path, use_plus_minus_feats)\n inputs, targets = self.reduce_data(inputs, targets, fraction)\n if use_compressed_sensing:\n inputs = self.apply_compressed_sensing(inputs, rng)\n # pass the loaded data to the parent class __init__\n super(ASSISTDataProvider, self).__init__(\n inputs, targets, batch_size, max_num_batches, shuffle_order, rng)", "def _prepare_data(self):\n #TODO hardcoded values need to change\n print_info(\"Preprocessing the train data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"train\"),\n self.TRAIN_OUT_PATH)\n\n print_info(\"Preprocessing the test data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"test\"),\n self.TEST_OUT_PATH)\n\n print_info(\"Preprocessing the validation data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"val\"),\n self.VAL_OUT_PATH)", "def train(self, trainingData, 
trainingLabels, testData, testLabels, validate): \n\t\t \n\t\tself.features = trainingData[0].keys() # this could be useful for your code later...\n\n\t\tif (self.automaticTuning):\n\t\t\tCgrid = [0.001, 0.002, 0.003, 0.004, 0.005]\n\t\telse:\n\t\t\tCgrid = [self.C]\n\t\t\t\n\t\treturn self.trainAndTune(trainingData, trainingLabels, testData, testLabels, Cgrid, validate)", "def __init__(\n self,\n train: pd.DataFrame,\n config: DictConfig,\n validation: pd.DataFrame = None,\n test: pd.DataFrame = None,\n target_transform: Optional[Union[TransformerMixin, Tuple]] = None,\n train_sampler: Optional[torch.utils.data.Sampler] = None,\n seed: Optional[int] = 42,\n ):\n super().__init__()\n self.train = train.copy()\n self.validation = validation\n self._set_target_transform(target_transform)\n self.test = test if test is None else test.copy()\n self.target = config.target\n self.batch_size = config.batch_size\n self.train_sampler = train_sampler\n self.config = config\n self.seed = seed\n self._fitted = False", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def create_loader(dataset: Dataset, cfg: trainer_configs.BaseDatasetConfig, batch_size: int, *,\r\n collate_fn: Optional[Callable[[List[Any]], Any]] = None) -> DataLoader:\r\n # return DataLoader(\r\n # dataset, batch_size=batch_size, num_workers=cfg.num_workers,\r\n # drop_last=cfg.drop_last, collate_fn=collate_fn) # type: ignore\r\n return DataLoader(\r\n dataset, batch_size=batch_size, shuffle=cfg.shuffle, num_workers=cfg.num_workers,\r\n drop_last=cfg.drop_last, collate_fn=collate_fn) # type: ignore\r", "def _init_dataset(self, data_config, split='train'):\n assert split in {'train', 'valid'}\n\n # load datasets\n print(f'Load {split} dataset')\n if data_config['type'] == 'npy':\n dataset = MSDMelDataset(\n data_config['mel_root'], data_config[f'{split}_tids_fn'],\n data_config['label_fn'], on_mem=data_config['on_mem'],\n ignore_intersection=data_config['ignore_label_intersection'],\n transform=ToVariable())\n\n elif data_config['type'] == 'hdf':\n dataset = MSDMelHDFDataset(\n data_config['hdf_fn'], data_config[f'{split}_tids_fn'],\n data_config['label_fn'],\n ignore_intersection=data_config['ignore_label_intersection'],\n transform=ToVariable())\n\n elif data_config['type'] == 'audio':\n dataset = MSDAudioDataset(\n data_config['audio_root'], data_config[f'{split}_tids_fn'],\n data_config['tid2path_fn'], data_config['label_fn'],\n ignore_intersection=data_config['ignore_label_intersection'],\n device='cpu',\n transform=ToVariable())\n\n return dataset", "def prepare_data_for_training(args):\n # Form the train/test splits and write them to disk\n dataset = data.Dataset(args)\n # get image classes and image counts in each class\n label_map = dataset.get_class_info()\n class_count = len(list(label_map.values()))\n # split the data and store it in log dir\n df_train, df_test = dataset.split_dataset()\n\n # perform dataset augmentations\n image_data = augment.Augmentation(args)\n # get the data gens for training and test images\n train_data_gen, _ = image_data.map_fn_train(df_train)\n test_data_gen, _ = image_data.map_fn_test(df_test)\n\n return train_data_gen, test_data_gen, df_train, df_test, class_count", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)", "def train(\n self, training_data: TrainingData, cfg: DazuConfig, **kwargs: Any\n ) -> None:", "def create_train_test(option, transform, 
params, split=0.2):\r\n clip_im_dir = option.clip_im_dir\r\n matting_dir = option.matting_dir\r\n csv_path = option.csv_path\r\n \r\n print(\"create datasets\")\r\n \r\n \r\n data_df = pd.read_csv(csv_path)\r\n # data_df = MergeDataframe(clip_im_dir, matting_dir)\r\n \r\n #separate data in training and test data (20/80)\r\n train_df, test_df = train_test_split(data_df, test_size=split)\r\n \r\n #search right Dataset class\r\n package_dir = Path(src.dataset.__file__).resolve().parent\r\n\r\n for (_, module_name, _) in iter_modules([package_dir]):\r\n # print(module_name, self.ComType)\r\n if option.dataset.lower() == module_name.lower() :\r\n modelModule = importlib.import_module(\".\"+module_name)\r\n break\r\n \r\n # train data\r\n training_set = modelModule(train_df, clip_im_dir, matting_dir, transform, transform)\r\n train_loader = DataLoader(training_set, **params)\r\n \r\n \r\n #test data\r\n testing_set = modelModule(test_df, clip_im_dir, matting_dir, transform, transform)\r\n test_loader = DataLoader(testing_set, **params)\r\n \r\n return train_loader, test_loader", "def set_data():\r\n #if not os.path.exists(filepath):\r\n #download_data()\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, train, test = {}, {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n\r\n train['x'], train['y'] = convert_train(data['ntraindata'], data['ndim'])\r\n\r\n testdata = read(filepath + flist[-2])\r\n test['x'] = testdata['data']\r\n test['y'] = testdata['labels']\r\n\r\n data['train'], data['test'] = train, test\r\n save_pkl(data)", "def create_dataset(opt):\n data_loader = CustomDatasetDataLoader(opt)\n dataset = data_loader.load_data()\n return dataset", "def train(self, training_data, training_labels, validation_data, validation_labels):\n abstract", "def creates_data_loader():\n dataset_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=True)\n\n dataset_no_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=False)\n\n datasets_faces_split = train_val_test(dataset_faces, 0.2, 0.0)\n datasets_no_faces_split = train_val_test(dataset_no_faces, 0.2, 0.0)\n\n datasets = {}\n datasets[\"train\"] = datasets_faces_split[\"train\"] + \\\n datasets_no_faces_split[\"train\"]\n datasets[\"test\"] = datasets_no_faces_split[\"test\"]\n datasets[\"val\"] = datasets_faces_split[\"val\"] + \\\n datasets_no_faces_split[\"val\"]\n\n train_loader = DataLoader(dataset=datasets[\"train\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n\n val_loader = DataLoader(dataset=datasets[\"val\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n return train_loader, val_loader", "def _create_data():\n tf.logging.info(\"Create records..\")\n train, val, test = util.load_data(data_dir, FLAGS[\"is_aug\"])\n tf.logging.info(\"Dataset size: Train-{} Test-{} Val-{}\".format(len(train), len(test), len(val)))\n return train, val, test", "def load_data(batch_size=batch_size):\n trainset = LibriSpeechDataset(training_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds))\n testset = LibriSpeechDataset(validation_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds), stochastic=False)\n\n train_loader = DataLoader(trainset, batch_size=batch_size, num_workers=1, 
shuffle=True, drop_last=True)\n test_loader = DataLoader(testset, batch_size=1, num_workers=1, drop_last=True)\n\n return train_loader, test_loader", "def construct_data(paths=DEFAULT_PATHS, use_saved=True):\n if not verify_paths(paths):\n raise FileNotFoundError('Some of the required data files could not be '\n 'found. Before running the project, run '\n '`setup.sh` to create/download them.')\n\n # Paths to save or load the constructed datasets from\n saved_train = os.path.join(paths['dir_output'], 'train.pk')\n saved_test = os.path.join(paths['dir_output'], 'test.pk')\n\n # Load the data if possible\n if (os.path.exists(saved_train) and os.path.exists(saved_test)\n and use_saved):\n print('Found existing saved dataset; loading it...')\n with open(saved_train, mode='rb') as train_file:\n train = pickle.load(train_file)\n with open(saved_test, mode='rb') as test_file:\n test = pickle.load(test_file)\n return train, test\n\n print('Constructing dataset...')\n\n # Read in the .csv files and create DataFrames for train, test observations\n depths = pd.read_csv(paths['df_depths'], index_col='id')\n train = pd.read_csv(paths['df_train'], index_col='id', usecols=[0])\n train = train.join(depths)\n test = depths[~depths.index.isin(train.index)].copy()\n\n # (Training images)\n print('Reading training images...')\n path = paths['dir_train_images'] + '{}.png'\n train['image'] = [read_image(path.format(img))\n for img in tqdm(train.index)]\n\n # (Training masks)\n print('Reading training masks...')\n path = paths['dir_train_masks'] + '{}.png'\n train['mask'] = [read_image(path.format(img)) for img in tqdm(train.index)]\n\n # (Testing images)\n print('Reading test images...')\n path = paths['dir_test_images'] + '{}.png'\n test['image'] = [read_image(path.format(img)) for img in tqdm(test.index)]\n\n # Calculate the coverage for the training images\n # Then, bin the images into discrete classes corresponding to coverage\n train['coverage'] = train['mask'].map(np.sum) / pow(101, 2)\n train['cov_class'] = train['coverage'].map(\n lambda cov: np.int(np.ceil(cov * 10)))\n\n # Write to file\n print('Saving the constructed dataset...')\n try:\n with open(saved_train, mode='wb') as train_file:\n pickle.dump(train, train_file)\n with open(saved_test, mode='wb') as test_file:\n pickle.dump(test, test_file)\n except OSError:\n print('Could not save the data due to an occasional Python bug on '\n 'some systems. 
:( If this is happening on macOS, try running on '\n 'Linux instead.')\n\n return train, test", "def make_standard_loader(self, dataset):\n return torch.utils.data.DataLoader(\n dataset,\n batch_size=self.batch_size,\n shuffle=False,\n drop_last=False,\n pin_memory=not (cfg.DEBUG > 0),\n num_workers=self.num_workers,\n )", "def _init_al_dataset(self):\n\n self._init_dataset()\n\n train_dataset = self.datasets['train']\n\n dataset_size = len(train_dataset)\n self.budget = math.ceil(self.budget_frac*dataset_size)\n Sampler.__init__(self, config, self.budget) # TODO: Weird place to initialise this\n\n all_indices = set(np.arange(dataset_size))\n k_initial = math.ceil(len(all_indices)*self.initial_budget_frac)\n initial_indices = random.sample(list(all_indices), k=k_initial)\n\n sampler_init = data.sampler.SubsetRandomSampler(initial_indices) # need to sample from training dataset\n\n self.labelled_dataloader = data.DataLoader(train_dataset, sampler=sampler_init, batch_size=self.batch_size, drop_last=True)\n self.val_dataloader = data.DataLoader(self.datasets['valid'], batch_size=self.batch_size, drop_last=False)\n self.test_dataloader = data.DataLoader(self.datasets['test'], batch_size=self.batch_size, drop_last=False)\n\n return all_indices, initial_indices", "def __init__(self, opt, validation=False):\n\n\n BaseDataset.__init__(self, opt)\n phase = opt.phase\n if validation:\n phase = 'validation'\n self.dir_A = os.path.join(opt.dataroot, phase + 'A') # create a path '/path/to/data/trainA'\n self.dir_B = os.path.join(opt.dataroot, phase + 'B') # create a path '/path/to/data/trainB'\n\n self.A_paths, self.B_paths = make_paired_dataset(self.dir_A, self.dir_B, opt.max_dataset_size)\n\n self.A_size = len(self.A_paths) # get the size of dataset A\n self.B_size = len(self.B_paths) # get the size of dataset B\n btoA = self.opt.direction == 'BtoA'\n self.input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image\n self.output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image\n\n if opt.phase == 'train':\n tf_terms = [\n RandomHorizontalFlip(),\n Scale(opt.load_size),\n Rotate(2.0),\n RandomCrop(opt.crop_size)\n ]\n elif opt.phase == 'test':\n tf_terms = [\n CenterCrop(),\n Scale(opt.load_size)\n ]\n\n self.transforms = Compose(tf_terms)", "def autogen_dataset_with_test():\n return TabularDataset.autogen('tests/data/dummy_tabular/train.csv',\n test_path='tests/data/dummy_tabular_test/test.csv',\n seed=42,\n sep=',')", "def get_test_loader(test_dataset,\n batch_size,\n num_workers=4,\n pin_memory=False):\n data_loader = torchutils.DataLoader(\n test_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)\n return data_loader", "def prepare_dataset(data_path, test_size=0.2, validation_size=0.2):\r\n\r\n # load dataset\r\n if data_path.endswith('json'):\r\n X, y = load_data_from_json(data_path)\r\n else:\r\n X, y = load_data_from_fold(data_path)\r\n # create train, validation, test split\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)\r\n X_train, X_validation, y_train, y_validation = train_test_split(X_train, y_train, test_size=validation_size)\r\n\r\n # add an axis to nd array\r\n X_train = X_train[..., np.newaxis]\r\n X_test = X_test[..., np.newaxis]\r\n X_validation = X_validation[..., np.newaxis]\r\n\r\n return X_train, y_train, X_validation, y_validation, X_test, y_test", "def create_loader(self):\n # load data to memory.\n if 
self.is_cifar100:\n (x_train, y_train), (x_test,\n y_test) = tf.keras.datasets.cifar100.load_data()\n else:\n (x_train, y_train), (x_test,\n y_test) = tf.keras.datasets.cifar10.load_data()\n\n y_train = y_train.astype(np.int32)\n y_test = y_test.astype(np.int32)\n\n x_train, y_train = shuffle_dataset(x_train, y_train)\n n_probe = int(math.floor(x_train.shape[0] * FLAGS.probe_dataset_hold_ratio))\n\n # TODO(zizhaoz): add other noise types.\n if 'asymmetric' in self.dataset_name:\n assert 'cifar100' not in self.dataset_name, 'Asymmetric only has CIFAR10'\n (x_train, y_train, y_gold), (x_probe, y_probe) = load_asymmetric(\n x_train,\n y_train,\n noise_ratio=self.noise_ratio,\n n_val=n_probe,\n random_seed=FLAGS.seed)\n elif 'uniform' in self.dataset_name:\n (x_train, y_train, y_gold), (x_probe,\n y_probe) = load_train_val_uniform_noise(\n x_train,\n y_train,\n n_classes=self.num_classes,\n noise_ratio=self.noise_ratio,\n n_val=n_probe)\n else:\n assert self.dataset_name in ['cifar10', 'cifar100']\n\n if not self.split_probe and x_probe is not None:\n # Usually used for supervised comparison.\n tf.logging.info('Merge train and probe')\n x_train = np.concatenate([x_train, x_probe], axis=0)\n y_train = np.concatenate([y_train, y_probe], axis=0)\n y_gold = np.concatenate([y_gold, y_probe], axis=0)\n\n conf_mat = sklearn_metrics.confusion_matrix(y_gold, y_train)\n conf_mat = conf_mat / np.sum(conf_mat, axis=1, keepdims=True)\n tf.logging.info('Corrupted confusion matirx\\n {}'.format(conf_mat))\n x_test, y_test = shuffle_dataset(x_test, y_test)\n self.train_dataset_size = x_train.shape[0]\n self.val_dataset_size = x_test.shape[0]\n if self.split_probe:\n self.probe_size = x_probe.shape[0]\n\n input_tuple = (x_train, y_train.squeeze())\n self.train_dataflow = self.create_ds(input_tuple, is_train=True)\n self.val_dataflow = self.create_ds((x_test, y_test.squeeze()),\n is_train=False)\n if self.split_probe:\n self.probe_dataflow = self.create_ds((x_probe, y_probe.squeeze()),\n is_train=True)\n\n tf.logging.info('Init [{}] dataset loader'.format(self.dataset_name))\n verbose_data('train', x_train, y_train)\n verbose_data('test', x_test, y_test)\n if self.split_probe:\n verbose_data('probe', x_probe, y_probe)\n\n return self", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def __init__(self, data_dir: Path, config: Config):\n self.device = torch.device(\n \"cuda\" if torch.cuda.is_available() else \"cpu\"\n )\n\n training_path_list, ground_truth_path_list = get_file_paths(data_dir)\n\n X_train, X_test, y_train, y_test = self.train_test_split(\n training_path_list,\n ground_truth_path_list,\n test_portion=config.val_split,\n )\n\n train_dataset = TrainDataset(\n config, X_train, y_train, random_augmentation=True\n )\n val_dataset = TrainDataset(\n config, X_test, y_test, random_augmentation=False\n )\n\n self.train_loader = DataLoader(\n train_dataset,\n batch_size=config.train_batch_size,\n shuffle=True,\n pin_memory=True,\n )\n self.val_loader = DataLoader(\n val_dataset,\n batch_size=config.test_batch_size,\n # No shuffle as it won't make any difference\n pin_memory=True,\n )\n\n model = UNet(INPUT_CHANNELS, OUTPUT_CHANNELS, config)\n self.model = DataParallel(model).to(self.device)\n\n if config.loss == \"logit_bce\":\n loss_weight = (\n self._get_loss_weight() if config.balanced_loss else None\n )\n # Using logits directly is numerically more stable and efficient\n self.class_loss_fn = BCEWithLogitsLoss(pos_weight=loss_weight)\n elif config.loss == \"soft_dice\":\n 
self.class_loss_fn = soft_dice_loss\n\n self.texture_transform = get_texture_transform(config)\n self.shape_loss_fn = ContrastiveLoss(config.temperature)\n\n self.optim = Adam(\n self.model.parameters(),\n lr=config.learn_rate,\n weight_decay=config.weight_decay,\n )\n max_steps = config.epochs * len(self.train_loader)\n self.scheduler = OneCycleLR(\n self.optim,\n max_lr=config.max_learn_rate,\n total_steps=max_steps,\n )\n self.scaler = GradScaler(enabled=config.mixed_precision)\n\n # Used when dumping hyper-params to a file\n self.config = config\n\n # To store best acc achieved so far\n self.best_acc = 0.0", "def __init__(self, **kwargs):\n is_training = kwargs.get('is_training', True)\n rootfolder = kwargs['rootfolder']\n dtype = kwargs.get('dtype', np.float64)\n self._load_mnist(rootfolder, is_training, dtype)\n # normalize data.\n self._data /= 255.\n ndarraydata.NdarrayDataLayer.__init__(\n self, sources=[self._data, self._label], **kwargs)", "def define_training_data(self, train_sources, train_labels=None):\n logging.info(\"Defining training data for NNetModel...\")\n self.train_cols = []\n if train_labels is None:\n for source in train_sources:\n self.train_cols += self._read(source)\n else:\n for source, label in zip(train_sources, train_labels):\n self.train_cols += self._read(source, label)\n\n logging.info(\"NNetModel: Training data contains {} columns from {} sources\".format(len(self.train_cols), len(train_sources)))", "def init_data_generator(config_tuple, data_dir):\n\n (_preprocess_function, flags) = config_tuple\n rescale = 1. / 255 if _preprocess_function is None else None\n image_sizes = (flags.image_width, flags.image_height)\n batch_size = flags.batch_size\n # Configure test generator\n train_datagen = ImageDataGenerator(\n preprocessing_function=_preprocess_function,\n rescale=rescale,\n rotation_range=30,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True\n )\n # Configure test data flow\n train_generator = train_datagen.flow_from_directory(\n data_dir,\n target_size=image_sizes,\n batch_size=batch_size,\n )\n\n return train_generator", "def setUp(self):\n self.dataset = self.dataset_cls()", "def _train_model(\n self,\n dataset: DatasetEntity,\n ):\n logger.info(\"init data cfg.\")\n self._data_cfg = ConfigDict(data=ConfigDict())\n\n for cfg_key, subset in zip(\n [\"train\", \"val\", \"unlabeled\"],\n [Subset.TRAINING, Subset.VALIDATION, Subset.UNLABELED],\n ):\n subset = get_dataset(dataset, subset)\n if subset and self._data_cfg is not None:\n self._data_cfg.data[cfg_key] = ConfigDict(\n otx_dataset=subset,\n labels=self._labels,\n )\n\n self._is_training = True\n\n self._init_task()\n\n cfg = self.configure(True, None)\n logger.info(\"train!\")\n\n timestamp = time.strftime(\"%Y%m%d_%H%M%S\", time.localtime())\n\n # Environment\n logger.info(f\"cfg.gpu_ids = {cfg.gpu_ids}, distributed = {cfg.distributed}\")\n env_info_dict = collect_env()\n env_info = \"\\n\".join([(f\"{k}: {v}\") for k, v in env_info_dict.items()])\n dash_line = \"-\" * 60 + \"\\n\"\n logger.info(f\"Environment info:\\n{dash_line}{env_info}\\n{dash_line}\")\n\n # Data\n datasets = [build_dataset(cfg.data.train)]\n\n if self._train_type == TrainType.Semisupervised:\n # forward the knowledge of num iters per epoch to model for filter loss\n bs_per_gpu = cfg.data.train_dataloader[\"samples_per_gpu\"]\n actual_bs = bs_per_gpu * torch.distributed.get_world_size() if cfg.distributed else bs_per_gpu\n cfg.model.num_iters_per_epoch = 
math.ceil(len(datasets[0]) / actual_bs)\n\n # FIXME: Currently segmentor does not support multi batch evaluation.\n # For the Self-SL case, there is no val data. So, need to check the\n\n if \"val\" in cfg.data and \"val_dataloader\" in cfg.data:\n cfg.data.val_dataloader[\"samples_per_gpu\"] = 1\n\n # Target classes\n if \"task_adapt\" in cfg:\n target_classes = cfg.task_adapt.final\n else:\n target_classes = datasets[0].CLASSES\n\n # Metadata\n meta = dict()\n meta[\"env_info\"] = env_info\n meta[\"seed\"] = cfg.seed\n meta[\"exp_name\"] = cfg.work_dir\n if cfg.checkpoint_config is not None:\n cfg.checkpoint_config.meta = dict(\n mmseg_version=__version__ + get_git_hash()[:7],\n CLASSES=target_classes,\n )\n\n # Model\n model = self.build_model(cfg, fp16=cfg.get(\"fp16\", False), is_training=self._is_training)\n model.train()\n model.CLASSES = target_classes\n\n if cfg.distributed:\n convert_sync_batchnorm(model)\n\n validate = bool(cfg.data.get(\"val\", None))\n\n if self._hyperparams.learning_parameters.auto_adapt_batch_size != BatchSizeAdaptType.NONE:\n train_func = partial(train_segmentor, meta=deepcopy(meta), model=deepcopy(model), distributed=False)\n adapt_batch_size(\n train_func,\n cfg,\n datasets,\n isinstance(self, NNCFBaseTask), # nncf needs eval hooks\n not_increase=(self._hyperparams.learning_parameters.auto_adapt_batch_size == BatchSizeAdaptType.SAFE),\n )\n\n train_segmentor(\n model,\n datasets,\n cfg,\n distributed=cfg.distributed,\n validate=validate,\n timestamp=timestamp,\n meta=meta,\n )\n\n # Save outputs\n output_ckpt_path = os.path.join(cfg.work_dir, \"latest.pth\")\n best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, \"best_mDice_*.pth\"))\n if len(best_ckpt_path) > 0:\n output_ckpt_path = best_ckpt_path[0]\n best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, \"best_mIoU_*.pth\"))\n if len(best_ckpt_path) > 0:\n output_ckpt_path = best_ckpt_path[0]\n return dict(\n final_ckpt=output_ckpt_path,\n )", "def setup(self, stage: Optional[str] = None) -> None:\n if self.dataset_type == \"hdf5\":\n DS = SegmentationHDF5Dataset\n else:\n DS = SegmentationFolderDataset\n\n self.trainset = DS(\n path=self._get_path(\"train\", self.dataset_type, is_mask=False),\n mask_path=self._get_path(\"train\", self.dataset_type, is_mask=True),\n img_transforms=self.img_transforms,\n inst_transforms=self.inst_transforms,\n return_sem=False,\n normalization=self.normalization,\n **self.kwargs,\n )\n\n self.validset = DS(\n path=self._get_path(\"valid\", self.dataset_type, is_mask=False),\n mask_path=self._get_path(\"valid\", self.dataset_type, is_mask=True),\n img_transforms=self.img_transforms,\n inst_transforms=self.inst_transforms,\n return_sem=False,\n normalization=self.normalization,\n **self.kwargs,\n )\n\n self.testset = DS(\n path=self._get_path(\"test\", self.dataset_type, is_mask=False),\n mask_path=self._get_path(\"test\", self.dataset_type, is_mask=True),\n img_transforms=self.img_transforms,\n inst_transforms=self.inst_transforms,\n return_sem=False,\n normalization=self.normalization,\n **self.kwargs,\n )", "def train_test_loaders(dataset, validation_ratio=0.2, **kwargs):\n dataset_size = len(dataset)\n test_size = int(np.floor(validation_ratio * dataset_size))\n train_size = dataset_size - test_size\n print('TRAIN SIZE {}'.format(train_size))\n print('TEST SIZE {}'.format(test_size))\n train_dataset, test_dataset = random_split(dataset, (train_size, test_size),\n generator=torch.Generator().manual_seed(RANDOM_SEED))\n train_loader = 
torch.utils.data.DataLoader(train_dataset, **kwargs)\n test_loader = torch.utils.data.DataLoader(test_dataset, **kwargs)\n return train_loader, test_loader", "def get_test_dataset_DataLoader(self):\n test_info = self.get_test_DataLoader_info()\n name = test_info[\"name\"]\n task = test_info[\"task\"]\n data_dir = test_info[\"data_dir\"]\n hdf5_file = test_info[\"hdf5_file\"]\n\n data_loader = DataLoader(name, task, data_dir, hdf5_file)\n\n return data_loader, self.dataset, self.data_fields", "def _setup_training(self, params, **kwargs):\n model_params = params.permute_training_on_top().model\n\n model_kwargs = {**model_params.fixed, **model_params.variable}\n\n model = self.model_cls(**model_kwargs)\n\n training_params = params.permute_training_on_top().training\n losses = training_params.nested_get(\"losses\")\n optimizer_cls = training_params.nested_get(\"optimizer_cls\")\n optimizer_params = training_params.nested_get(\"optimizer_params\")\n train_metrics = training_params.nested_get(\"train_metrics\", {})\n lr_scheduler_cls = training_params.nested_get(\"lr_sched_cls\", None)\n lr_scheduler_params = training_params.nested_get(\"lr_sched_params\",\n {})\n val_metrics = training_params.nested_get(\"val_metrics\", {})\n\n # necessary for resuming training from a given path\n save_path = kwargs.pop(\"save_path\", os.path.join(\n self.save_path,\n \"checkpoints\",\n \"run_%02d\" % self._run))\n\n return self.trainer_cls(\n network=model,\n save_path=save_path,\n losses=losses,\n key_mapping=self.key_mapping,\n optimizer_cls=optimizer_cls,\n optimizer_params=optimizer_params,\n train_metrics=train_metrics,\n val_metrics=val_metrics,\n lr_scheduler_cls=lr_scheduler_cls,\n lr_scheduler_params=lr_scheduler_params,\n optim_fn=self._optim_builder,\n save_freq=self.checkpoint_freq,\n **kwargs\n )", "def setUpClass(cls):\n ### necessary since one instance per test case is created and pid and testparams\n ### need to be shared between instances\n cls.data_provider = DBSDataProvider()", "def load_data(self, training_data):\n \"\"\"training data format [(instance, label),(instance, label),...]\"\"\"\n self.training_data = training_data", "def setUpTestData(cls):\n cls.board = Board.objects.create(name = DICT.get('board_name') )\n\n cls.task = Task.objects.create(head = DICT.get('task_head'),\n description = DICT.get('task_description'),\n board = cls.board )", "def train_dataloader(self):\r\n\r\n # transformation\r\n train_transform = Compose(\r\n [\r\n ApplyTransformToKey(\r\n key='video',\r\n transform=Compose(\r\n [\r\n UniformTemporalSubsample(8),\r\n Lambda(lambda x: x / 255.0),\r\n Normalize((0.45, 0.45, 0.45), (0.225, 0.225, 0.225)),\r\n RandomShortSideScale(min_size=256, max_size=320),\r\n RandomCrop(244),\r\n RandomHorizontalFlip(p=0.5),\r\n ]\r\n )\r\n )\r\n ]\r\n )\r\n\r\n train_dataset = pv.data.Kinetics(\r\n data_path=os.path.join(self._DATA_PATH, \"train\"),\r\n clip_sampler=pv.data.make_clip_sampler(\"random\", self._CLIP_DURATION),\r\n decode_audio=False,\r\n transform=train_transform\r\n )\r\n return torch.utils.data.DataLoader(\r\n train_dataset,\r\n batch_size=self._BATCH_SIZE,\r\n num_workers=self._NUM_WORKERS,\r\n )", "def load_dataset():\n\n\n train_dd_loader = DailyDialogLoader(PATH_TO_TRAIN_DATA, load=False)\n train_dataloader = DataLoader(train_dd_loader, batch_size=16, shuffle=True, num_workers=0,\n collate_fn=PadCollate())\n\n test_dd_loader = DailyDialogLoader(PATH_TO_TEST_DATA, load=True)\n test_dataloader = DataLoader(test_dd_loader, batch_size=1, shuffle=False, 
num_workers=0,\n collate_fn=PadCollate())\n\n assert train_dd_loader.vocabulary.n_words == test_dd_loader.vocabulary.n_words\n\n return train_dd_loader, train_dataloader, test_dataloader", "def register_data(self, *, train_loader=None, validation_loader=None):\n raise NotImplementedError()", "def __init__(self):\n print ('Initializing Data reader object...')\n data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels = self.readDataFromFile()\n test_10k_x, test_10k_y, training_55k_x, training_55k_y, validation_5k_x, validation_5k_y = self.dataTransform(\n data_Test_Image, data_Test_Labels, data_Train_Images, data_Train_Labels)\n self.train = zip(training_55k_x, training_55k_y)\n self.valid = zip(validation_5k_x, validation_5k_y)\n self.test = zip(test_10k_x, test_10k_y)\n\n self.train_position = 0\n print ('Initialized!')", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def create_dataset(self, config, rng):\n raise NotImplementedError()", "def prepare_dataset(self, dataset_type: str) -> Dataset:\n\n logger.info(\"Creating features from dataset file at %s\", self.hparams.data_dir)\n\n if dataset_type == \"train\":\n dataset = self.processor.get_train_dataset(self.hparams.data_dir, self.hparams.train_file_name)\n elif dataset_type == \"dev\":\n dataset = self.processor.get_dev_dataset(self.hparams.data_dir, self.hparams.dev_file_name)\n elif dataset_type == \"test\":\n dataset = self.processor.get_test_dataset(self.hparams.data_dir, self.hparams.test_file_name)\n else:\n raise ValueError(f\"{dataset_type} do not support. 
[train|dev|test]\")\n logger.info(f\"Prepare {dataset_type} dataset (Count: {len(dataset)}) \")\n return dataset", "def _load_test_data(self):\n self._save_test_data()", "def prepare_data(dataset, train_ratio=0.8, input_dim=None, seed=10):\n # Retrieve main path of project\n dirname = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\n # Download and store dataset at chosen location\n if dataset == 'Cora' or dataset == 'PubMed' or dataset == 'Citeseer':\n path = os.path.join(dirname, 'data')\n data = Planetoid(path, name=dataset, split='full')[0]\n data.name = dataset\n data.num_classes = (max(data.y)+1).item()\n # data.train_mask, data.val_mask, data.test_mask = split_function(data.y.numpy())\n # data = Planetoid(path, name=dataset, split='public', transform=T.NormalizeFeatures(), num_train_per_class=20, num_val=500, num_test=1000)\n\n elif dataset == 'Amazon':\n path = os.path.join(dirname, 'data', 'Amazon')\n data = Amazon(path, 'photo')[0]\n data.name = dataset\n data.num_classes = (max(data.y)+1).item()\n data.train_mask, data.val_mask, data.test_mask = split_function(\n data.y.numpy(), seed=seed)\n # Amazon: 4896 train, 1224 val, 1530 test\n \n elif dataset in ['syn1', 'syn2', 'syn4', 'syn5']: \n data = synthetic_data(\n dataset, dirname, train_ratio, input_dim)\n \n elif dataset == 'syn6':\n data = gc_data(dataset, dirname, train_ratio)\n\n elif dataset == 'Mutagenicity':\n data = gc_data(dataset, dirname, train_ratio)\n\n return data", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n self._results_ = None", "def get_data_loader_from_data(cls, batch_size, X, Y, **kwargs):\n X_torch = torch.from_numpy(X).float()\n\n if (\n \"classification_problem\" in kwargs\n and kwargs[\"classification_problem\"] == False\n ):\n Y_torch = torch.from_numpy(Y).float()\n else:\n Y_torch = torch.from_numpy(Y).long()\n dataset = TensorDataset(X_torch, Y_torch)\n kwargs.pop(\"classification_problem\", None)\n return DataLoader(dataset, batch_size=batch_size, **kwargs)", "def prepare_datastream(data_name,\n batch_size):\n train_set = H5PYDataset(data_name, which_sets=(\n \"train\",), load_in_memory=True)\n test_set = H5PYDataset(data_name, which_sets=(\n \"test\", ), load_in_memory=True)\n\n train_scheme = SequentialScheme(train_set.num_examples,\n batch_size=batch_size)\n test_scheme = SequentialScheme(test_set.num_examples,\n batch_size=batch_size)\n stream_train = DataStream.default_stream(train_set,\n iteration_scheme=train_scheme)\n stream_test = DataStream.default_stream(test_set,\n iteration_scheme=test_scheme)\n\n return train_set, stream_train, test_set, stream_test", "def __init__(self, source_data: Dict[str, dict], verbose: bool = True):\n self.verbose = verbose\n self._validate_source_data(source_data=source_data, verbose=self.verbose)\n self.data_interface_objects = {\n name: data_interface(**source_data[name])\n for name, data_interface in self.data_interface_classes.items()\n if name in source_data\n }", "def create_model_and_data(dataset_name: str, use_synthetic_data: bool) ->...:\n # This `train_batch_size` is only used in training clients, not validation and\n # test clients, which are the ones we used to evaluation the personalization\n # performance. 
For validation and test clients, batching is applied after\n # splitting their local data into a personalization set and an eval set (i.e.,\n # inside `knn_per_avg_clients` above).\n unused_batch_size = 20\n if dataset_name == 'emnist':\n return emnist.create_model_and_data(\n num_local_epochs=1,\n train_batch_size=unused_batch_size,\n use_synthetic_data=use_synthetic_data)\n elif dataset_name == 'stackoverflow':\n return stackoverflow.create_model_and_data(\n num_local_epochs=1,\n train_batch_size=unused_batch_size,\n use_synthetic_data=use_synthetic_data)\n elif dataset_name == 'landmark':\n return landmark.create_model_and_data(\n num_local_epochs=1,\n train_batch_size=unused_batch_size,\n use_synthetic_data=use_synthetic_data)\n elif dataset_name == 'ted_multi':\n return ted_multi.create_model_and_data(\n num_local_epochs=1,\n train_batch_size=unused_batch_size,\n use_synthetic_data=use_synthetic_data)\n raise ValueError(f'Accepted dataset names: {constants.DATASET_NAMES}, but '\n f'found {dataset_name}. Please provide a valid name.')", "def data_loader(\n self, batch_size: int = 1, iter_steps: int = 0, batch_as_list: bool = True\n ) -> DataLoader:\n data = self.data\n datasets = []\n\n for _, dat in data.items():\n datasets.append(dat.dataset())\n\n if len(datasets) < 1:\n raise FileNotFoundError(\n \"no datasets available for this model to create a loader from\"\n )\n\n return DataLoader(\n *datasets,\n batch_size=batch_size,\n iter_steps=iter_steps,\n batch_as_list=batch_as_list,\n )", "def setup_task(cls, args, **kwargs):\n args.left_pad_source = options.eval_bool(args.left_pad_source)\n args.left_pad_target = options.eval_bool(args.left_pad_target)\n if getattr(args, 'raw_text', False):\n utils.deprecation_warning('--raw-text is deprecated, please use --dataset-impl=raw')\n args.dataset_impl = 'raw'\n elif getattr(args, 'lazy_load', False):\n utils.deprecation_warning('--lazy-load is deprecated, please use --dataset-impl=lazy')\n args.dataset_impl = 'lazy'\n\n paths = args.data.split(':')\n assert len(paths) > 0\n # find language pair automatically\n if args.source_lang is None or args.target_lang is None:\n args.source_lang, args.target_lang = data_utils.infer_language_pair(paths[0])\n if args.source_lang is None or args.target_lang is None:\n raise Exception('Could not infer language pair, please provide it explicitly')\n\n # load dictionaries\n src_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.source_lang)), sde=args.sde)\n tgt_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.target_lang)))\n assert src_dict.pad() == tgt_dict.pad()\n assert src_dict.eos() == tgt_dict.eos()\n assert src_dict.unk() == tgt_dict.unk()\n print('| [{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))\n print('| [{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))\n\n return cls(args, src_dict, tgt_dict)", "def __init__(self, train, test, head):\n\n # Save the URL's, in case we want it later\n self.train_url = train\n self.test_url = test\n self.head_url = head\n\n # Grab the data, and store the internal variables\n self._get_data()" ]
[ "0.66052645", "0.6420619", "0.63613456", "0.63351035", "0.6321313", "0.6239802", "0.62257236", "0.6179237", "0.61755306", "0.6175438", "0.6154874", "0.60897607", "0.6085115", "0.60844827", "0.60654354", "0.60575354", "0.6012859", "0.60088485", "0.59914017", "0.5959267", "0.5948421", "0.5947667", "0.5939688", "0.5919652", "0.5887488", "0.5883639", "0.5863985", "0.58609056", "0.5852806", "0.5849076", "0.5844402", "0.5839556", "0.5830672", "0.5824685", "0.58188844", "0.58181536", "0.5815899", "0.58097386", "0.5795898", "0.57926226", "0.57773507", "0.5776835", "0.57742304", "0.5771363", "0.5768194", "0.5759629", "0.57595265", "0.57595265", "0.57595265", "0.57595265", "0.57593393", "0.57420313", "0.5741579", "0.5725294", "0.57245946", "0.57220906", "0.5718828", "0.57155704", "0.57136005", "0.5713099", "0.57057947", "0.57037383", "0.5700858", "0.56978935", "0.5696931", "0.5692153", "0.56825775", "0.5680026", "0.5670646", "0.5667828", "0.5666794", "0.5662079", "0.565489", "0.5635538", "0.56329864", "0.5628696", "0.5623923", "0.56234103", "0.5602547", "0.5578616", "0.5574013", "0.55721575", "0.55682516", "0.55481803", "0.5547373", "0.5547347", "0.55350643", "0.5534396", "0.55248004", "0.552381", "0.55236804", "0.5523636", "0.5517526", "0.55169016", "0.5516358", "0.550801", "0.55054224", "0.55047643", "0.55005676", "0.549148", "0.5486542" ]
0.0
-1
This property returns the test data, and loads the test data if it doesn't exist. Note that this function returns the test data and labels in the form ([MPS input size, batch, other dimensions], [batch, classifications]) in accordance with how it is used in the MPS and MPSOptimizer classes. If the data is required in the form ([batch, MPS input size, other dimensions], [batch, classifications]), the variable _test_data should be used
def test_data(self): if self._test_data is None: self._load_test_data() if self._swapped_test_data is None: self._swapped_test_data = {} for key, value in self._test_data.items(): self._swapped_test_data[key] = value return self._swapped_test_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_data(self):\n\n return self.__valid_data, self.__valid_labels", "def get_data():\r\n if not path_validation(MODEL_PATH, read_access=True):\r\n exit(0) \r\n if not path_validation(TEST_DATA_PATH, read_access=True):\r\n exit(0) \r\n if not path_validation(TEST_LABEL_PATH, read_access=True):\r\n exit(0) \r\n\r\n params = joblib.load(MODEL_PATH)\r\n test_images = np.load(TEST_DATA_PATH)\r\n test_labels = np.load(TEST_LABEL_PATH)\r\n\r\n # Addition of bias in test set\r\n test_images = np.insert(test_images, 0, 1, axis=1)\r\n\r\n return params, test_images, test_labels", "def load_testing_data(self) -> List[np.ndarray]:\n input_data = self._load_set(config.TEST_DIR, False)\n return input_data", "def _load_test_data(self):\n\n self.test_loader = data.Test_loader(self.N_max, self.n_per_conn,\n self.data_path, self.device)\n\n self.test_loader.load_data()\n\n # load mean and std from train\n scc_mean_std = np.loadtxt(\n os.path.join(self.data_path, 'scc_mean_std.csv'), delimiter=',')\n self.mean = torch.Tensor(scc_mean_std[0])\n self.std = torch.Tensor(scc_mean_std[1])", "def test_data(self):\n return self._test_data", "def test(self) -> tf.contrib.data.Dataset:\n return self.__test_dataset", "def load_test_data():\r\n X_test = np.load('data/test/X_test.npy')\r\n scaling_test = np.load('data/test/scaling_test.npy')\r\n ids_test = np.load('data/test/ids_test.npy')\r\n y_test = np.load('data/test/y_test.npy')\r\n\r\n seed = np.random.randint(1, 10e6)\r\n np.random.seed(seed)\r\n np.random.shuffle(X_test)\r\n np.random.seed(seed)\r\n np.random.shuffle(scaling_test)\r\n np.random.seed(seed)\r\n np.random.shuffle(ids_test)\r\n np.random.seed(seed)\r\n np.random.shuffle(y_test)\r\n\r\n return X_test, scaling_test, ids_test, y_test", "def fetch_test_batch(self):\n data = self.data\n # size of train dataset\n num_train = data['train'].shape[0]\n image_size = self.image_size\n # index of test image that is being classified in this batch\n batch_index = self.test_batch_index\n\n # create batch array\n X = np.zeros([2 * num_train, image_size[0], image_size[1]], dtype='uint8')\n # first half are all training images\n X[:num_train, ...] = data['train']\n # second half is copy of a batch_index-th test image to be classified\n X[num_train:, ...] 
= data['test'][batch_index, ...]\n # true label is extracted from array of indexes where particular class start\n test_label = np.argmax(self.starts['test']>batch_index) - 1\n\n # rescale intensities and center\n X = X / 255.0\n X = X - self.mean_train\n\n X = X[:, np.newaxis]\n X = X.astype(\"float32\")\n\n self.test_batch_index += 1\n\n X = Variable(torch.from_numpy(X)).view(2 * num_train, self.image_size[0], self.image_size[1])\n\n # stack batch by second axis to [batch size, 2 (pair to be compared), image height, image width]\n X1 = X[:num_train] # (B, h, w)\n X2 = X[num_train:] # (B, h, w)\n\n X = torch.stack([X1, X2], dim=1) # (B, 2, h, w)\n\n if use_cuda:\n X = X.cuda()\n # using test dataset size and current index for controlling test loop in test_model.py\n return X, test_label, data['test'].shape[0], self.test_batch_index", "def getProcessedData(self, data, labels):\n if self.underSamplePercentage != 0:\n data, labels = self.underSample(data, labels)\n if self.beta != 0: \n synData, synLabels = self.adaSynAdd(data, labels)\n if synData is not None:\n data, labels = combineTestSets(data, labels, synData, synLabels)\n return data, labels", "def get_data(self):\n if self.config['model'] == 'vggnet':\n if self.is_training:\n return self.data.shuffle(self.shuffle).batch(self.batch_size)\n elif self.is_testing:\n return self.data.batch(self.batch_size)\n elif not self.is_testing and not self.is_training:\n return self.data.batch(self.batch_size)\n else:\n raise NotImplementedError('In dataset.py: default input not specified for this model!')", "def get_data(self):\n return self.X_train, self.X_test, self.y_train, self.y_test", "def test_dataloader(self) -> data.DataLoader:\n return data.DataLoader(dataset=self.datasets['test'], batch_size=self.batch_size,\n num_workers=self.num_workers, shuffle=False, pin_memory=False)", "def get_test_dataset_DataLoader(self):\n test_info = self.get_test_DataLoader_info()\n name = test_info[\"name\"]\n task = test_info[\"task\"]\n data_dir = test_info[\"data_dir\"]\n hdf5_file = test_info[\"hdf5_file\"]\n\n data_loader = DataLoader(name, task, data_dir, hdf5_file)\n\n return data_loader, self.dataset, self.data_fields", "def load_test_dataset(self):\n test_data_path = \"testdata\"\n root = Path(test_data_path)\n classes = sorted([j.name.split('/')[-1] for j in root.iterdir()])\n print(classes)\n\n transform = transforms.Compose([\n transforms.Resize(300),\n transforms.RandomHorizontalFlip(),\n transforms.CenterCrop(250),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.6071, 0.4828, 0.3934], std=[0.2845, 0.3187, 0.3240])\n ])\n\n dataset = datasets.ImageFolder(test_data_path, transform=transform)\n testloader = DataLoader(dataset, batch_size=4, shuffle=True)\n print(\"Loaded data\")\n return testloader", "def getTestData(self):\n raise NotImplementedError", "def prepare_data(self):\n # Set up the path\n self.path_target_train = os.path.join(self.data_dir, self.train_path_file_target + \".pkl\")\n self.path_target_test = os.path.join(self.data_dir, self.test_path_file_target + \".pkl\")\n\n if not os.path.exists(self.path_target_train) or not os.path.exists(self.path_target_test):\n # Create vocabularies of the appropriate sizes.\n self.create_vocabulary(self.train_path_file)\n\n # Create token ids for the training data.\n input_train_path = self.train_path_file\n target_train_path = self.train_path_file_target\n train_input, train_input_length, train_labels = self.data_to_token_ids(input_train_path, target_train_path)\n\n # Create token ids for 
the validation data.\n input_test_path = self.test_path_file\n target_test_path = self.test_path_file_target\n test_input, test_input_length, _ = self.data_to_token_ids(input_test_path, target_test_path, train=False)\n\n # Collect data into a list\n training_data = [train_input, train_input_length, train_labels]\n test_data = [test_input, test_input_length]\n\n # Save all the data\n with open(self.path_target_train, 'wb') as f:\n pickle.dump(training_data,f)\n with open(self.path_target_test, 'wb') as f:\n pickle.dump(test_data, f)\n else:\n # Load data\n with open(self.path_target_train, 'rb') as f:\n training_data = pickle.load(f)\n with open(self.path_target_test, 'rb') as f:\n test_data = pickle.load(f)\n\n # Initialize vocabulary\n self.initialize_vocabulary()\n\n # Convert list into a numpy array - train data\n train_input = pd.DataFrame(training_data[0]).fillna(value=0).astype(int).values\n train_length_input = np.array(training_data[1], dtype=int)\n train_labels = np.array(training_data[2], dtype=int)\n\n # Convert list into a numpy array - test data\n test_input = pd.DataFrame(test_data[0]).fillna(value=0).astype(int).values\n test_length_input = pd.DataFrame(test_data[1]).fillna(value=0).astype(int).values\n\n # Printing maximum length\n print(\"Shape of the input training matrix {}\".format(str(train_input.shape)))\n print(\"Shape of the input test matrix {}\".format(str(test_input.shape)))\n\n # Copy the files\n self.copy_files()\n\n # Return output\n return train_input, train_length_input, train_labels, test_input, test_length_input", "def _load_test_data(self):\n self._save_test_data()", "def load(self):\n\n X_train, y_train, X_test, y_test, variable_types, name = _load_data(\n self.task_id)\n\n self.X_train = X_train\n self.y_train = y_train\n self.X_test = X_test\n self.y_test = y_test\n self.variable_types = variable_types\n self.name = name\n\n return self.X_train, self.y_train, self.X_test, self.y_test", "def getTrainingData(self):\n raise NotImplementedError", "def __init__(self, data_path):\r\n\t\tfile_names = ['data_batch_%d' % i for i in range(1,6)]\r\n\t\tfile_names.append('test_batch')\r\n\r\n\t\tX = []\r\n\t\ty = []\r\n\t\tfor file_name in file_names:\r\n\t\t\twith open(data_path + file_name) as fin:\r\n\t\t\t\tdata_dict = cPickle.load(fin)\r\n\t\t\tX.append(data_dict['data'].ravel())\r\n\t\t\ty = y + data_dict['labels']\r\n\r\n\t\tself.X = np.asarray(X).reshape(60000, 32*32*3)\r\n\t\tself.y = np.asarray(y)\r\n\r\n\t\tfin = open(data_path + 'batches.meta')\r\n\t\tself.LABEL_NAMES = cPickle.load(fin)['label_names']\r\n\t\tfin.close()", "def load_data(self):\n self.data = self.read_var(self.datavar)\n self.test_shape(self.datavar, self.data.shape, 2)", "def test_dataloader(self) -> DataLoader:\n return DataLoader(\n self.test_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=False,\n num_workers=multiprocessing.cpu_count(),\n )", "def load_data_and_labels(self):\n gen = image.ImageDataGenerator()\n target_size = (224,224)\n if self.preprocess:\n print('Preprocessing data...')\n if not os.path.isdir(self.pproc_dir()):\n os.mkdir(self.pproc_dir())\n \n batch_arr = []\n for ld,segment in [(self.train_dir(), 'train'),\n (self.valid_dir(), 'valid')]:\n # TODO(ness): segment = os.basename(ld)\n flowgen = gen.flow_from_directory(\n ld,\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1)\n # Save the batches using method defined in utils.py\n data = np.concatenate([flowgen.next() for i in range(flowgen.n)])\n batches_dir = self.pproc_dir() 
+ segment + '-bc'\n save_array(batches_dir, data)\n \n # Save the classes.\n cls_dir = self.pproc_dir() + segment + '-cl'\n save_array(cls_dir, flowgen.classes)\n \n batch_arr.append((data, flowgen.classes, flowgen.class_indices))\n \n # Set the data.\n self.training_data = batch_arr[0][0]\n self.validation_data = batch_arr[1][0]\n \n # Classes are zero-indexed and represent a category in\n # numerical form. So if the classes are 'dog' and 'cat',\n # the possible class values will be 0 and 1.\n self.trn_classes = batch_arr[0][1]\n self.val_classes = batch_arr[1][1]\n \n # Labels are the one-hot encoded (i.e. categorical)\n # version of the classes. In other words, if there are\n # 5 classes and an element belongs to class 2,\n # its label will be [0,0,1,0,0] (index 1).\n self.training_labels = to_categorical(batch_arr[0][1])\n self.validation_labels = to_categorical(batch_arr[1][1])\n \n # Class indices are dictionaries of the form\n # {'category_name': 0, 'category_name_2: 1}. They\n # make the mapping between numerical class indices and\n # a human-readable category name. They are (should be...)\n # the same for validation and training, so only load them\n # once, after sanity checking.\n self.cindices = batch_arr[0][2]\n print('Done preprocessing.')\n else:\n print('Loading data...')\n # Load the pre-saved data using methods defined in utils.py. See\n # preprocessing branch for the meaning of the data.\n self.training_data = load_array(self.pproc_dir() + 'train-bc')\n self.validation_data = load_array(self.pproc_dir() + 'valid-bc')\n self.trn_classes = load_array(self.pproc_dir() + 'train-cl')\n self.val_classes = load_array(self.pproc_dir() + 'valid-cl')\n self.training_labels = to_categorical(self.trn_classes)\n self.validation_labels = to_categorical(self.val_classes)\n \n # To get the class indices, we create the generator. 
It's cheap to\n # run since it doesn't actually load all the data.\n flowgen = gen.flow_from_directory(\n self.train_dir(),\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1) \n self.cindices = flowgen.class_indices\n print('Done loading.')", "def _loadTest(self, features, labels):\n\t\tself.testX_, self.testY_, self.testLabel_ = self.__load(features, labels)", "def _load_data(self):\n self.mapper = Mapper()\n self.mapper.generate_vocabulary(self.review_summary_file)\n self.X_fwd, self.X_bwd, self.Y = self.mapper.get_tensor(reverseflag=True)\n # Store all the mapper values in a dict for later recovery\n self.mapper_dict = dict()\n self.mapper_dict['seq_length'] = self.mapper.get_seq_length()\n self.mapper_dict['vocab_size'] = self.mapper.get_vocabulary_size()\n self.mapper_dict['rev_map'] = self.mapper.get_reverse_map()\n # Split into test and train data\n self._split_train_tst()", "def get_data_train(self):\n return self.get_data(self.file_train, self.batch)", "def test_set(self):\n if self._testset is None: # loads the data to memory once and when requested.\n testset_raw = self.read_dataset(self._testset_path)\n testset_spacy = self.read_spacy_pickle(self._testset_spacy_path)\n self._testset = pd.concat([testset_raw, testset_spacy], axis=1)\n\n self._testset['language'] = self._language\n self._testset['dataset_name'] = self._dataset_name\n\n return self._testset", "def test_setup(self, test_data: list=None):\n print(\"[dataset]: using test setup ...\")\n self.vocabulary = [\"empty\"]\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=\"bert\", test=True)\n return", "def get_data():\n\n pathxtrain = sys.argv[1]\n pathxtest = sys.argv[2]\n pathlabeltrain = sys.argv[3]\n pathlabeltest = sys.argv[4]\n\n xtrain = p.read_csv(pathxtrain, header=None)\n xtest = p.read_csv(pathxtest, header=None)\n label_train = p.read_csv(pathlabeltrain, header=None)\n label_test = p.read_csv(pathlabeltest, header=None)\n\n xtrain_mx = xtrain.values\n xtest_mx = xtest.values\n\n label_train = label_train.values.reshape(label_train.shape[0])\n label_test = label_test.values.reshape(label_test.shape[0])\n\n return xtrain_mx, xtest_mx, label_train, label_test", "def get_test(self, preprocess=False):\n return self._dataset('test', self._directory, 'sharded_test_0shifted_affnist.tfrecords')", "def test(self, dataset):\n test_accuracy = 0\n test_loss = 0\n num_examples_tested = 0\n # Put model into evaluation mode\n self.model.eval()\n for num, batch in enumerate(dataset.loader):\n xs, ys = batch\n batch_size = len(xs)\n num_examples_tested += batch_size\n iloss, iaccuracy = self.model(xs, ys)\n test_loss += iloss.cpu().data.numpy().item() * batch_size\n test_accuracy += iaccuracy.cpu().data.numpy().item() * batch_size\n test_accuracy = test_accuracy / num_examples_tested\n test_loss = test_loss / num_examples_tested\n # Return accuracy and loss for this model on the test set\n return test_accuracy, test_loss", "def test(self, dataset):\n model_path = os.path.join(self.check_point, 'model.pt')\n if not os.path.exists(model_path):\n raise Exception('Cannot find %s.' 
%model_path)\n \n self.model = torch.load(model_path)\n _, _, stats, outputs = self._check_PSNR(dataset, is_test=True)\n return stats, outputs", "def load_data(self) -> tuple:\n label_num = {}\n data_set = pathlib.Path(self.path)\n data = []\n\n # create the label lookup dict for verifcation later\n for i, v in enumerate(data_set.iterdir()):\n label_num[v.name] = i\n self.labels[i] = v.name\n # end\n\n # read images\n for img_path in data_set.rglob(\"*.jpg\"):\n lbl = label_num[str(img_path.parent.stem)]\n img = cv2.imread(str(img_path))\n img = cv2.resize(img, self.dims, interpolation=cv2.INTER_AREA)\n\n # flatten RGB data into a vector\n # NOTE: NOT ACTUALLY NECESSARY! \n img.flatten()\n\n # label the sample and append to temp data list\n sample = np.append(lbl, img)\n data.append(sample)\n # end\n\n # partition and package the data (*_ ensures safe unpacking)\n train, test, validate, *_ = Data.partition(data, self.parts, 0.7, 0.2)\n self.train = Data(train)\n self.test = Data(test)\n self.validate = Data(validate)", "def split_data(self,test=False):\n shuffle_index = torch.randperm(self.train_target.shape[0])\n load = shuffle_index.shape[0]\n train_input_shuffle = self.train_input[shuffle_index]\n train_target_shuffle = self.train_target[shuffle_index]\n train_classes_shuffle = self.train_classes[shuffle_index]\n index_train = self.index_for_equal_class(train_target_shuffle[:load//2])\n train_input = train_input_shuffle[index_train]\n train_target = train_target_shuffle[index_train]\n train_classes = train_classes_shuffle[index_train]\n if not test:\n index_test = self.index_for_equal_class( train_target_shuffle[load//2:]) + load//2\n test_input = train_input_shuffle[index_test]\n test_target = train_target_shuffle[index_test]\n test_classes = train_classes_shuffle[index_test]\n else:\n index_test = self.index_for_equal_class(self.test_target)\n test_input = self.test_input[index_test]\n test_target = self.test_target[index_test]\n test_classes = self.test_classes[index_test]\n train_input, mean, std = normalize(train_input)\n test_input, _, _ = normalize(test_input,mean,std)\n return train_input, train_target, train_classes ,test_input ,test_target ,test_classes", "def get_data(dataset, max_train_size=None, max_test_size=None, do_preprocess=True, train_start=0,\n test_start=0, prefix=\"processed\", x_dims=None):\n if max_train_size is None:\n train_end = None\n else:\n train_end = train_start + max_train_size\n if max_test_size is None:\n test_end = None\n else:\n test_end = test_start + max_test_size\n print('load data of:', dataset)\n print(\"train: \", train_start, train_end)\n print(\"test: \", test_start, test_end)\n if x_dims is None:\n x_dim = get_data_dim(dataset)\n else:\n x_dim = x_dims\n f = open(os.path.join(prefix, dataset + '_train.pkl'), \"rb\")\n train_data = pickle.load(f).reshape((-1, x_dim))[train_start:train_end, :]\n f.close()\n try:\n f = open(os.path.join(prefix, dataset + '_test.pkl'), \"rb\")\n test_data = pickle.load(f).reshape((-1, x_dim))[test_start:test_end, :]\n f.close()\n except (KeyError, FileNotFoundError):\n test_data = None\n try:\n f = open(os.path.join(prefix, dataset + \"_test_label.pkl\"), \"rb\")\n test_label = pickle.load(f).reshape((-1))[test_start:test_end]\n f.close()\n except (KeyError, FileNotFoundError):\n test_label = None\n if do_preprocess:\n train_data, test_data = preprocess(train_data, test_data)\n print(\"train set shape: \", train_data.shape)\n print(\"test set shape: \", test_data.shape)\n if test_label is not None:\n 
print(\"test label shape: \", test_label.shape)\n print()\n return (train_data, None), (test_data, test_label)", "def test_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_test, **self.dl_kwargs)", "def load_data(self, task):\n params = self.params\n data = {splt: {} for splt in ['train', 'valid', 'test']}\n dpath = os.path.join(params.data_path, 'eval', task)\n\n self.n_sent = 1 if task in ['SST-2', 'CoLA'] else 2\n\n for splt in ['train', 'valid', 'test']:\n\n # load data and dictionary\n data1 = load_binarized(os.path.join(dpath, '%s.s1.pth' % splt), params)\n data2 = load_binarized(os.path.join(dpath, '%s.s2.pth' % splt), params) if self.n_sent == 2 else None\n data['dico'] = data.get('dico', data1['dico'])\n\n # set dictionary parameters\n set_dico_parameters(params, data, data1['dico'])\n if self.n_sent == 2:\n set_dico_parameters(params, data, data2['dico'])\n\n # create dataset\n if self.n_sent == 1:\n data[splt]['x'] = Dataset(data1['sentences'], data1['positions'], params)\n else:\n data[splt]['x'] = ParallelDataset(\n data1['sentences'], data1['positions'],\n data2['sentences'], data2['positions'],\n params\n )\n\n # load labels\n if splt != 'test' or task in ['MRPC']:\n # read labels from file\n with open(os.path.join(dpath, '%s.label' % splt), 'r') as f:\n lines = [l.rstrip() for l in f]\n # STS-B task\n if task == 'STS-B':\n assert all(0 <= float(x) <= 5 for x in lines)\n y = [float(l) for l in lines]\n # QQP\n elif task == 'QQP':\n UNK_LABEL = 0\n lab2id = {x: i for i, x in enumerate(sorted(set(lines) - set([''])))}\n y = [lab2id.get(x, UNK_LABEL) for x in lines]\n # other tasks\n else:\n lab2id = {x: i for i, x in enumerate(sorted(set(lines)))}\n y = [lab2id[x] for x in lines]\n data[splt]['y'] = torch.LongTensor(y)\n assert len(data[splt]['x']) == len(data[splt]['y'])\n\n # compute weights for weighted training\n if task != 'STS-B' and params.weighted_training:\n weights = torch.FloatTensor([\n 1.0 / (data['train']['y'] == i).sum().item()\n for i in range(len(lab2id))\n ]).npu()\n self.weights = weights / weights.sum()\n else:\n self.weights = None\n\n return data", "def _load_data(self):\n if self._name in BALANCE_DATASET:\n _loader = dataset_loaders[self._name]\n xnp, y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.y_ts = y[test_idx]\n\n else:\n _loader = dataset_loaders[self._name]\n xnp, xp, y = _loader()\n # self.xnp, self.xp, self.y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.xp_tr = xp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.xp_ts = xp[test_idx]\n self.y_ts = y[test_idx]", "def test_data():\n batch_size = 10\n input_dim = 28\n test_data = np.random.rand(batch_size, input_dim)\n\n return test_data", "def load_data(self):\n params = self.params\n catg = params.data_category\n langs = ['en', params.target_lang]\n data = {lang: {splt: {} for splt in (['train', 'valid'] if lang == 'en' else ['test'])} for lang in langs}\n clf_dataset_path = {\n lang: {\n splt: {\n 'x': os.path.join(params.data_path, '%s_%s_%s_x.bpe.pth' % (splt, lang, catg)),\n 'y': 
os.path.join(params.data_path, '%s_%s_%s_y.txt' % (splt, lang, catg)),\n } for splt in (['train', 'valid'] if lang == 'en' else ['test'])\n } for lang in langs\n }\n for splt in ['train', 'valid', 'test']:\n for lang in langs:\n if lang == 'en' and splt in ['train', 'valid'] or lang != 'en' and splt == 'test':\n # load data and dictionary\n data1 = load_binarized(clf_dataset_path[lang][splt]['x'], params)\n data['dico'] = data.get('dico', data1['dico'])\n # set dictionary parameters\n set_dico_parameters(params, data, data1['dico'])\n # create dataset\n data[lang][splt]['x'] = Dataset(data1['sentences'], data1['positions'], params)\n # load labels\n with open(clf_dataset_path[lang][splt]['y'], 'r') as f:\n labels = [int(l) for l in f]\n data[lang][splt]['y'] = torch.LongTensor(labels)\n assert len(data[lang][splt]['x']) == len(data[lang][splt]['y'])\n\n return data", "def get_data(self):\n return self.train_edges, self.train_labels, self.test_edges, self.test_labels", "def _load_data(self):\n pickle_in = open(\"X_train.pickle\", \"rb\")\n self.X = pickle.load(pickle_in)\n pickle_in = open(\"y_train.pickle\", \"rb\")\n self.Y = pickle.load(pickle_in)\n\n pickle_in = open(\"X_test.pickle\", \"rb\")\n self.X_final = pickle.load(pickle_in)\n pickle_in = open(\"y_test.pickle\", \"rb\")\n self.Y_final = pickle.load(pickle_in)\n\n # Set input shape:\n if K.image_data_format() == 'channels_first':\n self.input_shape = (3, self.img_rows, self.img_cols)\n else:\n self.input_shape = (self.img_rows, self.img_cols, 3)\n\n self.X = self.X.astype('float32')\n self.X /= 255\n self.X_final = self.X_final.astype('float32')\n self.X_final /= 255\n print('X shape:', self.X.shape)\n print(self.X.shape[0], 'Samples')\n\n num_datapoints = 3000\n self.X = self.X[0:num_datapoints]\n self.Y = self.Y[0:num_datapoints]\n\n num_datapoints = 2000\n self.X_final = self.X_final[0:num_datapoints]\n self.Y_final = self.Y_final[0:num_datapoints]\n\n self.Y_final = to_categorical(self.Y_final, self.num_classes)\n\n # Initialize Data\n kfold = StratifiedKFold(n_splits=self.nFolds, shuffle=True)\n\n if self.b_eval_advanced:\n # Loop through the indices the split() method returns\n for index, (train_indices, test_indices) in enumerate(kfold.split(self.X, self.Y)):\n if index == 0:\n self.Y = to_categorical(self.Y, self.num_classes)\n\n # Generate batches from indices\n xtrain, xtest = self.X[train_indices], self.X[test_indices]\n ytrain, ytest = self.Y[train_indices], self.Y[test_indices]\n\n self.data.append(tuple([xtrain, xtest, ytrain, ytest]))\n\n if not self.b_eval_advanced:\n self.Y = to_categorical(self.Y, self.num_classes)\n\n #print(np.asarray(self.data).shape)\n #print(self.data)\n print(\"Y_final Shape\", self.Y_final.shape)", "def train_data(self):\n\n return self.__train_data, self.__train_labels", "def loadTestData():\n path = raw_input(\"Enter the path of Test Data: \")\n data = np.genfromtxt(path, delimiter=',', dtype=int)\n\n labels = data[:, -1]\n\n unwantedLabels = [4, 5, 6, 7, 8, 9]\n listToDelete = []\n for i, line in enumerate(range(len(data))):\n if labels[i] in unwantedLabels:\n listToDelete.append(i)\n\n actualData = np.delete(data, listToDelete, axis=0)\n\n # print(actualData.shape)\n # Separating the labels and data into different arrays\n actualLabels = actualData[:, -1]\n actualData = actualData[:, :-1]\n\n actualData = pre.scale(actualData)\n\n # Change the label vector to label matrix\n # If Label is 2 then it becomes [0, 1, 0]\n labelMatrix = np.zeros((actualLabels.shape[0], 4))\n for j in 
range(len(actualLabels)):\n if actualLabels[j] == 0:\n labelMatrix[j][0] = 1\n if actualLabels[j] == 1:\n labelMatrix[j][1] = 1\n if actualLabels[j] == 2:\n labelMatrix[j][2] = 1\n if actualLabels[j] == 3:\n labelMatrix[j][3] = 1\n\n return actualData, actualLabels", "def generate_train_test(self):\n x, y = self.read_data()\n x_train, y_train, x_test, y_test = self.sample_data(x, y)\n self.train = (x_train, y_train)\n self.test = (x_test, y_test)", "def load_training_data(self) -> Tuple[List[np.ndarray], np.ndarray]:\n return self._load_set(config.TRAIN_DIR, True)", "def test_load_model_data(self):\n add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )", "def test_load_model_data(self):\n add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )", "def set_data():\r\n #if not os.path.exists(filepath):\r\n #download_data()\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, train, test = {}, {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n\r\n train['x'], train['y'] = convert_train(data['ntraindata'], data['ndim'])\r\n\r\n testdata = read(filepath + flist[-2])\r\n test['x'] = testdata['data']\r\n test['y'] = testdata['labels']\r\n\r\n data['train'], data['test'] = train, test\r\n save_pkl(data)", "def getTestSet(self):\r\n return self.fTestData", "def test_labels(self):\n return self._test_labels", "def evaluate(self, test_data, test_labels):\n raise NotImplementedError", "def test_load_data(self):\n assert len(self._mnist.get()) == 10\n assert self._mnist.get()[0].label == 7\n pass", "def load_preprocess_test_batch(batch_id, batch_size):\r\n filename = 'preprocess_test_' + str(batch_id) + '.p'\r\n features, labels = pickle.load(open(filename, mode='rb'))\r\n# labels = np.argmax(labels,1)\r\n# num = len(labels)\r\n# arr = np.zeros((num, 1))\r\n# for i in range(num):\r\n# arr[i][0] = labels[i]\r\n# ind = [i for i in range(len(features))]\r\n# random.shuffle(ind)\r\n# features = features[ind]\r\n# labels = labels[ind]\r\n\r\n # Return the training data in batches of size <batch_size> or less\r\n return features[1200:batch_size],labels[1200:batch_size]\r\n #return batch_features_labels(features, labels, batch_size)\r", "def model_data():\n x_train, y_train, x_val, y_val, x_test, y_test = read_data(\"src/tests/dataclassificationmodel/ferPlus_processed.pbz2\", False)\n return x_train, y_train, x_val, y_val, x_test, y_test", "def _split_train_tst(self):\n num_samples = self.Y.shape[0]\n mapper_file = self.checkpointer.get_mapper_file_location()\n if not self.checkpointer.is_mapper_checkpointed():\n print 'No mapper checkpoint found. Fresh loading in progress ...'\n # Now shuffle the data\n sample_id = range(num_samples)\n random.shuffle(sample_id)\n print 'Dumping the mapper shuffle for reuse.'\n Pickle.dump(sample_id, open(mapper_file, 'wb'))\n print 'Dump complete. Moving Forward...'\n else:\n print 'Mapper Checkpoint found... Reading from mapper dump'\n sample_id = Pickle.load(open(mapper_file, 'rb'))\n print 'Mapping unpickling complete.. 
Moving forward...'\n\n self.X_fwd = self.X_fwd[sample_id]\n self.X_bwd = self.X_bwd[sample_id]\n self.Y = self.Y[sample_id]\n # Now divide the data into test ans train set\n test_fraction = 0.01\n self.test_size = int(test_fraction * num_samples)\n self.train_size = num_samples - self.test_size\n # Forward review\n self.X_trn_fwd = self.X_fwd[0:self.train_size]\n self.X_tst_fwd = self.X_fwd[self.train_size:num_samples]\n # Backward review\n self.X_trn_bwd = self.X_bwd[0:self.train_size]\n self.X_tst_bwd = self.X_bwd[self.train_size:num_samples]\n # Summary\n self.Y_trn = self.Y[0:self.train_size]\n self.Y_tst = self.Y[self.train_size:num_samples]", "def split_data(self):\n if not self.load_data:\n raise AttributeError('Preprocessor has not loaded any data.')\n \n # 3 - Find example counts for each set\n self.n_examples = self.data[0].shape[0]\n self.n_train = int(self.n_examples * self.train_ratio)\n self.n_val = int(self.n_examples * self.val_ratio)\n self.n_test = self.n_examples - self.n_train - self.n_val\n \n logger.info(f'Set sizes:')\n logger.info(f'train: {self.n_train}')\n logger.info(f'val: {self.n_val}')\n logger.info(f'test: {self.n_test}')\n if self.n_test < 0:\n raise ValueError('Train + validation ratios must bef < 1')\n\n # 4 - Separate data into train, test, val\n if isinstance(self.data[0], pd.DataFrame):\n logger.info('Dataset is in a dataframe.')\n self.isdataframe = True\n\n self.train_data = [self.data[0].iloc[:self.n_train],\n self.data[1].iloc[:self.n_train]]\n \n self.val_data = [self.data[0].iloc[self.n_train:self.n_val + self.n_train],\n self.data[1].iloc[self.n_train:self.n_val + self.n_train]]\n \n self.test_data = [self.data[0].iloc[self.n_val + self.n_train:],\n self.data[1].iloc[self.n_val + self.n_train:]]\n logger.info('Data was split into train, val, test.')\n else:\n self.isdataframe = False\n logger.info('Dataset is in a numpy array.')\n \n # If datasets are numpy array or sparse\n self.train_data = [self.data[0][:self.n_train],\n self.data[1][:self.n_train]]\n \n self.val_data = [self.data[0][self.n_train:self.n_val + self.n_train],\n self.data[1][self.n_train:self.n_val + self.n_train]]\n \n self.test_data = [self.data[0][self.n_val + self.n_train:],\n self.data[1][self.n_val + self.n_train:]]\n logger.info('Data was split into train, val, test.')\n \n assert(self.n_train == self.train_data[0].shape[0])\n assert(self.n_val == self.val_data[0].shape[0])\n assert(self.n_test == self.test_data[0].shape[0])\n \n # Free memory\n del self.data\n \n if self.save_sets:\n self.save_datasets()", "def load_data():\n X = load_pickle(config['image_paths']['train_images_pickle'])\n y = load_train_labels()\n y = to_categorical(y)\n test_indices = np.random.choice(len(X), int(len(X) * float(config['model']['test_size'])), replace=False)\n X_train = np.asarray([e for idx, e in enumerate(X) if idx not in test_indices])\n X_test = np.asarray([e for idx, e in enumerate(X) if idx in test_indices])\n y_train = np.asarray([e for idx, e in enumerate(y) if idx not in test_indices])\n y_test = np.asarray([e for idx, e in enumerate(y) if idx in test_indices])\n return X_train, y_train, X_test, y_test", "def get_test_labels(self):\n raise NotImplementedError", "def load_testing_data_generator(self) -> Generator[List[np.ndarray], None, None]:\n return self._load_generator(config.TEST_DIR, False)", "def load_test_data(label_fname, data_fname):\n labels = load_csv(label_fname)\n data = load_csv(data_fname, 'excel-tab')\n\n # Join all data together on the ids given in the files\n 
joined_data = {}\n for label in labels:\n id = label[0]\n joined_data[id] = {'class': label[1]}\n for rec in data:\n id = rec[0]\n if id in joined_data:\n joined_data[id]['data'] = rec[1]\n\n # Clean and convert the data to reals\n max_features = 0\n for id in joined_data:\n words = clean_text(joined_data[id]['data'])\n reals = convert_to_reals(words)\n joined_data[id]['data'] = reals\n if len(reals) > max_features:\n max_features = len(reals)\n\n # Pad the data\n for id in joined_data:\n reals = joined_data[id]['data']\n joined_data[id]['data'] = reals + (max_features - len(reals)) * [0.0]\n\n # Prepare the data for training\n training_data = np.array([joined_data[id]['data'] for id in joined_data])\n training_labels = [joined_data[id]['class'] == 'OFF' for id in joined_data]\n return training_labels, training_data, max_features", "def testData(self, ):\n count = 0\n while count < len(self.RAD_sequences_test):\n RAD_filename = self.RAD_sequences_test[count] \n RAD_complex = loader.readRAD(RAD_filename)\n if RAD_complex is None:\n raise ValueError(\"RAD file not found, please double check the path\")\n ### NOTE: Gloabl Normalization ###\n RAD_data = helper.complexTo2Channels(RAD_complex)\n RAD_data = (RAD_data - self.config_data[\"global_mean_log\"]) / \\\n self.config_data[\"global_variance_log\"]\n ### load ground truth instances ###\n gt_filename = loader.gtfileFromRADfile(RAD_filename, \\\n self.config_data[\"test_set_dir\"])\n gt_instances = loader.readRadarInstances(gt_filename)\n if gt_instances is None:\n raise ValueError(\"gt file not found, please double check the path\")\n\n ### NOTE: decode ground truth boxes to YOLO format ###\n gt_labels, has_label, raw_boxes = self.encodeToLabels(gt_instances)\n\n if has_label:\n yield (RAD_data, gt_labels, raw_boxes)\n count += 1", "def data(self, train=True):\n data = self.train_data if train else self.val_data\n return data.data, data.targets", "def test_data():\n global _MEAN # pylint: disable=global-statement\n _np.random.seed(1)\n view = _skdc10.view.OfficialImageClassificationTask()\n permutation = _np.random.permutation(range(10000))\n if _MEAN is None:\n _MEAN = view.train.x.reshape((50000 * 32 * 32, 3)).mean(axis=0)\n return ((view.test.x[:10000, :][permutation, :] - _MEAN).\n transpose((0, 3, 1, 2)).astype('float32'),\n view.test.y[:10000][permutation].reshape((10000, 1)).astype('float32'))", "def test_cached_dataloader(self):\n\n v = [\"data\", \"target\", \"model_out_sqnet\"]\n\n for data, target in self.train_loader:\n b, c, h, w = data[v[0]].shape\n assert data[v[1]].shape == (b, )\n assert data[v[2]].shape == (b, 100)\n assert data[v[1]].shape == target.shape", "def __init__(self, _expected_d_input=None, shuffled=False, _training_data=None, _test_data=None):\n self._training_data = _training_data\n self._test_data = _test_data\n self._num_training_samples = None\n self._num_test_samples = None\n self._available_training_lengths = []\n self._available_test_lengths = []\n self._training_data_path = os.path.join(type(self).__name__, \"training_data.npy\")\n if not os.path.isdir(type(self).__name__):\n os.mkdir(type(self).__name__)\n\n self._expected_d_input = _expected_d_input\n if self._training_data is None:\n if os.path.isfile(self._training_data_path):\n self._training_data = np.load(self._training_data_path).item()\n for _, value in self._training_data.items():\n print(value[0].shape)\n print(_expected_d_input)\n if value[0].shape[2] != _expected_d_input:\n self._training_data = None\n break\n self._test_data_path = 
os.path.join(type(self).__name__, \"testing_data.npy\")\n if self._test_data is None:\n if os.path.isfile(self._test_data_path):\n self._test_data = np.load(self._test_data_path).item()\n for _, value in self._test_data.items():\n if value[0].shape[2] != _expected_d_input:\n self._test_data = None\n break\n if self._test_data is None:\n self._load_test_data()\n if self._training_data is None:\n self._load_training_data()\n if shuffled:\n print(\"Shuffling not supported at this point!\")\n self.current_index = {}\n for key, _ in self._training_data.items():\n self.current_index[key] = 0\n self._initialise_available_training_lengths()\n self._initialise_available_test_lengths()\n self._swapped_test_data = None\n self._swapped_training_data = None", "def get_test(self, even=None):\n\n self.get_train(even)", "def _load_data(self):\n\n # This allows a simulated dataset to use the same constructor.\n if self.input_file is None:\n return\n\n logging.info(f\"Loading data from file {self.input_file}\")\n\n # Load the dataset.\n if os.path.isdir(self.input_file):\n self.data = get_matrix_from_mtx(self.input_file)\n else:\n self.data = get_matrix_from_h5(self.input_file)", "def load_data(self, data_path, use_plus_minus_feats):\n loaded = np.load(data_path + '-targets.npz')\n self.max_num_ans = int(loaded['max_num_ans'])\n self.max_prob_set_id = int(loaded['max_prob_set_id'])\n targets = loaded['targets']\n if use_plus_minus_feats:\n print(\"using plus minus feats!!!\")\n inputs = sp.load_npz(data_path + '-inputs-plus-minus.npz')\n self.encoding_dim = self.max_prob_set_id + 1\n else:\n inputs = sp.load_npz(data_path + '-inputs.npz')\n self.encoding_dim = 2 * self.max_prob_set_id + 1\n self.target_ids = sp.load_npz(data_path + '-targetids.npz')\n\n return inputs, targets", "def get_test_examples(self, data_path):\r\n return self.create_examples(self.read_data(data_path), 'test')", "def get_test(self, preprocess=False):\n return self._dataset(self._directory, 'images_background_small2', preprocess)", "def __init__(self, data_set):\r\n self.name = data_set\r\n\r\n # The training and test labels\r\n self.labels = {'train': None, 'test': None}\r\n\r\n # The training and test examples\r\n self.examples = {'train': None, 'test': None}\r\n\r\n # Load all the data for this data set\r\n for data in ['train', 'test']:\r\n self.load_file(data)\r\n\r\n # The shape of the training and test data matrices\r\n self.num_train = self.examples['train'].shape[0]\r\n self.num_test = self.examples['test'].shape[0]\r\n self.dim = self.examples['train'].shape[1]", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = 
torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def get_data(self):\n\n if not self.checked:\n self.check_cache()\n h5f = h5py.File(self.data_filename, 'r')\n train_lbl = h5f['train_lbl'][:]\n train_img = h5f['train_img'][:]\n val_lbl = h5f['val_lbl'][:]\n val_img = h5f['val_img'][:]\n h5f.close()\n return train_img, train_lbl, val_img, val_lbl", "def eval(self):\r\n if WORDSPLIT:\r\n train, test = self.get_train_test_wordsplit()\r\n elif UTTERANCE_SPLIT:\r\n train, test, val = self.get_train_test_utterance_split()\r\n wordlist = joblib.load('wordlist.pkl')\r\n dictionary = joblib.load('dict.pkl')\r\n phones = joblib.load('phones.pkl')\r\n metadata_help = {'wordlist': wordlist, 'dictionary': dictionary, 'phones': phones}\r\n p2c = utils.phone2class(phones)\r\n c2p = utils.class2phone(phones)\r\n \"\"\"Get test generator\"\"\"\r\n test_data = Dataset({'files': test, 'mode': 'eval', 'metadata_help': metadata_help})\r\n test_gen = data.DataLoader(test_data, batch_size=1,\r\n shuffle=True, collate_fn=test_data.collate_eval, drop_last=True)\r\n for batch_number, features in tqdm(enumerate(test_gen)):\r\n spectrograms = features['spectrograms']\r\n phones = features['phones']\r\n batch_metadata = features['metadata'][0]\r\n self.G = self.G.eval()\r\n\r\n outputs = self.G(spectrograms)\r\n outputs = np.squeeze(outputs.detach().cpu().numpy())\r\n phones = np.squeeze(phones.detach().cpu().numpy())\r\n phones = phones.astype(dtype=int)\r\n phones = [c2p[x] for x in phones]\r\n\r\n output_classes = np.argmax(outputs, axis=1)\r\n\r\n \"\"\"Decode the output predictions into a phone sequence\"\"\"\r\n # https://stackoverflow.com/questions/38065898/how-to-remove-the-adjacent-duplicate-value-in-a-numpy-array\r\n duplicates_eliminated = np.asarray([k for k, g in groupby(output_classes)])\r\n blanks_eliminated = duplicates_eliminated[duplicates_eliminated != 0]\r\n predicted_phones_ = [c2p[x] for x in blanks_eliminated]\r\n \"\"\"remove SOS and EOS\"\"\"\r\n predicted_phones = []\r\n for x in predicted_phones_:\r\n if x != 'SOS' and x != 'EOS':\r\n predicted_phones.append(x)\r\n\r\n data_to_save = {'speaker': batch_metadata['speaker'],\r\n 'word': batch_metadata['word'],\r\n 'true_phones': batch_metadata['phones'],\r\n 'predicted_phones': predicted_phones}\r\n dump_path = os.path.join(self.predict_dir, batch_metadata['utterance'] + '.pkl')\r\n joblib.dump(data_to_save, dump_path)", "def _get_data_for_tests():\n X = np.random.randn(100, input_dim)\n Y = np.random.randn(100, output_dim)\n X_new = np.random.randn(100, input_dim)\n return X, X_new, Y", "def get_test_data_FieldLoader(self, set_name='train', field='data'):\n path = \"/{set_name}/{field}\".format(set_name=set_name, field=field)\n field_loader = self.load_hdf5_file_FieldLoader(path)\n set_data = self.dataset[set_name]\n return field_loader, set_data", "def load_data():\n print(\"PARSING TRAIN\")\n ys_train, x_train, ids_train = load_pickle_data(\"ys_train\"), load_pickle_data(\"x_train\"), load_pickle_data(\n \"ids_train\")\n if ys_train is None or x_train is None or ids_train is None:\n ys_train, x_train, 
ids_train = load_csv_data(\"{}/train.csv\".format(DATA_DIR))\n dump_pickle_data(ys_train, \"ys_train\")\n dump_pickle_data(x_train, \"x_train\")\n dump_pickle_data(ids_train, \"ids_train\")\n\n print(\"PARSING TEST\")\n x_test, ids_test = load_pickle_data(\"x_test\"), load_pickle_data(\"ids_test\")\n if x_test is None or ids_test is None:\n _, x_test, ids_test = load_csv_data(\"{}/test.csv\".format(DATA_DIR))\n dump_pickle_data(x_test, \"x_test\")\n dump_pickle_data(ids_test, \"ids_test\")\n\n return ys_train, x_train, ids_train, x_test, ids_test", "def load_test_data():\n X = []\n y = []\n for fname in os.listdir(test_dir):\n label = int(fname.split(\"_\")[0])\n img = plt.imread(os.path.join(test_dir, fname))\n X.append(img)\n y.append(label)\n X = np.stack(X)\n y = np.stack(y)\n return X, y", "def __load_data(self, input_directory):\n print(\"Loading data...\")\n self.training, self.validation, self.testing = (\n tuple(\n numpy.load(\n os.path.join(input_directory, '{}_{}.npy'.format(x, y))\n )\n for y in ('data', 'labels')\n )\n for x in ('training', 'validation', 'testing')\n )\n self.report['data_directory'] = input_directory\n self.report['images_training'] = len(self.training[1])\n self.report['images_validation'] = len(self.validation[1])\n self.report['images_testing'] = len(self.validation[1])", "def train_data(self):\n return self._train_data", "def prep_data(self):\n\n self.fit_tokenizer(texts=self.texts)\n sequences = self.get_sequences(self.texts)\n self.text_data = pad_sequences(sequences, maxlen=self.MAX_SEQUENCE_LENGTH)\n\n self.labels = to_categorical(np.asarray(self.labels))\n print('Shape of data tensor:', self.text_data.shape)\n print('Shape of label tensor:', self.labels.shape)\n\n # split the data into a training set and a validation set\n indices = np.arange(self.text_data.shape[0])\n np.random.shuffle(indices)\n self.text_data = self.text_data[indices]\n self.labels = self.labels[indices]\n nb_validation_samples = int(self.VALIDATION_SPLIT * self.text_data.shape[0])\n\n x_train = self.text_data[:-nb_validation_samples]\n y_train = self.labels[:-nb_validation_samples]\n x_val = self.text_data[-nb_validation_samples:]\n y_val = self.labels[-nb_validation_samples:]\n\n return x_train,y_train, x_val, y_val", "def pre_process_data(self, all_labels, all_data):\n\n # [1] Normalizes data\n all_data = self.pre_precess_manager.normalization(all_data)\n\n data_train, data_test, label_train, label_test = train_test_split(all_data, all_labels, test_size=0.1,\n shuffle=True)\n\n return data_train, data_test, label_train, label_test", "def test_dataloader(self, batch_size: Optional[int] = None) -> DataLoader:\n if self.test is not None:\n dataset = TabularDataset(\n task=self.config.task,\n data=self.test,\n categorical_cols=self.config.categorical_cols,\n continuous_cols=self.config.continuous_cols,\n embed_categorical=(not self.do_leave_one_out_encoder()),\n target=self.target,\n )\n return DataLoader(\n dataset,\n batch_size if batch_size is not None else self.batch_size,\n shuffle=False,\n num_workers=self.config.num_workers,\n pin_memory=self.config.pin_memory,\n )", "def refresh_test_dataset(self):\n inputs_id, inputs_starts, inputs_paths, inputs_ends, inputs_label = self.build_data(self.reader, self.test_items, self.option.max_path_length)\n self.test_dataset = CodeDataset(inputs_id, inputs_starts, inputs_paths, inputs_ends, inputs_label)", "def load_and_predict(self):\n if DataLoader.data is None:\n messagebox.showerror(\"Information\", \"Data file is empty, please load 
the data first.\")\n return\n\n path = filedialog.askopenfilename()\n with open(path, 'rb') as file:\n Trainer.model = pickle.load(file)\n\n scale = DataLoader.data['out'].max() - DataLoader.data['out'].min()\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaler.fit(DataLoader.data)\n data_scaled = pd.DataFrame(scaler.transform(DataLoader.data), columns=DataLoader.data.columns)\n\n Trainer.y_pred = batch_predict(Trainer.model, data_scaled.drop(columns=['out']))\n Trainer.y_true = data_scaled['out']\n\n self.test_rmse = scale * math.sqrt(mean_squared_error(Trainer.y_pred, Trainer.y_true))\n print(self.test_rmse)\n self.r_squared = np.corrcoef(Trainer.y_pred * scale, data_scaled['out'] * scale)[0, 1] ** 2\n print(self.r_squared)\n\n models = Trainer.model.get_models()\n param_string = f'Component Function Trained Parameters:\\n'\n for i in range(len(models)):\n param_string += \"length scale: {:.4f}\".format(models[i].kernel_.k1.length_scale) + ' ' + \\\n \"noise level: {:.4e}\".format(models[i].kernel_.k2.noise_level) + '\\n'\n param_string += f'\\nRMSE on the test set: {self.test_rmse}\\n'\n param_string += f'R^2 value on the test set: {self.r_squared}'\n display_params = ttk.Label(self, text=param_string, width=40)\n display_params.grid(row=24 + 7, column=0, columnspan=2, sticky=tk.W + tk.E)", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def get_train_test_info(self):\n \n imagePaths = list(paths.list_images(self.cross_val_dir))\n\n train_path_info = defaultdict(list)\n test_path_info = defaultdict(list)\n\n for imagePath in imagePaths:\n path_parts = imagePath.split(os.path.sep)\n fold_number = path_parts[-3][-1]\n label = path_parts[-2]\n if(fold_number==str(self.fold)):\n test_path_info['path_list'].append(imagePath)\n test_path_info['label_list'].append(label)\n else:\n train_path_info['path_list'].append(imagePath)\n train_path_info['label_list'].append(label)\n\n return train_path_info, test_path_info", "def preprocess_test_data(self):\r\n print(\"* Preprocessing test data.\", flush=True)\r\n prep.create_HDF_file(self.C.test_set)\r\n\r\n self.print_time_elapsed()", "def load_data(test_split=0.1):\n global _data\n random.shuffle(_data)\n idx = int(len(_data) * (1 - test_split))\n x_train, y_train = np.array([d[:4] for d in _data[:idx]]), np.array([name_index[d[4]] for d in _data[:idx]])\n x_test, y_test = np.array([d[:4] for d in _data[idx:]]), np.array([name_index[d[4]] for d in _data[idx:]])\n return (x_train, y_train), (x_test, y_test)", "def load_data(self):\n # make sure preprocessing is same as preprocessing as the network\n # reduce mean, and divide by a value to do scaling\n self.train_datagen = ImageDataGenerator(\n rescale=1./ 255,\n shear_range=0.05,\n rotation_range=20, # randomly rotate images in the range (degrees, 0 to 180)\n zoom_range=[0.9, 1.1], # Randomly zoom image\n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\n horizontal_flip=True, # randomly flip images\n brightness_range=[0.8, 1.2],\n fill_mode='reflect',\n validation_split=0.2)\n\n self.test_datagen = ImageDataGenerator(rescale=1. 
/ 255)\n\n self.train_generator = self.train_datagen.flow_from_directory(\n self.train_dir,\n target_size=(224, 224),\n shuffle=True,\n batch_size=self.batchsize,\n class_mode='categorical',\n subset=\"training\")\n\n self.validation_generator = self.train_datagen.flow_from_directory(\n self.train_dir,\n target_size=(224, 224),\n shuffle=True,\n batch_size=self.batchsize,\n class_mode='categorical',\n subset=\"validation\")\n\n self.test_generator = self.test_datagen.flow_from_directory(\n self.test_dir,\n target_size=(224, 224),\n shuffle=False,\n batch_size=1,\n class_mode='categorical')", "def load_ps(self):\n self.ps = self.read_var(self.psvar)\n self.test_shape(self.psvar, self.ps.shape, 2)", "def __test_input_fn(self):\n ## Test labels\n labels = self.labels_test\n ## Recast spectra into dictionary for estimator\n features = {'flux': self.spectra_test}\n ## Convert labels to integers\n ilabels = [self.label_index_lookup[l] for l in labels]\n return features, ilabels", "def _load_data(self):\n raw_data = self._load(\n tf.gfile.Open(self._get_full_pickle_path(self._dataset_split), \"rb\"))\n if self._dataset_split == MetaSplit.TRAIN and self._config[\"train_on_val\"]:\n valid_data = self._load(\n tf.gfile.Open(self._get_full_pickle_path(MetaSplit.VALID), \"rb\"))\n for key in valid_data:\n if self._verbose:\n tf.logging.info(str([key, raw_data[key].shape]))\n raw_data[key] = np.concatenate([raw_data[key],\n valid_data[key]], axis=0)\n if self._verbose:\n tf.logging.info(str([key, raw_data[key].shape]))\n\n if self._verbose:\n tf.logging.info(\n str([(k, np.shape(v)) for k, v in six.iteritems(raw_data)]))\n\n return raw_data", "def test(self):\n self.load()\n bottleneck_features = np.load(self.feature_path)\n test = bottleneck_features['test']\n _, test_targets = load_dataset(self.image_path_test) \n predictions = [np.argmax(self.model.predict(np.expand_dims(feature, axis=0))) for feature in test]\n test_accuracy = 100*np.sum(np.array(predictions) == np.argmax(test_targets, axis=1))/len(predictions)\n print('{}, test accuracy: {:.4f}%'.format(self.name, test_accuracy))\n return test_accuracy", "def load_data_pickle(self, load_full=False):\n self.train = pd.read_pickle('../input/train_mod.pkl')\n self.test = pd.read_pickle('../input/test_mod.pkl')\n if load_full:\n self.train_full = pd.read_pickle('../input/train_full_mod.pkl')", "def load_dataset(self, testPrefix = 'cv9', root = 'datasets', classes = [ 'pos', 'neg' ]):\n\n\t\tfor senti_class in classes:\n\n\t\t\tdirname = os.path.join(root, senti_class)\n\n\t\t\tfor filename in os.listdir(dirname):\n\n\t\t\t\twith open(os.path.join(dirname, filename)) as file:\n\n\t\t\t\t\tcontent = file.read()\n\n\t\t\t\t\tif filename.startswith(testPrefix):\n\t\t\t\t\t\t# Testing data\n\t\t\t\t\t\tself.testing_set.append(content)\n\t\t\t\t\t\tself.testing_labels.append(senti_class)\n\t\t\t\t\telse:\n\t\t\t\t\t\t# Training data\n\t\t\t\t\t\tself.training_set.append(content)\n\t\t\t\t\t\tself.training_labels.append(senti_class)\n\n\t\tself._vectorize(self.vectorizer)", "def get_train(self, data_file):\r\n return self.read_data(data_file)", "def test_text_classifier_get_training_samples(self):\n pass", "def __test_and_train(self):\n f = open(\"all_data_info.csv\")\n reader = csv.DictReader(f, delimiter=\",\")\n data = []\n for line in reader:\n if line['artist_group'] == \"train_and_test\" and line[\"in_train\"] == \"False\":\n # the img's artist is in training set\n # but the img is in test set only\n data.append((line['artist'], 
line['new_filename']))\n\n return data" ]
[ "0.70449495", "0.6995364", "0.6777924", "0.66745436", "0.65272325", "0.6480013", "0.6461336", "0.63587755", "0.63330346", "0.630707", "0.6300786", "0.6267064", "0.6254497", "0.62507844", "0.6207419", "0.62047035", "0.61748314", "0.6138185", "0.61186504", "0.6097096", "0.60467106", "0.603805", "0.6011956", "0.60096276", "0.6001075", "0.5992767", "0.59853816", "0.5970728", "0.5963372", "0.5939059", "0.5934836", "0.59345937", "0.5926332", "0.59183913", "0.5915859", "0.59155416", "0.5914839", "0.5909804", "0.58976007", "0.58888054", "0.5884585", "0.58812976", "0.5874462", "0.5868535", "0.58633107", "0.5839107", "0.58325356", "0.58325356", "0.5832153", "0.5826521", "0.5821544", "0.5821536", "0.58170784", "0.58160543", "0.5815142", "0.5807487", "0.5800426", "0.57904744", "0.5789623", "0.5781709", "0.5770622", "0.57696265", "0.5762582", "0.57531726", "0.57519084", "0.5734052", "0.57276237", "0.57253134", "0.5724457", "0.57238835", "0.5716112", "0.57117635", "0.57113874", "0.5694505", "0.5673615", "0.5669611", "0.56669605", "0.5665888", "0.566458", "0.5657639", "0.56551754", "0.56546074", "0.564081", "0.5634821", "0.56337315", "0.5632987", "0.56324583", "0.5619709", "0.5617627", "0.56000936", "0.55976737", "0.5593576", "0.55900985", "0.5584025", "0.5582531", "0.55803484", "0.55778", "0.55773926", "0.556909", "0.55686134" ]
0.637961
7
Get test data (perhaps from remote server) and preprocess in shape [batch, expected shape of element]. Remember to call this from a subclass to save the things.
def _load_test_data(self): self._save_test_data()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch_test_batch(self):\n data = self.data\n # size of train dataset\n num_train = data['train'].shape[0]\n image_size = self.image_size\n # index of test image that is being classified in this batch\n batch_index = self.test_batch_index\n\n # create batch array\n X = np.zeros([2 * num_train, image_size[0], image_size[1]], dtype='uint8')\n # first half are all training images\n X[:num_train, ...] = data['train']\n # second half is copy of a batch_index-th test image to be classified\n X[num_train:, ...] = data['test'][batch_index, ...]\n # true label is extracted from array of indexes where particular class start\n test_label = np.argmax(self.starts['test']>batch_index) - 1\n\n # rescale intensities and center\n X = X / 255.0\n X = X - self.mean_train\n\n X = X[:, np.newaxis]\n X = X.astype(\"float32\")\n\n self.test_batch_index += 1\n\n X = Variable(torch.from_numpy(X)).view(2 * num_train, self.image_size[0], self.image_size[1])\n\n # stack batch by second axis to [batch size, 2 (pair to be compared), image height, image width]\n X1 = X[:num_train] # (B, h, w)\n X2 = X[num_train:] # (B, h, w)\n\n X = torch.stack([X1, X2], dim=1) # (B, 2, h, w)\n\n if use_cuda:\n X = X.cuda()\n # using test dataset size and current index for controlling test loop in test_model.py\n return X, test_label, data['test'].shape[0], self.test_batch_index", "def preprocess_test_data(self):\r\n print(\"* Preprocessing test data.\", flush=True)\r\n prep.create_HDF_file(self.C.test_set)\r\n\r\n self.print_time_elapsed()", "def load_preprocess_test_batch(batch_id, batch_size):\r\n filename = 'preprocess_test_' + str(batch_id) + '.p'\r\n features, labels = pickle.load(open(filename, mode='rb'))\r\n# labels = np.argmax(labels,1)\r\n# num = len(labels)\r\n# arr = np.zeros((num, 1))\r\n# for i in range(num):\r\n# arr[i][0] = labels[i]\r\n# ind = [i for i in range(len(features))]\r\n# random.shuffle(ind)\r\n# features = features[ind]\r\n# labels = labels[ind]\r\n\r\n # Return the training data in batches of size <batch_size> or less\r\n return features[1200:batch_size],labels[1200:batch_size]\r\n #return batch_features_labels(features, labels, batch_size)\r", "def preprocess_data(self):\n\n self._preprocess_train_data()\n self._preprocess_test_data()", "def test_preprocessed_data(self):\n self.assertEqual(self.tester.preprocessed_data, [1, 2])", "def preprocess(self, train_file, validation_file, test_file):\n chardict, labeldict = self.make_dictionary(train_file, validation_file, test_file)\n print 'preparing training data'\n training = self.parse_file(train_file, chardict, labeldict)\n \n print 'preparing validation data'\n validation = self.parse_file(validation_file, chardict, labeldict)\n\n print 'preparing test data'\n test = self.parse_file(test_file, chardict, labeldict)\n\n return Data(training, validation, test, chardict, labeldict)", "def get_data():\r\n if not path_validation(MODEL_PATH, read_access=True):\r\n exit(0) \r\n if not path_validation(TEST_DATA_PATH, read_access=True):\r\n exit(0) \r\n if not path_validation(TEST_LABEL_PATH, read_access=True):\r\n exit(0) \r\n\r\n params = joblib.load(MODEL_PATH)\r\n test_images = np.load(TEST_DATA_PATH)\r\n test_labels = np.load(TEST_LABEL_PATH)\r\n\r\n # Addition of bias in test set\r\n test_images = np.insert(test_images, 0, 1, axis=1)\r\n\r\n return params, test_images, test_labels", "def get_data(dataset, max_train_size=None, max_test_size=None, do_preprocess=True, train_start=0,\n test_start=0, prefix=\"processed\", x_dims=None):\n if max_train_size is 
None:\n train_end = None\n else:\n train_end = train_start + max_train_size\n if max_test_size is None:\n test_end = None\n else:\n test_end = test_start + max_test_size\n print('load data of:', dataset)\n print(\"train: \", train_start, train_end)\n print(\"test: \", test_start, test_end)\n if x_dims is None:\n x_dim = get_data_dim(dataset)\n else:\n x_dim = x_dims\n f = open(os.path.join(prefix, dataset + '_train.pkl'), \"rb\")\n train_data = pickle.load(f).reshape((-1, x_dim))[train_start:train_end, :]\n f.close()\n try:\n f = open(os.path.join(prefix, dataset + '_test.pkl'), \"rb\")\n test_data = pickle.load(f).reshape((-1, x_dim))[test_start:test_end, :]\n f.close()\n except (KeyError, FileNotFoundError):\n test_data = None\n try:\n f = open(os.path.join(prefix, dataset + \"_test_label.pkl\"), \"rb\")\n test_label = pickle.load(f).reshape((-1))[test_start:test_end]\n f.close()\n except (KeyError, FileNotFoundError):\n test_label = None\n if do_preprocess:\n train_data, test_data = preprocess(train_data, test_data)\n print(\"train set shape: \", train_data.shape)\n print(\"test set shape: \", test_data.shape)\n if test_label is not None:\n print(\"test label shape: \", test_label.shape)\n print()\n return (train_data, None), (test_data, test_label)", "def test_process_data(self):\n pass", "def test_pyt_preprocess_train(self):\n # Second, check that the model will train\n defaults = parser_defaults.copy()\n defaults['datatype'] = 'train'\n defaults['pytorch_preprocess'] = True\n str_output, _, _ = testing_utils.train_model(defaults)\n self.assertTrue(\n solved_task(str_output),\n 'Teacher could not teach seq2seq with preprocessed obs, output: {}'\n .format(str_output)\n )", "def test_batch(self):\n pass", "def _prepare_data(self):\n #TODO hardcoded values need to change\n print_info(\"Preprocessing the train data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"train\"),\n self.TRAIN_OUT_PATH)\n\n print_info(\"Preprocessing the test data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"test\"),\n self.TEST_OUT_PATH)\n\n print_info(\"Preprocessing the validation data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"val\"),\n self.VAL_OUT_PATH)", "def input_setup_test(config):\n # Load data path, if is_train False, get test data\n data = load_data(config.is_train, config.test_img)\n \n # Make sub_input and sub_label, if is_train false more return nx, ny\n sub_input_sequence, sub_label_sequence, nx, ny = make_sub_data_test(data, config)\n\n\n # Make list to numpy array. 
With this transform\n arrinput = np.asarray(sub_input_sequence) # [?, 41, 41, 3]\n arrlabel = np.asarray(sub_label_sequence) # [?, 41, 41, 3]\n make_data_hf(arrinput, arrlabel, config)\n\n return nx, ny", "def preprocess(self, dataset_iter, single_device=False):\n dataset_iter = map(self.as_example, dataset_iter)\n if not single_device:\n dataset_iter = self.shard(dataset_iter)\n return dataset_iter", "def test_data():\n batch_size = 10\n input_dim = 28\n test_data = np.random.rand(batch_size, input_dim)\n\n return test_data", "def extract(self):\n\n # print some infos about data\n print(\"\\n--extract batches from data:\\ntrain: {}\\nval: {}\\ntest: {}\\n\".format(self.data[0]['x'].shape, self.data[1]['x'].shape, self.data[2]['x'].shape))\n\n # create batches\n self.x_train, self.y_train, _ = self.create_batches(self.data[0], batch_size=self.batch_size)\n self.x_val, self.y_val, _ = self.create_batches(self.data[1], batch_size=self.batch_size_eval)\n self.x_test, self.y_test, _ = self.create_batches(self.data[2], batch_size=self.batch_size_eval)\n\n # my data\n if len(self.mfcc_data_files) == 4:\n self.x_my, self.y_my, self.z_my = self.create_batches(self.data[3], batch_size=1)", "def _split_train_tst(self):\n num_samples = self.Y.shape[0]\n mapper_file = self.checkpointer.get_mapper_file_location()\n if not self.checkpointer.is_mapper_checkpointed():\n print 'No mapper checkpoint found. Fresh loading in progress ...'\n # Now shuffle the data\n sample_id = range(num_samples)\n random.shuffle(sample_id)\n print 'Dumping the mapper shuffle for reuse.'\n Pickle.dump(sample_id, open(mapper_file, 'wb'))\n print 'Dump complete. Moving Forward...'\n else:\n print 'Mapper Checkpoint found... Reading from mapper dump'\n sample_id = Pickle.load(open(mapper_file, 'rb'))\n print 'Mapping unpickling complete.. 
Moving forward...'\n\n self.X_fwd = self.X_fwd[sample_id]\n self.X_bwd = self.X_bwd[sample_id]\n self.Y = self.Y[sample_id]\n # Now divide the data into test ans train set\n test_fraction = 0.01\n self.test_size = int(test_fraction * num_samples)\n self.train_size = num_samples - self.test_size\n # Forward review\n self.X_trn_fwd = self.X_fwd[0:self.train_size]\n self.X_tst_fwd = self.X_fwd[self.train_size:num_samples]\n # Backward review\n self.X_trn_bwd = self.X_bwd[0:self.train_size]\n self.X_tst_bwd = self.X_bwd[self.train_size:num_samples]\n # Summary\n self.Y_trn = self.Y[0:self.train_size]\n self.Y_tst = self.Y[self.train_size:num_samples]", "def get_preprocess(self) -> Dict:\n input_shape = get_input_shape(self.deploy_cfg)\n cfg = process_model_config(self.model_cfg, [''], input_shape)\n preprocess = cfg.data.test.pipeline\n return preprocess", "def _get_data_for_tests():\n X = np.random.randn(100, input_dim)\n Y = np.random.randn(100, output_dim)\n X_new = np.random.randn(100, input_dim)\n return X, X_new, Y", "def get_test(self, preprocess=False):\n return self._dataset(self._directory, 'images_background_small2', preprocess)", "def load_testing_data(self) -> List[np.ndarray]:\n input_data = self._load_set(config.TEST_DIR, False)\n return input_data", "def init_test_input_pipeline(self, config):\n\n print('Initiating test input pipelines')\n\n ######################\n # Calibrate parameters\n ######################\n\n # Update num classes in config\n config.num_classes = self.num_classes - len(self.ignored_labels)\n config.ignored_label_inds = [self.label_to_idx[ign_label] for ign_label in self.ignored_labels]\n\n # Update network model in config\n config.network_model = self.network_model\n\n # Update num classes in config\n\n if config.network_model == 'multi_segmentation':\n config.num_classes = self.num_parts\n elif config.network_model == 'segmentation':\n if self.ShapeNetPartType in self.label_names:\n config.num_classes = self.num_parts[self.name_to_label[self.ShapeNetPartType]]\n else:\n raise ValueError('Wrong object name given for ShapeNetPart single object segmentation')\n\n # Calibrate generators to batch_num\n self.batch_limit = self.calibrate_batches(config)\n\n # From config parameter, compute higher bound of neighbors number in a neighborhood\n hist_n = int(np.ceil(4 / 3 * np.pi * (config.density_parameter + 1) ** 3))\n\n # Initiate neighbors limit with higher bound\n self.neighborhood_limits = np.full(config.num_layers, hist_n, dtype=np.int32)\n\n # Calibrate max neighbors number\n self.calibrate_neighbors(config)\n\n ################################\n # Initiate tensorflow parameters\n ################################\n\n # Reset graph\n tf.reset_default_graph()\n\n # Set random seed (You also have to set it in network_architectures.weight_variable)\n #np.random.seed(42)\n #tf.set_random_seed(42)\n\n # Get generator and mapping function\n gen_function, gen_types, gen_shapes = self.get_batch_gen('test', config)\n map_func = self.get_tf_mapping(config)\n\n ##############\n # Test dataset\n ##############\n\n # Create batched dataset from generator\n self.test_data = tf.data.Dataset.from_generator(gen_function,\n gen_types,\n gen_shapes)\n\n self.test_data = self.test_data.map(map_func=map_func, num_parallel_calls=self.num_threads)\n\n # Prefetch data\n self.test_data = self.test_data.prefetch(10)\n\n #################\n # Common iterator\n #################\n\n # create a iterator of the correct shape and type\n iter = 
tf.data.Iterator.from_structure(self.test_data.output_types, self.test_data.output_shapes)\n self.flat_inputs = iter.get_next()\n\n # create the initialisation operations\n self.test_init_op = iter.make_initializer(self.test_data)", "def preproc_pipeline(data):\n # Preprocess\n data = preprocess(data)\n\n # Optional --> run a technical analysis on it and add more features\n data = generate_ta(data)\n \n # Split\n train_set, validation_set, test_set = train_val_test_split(data)\n \n # Set up for Keras\n train_set = shape_for_keras(train_set)\n validation_set = shape_for_keras(validation_set)\n test_set = shape_for_keras(test_set)\n\n # We could save this to csv.\n return train_set, validation_set, test_set", "def get_preprocessed_data(x_train, x_test, y_train, y_test):\n x_train = x_train.reshape(50000, 3072)\n x_test = x_test.reshape(10000, 3072)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n\n y_train = np_utils.to_categorical(y_train, num_classes)\n y_test = np_utils.to_categorical(y_test, num_classes)\n\n return x_train, x_test, y_train, y_test", "def _create_test_iterator(self):\n input_ids = tf.range(self.left_images.shape[0])\n dataset = tf.data.Dataset.from_tensor_slices(input_ids)\n # NOTE: Loads 1 sample, i.e. batch mode not implemented yet.\n dataset = dataset.map(self._test_parse_function)\n iterator = dataset.make_one_shot_iterator()\n\n return iterator", "def testGenerator(self,):\n return tf.data.Dataset.from_generator(self.testData, \\\n output_types=(tf.float32, tf.float32, tf.float32), \\\n output_shapes=(tf.TensorShape(self.config_model[\"input_shape\"]), \\\n tf.TensorShape(list(self.headoutput_shape[1:4]) + \\\n [len(self.anchor_boxes), \\\n 7+len(self.config_data[\"all_classes\"])]), \\\n tf.TensorShape([self.config_data[\"max_boxes_per_frame\"], 7]) \\\n ), )", "def load_data_preprocess(self):\n\n print(\"Loading the dataset ...\")\n # load the data\n c_util = CarUtils()\n train_x, train_y, test_x, test_y, classes = c_util.load_data()\n\n # set the image ordering\n K.set_image_dim_ordering(\"th\")\n\n print(\"Pre-processing the dataset ...\")\n # pre-process the data\n train_x = train_x.astype('float32')\n test_x = test_x.astype('float32')\n\n train_x = train_x / 255\n test_x = test_x / 255\n\n print(train_x.shape[0], ' train samples')\n print(test_x.shape[0], ' test samples')\n\n train_y = np_utils.to_categorical(train_y, CarsClassifierModel._nb_classes)\n test_y = np_utils.to_categorical(test_y, CarsClassifierModel._nb_classes)\n\n return train_x, train_y, test_x, test_y", "def preprocess_train_data(self):\r\n print(\"* Preprocessing training data.\", flush=True)\r\n prep.create_HDF_file(self.C.training_set, is_training_set=True)\r\n\r\n self.print_time_elapsed()", "def getData(trainSize):\r\n return splitData([getReal(), getFake()], trainSize=trainSize)", "def prepare_data(self, *args, **kwargs):\n # get paths to train and test splits\n _split_paths = [os.path.join(self.path_to_data, split)\n for split in os.listdir(self.path_to_data)]\n\n # for each split [train, test]\n for _path in _split_paths:\n _img_classes = os.listdir(_path) # get subfolders representing each class\n self.splits[os.path.basename(_path)] = []\n\n # get the images in pairs with its corresponding class\n for _class in _img_classes:\n _data = self.get_img_text_pair(os.path.join(_path, _class))\n\n if os.path.basename(_path) == 'train':\n self.weights[self.encode_label(_class)] = len(_data)\n 
self.splits[os.path.basename(_path)].extend(_data)", "def prepare_dataset(data_path, test_size=0.2, validation_size=0.2):\r\n\r\n # load dataset\r\n if data_path.endswith('json'):\r\n X, y = load_data_from_json(data_path)\r\n else:\r\n X, y = load_data_from_fold(data_path)\r\n # create train, validation, test split\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)\r\n X_train, X_validation, y_train, y_validation = train_test_split(X_train, y_train, test_size=validation_size)\r\n\r\n # add an axis to nd array\r\n X_train = X_train[..., np.newaxis]\r\n X_test = X_test[..., np.newaxis]\r\n X_validation = X_validation[..., np.newaxis]\r\n\r\n return X_train, y_train, X_validation, y_validation, X_test, y_test", "def run_testing_batch(self, session, batch):\n feed_dict = self.batch_to_feed(batch)\n feed_dict[self.use_dropout_placeholder] = 0.0\n fetches = [self.loss, self.predictions]\n loss, probabilities = session.run(fetches, feed_dict=feed_dict)\n return loss, probabilities", "def setUp(self):\n self.batch_size = 8\n num_keypoints = 16\n self.data_batch = []\n self.data_samples = []\n\n for i in range(self.batch_size):\n gt_instances = InstanceData()\n keypoints = np.zeros((1, num_keypoints, 2))\n keypoints[0, i] = [0.5 * i, 0.5 * i]\n gt_instances.keypoints = keypoints + 1.0\n gt_instances.keypoints_visible = np.ones(\n (1, num_keypoints, 1)).astype(bool)\n gt_instances.keypoints_visible[0, (2 * i) % 8, 0] = False\n gt_instances.bboxes = np.random.random((1, 4)) * 20 * i\n gt_instances.head_size = np.random.random((1, 1)) * 10 * i\n\n pred_instances = InstanceData()\n pred_instances.keypoints = keypoints\n\n data = {'inputs': None}\n data_sample = {\n 'gt_instances': gt_instances.to_dict(),\n 'pred_instances': pred_instances.to_dict(),\n }\n\n self.data_batch.append(data)\n self.data_samples.append(data_sample)", "def setUp(self):\n self.batch_size = 8\n num_keypoints = 15\n self.data_batch = []\n self.data_samples = []\n\n for i in range(self.batch_size):\n gt_instances = InstanceData()\n keypoints = np.zeros((1, num_keypoints, 2))\n keypoints[0, i] = [0.5 * i, 0.5 * i]\n gt_instances.keypoints = keypoints\n gt_instances.keypoints_visible = np.ones(\n (1, num_keypoints, 1)).astype(bool)\n gt_instances.keypoints_visible[0, (2 * i) % 8, 0] = False\n gt_instances.bboxes = np.random.random((1, 4)) * 20 * i\n gt_instances.head_size = np.random.random((1, 1)) * 10 * i\n\n pred_instances = InstanceData()\n pred_instances.keypoints = keypoints\n\n data = {'inputs': None}\n data_sample = {\n 'gt_instances': gt_instances.to_dict(),\n 'pred_instances': pred_instances.to_dict(),\n }\n\n self.data_batch.append(data)\n self.data_samples.append(data_sample)", "def setUp(self):\n self.batch_size = 8\n num_keypoints = 15\n self.data_batch = []\n self.data_samples = []\n\n for i in range(self.batch_size):\n gt_instances = InstanceData()\n keypoints = np.zeros((1, num_keypoints, 2))\n keypoints[0, i] = [0.5 * i, 0.5 * i]\n gt_instances.keypoints = keypoints\n gt_instances.keypoints_visible = np.ones(\n (1, num_keypoints, 1)).astype(bool)\n gt_instances.keypoints_visible[0, (2 * i) % 8, 0] = False\n gt_instances.bboxes = np.random.random((1, 4)) * 20 * i\n gt_instances.head_size = np.random.random((1, 1)) * 10 * i\n\n pred_instances = InstanceData()\n pred_instances.keypoints = keypoints\n\n data = {'inputs': None}\n data_sample = {\n 'gt_instances': gt_instances.to_dict(),\n 'pred_instances': pred_instances.to_dict(),\n }\n\n self.data_batch.append(data)\n 
self.data_samples.append(data_sample)", "def data(self, train=True, batch_size=2):\n if train:\n elements = self.prepare_batch(self.training_albums)\n else:\n elements = self.prepare_batch(self.validation_albums)\n\n while len(elements) > 0:\n # Collect the batch\n batch = []\n for _ in range(min(batch_size, len(elements))):\n batch.append(elements.pop())\n\n # Get same sequence size for all elements of the batch\n albums, labels = self.batchify(batch)\n yield albums, labels", "def preprocess_train_dataset(dataset):\n return (dataset\n # Shuffle according to the largest client dataset\n .shuffle(buffer_size=MAX_CLIENT_DATASET_SIZE)\n # Repeat to do multiple local epochs\n .repeat(CLIENT_EPOCHS_PER_ROUND)\n # Batch to a fixed client batch size\n .batch(CLIENT_BATCH_SIZE, drop_remainder=False)\n # Preprocessing step\n .map(reshape_emnist_element))", "def prepare_data(self):\n data = self._get_dataset(self.hparams.dataset_path)\n label_encoder = data[\"label_encoder\"]\n del data[\"label_encoder\"]\n\n click.secho(\"Building inputs and labels.\", fg=\"yellow\")\n datasets = {\n \"train\": defaultdict(list),\n \"valid\": defaultdict(list),\n \"test\": defaultdict(list),\n }\n for dataset_name, dataset in data.items():\n for sample in dataset:\n instance = self.build_input(\n self.tokenizer, sample[\"text\"], label_encoder, sample[\"label\"]\n )\n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n\n click.secho(\"Padding inputs and building tensors.\", fg=\"yellow\")\n tensor_datasets = {\"train\": [], \"valid\": [], \"test\": []}\n for dataset_name, dataset in datasets.items():\n dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)\n for input_name in MODEL_INPUTS:\n if input_name == \"labels\":\n tensor = torch.tensor(dataset[input_name], dtype=torch.float32)\n else:\n tensor = torch.tensor(dataset[input_name])\n tensor_datasets[dataset_name].append(tensor)\n\n self.train_dataset = TensorDataset(*tensor_datasets[\"train\"])\n self.valid_dataset = TensorDataset(*tensor_datasets[\"valid\"])\n self.test_dataset = TensorDataset(*tensor_datasets[\"test\"])\n click.secho(\n \"Train dataset (Batch, Candidates, Seq length): {}\".format(\n self.train_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Valid dataset (Batch, Candidates, Seq length): {}\".format(\n self.valid_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Test dataset (Batch, Candidates, Seq length): {}\".format(\n self.test_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )", "def split_data(self,test=False):\n shuffle_index = torch.randperm(self.train_target.shape[0])\n load = shuffle_index.shape[0]\n train_input_shuffle = self.train_input[shuffle_index]\n train_target_shuffle = self.train_target[shuffle_index]\n train_classes_shuffle = self.train_classes[shuffle_index]\n index_train = self.index_for_equal_class(train_target_shuffle[:load//2])\n train_input = train_input_shuffle[index_train]\n train_target = train_target_shuffle[index_train]\n train_classes = train_classes_shuffle[index_train]\n if not test:\n index_test = self.index_for_equal_class( train_target_shuffle[load//2:]) + load//2\n test_input = train_input_shuffle[index_test]\n test_target = train_target_shuffle[index_test]\n test_classes = train_classes_shuffle[index_test]\n else:\n index_test = self.index_for_equal_class(self.test_target)\n test_input = self.test_input[index_test]\n test_target = self.test_target[index_test]\n test_classes = 
self.test_classes[index_test]\n train_input, mean, std = normalize(train_input)\n test_input, _, _ = normalize(test_input,mean,std)\n return train_input, train_target, train_classes ,test_input ,test_target ,test_classes", "def test_example(self, example_dataset, expected_result):\n\n transformer = PreprocessFeatures()\n result = transformer.fit_transform(example_dataset)\n\n assert (result == expected_result).all()", "def _next_test(self):\n idx = self.it\n self.it = (self.it + 1) % self.n_examples\n\n if self.render_path:\n target_view = data_types.Views(\n rays=jax.tree_map(lambda r: r[idx], self.render_rays),)\n else:\n target_view = data_types.Views(\n rays=jax.tree_map(lambda r: r[idx], self.rays), rgb=self.images[idx])\n\n #--------------------------------------------------------------------------------------\n # Get the reference data\n batch_near_cam_idx = self.sorted_near_cam[idx]\n ref_images = self.train_images[batch_near_cam_idx]\n ref_images = ref_images.reshape(ref_images.shape[0], self.h, self.w, 3)\n\n ref_cameratoworld = self.train_camtoworlds[batch_near_cam_idx]\n ref_worldtocamera = self.train_worldtocamera[batch_near_cam_idx]\n\n #--------------------------------------------------------------------------------------\n # Replicate these so that they may be distributed onto several devices for\n # parallel computaion.\n l_devices = jax.local_device_count()\n reference_views = data_types.ReferenceViews(\n rgb=np.tile(ref_images, (l_devices, 1, 1, 1)),\n ref_worldtocamera=np.tile(ref_worldtocamera, (l_devices, 1, 1)),\n ref_cameratoworld=np.tile(ref_cameratoworld, (l_devices, 1, 1)),\n intrinsic_matrix=np.tile(self.intrinsic_matrix[None, :],\n (l_devices, 1, 1)),\n idx=np.tile(batch_near_cam_idx[None, :], (jax.local_device_count(), 1)),\n )\n\n return_batch = data_types.Batch(\n target_view=target_view, reference_views=reference_views)\n\n return return_batch", "def split_data(self):\n if not self.load_data:\n raise AttributeError('Preprocessor has not loaded any data.')\n \n # 3 - Find example counts for each set\n self.n_examples = self.data[0].shape[0]\n self.n_train = int(self.n_examples * self.train_ratio)\n self.n_val = int(self.n_examples * self.val_ratio)\n self.n_test = self.n_examples - self.n_train - self.n_val\n \n logger.info(f'Set sizes:')\n logger.info(f'train: {self.n_train}')\n logger.info(f'val: {self.n_val}')\n logger.info(f'test: {self.n_test}')\n if self.n_test < 0:\n raise ValueError('Train + validation ratios must bef < 1')\n\n # 4 - Separate data into train, test, val\n if isinstance(self.data[0], pd.DataFrame):\n logger.info('Dataset is in a dataframe.')\n self.isdataframe = True\n\n self.train_data = [self.data[0].iloc[:self.n_train],\n self.data[1].iloc[:self.n_train]]\n \n self.val_data = [self.data[0].iloc[self.n_train:self.n_val + self.n_train],\n self.data[1].iloc[self.n_train:self.n_val + self.n_train]]\n \n self.test_data = [self.data[0].iloc[self.n_val + self.n_train:],\n self.data[1].iloc[self.n_val + self.n_train:]]\n logger.info('Data was split into train, val, test.')\n else:\n self.isdataframe = False\n logger.info('Dataset is in a numpy array.')\n \n # If datasets are numpy array or sparse\n self.train_data = [self.data[0][:self.n_train],\n self.data[1][:self.n_train]]\n \n self.val_data = [self.data[0][self.n_train:self.n_val + self.n_train],\n self.data[1][self.n_train:self.n_val + self.n_train]]\n \n self.test_data = [self.data[0][self.n_val + self.n_train:],\n self.data[1][self.n_val + self.n_train:]]\n logger.info('Data was 
split into train, val, test.')\n \n assert(self.n_train == self.train_data[0].shape[0])\n assert(self.n_val == self.val_data[0].shape[0])\n assert(self.n_test == self.test_data[0].shape[0])\n \n # Free memory\n del self.data\n \n if self.save_sets:\n self.save_datasets()", "def test_generate_data_produces_examples_of_correct_shape(self):\n\n with TemporaryDirectory() as tmp_dir:\n mock_raw_data(tmp_dir, raw_dim=256, num_images=100)\n with TemporaryDirectory() as data_dir:\n for problem_obj in self.all_problems:\n problem_object = problem_obj()\n\n problem_object.generate_data(data_dir, tmp_dir)\n\n for mode in [Modes.TRAIN, Modes.EVAL]:\n\n dataset = problem_object.dataset(mode, data_dir)\n example = tfe.Iterator(dataset).next()\n\n num_channels = problem_object.num_channels\n\n # Check that the input tensor has the right shape\n input_dim = problem_object.input_dim\n self.assertEqual(example[\"inputs\"].numpy().shape,\n (input_dim, input_dim, num_channels))\n\n # Check that the targets tensor has the right shape\n output_dim = problem_object.output_dim\n self.assertEqual(example[\"targets\"].numpy().shape,\n (output_dim, output_dim, num_channels))", "def make_batch(self, batch_size):\n filenames = self.get_filenames()\n if self.mode == tf.estimator.ModeKeys.PREDICT and self.imagenet_train_predict_partial:\n # Sort and shuffle with seed to randomize deterministically.\n random.seed(self.imagenet_train_predict_shuffle_seed)\n random.shuffle(filenames)\n dataset = tf.contrib.data.TFRecordDataset(filenames)\n\n # Parse records.\n dataset = dataset.map(self.parser,\n num_threads=batch_size,\n output_buffer_size=2 * batch_size)\n\n # If training, shuffle and repeat indefinitely.\n if self.mode == tf.estimator.ModeKeys.TRAIN:\n dataset = dataset.shuffle(buffer_size=50000 + 3 * batch_size)\n dataset = dataset.repeat(-1)\n elif self.mode == tf.estimator.ModeKeys.PREDICT:\n if self.predict_split == 'train':\n if self.imagenet_train_predict_partial:\n MAX_EXAMPLES = 50000\n # Skip to start at a random spot in the first TFRecord.\n random.seed(self.imagenet_train_predict_shuffle_seed)\n skip_examples = random.randint(0, 1251)\n dataset = dataset.skip(skip_examples)\n # Continue shuffling amongst at least as many examples\n # as it could see in 3 cross validations.\n dataset.shuffle(buffer_size=3 * MAX_EXAMPLES,\n seed=self.imagenet_train_predict_shuffle_seed)\n num_examples = MAX_EXAMPLES\n else:\n # Take whole training set.\n num_examples = self.num_examples_per_epoch(tf.estimator.ModeKeys.TRAIN)\n else:\n # Take whole validation set.\n num_examples = self.num_examples_per_epoch(tf.estimator.ModeKeys.EVAL)\n # Take as much of the dataset as possible that can be evenly\n # divided by batch_size.\n while True:\n if num_examples % batch_size == 0:\n break\n else:\n num_examples -= 1\n dataset = dataset.take(num_examples)\n dataset = dataset.repeat(1)\n\n # dataset = dataset.take(1000) # For fast debugging!\n else:\n dataset = dataset.repeat(1)\n\n # Batch it up.\n dataset = dataset.batch(batch_size)\n iterator = dataset.make_one_shot_iterator()\n image_batch, label_batch = iterator.get_next()\n\n return image_batch, label_batch", "def test_reconstruct_source(setup_multidim_src):\n config = setup_multidim_src\n try:\n reconstruct_batch(config)\n except Exception as ex:\n pytest.fail(\"Exception thrown during reconstruction = \"+str(ex))", "def test_data_format(self, model, image_shape, model_setup_images_clf, pipeline):\n dataset, model_config = model_setup_images_clf('channels_first', 
image_shape=image_shape)\n config = {'model_class': model, 'model_config': model_config}\n test_pipeline = (pipeline << dataset) << config\n batch = test_pipeline.next_batch(2, n_epochs=None)\n\n assert len(batch) == 2", "def load_data_wrapper():\r\n\r\n train_data, valid_data, tst_data = load_data()\r\n ## calling the function load_data()\r\n ## will return a tuple with three values for train, validation and test data\r\n ## storing the tuple values in separate three variables\r\n\r\n ## training_data:\r\n training_inputs = [np.reshape(x, (784,1)) for x in train_data[0]]\r\n ## reshaping the training inputs to 784x1 vector\r\n ## the required format for our neural network's input layer\r\n ## ---\r\n training_results = [vectorized_result(y) for y in train_data[1]]\r\n ## calling vectorized_result() function(see below)\r\n ## will convert the digit value in 10-dimensional vector\r\n ## the required format for our neural network's output layer\r\n ## ---\r\n training_data = zip(training_inputs, training_results)\r\n ## zipping together the training_inputs and training_results\r\n\r\n ## validation_data:\r\n validation_inputs = [np.reshape(x, (784,1)) for x in valid_data[0]]\r\n ## reshaping the validation inputs to 784x1 vector\r\n ## ---\r\n validation_data = zip(validation_inputs, valid_data[1])\r\n ## zipping together the validation_inputs and it's corresponding outputs\r\n\r\n ## test_data:\r\n test_inputs = [np.reshape(x, (784,1)) for x in tst_data[0]]\r\n ## reshaping the test inputs to 784x1 vector\r\n ## ---\r\n test_data = zip(test_inputs, tst_data[1])\r\n ## zipping together the test_inputs and it's corresponding outputs\r\n\r\n return (training_data, validation_data, test_data)", "def prep_data(self):\n\n self.fit_tokenizer(texts=self.texts)\n sequences = self.get_sequences(self.texts)\n self.text_data = pad_sequences(sequences, maxlen=self.MAX_SEQUENCE_LENGTH)\n\n self.labels = to_categorical(np.asarray(self.labels))\n print('Shape of data tensor:', self.text_data.shape)\n print('Shape of label tensor:', self.labels.shape)\n\n # split the data into a training set and a validation set\n indices = np.arange(self.text_data.shape[0])\n np.random.shuffle(indices)\n self.text_data = self.text_data[indices]\n self.labels = self.labels[indices]\n nb_validation_samples = int(self.VALIDATION_SPLIT * self.text_data.shape[0])\n\n x_train = self.text_data[:-nb_validation_samples]\n y_train = self.labels[:-nb_validation_samples]\n x_val = self.text_data[-nb_validation_samples:]\n y_val = self.labels[-nb_validation_samples:]\n\n return x_train,y_train, x_val, y_val", "def next_simple_dataset(dataset, batch_size: int, datatype):\n while True:\n x_batch = []\n y_batch = []\n for i in range(batch_size):\n try:\n x, y, data_unit, index = create_xy(dataset, datatype)\n # x = normalize(x)\n x_batch.append(x)\n y_batch.append(y)\n except StopIteration:\n break\n x_batch, y_batch = np.array(x_batch), np.array(y_batch)\n if datatype != DataType.test:\n x_batch = SEQ_CVXTZ.augment_images(x_batch).astype(\"float32\")\n x_batch = np.array([normalize(x) for x in x_batch])\n # org_shape = x_batch.shape\n # org_width = x_batch.shape[1]\n # corner = int((org_width - ROI_IMAGE_SIZE) // 2)\n # print(f\"0: org_shape:{org_shape} x_batch:{x_batch.shape} corner:{corner}\")\n # x_batch = x_batch[:, corner:(org_width - corner), corner:(org_width - corner), :]\n # resized_x_batch = []\n # for x in x_batch:\n # img = Image.fromarray(np.uint8(x))\n # img = img.resize((IMAGE_SIZE, IMAGE_SIZE), Image.LANCZOS)\n # 
resized_x_batch.append(normalize(np.array(img)))\n # print(f\"1: org_shape:{org_shape} corner:{corner} x_batch:{x_batch.shape}\")\n # yield np.array(resized_x_batch), y_batch\n yield np.array(x_batch), y_batch", "def setUpClass(cls):\n cls.test_file_1 = \"/tmp/test_data_loader_dummy_1.pkl\"\n cls.test_file_2 = \"/tmp/test_data_loader_dummy_2.pkl\"\n cls.in_cols = [\"file\", \"id\", \"len\", \"seq\", \"phyche\", \"pssm\", \"logits\",\n \"ss\", \"h_0\", \"h_1\", \"h_2\", \"lm_logits\"]\n cls.out_cols = [\"dataset\", \"id\", \"len\", \"position\", \"amino\",\n \"phyche\", \"pssm\", \"logits\", \"ss\", \"h_0\", \"h_1\", \"h_2\",\n \"lm_logits\"]\n\n seq = np.array([[0., 0., 1.],\n [1., 0., 0.]])\n phyche = np.array([[0., 0.], # phyche\n [1., 0.]])\n pssm = np.array([[0., 0., .8], # pssm\n [.8, 0., 0.]])\n logits = np.array([[0.1, 0., 0.9], # logits\n [0.9, 0., 0.1]])\n ss = np.array([[0., 0., 1.], # ss\n [1., 0., 0.]])\n h_0 = np.array([[0., 0., 1., 0.],\n [1., 0., 0., 0.]])\n h_1 = np.array([[0., 0., 1., 0.],\n [1., 0., 0., 0.]])\n h_2 = np.array([[0., 0., 1., 0.], # h_2\n [1., 0., 0., 0.]])\n lm_logits = np.array([[0., 0., 1.], # lm_logits\n [1., 0., 0.]])\n\n ex_1_in = (\"dummy_train.tfrecords\", # file\n \"id1\", # id\n 2, # len\n seq,\n phyche,\n pssm,\n logits,\n ss,\n h_0,\n h_1,\n h_2,\n lm_logits,\n )\n ex_1_out = [tuple([\"train\", ex_1_in[1], ex_1_in[2], j] + [ex_1_in[i][j, :] for i in range(3, len(ex_1_in))]) for j in range(2)]\n\n in_df = pd.DataFrame.from_records(data=[ex_1_in], columns=cls.in_cols)\n # write to file\n in_df.to_pickle(cls.test_file_1)\n\n cls.out_df = pd.DataFrame.from_records(data=ex_1_out, columns=cls.out_cols)", "def prepare_data(self):\n # Set up the path\n self.path_target_train = os.path.join(self.data_dir, self.train_path_file_target + \".pkl\")\n self.path_target_test = os.path.join(self.data_dir, self.test_path_file_target + \".pkl\")\n\n if not os.path.exists(self.path_target_train) or not os.path.exists(self.path_target_test):\n # Create vocabularies of the appropriate sizes.\n self.create_vocabulary(self.train_path_file)\n\n # Create token ids for the training data.\n input_train_path = self.train_path_file\n target_train_path = self.train_path_file_target\n train_input, train_input_length, train_labels = self.data_to_token_ids(input_train_path, target_train_path)\n\n # Create token ids for the validation data.\n input_test_path = self.test_path_file\n target_test_path = self.test_path_file_target\n test_input, test_input_length, _ = self.data_to_token_ids(input_test_path, target_test_path, train=False)\n\n # Collect data into a list\n training_data = [train_input, train_input_length, train_labels]\n test_data = [test_input, test_input_length]\n\n # Save all the data\n with open(self.path_target_train, 'wb') as f:\n pickle.dump(training_data,f)\n with open(self.path_target_test, 'wb') as f:\n pickle.dump(test_data, f)\n else:\n # Load data\n with open(self.path_target_train, 'rb') as f:\n training_data = pickle.load(f)\n with open(self.path_target_test, 'rb') as f:\n test_data = pickle.load(f)\n\n # Initialize vocabulary\n self.initialize_vocabulary()\n\n # Convert list into a numpy array - train data\n train_input = pd.DataFrame(training_data[0]).fillna(value=0).astype(int).values\n train_length_input = np.array(training_data[1], dtype=int)\n train_labels = np.array(training_data[2], dtype=int)\n\n # Convert list into a numpy array - test data\n test_input = pd.DataFrame(test_data[0]).fillna(value=0).astype(int).values\n test_length_input = 
pd.DataFrame(test_data[1]).fillna(value=0).astype(int).values\n\n # Printing maximum length\n print(\"Shape of the input training matrix {}\".format(str(train_input.shape)))\n print(\"Shape of the input test matrix {}\".format(str(test_input.shape)))\n\n # Copy the files\n self.copy_files()\n\n # Return output\n return train_input, train_length_input, train_labels, test_input, test_length_input", "def prepare_data(src, dst):\n\n data_prefix = 'miniCelebA_'\n for split in ['train', 'val', 'test']:\n print('processing %s split' % split)\n if (not os.path.exists(os.path.join(dst, 'x_' + split + '.npy')) or not\n os.path.exists(os.path.join(dst, 'y_' + split + '.npy'))):\n labels = glob(os.path.join(src, split, '*'))\n no_sample = 0\n for lb in labels:\n no_sample += len(os.listdir(lb))\n\n x = np.zeros((no_sample, 224, 224, 3))\n y = np.zeros((no_sample, 20))\n count = 0\n for lb in labels:\n files = glob(os.path.join(lb, '*.png'))\n for f in files:\n print('processing file: %s, with label %s' % (f, lb.split('/')[-1]))\n y[count] = to_categorical(int(lb.split('/')[-1]), 20)\n img = misc.imresize(misc.imread(f), (224, 224), 'bicubic')\n if img.ndim == 2:\n img = np.expand_dims(img, -1)\n img = np.concatenate((img, img, img), axis=-1)\n x[count] = img\n\n count += 1\n\n assert count == no_sample, \"number of sample (%d) is different than number of read image (%d)\" % (\n no_sample, count)\n\n x = get_deep_feature(x)\n np.save(os.path.join(dst, data_prefix + 'x_' + split + '.npy'), x)\n np.save(os.path.join(dst, data_prefix + 'y_' + split + '.npy'), y)", "def generate_train_test(self):\n x, y = self.read_data()\n x_train, y_train, x_test, y_test = self.sample_data(x, y)\n self.train = (x_train, y_train)\n self.test = (x_test, y_test)", "def test_cached_dataloader(self):\n\n v = [\"data\", \"target\", \"model_out_sqnet\"]\n\n for data, target in self.train_loader:\n b, c, h, w = data[v[0]].shape\n assert data[v[1]].shape == (b, )\n assert data[v[2]].shape == (b, 100)\n assert data[v[1]].shape == target.shape", "def _data_preproc(self, X, y, X_test, y_test=None):\n \n X = np.array(X)\n y = np.array(y)\n X_test = np.array(X_test)\n y_test = np.array(y_test) \n\n # y need to be a column:\n if y.shape == y.flatten().shape:\n y = y.reshape(-1, 1)\n\n # Scale the data\n stda = StandardScaler()\n stda.fit(np.vstack([X, X_test]))\n\n X_test = stda.transform(X_test)\n X = stda.transform(X)\n\n # Stack target to X (train)\n X = np.column_stack((y, X))\n\n # Stack id to X_test\n #X_test = np.column_stack((ids, X_test))\n\n # Export to txt files (, del.)\n np.savetxt(self._train_file, X, delimiter=\",\", fmt='%.5f')\n np.savetxt(self._test_file, X_test, delimiter=\",\", fmt='%.5f')", "def getTrainingData(self):\n raise NotImplementedError", "def preprocess(\n self,\n dataset: Union[str, dict, pd.DataFrame] = None,\n training_set: Union[str, dict, pd.DataFrame] = None,\n validation_set: Union[str, dict, pd.DataFrame] = None,\n test_set: Union[str, dict, pd.DataFrame] = None,\n training_set_metadata: Union[str, dict] = None,\n data_format: str = None,\n skip_save_processed_input: bool = True,\n random_seed: int = default_random_seed,\n **kwargs,\n ) -> PreprocessedDataset:\n print_boxed(\"PREPROCESSING\")\n\n for callback in self.callbacks:\n callback.on_preprocess_start(self.config_obj.to_dict())\n\n preprocessing_params = get_preprocessing_params(self.config_obj)\n\n proc_training_set = proc_validation_set = proc_test_set = None\n try:\n with provision_preprocessing_workers(self.backend):\n # TODO (Connor): 
Refactor to use self.config_obj\n preprocessed_data = preprocess_for_training(\n self.config_obj.to_dict(),\n dataset=dataset,\n training_set=training_set,\n validation_set=validation_set,\n test_set=test_set,\n training_set_metadata=training_set_metadata,\n data_format=data_format,\n skip_save_processed_input=skip_save_processed_input,\n preprocessing_params=preprocessing_params,\n backend=self.backend,\n random_seed=random_seed,\n callbacks=self.callbacks,\n )\n\n (proc_training_set, proc_validation_set, proc_test_set, training_set_metadata) = preprocessed_data\n\n return PreprocessedDataset(proc_training_set, proc_validation_set, proc_test_set, training_set_metadata)\n except Exception as e:\n raise RuntimeError(f\"Caught exception during model preprocessing: {str(e)}\") from e\n finally:\n for callback in self.callbacks:\n callback.on_preprocess_end(proc_training_set, proc_validation_set, proc_test_set, training_set_metadata)", "def preprocess_valid_data(self):\r\n print(\"* Preprocessing validation data.\", flush=True)\r\n prep.create_HDF_file(self.C.validation_set)\r\n\r\n self.print_time_elapsed()", "def preprocess_data(X):\n # NOTE: # If you have conducted any pre-processing on the image,\n # please implement this function to apply onto test images.\n return X", "def getTestData(self):\n raise NotImplementedError", "def prepare_data(self):\n try:\n self.train_dataset = self.datasets['train']\n self.val_dataset = self.datasets['val']\n try:\n self.test_dataset = self.datasets['test']\n except:\n pass\n except Exception as e:\n print('Data was not succesfully prepared:', e)", "def test(self, max_batch_size=16):\n\n print(\"Creating predictions for submission ...\")\n time_start_total = time.time()\n\n # pass batch size to test loader\n self.test_loader.batch_size = max_batch_size\n\n pred_list = []\n id_list = []\n\n self.net.train(mode=False)\n # test loop\n for _, (single, pairs, type_data, id_data,\n batch_size, size) in enumerate(self.test_loader):\n\n # get prediction\n with torch.no_grad():\n pred = self.net(single, pairs, batch_size, size)\n\n # multiply with std and add mean from train set\n for i in range(8):\n pred[type_data==i] *= self.std[i]\n pred[type_data==i] += self.mean[i]\n\n pred_list.append(pred.cpu().flatten())\n id_list.append(id_data.flatten())\n\n self._create_submission_file(pred_list, id_list)\n\n # print total time\n dt = time.time()-time_start_total\n print(\"\\ntotal time: \"+utils.get_formatted_time(dt))", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()", "def get_batch(batch_data, config):\n N = len(batch_data['obs_traj_rel'])\n P = config.P\n OF = config.flow_size\n T_in = config.obs_len\n T_pred = config.pred_len\n\n returned_inputs = []\n traj_obs_gt = np.zeros([N, T_in, P], dtype='float32')\n traj_pred_gt = np.zeros([N, T_pred, P], dtype='float32')\n # --- xy input\n for i, (obs_data, pred_data) in enumerate(zip(batch_data['obs_traj_rel'],\n batch_data['pred_traj_rel'])):\n for j, xy in enumerate(obs_data):\n traj_obs_gt[i, j, :] = xy\n for j, xy in enumerate(pred_data):\n traj_pred_gt[i, j, :] = xy\n returned_inputs.append(traj_obs_gt)\n # ------------------------------------------------------\n # Social component (through optical flow)\n if config.add_social:\n obs_flow = np.zeros((N, T_in, OF),dtype ='float32')\n # each batch\n for i, flow_seq in enumerate(batch_data['obs_optical_flow']):\n for j , flow_step in enumerate(flow_seq):\n obs_flow[i,j,:] = 
flow_step\n returned_inputs.append(obs_flow)\n # -----------------------------------------------------------\n # Person pose input\n if config.add_kp:\n obs_kp = np.zeros((N, T_in, KP, 2), dtype='float32')\n # each bacth\n for i, obs_kp_rel in enumerate(batch_data['obs_kp_rel']):\n for j, obs_kp_step in enumerate(obs_kp_rel):\n obs_kp[i, j, :, :] = obs_kp_step\n return returned_inputs,traj_pred_gt", "def load_data_wrapper():\r\n \r\n global training_inputs, training_results\r\n global validation_inputs, validation_results\r\n global test_inputs, test_results\r\n global num_samples, numpixels, num_test_samples\r\n \r\n tr_d, va_d, te_d = load_data()\r\n \r\n num_samples=len(tr_d[0])\r\n training_inputs=zeros([num_samples,numpixels])\r\n training_results=zeros([num_samples,10]) \r\n for j in range(num_samples):\r\n training_inputs[j,:] = reshape(tr_d[0][j], (numpixels))\r\n training_results[j,:] = vectorized_result(tr_d[1][j])\r\n# validation_inputs = [reshape(x, (numpixels)) for x in va_d[0]]\r\n# validation_results = [vectorized_result(y) for y in va_d[1]]\r\n\r\n num_test_samples=len(te_d[0])\r\n test_inputs=zeros([num_test_samples,numpixels])\r\n test_results=zeros([num_test_samples,10]) \r\n for j in range(num_test_samples):\r\n test_inputs[j,:] = reshape(te_d[0][j], (numpixels))\r\n test_results[j,:] = vectorized_result(te_d[1][j])", "def preprocess(self, question_dir, unlabeled_set, training_set, vocab_size, max_example=100,\n use_chars=True, only_test_run=False):\n # Define files to be used\n dataset_files = {\"training\": \"train-p\" + training_set,\n \"validation\": \"dev-v1.1\",\n \"test\": \"test-p0.1\"}\n # Define vocabulary source file\n vocab_filename = \"vocab_training\" + training_set + \"+\" + \\\n \"unlabeled_\" + unlabeled_set + \".bin\"\n vocab_f = os.path.join(question_dir, vocab_filename)\n\n # Generate or load dictionaries\n raw_file_dir = '/scratch/s161027/ga_reader_data/ssqa'\n word_dictionary, char_dictionary, num_entities = \\\n self.make_dictionary(dataset_files, raw_file_dir, vocab_size,\n vocab_file=vocab_f)\n dictionary = (word_dictionary, char_dictionary)\n\n # Check for the existence of parsed data in binary files\n # Load them if they exist. Otherwise generate and save new ones.\n print(\"Preparing data...\")\n\n start_time = time.time()\n loaded_dataset = []\n for data_role in dataset_files.keys():\n if data_role == \"training\":\n path = os.path.join(question_dir, dataset_files[data_role])+\".bin\"\n else:\n path = os.path.join(question_dir, dataset_files[data_role]) + \\\n \"_train\" + training_set + \".bin\"\n\n # Parse original json file and save results in binary, then load with pickle\n if not os.path.exists(path):\n print(\"Can't find binary data for {} set. 
Parsing raw data...\".format(data_role))\n self.json_parser(raw_file_dir, dataset_files[data_role],\n dictionary, training_set, use_chars)\n\n print(\"Loading binary data for {} set\".format(data_role))\n infile = open(path, \"rb\")\n # loaded_data = pickle.load(infile)\n loaded_data = msgpack.unpack(infile)\n\n loaded_dataset.append(loaded_data)\n\n training, validation, test = loaded_dataset\n unpack_time = time.time() - start_time\n print(\"Unpacking took {}\".format(unpack_time))\n\n if only_test_run:\n training = random.sample(training, max_example)\n validation = random.sample(validation, max_example)\n test = random.sample(test, max_example)\n # training = training[0:max_example]\n # validation = validation[0:max_example]\n # test = test[0:max_example]\n\n data = Data(dictionary, num_entities, training, validation, test)\n\n # Message about bad samples being removed.\n print(\"{} questions were removed due to bad formatting.\"\n .format(self.num_removed_questions))\n\n return data", "def split_data_into_train_and_test(raw_training_data):\n train_set, test_set = train_test_split(raw_training_data, test_size=0.2, random_state=42)\n return train_set, test_set", "def predict(self, test_batch_size=64, device='cuda', load=False, model_path=None, dataloader_num_workers=4, save_prediction=True):\n self.model.eval()\n self.device = device\n self.test_batch_size = test_batch_size\n if load:\n if model_path:\n self.load(model_path, device=self.device)\n else:\n model_path = os.path.join(path_checkpoints_dir, f\"{self.experiment_id}.pth\")\n print(f\"loaded model={model_path}\")\n self.load(model_path, device=self.device)\n if self.model is None:\n raise Exception(\"model cannot be None. Load or train the model before inference\")\n dataloader = self.data_module.get_test_dataloader(batch_size=self.test_batch_size, shuffle=False, num_workers=dataloader_num_workers)\n all_outputs = []\n tk0 = tqdm(enumerate(dataloader, 1), total=len(dataloader))\n for batch_id, data in tk0:\n for key, value in data.items():\n data[key] = value.to(self.device)\n # batch_outputs, batch_loss = self.model(**data)\n batch_outputs, batch_loss= self.validate_one_batch(data)\n all_outputs.append(batch_outputs.detach().cpu().numpy())\n predictions = np.concatenate(all_outputs, axis=0)\n if save_prediction:\n submission = pd.read_csv(path_sample_submission_file)\n assert submission.shape[0] == predictions.shape[0], \"unexpected behavior.code fix required\"\n submission.iloc[:, 1:] = predictions\n\n if not os.path.isdir(path_submissions_dir):\n os.mkdir(path_submissions_dir)\n submission.to_csv(os.path.join(path_submissions_dir, f\"{self.experiment_id}.csv\"), index=False)\n tk0.close()\n return predictions", "def prepare_train_validation(self) -> Tuple:\n Xt, Xv, Yt, Yv = self.dataset.train_test_split_representations()\n\n Xt = self.dataset.prepare_input_samples(Xt)\n Yt = self.dataset.prepare_output_samples(Yt)\n traindataset = tf.data.Dataset.from_tensor_slices((Xt, Yt))\n traindataset = traindataset.batch(\n self.batch_size,\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n Xv = self.dataset.prepare_input_samples(Xv)\n Yv = self.dataset.prepare_output_samples(Yv)\n validdataset = tf.data.Dataset.from_tensor_slices((Xv, Yv))\n validdataset = validdataset.batch(\n self.batch_size,\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n return traindataset, validdataset", "def test_data() -> Iterator[Tuple[Label, ChanneledImage]]:\n return zip(*get_data(TEST_FILES, 10000))", "def pre_process_data(self, 
all_labels, all_data):\n\n # [1] Normalizes data\n all_data = self.pre_precess_manager.normalization(all_data)\n\n data_train, data_test, label_train, label_test = train_test_split(all_data, all_labels, test_size=0.1,\n shuffle=True)\n\n return data_train, data_test, label_train, label_test", "def load_test_data():\r\n X_test = np.load('data/test/X_test.npy')\r\n scaling_test = np.load('data/test/scaling_test.npy')\r\n ids_test = np.load('data/test/ids_test.npy')\r\n y_test = np.load('data/test/y_test.npy')\r\n\r\n seed = np.random.randint(1, 10e6)\r\n np.random.seed(seed)\r\n np.random.shuffle(X_test)\r\n np.random.seed(seed)\r\n np.random.shuffle(scaling_test)\r\n np.random.seed(seed)\r\n np.random.shuffle(ids_test)\r\n np.random.seed(seed)\r\n np.random.shuffle(y_test)\r\n\r\n return X_test, scaling_test, ids_test, y_test", "def split_dataset(dataset, test_size):\n train_data = dataset.skip(test_size).shuffle(SHUFFLE_BUFFER_SIZE)\n train_data = train_data.padded_batch(BATCH_SIZE)\n \n test_data = dataset.take(test_size)\n test_data = test_data.padded_batch(BATCH_SIZE)\n \n return train_data, test_data", "def test_data(self):\n\n return self.__valid_data, self.__valid_labels", "def test_next_batch(self, batch_size):\n # Get next batch of image (path) and labels\n paths = self.test_images[self.test_pointer:self.test_pointer + batch_size]\n labels = self.test_labels[self.test_pointer:self.test_pointer + batch_size]\n # update train_pointer\n self.test_pointer += batch_size\n\n # Read images\n images = np.ndarray([batch_size, self.scale_size[0], self.scale_size[1], 3])\n for i in range(len(paths)):\n #print(paths[i])\n img = utils.load_image(paths[i])\n #img = cv2.imread(paths[i])\n # flip image at random if flag is selected\n if self.horizontal_flip and np.random.random() < 0.5:\n img = cv2.flip(img, 1)\n # rescale image\n #img = cv2.resize(img, (self.scale_size[0], self.scale_size[1]))\n #utils.load_image()\n #img = img.astype(np.float32)\n\n # subtract mean\n #img -= self.mean\n\n images[i] = img\n\n # Expand labels to one hot encoding\n one_hot_labels = np.zeros((batch_size, self.n_classes))\n for i in range(len(labels)):\n one_hot_labels[i][labels[i]] = 1\n\n # return array of images and labels\n return images, one_hot_labels", "def test_parsing(self):\n truth = self.generate_fake_pos()\n batch_size = 4\n records = []\n for i in range(batch_size):\n record = b''\n for j in range(2):\n record += self.v4_record(*truth)\n records.append(record)\n\n parser = ChunkParser(ChunkDataSrc(records),\n shuffle_size=1,\n workers=1,\n batch_size=batch_size)\n batchgen = parser.parse()\n data = next(batchgen)\n\n batch = (np.reshape(np.frombuffer(data[0], dtype=np.float32),\n (batch_size, 112, 64)),\n np.reshape(np.frombuffer(data[1], dtype=np.int32),\n (batch_size, 1858)),\n np.reshape(np.frombuffer(data[2], dtype=np.float32),\n (batch_size, 3)),\n np.reshape(np.frombuffer(data[3], dtype=np.float32),\n (batch_size, 3)))\n\n fltplanes = truth[1].astype(np.float32)\n fltplanes[5] /= 99\n for i in range(batch_size):\n data = (batch[0][i][:104],\n np.array([batch[0][i][j][0] for j in range(104, 111)]),\n batch[1][i], batch[2][i], batch[3][i])\n self.assertTrue((data[0] == truth[0]).all())\n self.assertTrue((data[1] == fltplanes).all())\n self.assertTrue((data[2] == truth[2]).all())\n scalar_win = data[3][0] - data[3][-1]\n self.assertTrue(np.abs(scalar_win - truth[3]) < 1e-6)\n scalar_q = data[4][0] - data[4][-1]\n self.assertTrue(np.abs(scalar_q - truth[4]) < 1e-6)\n\n parser.shutdown()", "def 
_prepare_ml_data(X, y, to_optimize=False):\n size_test = 1\n y_test = None\n if to_optimize:\n size_test = CONFIG.OPTIMIZE_PARAMS['size'] + 1\n y_test = y.iloc[-size_test:]\n X_train = X.iloc[:-size_test]\n y_train = y.iloc[:-size_test]\n X_test = X.iloc[-size_test:]\n return X_train, y_train, X_test, y_test", "def predict_and_evaluate(config, workdir, ckpt_path=None):\n logging.info('Starting testing at %s', workdir)\n tf.io.gfile.makedirs(workdir)\n\n rng = jax.random.PRNGKey(config.seed)\n # Build input pipeline.\n rng, data_rng = jax.random.split(rng)\n data_rng = jax.random.fold_in(data_rng, jax.process_index())\n test_ds = []\n for split in config.dataset.test_splits:\n ds = input_pipeline.create_val_dataset(\n config.dataset, split, config.dataset.test_per_device_batch_size,\n config.dataset.test_pad_last_batch)\n test_ds.append(ds)\n\n # Initialize model.\n inputs = train_utils.get_init_inputs(test_ds[0])\n rng, model_rng = jax.random.split(rng)\n predict_config = models.TransformerConfig(**config.model.to_dict())\n predict_config = predict_config.replace(decode=True)\n model = models.Model(predict_config)\n state = train_utils.create_train_state(\n model, config, model_rng, inputs=inputs)\n\n writer = metric_writers.create_default_writer(\n workdir, just_logging=jax.process_index() > 0)\n\n # Set up checkpointing of the model and the input pipeline.\n checkpoint_dir = os.path.join(workdir, 'checkpoints')\n ckpt = checkpoint.MultihostCheckpoint(checkpoint_dir, max_to_keep=3)\n\n logging.info('Testing and evaluating checkpoint %s', ckpt_path)\n try:\n state = ckpt.restore(state, ckpt_path)\n except FileNotFoundError:\n state = ckpt.restore_or_initialize(state)\n step = int(state.step)\n\n p_pred_step = jax.pmap(\n functools.partial(predict_step, config=predict_config),\n axis_name='batch',\n static_broadcasted_argnums=(3,))\n p_init_cache = jax.pmap(\n functools.partial(init_cache, config=predict_config), axis_name='batch')\n\n # Distribute testing.\n state = flax_utils.replicate(state)\n with metric_writers.ensure_flushes(writer):\n test_metrics = {}\n for ds, split in zip(test_ds, config.dataset.test_splits):\n ds_metrics = evaluate_sequence_accuracy(p_pred_step, p_init_cache, state,\n ds, config, split, workdir,\n config.num_test_steps)\n ds_metrics = {f'{k}_{split}': v for k, v in ds_metrics.items()}\n test_metrics.update(ds_metrics)\n writer.write_scalars(step, test_metrics)", "def load_data_wrapper():\n\ttr_d, te_d = load_data()\n\ttraining_inputs = [np.reshape(x, (4, 1)) for x in tr_d[0]]\n\ttraining_results = [vectorized_result(y) for y in tr_d[1]]\n\ttraining_data = zip(training_inputs, training_results)\n\ttest_inputs = [np.reshape(x, (4, 1)) for x in te_d[0]]\n\ttest_data = zip(test_inputs, te_d[1])\n\treturn (training_data, test_data)", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def set_data():\r\n #if not os.path.exists(filepath):\r\n #download_data()\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, train, test = {}, {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n\r\n train['x'], train['y'] = convert_train(data['ntraindata'], data['ndim'])\r\n\r\n testdata = read(filepath + 
flist[-2])\r\n test['x'] = testdata['data']\r\n test['y'] = testdata['labels']\r\n\r\n data['train'], data['test'] = train, test\r\n save_pkl(data)", "def test_data():\n global _MEAN # pylint: disable=global-statement\n _np.random.seed(1)\n view = _skdc10.view.OfficialImageClassificationTask()\n permutation = _np.random.permutation(range(10000))\n if _MEAN is None:\n _MEAN = view.train.x.reshape((50000 * 32 * 32, 3)).mean(axis=0)\n return ((view.test.x[:10000, :][permutation, :] - _MEAN).\n transpose((0, 3, 1, 2)).astype('float32'),\n view.test.y[:10000][permutation].reshape((10000, 1)).astype('float32'))", "def _prepare(self, data, train=True):\n if data is None:\n return None\n\n if hasattr(data, \"to_tfdataset\"):\n return data.to_tfdataset(train=train)\n else:\n return data", "def split_data(data, test_size):\r\n ntest = int(round(len(data) * (1 - test_size)))+1\r\n \r\n train, test = data[:ntest], data[ntest:]\r\n \r\n return train,test", "def predict(self):\n self.predicted_test_summary = []\n for step in xrange(0, self.test_size // self.test_batch_size):\n print 'Predicting Batch No.:', step\n offset = (step * self.test_batch_size) % self.test_size\n batch_data_fwd = self.X_tst_fwd[offset:(offset + self.test_batch_size), :].T\n batch_data_bwd = self.X_tst_bwd[offset:(offset + self.test_batch_size), :].T\n summary_test_out = self._predict_batch(batch_data_fwd, batch_data_bwd)\n self.predicted_test_summary.extend(summary_test_out)\n\n print 'Prediction Complete. Moving Forward..'\n\n # test answers\n self.test_review = self.X_tst_fwd\n self.predicted_test_summary = self.predicted_test_summary\n self.true_summary = self.Y_tst", "def test_setup(self, test_data: list=None):\n print(\"[dataset]: using test setup ...\")\n self.vocabulary = [\"empty\"]\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=\"bert\", test=True)\n return", "def prepare_test_data(args):\n image_dir = args.test_image_dir\n\n files = os.listdir(image_dir)\n files = [f for f in files if f.lower().endswith('.png')]\n\n img_ids = list(range(len(files)))\n img_files = []\n img_heights = []\n img_widths = []\n \n for f in files:\n img_path = os.path.join(image_dir, f)\n img_files.append(img_path)\n img = cv2.imread(img_path)\n img_heights.append(img.shape[0]) \n img_widths.append(img.shape[1]) \n\n print(\"Building the testing dataset...\")\n dataset = DataSet(img_ids, img_files, img_heights, img_widths)\n print(\"Dataset built.\")\n return dataset", "def test_model(all_data):\n nll = 0\n xent = 0\n step = 0\n for data in jsb.batch_iterator(all_data, FLAGS.batch_size, FLAGS.sequence_length):\n feed = fill_feed(inputs, targets, data)\n\n batch_xent, batch_nll = sess.run([loss_op, nll_op],\n feed_dict=feed)\n\n xent += batch_xent\n nll += batch_nll\n step += 1\n return xent/step, nll/step", "def _get_test_feed_dict(self, batch):\n raise NotImplementedError", "def input_setup(config):\n # Load data path, if is_train False, get test data\n data = load_data(config.is_train, config.test_img)\n \n # Make sub_input and sub_label, if is_train false more return nx, ny\n if config.is_train:\n \tsub_input_sequence, sub_label_sequence, nx, ny = make_sub_data_train(data, config)\n else:\n\tsub_input_sequence, sub_label_sequence, nx, ny = make_sub_data_test(data, config)\n\n\n # Make list to numpy array. 
With this transform\n arrinput = np.asarray(sub_input_sequence) # [?, 41, 41, 3]\n arrlabel = np.asarray(sub_label_sequence) # [?, 41, 41, 3]\n make_data_hf(arrinput, arrlabel, config)\n\n return nx, ny", "def get_test(self, preprocess=False):\n return self._dataset('test', self._directory, 'sharded_test_0shifted_affnist.tfrecords')", "def _evaluate_during_fit(self, test_loader, epoch):", "def refresh_test_dataset(self):\n inputs_id, inputs_starts, inputs_paths, inputs_ends, inputs_label = self.build_data(self.reader, self.test_items, self.option.max_path_length)\n self.test_dataset = CodeDataset(inputs_id, inputs_starts, inputs_paths, inputs_ends, inputs_label)", "def process_sample_train(self):\n raise NotImplementedError", "def data_split():\n x_train = np.load(os.path.join(svhn.DEFAULT_PREPROCESSED_DATA_FOLDER, svhn.TRAIN, \"images.npy\"))\n y_train = np.load(os.path.join(svhn.DEFAULT_PREPROCESSED_DATA_FOLDER, svhn.TRAIN, \"label.npy\"))\n x_test = np.load(os.path.join(svhn.DEFAULT_PREPROCESSED_DATA_FOLDER, svhn.TEST, \"images.npy\"))\n y_test = np.load(os.path.join(svhn.DEFAULT_PREPROCESSED_DATA_FOLDER, svhn.TEST, \"label.npy\"))\n\n print(x_train.shape)\n print(x_test.shape)\n\n img_rows, img_cols = x_train.shape[1], x_train.shape[2]\n num_classes = 10 # starts with 1 not 0\n\n y_test1 = y_test.reshape((y_test.shape[0],))\n y_test1 = [y - 1 for y in y_test1]\n\n y_train1 = y_train.reshape((y_train.shape[0],))\n y_train1 = [y - 1 for y in y_train1]\n\n input_shape = (img_rows, img_cols, 3)\n\n X_train = x_train.astype('float32')\n X_test = x_test.astype('float32')\n\n X_train /= 255\n X_test /= 255\n print('x_train shape:', X_train.shape)\n print(X_train.shape[0], 'train samples')\n print(X_test.shape[0], 'test samples')\n\n # convert class vectors to binary class matrices\n y_train2 = keras.utils.to_categorical(y_train1, num_classes)\n y_test2 = keras.utils.to_categorical(y_test1, num_classes)\n\n y_train2 = y_train2.astype('int32')\n y_test2 = y_test2.astype('int32')\n\n print(\n \"after process: X train shape: {}, X test shape: {}, y train shape: {}, y test shape: {}\".format(x_train.shape,\n x_test.shape,\n y_train2.shape,\n y_test2.shape))\n return input_shape, X_train, X_test, y_train2, y_test2", "def Test(self):\n print('Testing:')\n # set mode eval\n torch.cuda.empty_cache()\n self.network.eval()\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n \n ToTensor()\n ])\n dataset = Cityscapes(params.dataset_root, mode='test', transforms = transform)\n test_loader = DataLoader(dataset,\n batch_size=params.test_batch,\n shuffle=params.shuffle,\n num_workers=params.dataloader_workers)\n # prepare test data\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0\n test_size = 1124\n if test_size % self.params.test_batch != 0:\n total_batch = test_size // self.params.test_batch + 1\n else:\n total_batch = test_size // self.params.test_batch\n\n # test for one epoch\n for batch_idx, batch in enumerate(test_loader):\n self.pb.click(batch_idx, total_batch)\n image, label, name = batch['image'], batch['label'], batch['label_name']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n pred = image_cuda\n pred = pred.to(torch.device(\"cpu\"))\n pred = pred.detach()\n img_grid = pred[0]\n #img_grid = torchvision.utils.make_grid(out) \n img_grid = img_grid.numpy().transpose(1, 2, 0)*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original%d.jpg\" % batch_idx, img_grid)\n if self.params.should_split:\n 
image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one +TN\n IOU = IOU+ FN \n _,predict = torch.max(out.data,1)\n predict = predict.to(torch.device(\"cpu\"))\n predict = predict.detach()\n img = predict[0]\n img = img.numpy()*255\n #img_grid = torchvision.utils.make_grid(out) \n cv2.imwrite(\"/content/drive/My Drive/Test_images/predict_label%d.png\" % batch_idx, img)\n label = label_cuda.to(torch.device(\"cpu\"))\n label = label.detach()\n label = label[0].numpy()*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original_label%d.png\" % batch_idx, label)\n\n accuracy_final = accuracy(out, label_cuda)\n accuracy_new = accuracy_new + accuracy_final\n print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n print(\"\\t\")\n print(accuracy_new/total_batch)" ]
[ "0.6583765", "0.6574852", "0.6408957", "0.6383224", "0.63410145", "0.62234724", "0.6203767", "0.60811645", "0.60587937", "0.60369897", "0.60160315", "0.6011207", "0.6007375", "0.5988163", "0.5982726", "0.59650534", "0.5962204", "0.59511745", "0.59286445", "0.59174746", "0.59078795", "0.5892828", "0.58918947", "0.58858156", "0.5861628", "0.5845098", "0.58367646", "0.58309025", "0.58162403", "0.5806787", "0.58064085", "0.57806426", "0.5779768", "0.5776282", "0.5776282", "0.57576007", "0.5753815", "0.57487005", "0.5739508", "0.5738202", "0.57380944", "0.57307404", "0.57109016", "0.5688704", "0.5685362", "0.5675496", "0.56745464", "0.5672792", "0.5661827", "0.56586426", "0.56487185", "0.56461686", "0.5644783", "0.5640701", "0.5638662", "0.56319284", "0.562516", "0.5618993", "0.5615111", "0.56101686", "0.5600284", "0.55874264", "0.5577723", "0.5577723", "0.55765426", "0.55719525", "0.5571784", "0.5563611", "0.55620193", "0.5560034", "0.5557151", "0.5551295", "0.5550299", "0.5549466", "0.55384827", "0.5537077", "0.55350757", "0.5532058", "0.55301", "0.55291265", "0.5526271", "0.5526271", "0.5526271", "0.5526271", "0.55238813", "0.55227023", "0.5521827", "0.55185354", "0.5516368", "0.5516317", "0.5514739", "0.5513029", "0.5506069", "0.54955393", "0.5495118", "0.54914755", "0.54909647", "0.54895866", "0.54860884", "0.54819685" ]
0.56260467
56
This property returns the training data, loading it first if it has not been loaded yet. Note that this function returns the training data and labels in the form ([MPS input size, batch, other dimensions], [batch, classifications]), in accordance with how they are used in the MPS and MPSOptimizer classes. If the data is required in the form ([batch, MPS input size, other dimensions], [batch, classifications]), the variable _training_data should be used instead.
def training_data(self):
    if self._training_data is None:
        self._load_training_data()
    if self._swapped_training_data is None:
        self._swapped_training_data = {}
        for key, value in self._training_data.items():
            self._swapped_training_data[key] = value
    return self._swapped_training_data
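A minimal sketch of the two array layouts described above, assuming plain NumPy arrays; the names and sizes are illustrative assumptions rather than values taken from this record:

import numpy as np

# Hypothetical sizes: batch of 4 samples, MPS input size 10, 2 extra dimensions.
batch, mps_input_size, other = 4, 10, 2

# _training_data layout: [batch, MPS input size, other dimensions]
per_batch_first = np.zeros((batch, mps_input_size, other))

# training_data layout: [MPS input size, batch, other dimensions]
mps_axis_first = np.transpose(per_batch_first, (1, 0, 2))

print(per_batch_first.shape)  # (4, 10, 2)
print(mps_axis_first.shape)   # (10, 4, 2)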
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_training_data(self) -> Tuple[List[np.ndarray], np.ndarray]:\n return self._load_set(config.TRAIN_DIR, True)", "def getTrainingData(self):\n raise NotImplementedError", "def get_training_data() -> GraphDataset:\n _load_data_if_needed()\n return training_data", "def get_data_train(self):\n return self.get_data(self.file_train, self.batch)", "def _load_training_data(self):\n self._save_training_data()", "def train_data(self):\n\n return self.__train_data, self.__train_labels", "def load_data(self, training_data):\n \"\"\"training data format [(instance, label),(instance, label),...]\"\"\"\n self.training_data = training_data", "def build_training_data_loader(self) -> DataLoader:\n pass", "def train_data(self):\n return self._train_data", "def load_data_and_labels(self):\n gen = image.ImageDataGenerator()\n target_size = (224,224)\n if self.preprocess:\n print('Preprocessing data...')\n if not os.path.isdir(self.pproc_dir()):\n os.mkdir(self.pproc_dir())\n \n batch_arr = []\n for ld,segment in [(self.train_dir(), 'train'),\n (self.valid_dir(), 'valid')]:\n # TODO(ness): segment = os.basename(ld)\n flowgen = gen.flow_from_directory(\n ld,\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1)\n # Save the batches using method defined in utils.py\n data = np.concatenate([flowgen.next() for i in range(flowgen.n)])\n batches_dir = self.pproc_dir() + segment + '-bc'\n save_array(batches_dir, data)\n \n # Save the classes.\n cls_dir = self.pproc_dir() + segment + '-cl'\n save_array(cls_dir, flowgen.classes)\n \n batch_arr.append((data, flowgen.classes, flowgen.class_indices))\n \n # Set the data.\n self.training_data = batch_arr[0][0]\n self.validation_data = batch_arr[1][0]\n \n # Classes are zero-indexed and represent a category in\n # numerical form. So if the classes are 'dog' and 'cat',\n # the possible class values will be 0 and 1.\n self.trn_classes = batch_arr[0][1]\n self.val_classes = batch_arr[1][1]\n \n # Labels are the one-hot encoded (i.e. categorical)\n # version of the classes. In other words, if there are\n # 5 classes and an element belongs to class 2,\n # its label will be [0,0,1,0,0] (index 1).\n self.training_labels = to_categorical(batch_arr[0][1])\n self.validation_labels = to_categorical(batch_arr[1][1])\n \n # Class indices are dictionaries of the form\n # {'category_name': 0, 'category_name_2: 1}. They\n # make the mapping between numerical class indices and\n # a human-readable category name. They are (should be...)\n # the same for validation and training, so only load them\n # once, after sanity checking.\n self.cindices = batch_arr[0][2]\n print('Done preprocessing.')\n else:\n print('Loading data...')\n # Load the pre-saved data using methods defined in utils.py. See\n # preprocessing branch for the meaning of the data.\n self.training_data = load_array(self.pproc_dir() + 'train-bc')\n self.validation_data = load_array(self.pproc_dir() + 'valid-bc')\n self.trn_classes = load_array(self.pproc_dir() + 'train-cl')\n self.val_classes = load_array(self.pproc_dir() + 'valid-cl')\n self.training_labels = to_categorical(self.trn_classes)\n self.validation_labels = to_categorical(self.val_classes)\n \n # To get the class indices, we create the generator. 
It's cheap to\n # run since it doesn't actually load all the data.\n flowgen = gen.flow_from_directory(\n self.train_dir(),\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1) \n self.cindices = flowgen.class_indices\n print('Done loading.')", "def get_training_data(self):\n train_data = None\n \n if self.left_data is not None:\n train_data = self.left_data\n \n if self.right_data is not None:\n if train_data is not None:\n train_data = train_data.join(self.right_data)\n else:\n train_data = self.right_data\n \n return train_data", "def load_training_data_generator(self) -> Generator[Tuple[List[np.ndarray], np.ndarray], None, None]:\n return self._load_generator(config.TRAIN_DIR, True)", "def get_training_data(self):\n labels = self.get_labels()\n\n print 'Loading training data from ', self.train_folder , '...'\n train_index = []\n #train_ans = []\n train_text = []\n cnt = 0\n\n for f in listdir(self.train_folder):\n file_path = join(self.train_folder, f)\n if isfile(file_path):\n cnt += 1\n if cnt % 10000 == 0:\n print 'finished:', cnt # line counter\n #train_index.append(f[:-4])\n self.train_ans.append(labels[f[:-4]])\n with open(file_path, 'rb') as f:\n train_text.append( f.read() )\n\n return train_text", "def train(self, training_data):\n pass", "def prepare_data(self):\n # Set up the path\n self.path_target_train = os.path.join(self.data_dir, self.train_path_file_target + \".pkl\")\n self.path_target_test = os.path.join(self.data_dir, self.test_path_file_target + \".pkl\")\n\n if not os.path.exists(self.path_target_train) or not os.path.exists(self.path_target_test):\n # Create vocabularies of the appropriate sizes.\n self.create_vocabulary(self.train_path_file)\n\n # Create token ids for the training data.\n input_train_path = self.train_path_file\n target_train_path = self.train_path_file_target\n train_input, train_input_length, train_labels = self.data_to_token_ids(input_train_path, target_train_path)\n\n # Create token ids for the validation data.\n input_test_path = self.test_path_file\n target_test_path = self.test_path_file_target\n test_input, test_input_length, _ = self.data_to_token_ids(input_test_path, target_test_path, train=False)\n\n # Collect data into a list\n training_data = [train_input, train_input_length, train_labels]\n test_data = [test_input, test_input_length]\n\n # Save all the data\n with open(self.path_target_train, 'wb') as f:\n pickle.dump(training_data,f)\n with open(self.path_target_test, 'wb') as f:\n pickle.dump(test_data, f)\n else:\n # Load data\n with open(self.path_target_train, 'rb') as f:\n training_data = pickle.load(f)\n with open(self.path_target_test, 'rb') as f:\n test_data = pickle.load(f)\n\n # Initialize vocabulary\n self.initialize_vocabulary()\n\n # Convert list into a numpy array - train data\n train_input = pd.DataFrame(training_data[0]).fillna(value=0).astype(int).values\n train_length_input = np.array(training_data[1], dtype=int)\n train_labels = np.array(training_data[2], dtype=int)\n\n # Convert list into a numpy array - test data\n test_input = pd.DataFrame(test_data[0]).fillna(value=0).astype(int).values\n test_length_input = pd.DataFrame(test_data[1]).fillna(value=0).astype(int).values\n\n # Printing maximum length\n print(\"Shape of the input training matrix {}\".format(str(train_input.shape)))\n print(\"Shape of the input test matrix {}\".format(str(test_input.shape)))\n\n # Copy the files\n self.copy_files()\n\n # Return output\n return train_input, train_length_input, train_labels, test_input, 
test_length_input", "def get_data(self):\n if self.config['model'] == 'vggnet':\n if self.is_training:\n return self.data.shuffle(self.shuffle).batch(self.batch_size)\n elif self.is_testing:\n return self.data.batch(self.batch_size)\n elif not self.is_testing and not self.is_training:\n return self.data.batch(self.batch_size)\n else:\n raise NotImplementedError('In dataset.py: default input not specified for this model!')", "def get_train_data(self) -> Tuple[np.array, np.array, np.array]:\n train_data = []\n for season in self.__train_seasons:\n train_data.extend(self.__get_season_data(season, sys.maxsize, True))\n train_input = np.array([ExamDropEncoder.extract_features(sample, sys.maxsize) for sample in train_data])\n train_output = np.array([1.0 if get_is_mol(sample.selected_player) else 0.0 for sample in train_data])\n\n num_bins = self.get_num_bins(train_input, self.__max_splits)\n self.__discretizer = KBinsDiscretizer(n_bins = num_bins, encode = \"onehot-dense\",\n strategy = ExamDropExtractor.BIN_STRATEGY)\n train_input = self.__discretizer.fit_transform(train_input)\n train_input = self.__add_answered_on_feature(train_data, train_input)\n self.__anova_f_filter = SelectFpr(f_classif, alpha = self.__anova_f_significance)\n train_input = self.__anova_f_filter.fit_transform(train_input, train_output)\n self.__pca = PCA(n_components = self.__pca_explain)\n train_input = self.__pca.fit_transform(train_input)\n return train_input, train_output, self.__get_train_weights(train_data)", "def get_train(self, data_file):\r\n return self.read_data(data_file)", "def load_training_data(\n self,\n train_data_file=\"datasets/train_data.json\",\n test_data_file=\"datasets/test_data.json\",\n ):\n train_data = pd.read_json(train_data_file)\n test_data = pd.read_json(test_data_file)\n return train_data, test_data", "def _load_train_data(self):\n\n self.train_loader = data.Train_loader(self.N_max, self.n_per_conn,\n self.data_path, self.device)\n self.train_loader.load_data()\n\n # load mean and std\n scc_mean_std = np.loadtxt(\n os.path.join(self.data_path, 'scc_mean_std.csv'), delimiter=',')\n self.mean = torch.Tensor(scc_mean_std[0])\n self.std = torch.Tensor(scc_mean_std[1])", "def train(self, training_data, training_labels, validation_data, validation_labels):\n abstract", "def load(self):\n\n X_train, y_train, X_test, y_test, variable_types, name = _load_data(\n self.task_id)\n\n self.X_train = X_train\n self.y_train = y_train\n self.X_test = X_test\n self.y_test = y_test\n self.variable_types = variable_types\n self.name = name\n\n return self.X_train, self.y_train, self.X_test, self.y_test", "def _get_training_data(self) -> tuple:\n\n training_data = self._data.loc[self._data.target == 'train'].drop('target', axis=1)\n y = training_data.y_label.to_numpy()\n X = training_data.drop('y_label', axis=1).to_numpy()\n\n return X, y", "def train(self, trainingData, trainingLabels, validationData, validationLabels):\n self.trainingData = trainingData\n self.trainingLabels = trainingLabels", "def get_train(self, data_file):\n return self.read_data(data_file)", "def training_set(self):\n return self._training_set", "def train(self, batch_training=False):\n raise NotImplementedError", "def load_training_data(config):\n # Load data\n LOGGER.info(\"Loading training data.\")\n train_x = load_data(config['data_source'], config['train_x_filename'])\n train_y = load_data(config['data_source'], config['train_y_filename'])\n val_x = load_data(config['data_source'], config['val_x_filename'])\n val_y = 
load_data(config['data_source'], config['val_y_filename'])\n LOGGER.info(\"Training data size: %d\", len(train_x))\n LOGGER.info(\"Validation data size: %d\", len(val_x))\n\n # Build datasets and create iterators\n LOGGER.info(\"Building dataset.\")\n train_dataset = get_dataset(\n train_x, train_y, config['batch_size'], config['data_shape'],\n config['n_classes'], True)\n val_dataset = get_dataset(\n val_x, val_y, config['batch_size'], config['data_shape'],\n config['n_classes'])\n\n return train_dataset, val_dataset, len(val_x)", "def _load_processed_data(self):\n with open(os.path.join(self._data_root_path, self._processed_train_data_file_name),\n 'r') as f:\n train_examples = json.load(f)\n\n with open(os.path.join(self._data_root_path, self._processed_dev_data_file_name), 'r') as f:\n dev_examples = json.load(f)\n\n with open(os.path.join(self._data_root_path, self._word_vocab_file_name), 'r') as f:\n word_vocab = Vocab.from_json(json.load(f))\n\n with open(os.path.join(self._data_root_path, self._char_vocab_file_name), 'r') as f:\n char_vocab = Vocab.from_json(json.load(f))\n\n return train_examples, dev_examples, word_vocab, char_vocab", "def init_train(self):\n data = self.loader.load_labelled_data(self.conf.split, 'training')\n\n # Initialise unlabelled data iterator\n num_ul = 0\n if self.conf.ul_mix > 0:\n ul_data = self.loader.load_unlabelled_data(self.conf.split, 'all')\n\n # calculate number of unlabelled images as a proportion of the labelled images\n num_ul = int(data.size() * self.conf.ul_mix)\n num_ul = num_ul if num_ul <= ul_data.size() else ul_data.size()\n log.info('Sampling %d unlabelled images out of total %d.' % (num_ul, ul_data.size()))\n ul_data.sample(num_ul)\n self.gen_X_U = data_utils.generator(self.conf.batch_size, 'overflow', ul_data.images)\n\n # Initialise labelled data iterator\n assert self.conf.l_mix >= 0\n\n # calculate number of labelled images\n num_l = int(data.size() * self.conf.l_mix)\n num_l = num_l if num_l <= data.size() else data.size()\n log.info('Using %d labelled images out of total %d.' % (num_l, data.size()))\n train_images = data.images[:num_l]\n train_masks = data.masks[:num_l]\n\n self.conf.unlabelled_image_num = num_ul\n self.conf.labelled_image_num = num_l\n self.conf.data_len = num_ul if num_ul > num_l else num_l\n self.conf.batches = int(np.ceil(self.conf.data_len / self.conf.batch_size))\n self.conf.save()\n\n self.gen_X_L = data_utils.generator(self.conf.batch_size, 'overflow', train_images, train_masks)\n\n # Initialise real masks iterator for discriminator training, using the real masks from the data CV split.\n self.other_masks = data_utils.generator(self.conf.batch_size, 'overflow', data.masks + 0)", "def load_data(self):\n # make sure preprocessing is same as preprocessing as the network\n # reduce mean, and divide by a value to do scaling\n self.train_datagen = ImageDataGenerator(\n rescale=1./ 255,\n shear_range=0.05,\n rotation_range=20, # randomly rotate images in the range (degrees, 0 to 180)\n zoom_range=[0.9, 1.1], # Randomly zoom image\n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\n horizontal_flip=True, # randomly flip images\n brightness_range=[0.8, 1.2],\n fill_mode='reflect',\n validation_split=0.2)\n\n self.test_datagen = ImageDataGenerator(rescale=1. 
/ 255)\n\n self.train_generator = self.train_datagen.flow_from_directory(\n self.train_dir,\n target_size=(224, 224),\n shuffle=True,\n batch_size=self.batchsize,\n class_mode='categorical',\n subset=\"training\")\n\n self.validation_generator = self.train_datagen.flow_from_directory(\n self.train_dir,\n target_size=(224, 224),\n shuffle=True,\n batch_size=self.batchsize,\n class_mode='categorical',\n subset=\"validation\")\n\n self.test_generator = self.test_datagen.flow_from_directory(\n self.test_dir,\n target_size=(224, 224),\n shuffle=False,\n batch_size=1,\n class_mode='categorical')", "def _read_train_datas(self):\r\n with open(self.train_label_path, 'r') as fb:\r\n lines = fb.readlines()\r\n return self._parse_raw_labels(lines)", "def preprocess_train_data(self):\r\n print(\"* Preprocessing training data.\", flush=True)\r\n prep.create_HDF_file(self.C.training_set, is_training_set=True)\r\n\r\n self.print_time_elapsed()", "def training_documents(self):\n return self._training_documents", "def train(self, trainingData, trainingLabels, validationData, validationLabels):\n\n self.features = trainingData[0].keys() # this could be useful for your code later...\n\n if (self.automaticTuning):\n kgrid = [0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 20, 50]\n else:\n kgrid = [self.k]\n\n self.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, kgrid)", "def load_training(self):\n path = \"./training/\" + self.training + \".json\"\n\n data = {}\n\n with open(path, \"r\") as infile:\n data = json.load(infile)\n\n self.states = data[\"states\"]\n self.transitions = data[\"transitions\"]\n self.matrix = data[\"matrix\"]", "def _load_data(self):\n pickle_in = open(\"X_train.pickle\", \"rb\")\n self.X = pickle.load(pickle_in)\n pickle_in = open(\"y_train.pickle\", \"rb\")\n self.Y = pickle.load(pickle_in)\n\n pickle_in = open(\"X_test.pickle\", \"rb\")\n self.X_final = pickle.load(pickle_in)\n pickle_in = open(\"y_test.pickle\", \"rb\")\n self.Y_final = pickle.load(pickle_in)\n\n # Set input shape:\n if K.image_data_format() == 'channels_first':\n self.input_shape = (3, self.img_rows, self.img_cols)\n else:\n self.input_shape = (self.img_rows, self.img_cols, 3)\n\n self.X = self.X.astype('float32')\n self.X /= 255\n self.X_final = self.X_final.astype('float32')\n self.X_final /= 255\n print('X shape:', self.X.shape)\n print(self.X.shape[0], 'Samples')\n\n num_datapoints = 3000\n self.X = self.X[0:num_datapoints]\n self.Y = self.Y[0:num_datapoints]\n\n num_datapoints = 2000\n self.X_final = self.X_final[0:num_datapoints]\n self.Y_final = self.Y_final[0:num_datapoints]\n\n self.Y_final = to_categorical(self.Y_final, self.num_classes)\n\n # Initialize Data\n kfold = StratifiedKFold(n_splits=self.nFolds, shuffle=True)\n\n if self.b_eval_advanced:\n # Loop through the indices the split() method returns\n for index, (train_indices, test_indices) in enumerate(kfold.split(self.X, self.Y)):\n if index == 0:\n self.Y = to_categorical(self.Y, self.num_classes)\n\n # Generate batches from indices\n xtrain, xtest = self.X[train_indices], self.X[test_indices]\n ytrain, ytest = self.Y[train_indices], self.Y[test_indices]\n\n self.data.append(tuple([xtrain, xtest, ytrain, ytest]))\n\n if not self.b_eval_advanced:\n self.Y = to_categorical(self.Y, self.num_classes)\n\n #print(np.asarray(self.data).shape)\n #print(self.data)\n print(\"Y_final Shape\", self.Y_final.shape)", "def train(\n self, training_data: Dataset, validation_data: Optional[Dataset] = None\n ) -> Predictor:\n raise 
NotImplementedError", "def preprocess_train_data(self, training_data: \"TrainingData\"):\n\n label_id_dict = self._create_label_id_dict(\n training_data, attribute=INTENT_ATTRIBUTE\n )\n\n self.inverted_label_dict = {v: k for k, v in label_id_dict.items()}\n\n self._label_data = self._create_label_data(\n training_data, label_id_dict, attribute=INTENT_ATTRIBUTE\n )\n\n session_data = self._create_session_data(\n training_data.intent_examples,\n label_id_dict,\n label_attribute=INTENT_ATTRIBUTE,\n )\n\n self.check_input_dimension_consistency(session_data)\n\n return session_data", "def train(self, training_data, testData, classNum, batchSize):\n # find the numbers for feature and label\n featureNum = training_data.shape[1] - 1\n\n # #this will find all the unique labels automatically, but will have problem when training data is lacking some labels\n # labelNum = len(np.unique(training_data[:, :1]))\n labelNum = classNum\n\n # get the number of nodes for each layer\n if \"hidden_layer\" in self.params and self.params[\"hidden_layer\"] is not None:\n nodeNum = [featureNum] + self.params[\"hidden_layer\"] + [labelNum]\n else:\n nodeNum = [featureNum, featureNum * 2, labelNum]\n\n # get the mode for initializing the weight\n if \"weightInitMode\" in self.params and self.params[\"weightInitMode\"] is not None:\n weightInitMode = self.params[\"weightInitMode\"]\n else:\n weightInitMode = None\n\n # get the momentum factor\n if \"momentumFactor\" in self.params:\n momentumFactor = self.params[\"momentumFactor\"]\n else:\n momentumFactor = 0.0\n\n self.clf = NeuralNetwork(training_data, nodeNum, weightInitMode, momentumFactor)\n iteration = 5\n totalIter = 0\n testSize = 100000\n while iteration > 0:\n\n if iteration < 10:\n self.clf.train(iteration, batchSize)\n totalIter += iteration\n print \"---------- Settings ----------\"\n print \"Examples :\", training_data.shape[0]\n print \"Batch size :\", batchSize\n print \"Alpha :\", self.clf.getAlpha()\n print \"Momentum factor :\", momentumFactor\n print \"# of Nodes in all layers :\", nodeNum\n print \"Training iteration so far:\", totalIter\n self.file.write(\"\\n\")\n self.file.write(\"---------- Settings ----------\" + \"\\n\")\n self.file.write(\"Examples : \" + str(training_data.shape[0]) + \"\\n\")\n self.file.write(\"Batch size : \" + str(batchSize) + \"\\n\")\n self.file.write(\"Alpha : \" + str(self.clf.getAlpha()) + \"\\n\")\n self.file.write(\"Momentum factor : \" + str(momentumFactor) + \"\\n\")\n self.file.write(\"# of Nodes in all layers : \" + str(nodeNum) + \"\\n\")\n self.file.write(\"Training iteration so far: \" + str(totalIter) + \"\\n\")\n self.test(training_data, \"training\")\n self.test(testData, \"testing\")\n iteration = 0\n\n while iteration >= testSize:\n self.clf.train(testSize, batchSize)\n totalIter += testSize\n print \"---------- Settings ----------\"\n print \"Examples :\", training_data.shape[0]\n print \"Batch size :\", batchSize\n print \"Alpha :\", self.clf.getAlpha()\n print \"Momentum factor :\", momentumFactor\n print \"# of Nodes in all layers :\", nodeNum\n print \"Training iteration so far:\", totalIter\n self.file.write(\"\\n\")\n self.file.write(\"---------- Settings ----------\" + \"\\n\")\n self.file.write(\"Examples : \" + str(training_data.shape[0]) + \"\\n\")\n self.file.write(\"Batch size : \" + str(batchSize) + \"\\n\")\n self.file.write(\"Alpha : \" + str(self.clf.getAlpha()) + \"\\n\")\n self.file.write(\"Momentum factor : \" + str(momentumFactor) + \"\\n\")\n self.file.write(\"# of Nodes in all 
layers : \" + str(nodeNum) + \"\\n\")\n self.file.write(\"Training iteration so far: \" + str(totalIter) + \"\\n\")\n self.test(training_data, \"training\")\n self.test(testData, \"testing\")\n iteration -= testSize\n\n if iteration > 0:\n self.clf.train(iteration, batchSize)\n totalIter += iteration\n print \"---------- Settings ----------\"\n print \"Examples :\", training_data.shape[0]\n print \"Batch size :\", batchSize\n print \"Alpha :\", self.clf.getAlpha()\n print \"Momentum factor :\", momentumFactor\n print \"# of Nodes in all layers :\", nodeNum\n print \"Training iteration so far:\", totalIter\n self.file.write(\"\\n\")\n self.file.write(\"---------- Settings ----------\" + \"\\n\")\n self.file.write(\"Examples : \" + str(training_data.shape[0]) + \"\\n\")\n self.file.write(\"Batch size : \" + str(batchSize) + \"\\n\")\n self.file.write(\"Alpha : \" + str(self.clf.getAlpha()) + \"\\n\")\n self.file.write(\"Momentum factor : \" + str(momentumFactor) + \"\\n\")\n self.file.write(\"# of Nodes in all layers : \" + str(nodeNum) + \"\\n\")\n self.file.write(\"Training iteration so far: \" + str(totalIter) + \"\\n\")\n self.test(training_data, \"training\")\n self.test(testData, \"testing\")\n iteration = 0\n\n print \"\"\n restart = raw_input(\"Do you want to restart? (Y/N)\")\n if restart.upper() == \"Y\":\n totalIter = 0\n print \"Current Alpha is\", self.clf.getAlpha()\n alpha = raw_input(\"What alpha ?\")\n self.clf.setAlpha(float(alpha))\n self.clf.initTheta()\n self.file.write(\"\\n\")\n self.file.write(\"*****************************************************\\n\")\n self.file.write(\"Re-initialize trail with alpha = \" + str(alpha) + \"\\n\")\n self.file.write(\"*****************************************************\\n\")\n\n print \"\"\n iteration = raw_input(\"How many iteration do you want to train the model?\")\n try:\n iteration = int(iteration)\n except:\n iteration = raw_input(\"Please input an integer\")\n iteration = 1\n print \"Total training iterations:\", totalIter", "def set_training_data(self, *, inputs: Inputs, outputs: Outputs) -> None:\n\n X = inputs.astype(np.float32).values\n X = (X - X.mean(0, keepdims=True)) / X.std(0, keepdims=True)\n\n idx_labeled = np.where(outputs.values != \"\")[0]\n idx_unlabeled = np.where(outputs.values == \"\")[0]\n X_labeled = X[idx_labeled]\n X_unlabeled = X[idx_unlabeled]\n\n y_labeled = outputs.values[idx_labeled].flatten()\n self.label_encoder = LabelEncoder()\n y_labeled = self.label_encoder.fit_transform(y_labeled)\n n_class = len(self.label_encoder.classes_)\n\n self.mlp_model = Mlp(X.shape[1], n_class, 128)\n self.labeled_loader, self.unlabeled_loader = self._create_datasets(\n X_labeled, y_labeled, X_unlabeled\n )\n\n self.output_column = outputs.columns[0]\n self._is_fit = False", "def prep_data(self):\n\n self.fit_tokenizer(texts=self.texts)\n sequences = self.get_sequences(self.texts)\n self.text_data = pad_sequences(sequences, maxlen=self.MAX_SEQUENCE_LENGTH)\n\n self.labels = to_categorical(np.asarray(self.labels))\n print('Shape of data tensor:', self.text_data.shape)\n print('Shape of label tensor:', self.labels.shape)\n\n # split the data into a training set and a validation set\n indices = np.arange(self.text_data.shape[0])\n np.random.shuffle(indices)\n self.text_data = self.text_data[indices]\n self.labels = self.labels[indices]\n nb_validation_samples = int(self.VALIDATION_SPLIT * self.text_data.shape[0])\n\n x_train = self.text_data[:-nb_validation_samples]\n y_train = self.labels[:-nb_validation_samples]\n 
x_val = self.text_data[-nb_validation_samples:]\n y_val = self.labels[-nb_validation_samples:]\n\n return x_train,y_train, x_val, y_val", "def _verify_training_data(self):\n if self.training_fluxes.shape != self.training_flux_uncertainties.shape:\n raise ValueError(\n \"the training flux and uncertainty arrays should \"\n \"have the same shape\")\n\n if len(self.training_labels) == 0 \\\n or self.training_labels.dtype.names is None:\n raise ValueError(\"no named labels provided for the training set\")\n\n if len(self.training_labels) != self.training_fluxes.shape[0]:\n raise ValueError(\n \"the first axes of the training flux array should \"\n \"have the same shape as the nuber of rows in the label table \"\n \"(N_stars, N_pixels)\")\n\n if self.dispersion is not None:\n dispersion = np.atleast_1d(self.dispersion).flatten()\n if dispersion.size != self.training_fluxes.shape[1]:\n raise ValueError(\n \"mis-match between the number of wavelength \"\n \"points ({0}) and flux values ({1})\".format(\n self.training_fluxes.shape[1], dispersion.size))\n return None", "def training_info(self):\n pass", "def training(self):\r\n self.model, self.voc = svm_clf_training('all', self.dataset)\r\n return 0", "def get_classification_training_data() -> Iterable[Tuple[str, Dict[str, Any]]]:\n return (_create_training_entry(*pair) for pair in TRAINING_DATA) # type: ignore", "def _load_data(self):\n self.mapper = Mapper()\n self.mapper.generate_vocabulary(self.review_summary_file)\n self.X_fwd, self.X_bwd, self.Y = self.mapper.get_tensor(reverseflag=True)\n # Store all the mapper values in a dict for later recovery\n self.mapper_dict = dict()\n self.mapper_dict['seq_length'] = self.mapper.get_seq_length()\n self.mapper_dict['vocab_size'] = self.mapper.get_vocabulary_size()\n self.mapper_dict['rev_map'] = self.mapper.get_reverse_map()\n # Split into test and train data\n self._split_train_tst()", "def get_data():\r\n if not path_validation(MODEL_PATH, read_access=True):\r\n exit(0) \r\n if not path_validation(TEST_DATA_PATH, read_access=True):\r\n exit(0) \r\n if not path_validation(TEST_LABEL_PATH, read_access=True):\r\n exit(0) \r\n\r\n params = joblib.load(MODEL_PATH)\r\n test_images = np.load(TEST_DATA_PATH)\r\n test_labels = np.load(TEST_LABEL_PATH)\r\n\r\n # Addition of bias in test set\r\n test_images = np.insert(test_images, 0, 1, axis=1)\r\n\r\n return params, test_images, test_labels", "def set_train_data(self):\n files_per_worker = len(self.train_list) // self.num_workers\n files_for_this_worker = self.train_list[ \n self.worker_id*files_per_worker : (self.worker_id+1)*files_per_worker ]\n # The worker takes an extra file if needed\n if self.worker_id < len(self.train_list) % self.num_workers:\n files_for_this_worker.append(self.train_list[ self.num_workers*files_per_worker + self.worker_id ])\n print \"Files for worker %d:\" % self.comm_block.Get_rank()\n for f in files_for_this_worker:\n print \" %s\" % f\n self.data.file_names = files_for_this_worker", "def load_data_preprocess(self):\n\n print(\"Loading the dataset ...\")\n # load the data\n c_util = CarUtils()\n train_x, train_y, test_x, test_y, classes = c_util.load_data()\n\n # set the image ordering\n K.set_image_dim_ordering(\"th\")\n\n print(\"Pre-processing the dataset ...\")\n # pre-process the data\n train_x = train_x.astype('float32')\n test_x = test_x.astype('float32')\n\n train_x = train_x / 255\n test_x = test_x / 255\n\n print(train_x.shape[0], ' train samples')\n print(test_x.shape[0], ' test samples')\n\n train_y = 
np_utils.to_categorical(train_y, CarsClassifierModel._nb_classes)\n test_y = np_utils.to_categorical(test_y, CarsClassifierModel._nb_classes)\n\n return train_x, train_y, test_x, test_y", "def load_training_set():\n global training_set\n f = gzip.open('mnist.pkl.gz', 'rb')\n train, valid, test = cPickle.load(f)\n [training_set, training_labels] = train\n [validation_set, validation_labels] = valid\n [testing_set, testing_labels] = test\n training_set = np.concatenate((training_set, validation_set))\n f.close()\n np.random.shuffle(training_set)", "def _get_training_dataset(self):\n\n return tf.data.Dataset.zip((self.conditioned_dataset, self.dataset))", "def _load_data(self):\n raw_data = self._load(\n tf.gfile.Open(self._get_full_pickle_path(self._dataset_split), \"rb\"))\n if self._dataset_split == MetaSplit.TRAIN and self._config[\"train_on_val\"]:\n valid_data = self._load(\n tf.gfile.Open(self._get_full_pickle_path(MetaSplit.VALID), \"rb\"))\n for key in valid_data:\n if self._verbose:\n tf.logging.info(str([key, raw_data[key].shape]))\n raw_data[key] = np.concatenate([raw_data[key],\n valid_data[key]], axis=0)\n if self._verbose:\n tf.logging.info(str([key, raw_data[key].shape]))\n\n if self._verbose:\n tf.logging.info(\n str([(k, np.shape(v)) for k, v in six.iteritems(raw_data)]))\n\n return raw_data", "def train(self, trainingData, trainingLabels, validationData, validationLabels):\t \n\t \n\t# might be useful in your code later...\n\t# this is a list of all features in the training set.\n\tself.features = list(set([ f for datum in trainingData for f in datum.keys() ]));\n\t\n\tif (self.automaticTuning):\n\t\tkgrid = [0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 20, 50]\n\telse:\n\t\tkgrid = [self.k]\n\t\t\n\tself.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, kgrid)", "def _prepare_for_training(\n self,\n trackers: List[TrackerWithCachedStates],\n domain: Domain,\n precomputations: MessageContainerForCoreFeaturization,\n **kwargs: Any,\n ) -> Tuple[RasaModelData, np.ndarray]:\n training_trackers = self._get_trackers_for_training(trackers)\n # dealing with training data\n tracker_state_features, label_ids, entity_tags = self._featurize_for_training(\n training_trackers,\n domain,\n precomputations=precomputations,\n bilou_tagging=self.config[BILOU_FLAG],\n **kwargs,\n )\n\n if not tracker_state_features:\n return RasaModelData(), label_ids\n\n self._label_data, encoded_all_labels = self._create_label_data(\n domain, precomputations=precomputations\n )\n\n # extract actual training data to feed to model\n model_data = self._create_model_data(\n tracker_state_features, label_ids, entity_tags, encoded_all_labels\n )\n\n if self.config[ENTITY_RECOGNITION]:\n self._entity_tag_specs = (\n self.featurizer.state_featurizer.entity_tag_specs\n if self.featurizer.state_featurizer is not None\n else []\n )\n\n # keep one example for persisting and loading\n self.data_example = model_data.first_data_example()\n\n return model_data, label_ids", "def preprocess(self, train_file, validation_file, test_file):\n chardict, labeldict = self.make_dictionary(train_file, validation_file, test_file)\n print 'preparing training data'\n training = self.parse_file(train_file, chardict, labeldict)\n \n print 'preparing validation data'\n validation = self.parse_file(validation_file, chardict, labeldict)\n\n print 'preparing test data'\n test = self.parse_file(test_file, chardict, labeldict)\n\n return Data(training, validation, test, chardict, labeldict)", "def 
fit_training_data(self):\n self.model.fit(self.X_train)", "def train(self, trainingData, trainingLabels, validationData, validationLabels): \n \n # might be useful in your code later...\n # this is a list of all features in the training set.\n self.features = list(set([ f for datum in trainingData for f in datum.keys() ]));\n \n if (self.automaticTuning):\n kgrid = [0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 20, 50]\n else:\n kgrid = [self.k]\n \n self.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, kgrid)", "def load_model(self, ckpt_name=\"best_model.pth\"):\n path = \"/\".join(ckpt_name.split(\"/\")[:-1])\n chkpt = torch.load(ckpt_name)\n self.start_epoch = chkpt['epoch']\n self.best_metric = chkpt['best_metric']\n\n # fix the DataParallel caused problem with keys names\n if self.multi_gpu_flag:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=False)\n self.net.load_state_dict(new_state_dict)\n else:\n try:\n self.net.load_state_dict(chkpt['state_dict'])\n except:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=True)\n self.net.load_state_dict(new_state_dict)\n\n if self.load_optimizer_state:\n self.optimizer.load_state_dict(chkpt['optimizer'])\n logging.info(\"******** State loaded ********\")\n\n training_meta = pickle.load(open(f\"{path}/training_meta.pickle.dat\", \"rb\"))\n for k, v in training_meta.items():\n if k in self.__class__.__params:\n setattr(self, k, v)\n logging.info(\"******** Training params loaded ********\")", "def _parse_train_data(self, data):\n image, label = self._prepare_image_and_label(data)\n\n # Flips image randomly during training.\n if self._aug_rand_hflip:\n image, label = input_utils.random_horizontal_flip(image, masks=label)\n\n # Resizes and crops image.\n image, image_info = input_utils.resize_and_crop_image(\n image,\n self._output_size,\n self._output_size,\n aug_scale_min=self._aug_scale_min,\n aug_scale_max=self._aug_scale_max)\n\n # Resizes and crops boxes.\n image_scale = image_info[2, :]\n offset = image_info[3, :]\n\n # Pad label and make sure the padded region assigned to the ignore label.\n # The label is first offset by +1 and then padded with 0.\n label += 1\n label = tf.expand_dims(label, axis=3)\n label = input_utils.resize_and_crop_masks(\n label, image_scale, self._output_size, offset)\n label -= 1\n label = tf.where(tf.equal(label, -1),\n self._ignore_label * tf.ones_like(label), label)\n label = tf.squeeze(label, axis=0)\n valid_mask = tf.not_equal(label, self._ignore_label)\n labels = {\n 'masks': label,\n 'valid_masks': valid_mask\n }\n\n # If bfloat16 is used, casts input image to tf.bfloat16.\n if self._use_bfloat16:\n image = tf.cast(image, dtype=tf.bfloat16)\n return image, labels", "def training_opts(self):\n return self._training_opts", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, 
transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def train(self) -> tf.contrib.data.Dataset:\n return self.__train_dataset", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def train_set(self):\n if self._trainset is None: # loads the data to memory once and when requested.\n trainset_raw = self.read_dataset(self._trainset_path)\n trainset_spacy = self.read_spacy_pickle(self._trainset_spacy_path)\n if trainset_raw is None and trainset_spacy is None:\n # This is for languages we never see (French)\n self._trainset = None\n else:\n self._trainset = pd.concat([trainset_raw, trainset_spacy], axis=1)\n\n self._trainset['language'] = self._language\n self._trainset['dataset_name'] = self._dataset_name\n\n return self._trainset", "def load_data():\n data = gzip.open(\"mnist.pkl.gz\", \"rb\")\n train_set, valid_set, test_set = cPickle.load(data)\n data.close()\n\n # Combine validation and train folds to recreate the master 60k set.\n new_images = numpy.concatenate((train_set[0], valid_set[0]))\n new_labels = numpy.concatenate((train_set[1], valid_set[1]))\n\n train_set = (new_images, new_labels)\n \n return (train_set, test_set)", "def train(self, trainingData, trainingLabels, testData, testLabels, validate): \n\t\t \n\t\tself.features = trainingData[0].keys() # this could be useful for your code later...\n\n\t\tif (self.automaticTuning):\n\t\t\tCgrid = [0.001, 0.002, 0.003, 0.004, 0.005]\n\t\telse:\n\t\t\tCgrid = [self.C]\n\t\t\t\n\t\treturn self.trainAndTune(trainingData, trainingLabels, testData, testLabels, Cgrid, validate)", "def train(self, data):\n pass", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def __init__(self, data_path):\r\n\t\tfile_names = ['data_batch_%d' % i for i in range(1,6)]\r\n\t\tfile_names.append('test_batch')\r\n\r\n\t\tX = []\r\n\t\ty = []\r\n\t\tfor file_name in file_names:\r\n\t\t\twith open(data_path + file_name) as fin:\r\n\t\t\t\tdata_dict = cPickle.load(fin)\r\n\t\t\tX.append(data_dict['data'].ravel())\r\n\t\t\ty = y + data_dict['labels']\r\n\r\n\t\tself.X = np.asarray(X).reshape(60000, 32*32*3)\r\n\t\tself.y = np.asarray(y)\r\n\r\n\t\tfin = open(data_path + 'batches.meta')\r\n\t\tself.LABEL_NAMES = cPickle.load(fin)['label_names']\r\n\t\tfin.close()", "def get_data(self):\n return self.X_train, self.X_test, self.y_train, self.y_test", "def load_data():\n f = gzip.open('../data/mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = cPickle.load(f)\n f.close()\n return (training_data, validation_data, test_data)", "def _load_data(self):\n data_x, data_y = make_classification(n_samples=5000, n_features=20,\n n_informative=10,\n n_redundant=0, n_repeated=0,\n n_classes=2,\n n_clusters_per_class=4,\n weights=None, flip_y=0.01,\n class_sep=1.0, hypercube=True,\n shift=0.0, scale=1.0,\n shuffle=True,\n 
random_state=self.args.rand_seed)\n\n self.orig_column_names = np.arange(data_x.shape[-1])\n self.data_x = data_x\n self.data_y = self.to_one_hot_encoding(data_y)\n self.numerical_idx = np.arange(data_x.shape[-1])\n self.non_num_idx = None\n self.all_non_numerical_idx = None\n\n # Calculate adjacency matrix\n self.meta_inf = data_x[:, :1].astype('float32')\n if self.args.graph_type:\n self.adj = self.get_adjacency()", "def data(self):\n (x_train, y_train), (_, _) = datasets.fashion_mnist.load_data()\n x_train = x_train.reshape((-1, 28, 28, 1))\n x_train, y_train = x_train.astype('float16') / 255.0, \\\n tf.keras.utils.to_categorical(y_train.astype('float16'), 10)\n (x_train, x_eval) = x_train[5000:], x_train[:5000]\n (y_train, y_eval) = y_train[5000:], y_train[:5000]\n train_data, eval_data = (x_train, y_train), (x_eval, y_eval)\n return train_data, eval_data", "def load_mnsit_training_set():\n try:\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n train_data = np.asarray(x_train, dtype=np.float32)\n eval_data = np.asarray(x_test, dtype=np.float32)\n train_labels = np.asarray(y_train, dtype=np.int32)\n eval_labels = np.asarray(y_test, dtype=np.int32)\n return train_data, eval_data, train_labels, eval_labels\n\n except Exception as error:\n raise EnvironmentError(\"load_mnsit_training_set: Exception loading MNSIT data: {0}\".format(error))", "def __loadPreProcessedData(self):\n le = joblib.load(self.le_filename)\n X = np.loadtxt(self.X_filename, delimiter=',').astype(int)\n raw_y = np.loadtxt(self.y_filename, delimiter=',').astype(int)\n y = le.inverse_transform(raw_y)\n ##Initialize atrtribute for this class\n self.le, self.X, self.y = le, X, y", "def _prepare_data(self):\n #TODO hardcoded values need to change\n print_info(\"Preprocessing the train data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"train\"),\n self.TRAIN_OUT_PATH)\n\n print_info(\"Preprocessing the test data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"test\"),\n self.TEST_OUT_PATH)\n\n print_info(\"Preprocessing the validation data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"val\"),\n self.VAL_OUT_PATH)", "def train(self, training_data, cfg, **kwargs):\n pass", "def split_data_into_training_and_validation(self, data):\n training_dataset = self.get_data_from_indices(data, np.arange(self.num_training_samples))\n validation_dataset = self.get_data_from_indices(data, np.arange(self.num_training_samples,\n self.p.trainer.num_samples))\n return training_dataset, validation_dataset", "def load_data():\n\n \"\"\"The ``training_data`` is returned as a tuple with two entries.\n The first entry contains the actual training images. This is a\n numpy ndarray with 50,000 entries. Each entry is, in turn, a\n numpy ndarray with 784 values, representing the 28 * 28 = 784\n pixels in a single MNIST image.\"\"\"\n\n \"\"\"The second entry in the ``training_data`` tuple is a numpy ndarray\n containing 50,000 entries. 
Those entries are just the digit\n values (0...9) for the corresponding images contained in the first\n entry of the tuple.\"\"\"\n\n \"\"\"The ``validation_data`` and ``test_data`` are similar, except\n each contains only 10,000 images.\"\"\"\n f = gzip.open('MNIST/data/mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = Pickle.load(f, encoding='bytes'\n )\n f.close()\n return (training_data, validation_data, test_data)", "def get_nlu_data(self, language: Optional[Text] = \"en\") -> TrainingData:\n return utils.training_data_from_paths(self._nlu_files, language)", "def _training__(self):\n self.input_size, self.output_size = self.X_train.shape[1], self.y_train.shape[1]\n w1 = np.random.uniform(size=[self.input_size, self.hidden_size])\n b = np.random.uniform(size=[1, self.hidden_size])\n H = self._activation__(np.add(np.matmul(self.X_train, w1), b))\n w2 = np.dot(np.linalg.pinv(H), self.y_train)\n self.model = {\"w1\": w1, \"b\": b, \"w2\": w2}", "def prepare_train(self) -> Tuple[ZLIMGS, ZLIMGS, ZLIMGS, ZLIMGS]:\n\n if self.setting == 'setting1':\n warnings.warn(\"Please note that Setting 1 should not use train eval dataset! \"\n \"Because its training set only contain normal samples!\")\n\n with open(self.json_path) as fp:\n ids_json = json.load(fp)\n ids_train_normal = ids_json['normal']['train']\n ids_train_defect = ids_json['defect']['train']\n\n # train\n zlimgs_train_normal = self._create_zl_imgs_given_ids(ids=ids_train_normal,\n subset=CONFIG[self.setting]['normal_train'],\n ann_type=CONFIG[self.setting]['ann_train'])\n zlimgs_train_defect = self._create_zl_imgs_given_ids(ids=ids_train_defect,\n subset=CONFIG[self.setting]['defect_train'],\n ann_type=CONFIG[self.setting]['ann_train'])\n\n # train eval\n zlimgs_train_eval_normal = self._create_zl_imgs_given_ids(ids=ids_train_normal,\n subset=CONFIG[self.setting]['normal_train'],\n ann_type=CONFIG[self.setting]['ann_eval'])\n zlimgs_train_eval_defect = self._create_zl_imgs_given_ids(ids=ids_train_defect,\n subset=CONFIG[self.setting]['defect_train'],\n ann_type=CONFIG[self.setting]['ann_eval'])\n\n return zlimgs_train_normal, zlimgs_train_defect, zlimgs_train_eval_normal, zlimgs_train_eval_defect", "def _restore_data_inputs(self):\n super()._restore_data_inputs()\n self.training_data = (\n self._data.training_data.data if self._data.training_data and self._data.training_data.data else None\n )\n self.validation_data = (\n self._data.validation_data.data if self._data.validation_data and self._data.validation_data.data else None\n )", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def set_training_data(self, *, inputs: Inputs) -> None:\n\t\tsuper().set_training_data(inputs=inputs)", "def add_training_data(self, X):\n\n raise NotImplementedError(\"not implemented!\")", "def train_with_loader(self, data, validating_data=None, scheduler=None, epochs=1):\n print('Training...')\n for epoch in range(epochs):\n self.train()\n for train_in, train_out in data:\n self.compute_loss(train_in, train_out, is_guess=False, training=True)\n self.eval()\n if validating_data:\n with torch.no_grad():\n valid_loss = 
self.compute_loss_loader(validating_data).item()\n print('Average validation error at step ',epoch+1,': ', valid_loss)\n if scheduler and valid_loss:\n scheduler.step()", "def _training(self, data_loader: torch.utils.data.DataLoader,\n data_size: int):\n\n self.model.train()\n total_loss = torch.Tensor([0])\n with tqdm(total=data_size//self.batch_size) as pbar:\n for _, ((row, col), val) in enumerate(data_loader):\n self.optimizer.zero_grad()\n\n row = row.long()\n if isinstance(col, list):\n col = tuple(c.long() for c in col)\n else:\n col = col.long()\n\n preds = self.model(row, col)\n loss = self.loss_function(preds)\n loss.backward()\n\n self.optimizer.step()\n\n total_loss += loss.item()\n batch_loss = loss.item() / row.size()[0]\n\n pbar.update(1)\n\n total_loss /= data_size\n return total_loss", "def _load_data(dataset, is_training=False):\n import data_augmentation as aug\n import features\n\n features_path = os.path.join(cfg.extraction_path, dataset.name + '.h5')\n x = utils.timeit(lambda: features.load_features(features_path),\n 'Loaded features of %s dataset' % dataset.name)\n\n # Clip dynamic range to 90 dB\n x = np.maximum(x, x.max() - 90.0)\n\n # Load scaler from file if cached, or else compute it.\n scaler_path = cfg.scaler_path\n if os.path.exists(scaler_path) or not is_training:\n with open(scaler_path, 'rb') as f:\n scaler = pickle.load(f)\n else:\n scaler = utils.timeit(lambda: utils.compute_scaler(x),\n 'Computed standard scaler')\n with open(scaler_path, 'wb') as f:\n pickle.dump(scaler, f)\n\n x = utils.timeit(lambda: utils.standardize(x, scaler),\n 'Standardized %s features' % dataset.name)\n\n names, y = utils.timeit(lambda: utils.read_metadata(dataset.metadata_path),\n 'Loaded %s metadata' % dataset.name)\n if dataset == cfg.training_set and cfg.enable_augmentation:\n names, y = aug.expand_metadata((names, y))\n\n return x, y, names", "def train(self):\n self.dataGenerator.printDataStatistics()\n sE = len(self.dataGenerator.ids[\"train\"])// 32\n sV = len(self.dataGenerator.ids[\"validation\"])// 32\n self.model.fit_generator(\n generator=self.dataGenerator.trainingGenerator,\n steps_per_epoch= sE,\n epochs=2,\n validation_data=self.dataGenerator.validationGenerator,\n validation_steps=sV,\n # use_multiprocessing=True,\n # workers=2,\n )", "def preprocess_with_train_data(self,\n data_dir: Path,\n output_processed_data_dir: Path,\n ) -> NoReturn:\n pass", "def train_epoch(self, data_loader):\n self.model.train()\n\n # Prepare summary information\n summary = dict()\n sum_loss = 0\n\n # Loop over training batches\n for i, (batch_input, batch_target) in enumerate(data_loader):\n batch_input = [a.to(self.device) for a in batch_input]\n batch_target = batch_target.to(self.device)\n\n # Compute target weights on-the-fly for loss function\n batch_weights_real = batch_target * self.real_weight\n batch_weights_fake = (1 - batch_target) * self.fake_weight\n batch_weights = batch_weights_real + batch_weights_fake\n\n # Train on this batch\n self.model.zero_grad()\n batch_output = self.model(batch_input)\n batch_loss = self.loss_func(batch_output, batch_target, weight=batch_weights)\n batch_loss.backward()\n self.optimizer.step()\n sum_loss += batch_loss.item()\n self.logger.debug(' train batch %i, loss %f', i, batch_loss.item())\n\n # Summarize the epoch\n n_batches = i + 1\n summary['lr'] = self.optimizer.param_groups[0]['lr']\n summary['train_loss'] = sum_loss / n_batches\n self.logger.debug(' Processed %i batches', n_batches)\n self.logger.debug(' Current LR %f', 
summary['lr'])\n self.logger.info(' Training loss: %.3f', summary['train_loss'])\n return summary", "def train(self, trainData):\n pass", "def load_data(self):\n params = self.params\n catg = params.data_category\n langs = ['en', params.target_lang]\n data = {lang: {splt: {} for splt in (['train', 'valid'] if lang == 'en' else ['test'])} for lang in langs}\n clf_dataset_path = {\n lang: {\n splt: {\n 'x': os.path.join(params.data_path, '%s_%s_%s_x.bpe.pth' % (splt, lang, catg)),\n 'y': os.path.join(params.data_path, '%s_%s_%s_y.txt' % (splt, lang, catg)),\n } for splt in (['train', 'valid'] if lang == 'en' else ['test'])\n } for lang in langs\n }\n for splt in ['train', 'valid', 'test']:\n for lang in langs:\n if lang == 'en' and splt in ['train', 'valid'] or lang != 'en' and splt == 'test':\n # load data and dictionary\n data1 = load_binarized(clf_dataset_path[lang][splt]['x'], params)\n data['dico'] = data.get('dico', data1['dico'])\n # set dictionary parameters\n set_dico_parameters(params, data, data1['dico'])\n # create dataset\n data[lang][splt]['x'] = Dataset(data1['sentences'], data1['positions'], params)\n # load labels\n with open(clf_dataset_path[lang][splt]['y'], 'r') as f:\n labels = [int(l) for l in f]\n data[lang][splt]['y'] = torch.LongTensor(labels)\n assert len(data[lang][splt]['x']) == len(data[lang][splt]['y'])\n\n return data", "def training_step(self, batch):\n return {}", "def setTrainData(self, trainData=\"\", trainLabel=\"\"):\n # 1. Read train data set from given file paths\n trainDataDump = self._read_file(trainData)\n trainLabelDump = self._read_file(trainLabel)\n\n # 2. Format train data and labels into list of words\n trainFormattedData = np.array([line.split(' ') for line in trainDataDump.split('\\n')])\n trainFormattedLabels = np.array([label for label in trainLabelDump.split('\\n')])\n\n # 3. Compute total unique words over all docs\n uniqueWords = set()\n for line in trainFormattedData:\n for w in line:\n uniqueWords.add(w)\n self._sizeOfVocabulary = len(uniqueWords)\n\n # 4. 
Instantiate classAttr object for each label\n for index, label in enumerate(set(trainFormattedLabels)):\n self._classLabelMap.append(label)\n # Distinguish only those that belong under the label\n trainDataInClass = trainFormattedData[[i == label for i in trainFormattedLabels]]\n # Size of total documents belong under the label\n totalTrainDocsInClass = len(trainDataInClass)\n # Update the total documents\n self._totalTrainDocs += totalTrainDocsInClass\n # Count the frequency (possibly replaced with Collections.Counter)\n frequencyDict = self._count_word_frequency(trainDataInClass)\n # Create new ClassAtr and set it.\n newClassAttrObj = NBClassifier.ClassAttr(label)\n self._classAttrs[index] = newClassAttrObj\n newClassAttrObj.frequencyDict = frequencyDict\n newClassAttrObj.totalDocsInClass = totalTrainDocsInClass", "def get_train_examples(self, data_path):\r\n return self.create_examples(self.read_data(data_path), 'train')", "def model_data():\n x_train, y_train, x_val, y_val, x_test, y_test = read_data(\"src/tests/dataclassificationmodel/ferPlus_processed.pbz2\", False)\n return x_train, y_train, x_val, y_val, x_test, y_test", "def _training_params(self):\n if isinstance(\n self.lyapunov_hybrid_system.system,\n feedback_system.FeedbackSystem) and self.search_controller:\n # For a feedback system, we train both the Lyapunov network\n # parameters and the controller network parameters.\n training_params = list(\n self.lyapunov_hybrid_system.lyapunov_relu.parameters(\n )) + self.lyapunov_hybrid_system.system.controller_variables(\n ) + self.R_options.variables()\n else:\n training_params = \\\n list(self.lyapunov_hybrid_system.lyapunov_relu.parameters()) +\\\n self.R_options.variables()\n return training_params" ]
[ "0.7380025", "0.71378094", "0.6946885", "0.69251007", "0.6852432", "0.6813262", "0.6799918", "0.6751542", "0.6742031", "0.6650401", "0.6590069", "0.65826356", "0.64915186", "0.6483886", "0.6470438", "0.6446683", "0.6443755", "0.62918407", "0.6268519", "0.6244086", "0.6243447", "0.6232298", "0.6221952", "0.62203336", "0.6217214", "0.62146485", "0.6194592", "0.61521375", "0.6149793", "0.61317915", "0.61128265", "0.60851", "0.6076075", "0.60612106", "0.60587984", "0.6051752", "0.6023201", "0.6017436", "0.6017431", "0.60167116", "0.60078746", "0.6005606", "0.59840685", "0.5959869", "0.5941411", "0.5939915", "0.5937602", "0.5933322", "0.59066635", "0.5900436", "0.588524", "0.58759207", "0.5875915", "0.5868662", "0.58648527", "0.5859794", "0.5834354", "0.5832758", "0.5826415", "0.58188945", "0.58167595", "0.581536", "0.58108056", "0.58093274", "0.5808865", "0.57908624", "0.5787636", "0.5786646", "0.57811254", "0.57731444", "0.576174", "0.5757707", "0.57539403", "0.5752735", "0.5747853", "0.57459074", "0.57430947", "0.57407564", "0.5733635", "0.57333815", "0.57331306", "0.5724441", "0.57232827", "0.5722568", "0.57219434", "0.5712999", "0.570924", "0.5707521", "0.5702749", "0.5700557", "0.5698399", "0.56905514", "0.56852067", "0.5683006", "0.5680509", "0.56796145", "0.5679477", "0.5672369", "0.566804", "0.5663804" ]
0.72757715
1
Get training data (perhaps from a remote server) and preprocess it into the shape [batch, expected shape of element]. Remember to call this from a subclass so that the loaded training data is saved.
def _load_training_data(self):
    self._save_training_data()
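A minimal sketch of the subclassing pattern the docstring implies, assuming the base class exposes _save_training_data(); the class names and the fake in-memory payload are illustrative assumptions:

import numpy as np

class BaseDataSource:
    # Illustrative stand-in for the base class.
    def _save_training_data(self):
        print({k: v.shape for k, v in self._training_data.items()})

    def _load_training_data(self):
        self._save_training_data()

class RemoteDataSource(BaseDataSource):
    def _load_training_data(self):
        # Fetch and preprocess into [batch, expected shape of element]
        # (fake data stands in for a remote download here).
        self._training_data = {"train": np.zeros((4, 10, 2))}
        # Call the base implementation so the preprocessed data is saved.
        super()._load_training_data()

RemoteDataSource()._load_training_data()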
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTrainingData(self):\n raise NotImplementedError", "def preprocess_data(self):\n\n self._preprocess_train_data()\n self._preprocess_test_data()", "def preprocess_train_data(self):\r\n print(\"* Preprocessing training data.\", flush=True)\r\n prep.create_HDF_file(self.C.training_set, is_training_set=True)\r\n\r\n self.print_time_elapsed()", "def load_preprocess_training_batch(batch_id, batch_size):\n path, dataset = select_dataset(training = True)\n data = dataset_lib.get_data(batch_id, dataset=dataset, path=path)\n features = [np.array(x[1]) for x in data]\n labels = np.array([x[0] for x in data])\n\n # Return the training data in batches of size <batch_size> or less\n return batch_features_labels(features, labels, batch_size)", "def load_data_preprocess(self):\n\n print(\"Loading the dataset ...\")\n # load the data\n c_util = CarUtils()\n train_x, train_y, test_x, test_y, classes = c_util.load_data()\n\n # set the image ordering\n K.set_image_dim_ordering(\"th\")\n\n print(\"Pre-processing the dataset ...\")\n # pre-process the data\n train_x = train_x.astype('float32')\n test_x = test_x.astype('float32')\n\n train_x = train_x / 255\n test_x = test_x / 255\n\n print(train_x.shape[0], ' train samples')\n print(test_x.shape[0], ' test samples')\n\n train_y = np_utils.to_categorical(train_y, CarsClassifierModel._nb_classes)\n test_y = np_utils.to_categorical(test_y, CarsClassifierModel._nb_classes)\n\n return train_x, train_y, test_x, test_y", "def preprocess(self, train_file, validation_file, test_file):\n chardict, labeldict = self.make_dictionary(train_file, validation_file, test_file)\n print 'preparing training data'\n training = self.parse_file(train_file, chardict, labeldict)\n \n print 'preparing validation data'\n validation = self.parse_file(validation_file, chardict, labeldict)\n\n print 'preparing test data'\n test = self.parse_file(test_file, chardict, labeldict)\n\n return Data(training, validation, test, chardict, labeldict)", "def _prepare_for_training(\n self,\n trackers: List[TrackerWithCachedStates],\n domain: Domain,\n precomputations: MessageContainerForCoreFeaturization,\n **kwargs: Any,\n ) -> Tuple[RasaModelData, np.ndarray]:\n training_trackers = self._get_trackers_for_training(trackers)\n # dealing with training data\n tracker_state_features, label_ids, entity_tags = self._featurize_for_training(\n training_trackers,\n domain,\n precomputations=precomputations,\n bilou_tagging=self.config[BILOU_FLAG],\n **kwargs,\n )\n\n if not tracker_state_features:\n return RasaModelData(), label_ids\n\n self._label_data, encoded_all_labels = self._create_label_data(\n domain, precomputations=precomputations\n )\n\n # extract actual training data to feed to model\n model_data = self._create_model_data(\n tracker_state_features, label_ids, entity_tags, encoded_all_labels\n )\n\n if self.config[ENTITY_RECOGNITION]:\n self._entity_tag_specs = (\n self.featurizer.state_featurizer.entity_tag_specs\n if self.featurizer.state_featurizer is not None\n else []\n )\n\n # keep one example for persisting and loading\n self.data_example = model_data.first_data_example()\n\n return model_data, label_ids", "def load_preprocess_training_batch(batch_id, batch_size):\r\n filename = 'preprocess_batch_' + str(batch_id) + '.p'\r\n features, labels = pickle.load(open(filename, mode='rb'))\r\n# labels = np.argmax(labels,1)\r\n# num = len(labels)\r\n# arr = np.zeros((num, 1))\r\n# for i in range(num):\r\n# arr[i][0] = labels[i]\r\n# np.reshape(features,(2500,150528))\r\n# ind = [i for i in 
range(len(features))]\r\n# random.shuffle(ind)\r\n# features = features[ind]\r\n# labels = labels[ind]\r\n\r\n # Return the training data in batches of size <batch_size> or less\r\n return features[0:batch_size],labels[0:batch_size]", "def parse_train(self, proto, height, width):\n _, sequence_parsed = tf.io.parse_single_sequence_example(\n proto,\n context_features=self._context_features,\n sequence_features=self._sequence_features)\n\n # Deserialize images to float32 tensors.\n images = tf.map_fn(\n _deserialize_png, sequence_parsed['images'], dtype=tf.float32)\n\n # Resize images.\n if height is not None and width is not None:\n images = smurf_utils.resize(images, height, width, is_flow=False)\n\n return {'images': images}", "def preprocess_train_dataset(dataset):\n return (dataset\n # Shuffle according to the largest client dataset\n .shuffle(buffer_size=MAX_CLIENT_DATASET_SIZE)\n # Repeat to do multiple local epochs\n .repeat(CLIENT_EPOCHS_PER_ROUND)\n # Batch to a fixed client batch size\n .batch(CLIENT_BATCH_SIZE, drop_remainder=False)\n # Preprocessing step\n .map(reshape_emnist_element))", "def prep_data(self):\n\n self.fit_tokenizer(texts=self.texts)\n sequences = self.get_sequences(self.texts)\n self.text_data = pad_sequences(sequences, maxlen=self.MAX_SEQUENCE_LENGTH)\n\n self.labels = to_categorical(np.asarray(self.labels))\n print('Shape of data tensor:', self.text_data.shape)\n print('Shape of label tensor:', self.labels.shape)\n\n # split the data into a training set and a validation set\n indices = np.arange(self.text_data.shape[0])\n np.random.shuffle(indices)\n self.text_data = self.text_data[indices]\n self.labels = self.labels[indices]\n nb_validation_samples = int(self.VALIDATION_SPLIT * self.text_data.shape[0])\n\n x_train = self.text_data[:-nb_validation_samples]\n y_train = self.labels[:-nb_validation_samples]\n x_val = self.text_data[-nb_validation_samples:]\n y_val = self.labels[-nb_validation_samples:]\n\n return x_train,y_train, x_val, y_val", "def train(self, training_data):\n pass", "def _parse_train_data(self, data):\n image, label = self._prepare_image_and_label(data)\n\n # Flips image randomly during training.\n if self._aug_rand_hflip:\n image, label = input_utils.random_horizontal_flip(image, masks=label)\n\n # Resizes and crops image.\n image, image_info = input_utils.resize_and_crop_image(\n image,\n self._output_size,\n self._output_size,\n aug_scale_min=self._aug_scale_min,\n aug_scale_max=self._aug_scale_max)\n\n # Resizes and crops boxes.\n image_scale = image_info[2, :]\n offset = image_info[3, :]\n\n # Pad label and make sure the padded region assigned to the ignore label.\n # The label is first offset by +1 and then padded with 0.\n label += 1\n label = tf.expand_dims(label, axis=3)\n label = input_utils.resize_and_crop_masks(\n label, image_scale, self._output_size, offset)\n label -= 1\n label = tf.where(tf.equal(label, -1),\n self._ignore_label * tf.ones_like(label), label)\n label = tf.squeeze(label, axis=0)\n valid_mask = tf.not_equal(label, self._ignore_label)\n labels = {\n 'masks': label,\n 'valid_masks': valid_mask\n }\n\n # If bfloat16 is used, casts input image to tf.bfloat16.\n if self._use_bfloat16:\n image = tf.cast(image, dtype=tf.bfloat16)\n return image, labels", "def train(self, batch):\n pass", "def process_sample_train(self):\n raise NotImplementedError", "def train(self, batch_training=False):\n raise NotImplementedError", "def train(self, data):\n pass", "def get_data(self):\n if self.config['model'] == 'vggnet':\n if 
self.is_training:\n return self.data.shuffle(self.shuffle).batch(self.batch_size)\n elif self.is_testing:\n return self.data.batch(self.batch_size)\n elif not self.is_testing and not self.is_training:\n return self.data.batch(self.batch_size)\n else:\n raise NotImplementedError('In dataset.py: default input not specified for this model!')", "def training_data(self):\n if self._training_data is None:\n self._load_training_data()\n if self._swapped_training_data is None:\n self._swapped_training_data = {}\n for key, value in self._training_data.items():\n self._swapped_training_data[key] = value\n return self._swapped_training_data", "def load_training_data(self) -> Tuple[List[np.ndarray], np.ndarray]:\n return self._load_set(config.TRAIN_DIR, True)", "def preprocess_train_data(self, training_data: \"TrainingData\"):\n\n label_id_dict = self._create_label_id_dict(\n training_data, attribute=INTENT_ATTRIBUTE\n )\n\n self.inverted_label_dict = {v: k for k, v in label_id_dict.items()}\n\n self._label_data = self._create_label_data(\n training_data, label_id_dict, attribute=INTENT_ATTRIBUTE\n )\n\n session_data = self._create_session_data(\n training_data.intent_examples,\n label_id_dict,\n label_attribute=INTENT_ATTRIBUTE,\n )\n\n self.check_input_dimension_consistency(session_data)\n\n return session_data", "def _prepare_data(self):\n #TODO hardcoded values need to change\n print_info(\"Preprocessing the train data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"train\"),\n self.TRAIN_OUT_PATH)\n\n print_info(\"Preprocessing the test data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"test\"),\n self.TEST_OUT_PATH)\n\n print_info(\"Preprocessing the validation data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"val\"),\n self.VAL_OUT_PATH)", "def prepare_train(self) -> Tuple[ZLIMGS, ZLIMGS, ZLIMGS, ZLIMGS]:\n\n if self.setting == 'setting1':\n warnings.warn(\"Please note that Setting 1 should not use train eval dataset! 
\"\n \"Because its training set only contain normal samples!\")\n\n with open(self.json_path) as fp:\n ids_json = json.load(fp)\n ids_train_normal = ids_json['normal']['train']\n ids_train_defect = ids_json['defect']['train']\n\n # train\n zlimgs_train_normal = self._create_zl_imgs_given_ids(ids=ids_train_normal,\n subset=CONFIG[self.setting]['normal_train'],\n ann_type=CONFIG[self.setting]['ann_train'])\n zlimgs_train_defect = self._create_zl_imgs_given_ids(ids=ids_train_defect,\n subset=CONFIG[self.setting]['defect_train'],\n ann_type=CONFIG[self.setting]['ann_train'])\n\n # train eval\n zlimgs_train_eval_normal = self._create_zl_imgs_given_ids(ids=ids_train_normal,\n subset=CONFIG[self.setting]['normal_train'],\n ann_type=CONFIG[self.setting]['ann_eval'])\n zlimgs_train_eval_defect = self._create_zl_imgs_given_ids(ids=ids_train_defect,\n subset=CONFIG[self.setting]['defect_train'],\n ann_type=CONFIG[self.setting]['ann_eval'])\n\n return zlimgs_train_normal, zlimgs_train_defect, zlimgs_train_eval_normal, zlimgs_train_eval_defect", "def prepare_data(self):\n data = self._get_dataset(self.hparams.dataset_path)\n label_encoder = data[\"label_encoder\"]\n del data[\"label_encoder\"]\n\n click.secho(\"Building inputs and labels.\", fg=\"yellow\")\n datasets = {\n \"train\": defaultdict(list),\n \"valid\": defaultdict(list),\n \"test\": defaultdict(list),\n }\n for dataset_name, dataset in data.items():\n for sample in dataset:\n instance = self.build_input(\n self.tokenizer, sample[\"text\"], label_encoder, sample[\"label\"]\n )\n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n\n click.secho(\"Padding inputs and building tensors.\", fg=\"yellow\")\n tensor_datasets = {\"train\": [], \"valid\": [], \"test\": []}\n for dataset_name, dataset in datasets.items():\n dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)\n for input_name in MODEL_INPUTS:\n if input_name == \"labels\":\n tensor = torch.tensor(dataset[input_name], dtype=torch.float32)\n else:\n tensor = torch.tensor(dataset[input_name])\n tensor_datasets[dataset_name].append(tensor)\n\n self.train_dataset = TensorDataset(*tensor_datasets[\"train\"])\n self.valid_dataset = TensorDataset(*tensor_datasets[\"valid\"])\n self.test_dataset = TensorDataset(*tensor_datasets[\"test\"])\n click.secho(\n \"Train dataset (Batch, Candidates, Seq length): {}\".format(\n self.train_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Valid dataset (Batch, Candidates, Seq length): {}\".format(\n self.valid_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Test dataset (Batch, Candidates, Seq length): {}\".format(\n self.test_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )", "def get_data_train(self):\n return self.get_data(self.file_train, self.batch)", "def get_train(self, preprocess=False):\n return self._dataset(self._directory, 'images_background_small1', preprocess)", "def load_preprocess_training_batch(batch_id, batch_size):\r\n filename = 'preprocess_batch_' + str(batch_id) + '.p'\r\n features, labels = pickle.load(open(filename, mode='rb'))\r\n\r\n # Return the training data in batches of size <batch_size> or less\r\n return batch_features_labels(features, labels, batch_size)", "def preprocess(\n self,\n dataset: Union[str, dict, pd.DataFrame] = None,\n training_set: Union[str, dict, pd.DataFrame] = None,\n validation_set: Union[str, dict, pd.DataFrame] = None,\n test_set: Union[str, dict, pd.DataFrame] = None,\n 
training_set_metadata: Union[str, dict] = None,\n data_format: str = None,\n skip_save_processed_input: bool = True,\n random_seed: int = default_random_seed,\n **kwargs,\n ) -> PreprocessedDataset:\n print_boxed(\"PREPROCESSING\")\n\n for callback in self.callbacks:\n callback.on_preprocess_start(self.config_obj.to_dict())\n\n preprocessing_params = get_preprocessing_params(self.config_obj)\n\n proc_training_set = proc_validation_set = proc_test_set = None\n try:\n with provision_preprocessing_workers(self.backend):\n # TODO (Connor): Refactor to use self.config_obj\n preprocessed_data = preprocess_for_training(\n self.config_obj.to_dict(),\n dataset=dataset,\n training_set=training_set,\n validation_set=validation_set,\n test_set=test_set,\n training_set_metadata=training_set_metadata,\n data_format=data_format,\n skip_save_processed_input=skip_save_processed_input,\n preprocessing_params=preprocessing_params,\n backend=self.backend,\n random_seed=random_seed,\n callbacks=self.callbacks,\n )\n\n (proc_training_set, proc_validation_set, proc_test_set, training_set_metadata) = preprocessed_data\n\n return PreprocessedDataset(proc_training_set, proc_validation_set, proc_test_set, training_set_metadata)\n except Exception as e:\n raise RuntimeError(f\"Caught exception during model preprocessing: {str(e)}\") from e\n finally:\n for callback in self.callbacks:\n callback.on_preprocess_end(proc_training_set, proc_validation_set, proc_test_set, training_set_metadata)", "def load_preprocess_test_batch(batch_id, batch_size):\r\n filename = 'preprocess_test_' + str(batch_id) + '.p'\r\n features, labels = pickle.load(open(filename, mode='rb'))\r\n# labels = np.argmax(labels,1)\r\n# num = len(labels)\r\n# arr = np.zeros((num, 1))\r\n# for i in range(num):\r\n# arr[i][0] = labels[i]\r\n# ind = [i for i in range(len(features))]\r\n# random.shuffle(ind)\r\n# features = features[ind]\r\n# labels = labels[ind]\r\n\r\n # Return the training data in batches of size <batch_size> or less\r\n return features[1200:batch_size],labels[1200:batch_size]\r\n #return batch_features_labels(features, labels, batch_size)\r", "def load_train_batch(self):\n def _parse_train_img(img_path):\n with tf.device('/cpu:0'):\n img_buffer = tf.read_file(img_path)\n image_decoded = tf.image.decode_jpeg(img_buffer)\n tgt_image, src_image_stack = \\\n self.unpack_image_sequence(\n image_decoded, self.img_height, self.img_width, self.num_source)\n return tgt_image, src_image_stack\n\n def _batch_preprocessing(stack_images, intrinsics, optional_data):\n intrinsics = tf.cast(intrinsics, tf.float32)\n image_all = tf.concat([stack_images[0], stack_images[1]], axis=3)\n\n if self.match_num == 0: # otherwise matches coords are wrong\n image_all, intrinsics = self.data_augmentation(\n image_all, intrinsics, self.img_height, self.img_width)\n tgt_image = image_all[:, :, :, :3]\n src_image_stack = image_all[:, :, :, 3:]\n intrinsics = self.get_multi_scale_intrinsics(intrinsics, self.num_scales)\n return tgt_image, src_image_stack, intrinsics, optional_data\n\n file_list = self.format_file_list(self.dataset_dir, 'train')\n self.steps_per_epoch = int(len(file_list['image_file_list'])//self.batch_size)\n\n input_image_names_ph = tf.placeholder(tf.string, shape=[None], name='input_image_names_ph')\n image_dataset = tf.data.Dataset.from_tensor_slices(\n input_image_names_ph).map(_parse_train_img)\n\n cam_intrinsics_ph = tf.placeholder(tf.float32, [None, 3, 3], name='cam_intrinsics_ph')\n intrinsics_dataset = 
tf.data.Dataset.from_tensor_slices(cam_intrinsics_ph)\n\n datasets = (image_dataset, intrinsics_dataset, intrinsics_dataset)\n if self.read_pose:\n poses_ph = tf.placeholder(tf.float32, [None, self.num_source+1, 6], name='poses_ph')\n pose_dataset = tf.data.Dataset.from_tensor_slices(poses_ph)\n datasets = (image_dataset, intrinsics_dataset, pose_dataset)\n if self.match_num > 0:\n matches_ph = tf.placeholder(tf.float32, [None, self.num_source, self.match_num, 4], name='matches_ph')\n match_dataset = tf.data.Dataset.from_tensor_slices(matches_ph)\n datasets = (image_dataset, intrinsics_dataset, match_dataset)\n\n all_dataset = tf.data.Dataset.zip(datasets)\n all_dataset = all_dataset.batch(self.batch_size).repeat().prefetch(self.batch_size*4)\n all_dataset = all_dataset.map(_batch_preprocessing)\n iterator = all_dataset.make_initializable_iterator()\n return iterator", "def get_train_data(self) -> Tuple[np.array, np.array, np.array]:\n train_data = []\n for season in self.__train_seasons:\n train_data.extend(self.__get_season_data(season, sys.maxsize, True))\n train_input = np.array([ExamDropEncoder.extract_features(sample, sys.maxsize) for sample in train_data])\n train_output = np.array([1.0 if get_is_mol(sample.selected_player) else 0.0 for sample in train_data])\n\n num_bins = self.get_num_bins(train_input, self.__max_splits)\n self.__discretizer = KBinsDiscretizer(n_bins = num_bins, encode = \"onehot-dense\",\n strategy = ExamDropExtractor.BIN_STRATEGY)\n train_input = self.__discretizer.fit_transform(train_input)\n train_input = self.__add_answered_on_feature(train_data, train_input)\n self.__anova_f_filter = SelectFpr(f_classif, alpha = self.__anova_f_significance)\n train_input = self.__anova_f_filter.fit_transform(train_input, train_output)\n self.__pca = PCA(n_components = self.__pca_explain)\n train_input = self.__pca.fit_transform(train_input)\n return train_input, train_output, self.__get_train_weights(train_data)", "def train(self, trainData):\n pass", "def get_preprocess(self) -> Dict:\n input_shape = get_input_shape(self.deploy_cfg)\n cfg = process_model_config(self.model_cfg, [''], input_shape)\n preprocess = cfg.data.test.pipeline\n return preprocess", "def _prepare(self, data, train=True):\n if data is None:\n return None\n\n if hasattr(data, \"to_tfdataset\"):\n return data.to_tfdataset(train=train)\n else:\n return data", "def preprocess():\n # Load the data\n random.seed(77)\n X,y = make_classification(n_samples=500, n_features=30, n_informative=8, n_redundant=2, \n n_repeated=0, n_classes=3, n_clusters_per_class=2, weights=None, \n flip_y=0.01, class_sep=1.0, hypercube=True, shift=0.0, scale=1.0, \n shuffle=True, random_state=None)\n\n x_train, x_val, y_train, y_val = train_test_split(X, y, random_state=0, test_size=0.25)\n\n # Standardize the data\n scaler = StandardScaler()\n X_train = scaler.fit_transform(x_train)\n X_val = scaler.transform(x_val)\n\n \n return X_train,y_train,X_val,y_val", "def extract(self):\n\n # print some infos about data\n print(\"\\n--extract batches from data:\\ntrain: {}\\nval: {}\\ntest: {}\\n\".format(self.data[0]['x'].shape, self.data[1]['x'].shape, self.data[2]['x'].shape))\n\n # create batches\n self.x_train, self.y_train, _ = self.create_batches(self.data[0], batch_size=self.batch_size)\n self.x_val, self.y_val, _ = self.create_batches(self.data[1], batch_size=self.batch_size_eval)\n self.x_test, self.y_test, _ = self.create_batches(self.data[2], batch_size=self.batch_size_eval)\n\n # my data\n if len(self.mfcc_data_files) == 4:\n 
self.x_my, self.y_my, self.z_my = self.create_batches(self.data[3], batch_size=1)", "def preprocess(self, dataset_iter, single_device=False):\n dataset_iter = map(self.as_example, dataset_iter)\n if not single_device:\n dataset_iter = self.shard(dataset_iter)\n return dataset_iter", "def preproc_pipeline(data):\n # Preprocess\n data = preprocess(data)\n\n # Optional --> run a technical analysis on it and add more features\n data = generate_ta(data)\n \n # Split\n train_set, validation_set, test_set = train_val_test_split(data)\n \n # Set up for Keras\n train_set = shape_for_keras(train_set)\n validation_set = shape_for_keras(validation_set)\n test_set = shape_for_keras(test_set)\n\n # We could save this to csv.\n return train_set, validation_set, test_set", "def preprocess_dataset(self, dataset, params=None):\n if params is None:\n assert self.params_loaded, (\n \"You must either provide parameters or load the model params before preprocessing.\")\n params = self.params\n for key in dataset.keys():\n if dataset[key] is None:\n continue\n if hasattr(params, \"whiten_data\") and params.whiten_data:\n if hasattr(params, \"whiten_method\"):\n if params.whiten_method == \"FT\": # other methods require patching first\n if hasattr(params, \"whiten_batch_size\"):\n batch_size = params.whiten_batch_size\n else:\n batch_size = None\n dataset[key].images, dataset[key].data_mean, dataset[key].w_filter = \\\n dp.whiten_data_batch(dataset[key].images, method=params.whiten_method,\n batch_size=batch_size)\n print(\"INFO:preprocessing:FT Whitened \"+key+\" data\")\n if hasattr(params, \"lpf_data\") and params.lpf_data:\n dataset[key].images, dataset[key].data_mean, dataset[key].lpf_filter = \\\n dp.lpf_data(dataset[key].images, cutoff=params.lpf_cutoff)\n print(\"INFO:preprocessing:Low pass filtered \"+key+\" data\")\n if hasattr(params, \"contrast_normalize\") and params.contrast_normalize:\n if hasattr(params, \"gauss_patch_size\"):\n dataset[key].images = dp.contrast_normalize(dataset[key].images,\n params.gauss_patch_size)\n else:\n dataset[key].images = dp.contrast_normalize(dataset[key].images)\n print(\"INFO:preprocessing:Contrast normalized \"+key+\" data\")\n if hasattr(params, \"standardize_data\") and params.standardize_data:\n if params.data_type == \"mnist\":\n eps = 1e-5\n else:\n eps = None\n dataset[key].images, dataset[key].data_mean, dataset[key].data_std = \\\n dp.standardize_data(dataset[key].images, eps)\n self.data_mean = dataset[key].data_mean\n self.data_std = dataset[key].data_std\n print(\"INFO:preprocessing:Standardized \"+key+\" data\")\n if hasattr(params, \"extract_patches\") and params.extract_patches:\n assert all(key in params.__dict__.keys()\n for key in [\"num_patches\", \"patch_edge_size\", \"overlapping_patches\",\n \"randomize_patches\"]), (\"Insufficient params for patches.\")\n out_shape = (int(params.num_patches), int(params.patch_edge_size),\n int(params.patch_edge_size), dataset[key].num_channels)\n dataset[key].num_examples = out_shape[0]\n dataset[key].reset_counters()\n if hasattr(params, \"patch_variance_threshold\"):\n dataset[key].images = dp.extract_patches(dataset[key].images, out_shape,\n params.overlapping_patches, params.randomize_patches,\n params.patch_variance_threshold, dataset[key].rand_state)\n else:\n dataset[key].images = dp.extract_patches(dataset[key].images, out_shape,\n params.overlapping_patches, params.randomize_patches,\n var_thresh=0, rand_state=dataset[key].rand_state)\n dataset[key].shape = dataset[key].images.shape\n 
dataset[key].num_rows = dataset[key].shape[1]\n dataset[key].num_cols = dataset[key].shape[2]\n dataset[key].num_channels = dataset[key].shape[3]\n dataset[key].num_pixels = np.prod(dataset[key].shape[1:])\n print(\"INFO:preprocessing:Extracted patches from \"+key+\" data\")\n if hasattr(params, \"whiten_data\") and params.whiten_data:\n if hasattr(params, \"whiten_method\") and params.whiten_method != \"FT\":\n dataset[key].images, dataset[key].data_mean, dataset[key].w_filter = \\\n dp.whiten_data(dataset[key].images, method=params.whiten_method)\n print(\"INFO:preprocessing:Whitened \"+key+\" data\")\n if hasattr(params, \"norm_data\") and params.norm_data:\n dataset[key].images, dataset[key].data_max = dp.normalize_data_with_max(dataset[key].images)\n self.data_max = dataset[key].data_max\n print(\"INFO:preprocessing:Normalized \"+key+\" data with maximum\")\n if hasattr(params, \"rescale_data\") and params.rescale_data:\n dataset[key].images, dataset[key].data_min, dataset[key].data_max = dp.rescale_data_to_one(dataset[key].images)\n self.data_max = dataset[key].data_max\n self.data_min = dataset[key].data_min\n print(\"INFO:preprocessing:Rescaled each \"+key+\" datapoint to one\")\n if hasattr(params, \"center_data\") and params.center_data:\n dataset[key].images, dataset[key].data_mean = dp.center_data(dataset[key].images,\n use_dataset_mean=True)\n self.data_mean = dataset[key].data_mean\n print(\"INFO:preprocessing:Centered \"+key+\" data\")\n return dataset", "def data(self, train=True, batch_size=2):\n if train:\n elements = self.prepare_batch(self.training_albums)\n else:\n elements = self.prepare_batch(self.validation_albums)\n\n while len(elements) > 0:\n # Collect the batch\n batch = []\n for _ in range(min(batch_size, len(elements))):\n batch.append(elements.pop())\n\n # Get same sequence size for all elements of the batch\n albums, labels = self.batchify(batch)\n yield albums, labels", "def _unpack_training_data(data, val=None):\n if isinstance(data, TrainingData):\n assert val is None\n return data\n\n if val is not None:\n x, y = data\n return TrainingData.from_x_y(x, y, val)\n\n train, val = data\n if not isinstance(train, Dataset):\n xx, yy = train\n train = RamDataset(xx, yy)\n if not isinstance(val, Dataset):\n xx, yy = val\n val = RamDataset(xx, yy)\n return TrainingData(train, val)", "def _train(self):\n self.train_acc.reset_states()\n self.val_acc.reset_states()\n self.train_loss.reset_states()\n self.val_loss.reset_states()\n\n self.train_ds.shuffle(buffer_size=1000)\n for idx, (x,y) in enumerate(self.train_ds):\n self.tf_train_step(x, y)\n\n for x,y in self.val_ds:\n self.tf_val_step(x, y)\n\n # It is important to return tf.Tensors as numpy objects.\n return {\n \"epoch\": self.iteration,\n \"loss_train\": self.train_loss.result().numpy(),\n \"loss_val\": self.val_loss.result().numpy(),\n \"acc_train\": self.train_acc.result().numpy(),\n \"acc_val\": self.val_acc.result().numpy(),\n }", "def _preprocess_training_model(self, data):\n def _pre_process(raw_data):\n \"\"\" Pre-process raw data. 
\"\"\"\n pattern = re.compile(\n r\"((?<=')\\w\\d.*?(?=')|(?<=\\\")\\w\\d.*?(?=\\\")|[\\w\\d]+)\")\n words = re.findall(pattern, raw_data)\n return ' '.join(list(map(string_utils.snake_case_to_camel, words)))\n\n data_list = []\n # Preprocess the dataset with naming convention, etc.\n with Progress() as progress:\n preprocess_task = progress.add_task('Pre-processing dataset...',\n total=data.shape[0])\n for idx, row in data.iterrows():\n row_data = {}\n for column in ['text', 'key', 'value']:\n row_data[column] = _pre_process(row[column])\n data_list.append(row_data)\n progress.update(preprocess_task, advance=1)\n return pd.DataFrame(data=data_list)", "def get_training_data() -> GraphDataset:\n _load_data_if_needed()\n return training_data", "def train(self, num_batches: int):", "def _train(self):\r\n lr, hr = self.sess.run(self.val_batch)\r\n res = self.sess.run(\r\n [self.train, self.merged,\r\n self.GAN.g_loss, self.GAN.mse_loss, self.GAN.g_gan_loss,\r\n self.GAN.d_loss, self.GAN.d_loss_real, self.GAN.d_loss_fake],\r\n feed_dict={\r\n self.GAN.g_images: lr,\r\n self.GAN.d_images: hr,\r\n self.GAN.is_training: True\r\n })\r\n\r\n return res[1:]", "def step(self, sess, batch_data, is_training):\n\n # Input feed\n input_feed = {}\n input_feed[self.images] = batch_data['images']\n input_feed[self.bbox_true_13] = batch_data['bbox_true_13']\n input_feed[self.bbox_true_26] = batch_data['bbox_true_26']\n input_feed[self.bbox_true_52] = batch_data['bbox_true_52']\n\n # Output feed: depends on training or test\n output_feed = [self.loss] # Loss for this batch.\n if is_training:\n output_feed.append(self.train_op) # Gradient updates\n\n outputs = sess.run(output_feed, input_feed)\n return outputs[0] # loss", "def make_batch(self, batch_size):\n filenames = self.get_filenames()\n if self.mode == tf.estimator.ModeKeys.PREDICT and self.imagenet_train_predict_partial:\n # Sort and shuffle with seed to randomize deterministically.\n random.seed(self.imagenet_train_predict_shuffle_seed)\n random.shuffle(filenames)\n dataset = tf.contrib.data.TFRecordDataset(filenames)\n\n # Parse records.\n dataset = dataset.map(self.parser,\n num_threads=batch_size,\n output_buffer_size=2 * batch_size)\n\n # If training, shuffle and repeat indefinitely.\n if self.mode == tf.estimator.ModeKeys.TRAIN:\n dataset = dataset.shuffle(buffer_size=50000 + 3 * batch_size)\n dataset = dataset.repeat(-1)\n elif self.mode == tf.estimator.ModeKeys.PREDICT:\n if self.predict_split == 'train':\n if self.imagenet_train_predict_partial:\n MAX_EXAMPLES = 50000\n # Skip to start at a random spot in the first TFRecord.\n random.seed(self.imagenet_train_predict_shuffle_seed)\n skip_examples = random.randint(0, 1251)\n dataset = dataset.skip(skip_examples)\n # Continue shuffling amongst at least as many examples\n # as it could see in 3 cross validations.\n dataset.shuffle(buffer_size=3 * MAX_EXAMPLES,\n seed=self.imagenet_train_predict_shuffle_seed)\n num_examples = MAX_EXAMPLES\n else:\n # Take whole training set.\n num_examples = self.num_examples_per_epoch(tf.estimator.ModeKeys.TRAIN)\n else:\n # Take whole validation set.\n num_examples = self.num_examples_per_epoch(tf.estimator.ModeKeys.EVAL)\n # Take as much of the dataset as possible that can be evenly\n # divided by batch_size.\n while True:\n if num_examples % batch_size == 0:\n break\n else:\n num_examples -= 1\n dataset = dataset.take(num_examples)\n dataset = dataset.repeat(1)\n\n # dataset = dataset.take(1000) # For fast debugging!\n else:\n dataset = dataset.repeat(1)\n\n # 
Batch it up.\n dataset = dataset.batch(batch_size)\n iterator = dataset.make_one_shot_iterator()\n image_batch, label_batch = iterator.get_next()\n\n return image_batch, label_batch", "def train(self, training_set):\n\n lengths = [ seq.shape[0] for seq in training_set ]\n\n #preprocessing the training set to obtain the desired shape\n concatenated_set = self.preprocessing( training_set )\n\n #fitting the model\n self.model.fit(concatenated_set, lengths)\n\n return", "def train(self):\n raise NotImplementedError", "def get_preprocessed_data(x_train, x_test, y_train, y_test):\n x_train = x_train.reshape(50000, 3072)\n x_test = x_test.reshape(10000, 3072)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n\n y_train = np_utils.to_categorical(y_train, num_classes)\n y_test = np_utils.to_categorical(y_test, num_classes)\n\n return x_train, x_test, y_train, y_test", "def prepare_data(self):\n # Set up the path\n self.path_target_train = os.path.join(self.data_dir, self.train_path_file_target + \".pkl\")\n self.path_target_test = os.path.join(self.data_dir, self.test_path_file_target + \".pkl\")\n\n if not os.path.exists(self.path_target_train) or not os.path.exists(self.path_target_test):\n # Create vocabularies of the appropriate sizes.\n self.create_vocabulary(self.train_path_file)\n\n # Create token ids for the training data.\n input_train_path = self.train_path_file\n target_train_path = self.train_path_file_target\n train_input, train_input_length, train_labels = self.data_to_token_ids(input_train_path, target_train_path)\n\n # Create token ids for the validation data.\n input_test_path = self.test_path_file\n target_test_path = self.test_path_file_target\n test_input, test_input_length, _ = self.data_to_token_ids(input_test_path, target_test_path, train=False)\n\n # Collect data into a list\n training_data = [train_input, train_input_length, train_labels]\n test_data = [test_input, test_input_length]\n\n # Save all the data\n with open(self.path_target_train, 'wb') as f:\n pickle.dump(training_data,f)\n with open(self.path_target_test, 'wb') as f:\n pickle.dump(test_data, f)\n else:\n # Load data\n with open(self.path_target_train, 'rb') as f:\n training_data = pickle.load(f)\n with open(self.path_target_test, 'rb') as f:\n test_data = pickle.load(f)\n\n # Initialize vocabulary\n self.initialize_vocabulary()\n\n # Convert list into a numpy array - train data\n train_input = pd.DataFrame(training_data[0]).fillna(value=0).astype(int).values\n train_length_input = np.array(training_data[1], dtype=int)\n train_labels = np.array(training_data[2], dtype=int)\n\n # Convert list into a numpy array - test data\n test_input = pd.DataFrame(test_data[0]).fillna(value=0).astype(int).values\n test_length_input = pd.DataFrame(test_data[1]).fillna(value=0).astype(int).values\n\n # Printing maximum length\n print(\"Shape of the input training matrix {}\".format(str(train_input.shape)))\n print(\"Shape of the input test matrix {}\".format(str(test_input.shape)))\n\n # Copy the files\n self.copy_files()\n\n # Return output\n return train_input, train_length_input, train_labels, test_input, test_length_input", "def process_batch(self, data):\n [embedding_batch] = self._sess.run([self._embedding_tensor],\n feed_dict={self._features_tensor: data})\n return embedding_batch", "def prepare_data(self, context_size, model_name):\n self.context_size = context_size\n data_x = []\n data_y = []\n oob = self.word2idx['OOB']\n\n for item in self.docs:\n data = [oob] 
* context_size + self.doc2token(item) + [oob] * context_size #padding\n for i in range(context_size, len(data) - context_size):\n data_x.append(data[i - context_size: i] + data[i + 1: i + context_size + 1])\n data_y.append(data[i])\n \n if model_name.lower() == 'skipgram':\n data_x, data_y = data_y, data_x\n self.data_x = Variable(torch.LongTensor(data_x))\n self.data_y = Variable(torch.LongTensor(data_y))\n logging.info(f'data preprocessed, data shape: {self.data_x.shape}, {self.data_y.shape}')", "def preprocess(self, data):\n (w,h,f) = self.rawinputformat()\n dt = numpy.dtype(numpy.uint8)\n nb = numpy.frombuffer(data,dt,-1,0)\n actual_stream_width = (w&1)+w # hack, rather get this from the app sink\n if(actual_stream_width != self.reqsize):\n nb = nb.reshape(h,actual_stream_width,3)\n nb = nb[0:h,0:w,0:3] # crop to network input size\n else:\n nb = nb.reshape((actual_stream_width,actual_stream_width,3))\n img = nb.astype('float32')\n #Preprocess image\n #for i in range(3):\n # img[:,:,i] = (img[:,:,i] - self.mean[i]) * self.std[i]\n #img = resize(img/255.0,(w,h),1)\n img = img/255.0\n print(img.shape)\n #print(img[0,0,:])\n return img.astype(numpy.float16)", "def model_data():\n x_train, y_train, x_val, y_val, x_test, y_test = read_data(\"src/tests/dataclassificationmodel/ferPlus_processed.pbz2\", False)\n return x_train, y_train, x_val, y_val, x_test, y_test", "def _data_generation(self, batch_data):\n # Initialization\n batch_x = []\n batch_y = defaultdict(list)\n\n for ind, item_data in batch_data.iterrows():\n img_path = os.path.join(self.img_dir, \"images\", \"rgb\", item_data[\"name\"])\n img = cv2.imread(img_path)\n try:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n except Exception as error:\n print(img_path)\n print(error)\n not_valid_mask = self.read_masks_borders(item_data[\"name\"])\n img[not_valid_mask] = 0\n\n # getmasks\n targets = np.zeros((img.shape[0], img.shape[1], len(self.classes)))\n for i, c in enumerate(self.classes):\n mask_path = os.path.join(self.img_dir, \"labels\", c, item_data[\"name\"])\n mask = cv2.imread(\n mask_path.replace(\".jpg\", \".png\"), cv2.IMREAD_GRAYSCALE\n )\n mask[not_valid_mask[:, :, 0]] = 0\n mask = mask > 0\n targets[:, :, i] = mask\n\n res = self.reshape_func(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n if self.do_aug:\n res = self.aug(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n\n for i, c in enumerate(self.classes):\n batch_y[c].append(targets[:, :, i])\n\n batch_x.append(img)\n\n batch_x = np.array(batch_x, np.float32)\n batch_y = {k: np.array(v, np.float32) for k, v in batch_y.items()}\n batch_y = {k: np.expand_dims(v, axis=-1) for k, v in batch_y.items()}\n\n return (\n imagenet_utils.preprocess_input(batch_x, \"channels_last\", mode=\"tf\"),\n batch_y\n )", "def prepare_dataset(data_path, test_size=0.2, validation_size=0.2):\r\n\r\n # load dataset\r\n if data_path.endswith('json'):\r\n X, y = load_data_from_json(data_path)\r\n else:\r\n X, y = load_data_from_fold(data_path)\r\n # create train, validation, test split\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)\r\n X_train, X_validation, y_train, y_validation = train_test_split(X_train, y_train, test_size=validation_size)\r\n\r\n # add an axis to nd array\r\n X_train = X_train[..., np.newaxis]\r\n X_test = X_test[..., np.newaxis]\r\n X_validation = X_validation[..., np.newaxis]\r\n\r\n return X_train, y_train, X_validation, y_validation, X_test, y_test", "def fetch_test_batch(self):\n data = 
self.data\n # size of train dataset\n num_train = data['train'].shape[0]\n image_size = self.image_size\n # index of test image that is being classified in this batch\n batch_index = self.test_batch_index\n\n # create batch array\n X = np.zeros([2 * num_train, image_size[0], image_size[1]], dtype='uint8')\n # first half are all training images\n X[:num_train, ...] = data['train']\n # second half is copy of a batch_index-th test image to be classified\n X[num_train:, ...] = data['test'][batch_index, ...]\n # true label is extracted from array of indexes where particular class start\n test_label = np.argmax(self.starts['test']>batch_index) - 1\n\n # rescale intensities and center\n X = X / 255.0\n X = X - self.mean_train\n\n X = X[:, np.newaxis]\n X = X.astype(\"float32\")\n\n self.test_batch_index += 1\n\n X = Variable(torch.from_numpy(X)).view(2 * num_train, self.image_size[0], self.image_size[1])\n\n # stack batch by second axis to [batch size, 2 (pair to be compared), image height, image width]\n X1 = X[:num_train] # (B, h, w)\n X2 = X[num_train:] # (B, h, w)\n\n X = torch.stack([X1, X2], dim=1) # (B, 2, h, w)\n\n if use_cuda:\n X = X.cuda()\n # using test dataset size and current index for controlling test loop in test_model.py\n return X, test_label, data['test'].shape[0], self.test_batch_index", "def convert_train(ndata, ndim):\r\n print ('Converting training data ... ')\r\n x = np.zeros([ndata, ndim])\r\n y = np.zeros([ndata])\r\n \r\n for i in range(0, len(flist) - 2):\r\n batchn = filepath + flist[i]\r\n temp = read(batchn)\r\n x[i * 10000:(i + 1) * 10000] = temp['data']\r\n y[i * 10000:(i + 1) * 10000] = temp['labels']\r\n \"\"\"\r\n i=0\r\n batchn = filepath + flist[i]\r\n\r\n temp = read(batchn)\r\n\r\n x[i * 10000:(i + 1) * 10000] = temp['data']\r\n\r\n y[i * 10000:(i + 1) * 10000] = temp['labels']\r\n \"\"\"\r\n return x, y", "def prepare_data(self, *args, **kwargs):\n # get paths to train and test splits\n _split_paths = [os.path.join(self.path_to_data, split)\n for split in os.listdir(self.path_to_data)]\n\n # for each split [train, test]\n for _path in _split_paths:\n _img_classes = os.listdir(_path) # get subfolders representing each class\n self.splits[os.path.basename(_path)] = []\n\n # get the images in pairs with its corresponding class\n for _class in _img_classes:\n _data = self.get_img_text_pair(os.path.join(_path, _class))\n\n if os.path.basename(_path) == 'train':\n self.weights[self.encode_label(_class)] = len(_data)\n self.splits[os.path.basename(_path)].extend(_data)", "def test_pyt_preprocess_train(self):\n # Second, check that the model will train\n defaults = parser_defaults.copy()\n defaults['datatype'] = 'train'\n defaults['pytorch_preprocess'] = True\n str_output, _, _ = testing_utils.train_model(defaults)\n self.assertTrue(\n solved_task(str_output),\n 'Teacher could not teach seq2seq with preprocessed obs, output: {}'\n .format(str_output)\n )", "def load_data_and_labels(self):\n gen = image.ImageDataGenerator()\n target_size = (224,224)\n if self.preprocess:\n print('Preprocessing data...')\n if not os.path.isdir(self.pproc_dir()):\n os.mkdir(self.pproc_dir())\n \n batch_arr = []\n for ld,segment in [(self.train_dir(), 'train'),\n (self.valid_dir(), 'valid')]:\n # TODO(ness): segment = os.basename(ld)\n flowgen = gen.flow_from_directory(\n ld,\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1)\n # Save the batches using method defined in utils.py\n data = np.concatenate([flowgen.next() for i in range(flowgen.n)])\n 
batches_dir = self.pproc_dir() + segment + '-bc'\n save_array(batches_dir, data)\n \n # Save the classes.\n cls_dir = self.pproc_dir() + segment + '-cl'\n save_array(cls_dir, flowgen.classes)\n \n batch_arr.append((data, flowgen.classes, flowgen.class_indices))\n \n # Set the data.\n self.training_data = batch_arr[0][0]\n self.validation_data = batch_arr[1][0]\n \n # Classes are zero-indexed and represent a category in\n # numerical form. So if the classes are 'dog' and 'cat',\n # the possible class values will be 0 and 1.\n self.trn_classes = batch_arr[0][1]\n self.val_classes = batch_arr[1][1]\n \n # Labels are the one-hot encoded (i.e. categorical)\n # version of the classes. In other words, if there are\n # 5 classes and an element belongs to class 2,\n # its label will be [0,0,1,0,0] (index 1).\n self.training_labels = to_categorical(batch_arr[0][1])\n self.validation_labels = to_categorical(batch_arr[1][1])\n \n # Class indices are dictionaries of the form\n # {'category_name': 0, 'category_name_2: 1}. They\n # make the mapping between numerical class indices and\n # a human-readable category name. They are (should be...)\n # the same for validation and training, so only load them\n # once, after sanity checking.\n self.cindices = batch_arr[0][2]\n print('Done preprocessing.')\n else:\n print('Loading data...')\n # Load the pre-saved data using methods defined in utils.py. See\n # preprocessing branch for the meaning of the data.\n self.training_data = load_array(self.pproc_dir() + 'train-bc')\n self.validation_data = load_array(self.pproc_dir() + 'valid-bc')\n self.trn_classes = load_array(self.pproc_dir() + 'train-cl')\n self.val_classes = load_array(self.pproc_dir() + 'valid-cl')\n self.training_labels = to_categorical(self.trn_classes)\n self.validation_labels = to_categorical(self.val_classes)\n \n # To get the class indices, we create the generator. 
It's cheap to\n # run since it doesn't actually load all the data.\n flowgen = gen.flow_from_directory(\n self.train_dir(),\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1) \n self.cindices = flowgen.class_indices\n print('Done loading.')", "def preprocess(self, data):\n if self.mode == 'image':\n data = self.transpose(data)\n data = self.dilate(data)\n data = self.mask(data)\n\n if self.mode == 'histogram':\n data = self.flatten(data)\n data = self.mask(data)\n\n if self.mode == 'curve':\n if isinstance(data, np.ndarray) or (isinstance(data, list) and contains_numbers(data)):\n if hasattr(self, 'objects'):\n xdata = self.main_object.get_xdata()\n else:\n xdata = range(len(data))\n\n data = [xdata, data]\n\n smoothed = self.smooth(data[1].squeeze() if data[1].ndim > 1 else data[1])\n data = [*data, smoothed]\n\n if self.mode == 'loss':\n if isinstance(data, tuple):\n loss, lr = data\n else:\n loss, lr = data, None\n\n if loss is None:\n smoothed = None\n else:\n smoothed = self.smooth(loss)\n\n data = [loss, smoothed, lr]\n\n return data", "def training_step(self, batch):\n return {}", "def train(self, ):\n raise NotImplementedError", "def train(self, data_iterator):\n \n if self.config['sequence_input']:\n if self.config['net_input_add_onehot']:\n input_data_ph = tf.placeholder(tf.uint8, shape=(self.config['batch_size'], self.config['timesteps']))\n else:\n input_data_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['timesteps'], self.config['num_input']))\n else:\n input_data_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['num_input']))\n \n if self.config['sequence_output']:\n if self.config['net_target_add_onehot']:\n target_ph = tf.placeholder(tf.uint8, shape=(self.config['batch_size'], self.config['timesteps']))\n else:\n target_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['timesteps'], self.config['num_output']))\n else:\n target_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['num_output']))\n \n training, loss_avg_t = self.setup_train(input_data_ph, target_ph)\n \n session = tf.Session()\n session.run(tf.global_variables_initializer())\n \n self.analyze_config()\n \n for epoch in range(self.config['epochs']):\n starttime = time.time()\n for step in range(self.config['epoch_steps']):\n input_data, target = next(data_iterator)\n tmp, loss_avg_value = session.run([training, loss_avg_t], {input_data_ph:input_data, target_ph:target})\n print(\"Epoch: {} Loss: {} Elapsed:{}s\".format(epoch, loss_avg_value, (time.time() - starttime)))", "def load_data_wrapper():\r\n\r\n train_data, valid_data, tst_data = load_data()\r\n ## calling the function load_data()\r\n ## will return a tuple with three values for train, validation and test data\r\n ## storing the tuple values in separate three variables\r\n\r\n ## training_data:\r\n training_inputs = [np.reshape(x, (784,1)) for x in train_data[0]]\r\n ## reshaping the training inputs to 784x1 vector\r\n ## the required format for our neural network's input layer\r\n ## ---\r\n training_results = [vectorized_result(y) for y in train_data[1]]\r\n ## calling vectorized_result() function(see below)\r\n ## will convert the digit value in 10-dimensional vector\r\n ## the required format for our neural network's output layer\r\n ## ---\r\n training_data = zip(training_inputs, training_results)\r\n ## zipping together the training_inputs and training_results\r\n\r\n ## validation_data:\r\n 
validation_inputs = [np.reshape(x, (784,1)) for x in valid_data[0]]\r\n ## reshaping the validation inputs to 784x1 vector\r\n ## ---\r\n validation_data = zip(validation_inputs, valid_data[1])\r\n ## zipping together the validation_inputs and it's corresponding outputs\r\n\r\n ## test_data:\r\n test_inputs = [np.reshape(x, (784,1)) for x in tst_data[0]]\r\n ## reshaping the test inputs to 784x1 vector\r\n ## ---\r\n test_data = zip(test_inputs, tst_data[1])\r\n ## zipping together the test_inputs and it's corresponding outputs\r\n\r\n return (training_data, validation_data, test_data)", "def _get_batch(self,\n X_train, \n Y_train):\n\n raw_data_length = len(X_train)\n\n # partition raw data into batches and stack them vertically in a data matrix\n batch_partition_length = raw_data_length // self.model_parameters.batch_size\n data_x = np.zeros([self.model_parameters.batch_size, \n batch_partition_length, \n self.model_parameters.input_dimension], \n dtype=np.float32)\n\n data_y = np.zeros([self.model_parameters.batch_size, \n batch_partition_length, \n self.model_parameters.n_classes], \n dtype=np.float32)\n #data_y = np.zeros([batch_size, n_classes], dtype=np.int32)\n \n for i in range(self.model_parameters.batch_size):\n data_x[i] = X_train[batch_partition_length * i:batch_partition_length * (i + 1), :]\n data_y[i] = Y_train[batch_partition_length * i:batch_partition_length * (i + 1),:]\n \n # further divide batch partitions into sequence_length for truncated backprop\n epoch_size = batch_partition_length // self.model_parameters.sequence_length\n\n for i in range(epoch_size):\n x = data_x[:, i * self.model_parameters.sequence_length:(i + 1) * self.model_parameters.sequence_length,:]\n y = data_y[:, i * self.model_parameters.sequence_length:(i + 1) * self.model_parameters.sequence_length,:]\n yield (x, y)", "def generate_training_batch(self, start_index):\n assert self.training_dataset is not None\n assert self.data_tags is not None\n return self.get_data_from_indices(self.training_dataset,\n np.arange(start_index, start_index + self.p.trainer.batch_size))", "def preprocess(self, data, scope):\n if scope != 'train':\n # reshape\n data = self._data_reshape(data)\n\n # normalize\n if data.dtype == np.int16:\n start_unit = -1000\n end_unit = 300\n data = 2 * (data.astype(np.float32) - start_unit) / (end_unit - start_unit) - 1\n\n # subtract train mean and divide by train std\n if scope == 'train':\n self.mean = np.mean(data)\n data -= self.mean\n self.std = np.std(data)\n data /= self.std\n else:\n data -= self.mean\n data /= self.std\n\n # reshape for channel\n s = data.shape\n if len(data.shape) == 4:\n data = data.reshape((s[0], s[1], s[2], s[3], 1))\n else:\n data = data.reshape((s[0], s[1], s[2], 1))\n return data", "def Train_data():\n print (\"loading train data ...\")\n time_start = time.time()\n data_root = '/media/keziwen/86AA9651AA963E1D'\n with h5py.File(join(data_root, './data/train_real2.h5')) as f:\n data_real = f['train_real'][:]\n num, nt, ny, nx = data_real.shape\n data_real = np.transpose(data_real, (0, 1, 3, 2))\n with h5py.File(join(data_root, './data/train_imag2.h5')) as f:\n data_imag = f['train_imag'][:]\n num, nt, ny, nx = data_imag.shape\n data_imag = np.transpose(data_imag, (0, 1, 3, 2))\n data = data_real+1j*data_imag\n num_train = 15000\n num_validate = 2000\n train_data = data[0:num_train]\n validate_data = data[num_train:num_train+num_validate]\n\n train_data = np.random.permutation(train_data)\n\n time_end = time.time()\n print ('dataset has been created 
using {}s'.format(time_end-time_start))\n return train_data, validate_data", "def preprocess(self,data):\n preprocessObj = PreprocessData()\n preprocess_data = preprocessObj.preprocess(data)\n return preprocess_data", "def build_inputs(self):\n if self.mode == \"inference\":\n # In inference mode, images and inputs are fed via placeholders.\n image_feed = tf.placeholder(dtype=tf.string, shape=[], name=\"image_feed\")\n input_feed = tf.placeholder(\n dtype=tf.int64,\n shape=[None], # batch_size\n name=\"input_feed\")\n\n # Process image and insert batch dimensions.\n images = tf.expand_dims(self.load_image(image_feed), 0)\n input_seqs = tf.expand_dims(input_feed, 1)\n\n # No target sequences or input mask in inference mode.\n target_seqs = None\n input_mask = None\n else:\n def _load_example(serialized_example):\n encoded_image, caption = input_ops.parse_example(\n serialized_example,\n image_feature=self.config.image_feature_name,\n caption_feature=self.config.caption_feature_name)\n image = self.load_image(encoded_image)\n\n # strings.split expects a batch\n input_seqs, target_seqs, input_mask = input_ops.pad_caption_to_input(\n caption)\n return image, input_seqs, target_seqs, input_mask\n\n def _load_dataset(filename):\n return tf.data.TFRecordDataset(filename, buffer_size=16 * 1024 * 1024)\n\n df = tf.data.Dataset.list_files(\n self.config.input_file_pattern, shuffle=self.mode == \"train\")\n df = df.apply(\n tf.data.experimental.parallel_interleave(\n _load_dataset, cycle_length=64, sloppy=True))\n\n if self.mode == \"train\":\n df = df.repeat()\n df = df.shuffle(1024)\n\n df = df.apply(\n tf.data.experimental.map_and_batch(\n _load_example,\n self.config.batch_size,\n num_parallel_batches=8,\n drop_remainder=True))\n df = df.prefetch(8)\n images, input_seqs, target_seqs, input_mask = df.make_one_shot_iterator(\n ).get_next()\n\n self.images = images\n self.input_seqs = input_seqs\n self.target_seqs = target_seqs\n self.input_mask = input_mask", "def train(self):\n\t\traise NotImplementedError", "def preprocess_model_inputs(self, data_batch):\n\n return data_batch, np.array([])", "def _train_batch(self, review_fwd, review_bwd, summary):\n # feed in the data for forward model\n feed_dict_fwd = {self.enc_inp_fwd[t]: review_fwd[t] for t in range(self.seq_length)}\n feed_dict_fwd.update({self.labels[t]: summary[t] for t in range(self.seq_length)})\n\n # feed in the data for the backward model\n feed_dict_bwd = {self.enc_inp_bwd[t]: review_bwd[t] for t in range(self.seq_length)}\n feed_dict_bwd.update({self.labels[t]: summary[t] for t in range(self.seq_length)})\n\n # train forward model\n print 'Forward Batch Training.......'\n _, loss_t_forward = self.sess.run([self.train_op_fwd, self.loss_fwd], feed_dict_fwd)\n\n # train backward model\n print 'Backward Batch Training.......'\n _, loss_t_backward = self.sess.run([self.train_op_bwd, self.loss_bwd], feed_dict_bwd)\n\n return loss_t_forward, loss_t_backward", "def train_data(self):\n return self._train_data", "def prepare_train_validation(self) -> Tuple:\n Xt, Xv, Yt, Yv = self.dataset.train_test_split_representations()\n\n Xt = self.dataset.prepare_input_samples(Xt)\n Yt = self.dataset.prepare_output_samples(Yt)\n traindataset = tf.data.Dataset.from_tensor_slices((Xt, Yt))\n traindataset = traindataset.batch(\n self.batch_size,\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n Xv = self.dataset.prepare_input_samples(Xv)\n Yv = self.dataset.prepare_output_samples(Yv)\n validdataset = tf.data.Dataset.from_tensor_slices((Xv, Yv))\n 
validdataset = validdataset.batch(\n self.batch_size,\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n return traindataset, validdataset", "def preprocess(self, data, attr):\n # If num_workers > 0, use new RNG with unique seed for each thread.\n # Else, use default RNG.\n if torch.utils.data.get_worker_info():\n seedseq = np.random.SeedSequence(\n torch.utils.data.get_worker_info().seed +\n torch.utils.data.get_worker_info().id)\n rng = np.random.default_rng(seedseq.spawn(1)[0])\n else:\n rng = self.rng\n\n points = np.array(data['point'], dtype=np.float32)\n\n if 'label' not in data or data['label'] is None:\n labels = np.zeros((points.shape[0],), dtype=np.int32)\n else:\n labels = np.array(data['label'], dtype=np.int32).reshape((-1,))\n\n if 'feat' not in data or data['feat'] is None:\n feat = points.copy()\n else:\n feat = np.array(data['feat'], dtype=np.float32)\n\n if attr['split'] in ['training', 'train']:\n points, feat, labels = self.augmenter.augment(\n points, feat, labels, self.cfg.get('augment', None))\n\n points -= np.min(points, 0)\n\n feat = feat / 255.0 # Normalize to [0, 1]\n\n max_points_x = np.max(points[:, 0])\n max_points_y = np.max(points[:, 1])\n max_points_z = np.max(points[:, 2])\n\n x, y, z = np.split(points, (1, 2), axis=-1)\n norm_x = x / max_points_x\n norm_y = y / max_points_y\n norm_z = z / max_points_z\n\n feat = np.concatenate([x, y, z, feat, norm_x, norm_y, norm_z], axis=-1)\n\n choices = rng.choice(points.shape[0],\n self.cfg.num_points,\n replace=(points.shape[0] < self.cfg.num_points))\n points = points[choices].transpose()\n feat = feat[choices].transpose()\n labels = labels[choices]\n\n data = {}\n data['point'] = points\n data['feat'] = feat\n data['label'] = labels\n\n return data", "def get_data(dataset, max_train_size=None, max_test_size=None, do_preprocess=True, train_start=0,\n test_start=0, prefix=\"processed\", x_dims=None):\n if max_train_size is None:\n train_end = None\n else:\n train_end = train_start + max_train_size\n if max_test_size is None:\n test_end = None\n else:\n test_end = test_start + max_test_size\n print('load data of:', dataset)\n print(\"train: \", train_start, train_end)\n print(\"test: \", test_start, test_end)\n if x_dims is None:\n x_dim = get_data_dim(dataset)\n else:\n x_dim = x_dims\n f = open(os.path.join(prefix, dataset + '_train.pkl'), \"rb\")\n train_data = pickle.load(f).reshape((-1, x_dim))[train_start:train_end, :]\n f.close()\n try:\n f = open(os.path.join(prefix, dataset + '_test.pkl'), \"rb\")\n test_data = pickle.load(f).reshape((-1, x_dim))[test_start:test_end, :]\n f.close()\n except (KeyError, FileNotFoundError):\n test_data = None\n try:\n f = open(os.path.join(prefix, dataset + \"_test_label.pkl\"), \"rb\")\n test_label = pickle.load(f).reshape((-1))[test_start:test_end]\n f.close()\n except (KeyError, FileNotFoundError):\n test_label = None\n if do_preprocess:\n train_data, test_data = preprocess(train_data, test_data)\n print(\"train set shape: \", train_data.shape)\n print(\"test set shape: \", test_data.shape)\n if test_label is not None:\n print(\"test label shape: \", test_label.shape)\n print()\n return (train_data, None), (test_data, test_label)", "def preprocess(data):\n raise NotImplementedError", "def _load_data(self):\n raw_data = self._load(\n tf.gfile.Open(self._get_full_pickle_path(self._dataset_split), \"rb\"))\n if self._dataset_split == MetaSplit.TRAIN and self._config[\"train_on_val\"]:\n valid_data = self._load(\n tf.gfile.Open(self._get_full_pickle_path(MetaSplit.VALID), 
\"rb\"))\n for key in valid_data:\n if self._verbose:\n tf.logging.info(str([key, raw_data[key].shape]))\n raw_data[key] = np.concatenate([raw_data[key],\n valid_data[key]], axis=0)\n if self._verbose:\n tf.logging.info(str([key, raw_data[key].shape]))\n\n if self._verbose:\n tf.logging.info(\n str([(k, np.shape(v)) for k, v in six.iteritems(raw_data)]))\n\n return raw_data", "def train(self):\n self.dataGenerator.printDataStatistics()\n sE = len(self.dataGenerator.ids[\"train\"])// 32\n sV = len(self.dataGenerator.ids[\"validation\"])// 32\n self.model.fit_generator(\n generator=self.dataGenerator.trainingGenerator,\n steps_per_epoch= sE,\n epochs=2,\n validation_data=self.dataGenerator.validationGenerator,\n validation_steps=sV,\n # use_multiprocessing=True,\n # workers=2,\n )", "def get_input_fn_training(Xtrain_ul, Xtrain_l, Xtest, ytrain_ul, ytrain_l, ytest, batch_size, num_labeled):\n dataset = input_data.Data(Xtrain_ul,\n Xtrain_l,\n Xtest,\n ytrain_ul,\n ytrain_l,\n ytest,\n num_labeled, \n batch_size, \n shuffle=True)\n return dataset.next_batch()", "def _preprocess_data(\n self, data_batch: Dict[str, Any]) -> Tuple[tf.Tensor, tf.Tensor]:\n # Get input data, with shape [batch_size, dim_h, dim_w, dim_c].\n input_data = data_batch['sdf_map']\n\n gt_data = input_data\n\n return input_data, gt_data", "def trainGenerator(self,):\n return tf.data.Dataset.from_generator(self.trainData, \\\n output_types=(tf.float32, tf.float32, tf.float32), \\\n output_shapes=(tf.TensorShape(self.config_model[\"input_shape\"]), \\\n tf.TensorShape(list(self.headoutput_shape[1:4]) + \\\n [len(self.anchor_boxes), \\\n 7+len(self.config_data[\"all_classes\"])]), \\\n tf.TensorShape([self.config_data[\"max_boxes_per_frame\"], 7]) \\\n ), )", "def get_batch(batch_data, config):\n N = len(batch_data['obs_traj_rel'])\n P = config.P\n OF = config.flow_size\n T_in = config.obs_len\n T_pred = config.pred_len\n\n returned_inputs = []\n traj_obs_gt = np.zeros([N, T_in, P], dtype='float32')\n traj_pred_gt = np.zeros([N, T_pred, P], dtype='float32')\n # --- xy input\n for i, (obs_data, pred_data) in enumerate(zip(batch_data['obs_traj_rel'],\n batch_data['pred_traj_rel'])):\n for j, xy in enumerate(obs_data):\n traj_obs_gt[i, j, :] = xy\n for j, xy in enumerate(pred_data):\n traj_pred_gt[i, j, :] = xy\n returned_inputs.append(traj_obs_gt)\n # ------------------------------------------------------\n # Social component (through optical flow)\n if config.add_social:\n obs_flow = np.zeros((N, T_in, OF),dtype ='float32')\n # each batch\n for i, flow_seq in enumerate(batch_data['obs_optical_flow']):\n for j , flow_step in enumerate(flow_seq):\n obs_flow[i,j,:] = flow_step\n returned_inputs.append(obs_flow)\n # -----------------------------------------------------------\n # Person pose input\n if config.add_kp:\n obs_kp = np.zeros((N, T_in, KP, 2), dtype='float32')\n # each bacth\n for i, obs_kp_rel in enumerate(batch_data['obs_kp_rel']):\n for j, obs_kp_step in enumerate(obs_kp_rel):\n obs_kp[i, j, :, :] = obs_kp_step\n return returned_inputs,traj_pred_gt", "def _load_data(self):\n pickle_in = open(\"X_train.pickle\", \"rb\")\n self.X = pickle.load(pickle_in)\n pickle_in = open(\"y_train.pickle\", \"rb\")\n self.Y = pickle.load(pickle_in)\n\n pickle_in = open(\"X_test.pickle\", \"rb\")\n self.X_final = pickle.load(pickle_in)\n pickle_in = open(\"y_test.pickle\", \"rb\")\n self.Y_final = pickle.load(pickle_in)\n\n # Set input shape:\n if K.image_data_format() == 'channels_first':\n self.input_shape = (3, self.img_rows, 
self.img_cols)\n else:\n self.input_shape = (self.img_rows, self.img_cols, 3)\n\n self.X = self.X.astype('float32')\n self.X /= 255\n self.X_final = self.X_final.astype('float32')\n self.X_final /= 255\n print('X shape:', self.X.shape)\n print(self.X.shape[0], 'Samples')\n\n num_datapoints = 3000\n self.X = self.X[0:num_datapoints]\n self.Y = self.Y[0:num_datapoints]\n\n num_datapoints = 2000\n self.X_final = self.X_final[0:num_datapoints]\n self.Y_final = self.Y_final[0:num_datapoints]\n\n self.Y_final = to_categorical(self.Y_final, self.num_classes)\n\n # Initialize Data\n kfold = StratifiedKFold(n_splits=self.nFolds, shuffle=True)\n\n if self.b_eval_advanced:\n # Loop through the indices the split() method returns\n for index, (train_indices, test_indices) in enumerate(kfold.split(self.X, self.Y)):\n if index == 0:\n self.Y = to_categorical(self.Y, self.num_classes)\n\n # Generate batches from indices\n xtrain, xtest = self.X[train_indices], self.X[test_indices]\n ytrain, ytest = self.Y[train_indices], self.Y[test_indices]\n\n self.data.append(tuple([xtrain, xtest, ytrain, ytest]))\n\n if not self.b_eval_advanced:\n self.Y = to_categorical(self.Y, self.num_classes)\n\n #print(np.asarray(self.data).shape)\n #print(self.data)\n print(\"Y_final Shape\", self.Y_final.shape)", "def data(self):\n (x_train, y_train), (_, _) = datasets.fashion_mnist.load_data()\n x_train = x_train.reshape((-1, 28, 28, 1))\n x_train, y_train = x_train.astype('float16') / 255.0, \\\n tf.keras.utils.to_categorical(y_train.astype('float16'), 10)\n (x_train, x_eval) = x_train[5000:], x_train[:5000]\n (y_train, y_eval) = y_train[5000:], y_train[:5000]\n train_data, eval_data = (x_train, y_train), (x_eval, y_eval)\n return train_data, eval_data", "def _get_train_feed_dict(self, batch):\n raise NotImplementedError", "def get_data():\r\n if not path_validation(MODEL_PATH, read_access=True):\r\n exit(0) \r\n if not path_validation(TEST_DATA_PATH, read_access=True):\r\n exit(0) \r\n if not path_validation(TEST_LABEL_PATH, read_access=True):\r\n exit(0) \r\n\r\n params = joblib.load(MODEL_PATH)\r\n test_images = np.load(TEST_DATA_PATH)\r\n test_labels = np.load(TEST_LABEL_PATH)\r\n\r\n # Addition of bias in test set\r\n test_images = np.insert(test_images, 0, 1, axis=1)\r\n\r\n return params, test_images, test_labels", "def _keras_update_shape(self, prep):\n\n # Run preprocessing on the training data\n X_transform = prep.fit_transform(self.X_train)\n\n # If the input shape has not been specified, it is simply the number of features in X_transform\n if 'input_shape' not in self.model.first_layer_kwargs:\n self.model.first_layer_kwargs['input_shape'] = tuple([X_transform.shape[1]])\n # Else update the input shape based on the number of features after preprocessing\n else:\n # Transform to a list to make the input_shape mutable\n self.model.first_layer_kwargs['input_shape'] = list(self.model.first_layer_kwargs['input_shape'])\n # Update the number of features based on X_transform\n if self.model.lags:\n self.model.first_layer_kwargs['input_shape'][-1] = X_transform.shape[1]//(self.model.lags + (1 if self.model.current_sample_as_input else 0))\n else:\n self.model.first_layer_kwargs['input_shape'][-1] = X_transform.shape[1]//np.prod(self.model.first_layer_kwargs['input_shape'][:-1])\n # Transform back to a tuple as required by Keras\n self.model.first_layer_kwargs['input_shape'] = tuple(self.model.first_layer_kwargs['input_shape'])\n \n # Ensure the Architecture has been updated\n self.model.architecture.iloc[0, 
2]['input_shape'] = self.model.first_layer_kwargs['input_shape']\n \n # 2D, 3D and 4D data is valid. \n # e.g. The input_shape can be a tuple of (subsequences, timesteps, features), with subsequences and timesteps as optional.\n # A 4D shape may be valid for e.g. a ConvLSTM with (timesteps, rows, columns, features) \n if len(self.model.first_layer_kwargs['input_shape']) > 5:\n err = \"Unsupported input_shape: {}\".format(self.model.first_layer_kwargs['input_shape'])\n raise Exception(err)", "def train_step(self):\r\n batch_images = next(self.data_loader.next_batch())\r\n _, loss, summary, ea = self.sess.run([self.model.train_op, self.model.total_loss, self.model.merged, self.model.euclidean_a_p],\r\n feed_dict={self.model.input: batch_images, self.model.is_training: True})\r\n \r\n return loss, summary", "def get_batch(self, batch_shape, use='train', val_set=None, cell_type=None, verbose=True):\n self.batch_size = batch_shape[0]\n self.data = np.zeros(batch_shape)\n self.labels = np.zeros([self.batch_size, 2] if self.use_softmax else self.batch_size)\n\n files = self.get_data_files(use=use, val_set=val_set, cell_type=cell_type)\n\n random_file_idxs = np.arange(len(files))\n np.random.shuffle(random_file_idxs)\n\n i = 0\n # num_negatives = 0\n progress = 0\n for count, idx in enumerate(random_file_idxs):\n if verbose and float(count)/len(random_file_idxs) >= progress + 0.05:\n progress += 0.05\n print str(int(round(progress * 100))) + \"%\",\n sys.stdout.flush()\n if abs(progress - 0.95) <= 0.01:\n print \"\"\n f = files[idx]\n d, l = self.load_point_cloud(f)\n d = self.format_point_cloud(d, batch_shape[1])\n self.data[i] = d\n self.labels[i] = l\n\n i += 1\n if i >= self.batch_size:\n # Augment batched point clouds by rotation and jittering\n # if use == 'train':\n # self.data = PointNetDataHandler.rotate_point_cloud(self.data)\n # self.data = PointNetDataHandler.jitter_point_cloud(self.data)\n # Yield batch\n yield self.data, self.labels\n i = 0\n # num_negatives = 0", "def getData(trainSize):\r\n return splitData([getReal(), getFake()], trainSize=trainSize)", "def preprocess(data_path, glove_path, embed_size):\n train_data = read_imdb(data_path, 'train')\n test_data = read_imdb(data_path, 'test')\n\n train_tokenized = []\n test_tokenized = []\n for review, _ in train_data:\n train_tokenized.append(tokenizer(review))\n for review, _ in test_data:\n test_tokenized.append(tokenizer(review))\n\n vocab = set(chain(*train_tokenized))\n vocab_size = len(vocab)\n print(\"vocab_size: \", vocab_size)\n\n word_to_idx = {word: i + 1 for i, word in enumerate(vocab)}\n word_to_idx['<unk>'] = 0\n\n train_features = np.array(pad_samples(encode_samples(train_tokenized, word_to_idx))).astype(np.int32)\n train_labels = np.array([score for _, score in train_data]).astype(np.int32)\n test_features = np.array(pad_samples(encode_samples(test_tokenized, word_to_idx))).astype(np.int32)\n test_labels = np.array([score for _, score in test_data]).astype(np.int32)\n\n weight_np = collect_weight(glove_path, vocab, word_to_idx, embed_size)\n return train_features, train_labels, test_features, test_labels, weight_np, vocab_size", "def train(self):\n raise NotImplementedError()", "def _train(self):\n\n batch = random.sample(self.D, min(self.batch_size, len(self.D)))\n no_state = np.zeros(self.stateCnt)\n\n states = [ o[0] for o in batch]\n states_ = [ (no_state if o[3] is None else o[3]) for o in batch ]\n\n p = []\n p_ = []\n for ii in range(len(batch)):\n p.append(self._predict(states[ii][:,:,:]))\n 
p_.append(self._predict(states_[ii][:,:,:]))\n\n batchLen = len(batch)\n\n x = np.zeros((batchLen, 84, 84, 1))\n y =np.zeros((batchLen, 11,11,6))\n\n for i in range(batchLen):\n o = batch[i]\n s = o[0]; a = o[1]; r = o[2]; s_ = o[3]\n\n t = p[i][0,:,:,:]\n if s_ is None:\n t[a] = r\n else:\n t[a] = r + self.gamma* np.amax(p_[i])\n x[i] = s\n y[i] = t\n\n self.model.fit(x,y,nb_epoch=1,verbose=0)", "def training_data():\n global _MEAN # pylint: disable=global-statement\n _np.random.seed(1)\n view = _skdc10.view.OfficialImageClassificationTask()\n permutation = _np.random.permutation(range(50000))\n if _MEAN is None:\n _MEAN = view.train.x.reshape((50000 * 32 * 32, 3)).mean(axis=0)\n return ((view.train.x[:50000, :][permutation, :] - _MEAN).\n transpose((0, 3, 1, 2)).astype('float32'),\n view.train.y[:50000][permutation].reshape((50000, 1)).astype('float32'))" ]
[ "0.6737078", "0.6703798", "0.6572939", "0.6394934", "0.6370723", "0.6355788", "0.63249135", "0.63132846", "0.62849104", "0.62823945", "0.6275704", "0.6259968", "0.6241224", "0.62354356", "0.6223649", "0.6207623", "0.616901", "0.6163054", "0.6130993", "0.61243504", "0.611347", "0.61085564", "0.6104256", "0.61030227", "0.61011994", "0.6087741", "0.6084944", "0.6080672", "0.60532576", "0.60508573", "0.6046444", "0.6039822", "0.6038137", "0.60283196", "0.6001797", "0.59986126", "0.5993287", "0.59895897", "0.5987061", "0.5972019", "0.59674525", "0.595568", "0.59476256", "0.59465736", "0.5945168", "0.5942271", "0.5940712", "0.5931433", "0.59248", "0.59213424", "0.5918461", "0.5914946", "0.59083533", "0.5903198", "0.590095", "0.58898616", "0.58878887", "0.58863777", "0.5884351", "0.58806574", "0.58785534", "0.58726025", "0.58686405", "0.5857125", "0.58499503", "0.58453196", "0.5839909", "0.5835183", "0.58337593", "0.58294773", "0.5829296", "0.58286744", "0.58224446", "0.5821294", "0.5819739", "0.5818863", "0.58128124", "0.58051276", "0.5802359", "0.578622", "0.57786465", "0.57660806", "0.5758518", "0.5754702", "0.57451373", "0.57428074", "0.5740012", "0.57337695", "0.57290834", "0.572478", "0.5722793", "0.5722493", "0.5707358", "0.56956476", "0.5691025", "0.5690974", "0.5688135", "0.56880635", "0.5685651", "0.56847346" ]
0.611388
20
Gets the next n data and label points from the training dataset, where n = batch_size; length is the expected length of the inputs
def next_training_data_batch(self, batch_size, expected_length): if self._training_data is None: self._load_training_data() if expected_length in self._training_data: actual_length = expected_length else: differences = np.abs(self._available_training_lengths - expected_length) mininimum_loc = np.argmin(differences) actual_length = self._available_training_lengths[mininimum_loc] all_data, all_labels = self._training_data[actual_length] if batch_size > len(all_data): print("Probably shouldn't do this; your batch size is greater than the size of the dataset") data = None labels = None while batch_size > 0: if len(all_data) - self.current_index[actual_length] < batch_size: # print("A" + str(self.current_index)) batch_size -= (len(all_data) - self.current_index[actual_length]) if self.current_index[actual_length] != len(all_data): if data is None: data = np.array(all_data[self.current_index[actual_length]:]) labels = np.array(all_labels[self.current_index[actual_length]:]) else: data = np.concatenate((data, all_data[self.current_index[actual_length]:]), axis=0) labels = np.concatenate((labels, all_labels[self.current_index[actual_length]:]), axis=0) self.current_index[actual_length] = 0 else: # print("B" + str(self.current_index)) if data is None: data = all_data[self.current_index[actual_length]:self.current_index[actual_length] + batch_size] labels = np.array(all_labels[self.current_index[actual_length]:self.current_index[actual_length] + batch_size]) else: data = np.concatenate((data, all_data[self.current_index[actual_length]:self.current_index[actual_length] + batch_size]), axis=0) labels = np.concatenate((labels, all_labels[self.current_index[actual_length]:self.current_index[actual_length] + batch_size]), axis=0) self.current_index[actual_length] += batch_size batch_size = 0 data = np.array(data) data = np.swapaxes(data, 0, 1) return (actual_length, (data, labels))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_batch(data, labels, batch_size):\n global _index_in_epoch\n start = _index_in_epoch\n _index_in_epoch += batch_size\n _num_examples = len(data)\n\n if _index_in_epoch > _num_examples:\n # Shuffle the data\n perm = np.arange(_num_examples)\n np.random.shuffle(perm)\n data = data[perm]\n labels = labels[perm]\n # Start next epoch\n start = 0\n _index_in_epoch = batch_size\n assert batch_size <= _num_examples\n\n end = _index_in_epoch\n return data[start:end], labels[start:end]", "def next(self, batch_size=np.inf):\n if self.batch_id == len(self.data):\n self.batch_id = 0\n # shuffle the data each pass over it.\n rng_state = np.random.get_state()\n np.random.shuffle(self.data)\n np.random.set_state(rng_state)\n np.random.shuffle(self.labels)\n \n end_idx = min(self.batch_id + batch_size, len(self.data))\n batch_data = (self.data[self.batch_id:end_idx])\n batch_labels = self.labels[self.batch_id:end_idx]\n batch_seqlen = (self.seqlen[self.batch_id:end_idx])\n self.batch_id = end_idx\n return batch_data, batch_labels, batch_seqlen", "def next(self, batch_size):\n if self.batch_id == len(self.data):\n self.batch_id = 0\n batch_data = (self.data[self.batch_id:min(self.batch_id + batch_size, len(self.data))])\n batch_labels = (self.labels[self.batch_id:min(self.batch_id + batch_size, len(self.data))])\n batch_seqlen = (self.seqlen[self.batch_id:min(self.batch_id + batch_size, len(self.data))])\n self.batch_id = min(self.batch_id + batch_size, len(self.data))\n\n\n return batch_data, batch_labels, batch_seqlen", "def next(self, batch_size):\n if self.batch_id == len(self.data):\n self.batch_id = 0\n batch_data = (self.data[self.batch_id:min(self.batch_id + batch_size, len(self.data))])\n batch_labels = (self.labels[self.batch_id:min(self.batch_id + batch_size, len(self.data))])\n batch_seqlen = (self.seqlen[self.batch_id:min(self.batch_id + batch_size, len(self.data))])\n self.batch_id = min(self.batch_id + batch_size, len(self.data))\n\n\n return batch_data, batch_labels, batch_seqlen", "def next_batch(self, batch_size):\n start = self._index_in_epoch\n end = min(start + batch_size, self._num_examples)\n batch_data = self._data[start:end]\n if self._label_used:\n batch_labels = self._labels[start:end]\n\n if end == self._num_examples:\n self._epochs_completed += 1\n self._index_in_epoch = 0\n if self._shuffled:\n perm = np.arange(self._num_examples)\n random.shuffle(perm)\n self._data = self._data[perm]\n if self._label_used:\n self._labels = self._labels[perm]\n else:\n self._index_in_epoch = end\n\n if self._label_used:\n return batch_data,batch_labels\n else:\n return batch_data", "def next_batch(self, batch_size):\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n self._epochs_completed += 1\n start = 0\n self._index_in_epoch = batch_size\n end = self._index_in_epoch\n return self._samples[start:end], self._labels[start:end]", "def next_batch(index,feature,label,batch_size):\n epochs_completed = 0\n examples = feature.shape[0]\n start = index*batch_size\n index_in_epoch =index*batch_size+batch_size-1\n if index_in_epoch > examples:\n # Finished epoch\n epochs_completed += 1\n # Shuffle the data\n perm = np.arange(examples)\n np.random.shuffle(perm)\n feature = feature[perm]\n label = label[perm]\n # Start next epoch\n start = 0\n index_in_epoch = batch_size\n assert batch_size <= examples\n end = index_in_epoch\n return feature[start:end], label[start:end]", "def generate_next_batch(self, data): \n \n batch_words = 
np.array(data[self.batch_lookup[self.batch_index]][0])\n batch_labels = np.array(data[self.batch_lookup[self.batch_index]][1])\n self.batch_index += 1\n if self.batch_index == len(data) - 1:\n self.epoch += 1\n return batch_words, batch_labels", "def next_batch(self, batch_size: int) -> Tuple[numpy.ndarray, numpy.ndarray]:\n assert batch_size <= self._num_examples\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n start = self._start_new_epoch(batch_size, start)\n end = self._index_in_epoch\n return self._input[start:end], self._labels[start:end]", "def next_batch(self,batch_size):\r\n end_indicator = self._indicator + batch_size\r\n if end_indicator > self._num_examples:\r\n if self._need_shuffle:\r\n self._shuffle_data()\r\n self._indicator = 0\r\n end_indicator = batch_size\r\n else:\r\n raise Exception(\"have no more examples.\")\r\n if end_indicator > self._num_examples:\r\n raise Exception(\"too lager batch size than examples.\")\r\n batch_data = self._data[self._indicator: end_indicator]\r\n batch_label = self._label[self._indicator: end_indicator]\r\n self._indicator = end_indicator\r\n return batch_data, batch_label", "def next_batch(self,batch_size):\r\n end_indicator = self._indicator + batch_size\r\n if end_indicator > self._num_examples:\r\n if self._need_shuffle:\r\n self._shuffle_data()\r\n end_indicator = batch_size\r\n else:\r\n raise Exception(\"have no more examples.\")\r\n\r\n if end_indicator > self._num_examples:\r\n raise Exception(\"batch size is larger than all examples.\")\r\n batch_data = self._data[self._indicator: end_indicator]\r\n batch_labels = self._labels[self._indicator: end_indicator]\r\n self._indicator = end_indicator\r\n return batch_data,batch_labels", "def next(self, batch_size):\n if self.batch_id == len(self.data):\n self.batch_id = 0\n self.data, self.labels, self.seqlen = shuffle(self.data, self.labels, self.seqlen)\n batch_data = (self.data[self.batch_id:min(self.batch_id +\n batch_size, len(self.data))])\n batch_labels = (self.labels[self.batch_id:min(self.batch_id +\n batch_size, len(self.data))])\n batch_seqlen = (self.seqlen[self.batch_id:min(self.batch_id +\n batch_size, len(self.data))])\n self.batch_id = min(self.batch_id + batch_size, len(self.data))\n return batch_data, batch_labels, batch_seqlen", "def next_batch(self, batch_size):\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n\n # Shuffle the data\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._features = self._features[perm]\n self._targets = self._targets[perm]\n\n # start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n\n end = self._index_in_epoch\n return self._features[start:end], self._targets[start:end]", "def next_batch(self):\n if self.ptr + self.batch_size >= self.size:\n head = 0\n tail = self.batch_size\n self.ptr = self.batch_size\n else:\n head = self.ptr\n tail = self.ptr + self.batch_size\n self.ptr += self.batch_size\n return self.train_x[head:tail, 0:self.fig_w**2], self.train_y[head:tail, 0:10]", "def next_batch(self, batch_size, fake_data=False):\r\n if fake_data:\r\n fake_image = [1.0 for _ in range(784)]\r\n fake_label = 0\r\n return [fake_image for _ in range(batch_size)], [fake_label for _ in range(batch_size)]\r\n start = self._index_in_epoch\r\n self._index_in_epoch += batch_size\r\n #print 
(0)\r\n #print(self._index_in_epoch,self._num_examples)\r\n #若当前训练读取的index>总体的images数时,则读取读取开始的batch_size大小的数据\r\n if self._index_in_epoch > self._num_examples:\r\n #print (0)\r\n # Finished epoch\r\n self._epochs_completed += 1\r\n # Shuffle the data\r\n perm = numpy.arange(self._num_examples)\r\n numpy.random.shuffle(perm)\r\n self._images = self._images[perm]\r\n self._labels = self._labels[perm]\r\n # Start next epoch\r\n start = 0\r\n self._index_in_epoch = batch_size\r\n assert batch_size <= self._num_examples\r\n end = self._index_in_epoch\r\n #print (\"start is:%d,end is:%d\"%(start,end))\r\n return self._images[start:end], self._labels[start:end]", "def train_next_batch(self, batch_size=None):", "def next_batch(self, batch_size, fake_data=False):\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = numpy.arange(self._num_examples)\n numpy.random.shuffle(perm)\n self._texts = self._texts[perm]\n self._topologys = self._topologys[perm]\n self._urls = self._urls[perm]\n self._demos = self._demos[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._texts[start:end], self._topologys[start:end], self._urls[start:end], self._demos[start:end], self._labels[start:end]", "def next_batch(self, batch_size):\r\n start = self._index_in_epoch\r\n self._index_in_epoch += batch_size\r\n\r\n if self._index_in_epoch > self._num_examples:\r\n # After each epoch we update this\r\n self._epochs_done += 1\r\n start = 0\r\n self._index_in_epoch = batch_size\r\n #print(\"numexamples \",self._num_examples)\r\n assert batch_size <= self._num_examples\r\n end = self._index_in_epoch\r\n\r\n return self._images[start:end], self._labels[start:end], self._img_names[start:end], self._cls[start:end]", "def next_batch(self, batch_size):\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._number_examples:\n # finished epoch\n self._epochs_completed += 1\n # Shuffle the data \n if self._shuffle:\n new_index = np.random.permutation(self._number_examples)\n self._X = self._X[new_index]\n self._y = self._y[new_index]\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._number_examples\n end = self._index_in_epoch\n return self._X[start:end], self._y[start:end]", "def next_batch(self, batch_size):\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._number_examples:\n # finished epoch\n self._epochs_completed += 1\n # Shuffle the data \n if self._shuffle:\n new_index = np.random.permutation(self._number_examples)\n self._X = self._X[new_index]\n self._y = self._y[new_index]\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._number_examples\n end = self._index_in_epoch\n return self._X[start:end], self._y[start:end]", "def next_batch(self,batch_size):\r\n start=self._index_in_epoch\r\n self._index_in_epoch=+batch_size\r\n\r\n if self._index_in_epoch > self._number_examples:\r\n # finished epoch\r\n self._epochs_completed += 1\r\n # Shuffle the data\r\n if self._shuffle:\r\n new_index = np.random.permutation(self._number_examples)\r\n self._X = self._X[new_index]\r\n self._y = self._y[new_index]\r\n start = 0\r\n self._index_in_epoch = batch_size\r\n assert batch_size <= self._number_examples\r\n 
end = self._index_in_epoch\r\n return self._X[start:end], self._y[start:end]", "def next_batch(self, batch_size):\n # If the caller wants all of the data simply return the whole data set as a triple\n if batch_size is None:\n self.__num_epochs += 1\n return (self.__x, self.__y1, self.__y2)\n\n if batch_size > self.__data_size:\n print(\"Please specify a batch size less than the number of entries in the data set\")\n sys.exit(2)\n\n if batch_size + self.__batch_cursor < self.__data_size:\n # If the batch size is less than the number of entries left in the data:\n # Take the next batch size number of elements and move the cursor forwards.\n x_batch = self.__x[self.__batch_cursor:batch_size + self.__batch_cursor]\n y1_batch = self.__y1[self.__batch_cursor:batch_size + self.__batch_cursor]\n y2_batch = self.__y2[self.__batch_cursor:batch_size + self.__batch_cursor]\n self.__batch_cursor = self.__batch_cursor + batch_size\n else:\n # If there is not enough data left then take the remaining data from the end and start again at the begining.\n x_batch = self.__x[self.__batch_cursor:]\n y1_batch = self.__y1[self.__batch_cursor:]\n y2_batch = self.__y2[self.__batch_cursor:]\n number_still_required = batch_size - (self.__data_size - self.__batch_cursor)\n x_batch = np.concatenate((x_batch, self.__x[0:number_still_required]))\n y1_batch = np.concatenate((y1_batch, self.__y1[0:number_still_required]))\n y2_batch = np.concatenate((y2_batch, self.__y2[0:number_still_required]))\n self.__batch_cursor = number_still_required\n self.__num_epochs += 1\n\n return (x_batch, y1_batch, y2_batch)", "def next_batch(self, batch_size):\n \n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start: end], self._labels[start: end]", "def next_batch(self, batchSize, use_labels=False):\n start = self._index_in_epochs\n self._index_in_epochs += batchSize\n\n if self._index_in_epochs >= self.xtrain.shape[0]:\n self._epochs_completed += 1\n perm = np.arange(self.xtrain.shape[0])\n np.random.shuffle(perm)\n self.xtrain = self.xtrain[perm, :]\n self.ytrain = self.ytrain[perm]\n start = 0\n self._index_in_epochs = batchSize\n\n end = self._index_in_epochs\n if use_labels:\n return self.xtrain[start:end, :], self.ytrain[start:end]\n else:\n return self.xtrain[start:end, :]", "def next_batch(self, batch_size):\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def next_batch(self, batch_size):\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = np.arange(self._num_examples)\n 
np.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def next_batch(self, batch_size):\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._number_examples:\n # finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n if self._shuffle:\n new_index = np.random.permutation(self._number_examples)\n self._X = self._X[new_index]\n self._y = self._y[new_index]\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._number_examples\n end = self._index_in_epoch\n return self._X[start:end], self._y[start:end]", "def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in range(batch_size)], [\n fake_label for _ in range(batch_size)]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n\n # Shuffle the data\n np.random.seed(0)\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def next_batch(self, batch_size):\n start = self._index_in_epoch\n if start + batch_size > self.num_examples:\n self._epochs_completed += 1\n rest_num_examples = self.num_examples - start\n images_rest_part = self._images[start:self.num_examples]\n labels_rest_part = self._labels[start:self.num_examples]\n self.permute()\n start = 0\n self._index_in_epoch = batch_size - rest_num_examples\n end = self._index_in_epoch\n images_new_part = self._images[start:end]\n labels_new_part = self._labels[start:end]\n\n result_images = np.concatenate(\n (images_rest_part, images_new_part), axis=0\n )\n result_labels = np.concatenate(\n (labels_rest_part, labels_new_part), axis=0\n )\n return result_images, result_labels\n else:\n self._index_in_epoch += batch_size\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in range(batch_size)], [fake_label for _ in range(batch_size)]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n\n # Shuffle data\n np.random.seed(0)\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n\n end = self._index_in_epoch\n\n return self._images[start:end], self._labels[start:end]", "def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n #fake_image = [1.0 for _ in xrange(784)]\n fake_image = [1.0 for _ in range(784)]\n fake_label = 0\n #return [fake_image for _ in xrange(batch_size)], [\n # fake_label for _ in 
xrange(batch_size)]\n return [fake_image for _ in range(batch_size)], [\n fake_label for _ in range(batch_size)]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = numpy.arange(self._num_examples)\n numpy.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in xrange(batch_size)], [\n fake_label for _ in xrange(batch_size)]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def next_batch(self, batch_size):\n X_batch = self.X_data[self.batch_num*batch_size:(self.batch_num+1)*batch_size]\n Y_batch = self.Y_data[self.batch_num*batch_size:(self.batch_num+1)*batch_size]\n self.batch_num += 1\n return X_batch, Y_batch", "def next_batch(self):\n\n start = self._index_in_epoch\n self._index_in_epoch += self._batch_size\n\n if self._index_in_epoch >= (self._dataset.num_examples - 1):\n # set to last image in data set\n self._index_in_epoch = self._dataset.num_examples - 1\n assert self._batch_size <= self._dataset.num_examples\n\n end = self._index_in_epoch\n\n return self._dataset.images[start:end], self._dataset.labels[start:end]", "def next_batch(self, batch_size, fake_data=False):\n if fake_data:\n fake_image = [1] * 784\n if self.one_hot:\n fake_label = [1] + [0] * 9\n else:\n fake_label = 0\n return [fake_image for _ in xrange(batch_size)], [\n fake_label for _ in xrange(batch_size)\n ]\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = numpy.arange(self._num_examples)\n numpy.random.shuffle(perm)\n self._images = self._images[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def next_batch(images, labels, step, batch_size):\n offset = (step * batch_size) % (images.shape[0] - batch_size)\n batch_images = images[offset: offset + batch_size]\n batch_labels = labels[offset:offset + batch_size]\n return batch_images, batch_labels", "def next_batch(self, batch_size, shuffle=True):", "def next(self):\n # Most batches will be equal to batch_size\n if self.cur < (self.n - self.batch_size):\n # Get positions of files in batch\n positions = self.order[self.cur:self.cur + self.batch_size]\n\n self.cur += self.batch_size\n\n # create Batches\n X_train, y_train, sample_weights = self.createBatches(positions)\n\n return X_train, y_train, 
sample_weights\n\n # Final batch is smaller than batch_size\n if self.cur < self.n:\n positions = self.order[self.cur::]\n\n # Step is maximum - next will return None\n self.cur = self.n\n\n # Create Batches\n X_train, y_train, sample_weights = self.createBatches(positions)\n\n return X_train, y_train, sample_weights\n\n else:\n # reshuffle order for next batch\n np.random.shuffle(self.order)\n\n # Reset cur\n self.cur = 0\n\n # Signal end of epoch\n return None", "def get_next_batch(self):\n if self.index_in_epoch + self.batch_size > self.X.shape[0]:\n idx = np.arange(0, self.X.shape[0])\n self.index_in_epoch = 0\n np.random.shuffle(idx)\n self.X = self.X[idx]\n self.y = self.y[idx]\n # idx = idx[self.index_in_epoch:self.index_in_epoch + self.batch_size]\n # data_shuffle = [self.X[i] for i in idx]\n # labels_shuffle = [self.y[i] for i in idx]\n # data_shuffle = np.asarray(data_shuffle)\n # data_shuffle = np.reshape(data_shuffle, newshape=(self.batch_size, 1024))\n #\n # labels_shuffle = np.asarray(labels_shuffle)\n # labels_shuffle = np.reshape(labels_shuffle, newshape=(self.batch_size, 26))\n data_shuffle = self.X[self.index_in_epoch:self.index_in_epoch + self.batch_size, :]\n data_shuffle = np.reshape(data_shuffle, newshape=(self.batch_size, 32, 32, 1))\n labels_shuffle = self.y[self.index_in_epoch:self.index_in_epoch + self.batch_size, :]\n labels_shuffle = np.reshape(labels_shuffle, newshape=(self.batch_size, 26))\n self.index_in_epoch += self.batch_size\n return data_shuffle, labels_shuffle", "def _next_train_batch(self, batch_size=128, replace=False):\n mask = np.random.choice(self.train_data.shape[0], batch_size, replace=replace)\n return self.train_data[mask], self.train_label[mask]", "def next_batch(num, data, labels):\n idx = np.arange(0, len(data))\n np.random.shuffle(idx)\n idx = idx[:num]\n data_shuffle = [data[i] for i in idx]\n labels_shuffle = [labels[i] for i in idx]\n \n return np.asarray(data_shuffle), np.asarray(labels_shuffle)", "def next_batch(num, data, labels):\n idx = np.arange(0 , len(data))\n np.random.shuffle(idx)\n idx = idx[:num]\n data_shuffle = [data[i] for i in idx]\n labels_shuffle = [labels[i] for i in idx]\n\n return np.asarray(data_shuffle), np.asarray(labels_shuffle)", "def next_batch(self, batch_size, seed=None):\n if seed:\n np.random.seed(seed)\n\n start = self.index_in_epoch\n self.index_in_epoch += batch_size\n if self.index_in_epoch > self.num_examples:\n # Finished epoch\n self.epochs_completed += 1\n # Shuffle the data\n perm = np.arange(self.num_examples)\n np.random.shuffle(perm)\n self.data_X = self.data_X[perm]\n self.data_Y = self.data_Y[perm]\n # Start next epoch\n start = 0\n self.index_in_epoch = batch_size\n assert batch_size <= self.num_examples\n end = self.index_in_epoch\n\n return self.data_X[start:end], self.data_Y[start:end]", "def next_batch(self, batch_size):\n raise NotImplementedError", "def next_batch(x, y, batch_size):\n index = np.arange(n_labeled)\n random_index = np.random.permutation(index)[:batch_size]\n return x[random_index], y[random_index]", "def next_batch(num, data, labels):\n idx = np.arange(0, len(data))\n np.random.shuffle(idx)\n idx = idx[:num]\n data_shuffle = [data[i] for i in idx]\n labels_shuffle = [labels[i] for i in idx]\n return np.asarray(data_shuffle), np.asarray(labels_shuffle)", "def _next(self):\n batch_start, batch_end = self.batch_start, self.batch_start + self.batch_size\n X_batch, y_batch = self.X[batch_start:batch_end], self.y[batch_start:batch_end]\n X_batch, y_batch = 
self.process_batch(X_batch, y_batch)\n if batch_end > self.X.shape[0]:\n self.batch_start = 0\n else:\n self.batch_start = batch_end\n return X_batch, y_batch", "def next(self):\n #print('next')\n batch_size = self.batch_size\n batch_data = nd.empty((batch_size,)+self.data_shape)\n batch_label = nd.empty((batch_size,)+self.label_shape)\n i = 0\n #self.cutoff = random.randint(800,1280)\n try:\n while i < batch_size:\n #print('N', i)\n data, label, annot = self.next_sample()\n R = self.get_data(data, label, annot)\n if R is None:\n continue\n data_out, label_out, flip_data_out, flip_label_out = R\n if not self.use_coherent:\n data = nd.array(data_out)\n data = nd.transpose(data, axes=(2, 0, 1))\n label = nd.array(label_out)\n #print(data.shape, label.shape)\n batch_data[i][:] = data\n batch_label[i][:] = label\n i += 1\n else:\n data = nd.array(data_out)\n data = nd.transpose(data, axes=(2, 0, 1))\n label = nd.array(label_out)\n data2 = nd.array(flip_data_out)\n data2 = nd.transpose(data2, axes=(2, 0, 1))\n label2 = nd.array(flip_label_out)\n #M = nd.array(M)\n #print(data.shape, label.shape)\n batch_data[i][:] = data\n batch_label[i][:] = label\n #i+=1\n j = i+self.per_batch_size//2\n batch_data[j][:] = data2\n batch_label[j][:] = label2\n i += 1\n if j%self.per_batch_size==self.per_batch_size-1:\n i = j+1\n except StopIteration:\n if i<batch_size:\n raise StopIteration\n\n #return {self.data_name : batch_data,\n # self.label_name : batch_label}\n #print(batch_data.shape, batch_label.shape)\n return mx.io.DataBatch([batch_data], [batch_label], batch_size - i)", "def _next(self):\n batch_start, batch_end = self.batch_start, self.batch_start + self.batch_size\n if batch_end > self.X.shape[0]:\n self.shuffle()\n return self._next()\n else:\n batch_indices = self.indices[batch_start:batch_end]\n X_batch, y_batch = self.X[batch_indices], self.y[batch_indices]\n X_batch, y_batch = self.process_batch(X_batch, y_batch)\n self.batch_start = batch_end\n return X_batch, y_batch", "def next_batch(self, batch_size, fake_data=False, shuffle=True):\r\n if fake_data:\r\n fake_image = [1] * 784\r\n if self.one_hot:\r\n fake_label = [1] + [0] * 9\r\n else:\r\n fake_label = 0\r\n return [fake_image for _ in xrange(batch_size)], [\r\n fake_label for _ in xrange(batch_size)\r\n ]\r\n start = self._index_in_epoch\r\n # Shuffle for the first epoch\r\n if self._epochs_completed == 0 and start == 0 and shuffle:\r\n perm0 = numpy.arange(self._num_examples)\r\n numpy.random.shuffle(perm0)\r\n self._images = self.images[perm0]\r\n self._labels = self.labels[perm0]\r\n # Go to the next epoch\r\n if start + batch_size > self._num_examples:\r\n # Finished epoch\r\n self._epochs_completed += 1\r\n # Get the rest examples in this epoch\r\n rest_num_examples = self._num_examples - start\r\n images_rest_part = self._images[start:self._num_examples]\r\n labels_rest_part = self._labels[start:self._num_examples]\r\n # Shuffle the data\r\n if shuffle:\r\n perm = numpy.arange(self._num_examples)\r\n numpy.random.shuffle(perm)\r\n self._images = self.images[perm]\r\n self._labels = self.labels[perm]\r\n # Start next epoch\r\n start = 0\r\n self._index_in_epoch = batch_size - rest_num_examples\r\n end = self._index_in_epoch\r\n images_new_part = self._images[start:end]\r\n labels_new_part = self._labels[start:end]\r\n return numpy.concatenate((images_rest_part, images_new_part), axis=0), numpy.concatenate((labels_rest_part, labels_new_part), axis=0)\r\n else:\r\n self._index_in_epoch += batch_size\r\n end = self._index_in_epoch\r\n 
return self._images[start:end], self._labels[start:end]", "def get_input_fn_training(Xtrain_ul, Xtrain_l, Xtest, ytrain_ul, ytrain_l, ytest, batch_size, num_labeled):\n dataset = input_data.Data(Xtrain_ul,\n Xtrain_l,\n Xtest,\n ytrain_ul,\n ytrain_l,\n ytest,\n num_labeled, \n batch_size, \n shuffle=True)\n return dataset.next_batch()", "def next_batch(self, batch_size, fake_data=False, shuffle=True):\r\n if fake_data:\r\n #fake_image = [1] * 784\r\n fake_image = [1]*6400\r\n if self.one_hot:\r\n #fake_label = [1] + [0] * 9\r\n fake_label = [1]+[0]*(people-1)\r\n else:\r\n fake_label = 0\r\n return [fake_image for _ in xrange(batch_size)], [\r\n fake_label for _ in xrange(batch_size)\r\n ]\r\n start = self._index_in_epoch\r\n # Shuffle for the first epoch\r\n if self._epochs_completed == 0 and start == 0 and shuffle:\r\n perm0 = numpy.arange(self._num_examples)\r\n numpy.random.shuffle(perm0)\r\n self._images = self.images[perm0]\r\n self._labels = self.labels[perm0]\r\n # Go to the next epoch\r\n if start + batch_size > self._num_examples:\r\n # Finished epoch\r\n self._epochs_completed += 1\r\n # Get the rest examples in this epoch\r\n rest_num_examples = self._num_examples - start\r\n images_rest_part = self._images[start:self._num_examples]\r\n labels_rest_part = self._labels[start:self._num_examples]\r\n # Shuffle the data\r\n if shuffle:\r\n perm = numpy.arange(self._num_examples)\r\n numpy.random.shuffle(perm)\r\n self._images = self.images[perm]\r\n self._labels = self.labels[perm]\r\n # Start next epoch\r\n start = 0\r\n self._index_in_epoch = batch_size - rest_num_examples\r\n end = self._index_in_epoch\r\n images_new_part = self._images[start:end]\r\n labels_new_part = self._labels[start:end]\r\n return numpy.concatenate((images_rest_part, images_new_part), axis=0) , numpy.concatenate((labels_rest_part, labels_new_part), axis=0)\r\n else:\r\n self._index_in_epoch += batch_size\r\n end = self._index_in_epoch\r\n return self._images[start:end], self._labels[start:end]", "def next_batch(self, batch_size):\n batch_data = np.zeros([batch_size,] + list(self.example_shape))\n for i in range(batch_size):\n index = self.q.pop()\n batch_data[i,...] 
= self.data[index]\n if len(self.q)==0:\n self.__new_epoch()\n\n return batch_data", "def next_batch(self):\n this_batch_sentences = self.sentences_batches[self.pointer]\n this_batch_labels = self.labels_batches[self.pointer]\n self.pointer = (self.pointer + 1) % self.num_batch\n return this_batch_sentences, this_batch_labels", "def next_batch(self, batch_size):\n start = self.index_in_epoch\n self.index_in_epoch += batch_size\n self.epoch += batch_size/self.num_examples\n\n # When all the training data is ran, shuffles it\n if self.index_in_epoch > self.num_examples and self.shuffle:\n self.indexer = np.random.permutation(self.num_examples)\n # Start next epoch\n start = 0\n self.index_in_epoch = batch_size\n assert batch_size <= self.num_examples\n\n if self.iterate:\n batch_df = pd.DataFrame()\n if self.epoch < 1:\n batch_df = pd.read_csv(self.path, nrows=batch_size, skiprows=start)\n else:\n for i in range(batch_size):\n item = pd.read_csv(self.path, nrows=1, skiprows=self.indexer[start+i])\n batch_df = pd.concat(item)\n else:\n batch_df = self.df[start: self.index_in_epoch]\n\n examples = np.multiply(batch_df.iloc[:, 1:].values.astype(np.float), 1.0 / 255.0)\n labels = self.dense_to_one_hot(batch_df.iloc[:, 0].values.ravel(), 10)\n\n batch = {'features': examples, 'labels': labels}\n return batch", "def next_batch(self, batch_size, shuffle=False):\n\n start = self._index_in_epoch\n # Shuffle for the first epoch\n if self._epochs_completed == 0 and start == 0 and shuffle:\n perm0 = np.arange(self._num_examples)\n np.random.shuffle(perm0)\n self._data_index = self._data_index[perm0]\n # Go to the next epoch\n if start + batch_size > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Get the rest examples in this epoch\n rest_num_examples = self._num_examples - start\n _data_index_rest_part = self._data_index[start:self._num_examples]\n imgs_batch_rest, labels_batch_rest = self._read_batch_data(_data_index_rest_part)\n # Shuffle the data\n if shuffle:\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._data_index = self._data_index[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size - rest_num_examples\n end = self._index_in_epoch\n _data_index_new_part = self._data_index[start:end]\n imgs_batch_new_part, labels_batch_new_part = self._read_batch_data(_data_index_new_part)\n imgs_batch = np.concatenate((imgs_batch_rest, imgs_batch_new_part), axis=0)\n labels_batch = np.concatenate((labels_batch_rest, labels_batch_new_part), axis=0)\n return imgs_batch, labels_batch\n else:\n self._index_in_epoch += batch_size\n end = self._index_in_epoch\n imgs_batch, labels_batch = self._read_batch_data(self._data_index[start:end])\n return imgs_batch, labels_batch", "def next_batch(self, batch_size):\r\n raise NotImplementedError", "def _generate_training_batch(ground_truth_data, representation_function,\n batch_size, num_points, random_state):\n points = None # Dimensionality depends on the representation function.\n labels = np.zeros(num_points, dtype=np.int64)\n for i in range(num_points):\n labels[i], feature_vector = _generate_training_sample(\n ground_truth_data, representation_function, batch_size, random_state)\n if points is None:\n points = np.zeros((num_points, feature_vector.shape[0]))\n points[i, :] = feature_vector\n return points, labels", "def next_batch(self, shuffle=True):\n start = self._index_in_epoch\n # Shuffle for the first epoch\n if self._epochs_completed == 0 and start == 0 and shuffle:\n perm0 = 
np.arange(self._num_examples)\n np.random.shuffle(perm0)\n self._units = self.units.iloc[perm0]\n self._labels = self.labels[perm0]\n # Go to the next epoch\n if start + self._batch_size > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Get the rest examples in this epoch\n rest_num_examples = self._num_examples - start\n units_rest_part = self._units[start:self._num_examples]\n labels_rest_part = self._labels[start:self._num_examples]\n # Shuffle the data\n if shuffle:\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._units = self.units.iloc[perm]\n self._labels = self.labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = self._batch_size - rest_num_examples\n end = self._index_in_epoch\n units_new_part = self._units[start:end]\n labels_new_part = self._labels[start:end]\n return np.concatenate((units_rest_part, units_new_part), axis=0), np.concatenate(\n (labels_rest_part, labels_new_part), axis=0)\n else:\n self._index_in_epoch += self._batch_size\n end = self._index_in_epoch\n return self._units[start:end], self._labels[start:end]", "def next_batch(self, batch_size=8):\n raise NotImplementedError()", "def next_batch(self, fake_data=False, shuffle=True):\n if fake_data:\n fake_unit = [1] * self.column_number\n if self.one_hot:\n fake_label = 1\n else:\n fake_label = 0\n fake_date = [i for i in range(self.column_number)]\n return [fake_unit for _ in range(self._batch_size)], [fake_label for _ in range(self._batch_size)], [\n fake_date for _ in range(self._batch_size)]\n start = self._index_in_epoch\n # Shuffle for the first epoch\n if self._epochs_completed == 0 and start == 0 and shuffle:\n perm0 = np.arange(self._num_examples)\n np.random.shuffle(perm0)\n self._units = self.units.iloc[perm0]\n self._labels = self.labels[perm0]\n self._dates = self.dates[perm0]\n # Go to the next epoch\n if start + self._batch_size > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Get the rest examples in this epoch\n rest_num_examples = self._num_examples - start\n units_rest_part = self._units[start:self._num_examples]\n labels_rest_part = self._labels[start:self._num_examples]\n dates_rest_part = self._dates[start:self._num_examples]\n # Shuffle the data\n if shuffle:\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._units = self.units.iloc[perm]\n self._labels = self.labels[perm]\n self._dates = self.dates[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = self._batch_size - rest_num_examples\n end = self._index_in_epoch\n units_new_part = self._units[start:end]\n labels_new_part = self._labels[start:end]\n dates_new_part = self._dates[start:end]\n return np.concatenate((units_rest_part, units_new_part), axis=0), np.concatenate(\n (labels_rest_part, labels_new_part), axis=0), np.concatenate((dates_rest_part, dates_new_part), axis=0)\n else:\n self._index_in_epoch += self._batch_size\n end = self._index_in_epoch\n return self._units[start:end], self._labels[start:end], self._dates[start:end]", "def next_train_batch(self, batch_size):\n if (not self.has_next_train()):\n self._random_permutation()\n self.train_next = 0\n if (self.train_next + batch_size <= len(self.train_list)):\n real_batch_size = batch_size\n else:\n real_batch_size = len(self.train_list) - self.train_next\n img_set = np.zeros([batch_size, self.img_height, self.img_width, 3])\n ground_truth_set = np.zeros([batch_size, self.img_height, self.img_width])\n for i in range(self.train_next, self.train_next + 
real_batch_size):\n train_ind = self.train_list[self.train_permutation[i]]\n img_path = join(self.dataset_dir, 'data/jpg_images', train_ind + '.jpg')\n img_set[i - self.train_next] = self.load_image(img_path)\n mat_path = join(self.dataset_dir, 'data/label_mat', train_ind + '.mat')\n ground_truth_set[i - self.train_next] = self.load_ground_truth(mat_path)\n dup_cnt = 0\n while (real_batch_size < batch_size):\n img_set[real_batch_size] = img_set[dup_cnt]\n ground_truth_set[real_batch_size] = ground_truth_set[dup_cnt]\n dup_cnt = dup_cnt + 1\n real_batch_size = real_batch_size + 1\n self.train_next = self.train_next + batch_size\n return [img_set, ground_truth_set]", "def next_batch(self, batch_size, shuffle=True):\n\n\n\t\tstart = self._index_in_epoch\n\t\t# Shuffle for the first epoch\n\t\tif self._epochs_completed == 0 and start == 0 and shuffle:\n\t\t\tperm0 = numpy.arange(self._num_examples)\n\t\t\tnumpy.random.shuffle(perm0)\n\t\t\tself._images = self.images[perm0]\n\t\t\tself._labels = self.labels[perm0]\n\n\t\t# Go to the next epoch\n\t\tif start + batch_size > self._num_examples:\n\t\t\t# Finished epoch\n\t\t\tself._epochs_completed += 1\n\t\t\t# Get the rest examples in this epoch\n\t\t\trest_num_examples = self._num_examples - start\n\t\t\timages_rest_part = self._images[start:self._num_examples]\n\t\t\tlabels_rest_part = self._labels[start:self._num_examples]\n\t\t\t# Shuffle the data\n\t\t\tif shuffle:\n\t\t\t\tperm = numpy.arange(self._num_examples)\n\t\t\t\tnumpy.random.shuffle(perm)\n\t\t\t\tself._images = self.images[perm]\n\t\t\t\tself._labels = self.labels[perm]\n\t\t\t# Start next epoch\n\t\t\tstart = 0\n\t\t\tself._index_in_epoch = batch_size - rest_num_examples\n\t\t\tend = self._index_in_epoch\n\t\t\timages_new_part = self._images[start:end]\n\t\t\tlabels_new_part = self._labels[start:end]\n\t\t\treturn numpy.concatenate((images_rest_part, images_new_part), axis=0), numpy.concatenate((labels_rest_part, labels_new_part), axis=0)\n\t\telse:\n\t\t\tself._index_in_epoch += batch_size\n\t\t\tend = self._index_in_epoch\n\t\t\treturn self._images[start:end], self._labels[start:end]", "def calc_predict_next_token_index(state, total_kv_pooling, max_len, chunk_len,\n chunk_offset):\n current_token = state // total_kv_pooling\n sequence_length = max_len\n\n if chunk_len is not None:\n if chunk_offset != 0:\n current_token -= chunk_offset * (current_token >= chunk_offset)\n current_token = current_token % chunk_len\n sequence_length = chunk_len\n return current_token, sequence_length", "def batch_data(cls, train_data, train_labels, batch_size):\n for batch in range(int(np.ceil(train_data.shape[0] / batch_size))):\n start = batch_size * batch\n end = start + batch_size\n if end > train_data.shape[0]:\n yield batch, (train_data[start:train_data.shape[0]], \\\n train_labels[start:train_data.shape[0]])\n else:\n yield batch, (train_data[start:end], \\\n train_labels[start:end])", "def get_train_input(self, prev, i):\n pass", "def batch_iter(data: Union[np.ndarray, List[Any]], labels: Union[np.ndarray, List[Any]],\n batch_size: int, num_epochs: int) -> Tuple[Iterable[Any], Iterable[Any]]:\n assert len(data) == len(labels)\n\n for _ in range(num_epochs):\n start_index = 0\n while start_index < len(data) - 1:\n end_index = min(len(data) - 1, start_index + batch_size)\n\n xdata = data[start_index: end_index]\n ydata = labels[start_index: end_index]\n\n yield xdata, ydata\n\n start_index += batch_size", "def next_batch(self, batch_size, shuffle=True):\n start = self._index_in_epoch\n # 
Shuffle for the first epoch\n if self._epochs_completed == 0 and start == 0 and shuffle:\n perm0 = np.arange(self._num_examples)\n np.random.shuffle(perm0)\n self._images = self.images[perm0]\n self._labels = self.labels[perm0]\n # Go to the next epoch\n if start + batch_size > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Get the rest examples in this epoch\n rest_num_examples = self._num_examples - start\n images_rest_part = self._images[start:self._num_examples]\n labels_rest_part = self._labels[start:self._num_examples]\n # Shuffle the data\n if shuffle:\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._images = self.images[perm]\n self._labels = self.labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size - rest_num_examples\n end = self._index_in_epoch\n images_new_part = self._images[start:end]\n labels_new_part = self._labels[start:end]\n return np.concatenate(\n (images_rest_part, images_new_part), axis=0), np.concatenate(\n (labels_rest_part, labels_new_part), axis=0)\n else:\n self._index_in_epoch += batch_size\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def next(self):\n if self._curr_batch + 1 > self.num_batches:\n # no more batches in current iteration through data set so start\n # new epoch ready for another pass and indicate iteration is at end\n self.new_epoch()\n raise StopIteration()\n # create an index slice corresponding to current batch number\n batch_slice = slice(self._curr_batch * self.batch_size,\n (self._curr_batch + 1) * self.batch_size)\n inputs_batch = self.inputs[batch_slice]\n targets_batch = self.targets[batch_slice]\n self._curr_batch += 1\n return inputs_batch, targets_batch", "def next_batch(self, batch_size, shuffle=True):\n start = self._index_in_epoch\n # Shuffle for the first epoch\n if self._epochs_completed == 0 and start == 0 and shuffle:\n perm0 = numpy.arange(self._num_examples)\n numpy.random.shuffle(perm0)\n self._images = self.images[perm0]\n self._labels = self.labels[perm0]\n # Go to the next epoch\n if start + batch_size > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Get the rest examples in this epoch\n rest_num_examples = self._num_examples - start\n images_rest_part = self._images[start:self._num_examples]\n labels_rest_part = self._labels[start:self._num_examples]\n # Shuffle the data\n if shuffle:\n perm = numpy.arange(self._num_examples)\n numpy.random.shuffle(perm)\n self._images = self.images[perm]\n self._labels = self.labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size - rest_num_examples\n end = self._index_in_epoch\n images_new_part = self._images[start:end]\n labels_new_part = self._labels[start:end]\n return numpy.concatenate((images_rest_part, images_new_part), axis=0), numpy.concatenate(\n (labels_rest_part, labels_new_part), axis=0)\n else:\n self._index_in_epoch += batch_size\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def generate(\n self,\n dataset: Tensor,\n labels: Tensor,\n chunk_size: int) -> Tuple[\n int, Iterator[Tuple[Tensor, Tensor]]]:", "def _get_epochs(self, \n n, \n X_train, \n Y_train):\n for i in range(n):\n yield self._get_batch(X_train, Y_train)", "def next(self):\n if self.curr_idx == len(self.idx):\n raise StopIteration\n #i = batches index, j = starting record\n i, j = self.idx[self.curr_idx] \n self.curr_idx += 1\n\n indices = self.ndindex[i][j:j + self.batch_size]\n sentences = 
self.ndsent[i][j:j + self.batch_size]\n characters = self.ndchar[i][j:j + self.batch_size]\n label = self.ndlabel[i][j:j + self.batch_size]\n\n return DataBatch([sentences, characters], [label], pad=0, index = indices, bucket_key=self.buckets[i],\n provide_data=[DataDesc(name=self.data_names[0], shape=sentences.shape, layout=self.layout),\n DataDesc(name=self.data_names[1], shape=characters.shape, layout=self.layout)],\n provide_label=[DataDesc(name=self.label_name, shape=label.shape, layout=self.layout)])", "def next_batch(self, batch_size):\n\n all_idx = np.arange(0, self.length)\n np.random.shuffle(all_idx)\n batch_idx = all_idx[:batch_size]\n batch_imgs = [self.images[i] for i in batch_idx]\n batch_traces = [self.traces[i] for i in batch_idx]\n return batch_imgs, batch_traces", "def next_batch(self):\n # Whether an epoch is done.\n done = False\n samples = []\n for _ in range(self.batch_size):\n # Indeed, `>` will not occur.\n if self.ptr >= self.dataset_size:\n done = True\n break\n else:\n self.ptr += 1\n sample = self.enqueuer.queue.get()\n samples.append(sample)\n # print 'queue size: {}'.format(self.enqueuer.queue.qsize())\n # Indeed, `>` will not occur.\n if self.ptr >= self.dataset_size:\n done = True\n return samples, done", "def sequential_data_sample(data, seq_len, batch_size, start_idx):\n assert isinstance(batch_size, int)\n assert isinstance(data, list)\n assert isinstance(start_idx, int)\n assert seq_len > 0\n assert batch_size > 0\n assert start_idx >= 0\n\n n = len(data)\n\n x, y = [], []\n for b in range(batch_size):\n if (start_idx+seq_len+1) >= n:\n start_idx = seq_len-(n-start_idx)\n\n x.append(data[start_idx:start_idx + seq_len])\n y.append(data[start_idx + 1:start_idx + seq_len + 1])\n start_idx = start_idx+seq_len\n\n return start_idx, x, y", "def next_batch(X1, X2, batch_size):\n tot = X1.shape[0]\n total = math.ceil(tot / batch_size)\n for i in range(int(total)):\n start_idx = i * batch_size\n end_idx = (i + 1) * batch_size\n end_idx = min(tot, end_idx)\n batch_x1 = X1[start_idx: end_idx, ...]\n batch_x2 = X2[start_idx: end_idx, ...]\n yield (batch_x1, batch_x2, (i + 1))", "def nextBatch(self, batch_size):\n self._start = self._cursor\n self._cursor += batch_size\n if self._start + batch_size > self._num_samples:\n rest_num_samples = self._num_samples - self._start\n word_batch = np.zeros((batch_size, self._sentences.shape[1]), dtype=np.int32)\n tag_batch = np.zeros((batch_size), dtype=np.int32)\n word_batch[0:rest_num_samples] = self._sentences[self._start:self._num_samples]\n tag_batch[0:rest_num_samples] = self.labels[self._start:self._num_samples]\n\n return word_batch, tag_batch\n else:\n end = self._cursor\n return self._sentences[self._start:end], self._labels[self._start:end]", "def next_batch(self, batch_size, batch_wrap=True, shuffle=True):\n start = self.i_in_epoch\n if self.epochs_completed == 0 and start == 0 and shuffle:\n self.shuffle()\n\n data_batch = [0] * self.nb_data\n if start + batch_size >= self.d_size:\n # Finished epoch\n self.epochs_completed += 1\n self.i_in_epoch = 0\n for idx_dt in range(self.nb_data):\n data_batch[idx_dt] = self.data_list[idx_dt][start:self.d_size]\n if shuffle:\n self.shuffle()\n\n if batch_wrap:\n # Start next epoch\n self.i_in_epoch = batch_size - (self.d_size - start)\n end = self.i_in_epoch\n\n for idx_dt in range(self.nb_data):\n data_new_part = self.data_list[idx_dt][0:end]\n # e.g.shape of two inputs: (58, 12), (70, 12)\n data_batch[idx_dt] = np.vstack([data_batch[idx_dt], data_new_part])\n return 
data_batch\n else:\n self.i_in_epoch += batch_size\n end = self.i_in_epoch\n for idx_dt in range(self.nb_data):\n data_batch[idx_dt] = self.data_list[idx_dt][start:end]\n return data_batch", "def train_data() -> Iterator[Tuple[Label, ChanneledImage]]:\n return zip(*get_data(TRAIN_FILES, 60000))", "def next(self):\n\n if self.i_sample < self.n_sample:\n df_batch = self.grouped[self.i_sample:min(self.n_sample, self.i_sample + self.batch_size)]\n # at end of epoch, number of sample remains may be smaller than batch size\n if len(df_batch) < self.batch_size:\n df_sample = random.sample(self.grouped, self.batch_size-len(df_batch))\n df_batch = df_batch + df_sample\n try:\n assert len(df_batch) == self.batch_size\n except AssertionError:\n print(self.i_sample, df_sample, df_batch)\n\n # get random frame_idxs\n if self.train:\n flips = np.random.choice(a=[False, True], size=(self.batch_size,), p=[0.5, 0.5])\n else:\n flips = np.zeros(self.batch_size, dtype=bool)\n\n\n video = sample_clips(df_batch, flips, self.batch_size, self.n_frame,\n self.scale_w, self.scale_h, self.sample_half_time, self.train)\n\n bboxes = np.zeros((self.batch_size, self.n_frame // self.temporal_scale, self.n_bbox, 5))\n labels = np.zeros((self.batch_size, self.n_bbox, self.num_class))\n for i in range(len(df_batch)):\n tmp_bbox, tmp_label = self.get_bbox_and_label(df_batch[i], flips[i], i, self.scale_w, self.scale_h)\n bboxes[i] = tmp_bbox\n labels[i] = tmp_label\n\n if self.debug_dataloader:\n with open('dataset/AVA_v2.1/ava_action_list_v2.1.pbtxt') as fd:\n lines = fd.readlines()\n\n labels_info = []\n for i in range(80):\n name_line = lines[i * 5 + 1]\n label_id_line = lines[i * 5 + 2]\n label_type_line = lines[i * 5 + 3]\n\n name = name_line[name_line.find('\"') + 1:name_line.rfind('\"')]\n label_id = int(label_id_line.strip().split(':')[1].strip())\n label_type = label_type_line.strip().split(':')[1].strip()\n\n assert label_id == i + 1\n labels_info.append({\n 'name': name,\n 'label_type': label_type\n })\n\n for bidx in range(self.batch_size):\n s_video = video[bidx, ...]\n s_bboxes = bboxes[bidx, ...]\n s_labels = labels[bidx, ...]\n\n window_name = 'batch_idx_'+str(bidx)\n if self.train:\n window_name += '_train'\n else:\n window_name += '_val'\n\n\n bbox = s_bboxes[0, 0, 1:].astype(np.int32)\n label_indices = np.where(s_labels[0, :])[0]\n\n for fidx in range(self.n_frame):\n # print('fidx', fidx)\n save_name = window_name + '_' + str(fidx)\n tmp_img = (s_video[:, fidx, :, :].transpose((1,2,0))).astype(np.uint8).copy()\n\n cv2.rectangle(tmp_img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=(0,0,255), thickness=2)\n for en_idx, label_index in enumerate(label_indices):\n # print('label_index', label_index, 'len', len(labels_info))\n cv2.putText(tmp_img, labels_info[label_index]['name'], (bbox[0], bbox[1] + en_idx * 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, color=(0, 255, 0), thickness=1)\n\n cv2.imwrite(save_name+'.jpg', tmp_img)\n\n\n #print(video.shape, bboxes.shape, labels.shape)\n ret = mx.io.DataBatch(data=[mx.nd.array(video), mx.nd.array(bboxes)],\n label=[mx.nd.array(labels),],\n provide_data=self.provide_data,\n provide_label=self.provide_label)\n\n self.i_sample += self.batch_size\n return ret\n else:\n raise StopIteration", "def next(self):\r\n # print(self.cursor)\r\n # print(self.num_data)\r\n if self.iter_next():\r\n if (self.cursor + self.batch_size) <= self.num_data:\r\n # print ('---------小于-----------')\r\n self.data = np.zeros((self.batch_size, 361))\r\n self.label = np.zeros((self.batch_size, 361))\r\n 
self.dataBatch = np.zeros((self.batch_size, 361))\r\n self.labelBatch = np.zeros((self.batch_size, 361))\r\n # self.data, self.label = self._read()\r\n for i in range(self.batch_size):\r\n self.dataBatch, self.labelBatch = self._read()\r\n self.cursor += 1\r\n # print self.cursor, self.num_data\r\n self.data[i,:] = self.dataBatch[0][1]\r\n self.label[i,:] = self.labelBatch[0][1]\r\n # print (self.data)\r\n return {self.data_name : self.data,\r\n self.label_name : self.label}\r\n else:\r\n # print('-----------不小于-----------')\r\n sizex = self.num_data - self.cursor\r\n self.data = np.zeros((sizex,361))\r\n self.label = np.zeros((sizex,361))\r\n self.dataBatch = np.zeros((sizex,361))\r\n self.labelBatch = np.zeros((sizex,361))\r\n # print(sizex)\r\n # print(self.cursor)\r\n for i in range(sizex):\r\n # print(self.cursor)\r\n self.dataBatch,self.labelBatch = self._read()\r\n self.cursor += 1\r\n self.data[i,:] = self.dataBatch[0][1]\r\n self.label[i,:] = self.labelBatch[0][1]\r\n return {self.data_name : self.data,\r\n self.label_name : self.label}\r\n else:\r\n return StopIteration", "def train_input_fn(features, labels, batch_size):\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices((dict(zip(labels, features))))\n\n # Shuffle, repeat, and batch the examples.\n dataset = dataset.shuffle(1000).repeat().batch(batch_size)\n\n # Return the read end of the pipeline.\n return dataset.make_initializable_iterator().get_next()", "def next_batch(self, batch_size, shuffle=True):\n\n start = self._index_in_epoch\n # Shuffle for the first epoch\n if self._epochs_completed == 0 and start == 0 and shuffle:\n perm0 = np.arange(self._num_examples)\n np.random.shuffle(perm0)\n self._inps = self.inps[perm0]\n self._outs = self.outs[perm0]\n # Go to the next epoch\n if start + batch_size > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Get the rest examples in this epoch\n rest_num_examples = self._num_examples - start\n inps_rest_part = self._inps[start:self._num_examples]\n outs_rest_part = self._outs[start:self._num_examples]\n # Shuffle the data\n if shuffle:\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._inps = self.inps[perm]\n self._outs = self.outs[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size - rest_num_examples\n end = self._index_in_epoch\n inps_new_part = self._inps[start:end]\n outs_new_part = self._outs[start:end]\n return np.concatenate((inps_rest_part, inps_new_part), axis=0) , np.concatenate((outs_rest_part, outs_new_part), axis=0)\n else:\n self._index_in_epoch += batch_size\n end = self._index_in_epoch\n return self._inps[start:end], self._outs[start:end]", "def next(self):\n if self._curr_batch + 1 > self.num_batches:\n # no more batches in current iteration through data set so start\n # new epoch ready for another pass and indicate iteration is at end\n self.new_epoch()\n raise StopIteration()\n # create an index slice corresponding to current batch number\n batch_slice = slice(self._curr_batch * self.batch_size,\n (self._curr_batch + 1) * self.batch_size)\n inputs_batch = self.inputs[batch_slice]\n targets_batch = self.targets[batch_slice]\n # target_ids_global = self.target_ids[batch_slice]\n target_ids_batch = self.target_ids[batch_slice]\n self._curr_batch += 1\n\n batch_inputs, batch_target_ids, batch_targets = \\\n self.transform_batch(inputs_batch, target_ids_batch, targets_batch)\n\n return batch_inputs, batch_targets, batch_target_ids", "def 
next_batch(self,shuffle=True):\n\n start = self._index_in_epoch #样本个数下标\n # Shuffle for the first epoch\n if self._epochs_completed == 0 and start == 0 and shuffle:\n perm0 = np.arange(self._num_examples) #0-num_examples之间的整数\n np.random.shuffle(perm0) #乱序处理\n self._images = self.images[perm0]\n self._labels = self.labels[perm0]\n\n # Go to the next epoch,如果不够一批次则自动补齐差额\n if start + self.batch_size > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Get the rest examples in this epoch\n rest_num_examples = self._num_examples - start\n images_rest_part = self._images[start:self._num_examples]\n labels_rest_part = self._labels[start:self._num_examples]\n # Shuffle the data\n if shuffle:\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._images = self.images[perm]\n self._labels = self.labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = self.batch_size - rest_num_examples\n end = self._index_in_epoch\n images_new_part = self._images[start:end]\n labels_new_part = self._labels[start:end]\n return np.concatenate((images_rest_part, images_new_part), axis=0) , np.concatenate((labels_rest_part, labels_new_part), axis=0)\n else:\n self._index_in_epoch += self.batch_size #更新批处理样本索引\n end = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]", "def next_batch_set(images, labels, batch_size=128):\n indices = np.random.choice(len(images), batch_size)\n batch_images = images[indices]\n batch_labels = labels[indices]\n return batch_images, batch_labels", "def first_n_features(data, n=5000):\n for i, feature in enumerate(gffutils.iterators.DataIterator(data)):\n if i > n:\n break\n yield feature", "def get_next_batch(self, onehot=True):\n if self.current_batch_idx == 0:\n self.permutation()\n next_beg = self.current_batch_idx * self.batch_size\n next_end = (self.current_batch_idx + 1) * self.batch_size\n if next_end > self.n_samples:\n next_end = self.n_samples\n self.current_batch_idx = 0\n data_batch = self.data.values[next_beg:next_end][:]\n if onehot is True:\n labels_batch = self.labels_onehot.values[next_beg:next_end][:]\n else:\n labels_batch = self.labels.values[next_beg:next_end][:]\n self.current_batch_idx += 1\n return data_batch, labels_batch", "def next_sample(self, batch_size=1):\n\n X = []\n y = []\n\n for count in range(batch_size):\n #check for abrupt drift\n if count % self.abrupt_drift_rate == 0:\n dimfaks = [round(np.random.rand() * 4, 1) for _ in range(self.dims)]\n dimpots = [1 + round(np.random.rand() * 2) for _ in range(self.dims)]\n dimvars = [np.random.rand() * self.var for _ in range(self.dims)]\n dimmeans = [5 + np.random.rand() * 10 for _ in range(self.dims)]\n print(\"Random Polynomconcept: \", end=\"\")\n for i in range(self.dims):\n print(dimfaks[i],\" * x\", i+1, \"^\", dimpots[i], \" + \",end=\"\", sep=\"\")\n print()\n\n value = 0\n sample = []\n for i in range(self.dims):\n sample.append(np.random.normal(loc=dimmeans[i], scale=dimvars[i]))\n value += dimfaks[i] * (sample[i] ** dimpots[i])\n \n X.append(sample)\n y.append(value)\n\n self._x_idx += batch_size\n\n return (X, y)", "def get_batch(batch_size):\n global ctr\n global data\n\n # shuffle the data if we don't have enough left, then go back to start\n if ctr*batch_size > len(data)-batch_size: \n data = np.random.permutation(data)\n ctr = -1\n ctr += 1\n batch_data = data[ctr*batch_size:(1 + ctr)*batch_size, :(data.shape[1]-1)]\n batch_labels = dense_to_one_hot(data[ctr*batch_size:(1 + ctr)*batch_size, data.shape[1]-1]) \n \n 
return batch_data, batch_labels", "def next(self):\n if self.ptr >= self.num_examples:\n self.reset()\n raise StopIteration()\n batch_size = self.batch_size\n if self.ptr>self.num_examples-self.batch_size:\n batch_size = self.num_examples-self.ptr\n\n ixs = range(self.ptr,self.ptr+batch_size)\n self.ptr += batch_size\n\n i = np.zeros((batch_size), dtype='int32')\n e = np.zeros((batch_size, self.max_len, \n self.num_chars), dtype='int32') # entity\n l = np.zeros((batch_size, self.num_labels), dtype='int32') # labels\n for n, ix in enumerate(ixs):\n idx, ent, lab = self.examples[self.permutation[ix]]\n le = min(len(ent),self.max_len)\n i[n] = idx\n e[n,np.arange(le),ent[:le]] = 1\n #e[n,:min(len(ent),self.max_len)] = np.array(ent[:self.max_len])\n #l[n,lab] = 1/len(lab)\n l[n,lab] = 1\n\n return i, e, l", "def get_next_train_valid(X_shuffled, y_shuffled, itr, k):\n X_valid = X_shuffled[itr*len(X_shuffled)//k:(itr+1)*len(X_shuffled)//k,:]\n Y_valid = y_shuffled[itr*len(y_shuffled)//k:(itr+1)*len(y_shuffled)//k]\n \n X_train = np.delete(X_shuffled, np.s_[itr*len(X_shuffled)//k:(itr+1)*len(X_shuffled)//k:1], 0)\n Y_train = np.delete(y_shuffled, np.s_[itr*len(y_shuffled)//k:(itr+1)*len(y_shuffled)//k:1], 0)\n \n \n return X_train, Y_train, X_valid, Y_valid", "def next_batch(x, y, batch_size):\n\n def as_batch(data, start, count):\n part = []\n for i in range(start, start + count):\n part.append(data[i])\n return np.array(part)\n\n for i in range(0, len(x)-batch_size, batch_size):\n yield as_batch(x, i, batch_size), as_batch(y, i, batch_size)", "def predict_random_chunk(self, dataset , optimizer, loss_function, size=32):\n # get a random chunk from the test data\n random_start = numpy.random.randint(len(dataset.X)-size)\n X = dataset.X[random_start:random_start+size].view(-1,1,self.input_rows,self.input_cols).to(self.device)\n y = dataset.y[random_start:random_start+size].to(self.device)\n ls_emb = dataset.ls[random_start:random_start+size].to(self.device) if self.type == \"listener\" or self.type == \"both\" else None\n sp_emb = dataset.sp[random_start:random_start+size].to(self.device) if self.type == \"speaker\" or self.type == \"both\" else None\n\n # grant no learning\n with torch.no_grad():\n acc, loss, conf = self.fwd_pass(X, ls_emb, sp_emb, y, optimizer, loss_function)\n return acc, loss, conf", "def train_input_fn(features, labels, batch_size, nr_epochs):\n\t# Convert the inputs to a Dataset.\n\tdataset = tensorflow.data.Dataset.from_tensor_slices((dict(features), labels))\n\n\t# repeat, and batch the examples.\n\tdataset = dataset.repeat(nr_epochs).batch(batch_size)\n\t#ds = ds.batch(batch_size).repeat(num_epochs) # num_epochs ?\n\t\n\tversion_full = tensorflow.__version__\n\tx, version, y = version_full.split('.')\n\tprint('Versionfull: ' + version_full)\n\tprint('Version: ' + version)\n\t\n\tif version >= '5':\n\t\t# Return the dataset.\n\t\treturn dataset\n\telse:\n\t\treturn dataset.make_one_shot_iterator().get_next() #for 1.4", "def next(self):\n if not self.iter_next():\n self.reset()\n self.data, self.label = self._read()\n self.train_cursor = self.train_cursor + self.batch_size\n return {self.data_name: self.data[0][1], self.label_name: self.label[0][1]}", "def next_batch(self, batch_size):\n i_bucket = self.bucket_order[self.bucket_cursor]\n # Increment cursor and shuffle in case of new round\n self.bucket_cursor = (self.bucket_cursor + 1) % self.num_buckets\n if self.bucket_cursor == 0:\n self.bucket_order = np.random.permutation(self.num_buckets)\n\n if self.cursor[i_bucket] 
+ batch_size > self.buckets_size[i_bucket]:\n self.shuffle(i_bucket)\n\n # Handle too big batch sizes\n if (batch_size > self.buckets_size[i_bucket]):\n batch_size = self.buckets_size[i_bucket]\n\n res = self.buckets[i_bucket].iloc[self.cursor[i_bucket]:\n self.cursor[i_bucket]+batch_size]\n self.cursor[i_bucket] += batch_size\n\n # PAD input sequence and output\n input_max = max(res['in_length'])\n\n input_imgs = np.zeros(\n (batch_size, self.slider[0], input_max, 1), dtype=np.uint8)\n for i, img in enumerate(res['images']):\n input_imgs[i][:, :res['in_length'].values[i], 0] = img\n \n if self.train:\n input_imgs = self.augmentation.augment_images(input_imgs)\n input_imgs = input_imgs.astype(np.float32)\n\n targets = sequences_to_sparse(res['targets'].values)\n return input_imgs, targets, res['in_length'].values", "def train(self, num_batches: int):", "def get_batch(source, i):\n data = source[i]\n target = source[i + 1]\n return data.reshape((1, len(data))), target.reshape((-1,))" ]
[ "0.74909896", "0.72481114", "0.7209859", "0.7209859", "0.7201162", "0.71773523", "0.7172384", "0.7135985", "0.7022579", "0.7018331", "0.700701", "0.69525987", "0.69326574", "0.6926339", "0.6913601", "0.6876563", "0.6874372", "0.6864877", "0.6839137", "0.6839137", "0.6784926", "0.6760381", "0.6742455", "0.67329025", "0.67206436", "0.67206436", "0.67168826", "0.6709444", "0.6706641", "0.6697455", "0.66920924", "0.6678753", "0.6672402", "0.666609", "0.6662978", "0.6629399", "0.65802604", "0.6535817", "0.65245104", "0.6496993", "0.6484024", "0.6471698", "0.64692247", "0.6467406", "0.6466365", "0.64479554", "0.644618", "0.64177006", "0.6399627", "0.63643837", "0.63584447", "0.63562375", "0.628235", "0.6256825", "0.6256663", "0.6253573", "0.62107795", "0.62098175", "0.62048656", "0.6196102", "0.619586", "0.61767143", "0.61741143", "0.6165393", "0.6152881", "0.6149448", "0.6137804", "0.6128387", "0.61022574", "0.60862076", "0.6079934", "0.6058182", "0.6047448", "0.6016098", "0.6003228", "0.5985675", "0.5980628", "0.59740525", "0.59680706", "0.5957454", "0.59567136", "0.5944685", "0.592756", "0.5925902", "0.5925466", "0.5920193", "0.5920021", "0.5909565", "0.5903335", "0.58848727", "0.5863322", "0.58368015", "0.58140427", "0.5811834", "0.5807986", "0.58010435", "0.5793975", "0.57872146", "0.57670707", "0.5753263" ]
0.7202597
4
Property giving the number of training samples for each key
def num_train_samples(self): if self._num_training_samples is None: for key, value in self._training_data.items(): self._num_training_samples[key] = len(value[0]) return self._num_training_samples
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_training_examples(self):", "def get_num_train_samples(self):\n raise NotImplementedError", "def __len__(self):\n if self.TRAIN_BOOL is True:\n count = len(self.dict_batch_1[b'data'])\n count += len(self.dict_batch_2[b'data'])\n count += len(self.dict_batch_3[b'data'])\n count += len(self.dict_batch_4[b'data'])\n count += len(self.dict_batch_5[b'data'])\n else:\n count = len(self.dict_batch_test[b'data'])\n return count", "def get_sample_size(self, key=None):\n if key is None:\n return len(self.Y)\n else:\n return len(self.get_partitions(self.persistence)[key])", "def sample_count(self):", "def num_test_samples(self):\n if self._num_test_samples is None:\n for key, value in self._test_data.items():\n self._num_test_samples[key] = len(value[0])\n return self._num_test_samples", "def _number_of_samples(self):\n return len(self._raw_data.samples)", "def __len__(self):\r\n return len(self.train_data)", "def num_train_instances(self):\n raise NotImplementedError()", "def n_train(self):\n return self.factors[0].shape[0]", "def get_number_of_features(key):\n sum = 0\n for name, module in common.QOL_PARAMS[key].items():\n sum += module.LENGTH\n\n return sum", "def __len__(self):\n return len(self.train) + len(self.val) + len(self.test)", "def get_number_of_training(self):\n return self.n_train", "def __len__(self):\n return len(self.dataset) * self.samples_per_pair", "def __len__(self):\n return self.n_samples", "def getNumberOfKeys(self) -> int:\n ...", "def num_samples(self):\n raise NotImplementedError()", "def test_train_data_length(self):\n total_count = 0\n for batch in self._dataset.get_train():\n total_count += len(batch['label'])\n\n self.assertEqual(total_count, self._dataset.get_train_len())", "def __len__(self):\n return self._num_samples", "def __len__(self):\n return self.data.num_samples", "def __len__(self):\n return self.__n_samples", "def __len__(self):\n if self.settype == \"train\":\n return 64000\n else:\n return len(self.list_ids)", "def num_examples(self):\r\n raise NotImplementedError", "def __len__(self):\n return len(self.samples)", "def __len__(self) -> int:\n return len(self.samples)", "def __len__(self):\n return len(self.samples)", "def __len__(self):\n return len(self.samples)", "def training_set_count(self) -> int:\n return pulumi.get(self, \"training_set_count\")", "def __len__(self):\n if self._train:\n return len(self._train_data)\n return len(self._test_data)", "def __len__(self):\n if self._train:\n return len(self._train_data)\n return len(self._test_data)", "def __len__(self):\n key = list(self.keys())[0]\n feature = self[key]\n return len(feature)", "def get_train_data_size(self):\n return len(self.pipeline.data['train'])", "def test_len_trainset(self):\n self.assertEqual(self.__dataset.get_train_len, 10000)", "def __len__(self):\n return 1 + len(self.features)", "def getNrSamples(self): \r\n return self.numSamples", "def get_num_samples(self):\n return self._num_samples", "def __len__(self):\n return len(self.examples)", "def size(self):\r\n return len(self._train_datas)", "def __len__(self):\n return len(self.list_sample)", "def get_num_samples(self) -> int:\n # must be implemented in subclass\n raise NotImplementedError", "def getNumberOfKeys(self, attr, view) -> int:\n ...", "def __len__(self):\n return math.ceil(len(self._sampler) / self._batch_size)", "def batch_size(self) -> int:\n ...", "def getNumberOfScaleKeys(self, view) -> int:\n ...", "def key_size(self) -> int:\n pass", "def key_size(self) -> int:\n pass", "def count_samples(self):\n 
return sum(SEQ_LENGTHS)", "def nkeytexts(self):\n return len(self.__keytexts)", "def nkeytexts(self):\n return len(self.__keytexts)", "def test_test_data_length(self):\n total_count = 0\n for batch in self._dataset.get_test():\n total_count += len(batch['label'])\n\n self.assertEqual(total_count, self._dataset.get_test_len())", "def samples(self) -> int:\n return self._samples", "def count(self, key):\n self._metrics[key] += 1", "def get_n_samples(self):\n return {op.rx.get_n_samples() for op in self.ops}", "def train(self, num_batches: int):", "def num_samples(self):\n if self.f is None:\n raise AssertionError(\"Please call setup_read first.\")\n\n if self.image_key not in self.f:\n raise AssertionError(\"Key %s not found in database. Check your image key\" % self.image_key)\n\n if self.label_key not in self.f:\n raise AssertionError(\"Key %s not found in database. Check your label key\" % self.label_key)\n\n if self.f[self.label_key].shape[0] != self.f[self.image_key].shape[0]:\n raise AssertionError(\n \"The number of elements in the images blob does not match the number of elements in the labels blob.\")\n\n return self.f[self.image_key].shape[0]", "def count_data_items(fileids, train=True):\n sizes = 28000 if train else 22500\n return len(fileids) * sizes", "def num_trials(self):", "def num_keys(self):\n return len(self.counter.keys())", "def num_keys(self):\r\n return len(self.keys)", "def GetTrainSampleCount(self) :\r\n\t\ttry :\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['SampleSetCount'],(0,))\r\n\t\t\tCurSampleCount = self.DB_Cursor.fetchone()[0]\r\n\t\texcept Exception as detail:\r\n\t\t\tlogging.error(\"Failed to get count of training samples in database: %s\"%detail)\r\n\t\treturn CurSampleCount", "def size(self, key):\n _id, feature = self._extract(key)\n return self.client.sound_feature_size(_id, feature)", "def train_size(self) -> int:\n return int(self.data_size * self.__train_fraction)", "def dataset_length(data_loader):\n sample = next(iter(data_loader))\n batch_size = None\n\n if isinstance(sample, dict):\n try:\n if isinstance(sample[\"label\"], torch.Tensor):\n batch_size = sample[\"label\"].shape[0]\n else:\n # in case of sequence of inputs use first input\n batch_size = sample[\"label\"][0].shape[0]\n except:\n KeyError(\"Expects key to be 'label'.\")\n else:\n if isinstance(sample[1], torch.Tensor):\n batch_size = sample[1].shape[0]\n else:\n # in case of sequence of inputs use first input\n batch_size = sample[1][0].shape[0]\n return len(data_loader) * batch_size", "def get_number_samples(self):\n return self.samples.shape[0]", "def train(self, corpus): \n # TODO your code here\n \n for sentence in corpus.corpus:\n for i,dotum in enumerate(sentence.data[1:]):\n self.vocab[dotum.word][sentence.data[i].word] +=1\n self.word_counts[sentence.data[i].word] +=1\n self.total +=1\n self.v = len(self.vocab.keys())", "def __init__(self):\n self.num_counts = {}", "def __len__(self):\n return int(np.ceil(len(self.ids) / self.batch_size))", "def __len__(self):\n return self.nb_iterations", "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def __len__(self):\n gen_len = len(self.image_ids) // self.batch_size\n if len(self.image_ids) % self.batch_size != 0:\n gen_len += 1\n return gen_len", "def getSampleCount(self):\r\n return len(self._data)", "def num_eval_instances(self):\n return self.num_train_instances // 4", "def num_inducing(self) -> tf.Tensor:\n raise NotImplementedError", "def __len__(self):\n if self.batch_size == 1:\n return 
len(self.index_list)\n else:\n return max(1, len(self.index_list)//self.batch_size)", "def __len__(self):\n return int(np.floor(len(self.ids) / self.batch_size))", "def num_samples(self):\n return self._dist_samples", "def __len__(self):\n return len(self.batches)", "def __len__(self):\n return int(np.floor(len(self.list_ids) / self.batch_size))", "def vocab_size(self):\n return self._vocab_size", "def __len__(self):\n return self._num_samples_per_file * len(self._files) // self._world_size", "def __len__(self):\n return len(self.indexes) // self.batch_size", "def __len__(self):\n return len(self.indexes) // self.batch_size", "def train(self, corpus): \n for sentence in corpus.corpus:\n prev_word = None\n for datum in sentence.data:\n word = datum.word\n self.unigram_count[word] += 1\n if prev_word != None:\n self.bigram_count[prev_word][word] += 1\n prev_word = word\n \n self.vocabulary_size = len(self.unigram_count)\n self.num_words = sum(self.unigram_count.values())", "def __len__(self):\n return sum(self.size_freqs.values())", "def n_samples(self) -> int: # pragma: no cover\n return self.samples.shape[0]", "def sample_count(self):\n assert len(self.decay_x) == len(self.decay_y)\n return len(self.decay_x)", "def test_num_trainable_params():\n model = micronet.cifar.linear_model.create_model()\n assert test.util.count_trainable_params(model) \\\n == cifar_linear_model.NUM_TRAINABLE_PARAM\n # Just for sanity sake, so that I know the true value and if it changes:\n assert cifar_linear_model.NUM_TRAINABLE_PARAM == 172900", "def getNumberOfSkewXKeys(self, view) -> int:\n ...", "def count(self, val):\n raise ValueError('cannot set \\'count\\' in class KeyTracker')", "def __setKeyCount(self,\n key,\n count):\n self.__keyCount[key] = count\n return self.__keyCount[key]", "def train(self, corpus):\n for sentence in corpus.corpus:\n for datum in sentence.data: \n self.unigramCounts[datum.word] += 1\n self.totalCount += 1", "def __len__(self):\r\n return int(np.floor(len(self.list_IDs) / self.batch_size))", "def test_data_counts(self):\n model = PoincareModel(self.data)\n self.assertEqual(len(model.all_relations), 5)\n self.assertEqual(len(model.node_relations[model.kv.vocab['kangaroo.n.01'].index]), 3)\n self.assertEqual(len(model.kv.vocab), 7)\n self.assertTrue('mammal.n.01' not in model.node_relations)", "def predict(self, key):\n return self.counts.get(key, 1.0)", "def __len__(self) -> int:\n batch_sampler = cast(BatchSampler, self.batch_sampler)\n return len(batch_sampler)", "def num_examples_per_epoch(self):\n\t\tif self.subset == 'train':\n\t\t\treturn 50000\n\t\tif self.subset == 'validation':\n\t\t\treturn 10000", "def test_create_count_key(self):\n s = SimulationStats()\n\n # new count key added\n s.create_count_key(count_key=0)\n assert s.results_dict == {0: {\n 'net winnings': 0,\n 'number of rounds': 0,\n 'number of split hands': 0,\n 'overall bet': 0\n }}\n\n # count key already exists\n s.create_count_key(count_key=0)\n assert s.results_dict == {0: {\n 'net winnings': 0,\n 'number of rounds': 0,\n 'number of split hands': 0,\n 'overall bet': 0\n }}", "def num_samples(self):\n return self._ll_tree_sequence.get_num_samples()", "def __len__(self):\n return len(self.features)" ]
[ "0.76472265", "0.72392845", "0.7154465", "0.71007866", "0.6989497", "0.6887514", "0.68653405", "0.6852151", "0.6844715", "0.680676", "0.6782863", "0.67534184", "0.6745939", "0.6720424", "0.670524", "0.66739583", "0.66528374", "0.66478294", "0.66011435", "0.659892", "0.6594394", "0.65546584", "0.65115505", "0.6476304", "0.64750504", "0.6449615", "0.6449615", "0.6442748", "0.6419965", "0.6419965", "0.6401204", "0.6366133", "0.6348914", "0.6348608", "0.63311607", "0.6328858", "0.62824446", "0.62781596", "0.62449265", "0.62305397", "0.62280715", "0.61906224", "0.6183281", "0.6179984", "0.6173985", "0.6173985", "0.6148154", "0.614735", "0.614735", "0.6124018", "0.61192214", "0.61190104", "0.60998106", "0.60825115", "0.60481364", "0.6042652", "0.6016405", "0.6010234", "0.6007267", "0.59996426", "0.5980463", "0.5979106", "0.5960893", "0.5958308", "0.5948541", "0.5945548", "0.59415996", "0.593789", "0.59335214", "0.59335214", "0.5930354", "0.59242815", "0.59233725", "0.59177685", "0.59157276", "0.591164", "0.591091", "0.5910455", "0.59096146", "0.5903265", "0.5898961", "0.58964914", "0.58964914", "0.5895059", "0.5891847", "0.58851373", "0.5879787", "0.587666", "0.58751947", "0.58713084", "0.58627486", "0.585983", "0.58567184", "0.58565015", "0.5853893", "0.5850602", "0.5848952", "0.5833041", "0.5822394", "0.58198255" ]
0.7513668
1
Property giving the number of test samples
def num_test_samples(self): if self._num_test_samples is None: for key, value in self._test_data.items(): self._num_test_samples[key] = len(value[0]) return self._num_test_samples
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _number_of_samples(self):\n return len(self._raw_data.samples)", "def setTestSampleSize(self, Ntest):\n self.Ntest = Ntest", "def getNrSamples(self): \r\n return self.numSamples", "def num_training_examples(self):", "def sample_count(self):", "def get_number_of_testing(self):\n return self.n_test", "def n_test(self):\n return self.factors[1].shape[0]", "def num_samples(self):\n raise NotImplementedError()", "def __len__(self):\n return self.n_samples", "def num_examples(self):\r\n raise NotImplementedError", "def num_trials(self):", "def get_num_samples(self):\n return self._num_samples", "def __len__(self):\n return self.__n_samples", "def test_test_data_length(self):\n total_count = 0\n for batch in self._dataset.get_test():\n total_count += len(batch['label'])\n\n self.assertEqual(total_count, self._dataset.get_test_len())", "def __len__(self):\n return self._num_samples", "def get_num_train_samples(self):\n raise NotImplementedError", "def test_getSampleCount(self):\r\n self.assertEqual(self.estimator1.getSampleCount(), 1)", "def __len__(self):\n return self.data.num_samples", "def test_len_testset(self):\n self.assertEqual(self.__dataset.get_test_len, 1000)", "def get_num_samples(self) -> int:\n # must be implemented in subclass\n raise NotImplementedError", "def __len__(self) -> int:\n return len(self.samples)", "def num_train_samples(self):\n if self._num_training_samples is None:\n for key, value in self._training_data.items():\n self._num_training_samples[key] = len(value[0])\n return self._num_training_samples", "def test_len_trainset(self):\n self.assertEqual(self.__dataset.get_train_len, 10000)", "def samples(self) -> int:\n return self._samples", "def __len__(self):\n return len(self.samples)", "def __len__(self):\n return len(self.samples)", "def __len__(self):\n return len(self.samples)", "def test_size(self) -> int:\n return int(self.data_size * self.__test_fraction)", "def __len__(self):\n return len(self.list_sample)", "def n_samples(self) -> int: # pragma: no cover\n return self.samples.shape[0]", "def __len__(self):\n return len(self.train) + len(self.val) + len(self.test)", "def __len__(self):\n return len(self.examples)", "def get_number_samples(self):\n return self.samples.shape[0]", "def getSampleCount(self):\r\n return len(self._data)", "def count_samples(self):\n return sum(SEQ_LENGTHS)", "def test_train_data_length(self):\n total_count = 0\n for batch in self._dataset.get_train():\n total_count += len(batch['label'])\n\n self.assertEqual(total_count, self._dataset.get_train_len())", "def __len__(self):\n if self._train:\n return len(self._train_data)\n return len(self._test_data)", "def __len__(self):\n if self._train:\n return len(self._train_data)\n return len(self._test_data)", "def test_getSampleCount(self):\r\n self.assertEqual(self.res1.getSampleCount(), 0)\r\n\r\n self.res1.addSample('S1', 42)\r\n self.assertEqual(self.res1.getSampleCount(), 1)\r\n\r\n self.res1.addSample('S2', 43)\r\n self.assertEqual(self.res1.getSampleCount(), 2)", "def set_number_of_samples(self, N):\n\n self.numSamples = N", "def n_train(self):\n return self.factors[0].shape[0]", "def num_samples(self):\n return self._ll_tree_sequence.get_num_samples()", "def test_set_count(self) -> int:\n return pulumi.get(self, \"test_set_count\")", "def numberTests(self):\n for i, test in enumerate(self._tests):\n test.number = i + 1\n test.info.cs_test_num = test.number", "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def has_more_samples(self):\n 
return True", "def has_more_samples(self):\n return True", "def has_more_samples(self):\n return True", "def length(self) -> int:\n return len(self.__samples)", "def __len__(self):\n return len(self.dataset) * self.samples_per_pair", "def __len__(self):\r\n return len(self.train_data)", "def test_n_features_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"LR\")\n assert atom.lr.n_features == atom.n_features", "def test_max_number_of_records(self):\n self._config['Number of examples'] = '2'\n result = self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)\n self.assertLen(result, 2)", "def num_samples(self):\n return self._dist_samples", "def get_test_size(self):\n return self.test_size", "def get_output_samples_number_multiplier(self):\n return 1", "def getNumExamples(self):\n return self.__numExamples", "def __len__(self):\n if self.TRAIN_BOOL is True:\n count = len(self.dict_batch_1[b'data'])\n count += len(self.dict_batch_2[b'data'])\n count += len(self.dict_batch_3[b'data'])\n count += len(self.dict_batch_4[b'data'])\n count += len(self.dict_batch_5[b'data'])\n else:\n count = len(self.dict_batch_test[b'data'])\n return count", "def Numtrials(self):\n\t\treturn self._get_attribute('numtrials')", "def NbSamples(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_NbSamples(self, *args)", "def get_valid_data_size(self):\n return len(self.pipeline.data['test'])", "def get_number_of_training(self):\n return self.n_train", "def test_generate_nb_testing(self):\n pass", "def _set_number_of_subsamples(self, number_of_subsamples):\n self._number_of_subsamples = number_of_subsamples\n self._compute_down_sample_factor()", "def samples(self):\n pass", "def test_count(self):\n self._test_count_func(count)", "def test_max_samples(self):\n assert setup.setup_component(\n self.opp,\n \"binary_sensor\",\n {\n \"binary_sensor\": {\n \"platform\": \"trend\",\n \"sensors\": {\n \"test_trend_sensor\": {\n \"entity_id\": \"sensor.test_state\",\n \"max_samples\": 3,\n \"min_gradient\": -1,\n }\n },\n }\n },\n )\n self.opp.block_till_done()\n\n for val in [0, 1, 2, 3, 2, 1]:\n self.opp.states.set(\"sensor.test_state\", val)\n self.opp.block_till_done()\n\n state = self.opp.states.get(\"binary_sensor.test_trend_sensor\")\n assert state.state == \"on\"\n assert state.attributes[\"sample_count\"] == 3", "def __len__(self):\n return math.ceil(len(self._sampler) / self._batch_size)", "def random_test(self):\r\n return 1", "def random_test(self):\r\n return 1", "def num_samples(self, u=None):\n u = self.virtual_root if u is None else u\n return self._ll_tree.get_num_samples(u)", "def setUp(self):\n self.samples = 5\n self.otus = 10\n seed(0) # this will seed numpy prng at 0 before each test", "def num_examples_per_epoch(self):\n\t\tif self.subset == 'train':\n\t\t\treturn 50000\n\t\tif self.subset == 'validation':\n\t\t\treturn 10000", "def test_properties_count_get(self):\n pass", "def test_default_num_products(self):\n test_list = generate_products()\n self.assertEqual(len(test_list), 30, msg=\"Length is Bad\")", "def total_test_batches(self) -> int:\n return sum(self.trainer.num_test_batches)", "def __len__(self):\n return self._num_samples_per_file * len(self._files) // self._world_size", "def test_count(self):\n return len(self.tests) + sum(suite.test_count for suite in self.suites)", "def num_streams(self):\n self._num_streams = self.lib.iperf_get_test_num_streams(self._test)\n return self._num_streams", "def 
test_default_num_products(self):\n self.assertEqual(len(generate_products()), 30)", "def test_temperature_count(self):\n self.assertEqual(self.Tcount, 4)", "def num_train_instances(self):\n raise NotImplementedError()", "def __len__(self):\n return 1 + len(self.features)", "def count(self,value = 1):\n n = 0\n for s in self.sample:\n if s == value:\n n += 1\n return n", "def get_sample_size(self, key=None):\n if key is None:\n return len(self.Y)\n else:\n return len(self.get_partitions(self.persistence)[key])", "def test_default_num_products(self):\r\n lst = generate_products()\r\n self.assertEqual(len(lst), 30)", "def test_default_num_products(self):\n gen_prods = generate_products()\n self.assertEqual(len(gen_prods), 30)", "def test_default_num_products(self):\r\n prod = generate_products()\r\n self.assertEqual(len(prod), 30)", "def test_pressure_count(self):\n self.assertEqual(self.Pcount, 7)", "def sample_count(self):\n assert len(self.decay_x) == len(self.decay_y)\n return len(self.decay_x)", "def number_of_sample_loops(self) -> int:\n return self.__number_of_sample_loops", "def n_rounds(self) -> int:\n return self.y.shape[0]", "def test_default_num_products(self):\n products = generate_products()\n self.assertEqual(len(products), 30)", "def n_remaining_samples(self):\n return -1", "def n_remaining_samples(self):\n return -1", "def n_remaining_samples(self):\n return -1", "def __len__(self):\n return self.nb_iterations", "def get_test_amount(self):\n\n return len(self.__test_set_list)", "def test_default_num_products(self):\n product_list = generate_products()\n self.assertEqual(len(product_list), 30)" ]
[ "0.76248336", "0.7584526", "0.7412485", "0.74109805", "0.7401342", "0.7390789", "0.72752166", "0.726213", "0.7233075", "0.7224675", "0.71610683", "0.71362716", "0.71249604", "0.70941573", "0.7070653", "0.704998", "0.70279443", "0.7010169", "0.6996375", "0.6985382", "0.6942792", "0.68967336", "0.6891787", "0.6871163", "0.6865328", "0.6865328", "0.6835634", "0.6789015", "0.6767874", "0.6764788", "0.6763059", "0.67473024", "0.66823965", "0.666752", "0.6642121", "0.6603201", "0.66018933", "0.66018933", "0.6590338", "0.65739197", "0.65726894", "0.6569026", "0.6565638", "0.6549449", "0.65213674", "0.65213674", "0.6504146", "0.6504146", "0.6504146", "0.6502791", "0.6496819", "0.6482784", "0.64712465", "0.6451257", "0.64393693", "0.6435268", "0.6422337", "0.6385122", "0.6372923", "0.6370428", "0.63669837", "0.6346253", "0.63413256", "0.63361734", "0.63114667", "0.63057387", "0.6301759", "0.6275401", "0.6266827", "0.6262966", "0.6262966", "0.62595713", "0.62422925", "0.62411404", "0.6235463", "0.62256545", "0.6223998", "0.6185689", "0.61830294", "0.61821324", "0.61739284", "0.6158552", "0.6158054", "0.6149974", "0.614643", "0.6136792", "0.61354005", "0.6121143", "0.61179256", "0.61153585", "0.61118907", "0.61094195", "0.6103456", "0.60845995", "0.60785115", "0.60785115", "0.60785115", "0.6065765", "0.6063517", "0.604798" ]
0.8134777
0
Parse nonchimeric alignments with walkspolicy mask with pysam backend.
def test_mock_pysam(): mock_sam_path = os.path.join(testdir, "data", "mock.sam") mock_chroms_path = os.path.join(testdir, "data", "mock.chrom.sizes") try: result = subprocess.check_output( [ "python", "-m", "pairtools", "parse", "--walks-policy", "mask", "-c", mock_chroms_path, mock_sam_path, ], ).decode("ascii") except subprocess.CalledProcessError as e: print(e.output) print(sys.exc_info()) raise e # check if the header got transferred correctly sam_header = [l.strip() for l in open(mock_sam_path, "r") if l.startswith("@")] pairsam_header = [l.strip() for l in result.split("\n") if l.startswith("#")] for l in sam_header: assert any([l in l2 for l2 in pairsam_header]) # check that the pairs got assigned properly for l in result.split("\n"): if l.startswith("#") or not l: continue print(l) assigned_pair = l.split("\t")[1:8] simulated_pair = l.split("CT:Z:SIMULATED:", 1)[1].split("\031", 1)[0].split(",") print(assigned_pair) print(simulated_pair) print() assert assigned_pair == simulated_pair
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recover_para_segmentations(aligned_data, gt_paras_sents, human_translator_data):\n sents_covered = 0\n outputs = \"\"\n for p1, s1, src in zip(aligned_data[\"gt_paras\"], gt_paras_sents, aligned_data[\"source_paras\"]):\n num_sents = len(s1)\n outputs += f\"<b>Source</> = {src}\\n<b>Google Translate</> = {p1}\\n\"\n\n for translator, htd in human_translator_data.items():\n s2_sent_idx = []\n sent_aligns = extract_match(htd[\"match_matrix\"], sents_covered, sents_covered + num_sents, readable=True)\n for salign in sent_aligns:\n s2_sent_idx.extend(salign['trans_idx'])\n s2_sent_idx = list(set(s2_sent_idx))\n s2_sent_idx.sort()\n\n p2_alignment = \" \".join([htd['all_sents'][x] for x in s2_sent_idx])\n outputs += f\"<b>{translator}</> = {p2_alignment}\\n\"\n\n aligned_data[\"translator_data\"][translator][\"translator_paras\"].append(p2_alignment)\n aligned_data[\"translator_data\"][translator][\"sent_alignments\"].append(sent_aligns)\n sents_covered += num_sents\n outputs += \"\\n\\n\"\n return aligned_data, outputs", "def parse_sam(in_file, out_file, read_type , strand):\n out_handle = open(out_file , 'a')\n if strand == 'watson':\n nt = ['C']\n else:\n nt = ['G']\n count = 0\n # print 'Warning, only works for forward mapped reads'\n mismatch = 0\n clip_count_total = 0\n for line in open(in_file, 'r'):\n modulo_line_no = count % 2\n #alternates between 0 and 1\n if line.startswith('@'):\n continue\n split_line = line.rstrip('\\n').split('\\t')\n #skip read pairs with improper flags.\n #TODO: do this filtering in mark_PCR_duplicates or elsewhere with access to pysam.\n if split_line[1] not in ['0', '99', '147']:\n mismatch += 1\n count += 1\n # continue\n char_count = ''\n clip_count = 0\n for char in split_line[5]:\n if not char.isalpha():\n char_count += char\n elif char == 'S':\n clip_count += int(char_count)\n else:\n char_count = ''\n if clip_count > 6:\n clip_count_total += 1\n count += 1\n # continue\n header = split_line[0].split('|')\n #meth_post list can be present for both R1 and R2 the last Samtools tag added should be the RN:Z: tag, look\n #to the right of this tag only\n meth_pos_list = split_line[0][split_line[0].rindex(':Z:'):].split('|')[1:]\n out_line = [header[0]]\n out_line += split_line[1:9]\n seq = list(split_line[9])\n try:\n meth_pos = [int(n) for n in meth_pos_list[-modulo_line_no].split(',')]\n for n in meth_pos:\n if n >= len(seq):\n break\n if seq[n] not in ['T','A']:\n break\n seq[n] = nt[-modulo_line_no]\n except ValueError:\n pass\n out_line += [''.join(seq)]\n out_line += split_line[10:]\n for item in header[1:]:\n if ':' in item and item not in out_line:\n out_line.append(item)\n # out_line += header[3:6]\n out_handle.write('\\t'.join(out_line) + '\\n')\n count += 1\n print('%s mismatches out of %s' % (mismatch, count))\n print('%s reads out of %s soft clipped more than 5' % (clip_count_total, count))", "def pssmwalk(motif, sequence, pos, inputformat):\n\n\tif inputformat == \"numpy\": # accepts both numpy PSSM matrices or Bio.motifs PFMs as input. both are then stored as a numpy array\n\t\tpssm = motif\n\n\telif inputformat == \"pfm\":\n\t\tpssm = np.array(pd.DataFrame(motif)).transpose()\n\n\t# make a buffer array of 0.25 probability accross the board. This will flank the real PWM and both sides. This will allow the \n\t# alignment of the PSSM a little before and after the CORE sequence, giving flexibility to the process without giving these buffer \n\t# sequences a strong probability to be the best alignment position. 
a log-odds of -1 is unprobable, without being 0. \n\tmat = np.ones((4, 20)) * -1 \n\n\titerpssm = np.concatenate((mat, pssm, mat), axis=1) \n\n\talphapos = 0\n\talphascore = -1000\t\t\n\tfor i in xrange(pos,len(iterpssm.transpose())-pos):\n\t\ttry:\n\t\t\tbetapos = i \n\t\t\tbetascore = np.sum(sequence * iterpssm[:,betapos:(betapos + sequence.shape[1])])\n\n\t\t\tif betascore > alphascore:\n\t\t\t\talphascore = betascore\n\t\t\t\talphapos = betapos\n\t\texcept ValueError:\n\t\t\tpass\n\tprox = np.zeros((4,alphapos))\n\tdist = np.zeros((4,(iterpssm.shape[1]-(sequence.shape[1]+alphapos))))\n\tcoreiter = np.transpose(np.concatenate((prox, sequence, dist), axis=1))\n\n\n\t\n\treturn [alphascore,alphapos,iterpssm,coreiter]", "def parse_a_stanza(self):\n\t\t# 's' line -- score, 1 field\n\t\tline = self.fetch_line(report=\" in a-stanza\")\n\t\tfields = line.split()\n\t\tassert (fields[0] == \"s\"), \"s line expected in a-stanza (line %d, \\\"%s\\\")\" \\\n\t\t\t\t\t\t\t\t % (self.lineNumber,line)\n\t\ttry: score = int(fields[1])\n\t\texcept: score = float(fields[1])\n\n\t\t# 'b' line -- begin positions in seqs, 2 fields\n\t\tline = self.fetch_line(report=\" in a-stanza\")\n\t\tfields = line.split()\n\t\tassert (fields[0] == \"b\"), \"b line expected in a-stanza (line %d, \\\"%s\\\")\" \\\n\t\t\t\t\t\t\t\t % (self.lineNumber,line)\n\t\tbeg1 = int(fields[1]) - 1\n\t\tbeg2 = int(fields[2]) - 1\n\n\t\t# 'e' line -- end positions in seqs, 2 fields\n\t\tline = self.fetch_line(report=\" in a-stanza\")\n\t\tfields = line.split()\n\t\tassert (fields[0] == \"e\"), \"e line expected in a-stanza (line %d, \\\"%s\\\")\" \\\n\t\t\t\t\t\t\t\t % (self.lineNumber,line)\n\t\tlen1 = int(fields[1]) - beg1\n\t\tlen2 = int(fields[2]) - beg2\n\n\t\t# 'l' lines\n\t\tpieces = []\n\t\twhile (True):\n\t\t\tline = self.fetch_line(report=\" in a-stanza\")\n\t\t\tfields = line.split()\n\t\t\tif (fields[0] != \"l\"):\n\t\t\t\tbreak\n\t\t\tstart1 = int(fields[1]) - 1\n\t\t\tstart2 = int(fields[2]) - 1\n\t\t\tlength = int(fields[3]) - start1\n\t\t\tlength2 = int(fields[4]) - start2\n\t\t\ttry: pctId = int(fields[5])\n\t\t\texcept: pctId = float(fields[5])\n\t\t\tassert (length2 == length), \"length mismatch in a-stanza\"\n\t\t\tpieces.append((start1+self.seq1_start,start2+self.seq2_start,length,pctId))\n\t\tassert (line == \"}\"), \"improper a-stanza terminator (line %d, \\\"%s\\\")\" \\\n\t\t\t\t\t\t\t% (self.lineNumber,line)\n\t\treturn (score,pieces)", "def __init__(self,\n seq,\n aligned_index,\n unaligned_index):\n \n self.seq=seq\n self.aligned_index=aligned_index\n self.unaligned_index=unaligned_index\n self.numeric_seq=convert_to_numeric(self.seq)\n self.upstream_regions=[]\n self.downstream_regions=[]\n self.labels=[]\n self.match_count=0\n self.percent_match=0\n self.non_specific_hits=0\n self.non_specific_percent=0\n \n self.std_index = False\n self.f_std_index = None\n self.r_std_index = None", "def pslMap( options ):\n\n if options.format == \"gtf\":\n use_copy = False\n else:\n use_copy = True\n\n ninput, noutput, ndiscarded, nskipped, nskipped_small_queries = 0, 0, 0, 0, 0\n\n min_length = options.min_aligned\n\n for match, qx, tx in iterator_psl_intervals( options ):\n\n map_query2target = match.getMapQuery2Target()\n\n ninput += 1\n\n ## if no filter on qx or tx, use full segment\n if qx == None:\n qx = [ (match.mQueryFrom,match.mQueryTo,0) ]\n elif tx == None:\n tx = [ (match.mSbjctFrom,match.mSbjctTo,0) ]\n\n ## if no overlap: return\n if not qx or not tx: \n nskipped += 1\n continue\n\n for query in qx:\n\n 
qstart, qend, qval = query\n\n # skip elements that are too small\n if qend - qstart < min_length: \n E.debug( \"query too small - skipped at %s:%i-%i\" % (match.mQueryId, qstart, qend) )\n nskipped_small_queries += 1\n continue\n\n E.debug( \"working on query %s:%i-%i\" % (match.mQueryId, qstart, qend) )\n\n mqstart, mqend = ( map_query2target.mapRowToCol(qstart, \n alignlib_lite.py_RIGHT), \n map_query2target.mapRowToCol(qend, \n alignlib_lite.py_LEFT) )\n \n \n if match.strand == \"-\":\n qstart, qend = match.mQueryLength - qend, match.mQueryLength - qstart\n\n for target in tx:\n\n tstart, tend, tval = target\n if tstart >= mqend or tend <= mqstart: continue\n if tend - tstart < min_length: continue\n\n new = alignlib_lite.py_makeAlignmentBlocks()\n \n if use_copy:\n # do copy with range filter\n if options.loglevel >= 3:\n\n mtstart, mtend = map_query2target.mapColToRow(tstart), map_query2target.mapColToRow(tend) \n E.debug( \"query: %i-%i (len=%i)-> %i-%i(len=%i); target: %i-%i (len=%i)-> %i-%i (len=%i)\" % \\\n (qstart, qend,\n qend - qstart,\n mqstart, mqend,\n mqend - mqstart,\n tstart, tend,\n tend - tstart,\n mtstart, mtend,\n mtend - mtstart ) )\n \n alignlib_lite.py_copyAlignment( \n new, \n map_query2target,\n qstart, qend,\n tstart, tend )\n else:\n # do copy with alignment filter\n map_query = qval\n if map_query:\n tmp = alignlib_lite.py_makeAlignmentBlocks() \n alignlib_lite.py_copyAlignment( tmp, map_query2target, map_query, alignlib_lite.py_RR )\n if options.loglevel >= 5:\n options.stdlog.write( \"######## mapping query ###########\\n\" )\n options.stdlog.write( \"# %s\\n\" % str(alignlib_lite.py_AlignmentFormatEmissions( map_query2target ) ))\n options.stdlog.write( \"# %s\\n\" % str(alignlib_lite.py_AlignmentFormatEmissions( map_query ) ))\n options.stdlog.write( \"# %s\\n\" % str(alignlib_lite.py_AlignmentFormatEmissions( tmp ) ))\n else:\n tmp = map_query2target\n \n map_target = tval\n if map_target:\n new = alignlib_lite.py_makeAlignmentBlocks()\n alignlib_lite.py_copyAlignment( new, tmp, map_target, alignlib_lite.py_CR ) \n if options.loglevel >= 5:\n options.stdlog.write( \"######## mapping target ###########\\n\" )\n options.stdlog.write( \"# before: %s\\n\" % str(alignlib_lite.py_AlignmentFormatEmissions( tmp ) ))\n options.stdlog.write( \"# map : %s\\n\" % str(alignlib_lite.py_AlignmentFormatEmissions( map_target ) ))\n options.stdlog.write( \"# after : %s\\n\" % str(alignlib_lite.py_AlignmentFormatEmissions( new ) ))\n else:\n new = tmp\n\n if options.loglevel >= 4:\n E.debug(\"putative match with intervals: %s and %s: %i-%i\" % \\\n (str(query), str(target), qstart, qend ))\n if options.loglevel >= 5:\n E.debug( \"input : %s\" % str(alignlib_lite.py_AlignmentFormatEmissions( map_query2target ) ))\n E.debug( \"final : %s\" % str(alignlib_lite.py_AlignmentFormatEmissions( new ) ) )\n\n if new.getLength() > 0:\n n = match.copy()\n n.fromMap( new, use_strand = True )\n E.info( \"match : %s\" % (str(n)))\n\n if new.getNumAligned() > options.min_aligned:\n n = match.copy()\n n.fromMap( new, use_strand = True )\n options.stdout.write( str(n) + \"\\n\" )\n noutput += 1\n else:\n ndiscarded += 1\n\n E.info( \"map: ninput=%i, noutput=%i, nskipped=%i, ndiscarded=%i, nsmall_queries=%i\" % \\\n (ninput, noutput, nskipped, ndiscarded, nskipped_small_queries) )", "def _preprocess_sgm(line, is_sgm):\n pass", "def readalign(self, opt, fh):\n## print \"entering readalign:\", opt\n edgeInfo = {}\n for p in opt:\n (key, value) = p.split('=')\n edgeInfo[key] = value\n\n s = 
fh.readline().split()\n## print s;\n if(len(s) == 7 and s[0] == 's'):\n vseq = self._vseq(len(s[6]))\n self.mAlign += vseq\n while len(s) == 7 and s[0] == 's':\n # Add the sequence name to the dictionary,\n # then add a corresponding node to the mapping.\n if s[1] not in self.sequences:\n self.sequences[s[1]] = AnonSequence(int(s[5]), s[1])\n self.mAlign += self.sequences[s[1]]\n\n # PROCESS THE KNOWN INTERVALS\n if(s[4] == '-'):\n ns = self.sequences[s[1]][-int(s[2]):-int(s[2]) - int(s[3])]\n self.sequences[s[1]].seqsplice(reverse_complement(\n s[6].replace('-', '')), ns.start, ns.stop)\n else:\n ns = self.sequences[s[1]][int(s[2]):int(s[2]) + int(s[3])]\n self.sequences[s[1]].seqsplice(s[6].replace('-', ''),\n ns.start, ns.stop)\n\n for inter in refIntervals(s[6]):\n self.mAlign[vseq[inter[0]:inter[1]]][ns[inter[2]:inter[3]]] = \\\n (inter[4])\n self.mAlign[ns[inter[2]:inter[3]]][vseq[inter[0]:inter[1]]] = \\\n (inter[4])\n\n s = fh.readline().split()", "def get_preprocessed_from_raw(sess_no, raw_path, align_on, from_time, to_time) :\n \n #params\n sess = '01'\n \n trial_length = abs(from_time - to_time)\n\n # Paths\n #raw_path = base_path + 'data/raw/' + sess_no + '/session' + sess + '/'\n rinfo_path = raw_path + 'recording_info.mat'\n tinfo_path = raw_path + 'trial_info.mat'\n\n # Define and loop over intervals\n \n srate = io.get_sfreq(rinfo_path) # = 1 000\n n_trials = io.get_number_of_trials(tinfo_path) \n last_trial = int(max(io.get_trial_ids(raw_path)))\n n_chans = io.get_number_of_channels(rinfo_path)\n channels = [ch for ch in range(n_chans)]\n\n # Pre-process data\n filtered = np.empty([n_trials,\n len(channels),\n int(trial_length * srate/1000)])\n\n trial_counter = 0; counter = 0\n while trial_counter < last_trial:\n n_zeros = 4-len(str(trial_counter+1))\n trial_str = '0' * n_zeros + str(trial_counter+1) # fills leading 0s\n if sess == '01' :\n file_in = sess_no + '01.' + trial_str + '.mat'\n else :\n file_in = sess_no + '02.' 
+ trial_str + '.mat'\n \n if align_on == 'sample' : \n onset = io.get_sample_on(tinfo_path)[trial_counter].item()\n elif align_on == 'match' :\n onset = io.get_match_on(tinfo_path)[trial_counter].item()\n else :\n print(\"Petit problème avec align_on : 'sample' ou 'match' \")\n \n\n \n if np.isnan(onset): # drop trials for which there is no onset info\n print('No onset for ' + file_in)\n trial_counter += 1\n if trial_counter == last_trial:\n break\n else:\n counter += 1\n continue\n print(file_in)\n try:\n raw = io.get_data(raw_path + file_in)\n temp = pp.strip_data(raw,\n rinfo_path,\n onset,\n start=from_time,\n length=trial_length)\n \n if temp.shape[1] == trial_length: # drop trials shorter than length\n filtered[counter] = temp\n counter += 1\n except IOError:\n print('No file ' + file_in)\n trial_counter += 1\n\n # Return data\n\n filtered = np.array(filtered)\n return(filtered)", "def get_input_pattern():\n return '-palign'", "def alignment_stop():\n\n smi = SMI_Beamline()\n yield from smi.modeMeasurement()\n proposal_id('2023_2', '311564_Pettersson')", "def parse_alg_to_slice_moves(self, alg):\n temp_cube = Cube()\n alg_list = alg.split()\n rev_alg = reverse_alg(alg)\n final_alg = []\n temp_cube.solve_helper = alg\n center = temp_cube.current_perm(5)\n while alg_list:\n slice_move = None\n if len(alg_list) > 1:\n slice_move = temp_cube.check_slice(alg_list[0], alg_list[1])\n if slice_move:\n for m in slice_move.split():\n final_alg.append(m)\n alg_list.pop(0)\n else:\n final_alg.append(alg_list[0])\n alg_list.pop(0)\n alg_apply_rot = temp_cube.parse_rotation_from_alg(final_alg)\n final = []\n final_alg_str = \" \".join(alg_apply_rot)\n if final_alg_str.count('E') == 4:\n found = 0\n for i in range(len(alg_apply_rot)):\n if alg_apply_rot[i] == 'E' or alg_apply_rot[i] == \"E'\":\n found += 1\n if found == 1 or found == 4:\n if alg_apply_rot[i] == 'E':\n final.append(\"U\")\n final.append(\"D'\")\n final.append(\"y'\")\n if alg_apply_rot[i] == \"E'\":\n final.append(\"U'\")\n final.append(\"D\")\n final.append(\"y\")\n else:\n final.append(alg_apply_rot[i])\n else:\n final.append(alg_apply_rot[i])\n\n\n\n final_alg_str =\" \".join(temp_cube.parse_rotation_from_alg(final))\n check_orientation_cube = Cube()\n check_orientation_cube.solve = final_alg_str\n check_orientation_cube.currently_parsing_smart_cube = True\n\n fix = check_orientation_cube.fix_rotation()\n final_alg_str += \" \" + \" \".join(fix)\n return final_alg_str", "def prob_t_a_given_s(self, alignment_info):\n ...", "def parseGard(kh, aln, o, logger):\n\tlBP = []\n\tf = open(kh, \"r\")\n\tlLine = f.readline()\n\twhile lLine:\n\t if lLine.find(\"\\\"breakpoints\\\"\")!=-1:\n\t lLine = f.readline()\n\t lLine=lLine[lLine.find(\"[\")+1:lLine.find(\"]\")]\n\t lBP=list(map(int, lLine.split(\",\")))\n\t break\n\t lLine = f.readline()\n\tf.close()\n\tindex = 0\n\t\n\t#If there are breakpoints, add it in lBP\n\tif len(lBP) > 0:\n\t\tlogger.info(\"There are {:d} significant breakpoints in alignement {:s} at positions {}\".format(len(lBP), aln, lBP))\n\telse:\n\t\tlogger.info(\"There are no significant breakpoints in alignement {:s}.\".format(aln))\n\t\treturn []\n \n\t#If there're breakpoint(s), cut sequence in subsequences according to breakpoints\n\tif len(lBP) > 0:\n\t\tdFname2Fseq = {}\n\t\tfor fasta in SeqIO.parse(open(aln),'fasta'):\n\t\t\tdFname2Fseq[fasta.id] = str(fasta.seq)\n\t\t\n\t\t#Creation of a dico where atgc in sequence has been replace by 1 and - by 0\n\t\tlSeqBin = []\n\t\tlNameGene = []\n\t\tfor fastaSeq in 
dFname2Fseq:\n\t\t\tlSeqBin.append(dFname2Fseq[fastaSeq].lower().replace(\"a\", \"1\").replace(\"t\", \"1\").replace(\"c\", \"1\").replace(\"g\", \"1\").replace(\"-\", \"0\"))\n\t\t\tlNameGene.append(fastaSeq)\n\n\t\t#looking for a multiple of 3 (number of letter) (subsequence ends on or after the breakpoint)\n\t\tnbSeq = len(lNameGene)\n\t\tlenSeq = len(lSeqBin[0])\n\t\tlPos = [0]\n\t\tlBPprec = [0 for i in range(len(lSeqBin))]\n\t\tlFrag = []\n\t\tfor bp in lBP:\n\t\t\twhile bp%3 != 0:\n\t\t\t\tbp += 1\n\t\t\tlPos.append(bp)\n\t\t\tlFrag += [ dFname2Fseq[lNameGene[j]][lPos[-2]:lPos[-1]] for j in range(nbSeq) ]\n\t\t\n\t\t#Adding subsequences that start at the last breakpoint to the end\n\t\tlFrag += [dFname2Fseq[lNameGene[i]][lPos[-1]:] for i in range(nbSeq)]\n\n\t\tlBP = lPos+[lenSeq]\n\t\tlOutFrag = []\n\t\tindex = 0\n\t\tfor x in range(1,len(lBP)):\n\t\t\tdFrag = {}\n\t\t\tif lBP[x-1] == 0:\n\t\t\t\textension = \"_{:d}_{:d}\".format(lBP[x-1], lBP[x])\n\t\t\telse:\n\t\t\t\textension = \"_{:d}_{:d}\".format(lBP[x-1]-1, lBP[x])\n\n\t\t\toutFrag = o+aln.split(\"/\")[-1].split(\".\")[0]+\"_frag\"+extension+\".best.fas\"\n\t\t\tfor name in lNameGene:\n\t\t\t\tdFrag[name] = lFrag[index]\n\t\t\t\tindex += 1\n\t\t\twith open(outFrag, \"w\") as outF:\n\t\t\t outF.write(FastaResFunc.dict2fasta(dFrag))\n\t\t\t logger.info(\"\\tNew alignment: %s\"%{outFrag})\n\t\t\t outF.close()\n\t\t\t lOutFrag.append(outFrag)\n\n\t\treturn lOutFrag\n\telse:\n\t\treturn []", "def parse_fast_align_output(fast_align_output: str) -> List[Tuple[int, int]]: # List[List[int]]:\n input2output_alignment = []\n max_output_idx = -1\n for input in fast_align_output.split(\" \"):\n input_token, output_token = int(input.split(\"-\")[0]), int(input.split(\"-\")[1])\n max_output_idx = max(output_token, max_output_idx)\n input2output_alignment.append((input_token, output_token))\n\n # alignments_to_input = [[] for _ in range(max_output_idx + 1)]\n # for input_token, output_token in input_output_alignment:\n # alignments_to_input[output_token].append(input_token)\n\n return input2output_alignment", "def apply_lane_mask_and_gap_filter(fastalines, lane_mask,\\\n allowed_gap_frac=1-eps, verbose=False, entropy_threshold=None):\n\n if entropy_threshold:\n if entropy_threshold < 0 or entropy_threshold > 1:\n raise ValueError,('Entropy threshold parameter (-e) needs to be '+\\\n 'between 0 and 1')\n \n if lane_mask:\n # convert lane_mask to a numpy index array\n p = mask_to_positions(lane_mask)\n \n # special case: lanemask is all zeros\n if sum(p) == 0:\n for line in fastalines:\n if line.startswith(\">\"):\n yield line + '\\n'\n else:\n yield '\\n'\n return\n\n # random temporary file for first-pass results\n tmpfilename = \"/tmp/\"+\"\".join(sample(lowercase, 20)) + \".tmp\"\n try:\n tmpfile = open(tmpfilename,'w')\n except IOError:\n raise IOError, \"Can't open temporary file for writing: %s\" %\\\n tmpfilename\n\n # the number of gaps seen in each position (length may be unknown here)\n gapcounts = None\n\n # First pass: apply filter, and track gaps\n if verbose and lane_mask:\n print \"Applying lanemask...\"\n seq_count = 0\n for k, v in MinimalFastaParser(fastalines):\n seq_count += 1\n # print progress in verbose mode\n if verbose and (seq_count % 100) == 0: status(seq_count)\n\n # apply lanemask if there is one\n if lane_mask:\n masked = get_masked_string(v,p)\n else:\n masked = v.replace('.', '-')\n\n # initialize gapcount array to proper length\n if gapcounts == None:\n gapcounts = zeros(len(masked))\n\n # increment gap 
counts if requested\n if allowed_gap_frac < 1:\n gapcounts[find_gaps(masked)] += 1\n \n # write masked sequence to temporary file\n tmpfile.write('>%s\\n%s\\n' % (k, masked))\n if verbose: print; print\n tmpfile.close()\n tmpfile = open(tmpfilename,'U')\n \n # random temporary file for second-pass results\n tmpfilename_gaps = \"/tmp/\"+\"\".join(sample(lowercase, 20)) + \".tmp\"\n try:\n tmpfile_gaps = open(tmpfilename_gaps,'w')\n except IOError:\n raise IOError, \"Can't open temporary file for writing: %s\" %\\\n tmpfilename_gaps\n\n\n # if we're not removing gaps, we're done; yield the temp file contents\n if allowed_gap_frac == 1:\n for line in tmpfile:\n yield line\n \n # else we are removing gaps; do second pass\n else:\n\n # convert gapcounts to true/false mask\n gapcounts = (gapcounts / float(seq_count) ) <= allowed_gap_frac\n \n # Second pass: remove all-gap positions\n if verbose: print \"Remove all-gap positions...\"\n seq_count = 0\n for k, v in MinimalFastaParser(tmpfile):\n seq_count += 1\n # print progress in verbose mode\n if verbose and (seq_count % 100) == 0: status(seq_count)\n \n masked = get_masked_string(v.replace('.','-'),gapcounts)\n tmpfile_gaps.write('>%s\\n' % (k))\n tmpfile_gaps.write('%s\\n' % (masked))\n if verbose: print\n \n tmpfile_gaps.close()\n tmpfile_gaps = open(tmpfilename_gaps, \"U\")\n \n # If no dynamic entropy calculations, return current values\n if not entropy_threshold:\n for line in tmpfile_gaps:\n yield line\n # Otherwise, filter out positions of post-gap filtered sequences\n else:\n if verbose:\n print \"Generating lanemask...\"\n lane_mask = generate_lane_mask(tmpfile_gaps, entropy_threshold)\n tmpfile_gaps.close()\n tmpfile_gaps = open(tmpfilename_gaps, \"U\")\n if verbose:\n print \"Applying lanemask...\"\n \n p = mask_to_positions(lane_mask)\n \n seq_count = 0\n for k, v in MinimalFastaParser(tmpfile_gaps):\n seq_count += 1\n # print progress in verbose mode\n if verbose and (seq_count % 100) == 0: status(seq_count)\n\n masked = get_masked_string(v,p)\n \n yield(\">%s\\n%s\\n\" % (k, masked))\n \n if verbose:\n print\n \n # delete temporary files\n tmpfile.close()\n tmpfile_gaps.close()\n remove(tmpfilename)\n remove(tmpfilename_gaps)", "def init_aligner(allow_target_gaps=False, allow_target_mismatches=False):\n a = Align.PairwiseAligner()\n a.mismatch = -np.inf\n a.mismatch_score = -np.inf\n\n # Don't allow for gaps or mismatches with the target sequence\n if not allow_target_gaps:\n a.target_gap_score = -np.inf\n\n # Do not let matching items overwhelm determining where gaps should go\n if not allow_target_gaps:\n a.match = 10\n else:\n a.match = 200\n\n if allow_target_mismatches:\n a.mismatch = 200\n\n # Generally, prefer to extend gaps than to create them\n a.query_extend_gap_score = 99\n a.query_open_gap_score = 49\n\n # Set slight preference for open gaps on the edges, however, if present, strongly prefer single edge gaps\n a.query_end_open_gap_score = 50\n a.query_end_extend_gap_score = 100\n\n return a", "def filter_segments(segmentiterator, minscore):\n \n # First get the first segment, so we in the loop can compare to the previous\n alignedsegment = next(segmentiterator)\n \n yield alignedsegment\n \n lastname = alignedsegment.query_name\n lastwasforward = alignedsegment.flag & 64 == 64\n \n for alignedsegment in segmentiterator:\n if alignedsegment.get_tag('AS') < minscore:\n continue\n \n # Depressingly, BWA somtimes outputs the same read multiple times.\n # We identify them by having same name as directionality as previous. 
\n thisname = alignedsegment.query_name\n thisisforward = alignedsegment.flag & 64 == 64\n \n if thisisforward is not lastwasforward or thisname != lastname:\n yield alignedsegment\n \n lastname = thisname\n lastwasforward = thisisforward", "def spectrum_alignment(self):\n self.diff_PROTEIN()\n \n score = [] #node->>([t][i][j])\n for t in range(self.post_modif+1):\n pos = 0 # position of peptide for converting mass\n score_ij = {0: [ float('-inf') for t in range(len(self.vector))]}\n for amino in self.peptide:\n score_j = [ float('-inf') for t in range(len(self.vector))]\n pos += PROTEIN_MASS[amino]\n score_ij[pos] = score_j\n score.append(score_ij)\n \n score[0][0][0] = 0\n # score for node(i,j,t)\n for t in range(self.post_modif+1):\n for i in sorted(score[t]):\n if i > 0: # i-self.diff[i]\n for j in range(len(self.vector)):\n temp_max = float('-inf')\n if j >= self.diff[i]:\n temp_max = score[t][i-self.diff[i]][j-self.diff[i]]\n if t > 0:\n for j_p in range(j):\n if temp_max < score[t-1][i-self.diff[i]][j_p]:\n temp_max = score[t-1][i-self.diff[i]][j_p]\n \n score[t][i][j] = self.vector[j] + temp_max\n \n # trace back --> the longest path\n max_score = float('-inf')\n layer = 0 # modify\n row = pos # mass\n column = len(self.vector)-1 # vector\n modify = []\n for t in range(self.post_modif+1):\n if max_score < score[t][pos][-1] :\n max_score = score[t][pos][-1]\n layer = t\n \n while layer > 0:\n score_temp = score[layer][row][column] - self.vector[column]\n if score_temp == score[layer][row-self.diff[row]][column-self.diff[row]]:\n column -= self.diff[row]\n row -= self.diff[row]\n else:\n for j_p in range(column-1):\n if score_temp == score[layer-1][row-self.diff[row]][j_p]:\n modify.append((row, column-row))\n row -= self.diff[row]\n column = j_p\n layer -= 1\n break\n \n\n # print out the sequence\n modify.sort()\n sequence = \"\"\n pos = 0\n i = 0\n mass = 0\n for amino in self.peptide:\n pos += PROTEIN_MASS[amino]\n sequence += str(amino)\n if pos == modify[i][0]:\n if i == 0:\n mass = modify[i][1]\n else:\n mass = modify[i][1]-modify[i-1][1]\n \n if mass > 0:\n sequence += \"(+\"+str(mass)+\")\"\n else:\n sequence += \"(\"+str(mass)+\")\"\n i += 1\n \n print sequence", "def test_ignore(self):\n parser = hhsuite.FastaParser(ignore={\"foo\"})\n results = parser.run(self.pipeline)\n self.assertNotIn(\"foo\", results[\"templates\"][1][\"sequence_alignments\"])", "def condenseGappyAlignment(a, thresh=0.9):\n\n a = padAlignment(a)\n smat = align2mat(a)\n gapSiteInd = np.mean(smat == b'-', axis=0) >= thresh\n keepSeqInd = np.all(smat[:, gapSiteInd] == b'-', axis=1)\n print('Removing %d of %d sites and %d of %d sequences from the alignment.' 
% (gapSiteInd.sum(), smat.shape[1], (~keepSeqInd).sum(), smat.shape[0]))\n\n smat = smat[keepSeqInd,:]\n smat = smat[:, ~gapSiteInd]\n \n return seqmat2align(smat, index=a.index[keepSeqInd])", "def parse_lats(lines):\n class Parser:\n def __init__(self):\n self.state = 'get_utt_id'\n self.utt_id = ''\n self.out = {}\n\n def is_line_utt_id(self, splited_line):\n return len(splited_line) == 1\n\n def new_utt(self, splited_line):\n self.utt_id = splited_line[0]\n self.out[self.utt_id] = []\n self.state = 'get_arc'\n\n def start(self):\n self.state = 'get_utt_id'\n self.utt_id = ''\n self.out = {}\n\n def add(self, line):\n splited_line = line.split()\n if self.state == 'get_utt_id':\n assert self.is_line_utt_id(splited_line), RuntimeError(\"parse_lats init error.\")\n self.new_utt(splited_line)\n return\n if self.state == 'get_arc':\n # if self.is_line_utt_id(splited_line):\n # self.new_utt(splited_line)\n # else:\n if len(splited_line) == 4:\n # classic arc\n state_from, state_to, word_id = map(int, splited_line[:3])\n weight_hclg, weight_am, ali = splited_line[3].split(',')\n weight_hclg, weight_am = float(weight_hclg), float(weight_am)\n self.out[self.utt_id].append((state_from, state_to, word_id, weight_hclg, weight_am, ali))\n elif len(splited_line) == 3:\n state_from, state_to, word_id = map(int, splited_line[:3])\n weight_hclg, weight_am, ali = 0.0, 0.0, ''\n self.out[self.utt_id].append((state_from, state_to, word_id, weight_hclg, weight_am, ali))\n elif len(splited_line) == 2:\n # eos arc\n state_from = int(splited_line[0])\n weight_hclg, weight_am, ali = splited_line[1].split(',')\n weight_hclg, weight_am = float(weight_hclg), float(weight_am)\n self.out[self.utt_id].append((state_from, weight_hclg, weight_am, ali))\n elif len(splited_line) == 1:\n state_from = int(splited_line[0])\n self.out[self.utt_id].append((state_from, 0, 0, ''))\n elif len(splited_line) == 0:\n self.state = 'get_utt_id'\n else:\n raise RuntimeError(f\"parse_lats Wrong line in {self.utt_id}: {line}\")\n return\n\n def get_out(self):\n return self.out\n\n parser = Parser()\n parser.start()\n for i, line in enumerate(lines):\n parser.add(line)\n utt2lat = parser.get_out()\n return utt2lat", "def _scan_alignment(self,handle, consumer):\n while 1:\n line = handle.readline()\n if not line:\n break\n if is_blank_line(line):\n continue\n else:\n consumer.query_alignment(line)\n read_and_call(handle, consumer.positive_alignment)\n read_and_call(handle, consumer.hit_alignment)", "def process_align(self):\n\t\tstm_t_dict = self._process_recog()\n\t\ttrans_t_dict = self._process_trans()\n\t\talign_obj = viterbi_align(stm_t_dict, trans_t_dict, self.label, self.pair_file_path)\n\t\tself.trans_t_dict = align_obj.viterbi(0, len(stm_t_dict)-1, 0, len(trans_t_dict)-1)", "def buildProcessesOutOfAssignments(self):\n assigments = where(self.startsOfDataPaths,\n lambda x: isinstance(x, Assignment)\n )\n for sig, dps in groupedby(assigments, lambda x: x.dst):\n dps = list(dps)\n name = \"\"\n if not sig.hasGenericName:\n name = sig.name\n sig.hidden = False\n \n # render sequential statements in process\n # (conversion from netlist to statements)\n for stm in renderIfTree(dps):\n p = HWProcess(\"assig_process_\" + name)\n if sig._useNopVal and not isEnclosed(stm):\n n = sig._nopVal\n p.statements.append(Assignment(n, sig))\n if isinstance(n, RtlSignal):\n p.sensitivityList.add(n)\n \n p.statements.append(stm)\n sensitivity = discoverSensitivity(stm)\n p.sensitivityList.update(sensitivity)\n for s in p.sensitivityList:\n s.hidden = 
False\n\n yield p", "def eval_pos():\n annotations_dir = \"/home/sdb/wangshentao/myspace/thesis/data/VisDrone2019-MOT-test-dev/annotations\"\n all_iou = []\n seqs_sample = '''\n uav0000249_00001_v\n uav0000249_02688_v\n '''\n seqs_str = seqs_sample\n seqs = [seq.strip() for seq in seqs_str.split()]\n for seq in seqs:\n print(seq)\n bbox, frame_id = get_frame_bbox(annotations_dir, seq + '.txt')\n predict_bbox = []\n for idx in range(len(bbox)):\n kalman_filter = KalmanFilter()\n trace_bbox = bbox[idx]\n trace_predict_bbox = []\n mean, covariance = kalman_filter.initiate(tlwh_to_xyah(trace_bbox[0]))\n for i in range(1, trace_bbox.shape[0]):\n mean, covariance = kalman_filter.predict(mean, covariance)\n trace_predict_bbox.append(tlwh(mean))\n mean, covariance = kalman_filter.update(mean, covariance, tlwh_to_xyah(trace_bbox[i]))\n\n trace_predict_bbox = np.array(trace_predict_bbox)\n for i in range(trace_predict_bbox.shape[0]):\n trace_predict_bbox[i] = tlwh_to_tlbr(trace_predict_bbox[i])\n for i in range(trace_bbox.shape[0]):\n trace_bbox[i] = tlwh_to_tlbr(trace_bbox[i])\n\n predict_bbox.append(trace_predict_bbox)\n bbox[idx] = bbox[idx][1:]\n frame_id[idx] = frame_id[idx][1:]\n assert bbox[idx].shape[0] == predict_bbox[idx].shape[0]\n iou = []\n for i in range(len(bbox)):\n trace_iou = []\n trace_bbox = bbox[i]\n trace_predict_bbx = predict_bbox[i]\n for j in range(trace_bbox.shape[0]):\n iou_val = bbox_ious(np.ascontiguousarray(trace_bbox[j][np.newaxis, :], dtype=np.float),\n np.ascontiguousarray(trace_predict_bbx[j][np.newaxis, :], dtype=np.float))\n trace_iou.append(iou_val)\n iou.append(np.array(trace_iou))\n iou = [int(np.mean(i)*100) for i in iou]\n all_iou += iou\n bins = np.zeros(101)\n for i in all_iou:\n bins[i] += 1\n plt.bar(np.arange(101), bins)\n plt.ylabel('num')\n plt.xlabel('IoU*100')\n plt.show()", "def parse_paragraphs(self):\n paragraphs = self.paragraphs\n for paragraph in paragraphs:\n try:\n if paragraph == \"Oznaczenie sądu\" and not self.locked_cells[\"Oznaczenie sądu\"]:\n self.search_index(4, \"Oznaczenie sądu\", paragraph)\n\n if paragraph.startswith(\"3.Firma,\") and not self.locked_cells[\"Firma, pod którą spółka działa\"]:\n self.search_index(2, \"Firma, pod którą spółka działa\", paragraph)\n\n if paragraph.startswith(\"3.Nazwa\") and not self.locked_cells[\"Firma, pod którą spółka działa\"]:\n self.search_index(2, \"Firma, pod którą spółka działa\", paragraph)\n\n if paragraph.startswith(\"1.Siedziba\") and not self.locked_cells[\"Siedziba\"]:\n self.search_index(4, \"Siedziba\", paragraph)\n\n if paragraph.startswith(\"2.Adres\") and not self.locked_cells[\"Adres\"]:\n self.search_index(4, \"Adres\", paragraph)\n\n if paragraph.startswith(\"Numer KRS\") and not self.locked_cells[\"KRS\"]:\n self.datafields[\"KRS\"] = paragraph.split()[-1]\n self.locked_cells[\"KRS\"] = True\n\n if paragraph.startswith(\"2.Numer REGON/NIP\") and not self.locked_cells[\"REGON/NIP\"]:\n self.search_index(2, \"REGON/NIP\", paragraph)\n\n if paragraph.startswith(\"1.Oznaczenie formy prawnej\") and not self.locked_cells[\"Forma Prawna\"]:\n self.search_index(2, \"Forma Prawna\", paragraph)\n\n if paragraph.startswith(\"1.Wysokość kapitału zakładowego\"):\n self.search_index(2, \"Kapitał Zakładowy\", paragraph)\n\n if paragraph.startswith(\"5.Kwotowe określenie części kapitału wpłaconego\"):\n self.search_index(2, \"Kapitał Wpłacony\", paragraph)\n\n if paragraph.startswith(\"Rubryka 7 - Dane wspólników\"): # Open \"Wspólnicy\" parsing block.\n self.locked_cells[\"Wspólnicy\"] = 
True\n\n if paragraph.startswith(\"Rubryka 7 - Komitet założycielski\"): # STOWARZYSZENIE\n break\n\n if paragraph.startswith(\"1.Nazwisko / Nazwa lub firma\") and self.locked_cells[\"Wspólnicy\"]:\n self.active += 1\n self.datafields[f\"Wspólnik {self.active}\"] = {}\n\n pattern = rf\"^[A-Z{self.unicode}]+\"\n self.search_loop(pattern, \"Wspólnik\", \"Nazwisko/Nazwa\", paragraph)\n\n if paragraph.startswith(\"2.Imiona\") and self.locked_cells[\"Wspólnicy\"]:\n pattern = rf\"[A-Z{self.unicode}]+\\s[A-Z{self.unicode}]+$|^[A-Z{self.unicode}]+$|^[*]+$\"\n self.search_loop(pattern, \"Wspólnik\", \"Imiona\", paragraph)\n\n if paragraph.startswith(\"3.Numer PESEL/REGON\") and self.locked_cells[\"Wspólnicy\"]:\n pattern = r\"[-]+|[0-9]{9,11}\"\n self.search_loop(pattern, \"Wspólnik\", \"PESEL/REGON\", paragraph)\n\n if paragraph.startswith(\"4.Numer KRS\") and self.locked_cells[\"Wspólnicy\"]:\n pattern = r\"[-]+|[*]+|[0-9]{10}$\"\n self.search_loop(pattern, \"Wspólnik\", \"KRS\", paragraph)\n\n if paragraph.startswith(\"5.Posiadane przez wspólnika udziały\"):\n index = paragraphs.index(paragraph)\n line_1 = paragraphs[index + 2].strip(\" \")\n line_2 = paragraphs[index + 3].strip(\" \")\n if line_2:\n self.datafields[f\"Wspólnik {self.active}\"][\"Udziały\"] = f\"{line_1} {line_2}\"\n else:\n self.datafields[f\"Wspólnik {self.active}\"][\"Udziały\"] = f\"{line_1}\"\n\n if paragraph == \"ZARZĄD\":\n self.locked_cells[\"Wspólnicy\"] = False # Close \"Wspólnicy\" parsing block.\n self.locked_cells[\"Zarząd\"] = True # Open \"Zarząd\" parsing block.\n self.active = 0\n\n if paragraph.startswith(\"1.Nazwisko\") and self.locked_cells[\"Zarząd\"]:\n self.active += 1\n self.datafields[f\"Zarząd {self.active}\"] = {}\n pattern = rf\"^[A-Z{self.unicode}]+\"\n self.search_loop(pattern, \"Zarząd\", \"Nazwisko/Nazwa\", paragraph)\n\n if paragraph.startswith(\"2.Imiona\") and self.locked_cells[\"Zarząd\"]:\n pattern = rf\"^[A-Z{self.unicode}]+\\s[A-Z{self.unicode}]+$|^[A-Z{self.unicode}]+$|^[*]+$\"\n self.search_loop(pattern, \"Zarząd\", \"Imiona\", paragraph)\n\n if paragraph.startswith(\"5.Funkcja w organie \") and self.locked_cells[\"Zarząd\"]:\n paragraph = paragraph.strip(\"5.Funkcja w organie reprezentującym \")\n self.datafields[f\"Zarząd {self.active}\"][\"Funkcja\"] = paragraph\n\n if paragraph.startswith(\"Rubryka 2 - Organ nadzoru\"):\n self.locked_cells[\"Zarząd\"] = False # Close \"Zarząd\" parsing block.\n except KeyError:\n pass\n return self.datafields", "def readAMBERTop(self, phys, filename):\r\n\r\n def skipLine(data):\r\n nl = data.index('\\n')\r\n return data[nl+1:len(data)]\r\n\r\n def jumpTo(data, target):\r\n fp = data.index(target)\r\n return data[fp:len(data)]\r\n\r\n def readRemove(data, size):\r\n retval = data[0:size-1]\r\n return data[size:len(data)]\r\n\r\n def getInteger(data):\r\n pos = 0\r\n retval = \"\"\r\n while (not data[pos].isdigit()):\r\n pos = pos + 1\r\n while (data[pos].isdigit()):\r\n retval = retval + data[pos]\r\n pos = pos + 1\r\n data = data[pos:len(data)]\r\n return int(retval), data\r\n\r\n def parse(data, arr, str, count, dtype, tupsize=1):\r\n data = jumpTo(data, \"%FLAG \"+str)\r\n data = jumpTo(data, \"%FORMAT\")\r\n numPerLine, data = getInteger(data)\r\n fieldsize, data = getInteger(data)\r\n data = skipLine(data) \r\n \r\n arr2 = []\r\n numread = 0\r\n for j in range(0, (tupsize*count-1) / numPerLine + 1):\r\n for i in range(0, numPerLine):\r\n if (tupsize == 1):\r\n arr.append(dtype(data[0:fieldsize].strip()))\r\n else:\r\n 
arr2.append(dtype(data[0:fieldsize].strip()))\r\n if (len(arr2) == tupsize):\r\n arr.append(arr2)\r\n arr2 = []\r\n numread += 1\r\n data = data[fieldsize:len(data)]\r\n if (numread == tupsize*count):\r\n break\r\n data = skipLine(data) \r\n return data\r\n\r\n def scan(data, str):\r\n return (data.count(str) != 0)\r\n\r\n\r\n f = open(filename, 'r')\r\n data = f.read()\r\n\r\n # First Line: VERSION ...\r\n data = skipLine(data)\r\n\r\n # Go To: %FLAG POINTERS\r\n data = jumpTo(data, '%FLAG POINTERS')\r\n\r\n data = jumpTo(data, '%FORMAT')\r\n numPerLine, data = getInteger(data)\r\n fieldsize, data = getInteger(data)\r\n data = skipLine(data)\r\n \r\n temp = []\r\n numread = 0\r\n for j in range(0, 31 / numPerLine + 1):\r\n for i in range(0, numPerLine):\r\n temp.append(int(data[0:8]))\r\n data = data[8:len(data)]\r\n numread += 1\r\n if (numread == 31):\r\n break\r\n data = skipLine(data)\r\n \r\n [natoms, ntypes, nbonh, mbona, ntheth, mtheta, nphih, mphia, nhparm, nparm, nnb, nres, nbona, ntheta, nphia, numbnd, numang, nptra, natyp, nphb, ifpert, nbper, ngper, ndper, mbper, mgper, mdper, ifbox, nmxrs, ifcap, numextra] = temp \r\n\r\n\r\n #################################################\r\n # Read AtomTypes\r\n atomnames = []\r\n charges = []\r\n masses = []\r\n atindex = []\r\n exclusions = []\r\n nparams = []\r\n reslabels = []\r\n respointers = []\r\n forceconstants = [[], [], []] # bond, angle, dihedral\r\n equilvals = [[], [], [[], []]] # bond, angle, dihedral\r\n scee_scales = []\r\n scnb_scales = []\r\n solty = []\r\n lj_acoef = []\r\n lj_bcoef = []\r\n\r\n data = parse(data, atomnames, \"ATOM_NAME\", natoms, str) \r\n data = parse(data, charges, \"CHARGE\", natoms, float)\r\n data = parse(data, masses, \"MASS\", natoms, float)\r\n data = parse(data, atindex, \"ATOM_TYPE_INDEX\", natoms, int)\r\n data = parse(data, exclusions, \"NUMBER_EXCLUDED_ATOMS\", natoms, int)\r\n data = parse(data, nparams, \"NONBONDED_PARM_INDEX\", ntypes*ntypes, int)\r\n data = parse(data, reslabels, \"RESIDUE_LABEL\", nres, str)\r\n data = parse(data, respointers, \"RESIDUE_POINTER\", nres, int)\r\n data = parse(data, forceconstants[0], \"BOND_FORCE_CONSTANT\", numbnd, float)\r\n data = parse(data, equilvals[0], \"BOND_EQUIL_VALUE\", numbnd, float)\r\n data = parse(data, forceconstants[1], \"ANGLE_FORCE_CONSTANT\", numang, float)\r\n data = parse(data, equilvals[1], \"ANGLE_EQUIL_VALUE\", numang, float)\r\n data = parse(data, forceconstants[2], \"DIHEDRAL_FORCE_CONSTANT\", nptra, float)\r\n data = parse(data, equilvals[2][0], \"DIHEDRAL_PERIODICITY\", nptra, float)\r\n data = parse(data, equilvals[2][1], \"DIHEDRAL_PHASE\", nptra, float)\r\n if (scan(data, \"SCEE_SCALE_FACTOR\")):\r\n data = parse(data, scee_scales, \"SCEE_SCALE_FACTORS\", nptra, float)\r\n else:\r\n for i in range(0, nptra):\r\n scee_scales.append(1.2) # Default \r\n if (scan(data, \"SCNB_SCALE_FACTOR\")):\r\n data = parse(data, scnb_scales, \"SCNB_SCALE_FACTORS\", nptra, float)\r\n else:\r\n for i in range(0, nptra):\r\n scnb_scales.append(2.0) # Default \r\n\r\n data = parse(data, solty, \"SOLTY\", natyp, float)\r\n data = parse(data, lj_acoef, \"LENNARD_JONES_ACOEF\", ntypes*(ntypes+1)/2, float)\r\n data = parse(data, lj_bcoef, \"LENNARD_JONES_BCOEF\", ntypes*(ntypes+1)/2, float)\r\n\r\n\r\n ##########################################################\r\n # STRUCTURE\r\n\r\n bonds = [[], []] # With H, Without H\r\n angles = [[], []] # With H, Without H\r\n dihedrals = [[], []] # With H, Without H\r\n impropers = [[], []] # With H, 
Without H\r\n excluded_atoms = [] \r\n hbond_acoef = []\r\n hbond_bcoef = []\r\n hbcut = []\r\n amber_atom_types = []\r\n tree_chain = []\r\n join_array = []\r\n irotat = []\r\n radii = []\r\n screen = []\r\n\r\n data = parse(data, bonds[0], \"BONDS_INC_HYDROGEN\", nbonh, int, 3)\r\n data = parse(data, bonds[1], \"BONDS_WITHOUT_HYDROGEN\", nbona, int, 3)\r\n data = parse(data, angles[0], \"ANGLES_INC_HYDROGEN\", ntheth, int, 4)\r\n data = parse(data, angles[1], \"ANGLES_WITHOUT_HYDROGEN\", ntheta, int, 4)\r\n data = parse(data, dihedrals[0], \"DIHEDRALS_INC_HYDROGEN\", nphih, int, 5)\r\n data = parse(data, dihedrals[1], \"DIHEDRALS_WITHOUT_HYDROGEN\", nphia, int, 5)\r\n \r\n # MERGE ARRAYS - PM HANDLES THE H+\r\n final_bonds = bonds[0] + bonds[1]\r\n final_angles = angles[0] + angles[1]\r\n final_dihedrals = dihedrals[0] + dihedrals[1]\r\n final_impropers = []\r\n \r\n # CLEAN UP THE TRASH\r\n del(bonds)\r\n del(angles)\r\n del(dihedrals)\r\n \r\n\r\n # Move impropers into their own array\r\n i = 0\r\n while (i < len(final_dihedrals)):\r\n if (final_dihedrals[i][2] < 0): # 1-4 exclusions are handled by our back end\r\n final_dihedrals[i][2] *= -1\r\n if (final_dihedrals[i][3] < 0):\r\n final_dihedrals[i][3] *= -1 # Make + again\r\n final_impropers.append(final_dihedrals[i])\r\n final_dihedrals.remove(final_dihedrals[i])\r\n i -= 1\r\n i += 1\r\n\r\n # Convert charge units\r\n for i in range(0, len(charges)):\r\n charges[i] /= 18.223\r\n\r\n\r\n data = parse(data, excluded_atoms, \"EXCLUDED_ATOMS_LIST\", nnb, int)\r\n data = parse(data, hbond_acoef, \"HBOND_ACOEF\", nphb, float)\r\n data = parse(data, hbond_bcoef, \"HBOND_BCOEF\", nphb, float)\r\n data = parse(data, hbcut, \"HBCUT\", nphb, float)\r\n data = parse(data, amber_atom_types, \"AMBER_ATOM_TYPE\", natoms, str)\r\n data = parse(data, tree_chain, \"TREE_CHAIN_CLASSIFICATION\", natoms, str)\r\n data = parse(data, join_array, \"JOIN_ARRAY\", natoms, int)\r\n data = parse(data, irotat, \"IROTAT\", natoms, int)\r\n data = parse(data, radii, \"RADII\", natoms, float)\r\n data = parse(data, screen, \"SCREEN\", natoms, float)\r\n\r\n # Further process dihedrals and impropers\r\n # Deal with multiplicity\r\n # A bit ugly, but the fastest for now\r\n # forceconstants[2][dihedrals[0][i][4]-1], int(equilvals[2][0][dihedrals[0][i][4]-1]), equilvals[2][1][dihedrals[0][i][4]-1]\r\n\r\n mult_di = dict()\r\n mult_im = dict()\r\n for i in range(0, len(final_dihedrals)):\r\n di_id = str(final_dihedrals[i][0])+' '+str(final_dihedrals[i][1])+' '+str(final_dihedrals[i][2])+' '+str(final_dihedrals[i][3])\r\n if (not mult_di.has_key(di_id)):\r\n mult_di[di_id] = [1, False, [forceconstants[2][final_dihedrals[i][4]-1]], [int(equilvals[2][0][final_dihedrals[i][4]-1])], [equilvals[2][1][final_dihedrals[i][4]-1]]]\r\n else:\r\n mult_di[di_id][0] += 1\r\n mult_di[di_id][2].append(forceconstants[2][final_dihedrals[i][4]-1])\r\n mult_di[di_id][3].append(int(equilvals[2][0][final_dihedrals[i][4]-1]))\r\n mult_di[di_id][4].append(equilvals[2][1][final_dihedrals[i][4]-1])\r\n \r\n for i in range(0, len(final_impropers)):\r\n im_id = str(final_impropers[i][0])+' '+str(final_impropers[i][1])+' '+str(final_impropers[i][2])+' '+str(final_impropers[i][3])\r\n if (not mult_im.has_key(di_id)):\r\n mult_im[im_id] = [1, False, [forceconstants[2][final_impropers[i][4]-1]], [int(equilvals[2][0][final_impropers[i][4]-1])], [equilvals[2][1][final_impropers[i][4]-1]]]\r\n else:\r\n mult_im[im_id][0] += 1\r\n mult_im[im_id][2].append(forceconstants[2][final_impropers[i][4]-1])\r\n 
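# --- Illustrative sketch (editor's addition): decoding an AMBER %FORMAT line --
# readAMBERTop above walks %FORMAT records character by character to recover
# the items-per-line count and field width; below is a compact regex-based
# equivalent for the common "(10I8)" / "(5E16.8)" / "(20a4)" shapes. The sample
# strings follow the prmtop convention but are otherwise made up.
import re

def parse_format(line):
    m = re.search(r"\((\d+)[aIEF](\d+)", line)
    if m is None:
        raise ValueError("unrecognised FORMAT record: %r" % line)
    return int(m.group(1)), int(m.group(2))   # (items per line, field width)

assert parse_format("%FORMAT(10I8)") == (10, 8)
assert parse_format("%FORMAT(5E16.8)") == (5, 16)
assert parse_format("%FORMAT(20a4)") == (20, 4)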
mult_im[im_id][3].append(int(equilvals[2][0][final_impropers[i][4]-1]))\r\n mult_im[im_id][4].append(equilvals[2][1][final_impropers[i][4]-1])\r\n\r\n\r\n\r\n \r\n #[natoms, ntypes, nbonh, mbona, ntheth, mtheta, nphih, mphia, nhparm, nparm, nnb, nres, nbona, ntheta, nphia, numbnd, numang, nptra, natyp, nphb, ifpert, nbper, ngper, ndper, mbper, mgper, mdper, ifbox, nmxrs, ifcap, numextra] = temp \r\n #phys.myPSF.createAll(natoms, nbonh+mbona, ntheth+mtheta,\r\n # len(dihedrals[0])+len(dihedrals[1]),\r\n # len(impropers[0])+len(impropers[1]),\r\n # 0, 0, 0, 0)\r\n \r\n # Add atoms\r\n curres = 1\r\n for i in range(0, natoms):\r\n phys.myPSF.addAtom(i, 'SIM', curres, reslabels[curres-1],\r\n atomnames[i], atomnames[i], charges[i],\r\n masses[i]) \r\n if (curres != nres and i >= respointers[curres]):\r\n curres += 1\r\n\r\n # Add bonds\r\n for i in range(0, nbonh+nbona):\r\n phys.myPSF.addBond(i+1, final_bonds[i][0]/3+1, final_bonds[i][1]/3+1)\r\n phys.myPAR.addBond(i+1, atomnames[final_bonds[i][0]/3], atomnames[final_bonds[i][1]/3], forceconstants[0][final_bonds[i][2]/3], equilvals[0][final_bonds[i][2]/3])\r\n \r\n # Add angles\r\n for i in range(0, ntheth+ntheta):\r\n phys.myPSF.addAngle(i+1, final_angles[i][0]/3+1, final_angles[i][1]/3+1, final_angles[i][2]/3+1)\r\n phys.myPAR.addAngle(i+1, atomnames[final_angles[i][0]/3], atomnames[final_angles[i][1]/3], atomnames[final_angles[i][2]/3], forceconstants[1][final_angles[i][3]/3], equilvals[1][final_angles[i][3]/3])\r\n \r\n # Add dihedrals\r\n for i in range(0, len(final_dihedrals)):\r\n di_id = str(final_dihedrals[i][0])+' '+str(final_dihedrals[i][1])+' '+str(final_dihedrals[i][2])+' '+str(final_dihedrals[i][3])\r\n mult = mult_di[di_id][0]\r\n checked = mult_di[di_id][1]\r\n print di_id, \" \", mult\r\n if (not checked):\r\n if (mult == 1):\r\n phys.myPSF.addDihedral(i+1, final_dihedrals[i][0]/3+1, final_dihedrals[i][1]/3+1, int(numpy.abs(final_dihedrals[i][2]))/3+1, final_dihedrals[i][3]/3+1)\r\n phys.myPAR.addDihedral(i+1, atomnames[final_dihedrals[i][0]/3], atomnames[final_dihedrals[i][1]/3], atomnames[int(numpy.abs(final_dihedrals[i][2]))/3], atomnames[final_dihedrals[i][3]/3], forceconstants[2][final_dihedrals[i][4]-1], int(equilvals[2][0][final_dihedrals[i][4]-1]), equilvals[2][1][final_dihedrals[i][4]-1])\r\n else:\r\n mult_di[di_id][1] = True\r\n # Add dihedral with the appropriate multiplicity\r\n # Force constants, periodicity and phase shifts are in [2], [3], and [4] respectively\r\n fcvec = PARReader.VectorOfDouble()\r\n periodvec = PARReader.VectorOfInt()\r\n phasevec = PARReader.VectorOfDouble() \r\n for j in range(0, len(mult_di[di_id][2])):\r\n fcvec.push_back(mult_di[di_id][2][j])\r\n periodvec.push_back(mult_di[di_id][3][j])\r\n phasevec.push_back(mult_di[di_id][4][j])\r\n phys.myPSF.addDihedral(i+1, final_dihedrals[i][0]/3+1, final_dihedrals[i][1]/3+1, int(numpy.abs(final_dihedrals[i][2]))/3+1, final_dihedrals[i][3]/3+1)\r\n phys.myPAR.addDihedral(i+1, atomnames[final_dihedrals[i][0]/3], atomnames[final_dihedrals[i][1]/3], atomnames[int(numpy.abs(final_dihedrals[i][2]))/3], atomnames[final_dihedrals[i][3]/3], mult, fcvec, periodvec, phasevec)\r\n \r\n\r\n\r\n\r\n for i in range(0, len(final_impropers)):\r\n im_id = str(final_impropers[i][0])+' '+str(final_impropers[i][1])+' '+str(final_impropers[i][2])+' '+str(final_impropers[i][3])\r\n mult = mult_im[im_id][0]\r\n checked = mult_im[im_id][1]\r\n print im_id, \" \", mult\r\n if (not checked):\r\n if (mult == 1):\r\n phys.myPSF.addImproper(i+1, final_impropers[i][0]/3+1, 
final_impropers[i][1]/3+1, int(numpy.abs(final_impropers[i][2]))/3+1, final_impropers[i][3]/3+1)\r\n phys.myPAR.addImproper(i+1, atomnames[final_impropers[i][0]/3], atomnames[final_impropers[i][1]/3], atomnames[int(numpy.abs(final_impropers[i][2]))/3], atomnames[final_impropers[i][3]/3], forceconstants[2][final_impropers[i][4]-1], int(equilvals[2][0][final_impropers[i][4]-1]), equilvals[2][1][final_impropers[i][4]-1])\r\n else:\r\n mult_im[im_id][1] = True\r\n # Add dihedral with the appropriate multiplicity\r\n # Force constants, periodicity and phase shifts are in [2], [3], and [4] respectively\r\n fcvec = PARReader.VectorOfDouble()\r\n periodvec = PARReader.VectorOfInt()\r\n phasevec = PARReader.VectorOfDouble() \r\n for j in range(0, len(mult_im[im_id][2])):\r\n fcvec.push_back(mult_im[im_id][2][j])\r\n periodvec.push_back(mult_im[im_id][3][j])\r\n phasevec.push_back(mult_im[im_id][4][j])\r\n phys.myPSF.addImproper(i+1, final_impropers[i][0]/3+1, final_impropers[i][1]/3+1, int(numpy.abs(final_impropers[i][2]))/3+1, final_impropers[i][3]/3+1)\r\n phys.myPAR.addImproper(i+1, atomnames[final_impropers[i][0]/3], atomnames[final_impropers[i][1]/3], atomnames[int(numpy.abs(final_impropers[i][2]))/3], atomnames[final_impropers[i][3]/3], mult, fcvec, periodvec, phasevec)\r\n\r\n \r\n # Need to add garbage nonbonded stuff for now\r\n for i in range(0, natoms):\r\n phys.myPAR.addNonbonded(i, atomnames[i], 1, 1, 1, 1, 1, 1)\r\n\r\n # Add VDW parameters\r\n # AMBER has the Aij and Bij already in the parameter file\r\n # This actually makes life easier.\r\n # CHARMM does not, they simply have the original sigma and epsilon.\r\n # To compensate for this, for now we will leave the nonbondeds empty in phys.myPAR\r\n # We will then access the LennardJones parameter table in Topology directly\r\n k = 0\r\n phys.myTop.resizeLennardJonesParameters(ntypes)\r\n for i in range(0, ntypes):\r\n for j in range(i, ntypes):\r\n params = GenericTopology.LennardJonesParameters(lj_acoef[k], lj_bcoef[k])\r\n k += 1\r\n phys.myTop.setLennardJonesParameters(i, j, params)\r\n \r\n phys.myPAR.readFlag = 1\r\n phys.build()", "def align_reconstruction_segments(data, graph, reconstruction, recon_gps_points):\n segments = []\n\n gps_shot_ids = sorted(recon_gps_points.keys())\n\n for i in range(len(gps_shot_ids) - 1):\n X, Xp = [], []\n onplane, verticals = [], []\n\n for j in range(2):\n shot_id = gps_shot_ids[i+j]\n X.append(reconstruction.shots[shot_id].pose.get_origin())\n Xp.append(recon_gps_points[shot_id])\n\n R = reconstruction.shots[shot_id].pose.get_rotation_matrix()\n onplane.append(R[0,:])\n onplane.append(R[2,:])\n verticals.append(R[1,:])\n\n X = np.array(X)\n Xp = np.array(Xp)\n\n # Estimate ground plane.\n p = multiview.fit_plane(X - X.mean(axis=0), onplane, verticals)\n Rplane = multiview.plane_horizontalling_rotation(p)\n X = Rplane.dot(X.T).T\n\n # Estimate 2d similarity to align to pdr predictions\n T = tf.affine_matrix_from_points(X.T[:2], Xp.T[:2], shear=False)\n s = np.linalg.det(T[:2, :2]) ** 0.5\n A = np.eye(3)\n A[:2, :2] = T[:2, :2] / s\n A = A.dot(Rplane)\n b = np.array([\n T[0, 2],\n T[1, 2],\n Xp[:, 2].mean() - s * X[:, 2].mean() # vertical alignment\n ])\n\n shot_ids = sorted(reconstruction.shots.keys())\n if i == 0:\n # in first iteration, we transform from first shot of recon\n start_index = _shot_id_to_int(shot_ids[0])\n else:\n start_index = _shot_id_to_int(gps_shot_ids[i])\n\n if i == len(gps_shot_ids)-2:\n # in last iteration, we transform until last shot of recon\n end_index = 
_shot_id_to_int(shot_ids[-1])\n else:\n # subtract 1 at the end, since gps_shots_ids[i+1] will be transformed in the next iteration\n end_index = _shot_id_to_int(gps_shot_ids[i+1]) - 1\n\n segment = extract_segment(data, graph, reconstruction, start_index, end_index)\n apply_similarity(segment, s, A, b)\n\n segment.alignment.aligned = True\n segment.alignment.num_correspondences = 2\n\n segments.append(segment)\n\n return segments", "def test_align_without_gaps(self):\n aln = ArrayAlignment(\n {\"seq1\": \"ACGG\", \"seq2\": \"CGCA\", \"seq3\": \"CCG-\"}, moltype=\"dna\"\n )\n aln_plot = aln.dotplot(\"seq1\")\n self.assertNotEqual(aln_plot._aligned_coords, None)", "def filter_aligned_codons(aln):\n\n ind = find_aligned_codons(aln)\n return subalign(aln, ind)", "def parse_predictions(predicted_boxes, sem_cls_probs, objectness_probs, point_cloud, config_dict):\n\n sem_cls_probs = sem_cls_probs.detach().cpu().numpy() # B,num_proposal,10\n pred_sem_cls_prob = np.max(sem_cls_probs, -1) # B,num_proposal\n pred_sem_cls = np.argmax(sem_cls_probs, -1)\n obj_prob = objectness_probs.detach().cpu().numpy()\n\n pred_corners_3d_upright_camera = predicted_boxes.detach().cpu().numpy()\n\n K = pred_corners_3d_upright_camera.shape[1] # K==num_proposal\n bsize = pred_corners_3d_upright_camera.shape[0]\n nonempty_box_mask = np.ones((bsize, K))\n\n if config_dict[\"remove_empty_box\"]:\n # -------------------------------------\n # Remove predicted boxes without any point within them..\n batch_pc = point_cloud.cpu().numpy()[:, :, 0:3] # B,N,3\n for i in range(bsize):\n pc = batch_pc[i, :, :] # (N,3)\n for j in range(K):\n box3d = pred_corners_3d_upright_camera[i, j, :, :] # (8,3)\n box3d = flip_axis_to_depth(box3d)\n pc_in_box, inds = extract_pc_in_box3d(pc, box3d)\n if len(pc_in_box) < 5:\n nonempty_box_mask[i, j] = 0\n if nonempty_box_mask[i].sum() == 0:\n nonempty_box_mask[i, obj_prob[i].argmax()] = 1\n # -------------------------------------\n\n if \"no_nms\" in config_dict and config_dict[\"no_nms\"]:\n # pred_mask = np.ones((bsize, K))\n pred_mask = nonempty_box_mask\n elif not config_dict[\"use_3d_nms\"]:\n # ---------- NMS input: pred_with_prob in (B,K,7) -----------\n pred_mask = np.zeros((bsize, K))\n for i in range(bsize):\n boxes_2d_with_prob = np.zeros((K, 5))\n for j in range(K):\n boxes_2d_with_prob[j, 0] = np.min(pred_corners_3d_upright_camera[i, j, :, 0])\n boxes_2d_with_prob[j, 2] = np.max(pred_corners_3d_upright_camera[i, j, :, 0])\n boxes_2d_with_prob[j, 1] = np.min(pred_corners_3d_upright_camera[i, j, :, 2])\n boxes_2d_with_prob[j, 3] = np.max(pred_corners_3d_upright_camera[i, j, :, 2])\n boxes_2d_with_prob[j, 4] = obj_prob[i, j]\n nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]\n assert len(nonempty_box_inds) > 0\n pick = nms_2d_faster(\n boxes_2d_with_prob[nonempty_box_mask[i, :] == 1, :],\n config_dict[\"nms_iou\"],\n config_dict[\"use_old_type_nms\"],\n )\n assert len(pick) > 0\n pred_mask[i, nonempty_box_inds[pick]] = 1\n # ---------- NMS output: pred_mask in (B,K) -----------\n elif config_dict[\"use_3d_nms\"] and (not config_dict[\"cls_nms\"]):\n # ---------- NMS input: pred_with_prob in (B,K,7) -----------\n pred_mask = np.zeros((bsize, K))\n for i in range(bsize):\n boxes_3d_with_prob = np.zeros((K, 7))\n for j in range(K):\n boxes_3d_with_prob[j, 0] = np.min(pred_corners_3d_upright_camera[i, j, :, 0])\n boxes_3d_with_prob[j, 1] = np.min(pred_corners_3d_upright_camera[i, j, :, 1])\n boxes_3d_with_prob[j, 2] = np.min(pred_corners_3d_upright_camera[i, j, :, 2])\n 
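# --- Illustrative sketch (editor's addition): min/max boxes and IoU -----------
# parse_predictions above reduces each predicted box to per-axis min/max values
# before NMS; this toy shows the same idea in 2D together with the IoU that NMS
# thresholds on. The box format (xmin, ymin, xmax, ymax) and the values are
# invented for the example.
def iou(a, b):
    ix = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
    iy = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = ix * iy
    union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
    return inter / union

assert abs(iou((0, 0, 2, 2), (1, 1, 3, 3)) - 1.0 / 7.0) < 1e-9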
boxes_3d_with_prob[j, 3] = np.max(pred_corners_3d_upright_camera[i, j, :, 0])\n boxes_3d_with_prob[j, 4] = np.max(pred_corners_3d_upright_camera[i, j, :, 1])\n boxes_3d_with_prob[j, 5] = np.max(pred_corners_3d_upright_camera[i, j, :, 2])\n boxes_3d_with_prob[j, 6] = obj_prob[i, j]\n nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]\n assert len(nonempty_box_inds) > 0\n pick = nms_3d_faster(\n boxes_3d_with_prob[nonempty_box_mask[i, :] == 1, :],\n config_dict[\"nms_iou\"],\n config_dict[\"use_old_type_nms\"],\n )\n assert len(pick) > 0\n pred_mask[i, nonempty_box_inds[pick]] = 1\n # ---------- NMS output: pred_mask in (B,K) -----------\n elif config_dict[\"use_3d_nms\"] and config_dict[\"cls_nms\"]:\n # ---------- NMS input: pred_with_prob in (B,K,8) -----------\n pred_mask = np.zeros((bsize, K))\n for i in range(bsize):\n boxes_3d_with_prob = np.zeros((K, 8))\n for j in range(K):\n boxes_3d_with_prob[j, 0] = np.min(pred_corners_3d_upright_camera[i, j, :, 0])\n boxes_3d_with_prob[j, 1] = np.min(pred_corners_3d_upright_camera[i, j, :, 1])\n boxes_3d_with_prob[j, 2] = np.min(pred_corners_3d_upright_camera[i, j, :, 2])\n boxes_3d_with_prob[j, 3] = np.max(pred_corners_3d_upright_camera[i, j, :, 0])\n boxes_3d_with_prob[j, 4] = np.max(pred_corners_3d_upright_camera[i, j, :, 1])\n boxes_3d_with_prob[j, 5] = np.max(pred_corners_3d_upright_camera[i, j, :, 2])\n boxes_3d_with_prob[j, 6] = obj_prob[i, j]\n boxes_3d_with_prob[j, 7] = pred_sem_cls[i, j] # only suppress if the two boxes are of the same class!!\n nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]\n assert len(nonempty_box_inds) > 0\n pick = nms_3d_faster_samecls(\n boxes_3d_with_prob[nonempty_box_mask[i, :] == 1, :],\n config_dict[\"nms_iou\"],\n config_dict[\"use_old_type_nms\"],\n )\n assert len(pick) > 0\n pred_mask[i, nonempty_box_inds[pick]] = 1\n # ---------- NMS output: pred_mask in (B,K) -----------\n\n batch_pred_map_cls = (\n []\n ) # a list (len: batch_size) of list (len: num of predictions per sample) of tuples of pred_cls, pred_box and conf (0-1)\n for i in range(bsize):\n if config_dict[\"per_class_proposal\"]:\n assert config_dict[\"use_cls_confidence_only\"] is False\n cur_list = []\n for ii in range(config_dict[\"dataset_config\"].num_semcls):\n cur_list += [\n (\n ii,\n pred_corners_3d_upright_camera[i, j],\n sem_cls_probs[i, j, ii] * obj_prob[i, j],\n )\n for j in range(pred_corners_3d_upright_camera.shape[1])\n if pred_mask[i, j] == 1 and obj_prob[i, j] > config_dict[\"conf_thresh\"]\n ]\n batch_pred_map_cls.append(cur_list)\n elif config_dict[\"use_cls_confidence_only\"]:\n batch_pred_map_cls.append(\n [\n (\n pred_sem_cls[i, j].item(),\n pred_corners_3d_upright_camera[i, j],\n sem_cls_probs[i, j, pred_sem_cls[i, j].item()],\n )\n for j in range(pred_corners_3d_upright_camera.shape[1])\n if pred_mask[i, j] == 1 and obj_prob[i, j] > config_dict[\"conf_thresh\"]\n ]\n )\n else:\n batch_pred_map_cls.append(\n [\n (\n pred_sem_cls[i, j].item(),\n pred_corners_3d_upright_camera[i, j],\n obj_prob[i, j],\n )\n for j in range(pred_corners_3d_upright_camera.shape[1])\n if pred_mask[i, j] == 1 and obj_prob[i, j] > config_dict[\"conf_thresh\"]\n ]\n )\n\n return batch_pred_map_cls", "def setup_parser(self) -> Dict[str, Any]:\n\n\n # % GALAT - SPP Single Point Positioning\n # % -------------------------------------\n # % Processing Option\n # % ------------------\n # % GNSS system(s) : GALILEO\n # % Orbit type : Broadcast - INAV\n # % Solution type : SPP\n # % Frequency : E1\n # % Elevation mask : 5.0 
deg\n # % Time interval : 30.0 s\n # % Ionosphere opt : NeQuick-G\n # % Troposhere opt : GMF with GPT\n # % Obs start : 2020/01/04 00:00:00.0 GPST (week 2086 518400.0s)\n # % Obs end : 2020/01/04 23:59:30.0 GPST (week 2086 604770.0s)\n # % Epoch expected : 2880\n # % Epoch have : 2880\n # %\n # % Input file(s) : KOUG00GUF_R_20200040000_01D_30S_MO.rnx\n # % Input file(s) : CNES0030.20L\n # % Input file(s) : CNES0040.20L\n # % Input file(s) : igs14.atx\n # %\n # % RINEX header info\n # % ------------------\n # % Marker : KOUG 97301M402\n # % Receiver T/V/# : SEPT POLARX5TR 5.3.0 17323022503\n # % Antenna T/ /# : LEIAR25.R3 LEIT 10180007\n # % Position XYZ : 3855263.3407 -5049731.9986 563040.4252\n # % Antenna H/E/N : 0.0000 0.0000 0.0000\n self._parse_header()\n\n # ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+--\n # 2020/01/04 00:00:00 5.098466365 -52.639742999 106.8901 -0.603 -0.821 -0.349 1.018 0.349 \n # 2020/01/04 00:00:30 5.098466094 -52.639742684 107.4962 -0.633 -0.856 0.257 1.065 0.257 \n # 2020/01/04 00:01:00 5.098466030 -52.639740961 107.6125 -0.640 -1.047 0.373 1.228 0.373 \n return dict(\n names=(\n \"yyyymmdd\", \n \"hhmmss\", \n \"latitude\", \n \"longitude\", \n \"height\", \n \"dlatitude\", \n \"dlongitude\", \n \"dheight\",\n \"hpe\",\n \"vpe\",\n \"site_vel_3d\",\n \"pdop\",\n \"num_satellite_available\",\n \"num_satellite_used\",\n ),\n comments=\"%\",\n delimiter=(10, 9, 15, 15, 10, 9, 9, 9, 9, 9, 9, 6, 4, 4),\n dtype=(\"U10\", \"U9\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\", \"f8\"),\n autostrip=True,\n )", "def _preprocess_and_checks(self, das, params):\n das, params = super()._preprocess_and_checks(das, params)\n\n # Convert grouping and check if allowed:\n if isinstance(params[\"group\"], str):\n params[\"group\"] = Grouper(params[\"group\"])\n\n if (\n self.allowed_groups is not None\n and params[\"group\"].prop not in self.allowed_groups\n ):\n raise ValueError(\n f\"Grouping period {params['group'].prop_name} is not allowed for property \"\n f\"{self.identifier} (needs something in \"\n f\"{list(map(lambda g: '<dim>.' 
+ g.replace('group', ''), self.allowed_groups))}).\"\n )\n\n # Convert grouping and check if allowed:\n sim = das[\"sim\"]\n ref = das[\"ref\"]\n units_sim = units2pint(sim)\n units_ref = units2pint(ref)\n\n if units_sim != units_ref:\n das[\"sim\"] = convert_units_to(sim, ref)\n\n return das, params", "def create_labels_from_guide_alignment(events, sam_string, rna=False, reference_path=None, kmer_index=2,\n one_ref_indexing=False):\n # test if the required fields are in structured numpy array\n check_numpy_table(events, req_fields=('raw_start', 'model_state', 'p_model_state', 'raw_length', 'move'))\n assert type(one_ref_indexing) is bool, \"one_ref_indexing must be a boolean\"\n\n psam_h = initialize_pysam_wrapper(sam_string, reference_path=reference_path)\n # create an indexed map of the events and their corresponding bases\n bases, base_raw_starts, base_raw_lengths, probs = index_bases_from_events(events, kmer_index=kmer_index)\n\n # check if string mapped to reverse strand\n if psam_h.alignment_segment.is_reverse:\n probs = probs[::-1]\n base_raw_starts = base_raw_starts[::-1]\n # rna reads go 3' to 5' so we dont need to reverse if it mapped to reverse strand\n if not rna:\n bases = ReverseComplement().reverse(''.join(bases))\n # reverse if it mapped to forward strand and RNA\n elif rna:\n bases = ReverseComplement().reverse(''.join(bases))\n\n # all 'matches' and 'mismatches'\n matches_map = psam_h.seq_alignment.matches_map\n # zero indexed reference start\n ref_start = psam_h.alignment_segment.reference_start + one_ref_indexing\n # set labels\n raw_start = []\n raw_length = []\n reference_index = []\n kmer = []\n posterior_probability = []\n cigar_labels = []\n prev = matches_map[0].reference_index\n for i, alignment in enumerate(matches_map):\n if i == 0 or alignment.reference_index == prev + 1:\n raw_start.append(base_raw_starts[alignment.query_index])\n raw_length.append(base_raw_lengths[alignment.query_index])\n reference_index.append(alignment.reference_index + ref_start)\n kmer.append(alignment.reference_base)\n posterior_probability.append(probs[alignment.query_index])\n else:\n # initialize labels\n cigar_label = np.zeros(len(raw_start),\n dtype=[('raw_start', int), ('raw_length', int), ('reference_index', int),\n ('posterior_probability', float), ('kmer', 'S5')])\n # assign labels\n cigar_label['raw_start'] = raw_start\n cigar_label['raw_length'] = raw_length\n cigar_label['reference_index'] = reference_index\n cigar_label['kmer'] = kmer\n cigar_label['posterior_probability'] = posterior_probability\n # add to other blocks\n cigar_labels.append(cigar_label)\n # reset trackers\n raw_start = [base_raw_starts[alignment.query_index]]\n raw_length = [base_raw_lengths[alignment.query_index]]\n reference_index = [alignment.reference_index + ref_start]\n kmer = [alignment.reference_base]\n posterior_probability = [probs[alignment.query_index]]\n # keep track of reference positions\n prev = alignment.reference_index\n\n # catch the last label\n cigar_label = np.zeros(len(raw_start), dtype=[('raw_start', int), ('raw_length', int), ('reference_index', int),\n ('posterior_probability', float), ('kmer', 'S5')])\n # assign labels\n cigar_label['raw_start'] = raw_start\n cigar_label['raw_length'] = raw_length\n cigar_label['reference_index'] = reference_index\n cigar_label['kmer'] = kmer\n cigar_label['posterior_probability'] = posterior_probability\n # add to other blocks\n cigar_labels.append(cigar_label)\n\n return cigar_labels", "def _scanSpan_(self, span, 
lan):\n\t\t#pdb.set_trace()\n\t\tif lan == 'src':\n\t\t\twordAlign = self.waMatrix\n\t\telse:\n\t\t\twordAlign = [[self.waMatrix[i][j] for i in xrange(len(self.waMatrix))] for j in xrange(len(self.waMatrix[0]))] \n\t\t\t\n\t\totherSpan = [MAX, MIN]\n\t\tfor i in xrange(span[0], span[1]):\n\t\t\tfor j in xrange(len(wordAlign[i])):\n\t\t\t\tif wordAlign[i][j] == 1:\n\t\t\t\t\tif j < otherSpan[0]:\n\t\t\t\t\t\totherSpan[0] = j\n\t\t\t\t\tif j+1 > otherSpan[1]:\n\t\t\t\t\t\totherSpan[1] = j+1\n\n\t\tif otherSpan[0] == MAX or otherSpan[1] == MIN:\n\t\t\treturn []\n\n\t\t# relax span to include not-aligned words\n\t\totherSpanList = []\n\t\tfor j in xrange(otherSpan[0]-1, -1, -1):\n\t\t\tif sum([wordAlign[i][j] for i in xrange(len(wordAlign))]) == 0:\n\t\t\t\totherSpanList.append((j, otherSpan[1]))\n\t\t\telse:\n\t\t\t\tbreak\n\t\tfor j in xrange(otherSpan[1], len(wordAlign[0])):\n\t\t\tif sum([wordAlign[i][j] for i in xrange(len(wordAlign))]) == 0:\n\t\t\t\totherSpanList.append((otherSpan[0], j+1))\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\totherSpanList.append(tuple(otherSpan))\n\t\treturn otherSpanList", "def compute_local_alignment(seq_x,seq_y,scoring_matrix,alignment_matrix):\n #initialization of variables\n x_pos = -1\n y_pos = -1\n result_seq_x = ''\n result_seq_y = ''\n score = 0\n\n #determine start position in alignment_matrix as position with maximum value \n for row in range(len(seq_x) + 1):\n for col in range(len(seq_y) + 1):\n if alignment_matrix[row][col] > score:\n score = alignment_matrix[row][col]\n x_pos = row\n y_pos = col\n\n #start in start position and go upwards till we reach first entry with value 0\n #in every iteration we reconstruct alignments based on value in alignment_matrix and scoring_matrix\n while x_pos != 0 and y_pos !=0:\n current_value = alignment_matrix[x_pos][y_pos]\n if current_value == 0:\n break\n \n if current_value == alignment_matrix[x_pos-1][y_pos-1] + scoring_matrix[seq_x[x_pos-1]][seq_y[y_pos-1]]:\n result_seq_x = seq_x[x_pos-1] + result_seq_x\n result_seq_y = seq_y[y_pos-1] + result_seq_y\n x_pos -= 1\n y_pos -= 1\n elif current_value == alignment_matrix[x_pos-1][y_pos] + scoring_matrix[seq_x[x_pos-1]][\"-\"]:\n result_seq_x = seq_x[x_pos-1] + result_seq_x\n result_seq_y = \"-\" + result_seq_y\n x_pos -= 1\n else: \n result_seq_x = \"-\" + result_seq_x\n result_seq_y = seq_y[y_pos-1] + result_seq_y\n y_pos -= 1\n\n return (score,result_seq_x,result_seq_y)", "def preprocess(self):\n\n mm_magcoord.add_aacgm_coordinates(self)\n mm_magcoord.add_quasi_dipole_coordinates(self)\n mm_sc.calculate_ecef_velocity(self)\n mm_sc.add_ram_pointing_sc_attitude_vectors(self)\n\n return", "def parse(self, filehandle):\n l = filehandle.readline()\n if l.split()[0] != '##maf':\n return\n else:\n self.setpar(l.split()[1:])\n\n l=filehandle.readline()\n while l:\n la = l.split()\n## print la\n if(len(la)==0 or la[0]=='#'):\n## print \"skipping\"\n 1\n elif(la[0]=='a'):\n## print \"reading alignment\"\n self.readalign(la[1:], filehandle)\n else:\n## print \"end of records\"\n return\n\n l=filehandle.readline()", "def parseJumpt(cmds):\n if (len(cmds) > 0):\n first = str.strip(cmds[0])\n if '==' in first:\n cmds2 = re.split(\"[==|!=|>=|<=|>|<]+\", first)\n cmds[0] = cmds2[0]\n cmds.append(str.strip(cmds2[1]))\n elif '!=' in first:\n cmds2 = re.split(\"[==|!=|>=|<=|>|<]+\", first)\n cmds[0] = cmds2[0]\n cmds.append(str.strip(cmds2[1]))\n elif '>=' in first:\n cmds2 = re.split(\"[==|!=|>=|<=|>|<]+\", first)\n cmds[0] = cmds2[0]\n cmds.append(str.strip(cmds2[1]))\n elif '<=' in 
first:\n cmds2 = re.split(\"[==|!=|>=|<=|>|<]+\", first)\n cmds[0] = cmds2[0]\n cmds.append(str.strip(cmds2[1]))\n elif '>' in first:\n cmds2 = re.split(\"[==|!=|>=|<=|>|<]+\", first)\n cmds[0] = cmds2[0]\n cmds.append(str.strip(cmds2[1]))\n elif '<' in first:\n cmds2 = re.split(\"[==|!=|>=|<=|>|<]+\", first)\n cmds[0] = cmds2[0]\n cmds.append(str.strip(cmds2[1]))\n parseExpr(cmds[0])\n parseJumpt(cmds[1:])", "def parse_sam(rows):\n row1, row2 = rows\n mseqs = {}\n failed_list = []\n insert_list = []\n rname = row1['rname']\n qname = row1['qname']\n cigar1 = row1['cigar']\n cigar2 = row2['cigar']\n\n # filtering criteria\n reason = None\n if cigar1 == '*':\n reason = 'R1 unmapped'\n if int(row1['mapq']) < read_mapping_cutoff:\n reason = 'R1 low mapq'\n\n if cigar2 == '*':\n reason = 'R2 unmapped'\n if int(row2['mapq']) < read_mapping_cutoff:\n reason = 'R2 low mapq'\n\n genotype1, genotype2 = None, None\n try:\n genotype1 = row1['rname'].split('-')[1][0]\n genotype2 = row2['rname'].split('-')[1][0]\n except:\n reason = 'discordant map'\n pass\n\n if genotype1 != genotype2:\n reason = 'map conflict'\n\n if reason:\n failed_list.append({'qname': qname,\n 'rname1': row1['rname'],\n 'rname2': row2['rname'],\n 'reason': reason})\n else:\n pos1 = int(row1['pos'])-1 # convert 1-index to 0-index\n _, seq1, qual1, inserts = apply_cigar(cigar1, row1['seq'], row1['qual'])\n \n # report insertions relative to sample consensus\n for left, (iseq, iqual) in inserts.iteritems():\n insert_list.append({'qname': qname,\n 'fwd_rev': 'F' if is_first_read(row1['flag']) else 'R',\n 'refname': rname,\n 'pos': pos1+left,\n 'insert': iseq,\n 'qual': iqual})\n \n seq1 = '-'*pos1 + seq1 # pad sequence on left\n qual1 = '!'*pos1 + qual1 # assign lowest quality to gap prefix so it does not override mate\n \n \n # now process the mate\n pos2 = int(row2['pos'])-1 # convert 1-index to 0-index\n _, seq2, qual2, inserts = apply_cigar(cigar2, row2['seq'], row2['qual'])\n for left, (iseq, iqual) in inserts.iteritems():\n insert_list.append({'qname': qname,\n 'fwd_rev': 'F' if is_first_read(row2['flag']) else 'R',\n 'refname': rname,\n 'pos': pos2+left,\n 'insert': iseq,\n 'qual': iqual})\n seq2 = '-'*pos2 + seq2\n qual2 = '!'*pos2 + qual2\n \n # merge reads\n for qcut in sam2aln_q_cutoffs:\n mseq = merge_pairs(seq1, seq2, qual1, qual2, qcut)\n prop_N = mseq.count('N') / float(len(mseq.strip('-')))\n if prop_N > max_prop_N:\n # fail read pair\n failed_list.append({'qname': qname,\n 'reason': 'merge failure'})\n continue\n mseqs[qcut] = mseq\n\n return rname, mseqs, insert_list, failed_list", "def pair_hmm_align_unaligned_seqs(seqs,moltype,params={}):\n \n seqs = LoadSeqs(data=seqs,moltype=moltype,aligned=False)\n try:\n s1, s2 = seqs.values()\n except ValueError:\n raise ValueError,\\\n \"Pairwise aligning of seqs requires exactly two seqs.\"\n \n try:\n gap_open = params['gap_open']\n except KeyError:\n gap_open = 5\n try:\n gap_extend = params['gap_extend']\n except KeyError:\n gap_extend = 2\n try:\n score_matrix = params['score_matrix']\n except KeyError:\n score_matrix = make_dna_scoring_dict(\\\n match=1,transition=-1,transversion=-1)\n \n return global_pairwise(s1,s2,score_matrix,gap_open,gap_extend)", "def locate_prob(raw_text, text, prob):\n\n if raw_text=='' or text=='': return []\n if len(prob) != len(raw_text): return []\n raw_text = raw_text.replace('-', '`')\n text = text.replace('-', '`')\n alignments = pairwise2.align.globalmx(raw_text, text, 2, -1)\n align1, align2, score, begin, end = alignments[-1]\n 
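# --- Illustrative sketch (editor's addition): reading SAM FLAG bits -----------
# Several snippets above (filter_segments, parse_sam) use `flag & 64` to pick
# the first read of a pair. The FLAG value 99 below is a made-up but typical
# value for a forward-strand, first-in-pair read; the bit meanings come from
# the SAM specification.
FLAG_PAIRED, FLAG_PROPER_PAIR, FLAG_MATE_REVERSE, FLAG_FIRST_IN_PAIR = 0x1, 0x2, 0x20, 0x40

flag = 99
assert flag & FLAG_PAIRED          # read is paired
assert flag & FLAG_PROPER_PAIR     # mapped in a proper pair
assert flag & FLAG_MATE_REVERSE    # mate maps to the reverse strand
assert flag & FLAG_FIRST_IN_PAIR   # first read of the pair (the `& 64` test)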
text_prob = [prob[index] for (index, item) in enumerate(align1[begin:end]) if align2[index]!='-']\n return text_prob", "def _check_unaligned_alns(self, aln_parse_function, *extra_args):\n fake_aln_unaligned_1 = Fake_HTSeq_aln('AAA', 'name', unaligned=True, optional_field_data={'XM':1})\n fake_aln_unaligned_2 = Fake_HTSeq_aln('AAA', 'name', unaligned=True, optional_field_data={})\n fake_aln_multi_aligned_1 = Fake_HTSeq_aln('AAA', 'name', unaligned=True, optional_field_data={'XM':2})\n fake_aln_multi_aligned_2 = Fake_HTSeq_aln('AAA', 'name', unaligned=True, optional_field_data={'XM':20})\n assert aln_parse_function(fake_aln_unaligned_1, *extra_args) == SPECIAL_POSITIONS.unaligned\n assert aln_parse_function(fake_aln_unaligned_2, *extra_args) == SPECIAL_POSITIONS.unaligned\n assert aln_parse_function(fake_aln_multi_aligned_1, *extra_args) == SPECIAL_POSITIONS.multi_aligned\n assert aln_parse_function(fake_aln_multi_aligned_2, *extra_args) == SPECIAL_POSITIONS.multi_aligned", "def get_alignment_from(tree):\r\n msa = []\r\n for node in tree.get_terminals():\r\n alignment = self.msa_by_name[node.name.split(' ')[0]]\r\n if msa:\r\n msa.append(alignment)\r\n else:\r\n msa = MultipleSeqAlignment([alignment])\r\n\r\n return msa", "def init_basic_aligner(allow_mismatches=False):\n a = Align.PairwiseAligner()\n if allow_mismatches:\n a.mismatch_score = -1\n a.gap_score = -3\n a.target_gap_score = -np.inf\n if not allow_mismatches:\n a.mismatch = -np.inf\n a.mismatch_score = -np.inf\n return a", "def __extract_patterns_and_spaces(self):\n\n def __decorate_nodes(nodes, space):\n \"\"\"\n Performs a backward search from a list of pattern nodes and assigns a set of search spaces\n to all encountered nodes.\n :param nodes: List of pattern nodes that belongs to a search space\n :param space: List of search space id\n :return:\n \"\"\"\n for n in nodes:\n if n not in self.__node_spaces:\n self.__node_spaces[n] = set([])\n self.__node_spaces[n].add(space)\n pred_nodes = self.__plan_graph.subjects(AGORA.next, n)\n __decorate_nodes(pred_nodes, space)\n\n # Extract all search spaces in the plan and build a dictionary of subjects-to-ignore per each of them.\n # Ignored subjects are those that won't be dereferenced due to a explicit graph pattern (object) filter,\n # e.g. 
?s doap:name \"jenkins\" -> All ?s that don't match the filter will be ignored.\n self.__spaces = set(self.__plan_graph.subjects(RDF.type, AGORA.SearchSpace))\n self.__subjects_to_ignore = dict([(sp, set([])) for sp in self.__spaces])\n\n patterns = list(self.__plan_graph.subjects(RDF.type, AGORA.TriplePattern))\n for tp in patterns:\n # A triple pattern belongs to a UNIQUE search space\n space = list(self.__plan_graph.subjects(AGORA.definedBy, tp)).pop()\n self.__patterns[tp] = {'space': space}\n\n # Depending on the format of each triple pattern (either '?s a Concept' or '?s prop O'),\n # it is required to extract different properties.\n tp_pred = list(self.__plan_graph.objects(tp, predicate=AGORA.predicate)).pop()\n\n if tp_pred == RDF.type: # ?s a Concept\n self.__patterns[tp]['type'] = list(self.__plan_graph.objects(tp, predicate=AGORA.object)).pop()\n try:\n check_type = list(self.__plan_graph.objects(tp, predicate=AGORA.checkType)).pop().toPython()\n except IndexError:\n check_type = True\n self.__patterns[tp]['check'] = check_type\n else: # ?s prop O\n self.__patterns[tp]['property'] = tp_pred\n tp_obj = list(self.__plan_graph.objects(tp, predicate=AGORA.object)).pop()\n if (tp_obj, RDF.type, AGORA.Literal) in self.__plan_graph: # In case O is a Literal\n self.__patterns[tp]['filter_object'] = list(self.__plan_graph.objects(tp_obj, AGORA.value)).pop()\n elif isinstance(tp_obj, URIRef):\n self.__patterns[tp]['filter_object'] = tp_obj\n\n tp_sub = list(self.__plan_graph.objects(tp, predicate=AGORA.subject)).pop()\n if isinstance(tp_sub, URIRef):\n self.__patterns[tp]['filter_subject'] = tp_sub\n\n # Get all pattern nodes (those that have a byPattern properties) of the search plan and search backwards\n # in order to set the scope of each search space.\n nodes = list(self.__plan_graph.subjects(AGORA.byPattern, tp))\n for n in nodes:\n if n not in self.__node_patterns:\n self.__node_patterns[n] = set([])\n self.__node_patterns[n].add(tp)\n __decorate_nodes(nodes, space)", "def test_parallel_align_seqs_pynast(self):\r\n\r\n params = {\r\n 'min_percent_id': 0.75,\r\n 'min_length': 15,\r\n 'template_fp': self.template_fp,\r\n 'pairwise_alignment_method': 'uclust',\r\n 'blast_db': None\r\n }\r\n\r\n app = ParallelAlignSeqsPyNast()\r\n r = app(self.inseqs1_fp,\r\n self.test_out,\r\n params,\r\n job_prefix='PTEST',\r\n poll_directly=True,\r\n suppress_submit_jobs=False)\r\n # confirm that the total number of output sequences equals the total\r\n # number of input sequences\r\n num_input_seqs = count_seqs_in_filepaths([self.inseqs1_fp])[1]\r\n num_template_seqs = count_seqs_in_filepaths([self.template_fp])[1]\r\n num_output_seqs = \\\r\n count_seqs_in_filepaths(glob(join(self.test_out, '*fasta')))[1] \\\r\n - num_input_seqs - num_template_seqs\r\n self.assertEqual(num_input_seqs, num_output_seqs)", "def test_mock_pysam_parse_all():\n mock_sam_path = os.path.join(testdir, \"data\", \"mock.parse-all.sam\")\n mock_chroms_path = os.path.join(testdir, \"data\", \"mock.chrom.sizes\")\n try:\n result = subprocess.check_output(\n [\n \"python\",\n \"-m\",\n \"pairtools\",\n \"parse\",\n \"--walks-policy\",\n \"all\",\n \"-c\",\n mock_chroms_path,\n \"--add-pair-index\",\n mock_sam_path,\n ],\n ).decode(\"ascii\")\n except subprocess.CalledProcessError as e:\n print(e.output)\n print(sys.exc_info())\n raise e\n\n # check if the header got transferred correctly\n sam_header = [l.strip() for l in open(mock_sam_path, \"r\") if l.startswith(\"@\")]\n pairsam_header = [l.strip() for l in 
result.split(\"\\n\") if l.startswith(\"#\")]\n for l in sam_header:\n assert any([l in l2 for l2 in pairsam_header])\n\n # check that the pairs got assigned properly\n id_counter = 0\n prev_id = \"\"\n for l in result.split(\"\\n\"):\n if l.startswith(\"#\") or not l:\n continue\n\n if prev_id == l.split(\"\\t\")[0]:\n id_counter += 1\n else:\n id_counter = 0\n prev_id = l.split(\"\\t\")[0]\n\n assigned_pair = l.split(\"\\t\")[1:8] + l.split(\"\\t\")[-2:]\n simulated_pair = (\n l.split(\"CT:Z:SIMULATED:\", 1)[1]\n .split(\"\\031\", 1)[0]\n .split(\"|\")[id_counter]\n .split(\",\")\n )\n print(assigned_pair)\n print(simulated_pair, prev_id)\n print()\n\n assert assigned_pair == simulated_pair", "def preprocess(args, id2info, mapping):\n polyline_spans = []\n keys = list(id2info.keys())\n assert 'AV' in keys\n assert 'AGENT' in keys\n keys.remove('AV')\n keys.remove('AGENT')\n keys = ['AGENT', 'AV'] + keys\n vectors = []\n two_seconds = mapping['two_seconds']\n mapping['trajs'] = []\n mapping['agents'] = []\n for id in keys:\n polyline = {}\n\n info = id2info[id]\n start = len(vectors)\n if args.no_agents:\n if id != 'AV' and id != 'AGENT':\n break\n\n agent = []\n for i, line in enumerate(info):\n if larger(line[TIMESTAMP], two_seconds):\n break\n agent.append((line[X], line[Y]))\n\n if args.visualize:\n traj = np.zeros([args.hidden_size])\n for i, line in enumerate(info):\n if larger(line[TIMESTAMP], two_seconds):\n traj = traj[:i * 2].copy()\n break\n traj[i * 2], traj[i * 2 + 1] = line[X], line[Y]\n if i == len(info) - 1:\n traj = traj[:(i + 1) * 2].copy()\n traj = traj.reshape((-1, 2))\n mapping['trajs'].append(traj)\n\n for i, line in enumerate(info):\n if larger(line[TIMESTAMP], two_seconds):\n break\n x, y = line[X], line[Y]\n if i > 0:\n # print(x-line_pre[X], y-line_pre[Y])\n vector = [line_pre[X], line_pre[Y], x, y, line[TIMESTAMP], line[OBJECT_TYPE] == 'AV',\n line[OBJECT_TYPE] == 'AGENT', line[OBJECT_TYPE] == 'OTHERS', len(polyline_spans), i]\n vectors.append(get_pad_vector(vector))\n line_pre = line\n\n end = len(vectors)\n if end - start == 0:\n assert id != 'AV' and id != 'AGENT'\n else:\n mapping['agents'].append(np.array(agent))\n\n polyline_spans.append([start, end])\n\n assert_(len(mapping['agents']) == len(polyline_spans))\n\n assert len(vectors) <= max_vector_num\n\n t = len(vectors)\n mapping['map_start_polyline_idx'] = len(polyline_spans)\n if args.use_map:\n vectors, polyline_spans = get_sub_map(args, mapping['cent_x'], mapping['cent_y'], mapping['city_name'],\n vectors=vectors,\n polyline_spans=polyline_spans, mapping=mapping)\n\n # logging('len(vectors)', t, len(vectors), prob=0.01)\n\n matrix = np.array(vectors)\n # matrix = np.array(vectors, dtype=float)\n # del vectors\n\n # matrix = torch.zeros([len(vectors), args.hidden_size])\n # for i, vector in enumerate(vectors):\n # for j, each in enumerate(vector):\n # matrix[i][j].fill_(each)\n\n labels = []\n info = id2info['AGENT']\n info = info[mapping['agent_pred_index']:]\n if not args.do_test:\n if 'set_predict' in args.other_params:\n pass\n else:\n assert len(info) == 30\n for line in info:\n labels.append(line[X])\n labels.append(line[Y])\n\n if 'set_predict' in args.other_params:\n if 'test' in args.data_dir[0]:\n labels = [0.0 for _ in range(60)]\n\n if 'goals_2D' in args.other_params:\n point_label = np.array(labels[-2:])\n mapping['goals_2D_labels'] = np.argmin(get_dis(mapping['goals_2D'], point_label))\n\n if 'lane_scoring' in args.other_params:\n stage_one_label = 0\n polygons = mapping['polygons']\n 
min_dis = 10000.0\n for i, polygon in enumerate(polygons):\n temp = np.min(get_dis(polygon, point_label))\n if temp < min_dis:\n min_dis = temp\n stage_one_label = i\n\n mapping['stage_one_label'] = stage_one_label\n\n mapping.update(dict(\n matrix=matrix,\n labels=np.array(labels).reshape([30, 2]),\n polyline_spans=[slice(each[0], each[1]) for each in polyline_spans],\n labels_is_valid=np.ones(args.future_frame_num, dtype=np.int64),\n eval_time=30,\n ))\n\n return mapping", "def test_parser(self):\n parser = hhsuite.FastaParser()\n results = parser.run(self.pipeline)\n self.assertEqual(\n results[\"templates\"][0][\"sequence_alignments\"], {\n \"sequence\": \"---A-A-----\",\n \"query\": \"XXAB-CDEFXX\"\n })\n\n self.assertEqual(\n results[\"templates\"][1][\"sequence_alignments\"], {\n \"foo\": \"---A------\",\n \"sequence\": \"--GG------\",\n \"query\": \"XXABCDEFXX\"\n })", "def MapAlignment(entry, map_a2b):\n\n is_positive = entry.mSbjctStrand == \"+\"\n\n if is_positive:\n sbjct_pos = entry.mSbjctGenomeFrom + 1\n else:\n # no -1, as it starts on the residue\n sbjct_pos = map_a2b.getRowTo() - entry.mSbjctGenomeFrom\n\n last_mapped_pos = map_a2b.mapRowToCol(sbjct_pos)\n\n if last_mapped_pos == 0:\n raise ValueError, \"unmappable starting residue %i\" % sbjct_pos\n\n new_alignment = []\n\n if is_positive:\n entry.mSbjctGenomeFrom = last_mapped_pos - 1\n else:\n entry.mSbjctGenomeFrom = map_a2b.getColTo() - last_mapped_pos\n\n total_d = 0\n for state, l_query, l_sbjct in entry.mMapPeptide2Genome[:-1]:\n\n if is_positive:\n sbjct_pos += l_sbjct\n else:\n sbjct_pos -= l_sbjct\n\n mapped_pos = map_a2b.mapRowToCol(sbjct_pos)\n\n if mapped_pos == 0:\n for x in 1, 2:\n if map_a2b.mapRowToCol(sbjct_pos + x):\n sbjct_pos += x\n mapped_pos = map_a2b.mapRowToCol(sbjct_pos)\n break\n else:\n raise ValueError, \"unmappable residue %i\" % sbjct_pos\n\n d = abs(mapped_pos - last_mapped_pos)\n total_d += d\n new_alignment.append((state, l_query, d))\n\n last_mapped_pos = mapped_pos\n\n state, l_query, l_sbjct = entry.mMapPeptide2Genome[-1]\n\n # process last state, map only to last residue\n if is_positive:\n sbjct_pos += l_sbjct - 1\n else:\n sbjct_pos -= l_sbjct - 1\n\n mapped_pos = map_a2b.mapRowToCol(sbjct_pos)\n\n if mapped_pos == 0:\n raise ValueError, \"unmappable residue %i\" % sbjct_pos\n\n d = abs(mapped_pos - last_mapped_pos) + 1\n total_d += d\n\n new_alignment.append((state, l_query, d))\n\n entry.mSbjctGenomeTo = entry.mSbjctGenomeFrom + total_d\n\n entry.mMapPeptide2Genome = new_alignment", "def postprocess_segments(self):\n # make segs a list of mask arrays, it's easier to store\n # as there is a hdf5 equivalent\n for iseg, seg in enumerate(self.segs):\n mask = np.zeros(self._adata.shape[0], dtype=bool)\n mask[seg] = True\n self.segs[iseg] = mask\n # convert to arrays\n self.segs = np.array(self.segs)\n self.segs_tips = np.array(self.segs_tips)", "def TM_align(PU_name, ref_pdb_name, peel_longer):\n cmdLine_TM = (\"bin/TMalign64 results/\" + PU_name + '.pdb' +\n \" results/\" + ref_pdb_name + '.pdb' + \" -o \" + \"results/\" +\n PU_name + '.sup')\n\n out_TM = sub.Popen(cmdLine_TM.split(), stdout=sub.PIPE).communicate()[0]\n lines_TM = out_TM.decode()\n\n if peel_longer: # If peeled prot is longer, we get \"normalized by chain 2\"\n regex_TMalign = re.compile(\"(?:TM-score.+)([0]\\.[0-9]*)(?:.+Chain_2)\")\n else: # Else we get TMscore \"normalized by chain 1\"\n regex_TMalign = re.compile(\"(?:TM-score.+)([0]\\.[0-9]*)(?:.+Chain_1)\")\n searchObj = re.search(regex_TMalign, lines_TM)\n\n # 
Remove useless files:\n for ext in (\".sup_all_atm_lig\", \".sup_all\", \".sup\"):\n os.remove(\"results/\" + PU_name + ext)\n\n return float(searchObj.group(1))", "def mergeChainedAlignedSegments(chainedAlignedSegments, refSequence, readSequence):\n cAR = pysam.AlignedSegment()\n aR = chainedAlignedSegments[0]\n cAR.query_name = aR.query_name\n \n #Parameters we don't and therefore set properly\n #cAR.flag = aR.flag\n #cAR.mapq = aR.mapq\n #cAR.mrnm = 0\n #cAR.mpos=0\n #cAR.isize=0\n #cAR.qual = \"<\" * len(readSequence)\n #cAR.tags = aR.tags \n cAR.next_reference_id = -1\n cAR.reference_start = aR.reference_start #Reference start\n cAR.is_reverse = aR.is_reverse\n cAR.query_sequence = reverseComplement(readSequence) if cAR.is_reverse else readSequence\n cAR.reference_id = aR.reference_id\n cigarList = []\n pPos = aR.reference_start\n #Iterate from the other end of the sequence if reversed\n pQPos = -(len(readSequence)-1) if cAR.is_reverse else 0 \n \n for aR in chainedAlignedSegments:\n assert cAR.is_reverse == aR.is_reverse\n #Add a deletion representing the preceding unaligned reference positions\n assert aR.reference_start >= pPos\n if aR.reference_start > pPos:\n cigarList.append((2, aR.reference_start - pPos))\n pPos = aR.reference_start \n \n #Add an insertion representing the preceding unaligned read positions\n #make it a soft clip if it is the first chained alignment\n qPos = getFirstNonClippedPositionInRead(aR, readSequence)\n assert qPos >= pQPos\n if qPos > pQPos:\n cigarList.append((4 if aR == chainedAlignedSegments[0] else 1, qPos - pQPos)) \n pQPos = qPos\n \n #Add the operations of the cigar, filtering hard and soft clipping\n for op, length in aR.cigar:\n assert op in (0, 1, 2, 4, 5)\n if op in (0, 1, 2):\n cigarList.append((op, length))\n if op in (0, 2): #Is match or deletion\n pPos += length\n if op in (0, 1): #Is match or insertion\n pQPos += length\n \n assert pPos <= len(refSequence)\n \n #Set reference end coordinate (which is exclusive)\n #cAR.reference_end = pPos #We don't do this because it is set by cigar string\n \n #Now add any trailing, necessary soft clipping\n if cAR.is_reverse:\n assert pQPos <= 1\n if pQPos < 1:\n cigarList.append((4, -pQPos + 1))\n else:\n assert pQPos <= len(readSequence)\n if pQPos < len(readSequence):\n cigarList.append((4, len(readSequence) - pQPos))\n \n cAR.cigar = tuple(cigarList)\n \n #Check ops\n for op, length in cAR.cigar: #We should have no hard clipped ops\n assert op in (0, 1, 2, 4)\n \n #Reference sequence check coordinates\n assert sum([ length for op, length in cigarList if op in (0, 2)]) == cAR.reference_end - cAR.reference_start\n assert cAR.reference_start >= 0 and cAR.reference_start < len(refSequence)\n assert cAR.reference_end >= 0 and cAR.reference_end <= len(refSequence)\n \n #Read sequence check coordinates\n assert cAR.query_alignment_start >= 0 and cAR.query_alignment_start < len(readSequence)\n assert cAR.query_alignment_end >= 0 and cAR.query_alignment_end <= len(readSequence)\n assert cAR.query_alignment_start + sum([ length for op, length in cigarList if op in (0, 1)]) == cAR.query_alignment_end\n \n return cAR", "def _parse_unground_propositions(self, array):\n prop_list = []\n if array[0:3] == ['(', 'and', '(']:\n array = array[2:-1]\n #Split array into blocks\n opencounter = 0\n prop = []\n for word in array:\n if word == '(':\n opencounter += 1\n if word == ')':\n opencounter -= 1\n prop.append(word)\n if opencounter == 0:\n prop_list.append(self._parse_unground_proposition(prop))\n prop = []\n 
#print array[:array.index(')') + 1]\n return prop_list", "def parse(\n sam_path, chroms_path, output, output_parsed_alignments, output_stats, **kwargs\n):\n parse_py(\n sam_path, chroms_path, output, output_parsed_alignments, output_stats, **kwargs\n )", "def seq_align(string1,string2,mismatch_penalty,gap_penalty):\n\n # define 2x2 matrix\n matrix = []\n for i in range(len(string1)+1):\n if i == 0:\n matrix.append(list([gap_penalty * x for x in range(len(string2)+1)]))\n else:\n matrix.append(list([gap_penalty * i if x == 0 else None for x in range(len(string2)+1)]))\n\n # populate matrix by looping through the strings and finding optimal value for each spot\n for i in range(len(string1)):\n for j in range(len(string2)):\n if string1[i] == string2[j]:\n val1 = 0 + matrix[i][j]\n else:\n val1 = mismatch_penalty + matrix[i][j]\n val2 = gap_penalty + matrix[i][j+1]\n val3 = gap_penalty + matrix[i+1][j]\n min_val = min(val1,val2,val3)\n matrix[i+1][j+1] = min_val\n\n\n # define values to use while retracing\n result_str1 = ''\n result_str2 = ''\n i = len(matrix)-1\n j = len(matrix[0])-1\n\n # trace through matrix to find the optimal character alignment\n while i > 0 and j > 0:\n val1 = matrix[i-1][j-1]\n val2 = matrix[i-1][j]\n val3 = matrix[i][j-1]\n min_val = min(val1,val2,val3)\n if val1 == min_val:\n result_str1 += string1[i-1]\n result_str2 += string2[j-1]\n i -= 1\n j -= 1\n elif val2 == min_val:\n result_str1 += \"-\"\n result_str2 += string2[j-1]\n i -= 1\n else:\n result_str1 += string1[i-1]\n result_str2 += \"-\"\n j -= 1\n\n # for any leftover j values\n if i == 0:\n while j > 0:\n result_str1 += '-'\n result_str2 += string2[j]\n j -=1\n\n # for any leftover i values\n if j == 0:\n while i > 0:\n result_str1 += string1[i]\n result_str2 += \"-\"\n i -= 1\n\n return matrix[len(matrix)-1][len(matrix[0])-1], result_str1[::-1], result_str2[::-1]", "def apply_lane_mask_and_gap_filter(fastalines, mask,\r\n allowed_gap_frac=1 - finfo(float).eps,\r\n verbose=False, entropy_threshold=None):\r\n if entropy_threshold is not None and not (0 < entropy_threshold < 1):\r\n raise ValueError('Entropy threshold parameter (-e) needs to be '\r\n 'between 0 and 1')\r\n\r\n if mask is not None:\r\n mask = mask_to_positions(mask)\r\n prefilter_f = lambda x: get_masked_string(x, mask)\r\n else:\r\n prefilter_f = lambda x: x\r\n\r\n # resolve the gaps based on masked sequence\r\n gapcounts = None\r\n gapmask = slice(None)\r\n if allowed_gap_frac < 1:\r\n seq_count = 0.0\r\n for seq_id, seq in parse_fasta(fastalines):\r\n seq_count += 1\r\n seq = seq.replace('.', '-')\r\n\r\n seq = prefilter_f(seq)\r\n\r\n if gapcounts is None:\r\n gapcounts = zeros(len(seq))\r\n\r\n gapcounts[find_gaps(seq)] += 1\r\n\r\n gapmask = (gapcounts / seq_count) <= allowed_gap_frac\r\n gapmask = mask_to_positions(gapmask)\r\n attempt_file_reset(fastalines)\r\n\r\n # resolve the entropy mask\r\n if entropy_threshold is not None:\r\n ent_mask = generate_lane_mask(fastalines, entropy_threshold, gapmask)\r\n ent_mask = mask_to_positions(ent_mask)\r\n entropy_filter_f = lambda x: get_masked_string(x, ent_mask)\r\n attempt_file_reset(fastalines)\r\n else:\r\n entropy_filter_f = prefilter_f\r\n\r\n # mask, degap, and yield\r\n for seq_id, seq in parse_fasta(fastalines):\r\n seq = seq.replace('.', '-')\r\n\r\n # The order in which the mask is applied depends on whether a mask is\r\n # specified or inferred. 
Specifically, if a precomputed mask is\r\n # provided (e.g., the Lane mask) then it must be applied prior to a\r\n # gap filter, whereas if a mask is inferred then it must be applied\r\n # after a gap filter.\r\n if mask is None:\r\n seq = get_masked_string(seq, gapmask)\r\n seq = entropy_filter_f(seq)\r\n else:\r\n seq = entropy_filter_f(seq)\r\n seq = get_masked_string(seq, gapmask)\r\n\r\n yield \">%s\\n\" % seq_id\r\n yield \"%s\\n\" % seq", "def _forwardParsimony(self, aln):\n if self.sequence == None: # no sequence has been assigned\n if self.nChildren() == 0: # no children, so terminal, cannot propagate scores\n raise RuntimeError(\"No sequence assigned to leaf node:\", self.label)\n scores = [None for _ in range(self.nChildren())]\n for i in range(self.nChildren()):\n scores[i] = self.children[i]._forwardParsimony(aln)\n # for each position in the alignment,\n # introduce (initially zero) score for each symbol in alphabet\n self.seqscores = [[0 for _ in aln.alphabet] for col in range(aln.alignlen)]\n # for each position in the alignment,\n # allocate a position to put the each child symbol from which each current node symbol score was determined\n self.backptr = [[[None for _ in aln.alphabet] for _ in range(aln.alignlen)] for _ in range(self.nChildren())]\n for col in range(aln.alignlen):\n for i in range(self.nChildren()):\n # left child will contribute first\n for a_parent in range(len(aln.alphabet)):\n best_score = +9999999\n best_symb = 0\n for a in range(len(aln.alphabet)):\n score = (scores[i][col][a] + (\n 1 if a != a_parent else 0)) # if we want to weight scores, this would need to change\n if score < best_score:\n best_symb = a\n best_score = score\n self.seqscores[col][a_parent] += best_score\n self.backptr[i][col][a_parent] = best_symb\n else:\n self.seqscores = [[0 if a == sym else 999999 for a in aln.alphabet] for sym in\n self.sequence] # if we want to weight scores, this would need to change\n return self.seqscores", "def eval_pos_affine():\n root_dir = \"/home/sdb/wangshentao/myspace/thesis/data/VisDrone2019-MOT-test-dev/\"\n seq_dir = root_dir + \"sequences/\"\n annotations_dir = root_dir + 'annotations/'\n affine_dir = root_dir + \"affine_orig/\"\n all_iou = []\n seqs_sample = '''\n uav0000249_00001_v\n uav0000249_02688_v\n '''\n seqs_str = seqs_sample\n seqs = [seq.strip() for seq in seqs_str.split()]\n for seq in seqs:\n image_file = os.listdir(os.path.join(seq_dir, seq))[0]\n image = cv2.imread(os.path.join(seq_dir, seq, image_file))\n orig_h, orig_w = image.shape[:2]\n\n with open(os.path.join(affine_dir, seq+'.pickle'), 'rb') as fin:\n affine_dict = pickle.load(fin)\n\n bbox, frame_id = get_frame_bbox(annotations_dir, seq + '.txt')\n predict_bbox = []\n for i in range(len(bbox)):\n # convert to std resolution\n bbox[i][:, 0] = bbox[i][:, 0]\n bbox[i][:, 1] = bbox[i][:, 1]\n bbox[i][:, 2] = bbox[i][:, 2]\n bbox[i][:, 3] = bbox[i][:, 3]\n\n # for j in range(bbox[i].shape[0]):\n # bbox[i][j] = tlwh_to_tlbr(bbox[i][j])\n for idx in range(len(bbox)):\n kalman_filter = KalmanFilter()\n trace_bbox = bbox[idx]\n trace_predict_bbox = []\n mean, covariance = kalman_filter.initiate(tlwh_to_xyah(trace_bbox[0]))\n for i in range(1, trace_bbox.shape[0]):\n # i-1 to i M\n frame_name = \"{:07d}.jpg\".format(int(frame_id[idx][i-1]))\n M = affine_dict[frame_name]\n bbox_infer = tlwh(mean)\n bbox_infer = tlwh_to_tlbr(bbox_infer)\n bbox_expand = np.ones((3, 4))\n bbox_expand[:2, 0] = bbox_infer[:2]\n bbox_expand[:2, 1] = bbox_infer[2:]\n # tr\n bbox_expand[:2, 2] = 
bbox_infer[2], bbox_infer[1]\n # bl\n bbox_expand[:2, 3] = bbox_infer[0], bbox_infer[3]\n bbox_expand = np.dot(M, bbox_expand)\n for t in range(bbox_expand.shape[1]):\n bbox_expand[:2, t] /= bbox_expand[2, t]\n # bbox_infer[:2] = bbox_expand[:2, 0]\n # bbox_infer[2:] = bbox_expand[:2, 1]\n # get the out bounding bbox\n bbox_infer[0] = min(bbox_expand[0, :])\n bbox_infer[1] = min(bbox_expand[1, :])\n bbox_infer[2] = max(bbox_expand[0, :])\n bbox_infer[3] = max(bbox_expand[1, :])\n bbox_infer = tlbr_to_tlwh(bbox_infer)\n # print(bbox_infer)\n trace_predict_bbox.append(bbox_infer)\n # move = mean[:4] - tlwh_to_xyah(bbox_infer)\n # if np.sum(np.square(move)[:2]) > 32*32:\n # print(move)\n # print(idx, frame_name)\n # print(mean)\n mean[:4] = tlwh_to_xyah(bbox_infer)\n # print(mean)\n mean, covariance = kalman_filter.predict(mean, covariance)\n mean, covariance = kalman_filter.update(mean, covariance, tlwh_to_xyah(trace_bbox[i]))\n\n trace_predict_bbox = np.array(trace_predict_bbox)\n for i in range(trace_predict_bbox.shape[0]):\n trace_predict_bbox[i] = tlwh_to_tlbr(trace_predict_bbox[i])\n for i in range(trace_bbox.shape[0]):\n trace_bbox[i] = tlwh_to_tlbr(trace_bbox[i])\n\n predict_bbox.append(trace_predict_bbox)\n bbox[idx] = bbox[idx][1:]\n frame_id[idx] = frame_id[idx][1:]\n assert bbox[idx].shape[0] == predict_bbox[idx].shape[0]\n iou = []\n for i in range(len(bbox)):\n trace_iou = []\n trace_bbox = bbox[i]\n trace_predict_bbx = predict_bbox[i]\n for j in range(trace_bbox.shape[0]):\n iou_val = bbox_ious(np.ascontiguousarray(trace_bbox[j][np.newaxis, :], dtype=np.float),\n np.ascontiguousarray(trace_predict_bbx[j][np.newaxis, :], dtype=np.float))\n trace_iou.append(iou_val)\n iou.append(np.array(trace_iou))\n iou = [int(np.mean(i) * 100) for i in iou]\n all_iou += iou\n bins = np.zeros(101)\n for i in all_iou:\n bins[i] += 1\n plt.bar(np.arange(101), bins)\n plt.ylabel('num')\n plt.xlabel('iou(*100)')\n plt.show()", "def parseInput(toParse):\n splitified = toParse.split('--------')\n statesPath = splitified[0].rstrip().strip()\n availableStates = len(splitified[1].rstrip().strip().split())\n probMatrix = splitified[2].rstrip().strip().splitlines()\n\n return(probMatrix, statesPath, availableStates)", "def degap_fasta_aln(seqs):\r\n\r\n for (label, seq) in seqs:\r\n yield DNASequence(seq, id=label).degap()", "def parse_seqscreen(input, output):\n \n df = pd.read_csv(input, sep='\\t', index_col=0, na_values='-')\n pathogenicity_features = df.loc[:, 'disable_organ':'virulence_regulator'].fillna(0).astype(int).sum(axis=1)\n\n pathogenic_genes_df = df.loc[pathogenicity_features > 0, ['taxid', \n 'centrifuge_multi_tax', \n 'diamond_multi_tax',\n 'go',\n 'multi_taxids_confidence',\n 'go_id_confidence',\n 'size',\n 'organism',\n 'gene_name',\n 'uniprot',\n 'uniprot evalue']]\n \n pathogenic_genes_df['taxid'] = pathogenic_genes_df['taxid'].astype(int)\n \n pathogenic_genes_df.index.name = 'gene'\n \n \n pathogenic_genes = pathogenicity_features[pathogenicity_features > 0].index\n \n gene_pathogenicity_features_dict = {}\n \n for gene, row in df.loc[pathogenic_genes,'disable_organ':'virulence_regulator'].iterrows():\n gene_pathogenicity_features_dict[gene] = ';'.join(row[row>0].index)\n \n pathogenicity_df = pd.DataFrame.from_dict(gene_pathogenicity_features_dict, \n orient='index',\n columns=['Pathogenicity'])\n\n pathogenicity_df.index.name = 'gene'\n\n pd.merge(pathogenic_genes_df, pathogenicity_df, left_index=True, right_index=True).to_csv(output, sep='\\t')", "def 
pairsParser(seqBlock,names):\n for name in names:\n seq = []\n sIndx = [] #start index, where in the line the sequence start\n struct = [] #structure lines\n record = False\n for line in seqBlock:\n if line.startswith(name+' '):\n tmp = line.split()\n #if seq length is shorter then 80 for one seq and longer\n #for another seq the following block will be empty for the\n #shorter sequence. this if statement protects against that\n if len(tmp) == 4: \n try:\n seq.append(tmp[2])#[name,start nr,seq,end nr]\n except:\n print 'LINE',line\n print 'BLOCK', seqBlock\n sIndx.append(index(line,tmp[2])) \n record = True\n else:\n continue\n else:\n if record:\n record = False\n struct.append(line)\n\n###############################################################################\n# Construction of the full sequence and structure and then mapping each letter\n#in structure to a position\n\n Fseq = '' #full sequence\n Fstruct = '' #full structure\n for i in range(len(seq)):\n # slice out corresponding structure to sequence\n #so you can get the same index for structure and sequence\n tmpStruct = struct[i][sIndx[i]:(sIndx[i]+len(seq[i]))]\n Fseq = ''.join([Fseq,seq[i]])\n Fstruct = ''.join([Fstruct,tmpStruct])\n #Applies a position to every letter in structure sequence \n letterPos = zip(range(len(Fseq)),Fstruct)\n \n###############################################################################\n#Cunstruction of dictionary for where every letter in structure has a list of\n#positions corresponding to that of that letter in respect to the sequence\n\n alphabet = {}\n for pos, letter in letterPos:\n indices = []\n #if the dict contains the letter you want to add to that list\n if alphabet.__contains__(letter): \n indices = alphabet[letter]\n indices.append(pos)\n alphabet[letter] = indices\n #else you want to create a new list for that letter\n elif not letter==' ':\n indices.append(pos)\n alphabet[letter] = indices\n \n###############################################################################\n#Each list in alphabet needs to be split in two,\n#oL and cL (open and close list), to be able to fold the positions into pairs\n\n pairs = []\n for value in alphabet.values():\n middle = len(value)/2\n oL = value[:middle]\n cL = value[middle:]\n #pairs are created by making a tuple of the first in oL to\n #the last in cl, second in oL to second last in cL and so on\n pairs.extend(zip(oL,cL.__reversed__()))\n\n yield Pairs(pairs),Fseq", "def _read_next_alignment(self, stream):", "def _parse_unground_proposition(self, array):\n negative = False\n if array[1] == 'not':\n negative = True\n array = array[2:-1]\n return Predicate(array[1], array[2:-1], False, negative)", "def forward(self, rpn_cls_prob, rpn_bbox_pred, im_info):\n # 1. for each location i in a (H, W) grid:\n # generate A anchor boxes centered on cell i\n # apply predicted bbox deltas to each of the A anchors at cell i\n # 2. clip predicted boxes to image\n # 3. remove predicted boxes with either height or width < threshold\n # 4. sort all (proposal, score) pairs by score from highest to lowest\n # 5. take the top pre_nms_topN proposals before NMS\n # 6. apply NMS with a loose threshold (0.7) to the remaining proposals\n # 7. take after_nms_topN proposals after NMS\n # 8. 
return the top proposals\n \n \"\"\"Type conversion\"\"\"\n # predicted probability of fg object for each RPN anchor\n scores = rpn_cls_prob.data.cpu().numpy()\n # predicted achors transformations\n bbox_deltas = rpn_bbox_pred.data.cpu().numpy()\n # input image (height, width, scale), in which scale is the scale factor\n # applied to the original dataset image to get the network input image\n im_info = im_info.data.cpu().numpy()\n\n # 1. Generate proposals from bbox deltas and shifted anchors\n height, width = scores.shape[-2:]\n # Enumerate all shifted positions on the (H, W) grid\n shift_x = np.arange(0, width) * self._feat_stride\n shift_y = np.arange(0, height) * self._feat_stride\n shift_x, shift_y = np.meshgrid(shift_x, shift_y, copy=False)\n # Convert to (K, 4), K=H*W, where the columns are (dx, dy, dx, dy)\n # shift pointing to each grid location\n shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(),\n shift_y.ravel())).transpose()\n\n # Broacast anchors over shifts to enumerate all anchors at all positions\n # in the (H, W) grid:\n # - add A anchors of shape (1, A, 4) to\n # - K shifts of shape (K, 1, 4) to get\n # - all shifted anchors of shape (K, A, 4)\n # - reshape to (K*A, 4) shifted anchors\n num_images = scores.shape[0]\n A = self._num_anchors\n K = shifts.shape[0]\n all_anchors = self._anchors[np.newaxis, :, :] + shifts[:, np.newaxis, :]\n all_anchors = all_anchors.reshape((K * A, 4))\n # all_anchors = torch.from_numpy(all_anchors).type_as(scores)\n\n rois = np.empty((0, 5), dtype=np.float32)\n roi_probs = np.empty((0, 1), dtype=np.float32)\n for im_i in range(num_images):\n im_i_boxes, im_i_probs = self.proposals_for_one_image(\n im_info[im_i, :], all_anchors, bbox_deltas[im_i, :, :, :],\n scores[im_i, :, :, :])\n batch_inds = im_i * np.ones(\n (im_i_boxes.shape[0], 1), dtype=np.float32)\n im_i_rois = np.hstack((batch_inds, im_i_boxes))\n rois = np.append(rois, im_i_rois, axis=0)\n roi_probs = np.append(roi_probs, im_i_probs, axis=0)\n\n return rois, roi_probs # Note: ndarrays", "def process(\n self,\n name_and_reads: Tuple[str, Iterable[reads_pb2.Read]],\n ) -> Iterable[Tuple[str, List[reads_pb2.Read]]]:\n\n name, reads = name_and_reads[0], list(name_and_reads[1])\n # Note, examples will only be included in one of the initial counters since\n # we are returning early.\n if not reads:\n self.no_reads_counter.inc()\n return\n\n # Do not error for labels that have multiple alignments to correct molecule.\n # One of the alignments may be a supplementary alignment.\n if self.is_label and len(reads) > 1:\n logging.info('Unexpected: %d labels for %s', len(reads),\n reads[0].fragment_name)\n self.multiple_alignments_counter.inc()\n\n reads_copy = copy.deepcopy(reads)\n for read in reads_copy:\n assert read.aligned_sequence\n base_index = 0\n expanded_sequence = ''\n expanded_cigar_str = ''\n new_cigar_ops = []\n if not self.is_label:\n pw = struct_utils.get_int_field(read.info, 'pw')\n ip = struct_utils.get_int_field(read.info, 'ip')\n new_pw = []\n new_ip = []\n\n for op in read.alignment.cigar:\n # Skip over ops we don't want, such as soft clips.\n if op.operation not in dc_constants.OPS_TO_CONSIDER:\n base_index += op.operation_length\n continue\n if op.operation in dc_constants.READ_ADVANCING_OPS:\n start = base_index\n end = start + op.operation_length\n expanded_sequence += read.aligned_sequence[start:end]\n base_index += op.operation_length\n if not self.is_label:\n new_pw += pw[start:end]\n new_ip += ip[start:end]\n else:\n # Add a special token in 
sequence where we have deletion.\n expanded_sequence += dc_constants.GAP_OR_PAD * op.operation_length\n\n new_cigar_ops.append(op)\n op_char = cigar_utils.CIGAR_OPS_TO_CHAR[op.operation]\n expanded_cigar_str += op_char * op.operation_length\n\n # Update the read sequence.\n read.aligned_sequence = expanded_sequence\n assert len(read.aligned_sequence) == len(expanded_cigar_str)\n\n # Update the read cigar to only include ops that were kept.\n del read.alignment.cigar[:]\n read.alignment.cigar.extend(new_cigar_ops)\n\n # Save pw, ip, and expanded cigar string to be used downstream.\n if not self.is_label:\n struct_utils.set_int_field(read.info, 'pw', new_pw)\n struct_utils.set_int_field(read.info, 'ip', new_ip)\n # PW and IP won't be the same length as read.aligned_sequence here\n # because we haven't yet spaced out PW and IP based on gaps/padding.\n struct_utils.set_string_field(read.info, 'expanded_cigar',\n expanded_cigar_str)\n yield name, reads_copy", "def singleOptic2(n,misalign=np.zeros(6),srcdist=89.61e3+1.5e3,az=100.,\\\n returnRays=False,f=None,\\\n plist=[[0],[0],[0]],\\\n ax=100.):\n #Establish subannulus of rays\n r0 = conic.primrad(8426.,220.,8400.)\n r1 = conic.primrad(8426.+ax,220.,8400.)\n rays = sources.subannulus(r0,r1,az/220.,n,zhat=-1.)\n #Transform to node position\n tran.transform(rays,220,0,0,0,0,0)\n #Set up finite source distance\n raydist = sqrt(srcdist**2+rays[1]**2+rays[2]**2)\n l = rays[1]/raydist\n m = rays[2]/raydist\n n = -sqrt(1.-l**2-m**2)\n rays = [raydist,rays[1],rays[2],rays[3],l,m,n,rays[7],rays[8],rays[9]]\n #Align perfectly to beam\n tran.steerX(rays)\n #Apply misalignment\n tran.transform(rays,*misalign)\n #Place mirror\n tran.transform(rays,-220.,0,-8400.,0,0,0)\n## surf.wolterprimarynode(rays,220,8400.)\n surf.primaryLL(rays,220.,8400.,8426.+ax,8426.,az/220.,*plist)\n rays = tran.vignette(rays,ind=np.logical_and(rays[3]<8400.+ax,\\\n rays[3]>8400.))\n tran.itransform(rays,-220.,0.,-8400.,0,0,0)\n #Vignette rays not landing in active mirror area\n ind = np.logical_and(rays[3]>26.,rays[3]<(26.+ax))\n## ind = np.logical_and(np.abs(rays[2])<az/2.,indz)\n rays = tran.vignette(rays,ind=ind)\n #Reverse misalignment\n tran.itransform(rays,*misalign)\n #Reflect and go to surface\n tran.reflect(rays)\n if f is None:\n f = surf.focusI(rays)\n else:\n tran.transform(rays,0,0,f,0,0,0)\n surf.flat(rays)\n #Get centroid\n cx,cy = anal.centroid(rays)\n\n if returnRays is True:\n return rays\n \n return anal.hpd(rays)/abs(f)*180/pi*60**2,f,cx", "def _get_arg_MPQA(self, a_toks):\n ret = {}\n j = 0\n entry = ipol = None\n for i, (tok, pos) in enumerate(a_toks):\n if tok in MPQA:\n entry = MPQA[tok]\n ipol = entry[POL_IDX]\n if ipol == BOTH:\n continue\n elif ipol == POS:\n j = max(0, i - 3)\n if any(el[0] in NEGATIONS for el in a_toks[j:i]):\n ipol = \"negatedpos\"\n ret[ipol + '|' + entry[INTENS_IDX]] = 1.\n return ret", "def alignment_org(angle=0.1):\n proposal_id('2023_2', '311564_test')\n yield from alignement_gisaxs_multisample(angle=angle)\n RE.md['ai_0'] = piezo.th.user_setpoint.get()\n proposal_id('2023_2', '311564_Pettersson')", "def _v4_cmds(self, want, have, state=None):\n # The ip address cli does not allow removing primary addresses while\n # secondaries are present, but it does allow changing a primary to a\n # new address as long as the address is not a current secondary.\n # Be aware of scenarios where a secondary is taking over\n # the role of the primary, which must be changed in sequence.\n # In general, primaries/secondaries should change in this 
order:\n # Step 1. Remove secondaries that are being changed or removed\n # Step 2. Change the primary if needed\n # Step 3. Merge secondaries\n\n # Normalize inputs (add tag key if not present)\n for i in want:\n i[\"tag\"] = i.get(\"tag\")\n for i in have:\n i[\"tag\"] = i.get(\"tag\")\n\n merged = True if state == \"merged\" else False\n replaced = True if state == \"replaced\" else False\n overridden = True if state == \"overridden\" else False\n\n # Create secondary and primary wants/haves\n sec_w = [i for i in want if i.get(\"secondary\")]\n sec_h = [i for i in have if i.get(\"secondary\")]\n pri_w = [i for i in want if not i.get(\"secondary\")]\n pri_h = [i for i in have if not i.get(\"secondary\")]\n pri_w = pri_w[0] if pri_w else {}\n pri_h = pri_h[0] if pri_h else {}\n cmds = []\n\n # Remove all addrs when no primary is specified in want (pri_w)\n if pri_h and not pri_w and (replaced or overridden):\n cmds.append(\"no ip address\")\n return cmds\n\n # 1. Determine which secondaries are changing and remove them. Need a have/want\n # diff instead of want/have because a have sec addr may be changing to a pri.\n sec_to_rmv = []\n sec_diff = self.diff_list_of_dicts(sec_h, sec_w)\n for i in sec_diff:\n if overridden or [\n w for w in sec_w if w[\"address\"] == i[\"address\"]\n ]:\n sec_to_rmv.append(i[\"address\"])\n\n # Check if new primary is currently a secondary\n if pri_w and [h for h in sec_h if h[\"address\"] == pri_w[\"address\"]]:\n if not overridden:\n sec_to_rmv.append(pri_w[\"address\"])\n\n # Remove the changing secondaries\n cmds.extend([\"no ip address %s secondary\" % i for i in sec_to_rmv])\n\n # 2. change primary\n if pri_w:\n diff = dict(set(pri_w.items()) - set(pri_h.items()))\n if diff:\n addr = diff.get(\"address\") or pri_w.get(\"address\")\n cmd = \"ip address %s\" % addr\n tag = diff.get(\"tag\")\n cmd += \" tag %s\" % tag if tag else \"\"\n cmds.append(cmd)\n\n # 3. process remaining secondaries last\n sec_w_to_chg = self.diff_list_of_dicts(sec_w, sec_h)\n for i in sec_w_to_chg:\n cmd = \"ip address %s secondary\" % i[\"address\"]\n cmd += \" tag %s\" % i[\"tag\"] if i[\"tag\"] else \"\"\n cmds.append(cmd)\n\n return cmds", "def _postprocess(self, output: Dict[str, np.ndarray]):\n # Slice to remove padding, omitting initial [CLS] and final [SEP]\n slicer = slice(1, output.pop(\"ntok\") - 1)\n output[\"tokens\"] = self.tokenizer.convert_ids_to_tokens(\n output.pop(\"input_ids\")[slicer])\n probas = output.pop(\"probas\")\n\n # Predictions at every position, regardless of masking.\n output[\"pred_tokens\"] = self._get_topk_tokens(probas[slicer]) # pytype: disable=container-type-mismatch\n\n return output", "def validateInputParams(self): \n # Return dictionary\n retval = {}\n retval['status'] = True\n retval['axis'] = ''\n \n # Get the separationaxis of input MMS. 
\n sepaxis = ph.axisType(self.__args['vis'])\n if sepaxis.isspace() or sepaxis.__len__() == 0:\n sepaxis = 'unknown'\n elif sepaxis == 'scan,spw':\n sepaxis = 'auto'\n \n #Get list of subMSs in MMS\n subMSList = ParallelTaskHelper.getReferencedMSs(self.__args['vis'])\n \n if self.__taskname == \"mstransform\":\n \n if (self.__args['combinespws'] == True or self.__args['nspw'] > 1) and \\\n (self.__args['timeaverage'] == False):\n spwsel = self.__getSpwIds(self.__args['vis'], self.__args['spw']) \n # Get dictionary with spwids of all subMS in the MMS\n spwdict = ph.getScanSpwSummary(subMSList) \n # For each subMS, check if it has the spw selection\n for subms in subMSList:\n subms_spwids = ph.getSubMSSpwIds(subms, spwdict)\n slist = map(str,subms_spwids)\n # Check if the subms contains all the selected spws\n if not self.__isSpwContained(spwsel, slist):\n casalog.post('Cannot combine or separate spws in parallel because the subMSs do not contain all the selected spws',\\\n 'WARN')\n # Set the new separation axis for the output\n retval['status'] = False\n retval['axis'] = 'scan'\n break\n \n elif (self.__args['timeaverage'] == True and self.__args['timespan'] == 'scan') and \\\n (self.__args['combinespws'] == False and self.__args['nspw'] == 1):\n # Get the value of timebin as a float\n timebin = self.__args['timebin']\n tsec = qa.quantity(timebin,'s')['value']\n scansel = self.__getScanIds(self.__args['vis'], self.__args['scan'])\n # For each subms, check if scans length is <= timebin\n for subms in subMSList:\n if not self.__isScanContained(subms, scansel, tsec):\n casalog.post('Cannot process MMS in parallel when timespan=\\'scan\\' because the subMSs do not contain all the selected scans',\\\n 'WARN')\n # Set the new separation axis for the output\n retval['status'] = False\n retval['axis'] = 'spw'\n break\n \n # Two transformations are requested.\n elif (self.__args['combinespws'] == True or self.__args['nspw'] > 1) and \\\n (self.__args['timeaverage'] == True and self.__args['timespan'] == 'scan'):\n # Check spws and scans in subMSs\n spwsel = self.__getSpwIds(self.__args['vis'], self.__args['spw'])\n spwdict = ph.getScanSpwSummary(subMSList) \n scansel = self.__getScanIds(self.__args['vis'], self.__args['scan'])\n timebin = self.__args['timebin']\n tsec = qa.quantity(timebin,'s')['value']\n for subms in subMSList:\n subms_spwids = ph.getSubMSSpwIds(subms, spwdict)\n slist = map(str,subms_spwids)\n if self.__isSpwContained(spwsel, slist):\n if not self.__isScanContained(subms, scansel, tsec):\n casalog.post('The subMSs of input MMS do not contain the necessary scans','WARN')\n retval['status'] = False\n retval['axis'] = ''\n break \n else:\n casalog.post('The subMSs of input MMS do not contain the necessary spws','WARN')\n retval['status'] = False\n retval['axis'] = ''\n break\n \n \n elif self.__taskname == \"split2\" or self.__taskname == \"split\": \n if (sepaxis != 'spw' and self.__args['combine'] == 'scan'):\n scansel = self.__getScanIds(self.__args['vis'], self.__args['scan'])\n timebin = self.__args['timebin']\n tsec = qa.quantity(timebin,'s')['value']\n for subms in subMSList:\n if not self.__isScanContained(subms, scansel, tsec):\n casalog.post('Cannot process MMS in parallel when combine=\\'scan\\' because the subMSs do not contain all the selected scans',\\\n 'WARN')\n casalog.post(\"Please set keepmms to False or use task mstransform in this case.\",'ERROR')\n retval['status'] = False\n retval['axis'] = ''\n break\n\n elif self.__taskname == \"cvel2\" and sepaxis != 
'scan':\n spwsel = self.__getSpwIds(self.__args['vis'], self.__args['spw']) \n spwdict = ph.getScanSpwSummary(subMSList) \n for subms in subMSList:\n subms_spwids = ph.getSubMSSpwIds(subms, spwdict)\n slist = map(str,subms_spwids)\n # Check if the subms contains all the selected spws\n if not self.__isSpwContained(spwsel, slist):\n casalog.post('Cannot combine spws in parallel because the subMSs do not contain all the selected spws',\\\n 'WARN')\n casalog.post(\"Please set keepmms to False or use task mstransform in this case.\",'ERROR')\n # Set the new separation axis for the output\n retval['status'] = False\n retval['axis'] = ''\n break\n \n\n return retval", "def _align_unlabelled_segments_with_adjacents(segments, segments_alignment, partitioned_skeletons, frame_rate: float):\n maximum_gap_allowed = max(1, int(frame_rate * MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))\n # ensure that if no segments have been aligned at all, pick one solution randomly to start\n if np.all(segments_alignment.mask):\n logger.info(\"There are no trusted segments with head decision to resolve the whole video, stopping analysis.\")\n return segments_alignment\n\n # fix in priority the segments with known adjacent frames with little gap\n # until all segments are aligned except the isolated ones (further than maximum_gap_allowed)\n unaligned = np.where(segments_alignment.mask)[0]\n while len(unaligned) > 0:\n # we first pick the best candidate segment to align (there are known frames nearby before or after or both)\n all_gaps = [\n _calculate_smallest_gap_to_adjacent(\n segment_index=x,\n segments=segments,\n segments_alignment=segments_alignment,\n )\n for x in unaligned\n ]\n segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]\n gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[segment_to_fix_index]\n\n # abort if only isolated segments are left\n if gap_to_adjacent_segment > maximum_gap_allowed:\n break\n\n cur_segment_index = unaligned[segment_to_fix_index]\n cur_segment_skeleton = partitioned_skeletons[segments[cur_segment_index]]\n\n adjacent_segment_index = cur_segment_index + adjacent_segment_offset\n adjacent_alignment = segments_alignment[adjacent_segment_index]\n adjacent_segment = segments[adjacent_segment_index]\n adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][:, adjacent_alignment]\n\n if adjacent_segment_offset == -1:\n closest_unaligned_skeleton = cur_segment_skeleton[0] # first frame of cur segment\n closest_known_skeleton = adjacent_segment_skeleton[-1] # last frame of prev segment\n elif adjacent_segment_offset == 1:\n closest_unaligned_skeleton = cur_segment_skeleton[-1] # last frame of cur segment\n closest_known_skeleton = adjacent_segment_skeleton[0] # first frame of next segment\n else:\n raise ValueError()\n\n dists = [skeleton_distance(closest_known_skeleton, skel) for skel in closest_unaligned_skeleton]\n segments_alignment[cur_segment_index] = int(np.argmax(dists))\n\n unaligned = np.where(segments_alignment.mask)[0]\n\n return segments_alignment", "def parse_bam():\n global sample_name, header, segmentID, bam\n sys.stderr.write(time.strftime(\"%c\") + \" Busy with parsing bam file...\\n\")\n bam = pysam.AlignmentFile(NanoSV.opts_bam, 'rb')\n if not bam.has_index():\n sys.exit('The bam has no index file')\n header = bam.header\n if 'HD' in header:\n if not header['HD']['SO'] == 'coordinate':\n sys.exit('The bam file is not coordinate sorted')\n if 'RG' in header:\n if type(header['RG']) is list:\n sample_name = header['RG'][0]['SM']\n else:\n 
sample_name = header['RG']['SM']\n else:\n sample_name = re.sub('(\\.sorted)?\\.bam$', '', str(NanoSV.opts_bam))\n\n for line in bam:\n if line.query_name in reads:\n read = reads[line.query_name]\n else:\n read = r.Read(line.query_name, line.infer_read_length())\n reads[line.query_name] = read\n\n if line.flag & 4 or line.mapping_quality < NanoSV.opts_min_mapq:\n continue\n segment = s.Segment(segmentID, line.query_name, line.flag, line.reference_name, line.reference_start+1, line.mapping_quality,\n line.query_alignment_length)\n segment.end = line.reference_start + line.reference_length\n if line.has_tag('MD'):\n matches = sum(map(int, re.findall(r\"(\\d+)\", line.get_tag('MD'))))\n segment.pid = format(matches / segment.length, '.3f')\n else:\n segment.pid = format(line.get_cigar_stats()[0][7] / segment.length, '.3f')\n if segment.pid == \"0.000\":\n segment.pid = format(line.get_cigar_stats()[0][0] / segment.length, '.3f')\n if line.flag & 16:\n if line.cigartuples[-1][0] == 5 or line.cigartuples[-1][0] == 4:\n segment.clip = line.cigartuples[-1][1]\n else:\n segment.clip = 0\n if line.cigartuples[0][0] == 5 or line.cigartuples[0][0] == 4:\n segment.clip_2 = line.cigartuples[0][1]\n else:\n segment.clip_2 = 0\n else:\n if line.cigartuples[0][0] == 5 or line.cigartuples[0][0] == 4:\n segment.clip = line.cigartuples[0][1]\n else:\n segment.clip = 0\n if line.cigartuples[-1][0] == 5 or line.cigartuples[-1][0] == 4:\n segment.clip_2 = line.cigartuples[-1][1]\n else:\n segment.clip_2 = 0\n if float(segment.pid) < NanoSV.opts_min_pid:\n continue\n read.addSegment(segment)\n segments[segmentID] = segment\n segmentID += 1", "def postprocess_segments(self):\n # make segs a list of mask arrays, it's easier to store\n # as there is a hdf5 equivalent\n for iseg, seg in enumerate(self.segs):\n mask = np.zeros(self.X.shape[0], dtype=bool)\n mask[seg] = True\n self.segs[iseg] = mask\n # convert to arrays\n self.segs = np.array(self.segs)\n self.segs_tips = np.array(self.segs_tips)", "def _self_align(self):\n logging.info(\"Splitting palindrome.\")\n logging.debug(\"Making reverse complement sequences of reads in \" +\n \"{i} to {o}\".format(i=self.ori_all_reads_fasta,\n o=self.rc_all_reads_fasta))\n num_reads = revcmp_fasta(self.ori_all_reads_fasta,\n self.rc_all_reads_fasta)\n\n reads_per_split = max(1, int(num_reads/self.nproc) + 1)\n logging.debug(\"Splitting {f} to small files each containing {n} reads.\".\n format(f=self.ori_all_reads_fasta, n=reads_per_split))\n fs = FastaSplitter(input_fasta=self.ori_all_reads_fasta,\n reads_per_split=reads_per_split,\n out_dir=self.out_dir,\n out_prefix=\"reads.split.\")\n fs.split()\n sp_fasta_files = fs.out_fns\n\n logging.debug(\"Splitting {f} to smaller files.\".\n format(f=self.rc_all_reads_fasta))\n rc_fs = FastaSplitter(input_fasta=self.rc_all_reads_fasta,\n reads_per_split=reads_per_split,\n out_dir=self.out_dir,\n out_prefix=\"rc_reads.split.\")\n rc_fs.split()\n rc_sp_fasta_files = rc_fs.out_fns\n\n logging.debug(\"Aligning each read in {i} to its revese compelement \" +\n \"read using sdpMatcher.\".format(i=self.ori_all_reads_fasta))\n\n sdps = [\"{f}.sdp\".format(f=f) for f in sp_fasta_files]\n jobs = []\n for f, rc_f, sdp in zip(sp_fasta_files, rc_sp_fasta_files, sdps):\n cmd = \"sdpMatcher {f} {rc_f} \".format(f=f, rc_f=rc_f) + \\\n \"10 -local > {sdp} \".format(sdp=sdp)\n logging.debug(\"CMD: {cmd}\".format(cmd=cmd))\n jobs.append(cmd)\n\n pool = Pool(processes=self.nproc)\n rets = pool.map(backticks, jobs)\n pool.close()\n pool.join()\n\n for 
i, job in enumerate(jobs):\n if rets[i][1] != 0:\n errMsg = \"Job {j} failed.\".format(j=job) + str(rets[i][2])\n raise RuntimeError(errMsg)\n\n logging.debug(\"Concatenating all sdp outputs to {f}\".\n format(f=self.sdp_out_file))\n cat_files(src=sdps, dst=self.sdp_out_file)\n\n logging.debug(\"Cleaning intermediate fasta & sdp files.\")\n fs.rmOutFNs()\n rc_fs.rmOutFNs()\n\n for f in sdps:\n os.remove(f)", "def checkGuide(seq, plen, pam, rpam, is_upstream_pam):\n if is_upstream_pam:\n if pam.match(seq[:plen]):\n yield seq, \"+\"\n if rpam.match(seq[-plen:]):\n yield reverseComplement(seq), \"-\"\n else:\n if pam.match(seq[-plen:]):\n yield seq, \"+\"\n if rpam.match(seq[:plen]):\n yield reverseComplement(seq), \"-\"\n #yield \"\", \"\"", "def build_alignment(self,score,pieces):\n\t \t# build text\n\t\tself.open_seqs()\n\t\ttext1 = text2 = \"\"\n\t\tend1 = end2 = None\n\t\tfor (start1,start2,length,pctId) in pieces:\n\t\t\tif (end1 != None):\n\t\t\t\tif (start1 == end1): # insertion in sequence 2\n\t\t\t\t\ttext1 += self.seq1_gap * (start2-end2)\n\t\t\t\t\ttext2 += self.seq2_file.get(end2,start2-end2)\n\t\t\t\telse: # insertion in sequence 1\n\t\t\t\t\ttext1 += self.seq1_file.get(end1,start1-end1)\n\t\t\t\t\ttext2 += self.seq2_gap * (start1-end1)\n\n\t\t\ttext1 += self.seq1_file.get(start1,length)\n\t\t\ttext2 += self.seq2_file.get(start2,length)\n\t\t\tend1 = start1 + length\n\t\t\tend2 = start2 + length\n\t\t# create alignment\n\t\tstart1 = pieces[0][0]\n\t\tstart2 = pieces[0][1]\n\t\tend1 = pieces[-1][0] + pieces[-1][2]\n\t\tend2 = pieces[-1][1] + pieces[-1][2]\n\t\tsize1 = end1 - start1\n\t\tsize2 = end2 - start2\n\t\ta = Alignment(score=score,species_to_lengths=self.species_to_lengths)\n\t\t#if (self.seq1_strand == \"-\"): start1 = self.seq1_file.length - end1\n\t\ta.add_component(Component(self.seq1_src,start1,size1,self.seq1_strand,text=text1))\n\t\t#if (self.seq2_strand == \"-\"): start2 = self.seq2_file.length - end2\n\t\ta.add_component(Component(self.seq2_src,start2,size2,self.seq2_strand,text=text2))\n\t\treturn a", "def test_get_align_coords(self):\n # 01234 5\n # ACGGT--A\n # 012345\n # --GGTTTA\n m1, seq1 = DNA.make_seq(\"ACGGT--A\").parse_out_gaps()\n m2, seq2 = DNA.make_seq(\"--GGTTTA\").parse_out_gaps()\n path = get_align_coords(m1, m2)\n expect = [2, 4, None, 5, 5], [0, 2, None, 5, 5]\n self.assertEqual(path.get_coords(), expect)\n\n # we have no gaps, so coords will be None\n m1, s1 = seq1.parse_out_gaps()\n m2, s2 = seq2.parse_out_gaps()\n path = get_align_coords(m1, m2)\n self.assertEqual(path.get_coords(), ([], []))\n\n # unless we indicate the seqs came from an Alignment\n m1, seq1 = DNA.make_seq(\"ACGGTTTA\").parse_out_gaps()\n m2, seq2 = DNA.make_seq(\"GGGGTTTA\").parse_out_gaps()\n paths = get_align_coords(m1, m2, aligned=True)\n self.assertEqual(paths.get_coords(), ([0, len(seq1)], [0, len(seq1)]))\n\n # raises an exception if the Aligned seqs are different lengths\n m1, seq1 = DNA.make_seq(\"ACGGTTTA\").parse_out_gaps()\n m2, seq2 = DNA.make_seq(\"GGGGTT\").parse_out_gaps()\n with self.assertRaises(AssertionError):\n get_align_coords(m1, m2, aligned=True)", "def _parse_program(raw_program: list):\n curr_mask = dict()\n masks = list()\n splitter = ' = '\n\n for p in raw_program:\n if 'mask' in p:\n split_pea = p.split(splitter)\n mask = split_pea[1]\n if curr_mask:\n masks.append(curr_mask)\n curr_mask = dict()\n curr_mask['mask'] = mask\n else:\n split_memory = p.split(splitter)\n mem = int(split_memory[0][4:-1])\n val = int(split_memory[1])\n curr_mask[mem] = 
val\n\n if curr_mask:\n masks.append(curr_mask)\n\n return masks", "def get_alignments(self) -> list:", "def partition(self):\n scanners = {state: regexp_opt(tokens, self.TOKEN_REGEXPS)\n for (state, tokens) in self.TRANSITIONS.items()}\n while True:\n start, token_end, state = self.stack[-1]\n m = scanners[state].search(self.doc, self.pos)\n if not m:\n expected = self.TRANSITIONS[state]\n raise ParsingError(self.doc, state, expected, start, token_end)\n tok, mstart, self.pos = Token(m.lastgroup), m.start(), m.end()\n if not self.step(state, start, tok, mstart):\n break\n return self.spans", "def do_semiglobal_alignment(sequences, matrix, penalty):\n seq1 = '-' + sequences[0].Sequence\n seq2 = '-' + sequences[1].Sequence\n\n # scoring matrix initializer\n scoring = local_setup(len(seq1), len(seq2))\n\n # fill scoring matrix\n aa_start = ord('A')\n for i in range(1, len(seq1)):\n aa_x = seq1[i]\n for j in range(1, len(seq2)):\n aa_y = seq2[j]\n xgap = scoring[i][j-1] - penalty\n ygap = scoring[i-1][j] - penalty\n match = scoring[i-1][j-1] + \\\n matrix[ord(aa_x) - aa_start][ord(aa_y) - aa_start]\n\n # store the max score\n scoring[i].append(max([xgap, ygap, match]))\n\n # find the max score (only the last max score)\n max_i, max_j, max_score = 0, 0, -float('inf')\n for j in range(len(scoring[-1])): # find max low road\n if scoring[-1][j] >= max_score:\n max_i, max_j, max_score = -1, j, scoring[-1][j]\n\n for i in range(len(scoring)): # find max high road (priority)\n if scoring[i][-1] >= max_score:\n max_i, max_j, max_score = i, -1, scoring[i][-1]\n\n # perform traceback\n alignment = traceback(\n scoring, seq1, seq2, penalty, matrix, max_i, max_j, semi=True\n )\n\n # add the endgaps for seq1\n if max_i == -1 and max_j != len(scoring[-1]):\n for j in range(max_j + 1, len(scoring[-1])):\n alignment[0][0] += '-'\n alignment[1][0] += ' '\n alignment[2][0] += seq2[j]\n\n # add the endgaps for seq2\n if max_j == -1 and max_i != len(scoring):\n for i in range(max_i + 1, len(scoring)):\n alignment[0][0] += seq1[i]\n alignment[1][0] += ' '\n alignment[2][0] += '-'\n\n # Add the sequences to the scoring matrix for visualizing\n scoring = add_sequences_to_scoring(scoring, seq1, seq2)\n\n return alignment, scoring", "def motifs(self, thre, align):\n\n if self._parse is None:\n print \"No previous parsing\"\n print \"Parsing file...\"\n seqs = self.parse()\n self._parse = seqs\n print \"Done\"\n else:\n seqs = self._parse\n\n seqs[0].weight(self._seqfile, self._predfile) # weight first sequence\n sleep(1)\n known = seqs[0].motifs(thre, align) # extract motifs from first seq\n\n mot = {} # known motifs dictionary\n\n for i, k in enumerate(known):\n name = \"motif\"+str(i+1) # enumerate motifs\n\n mot[name] = {}\n mot[name][\"size\"] = k[2] - k[1] # size of the motif\n\n # start position of motif real position\n mot[name][\"start\"] = k[1]+1\n mot[name][\"stop\"] = k[2] # end position of motif\n mot[name][\"score\"] = k[3] # average score real position\n\n mot[name][\"align\"] = k[4] # average alignment score of sequence\n\n for j, s in enumerate(seqs):\n mot[name][s.name()] = {}\n # extract motif from each sequence\n mot[name][s.name()][\"seq\"] = s[k[1]+1:k[2]]\n mot[name][s.name()][\"start\"] = s.get_real_pos(k[1])\n if j == 0:\n # real position\n mot[name][\"start\"] = mot[name][s.name()][\"start\"]\n\n mot[\"threshold\"] = thre # general threshold used\n mot[\"align\"] = align # used alignment score\n self._motifs = mot\n\n return mot", "def parse_mutations_uniprot_data(gff_data, start='start', 
stop='end', mut_types_to_skip=None):\n if mut_types_to_skip is None:\n mut_types_to_skip = [\n 'Chain', # This is the whole protein\n 'Region', # Those are better described in pfam database\n ]\n\n if 'Chain' not in mut_types_to_skip:\n mut_types_to_skip.append('Chain')\n\n # Selects the various mutations types in the dataset, except types contained in the above list\n mut_types = gff_data['mut'].loc[~gff_data['mut'].isin(mut_types_to_skip)].value_counts().index\n\n x = np.array([]).astype('str')\n y = np.array([]).astype('str')\n mutationgroups = np.array([]).astype('str')\n\n for mut_type in mut_types:\n\n # Selects the start and end protein coordinates of the mutation\n data_coord = gff_data[gff_data.mut == mut_type][[start, stop]]\n\n # Sort between the single and multi-site coordinates\n single_sites = data_coord.loc[data_coord[start] == data_coord[stop]]\n multi_sites = data_coord.loc[data_coord[start] != data_coord[stop]]\n\n # Joins the start and end coordinates into one string\n multi_sites['sep'] = \"-\"\n multi_sites[start] = \\\n multi_sites[start].map(str) \\\n + multi_sites['sep'] \\\n + multi_sites[stop].map(str)\n\n # Merge the single and multi-site coordinates in one columns and counts the occurrences\n sorted_data = single_sites[start].append(multi_sites[start]).value_counts()\n n = (len(sorted_data.index))\n\n x = np.append(x, np.array(sorted_data.index).astype('str'))\n y = np.append(y, np.array(sorted_data.values).astype('str'))\n mutationgroups = np.append(mutationgroups, np.repeat(mut_type, n))\n\n formatted_data = dict(\n x=x.tolist(),\n y=y.tolist(),\n mutationGroups=mutationgroups.tolist(),\n domains=[],\n )\n jsonschema.validate(formatted_data, MUT_DATA_SCHEMA)\n return formatted_data", "def load_sequences_pynast(file_pynast_alignment, file_otumap,\n frg_start, frg_stop, frg_expected_length,\n verbose=True, out=sys.stdout, onlyrepr=False,\n nomerge=False):\n # load the full pynast GreenGenes alignment with\n # sequences=1261500 and position=7682\n # deprecation warning from skbio\n ali = TabularMSA.read(file_pynast_alignment,\n format='fasta', constructor=DNA)\n\n # set index of alignment to sequence IDs\n ali.index = [seq.metadata['id'] for seq in ali]\n\n if verbose:\n out.write(\"% 8i rows and %i cols in alignment '%s'\\n\" % (\n ali.shape[0],\n ali.shape[1],\n file_pynast_alignment.split('/')[-1]))\n\n # load OTU map\n (otumap, seqinfo) = read_otumap(file_otumap)\n # all representative seq IDs\n seqids_to_use = list(otumap.index)\n if onlyrepr is False:\n # all non-representative seq IDs\n seqids_to_use += [seqid for otu in otumap.values for seqid in otu]\n if verbose:\n out.write(\"% 8i sequences in OTU map '%s'\\n\" % (\n len(seqids_to_use),\n file_otumap.split('/')[-1]))\n\n # subset the alignment to those sequences that are selected from OTU map\n ali_otumap = ali.loc[set(seqids_to_use) & set(ali.index)]\n if verbose:\n out.write((\"% 8i sequences selected from OTU map and alignment. \"\n \"Surprise: %i sequences of OTU map are NOT in \"\n \"alignment!\\n\") % (\n ali_otumap.shape[0],\n len(seqids_to_use) - ali_otumap.shape[0]))\n # To my surprise, not all OTU-IDs of the SEPP reference tree\n # (same with the 99 tree of GreenGenes) are in the pynast alignment.\n # Daniel says: \"PyNAST fails on some sequences. 
The tree is constructed\n # from the ssu-align alignment (based on infernal), but that alignment\n # method is lossy so it is not suitable for extracting variable\n # regions\" Therefore, I exclude those 1031 OTU-IDs from further\n # investigation\n\n # trim alignment down to fragment columns\n ali_fragments = ali_otumap.iloc(axis='position')[frg_start:frg_stop]\n if verbose:\n out.write((\"%i -> %i cols: trimming alignment to fragment \"\n \"coordinates\\n\") % (\n ali_otumap.shape[1],\n ali_fragments.shape[1]))\n\n # ungap alignment rows\n fragments = []\n num_frags_toolong = 0\n for fragment_gapped in ali_fragments:\n fragment = fragment_gapped.degap()\n if len(fragment) >= frg_expected_length:\n if len(fragment) > frg_expected_length:\n num_frags_toolong += 1\n fragments.append({\n 'sequence': str(fragment)[:frg_expected_length],\n 'seqID': fragment_gapped.metadata['id'],\n 'otuID': seqinfo.loc[fragment_gapped.metadata['id']]})\n if verbose:\n out.write((\"% 8i fragments with ungapped length >= %int. \"\n \"Surprise: %i fragments are too short and %i fragments \"\n \"where too long (and have been trimmed)!\\n\") % (\n len(fragments),\n frg_expected_length,\n ali_fragments.shape[0] - len(fragments), num_frags_toolong))\n # Another surprise is that the trimmed, degapped sequences from pynast\n # alignment do NOT all have length 150nt. Following is a length\n # distribution plot. I checked with Daniel and we decided to omit\n # frgaments smaller than 150nt and timm all other to 150nt.\n\n # convert fragments into Pandas.DataFrame\n fragments = pd.DataFrame(fragments)\n if verbose:\n out.write('% 8i fragments remaining.\\n' % fragments.shape[0])\n if nomerge:\n return fragments\n # group fragments by sequence and list true OTUids\n unique_fragments = fragments.groupby('sequence').agg(lambda x:\n list(x.values))\n if verbose:\n out.write('% 8i unique fragments after collapsing by sequence.\\n' %\n unique_fragments.shape[0])\n\n frgs = []\n for i, (sequence, row) in enumerate(unique_fragments.iterrows()):\n frgs.append({'sequence': sequence,\n 'seqIDs': row['seqID'],\n 'otuIDs': sorted(list(set(row['otuID']))),\n 'num_non-representative-seqs':\n len(set(row['seqID']) - set(row['otuID'])),\n 'only_repr._sequences':\n len(set(row['seqID']) - set(row['otuID'])) == 0,\n 'num_pointmutations': 0})\n\n return frgs", "def _process_strings(line,\n lang_nlp,\n get_lemmas,\n get_pos,\n remove_stopwords,\n replace_stopwords,\n get_maps):\n\n # strip, replace special tokens\n orig_line = line\n line = line.strip()\n line = re.sub(r'&apos;', '\\'', line.strip())\n line = re.sub(r'&quot;', '\\\"', line.strip())\n # Tokenize etc.\n line_nlp = lang_nlp(line)\n spacy_tokens = [elem.text for elem in line_nlp]\n spacy_tokens_lower = [elem.text.lower() for elem in line_nlp]\n spacy_lemmas = None\n spacy_pos = None\n if get_lemmas:\n spacy_lemmas = list()\n for elem in line_nlp:\n if elem.lemma_ == '-PRON-' or elem.lemma_.isdigit():\n spacy_lemmas.append(elem.lower_)\n else:\n spacy_lemmas.append(elem.lemma_.lower().strip())\n if get_pos:\n spacy_pos = [elem.pos_ for elem in line_nlp]\n\n # Generate a mapping between whitespace tokens and SpaCy tokens\n ws_tokens = orig_line.strip().split()\n ws_tokens_lower = orig_line.strip().lower().split()\n ws_to_spacy_map = dict()\n spacy_to_ws_map = dict()\n if get_maps:\n ws_loc = 0\n ws_tok = ws_tokens[ws_loc]\n\n for spacy_loc, spacy_tok in enumerate(spacy_tokens):\n while True:\n # Map whitespace tokens to be identical to spacy tokens\n ws_tok = re.sub(r'&apos;', 
'\\'', ws_tok)\n ws_tok = re.sub(r'&quot;', '\\\"', ws_tok)\n\n if spacy_tok == ws_tok or spacy_tok in ws_tok:\n # Terminate\n if ws_loc >= len(ws_tokens):\n break\n\n # Extend maps\n if not ws_to_spacy_map.get(ws_loc, None):\n ws_to_spacy_map[ws_loc] = list()\n ws_to_spacy_map[ws_loc].append(spacy_loc)\n if not spacy_to_ws_map.get(spacy_loc, None):\n spacy_to_ws_map[spacy_loc] = list()\n spacy_to_ws_map[spacy_loc].append(ws_loc)\n\n # Move pointer\n if spacy_tok == ws_tok:\n ws_loc += 1\n if ws_loc < len(ws_tokens):\n ws_tok = ws_tokens[ws_loc]\n else:\n ws_tok = ws_tok[len(spacy_tok):]\n break\n else:\n ws_loc += 1\n\n # Assert full coverage of whitespace and SpaCy token sequences by the mapping\n ws_covered = sorted(list(ws_to_spacy_map.keys()))\n spacy_covered = sorted(list(set(list([val for val_list in ws_to_spacy_map.values() for val in val_list]))))\n assert ws_covered == [n for n in range(len(ws_tokens))], \\\n 'WS-SpaCy mapping does not cover all whitespace tokens: {}; number of tokens: {}'\\\n .format(ws_covered, len(ws_tokens))\n assert spacy_covered == [n for n in range(len(spacy_tokens))], \\\n 'WS-SpaCy mapping does not cover all SpaCy tokens: {}; number of tokens: {}' \\\n .format(spacy_covered, len(spacy_tokens))\n\n if remove_stopwords:\n # Filter out stopwords\n nsw_spacy_tokens_lower = list()\n nsw_spacy_lemmas = list()\n for tok_id, tok in enumerate(spacy_tokens_lower):\n if tok not in STOP_WORDS:\n nsw_spacy_tokens_lower.append(tok)\n if get_lemmas:\n nsw_spacy_lemmas.append(spacy_lemmas[tok_id])\n else:\n if replace_stopwords:\n nsw_spacy_tokens_lower.append('<STPWRD>')\n if get_lemmas:\n nsw_spacy_lemmas.append('<STPWRD>')\n\n spacy_tokens_lower = nsw_spacy_tokens_lower\n if get_lemmas:\n spacy_lemmas = nsw_spacy_lemmas\n\n return line_nlp, spacy_tokens_lower, spacy_lemmas, spacy_pos, ws_tokens, ws_tokens_lower, ws_to_spacy_map, \\\n spacy_to_ws_map", "def preprocess(negative_pcap_dir, sub_dir_name=''):\n # Positive/Abnormal pcaps.\n contexts_dir = os.path.join(\"../AppInspector/data/\", sub_dir_name)\n logger.info('The contexts are stored at %s', os.path.abspath(contexts_dir))\n contexts = Analyzer.pred_pos_contexts(contexts_dir)\n positive_flows = Analyzer.sens_flow_jsons(contexts, Analyzer.filter_pos_flows)\n for flow in positive_flows:\n # The label given by the prediction of AppInspector, may not be as same as the ground truth.\n flow['label'] = '1'\n\n # Negative/Normal pcaps.\n # They have no relationship with \"context\" defined in AppInspector, just a bunch of normal flows.\n negative_flows = []\n for file in os.listdir(negative_pcap_dir):\n if file.endswith('_http_flows.json'):\n with open(os.path.join(negative_pcap_dir, file), 'r', encoding=\"utf8\", errors='ignore') as infile:\n flows = json.load(infile)\n for flow in flows:\n # The context label is as same as the ground truth since they are not labelled by AppInspector.\n flow['real_label'] = '0'\n negative_flows.append(flow)\n\n return positive_flows, negative_flows", "def test_pynast_template_alignment_fp(self):\r\n\r\n test_qiime_config_variable(\"pynast_template_alignment_fp\",\r\n self.config, self)", "def align_pairs_local(ref_seq, other_seq, score_only = False):\n scoring_mat = blosum62\n GAP_OPEN_PEN = -10\n GAP_EXTEND_PEN = -1\n alignments = pairwise2.align.localds(\n ref_seq,\n other_seq,\n scoring_mat,\n GAP_OPEN_PEN,\n GAP_EXTEND_PEN,\n score_only = score_only,\n )\n\n return alignments", "def findPAMs(self,i):\r\n\t\timport sys\r\n\t\tlistofPAMS = [] \t\t\t\t\t# Create a list for the PAM 
sequences.\r\n\t\tlistofReversedPAMS = [] \t\t\t\t# Create a list for the reverse PAM sequences.\r\n\t\tcounter = 0 \t\t\t\t\t\t# This counter starts for the forward sequences.\r\n\t\tfor nucleotide in self.sequences[i]:\r\n\t\t\tif nucleotide == \"G\" and self.sequences[i][counter-1] == \"G\":\r\n\t\t\t\tif counter > 23: \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Have a set length that is 23 or greater to pass it on.\r\n\t\t\t\t\tlistofPAMS.append((self.sequences[i][counter-22:counter-2],counter-1)) # Add the sequence with the correct position to the list.\r\n\t\t\tcounter+=1\r\n\r\n\t\tcounter = 0 # This counter starts for the reverse sequences\r\n\t\tfor nucleotide in self.reversedSequenceList[i]: # Looking for the sequence in the reversed list.\r\n\t\t\tif nucleotide == \"G\" and self.reversedSequenceList[i][counter-1] == \"G\":\r\n\t\t\t\tif counter > 23:\r\n\t\t\t\t\tlistofReversedPAMS.append((self.reversedSequenceList[i][counter-22:counter-2],len(self.reversedSequenceList[i])-counter+2))\r\n\t\t\tcounter+=1\r\n\t\t\r\n\t\tself.listofPAMS.append((listofPAMS)) \t\t\t # Add to the the forward sequences to the list.\r\n\t\tself.listofReversedPAMS.append((listofReversedPAMS[::-1])) # Add the reverse sequence lists to the lists for reverse sequences.\r", "def cleanAlign(align, badaa=None):\n return align.loc[[isvalidpeptide(s, badaa) for s in align]]", "def procrustes_alignment(sources, rigid=False):\n\n group = vtk.vtkMultiBlockDataGroupFilter()\n for source in sources:\n if sources[0].npoints != source.npoints:\n vedo.logger.error(\"sources have different nr of points\")\n raise RuntimeError()\n group.AddInputData(source.polydata())\n procrustes = vtk.vtkProcrustesAlignmentFilter()\n procrustes.StartFromCentroidOn()\n procrustes.SetInputConnection(group.GetOutputPort())\n if rigid:\n procrustes.GetLandmarkTransform().SetModeToRigidBody()\n procrustes.Update()\n\n acts = []\n for i, s in enumerate(sources):\n poly = procrustes.GetOutput().GetBlock(i)\n mesh = vedo.mesh.Mesh(poly)\n mesh.SetProperty(s.GetProperty())\n if hasattr(s, \"name\"):\n mesh.name = s.name\n acts.append(mesh)\n assem = Assembly(acts)\n assem.transform = procrustes.GetLandmarkTransform()\n assem.info[\"mean\"] = vedo.utils.vtk2numpy(procrustes.GetMeanPoints().GetData())\n return assem", "def parseSpineXout(ofname):\n# 0 1 2 3 4 5 6 7 8 9 10 11 12\n# # index AA SS phi1 psi1 P_E P_C P_H phi0 psi0 ASA S_pk S_SS pk_phi pk_psi pkc_phi pkc_ps\n# 1 E C -85.6 141.3 0.0527 0.8784 0.0689 -87.5 143.0 130.5 0.6941 0.4126 -5.0000 5.0000 0.9924 0.2499\n ss=[]\n phi=[]\n psi=[]\n asa=[]\n rasa=[]\n MAX_ACC=getMAXASA('single')\n for f in open(ofname,'r'):\n f=f.split()\n if f[0]=='#':\n continue\n #ss.append(f[2])\n phi.append([float(f[8]),float(f[3])])\n psi.append([float(f[9]),float(f[4])])\n ss.append([float(i) for i in f[5:8]])\n asa.append(float(f[10]))\n try:\n m=MAX_ACC[f[1]] #if key not found then produce nan\n except KeyError as e:\n print e\n m=np.nan\n continue\n rasa.append(float(f[10])/m)\n return (np.array(asa),np.array(rasa),np.array(ss),np.array(phi),np.array(psi))", "def preprocess_data(train_neg_file_pattern,\n train_pos_file_pattern,\n test_neg_file_pattern,\n test_pos_file_pattern,\n transformed_train_file_pattern,\n transformed_test_file_pattern,\n transformed_metadata_dir,\n raw_metadata_dir,\n transform_func_dir,\n temp_dir,\n vocab_size,\n delimiters):\n pipeline_name = 'DataflowRunner'\n options = {\n 'job_name': ('cloud-ml-hazmat-preprocess-{}'.format(datetime.datetime.now().strftime('%Y%m%d%H%M%S'))),\n 
'temp_location': temp_dir,\n 'project': \"stone-outpost-636\",\n 'max_num_workers': 8\n }\n pipeline_options = beam.pipeline.PipelineOptions(flags=[], **options)\n #with beam.Pipeline(pipeline_name, options=pipeline_options) as pipeline:\n # with beam_impl.Context(temp_dir=temp_dir):\n with beam.Pipeline() as pipeline:\n with beam_impl.Context(temp_dir=tempfile.mkdtemp()):\n\n train_data = pipeline | 'ReadTrain' >> ReadAndShuffleData((train_neg_file_pattern, train_pos_file_pattern))\n test_data = pipeline | 'ReadTest' >> ReadAndShuffleData((test_neg_file_pattern, test_pos_file_pattern))\n preprocessing_fn = generate_preprocessing_fn(vocab_size, delimiters)\n\n (transformed_train_data, transformed_metadata), transform_fn = ((train_data, const.RAW_METADATA)\n | 'AnalyzeAndTransform' >> beam_impl.AnalyzeAndTransformDataset(preprocessing_fn))\n\n _ = (transform_fn | 'WriteTransformFn' >> tft_beam_io.WriteTransformFn(transform_func_dir))\n\n transformed_test_data, _ = (((test_data, const.RAW_METADATA), transform_fn)\n | 'Transform' >> beam_impl.TransformDataset())\n\n _ = (transformed_train_data\n | 'WriteTrainData' >> tfrecordio.WriteToTFRecord(transformed_train_file_pattern,\n coder=example_proto_coder.ExampleProtoCoder(transformed_metadata.schema)))\n\n _ = (transformed_test_data\n | 'WriteTestData' >> tfrecordio.WriteToTFRecord(transformed_test_file_pattern,\n coder=example_proto_coder.ExampleProtoCoder(transformed_metadata.schema)))\n\n _ = (transformed_metadata\n | 'WriteTransformedMetadata' >> beam_metadata_io.WriteMetadata(transformed_metadata_dir, pipeline=pipeline))\n\n _ = (const.RAW_METADATA\n | 'WriteRawMetadata' >> beam_metadata_io.WriteMetadata(raw_metadata_dir, pipeline=pipeline))", "def trimmott_se(records, args):\n for record in records:\n record = mottrecord(record, args.q)\n if args.r:\n record = record.reverse_complement(name=True,id=True,description=True)\n if args.d:\n record.name += '/1'\n record.id += '/1'\n record.description += '/1'\n if len(record) >= args.m and numpy.mean(record.letter_annotations['phred_quality']) >= args.a:\n yield record", "def novoalign(self) -> None:\n self.analysis.logger.info(\"Running alignment with NovoAlign\")\n self.chdir()\n config = self.analysis.config\n executor = Executor(self.analysis)\n barcoded = BarcodedFilename.from_sample(self.analysis.sample)\n with tempfile.TemporaryDirectory(dir=config.temporary_dir) as tmpdir:\n filename = os.path.join(tmpdir, \"align.log\")\n fh = logging.FileHandler(filename)\n self.analysis.logger.addHandler(fh)\n if barcoded.analyte == Analyte.WHOLE_EXOME:\n executor(\n f\"{config.novoalign} \"\n f'-oSAM \"@RG\\tID:{self.analysis.basename}\\t'\n f'SM:{self.analysis.sample}\\tLB:lib1\\tPL:ILLUMINA\" '\n f\"-d {{genome_index}} \"\n f\"-i PE {{kit.mean_len_library}},{{kit.sd_len_library}} \"\n f\"-t 90 -f {{input_filename}}> {{output_filename}}\",\n input_function=lambda l: \" \".join(sorted(l)),\n input_split_reads=False,\n output_format=f\"{self.analysis.basename}{{organism_str}}.sam\",\n split_by_organism=True,\n only_human=self.only_human,\n unlink_inputs=True,\n )\n elif barcoded.analyte == Analyte.GENE_PANEL:\n executor(\n f\"{config.novoalign} \"\n f\"-C \"\n f'-oSAM \"@RG\\tID:{self.analysis.basename}\\t'\n f'SM:{self.analysis.sample}\\tLB:lib1\\tPL:ILLUMINA\" '\n f\"-d {{genome_index}} \"\n f\"-i 50-500 -h 8 -H 20 --matchreward 3 -t 90 \"\n f\"-f {{input_filename}}> {{output_filename}}\",\n input_function=lambda l: \" \".join(sorted(l)),\n input_split_reads=False,\n 
output_format=f\"{self.analysis.basename}{{organism_str}}.sam\",\n split_by_organism=True,\n only_human=self.only_human,\n unlink_inputs=True,\n )\n else:\n raise Exception(\"Unnhandled analyte\")\n # CSV NOVOALIGN\n with open(filename, \"r\") as file_log, open(\n self.output_basename + \"_novoalign.csv\", \"w\"\n ) as csv_file, open(\n self.output_basename + \"_stat_novoalign.csv\", \"w\"\n ) as stat_csv_file:\n writer = csv.writer(csv_file)\n writer_stat = csv.writer(stat_csv_file)\n is_csv = False\n is_stat = False\n values = []\n labels = []\n for line in file_log:\n fields = line.split(\":\")\n label = fields[0][1:].strip()\n\n if is_stat is True:\n if label == \"No Mapping Found\":\n is_stat = False\n values.append(fields[1].strip().split()[0])\n labels.append(label)\n elif label == \"Paired Reads\":\n values.append(fields[1].strip().split()[0])\n labels.append(label)\n is_stat = True\n else:\n fields = line.split()\n if is_csv is True:\n if fields[1] == \"Mean\":\n break\n else:\n writer.writerow(fields[1:4])\n elif fields[1] == \"From\":\n writer.writerow(fields[1:4])\n is_csv = True\n writer_stat.writerow(labels)\n writer_stat.writerow(values)\n self.analysis.logger.removeHandler(fh)\n fh.close()\n self.analysis.logger.info(\"Alignment finished. Aligner used: NovoAlign\")", "def sparsify(self, state):\n print(\"running L0 projection-based (unstructured) sparsification. \\n \")\n model = state.model\n masks = self.get_masks(model)\n self.apply_masks(model, masks)" ]
[ "0.54760754", "0.53752476", "0.50093", "0.49865466", "0.49599963", "0.49551886", "0.49462736", "0.49260557", "0.48655975", "0.48339373", "0.48032323", "0.47868657", "0.47548297", "0.47518492", "0.47361124", "0.4728765", "0.47222483", "0.47153112", "0.4694921", "0.46828434", "0.46724996", "0.46596572", "0.4656335", "0.4639756", "0.46382204", "0.4632283", "0.46284556", "0.46176857", "0.4614535", "0.45741355", "0.45688653", "0.4563109", "0.45603886", "0.4549078", "0.45472655", "0.4536515", "0.4528661", "0.45245713", "0.4516244", "0.45126894", "0.45114642", "0.45114464", "0.45081347", "0.45055783", "0.45031273", "0.45025426", "0.4498831", "0.44980952", "0.44910783", "0.44857633", "0.44765222", "0.44577438", "0.44523674", "0.44453686", "0.44386187", "0.44331405", "0.4431331", "0.44209957", "0.4409464", "0.44064426", "0.4402833", "0.43883342", "0.4386474", "0.43764627", "0.4374051", "0.43723598", "0.43699858", "0.43698364", "0.43656972", "0.4365518", "0.43634158", "0.43633547", "0.4361614", "0.4357246", "0.43570408", "0.43495005", "0.43475062", "0.43451533", "0.43447968", "0.4344098", "0.43433362", "0.43425116", "0.43353784", "0.4334332", "0.4330587", "0.43251204", "0.43250856", "0.43208846", "0.43187195", "0.43032262", "0.43026155", "0.43000343", "0.4298606", "0.42911634", "0.4290893", "0.42899352", "0.42881852", "0.42828262", "0.4280353", "0.42792696", "0.42768732" ]
0.0
-1
Parse all alignments in each read with walks-policy all and the pysam backend.
def test_mock_pysam_parse_all(): mock_sam_path = os.path.join(testdir, "data", "mock.parse-all.sam") mock_chroms_path = os.path.join(testdir, "data", "mock.chrom.sizes") try: result = subprocess.check_output( [ "python", "-m", "pairtools", "parse", "--walks-policy", "all", "-c", mock_chroms_path, "--add-pair-index", mock_sam_path, ], ).decode("ascii") except subprocess.CalledProcessError as e: print(e.output) print(sys.exc_info()) raise e # check if the header got transferred correctly sam_header = [l.strip() for l in open(mock_sam_path, "r") if l.startswith("@")] pairsam_header = [l.strip() for l in result.split("\n") if l.startswith("#")] for l in sam_header: assert any([l in l2 for l2 in pairsam_header]) # check that the pairs got assigned properly id_counter = 0 prev_id = "" for l in result.split("\n"): if l.startswith("#") or not l: continue if prev_id == l.split("\t")[0]: id_counter += 1 else: id_counter = 0 prev_id = l.split("\t")[0] assigned_pair = l.split("\t")[1:8] + l.split("\t")[-2:] simulated_pair = ( l.split("CT:Z:SIMULATED:", 1)[1] .split("\031", 1)[0] .split("|")[id_counter] .split(",") ) print(assigned_pair) print(simulated_pair, prev_id) print() assert assigned_pair == simulated_pair
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def align(aligner, reads):\n counter = 0\n for read in SeqIO.parse(reads, \"fasta\"): \n try:\n alignInfo = next(aligner.map(str(read.seq)))\n print(alignInfo) \n except StopIteration:\n print(read.format(\"fasta\"), end='')", "def align_reads(self):\n self._test_folder_existance(\n self._pathcreator.required_read_alignment_folders()\n )\n assert self._args.paired_end in [True, False]\n self._pathcreator.set_ref_seq_paths_by_species()\n self._ref_seq_files = self._pathcreator.get_ref_seq_files()\n self._pathcreator.set_ref_seq_path_list()\n self._test_align_file_existance()\n if not self._args.paired_end:\n # Single end reads\n self._read_files = self._pathcreator.get_read_files()\n self._lib_names = self._pathcreator.get_lib_names_single_end()\n self._pathcreator.set_read_files_dep_file_lists_single_end(\n self._read_files, self._lib_names\n )\n self._prepare_reads_single_end()\n print(f\"controller align_single_end_reads start {datetime.now()}\")\n self._align_single_end_reads()\n print(f\"controller align_single_end_reads stop {datetime.now()}\")\n else:\n # Paired end reads\n self._read_file_pairs = self._pathcreator.get_read_file_pairs()\n self._lib_names = self._pathcreator.get_lib_names_paired_end()\n self._pathcreator.set_read_files_dep_file_lists_paired_end(\n self._read_file_pairs, self._lib_names\n )\n self._prepare_reads_paired_end()\n print(f\"controller align_paired_end_reads start {datetime.now()}\")\n self._align_paired_end_reads()\n print(f\"controller align_paired_end_reads stop {datetime.now()}\")\n print(\n f\"controller generate_read_alignment_stats start {datetime.now()}\"\n )\n self._generate_read_alignment_stats(\n self._lib_names,\n self._pathcreator.read_alignment_bam_paths,\n self._pathcreator.unaligned_reads_paths,\n self._pathcreator.read_alignments_stats_path,\n self._args.paired_end,\n )\n print(f\"controller generate_read_alignment_stats stop {datetime.now()}\")\n if self._args.crossalign_cleaning:\n self._remove_crossaligned_reads()\n\n if self._args.paired_end:\n # Build a bam file containing fragments merged from read\n # pairs\n if not self._args.no_fragment_building:\n fragments = True\n # sort the bam files by name and sam tag hit index to\n # accelerate fragment building\n print(\n f\"controller sort bams by name and index start {datetime.now()}\"\n )\n self._sort_bams_by_name_and_index()\n print(\n f\"controller sort bams by name and index end {datetime.now()}\"\n )\n # build the fragments bam file\n print(f\"controller build_fragments start {datetime.now()}\")\n self._build_fragments()\n print(f\"controller build_fragments stop {datetime.now()}\")\n # generate fragment alignment stats\n print(\n f\"controller generate_fragment_alignmnet_stats start {datetime.now()}\"\n )\n self._generate_read_alignment_stats(\n self._lib_names,\n self._pathcreator.aligned_fragments_bam_paths,\n self._pathcreator.unaligned_reads_paths,\n self._pathcreator.fragment_alignments_stats_path,\n self._args.paired_end,\n fragments,\n )\n print(\n f\"controller generate_fragment_alignmnet_stats stop {datetime.now()}\"\n )\n # write fragment stats table\n print(\n f\"controller write_alignment_stats_table fragments start {datetime.now()}\"\n )\n self._write_alignment_stat_table(\n self._pathcreator.fragment_alignments_stats_path,\n self._pathcreator.fragment_alignment_stats_table_path,\n self._pathcreator.fragment_alignment_stats_table_transposed_path,\n fragments,\n )\n print(\n f\"controller write_alignment_stats_table fragments stop {datetime.now()}\"\n )\n print(\n 
f\"controller write_alignment_stats_table reads start {datetime.now()}\"\n )\n self._write_alignment_stat_table(\n self._pathcreator.read_alignments_stats_path,\n self._pathcreator.read_alignment_stats_table_path,\n self._pathcreator.read_alignment_stats_table_transposed_path,\n )\n print(\n f\"controller write_alignment_stats_table reads stop {datetime.now()}\"\n )", "def process(\n self,\n name_and_reads: Tuple[str, List[reads_pb2.Read]],\n ) -> Iterable[Tuple[str, List[reads_pb2.Read]]]:\n name, reads = name_and_reads[0], list(name_and_reads[1])\n reads_copy = copy.deepcopy(reads)\n # Indent sequence strings by starting position.\n for read in reads_copy:\n indent = dc_constants.GAP_OR_PAD * read.alignment.position.position\n read.aligned_sequence = indent + read.aligned_sequence\n indented_cigar_str = indent + struct_utils.get_string_field(\n read.info, 'expanded_cigar')[0]\n struct_utils.set_string_field(read.info, 'expanded_cigar',\n indented_cigar_str)\n yield name, reads_copy", "def _align_single_end_reads(self):\n read_aligner = ReadAligner(self._args.segemehl_bin, self._args.progress)\n if self._file_needs_to_be_created(self._pathcreator.index_path):\n read_aligner.build_index(\n self._pathcreator.ref_seq_path_list,\n self._pathcreator.index_path,\n )\n for read_path, output_path, nomatch_path in zip(\n self._pathcreator.processed_read_paths,\n self._pathcreator.read_alignment_bam_paths,\n self._pathcreator.unaligned_reads_paths,\n ):\n if not self._file_needs_to_be_created(output_path):\n continue\n\n read_aligner.run_alignment(\n read_path,\n self._pathcreator.index_path,\n self._pathcreator.ref_seq_path_list,\n output_path,\n nomatch_path,\n int(self._args.processes),\n int(self._args.segemehl_accuracy),\n float(self._args.segemehl_evalue),\n self._args.split,\n paired_end=False,\n )", "def parse(self, filehandle):\n l = filehandle.readline()\n if l.split()[0] != '##maf':\n return\n else:\n self.setpar(l.split()[1:])\n\n l=filehandle.readline()\n while l:\n la = l.split()\n## print la\n if(len(la)==0 or la[0]=='#'):\n## print \"skipping\"\n 1\n elif(la[0]=='a'):\n## print \"reading alignment\"\n self.readalign(la[1:], filehandle)\n else:\n## print \"end of records\"\n return\n\n l=filehandle.readline()", "def process(\n self,\n name_and_reads: Tuple[str, Iterable[reads_pb2.Read]],\n ) -> Iterable[Tuple[str, List[reads_pb2.Read]]]:\n\n name, reads = name_and_reads[0], list(name_and_reads[1])\n # Note, examples will only be included in one of the initial counters since\n # we are returning early.\n if not reads:\n self.no_reads_counter.inc()\n return\n\n # Do not error for labels that have multiple alignments to correct molecule.\n # One of the alignments may be a supplementary alignment.\n if self.is_label and len(reads) > 1:\n logging.info('Unexpected: %d labels for %s', len(reads),\n reads[0].fragment_name)\n self.multiple_alignments_counter.inc()\n\n reads_copy = copy.deepcopy(reads)\n for read in reads_copy:\n assert read.aligned_sequence\n base_index = 0\n expanded_sequence = ''\n expanded_cigar_str = ''\n new_cigar_ops = []\n if not self.is_label:\n pw = struct_utils.get_int_field(read.info, 'pw')\n ip = struct_utils.get_int_field(read.info, 'ip')\n new_pw = []\n new_ip = []\n\n for op in read.alignment.cigar:\n # Skip over ops we don't want, such as soft clips.\n if op.operation not in dc_constants.OPS_TO_CONSIDER:\n base_index += op.operation_length\n continue\n if op.operation in dc_constants.READ_ADVANCING_OPS:\n start = base_index\n end = start + 
op.operation_length\n expanded_sequence += read.aligned_sequence[start:end]\n base_index += op.operation_length\n if not self.is_label:\n new_pw += pw[start:end]\n new_ip += ip[start:end]\n else:\n # Add a special token in sequence where we have deletion.\n expanded_sequence += dc_constants.GAP_OR_PAD * op.operation_length\n\n new_cigar_ops.append(op)\n op_char = cigar_utils.CIGAR_OPS_TO_CHAR[op.operation]\n expanded_cigar_str += op_char * op.operation_length\n\n # Update the read sequence.\n read.aligned_sequence = expanded_sequence\n assert len(read.aligned_sequence) == len(expanded_cigar_str)\n\n # Update the read cigar to only include ops that were kept.\n del read.alignment.cigar[:]\n read.alignment.cigar.extend(new_cigar_ops)\n\n # Save pw, ip, and expanded cigar string to be used downstream.\n if not self.is_label:\n struct_utils.set_int_field(read.info, 'pw', new_pw)\n struct_utils.set_int_field(read.info, 'ip', new_ip)\n # PW and IP won't be the same length as read.aligned_sequence here\n # because we haven't yet spaced out PW and IP based on gaps/padding.\n struct_utils.set_string_field(read.info, 'expanded_cigar',\n expanded_cigar_str)\n yield name, reads_copy", "def get_read_alignments(sam_f):\n sparser = samparser.SamParser(sam_f=sam_f, aligned_only=True, mapq=20, mismatches=1)\n \n # parse all the hits into this to make sure multi mapping hits map to the same contig\n hit_dict = {}\n ambig_reads = 0\n processed_reads = 0\n for hit in sparser.parse_sam_file():\n processed_reads += 1\n if hit_dict.get(hit['qname'], 0):\n if hit_dict[hit['qname']] != hit['rname']:\n print(\"Warning read: {} aligns to two different contigs\".format(hit['qname']), file=sys.stderr)\n ambig_reads += 1\n else:\n continue\n else:\n hit_dict[hit['qname']] = hit['rname']\n\n print(\"{} of {} processed reads were ambiguous.\".format(ambig_reads, processed_reads))\n\n # condense the hit dict into a contig dict\n contig_dict = {}\n for read, contig in hit_dict.items():\n if contig_dict.get(contig, 0):\n contig_dict[contig].append(read)\n else:\n contig_dict[contig] = [read]\n\n return contig_dict", "def process_align(self):\n\t\tstm_t_dict = self._process_recog()\n\t\ttrans_t_dict = self._process_trans()\n\t\talign_obj = viterbi_align(stm_t_dict, trans_t_dict, self.label, self.pair_file_path)\n\t\tself.trans_t_dict = align_obj.viterbi(0, len(stm_t_dict)-1, 0, len(trans_t_dict)-1)", "def __init__(self, reads1, reads2):\n print \"Start Analysis...\"\n self.alignment()\n self.sai_to_sam()\n self.sam_to_bam()\n #self.clean_files()", "def readalign(self, opt, fh):\n## print \"entering readalign:\", opt\n edgeInfo = {}\n for p in opt:\n (key, value) = p.split('=')\n edgeInfo[key] = value\n\n s = fh.readline().split()\n## print s;\n if(len(s) == 7 and s[0] == 's'):\n vseq = self._vseq(len(s[6]))\n self.mAlign += vseq\n while len(s) == 7 and s[0] == 's':\n # Add the sequence name to the dictionary,\n # then add a corresponding node to the mapping.\n if s[1] not in self.sequences:\n self.sequences[s[1]] = AnonSequence(int(s[5]), s[1])\n self.mAlign += self.sequences[s[1]]\n\n # PROCESS THE KNOWN INTERVALS\n if(s[4] == '-'):\n ns = self.sequences[s[1]][-int(s[2]):-int(s[2]) - int(s[3])]\n self.sequences[s[1]].seqsplice(reverse_complement(\n s[6].replace('-', '')), ns.start, ns.stop)\n else:\n ns = self.sequences[s[1]][int(s[2]):int(s[2]) + int(s[3])]\n self.sequences[s[1]].seqsplice(s[6].replace('-', ''),\n ns.start, ns.stop)\n\n for inter in refIntervals(s[6]):\n 
self.mAlign[vseq[inter[0]:inter[1]]][ns[inter[2]:inter[3]]] = \\\n (inter[4])\n self.mAlign[ns[inter[2]:inter[3]]][vseq[inter[0]:inter[1]]] = \\\n (inter[4])\n\n s = fh.readline().split()", "def readalignments(self, *, filename=None, interactive=True):\n interactive = interactive and self.interactive and filename is None\n if filename is None: filename = self.alignmentsfilename\n self.logger.info(\"reading alignments from \"+str(filename))\n\n try:\n alignmentresults = {o.n: o for o in readtable(filename, self.alignmentresulttype, extrakwargs={\"pscale\": self.pscale})}\n except Exception:\n if interactive:\n print()\n traceback.print_exc()\n print()\n answer = \"\"\n while answer.lower() not in (\"y\", \"n\"):\n answer = input(f\"readalignments() gave an exception for {self.SlideID}. Do the alignment? [Y/N] \")\n if answer.lower() == \"y\":\n if not hasattr(self, \"images\"): self.getDAPI()\n self.align()\n return self.readalignments(interactive=False)\n raise\n\n for o in self.overlaps:\n try:\n o.result = alignmentresults[o.n]\n except KeyError:\n pass\n self.logger.info(\"done reading alignments for \"+self.SlideID)", "def read_in_file():\n\t# Declare variables\n\treads = []\n\n\t# Get command line arguments\n\targuments = sys.argv\n\targuments_length = len(arguments)\n\n\t# Read file is the first argument\n\tread_file_name = arguments[1]\n\n\t# Process read file \n\tread_file = open(read_file_name, 'r')\n\tfor line in read_file:\n\t\tread_info = line.split()\n\t\tread_string = read_info[2].replace('\\'', '')\n\t\tnew_read = GenerativeRead(read_string, [], read_info[5], read_info[3], None, [], read_info[0], read_info[1], read_info[4]) \n\t\treads.append(new_read)\n\tread_file.close()\n\n\t# Repeat regions file in the second argument\n\trepeat_file_name = arguments[2]\n\n\t# Process repeat file\n\trepeat_file = open(repeat_file_name, 'r')\n\talignments = [[]]\n\talignment_index = -1\n\tprevious_line = ''\n\n\n\tfor line in repeat_file:\n\t\talignment_info = line.split()\n\n\t\t# This consists of a tuple of alignment string, alignment start position and alignment chromosome\n\t\t#new_align = alignment_info[2], alignment_info[4], alignment_info[3]\n\n\t\tnew_align = Alignment(alignment_info[2], None, alignment_info[4], alignment_info[3])\n\n\t\tif previous_line != alignment_info[0]:\n\t\t\t# It is not a repeat\n\t\t\talignment_index = alignment_index + 1\n\t\t\talignments.append([])\n\t\t\tprevious_line = alignment_info[0]\n\n\t\talignments[alignment_index].append(new_align)\n\n\trepeat_file.close()\n\n\t# Associate each read with the other alignments\n\tfor read in reads:\n\t\t# Find the other alignments\n\t\tpos = read.get_position()\n\t\tfound = False\n\t\tfound_index = -1\n\n\t\tfor a_index, alignment_lists in enumerate(alignments):\n\t\t\t# find matching alignments\n\t\t\t# TODO: Don't add alignment already have\n\t\t\t# TODO: Make functional with filter\n\t\t\tfor align in alignment_lists:\n\t\t\t\tif align.get_position() == pos:\n\t\t\t\t\tfound = True\n\t\t\t\t\tfound_index = a_index\n\t\t\t\t\tbreak\n\n\t\t\tif found is True:\n\t\t\t\tbreak\n\n\t\tif found is True:\n\t\t\tfor new_align in alignments[found_index]:\n\t\t\t\tread.add_alignment(new_align)\n\t\t\t\n\n\n\t# SNP files are the remaining ones\n\tsnp_file_names = [arguments[file_id] for file_id in range(3, arguments_length) ]\n\n\t# Process SNP files\n\tfor file_name in snp_file_names:\n\t\tsnp_file = open(file_name, 'r')\n\n\t\tfor line in snp_file:\n\t\t\tsnp_info = line.split()\n\t\t\tsnps = 
snp_info[3].split('/')\n\t\t\tsnp_pos = int(float(snp_info[2]))\n\n\t\t\t# Ignore alleles that are longer than one base\n\n\t\t\t\n\t\t\tif all(len(x) < 2 for x in snps):\n\n\t\t\t\t# Iterate through reads and determine whether or not it contains this SNP\n\t\t\t\tpos_low = snp_pos - 49\n\t\t\t\n\n\t\t\t\tfor read in reads:\n\t\t\t\t\tpositions = read.get_alignment_positions()\n\n\t\t\t\t\tfor p_index, p in enumerate(positions):\n\t\t\t\t\t\tp = int(float(p))\n\t\t\t\t\t\tif p >= pos_low and p <= snp_pos:\n\t\t\t\t\t\t\t# Get index of snp\n\t\t\t\t\t\t\toffset = snp_pos - p\n\t\t\t\t\t\t\tcalls = [0, 0, 0, 0]\n\t\t\t\t\t\t\tfor snp in snps:\n\t\t\t\t\t\t\t\tcall_index = get_base_num(snp)\n\t\t\t\t\t\t\t\tcalls[call_index] = 1\n\n\t\t\t\t\t\t\t# Add the SNP to the read\n\t\t\t\t\t\t\tread.add_snp(p_index, offset, calls)\n\t\t\t\t\t\t\t\n\t\tsnp_file.close()\n\treturn reads", "def _align_paired_end_reads(self):\n read_aligner = ReadAligner(self._args.segemehl_bin, self._args.progress)\n if self._file_needs_to_be_created(self._pathcreator.index_path):\n read_aligner.build_index(\n self._pathcreator.ref_seq_path_list,\n self._pathcreator.index_path,\n )\n for read_path_pair, output_path, nomatch_path in zip(\n self._pathcreator.processed_read_path_pairs,\n self._pathcreator.read_alignment_bam_paths,\n self._pathcreator.unaligned_reads_paths,\n ):\n if not self._file_needs_to_be_created(output_path):\n continue\n read_aligner.run_alignment(\n read_path_pair,\n self._pathcreator.index_path,\n self._pathcreator.ref_seq_path_list,\n output_path,\n nomatch_path,\n int(self._args.processes),\n int(self._args.segemehl_accuracy),\n float(self._args.segemehl_evalue),\n self._args.split,\n paired_end=True,\n )", "def _scan_alignment(self,handle, consumer):\n while 1:\n line = handle.readline()\n if not line:\n break\n if is_blank_line(line):\n continue\n else:\n consumer.query_alignment(line)\n read_and_call(handle, consumer.positive_alignment)\n read_and_call(handle, consumer.hit_alignment)", "def process(\n self,\n name_and_reads: Tuple[str, List[reads_pb2.Read]],\n ) -> Iterable[Tuple[str, List[reads_pb2.Read]]]:\n\n name, subreads = name_and_reads\n subreads_copy = copy.deepcopy(subreads)\n for read in subreads_copy:\n pw = struct_utils.get_int_field(read.info, 'pw')\n ip = struct_utils.get_int_field(read.info, 'ip')\n new_pw = []\n new_ip = []\n pw_ip_index = 0\n\n for base in read.aligned_sequence:\n # Padding and gap tokens are strings and cannot directly be added to pw\n # and ip, which are lists of ints. 
Instead, integer representations of\n # each must be added.\n if base == dc_constants.GAP_OR_PAD:\n new_pw.append(dc_constants.GAP_OR_PAD_INT)\n new_ip.append(dc_constants.GAP_OR_PAD_INT)\n # If base is neither padding nor gap, copy over the existing pw and ip.\n else:\n assert pw_ip_index < len(pw)\n assert pw_ip_index < len(ip)\n new_pw.append(pw[pw_ip_index])\n new_ip.append(ip[pw_ip_index])\n pw_ip_index += 1\n\n # pw, ip, and sequence should all be of the same length.\n assert len(new_pw) == len(read.aligned_sequence)\n assert len(new_ip) == len(read.aligned_sequence)\n struct_utils.set_int_field(read.info, 'pw', new_pw)\n struct_utils.set_int_field(read.info, 'ip', new_ip)\n\n yield name, subreads_copy", "def align(self):\n\n # load the alignment parameters into the align_params object\n self.align_params.load_params_from_file(self.input_file)\n\n # populate the score matrices based on the input parameters\n self.populate_score_matrices()\n\n # perform a traceback and write the output to an output file\n\n ### FILL IN ###", "def align(args) :\n from aligner import align_reads\n align_reads(args)", "def _read_next_alignment(self, stream):", "def process(\n self,\n name_and_reads: Tuple[str, List[reads_pb2.Read]],\n ) -> Iterable[Tuple[str, List[reads_pb2.Read]]]:\n name, subreads = name_and_reads\n subreads_copy = copy.deepcopy(subreads)\n # Stop if we have reached end of all reads.\n base_index = 0\n out_of_bounds = False\n while not out_of_bounds:\n out_of_bounds, has_insert = get_index_info(subreads_copy, base_index)\n # `has_insert` will only be true if we are not out of bounds, meaning\n # at least one read has a base at `base_index`.\n if has_insert:\n shift(subreads_copy, cigar_pb2.CigarUnit.INSERT, base_index)\n base_index += 1\n yield name, subreads_copy", "def parse_reads_and_select_candidates(self, reads):\n st_time = time.time()\n # read_id_list = []\n total_reads = 0\n read_unique_id = 0\n for read in reads:\n # check if the read is usable\n if read.mapping_quality >= DEFAULT_MIN_MAP_QUALITY and read.is_secondary is False \\\n and read.is_supplementary is False and read.is_unmapped is False and read.is_qcfail is False:\n\n read.query_name = read.query_name + '_' + str(read_unique_id)\n if self.find_read_candidates(read=read):\n # read_id_list.append(read.query_name)\n total_reads += 1\n read_unique_id += 1\n\n if total_reads == 0:\n return []\n\n selected_allele_list = []\n postprocess_read_id_list = set()\n for pos in self.positional_allele_dictionary:\n if pos < self.region_start_position or pos > self.region_end_position:\n continue\n ref = self.reference_dictionary[pos]\n\n all_allele_dictionary = self.positional_allele_dictionary[pos]\n all_mismatch_count = 0\n for allele in all_allele_dictionary:\n all_mismatch_count += all_allele_dictionary[allele]\n\n # pick the top 2 most frequent allele\n allele_frequency_list = list(sorted(all_allele_dictionary.items(), key=operator.itemgetter(1, 0),\n reverse=True))[:PLOIDY]\n allele_list = self._filter_alleles(pos, allele_frequency_list)\n alt1 = allele_list[0] if len(allele_list) >= 1 else None\n alt2 = allele_list[1] if len(allele_list) >= 2 else '.'\n if alt1 is None:\n continue\n mq_rms = round(math.sqrt(self.rms_mq[pos]/self.coverage[pos]), 3) if self.coverage[pos] > 0 else 0\n dp = self.coverage[pos]\n ref_count = self.coverage[pos] - all_mismatch_count\n candidate_record = [self.chromosome_name] + self._get_record(pos, alt1, alt2, ref, ref_count) + [mq_rms] + [dp]\n 
postprocess_read_id_list.update(self.read_id_by_position[pos])\n selected_allele_list.append(candidate_record)\n\n postprocess_read_id_list = list(postprocess_read_id_list)\n if len(selected_allele_list) > 0:\n self.postprocess_reference()\n self.postprocess_reads(postprocess_read_id_list)\n\n return selected_allele_list", "def mapEachAlignment(self, workflow=None, alignmentData=None, passingData=None, transferOutput=True, **keywords):\n\t\treturnData = PassingData(jobDataLs=[])\n\t\t\n\t\tchrIDSet = passingData.chrIDSet\n\t\tchr2IntervalDataLs = passingData.chr2IntervalDataLs\n\t\ttopOutputDirJob = passingData.topOutputDirJob\n\t\tplotOutputDirJob = passingData.plotOutputDirJob\n\t\t\n\t\trefFastaFile = passingData.refFastaFList[0]\n\t\t\n\t\talignmentData = passingData.alignmentData\n\t\t\n\t\talignment = alignmentData.alignment\n\t\t\n\t\tminDP = int(max(1, alignment.median_depth/2))\n\t\tmaxDP = int(alignment.median_depth*2)\n\t\toutputFilenamePrefix = passingData.bamFnamePrefix\n\t\t\n\t\tindividual_alignment_consensus_sequence = self.db_vervet.checkIndividualAlignmentConsensusSequence(individual_alignment_id=alignment.id, \\\n\t\t\t\t\t\t\t\t\tminDP=minDP, \\\n\t\t\t\t\t\t\t\t\tmaxDP=maxDP, minBaseQ=self.minBaseQ, minMapQ=self.minMapQ,\\\n\t\t\t\t\t\t\t\t\tminRMSMapQ=self.minRMSMapQ, minDistanceToIndel=self.minDistanceToIndel)\n\t\tif individual_alignment_consensus_sequence:\n\t\t\tconsensusSequenceFname = individual_alignment_consensus_sequence.getFileAbsPath(oldDataDir=self.db_vervet.data_dir, \\\n\t\t\t\t\t\t\t\t\t\t\t\tnewDataDir=self.data_dir)\n\t\t\tconsensusSequenceFile = self.registerOneInputFile(inputFname=consensusSequenceFname, \\\n\t\t\t\t\t\t\t\t\t\tfolderName=self.pegasusFolderName)\n\t\t\textractConsensusSequenceFromAlignmentJob = PassingData(output=consensusSequenceFile)\n\t\telse:\n\t\t\textractConsensusSubWorkflowData = self.extractConsensusSequenceFromAlignmentSubWorkflow(alignmentData=alignmentData, \\\n\t\t\t\t\t\t\t\t\t\t\t\trefFastaFile=refFastaFile,\\\n\t\t\t\t\t\t\t\t\t\t\t\toutputFilenamePrefix=outputFilenamePrefix,\\\n\t\t\t\t\t\t\t\t\t\t\t\tchrIDSet=chrIDSet, \\\n\t\t\t\t\t\t\t\t\t\t\t\tchr2IntervalDataLs=chr2IntervalDataLs, passingData=passingData, \\\n\t\t\t\t\t\t\t\t\t\t\t\ttransferOutput=False)\n\t\t\textractConsensusSequenceFromAlignmentJob = extractConsensusSubWorkflowData.combineConsensusSequenceJob\n\t\t\t\n\t\t\tlogFile = File(os.path.join(topOutputDirJob.output, '%s_alignment%s_2DB.log'%(outputFilenamePrefix, alignment.id)))\n\t\t\tself.addGenericFile2DBJob(executable=self.AddIndividualAlignmentConsensusSequence2DB, \\\n\t\t\t\t\t\t\t\t\tinputFile=extractConsensusSequenceFromAlignmentJob.output, \\\n\t\t\t\t\t\t\t\t\tinputArgumentOption=\"-i\", \\\n\t\t\t\t\t\tdata_dir=self.data_dir, logFile=logFile, commit=self.commit,\\\n\t\t\t\t\t\tparentJobLs=[extractConsensusSequenceFromAlignmentJob], extraDependentInputLs=None, \\\n\t\t\t\t\t\textraOutputLs=None, transferOutput=True, \\\n\t\t\t\t\t\textraArgumentList=[\"--individual_alignment_id %s\"%(alignment.id), \" --format fastq\", \"--minDP %s\"%(minDP), \\\n\t\t\t\t\t\t\t\t\t\t\"--maxDP=%s\"%(maxDP), \"--minBaseQ=%s\"%(self.minBaseQ), \\\n\t\t\t\t\t\t\t\t\t\t\"--minMapQ %s\"%(self.minMapQ), \"--minRMSMapQ %s\"%(self.minRMSMapQ), \\\n\t\t\t\t\t\t\t\t\t\t\"--minDistanceToIndel %s\"%(self.minDistanceToIndel)], \\\n\t\t\t\t\t\tjob_max_memory=2000, sshDBTunnel=self.needSSHDBTunnel, \\\n\t\t\t\t\t\tkey2ObjectForJob=None)\n\t\t\n\t\t#select sequences only from specific 
chromosomes\n\t\tchooseChromosomeConsensusSequenceOutputFile = File(os.path.join(topOutputDirJob.output, \\\n\t\t\t\t\t\t\t\t\t\t\t'%s_%sChromosomes.fastq.gz'%(outputFilenamePrefix, len(chrIDSet))))\n\t\tchooseChromosomeConsensusSequenceJob = self.addGenericJob(executable=self.SelectChromosomeSequences, \\\n\t\t\t\t\tinputFile=extractConsensusSequenceFromAlignmentJob.output, \\\n\t\t\t\t\toutputFile=chooseChromosomeConsensusSequenceOutputFile, inputFileList=[], \\\n\t\t\t\t\tparentJobLs=[extractConsensusSequenceFromAlignmentJob, topOutputDirJob], \\\n\t\t\t\t\textraDependentInputLs=[], extraOutputLs=[], transferOutput=False, \\\n\t\t\t\t\textraArguments=None, extraArgumentList=[\"--chromosomeList\", ','.join(list(chrIDSet)), \\\n\t\t\t\t\t\t\t\t\t\t\t\"--inputFileFormat 2 --outputFileFormat 2\"], \\\n\t\t\t\t\tjob_max_memory=1000, sshDBTunnel=None, \\\n\t\t\t\t\tkey2ObjectForJob=None, no_of_cpus=None, walltime=120)\n\t\t\n\t\treturnData = self.PSMCOnFastQSubWorkflow(fastQFile=chooseChromosomeConsensusSequenceOutputFile, \\\n\t\t\t\t\t\toutputFilenamePrefix=outputFilenamePrefix, passingData=passingData, \\\n\t\t\t\t\t\tparentJobLs=[chooseChromosomeConsensusSequenceJob], affiliatedDBEntry=alignment,\\\n\t\t\t\t\t\tpsmcPlotTitle='depth%s-%s'%(minDP, maxDP), transferOutput=transferOutput)\n\t\t\n\t\treturn returnData", "def _parse_alignment( alignment ):\n log.info(\"Parsing subread locations from alignment data\")\n locations = {}\n for entry in BlasrReader( alignment ):\n if entry.tstrand == '1':\n start = int(entry.tlength) - int(entry.tend)\n end = int(entry.tlength) - int(entry.tstart)\n else:\n start = int(entry.tstart)\n end = int(entry.tend)\n locations[entry.qname] = (start, end)\n return locations", "def retrieve_metadata_alignments(self, fields):\n print 'retrieve_metadata_alignments'\n alignments = dict()\n for field in fields:\n root = self.alignment_config_items['alignment_pages_root']\n alignment_template = self.alignment_config_items['alignment_template']\n #wikipage = root + field\n wikipage = field\n alignments[field] = self.retrieve_from_wiki(wikipage,\n alignment_template)\n self.mapper = alignments", "def set_all_lines_alignments(self, alignment):\n for line_no in range(0, self.lines[0].__len__()):\n self.change_alignment_for_a_line(alignment, line_no)\n return self", "def parse(self):\n for line in self.lines:\n self.read_line(line)\n return self.assembler_lines", "def parse_all(self):\n\n # Generates a list of apartment urls\n self.parse_apartment_urls()\n\n # Parses each apartment url and stores it in apartment_data\n for apartment_url in self.apartment_urls:\n self.parse_single_page(apartment_url)", "def _self_align(self):\n logging.info(\"Splitting palindrome.\")\n logging.debug(\"Making reverse complement sequences of reads in \" +\n \"{i} to {o}\".format(i=self.ori_all_reads_fasta,\n o=self.rc_all_reads_fasta))\n num_reads = revcmp_fasta(self.ori_all_reads_fasta,\n self.rc_all_reads_fasta)\n\n reads_per_split = max(1, int(num_reads/self.nproc) + 1)\n logging.debug(\"Splitting {f} to small files each containing {n} reads.\".\n format(f=self.ori_all_reads_fasta, n=reads_per_split))\n fs = FastaSplitter(input_fasta=self.ori_all_reads_fasta,\n reads_per_split=reads_per_split,\n out_dir=self.out_dir,\n out_prefix=\"reads.split.\")\n fs.split()\n sp_fasta_files = fs.out_fns\n\n logging.debug(\"Splitting {f} to smaller files.\".\n format(f=self.rc_all_reads_fasta))\n rc_fs = FastaSplitter(input_fasta=self.rc_all_reads_fasta,\n reads_per_split=reads_per_split,\n 
out_dir=self.out_dir,\n out_prefix=\"rc_reads.split.\")\n rc_fs.split()\n rc_sp_fasta_files = rc_fs.out_fns\n\n logging.debug(\"Aligning each read in {i} to its revese compelement \" +\n \"read using sdpMatcher.\".format(i=self.ori_all_reads_fasta))\n\n sdps = [\"{f}.sdp\".format(f=f) for f in sp_fasta_files]\n jobs = []\n for f, rc_f, sdp in zip(sp_fasta_files, rc_sp_fasta_files, sdps):\n cmd = \"sdpMatcher {f} {rc_f} \".format(f=f, rc_f=rc_f) + \\\n \"10 -local > {sdp} \".format(sdp=sdp)\n logging.debug(\"CMD: {cmd}\".format(cmd=cmd))\n jobs.append(cmd)\n\n pool = Pool(processes=self.nproc)\n rets = pool.map(backticks, jobs)\n pool.close()\n pool.join()\n\n for i, job in enumerate(jobs):\n if rets[i][1] != 0:\n errMsg = \"Job {j} failed.\".format(j=job) + str(rets[i][2])\n raise RuntimeError(errMsg)\n\n logging.debug(\"Concatenating all sdp outputs to {f}\".\n format(f=self.sdp_out_file))\n cat_files(src=sdps, dst=self.sdp_out_file)\n\n logging.debug(\"Cleaning intermediate fasta & sdp files.\")\n fs.rmOutFNs()\n rc_fs.rmOutFNs()\n\n for f in sdps:\n os.remove(f)", "def parse_match(self, read_id, alignment_position, length, read_sequence, ref_sequence, qualities):\n start = alignment_position\n stop = start + length\n for i in range(start, stop):\n\n self.coverage[i] += 1\n allele = read_sequence[i-alignment_position]\n ref = ref_sequence[i-alignment_position]\n self.base_dictionary[read_id][i] = (allele, qualities[i-alignment_position])\n # self._update_base_dictionary(read_id, i, allele, qualities[i-alignment_position])\n if allele != ref:\n self.mismatch_count[i] += 1\n self._update_read_allele_dictionary(read_id, i, allele, MISMATCH_ALLELE, qualities[i-alignment_position])\n else:\n self.match_count[i] += 1\n # this slows things down a lot. Don't add reference allele to the dictionary if we don't use them\n # self._update_read_allele_dictionary(i, allele, MATCH_ALLELE)", "def parse(self):\n for section in self.sections:\n section.parse()", "def run_all(self):\n\n self.run_mash() ###Run MASH analysis\n self.filter_query() ###Filter fasta sequences out based on p value\n self.build_index(self.filtered_out_path) ###Build index for off-target analysis\n os.remove(self.filtered_out_path) ###Clean up intermediate fasta file\n self.format_gRNA(self.path1) ###Format everything in the right order\n self.run_OTF() ###Run off-target analysis\n self.output_parse() ###Parse output values and update table", "def parse_records(self):\n for record in sp.parse(gzip.open(\n \"./human_uniprot_04_07_20.gz\", 'rt')):\n # print(record.taxonomy_id)\n # if record.organism != \"Homo sapiens\":\n # continue\n # print(record.features[0])\n # for comment in record.comments:\n # if comment.startswith(\"SUBCELLULAR LOCATION\"):\n # print(comment)\n self.extract_features_to_dict(record)\n self.extract_localization(record)", "def parse_sam(in_file, out_file, read_type , strand):\n out_handle = open(out_file , 'a')\n if strand == 'watson':\n nt = ['C']\n else:\n nt = ['G']\n count = 0\n # print 'Warning, only works for forward mapped reads'\n mismatch = 0\n clip_count_total = 0\n for line in open(in_file, 'r'):\n modulo_line_no = count % 2\n #alternates between 0 and 1\n if line.startswith('@'):\n continue\n split_line = line.rstrip('\\n').split('\\t')\n #skip read pairs with improper flags.\n #TODO: do this filtering in mark_PCR_duplicates or elsewhere with access to pysam.\n if split_line[1] not in ['0', '99', '147']:\n mismatch += 1\n count += 1\n # continue\n char_count = ''\n clip_count = 0\n for char in 
split_line[5]:\n if not char.isalpha():\n char_count += char\n elif char == 'S':\n clip_count += int(char_count)\n else:\n char_count = ''\n if clip_count > 6:\n clip_count_total += 1\n count += 1\n # continue\n header = split_line[0].split('|')\n #meth_post list can be present for both R1 and R2 the last Samtools tag added should be the RN:Z: tag, look\n #to the right of this tag only\n meth_pos_list = split_line[0][split_line[0].rindex(':Z:'):].split('|')[1:]\n out_line = [header[0]]\n out_line += split_line[1:9]\n seq = list(split_line[9])\n try:\n meth_pos = [int(n) for n in meth_pos_list[-modulo_line_no].split(',')]\n for n in meth_pos:\n if n >= len(seq):\n break\n if seq[n] not in ['T','A']:\n break\n seq[n] = nt[-modulo_line_no]\n except ValueError:\n pass\n out_line += [''.join(seq)]\n out_line += split_line[10:]\n for item in header[1:]:\n if ':' in item and item not in out_line:\n out_line.append(item)\n # out_line += header[3:6]\n out_handle.write('\\t'.join(out_line) + '\\n')\n count += 1\n print('%s mismatches out of %s' % (mismatch, count))\n print('%s reads out of %s soft clipped more than 5' % (clip_count_total, count))", "def parse(\n sam_path, chroms_path, output, output_parsed_alignments, output_stats, **kwargs\n):\n parse_py(\n sam_path, chroms_path, output, output_parsed_alignments, output_stats, **kwargs\n )", "def __next__(self):\n handle = self.handle\n\n if self._header is None:\n line = handle.readline()\n else:\n # Header we saved from when we were parsing\n # the previous alignment.\n line = self._header\n self._header = None\n\n if not line:\n raise StopIteration\n\n while line.rstrip() != \"#=======================================\":\n line = handle.readline()\n if not line:\n raise StopIteration\n\n length_of_seqs = None\n number_of_seqs = None\n ids = []\n header_dict = {}\n\n while line[0] == \"#\":\n # Read in the rest of this alignment header,\n # try and discover the number of records expected\n # and their length\n parts = line[1:].split(\":\", 1)\n key = parts[0].lower().strip()\n if key == \"aligned_sequences\":\n number_of_seqs = int(parts[1].strip())\n assert len(ids) == 0\n # Should now expect the record identifiers...\n for i in range(number_of_seqs):\n line = handle.readline()\n parts = line[1:].strip().split(\":\", 1)\n assert i + 1 == int(parts[0].strip())\n ids.append(parts[1].strip())\n assert len(ids) == number_of_seqs\n if key == \"length\":\n length_of_seqs = int(parts[1].strip())\n\n # Parse the rest of the header\n if key == \"identity\":\n header_dict[\"identity\"] = int(parts[1].strip().split(\"/\")[0])\n if key == \"similarity\":\n header_dict[\"similarity\"] = int(parts[1].strip().split(\"/\")[0])\n if key == \"gaps\":\n header_dict[\"gaps\"] = int(parts[1].strip().split(\"/\")[0])\n if key == \"score\":\n header_dict[\"score\"] = float(parts[1].strip())\n\n # And read in another line...\n line = handle.readline()\n\n if number_of_seqs is None:\n raise ValueError(\"Number of sequences missing!\")\n if length_of_seqs is None:\n raise ValueError(\"Length of sequences missing!\")\n\n if (\n self.records_per_alignment is not None\n and self.records_per_alignment != number_of_seqs\n ):\n raise ValueError(\n \"Found %i records in this alignment, told to expect %i\"\n % (number_of_seqs, self.records_per_alignment)\n )\n\n seqs = [\"\" for id in ids]\n seq_starts = []\n index = 0\n\n # Parse the seqs\n while line:\n if len(line) > 21:\n id_start = line[:21].strip().split(None, 1)\n seq_end = line[21:].strip().split(None, 1)\n if 
len(id_start) == 2 and len(seq_end) == 2:\n # identifier, seq start position, seq, seq end position\n # (an aligned seq is broken up into multiple lines)\n id, start = id_start\n seq, end = seq_end\n if start >= end:\n # Special case, either a single letter is present,\n # or no letters at all.\n if seq.replace(\"-\", \"\") == \"\":\n start = int(start)\n end = int(end)\n else:\n start = int(start) - 1\n end = int(end)\n else:\n assert seq.replace(\"-\", \"\") != \"\", repr(line)\n start = int(start) - 1 # python counting\n end = int(end)\n\n if index < 0 or index >= number_of_seqs:\n raise ValueError(\n \"Expected index %i in range [0,%i)\"\n % (index, number_of_seqs)\n )\n # The identifier is truncated...\n assert id == ids[index] or id == ids[index][: len(id)]\n\n if len(seq_starts) == index:\n # Record the start\n seq_starts.append(start)\n\n # Check the start...\n if start >= end:\n assert seq.replace(\"-\", \"\") == \"\", line\n elif start - seq_starts[index] != len(seqs[index].replace(\"-\", \"\")):\n raise ValueError(\n \"Found %i chars so far for sequence %i (%s, %r), line says start %i:\\n%s\"\n % (\n len(seqs[index].replace(\"-\", \"\")),\n index,\n id,\n seqs[index],\n start,\n line,\n )\n )\n seqs[index] += seq\n\n # Check the end ...\n if end != seq_starts[index] + len(seqs[index].replace(\"-\", \"\")):\n raise ValueError(\n \"Found %i chars so far for sequence %i (%s, %r, start=%i), file says end %i:\\n%s\"\n % (\n len(seqs[index].replace(\"-\", \"\")),\n index,\n id,\n seqs[index],\n seq_starts[index],\n end,\n line,\n )\n )\n\n index += 1\n if index >= number_of_seqs:\n index = 0\n else:\n # just a start value, this is just alignment annotation (?)\n # print \"Skipping: \" + line.rstrip()\n pass\n elif line.strip() == \"\":\n # Just a spacer?\n pass\n else:\n raise ValueError(\"Unrecognised EMBOSS pairwise line: %r\\n\" % line)\n\n line = handle.readline()\n if (\n line.rstrip() == \"#---------------------------------------\"\n or line.rstrip() == \"#=======================================\"\n ):\n # End of alignment\n self._header = line\n break\n\n assert index == 0\n\n if (\n self.records_per_alignment is not None\n and self.records_per_alignment != len(ids)\n ):\n raise ValueError(\n \"Found %i records in this alignment, told to expect %i\"\n % (len(ids), self.records_per_alignment)\n )\n\n records = []\n for id, seq in zip(ids, seqs):\n if len(seq) != length_of_seqs:\n # EMBOSS 2.9.0 is known to use spaces instead of minus signs\n # for leading gaps, and thus fails to parse. This old version\n # is still used as of Dec 2008 behind the EBI SOAP webservice:\n # http://www.ebi.ac.uk/Tools/webservices/wsdl/WSEmboss.wsdl\n raise ValueError(\n \"Error parsing alignment - sequences of \"\n \"different length? 
You could be using an \"\n \"old version of EMBOSS.\"\n )\n records.append(SeqRecord(Seq(seq), id=id, description=id))\n return MultipleSeqAlignment(records, annotations=header_dict)", "def parse_bam():\n global sample_name, header, segmentID, bam\n sys.stderr.write(time.strftime(\"%c\") + \" Busy with parsing bam file...\\n\")\n bam = pysam.AlignmentFile(NanoSV.opts_bam, 'rb')\n if not bam.has_index():\n sys.exit('The bam has no index file')\n header = bam.header\n if 'HD' in header:\n if not header['HD']['SO'] == 'coordinate':\n sys.exit('The bam file is not coordinate sorted')\n if 'RG' in header:\n if type(header['RG']) is list:\n sample_name = header['RG'][0]['SM']\n else:\n sample_name = header['RG']['SM']\n else:\n sample_name = re.sub('(\\.sorted)?\\.bam$', '', str(NanoSV.opts_bam))\n\n for line in bam:\n if line.query_name in reads:\n read = reads[line.query_name]\n else:\n read = r.Read(line.query_name, line.infer_read_length())\n reads[line.query_name] = read\n\n if line.flag & 4 or line.mapping_quality < NanoSV.opts_min_mapq:\n continue\n segment = s.Segment(segmentID, line.query_name, line.flag, line.reference_name, line.reference_start+1, line.mapping_quality,\n line.query_alignment_length)\n segment.end = line.reference_start + line.reference_length\n if line.has_tag('MD'):\n matches = sum(map(int, re.findall(r\"(\\d+)\", line.get_tag('MD'))))\n segment.pid = format(matches / segment.length, '.3f')\n else:\n segment.pid = format(line.get_cigar_stats()[0][7] / segment.length, '.3f')\n if segment.pid == \"0.000\":\n segment.pid = format(line.get_cigar_stats()[0][0] / segment.length, '.3f')\n if line.flag & 16:\n if line.cigartuples[-1][0] == 5 or line.cigartuples[-1][0] == 4:\n segment.clip = line.cigartuples[-1][1]\n else:\n segment.clip = 0\n if line.cigartuples[0][0] == 5 or line.cigartuples[0][0] == 4:\n segment.clip_2 = line.cigartuples[0][1]\n else:\n segment.clip_2 = 0\n else:\n if line.cigartuples[0][0] == 5 or line.cigartuples[0][0] == 4:\n segment.clip = line.cigartuples[0][1]\n else:\n segment.clip = 0\n if line.cigartuples[-1][0] == 5 or line.cigartuples[-1][0] == 4:\n segment.clip_2 = line.cigartuples[-1][1]\n else:\n segment.clip_2 = 0\n if float(segment.pid) < NanoSV.opts_min_pid:\n continue\n read.addSegment(segment)\n segments[segmentID] = segment\n segmentID += 1", "def parse(self) -> None:\n self._parse_zone_files()\n self._process_rules()\n self._process_zones()\n self._process_links()", "def __parse_docs(self, docs, analyses=True):\n # iter over docs\n for i, doc in enumerate(docs):\n _meta = doc.attrib['title']\n # iter over examples in *doc*\n for snip in doc.getchildren()[1:]:\n _text = str()\n _idx = 0\n _target_idxs = list()\n _ana = list()\n # iter over words in cur example\n for word in snip.getchildren():\n if word.tag == 'text':\n _text += word.text\n _idx += len(word.text)\n \n if len(word.attrib) > 0:\n _text += word.attrib['text']\n # process target\n if word.attrib.get('target') is not None:\n _target_idxs.append((_idx, _idx + len(word.attrib['text'])))\n if analyses:\n _ana.append(self.__get_ana(word))\n \n _idx += len(word.attrib['text'])\n \n if _target_idxs:\n for i, ixs in enumerate(_target_idxs):\n if analyses:\n yield _text, ixs, _meta, _ana[i]\n else:\n yield _text, ixs, _meta, _ana\n else:\n continue", "def miraligner(args):\n hairpin, mirna = _download_mirbase(args)\n precursors = _read_precursor(args.hairpin, args.sps)\n matures = _read_mature(args.mirna, args.sps)\n gtf = _read_gtf(args.gtf)\n out_dts = []\n out_files = []\n for 
bam_fn in args.files:\n sample = op.splitext(op.basename(bam_fn))[0]\n logger.info(\"Reading %s\" % bam_fn)\n if bam_fn.endswith(\"bam\") or bam_fn.endswith(\"sam\"):\n bam_fn = _sam_to_bam(bam_fn)\n bam_sort_by_n = op.splitext(bam_fn)[0] + \"_sort\"\n pysam.sort(\"-n\", bam_fn, bam_sort_by_n)\n reads = _read_bam(bam_sort_by_n + \".bam\", precursors)\n elif bam_fn.endswith(\"fasta\") or bam_fn.endswith(\"fa\") or \\\n bam_fn.endswith(\"fastq\"):\n if args.collapse:\n bam_fn = _collapse_fastq(bam_fn)\n out_file = op.join(args.out, sample + \".premirna\")\n bam_fn = _filter_seqs(bam_fn)\n if args.miraligner:\n _cmd_miraligner(bam_fn, out_file, args.sps, args.hairpin, args.out)\n reads = _read_miraligner(out_file)\n out_files.append(out_file)\n else:\n raise ValueError(\"Format not recognized.\")\n\n if args.miraligner:\n _mirtop(out_files, args.hairpin, args.gtf, args.sps, args.out)\n\n if not args.miraligner:\n reads = _annotate(reads, matures, precursors)\n\n out_file = op.join(args.out, sample + \".mirna\")\n out_file, dt, dt_pre = _tab_output(reads, out_file, sample)\n try:\n vcf_file = op.join(args.out, sample + \".vcf\")\n if not file_exists(vcf_file):\n # if True:\n create_vcf(dt_pre, matures, gtf, vcf_file)\n try:\n import vcf\n vcf.Reader(filename=vcf_file)\n except Exception as e:\n logger.warning(e.__doc__)\n logger.warning(e)\n except Exception as e:\n # traceback.print_exc()\n logger.warning(e.__doc__)\n logger.warning(e)\n if isinstance(dt, pd.DataFrame):\n out_dts.append(dt)\n\n if out_dts:\n _create_counts(out_dts, args.out)\n else:\n print(\"No files analyzed!\")", "def get_alignments(self) -> list:", "def get_alignment_from(tree):\r\n msa = []\r\n for node in tree.get_terminals():\r\n alignment = self.msa_by_name[node.name.split(' ')[0]]\r\n if msa:\r\n msa.append(alignment)\r\n else:\r\n msa = MultipleSeqAlignment([alignment])\r\n\r\n return msa", "def _read_miraligner(fn):\n reads = defaultdict(realign)\n with open(fn) as in_handle:\n in_handle.readline()\n for line in in_handle:\n cols = line.strip().split(\"\\t\")\n iso = isomir()\n query_name, seq = cols[1], cols[0]\n chrom, reference_start = cols[-2], cols[3]\n iso.mirna = cols[3]\n subs, add, iso.t5, iso.t3 = cols[6:10]\n if query_name not in reads:\n reads[query_name].sequence = seq\n iso.align = line\n iso.start = reference_start\n iso.subs, iso.add = _parse_mut(subs), add\n logger.debug(\"%s %s %s %s %s\" % (query_name, reference_start, chrom, iso.subs, iso.add))\n reads[query_name].set_precursor(chrom, iso)\n return reads", "def filter_reads(alignment_file, readdb, read_dirs, quality_threshold=7, recursive=False, trim=False):\n assert alignment_file.endswith(\"bam\"), \"Alignment file must be in BAM format: {}\".format(alignment_file)\n # grab aligned segment\n if trim:\n assert isinstance(trim, int), \"Trim needs to be an integer: {}\".format(trim)\n else:\n trim = np.inf\n n_bases = 0\n n_files = 0\n with closing(pysam.AlignmentFile(alignment_file, 'rb')) as bamfile:\n name_indexed = pysam.IndexedReads(bamfile)\n name_indexed.build()\n for name, fast5 in parse_read_name_map_file(readdb, read_dirs, recursive=recursive):\n try:\n if trim < n_bases:\n print(\"Filtered {} files for {} bases\".format(n_files, n_bases))\n break\n iterator = name_indexed.find(name)\n for aligned_segment in iterator:\n if aligned_segment.is_secondary or aligned_segment.is_unmapped \\\n or aligned_segment.is_supplementary or aligned_segment.has_tag(\"SA\"):\n continue\n # get data and sanity check\n if aligned_segment.query_qualities is 
not None:\n if np.mean(aligned_segment.query_qualities) < quality_threshold:\n continue\n n_files += 1\n n_bases += aligned_segment.query_length\n yield fast5, aligned_segment\n except KeyError:\n print(\"Found no alignments for {}\".format(fast5))", "def process(\n self,\n name_and_reads: Tuple[str, List[reads_pb2.Read]],\n ) -> Iterable[Tuple[str, List[reads_pb2.Read]]]:\n name, subreads = name_and_reads\n subreads_copy = copy.deepcopy(subreads)\n pad_reads(subreads_copy)\n yield name, subreads_copy", "def create_read_list(samfile):\n read_sampler = ReadSampler()\n for line in samfile:\n line = sam_utils.SamAlignment(line)\n vals = line.get_aligned_blocks()\n if len(vals) > 1:\n logging.info(\"Skipping gapped read %s %s\"%(line.QNAME, str(vals))) \n read_sampler.add_read(vals[0])\n return read_sampler", "def align_groups(groups_dir, prog, align_dir):\n ra = RunAlign()\n ra.run_for_all_in(groups_dir, prog, align_dir)", "def parse_sam(rows):\n row1, row2 = rows\n mseqs = {}\n failed_list = []\n insert_list = []\n rname = row1['rname']\n qname = row1['qname']\n cigar1 = row1['cigar']\n cigar2 = row2['cigar']\n\n # filtering criteria\n reason = None\n if cigar1 == '*':\n reason = 'R1 unmapped'\n if int(row1['mapq']) < read_mapping_cutoff:\n reason = 'R1 low mapq'\n\n if cigar2 == '*':\n reason = 'R2 unmapped'\n if int(row2['mapq']) < read_mapping_cutoff:\n reason = 'R2 low mapq'\n\n genotype1, genotype2 = None, None\n try:\n genotype1 = row1['rname'].split('-')[1][0]\n genotype2 = row2['rname'].split('-')[1][0]\n except:\n reason = 'discordant map'\n pass\n\n if genotype1 != genotype2:\n reason = 'map conflict'\n\n if reason:\n failed_list.append({'qname': qname,\n 'rname1': row1['rname'],\n 'rname2': row2['rname'],\n 'reason': reason})\n else:\n pos1 = int(row1['pos'])-1 # convert 1-index to 0-index\n _, seq1, qual1, inserts = apply_cigar(cigar1, row1['seq'], row1['qual'])\n \n # report insertions relative to sample consensus\n for left, (iseq, iqual) in inserts.iteritems():\n insert_list.append({'qname': qname,\n 'fwd_rev': 'F' if is_first_read(row1['flag']) else 'R',\n 'refname': rname,\n 'pos': pos1+left,\n 'insert': iseq,\n 'qual': iqual})\n \n seq1 = '-'*pos1 + seq1 # pad sequence on left\n qual1 = '!'*pos1 + qual1 # assign lowest quality to gap prefix so it does not override mate\n \n \n # now process the mate\n pos2 = int(row2['pos'])-1 # convert 1-index to 0-index\n _, seq2, qual2, inserts = apply_cigar(cigar2, row2['seq'], row2['qual'])\n for left, (iseq, iqual) in inserts.iteritems():\n insert_list.append({'qname': qname,\n 'fwd_rev': 'F' if is_first_read(row2['flag']) else 'R',\n 'refname': rname,\n 'pos': pos2+left,\n 'insert': iseq,\n 'qual': iqual})\n seq2 = '-'*pos2 + seq2\n qual2 = '!'*pos2 + qual2\n \n # merge reads\n for qcut in sam2aln_q_cutoffs:\n mseq = merge_pairs(seq1, seq2, qual1, qual2, qcut)\n prop_N = mseq.count('N') / float(len(mseq.strip('-')))\n if prop_N > max_prop_N:\n # fail read pair\n failed_list.append({'qname': qname,\n 'reason': 'merge failure'})\n continue\n mseqs[qcut] = mseq\n\n return rname, mseqs, insert_list, failed_list", "def _generic_alignment(cline, seqrecs, preserve_order=True, **kwargs):\n # convert iterator to list, so that we can extract keys and still run the alignment\n unaligned = list(seqrecs)\n # if alignment sequences from NCBI Blast, id will include spaces\n keys = [seqrec.id.split()[0] for seqrec in unaligned]\n # execute alignment\n aligned = _generic_aligner_commandline_file(cline, unaligned, **kwargs)\n if preserve_order:\n 
aligned = SeqIO.to_dict(aligned)\n aligned = MultipleSeqAlignment(aligned[key] for key in keys)\n # make all alignment uppercase\n return MultipleSeqAlignment([seqrec.upper() for seqrec in aligned])", "def align_reads(read_fp, # FASTQ file path\n db_fp, # Local path to DB\n temp_folder, # Folder for results\n query_gencode=11, # Genetic code\n threads=1, # Threads\n min_score=20, # Minimum alignment score\n blocks=4, # Memory block size\n top=10, # Report alignments >10% from max\n min_id=80, # Minimum alignment identity\n qcov=95): # Minimum query coverage\n\n align_fp = \"{}.aln\".format(read_fp)\n logging.info(\"Input reads: {}\".format(read_fp))\n logging.info(\"Reference database: {}\".format(db_fp))\n logging.info(\"Genetic code: {}\".format(query_gencode))\n logging.info(\"Threads: {}\".format(threads))\n logging.info(\"Output: {}\".format(align_fp))\n\n run_cmds([\n \"diamond\",\n \"blastx\",\n \"--query\", read_fp, # Input FASTQ\n \"--out\", align_fp, # Alignment file\n \"--threads\", str(threads), # Threads\n \"--db\", db_fp, # Reference database\n \"--outfmt\", \"6\", # Output format\n \"qseqid\", \"sseqid\",\n \"pident\", \"length\",\n \"mismatch\", \"gapopen\",\n \"qstart\", \"qend\",\n \"sstart\", \"send\",\n \"evalue\", \"bitscore\",\n \"qlen\", \"slen\",\n \"--min-score\", str(min_score), # Minimum alignment score\n \"--query-cover\", str(qcov), # Minimum query coverage\n \"--id\", str(min_id), # Minimum alignment identity\n \"--top\", str(top), # Report alignments >10% from max\n \"--block-size\", str(blocks), # Memory block size\n \"--query-gencode\", # Genetic code\n str(query_gencode),\n \"--unal\", \"0\", # Don't report unaligned reads\n ])\n\n return align_fp", "def process(\n self,\n name_and_reads: Tuple[str, List[reads_pb2.Read]],\n ) -> Iterable[deepconsensus_pb2.DeepConsensusInput]:\n\n # Format of aligned_subreads_and_label is [([subreads], [label])].\n molecule_name, subreads = name_and_reads\n if not subreads:\n self.no_subreads_counter.inc()\n return\n subread_protos = get_subread_protos(subreads, molecule_name)\n sn = struct_utils.get_number_field(subreads[0].info, 'sn')\n self.deepconsensus_input_counter.inc()\n yield deepconsensus_pb2.DeepConsensusInput(\n subreads=subread_protos,\n molecule_name=molecule_name,\n molecule_start=0,\n sn=sn,\n )", "def process_all():\n\tfiles = os.listdir('records')\n\tfiles = [file for file in files if file not in ('.DS_Store','old')]\n\tattr_list = []\n\tcorpus = []\n\tsentences = []\n\tcorp_set = set()\n\tfor file in files:\n\t\twith open('records/'+file) as f:\n\t\t\tattr_list, corpus, sentences = proc_file(f,file,corpus,attr_list,corp_set,sentences)\n\treturn attr_list,corpus,sentences", "def parse(self):\n count = [] #count for trainset_size\n with open(self.file) as f:\n for line in f:\n data = line.split(\" \")[0]\n filename = data[:-1]\n id = data[-1:]\n if (filename not in count):\n count.append(filename)\n\n acid = \"\"\n structure = \"\"\n with open(self.directory+\"/\"+filename+\".dssp\") as dssp:\n for i in range(28): #skip lines we don't need\n next(dssp)\n for line in dssp:\n if (line[9] != \" \" and line[10] == \" \" and line[11] == id and line[13] not in (\"*\",\"!\",\"B\",\"Z\",\"X\")):\n #amino acid sequence\n if (line[13].islower()):\n acid += \"C\"\n else:\n acid += line[13]\n\n #sequence of the structure\n if (line[16] in (\"H\",\"G\",\"I\")):\n structure += \"H\"\n elif (line[16] in (\"E\",\"B\")):\n structure += \"E\"\n else:\n structure += \"C\"\n\n if (len(count) > self.trainset_size):\n 
self.testset.append((acid,structure))\n else:\n self.trainset.append((acid,structure))", "def get_alignments(N):\n\n res = [[] for _ in range(N)] # 0-29\n with open(alignment_path, 'r') as f:\n sent_num = 0\n for line in f:\n tokens = line.strip().split()\n if not tokens: continue\n if tokens[0] == 'SENT:':\n # 0 based indexing\n sent_num = int(tokens[1])-1\n else:\n # append (e, f) alignment pair\n res[sent_num].append((int(tokens[2]), int(tokens[1])))\n return res", "def read_alignment(file):\n alignments = list()\n with open(file, 'r') as f:\n for line in f:\n line_lst = line.strip().split()\n align_lst = list()\n for pair in line_lst:\n src_idx, tgt_idx = pair.split('-')\n align_lst.append((int(src_idx),int(tgt_idx)))\n # print(align_lst)\n alignments.append(align_lst)\n return alignments", "def parseIntoDB(self, filehandle, cursor, alignTab, sequenceTab=None,\n update=None):\n c = filehandle.tell()\n filehandle.seek(0, 2)\n filesize = filehandle.tell()\n filehandle.seek(c)\n l = filehandle.readline()\n rc = 0\n count = 0\n if l.split()[0] != '##maf':\n return\n else:\n self.setpar(l.split()[1:])\n\n l=filehandle.readline()\n while l:\n la = l.split()\n## print la\n if(len(la)==0 or la[0]=='#'):\n## print \"skipping\"\n 1\n elif(la[0]=='a'):\n## print \"reading alignment\"\n count+=1\n self.readalign(la[1:], filehandle)\n self._dump(alignTab, sequenceTab)\n if(update and not count % 1000):\n cursor.execute(update % (int(filehandle.tell() * 100.\n / filesize)))\n else:\n## print \"end of records\"\n return\n l=filehandle.readline()", "def test_sam_parser_success(self):\n # self.cleanup = False\n\n sam_file = '%s/../human_100.sam' % GOLDEN_DIR\n count = 0\n\n with open(sam_file, 'r') as sam:\n for line in sam:\n # Skip tags\n if line[0] == '@':\n continue\n\n alignment = parse_sam_line(line)\n\n # Verify that the type conversions are all correct\n types = {}\n for entry in sam_format():\n types[entry['name']] = entry['type']\n\n for field in alignment:\n self.assertIs(type(alignment[field]), types[field])\n\n count = count + 1\n\n self.assertEqual(count, 100)", "def alignment_org(angle=0.1):\n proposal_id('2023_2', '311564_test')\n yield from alignement_gisaxs_multisample(angle=angle)\n RE.md['ai_0'] = piezo.th.user_setpoint.get()\n proposal_id('2023_2', '311564_Pettersson')", "def _assignAlignment(self, aln):\n self.sequence = None\n for i in range(self.nChildren()):\n self.children[i]._assignAlignment(aln)\n for seq in aln.seqs:\n if seq.name == self.label:\n self.sequence = seq\n break", "def parse_sam(self, sam_handle, append_chr=False):\n vargroup_reads = np.asarray([read\n for read in sam_handle.fetch(self.get_chr(append_chr), self.pos, self.end)\n if not read.is_duplicate])\n\n # Convert some key read information into a dataframe to speed up filtering\n read_df = pd.DataFrame(columns=['rn', 'start', 'end', 'read', 'indels'],\n data=[(rn, read.reference_start, read.aend, read, get_indel_from_cigar(read.cigar))\n for rn, read in enumerate(vargroup_reads)])\n\n reads_coverage = np.zeros((len(vargroup_reads), len(self.variant_list)))\n reads_existence = np.zeros((len(vargroup_reads), len(self.variant_list)))\n\n if len(vargroup_reads) == 0:\n print('Warning: No reads found at {}:{}-{}'.format(self.chrom, self.pos, self.end))\n return self._build_existence_matrix(reads_existence, reads_coverage)\n\n # pylint: disable=invalid-name\n for vn, variant in enumerate(self.variant_list):\n # Cache variant properties: those lookups are expensive in PySam\n var_type = variant.var_type\n is_indel 
= variant.is_indel\n is_deletion = variant.is_deletion\n\n read_overlap_mask = (read_df['start'] <= variant.POS) & (read_df['end'] >= variant.POS)\n\n # Coverage is easy: all reads which overlap this variant get a coverage of 1\n reads_coverage[read_overlap_mask, vn] = 1\n\n # SNPs\n if var_type == 'snp':\n # for rn, read, indels in itertools.izip(read_df[read_overlap_mask]['rn'], # python2\n for rn, read, indels in zip(read_df[read_overlap_mask]['rn'], read_df[read_overlap_mask]['read'],\n read_df[read_overlap_mask]['indels']):\n # get start position using the cigar string to find the offset\n variant_start = self._get_start(variant, read.reference_start, read.cigar, ignore_softclip=True)\n # If the base matches the alternate read add it to the existence array\n read_alt = read.query[variant_start: variant.end - variant.POS + variant_start + 1]\n if read_alt == variant.ALT[0].sequence:\n reads_existence[rn, vn] = 1\n\n # Insertions/Deletions\n elif is_indel:\n # for rn, read, indels in itertools.izip(read_df[read_overlap_mask]['rn'], # python2\n for rn, read, indels in zip(read_df[read_overlap_mask]['rn'], read_df[read_overlap_mask]['read'],\n read_df[read_overlap_mask]['indels']):\n iloc = self._get_indel_pos(variant.POS, read)\n # If the insertion/deletion exist in the cigar string add it to the existence array\n if is_deletion and iloc in indels and indels[iloc][0] == 'D': # Deletions\n reads_existence[rn, vn] = 1\n elif not is_deletion and iloc in indels and indels[iloc][0] == 'I': # Insertions\n if variant.ALT[0] == read.seq[iloc:iloc + 1 + indels[iloc][1]]:\n reads_existence[rn, vn] = 1\n else:\n print('Warning: Unknown type found: {}'.format(variant.var_type))\n\n return self._build_existence_matrix(reads_existence, reads_coverage)", "def _generate_read_alignment_stats(\n self,\n lib_names,\n result_bam_paths,\n unaligned_reads_paths,\n output_stats_path,\n paired_end=False,\n fragments=False,\n ):\n raw_stat_data_writer = RawStatDataWriter(pretty=True)\n references_by_species = self._get_references_by_species()\n read_files_and_jobs = {}\n if not self._file_needs_to_be_created(output_stats_path):\n return\n with concurrent.futures.ProcessPoolExecutor(\n max_workers=self._args.processes\n ) as executor:\n for (\n lib_name,\n read_alignment_bam_path,\n unaligned_reads_path,\n ) in zip(lib_names, result_bam_paths, unaligned_reads_paths):\n read_aligner_stats = ReadAlignerStats(\n references_by_species, paired_end, fragments\n )\n read_files_and_jobs[lib_name] = executor.submit( # run jobs parallel\n read_aligner_stats.count,\n read_alignment_bam_path,\n unaligned_reads_path,\n )\n # Evaluate thread outcome\n self._check_job_completeness(read_files_and_jobs.values())\n read_files_and_stats = dict(\n [\n (lib_name, job.result())\n for lib_name, job in read_files_and_jobs.items()\n ]\n )\n raw_stat_data_writer.write(read_files_and_stats, output_stats_path)", "def run_parse(self):\n # Data set already has source file names from load_inputs\n parsedset = {}\n parsedset['data_set'] = []\n for log in self.input_files:\n parsemodule = self.parse_modules[self.args.parser]\n try:\n if self.args.tzone:\n parsemodule.tzone = self.args.tzone\n except NameError: pass\n parsedset['data_set'].append(parsemodule.parse_file(log))\n self.data_set = parsedset\n del(parsedset)", "def map_reads(SRA):\n\n #1. 
bowtie to rRNA\n print(\"Bowtie alignement on contaminant RNA...\")\n cmd_bowtie = 'bowtie'+ ' ' + '-a' + ' ' + '-p6' + ' ' + '-S' + ' ' + '--un' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + BOWTIE_DIR+'/rRNA' + ' ' + TMP_DIR+SRA+'_trimmed.fastq' + ' ' + '|' + ' ' + 'samtools view -@ 6 -bS' + ' ' + '>' + TMP_DIR+SRA+'_trimmed_rrna.bam'\n output = subprocess.run(cmd_bowtie, shell=True)\n\n # 2. STAR to ref genome\n print(\"STAR alignement to yeast genome...\")\n cmd_STAR = 'STAR --outSAMtype BAM Unsorted --runThreadN 6 --winAnchorMultimapNmax 200 --seedSearchStartLmax 15 --genomeDir' + ' ' + STAR_DIR + ' ' + '--readFilesIn' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + '--outFileNamePrefix' + ' ' + TMP_DIR+SRA+'_STAR_'\n output = subprocess.run(cmd_STAR, shell=True)\n\n # 3. Samtools keep uniquely mapped reads and sort\n print(\"Samtools to keep uniquely mapped reads and sort...\")\n cmd_samtools1 = 'samtools view -@ 6 -b -q 255 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam' + ' ' + TMP_DIR+SRA+'_STAR_Aligned.out.bam'\n output = subprocess.run(cmd_samtools1, shell=True)\n\n cmd_samtools2 = 'samtools sort -@ 6 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam'\n output = subprocess.run(cmd_samtools2, shell=True)\n\n cmd_samtools3 = 'samtools index' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam'\n output = subprocess.run(cmd_samtools3, shell=True)", "def parse(self, paired=False, shift=100):\n for read in self.handle:\n if read.is_unmapped or read.is_qcfail or read.is_secondary \\\n or read.is_supplementary:\n continue\n if paired:\n if not read.is_paired:\n logger.debug(\n f\"Skipped single-end read: {read.to_string()!r}\")\n continue\n if read.is_read1 and read.is_proper_pair \\\n and not read.mate_is_unmapped:\n chrom1 = read.reference_name\n start1 = read.reference_start\n end1 = read.reference_end\n chrom2 = read.next_reference_name\n if read.is_reverse:\n start = end1 + read.template_length\n end = end1\n else:\n start = start1\n end = start1 + read.template_length\n if read.template_length == 0:\n logger.debug(\n f\"Detected read with TLEN=0: {read.to_string()!r}\")\n if chrom1 == chrom2:\n yield chrom1, (start + end) // 2\n else:\n continue\n else:\n continue\n else:\n if read.is_paired:\n logger.debug(\n f\"Skipped paired-end read: {read.to_string()!r}\")\n continue\n if read.is_unmapped:\n continue\n else:\n chrom = read.reference_name\n start = read.reference_start\n end = read.reference_end\n if read.is_reverse:\n pos = end - shift\n else:\n pos = start + shift\n yield chrom, pos\n self.handle.close()", "def find_read_candidates(self, read):\n self.read_allele_dictionary = {}\n ref_alignment_start = read.reference_start\n ref_alignment_stop = self.get_read_stop_position(read)\n # if the region has reached a very high coverage, we are not going to parse through all the reads\n if self.coverage[ref_alignment_start] > 300:\n return False\n cigar_tuples = read.cigartuples\n read_sequence = read.query_sequence\n read_id = read.query_name\n read_quality = read.query_qualities\n ref_sequence = self.fasta_handler.get_sequence(chromosome_name=self.chromosome_name,\n start=ref_alignment_start,\n stop=ref_alignment_stop+10)\n\n self.read_info[read_id] = (ref_alignment_start, ref_alignment_stop, read.mapping_quality, read.is_reverse)\n for pos in range(ref_alignment_start, ref_alignment_stop):\n self.read_id_by_position[pos].append((read_id, ref_alignment_start, ref_alignment_stop))\n for i, ref_base 
in enumerate(ref_sequence):\n self.reference_dictionary[ref_alignment_start + i] = ref_base\n\n # read_index: index of read sequence\n # ref_index: index of reference sequence\n read_index = 0\n ref_index = 0\n found_valid_cigar = False\n for cigar in cigar_tuples:\n cigar_code = cigar[0]\n length = cigar[1]\n # get the sequence segments that are effected by this operation\n ref_sequence_segment = ref_sequence[ref_index:ref_index+length]\n read_quality_segment = read_quality[read_index:read_index+length]\n read_sequence_segment = read_sequence[read_index:read_index+length]\n\n if cigar_code != 0 and found_valid_cigar is False:\n read_index += length\n continue\n found_valid_cigar = True\n\n # send the cigar tuple to get attributes we got by this operation\n ref_index_increment, read_index_increment = \\\n self.parse_cigar_tuple(cigar_code=cigar_code,\n length=length,\n alignment_position=ref_alignment_start+ref_index,\n ref_sequence=ref_sequence_segment,\n read_sequence=read_sequence_segment,\n read_id=read_id,\n quality=read_quality_segment)\n\n # increase the read index iterator\n read_index += read_index_increment\n ref_index += ref_index_increment\n\n # after collecting all alleles from reads, update the global dictionary\n for position in self.read_allele_dictionary.keys():\n if position < self.region_start_position or position > self.region_end_position:\n continue\n self.rms_mq[position] += read.mapping_quality * read.mapping_quality\n for record in self.read_allele_dictionary[position]:\n # there can be only one record per position in a read\n allele, allele_type = record\n\n if allele_type == MATCH_ALLELE or allele_type == MISMATCH_ALLELE:\n # If next allele is indel then group it with the current one, don't make a separate one\n if position + 1 <= ref_alignment_stop and position + 1 in self.read_allele_dictionary.keys():\n next_allele, next_allele_type = list(self.read_allele_dictionary[position + 1].keys())[0]\n if next_allele_type == INSERT_ALLELE or next_allele_type == DELETE_ALLELE:\n continue\n self.positional_read_info[position].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position, allele, allele_type,\n read.mapping_quality)\n else:\n # it's an insert or delete, so, add to the previous position\n self.positional_read_info[position-1].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position-1, allele, allele_type,\n read.mapping_quality)\n return True", "def _read_bam(bam_fn, precursors):\n mode = \"r\" if bam_fn.endswith(\"sam\") else \"rb\"\n handle = pysam.Samfile(bam_fn, mode)\n reads = defaultdict(realign)\n for line in handle:\n chrom = handle.getrname(line.reference_id)\n # print(\"%s %s %s %s\" % (line.query_name, line.reference_start, line.query_sequence, chrom))\n query_name = line.query_name\n if query_name not in reads:\n reads[query_name].sequence = line.query_sequence\n iso = isomir()\n iso.align = line\n iso.start = line.reference_start\n iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], line.reference_start)\n reads[query_name].set_precursor(chrom, iso)\n\n reads = _clean_hits(reads)\n return reads", "def align_one_reads_to_assembly(self, ctx):\n # ctx is the context object\n #BEGIN align_one_reads_to_assembly\n #END align_one_reads_to_assembly\n pass", "def _parse_3files_parallel(cls, file1_fastx, file2_sam, file3_sam):\n generator1 = 
name_seq_generator_from_fasta_fastq(file1_fastx)\n generator2 = iter(HTSeq.SAM_Reader(file2_sam))\n generator3 = iter(HTSeq.SAM_Reader(file3_sam))\n if_finished_1, if_finished_2, if_finished_3 = False, False, False\n while True:\n try: name1, seq1 = generator1.next()\n except StopIteration: if_finished_1, name1 = True, 'NOTHING_HERE'\n name1 = name1.split()[0]\n try: aln2 = cls._next_until_name_match(generator2, name1)\n except StopIteration: if_finished_2 = True\n try: aln3 = cls._next_until_name_match(generator3, name1)\n except StopIteration: if_finished_3 = True\n # if all the files still contained data, yield it\n if not any([if_finished_1, if_finished_2, if_finished_3]):\n yield (name1, seq1, aln2, aln3)\n # if file1 was finished, we're done - it's okay if the other files had some extra reads\n elif if_finished_1:\n raise StopIteration\n # if file1 WASN'T finished but one of the others was, that's a problem!\n else:\n raise MutantError(\"Parsing seq/aln files in parallel - inconsistent finished states! \"\n +\"(If finished: %s %s, %s %s, %s %s)\"%(file1_fastx, if_finished_1, \n file2_sam, if_finished_2, file3_sam, if_finished_3))\n # TODO unit-tests! There are some in experiments/arrayed_library/internal_barcode_processing/code/clustering_tools.py for a similar function - test__parse_3seq_parallel", "def read_multiple_alignments(tree, directory_path, coordinates):\n multiple_alignment_dict = {}\n for coord in coordinates:\n try:\n handle = open(os.path.join(directory_path, coord[0]), 'rb')\n multiple_alignment_dict[coord[0]] = cpickle.load(handle)\n except Exception, e:\n syserr(\"No alignment for %s, going on without it.\\n\" % coord[0])\n syserr(str(e) + \"\\n\")\n\n return multiple_alignment_dict", "def process_read(self, ref, read, ref_offset=0):\n\n if read.alignment.mapping_quality < self.config.min_mapq:\n return\n\n ref_pos = read.alignment.position.position - ref_offset\n read_pos = 0\n # Use set(), as some cigar operations might generate duplicated positions,\n # E.g. 
for insertions, it extends the candidate positions to\n # [ins_pos - ins_len, ins_pos + ins_len] which might overlap with some\n # nearby mismatches.\n positions = set()\n for cigar in read.alignment.cigar:\n # Break if it reached the end of reference sequence.\n if ref_pos >= len(ref):\n break\n if cigar.operation not in utils.CIGAR_OPS:\n raise ValueError('Unexpected CIGAR operation', cigar, read)\n\n if cigar.operation == cigar_pb2.CigarUnit.ALIGNMENT_MATCH:\n positions.update(\n self._process_align_match(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.SEQUENCE_MISMATCH:\n positions.update(\n self._process_seq_mismatch(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.INSERT:\n positions.update(\n self._process_insert(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.CLIP_SOFT:\n positions.update(\n self._process_soft_clip(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n elif (cigar.operation == cigar_pb2.CigarUnit.DELETE or\n cigar.operation == cigar_pb2.CigarUnit.SKIP):\n positions.update(\n self._process_delete(cigar, ref, read, ref_pos, read_pos))\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.SEQUENCE_MATCH:\n ref_pos += cigar.operation_length\n read_pos += cigar.operation_length\n elif (cigar.operation == cigar_pb2.CigarUnit.CLIP_HARD or\n cigar.operation == cigar_pb2.CigarUnit.PAD):\n pass\n\n # Yield positions within the range\n for pos in sorted(positions):\n if pos >= 0 and pos < len(ref):\n yield pos", "def star(self) -> None:\n self.analysis.logger.info(\"Running alignment with STAR\")\n config = self.analysis.config\n executor = Executor(self.analysis)\n\n bam_directory = self.analysis.get_bam_dir()\n\n def create_dirs(*args: str, **kwargs: str) -> None:\n output_path = os.path.join(bam_directory, kwargs[\"output_filename\"])\n os.makedirs(output_path, exist_ok=True)\n os.makedirs(os.path.join(output_path, \"index\"), exist_ok=True)\n\n output_format = f\"{self.analysis.basename}{{organism_str}}\"\n star_dir_format = os.path.join(bam_directory, output_format + \"_star\")\n star_index_dir_format = os.path.join(star_dir_format, \"index\")\n executor(\n create_dirs,\n input_function=lambda l: \" \".join(sorted(l)),\n output_format=star_dir_format,\n input_split_reads=False,\n override_last_files=False,\n split_by_organism=True,\n only_human=self.only_human,\n )\n star_index_format = \"{getattr(config, 'star_index_' + organism)}\"\n features_format = \"{getattr(config, 'features_' + organism)}\"\n\n self.analysis.logger.info(\"Step 1: Alignment 1st Pass:\")\n\n executor(\n f\"{config.star} --genomeDir {star_index_format} \"\n f\"--readFilesIn {{input_filename}} \"\n f\"--outFileNamePrefix {star_dir_format}/{output_format}. 
\"\n f\"--runThreadN 5 \"\n f\"--outFilterMultimapScoreRange 1 --outFilterMultimapNmax 20 \"\n f\"--outFilterMismatchNmax 10 --alignIntronMax 500000 \"\n f\"--alignMatesGapMax 1000000 --sjdbScore 2 \"\n f\"--alignSJDBoverhangMin 1 --genomeLoad NoSharedMemory \"\n f\"--outFilterMatchNminOverLread 0.33 \"\n f\"--outFilterScoreMinOverLread 0.33 \"\n f\"--sjdbOverhang 100 --outSAMstrandField intronMotif \"\n f\"--outSAMtype None --outSAMmode None\",\n input_function=lambda l: \" \".join(sorted(l)),\n input_split_reads=False,\n override_last_files=False,\n split_by_organism=True,\n only_human=self.only_human,\n )\n self.analysis.logger.info(\"Finished step 1\")\n\n self.analysis.logger.info(\"Step 2: Intermediate Index Generation:\")\n\n executor(\n f\"{config.star} \"\n f\"--runMode genomeGenerate \"\n f\"--genomeDir {star_index_dir_format} \"\n f\"--genomeFastaFiles {{genome_ref}} \"\n f\"--sjdbOverhang 100 \"\n f\"--runThreadN 5 \"\n f\"--sjdbFileChrStartEnd {star_dir_format}/\"\n f\"{output_format}.SJ.out.tab\",\n input_function=lambda l: \" \".join(sorted(l)),\n input_split_reads=False,\n override_last_files=False,\n split_by_organism=True,\n only_human=self.only_human,\n )\n self.analysis.logger.info(\"Finished step 2\")\n self.analysis.logger.info(\"Step 3: Alignment 2nd Pass\")\n\n executor(\n f\"{config.star} \"\n f\"--genomeDir {star_index_dir_format} \"\n f\"--readFilesIn {{input_filename}} \"\n f\"--outFileNamePrefix {star_dir_format}/{output_format}. \"\n f\"--runThreadN 5 \"\n f\"--outFilterMultimapScoreRange 1 \"\n f\"--outFilterMultimapNmax 20 \"\n f\"--outFilterMismatchNmax 10 \"\n f\"--alignIntronMax 500000 \"\n f\"--alignMatesGapMax 1000000 \"\n f\"--sjdbScore 2 \"\n f\"--alignSJDBoverhangMin 1 \"\n f\"--genomeLoad NoSharedMemory \"\n f\"--limitBAMsortRAM 0 \"\n f\"--outSAMattrRGline ID:{self.analysis.basename}\\t\"\n f\"SM:{self.analysis.sample}\\tLB:lib1\\tPL:ILLUMINA \"\n f\"--outFilterMatchNminOverLread 0.33 \"\n f\"--outFilterScoreMinOverLread 0.33 \"\n f\"--sjdbOverhang 100 \"\n f\"--outSAMstrandField intronMotif \"\n f\"--outSAMattributes NH HI NM MD AS XS \"\n f\"--outSAMunmapped Within\",\n input_function=lambda l: \" \".join(sorted(l)),\n input_split_reads=False,\n output_format=f\"{star_dir_format}/{output_format}.Aligned.out.sam\",\n split_by_organism=True,\n only_human=self.only_human,\n )\n\n self.analysis.logger.info(\"Finished step 3\")\n\n executor(\n lambda *args, **kwargs: os.rename(\n str(kwargs[\"input_filename\"]), str(kwargs[\"output_filename\"])\n ),\n split_by_organism=True,\n output_format=lambda *args, **kwargs: kwargs[\n \"input_filename\"\n ].filename.replace(\".Aligned.out\", \"\"),\n only_human=self.only_human,\n )\n\n executor(\n lambda *args, **kwargs: os.rename(\n str(kwargs[\"input_filename\"]), str(kwargs[\"output_filename\"])\n ),\n split_by_organism=True,\n output_format=os.path.join(bam_directory, f\"{output_format}.sam\"),\n only_human=self.only_human,\n )\n\n self.analysis.logger.info(\"Step 4: get HTseq count\")\n\n counts_format = os.path.join(bam_directory, f\"{output_format}_counts.txt\")\n executor(\n f\"{config.samtools} view -F 4 {{input_filename}} |\"\n f\"htseq-count \"\n f\"-m intersection-nonempty \"\n f\"-i gene_id \"\n f\"-r pos \"\n f\"-s no \"\n f\"- {features_format} \"\n f\"> {counts_format}\",\n override_last_files=False,\n split_by_organism=True,\n only_human=self.only_human,\n )\n self.analysis.logger.info(\"Finished HTseq count\")\n self.analysis.logger.info(\"Alignment finished. 
Aligner used: STAR\")", "def process(self,\n read: reads_pb2.Read) -> Iterable[Tuple[str, reads_pb2.Read]]:\n pacbio_molecule_name = preprocess_utils.get_pacbio_molecule_name(\n read.fragment_name)\n if pacbio_molecule_name is not None:\n yield pacbio_molecule_name, read\n else:\n raise ValueError(str(read))", "def mergeChainedAlignedSegments(chainedAlignedSegments, refSequence, readSequence):\n cAR = pysam.AlignedSegment()\n aR = chainedAlignedSegments[0]\n cAR.query_name = aR.query_name\n \n #Parameters we don't and therefore set properly\n #cAR.flag = aR.flag\n #cAR.mapq = aR.mapq\n #cAR.mrnm = 0\n #cAR.mpos=0\n #cAR.isize=0\n #cAR.qual = \"<\" * len(readSequence)\n #cAR.tags = aR.tags \n cAR.next_reference_id = -1\n cAR.reference_start = aR.reference_start #Reference start\n cAR.is_reverse = aR.is_reverse\n cAR.query_sequence = reverseComplement(readSequence) if cAR.is_reverse else readSequence\n cAR.reference_id = aR.reference_id\n cigarList = []\n pPos = aR.reference_start\n #Iterate from the other end of the sequence if reversed\n pQPos = -(len(readSequence)-1) if cAR.is_reverse else 0 \n \n for aR in chainedAlignedSegments:\n assert cAR.is_reverse == aR.is_reverse\n #Add a deletion representing the preceding unaligned reference positions\n assert aR.reference_start >= pPos\n if aR.reference_start > pPos:\n cigarList.append((2, aR.reference_start - pPos))\n pPos = aR.reference_start \n \n #Add an insertion representing the preceding unaligned read positions\n #make it a soft clip if it is the first chained alignment\n qPos = getFirstNonClippedPositionInRead(aR, readSequence)\n assert qPos >= pQPos\n if qPos > pQPos:\n cigarList.append((4 if aR == chainedAlignedSegments[0] else 1, qPos - pQPos)) \n pQPos = qPos\n \n #Add the operations of the cigar, filtering hard and soft clipping\n for op, length in aR.cigar:\n assert op in (0, 1, 2, 4, 5)\n if op in (0, 1, 2):\n cigarList.append((op, length))\n if op in (0, 2): #Is match or deletion\n pPos += length\n if op in (0, 1): #Is match or insertion\n pQPos += length\n \n assert pPos <= len(refSequence)\n \n #Set reference end coordinate (which is exclusive)\n #cAR.reference_end = pPos #We don't do this because it is set by cigar string\n \n #Now add any trailing, necessary soft clipping\n if cAR.is_reverse:\n assert pQPos <= 1\n if pQPos < 1:\n cigarList.append((4, -pQPos + 1))\n else:\n assert pQPos <= len(readSequence)\n if pQPos < len(readSequence):\n cigarList.append((4, len(readSequence) - pQPos))\n \n cAR.cigar = tuple(cigarList)\n \n #Check ops\n for op, length in cAR.cigar: #We should have no hard clipped ops\n assert op in (0, 1, 2, 4)\n \n #Reference sequence check coordinates\n assert sum([ length for op, length in cigarList if op in (0, 2)]) == cAR.reference_end - cAR.reference_start\n assert cAR.reference_start >= 0 and cAR.reference_start < len(refSequence)\n assert cAR.reference_end >= 0 and cAR.reference_end <= len(refSequence)\n \n #Read sequence check coordinates\n assert cAR.query_alignment_start >= 0 and cAR.query_alignment_start < len(readSequence)\n assert cAR.query_alignment_end >= 0 and cAR.query_alignment_end <= len(readSequence)\n assert cAR.query_alignment_start + sum([ length for op, length in cigarList if op in (0, 1)]) == cAR.query_alignment_end\n \n return cAR", "def test_parser(self):\n parser = hhsuite.FastaParser()\n results = parser.run(self.pipeline)\n self.assertEqual(\n results[\"templates\"][0][\"sequence_alignments\"], {\n \"sequence\": \"---A-A-----\",\n \"query\": \"XXAB-CDEFXX\"\n })\n\n 
self.assertEqual(\n results[\"templates\"][1][\"sequence_alignments\"], {\n \"foo\": \"---A------\",\n \"sequence\": \"--GG------\",\n \"query\": \"XXABCDEFXX\"\n })", "def parse_metadata(self):\n import csv\n f = open(self.seq_id_list)\n self.names = f.readlines()\n f.close()\n num_samples = len(self.names)\n for i in range(len(self.names)):\n self.names[i] = self.names[i].replace(\"\\n\", \"\")\n # Go through the combined metadata file - it has most of the data we need.\n metadata = csv.DictReader(open(self.nasmnt + \"WGSspades/reports/combinedMetadata.csv\"))\n metadata_count = 0\n for row in metadata:\n # There has to be a more elegant way to do this.\n if row[\"SampleName\"] in self.names:\n data = dict()\n data[\"Investigator\"] = row[\"Investigator\"]\n data[\"Coverage\"] = row[\"AverageCoverageDepth\"]\n data[\"TotalLength\"] = row[\"TotalLength\"]\n data[\"rST\"] = row[\"rMLSTsequenceType\"]\n data[\"PipelineVersion\"] = row[\"PipelineVersion\"]\n data[\"MLST\"] = row[\"MLSTsequencetype\"]\n data[\"geneSeekr\"] = row[\"geneSeekrProfile\"].split(\";\")\n self.metadata[row[\"SampleName\"]] = data\n metadata_count += 1\n # Need to look in external WGS spades as well.\n metadata = csv.DictReader(open(self.nasmnt + \"External_WGSspades/reports/combinedMetadata.csv\"))\n for row in metadata:\n # There has to be a more elegant way to do this.\n if row[\"SampleName\"] in self.names:\n data = dict()\n data[\"Investigator\"] = row[\"Investigator\"]\n data[\"Coverage\"] = row[\"AverageCoverageDepth\"]\n data[\"TotalLength\"] = row[\"TotalLength\"]\n data[\"rST\"] = row[\"rMLSTsequenceType\"]\n data[\"PipelineVersion\"] = row[\"PipelineVersion\"]\n data[\"MLST\"] = row[\"MLSTsequencetype\"]\n data[\"geneSeekr\"] = row[\"geneSeekrProfile\"].split(\";\")\n self.metadata[row[\"SampleName\"]] = data\n metadata_count += 1\n\n\n\n # Also need to go through the rMLST file to make sure that all rMLST genes are covered.\n rMLST_data = csv.DictReader(open(self.nasmnt + \"WGSspades/reports/rmlst.csv\"))\n metadata_count = 0\n for row in rMLST_data:\n if row[\"Strain\"] in self.names:\n self.metadata[row[\"Strain\"]][\"Matches\"] = row[\"Matches\"]\n metadata_count += 1\n # Check external runs.\n rMLST_data = csv.DictReader(open(self.nasmnt + \"External_WGSspades/reports/rmlst.csv\"))\n for row in rMLST_data:\n if row[\"Strain\"] in self.names:\n self.metadata[row[\"Strain\"]][\"Matches\"] = row[\"Matches\"]\n\n\n\n # Finally, need to get info on the MLST sequence type.\n metadata_count = 0\n mlst_data = csv.DictReader(open(self.nasmnt + \"WGSspades/reports/mlst.csv\"))\n for row in mlst_data:\n if row[\"Strain\"] in self.names:\n mlst = list()\n for i in range(1, 8):\n mlst.append(row[str(i)])\n self.metadata[row[\"Strain\"]][\"mlst_info\"] = mlst\n metadata_count += 1\n\n # Also from External.\n mlst_data = csv.DictReader(open(self.nasmnt + \"External_WGSspades/reports/mlst.csv\"))\n for row in mlst_data:\n if row[\"Strain\"] in self.names:\n mlst = list()\n for i in range(1, 8):\n mlst.append(row[str(i)])\n self.metadata[row[\"Strain\"]][\"mlst_info\"] = mlst\n metadata_count += 1\n\n # Go through the ROGA Summary file from the access DB to get strain/textual IDs, and 1' and 2' enzymes.\n try: # Assume we're using ROGA summary OLF. 
If it isn't there, assume ROGA summary OLC\n df = pd.read_excel('ROGA_summary_OLF.xlsx')\n for i in df.index:\n if df['SeqTracking_SEQID'][i] in self.names:\n seqid = df['SeqTracking_SEQID'][i]\n self.metadata[seqid][\"IsolateID\"] = df['Isolate ID'][i]\n self.metadata[seqid][\"TextualID\"] = df['Textual ID'][i]\n self.metadata[seqid][\"1Enzyme\"] = df[\"1' Enzyme\"][i]\n self.metadata[seqid][\"2Enzyme\"] = df[\"2' Enzyme\"][i]\n self.metadata[seqid][\"Source\"] = df['Source'][i]\n self.metadata[seqid][\"ReceivedDate\"] = df['ReceivedDate'][i]\n self.metadata[seqid][\"SequenceDate\"] = df['SequenceDate'][i]\n self.metadata[seqid][\"SequencedBy\"] = df['SequenceBy'][i]\n metadata_count += 1\n\n\n except FileNotFoundError: # Should be a file not found error - look it up.\n metadata_count = 0\n df = pd.read_excel('ROGA_summary_OLC.xlsx')\n for i in df.index:\n if df['SeqTracking_SEQID'][i] in self.names:\n seqid = df['SeqTracking_SEQID'][i]\n self.metadata[seqid][\"IsolateID\"] = df['OLN ID'][i]\n self.metadata[seqid][\"TextualID\"] = df['Lab ID'][i]\n self.metadata[seqid][\"ReceivedDate\"] = df['ReceivedDate'][i]\n self.metadata[seqid][\"SequenceDate\"] = df['SequenceDate'][i]\n self.metadata[seqid][\"SequencedBy\"] = df['SequenceBy'][i]\n metadata_count += 1\n # print(self.metadata)\n self.check_for_empty_data()", "def load_aligned_seqs(\n filename: Union[str, pathlib.Path],\n format=None,\n array_align=True,\n moltype=None,\n label_to_name=None,\n parser_kw=None,\n info=None,\n **kw,\n):\n file_format, _ = get_format_suffixes(filename)\n if file_format == \"json\":\n return load_from_json(filename, (Alignment, ArrayAlignment))\n\n data = _load_seqs(file_format, filename, format, kw, parser_kw)\n return make_aligned_seqs(\n data,\n array_align=array_align,\n label_to_name=label_to_name,\n moltype=moltype,\n source=filename,\n info=info,\n **kw,\n )", "def _forwardParsimony(self, aln):\n if self.sequence == None: # no sequence has been assigned\n if self.nChildren() == 0: # no children, so terminal, cannot propagate scores\n raise RuntimeError(\"No sequence assigned to leaf node:\", self.label)\n scores = [None for _ in range(self.nChildren())]\n for i in range(self.nChildren()):\n scores[i] = self.children[i]._forwardParsimony(aln)\n # for each position in the alignment,\n # introduce (initially zero) score for each symbol in alphabet\n self.seqscores = [[0 for _ in aln.alphabet] for col in range(aln.alignlen)]\n # for each position in the alignment,\n # allocate a position to put the each child symbol from which each current node symbol score was determined\n self.backptr = [[[None for _ in aln.alphabet] for _ in range(aln.alignlen)] for _ in range(self.nChildren())]\n for col in range(aln.alignlen):\n for i in range(self.nChildren()):\n # left child will contribute first\n for a_parent in range(len(aln.alphabet)):\n best_score = +9999999\n best_symb = 0\n for a in range(len(aln.alphabet)):\n score = (scores[i][col][a] + (\n 1 if a != a_parent else 0)) # if we want to weight scores, this would need to change\n if score < best_score:\n best_symb = a\n best_score = score\n self.seqscores[col][a_parent] += best_score\n self.backptr[i][col][a_parent] = best_symb\n else:\n self.seqscores = [[0 if a == sym else 999999 for a in aln.alphabet] for sym in\n self.sequence] # if we want to weight scores, this would need to change\n return self.seqscores", "def main(argv=None):\n\n if not argv:\n argv = sys.argv\n\n # setup command line parser\n parser = E.ArgumentParser(description=__doc__)\n\n 
parser.add_argument(\"--version\", action='version', version=\"1.0\")\n\n parser.add_argument(\"-m\", \"--merge-pairs\", dest=\"merge_pairs\",\n action=\"store_true\",\n help=\"merge paired-ended reads and output interval \"\n \"for entire fragment. \")\n\n parser.add_argument(\"--max-insert-size\", dest=\"max_insert_size\", type=int,\n help=\"only merge paired-end reads if they are less than \"\n \"# bases apart. \"\n \" 0 turns off this filter. \")\n\n parser.add_argument(\"--min-insert-size\", dest=\"min_insert_size\", type=int,\n help=\"only merge paired-end reads if they are at \"\n \"least # bases apart. \"\n \" 0 turns off this filter. \")\n\n parser.add_argument(\"--bed-format\", dest=\"bed_format\", type=str,\n choices=('3', '4', '5', '6'),\n help=\"bed format to output. \")\n\n parser.set_defaults(\n region=None,\n call_peaks=None,\n merge_pairs=None,\n min_insert_size=0,\n max_insert_size=0,\n bed_format='6',\n )\n\n (args, unknown) = E.start(parser, argv=argv, unknowns=True)\n\n if len(unknown) == 0:\n unknown.append(\"-\")\n\n samfile = pysam.AlignmentFile(unknown[0], \"rb\")\n\n args.bed_format = int(args.bed_format)\n\n if args.merge_pairs is not None:\n counter = merge_pairs(samfile,\n args.stdout,\n min_insert_size=args.min_insert_size,\n max_insert_size=args.max_insert_size,\n bed_format=args.bed_format)\n\n E.info(\"category\\tcounts\\n%s\\n\" % counter.asTable())\n\n else:\n # use until_eof. Files from stdin have no index\n it = samfile.fetch(until_eof=True)\n\n # more comfortable cigar parsing will\n # come with the next pysam release\n BAM_CMATCH = 0\n BAM_CDEL = 2\n BAM_CREF_SKIP = 3\n take = (BAM_CMATCH, BAM_CDEL, BAM_CREF_SKIP)\n outfile = args.stdout\n\n for read in it:\n if read.is_unmapped:\n continue\n\n t = 0\n for op, l in read.cigar:\n if op in take:\n t += l\n\n if read.is_reverse:\n strand = \"-\"\n else:\n strand = \"+\"\n outfile.write(\"%s\\t%d\\t%d\\t%s\\t%d\\t%c\\n\" %\n (read.reference_name,\n read.pos,\n read.pos + t,\n read.qname,\n read.mapq,\n strand))\n\n E.stop()", "def pairsParser(seqBlock,names):\n for name in names:\n seq = []\n sIndx = [] #start index, where in the line the sequence start\n struct = [] #structure lines\n record = False\n for line in seqBlock:\n if line.startswith(name+' '):\n tmp = line.split()\n #if seq length is shorter then 80 for one seq and longer\n #for another seq the following block will be empty for the\n #shorter sequence. 
this if statement protects against that\n if len(tmp) == 4: \n try:\n seq.append(tmp[2])#[name,start nr,seq,end nr]\n except:\n print 'LINE',line\n print 'BLOCK', seqBlock\n sIndx.append(index(line,tmp[2])) \n record = True\n else:\n continue\n else:\n if record:\n record = False\n struct.append(line)\n\n###############################################################################\n# Construction of the full sequence and structure and then mapping each letter\n#in structure to a position\n\n Fseq = '' #full sequence\n Fstruct = '' #full structure\n for i in range(len(seq)):\n # slice out corresponding structure to sequence\n #so you can get the same index for structure and sequence\n tmpStruct = struct[i][sIndx[i]:(sIndx[i]+len(seq[i]))]\n Fseq = ''.join([Fseq,seq[i]])\n Fstruct = ''.join([Fstruct,tmpStruct])\n #Applies a position to every letter in structure sequence \n letterPos = zip(range(len(Fseq)),Fstruct)\n \n###############################################################################\n#Cunstruction of dictionary for where every letter in structure has a list of\n#positions corresponding to that of that letter in respect to the sequence\n\n alphabet = {}\n for pos, letter in letterPos:\n indices = []\n #if the dict contains the letter you want to add to that list\n if alphabet.__contains__(letter): \n indices = alphabet[letter]\n indices.append(pos)\n alphabet[letter] = indices\n #else you want to create a new list for that letter\n elif not letter==' ':\n indices.append(pos)\n alphabet[letter] = indices\n \n###############################################################################\n#Each list in alphabet needs to be split in two,\n#oL and cL (open and close list), to be able to fold the positions into pairs\n\n pairs = []\n for value in alphabet.values():\n middle = len(value)/2\n oL = value[:middle]\n cL = value[middle:]\n #pairs are created by making a tuple of the first in oL to\n #the last in cl, second in oL to second last in cL and so on\n pairs.extend(zip(oL,cL.__reversed__()))\n\n yield Pairs(pairs),Fseq", "def parse(self):\n logger=self.logger\n tokenizer=Tokenizer()\n self.scope=produtil.testing.parsetree.Scope()\n self.override(self.scope)\n self.parser=Parser(self.run_mode,logger,self.verbose)\n self.parser.requested_platform_name=self.platform_name\n morevars=self.make_vars()\n with open(self.inloc,'rt') as fileobj:\n self.parse_result=self.parser.parse(\n TokenizeFile(tokenizer,fileobj,self.inloc,1),self.scope,\n unique_id=self.unique_id,morevars=morevars)", "def _calc_multiple_alignment_score(wrapped_data : tuple) -> int: \n (start, finish) = wrapped_data \n score_sum = 0.\n for dna_record in tqdm(dna_sequences[start : finish + 1], total=(finish + 1 - start), desc=\"Training process\"):\n score_sum += self.aligner.score(seq, dna_record.seq)\n return score_sum", "def consensusCalling(spot, args):\n def readTrim(read, start, end):\n \"\"\"\n Trims a pysam.AlignedRead to only include the sequence that's aligned (or should be aligned)\n between start and end on reference\n returns the sequence and quality\n \"\"\"\n score = 0\n if not read.is_unmapped:\n regTrim = 0\n upS = read.cigar[0][1] if read.cigar[0][0] == 4 else 0\n dnS = read.cigar[-1][1] if read.cigar[-1][0] == 4 else 0\n \n trimS = None\n trimE = None\n if start > read.pos:\n for queryPos, targetPos in read.aligned_pairs:\n if trimS is None and targetPos >= start:\n trimS = queryPos\n else:\n score += abs(read.pos - start)\n if end < read.aend:\n for queryPos, targetPos in read.aligned_pairs[::-1]:\n 
if trimE is None and targetPos <= end:\n trimE = queryPos\n else:\n score += abs(read.aend-end)\n \n if trimS is not None:\n trimS = max(0, trimS) + upS\n else:\n trimS = 0\n \n if trimE is not None:\n trimE = min(len(read.seq), trimE) - dnS\n else:\n trimE = len(read.seq)\n seq = read.seq[trimS:trimE]\n qual = read.qual[trimS:trimE]\n if not read.is_reverse:\n seq = seq.translate(revComp)[::-1]\n qual = qual[::-1]\n \n return seq, qual\n \n #END readTrim\n \n chrom, start, end = spot.chrom, spot.start, spot.end\n buffer = args.buffer\n bam = args.bam\n #work\n supportReads = []\n spanReads = []\n #Fetch reads and trim\n totCnt = 0\n for read in bam.fetch(chrom, start-buffer, end+buffer):\n seq, qual = readTrim(read, start-buffer, end+buffer)\n if read.pos < start-300 and read.aend > end+300:\n spanReads.append((len(seq), seq, qual))\n else:\n supportReads.append((seq, qual))\n totCnt += 1\n \n if len(spanReads) == 0:\n logging.info(\"noone spans - consensus aborted. %s\" % (str(spot)))\n spot.tags[\"noSpan\"] = True\n return [spot]\n \n spanReads.sort(reverse=True)\n refread = spanReads[0]\n logging.debug(\"%d reads %d support\" % (totCnt, len(supportReads)))\n supportReads.extend([(x[1], x[2]) for x in spanReads[1:]])\n #read that spans most of the region goes first\n #use the rest for cleaning\n \n #building consensus sequence\n foutreads = NamedTemporaryFile(suffix=\".fastq\")\n for id, i in enumerate(supportReads):\n foutreads.write(\"@%d\\n%s\\n+\\n%s\\n\" % (id, i[0], i[1]))\n foutreads.flush()\n \n foutref = NamedTemporaryFile(suffix=\".fasta\")\n foutref.write(\">%s:%d-%d\\n%s\"%(\"ecoli\", start, end, refread[1]))\n foutref.flush()\n \n alignOut = NamedTemporaryFile(suffix=\".m5\")\n \n blasr(foutreads.name, foutref.name, bestn=1, nproc=1, outname=alignOut.name)\n #shutil.copyfile(foutreads.name, \"sup.fastq\")\n #shutil.copyfile(foutref.name, \"base.fasta\")\n #shutil.copyfile(alignOut.name, \"align.m5\")\n if not args.pbdagcon:\n aligns = M5File(alignOut.name)\n con = \">con\\n%s\\n\" % consensus(aligns).sequence\n else:\n logging.debug(\"pbdagcon\")\n r, con, e = exe(\"pbdagcon -m 25 -c 1 -t 0 %s\" % (alignOut.name))\n logging.debug(str(r) + \" - \" + str(e))\n con = con[con.index(\"\\n\")+1:]\n logging.debug(\"MySeq: \" + con)\n #Check if con is blank\n \n conOut = NamedTemporaryFile(suffix=\".fasta\")\n conOut.write(con)\n conOut.flush()\n refOut = NamedTemporaryFile(suffix=\".fasta\")\n refOut.write(\">%s:%d-%d\\n%s\\n\" % (chrom, start, end, \\\n args.reference.fetch(chrom, start-buffer, end+buffer)))\n refOut.flush()\n \n #map consensus to refregion\n varSam = NamedTemporaryFile(suffix=\".sam\")\n cmd = \"blasr %s %s -sam -bestn 1 -affineAlign -out %s\" % (conOut.name, refOut.name, varSam.name)\n logging.debug(cmd)\n logging.debug(exe(cmd))\n \n foutreads.close()\n foutref.close()\n alignOut.close()\n\n #convert sam to bam\n input = pysam.Samfile(varSam.name)\n varBam = NamedTemporaryFile(suffix=\".bam\")\n output = pysam.Samfile(varBam.name, 'wb', template=input)\n nReads = 0\n for read in input:\n output.write(read)\n nReads += 1\n logging.info(\"%d consensus reads created\" % (nReads))\n varSam.close()\n input.close()\n output.close()\n \n #do pileup for sequence\n pysam.sort(varBam.name, varBam.name[:-4])\n pysam.index(varBam.name)\n bam = pysam.Samfile(varBam.name, 'rb')\n \n mySpots = []\n for pos in bam.pileup():\n size = pos.pileups[0].indel\n if abs(size) < args.minIndelSize or size == 0:\n continue\n newspot = copy.deepcopy(spot)\n if size > 0:\n newspot.start = 
pos.pos + start - buffer\n newspot.end = pos.pos + start - buffer\n align = pos.pileups[0]\n newspot.tags[\"seq\"] = align.alignment.seq[align.qpos : align.qpos + align.indel]\n newspot.size = size\n newspot.tags[\"label\"] = \"INS\"\n mySpots.append(newspot)\n elif size < 0:\n newspot.start = pos.pos + start - buffer\n newspot.end = pos.pos + abs(size) + start - buffer\n #newspot.tags[\"seq\"] = args.reference.fetch(chrom, pos.pos, pos.pos + abs(size))\n newspot.size = -size\n newspot.tags[\"label\"] = \"DEL\"\n mySpots.append(newspot)\n bam.close()\n varBam.close()\n logging.debug(\"%d spots found\" % (len(mySpots)))\n return mySpots", "def read_adas(self):\n for name in self.files_atte:\n self.beam_atte.append(adas.ADAS21(name))\n for name in self.files_emis:\n self.beam_emis.append(adas.ADAS22(name))", "def __init__(self,\n seq,\n aligned_index,\n unaligned_index):\n \n self.seq=seq\n self.aligned_index=aligned_index\n self.unaligned_index=unaligned_index\n self.numeric_seq=convert_to_numeric(self.seq)\n self.upstream_regions=[]\n self.downstream_regions=[]\n self.labels=[]\n self.match_count=0\n self.percent_match=0\n self.non_specific_hits=0\n self.non_specific_percent=0\n \n self.std_index = False\n self.f_std_index = None\n self.r_std_index = None", "def perform_parse(self):\n # get folder of pdf files\n folder = QFileDialog.getExistingDirectory(\n parent=self.parent(),\n caption='Get folder with PDF documents to parse'\n )\n if folder:\n # get list of fields and patterns\n field_list = self._get_fields()\n # performing parse\n results = make_parse(folder, field_list)\n self.open_result(results)", "def parse_a_stanza(self):\n\t\t# 's' line -- score, 1 field\n\t\tline = self.fetch_line(report=\" in a-stanza\")\n\t\tfields = line.split()\n\t\tassert (fields[0] == \"s\"), \"s line expected in a-stanza (line %d, \\\"%s\\\")\" \\\n\t\t\t\t\t\t\t\t % (self.lineNumber,line)\n\t\ttry: score = int(fields[1])\n\t\texcept: score = float(fields[1])\n\n\t\t# 'b' line -- begin positions in seqs, 2 fields\n\t\tline = self.fetch_line(report=\" in a-stanza\")\n\t\tfields = line.split()\n\t\tassert (fields[0] == \"b\"), \"b line expected in a-stanza (line %d, \\\"%s\\\")\" \\\n\t\t\t\t\t\t\t\t % (self.lineNumber,line)\n\t\tbeg1 = int(fields[1]) - 1\n\t\tbeg2 = int(fields[2]) - 1\n\n\t\t# 'e' line -- end positions in seqs, 2 fields\n\t\tline = self.fetch_line(report=\" in a-stanza\")\n\t\tfields = line.split()\n\t\tassert (fields[0] == \"e\"), \"e line expected in a-stanza (line %d, \\\"%s\\\")\" \\\n\t\t\t\t\t\t\t\t % (self.lineNumber,line)\n\t\tlen1 = int(fields[1]) - beg1\n\t\tlen2 = int(fields[2]) - beg2\n\n\t\t# 'l' lines\n\t\tpieces = []\n\t\twhile (True):\n\t\t\tline = self.fetch_line(report=\" in a-stanza\")\n\t\t\tfields = line.split()\n\t\t\tif (fields[0] != \"l\"):\n\t\t\t\tbreak\n\t\t\tstart1 = int(fields[1]) - 1\n\t\t\tstart2 = int(fields[2]) - 1\n\t\t\tlength = int(fields[3]) - start1\n\t\t\tlength2 = int(fields[4]) - start2\n\t\t\ttry: pctId = int(fields[5])\n\t\t\texcept: pctId = float(fields[5])\n\t\t\tassert (length2 == length), \"length mismatch in a-stanza\"\n\t\t\tpieces.append((start1+self.seq1_start,start2+self.seq2_start,length,pctId))\n\t\tassert (line == \"}\"), \"improper a-stanza terminator (line %d, \\\"%s\\\")\" \\\n\t\t\t\t\t\t\t% (self.lineNumber,line)\n\t\treturn (score,pieces)", "def nextValues(self):\n return list(i.nextLine[self.idx] for i in self if not i.isFinished)\n\n #def isFinished(self):\n \"\"\"When all the data is read.\"\"\"\n #pass\n\n #def 
getInitialValue(self):\n \"\"\"Returns the initial alignment value.\"\"\"\n #pass\n\n #def newCurrentValue(self):\n \"\"\"Returns the next alignment value.\"\"\"\n #pass\n\n #def align(self, currentValue):\n \"\"\"Process all the elements of self to make them aligned.\"\"\"\n #pass", "def test_alignments(self):\n # test against the correct input file\n parser = Lav(self.__correct_file)\n for alignment in parser.alignments():\n self.assertEqual(len(alignment), 7)\n for alignment in parser.alignments(gapped=False):\n self.assertEqual(len(alignment), 8)\n # test againts incorrect input files\n for lav_file in self.__incorrect_files:\n parser = Lav(os.path.join(self.__incorrect_file_dir,\n lav_file))\n with self.assertRaises(LavError):\n for alignment in parser.alignments():\n self.assertIsInstance(alignment,\n Lav.GapFreeAlignment)", "def parse(self):\n\n try:\n query = SearchIO.parse(self.resultsFile, \"hmmer3-text\").next()\n except StopIteration:\n raise RuntimeError(\"Invalid HMMER output\")\n\n\n self.hmmLength = query.seq_len\n self.total_gaps = [0]*self.hmmLength\n num_hits = 0\n for i, hit in enumerate(query):\n #if not hit.is_included:\n #Skip sequences below threshold\n #continue\n origSeqLength = int(hit.id.split(\"|\")[-1])\n for j, hsp in enumerate(hit):\n num_hits += 1\n seq = HMMERSequence(\n str(hsp.hit.seq), \n query.seq_len, \n origSeqLength, \n hsp.evalue,\n hsp.hit_start, \n hsp.hit_end, \n hsp.query_start, \n hsp.query_end\n )\n seq.align(hsp.hit_start, hsp.hit_end, hsp.query_start, hsp.query_end)\n seq.determineGapPositions()\n _id = \"{}_{}\".format(num_hits, hit.id)\n desc = \"[Seq:{}-{}; HMM: {}-{}; e-value: {}; program={}]\".format(\n hsp.hit_start+1,\n hsp.hit_end,\n hsp.query_start,\n hsp.query_end,\n hsp.evalue,\n query.program\n )\n record = SeqRecord(seq, id=_id, description=desc)\n\n #Update gaps for all sequences, even if not saved\n self.updateGaps(seq.gaps)\n\n if not seq.skip() and hit.is_included:\n self.records.append(record)", "def initial_sequence_loading(self, work_dir: str):\n # preprocess FASTA with sequences\n # rename IUPAC to N symbols using sed\n fasta_raw = self.from_param(\"manifest_data\", \"fasta_dna\")\n fasta_clean = self.pjc(work_dir, \"fasta\", \"seq_no_iupac.fasta\")\n self.remove_IUPAC(fasta_raw, fasta_clean)\n\n # start coord system ranking and agps processing\n agps = self.from_param(\"manifest_data\", \"agp\", not_throw = True)\n\n # rank cs_names, met in agps.keys (\"-\" separated, i.e. 
\"scaffold-contig\") based on cs_order\n # use noagp_cs_name_default for \"noagp\" assemblies\n cs_order = self.coord_sys_order(self.param(\"cs_order\"))\n noagps_cs = self.param(\"noagp_cs_name_default\")\n cs_rank = self.used_cs_ranks(agps, cs_order, noagps_cs)\n\n # remove gaps and lower_level mappings if the are coveres by higher level ones\n # i.e.: remove 'contigN to chromosomeZ', if 'contigN to scaffoldM' and 'scaffoldM to chromosomeZ' are in place\n # returns None if no agps provided\n agps_pruned_dir = self.pjc(work_dir, \"agps_pruned\")\n agps_pruned = self.prune_agps(agps, cs_order, agps_pruned_dir, self.param_bool(\"prune_agp\"))\n\n # empty agps_pruned ignored\n self.load_seq_data(fasta_clean, agps_pruned, cs_rank, self.pjc(work_dir, \"load\"))\n\n # mark all the \"contig\"s or noagp_cs as being sourced from ENA\n if not self.param_bool(\"no_contig_ena_attrib\"):\n if agps is None:\n self.add_contig_ena_attrib(self.pjc(work_dir, \"load\", \"set_ena\"), cs_name = noagps_cs)\n else:\n self.add_contig_ena_attrib(self.pjc(work_dir, \"load\", \"set_ena\"))\n\n # unversion scaffold, remove \".\\d$\" from names if there's a need\n if self.param_bool(\"unversion_scaffolds\"):\n self.unversion_scaffolds(cs_rank, self.pjc(work_dir, \"unversion_scaffolds\"))\n\n # add assembly mappings between various cs to meta table for the mapper to work properly\n cs_pairs = agps_pruned and agps_pruned.keys() or None\n self.add_asm_mappings(cs_pairs, self.pjc(work_dir, \"asm_mappings\"))\n\n # set toplevel seq_region attribute\n self.set_toplevel(self.pjc(work_dir, \"set_toplevel\"), self.param(\"not_toplevel_cs\"))\n\n # nullify contig version and update mappings strings accordingly; ignore for \"load_additional_sequences\" mode\n if not self.param_bool(\"load_additional_sequences\"):\n self.nullify_ctg_cs_version(self.pjc(work_dir, \"asm_mapping\", \"nullify_cs_versions\"))", "def setFromAlignment(self, aligned, pseudo_count = 0.0):\n self.cols = -1\n self.nsites = len(aligned)\n seqs = []\n # Below we create a list of Sequence from the alignment,\n # while doing some error checking, and figure out the number of columns\n for s in aligned:\n # probably a text string, so we make a nameless sequence from it\n if not type(s) is Sequence:\n s=Sequence(s, Motif.getAlphabet(self))\n else:\n # it was a sequence, so we check that the alphabet in\n # this motif will be able to process it\n if not Motif.isAlphabet(self, s):\n raise RuntimeError(\"Motif alphabet is not valid for sequence \" + s.getName())\n if self.cols == -1:\n self.cols = s.getLen()\n elif self.cols != s.getLen():\n raise RuntimeError(\"Sequences in alignment are not of equal length\")\n seqs.append(s)\n # The line below initializes the list of Distrib (one for each column of the alignment)\n self.counts = [Distrib(Motif.getAlphabet(self), pseudo_count) for _ in range(self.cols)]\n # Next, we do the counting, column by column\n for c in range( self.cols ): # iterate through columns\n for s in seqs: # iterate through rows\n # determine the index of the symbol we find at this position (row, column c)\n self.counts[c].count(s.getSite(c))\n # Update the length\n self.len = self.cols", "def process_seq_pairs(seq_pairs, args, stats):\n #print '\\n\\n'.join([str(z) for z in seq_pairs])\n seq_to_tree = []\n for x in seq_pairs:\n seq_to_tree.append(x.sequence1.structure)\n seq_to_tree.append(x.sequence2.structure)\n tree_distance = rna_distance('\\n'.join(seq_to_tree))\n tree_distance = tree_distance.strip('\\n').split('\\n') # take off last lr\n 
assert len(tree_distance) == len(seq_pairs), (\n 'Error length of tree distance %s does not match length of seq_pairs '\n '%s and should -- check installation of RNAdistance'\n % (len(tree_distance), len(seq_pairs))\n )\n for i, x in enumerate(seq_pairs):\n seq_pairs[i].tree_distance = tree_distance[i].split(' ')[1]\n x.output(args)\n stats['energy_delta'].append(x.energy_delta)\n stats['edit_distance'].append(x.edit_distance)\n try:\n stats['tree_distance'].append(float(x.tree_distance))\n except ValueError:\n stats['tree_distance'].append(None)", "def read_all(self, *args, **kwargs):\n pass", "def main():\n arg_parser = argparse.ArgumentParser(description=\"\"\"\n This utility will take a SAM alignment file from paired end reads \n and filter the original read FASTQ files do those reads without\n high-likelihood alignments to human.\n For gzipped alignments, consider using pipes: \n gunzip -c ref.fna.gz | strip_mt_ebv.py | gzip > ref.nomtebv.fna.gz\n \"\"\")\n\n arg_parser.add_argument(\n '--alnfile', '-A',\n type=argparse.FileType('r'),\n help='Alignment File. Can be stdin. For gzip, consider pipes',\n default=sys.stdin\n )\n arg_parser.add_argument(\n '--r1in', '-1',\n required=True,\n help='Input fastq file for R1'\n )\n arg_parser.add_argument(\n '--r2in', '-2',\n required=True,\n help='Input fastq file for R2'\n )\n arg_parser.add_argument(\n '--r1out', '-o1',\n required=True,\n help='Output fastq file for R1'\n )\n arg_parser.add_argument(\n '--r2out', '-o2',\n required=True,\n help='Output fastq file for R2'\n )\n arg_parser.add_argument(\n '--mapq',\n default=30,\n type=int,\n help='Minimum mapq required to be considered a valid read'\n )\n arg_parser.add_argument(\n '--cov_min',\n type=float,\n default=0.9\n )\n\n args = arg_parser.parse_args()\n\n passed_ids = get_passing_ids(\n args.alnfile,\n args.mapq,\n args.cov_min,\n )\n\n filter_fastq(\n passed_ids,\n args.r1in,\n args.r2in,\n args.r1out,\n args.r2out\n )", "def bwa_align(self, input_bam, out_sais):\n for read_num in [1, 2]:\n self.cmd(\"bwa aln -t{threads} -b -{read_num} {reference_genome} {input_bam} > {out_sai}\"\n .format(\n threads=self.n_threads,\n read_num=read_num,\n reference_genome=self.files[\"reference_genome\"],\n input_bam=input_bam,\n out_sai=out_sais[read_num-1] # account for indexing\n ),\n on_error=lambda: self.create_error_file(out_sais[read_num-1]),\n shell=True)", "def parse(self):\n\t\tfor part in self.mail.walk():\n\t\t\tself.process_part(part)", "def run_multimapping(SRA):\n\n if not os.path.exists(\"TMP/ambiguous_reads/\"):\n os.mkdir(\"TMP/ambiguous_reads/\")\n\n cmd_STAR = 'STAR --outSAMtype BAM SortedByCoordinate --runThreadN 8 --winAnchorMultimapNmax 200 --seedSearchStartLmax 15 --genomeDir' + ' ' + STAR_TRANSCRIPTOME_DIR + ' ' + '--readFilesIn' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + '--outFileNamePrefix' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_'\n output = subprocess.run(cmd_STAR, shell=True)\n\n \n # Keep only multi-mapping reads:\n cmd_filter = 'python code/sam_STAR_mapq_filtering.py' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_Aligned.sortedByCoord.out.bam' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_multi_mapped_sorted.bam' + ' ' + 'all'\n output = subprocess.run(cmd_filter, shell=True)\n\n cmd_samtools2 = 'samtools index' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_multi_mapped_sorted.bam'\n output = subprocess.run(cmd_samtools2, shell=True)", "def process(self):\n\n linelang = defaultdict(int)\n wordlang = 
defaultdict(int)\n\n linefont = defaultdict(int)\n wordfont = defaultdict(int)\n\n inputfiles = self.input_files\n for input_file in inputfiles:\n\n alignurl = input_file.url\n pcgts = parse(alignurl, True)\n page = pcgts.get_Page()\n regions = page.get_TextRegion()\n\n for region in regions:\n lines = region.get_TextLine()\n\n for line in lines:\n try:\n llang = line.primaryLanguage\n linelang[llang] += 1\n except TypeError:\n pass\n\n try:\n lfont = line.fontFamily\n linefont[lfont] += 1\n except TypeError:\n pass\n\n words = line.get_Word()\n for word in words:\n try:\n wlang = word.language\n wordlang[wlang] += 1\n except TypeError:\n pass\n\n try:\n wfont = word.get_TextStyle().fontFamily\n wordfont[wfont] += 1\n except TypeError:\n pass\n\n #predominant language\n try:\n lang = max(linelang, key=lambda k: linelang[k])\n except TypeError:\n try:\n lang = max(wordlang, key=lambda k: wordlang[k])\n except TypeError:\n lang = 'German'\n\n #predominant font\n try:\n font = max(linefont, key=lambda k: linefont[k])\n except TypeError:\n try:\n font = max(wordfont, key=lambda k: wordfont[k])\n except TypeError:\n font = 'Antiqua'\n\n\n print(lang)\n print(font)", "def process_bam(bam, output_dp):\r\n bam_fn = os.path.basename(bam)\r\n coverage_fp = os.path.join(output_dp, bam_fn.replace('.bam', '_coverage.csv'))\r\n reads_fp = os.path.join(output_dp, bam_fn.replace('.bam', '_reads.csv'))\r\n\r\n samfile = pysam.AlignmentFile(bam, \"rb\")\r\n contigs_size = get_ref_lens(samfile)\r\n coverage = coverage_vectors(contigs_size)\r\n\r\n read_output = open(reads_fp, 'w+')\r\n read_output.write('read_length,mapq,start,end,reference')\r\n for l in samfile.fetch():\r\n if l.mapq < 10: continue\r\n if l.rlen < 50: continue\r\n read_output.write('\\n{},{},{},{},{}'.format(l.rlen, l.mapq,\r\n l.reference_start, l.reference_end, samfile.getrname(l.reference_id).split(',')[0]))\r\n coverage[samfile.getrname(l.tid)][\"nb_reads\"] += 1\r\n coverage[samfile.getrname(l.reference_id)][\"positions\"][l.reference_start:l.reference_end] = 1\r\n coverage[samfile.getrname(l.tid)][\"nb_bp\"] += l.rlen\r\n read_output.close()\r\n\r\n coverage_prop = {}\r\n for contig,vector in coverage.items():\r\n if vector['nb_bp'] == 0: # no reads, so output blank file\r\n output = pandas.DataFrame()\r\n output.to_csv(coverage_fp, index=False)\r\n continue\r\n temp = {}\r\n for i in contigs_size:\r\n if contig == i[\"Seq\"]:\r\n temp[\"length\"] = i[\"Length\"]\r\n temp[\"ratio_covered\"] = np.sum(vector[\"positions\"])/float(len(vector[\"positions\"]))\r\n temp[\"number_reads\"] = vector[\"nb_reads\"]\r\n temp[\"number_bp\"] = vector[\"nb_bp\"]\r\n if vector[\"nb_reads\"] > 0 :\r\n coverage_prop[contig] = temp\r\n\r\n output = pandas.DataFrame(coverage_prop).transpose()\r\n output = output.sort_values(['number_bp','ratio_covered'],ascending=[0,0])\r\n output.to_csv(coverage_fp, index=False)\r\n samfile.close()\r\n return coverage_fp, reads_fp", "def get_alignment(self, names=None):\n names = names or self.experiments.keys()\n return dict([(e, self.experiments[e]['align']) \\\n for e in names if 'align' in self.experiments[e]])", "def _parseRecords(self):\n # dict of parse methods for most common records that will be stored in structured arrays\n FLAG2METHOD = {'PS' : self.parseHighPassRecord,\n 'PC' : self.parseLowPassRecord,\n 'VD' : self.parseDigitalSValRecord}\n # dict of (record type, listname to store it in) tuples\n FLAG2REC = {'L' : (LayoutRecord, 'layoutrecords'),\n 'MS' : (SurfMessageRecord, 'messagerecords'),\n 'MU' : 
(UserMessageRecord, 'messagerecords'),\n 'PE' : (EpochRecord, 'epochrecords'),\n 'D' : (DisplayRecord, 'displayrecords'),\n 'VA' : (AnalogSValRecord, 'analogsvalrecords')}\n f = self.f\n while True:\n # returns an empty string when EOF is reached\n flag = f.read(2).rstrip(NULL).decode() # TODO: should this strip NULL?\n if flag == '':\n break\n # put file pointer back to start of flag\n f.seek(-2, 1) # TODO: unnecessary - doesn't this slow down parsing quite a bit?\n if flag in FLAG2METHOD: # these are the most common\n FLAG2METHOD[flag](f) # call the method\n elif flag in FLAG2REC:\n rectype, reclistname = FLAG2REC[flag]\n rec = rectype()\n rec.parse(f)\n #wx.Yield() # allow wx GUI event processing during parsing\n self._appendRecord(rec, reclistname)\n else:\n raise ValueError('Unexpected flag %r at offset %d' % (flag, f.tell()))\n #self.percentParsed = f.tell() / self.filesize * 100", "def test_get_subalignment_sequence_order_maintained(self):\n result = AlignedSeq.get_sub_alignment_by_list_id([\"s3\", \"s1\"], self.alignment)\n expected = MultipleSeqAlignment([self.alignment[0], self.alignment[2]])\n self.assertTrue(msas_equal(expected, result))" ]
[ "0.64816856", "0.6445793", "0.6301667", "0.62004644", "0.61849123", "0.6097089", "0.60698086", "0.58538115", "0.5847832", "0.5827765", "0.5823239", "0.581179", "0.581081", "0.57999784", "0.57905626", "0.57769024", "0.5743529", "0.5681", "0.5614261", "0.55853", "0.5562853", "0.5543108", "0.55078685", "0.55040014", "0.55016136", "0.54968345", "0.5478513", "0.54779065", "0.54756397", "0.5468621", "0.5461108", "0.54421455", "0.5435498", "0.5416844", "0.5408605", "0.54082525", "0.54000974", "0.5388687", "0.5385783", "0.53815824", "0.5381352", "0.53755933", "0.53684837", "0.535692", "0.5342445", "0.5336435", "0.5327272", "0.5316486", "0.5316125", "0.53106254", "0.5282758", "0.5266857", "0.5230625", "0.5208373", "0.52030027", "0.51989484", "0.5196878", "0.5181823", "0.5172014", "0.5139146", "0.51371646", "0.5134878", "0.5111728", "0.5099212", "0.50984186", "0.50885224", "0.5081424", "0.5076896", "0.50646675", "0.5063264", "0.50545806", "0.5046719", "0.50333554", "0.50325567", "0.5020481", "0.50151676", "0.5004103", "0.49995863", "0.49968237", "0.49963313", "0.49931145", "0.49919072", "0.498436", "0.49765223", "0.49647877", "0.49613893", "0.4960691", "0.4946595", "0.4944633", "0.49355096", "0.49308047", "0.49246377", "0.49241868", "0.49232385", "0.4909461", "0.49024752", "0.49007824", "0.48943642", "0.4889025", "0.48791206" ]
0.4956526
87
Highlights currentSelection on stdscr.
def highlightSelection(stdscr, selection):
    s = tuple(list(selection.addStrArgs) + [curses.A_REVERSE])
    stdscr.addstr(*s)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def switchSelection(stdscr, lastSelection, currentSelection):\n stdscr.addstr(*lastSelection.addStrArgs)\n highlightSelection(stdscr, currentSelection)", "def draw_selected(self):\n if self.get_selected() is not None and not self.check_if_locked(self.get_selected()):\n self.color_cell(pos=self.get_selected(\n ), color=SELECTED_INVALID if self.get_selected() in self.invalid else SELECTED)", "def __highlight_selection(self, x: int, y: int) -> None:\n round_rect(screen, (x-2, y-2, SELECTOR_WIDTH + 4, SELECTOR_HEIGHT + 4), HIGHLIGHT_COLOUR, 6)", "def highlight_color(self):\n return curses.color_pair(4) if self.cycling else curses.color_pair(2)", "def interactive_select(space, current):\n print \"Type an element name, an element index, or an unambiguous prefix to add to your selection.\"\n print \"Type '\" + color_code(MAGENTA) + \"list\" + CLEAR_COLOR +\"' to see the list of valid selections/indices.\"\n print \"Type '\" + color_code(MAGENTA) + \"clear\" + CLEAR_COLOR +\"' to clear selection.\"\n print \"Enter an empty line when done.\\n\"\n \n done = False\n while not done:\n print color_code(BLACK, bold=True), \"\\nCurrent selection\" + CLEAR_COLOR + \":\", (current if current else \"None\")\n tentative = raw_input(color_code(YELLOW) + \"Selection or Command\" + CLEAR_COLOR + \": \")\n matches = [el for el in space if el.startswith(tentative)]\n try: index = int(tentative)\n except ValueError: index = None\n if tentative == 'list':\n for i,el in enumerate(space):\n print \"\\t\", color_code(BLUE, bold=True), i, CLEAR_COLOR, el\n print \"\\n\"\n elif tentative == 'clear':\n current = []\n elif tentative == '':\n if current:\n print color_code(GREEN), \"\\nFinal selection\" + CLEAR_COLOR + \":\", current, \"\\n\\n\"\n done = True\n else:\n print_error(\"Must select at least one\")\n elif len(matches) > 1:\n print_error(\"Multiple matches found for `{}' ({})\".format(tentative, matches))\n elif len(matches):\n if matches[0] in current:\n print_warning(\"{} was already selected\".format(matches[0]))\n else:\n current.append(matches[0])\n elif index is not None:\n if index < 0 or index >= len(space):\n print_error(\"Invalid index {}\".format(index))\n elif space[index] in current:\n print_warning(\"{} was already selected\".format(space[index]))\n else:\n current.append(space[index])\n else:\n print_error(\"Unknown token: {}\".format(tentative))\n \n return current", "def flush(self, header, caret, select_start_pos, select_end_pos, scr_topleft, scr_bottomright):\n self.update_screen_size()\n self.stdscr.erase()\n # header\n for text, color in header:\n self.stdscr.addstr(text, color_pair(color))\n text_selected = select_start_pos is not None\n # display lines\n displayed_lines = self.lines[scr_topleft.y : min(len(self.lines), scr_bottomright.y)]\n for index, line in enumerate(displayed_lines):\n self.stdscr.addstr(PADCHAR)\n if len(line) >= scr_topleft.x:\n # inclusive, position of line start and line end of displayed line\n ln_start = Position(scr_topleft.y + index, scr_topleft.x)\n ln_end = Position(scr_topleft.y + index, scr_topleft.x + self.screen_width())\n displayed_line = line[ln_start.x : min(len(line), scr_bottomright.x - 1)]\n if text_selected:\n # whether start position and end position of line are between selection\n start_between = ln_start.is_between(select_start_pos, select_end_pos)\n end_between = ln_end.is_between(select_start_pos, select_end_pos)\n # whether selection is between start and end position\n select_start_between = select_start_pos.is_between(ln_start, ln_end)\n 
select_end_between = select_end_pos.is_between(ln_start, ln_end)\n if start_between and end_between:\n # completely enclosed\n self.stdscr.addstr(displayed_line, color_pair(7))\n elif start_between:\n # only start between selection\n # end is on same line\n # only starting portion is highlighted\n self.stdscr.addstr(displayed_line[ : select_end_pos.x - ln_start.x + 1], color_pair(7))\n self.stdscr.addstr(displayed_line[select_end_pos.x - ln_start.x + 1 : ])\n elif end_between:\n # only end between selection\n # start is on same\n # only ending portion is highlighted\n self.stdscr.addstr(displayed_line[ : select_start_pos.x - ln_start.x])\n self.stdscr.addstr(displayed_line[select_start_pos.x - ln_start.x : ], color_pair(7))\n elif select_start_between and select_end_between:\n # selection is all on this line\n # start and end not highlighted\n self.stdscr.addstr(displayed_line[ : select_start_pos.x - ln_start.x])\n self.stdscr.addstr(\n displayed_line[select_start_pos.x - ln_start.x : select_end_pos.x - ln_start.x + 1],\n color_pair(7)\n )\n self.stdscr.addstr(displayed_line[select_end_pos.x + 1 - ln_start.x : ])\n else:\n # not enclosed by selection at all\n self.stdscr.addstr(displayed_line)\n else:\n self.stdscr.addstr(displayed_line)\n if index != len(displayed_lines) - 1:\n self.stdscr.addstr('\\n')\n self.stdscr.move(caret.y - scr_topleft.y + HEADER_LEN, caret.x - scr_topleft.x + PAD_LEN)", "def highlight(self):\n\n if self.selected_text_file is None:\n return\n if self.selected_text_file[FULLTEXT] is None:\n return\n format_ = QtGui.QTextCharFormat()\n cursor = self.ui.textBrowser.textCursor()\n for item in self.case_text:\n try:\n cursor.setPosition(int(item['pos0']), QtGui.QTextCursor.MoveMode.MoveAnchor)\n cursor.setPosition(int(item['pos1']), QtGui.QTextCursor.MoveMode.KeepAnchor)\n format_.setFontUnderline(True)\n format_.setUnderlineColor(QtCore.Qt.GlobalColor.red)\n cursor.setCharFormat(format_)\n except Exception as err:\n msg = \"highlight, text length \" + str(len(self.ui.textBrowser.toPlainText()))\n msg += \"\\npos0:\" + str(item['pos0']) + \", pos1:\" + str(item['pos1'])\n msg += \"\\n\" + str(err)\n logger.debug(msg)", "def on_selected(self):\n self.colour = self.selected_colour\n self.is_selected = True\n self.redraw()", "def active_selection():\r\n\r\n om.MGlobal.getActiveSelectionList()", "def paint(self):\n if self.config['colorize']:\n self.highlight()\n else:\n self.clear_highlight()", "def _render_highlighted(\n document_text: str,\n begin: int,\n end: int,\n context_size: int = 0,\n highlight_color: str = \"On_Green\",\n) -> str:\n black_color = _get_text_color_from_list(\"Color_off\")\n return (\n document_text[begin - context_size : begin]\n + _get_text_color_from_list(highlight_color)\n + document_text[begin:end]\n + black_color\n + document_text[end : end + context_size]\n )", "def unhighlight(self, current=False):\n if current:\n if self.currentEditor is not None:\n self.currentEditor.highlight()\n else:\n for editor in self.editors:\n editor.highlight()", "def highlightCode(self, _event=None):\n count = 0\n if self.text.tag_ranges('sel'):\n self.text.tag_add('color' + str(count), tk.SEL_FIRST, tk.SEL_LAST)\n self.text.tag_configure('color' + str(count), foreground='black', background='yellow')\n count += 1\n else:\n # Do this if you want to overwrite all selection colors when you change color without selection\n # for tag in text.tag_names():\n # text.tag_delete(tag)\n self.text.config(foreground='yellow')\n\n fileContainingText = open(newTextFile, 
\"a\")\n\n hText = self.text.get(tk.SEL_FIRST, tk.SEL_LAST)\n fileContainingText.write(hText)", "def __update_selection(self):\n if self.selected_offset != self.old_selected_offset:\n if self.old_selected_offset > -1:\n old_offset = (self.old_selected_offset - self.top_offset) * 8\n\n self.display.text(\">\", 0, old_offset, 0)\n\n new_offset = (self.selected_offset - self.top_offset) * 8\n self.display.text(\">\", 0, new_offset, 1)\n self.display.show()\n self.old_selected_offset = self.selected_offset", "def set_mouse_selection(self, item, mpos):\r\n if item.is_mouse_selection(mpos):\r\n item.set_font_color(RED)\r\n item.set_italic(True)\r\n else:\r\n item.set_font_color(WHITE)\r\n item.set_italic(False)", "def set_mouse_selection(self, item, mpos):\r\n\t\tif item.is_mouse_selection(mpos):\r\n\t\t\titem.set_font_color(YELLOW)\r\n\t\t\titem.set_italic(True)\r\n\t\telse:\r\n\t\t\titem.set_font_color(WHITE)\r\n\t\t\titem.set_italic(False)", "def set_highlight(self, highlighted):\n self.highlighted = highlighted", "def BaseSetSelection(self, start, end):\n super(EditraBaseStc, self).SetSelection(start, end)", "def set_current_tool_to_selection_tool(self):\n\n self.variables.current_shape_id = self.variables.select_rect_id\n self.variables.active_tool = TOOLS.SELECT_TOOL\n self.variables.current_tool = TOOLS.SELECT_TOOL", "def highlight(self, *args):\n cw = self.cur_win()\n cw.highlight()\n if self.cur == Win.right:\n cw.down()", "def _conf_highlight(self):\n textbuffer = self.ref_object.get_buffer()\n tag_table = textbuffer.get_tag_table()\n c_tag = tag_table.lookup(\"colored\")\n if not c_tag:\n c_tag = textbuffer.create_tag(\"colored\", foreground=\"#000000\", background=\"#FFFF00\")\n text = textbuffer.get_text(textbuffer.get_bounds()[0], textbuffer.get_bounds()[1])\n textbuffer.delete(textbuffer.get_bounds()[0], textbuffer.get_bounds()[1])\n for line in re.split(r'\\r\\n|\\r|\\n', text):\n for e in re.compile(\"(\" + self.entry.get_text().lower() + \")\", re.I).split(line):\n if re.search(self.entry.get_text().lower(), e, re.I):\n textbuffer.insert_with_tags(textbuffer.get_end_iter(), e, c_tag)\n else:\n textbuffer.insert_with_tags(textbuffer.get_end_iter(), e)\n textbuffer.insert_with_tags(textbuffer.get_end_iter(), '\\n')", "def SetSelection(self, s):\r\n\r\n self.selection = s\r\n self._commandInt = s", "def select(self, selected = True):\n \n if selected != self._selected:\n if selected:\n self._border.set_border_width(globals.HIGHLIGHT_BORDER_WIDTH)\n Member.focus.append(self)\n else:\n self._border.set_border_width(self._border_width)\n Member.focus.remove(self)\n \n self._selected = selected", "def __enter__(self):\n self.stdscr = curses.initscr()\n curses.noecho() # Don't display pressed keys\n curses.cbreak() # React to keys without Enter press\n self.stdscr.keypad(True) # Use keypad & navigation keys\n self.stdscr.nodelay(True) # Non-blocking input reading\n curses.start_color() # Enable coloured outputs\n curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK) # Color as (FG, BG)\n curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_WHITE)\n curses.init_pair(3, curses.COLOR_CYAN, curses.COLOR_BLACK)\n curses.init_pair(4, curses.COLOR_BLACK, curses.COLOR_CYAN)\n return self.stdscr", "def update_extra_selections(self):\n\n if len(self.cursors) > 1:\n # get highlight colors\n highlight_color = self.txt_edit.palette().highlight()\n highlight_txt_color = self.txt_edit.palette().highlightedText()\n\n extra_selections = []\n\n for cursor in self.cursors:\n extra_sel = 
self.txt_edit.ExtraSelection()\n extra_sel.cursor = cursor\n extra_sel.format.setBackground(highlight_color)\n extra_sel.format.setForeground(highlight_txt_color)\n extra_selections.append(extra_sel)\n\n self.txt_edit.setExtraSelections(extra_selections)\n\n else:\n # clear extra selections\n self.txt_edit.setExtraSelections([])", "def mark_selected():\n (buffer, start, end) = get_selection_or_word()\n selection = buffer.get_chars(start, end)\n\n if selection != \"\":\n for m in buffer.file().search(selection, regexp=False):\n GPS.Locations.add(\"Local occurrences\",\n m.file(), m.line(), m.column(),\n selection,\n highlight=\"dynamic occurrences\",\n length=len(selection))", "def watchSelection(self, sel):\n sel.observers.append(self.selectionLabel.set_text)", "def setSelectionColorScheme(self, focused=None, unfocused=None):\n if focused is None:\n focused = self.selectionColor\n if unfocused is None:\n unfocused = self.unfocusedRegionColor\n self.selection.setColorScheme(focused, unfocused)\n beg = self.selection.getBeginSeconds()\n dur = self.selection.getWidthSeconds()\n wform = self.selection.getSelectedWaveform()\n self.selection.select(beg, dur, wform)", "def highlight(self, **highlight):\n self._evaluated = False\n self._highlight = highlight\n return self", "def SetOldSelection(self, s):\r\n \r\n self.old_selection = s", "def highlightColor( self ):\n return self._highlightColor", "def smart_highlight_on(self, buf, highlight_start, highlight_len):\n if (self.update_colors or\n buf.get_tag_table().lookup('smart_highlight') == None):\n self.fill_tag_table(buf)\n buf.apply_tag_by_name('smart_highlight',\n buf.get_iter_at_offset(highlight_start),\n buf.get_iter_at_offset(highlight_start + highlight_len))", "def _higlightWord(self, bOn=True): #$NON-NLS-1$\r\n if self.currRange:\r\n if bOn:\r\n self.currRange.scrollIntoView()\r\n self.currRange.select()\r\n else:\r\n self.mshtmlEditControl.selectNone()", "def show(self):\n print highlight(self.current_content, self.lexer(), Formatter())", "def drawHighlight( self, painter, option, rect ):\n \n pad = self.highlightPadding()\n left, top, right, bottom = self.contentsMargins()\n \n x = (-pad / 2.0) + left\n y = (-pad / 2.0) + top\n w = (self.rect().width() + pad + 1) - (left + right) - 1\n h = (self.rect().height() + pad + 1) - (top + bottom) - 1\n rradius = self.roundingRadius()\n \n # draw the highlight\n painter.setRenderHint(painter.Antialiasing)\n painter.setPen( Qt.NoPen )\n painter.setBrush( self.highlightBrush() )\n painter.drawRoundedRect( x, y, w, h, rradius, rradius )\n painter.setRenderHint(painter.Antialiasing, False)", "def _highlight_digraph(self, new_screen: Screen) -> None:\n digraph_char = self._get_digraph_char()\n if digraph_char:\n cpos = new_screen.get_cursor_position(self)\n new_screen.data_buffer[cpos.y][cpos.x] = _CHAR_CACHE[\n digraph_char, \"class:digraph\"\n ]", "def _hilightcurrent(self, onoff):\n if len(self.canvas[\"items\"]):\n self.canvas[\"items\"][self.index]['frameColor']=\\\n list(self.highlight)[:3]+[self.highlight[3] if onoff else 0]", "def __editSelectBrace(self):\n self.activeWindow().selectToMatchingBrace()", "def scroll_highlight2(self):\n self._window.column2.align_disp.editor.scroll_highlight(self._window.column2.align_disp.currentWord)", "def setClipboardSelection(self, s: str) -> None:\n # Alas, returning s reopens #218.\n return", "def on_selection_modified_async(self):\n\n if not self.header or len(self.view.sel()) == 0:\n return\n\n lines, line = self.get_details_of_line_being_tested()\n\n 
if not lines or not lines[0].assertion_colrange:\n self.view.erase_regions('current_syntax_test')\n return\n\n cursor = self.view.sel()[0]\n highlight_only_cursor = False\n if cursor.empty():\n cursor = sublime.Region(cursor.begin(), cursor.end() + 1)\n else:\n highlight_only_cursor = re.match(r'^\\^+$', self.view.substr(cursor)) is not None\n\n col_start, col_end = lines[0].assertion_colrange\n if highlight_only_cursor:\n col_start = self.view.rowcol(cursor.begin())[1]\n col_end = self.view.rowcol(cursor.end())[1]\n elif col_end == col_start:\n col_end += 1\n\n # if the tests extend past the newline character, stop highlighting at the \\n\n # as this is what these tests will assert against\n pos_start = min(line.begin() + col_start, line.end())\n pos_end = min(line.begin() + col_end, line.end() + 1)\n region = sublime.Region(pos_start, pos_end)\n\n scope = get_setting('syntax_test.highlight_scope', 'text')\n styles = get_setting('syntax_test.highlight_styles', ['DRAW_NO_FILL'])\n style_flags = RegionOption(*styles)\n\n self.view.add_regions('current_syntax_test', [region], scope, '', style_flags)", "def sgnRenderCurrentFrame(self):\n\n self.uiSaveSelectedRenders()\n\n self.core.render(self.lstSelectedRenders, bCurrentFrame = True)", "def highlight(self, highlightedItem):\n\t\tfor nr, i in enumerate(self.content):\n\t\t\tself.content[nr][1] = 16 if nr == highlightedItem else self.content[nr][2]", "def GetSelection(self):\r\n\r\n return self._current", "def _update_selected_result(self, old_index, new_index):\n try:\n self.formatted_results[old_index] = (\n self.style[\"unselected\"],\n self.formatted_results[old_index][1],\n )\n except IndexError:\n pass\n try:\n self.formatted_results[new_index] = (\n self.style[\"selected\"],\n self.formatted_results[new_index][1],\n )\n except IndexError:\n pass", "def current_selection(self):\n row_ids = set(item.row() for item in self.tableView.selectedIndexes())\n sel = []\n for row_id in row_ids:\n row = self.table[row_id]\n sel.append('\\t'.join(row))\n return '\\n'.join(sel)", "def current_selection(self):\n row_ids = set(item.row() for item in self.tableView.selectedIndexes())\n sel = []\n for row_id in row_ids:\n row = self.table[row_id]\n sel.append('\\t'.join(row[self.table.ordinal:]))\n return '\\n'.join(sel)", "def _define_highlights(self):\n for ansi_code in dict.fromkeys([*self._colors.values(),\n *self._colors_special.values()]):\n code_safe = ansi_code.replace(';', '_')\n fg, bg, special = ansi_to_vim_color(ansi_code)\n args = ''\n if fg is not None:\n args += 'ctermfg=' + fg\n if bg is not None:\n args += ' ctermbg=' + bg\n if special: # special is never None\n args += ' cterm=' + special\n if args:\n cmd = f'hi color{code_safe} {args}'\n logger.debug(cmd)\n self._vim.command(cmd)", "def set_selection(self, selection):\n self._selection = selection", "def scroll_highlight1(self):\n self._window.column1.align_disp.editor.scroll_highlight(self._window.column1.align_disp.currentWord)", "def _highlight(self, source):\n if not self.hasmarkup:\n return source\n try:\n from pygments.formatters.terminal import TerminalFormatter\n from pygments.lexers.python import PythonLexer\n from pygments import highlight\n except ImportError:\n return source\n else:\n return highlight(source, PythonLexer(), TerminalFormatter(bg=\"dark\"))", "def Reset_Selection(self):\r\n #if previous selection\r\n if( self.selected != 0 ):\r\n self.canvas_one.delete( self.selected ) #remove bounding rectangle\r\n #return chosen node to branch_color\r\n 
self.canvas_one.itemconfig( self.selected_ls.line_handle , fill = self.branch_color )\r\n self.system.Set_Selected_Node(0)\r\n self.selected = 0\r\n self.selected_ls = 0", "def no_highlight(): #py:no_highlight\n RUR._no_highlight_()", "def exec_selected_text(self):\r\n editor = self.currentWidget()\r\n ls = editor.get_line_separator()\r\n \r\n line_from, _index_from, line_to, index_to = editor.getSelection()\r\n if line_from != line_to:\r\n # Multiline selection -> first line must be entirely selected\r\n editor.setSelection(line_from, 0, line_to, index_to)\r\n lines = unicode( editor.selectedText() )\r\n \r\n # If there is a common indent to all lines, remove it\r\n min_indent = 999\r\n for line in lines.split(ls):\r\n if line.strip():\r\n min_indent = min(len(line)-len(line.lstrip()), min_indent)\r\n if min_indent:\r\n lines = [line[min_indent:] for line in lines.split(ls)]\r\n lines = ls.join(lines)\r\n\r\n last_line = lines.split(ls)[-1]\r\n if last_line.strip() == unicode(editor.text(line_to)).strip():\r\n # If last line is complete, add an EOL character\r\n lines += ls\r\n \r\n self.interactive_console.shell.execute_lines(lines)\r\n self.interactive_console.shell.setFocus()", "def _(event):\n # Take the current cursor position as the start of this selection.\n buff = event.current_buffer\n if buff.text:\n buff.start_selection(selection_type=SelectionType.CHARACTERS)", "def __scanspwSelection(self, scan, spw):\n \n isSelected = False\n mysel = {}\n mysel['scan'] = scan\n mysel['spw'] = spw\n \n if self._msTool is None:\n # Open up the msTool\n self._msTool = mstool()\n self._msTool.open(self._arg['vis']) \n else:\n self._msTool.reset()\n\n try:\n isSelected = self._msTool.msselect(mysel)\n except:\n isSelected = False\n casalog.post('Ignoring NULL combination of scan=%s and spw=%s'% \\\n (scan,spw),'DEBUG1')\n \n return isSelected", "def highlightSearch(self, wordList=None, regExpList=None):\n backColor = self.palette().brush(QPalette.Active,\n QPalette.Highlight)\n foreColor = self.palette().brush(QPalette.Active,\n QPalette.HighlightedText)\n if wordList is None:\n wordList = []\n if regExpList is None:\n regExpList = []\n for regExp in regExpList:\n for match in regExp.finditer(self.toPlainText()):\n matchText = match.group()\n if matchText not in wordList:\n wordList.append(matchText)\n selections = []\n for word in wordList:\n while self.find(word):\n extraSel = QTextEdit.ExtraSelection()\n extraSel.cursor = self.textCursor()\n extraSel.format.setBackground(backColor)\n extraSel.format.setForeground(foreColor)\n selections.append(extraSel)\n cursor = QTextCursor(self.document())\n self.setTextCursor(cursor) # reset main cursor/selection\n self.setExtraSelections(selections)", "def __selectColorName(self):\n editor = e5App().getObject(\"ViewManager\").activeWindow()\n if editor is None:\n return\n \n if editor.hasSelectedText():\n currColor = editor.selectedText()\n if currColor not in QColor.colorNames():\n E5MessageBox.critical(\n self.__ui,\n self.tr(\"Color String\"),\n self.tr(\n \"\"\"<p>The selected string <b>{0}</b> is not a\"\"\"\n \"\"\" valid color name. 
Aborting!</p>\"\"\")\n .format(currColor))\n return\n else:\n currColor = \"\"\n \n from ColorString.ColorSelectionDialog import ColorSelectionDialog\n dlg = ColorSelectionDialog(currColor, self.__ui)\n if dlg.exec_() == QDialog.Accepted:\n colorStr = dlg.getColor()\n editor.beginUndoAction()\n if editor.hasSelectedText():\n editor.replaceSelectedText(colorStr)\n else:\n line, index = editor.getCursorPosition()\n editor.insert(colorStr)\n editor.setCursorPosition(line, index + len(colorStr))\n editor.endUndoAction()", "def selectPointsUnderCursor(self):\n spw = self.spw\n sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids is None:\n return\n #t0 = time.time()\n spw.SelectSpikes(sids, on=self.selecting)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()", "def selected(self):\r\n self.set_default_style()\r\n self.emit(SIGNAL('valid(bool)'), True)", "def GetSelection(self):\r\n \r\n return self._curpage", "def selectPointsUnderCursor(self):\n #spw = self.spw\n #sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids == None:\n return\n #t0 = time.time()\n #if not sw.panel.maxed_out:\n # spw.SelectSpikes(sids, on=self.selecting)\n #else:\n # # for speed, while the mouse is held down and the sort panel is maxed out,\n # # don't call SelectSpikes, only call it once when the mouse is released\n self.collected_sids.append(sids)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()", "def draw_highlight(self, x1, x2, y1, y2, color):\n self.scene.addLine(x1 - 1, y1 - 1, x1 - 1, y2 + 1, QPen(color))\n self.scene.addLine(x2 + 1, y1 - 1, x2 + 1, y2 + 1, QPen(color))\n self.scene.addLine(x1 - 1, y1 - 1, x2 + 1, y1 - 1, QPen(color))\n self.scene.addLine(x1 - 1, y2 + 1, x2 + 1, y2 + 1, QPen(color))", "def apply_selection(self, rv, index, is_selected):\r\n self.selected = is_selected", "def setSelection(self, current: QModelIndex, old: QModelIndex):\n node = current.internalPointer()\n if node is not None:\n typeInfo = node.typeInfo()\n self.showEditor(typeInfo)\n for type, editor in self._editor_dict.items():\n editor.setSelection(current)", "def draw(self):\n base_x = self.term.width // 2\n base_y = (self.term.height - len(self.OPTIONS)) // 2\n print(end=self.term.home + self.term.clear)\n print(\n self.term.move_xy(base_x - 2, base_y - 2)\n + self.term.green_bold\n + \"SNEK\"\n + self.term.normal\n )\n for index, (label, _action) in enumerate(self.OPTIONS):\n x = base_x - len(label) // 2\n y = base_y + index\n if index == self.selection_index:\n style = self.term.bold_red_reverse\n else:\n style = self.term.red\n print(self.term.move_xy(x, y) + style + label + self.term.normal)", "def on_hovered(self):\n if not self.is_selected:\n self.colour = self.hover_colour\n self.is_hovered = True\n self.redraw()", "def draw(self) -> None:\n offset = 7 + SELECTOR_HEIGHT\n x, y = 4, 5\n screen.fill(BLACK)\n for i in range(self.displayed_jobs_min, self.displayed_jobs_max + 1):\n\n # If this job is selected, hightlight it\n if i == 
self.sel_idx:\n self.__highlight_selection(x, y)\n\n # Draw the job + offset y each time\n self.draw_selection_item(self.jobs_list[i], x, y)\n y += offset", "def apply_selection(self, rv, index, is_selected):\n self.selected = is_selected", "def perform(self, doc):\n used_colors_list = []\n selected_objects = doc.GetActiveObjects(c4d.GETACTIVEOBJECTFLAGS_CHILDREN)\n\n if len(selected_objects) > 0:\n for obj in selected_objects:\n doc.AddUndo(c4d.UNDOTYPE_CHANGE, obj)\n Helper.set_color(obj, used_colors_list)\n else:\n obj = doc.GetFirstObject()\n while obj is not None:\n doc.AddUndo(c4d.UNDOTYPE_CHANGE, obj)\n Helper.set_color(obj, used_colors_list)\n obj = Helper.get_next_object(obj)", "def select_all(self, value=None):\n self.my_text.tag_add(SEL, \"1.0\", END)\n self.my_text.mark_set(INSERT, \"1.0\")\n self.my_text.see(INSERT)\n return \"break\"", "def select_me(self, mouse_pos):\r\n\t\t#self.active = self.rect.collidepoint(mouse_pos)\r\n\t\tself.active = True", "def highlight_source(source):\n return highlight(source, PythonLexer(), HtmlFormatter())", "def select_color(self):\n\t\tresult = tkinter.colorchooser.askcolor(self.center['bg'])\n\t\tif result:\n\t\t\t# 2nd part of result is the color object\n\t\t\tself.center['bg'] = result[1]", "def update_selection(self):\n raise NotImplementedError", "def clearMouseSelection(self):\n pass", "def setHighlightColor( self, color ):\n self._highlightColor = color\n self.setDirty()", "def __select(self):\n searchRect = QgsRectangle(self.first, self.last)\n for layer in self.canvas().layers():\n if not self.identified or layer.id() not in self.disabled():\n if layer.type() == QgsMapLayer.VectorLayer and layer.geometryType() in self.types:\n renderer = layer.rendererV2()\n context = QgsRenderContext()\n if renderer:\n renderer.startRender(context,layer.pendingFields())\n self.request = QgsFeatureRequest()\n self.request.setFilterRect(searchRect)\n self.request.setFlags(QgsFeatureRequest.ExactIntersect)\n fIds = []\n for feature in layer.getFeatures(self.request):\n try:\n will = renderer.willRenderFeature(feature, context)\n except:\n try:\n will = renderer.willRenderFeature(feature)\n except:\n self.__iface.messageBar().pushMessage(\n QCoreApplication.translate(\"VDLTools\", \"Error\"),\n \"will renderer still not working\", level=QgsMessageBar.CRITICAL, duration=0)\n return\n if will:\n fIds.append(feature.id())\n renderer.stopRender(context)\n layer.selectByIds(fIds)\n self.selectedSignal.emit()", "def draw(self, screen):\r\n if self.selected:\r\n used_color = (255 - self.color[0], 255 - self.color[1], 255 - self.color[2])\r\n else:\r\n used_color = self.color\r\n pygame.draw.rect(screen, used_color,\r\n (self.location_top_left[0], self.location_top_left[1], self.size_x, self.size_y), 0)", "def process_highlighting(htmlcontent):\n\n # Turn this into a html_content() object\n # for easy access to helper functions\n html_con = htmltools.html_content(htmlcontent)\n html_con.highlight()\n return mark_safe(html_con.tostring())", "def __selectHexColor(self):\n editor = e5App().getObject(\"ViewManager\").activeWindow()\n if editor is None:\n return\n \n if editor.hasSelectedText():\n currColor = editor.selectedText()\n if not self.__isValidColor(currColor):\n E5MessageBox.critical(\n self.__ui,\n self.tr(\"Color String\"),\n self.tr(\n \"\"\"<p>The selected string <b>{0}</b> is not a\"\"\"\n \"\"\" valid color string. 
Aborting!</p>\"\"\")\n .format(currColor))\n return\n \n if currColor.startswith(\"#\"):\n withHash = True\n elif self.__isHexString(currColor):\n withHash = False\n currColor = \"#\" + currColor\n else:\n withHash = True\n initColor = QColor(currColor)\n else:\n withHash = True\n currColor = \"\"\n initColor = QColor()\n \n color = QColorDialog.getColor(\n initColor, self.__ui, self.tr(\"Color String\"))\n if color.isValid():\n colorStr = color.name()\n if not withHash:\n colorStr = colorStr[1:]\n editor.beginUndoAction()\n if editor.hasSelectedText():\n editor.replaceSelectedText(colorStr)\n else:\n line, index = editor.getCursorPosition()\n editor.insert(colorStr)\n editor.setCursorPosition(line, index + len(colorStr))\n editor.endUndoAction()", "def __unhighlight(self):\n self.unhighlight()", "def highlight_input(self):\n text = self.text_transfer.get()\n if 0 <= self.counter < len(text):\n text = text.lower()\n text = text[:self.counter] + text[self.counter].upper() + text[self.counter + 1:]\n self.text_transfer.delete(0, tk.END)\n self.text_transfer.insert(0, text)\n tk.Label(self.model_frame, text=\"Preview: \").grid(row=0, column=0)\n width, height = self.font14.getsize(text[self.counter].upper())\n image = Image.new(\"RGBA\", (width, height), color=(0, 0, 0, 0))\n draw = ImageDraw.Draw(image)\n draw.text((0, 0), text[self.counter].upper(), font=self.font14, fill=\"black\")\n self._photoimage = ImageTk.PhotoImage(image)\n self.model_image.config(image=self._photoimage)\n else:\n self.counter = 0\n self.highlight_input()", "def restore_default_highlights(bv=None):\n highlight_set(covdb.total_coverage)\n log.log_info(\"Default highlight colors restored\")", "def user_selection(self, selected):\n\n source_index = self.proxy_model.mapToSource(selected)\n self.row = self.table_model.selectedRow(source_index.row())\n\n self.curr_selection()\n self.upd_preview()", "def on_unselected(self):\n self.colour = self.normal_colour\n self.is_selected = False\n self.redraw()", "def SetSelection(self, start, end):\n # STC HELL - some methods require UTF-8 offsets while others work\n # with Unicode...\n # Calculate UTF-8 offsets in buffer\n unicode_txt = self.GetText()\n if start != 0:\n start = len(ed_txt.EncodeString(unicode_txt[0:start], 'utf-8'))\n if end != 0:\n end = len(ed_txt.EncodeString(unicode_txt[0:end], 'utf-8'))\n del unicode_txt\n super(EditraBaseStc, self).SetSelection(start, end)", "def on_unhovered(self):\n if not self.is_selected:\n self.colour = self.normal_colour\n self.is_hovered = False\n self.redraw()", "def getSelection(self):\n return self.selection", "def updateSelectionArea(self):\n self.machine.setSelectionArea(self.points,\n fill='hatch',\n color=self.machine.color)\n eventDict = prepareDrawingSignal('drawingProgress',\n 'polygon',\n self.points,\n self.machine.parameters)\n self.machine.plot.notify(**eventDict)", "def GetSelection(self):\r\n\r\n return self.selection", "def currentSelection(self):\n # Get search items\n items = [i.strip().lower() for i in self._main._select.text().split(' ')]\n items = [i for i in items if i]\n \n prefix = ''\n tags = []\n words = []\n \n # First item can be the prefix\n if items and items[0] in '. % %% %%% ! !! !!! ? ?? 
???':\n prefix = items.pop(0)\n \n # Next are either words or tags\n for item in items:\n if item.startswith('#'):\n tags.append(item)\n else:\n words.append(item)\n \n # Done\n return prefix, tags, words", "def init(stdscr):\n # Ensures a clean visual space.\n stdscr.clear()\n curses.curs_set(False)\n\n # Set the background of the app to the secondary color.\n stdscr.bkgd(' ', curses.color_pair(1))\n stdscr.refresh()", "def cursor_changed(self, column_side, bypass_selection=\"\"):\n\n column = None\n aligned_column = None\n if column_side == LEFT_TEXT:\n column = self._window.column1\n aligned_column = self._window.column2\n else:\n column = self._window.column2\n aligned_column = self._window.column1\n\n w = None\n if bypass_selection != \"\":\n # bypass the selection and process, used by search_highlight\n w = bypass_selection\n else:\n # else, just select the clicked word\n w = column.align_disp.editor.get_clicked_word()\n\n if w and w != \"\" and w != column.align_disp.currentWord:\n try:\n word, aligned_word, goldsmith_rslt, goldsmith_rslt_2 = self.controller.process_word(w, column_side)\n\n # Highlighting\n column.align_disp.editor.clean_highlight(first_pos=column.align_disp.editor.first_highlighted_block,\n last_pos=column.align_disp.editor.last_highlighted_block)\n column.align_disp.editor.refresh_highlight(word.str)\n aligned_column.align_disp.editor.clean_highlight(first_pos=aligned_column.align_disp.editor.first_highlighted_block,\n last_pos=aligned_column.align_disp.editor.last_highlighted_block)\n aligned_column.align_disp.editor.refresh_highlight(aligned_word.str, color=QtGui.QColor(255, 255, 100))\n\n align_rslt = \"{} : <b>{}</b>\".format(self.model.dist_words[word.str][aligned_word.str], aligned_word.str)\n\n column.info_word.set_word(word.str)\n column.info_word.set_text(align_rslt)\n column.see_also.set_text(goldsmith_rslt)\n column.align_disp.currentWord = word.str\n column.align_disp.sidebar.currentVect = word.pos\n column.align_disp.sidebar.draw_vector()\n\n aligned_column.info_word.set_word(aligned_word.str)\n aligned_column.info_word.set_text(\"See also\")\n # TODO : goldsmith on the second column, maybe paste the code or add eternal function\n aligned_column.see_also.set_text(goldsmith_rslt_2)\n aligned_column.align_disp.currentWord = aligned_word.str\n aligned_column.align_disp.sidebar.currentVect = aligned_word.pos\n aligned_column.align_disp.sidebar.draw_vector()\n\n except WordNotInDatabase:\n column.align_disp.editor.clean_highlight(first_pos=column.align_disp.editor.first_highlighted_block,\n last_pos=column.align_disp.editor.last_highlighted_block)\n aligned_column.align_disp.editor.clean_highlight(first_pos=aligned_column.align_disp.editor.first_highlighted_block,\n last_pos=aligned_column.align_disp.editor.last_highlighted_block)\n column.info_word.set_word(\"Not found\")\n column.info_word.set_text(\"Alignment results\")\n column.see_also.set_text(\"Goldsmith algorithm results\")\n column.align_disp.currentWord = None\n column.align_disp.sidebar.currentVect = [0, 1]\n column.align_disp.sidebar.draw_vector()\n\n aligned_column.info_word.set_word(\"Not found\")\n aligned_column.info_word.set_text(\"See also\")\n aligned_column.see_also.set_text(\"Goldsmith algorithm results\")\n aligned_column.align_disp.currentWord = None\n aligned_column.align_disp.sidebar.currentVect = [0, 1]\n aligned_column.align_disp.sidebar.draw_vector()\n\n except DataNotProcessed:\n 
column.align_disp.editor.clean_highlight(first_pos=column.align_disp.editor.first_highlighted_block,\n last_pos=column.align_disp.editor.last_highlighted_block)\n aligned_column.align_disp.editor.clean_highlight(first_pos=aligned_column.align_disp.editor.first_highlighted_block,\n last_pos=aligned_column.align_disp.editor.last_highlighted_block)", "def highlight(self):\n \n try:\n code = pygments.highlight(self.code, self.lexer, self.formatter)\n highlighted = '\\n'.join(['<div class=\\'highlighted\\'>\\n',\n code,\n '\\n</div>',\n ])\n except Exception as ex:\n _log.error('wp_highlighter.highlight() error:\\n{}'.format(ex))\n highlighted = ''\n return highlighted", "def DrawItemSelectionRect(*args, **kwargs):\n return _gdi_.RendererNative_DrawItemSelectionRect(*args, **kwargs)", "def highlight_active(series):\n style = []\n for i in range(len(series)):\n if series[i] > 0:\n style.append(\"background-color: lightblue\")\n else:\n style.append(\"background-color: dimgrey\")\n\n return style", "def HasSelection(self):\n sel = super(EditraBaseStc, self).GetSelection()\n return sel[0] != sel[1]", "def reformatSelection(self: Self, event: Event = None, undoType: str = 'Reformat Paragraph') -> None:\n c, undoType = self, 'reformat-selection'\n p, u, w = c.p, c.undoer, c.frame.body.wrapper\n if g.app.batchMode:\n c.notValidInBatchMode(undoType)\n return\n bunch = u.beforeChangeBody(p)\n oldSel, oldYview, original, pageWidth, tabWidth = rp_get_args(c)\n head, middle, tail = c.frame.body.getSelectionLines()\n lines = g.splitLines(middle)\n if not lines:\n return\n indents, leading_ws = rp_get_leading_ws(c, lines, tabWidth)\n result = rp_wrap_all_lines(c, indents, leading_ws, lines, pageWidth)\n s = head + result + tail\n if s == original:\n return\n #\n # Update the text and the selection.\n w.setAllText(s) # Destroys coloring.\n i = len(head)\n j = max(i, len(head) + len(result) - 1)\n j = min(j, len(s))\n w.setSelectionRange(i, j, insert=j)\n #\n # Finish.\n p.v.b = s # p.b would cause a redraw.\n u.afterChangeBody(p, undoType, bunch)\n w.setXScrollPosition(0) # Never scroll horizontally.", "def select_me(self, mouse_pos):\r\n\t\tself.active = self.rect.collidepoint(mouse_pos)" ]
[ "0.77326286", "0.6310758", "0.62849665", "0.6241305", "0.6179359", "0.6145725", "0.6066579", "0.5976721", "0.59747756", "0.5841172", "0.5825609", "0.5814059", "0.57745796", "0.5750539", "0.5745168", "0.5714649", "0.56804645", "0.5667968", "0.5662644", "0.56395173", "0.5605114", "0.5541363", "0.5528447", "0.55247223", "0.54810756", "0.54672265", "0.5445241", "0.543167", "0.5425711", "0.5397137", "0.5393737", "0.5354662", "0.53423375", "0.53325456", "0.5327312", "0.531556", "0.5297838", "0.5282471", "0.5273653", "0.5272844", "0.5267824", "0.52655184", "0.5252689", "0.5235542", "0.5232029", "0.5220724", "0.5209749", "0.5204509", "0.52040166", "0.5196731", "0.5184597", "0.5165916", "0.5155039", "0.515364", "0.5137624", "0.5125831", "0.5122967", "0.51211685", "0.51179534", "0.5112988", "0.5095003", "0.5089361", "0.5086339", "0.5083221", "0.5053894", "0.5053032", "0.50415385", "0.50401694", "0.50388205", "0.5034235", "0.5022883", "0.50210637", "0.5018367", "0.5016811", "0.5014917", "0.50107116", "0.5001695", "0.5000769", "0.50002515", "0.49981946", "0.49980265", "0.4994756", "0.49841928", "0.49836406", "0.49800786", "0.49791285", "0.497616", "0.4974683", "0.4973932", "0.49680942", "0.49656883", "0.49538437", "0.49520338", "0.49516422", "0.4949275", "0.49434254", "0.49394584", "0.49368858", "0.49240777", "0.49115315" ]
0.75370693
1
Removes highlight of lastSelection and highlights currentSelection.
def switchSelection(stdscr, lastSelection, currentSelection):
    stdscr.addstr(*lastSelection.addStrArgs)
    highlightSelection(stdscr, currentSelection)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unhighlight(self, current=False):\n if current:\n if self.currentEditor is not None:\n self.currentEditor.highlight()\n else:\n for editor in self.editors:\n editor.highlight()", "def Unselect(self):\r\n\r\n if self._current:\r\n self._current.SetHilight(False)\r\n self.RefreshLine(self._current)\r\n\r\n self._current = None\r\n self._select_me = None", "def __unhighlight(self):\n self.unhighlight()", "def clearMouseSelection(self):\n pass", "def Reset_Selection(self):\r\n #if previous selection\r\n if( self.selected != 0 ):\r\n self.canvas_one.delete( self.selected ) #remove bounding rectangle\r\n #return chosen node to branch_color\r\n self.canvas_one.itemconfig( self.selected_ls.line_handle , fill = self.branch_color )\r\n self.system.Set_Selected_Node(0)\r\n self.selected = 0\r\n self.selected_ls = 0", "def StopSelection( self ):\n\n # Return if the marquee is not running\n if not self.nodePicker.marquee.IsRunning():\n return\n\n # Stop the marquee\n self.nodePicker.StopSelection()\n\n # Set the colour of the selected objects\n for i in self.nodePicker.selection:\n i.setColorScale( Vec4(1, 0, 0, 1) )\n\n # Attach the selection to the gizmo manager\n self.gizmoMgr.AttachNodePaths( self.nodePicker.selection )\n\n # Get the active gizmo\n activeGizmo = self.gizmoMgr.GetActiveGizmo()\n if activeGizmo is not None:\n\n # Refresh the active gizmo so it appears in the right place\n activeGizmo.Refresh()", "def on_unselected(self):\n self.colour = self.normal_colour\n self.is_selected = False\n self.redraw()", "def highlightSelection(stdscr, selection):\n s = tuple(list(selection.addStrArgs)+[curses.A_REVERSE])\n stdscr.addstr(*s)", "def deselectall(self):\n if self.selection:\n for node in self.selection[:]: node.deselect()", "def __highlight_selection(self, x: int, y: int) -> None:\n round_rect(screen, (x-2, y-2, SELECTOR_WIDTH + 4, SELECTOR_HEIGHT + 4), HIGHLIGHT_COLOUR, 6)", "def removeSelection(self):\n try:\n row = self.table.selectedIndexes()[0].row()\n col = self.table.selectedIndexes()[0].column()\n except IndexError:\n self.selectCell(self.currentDate)\n return\n data = self.currentDate\n _data = self.table.cellWidget(row, col).data\n if data != _data:\n self.table.setCurrentCell(row, col, QItemSelectionModel.Deselect)", "def on_unhovered(self):\n if not self.is_selected:\n self.colour = self.normal_colour\n self.is_hovered = False\n self.redraw()", "def select(self, selected = True):\n \n if selected != self._selected:\n if selected:\n self._border.set_border_width(globals.HIGHLIGHT_BORDER_WIDTH)\n Member.focus.append(self)\n else:\n self._border.set_border_width(self._border_width)\n Member.focus.remove(self)\n \n self._selected = selected", "def clearSelection(self):\n selectionGroup = self.getSelectionGroup()\n if selectionGroup is not None:\n selectionGroup.clear()\n selectionGroup = Field() # NULL\n scene = self._sceneviewer.getScene()\n scene.setSelectionField(selectionGroup)", "def clear_highlighting(self):\n for match in vim.eval('getmatches()'):\n if match['group'] == 'PSearchMatches':\n vim.command(\"call matchdelete({0})\".format(match['id']))", "def __update_selection(self):\n if self.selected_offset != self.old_selected_offset:\n if self.old_selected_offset > -1:\n old_offset = (self.old_selected_offset - self.top_offset) * 8\n\n self.display.text(\">\", 0, old_offset, 0)\n\n new_offset = (self.selected_offset - self.top_offset) * 8\n self.display.text(\">\", 0, new_offset, 1)\n self.display.show()\n self.old_selected_offset = self.selected_offset", "def 
draw_selected(self):\n if self.get_selected() is not None and not self.check_if_locked(self.get_selected()):\n self.color_cell(pos=self.get_selected(\n ), color=SELECTED_INVALID if self.get_selected() in self.invalid else SELECTED)", "def remove_selection(self, coord):\n button = self.grid[coord]\n button['bg'] = default_color\n button['activebackground'] = '#38dcf5'", "def highlight_last(self):\r\n if len(self.list_labels) > 0:\r\n self.highlight_line(len(self.list_labels) - 1)", "def BackTab(self):\n sel = self.GetSelection()\n if sel[0] == sel[1]:\n # There is no selection\n cpos = self.GetCurrentPos()\n cline = self.GetCurrentLine()\n cipos = self.GetLineIndentPosition(cline)\n if cpos <= cipos:\n # In indentation so simply backtab\n super(EditraBaseStc, self).BackTab()\n else:\n # In middle of line somewhere\n text = self.GetLine(cline)\n column = max(0, self.GetColumn(cpos) - 1)\n if len(text) > column and text[column].isspace():\n\n # Find the end of the whitespace\n end = column\n while end < len(text) and \\\n text[end].isspace() and \\\n text[end] not in '\\r\\n':\n end += 1\n\n # Find the start of the whitespace\n end -= 1\n start = end\n while end > 0 and text[start].isspace():\n start -= 1\n\n diff = end - start\n if diff > 1:\n # There is space to compress\n isize = self.GetIndent()\n if isize < diff:\n # More space than indent to remove\n repeat = isize\n else:\n # Less than one indent width to remove\n repeat = end - (start + 1)\n\n # Update the control\n self.BeginUndoAction()\n self.SetCurrentPos(cpos + (end - column))\n for x in range(repeat):\n self.DeleteBack()\n self.EndUndoAction()\n\n else:\n # There is a selection\n super(EditraBaseStc, self).BackTab()", "def remove_highlights(bv=None):\n if no_coverage_warn():\n return\n if bv is None:\n bv = gbv\n clear_highlights(covdb.total_coverage, bv)\n log.log_info(\"Highlights cleared.\")", "def delete_selection(self):\n if self.selected_point_index is not None:\n del self.current_shape[self.selected_point_index]\n self.selected_point_index = None\n self.changed()", "def on_selected(self):\n self.colour = self.selected_colour\n self.is_selected = True\n self.redraw()", "def doDeleteSelection(self):\n upperRow, upperCol, lowerRow, lowerCol = self.startAndEnd()\n self.doDelete(upperRow, upperCol, lowerRow, lowerCol)", "def resetHighlight(label: Shape):\n label.isHighlighted = False\n label.vertices.highlightedVertex = -1\n return label", "def deselect_selected_widget(self) -> None:\n if self.selected_widget is not None:\n self.handles.remove_selected_widget_handles()\n self.selected_widget = None\n self.attr_frame.update(self.selected_widget)", "def no_highlight(): #py:no_highlight\n RUR._no_highlight_()", "def update_extra_selections(self):\n\n if len(self.cursors) > 1:\n # get highlight colors\n highlight_color = self.txt_edit.palette().highlight()\n highlight_txt_color = self.txt_edit.palette().highlightedText()\n\n extra_selections = []\n\n for cursor in self.cursors:\n extra_sel = self.txt_edit.ExtraSelection()\n extra_sel.cursor = cursor\n extra_sel.format.setBackground(highlight_color)\n extra_sel.format.setForeground(highlight_txt_color)\n extra_selections.append(extra_sel)\n\n self.txt_edit.setExtraSelections(extra_selections)\n\n else:\n # clear extra selections\n self.txt_edit.setExtraSelections([])", "def _prev(self, _):\n self.notebook.SetSelection(self.idx-1)", "def set_highlight(self, highlighted):\n self.highlighted = highlighted", "def clear_highlight(self):\n core = cutter.core()\n highlighter = 
core.getBBHighlighter()\n for bblock in self.config['bb_hits']:\n highlighter.clear(bblock)", "def resetSelectionArea(self):\n for legend in self._selectionAreas:\n self.plot.remove(legend, kind='item')\n self._selectionAreas = set()", "def smart_highlight_off(self, buf):\n start, end = buf.get_bounds()\n if (self.update_colors or\n buf.get_tag_table().lookup('smart_highlight') == None):\n self.fill_tag_table(buf)\n buf.remove_tag_by_name('smart_highlight', start, end)", "def unlight(self):\n\n if self.selected_text_file is None:\n return\n if self.selected_text_file[FULLTEXT] is None:\n return\n cursor = self.ui.textBrowser.textCursor()\n try:\n cursor.setPosition(0, QtGui.QTextCursor.MoveMode.MoveAnchor)\n cursor.setPosition(len(self.selected_text_file[FULLTEXT]) - 1, QtGui.QTextCursor.MoveMode.KeepAnchor)\n cursor.setCharFormat(QtGui.QTextCharFormat())\n except Exception as e:\n logger.debug((str(e) + \"\\n unlight, text length\" + str(len(self.ui.textBrowser.toPlainText()))))", "def OnDeselect( self ):\n bbox = self.data.getPythonTag( TAG_BBOX )\n if bbox is not None:\n bbox.lines.removeNode()\n self.data.clearPythonTag( TAG_BBOX )", "def __editDeselectAll(self):\n self.activeWindow().selectAll(False)", "def restore_default_highlights(bv=None):\n highlight_set(covdb.total_coverage)\n log.log_info(\"Default highlight colors restored\")", "def SetOldSelection(self, s):\r\n \r\n self.old_selection = s", "def highlight(self, **highlight):\n self._evaluated = False\n self._highlight = highlight\n return self", "def exec_selected_text(self):\r\n editor = self.currentWidget()\r\n ls = editor.get_line_separator()\r\n \r\n line_from, _index_from, line_to, index_to = editor.getSelection()\r\n if line_from != line_to:\r\n # Multiline selection -> first line must be entirely selected\r\n editor.setSelection(line_from, 0, line_to, index_to)\r\n lines = unicode( editor.selectedText() )\r\n \r\n # If there is a common indent to all lines, remove it\r\n min_indent = 999\r\n for line in lines.split(ls):\r\n if line.strip():\r\n min_indent = min(len(line)-len(line.lstrip()), min_indent)\r\n if min_indent:\r\n lines = [line[min_indent:] for line in lines.split(ls)]\r\n lines = ls.join(lines)\r\n\r\n last_line = lines.split(ls)[-1]\r\n if last_line.strip() == unicode(editor.text(line_to)).strip():\r\n # If last line is complete, add an EOL character\r\n lines += ls\r\n \r\n self.interactive_console.shell.execute_lines(lines)\r\n self.interactive_console.shell.setFocus()", "def unhighlight_line(self, line):\n self._checkfigure()\n ld = self._get_linedict(line)\n ld['highlighted'] = False\n self.update_lines()", "def _render_highlighted(\n document_text: str,\n begin: int,\n end: int,\n context_size: int = 0,\n highlight_color: str = \"On_Green\",\n) -> str:\n black_color = _get_text_color_from_list(\"Color_off\")\n return (\n document_text[begin - context_size : begin]\n + _get_text_color_from_list(highlight_color)\n + document_text[begin:end]\n + black_color\n + document_text[end : end + context_size]\n )", "def clear(self, event):\r\n self.selectedRegion = None\r\n self.paint()", "def reformatSelection(self: Self, event: Event = None, undoType: str = 'Reformat Paragraph') -> None:\n c, undoType = self, 'reformat-selection'\n p, u, w = c.p, c.undoer, c.frame.body.wrapper\n if g.app.batchMode:\n c.notValidInBatchMode(undoType)\n return\n bunch = u.beforeChangeBody(p)\n oldSel, oldYview, original, pageWidth, tabWidth = rp_get_args(c)\n head, middle, tail = c.frame.body.getSelectionLines()\n lines = 
g.splitLines(middle)\n if not lines:\n return\n indents, leading_ws = rp_get_leading_ws(c, lines, tabWidth)\n result = rp_wrap_all_lines(c, indents, leading_ws, lines, pageWidth)\n s = head + result + tail\n if s == original:\n return\n #\n # Update the text and the selection.\n w.setAllText(s) # Destroys coloring.\n i = len(head)\n j = max(i, len(head) + len(result) - 1)\n j = min(j, len(s))\n w.setSelectionRange(i, j, insert=j)\n #\n # Finish.\n p.v.b = s # p.b would cause a redraw.\n u.afterChangeBody(p, undoType, bunch)\n w.setXScrollPosition(0) # Never scroll horizontally.", "def _hilightcurrent(self, onoff):\n if len(self.canvas[\"items\"]):\n self.canvas[\"items\"][self.index]['frameColor']=\\\n list(self.highlight)[:3]+[self.highlight[3] if onoff else 0]", "def pop_current_line(self):\n self.current_line.pop()", "def deleteSelected(self):\n self.scene().deleteSelected()", "def _update_selected_result(self, old_index, new_index):\n try:\n self.formatted_results[old_index] = (\n self.style[\"unselected\"],\n self.formatted_results[old_index][1],\n )\n except IndexError:\n pass\n try:\n self.formatted_results[new_index] = (\n self.style[\"selected\"],\n self.formatted_results[new_index][1],\n )\n except IndexError:\n pass", "def invert_selection(self):\n pass", "def GetOldSelection(self):\r\n\r\n return self.old_selection", "def highlight(self):\n\n if self.selected_text_file is None:\n return\n if self.selected_text_file[FULLTEXT] is None:\n return\n format_ = QtGui.QTextCharFormat()\n cursor = self.ui.textBrowser.textCursor()\n for item in self.case_text:\n try:\n cursor.setPosition(int(item['pos0']), QtGui.QTextCursor.MoveMode.MoveAnchor)\n cursor.setPosition(int(item['pos1']), QtGui.QTextCursor.MoveMode.KeepAnchor)\n format_.setFontUnderline(True)\n format_.setUnderlineColor(QtCore.Qt.GlobalColor.red)\n cursor.setCharFormat(format_)\n except Exception as err:\n msg = \"highlight, text length \" + str(len(self.ui.textBrowser.toPlainText()))\n msg += \"\\npos0:\" + str(item['pos0']) + \", pos1:\" + str(item['pos1'])\n msg += \"\\n\" + str(err)\n logger.debug(msg)", "def highlightCompletion(self, new_text):\n cursor_pos = self.cursorPosition()\n old_text = self.text()\n self.setText(old_text[:cursor_pos] + new_text[cursor_pos:])\n text = self.text()\n comma_pos = text.find(',', cursor_pos)\n if comma_pos == -1:\n self.setSelection(len(text), cursor_pos-len(text))\n else:\n self.setSelection(comma_pos, cursor_pos-comma_pos)", "def toggle_selected(self):\n\n self._selected = not self._selected", "def unindent(self):\n\n debug('unindent')\n cursor = self.editor.textCursor()\n debug('cursor has selection %r', cursor.hasSelection())\n if cursor.hasSelection():\n cursor.beginEditBlock()\n self.unindent_selection(cursor)\n cursor.endEditBlock()\n self.editor.setTextCursor(cursor)\n else:\n tab_len = self.editor.tab_length\n indentation = cursor.positionInBlock()\n max_spaces = tab_len - (indentation - (indentation % tab_len))\n spaces = self.count_deletable_spaces(cursor, max_spaces)\n debug('deleting %d space before cursor' % spaces)\n cursor.beginEditBlock()\n if spaces:\n # delete spaces before cursor\n for _ in range(spaces):\n cursor.deletePreviousChar()\n else:\n # un-indent whole line\n debug('un-indent whole line')\n cursor = self.unindent_selection(cursor)\n cursor.endEditBlock()\n self.editor.setTextCursor(cursor)\n debug(cursor.block().text())", "def selectionEnd(self):\n if self.hasSelectedText():\n return self.selectionStart() + len(self.selectedText())\n else:\n return 
self.cursorPosition()", "def getMarked(self):\n if not self.selection.isSelection():\n return u\"\"\n sm1, sm2 = self.selection.order(self.selection.selectionMark,\n self.selection.selectionMark2)\n w1 = sm1[0]\n w2 = sm2[0]\n cx1 = sm1[1]\n cx2 = sm2[1]\n if (w1 == w2):\n return w1.string[cx1:cx2]\n # Get the word fragments at the beginning and end of the selection\n snip1 = w1.string[cx1:]\n snip2 = w2.string[:cx2]\n tl1 = w1.tline\n wx1 = tl1.twords.index(w1)\n tl2 = w2.tline\n wx2 = tl2.twords.index(w2)\n # Start the text string with the format of the first line\n text = tl1.para.getFormat() + snip1\n # then get all intervening words\n if (tl1 == tl2): # only 1 line is involved\n # get words from wx1+1 to wx2-1 (incl.)\n for w in tl1.twords[wx1+1:wx2]:\n text += u\" \" + w.string\n ch = u\" \"\n\n else: # deletion block covers >1 line\n # get words from wx1+1 to end of paragraph\n for w in tl1.twords[wx1+1:]:\n text += u\" \" + w.string\n # get all the intervening lines\n while True:\n para = tl1.para\n tl1 = self.rsubject.nextLine(tl1)\n if (tl1.para == para):\n text += u\" \"\n else:\n text += u\"\\n\" + tl1.para.getFormat()\n if (tl1 == tl2): break\n text += tl1.getText()\n\n ch = u\"\"\n # Add the remaining words in tl2 up to w2-1\n for w in tl2.twords[:wx2]:\n text += ch + w.string\n ch = u\" \"\n\n # Add the fragment of the last marked word\n return text + ch + snip2", "def interactive_select(space, current):\n print \"Type an element name, an element index, or an unambiguous prefix to add to your selection.\"\n print \"Type '\" + color_code(MAGENTA) + \"list\" + CLEAR_COLOR +\"' to see the list of valid selections/indices.\"\n print \"Type '\" + color_code(MAGENTA) + \"clear\" + CLEAR_COLOR +\"' to clear selection.\"\n print \"Enter an empty line when done.\\n\"\n \n done = False\n while not done:\n print color_code(BLACK, bold=True), \"\\nCurrent selection\" + CLEAR_COLOR + \":\", (current if current else \"None\")\n tentative = raw_input(color_code(YELLOW) + \"Selection or Command\" + CLEAR_COLOR + \": \")\n matches = [el for el in space if el.startswith(tentative)]\n try: index = int(tentative)\n except ValueError: index = None\n if tentative == 'list':\n for i,el in enumerate(space):\n print \"\\t\", color_code(BLUE, bold=True), i, CLEAR_COLOR, el\n print \"\\n\"\n elif tentative == 'clear':\n current = []\n elif tentative == '':\n if current:\n print color_code(GREEN), \"\\nFinal selection\" + CLEAR_COLOR + \":\", current, \"\\n\\n\"\n done = True\n else:\n print_error(\"Must select at least one\")\n elif len(matches) > 1:\n print_error(\"Multiple matches found for `{}' ({})\".format(tentative, matches))\n elif len(matches):\n if matches[0] in current:\n print_warning(\"{} was already selected\".format(matches[0]))\n else:\n current.append(matches[0])\n elif index is not None:\n if index < 0 or index >= len(space):\n print_error(\"Invalid index {}\".format(index))\n elif space[index] in current:\n print_warning(\"{} was already selected\".format(space[index]))\n else:\n current.append(space[index])\n else:\n print_error(\"Unknown token: {}\".format(tentative))\n \n return current", "def highlightCode(self, _event=None):\n count = 0\n if self.text.tag_ranges('sel'):\n self.text.tag_add('color' + str(count), tk.SEL_FIRST, tk.SEL_LAST)\n self.text.tag_configure('color' + str(count), foreground='black', background='yellow')\n count += 1\n else:\n # Do this if you want to overwrite all selection colors when you change color without selection\n # for tag in text.tag_names():\n # 
text.tag_delete(tag)\n self.text.config(foreground='yellow')\n\n fileContainingText = open(newTextFile, \"a\")\n\n hText = self.text.get(tk.SEL_FIRST, tk.SEL_LAST)\n fileContainingText.write(hText)", "def highlight(self, *args):\n cw = self.cur_win()\n cw.highlight()\n if self.cur == Win.right:\n cw.down()", "def remove_selected(self):\n if not self.tree_widget.selectedItems():\n self.configuration_widgets.logger.warning('Nothing has been selected. Please select an item and try again.')\n return\n _selected_items = self.tree_widget.selectedItems()\n root = self.tree_widget.invisibleRootItem()\n [(item.parent() or root).removeChild(item) for item in _selected_items]", "def BaseSetSelection(self, start, end):\n super(EditraBaseStc, self).SetSelection(start, end)", "def paint(self):\n if self.config['colorize']:\n self.highlight()\n else:\n self.clear_highlight()", "def cancel(self):\n end = self.start\n start = self.start + f'-{self.chars}c'\n self.text.tag_delete('found', 1.0, tk.END)\n self.text.tag_delete('found.focus', 1.0, tk.END)\n self.text.tag_add(tk.SEL, start, end)\n self.text.mark_set(tk.INSERT, start)\n self.text.focus_set()\n self.destroy()", "def cancel(self):\n end = self.start\n start = self.start + f'-{self.chars}c'\n self.text.tag_delete('found', 1.0, tk.END)\n self.text.tag_delete('found.focus', 1.0, tk.END)\n self.text.tag_add(tk.SEL, start, end)\n self.text.mark_set(tk.INSERT, start)\n self.text.focus_set()\n self.destroy()", "def deselect_me(self):\r\n\t\tself.active = False", "def StartSelection( self, clearSelection=True ):\n\n activeGizmo = self.gizmoMgr.GetActiveGizmo()\n if activeGizmo is None or ( activeGizmo is not None and\n not activeGizmo.dragging ):\n\n # Reset selected node colours\n for i in self.nodePicker.selection:\n i.setColorScale( Vec4(1) )\n\n self.nodePicker.StartSelection( clearSelection )", "def highlightBrush( self ):\n return QBrush(self.highlightColor())", "def pop_focus(self):\n self._focus.pop()", "def reset(self):\n self.selection_bounds = None\n self.selection = []\n for c in self.callbacks[\"reset_data\"]:\n c()\n if self.context is not None and self.context.doc is not None:\n self.context.doc.add_next_tick_callback(self.update_source)", "def setSelectionColorScheme(self, focused=None, unfocused=None):\n if focused is None:\n focused = self.selectionColor\n if unfocused is None:\n unfocused = self.unfocusedRegionColor\n self.selection.setColorScheme(focused, unfocused)\n beg = self.selection.getBeginSeconds()\n dur = self.selection.getWidthSeconds()\n wform = self.selection.getSelectedWaveform()\n self.selection.select(beg, dur, wform)", "def test_clear_selected_text(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.4\", \"4.4\"),\n after_sel=(\"2.4\", \"2.4\"),\n command_name=\"clear-selected-text\",\n )", "def deselectCanvasses(self):\n for row in range(self.canvasGrid.getHeight()):\n for column in range(self.canvasGrid.getWidth()):\n self.canvasGrid[row][column][\"background\"] = \"white\"", "def toggle_select(self):\r\n if not len(self.items):\r\n return\r\n item = self.items[self.item_sel]\r\n if item in self.selected:\r\n self.selected.remove(item)\r\n else:\r\n self.selected.append(item)\r\n self.do_paint()", "def highlightSearch(self, wordList=None, regExpList=None):\n backColor = self.palette().brush(QPalette.Active,\n 
QPalette.Highlight)\n foreColor = self.palette().brush(QPalette.Active,\n QPalette.HighlightedText)\n if wordList is None:\n wordList = []\n if regExpList is None:\n regExpList = []\n for regExp in regExpList:\n for match in regExp.finditer(self.toPlainText()):\n matchText = match.group()\n if matchText not in wordList:\n wordList.append(matchText)\n selections = []\n for word in wordList:\n while self.find(word):\n extraSel = QTextEdit.ExtraSelection()\n extraSel.cursor = self.textCursor()\n extraSel.format.setBackground(backColor)\n extraSel.format.setForeground(foreColor)\n selections.append(extraSel)\n cursor = QTextCursor(self.document())\n self.setTextCursor(cursor) # reset main cursor/selection\n self.setExtraSelections(selections)", "def active_selection():\r\n\r\n om.MGlobal.getActiveSelectionList()", "def on_hovered(self):\n if not self.is_selected:\n self.colour = self.hover_colour\n self.is_hovered = True\n self.redraw()", "def highlight_color(self):\n return curses.color_pair(4) if self.cycling else curses.color_pair(2)", "def delete(self):\n if not self.selection.isSelection(): return False\n\n # Save the current text\n self.saveText()\n\n sm1, sm2 = self.selection.order(self.selection.selectionMark,\n self.selection.selectionMark2)\n w1 = sm1[0]\n w2 = sm2[0]\n cx = sm1[1]\n self.edCursor.setPos(w1, cx)\n # Join words before and after selection\n w1.setString(w1.string[:cx] + w2.string[sm2[1]:])\n # Delete all intervening words, and w2\n tl1 = w1.tline\n wx1 = tl1.twords.index(w1)\n tl2 = w2.tline\n wx2 = tl2.twords.index(w2)\n if (tl1 == tl2): # only delete from 1 line\n # delete words from wx1+1 to wx2 (incl.)\n for w in tl1.twords[wx1+1:wx2+1]:\n w.delete()\n del(tl1.twords[wx1+1:wx2+1])\n\n else: # deletion block covers >1 line\n # delete words from wx1+1 to end of paragraph\n for w in tl1.twords[wx1+1:]:\n w.delete()\n del(tl1.twords[wx1+1:])\n # delete all the intervening lines\n while True:\n tl = self.rsubject.nextLine(tl1)\n if (tl == tl2): break\n self.rsubject.deleteTLine(tl)\n\n # Move remaining words after w2 in tl2 to end of tl1\n for w in tl2.twords[wx2+1:]:\n tl1.insert(w)\n del(tl2.twords[wx2+1:])\n # Delete tl2\n self.rsubject.deleteTLine(tl2)\n\n self.selection.clearSelection()\n\n self.rsubject.renderShortened(w1)\n\n self.edCursor.setPos(w1, cx)\n return True", "def _higlightWord(self, bOn=True): #$NON-NLS-1$\r\n if self.currRange:\r\n if bOn:\r\n self.currRange.scrollIntoView()\r\n self.currRange.select()\r\n else:\r\n self.mshtmlEditControl.selectNone()", "def clear_selected_shapes(self):\n self.shapes_to_draw = []", "def remove_current():\n current.remove()", "def __select(self):\n searchRect = QgsRectangle(self.first, self.last)\n for layer in self.canvas().layers():\n if not self.identified or layer.id() not in self.disabled():\n if layer.type() == QgsMapLayer.VectorLayer and layer.geometryType() in self.types:\n renderer = layer.rendererV2()\n context = QgsRenderContext()\n if renderer:\n renderer.startRender(context,layer.pendingFields())\n self.request = QgsFeatureRequest()\n self.request.setFilterRect(searchRect)\n self.request.setFlags(QgsFeatureRequest.ExactIntersect)\n fIds = []\n for feature in layer.getFeatures(self.request):\n try:\n will = renderer.willRenderFeature(feature, context)\n except:\n try:\n will = renderer.willRenderFeature(feature)\n except:\n self.__iface.messageBar().pushMessage(\n QCoreApplication.translate(\"VDLTools\", \"Error\"),\n \"will renderer still not working\", level=QgsMessageBar.CRITICAL, duration=0)\n return\n 
if will:\n fIds.append(feature.id())\n renderer.stopRender(context)\n layer.selectByIds(fIds)\n self.selectedSignal.emit()", "def _on_selection_changed(self, selection):\n if self._updating_selection:\n return\n\n self._updating_selection = True\n\n self._tree_selection.unselect_all()\n for widget in selection:\n gadget_iter = self._find_iter_by_widget(widget)\n if gadget_iter:\n select_iter(self._tree_view, gadget_iter)\n\n self._updating_selection = False", "def flush(self, header, caret, select_start_pos, select_end_pos, scr_topleft, scr_bottomright):\n self.update_screen_size()\n self.stdscr.erase()\n # header\n for text, color in header:\n self.stdscr.addstr(text, color_pair(color))\n text_selected = select_start_pos is not None\n # display lines\n displayed_lines = self.lines[scr_topleft.y : min(len(self.lines), scr_bottomright.y)]\n for index, line in enumerate(displayed_lines):\n self.stdscr.addstr(PADCHAR)\n if len(line) >= scr_topleft.x:\n # inclusive, position of line start and line end of displayed line\n ln_start = Position(scr_topleft.y + index, scr_topleft.x)\n ln_end = Position(scr_topleft.y + index, scr_topleft.x + self.screen_width())\n displayed_line = line[ln_start.x : min(len(line), scr_bottomright.x - 1)]\n if text_selected:\n # whether start position and end position of line are between selection\n start_between = ln_start.is_between(select_start_pos, select_end_pos)\n end_between = ln_end.is_between(select_start_pos, select_end_pos)\n # whether selection is between start and end position\n select_start_between = select_start_pos.is_between(ln_start, ln_end)\n select_end_between = select_end_pos.is_between(ln_start, ln_end)\n if start_between and end_between:\n # completely enclosed\n self.stdscr.addstr(displayed_line, color_pair(7))\n elif start_between:\n # only start between selection\n # end is on same line\n # only starting portion is highlighted\n self.stdscr.addstr(displayed_line[ : select_end_pos.x - ln_start.x + 1], color_pair(7))\n self.stdscr.addstr(displayed_line[select_end_pos.x - ln_start.x + 1 : ])\n elif end_between:\n # only end between selection\n # start is on same\n # only ending portion is highlighted\n self.stdscr.addstr(displayed_line[ : select_start_pos.x - ln_start.x])\n self.stdscr.addstr(displayed_line[select_start_pos.x - ln_start.x : ], color_pair(7))\n elif select_start_between and select_end_between:\n # selection is all on this line\n # start and end not highlighted\n self.stdscr.addstr(displayed_line[ : select_start_pos.x - ln_start.x])\n self.stdscr.addstr(\n displayed_line[select_start_pos.x - ln_start.x : select_end_pos.x - ln_start.x + 1],\n color_pair(7)\n )\n self.stdscr.addstr(displayed_line[select_end_pos.x + 1 - ln_start.x : ])\n else:\n # not enclosed by selection at all\n self.stdscr.addstr(displayed_line)\n else:\n self.stdscr.addstr(displayed_line)\n if index != len(displayed_lines) - 1:\n self.stdscr.addstr('\\n')\n self.stdscr.move(caret.y - scr_topleft.y + HEADER_LEN, caret.x - scr_topleft.x + PAD_LEN)", "def GetSelection(self):\r\n\r\n return self._current", "def _bselect(self, selection, start_bindex, end_bindex):\n selection.select(self._model.index2qindexb(start_bindex), self._model.index2qindexb(end_bindex))\n selection.select(self._model.index2qindexc(start_bindex), self._model.index2qindexc(end_bindex))", "def set_current_tool_to_selection_tool(self):\n\n self.variables.current_shape_id = self.variables.select_rect_id\n self.variables.active_tool = TOOLS.SELECT_TOOL\n self.variables.current_tool = 
TOOLS.SELECT_TOOL", "def _onRemove(self, event):\n index = self.colorlist.GetSelection()\n del self.graphColors[index]\n self._tupleListToStrings()\n if len(self.graphColors) > 0:\n self.colorlist.SetSelection(0)\n self._updateButtons(None)", "def setSelection(self, current: QModelIndex, old: QModelIndex):\n node = current.internalPointer()\n if node is not None:\n typeInfo = node.typeInfo()\n self.showEditor(typeInfo)\n for type, editor in self._editor_dict.items():\n editor.setSelection(current)", "def current_selection(self):\n row_ids = set(item.row() for item in self.tableView.selectedIndexes())\n sel = []\n for row_id in row_ids:\n row = self.table[row_id]\n sel.append('\\t'.join(row))\n return '\\n'.join(sel)", "def __editSelectBrace(self):\n self.activeWindow().selectToMatchingBrace()", "def current_selection(self):\n row_ids = set(item.row() for item in self.tableView.selectedIndexes())\n sel = []\n for row_id in row_ids:\n row = self.table[row_id]\n sel.append('\\t'.join(row[self.table.ordinal:]))\n return '\\n'.join(sel)", "def _do_select(self, start_bindex, end_bindex):\n self.select(QItemSelection(), QItemSelectionModel.Clear)\n if start_bindex > end_bindex:\n start_bindex, end_bindex = end_bindex, start_bindex\n\n selection = QItemSelection()\n if row_number(end_bindex) - row_number(start_bindex) == 0:\n # all on one line\n self._bselect(selection, start_bindex, end_bindex)\n elif row_number(end_bindex) - row_number(start_bindex) == 1:\n # two lines\n self._bselect(selection, start_bindex, row_end_index(start_bindex))\n self._bselect(selection, row_start_index(end_bindex), end_bindex)\n else:\n # many lines\n self._bselect(selection, start_bindex, row_end_index(start_bindex))\n self._bselect(selection, row_start_index(start_bindex) + 0x10, row_end_index(end_bindex) - 0x10)\n self._bselect(selection, row_start_index(end_bindex), end_bindex)\n\n self.select(selection, QItemSelectionModel.SelectCurrent)\n self.start = start_bindex\n self.end = end_bindex\n self.selectionRangeChanged.emit(end_bindex)", "def RefreshSelected(self):\r\n\r\n if self._freezeCount:\r\n return\r\n\r\n # TODO: this is awfully inefficient, we should keep the list of all\r\n # selected items internally, should be much faster\r\n if self._anchor:\r\n self.RefreshSelectedUnder(self._anchor)", "def SetHilightNonFocusColour(self, colour):\r\n\r\n self._hilightUnfocusedBrush = wx.Brush(colour)\r\n self.RefreshSelected()", "def updateSelection(self, selectionItem):\n self.currentLayerData = self.layers[self.getCurrentRow()]\n if self.model.indexFromItem(self.jobRow) == selectionItem.indexes()[0]:\n # Job Row is selected. Update selection to the last selected or first layer.\n if self.selected is None:\n self.selected = self.jobRow.child(0)\n self.setSelectedFromItem(self.selected)\n else:\n currentRow = self.getCurrentRow()\n self.selected = self.jobRow.child(currentRow)\n self.selectionChanged.emit(self.currentLayerData)", "def deselect_me(self):\r\n\t\tself.active = False\t\t\r\n\t\t#print('Frame active')\r", "def clear_highlights(addr_set, bv):\n for addr in addr_set:\n blocks = bv.get_basic_blocks_at(addr)\n for block in blocks:\n block.set_user_highlight(HighlightStandardColor.NoHighlightColor)", "def treeview_select(self, event=None):\n\n for i in self.list_box.curselection():\n self.list_box.selection_clear(i)\n\n self.dialog_selection.clear()\n\n item = normpath(self.climb(self.treeview.focus()))\n self.dialog_selection.append(item)", "def unselectAll(self):\n\t\tself.tree.UnselectAll()" ]
[ "0.71389216", "0.7015364", "0.65264606", "0.6345358", "0.63410205", "0.6158486", "0.61175185", "0.6002156", "0.59705675", "0.5914801", "0.5895148", "0.58718735", "0.5871529", "0.579935", "0.5699949", "0.56871504", "0.5649178", "0.5641103", "0.5631721", "0.5626519", "0.5604202", "0.55580556", "0.5534214", "0.5489285", "0.54664844", "0.54637337", "0.5453814", "0.54407436", "0.5431922", "0.5410711", "0.5378798", "0.5343221", "0.53296953", "0.5312378", "0.5300842", "0.52141035", "0.52023065", "0.5201167", "0.51803136", "0.51739764", "0.5167082", "0.5163494", "0.51466906", "0.5141556", "0.5136347", "0.51325005", "0.51121813", "0.50957215", "0.50925195", "0.50794333", "0.50778764", "0.50704604", "0.50651836", "0.506073", "0.5059907", "0.5056786", "0.5046288", "0.5038923", "0.50291115", "0.5023628", "0.50202954", "0.5009508", "0.50057435", "0.50057435", "0.49967366", "0.49956223", "0.49631673", "0.49519822", "0.49456957", "0.49326053", "0.49280804", "0.491633", "0.49055806", "0.4897549", "0.48675317", "0.4847", "0.48460025", "0.4844929", "0.4840071", "0.48391786", "0.48319373", "0.48315835", "0.48249522", "0.48175648", "0.47969916", "0.4792365", "0.47921336", "0.47609004", "0.47552946", "0.4752138", "0.4750268", "0.4749637", "0.47382498", "0.47339645", "0.4731025", "0.47287568", "0.47258097", "0.47248572", "0.47201326", "0.47012463" ]
0.66989064
2
Opens a display of the todoList for the user to select an item and choose an action to take.
def selectTodoItem(stdscr, todoList):
    selectionList = displayTodoList(stdscr, todoList)
    currentSelection = selectionList.current()
    highlightSelection(stdscr, currentSelection)
    while True:
        lastSelection = currentSelection
        k = stdscr.get_wch()
        if k in ['k', curses.KEY_UP]:
            currentSelection = selectionList.prev()
        elif k in ['j', curses.KEY_DOWN]:
            currentSelection = selectionList.next()
        elif k in ['d']:
            return Action(Action.REMOVE, currentSelection.todoItem)
        elif k == 'q':
            return None
        switchSelection(stdscr, lastSelection, currentSelection)
        stdscr.refresh()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trigger_open(self):\n self.get_selected()\n if self.selected_item:\n self.controller.display_item(self.selected_item)", "def do_todo_open(self, arg):\n try:\n open_list = arg[\"<list_param>\"]\n choice = arg[\"--choice\"]\n if choice == \"name\":\n open_list_str = \" \".join(open_list)\n print(open_list_str)\n elif choice == \"id\":\n open_list_str = int(\" \".join(open_list))\n print (open_list_str)\n app.ToDoApp.to_open_todo(open_list_str)\n \n \n except ValueError as e:\n cprint((e), 'red')", "def do_list(self, arg):\n try:\n cprint (\"Here are your todo lists: \\n\", 'blue')\n app.ToDoApp.to_view_todo()\n\n except ValueError as e:\n cprint(e, 'red')", "def open_task_list(self):\n scrollable_task = ScrollView()\n scrollable_task.add_widget(self.get_task_list())\n\n new_task_btn = RightBottomFloatingButton(\n icon='plus',\n md_bg_color=self.app.theme_cls.accent_color,\n elevation_normal=8)\n new_task_btn.bind(on_press=lambda x: self.open_create_task_dialog())\n\n self.task_list_layout.add_widget(scrollable_task)\n self.task_list_layout.add_widget(new_task_btn)\n\n self.task_list.open()", "def display_todo_list_view(request: HttpRequest, pk: int) -> HttpResponse:\n todo_list = TodoListModel.objects.get(id=pk)\n\n return render(request, 'todo/display_todo_list.html', {'todo_list': todo_list})", "def on_actions_list(self, e):\n self.PopupMenu(self.popup_menu())", "def menu(self):\n print('1) Today\\'s tasks')\n print('2) Week\\'s tasks')\n print('3) All tasks')\n print('4) Missed tasks')\n print('5) Add task')\n print('6) Delete task')\n print('0) Exit')\n self.menu_choice = input()", "def menu_loop(todo_list, save_file_location):\r\n show_hidden = False\r\n selection = 0\r\n invalid_input = False\r\n while selection != 6:\r\n if invalid_input:\r\n invalid_input = False\r\n else:\r\n print_list(save_file_location, todo_list, True, show_hidden)\r\n divider(137 + 17) # Length of prompt statement below\r\n list_status = check_list_status(todo_list)\r\n if list_status == 0: # No Issues\r\n selection = int(clean_input(\"Please enter: 1 for Add Item, 2 for \"\r\n \"Remove Item, 3 for Edit Item, \"\r\n \"4 for Mark Item Complete, \"\r\n \"5 for Toggle Hidden, and 6 for \"\r\n \"Exit, 7 for Concept \"\r\n \"Demonstration\\n\"))\r\n elif list_status == 1: # Empty List - No Remove, Edit, Mark, or Toggle\r\n selection = int(clean_input(\"Please enter: 1 for Add Item, and 6 \"\r\n \"for Exit, 7 for Concept \"\r\n \"Demonstration\\n\"))\r\n else: # Entirely Hidden List\r\n selection = int(clean_input(\"Please enter: 1 for Add Item, 5 for \"\r\n \"Toggle Hidden, and 6 for Exit, \"\r\n \"7 for Concept Demonstration\\n\"))\r\n # Uses the clean_input function above to get a number from the\r\n # user, converting it to an int so a decimal won't return an\r\n # invalid input in the following steps\r\n print(\"\") # Blank Print statement to add an extra blank line after\r\n # user input before displaying response\r\n if selection == 1: # Add Item - modify the list variable, then save\r\n # to file\r\n add_item(todo_list)\r\n elif selection == 2: # Remove Item - modify the list variable, then\r\n # save to file\r\n if list_status == 0:\r\n remove_item(todo_list)\r\n elif list_status == 2:\r\n print(\"Invalid Command: The Todo List has no visible items \"\r\n \"to remove\")\r\n else:\r\n print(\"Invalid Command: The Todo List has no items to remove\")\r\n elif selection == 3: # Edit Item - modify the list variable, then save\r\n # to file\r\n if list_status == 0:\r\n edit_item(todo_list)\r\n elif 
list_status == 2:\r\n print(\"Invalid Command: The Todo List has no visible items \"\r\n \"to edit\")\r\n else:\r\n print(\"Invalid Command: The Todo List has no items to edit\")\r\n elif selection == 4: # Mark Item Complete - modify the list variable,\r\n # then save to file\r\n if list_status == 0:\r\n mark_complete(todo_list)\r\n elif list_status == 2:\r\n print(\"Invalid Command: The Todo List has no visible items \"\r\n \"to mark complete\")\r\n else:\r\n print(\"Invalid Command: The Todo List has no items to mark \"\r\n \"complete\")\r\n elif selection == 5: # Show Hidden - modify the list variable, then\r\n # save to file\r\n if list_status == 0 or list_status == 2:\r\n if show_hidden:\r\n print(\"No longer showing hidden items\")\r\n show_hidden = False\r\n else:\r\n print(\"Now showing hidden items\")\r\n show_hidden = True\r\n else:\r\n print(\"Invalid Command: The Todo List has no items to show or \"\r\n \"hide\")\r\n elif selection == 6: # Exit Program\r\n print(\"Now Closing\")\r\n elif selection == 7: # Extra section to demonstrate proficiency with\r\n # topics covered in class - Sprint 1\r\n concept_demonstration()\r\n else:\r\n invalid_input = True\r\n print(\"Invalid Input\\nPlease Try Again\")", "def tool_open_clicked(self, widget, data=None):\n self.open_chooser.show()", "def _openButton(self):\n #get the specified file\n selected_file = self.view.list.getSelected()\n\n if selected_file:\n self.model.open(selected_file)\n return\n\n #prompt if they really want to open maya\n dialogs = Dialogs(self.view)\n\n msg = 'No file selected!'\n msg += '\\n\\nAre you sure you want to open maya without a file?'\n dialogs.confirmPrompt(msg)\n\n self.model.open()", "def show_edit_actor(self):\n\t\ttry:\n\t\t\tnombre = self.ui.lista_act.currentItem().text()\n\t\t\tformulario = view_form_actor.Form(self)\n\t\t\tformulario.edit(nombre)\n\t\t\tformulario.exec_()\n\t\t\tself.load_data()\n\t\texcept AttributeError as e:\n\t\t\terrorMessageBox = QtGui.QMessageBox.warning(self,\"Error\",\"Debe seleccionar un actor\")", "def action_to_do(self) -> None:\n # 1\n order = ct.Controls.end_round()\n self.master.master.list_instances_menus_tournament = Menu.update_menus_tournament(order, self.master)\n self.master.master.left_window.update_and_display(self.master.master.list_instances_menus_tournament)\n self.master.master.launch()\n # 2\n self.master.destroy()", "def activate_item(self, index):\n item = index.model().listdata[index.row()]\n self.get_selected(item)\n self.controller.display_item(item)", "def DoAction(self,event):\r\n selections = self.list.GetSelections()\r\n if not selections: return bell()\r\n itemDex = selections[0]\r\n item = self.items[itemDex]\r\n self.data.action(item)", "def file_menu_open_activate(self, widget, data=None):\n self.open_chooser.show()", "def _open_item(self, *args, **kwargs):\n \"Does nothing\"", "def clickMe():\r\n global Format\r\n Format = typeToChoose.get()\r\n print(Format)\r\n action.configure(text='selected ' + Format) # show the selected item after clicked\r\n action.configure(state='disabled') # button disabled after clicked\r", "def edit_button_clicked(self, obj):\n handle = self.get_selected()\n if handle:\n note = self.dbstate.db.get_note_from_handle(handle)\n try:\n from .. 
import EditNote\n EditNote(self.dbstate, self.uistate, self.track, note,\n callertitle = self.callertitle,\n extratype = [self.notetype] )\n except WindowActiveError:\n pass", "def interactive(todofile):\n tmpfile = tempfile.NamedTemporaryFile(suffix='.txt', prefix='todo-',\n delete=False)\n print >> tmpfile\n print >> tmpfile , '# Todo items should be formed as <date> -- <todo>'\n print >> tmpfile , '# The date field is optional.'\n print >> tmpfile , '# Lines starting with # are ignored.'\n tmpfile.close()\n subprocess.call(['sensible-editor', tmpfile.name])\n with open(tmpfile.name) as writtenfile:\n add_items(todofile, writtenfile.readlines())\n os.remove(tmpfile.name)", "def do_list_items(self, arg):\n try:\n cprint (\"These are your items: \\n\", 'blue')\n my_items = arg[\"<all_items>\"]\n choice = arg[\"--choice\"]\n if choice == \"name\":\n my_items_str = \" \".join(my_items)\n print(my_items_str)\n elif choice == \"id\":\n my_items_str = int(\" \".join(my_items))\n print (my_items_str)\n app.ToDoApp.to_view_items(my_items_str)\n \n\n\n \n except ValueError as e:\n cprint((e), 'red')", "def show_popup(self, view, docstring, location=None):", "def mouseDoubleClickEvent(self, e):\n self.win = items.edit.Edit(self)\n self.win.setModal(True)\n self.win.show()", "def see_tasks(self, widget):\n my_task_list = tasklistwindow.TaskListWindow(self.task_list)", "def main_menu():\n\tprint(\n\"\"\"\nUsage :-\n$ ./todo add \"todo item\" # Add a new todo\n$ ./todo ls # Show remaining todos\n$ ./todo del NUMBER # Delete a todo\n$ ./todo done NUMBER # Complete a todo\n$ ./todo help # Show usage\n$ ./todo report # Statistics\"\"\")", "def select_item(todo_list, prompt='Error'): # Ask the user\r\n # which item from the list is to be modified\r\n valid = False\r\n index = 0\r\n while not valid:\r\n counter = 1 # counter for index printing\r\n for item in todo_list: # The range needs to be the length of the list\r\n # being printed\r\n if item.visible:\r\n print(counter, item.text, sep='\\t')\r\n else:\r\n print(counter, \"~ {0} ~\".format(item.text), sep='\\t')\r\n counter += 1\r\n # Printing the item number, then the item, with a tab separating\r\n # them\r\n index = int(clean_input(prompt))\r\n if index < counter:\r\n valid = True\r\n else:\r\n print(\"Invalid Input: Number is too big\")\r\n return index - 1", "def request_context_menu(self, pos):\n super(ItemListView, self).request_context_menu(pos)\n self.get_selected()\n self.manage_actions()\n self.display_context_menu(pos)", "def show(self): \n InputItem.show(self,*args)\n self.input.selectAll()", "def show(self): \n InputItem.show(self,*args)\n self.input.selectAll()", "def open_viewer(self):\r\n choice = self.thoughts_lst.get(tk.ACTIVE)\r\n subject = self.refference[choice]\r\n tbl = self.home_table[subject]\r\n view = kit.SQL_pull('*', tbl, 'subject_id = \"{}\"'.format(subject))\r\n obj = kit.class_fill(tbl, view[0])\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n jv.Viewer(self.session, obj)", "def goto_browse_list(self):\n\n self.browse.click()", "def goto_browse_list(self):\n\n self.browse.click()", "def open_restaurant(self):\n print(\"We're Open!\")", "def displayTodoList(stdscr, todoList):\n stdscr.clear()\n i = 1\n selectionList = SelectionList()\n for category in todoList.categories:\n stdscr.addstr(i, 1, '='*len(category))\n stdscr.addstr(i+1, 1, category)\n stdscr.addstr(i+2, 1, '='*len(category))\n i+=4\n j = 1\n for item in todoList[category]:\n stdscr.addstr(i, 1, str(j)+'. 
'+item.name)\n selectionList.append(Selection((i, 1, str(j)+'. '+item.name), item))\n i += 1\n j += 1\n i += 1\n\n return selectionList", "def open_restaurant(self):\n msg = self.name + \" is open. Come on in!\"\n print(\"\\n\" + msg)", "def on_edit_students_select(self):\n edit_window = Students()\n edit_window.exec_()", "def file_menu_new_activate(self, widget, data=None):\n self.new_chooser.show()", "def show_item(self, show_item):\n\n self._show_item = show_item", "def fuseListItemDoubleClicked(self, index):\r\n idx= index.row()\r\n self.model.get_all_fuses()[idx].get_widget().show()", "def open_restaurant(self):\n\t\tprint(\"The restaurant is now open!\")", "def open_task(self, instance):\n self.task_manager.load(instance.text)\n\n # Build the task in editor\n for component in self.task_manager.task.graph.V:\n self.add_component(component)\n for tagged_link in self.task_manager.task.graph[component]:\n self.task_manager.add_editor_link(\n component,\n self.task_manager.task.graph.V[tagged_link.vertex_index],\n self.ids.edit_area,\n index=tagged_link.tag)\n self.task_list.dismiss()", "def open_restaurant(self):\n print(self.name.title() + \" is now open!\")", "def open_restaurant(self):\n print(self.name.title() + \" is now open!\")", "def open_restaurant(self):\n print(self.name.title() + \" is now open!\")", "def open_restaurant(self):\n print(self.name.title() + \" is now open!\")", "def show(self): \n InputItem.show(self)\n self.input.selectAll()", "def show(self): \n InputItem.show(self)\n self.input.selectAll()", "def __init__(self, todolist):\n\t\tself.todolist = todolist\n\t\tself.selection_id = None;\n\t\t\n\t\t\n\t\tself.store = self.create_model(todolist)\n\t\tself.display_view = gtk.TreeView(self.store)\n\t\tself.display_view.set_size_request(280, 200)\n\t\tself.display_view.set_rules_hint(True)\n\t\t\n\t\tself.display_view.connect('key_press_event', self.key_pressed)\n\n\t\tselection = self.display_view.get_selection()\n\t\tselection.connect(\"changed\", self.selection_changed)\n\t\tselection.set_mode(gtk.SELECTION_SINGLE)\n\n\t\tself.create_columns(self.display_view)\n\t\t\n\t\n\t\tself.sw = gtk.ScrolledWindow()\n\t\tself.sw.add(self.display_view)\n\t\tself.sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)", "def index():\n\n cur = db.get_db().cursor()\n\n cur.execute('SELECT * FROM todos')\n todos = cur.fetchall()\n\n if request.method == 'POST':\n new_task = request.form['action']\n if new_task == '':\n pass\n else:\n create_new_task(cur)\n cur.execute('SELECT * FROM todos')\n todos = cur.fetchall()\n cur.close()\n\n return render_template(\"index.html\", todos=todos)", "def open_restaurant(self):\r\n print(\"The restaurant is open now \")", "def show(self, item_id):\n pass", "def open_restaurant(self):\n print(f'The Restaurant {self.restaurant_name} is opened...')", "def _handle_popup_open(self):\n log.debug(\"Handling popup open\")\n self.current_selected = self.listbox.get_focus()[1]", "def menuItem(*args):\n\toptionsWindow()", "def open_restaurant(self):\n print(\"O Restaurante esta aberto\")", "def showMenu():\n print( \"1. Create New User\" )\n print( \"2. Authorize\" )\n print( \"3. Send SMS\" )\n print( \"4. Send Email\" )\n print( \"5. Get Recently Sent Message\" )\n print( \"6. 
Exit\" )", "def todo_list():\n if 'logged_in' not in session:\n return render_template('login.html')\n else:\n #conncetion to the database\n conn = sqlite3.connect(\"todo.db\")\n c = conn.cursor()\n\n #select query to get all values of task\n c.execute(\"SELECT Task_id, Description, Due_date, Modified_date FROM task WHERE status LIKE '1'\")\n result = c.fetchall()\n c.close()\n return render_template(\"task_list.html\", rows=result)", "def view_list():\n # an HTML representation of the user shopping list\n printed_list = user[\"name\"]\n printed_list += \"<form>\"\n printed_list += '<br>'\n printed_list += 'New Item:<br>'\n printed_list += '<input type=\"text\" name=\"newitem\">'\n printed_list += '<br>'\n printed_list += '<input type=\"submit\" value=\"Submit\">'\n printed_list += \"</form>\"\n printed_list += list_to_html(user[\"shopping_list\"])\n\n return printed_list", "def on_activated(self, widget, row, col):\n model = widget.get_model()\n item = model[row][1]\n if os.path.isdir(item) and opt_cmd == 'xdg-open':\n open_file(item)\n elif opt_force:\n open_file(item)\n else:\n md = gtk.MessageDialog(None, \n gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_QUESTION, \n gtk.BUTTONS_YES_NO, \"Run with %s?\" % opt_cmd)\n res = md.run()\n if res == gtk.RESPONSE_YES:\n open_file(item)\n md.destroy()", "def show_done():\n #conncetion to the database\n conn = sqlite3.connect('todo.db')\n c = conn.cursor()\n\n #if status of task is 0 it means task is completed.\n c.execute(\"SELECT Task_id, Description FROM task WHERE status LIKE 0\")\n result = c.fetchall()\n c.close()\n return render_template(\"show_done.html\", rows=result)", "def pick_a_story():\n return render('pick_a_story.html',story_list=story_list)", "def show_help():\n clear_screen()\n print(\"\"\"\n What should we pick up at the store?\\n\n Enter 'DONE' or 'QUIT' to stop adding items.\n Enter 'HELP' for this help.\n Enter 'SHOW' to see your current list.\n Enter 'REMOVE' to remove an item from the list.\n \"\"\")", "def goto_faq(self):\n\n self.faq.click()", "def goto_faq(self):\n\n self.faq.click()", "def open_restaurant(self):\n msg = f\"{self.name} is open. 
Come on in!\"\n print(f\"\\n{msg}\")", "def show_action(self):\n if self.show_save_action:\n self.ui_SelectedName.setEnabled(True)\n self.ui_Action.setText(\"Save\")\n if self.default_filename is not None:\n self.ui_SelectedName.setText(self.default_filename)\n if self.show_dirs_only:\n self.ui_SelectedName.setEnabled(True)\n self.ui_Action.setText(\"Get Directory\")", "def __showContextMenu(self, pos):\n idx = self.__index.indexAt(pos)\n if idx.isValid():\n menu = QMenu()\n curTab = menu.addAction(self.tr(\"Open Link\"))\n newTab = menu.addAction(self.tr(\"Open Link in New Tab\"))\n newBackgroundTab = menu.addAction(\n self.tr(\"Open Link in Background Tab\"))\n newWindow = menu.addAction(self.tr(\"Open Link in New Window\"))\n menu.move(self.__index.mapToGlobal(pos))\n \n act = menu.exec_()\n model = self.__index.model()\n if model is not None:\n keyword = model.data(idx, Qt.DisplayRole)\n links = model.linksForKeyword(keyword)\n if len(links) == 1:\n link = QUrl(links[list(links.keys())[0]])\n else:\n link = self.__selectLink(links, keyword)\n \n if not link.isEmpty() and link.isValid():\n if act == curTab:\n self.openUrl.emit(link)\n elif act == newTab:\n self.newTab.emit(link)\n elif act == newBackgroundTab:\n self.newBackgroundTab.emit(link)\n elif act == newWindow:\n self.newWindow.emit(link)", "def open_restaurant(self):\n\t\tprint(f\"The restaurant is open.\")", "def tool_new_clicked(self, widget, data=None):\n self.new_chooser.show()", "def main_menu(self):\n\n clear_screen()\n print('\\nWork Log With Database\\n')\n\n options = {'1': 'Add a new task', '2': 'Find a task', '3': 'Quit'}\n\n for k, v in options.items():\n print(k + \". \" + v)\n\n while True:\n print()\n user_choice = input(\"Please enter the number of choice: \").lower().strip()\n\n if user_choice == '1':\n task = self.get_task_info()\n self.task.add_task(task)\n print('Task successfully added')\n self.main_menu()\n elif user_choice == '2':\n search_method_choice = self.search_method_menu()\n self.search_tasks(search_method_choice)\n elif user_choice == '3':\n print(\"\\nExiting Work Logger\")\n exit()\n else:\n print(\"\\nInvalid choice, please try again.\")", "def display_main_menu(my_list1):\n\n user_options = \"\"\"\n \\nWould you like to:\n A. Add a new item\n B. View list\n C. Delete first item in list\n D. Quit the program\n \"\"\"\n\n while True:\n # Collect input and include your if/elif/else statements here.\n print user_options\n user_input = raw_input(\">>> \").upper()\n\n if user_input == \"A\":\n add_to_list(my_list1)\n elif user_input == \"B\":\n view_list(my_list1)\n elif user_input == \"C\":\n delete_first_item(my_list1)\n elif user_input == \"D\":\n break\n else:\n print \"Sorry, I don't know what you mean. Please try again.\"", "def on_mouse_enter(self, event):\n global controller\n controller = self\n if len(self.tasks) == 1:\n # for k, v in self.tasks.items():\n # self.task = v\n self.task = next(iter(self.tasks.values()))\n ht = self.task.helptext()\n self.set_help_text(ht)\n self.task.start(self.x, self.y)\n else:\n # show selection of available tasks\n self.set_help_text(None)", "def switch_to_context(self):\n self.S('button.catalog').click()\n self.S('li.chapterItem:nth-child(2)').click()", "def show_add_actor(self):\n\t\tformulario = view_form_actor.Form(self)\n\t\tformulario.exec_()\n\t\tself.load_data()", "def double_clicked_to_view(self):\n\n # TODO need this method? 
better in init to go to view_file\n self.view_file()", "def activate_next_item(self):\n select_ok = self.select_next_item()\n if select_ok:\n self.controller.display_item(self.selected_item)\n else:\n if self.can_fetch_more:\n self.controller.display_message(\"No more message, please fetch more !\")\n else:\n self.controller.display_message(\"No more message !\")", "def onOpenMenu(self, item):\n self.dialog = pyui.dialogs.FileDialog(os.getcwd(), self.onOpenChosen, \".*stk\")\n self.dialog.doModal()\n return 1", "def __openFile(self):\n itm = self.findList.selectedItems()[0]\n self.on_findList_itemDoubleClicked(itm, 0)", "def open_restaurant(self):\r\n\t\tprint(self.restaurant_name.title() + \" is open\")", "def on_click(self) -> None:\n self.editing = True", "def open_main_window(self):\r\n track_terms_dic = ''\r\n sg.theme(self.look)\r\n\r\n layout = [[sg.Text('Welcome to tweeet monitor ')],\r\n [sg.Text('Please enter Details ')],\r\n [sg.Text('User Mail', size=(15, 1)), sg.InputText()],\r\n [sg.Text('Timout', size=(15, 1)), sg.InputText('', enable_events=True, key='-DIGITS-')],\r\n [sg.Text('')],\r\n [sg.Text('You can select an existing list or create a new one '),\r\n sg.Combo(self.files, default_value='Select Track Terms List ', key='-COMBO1-')],\r\n [sg.Text('')],\r\n [sg.Button('Select Exists List'), sg.Button('Create a New List')],\r\n [sg.Text('\\n')],\r\n [sg.Button('Start Monitor'), sg.Button('Exit')]\r\n ]\r\n\r\n window = sg.Window('Monitor tweeter', layout)\r\n # Event Loop\r\n while True:\r\n event, values = window.read()\r\n\r\n if event == sg.WIN_CLOSED:\r\n exit()\r\n elif event == 'Select Exists List' or event == 'Create a New List' or event == 'Start Monitor':\r\n user_mail = values[0]\r\n timeout = values['-DIGITS-']\r\n list_dic = values['-COMBO1-']\r\n\r\n if self.check(user_mail) == 'Invalid Email':\r\n self.info_popup_window('You Enter not valid mail ', 'Info', self.look)\r\n elif event == 'Select Exists List':\r\n if list_dic == 'Select Track Terms List ':\r\n self.info_popup_window('Track Terms List ', 'Info', self.look)\r\n else:\r\n file_name = self.path + self.bachslash + list_dic\r\n os.system(file_name)\r\n track_terms_dic = list_dic\r\n elif event == 'Create a New List':\r\n track_terms_dic = self.open_window()\r\n track_terms_dic = track_terms_dic + '.txt'\r\n elif event == 'Start Monitor':\r\n if track_terms_dic == '':\r\n self.info_popup_window('Please, Create new Dictionary or select one ', 'Info', self.look)\r\n elif track_terms_dic != '':\r\n file_name = self.path + self.bachslash + track_terms_dic\r\n my_file = open(file_name, \"r\")\r\n content = my_file.read()\r\n content = content.split(\"\\n\")\r\n content = self.cleanList(content)\r\n # print(content)\r\n my_file.close()\r\n now = datetime.now()\r\n date_time = now.strftime(\"%m/%d/%Y, %H:%M:%S\")\r\n dict_list = {'User': user_mail,\r\n 'Timeout': timeout,\r\n 'Dictionary': list_dic,\r\n 'Create Date': date_time,\r\n 'track_terms_list': content\r\n }\r\n header = ['user_mail', 'Timeout', 'Dictionary', 'Create Date', 'list words']\r\n if os.path.isfile(self.file_track_terms_audit) == False:\r\n # check if the file exsist = if not: create file and print header to the file\r\n with open(self.file_track_terms_audit, 'a', newline='\\n') as file:\r\n try:\r\n write = csv.writer(file)\r\n write.writerow(header)\r\n write.writerows(self.values_list)\r\n file.close()\r\n except:\r\n print(\"Something went wrong when writing to the file\")\r\n else:\r\n self.values_list = list(dict_list.values())\r\n # 
print ('self.values_list :****',self.values_list)\r\n with open(self.file_track_terms_audit, 'a', newline='\\n') as file:\r\n try:\r\n write = csv.writer(file)\r\n self.values_list = [self.values_list]\r\n write.writerows(self.values_list)\r\n file.close()\r\n except:\r\n print(\"Something went wrong when writing to the file\")\r\n print('self.values_list:', self.values_list)\r\n\r\n window.close()\r\n\r\n print('track_terms_dic: ', track_terms_dic)\r\n print('dict_list:', dict_list)\r\n return (dict_list)\r\n\r\n # always check for closed window\r\n if event in (sg.WIN_CLOSED, 'Exit'):\r\n break\r\n\r\n if event == '-LIST-' and len(values['-LIST-']):\r\n sg.popup('Selected ', values['-LIST-'])\r\n\r\n if len(values['-DIGITS-']) and values['-DIGITS-'][-1] not in ('0123456789'):\r\n # delete last char from input\r\n window['-DIGITS-'].update(values['-DIGITS-'][:-1])\r\n\r\n window.close()", "def menu_open_files(self, event=None):\n self.parentPanel.open(event)", "def open_restaurant(self):\n\t\tprint(f\"The {self.restaurant_name} is open now.\")", "def open_user_page(self):\n self.switch_main_menu(\"Admin\")\n self.wait_unit_el_present(self.user_management_menu)\n self.click_menu(\"User Management\")\n self.click_menu(\"Users\")", "def show(self):\n self.Show()", "def show(self):\n # Display the menu.\n self._print_menu()\n\n # Wait for input.\n selection = None\n while selection not in self.__options:\n selection = input(\"(Choose an option): \")\n\n # Perform the command.\n _, command = self.__options[selection]\n return command(selection)", "def showList(parent,header,items,maxItems=0,title=''):\r\n numItems = len(items)\r\n if maxItems <= 0: maxItems = numItems\r\n message = string.Template(header).substitute(count=numItems)\r\n message += '\\n* '+'\\n* '.join(items[:min(numItems,maxItems)])\r\n if numItems > maxItems:\r\n message += _('\\n(And %d others.)') % (numItems - maxItems,)\r\n return askStyled(parent,message,title,wx.OK)", "def edit_item(todo_list):\r\n item = select_item(todo_list, \"Please enter the item number you wish to \"\r\n \"edit\\nEnter a negative number or zero to \"\r\n \"cancel\")\r\n if item >= 0:\r\n while True:\r\n value = clean_input(\"Which value would you like to edit? 
Enter:\\n1\"\r\n \" for the Item Text (Currently: {0})\\n2 for \"\r\n \"the Item Priority (Currently: {1})\\n3 to \"\r\n \"Cancel and Exit\".format(todo_list[item].text,\r\n str(todo_list[item].\r\n priority)))\r\n if value == 1: # Item Text Change\r\n print(\"The Current Text is: {0}\".format(todo_list[item].text))\r\n todo_list[item].text = input(\"New Text:\\n\")\r\n elif value == 2: # Item Priority Change\r\n print(\"The Current Priority is: {0}\".format(str(todo_list[item]\r\n .priority)))\r\n todo_list[item].priority = check_priority_overlap(\r\n int(clean_input(\"New Priority:\")), todo_list)\r\n # elif value == 3: # Item Group Change\r\n # print(f\"The Current Group is: {todo_list[item].group}\")\r\n # todo_list[item].group = int(clean_input(\"New Group Number:\"))\r\n elif value == 3: # Exit Changing Menu\r\n break\r\n else:\r\n print(\"Invalid Input - Please Try Again\")\r\n return", "def displayhelp(self):\n helper = HelpView(self)\n helper.activateWindow()\n helper.exec()\n self.activateWindow()", "def display(self):\n\t\tprint('The button in the window was clicked!')", "def show_completed():\n\n cur = db.get_db().cursor()\n\n create_new_task(cur)\n\n cur.execute('SELECT * FROM todos WHERE completed = true')\n todos = cur.fetchall()\n cur.close()\n\n return render_template('index.html', todos=todos)", "def main():\r\n save_file_location = \"Item_List.txt\"\r\n data_file_a = open(save_file_location, \"a\") # Opens ItemList.txt which\r\n # is accessible in the file variable, in append mode (using this so that\r\n # if the file exists, nothing happens, but if it does not exist, it gets\r\n # created from w3schools.com\r\n data_file_a.close() # Close the file, I now know it exists\r\n loaded_list = load_from_file(save_file_location)\r\n print(\"Welcome to the To-Do List - Version: 0.1.2\")\r\n divider(42) # Length of welcome statement above\r\n menu_loop(loaded_list, save_file_location)", "def present_view(self, confirmation=False, error=None):\n if confirmation:\n input(\"The entry has been added. 
Press Enter to continue\")\n return\n if error:\n print(\n \"\\n** ERROR **\\n{}\\n\\nPlease try again\".format(\n \"\\n\".join(f\"{k}: {' '.join(v)}\" for k, v in error.messages.items())\n )\n )\n print(self._layout)\n task = {\n \"date\": input(\"Enter date (DD/MM/YYYY): \"),\n \"title\": input(\"Task Title: \"),\n \"time_spent\": input(\"Time spent (rounded minutes): \"),\n \"notes\": input(\"Notes (Optional): \"),\n }\n return task", "def invoke (self, context, event):\n context.window_manager.fileselect_add (self)\n return {'RUNNING_MODAL'}", "def show(self) -> None:", "def food_selected(self, arg):\n\t\tfood = fooditemdao.retrieve_food(self.selected_food.get())\n\t\tself.lbl_unit.config(text=food.info['unit'])", "def menu_items():\n def show():\n form.show();\n form.activateWindow()\n form.raise_()\n\n lst = []\n lst.append((\"Import Programmableweb\", show))\n \n return tuple(lst)", "def open_restaurant(self):\n\t\topen = f\"{self.restaurant_name} is now open.\"\n\t\tprint(f\"\\n{open}\")", "def main_menu():\n print('\\n', '='*50, sep='')\n print(\"Choose an option by number: \")\n print(\"\\t 1 = Create or Connect to a new file database\")\n print(\"\\t 2 = Create a new memory database\")\n print('Type exit to quit program!')\n print('='*50, '\\n', sep='')", "def show(*args):\n I = Items()\n for arg in args:\n I.add_item(arg)\n I.write()", "def OnClick(self,event):\n \n item = self.tree.identify('item',event.x,event.y)\n\n self.UpdateText([element[0] for element in self.Input[0]].index(self.List[self.IDs.index(item)]))" ]
[ "0.7201459", "0.6838654", "0.6731044", "0.61206925", "0.61021215", "0.6093656", "0.6077997", "0.600439", "0.5995444", "0.59719115", "0.5954215", "0.5914398", "0.5907249", "0.584988", "0.58350575", "0.58085567", "0.5782954", "0.5759071", "0.57363194", "0.5728824", "0.5727189", "0.5716801", "0.5699352", "0.5678001", "0.56747925", "0.5644101", "0.5623846", "0.5623846", "0.56191295", "0.5619111", "0.5619111", "0.56100917", "0.55785996", "0.5575167", "0.5565886", "0.55600715", "0.555228", "0.55497557", "0.55391777", "0.5524861", "0.55205333", "0.55205333", "0.55205333", "0.55205333", "0.55204886", "0.55204886", "0.5509441", "0.5499362", "0.548848", "0.54874325", "0.5464999", "0.5430423", "0.5426078", "0.54258454", "0.54256314", "0.54175013", "0.5413393", "0.5410831", "0.540825", "0.5407908", "0.54071623", "0.5405316", "0.5405316", "0.5399506", "0.5393782", "0.53883487", "0.53879005", "0.53706986", "0.5362163", "0.536193", "0.5350215", "0.5348351", "0.5347826", "0.5343877", "0.534281", "0.5342506", "0.5342314", "0.53403276", "0.53372616", "0.5334136", "0.53324395", "0.5325605", "0.53195673", "0.53052706", "0.5302662", "0.5283548", "0.52781916", "0.52772707", "0.527671", "0.5269931", "0.52684337", "0.52559686", "0.525299", "0.52518475", "0.5247643", "0.52469134", "0.52404624", "0.52319294", "0.5227644", "0.5223731" ]
0.5482432
50
Creates an initial display of todoList.
def displayTodoList(stdscr, todoList):
    stdscr.clear()
    i = 1
    selectionList = SelectionList()
    for category in todoList.categories:
        stdscr.addstr(i, 1, '='*len(category))
        stdscr.addstr(i+1, 1, category)
        stdscr.addstr(i+2, 1, '='*len(category))
        i += 4
        j = 1
        for item in todoList[category]:
            stdscr.addstr(i, 1, str(j)+'. '+item.name)
            selectionList.append(Selection((i, 1, str(j)+'. '+item.name), item))
            i += 1
            j += 1
        i += 1

    return selectionList
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_todo_list_view(request: HttpRequest, pk: int) -> HttpResponse:\n todo_list = TodoListModel.objects.get(id=pk)\n\n return render(request, 'todo/display_todo_list.html', {'todo_list': todo_list})", "def __init__(self, todolist):\n\t\tself.todolist = todolist\n\t\tself.selection_id = None;\n\t\t\n\t\t\n\t\tself.store = self.create_model(todolist)\n\t\tself.display_view = gtk.TreeView(self.store)\n\t\tself.display_view.set_size_request(280, 200)\n\t\tself.display_view.set_rules_hint(True)\n\t\t\n\t\tself.display_view.connect('key_press_event', self.key_pressed)\n\n\t\tselection = self.display_view.get_selection()\n\t\tselection.connect(\"changed\", self.selection_changed)\n\t\tselection.set_mode(gtk.SELECTION_SINGLE)\n\n\t\tself.create_columns(self.display_view)\n\t\t\n\t\n\t\tself.sw = gtk.ScrolledWindow()\n\t\tself.sw.add(self.display_view)\n\t\tself.sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)", "def do_list(self, arg):\n try:\n cprint (\"Here are your todo lists: \\n\", 'blue')\n app.ToDoApp.to_view_todo()\n\n except ValueError as e:\n cprint(e, 'red')", "def __init__(self):\n\n\t\tself.tasklist = TaskList()\n\t\tself.legend = '\\nLegend: Not Due ' + Fore.CYAN + Style.BRIGHT + 'Upcoming ' + Fore.BLUE + \\\n\t\t Style.BRIGHT + 'Due ' + Fore.RED + Style.BRIGHT + 'Overdue ' + Fore.WHITE + Style.BRIGHT + \\\n\t\t Back.WHITE + 'Completed' + Fore.RESET + Style.NORMAL + Back.RESET", "def todo_list():\n if 'logged_in' not in session:\n return render_template('login.html')\n else:\n #conncetion to the database\n conn = sqlite3.connect(\"todo.db\")\n c = conn.cursor()\n\n #select query to get all values of task\n c.execute(\"SELECT Task_id, Description, Due_date, Modified_date FROM task WHERE status LIKE '1'\")\n result = c.fetchall()\n c.close()\n return render_template(\"task_list.html\", rows=result)", "def create_widget(self):\n self.widget = ListView(self.get_context())", "def init_list(self):\r\n self.listView.clear()\r\n for i in self.__config_all__[\"config\"]:\r\n self.listView.addItem(\"%s:%s\" % (i[\"paras\"][\"-r\"], i[\"paras\"][\"-p\"]))\r\n self.listView.setCurrentRow(self.__config_all__['select'])", "def __init__(self):\n\t\t\n\t\t# Initialise default list.\n\t\t# TODO: Support multiple lists\n\t\tself.todolist = TodoList(json_folder + 'todo.json')\n\n\t\t# Start of multiple lists support. 
Currently unused\n\t\tfor infile in glob.glob( os.path.join(json_folder, '*.json') ):\n\t\t\tlistname = infile.replace(json_folder, '').replace('.json', '')\n\t\t\tself.lists[listname] = TodoList(infile)\n\t\t\tprint 'Found:', infile.replace(json_folder, '').replace('.json', '')\n\n\t\tself.window = gtk.Window(gtk.WINDOW_TOPLEVEL)\n\t\tself.window.set_title('Simple Python Todo')\n\t\n\t\tsend_button = gtk.Button(None, gtk.STOCK_ADD)\n\t\tsend_button.connect('clicked', self.send_button_clicked)\n\n\t\tvbox = gtk.VBox(False, 10)\n\t\tvbox.set_border_width(10)\n\n\t\thbox_note_area = gtk.HBox(False, 0)\n\t\thbox_send_area = gtk.HBox(False, 0)\n\n\t\tself.current_list = ListPanel(self.todolist)\n\t\tsw_display = self.current_list.sw\n\t\thbox_note_area.pack_start(sw_display)\n\n\t\t# Set up the text view for adding new notes\n\t\tsw_add = gtk.ScrolledWindow()\n\t\tsw_add.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n\t\t\n\t\tself.textview_add = self.create_text_view(230, 80)\n\t\tself.textview_add.connect('key_press_event', self.textbox_key_pressed)\n\t\tsw_add.add(self.textview_add)\n\t\t\n\t\thbox_send_area.pack_start(sw_add)\n\t\thbox_send_area.pack_start(send_button)\n\n\n\t\t# Organise the boxes\n\t\tvbox.pack_start(hbox_note_area)\n\t\thbox_note_area.show()\n\t\tvbox.pack_start(hbox_send_area)\n\t\thbox_send_area.show()\n\t\tself.window.add(vbox)\n\n\t\tself.window.connect('delete_event', self.delete_event)\n\t\tself.window.show_all()", "def todolist(self, todolist_id):\r\n return tdl.Todolist(self, todolist_id)", "def create_model(self, todolist):\n\t\tstore = gtk.ListStore(int, str)\n\n\t\tfor todo in todolist:\n\t\t\tnew_row = (todo.id, todo.text)\n\t\t\tprint new_row\n\t\t\tstore.append(new_row)\n\n\t\treturn store", "def todos(self):\r\n return Todos(self)", "def todo_list_view(request):\n\n context = {}\n queryset = Todo.objects.filter(user=request.user)\n context['lists'] = queryset\n return render(request,'todos/index.html', context)", "def do_todo_create(self, arg):\n try:\n my_list = arg[\"<list_name>\"]\n my_list_str = \" \".join(my_list) \n app.ToDoApp.to_create_todo(my_list_str)\n \n except ValueError as e:\n cprint(e, 'red')", "def onInit(self):\n self.list_control = self.getControl(6)\n self.getControl(1).setLabel(self.window_title)\n self.getControl(3).setVisible(False)\n try:\n self.getControl(7).setLabel(xbmc.getLocalizedString(222))\n except Exception:\n pass\n\n self.getControl(5).setVisible(False)\n\n # add our items to the listing and focus the control\n self.list_control.addItems(self.listing)\n self.setFocus(self.list_control)", "def __str__(self):\n\n index_start = 1\n display_list = []\n max_name_len = 20\n additional_signs = 9\n\n heading = self.create_table_heading()\n underline = \"-\" * (max_name_len + additional_signs)\n display_list.append(\"\".join(heading))\n\n for index, item in enumerate(self.todo_items, index_start):\n display_list.append(\"| \" + str(index) + \" | \" + str(item.name) + \" \" + item.is_done_mark + \"\\n\")\n display_list.append(underline + \"\\n\")\n return \"\".join(display_list)", "def list_todo_table(self):\n if self.is_todo_table_empty():\n print(\"nothing to do!\")\n return []\n else:\n return self.make_list_from_task()", "def index():\n\n cur = db.get_db().cursor()\n\n cur.execute('SELECT * FROM todos')\n todos = cur.fetchall()\n\n if request.method == 'POST':\n new_task = request.form['action']\n if new_task == '':\n pass\n else:\n create_new_task(cur)\n cur.execute('SELECT * FROM todos')\n todos = cur.fetchall()\n 
cur.close()\n\n return render_template(\"index.html\", todos=todos)", "def page_list_trash(self):\n\n list_taskid_desc = self.task_store_trash.sort_taskid_list_descending_lamport()\n title = \"woolnote - trash\"\n page_header_first_text = \"notes in the trash\"\n page_header_link_button_name = \"reset filter\"\n page_header_link_request_dict = {\"action\": \"show_list\"}\n page_header_list_of_warnings = None\n\n if self.error_msg_queue_list:\n page_header_list_of_warnings = self.helper_convert_msg_queue_list_to_list_for_output()\n\n history_id = self.save_history([\"action\"], alt_task_store_name=None)\n\n return html_page_templates.page_list_notes_template(list_taskid_desc=list_taskid_desc,\n self_sess_action_auth=self.sess_action_auth, title=title,\n primary_task_store=self.task_store, alt_task_store=self.task_store_trash,\n alt_task_store_name=\"task_store_trash\", history_back_id=history_id,\n virtual_folders=self.woolnote_config.virtual_folders,\n single_task_line_ids=set(self.woolnote_config.single_note_line_id.keys()),\n page_header_first_text=page_header_first_text,\n page_header_optional_link_button_name=page_header_link_button_name,\n page_header_optional_link_button_request_dict=page_header_link_request_dict,\n page_header_optional_list_of_warnings=page_header_list_of_warnings)", "def __init__(self, list: List[DiagramView], start_button: StartButtonView):\n super().__init__()\n\n self.__init_ui(list, start_button)", "def init_task(self):\n all_tasks = self.db.get_tasks()\n scroll_parent = Window\n uw = self.ids.upcoming_wrapper\n\n if not all_tasks:\n new_btn = NewButton()\n new_btn.size_hint = [None, None]\n new_btn.size = [\n scroll_parent.width / 1.9,\n scroll_parent.height - (0.45 * scroll_parent.height),\n ]\n new_btn.bind(on_release=self.add_new)\n uw.add_widget(new_btn)\n else:\n for t in all_tasks:\n task = Task()\n task.name = t[1]\n task.details = t[2]\n date, time = t[3].rsplit(\" \", 1)\n x = self.compare_date(date)\n if x == \"today\":\n task.tsk_clr = (0.7, 0.45, 0.1, 0.6)\n elif x == \"past\":\n task.tsk_clr = (0.7, 0, 0, 0.6)\n\n task.time = time\n task.date = date\n\n task.size_hint = [None, None]\n task.size = [\n scroll_parent.width / 1.9,\n scroll_parent.height - (0.45 * scroll_parent.height),\n ]\n\n uw.add_widget(task)", "def show_unfinished():\n\n cur = db.get_db().cursor()\n\n create_new_task(cur)\n\n cur.execute('SELECT * FROM todos WHERE completed = false')\n todos = cur.fetchall()\n cur.close()\n\n return render_template(\"index.html\", todos=todos)", "def todos_index_page():\n return render_template(\n template_name_or_list='index.html',\n todos=Todo.index(mongo.db))", "def _create_displaylist(self, scale=1):\n self.renderlist = get_displaylist()\n glNewList(self.renderlist, GL_COMPILE)\n if scale != 1:\n glPushMatrix()\n glScaled(scale, scale, scale)\n self.render()\n if scale != 1:\n glPopMatrix()\n glEndList()", "def todo(self, todo_id):\r\n return tdl.Todo(self, todo_id)", "def create_task(request):\n all_task_list = Todo.objects.all()\n form = TaskForm()\n if request.method == 'POST':\n form = TaskForm(request.POST)\n if form.is_valid():\n # create default todolist\n user = request.user if request.user.is_authenticated else None\n task = Todo(\n description=request.POST['description'],\n content= request.POST['content'],\n tesk_medium= request.POST['tesk_medium'],\n creator=user\n )\n task.save()\n return redirect('lists:alllist')\n else:\n return render(request, 'lists/index.html', {'form': form})\n\n context = {\n 'form': form, \n 
'taskli':all_task_list\n }\n return render(request, 'lists/create_task.html',context )", "def display_list(list=[]):\n\n print(f\"Current List: {list}\")", "def see_tasks(self, widget):\n my_task_list = tasklistwindow.TaskListWindow(self.task_list)", "def setup_lists(self):\n pass", "def view_list():\n # an HTML representation of the user shopping list\n printed_list = user[\"name\"]\n printed_list += \"<form>\"\n printed_list += '<br>'\n printed_list += 'New Item:<br>'\n printed_list += '<input type=\"text\" name=\"newitem\">'\n printed_list += '<br>'\n printed_list += '<input type=\"submit\" value=\"Submit\">'\n printed_list += \"</form>\"\n printed_list += list_to_html(user[\"shopping_list\"])\n\n return printed_list", "def new():\n list_new()", "def _show_task_list_panel(self):\n self.task_list_panel.show()\n self.task_list_panel.load_config()\n # set geometry\n _button_pos = self.task_frame.tasklist_button.pos()\n _button_pos = self.task_frame.mapTo(self, _button_pos)\n _button_height = self.task_frame.tasklist_button.height()\n _glo_pos = self.mapTo(tomaya.GetMayaMainWindowPoint(), _button_pos)\n self.task_list_panel.setGeometry(_glo_pos.x(), _glo_pos.y() + _button_height, self.task_frame.width(), tomaya.GetMayaMainWindowPoint().height()*1/2.0)", "def show_completed():\n\n cur = db.get_db().cursor()\n\n create_new_task(cur)\n\n cur.execute('SELECT * FROM todos WHERE completed = true')\n todos = cur.fetchall()\n cur.close()\n\n return render_template('index.html', todos=todos)", "def index(request):\n return render_to_response('todo/index.html')", "def _init_display(self):\n raise NotImplementedError", "def show_done():\n #conncetion to the database\n conn = sqlite3.connect('todo.db')\n c = conn.cursor()\n\n #if status of task is 0 it means task is completed.\n c.execute(\"SELECT Task_id, Description FROM task WHERE status LIKE 0\")\n result = c.fetchall()\n c.close()\n return render_template(\"show_done.html\", rows=result)", "def open_task_list(self):\n scrollable_task = ScrollView()\n scrollable_task.add_widget(self.get_task_list())\n\n new_task_btn = RightBottomFloatingButton(\n icon='plus',\n md_bg_color=self.app.theme_cls.accent_color,\n elevation_normal=8)\n new_task_btn.bind(on_press=lambda x: self.open_create_task_dialog())\n\n self.task_list_layout.add_widget(scrollable_task)\n self.task_list_layout.add_widget(new_task_btn)\n\n self.task_list.open()", "def todolists(self):\r\n return tdl.Todolists(self)", "def show_tasks():\n\n task = Task(connection=connection, cursor=cursor)\n\n all_tasks = task.get_all_tasks()\n\n context = {\n 'all_tasks': all_tasks\n }\n\n return render_template('pages/tables/tasks.html', **context)", "def create_widgets(self):\n #create description label\n Label(self,\n text = \"Patient Info:\"\n ).grid(row = 0, column = 0, sticky = W)", "def todos_create_page():\n todo = Todo()\n if todo.form_submit():\n todo.update(mongo.db)\n print('Created new TODO: {text}'.format(**todo.doc))\n return redirect('/')\n else:\n return render_template(\n template_name_or_list='todo.html',\n todo=todo,\n handle='Create')", "def show_tasks(self, tasks=None, date_format=None):\n\n\t\tif not tasks:\n\t\t\ttasks = self.tasklist.tasks\n\n\t\tif len(tasks) > 0:\n\n\t\t\ttemplate = '{0:^3} {1:20} {2:^3} {3:20} {4:15} {5:20}'\n\t\t\tprint template.format('\\nID', 'Description', ' Pri', 'Due', 'Created', 'Tags')\n\t\t\tprint template.format('---', '--------------------', '---', '--------------------', '---------------',\n\t\t\t '--------------------')\n\t\t\tfor task in 
tasks:\n\t\t\t\tif task.priority == 'L':\n\t\t\t\t\tpriority = Fore.YELLOW + Style.BRIGHT + task.priority.center(3) + Fore.RESET + Style.NORMAL\n\t\t\t\telif task.priority == 'M':\n\t\t\t\t\tpriority = Fore.BLUE + Style.BRIGHT + task.priority.center(3) + Fore.RESET + Style.NORMAL\n\t\t\t\telif task.priority == 'H':\n\t\t\t\t\tpriority = Fore.RED + Style.BRIGHT + task.priority.center(3) + Fore.RESET + Style.NORMAL\n\t\t\t\telse:\n\t\t\t\t\tpriority = ''\n\n\t\t\t\tif task.due_date is None:\n\t\t\t\t\tdue_date = ''\n\t\t\t\telse:\n\t\t\t\t\tif date_format:\n\t\t\t\t\t\tdue_date = task.due_date.rsplit(' ', 1)[0].ljust(20)\n\t\t\t\t\telse:\n\t\t\t\t\t\tdue_date = (arrow.get(task.due_date, task.due_date_format).humanize()).ljust(20)\n\n\t\t\t\t\tif not task.completed:\n\t\t\t\t\t\ttoday = arrow.now()\n\t\t\t\t\t\tdiff = arrow.get(task.due_date, task.due_date_format) - today\n\t\t\t\t\t\tif diff.days >= 1 and diff.seconds > 0:\n\t\t\t\t\t\t\tdue_date = Fore.CYAN + Style.BRIGHT + due_date + Fore.RESET + Style.NORMAL\n\t\t\t\t\t\telif diff.days >= 0:\n\t\t\t\t\t\t\tdue_date = Fore.BLUE + Style.BRIGHT + due_date + Fore.RESET + Style.NORMAL\n\t\t\t\t\t\telif diff.days <= 0:\n\t\t\t\t\t\t\tdue_date = Fore.RED + Style.BRIGHT + due_date + Fore.RESET + Style.NORMAL\n\n\t\t\t\tif date_format:\n\t\t\t\t\tage = (str(task.creation_date).split()[0]).ljust(15) # drop the time zone\n\t\t\t\telse:\n\t\t\t\t\tage = (arrow.get(task.creation_date, 'MM/DD/YYYY h:mm:ss A ZZ').humanize()).ljust(15)\n\n\t\t\t\tif task.note:\n\t\t\t\t\tdesc = task.task + ' *'\n\t\t\t\telse:\n\t\t\t\t\tdesc = task.task\n\n\t\t\t\tif task.completed:\n\t\t\t\t\tif task.priority:\n\t\t\t\t\t\tpriority = task.priority\n\t\t\t\t\telse:\n\t\t\t\t\t\tpriority = ''\n\t\t\t\t\ttask_id = Fore.WHITE + Style.BRIGHT + Back.WHITE + str(task.id).center(3)\n\t\t\t\t\ttags = str(task.tags) + Fore.RESET + Style.NORMAL + Back.RESET\n\t\t\t\t\tprint template.format(task_id, desc, priority, due_date, age, tags)\n\t\t\t\telse:\n\t\t\t\t\tprint template.format(task.id, desc, priority, due_date, age, task.tags)\n\n\t\t\tprint self.legend\n\t\telse:\n\t\t\tprint('\\nThere are no tasks to display!\\n')", "def add_item_to_list(self, todolist):\n\t\tnote = self.get_all_text_view_text(self.textview_add)\n\t\ttodolist.add_item(note)\n\t\tself.textview_add.get_buffer().set_text('')", "def display_contents(CurrentList):\n\n print(\"========================Start of display_contents() Method*\")\n print(\"The number of items in list are :\" + str(len(CurrentList)))\n print(\"----- Fl.ID--- ||sub_T|| reqStart||Dur ||Start||End\")\n # Flight ID||sub_Time||reqStart||reqDuration||actualStart||actualEnd\")\n for j in range(len(CurrentList)):\n print(str(j) + \": \" + CurrentList[j].showFlightInfo())\n print(\"========================END of display_contents() Method *\")", "def main_menu():\n\tprint(\n\"\"\"\nUsage :-\n$ ./todo add \"todo item\" # Add a new todo\n$ ./todo ls # Show remaining todos\n$ ./todo del NUMBER # Delete a todo\n$ ./todo done NUMBER # Complete a todo\n$ ./todo help # Show usage\n$ ./todo report # Statistics\"\"\")", "def display_simple(self):\n print(\"\") \n print(\"Date: {}\".format(self.date))\n print(\" Task name: {}\".format(self.task_name))\n print(\" Time spent: {} minutes\".format(self.time_spent))\n print(\" Notes: {}\".format(self.notes))\n print(\" Task number: {}\".format(self.task_number))\n print(\"\")", "def _add_todo_items(self):\n\n todo_list = ToDoList(day=self.day, user=self.user.user.rolllistuser)\n todo_list.save()\n\n items = [\n 
'feed the cats',\n 'drive to work',\n 'read a book',\n 'eat some food',\n ]\n todo_items = []\n for item in items:\n new_item = ToDoItem(\n title=item,\n to_do_list=todo_list,\n priority=1\n )\n new_item.save()\n todo_items.append(new_item)\n return todo_items", "def show_list():\n clear_screen()\n print(\"Here is your list: \")\n\n for index, item in enumerate(shopping_list, start = 1):\n print(\"{}. {}\".format(index, item))\n\n print(\"-\" * 10)", "def index():\n # Displays list of topics.\n q = db.topic\n links=[]\n links.append(dict(header='',\n body=lambda r:\n A('Edit', _href=URL('main', 'edit_topic', args=[r.id]))\n if can_edit_topic(r.id) else None\n ))\n links.append(dict(header='',\n body=lambda r:\n A('Delete', _href=URL('main', 'delete_topic', args=[r.id]))\n if can_delete_topic(r.id) else None\n ))\n grid = SQLFORM.grid(q,\n csv=False, details=False,\n links=links,\n create=False,\n editable=False,\n deletable=False,\n maxtextlength=48,\n )\n add_button = A(icon_add, 'Add topic', _class='btn btn-success',\n _href=URL('main', 'create_topic')) if can_create_topic() else None\n return dict(grid=grid, add_button=add_button)", "def show_activities(self): \n database = Database('data/database.db')\n activities = database.read_activities()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name] for item in activities],\n pageTitle = \"Activités\",\n tableTitle = \"Liste de toutes les activités\",\n ths = [\"Numéro\", \"Nom\"]\n )", "def page_list_notes(self, no_history=False):\n\n list_taskid_desc = self.task_store.sort_taskid_list_descending_lamport()\n title = \"woolnote - all notes\"\n page_header_first_text = \"all notes\"\n\n if no_history:\n history_id = self.save_history([])\n else:\n history_id = self.save_history([\"action\"], alt_task_store_name=None)\n\n page_header_list_of_warnings = None\n page_header_small_text = None\n\n if self.error_msg_queue_list:\n page_header_list_of_warnings = self.helper_convert_msg_queue_list_to_list_for_output()\n else:\n try:\n # TODO: use asn library?\n # sha256_fp = read_pem_cert_fingerprint(SSL_CERT_PEM_PATH)\n page_header_small_text = config.SSL_CERT_PEM_FINGERPRINT\n except:\n page_header_small_text = \"cannot get ssl cert sha256\"\n\n return html_page_templates.page_list_notes_template(list_taskid_desc=list_taskid_desc,\n self_sess_action_auth=self.sess_action_auth, title=title,\n history_back_id=history_id, primary_task_store=self.task_store,\n virtual_folders=self.woolnote_config.virtual_folders,\n single_task_line_ids=set(self.woolnote_config.single_note_line_id.keys()),\n page_header_first_text=page_header_first_text,\n page_header_optional_small_second_text=page_header_small_text,\n page_header_optional_list_of_warnings=page_header_list_of_warnings)", "def get_list_display(self, request):\n list_display = self.list_display\n\n if 'admin_created' not in list_display:\n list_display += ('admin_created', )\n if 'admin_modified' not in list_display:\n list_display += ('admin_modified', )\n\n return list_display", "def create_list_menu(self):\n films = []\n try:\n for root, dirs, files in os.walk(str(self.initial_directory)):\n for file in files:\n if file.endswith(('.mkv', '.avi', '.mp4')) and not file.endswith(('.sample.mkv', '.sample.avi', 'sample.mp4')):\n films.append(os.path.join(root, file))\n except TypeError:\n raise(\"Error\")\n films.sort(key=lambda s: s.lower().split('/')[-1])\n [self.create_film_entry(film) for film in films]\n 
self.menubar.add_cascade(label=\"All Films\", menu=self.list_menu)", "def __init__(self, text='', _id=None):\n self._id = _id\n self.text = text\n self.form = TodoForm()", "def create_list(self, name) -> TodoList:\n t = TodoList(name, [])\n if name in self.lists:\n raise HTTPException(409, f\"TODO list with name {name} already exists\")\n self.lists[self.__to_key(name)] = t\n return t", "def showList(parent,header,items,maxItems=0,title=''):\r\n numItems = len(items)\r\n if maxItems <= 0: maxItems = numItems\r\n message = string.Template(header).substitute(count=numItems)\r\n message += '\\n* '+'\\n* '.join(items[:min(numItems,maxItems)])\r\n if numItems > maxItems:\r\n message += _('\\n(And %d others.)') % (numItems - maxItems,)\r\n return askStyled(parent,message,title,wx.OK)", "def fill_sequence_list(self):\n data = Data()\n sequence_list = self.ui.sequence_list\n\n model = QStandardItemModel(sequence_list)\n sequences = data.get_sequence_list()\n if len(sequences) > 0:\n for sequence in sequences:\n note = \"\"\n if sequence[4] is not None:\n note = \" - %s\" % sequence[4]\n item = QStandardItem(\"%s%s\" % (sequence[1], note))\n item.setEditable(False)\n item.setData(str(sequence[0]), QtCore.Qt.UserRole)\n model.insertRow(0, item)\n sequence_list.setModel(model)", "def view_tasks(self):\n if self.db_link.get_num_tasks() > 0:\n self.print_tasks()\n else:\n self.display.print_error('You don\\'t have any tasks! Add a task by calling `python-todo -a`.')", "def view_help_info(self):\n try:\n with open('README.md', 'r') as f:\n data = f.readlines()\n log = Tk.Listbox(self, height=30, width=100)\n for i, item in enumerate(data):\n log.insert(i+1, data[i])\n log.grid(row =0, column=0, sticky='ew')\n yscroll = Tk.Scrollbar(command=log.yview,\n orient=Tk.VERTICAL)\n yscroll.grid(row=0, column=1, sticky='ns')\n except Exception, ex:\n logging.error(ex)\n traceback.print_exc()\n except Exception, ex:\n logging.error(ex)\n traceback.print_exc()", "async def todo(self, ctx):\n\n cursor = await db.execute(\"Select Thing from Todo where MemberID = ?\", (ctx.author.id,))\n result = await cursor.fetchall()\n\n if not result:\n return await send_embed(ctx, \"You do not have anything on your todo list.\", negative=True)\n\n result = [i[0] for i in result]\n\n embeds = []\n description = []\n\n for index, string in enumerate(result, start=1):\n\n description.append(f\"{index}. 
{string}\")\n\n if index % 10 == 0 or index == len(result):\n embed = discord.Embed(\n colour=discord.Colour.blue(),\n description=\"\\n\".join(description)\n )\n embed.set_author(name=str(ctx.author), icon_url=str(ctx.author.avatar_url))\n embeds.append(embed)\n description = []\n\n await self.bot.paginate(ctx, embeds)", "def init_gui(self):\n # Choose a layout.\n main_vb = QtGui.QVBoxLayout(self)\n\n # Add a list or tree view.\n self.list_view = QtGui.QListWidget()\n\n # Add the buttons.\n load_btn = QtGui.QPushButton('Load Selected')\n cancel_btn = QtGui.QPushButton('Cancel')\n load_btn.clicked.connect(self.update_list_view)\n cancel_btn.clicked.connect(self.close)\n\n # Connect the list/tree view with a method appropriate for user interaction.\n self.list_view.currentItemChanged['QListWidgetItem*', 'QListWidgetItem*'].connect(self.set_current_name)\n self.list_view.itemChanged['QListWidgetItem*'].connect(self.change_name)\n\n # Add the widgets to the layout.\n btn_hb = QtGui.QHBoxLayout()\n btn_hb.addWidget(load_btn)\n btn_hb.addWidget(cancel_btn)\n main_vb.addWidget(self.list_view)\n main_vb.addLayout(btn_hb)\n\n # Show the GUI.\n self.setGeometry(300, 300, 450, 300)\n self.setWindowTitle('Hello World')\n img_icon = 'C:/Users/caj150430/code/so_much_win.png'\n self.setWindowIcon(QtGui.QIcon(img_icon))\n self.show()", "def ConstructList(title, items):\n buf = cStringIO.StringIO()\n fmt = 'list[title=\"{title}\",always-display-title]'.format(title=title)\n resource_printer.Print(sorted(set(items)), fmt, out=buf)\n return buf.getvalue()", "def interactive(todofile):\n tmpfile = tempfile.NamedTemporaryFile(suffix='.txt', prefix='todo-',\n delete=False)\n print >> tmpfile\n print >> tmpfile , '# Todo items should be formed as <date> -- <todo>'\n print >> tmpfile , '# The date field is optional.'\n print >> tmpfile , '# Lines starting with # are ignored.'\n tmpfile.close()\n subprocess.call(['sensible-editor', tmpfile.name])\n with open(tmpfile.name) as writtenfile:\n add_items(todofile, writtenfile.readlines())\n os.remove(tmpfile.name)", "def changelist_view(self, request, extra_context=None):\n if request.user.has_perm('deflect.list_all'):\n self.list_filter = self._list_filter + ('creator__username',)\n self.list_display = self._list_display + ('creator',)\n else:\n self.list_filter = self._list_filter\n self.list_display = self._list_display\n return super(ShortURLAdmin, self).changelist_view(request, extra_context=extra_context)", "def list():\n data = getInstaData()\n return render_template(\"list.html\", data=data)", "def new():\n if request.method == \"POST\":\n #get parameters form form\n Description = request.form[\"Description\"]\n duedate = request.form[\"Due_date\"]\n\n #conncetion to the database\n conn = sqlite3.connect(\"todo.db\")\n c = conn.cursor()\n\n #create table if it is not EXISTS\n conn.execute(\"CREATE TABLE IF NOT EXISTS task (Task_id INTEGER PRIMARY KEY, Description char(100), Created_date DATE, Due_date DATE, Modified_date DATE, deleted bool, status bool)\")\n\n #insert values into databse\n c.execute(\"INSERT INTO task (Description, Created_date, Due_date, Modified_date, deleted, status) VALUES (?,Date('now') ,?, Date('now'), 0, 1)\", (Description,duedate))\n new_id = c.lastrowid\n conn.commit()\n c.close()\n\n return redirect(url_for ('todo_list'))\n else:\n #return template which shows all tasks\n return render_template(\"new_task.html\")", "def index():\n # Displays list of topics.\n q = db.topic\n links=[]\n if auth.user_id:\n links.append(dict(header='',\n 
body=lambda r: A('Edit', _href=URL('default', 'edit_topic', args=[r.id]))))\n links.append(dict(header='',\n body=lambda r: A('Delete', _href=URL('default', 'delete_topic', args=[r.id]))))\n grid = SQLFORM.grid(q,\n csv=False, details=False,\n links=links,\n create=False,\n editable=False,\n deletable=False,\n maxtextlength=48,\n )\n add_button = A(icon_add, 'Add topic', _class='btn btn-success',\n _href=URL('default', 'create_topic')) if auth.user_id else None\n return dict(grid=grid, add_button=add_button)", "def _show_ingredient_list(self):\n if self._ingredients_view:\n self._ingredients_view.destroy()\n\n username = self.food_service.get_user().get_username()\n ingredients = self.food_service.list_added_ingredients(username, expire=True)\n self._ingredients_view = IngredientsView(\n self._ingredients_frame,\n ingredients,\n self._handle_mark\n )\n\n self._ingredients_view.pack()", "def init_grid(self):\n self.headlabel.collection = self.books\n self.headlabel.set_label_text()\n self.warnlabel.set_label_text('Welcome to the Reading Tracker 2.0!')\n self.building_grid(None, 'Author')", "def add_item(self):\n\n self.todo_scroll_cell.add_item(f'{self.new_todo_textbox.get()}')", "def setup_list(self) -> None:\n style = self.current_line.next_line.line_parts[0].style.copy()\n\n if self.list_style is None:\n self.list_style = {}\n elif isinstance(self.list_style, str):\n self.list_style = process_style(self.list_style, self.pdf)\n\n if not isinstance(self.list_style, dict):\n raise TypeError(\n 'list_style must be a str or a dict. Value: {}'\n .format(self.list_style)\n )\n\n style.update(self.list_style)\n line_part = PDFTextLinePart(style, self.fonts)\n\n self.current_line_used_fonts.add(\n (line_part.state.font_family, line_part.state.font_mode)\n )\n\n if self.list_indent is None:\n self.list_indent = line_part.get_word_width(str(self.list_text))\n elif not isinstance(self.list_indent, (float, int)):\n raise TypeError(\n 'list_indent must be int or float. 
Value: {}'\n .format(self.list_indent)\n )\n\n self.list_state = line_part.state\n self.current_line.max_width -= self.list_indent", "def read_todo_file(self):\n\n todo = []\n in_progress = []\n done = []\n if os.path.exists('TODO.txt'):\n todo_fp = open('TODO.txt', 'r')\n state = 0\n line = todo_fp.readline()\n while line:\n line = line.strip()\n if state == 0:\n if line == '__IN_PROGRESS__':\n state = 1\n elif len(line) > 1:\n todo.append(line)\n elif state == 1:\n if line == '__DONE__':\n state = 2\n elif len(line) > 1:\n in_progress.append(line)\n elif state == 2:\n if len(line) > 1:\n done.append(line)\n line = todo_fp.readline()\n todo_fp.close()\n self.todo_scroll_cell.add_item_list(todo)\n self.in_progress_scroll_cell.add_item_list(in_progress)\n self.done_scroll_cell.add_item_list(done)", "def __init__(self):\n this = _libsbml.new_ListWrapperModelCreator()\n try: self.this.append(this)\n except: self.this = this", "def view_task(self, task):\n self.layout.clear_widgets()\n self.add_AdDescription(task.url, task.description)\n self.add_CheckBox(task.checkbox_rating)\n self.add_Slider(task.slider_rating)\n self.add_Toggle_Button(task.toggle_button_rating)\n self.layout.add_widget(TextInput(hint_text = 'Add a comment...', multiline = True))\n self.add_NextButton()\n self.add_Exit_Button()", "def fill_ui(self):\n self.review_type_widget.set_review_type(self.review_type)\n\n if self.reviewer:\n self.reviewer_name_widget.setText(self.reviewer.name)\n\n if self.task:\n self.task_name_widget.setText(\n \"%s (%s) (%s)\"\n % (\n self.task.name,\n \" | \".join(\n [self.task.project.name]\n + [parent_task.name for parent_task in self.task.parents]\n ),\n self.task.id,\n )\n )\n\n # from stalker import Version\n # version = Version.query.filter(Version.task == self.task).order_by(Version.date_created.desc()).first()\n #\n # if version:\n # self.latest_version_widget.version = version", "def __init__(self: object) -> None:\n super().__init__()\n self.title(\"dnazip\")\n self.configure(bg='#ebebeb')\n self.create_main()\n self.create_menu()\n self.create_buttons()\n self.file = None", "def _initialize(self):\n self._frame = ttk.Frame(master=self._root)\n self._ingredients_frame = ttk.Frame(master=self._frame)\n\n self._create_header()\n self._show_ingredient_list()\n self._create_footer()\n\n self._ingredients_frame.grid(row=1, column=1, columnspan=2)\n self._frame.grid_columnconfigure(1, weight=1, minsize=250)", "def index(request):\n\n queryset_list = Todo.objects.all() #.order_by(\"-timestamp\")\n page = request.GET.get('page', 1)\n\n paginator = Paginator(queryset_list, 2)\n try:\n queryset = paginator.page(page)\n except PageNotAnInteger:\n queryset = paginator.page(1)\n except EmptyPage:\n queryset = paginator.page(paginator.num_pages)\n\n context = {\n \"taskli\": queryset, \n }\n return render(request, \"lists/task_list.html\", context)", "def _createView(self):\n\n items = []\n\n if self.showProgressBar:\n items.append(Item('progress', show_label=False,\n editor=ProgressEditor(callback=self._seek)))\n\n # Controls\n items.append(\n alignCenter(\n Item('backwardButton', style='custom',\n enabled_when='not object.running and object.mainloopRunning '\n +'and object.sensors and object.iteration > 1'),\n Item('runButton', style='custom',\n enabled_when='object.pause and not object.done'),\n Item('pauseButton', style='custom',\n enabled_when='not (object.pause or object.done)'),\n Item('stepButton', style='custom',\n enabled_when='object.pause and not object.done'),\n show_labels=False,\n 
orientation='horizontal'\n ))\n\n # Repeat button and pause target buttons\n items.append(\n alignCenter(\n Item('repeatButton', show_label=False,\n enabled_when='not object.running and object.mainloopRunning '\n 'and object.iteration > 0'),\n Item('nextTargetButton', show_label=False,\n editor=ButtonEditor(label_value='targetButtonLabel'),\n enabled_when='not object.running and object.mainloopRunning '\n 'and object.pauseTarget'),\n Item('customTargetButton', show_label=False,\n enabled_when='not object.running and object.mainloopRunning')\n ))\n\n # Speed control\n items.append(Item('speed', style='custom', show_label=False,\n editor=EnumEditor(cols=1, values={\n 1 : '1: Slow (update on every iteration)',\n 10 : '2: Medium (update every 10 iterations)',\n 100 : '3: Fast (update every 100 iterations)'\n })\n ))\n\n\n items.extend([\n Group(\n Item('pauseAtNextStep'),\n show_left=False\n ),\n alignLeft(\n Item('stopButton', show_label=False, enabled_when='object.iteration')\n )\n ])\n\n self.traits_view = View(*items)", "def init_editmenu(self):\n self.menubar[\"editmenu\"] = Menu(self.menubar[\"menubar\"], tearoff=0)\n self.menubar[\"editmenu\"].add_command(label=\"Undo\", command=todo)\n self.menubar[\"editmenu\"].add_separator()\n self.menubar[\"editmenu\"].add_command(label=\"Cut\", command=todo)\n self.menubar[\"editmenu\"].add_command(label=\"Copy\", command=todo)\n self.menubar[\"editmenu\"].add_command(label=\"Paste\", command=todo)\n self.menubar[\"editmenu\"].add_command(label=\"Delete\", command=todo)\n self.menubar[\"editmenu\"].add_command(label=\"Select All\", command=todo)\n self.menubar[\"menubar\"].add_cascade(\n label=\"Edit\", menu=self.menubar[\"editmenu\"])", "def build_gui(self):\n tip = _('Double-click on a row to edit the selected child.')\n self.set_tooltip(tip)\n top = Gtk.TreeView()\n titles = [('', NOSORT, 50,),\n (_('Child'), 1, 250),\n (_('Birth Date'), 3, 100),\n ('', 3, 100),\n (_('Death Date'), 5, 100),\n ('', 5, 100)]\n self.model = ListModel(top, titles, event_func=self.edit_person)\n return top", "def init_helpmenu(self):\n self.menubar[\"helpmenu\"] = Menu(self.menubar[\"menubar\"], tearoff=0)\n self.menubar[\"helpmenu\"].add_command(label=\"Help Index\", command=todo)\n self.menubar[\"helpmenu\"].add_command(label=\"About...\", command=todo)\n self.menubar[\"menubar\"].add_cascade(\n label=\"Help\", menu=self.menubar[\"helpmenu\"])", "def build_gui(self):\n tip = _('Double-click on a row to edit the selected child.')\n self.set_tooltip(tip)\n top = Gtk.TreeView()\n titles = [('', NOSORT, 50,),\n (_('Child'), 1, 250),\n (_('Birth Date'), 3, 100),\n ('', 3, 100),\n (_('Death Date'), 5, 100),\n ('', 5, 100),\n (_('Spouse'), 6, 250)]\n self.model = ListModel(top, titles, event_func=self.edit_person)\n return top", "def create_todo_list_view(request: HttpRequest) -> Union[HttpResponse, HttpResponseRedirect]:\n if request.method == 'GET':\n form = TodoListForm()\n\n return render(request, 'todo/create_todo_list.html', {'form': form})\n elif request.method == 'POST':\n form = TodoListForm(data=deepcopy(request.POST))\n\n if form.is_valid():\n todo_list = form.save()\n\n return redirect(todo_list.get_absolute_url())\n else:\n return render(request, 'todo/create_todo_list.html', {'form': form})", "def init_blank(self, T):\n self.headings = []\n self.table = {}\n self.rowcount = 0\n for e in T.entries:\n self.headings.append(e.name)\n self.table[e.name] = []", "def show_entries():\n db = get_db()\n cur = db.execute('select id, title, ingredients, steps, tags, 
\\\n url from entries order by id asc')\n entries = cur.fetchall()\n return render_template('show_entries.html', entries=entries)", "def menu_loop(todo_list, save_file_location):\r\n show_hidden = False\r\n selection = 0\r\n invalid_input = False\r\n while selection != 6:\r\n if invalid_input:\r\n invalid_input = False\r\n else:\r\n print_list(save_file_location, todo_list, True, show_hidden)\r\n divider(137 + 17) # Length of prompt statement below\r\n list_status = check_list_status(todo_list)\r\n if list_status == 0: # No Issues\r\n selection = int(clean_input(\"Please enter: 1 for Add Item, 2 for \"\r\n \"Remove Item, 3 for Edit Item, \"\r\n \"4 for Mark Item Complete, \"\r\n \"5 for Toggle Hidden, and 6 for \"\r\n \"Exit, 7 for Concept \"\r\n \"Demonstration\\n\"))\r\n elif list_status == 1: # Empty List - No Remove, Edit, Mark, or Toggle\r\n selection = int(clean_input(\"Please enter: 1 for Add Item, and 6 \"\r\n \"for Exit, 7 for Concept \"\r\n \"Demonstration\\n\"))\r\n else: # Entirely Hidden List\r\n selection = int(clean_input(\"Please enter: 1 for Add Item, 5 for \"\r\n \"Toggle Hidden, and 6 for Exit, \"\r\n \"7 for Concept Demonstration\\n\"))\r\n # Uses the clean_input function above to get a number from the\r\n # user, converting it to an int so a decimal won't return an\r\n # invalid input in the following steps\r\n print(\"\") # Blank Print statement to add an extra blank line after\r\n # user input before displaying response\r\n if selection == 1: # Add Item - modify the list variable, then save\r\n # to file\r\n add_item(todo_list)\r\n elif selection == 2: # Remove Item - modify the list variable, then\r\n # save to file\r\n if list_status == 0:\r\n remove_item(todo_list)\r\n elif list_status == 2:\r\n print(\"Invalid Command: The Todo List has no visible items \"\r\n \"to remove\")\r\n else:\r\n print(\"Invalid Command: The Todo List has no items to remove\")\r\n elif selection == 3: # Edit Item - modify the list variable, then save\r\n # to file\r\n if list_status == 0:\r\n edit_item(todo_list)\r\n elif list_status == 2:\r\n print(\"Invalid Command: The Todo List has no visible items \"\r\n \"to edit\")\r\n else:\r\n print(\"Invalid Command: The Todo List has no items to edit\")\r\n elif selection == 4: # Mark Item Complete - modify the list variable,\r\n # then save to file\r\n if list_status == 0:\r\n mark_complete(todo_list)\r\n elif list_status == 2:\r\n print(\"Invalid Command: The Todo List has no visible items \"\r\n \"to mark complete\")\r\n else:\r\n print(\"Invalid Command: The Todo List has no items to mark \"\r\n \"complete\")\r\n elif selection == 5: # Show Hidden - modify the list variable, then\r\n # save to file\r\n if list_status == 0 or list_status == 2:\r\n if show_hidden:\r\n print(\"No longer showing hidden items\")\r\n show_hidden = False\r\n else:\r\n print(\"Now showing hidden items\")\r\n show_hidden = True\r\n else:\r\n print(\"Invalid Command: The Todo List has no items to show or \"\r\n \"hide\")\r\n elif selection == 6: # Exit Program\r\n print(\"Now Closing\")\r\n elif selection == 7: # Extra section to demonstrate proficiency with\r\n # topics covered in class - Sprint 1\r\n concept_demonstration()\r\n else:\r\n invalid_input = True\r\n print(\"Invalid Input\\nPlease Try Again\")", "def list_items(todofile, opt, args):\n def filt(item):\n \"\"\"Filter function based on options.\"\"\"\n result = (((item.done and opt.list_complete) or\n (not item.done and not opt.hide_incomplete)) and\n ((item.time is None) or\n ((opt.start_date is None or 
opt.start_date < item.time) and\n item.time < opt.end_date)))\n for arg in args:\n result = result and (re.search(arg, item.text) != None)\n return result\n\n for item in filter(filt, todofile.fetch_items()):\n list_str = ['']\n if (item.done):\n list_str.append('X')\n elif (item.time is not None and item.time < datetime.datetime.now()):\n list_str.append('!')\n else:\n list_str.append('*')\n if(opt.list_id):\n list_str.append('{0:<3d}'.format(item.itemid))\n if(opt.list_date and item.time is not None):\n list_str.append(item.time.strftime('%c') + ' --')\n list_str.append(item.text)\n print ' '.join(list_str)", "def create_view(self):\n title_label = Label(self, text='Upload, Preview, Describe and Visualize',\n fg='blue', font=('Arial', 16))\n title_label.pack(fill=BOTH, expand=True)\n select_file_button = Button(self, background='White', text='Select Data File [.csv, .xlsx, .xls, .json, .txt]',\n command=self.start_upload)\n select_file_button.pack(padx=5, pady=10)", "def dashboard_showall():\n tasks = Task.query.all()\n return render_template('home/taskshowall/dashboard_showall.html',\n tasks=tasks, title=\"Tasks\")", "def as_list_html(queryset, list_title=None):\n node = ModelListNode(queryset)\n return node.render(Context({'title':list_title}))", "def show():\n return render_template(\n 'listUsers.html',\n title='List Users',\n message='These are the users in our system'\n )", "def __init__(self):\n self.liste = []", "def home():\n\n lst = item_list()\n return render_template('index.html', sell_flag=0, items=lst)", "def view_tasks():\n task_list = []\n incomplete_task_list = Tasks.objects.filter(is_complete=False)\n for task in incomplete_task_list:\n tasks = [] #create data structure\n tasks.append(task.id) #add ID \n tasks.append(task.task_text) #add text\n task_list.append(tasks) #append data structure\n\n return task_list", "def __init__(self, list_of_entry_names, screen_size, width=100, height=30, x_offset=0, y_offset=0):\n self.entry_names = list_of_entry_names\n self.screen_size = screen_size\n self.x_offset = x_offset\n self.y_offset = y_offset\n self.main_list = ListObj(list_of_entry_names, screen_size, width=width, height=height, x_offset=x_offset,\n y_offset=y_offset)\n self.entry_value_map = dict()\n for _ in list_of_entry_names:\n self.entry_value_map[_] = 0\n self.values_list = None\n self.update_values()", "def create_widgets( self ):", "def empty_list ( self ):\n control = ImageControl( self.control, \n bitmap_cache( 'list_editor', False ),\n -1, self.popup_empty_menu ) \n control.is_empty = True\n proxy = ListItemProxy( self.object, self.name, -1, None, None )\n pcontrol = wx.StaticText( self.control, -1, ' (Empty List)' )\n pcontrol.proxy = control.proxy = proxy\n self.reload_sizer( [ ( control, pcontrol ) ] )", "def add_new_item():\n\n lst = item_list()\n return render_template('index.html', sell_flag=1, items=lst)", "def add_item(todo_list):\r\n text = input(\"Please enter the name of the new item\\n\")\r\n priority = check_priority_overlap(\r\n int(clean_input(\"Please enter the priority of this item\")), todo_list)\r\n # group = int(clean_input(\"Please enter the group number of this item\"))\r\n group = 0 # Set the group value to zero, group system NYI\r\n visible = True\r\n todo_list.insert(0, ListItem(text, priority, group, visible)) # Join\r\n # the inputs to be added to the overall list\r\n return", "def _default(self):\n\n self.app.render(infoNetwork.all())" ]
[ "0.66892177", "0.6522817", "0.637842", "0.62759596", "0.6237733", "0.6173121", "0.61659193", "0.602686", "0.59981143", "0.59325516", "0.5924609", "0.59100825", "0.5881322", "0.58435386", "0.58121586", "0.5809451", "0.5808901", "0.58015496", "0.5759406", "0.573308", "0.57133734", "0.57008964", "0.5697938", "0.56924486", "0.56597775", "0.56430894", "0.5638119", "0.55978495", "0.5587924", "0.557186", "0.55710006", "0.5555595", "0.55355495", "0.5522524", "0.5506854", "0.54802006", "0.5466248", "0.54658216", "0.54560024", "0.54480916", "0.5440684", "0.5397807", "0.53847784", "0.53809994", "0.5370203", "0.5369959", "0.53601825", "0.53386766", "0.5336524", "0.5320301", "0.53152156", "0.5289999", "0.52680683", "0.52633744", "0.5250664", "0.52474153", "0.5236517", "0.52341795", "0.52228326", "0.52217466", "0.5217814", "0.5216054", "0.52102214", "0.52034587", "0.51848006", "0.51802236", "0.5179741", "0.5166325", "0.5161229", "0.5160251", "0.51522803", "0.5140008", "0.51257384", "0.511952", "0.5112729", "0.51024514", "0.50994617", "0.50978523", "0.5092994", "0.5086805", "0.50757694", "0.5074575", "0.50660723", "0.50629574", "0.5054213", "0.5053149", "0.50514936", "0.5050562", "0.5049276", "0.50338244", "0.501973", "0.50193536", "0.5017796", "0.5016976", "0.5015035", "0.50148255", "0.5011054", "0.50035834", "0.50003797", "0.4994719" ]
0.62428325
4
This initializes the DotStars object by allocating the pixel buffers and creating an SPI object. The start and end frames for the SPI communication are created, and the LEDs are cleared.
def __init__(self, leds): self.ledcount = leds # create a buffer self.buffersize = self.ledcount * 4 self.buffer = bytearray(self.ledcount * 4) self.emptybuffer = bytearray(self.ledcount * 4) for i in range(0, self.buffersize, 4): self.emptybuffer[i] = 0xff self.emptybuffer[i + 1] = 0x0 self.emptybuffer[i + 2] = 0x0 self.emptybuffer[i + 3] = 0x0 # Start frame and endframe for the SPI communication (end frame is not # needed) self.startframe = bytes([0x00, 0x00, 0x00, 0x00]) self.endframe = bytes([0xff, 0xff, 0xff, 0xff]) # initialize SPI (needs to be at 45 MHz in order to maximize the speed. # This is the limiting factor for the system's speed) self.spi = SPI(1, SPI.MASTER, baudrate=45000000, polarity=0, phase=0, bits=8, firstbit=SPI.MSB) self.clearleds()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, spi, dc, rst, led):\n self._spi = spi\n self._spi.open()\n self._spi.set_mode(0)\n self._spi.set_clock_frequency(4000000)\n\n self._dc = dc\n self._rst = rst\n self._led = led\n self._enabled = False", "def SPIsetup(self):\n self.writecmd(0x01,0x10,0,self.data); #SPI/SETUP", "def __init__ ( self, width, height, spi=None, spiMosi= None, spiDC=None, spiCS=None, spiReset=None, spiClk=None ):\n self._init_config(width, height, spi, spiMosi, spiDC, spiCS, spiReset, spiClk)", "def init(\n baudrate=1000000, bits=8, mode=0, sclk=\"pin13\", mosi=\"pin15\", miso=\"pin14\"\n ):\n utils.print_for_unimplemented_functions(SPI.init.__qualname__)\n telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_SPI)", "def __init__(self, spi, width, height, rst, dc, cs, backlight=None,\n xstart=-1, ystart=-1):\n self.width = width\n self.height = height\n self.spi = spi\n\n self.rst = rst\n self.dc = dc\n self.cs = cs\n self.backlight = backlight\n\n self.cs.init(self.cs.OUT, value=1)\n self.dc.init(self.dc.OUT, value=0)\n if self.rst is not None:\n self.rst.init(self.rst.OUT, value=0)\n\n self._buf = bytearray(_BUFFER_SIZE * 2)\n # default white foregraound, black background\n self._colormap = bytearray(b'\\x00\\x00\\xFF\\xFF')\n\n if xstart >= 0 and ystart >= 0:\n self.xstart = xstart\n self.ystart = ystart\n elif (self.width, self.height) == (240, 240):\n self.xstart = 0\n self.ystart = 0\n elif (self.width, self.height) == (135, 240):\n self.xstart = 52\n self.ystart = 40\n else:\n raise ValueError(\n \"Unsupported display. Only 240x240 and 135x240 are supported \"\n \"without xstart and ystart provided\"\n )\n\n self.init_pins()\n if self.rst is not None:\n self.reset()\n else:\n self.soft_reset()\n self.init()", "def __init__(self, config):\n spi = SPI(-1, baudrate=config.baudrate,\n sck=config.sck, mosi=config.mosi, miso=config.miso)\n self._epd = epaper2in9.EPD(spi, config.cs, config.dc,\n config.rst1, config.busy)\n self._epd.init()\n self._buffer = Buffer(epaper2in9.EPD_WIDTH, epaper2in9.EPD_HEIGHT)", "def _init_config(self, width, height, spi=None, spiMosi= None, spiDC=None, spiCS=None, spiReset=None, spiClk=None):\n self._spi = spi\n self._spi_mosi = spiMosi\n self._spi_dc = spiDC\n self._spi_cs = spiCS\n self._spi_reset = spiReset\n self._spi_clk = spiClk\n\n self.width = width\n self.height = height", "def init_buffer(self):\n \n self.shape.buf = [pi3d.Buffer(self.shape, self.verts, self.texcoords, self.inds, self.norms)]\n self.shape.set_draw_details(self.shader, [self.spritesheet.img])", "def init_point_buffer(self, configurations={}):\n\n # initialize buffer configuration to indicate that the\n # buffers are not setup in case this function got a problem\n self.buffer_configuration = None\n\n # create new buffer configuration\n buffer_configurations = {}\n for name, configuration in configurations.items():\n buffer_configurations[name] = self._init_plot_buffer(configuration)\n\n self.buffer_configuration = buffer_configurations", "def __init__(self, commands: dict):\n self.__commands = commands\n\n # Wait times (s).\n self.WT_PIN_TOGGLE = 0.2\n self.WT_STATE_LOOKUP = 0.1\n\n # GPIO pins.\n self.RST_PIN = 17\n self.DC_PIN = 25\n self.CS_PIN = 8\n self.BUSY_PIN = 24\n\n # Set GPIO pins.\n RPi.GPIO.setmode(RPi.GPIO.BCM)\n RPi.GPIO.setwarnings(False)\n RPi.GPIO.setup(self.RST_PIN, RPi.GPIO.OUT)\n RPi.GPIO.setup(self.DC_PIN, RPi.GPIO.OUT)\n RPi.GPIO.setup(self.CS_PIN, RPi.GPIO.OUT)\n RPi.GPIO.setup(self.BUSY_PIN, RPi.GPIO.IN)\n\n # SPI device.\n self.__spi = spidev.SpiDev(0, 
0)\n\n # Set SPI device.\n self.__spi.max_speed_hz = 2000000\n self.__spi.mode = 0b00", "def initiate():\n\n log = \"Initiate the SPI communication of the OPC-N3\"\n logger.debug(log)\n\n time.sleep(1)\n log = \"Sending bytes to the sensor...\"\n logger.debug(log)\n spi.writebytes([0x5A, 0x01])\n reading = spi.readbytes(3)\n log = \"Data read after sending bytes are: \" + str(reading)\n logger.debug(log)\n time.sleep(wait_between_bytes)\n\n log = \"Sending bytes to the sensor...\"\n logger.debug(log)\n spi.writebytes([0x5A, 0x03])\n reading = spi.readbytes(9)\n log = \"Bytes read after sending bytes are: \" + str(reading)\n logger.debug(log)\n time.sleep(wait_between_bytes)\n\n # SPI conncetion\n log = \"Sending bytes to the sensor...\"\n logger.debug(log)\n spi.writebytes([0x5A, 0x02, 0x92, 0x07])\n reading = spi.readbytes(2)\n log = \"Bytes read after sending bytes are: \" + str(reading)\n logger.debug(log)\n time.sleep(wait_between_bytes)\n\n return", "def __init__(self, stencil_coefs, loffset, roffset):\n self.stencil_coefs = stencil_coefs\n self.loffset = loffset\n self.roffset = roffset", "def spi_controller(\n # ---[ Module Ports]---\n glbl, # global interface, clock, reset, etc.\n spibus, # external SPI bus\n # optional ports\n fifobus=None, # streaming interface, FIFO bus\n mmbus=None, # memory-mapped bus, contro status access\n cso=None, # control-status object\n \n # ---[ Module Parameters ]---\n include_fifo=True, # include aan 8 byte deep FIFO\n):\n clock, reset = glbl.clock, glbl.reset\n if cso is None:\n cso = spi_controller.cso()\n\n # -- local signals --\n ena = Signal(False)\n clkcnt = Signal(modbv(0, min=0, max=2**12))\n bcnt = Signal(intbv(0, min=0, max=8))\n\n # separate tx and rx shift-registers (could be one in the same)\n treg = Signal(intbv(0)[8:]) # tx shift register\n rreg = Signal(intbv(0)[8:]) # rx shift register\n\n x_sck, x_ss, x_mosi, x_miso = Signals(bool(0), 4)\n\n # internal FIFO bus interfaces\n # external FIFO side (FIFO to external SPI bus)\n itx = FIFOBus(size=fifobus.size, width=fifobus.width)\n # internal FIFO side (FIFO to internal bus)\n irx = FIFOBus(size=fifobus.size, width=fifobus.width)\n \n states = enum('idle', 'wait_hclk', 'data_in', 'data_change',\n 'write_fifo', 'end')\n state = Signal(states.idle)\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # memory- mapped registers\n # add the peripheral's regfile to the bus (informational only)\n # @todo: the automatic building of the register files is incomplete\n if mmbus is not None:\n # the register-file (rf) will drive all the cso signals\n rf = cso.get_register_file()\n mmbus.add(rf, 'spi')\n\n # FIFO for the wishbone data transfer\n if include_fifo:\n fifo_fast.debug = spi_controller.debug\n fifo_tx_inst = fifo_fast(reset, clock, itx)\n fifo_rx_inst = fifo_fast(reset, clock, irx)\n\n @always_comb\n def rtl_assign():\n cso.tx_fifo_count.next = itx.count\n cso.rx_fifo_count.next = irx.count\n\n if clkcnt > 0:\n ena.next = False\n else:\n ena.next = True\n\n clock_counts = tuple([(2**ii)-1 for ii in range(13)])\n\n @always(clock.posedge)\n def rtl_clk_div():\n if cso.enable and clkcnt != 0 and state != states.idle:\n clkcnt.next = (clkcnt - 1)\n else:\n clkcnt.next = clock_counts[cso.clock_divisor]\n\n @always_seq(clock.posedge, reset=reset)\n def rtl_state_and_more():\n \"\"\"\n Designed to the following timing diagram\n\n SCK CPOL=0 ______/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\ \n CPOL=1 
------\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/ \n SS ---\\_______________________________________________________________________ \n CPHA=0 MOSI ...|.0....|.1.....|.2.....|.3.....|.4.....|.5.....|.6.....|.7.....|.0.....| \n MISO ...|.0....|.1.....|.2.....|.3.....|.4.....|.5.....|.6.....|.7.....|.0.....| \n CPHA=1 MOSI ...|....0.....|.1.....|.2.....|.3.....|.4.....|.5.....|.6.....|.7.....|.0...\n MISO ......|.0.....|.1.....|.2.....|.3.....|.4.....|.5.....|.6.....|.7.....|.0...\n \"\"\"\n if not cso.enable:\n state.next = states.idle\n bcnt.next = 0\n treg.next = 0\n \n itx.read.next = False\n irx.write.next = False\n\n x_sck.next = False\n x_ss.next = False\n else:\n if not cso.freeze:\n # ~~~~ Idle state ~~~~\n if state == states.idle:\n bcnt.next = 7\n treg.next = itx.read_data\n x_sck.next = cso.clock_polarity\n irx.write.next = False\n \n if not itx.empty and not irx.full:\n itx.read.next = True\n x_ss.next = False\n if cso.clock_phase: # Clock in on second phase\n state.next = states.wait_hclk\n else: # Clock in on first phase\n state.next = states.data_in\n else:\n itx.read.next = False\n x_ss.next = True\n\n # ~~~~ Wait half clock period for cpha=1 ~~~~\n elif state == states.wait_hclk:\n itx.read.next = False\n irx.write.next = False\n if ena:\n x_sck.next = not x_sck\n state.next = states.data_in\n\n # ~~~~ Clock data in (and out) ~~~~\n elif state == states.data_in:\n itx.read.next = False\n irx.write.next = False\n if ena: # clk div\n x_sck.next = not x_sck\n rreg.next = concat(rreg[7:0], x_miso)\n \n if cso.clock_phase and bcnt == 0:\n irx.write.next = True\n if itx.empty or irx.full:\n state.next = states.end\n else:\n state.next = states.data_change\n else:\n state.next = states.data_change\n\n # ~~~~ Get ready for next byte out/in ~~~~\n elif state == states.data_change:\n itx.read.next = False\n irx.write.next = False\n if ena:\n x_sck.next = not x_sck\n if bcnt == 0: \n if not cso.clock_phase:\n irx.write.next = True\n \n if itx.empty or irx.full:\n state.next = states.end\n else: # more data to transfer\n bcnt.next = 7\n state.next = states.data_in\n itx.read.next = True\n treg.next = itx.read_data\n else:\n treg.next = concat(treg[7:0], intbv(0)[1:])\n bcnt.next = bcnt - 1 \n state.next = states.data_in\n\n # ~~~~ End state ~~~~\n elif state == states.end:\n itx.read.next = False\n irx.write.next = False\n if ena: # Wait half clock cycle go idle\n state.next = states.idle\n\n # Shouldn't happen, error in logic\n else:\n state.next = states.idle\n assert False, \"SPI Invalid State\"\n\n @always_comb\n def rtl_fifo_sel():\n \"\"\"\n The `itx` and `irx` FIFO interfaces are driven by different\n logic depending on the configuration. This modules accesses\n the `itx` read side and drives the `irx` write side. 
The\n `itx` write side is driven by the `cso` or the `fifobus` port.\n The `irx` read side is accessed by the `cso` or the `fifobus`\n port.\n \"\"\"\n if cso.bypass_fifo:\n # data comes from the register file\n cso.tx_empty.next = itx.empty\n cso.tx_full.next = itx.full\n itx.write_data.next = cso.tx_byte\n\n cso.rx_empty.next = irx.empty\n cso.rx_full.next = irx.full\n cso.rx_byte.next = irx.read_data\n cso.rx_byte_valid.next = irx.read_valid\n\n # @todo: if cso.tx_byte write signal (written by bus) drive the\n # @todo: FIFO write signals, same if the cso.rx_byte is accessed\n itx.write.next = cso.tx_write\n irx.read.next = cso.rx_read\n\n else:\n # data comes from external FIFO bus interface\n fifobus.full.next = itx.full\n itx.write_data.next = fifobus.write_data\n itx.write.next = fifobus.write\n\n fifobus.empty.next = irx.empty\n fifobus.read_data.next = irx.read_data\n fifobus.read_valid.next = irx.read_valid\n irx.read.next = fifobus.read\n\n # same for all modes\n irx.write_data.next = rreg\n\n @always_comb\n def rtl_x_mosi():\n # @todo lsb control signal\n x_mosi.next = treg[7]\n\n @always_comb\n def rtl_gate_mosi():\n if cso.loopback:\n spibus.mosi.next = False\n else:\n spibus.mosi.next = x_mosi\n\n @always_comb #(clock.posedge)\n def rtl_spi_sigs():\n spibus.sck.next = x_sck\n if cso.loopback:\n x_miso.next = x_mosi\n else:\n x_miso.next = spibus.miso\n\n @always_comb\n def rtl_slave_select():\n if cso.manual_slave_select:\n spibus.ss.next = ~cso.slave_select\n elif x_ss:\n spibus.ss.next = 0xFF\n else:\n spibus.ss.next = ~cso.slave_select\n\n # myhdl generators in the __debug__ conditionals are not converted.\n if spi_controller.debug:\n @instance\n def mon_state():\n print(\" :{:<8d}: initial state {}\".format(\n now(), str(state)))\n \n while True:\n yield state\n print(\" :{:<8d}: state transition --> {}\".format(\n now(), str(state)))\n \n fbidle = intbv('0000')[4:]\n\n @instance\n def mon_trace():\n while True:\n yield clock.posedge\n ccfb = concat(itx.write, itx.read, irx.write, irx.read)\n if ccfb != fbidle:\n fstr = \" :{:<8d}: tx: w{} r{}, f{} e{}, rx: w{} r{} f{} e{}\"\n print(fstr.format(now(),\n int(itx.write), int(itx.read), int(itx.full), int(itx.empty),\n int(irx.write), int(irx.read), int(irx.full), int(irx.empty),)\n )\n \n @always(clock.posedge)\n def mon_tx_fifo_write():\n if itx.write:\n print(\" WRITE tx fifo {:02X}\".format(int(itx.write_data)))\n if itx.read:\n print(\" READ tx fifo {:02X}\".format(int(itx.read_data)))\n \n @always(clock.posedge)\n def mon_rx_fifo_write():\n if irx.write:\n print(\" WRITE rx fifo {:02X}\".format(int(irx.write_data)))\n \n if irx.read:\n print(\" READ rx fifo {:02X}\".format(int(irx.read_data)))\n\n # return the myhdl generators\n gens = myhdl.instances()\n return gens", "def __init__(self, font=None):\n self._xPosition = 0\n self._yPosition = 0\n self._positionDegree = 0\n self._velocityMag = 0\n self._velocityDegree = 0\n self._accelerationMag = 0\n self._accelerationDegree = 0\n self._thrusters = None\n self._SASmodules = None\n\n if font is None:\n self._font = pg.font.SysFont(\"Futura\", 20)\n else:\n self._font = font", "def __init__(self):\r\n # Check device ID.\r\n chip_id = self._read_byte(_BME280_REGISTER_CHIPID)\r\n if _BME280_CHIPID != chip_id:\r\n raise RuntimeError('Failed to find BME280! 
Chip ID 0x%x' % chip_id)\r\n self._write_register_byte(_BME280_REGISTER_SOFTRESET, 0xB6)\r\n time.sleep(0.5)\r\n self._read_coefficients()\r\n self.sea_level_pressure = 1013.25\r\n \"\"\"Pressure in hectoPascals at sea level. Used to calibrate `altitude`.\"\"\"\r\n # turn on humidity oversample 16x\r\n self._write_register_byte(_BME280_REGISTER_CTRL_HUM, 0x03)\r\n self._t_fine = None", "def __init__(self, spi_rack, module, max_current=50e-3, reset_currents=True):\n self.spi_rack = spi_rack\n self.module = module\n self.span = [np.NaN]*4\n self.currents = [np.NaN]*4\n self.max_current = max_current\n\n for i in range(4):\n self.get_settings(i)\n\n if reset_currents:\n for i in range(4):\n self.change_span(i, S4g_module.range_max_bi)\n self.set_current(i, 0.0)", "def __init__(self, verbose=False):\n self._verbose = verbose\n self._nSrv = 2\n toLog(\"Initializing ...\", True)\n\n # Create servo manager and servos ...\n self.SM = ServoManager(self._nSrv, verbose=verbose)\n self._Servos = []\n self._SPos = array.array('i', [0] *self._nSrv)\n self._SIDs = array.array('b', [SRV_PAN, SRV_TLT])\n self._Servos.append(Servo(board.SERVO_PAN, verbose=verbose))\n self._Servos[SRV_PAN].change_range(board.PAN_RANGE_US, board.PAN_RANGE_DEG)\n self.SM.add_servo(SRV_PAN, self._Servos[SRV_PAN])\n self._Servos.append(Servo(board.SERVO_TLT, verbose=verbose))\n self._Servos[SRV_TLT].change_range(board.TLT_RANGE_US, board.TLT_RANGE_DEG)\n self.SM.add_servo(SRV_TLT, self._Servos[SRV_TLT])\n toLog(\"Servo manager ready\", True)\n\n # Create spectrometer instance\n self.SP = C12880MA(trg=board.TRG, st=board.STA, clk=board.CLK, video=board.VID)\n self.SP.begin()\n self.SP.setIntegrationTime_s(0.01)\n time.sleep_ms(200)\n toLog(\"Spectrometer ready\", True)", "def setup(self):\n if not self._gpio_setup:\n if self._gpio is None:\n try:\n import RPi.GPIO as GPIO\n self._gpio = GPIO\n except ImportError:\n raise ImportError('This library requires the RPi.GPIO module\\nInstall with: sudo apt install python-rpi.gpio')\n self._gpio.setmode(self._gpio.BCM)\n self._gpio.setwarnings(False)\n self._gpio.setup(self.cs_pin, self._gpio.OUT)\n self._gpio.setup(self.dc_pin, self._gpio.OUT, initial=self._gpio.LOW, pull_up_down=self._gpio.PUD_OFF)\n self._gpio.setup(self.reset_pin, self._gpio.OUT, initial=self._gpio.HIGH, pull_up_down=self._gpio.PUD_OFF)\n self._gpio.setup(self.busy_pin, self._gpio.IN, pull_up_down=self._gpio.PUD_OFF)\n\n if self._spi_bus is None:\n import spidev\n self._spi_bus = spidev.SpiDev()\n\n self._spi_bus.open(0, self.cs_channel)\n self._spi_bus.no_cs = True\n self._spi_bus.max_speed_hz = 5000000\n\n self._gpio_setup = True\n\n self._gpio.output(self.reset_pin, self._gpio.LOW)\n time.sleep(0.1)\n self._gpio.output(self.reset_pin, self._gpio.HIGH)\n time.sleep(0.1)\n\n self._gpio.output(self.reset_pin, self._gpio.LOW)\n time.sleep(0.1)\n self._gpio.output(self.reset_pin, self._gpio.HIGH)\n\n self._busy_wait(1.0)\n\n # Sending init commands to display\n self._send_command(AC073TC1_CMDH, [0x49, 0x55, 0x20, 0x08, 0x09, 0x18])\n\n self._send_command(AC073TC1_PWR, [0x3F, 0x00, 0x32, 0x2A, 0x0E, 0x2A])\n\n self._send_command(AC073TC1_PSR, [0x5F, 0x69])\n\n self._send_command(AC073TC1_POFS, [0x00, 0x54, 0x00, 0x44])\n\n self._send_command(AC073TC1_BTST1, [0x40, 0x1F, 0x1F, 0x2C])\n\n self._send_command(AC073TC1_BTST2, [0x6F, 0x1F, 0x16, 0x25])\n\n self._send_command(AC073TC1_BTST3, [0x6F, 0x1F, 0x1F, 0x22])\n\n self._send_command(AC073TC1_IPC, [0x00, 0x04])\n\n self._send_command(AC073TC1_PLL, [0x02])\n\n 
self._send_command(AC073TC1_TSE, [0x00])\n\n self._send_command(AC073TC1_CDI, [0x3F])\n\n self._send_command(AC073TC1_TCON, [0x02, 0x00])\n\n self._send_command(AC073TC1_TRES, [0x03, 0x20, 0x01, 0xE0])\n\n self._send_command(AC073TC1_VDCS, [0x1E])\n\n self._send_command(AC073TC1_T_VDCS, [0x00])\n\n self._send_command(AC073TC1_AGID, [0x00])\n\n self._send_command(AC073TC1_PWS, [0x2F])\n\n self._send_command(AC073TC1_CCSET, [0x00])\n\n self._send_command(AC073TC1_TSSET, [0x00])", "def __init__(self, esp_mgr):\n self.esp_mgr = esp_mgr\n adafruit_esp32spi_socket.set_interface(self.esp_mgr.esp)\n self.inbuffer = ''\n self.cmds = []\n self.next_fn = self.state_text\n self.telnet_cmd = []\n self.client_socket = None\n self.server_socket = None\n self.termious = None # termious hack\n self.current_state = ''", "def init_serial():\n\tglobal D\n\t# start serial connection\n\tbaud = 9600\n\ttry:\n\t\tD.gps_serial = serial.Serial(\"/dev/ttyAMA0\",baud,timeout=1)\n\t\tD.gps_serial.open()\n\t\tD.gps_serial.write(\"$PMTK220,200*2C\")\n\t\tD.gps_serial.write(\"$PMTK300,200,0,0,0,0*2F\")\n\texcept:\n\t\tprint \"Failed to open serial\"\n\t\trospy.shutdown(\"Failed to open gps serial\")", "def __init__(self, pitch=30, pitch_type='duo', Z=4, Alt = 100):\n \n self.pitch_type = pitch_type\n self.pitch = pitch\n self.Z = Z\n self.Alt = Alt\n \n \n # set the Ce value (exposure coeff NA 2.16)\n self.Ce = 1\n \n # set the Ct value (thermal coeff NA 2.17)\n self.Ct = 1\n \n # snow load shjape coefficients\n if self.pitch_type == 'mono':\n if self.pitch <= 30:\n self.mu = 0.80\n elif 30 < self.pitch <= 60:\n self.mu = 0.80 * (60 - self.pitch) / 30\n else:\n self.mu = 0.0\n elif self.pitch_type == 'duo':\n if self.pitch <= 15:\n self.mu = 0.80\n elif 15 < self.pitch <= 30:\n self.mu = 0.80 + 0.40*(self.pitch - 15) / 15\n elif 30 < self.pitch <= 60:\n self.mu = 1.2*(60 - self.pitch) / 30\n else:\n self.mu = 0.0\n else:\n self.mu = 0.80 # end conservative number\n \n # calculate the value of the snow load on the ground \n self.sk = (0.15 + (0.1 * self.Z + 0.05) + ((self.Alt - 100) / 525))\n \n # calculate the roof snow load\n self.s = self.mu * self.Ce * self.Ct * self.sk", "def __init__(self):\n self._read_calibration_data()\n self.configure_sensor(\n TemperatureOversamplings.x08,\n PressureOversamplings.x16,\n HumidityOversamplings.x08,\n IIRFilterCoefficients.FC_003,\n 250,\n 250)", "def __init__(self, device = '/dev/spidev0.0', delay = 40, speed = 200000, bits = 8,Port=None,Server=None):\n self.Port = Port\n self.Server=Server\n if self.Server != None:\n self.Transaction=self._NetTransaction\n else:\n if self.Port != None: # Init Server Thread\n self.ServerThread = threading.Thread(target=self.ListenerTread)\n self.ServerThread.start()\n self.Bits = c_uint8(bits)\n self.Speed = self.WriteSpeed\n self.Delay = c_uint16(delay)\n self.Device = device\n self.File = posix.open(self.Device, posix.O_RDWR)\n self.SetBits()\n self.SetSpeed()", "def use_spi():\n _LIB.oled_click_use_spi()", "def qspi_init(self, retain_ram=False, init_params=None):\n class _CtypesQSPIInitParams(ctypes.Structure):\n _fields_ = [(\"read_mode\", ctypes.c_int), (\"write_mode\", ctypes.c_int), (\"address_mode\", ctypes.c_int), (\"frequency\", ctypes.c_int), (\"spi_mode\", ctypes.c_int), (\"sck_delay\", ctypes.c_uint32), (\"custom_instruction_io2_level\", ctypes.c_int), (\"custom_instruction_io3_level\", ctypes.c_int), (\"CSN_pin\", ctypes.c_uint32), (\"CSN_port\", ctypes.c_uint32), (\"SCK_pin\", ctypes.c_uint32), (\"SCK_port\", ctypes.c_uint32), 
(\"DIO0_pin\", ctypes.c_uint32), (\"DIO0_port\", ctypes.c_uint32), (\"DIO1_pin\", ctypes.c_uint32), (\"DIO1_port\", ctypes.c_uint32), (\"DIO2_pin\", ctypes.c_uint32), (\"DIO2_port\", ctypes.c_uint32), (\"DIO3_pin\", ctypes.c_uint32), (\"DIO3_port\", ctypes.c_uint32), (\"WIP_index\", ctypes.c_uint32), (\"pp_size\", ctypes.c_int)]\n \n if not self._is_bool(retain_ram):\n raise ValueError('The retain_ram parameter must be a boolean value.')\n \n if not self._is_right_class(init_params, QSPIInitParams) and init_params is not None:\n raise ValueError('The init_params parameter must be an instance of class QSPIInitParams.')\n \n if init_params is None:\n init_params = QSPIInitParams()\n\n retain_ram = ctypes.c_bool(retain_ram)\n qspi_init_params = _CtypesQSPIInitParams(init_params.read_mode, init_params.write_mode, init_params.address_mode, init_params.frequency, init_params.spi_mode, init_params.sck_delay, init_params.custom_instruction_io2_level, init_params.custom_instruction_io3_level, init_params.CSN_pin, init_params.CSN_port, init_params.SCK_pin, init_params.SCK_port, init_params.DIO0_pin, init_params.DIO0_port, init_params.DIO1_pin, init_params.DIO1_port, init_params.DIO2_pin, init_params.DIO2_port, init_params.DIO3_pin, init_params.DIO3_port, init_params.WIP_index, init_params.pp_size)\n \n result = self._lib.NRFJPROG_qspi_init(retain_ram, ctypes.byref(qspi_init_params))\n if result != NrfjprogdllErr.SUCCESS:\n raise APIError(result)", "def __init__(self):\r\n super().__init__()\r\n self._name = \"PICOSCOPE2408b\"\r\n self._lib = None\r\n self._handle = None\r\n self._run_lock = Lock()\r\n self._driver_lock = Lock()\r\n\r\n self._sampling_time = 4E-9\r\n self._sampling_duration = 50E-6\r\n self._pulse_time = 100E-9\r\n self._samples = int(self._sampling_duration / self._sampling_time)\r\n self._idx = 0\r\n\r\n w_len = self._samples\r\n location = 0.1\r\n idx1 = int(w_len*(location - self._pulse_time/(2*self._sampling_duration)))\r\n idx2 = int(w_len*(location + self._pulse_time/(2*self._sampling_duration))) - 1\r\n self._waveform = np.array([-1*MAX_EXT if (i < idx1 or i >= idx2) else MAX_EXT for i in range(w_len)],dtype=c_int16)\r\n\r\n self._A_data = np.ones(self._samples)*2\r\n self._B_data = np.ones(self._samples)*-2\r\n self._C_data = np.ones(self._samples)*0\r\n self._window_est = np.ones(self._samples)*0\r\n self._t = np.linspace(0,self._sampling_duration,self._samples)\r\n self._range_A = None\r\n self._range_B = None\r\n self._depol_ratio = None\r\n\r\n self._process_queue = Queue()\r\n self._save_queue = Queue()", "def __init__(self):\n self.ram = bytearray(256)\n self.register = [0] * 8\n self.pc = 0\n self.sp = 7", "def __init__(sp, line) :\n ## frameNumber, eventName, photonEnergyEv, wavelengthA, GMD, peak_index, peak_x_raw, peak_y_raw, peak_r_assembled, peak_q, peak_resA, nPixels, totalIntensity, maxIntensity, sigmaBG, SNR\n #5, LCLS_2015_Feb22_r0169_022047_197ee, 6004.910515, 2.064714, 4.262349, 29997, 508.884796, 19.449471, 441.314606, 1.741234, 5.743053, 5, 361.105774, 112.819145, 19.236982, 18.771435\n\n sp.line = line[:-1] #.rstrip('\\n') # .replace(',',' ')\n sp.fields = sp.line.split()\n\n s_frameNumber, s_eventName, s_photonEnergyEv, s_wavelengthA, s_GMD, s_peak_index, s_peak_x_raw, s_peak_y_raw,\\\n s_peak_r_assembled, s_peak_q, s_peak_resA, s_nPixels, s_totalIntensity, s_maxIntensity, s_sigmaBG, s_SNR =\\\n sp.fields[0:16]\n\n sp.frameNumber, sp.photonEnergyEv, sp.wavelengthA = int(s_frameNumber), float(s_photonEnergyEv), float(s_wavelengthA)\n sp.GMD, 
sp.peak_index, sp.peak_x_raw, sp.peak_y_raw = float(s_GMD), int(s_peak_index), float(s_peak_x_raw), float(s_peak_y_raw)\n sp.peak_r_assembled, sp.peak_q, sp.peak_resA, sp.nPixels = float(s_peak_r_assembled), float(s_peak_q), float(s_peak_resA), int(s_nPixels)\n sp.totalIntensity, sp.maxIntensity, sp.sigmaBG, sp.SNR = float(s_totalIntensity), float(s_maxIntensity), float(s_sigmaBG), float(s_SNR)\n\n sp.runnum, sp.tstamp, sp.tsec, sp.s_fid = convertCheetahEventName(s_eventName)\n sp.fid = int(sp.s_fid, 16)\n\n #sp.seg, sp.row, sp.col = src_from_rc8x8(sp.peak_y_raw, sp.peak_x_raw)\n\n sp.line = line\n sp.empty = sp.empty_line()", "def teleopInit(self):\n self.Drive.resetEncoder()\n\n self.Drive.disableAutoForward()\n self.Drive.disableAutoTurn()\n self.Drive.disableVision()\n\n self.DS.setWhichVariable(True)\n self.Drive.updateSetpoint(\"teleop\")\n self.DS.setFirstTimeVariable(True)\n self.timer.reset()\n\n self.matchTime.startMode(isAuto=False)", "def __init__(self, gameCanvas, specs):\n\t\n\t\t# Initialization of the Dot\n\t\tsuper(Dot, self).__init__(gameCanvas, specs, specs['points'])\n\t\t\n\t\t# Draw the Dot\n\t\tself.draw()", "def setup(self, gl_buffers, color_vbo, pos_vbo, partNumber):\n self.gl_objects = gl_buffers\n self.color_vbo, self.pos_vbo = color_vbo, pos_vbo\n self.partNumber = partNumber", "def __init__(self):\n self.tape_tag = None\n self.independentVariableShapeList = []\n self.dependentVariableShapeList = []", "def init(self):\n self.reset()\n\n self.__interface.send_command('POWER_SETTING')\n self.__interface.send_data(0x37)\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('PANEL_SETTING')\n self.__interface.send_data(0xCF)\n self.__interface.send_data(0x08)\n\n self.__interface.send_command('BOOSTER_SOFT_START')\n self.__interface.send_data(0xc7)\n self.__interface.send_data(0xcc)\n self.__interface.send_data(0x28)\n\n self.__interface.send_command('POWER_ON')\n self.wait_until_idle()\n\n self.__interface.send_command('PLL_CONTROL')\n self.__interface.send_data(0x3c)\n\n self.__interface.send_command('TEMPERATURE_CALIBRATION')\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('VCOM_AND_DATA_INTERVAL_SETTING')\n self.__interface.send_data(0x77)\n\n self.__interface.send_command('TCON_SETTING')\n self.__interface.send_data(0x22)\n\n self.__interface.send_command('TCON_RESOLUTION')\n self.__interface.send_data(0x02) #source 640\n self.__interface.send_data(0x80)\n self.__interface.send_data(0x01) #gate 384\n self.__interface.send_data(0x80)\n\n self.__interface.send_command('VCM_DC_SETTING')\n self.__interface.send_data(0x1E) #decide by LUT file\n\n self.__interface.send_command(0xe5, False) #FLASH MODE\n self.__interface.send_data(0x03)", "def __init__(self):\r\n try:\r\n print(f\"Connecting to Arduino on '{self._SERIAL_PORT}'...\", end='')\r\n self.ser = serial.Serial(self._SERIAL_PORT, self._BAUD, timeout=self._TIMEOUT)\r\n # Reset buffers to start with a clean slate\r\n self.ser.reset_input_buffer()\r\n self.ser.reset_output_buffer()\r\n print(\"Ok\")\r\n except serial.SerialException as e:\r\n print(\"Failed:\", e)", "def __init__(self):\n this = _libsbml.new_SBO()\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, **stn_dict):\n self._last_rain = None\n\n global DEBUG_READ\n DEBUG_READ = int(stn_dict.get('debug_read', 0))\n global DEBUG_DECODE\n DEBUG_DECODE = int(stn_dict.get('debug_decode', 0))\n global DEBUG_PRESSURE\n DEBUG_PRESSURE = int(stn_dict.get('debug_pressure', 0))\n\n self.model = 
stn_dict.get('model', 'TE923')\n self.max_tries = int(stn_dict.get('max_tries', 5))\n self.retry_wait = int(stn_dict.get('retry_wait', 30))\n self.polling_interval = int(stn_dict.get('polling_interval', 10))\n self.sensor_map = stn_dict.get('sensor_map', DEFAULT_SENSOR_MAP)\n self.battery_map = stn_dict.get('battery_map', DEFAULT_BATTERY_MAP)\n self.memory_size = stn_dict.get('memory_size', 'small')\n\n vendor_id = int(stn_dict.get('vendor_id', '0x1130'), 0)\n product_id = int(stn_dict.get('product_id', '0x6801'), 0)\n device_id = stn_dict.get('device_id', None)\n\n loginf('driver version is %s' % DRIVER_VERSION)\n loginf('polling interval is %s' % str(self.polling_interval))\n loginf('sensor map is %s' % self.sensor_map)\n loginf('battery map is %s' % self.battery_map)\n\n self.station = TE923(vendor_id, product_id, device_id,\n memory_size=self.memory_size)\n self.station.open()", "def __init__(self):\n self.ram = [0] * 256\n self.reg = [0] * 8\n self.pc = 0\n self.SP = 7\n self.reg[self.SP] = 0xf4\n self.E = 0\n self.L = 0\n self.G = 0", "def setUp(self):\n self.ser = Serial()\n self.device_obj = ZBSensor(self.ser)", "def __init__ (self, nTel=0, centroid=(-0.5, 0.5), length=(0.005, 0.09), width=(0.0005, 0.003), psi=(0, 360), nsb=(10, 60)) :\n super().__init__ (nTel)\n self.centroid = [centroid, centroid]\n self.length, self.width = length, width\n self.psi = psi\n self.nsb = nsb\n self.tab_inj, self.mat_event, self.geom = None, None, None\n self.load_telescope()", "def _init_io(self):\n GPIO.setwarnings(False)\n GPIO.setmode( GPIO.BCM )\n pins = [ self._spi_dc ]\n for pin in pins:\n GPIO.setup( pin, GPIO.OUT )", "def initialize(self):\n self.log.info(\"Initialize raspPinball hardware.\")\n\n self.config = self.machine.config['rasppinball']\n self.machine.config_validator.validate_config(\"rasppinball\", self.config)\n print(\"***************************\")\n print(self.config)\n #self.machine_type = (\n # self.machine.config['hardware']['driverboards'].lower())\n\n self._connect_to_hardware()\n\n\n # keypad\n self._kp = Keypad()\n self.old_key = \"\"\n self.key = \"\"\n # leds\n self.init_strips()", "def __init__(self, device):\n self.device = device\n self.io = serial.Serial(device, 57600, timeout=1)\n self.keys = ['time', 'centroid_x', 'centroid_y', 'centroid_r',\n 'level_1', 'level_2', 'level_3',\n 'width_1', 'width_2', 'width_3',\n 'height_1', 'height_2', 'height_3',\n 'power']", "def __init__(self, pitch, color, our_side, video_port=0, comm_port='/dev/ttyACM0', penalty=False, comms=1):\n assert pitch in [0, 1]\n assert color in ['yellow', 'blue']\n assert our_side in ['left', 'right']\n\n self.pitch = pitch\n\n # Set up the Arduino communications\n self.arduino = Arduino(comm_port, 115200, 1, comms)\n\n # Set up camera for frames\n self.camera = Camera(port=video_port, pitch=self.pitch)\n frame = self.camera.get_frame()\n center_point = self.camera.get_adjusted_center(frame)\n\n # Set up vision\n self.calibration = tools.get_colors(pitch)\n self.vision = Vision(\n pitch=pitch, color=color, our_side=our_side,\n frame_shape=frame.shape, frame_center=center_point,\n calibration=self.calibration)\n\n # Set up postprocessing for vision\n self.postprocessing = Postprocessing(our_side)\n\n # Set up main planner\n self.planner = Planner(our_side=our_side, pitch_num=self.pitch, isPenalty=penalty)\n\n # Set up GUI\n self.GUI = GUI(calibration=self.calibration, arduino=self.arduino, pitch=self.pitch)\n\n self.color = color\n self.side = our_side\n\n self.preprocessing = Preprocessing()\n\n 
self.robot = Robot_Controller()", "def __init__(self, seat: Seat) -> None:\n self._ptr = ffi.new(\"struct wlr_seat_keyboard_grab *\")\n self._seat = seat", "def __init__(self):\n self.__deviceselected__ = \"SR-DMS4AP{LOCALBUMP}DEV:Sel-SP\"\n self.__source__ = \"SR-DMS4AP{LOCALBUMP}S-SP\"\n self.__plane__ = \"SR-DMS4AP{LOCALBUMP}PLANE-SP\"\n #self.__xshift__ = \"SR-DMS4AP{LOCALBUMP}SHIFT:X-SP\"\n #self.__yshift__ = \"SR-DMS4AP{LOCALBUMP}SHIFT:Y-SP\"\n #self.__xangle__ = \"SR-DMS4AP{LOCALBUMP}ANGLE:X-SP\"\n #self.__yangle__ = \"SR-DMS4AP{LOCALBUMP}ANGLE:Y-SP\"\n self.__shift__ = \"SR-DMS4AP{LOCALBUMP}SHIFT-SP\"\n self.__angle__ = \"SR-DMS4AP{LOCALBUMP}ANGLE-SP\"\n # with all offsets\n self.__anglerb__ = \"SR-DMS4AP{LOCALBUMP}ANGLE-I\"\n self.__positionrb__ = \"SR-DMS4AP{LOCALBUMP}POS-I\"\n # with BBA offset only\n self.__anglerb0__ = \"SR-DMS4AP{LOCALBUMP}ANGLE:BBA-I\"\n self.__positionrb0__ = \"SR-DMS4AP{LOCALBUMP}POS:BBA-I\"\n\n self.__bpmposition__ = \"SR-DMS4AP{LOCALBUMP:BPM}Pos-I\"\n self.__bpmorbitx__ = \"SR-DMS4AP{LOCALBUMP:BPM}ORB:X-I\"\n self.__bpmorbity__ = \"SR-DMS4AP{LOCALBUMP:BPM}ORB:Y-I\"\n self.__bpmorbitx0__ = \"SR-DMS4AP{LOCALBUMP:BPM}ORB:X0-I\"\n self.__bpmorbity0__ = \"SR-DMS4AP{LOCALBUMP:BPM}ORB:Y0-I\"\n\n self.__correctorposition__ = \"SR-DMS4AP{LOCALBUMP:COR}Pos-I\"\n self.__hcorrectorcurrent__ = \"SR-DMS4AP{LOCALBUMP:HCOR}PS-SP\"\n self.__hcorrectordiff__ = \"SR-DMS4AP{LOCALBUMP:HCOR}PS:Delta-SP\"\n self.__vcorrectorcurrent__ = \"SR-DMS4AP{LOCALBUMP:VCOR}PS-SP\"\n self.__vcorrectordiff__ = \"SR-DMS4AP{LOCALBUMP:VCOR}PS:Delta-SP\"\n\n self.__undo__ = \"SR-DMS4AP{LOCALBUMP}Enbl:Undo-Cmd\"\n self.__apply__ = \"SR-DMS4AP{LOCALBUMP}Enbl-Cmd\"\n self.__status__ = \"SR-DMS4AP{LOCALBUMP}TS-I\"\n self.__idposinfo__ = \"SR-DMS4AP{LOCALBUMP}S-I\"\n self.__srcposition__ = \"SR-DMS4AP{LOCALBUMP}SRC-SP\"", "def write(self, buffer):\n utils.print_for_unimplemented_functions(SPI.write.__name__)\n telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_SPI)", "def _initialize_buffers(self) -> None:", "def __init__(self) -> None:\n self.sensor = serial.Serial(config.DEVICE)\n super().__init__()", "def __init__(self,address,InUseBy=None,Bus = None,Ident=''):\n self.Ident = Ident\n self.InUseBy = InUseBy\n VersionStrip =Ident.split(' ')[0].lower()\n if Ident !='' and VersionStrip in SPI.DeviceList:\n self.InUseBy = SPI.DeviceList[VersionStrip](Bus,address)\n else:\n self.Type=None\n if self.InUseBy!=None:\n self.Ident=VersionStrip", "def __init__(self, pockets):\n self._is_on = False\n self._pockets = pockets", "def __init__(self):\n # FIXME: IS this needed?\n super(ArduinoStation, self).__init__()\n\n self.serial_port_pattern = '/dev/ttyACM{port_num}'\n self.serial_port_num = None\n self.baudrate = 9600\n self.ser = self._setup_serial_connection()\n\n\n # Sensor 1 (DHT11) has 2 readings, Sensor 2 has 1\n ## FIXME: Should look for key pairs in list and submit when no more unique readings are coming through\n if config.SCB_CONFIGURATION == 'standard':\n self.lines_per_observation = 3\n else:\n self.lines_per_observation = 7 # Allows for up to 5 DS18B20 along w/ DHT-11.", "def __init__(self, address=0x76):\n self.address = address\n self.bus = self._initialize_bus()\n\n self.chip_id, self.chip_version = self._get_info_about_sensor()", "def __init__(self, segments, display_res=\"1920x1080\", stream_id=None):\n self.segments = segments\n self.display_res = display_res\n self.stream_id = stream_id\n self.o22 = []\n self.mode = None", "def __init__(self, envirophat, use_leds):\n self.envirophat 
= envirophat\n self.use_leds = use_leds\n # sensors readings\n self.light = None\n self.light_red = None\n self.light_green = None\n self.light_blue = None\n self.accelerometer_x = None\n self.accelerometer_y = None\n self.accelerometer_z = None\n self.magnetometer_x = None\n self.magnetometer_y = None\n self.magnetometer_z = None\n self.temperature = None\n self.pressure = None\n self.voltage_0 = None\n self.voltage_1 = None\n self.voltage_2 = None\n self.voltage_3 = None", "def __init__(self, name: str, address: str,\n channel_map: dict, reset_currents: bool=False):\n t0 = time.time()\n super().__init__(name)\n self.channel_map = channel_map\n self.spi_rack = SPI_rack(address, 9600, timeout=1)\n self.spi_rack.unlock()\n\n self.add_parameter(\n 'cfg_ramp_rate', unit='A/s',\n initial_value=0.1e-3, # 0.1 mA/s\n docstring='Limits the rate at which currents can be changed.',\n vals=validators.Numbers(min_value=0, max_value=np.infty),\n parameter_class=ManualParameter)\n self.add_parameter(\n 'cfg_verbose',\n initial_value=True,\n vals=validators.Bool(),\n docstring='If True, prints progress while ramping values.',\n parameter_class=ManualParameter)\n\n # Determine the set of modules required from the channel map\n module_ids = set([ch_map[0] for ch_map in channel_map.values()])\n # instantiate the controllers for the individual modules\n self.current_sources = {}\n for mod_id in module_ids:\n # N.B. reset currents has a slow ramp build in.\n self.current_sources[mod_id] = S4g_module(\n self.spi_rack,\n module=mod_id,\n max_current=50e-3,\n reset_currents=reset_currents)\n\n for parname, (mod_id, dac) in self.channel_map.items():\n self.add_parameter(\n parname,\n get_cmd=partial(self._get_current, parname),\n set_cmd=partial(self._set_current, parname),\n unit=\"A\",\n vals=validators.Numbers(min_value=-25e-3, max_value=25e-3))\n\n self.connect_message(begin_time=t0)", "def __init__(self):\n self.ram = [0]*256\n self.reg = [0] * 8\n self.fl = 0b00000000\n self.pc = 0\n self.sp = 0xF3\n self.is_run = False\n self.call_flag = False\n pass", "def __init__(self, serialNumber, idProduct, idVendor, model):\n Spectrometer.__init__(self, serialNumber, idProduct, idVendor)\n\n self.device = None\n self.configuration = None\n self.interface = None\n self.inputEndpoints = []\n self.outputEndpoints = []\n\n self.epParametersIdx = 2\n self.epStatusIdx = 2\n self.epCommandOutIdx = 0\n self.epMainInIdx = 2\n self.epSecondaryInIdx = 1\n\n self.epCommandOut = None\n self.epMainIn = None\n self.epSecondaryIn = None\n self.epParameters = None\n self.epStatus = None\n\n self.model = model\n self.wavelength = None\n self.discardLeadingSamples = 0 # In some models, the leading data is meaningless\n self.discardTrailingSamples = 0 # In some models, the trailing data is meaningless\n self.lastStatus = None", "def __init__(self,devId, varsToStream, printingRate = 10,labels = None,updateFreq = 100, shouldLog = False, shouldAuto = 1):\n\t\t#init printer settings\n\t\tself.counter = 0\n\t\tself.prev_data = None\n\t\tself.labels = labels\n\t\tself.data = None\n\t\tself.rate = printingRate\n\n\t\t# load stream data\n\t\tself.varsToStream = varsToStream\n\t\tself.devId = devId\n\t\tself.shouldAuto = shouldAuto\n\t\tself.updateFreq = updateFreq\n\t\tself.shouldLog = shouldLog\n\t\tself.prevReadTime = time.time()\n\n\t\t# Start stream\n\t\tfxSetStreamVariables(self.devId,self.varsToStream)\n\t\tif not fxStartStreaming(self.devId,self.updateFreq,self.shouldLog,self.shouldAuto):\n\t\t\tprint(\"Streaming 
failed...\")\n\t\t\tsys.exit(-1)\n\t\telse:\n\t\t\tsleep(0.4)", "def __init__(self):\n super().__init__()\n\n # Robot state\n self.ask_mode = False\n\n # Connect two large motors on output ports B and C\n self.sound = Sound()\n self.leds = Leds()\n self.p1 = TouchSensor(INPUT_1)\n self.p2 = TouchSensor(INPUT_2)\n self.p3 = TouchSensor(INPUT_3)\n self.p4 = TouchSensor(INPUT_4)", "def initialize_spike_pos_fig(self):\n self._pos_fig = plt.figure()\n self._spk_pos_ax = plt.axes()\n self._spk_pos_ax.set_xlabel(\"x (bin)\")\n self._spk_pos_ax.set_ylabel(\"y (bin)\")\n self._spk_pos_ax.set_xlim((-0.5, 0.5+PositionAnalysis.N_POSITION_BINS[0]))\n self._spk_pos_ax.set_ylim((-0.5, 0.5+PositionAnalysis.N_POSITION_BINS[1]))\n self._spk_pos_ax.grid(True)\n\n # Create graphics entries for the actual position and also each of the spike clusters\n pos_frame, = plt.plot([], [], animated=True)\n spk_frame, = plt.plot([], [], linestyle='None', marker='o', alpha=0.4, animated=True)\n # vel_frame = plt.text(30.0, 10.0, 'speed = 0 cm/s', transform=self._spk_pos_ax.transAxes)\n vel_frame = plt.text(40.0, 2.0, 'speed = 0cm/s')\n self._spk_pos_frame.append(spk_frame)\n self._spk_pos_frame.append(pos_frame)\n self._spk_pos_frame.append(vel_frame)\n\n anim_obj = animation.FuncAnimation(self._pos_fig, self.update_position_and_spike_frame, frames=self.__N_ANIMATION_FRAMES, interval=5, blit=True)\n self._anim_objs.append(anim_obj)", "def __init__(self):\n \n self.load_PSF_data()", "def init(self, x0=None, control=None):\n if x0 is not None:\n self._x = base.getvector(x0, 3)\n else:\n self._x = self._x0.copy()\n\n self._x_hist = []\n\n if self._seed is not None:\n self._random = np.random.default_rng(self._seed)\n\n if control is not None:\n # override control\n self._control = control\n \n if self._control is not None:\n self._control.init()\n\n self._t = 0\n\n # initialize the graphics\n if self._animation is not None:\n\n # setup the plot\n self._ax = base.plotvol2(self.workspace)\n \n self._ax.set_xlabel('x')\n self._ax.set_ylabel('y')\n self._ax.set_aspect('equal')\n self._ax.figure.canvas.manager.set_window_title(\n f\"Robotics Toolbox for Python (Figure {self._ax.figure.number})\")\n\n self._animation.add(ax=self._ax) # add vehicle animation to axis\n self._timer = plt.figtext(0.85, 0.95, '') # display time counter\n\n # initialize the driver\n if isinstance(self._control, VehicleDriver):\n self._control.init(ax=self._ax)", "def __init__(self, params=None):\n if isinstance(params, SSDParams):\n self.params = params\n else:\n self.params = SSDNet.default_params", "def __init__(self, por=\"/dev/ttyS\", par=['1','115200','8','N','1'], ope=True, deb=False):\r\n\t\t# referenzio il flag di Debug\r\n\t\tself.deb = deb\r\n\t\tself.ope = ope\r\n\t\tself.par = par\r\n\t\tself.por = por+par[0]\r\n\t\t# Gestione apertura collegamento\r\n\t\tif self.ope:\r\n\t\t\ttry:\r\n\t\t\t\t# provo ad aprire la connessione\r\n\t\t\t\tself.ser = serial.Serial(self.por)\r\n\t\t\t\t# configuro i parametri e provo apertura\r\n\t\t\t\ttry:\r\n\t\t\t\t\tself.ser.close()\r\n\t\t\t\t\tself.ser.open()\r\n\t\t\t\t\t# set parameters\r\n\t\t\t\t\tself.chaSetting()\r\n\t\t\t\t\t# debug\r\n\t\t\t\t\tif self.deb:\r\n\t\t\t\t\t\t# ok\r\n\t\t\t\t\t\tprint \"%s now is open.\" %self.por\r\n\t\t\t\texcept:\r\n\t\t\t\t\tself.ser = None\r\n\t\t\t\t\t# seriale gia' aperta\r\n\t\t\t\t\tprint \"%s port not opened!\" %self.por\r\n\t\t\texcept:\r\n\t\t\t\t# nessuna seriale presente\r\n\t\t\t\tself.ser = None\r\n\t\t\t\t# debug\r\n\t\t\t\tif 
self.deb:\r\n\t\t\t\t\tprint \"%s not present!\" %self.por\r\n\t\t\t\t\t#sys.exit()\r\n\t\t# nessuna verifica di presenza device!\r\n\t\telse:\r\n\t\t\tself.ser = serial.Serial()\r\n\t\t# ritardo tra un invio e il successivo\r\n\t\tself.dlTx = 0.002", "def __init__(self):\n self.ctrl = src.robot_controller.RobotController()\n self.recorder = robot_recorder.RobotRecorder(save_dir=\"/home/guser/sawyer_data/test_recording\", start_loop=False)\n\n # drive to neutral position:\n self.ctrl.set_neutral()\n # import pdb; pdb.set_trace()\n\n self.num_traj = 10\n\n\n limb = 'right'\n self.name_of_service = \"ExternalTools/\" + limb + \"/PositionKinematicsNode/FKService\"\n self.fksvc = rospy.ServiceProxy(self.name_of_service, SolvePositionFK)\n\n self.run_data_collection()", "def init():\n\t# init the node\n\trospy.init_node(\"gps_node\")\n\t\n\t# init the publishers\n\tD.gpsPub = rospy.Publisher(\"gps_data\",RosGPS)\n\n\t# init gps connection\n\t#init_gpsd()\n\tinit_serial()\n\t\n\t# gps data\n\tD.NSatellites = 0\t# the number of satellites in view\n\n\t# time conversion info\n\tD.tzOffset = -8 \t# offset in hours due to timezones\n\tD.dst = 1\t\t# daylight savings. 1 = yes, 0 = no", "def __init__(self, address=0x68, config=0):\r\n\t\tself.i2c = FT232H.I2CDevice(ft232h, address)\r\n\t\tif config == 0:\r\n\t\t\tself.setWake()\r\n\t\t\tself.setScale(mode='ACC',scale=0)\r\n\t\t\tself.setScale(mode='GYR',scale=0)\r\n\t\telif config == 1:\r\n\t\t\tself.setWake()\r\n\t\t\tself.setScale(mode='ACC',scale=1)\r\n\t\t\tself.setScale(mode='GYR',scale=1)\t\t\t\t\r\n\t\telif config == 2:\r\n\t\t\tself.setWake()\r\n\t\t\tself.setScale(mode='ACC',scale=2)\r\n\t\t\tself.setScale(mode='GYR',scale=2)\t\r\n\t\telif config == 3:\r\n\t\t\tself.setWake()\r\n\t\t\tself.setScale(mode='ACC',scale=3)\r\n\t\t\tself.setScale(mode='GYR',scale=3)\t\t\t\t\r\n\t\telif config == 4:\r\n\t\t\tself.setWake()\r\n\t\t\tself.setScale(mode='ACC',scale=1)\t\r\n\t\t\tself.setTempDisable()\r\n\t\t\tself.setGYRStandby(axis='X')\r\n\t\t\tself.setGYRStandby(axis='Y')\r\n\t\t\tself.setGYRStandby(axis='Z')", "def initialize(self) -> None:\n # Set motors to stop, read encoder values for starting point\n self.drive.arcadeDrive(0, 0)\n self.drive.resetEncoders()", "def __init__(self, starting_point=-1):\n self.i_read = starting_point\n self.data = [['fake_chip_id', 'fake_version'],\n [96, 110, 203, 104, 50, 0, 29, 145, 59, 215, 208, 11,\n 232, 38, 42, 255, 249, 255, 172, 38, 10, 216, 189, 16],\n [75],\n [129, 1, 0, 16, 44, 3, 30],\n [76, 60, 128, 129, 49, 128, 94, 120]]", "def _initialize(self):\r\n print(\"Set the CP mode to EVSE\")\r\n self.whitebeet.controlPilotSetMode(1)\r\n print(\"Set the CP duty cycle to 100%\")\r\n self.whitebeet.controlPilotSetDutyCycle(100)\r\n print(\"Start the CP service\")\r\n self.whitebeet.controlPilotStart()\r\n print(\"Start SLAC in EVSE mode\")\r\n self.whitebeet.slacStart(1)\r\n time.sleep(2)", "def init(self):\n\n pygame.init()\n pygame.display.set_mode((640, 480))\n pygame.display.set_caption(\"Gears 4 Geeks\")\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n self.ser = serial.Serial('COM4', 9600)\n\n #ADAFRUIT_IO_KEY = 'd1a1bd3737714fa488e0364c775a4b4d' ##This will only be good until the end of the competition\n #self.aio = Client(ADAFRUIT_IO_KEY)", "def __init__(self):\n self._read_calibration_data()\n self.set_oversamplings_and_mode(\n HumidityOversampling.x08,\n TemperatureOversampling.x08,\n PressureOversampling.x16,\n SensorMode.Normal)\n 
self.set_config(\n InactiveDuration.ms1000,\n FilterCoefficient.fc04)", "def startup(self):\n self.prev_gray = None\n self.frame_idx = 1\n self.tracks = []\n self.fps = []\n self.vid_info = None\n self.track_new_points_count = 0", "def __init__(self, dev='/dev/ttyUSB0', dbhost='localhost' ,db=DEFAULTDB):\n self.circles = {}\n self.dev = Stick(dev)\n self.connectdb(dbhost,db)", "def __init__(self, robot):\n\n #initialise the stick and the smart dashboard (in case we need stuff for auton):\n self.stick = wpilib.Joystick(0)\n self.smart_dashboard = NetworkTable.getTable(\"SmartDashboard\")\n\n #Main stick buttons.\n #-----------------------------------------------------------------------\n trigger = JoystickButton(self.stick, 1)\n thumb = JoystickButton(self.stick, 2)\n three = JoystickButton(self.stick, 3)\n four = JoystickButton(self.stick, 4)\n five = JoystickButton(self.stick, 5)\n six = JoystickButton(self.stick, 6)\n seven = JoystickButton(self.stick, 7)\n eight = JoystickButton(self.stick, 8)\n nine = JoystickButton(self.stick, 9)\n ten = JoystickButton(self.stick, 10)\n eleven = JoystickButton(self.stick, 11)\n twelve = JoystickButton(self.stick, 12)\n\n #Hat switch POV stuff.\n #-----------------------------------------------------------------------\n pov_north = POVButton(self.stick, 0)\n pov_northeast = POVButton(self.stick, 45)\n pov_east = POVButton(self.stick, 90)\n pov_southeast = POVButton(self.stick, 135)\n pov_south = POVButton(self.stick, 180)\n pov_southwest = POVButton(self.stick, 225)\n pov_west = POVButton(self.stick, 270)\n pov_northwest = POVButton(self.stick, 315)\n\n pov_south.whenPressed(SuperStrafeEntertainmentSystem(robot, SuperStrafeEntertainmentSystem.kBack))\n pov_north.whenPressed(SuperStrafeEntertainmentSystem(robot, SuperStrafeEntertainmentSystem.kForward))\n pov_east.whenPressed(SuperStrafeEntertainmentSystem(robot, SuperStrafeEntertainmentSystem.kRight))\n pov_west.whenPressed(SuperStrafeEntertainmentSystem(robot, SuperStrafeEntertainmentSystem.kLeft))", "def __init__(self):\n self.pc = 0\n self.reg = [0] * 8\n self.ram = [0] * 256\n\n self.running = False", "def __init__(__self__, *,\n driver: 'outputs.CSIVXFlexOSSpecDriver'):\n pulumi.set(__self__, \"driver\", driver)", "def __init__(self):\n super().__init__()\n\n # Gadget state\n \n self.isDoorOpen = False\n self.verified = True\n\n # Ev3dev initialization\n self.leds = Leds()\n self.sound = Sound()\n self.drive = MoveTank(OUTPUT_B, OUTPUT_C)\n \n self.ir_sensor = InfraredSensor()\n self.ir_sensor.mode = self.ir_sensor.MODE_IR_REMOTE\n self.color_sensor = ColorSensor()\n self.color_sensor.mode = 'COL-COLOR' # WHITE\n\n # Start threads\n threading.Thread(target=self._patrol_thread, daemon=True).start()", "def setup(self):\n\n warnings.simplefilter(\"always\", DeprecationWarning)\n\n orbit_info = {'index': 'slt', 'kind': 'lt'}\n self.tinst = pysat.Instrument('pysat', 'testing', orbit_info=orbit_info)\n self.tinst.bounds = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 2))\n\n self.warn_msgs = []\n self.war = \"\"\n return", "def __init__(self):\n self.bytes = bytearray(3)\n MCP4725.__init__(self)", "def init():\n\n global leftDriverStick\n global rightDriverStick\n global goGamePad\n\n try:\n leftDriverStick = T16000M(0)\n except:\n print('OI: Error - Could not instantiate Left Driver Stick on USB port 0!!!')\n\n try:\n rightDriverStick = T16000M(1)\n except:\n print('OI: Error - Could not instantiate Right Driver Stick on USB port 0!!!')\n\n try:\n goGamePad = Joystick(2)\n except:\n print('OI: Error - 
Could not instantiate Right Driver Stick on USB port 2!!!')\n\n\n # ----------------------------------------------------------\n # Driver Controls\n # ----------------------------------------------------------\n #global resetYawBtn\n #resetYawBtn = JoystickButton(rightDriverStick, config.btnResetYawAngleIndex)\n #resetYawBtn.whenPressed(NavxResetYawAngle())\n\n global btnDriveSlow\n btnDriveSlow = JoystickButton(leftDriverStick, config.btnDriveSlow)\n \n global btnEnableLightSensor\n btnEnableLightSensor = JoystickButton(leftDriverStick, config.btnEnableLightSensorIndex)\n\n global btnExtendAll\n btnExtendAll = JoystickButton(rightDriverStick, config.btnExtendAllIndex)\n btnExtendAll.whenPressed(ExtendAll())\n\n global btnRetract\n btnRetract = JoystickButton(rightDriverStick, config.btnRetractAllIndex)\n btnRetract.whenPressed(RetractAll())\n\n global btnExtendFront\n btnExtendFront = JoystickButton(rightDriverStick, config.btnExtendFrontIndex)\n btnExtendFront.whenPressed(ExtendFront())\n\n global btnExtendBack\n btnExtendBack = JoystickButton(rightDriverStick, config.btnExtendBackIndex)\n btnExtendBack.whenPressed(ExtendBack())\n\n global btnRetractFront\n btnRetractFront = JoystickButton(rightDriverStick, config.btnRetractFrontIndex)\n btnRetractFront.whenPressed(RetractFront())\n\n global btnCargoGrabTog\n btnCargoGrabTog = JoystickButton(goGamePad, config.btnHatchGrabTogIndex)\n btnCargoGrabTog.whenPressed(ExtendBack())\n \n \"\"\"\n global btnResetEncoders\n btnResetEncoders = JoystickButton(leftDriverStick, config.btnResetEncodersIndex)\n btnResetEncoders.whenPressed(TankDriveResetEncoders())\n \"\"\"\n\n \"\"\"\n global axisElevator\n axisElevator = JoystickAxis(goGamePad, config.axisElevatorIndex)\n axisElevator. #??? idk how to configure joystick axis\n \"\"\"\n\n \"\"\"\n global btnRampTog\n btnRampTog = JoystickButton(goGamePad, config.btnRampTogIndex)\n btnRampTog.whenPressed(ExtendFront())\n \"\"\"\n #global btnResetEncoders\n #btnResetEncoders = JoystickButton(leftDriverStick, config.btnResetEncodersIndex)\n #btnResetEncoders.whenPressed(TankDriveResetEncoders())\n\n # These variable names are inconsistent, need to be fixed!!!!\n #global btnRampExtendTog\n #btnRampExtendTog = JoystickButton(goGamePad, config.btnRampExtendTogIndex)\n #btnRampExtendTog.whenPressed(RampExtend())\n\n #global btnRampRetractTog\n #btnRampRetractTog = JoystickButton(goGamePad, config.btnRampRetractTogIndex)\n #btnRampRetractTog.whenPressed(RampRetract())", "def __init__(self, oscPort = \"COM1\", zaberStagePort = 2):\n\t\t# Serial() input depends on where stage is connected\n\t\tself.stage = serial.Serial(zaberStagePort)\n\t\t# 9600 = baudrate\n\t\tself.osc = TDS3k(serial.Serial(oscPort, 9600, timeout=1))", "def __init__(self, port='/dev/ttyUSB0', baudrate=1200,\n calib=[-0.49125, 1.0613], Tident='LSCI,MODEL321', mode=0):\n # for the baratron reading and updating display\n self.dmm = gpib.find('3478a')\n self.__bytes__ = 32\n # so that DMM knows to put something in the output buffer\n gpib.read(self.dmm, self.__bytes__)\n \n # for the temperature reading, many values hardcoded for\n # Lakeshore 321 cryogenic temperature sensor\n self.Tsensor = serial.Serial(port=port, baudrate=baudrate,\n bytesize = 7, parity = 'O')\n self.Tsensor.setTimeout(1)\n self.Tsensor.flushInput()\n self.Tsensor.write('*IDN?\\n')\n answer = self.Tsensor.readline()\n\n if (re.match(Tident, answer) == None):\n raise Exception, \"LS321 ident string not matched\"\n \n # calibration factors consist of two numbers: voltage 
reading\n # at vacuum, and voltage reading at 1 atm.\n self.calib = calib\n self.mode = mode\n \n # some constants; declared here so that improved versions\n # of front-ends could modify them.\n self.atm = 760.0\n self.unit='TORR'\n self.pascalPerTorr = 133.322\n self.boltzmann = 1.38065e-23\n self.BGUnit='HE'", "def _setup_sensor ( self ):\n self.spectral = Spectral ( np.array([450, 520, 630, 770., 1550, 2090.] ),\n np.array([ 520, 600, 690, 900., 1750., 2350.] ) )", "def __init__(self, buff):\n fmt = 'hiSi'\n response = struct_helpers.unpack_from(fmt, buff, 0)\n\n error_code = response[0]\n if error_code != 0:\n self.raise_error(error_code, response)\n self.coordinator_id = response[1]\n self.coordinator_host = response[2]\n self.coordinator_port = response[3]", "def __init__(self):\n self.position = Vector2()\n self.velocity = Vector2()\n self.update_parameters()\n self.mass = 0.18 # Mass of Sphero robot in kilograms", "def __init__(self):\n self.x_coord = default_init\n self.y_coord = default_init\n self._init_random_coord() # generating random coordinates\n self.x_speed = default_init\n self.y_speed = default_init\n self.degrees = default_init\n self.radius = ship_def_radius", "def __init__(self):\n \n self.packetType = DATA\n self.types = [BYTE, # Packet type\n FLOAT, # Battery voltage\n FLOAT, FLOAT, FLOAT, FLOAT, # Temperature readings\n FLOAT, FLOAT, # Pressure and humidity readings\n BYTE, BYTE, BYTE, # GPS Year, month, date (sensor computer)\n BYTE, BYTE, BYTE, # GPS Hour, minute, second (sensor computer)\n LONG, LONG, LONG, # GPS latitude, longitude, altitude (sensor computer)\n ULONG, UINT, BYTE, # GPS speed, heading, num satellites (sensor computer)\n FLOAT, FLOAT, FLOAT, # IMU data (accelerometer)\n FLOAT, FLOAT, FLOAT, # IMU data (gyroscope)\n FLOAT, FLOAT, FLOAT, # IMU data (magnetometer)\n FLOAT, FLOAT, FLOAT, # Attitude data\n ULONG, # Time since reset\n BOOL, UINT, # Data logging\n ULONG, # Time since last data arrival\n ULONG, # Relay states\n BYTE, BYTE, BYTE, # GPS Year, month, date (comm computer)\n BYTE, BYTE, BYTE, # GPS Hour, minute, second (comm computer)\n LONG, LONG, LONG # GPS latitude, longitude, altitude (comm computer)\n ] \n\n self.values = [0]*len(self.types)\n self.values[0] = DATA", "def initialize_relays(self):\n #create list of bytes to clear out relays\n zeroed_bytes = []\n for i in range(self.num_registers):\n zeroed_bytes.append(0x00)\n\n #clear out any data in the shift registers\n ret = self.e.write_SPI_bytes_to_portA(zeroed_bytes)\n self.strobe_relays()\n print \"read from SPI: \",\n print ret\n\n #enable the relays\n self.enable_relays()", "def __init__(self, portname, devicetype):\n if devicetype == DEVICE_DEBUG_BOARD:\n self.device = debugbox.DebugBox(portname)\n elif devicetype == DEVICE_SFP_BREAKOUT:\n self.device = sfpbreakout.SFP(portname)\n elif devicetype == DEVICE_DUMMY_BOARD:\n self.device = dummybox.DummyBox(portname)\n else:\n raise IOError(\"Invalid Device Type\")\n \n # Set up laser sections\n self.mirror1 = Testrig.LaserSection(self, 814, 10, 90, 728, 844, 860, 864)\n self.laser_phase = Testrig.LaserSection(self, 810, 11, 20, 726, 846, 858, 866)\n self.gain = Testrig.LaserSection(self, 818, 12, 180, 730, 848, None, None)\n# self.gain1.validator.setTop(150)\n self.mirror2 = Testrig.LaserSection(self, 826, 14, 90, 734, 850, 862, 864)\n# self.front.validator.setTop(60)\n self.soa1 = Testrig.LaserSection(self, 822, 15, 300, 736, 852, None, None)\n# self.soa1.validator.setTop(150)\n self.soa2 = Testrig.LaserSection(self, 830, 16, 
100, 738, 854, None, None, self.to_display_current_section2, self.to_internal_current_section2)\n self.phase1 = Testrig.LaserSection(self, 834, 13, 100, 732, 856, None, None, self.to_display_current_section2, self.to_internal_current_section2)\n \n self.voltage_max = 2.5 # This is now a constant\n \n self.full_rig = True", "def __init__(self, resolution=None, colour='multi', cs_pin=CS0_PIN, dc_pin=DC_PIN, reset_pin=RESET_PIN, busy_pin=BUSY_PIN, h_flip=False, v_flip=False, spi_bus=None, i2c_bus=None, gpio=None): # noqa: E501\n self._spi_bus = spi_bus\n self._i2c_bus = i2c_bus\n self.eeprom = eeprom.read_eeprom(i2c_bus=i2c_bus)\n\n # Check for supported display variant and select the correct resolution\n # Eg: 600x480 and 640x400\n if resolution is None:\n if self.eeprom is not None and self.eeprom.display_variant in (14, 15, 16):\n resolution = [_RESOLUTION_7_3_INCH, None, _RESOLUTION_7_3_INCH][self.eeprom.display_variant - 14]\n else:\n resolution = _RESOLUTION_7_3_INCH\n\n if resolution not in _RESOLUTION.keys():\n raise ValueError('Resolution {}x{} not supported!'.format(*resolution))\n\n self.resolution = resolution\n self.width, self.height = resolution\n self.border_colour = WHITE\n self.cols, self.rows, self.rotation, self.offset_x, self.offset_y, self.resolution_setting = _RESOLUTION[resolution]\n\n if colour not in ('multi'):\n raise ValueError('Colour {} is not supported!'.format(colour))\n\n self.colour = colour\n self.lut = colour\n\n self.buf = numpy.zeros((self.rows, self.cols), dtype=numpy.uint8)\n\n self.dc_pin = dc_pin\n self.reset_pin = reset_pin\n self.busy_pin = busy_pin\n self.cs_pin = cs_pin\n try:\n self.cs_channel = [8, 7].index(cs_pin)\n except ValueError:\n self.cs_channel = 0\n self.h_flip = h_flip\n self.v_flip = v_flip\n\n self._gpio = gpio\n self._gpio_setup = False\n\n self._luts = None", "def _setup_sensor ( self ):\n self.spectral = Spectral ( np.array([500, 610, 780, 1580.] ),\n np.array([590, 680, 890, 1750.] 
) )", "def init_gps(configuration: Configuration):\n gps_serial_port = configuration.get_device_configuration(\"gps\", \"serial_device\")\n if not gps_serial_port:\n return None\n ser = serial.Serial(gps_serial_port, 9600, timeout=5.0)\n sio = io.TextIOWrapper(io.BufferedRWPair(ser, ser))\n return sio", "def __init__(self, shape):\n self.eyes = [(), ()]\n self.shape = shape\n self.state = 0\n self.new_frame()", "def __init__(\n self,\n poly_modulus_degree=8192,\n coeff_mod_bit_sizes=[60, 40, 40],\n scale_bits=40,\n scheme=\"CKKS\",\n ):\n self._context = None\n self.scheme_type_mapping = {\n \"CKKS\": ts.SCHEME_TYPE.CKKS,\n \"BFV\": ts.SCHEME_TYPE.BFV,\n }\n self.poly_modulus_degree = poly_modulus_degree\n self.coeff_mod_bit_sizes = coeff_mod_bit_sizes\n self.scale_bits = scale_bits\n _scheme = scheme\n # Setup TenSEAL context\n self.scheme_type = self.scheme_type_mapping[_scheme]\n self.serialized = None", "def __init__(self):\n GPIO.setmode(GPIO.BCM)\n pipes = [[0xC2, 0xC2, 0xC2, 0xC2, 0x01],\n [0xC2, 0xC2, 0xC2, 0xC2, 0x02],\n [0xC2, 0xC2, 0xC2, 0xC2, 0x03],\n [0xC2, 0xC2, 0xC2, 0xC2, 0x04],\n [0xC2, 0xC2, 0xC2, 0xC2, 0x05],\n [0xC2, 0xC2, 0xC2, 0xC2, 0x06],\n ]\n self.radio = NRF24(GPIO, spidev.SpiDev())\n self.radio.begin(0, 17)\n\n self.radio.setPayloadSize(32)\n self.radio.setChannel(0x76)\n self.radio.setDataRate(NRF24.BR_1MBPS)\n self.radio.setPALevel(NRF24.PA_MAX)\n\n self.radio.enableDynamicPayloads()\n self.radio.enableAckPayload()\n self.radio.setAutoAck(True)\n\n # Initialize pipes\n for i in xrange(6):\n self.radio.openReadingPipe(i, pipes[i])\n # Premptively set intial autoacknowledge command\n #for i in xrange(2):\n # self.radio.writeAckPayload(i, ackPayload, 4)\n # time.sleep(.1)\n\n self.radio.startListening()\n self.radio.printDetails()\n\n # holds [bot id]\n self.botList = []\n self.targetFound = False\n\n \"\"\"\n exploregrid logic\n \"\"\"", "def __init__(self):\n self.cad = pifacecad.PiFaceCAD()\n self.listener = pifacecad.SwitchEventListener(chip=self.cad)\n for i in range(8):\n self.listener.register(i, pifacecad.IODIR_FALLING_EDGE, self.press_key)\n self.listener.activate()\n atexit.register(self.atexit)", "def __init__(self,x_pos, y_pos, velocity, kind, fillcolor = 'red'):\n self._velocity = velocity\n self._kind = kind\n super().__init__(x = x_pos, y=y_pos, width = BOLT_WIDTH, \\\n height = BOLT_HEIGHT, fillcolor=fillcolor)", "def __init__(self, pts=[]):\n self.set_points(pts)", "def __init__(self, id: int, baudrate: int=9600, bits: int=8, parity: int=None, stop: int=1, tx: Pin=None, rx: Pin=None):" ]
[ "0.6566145", "0.6351516", "0.63373953", "0.6181888", "0.61446565", "0.6111663", "0.59349567", "0.57059205", "0.55544007", "0.554452", "0.5522998", "0.5509588", "0.5503774", "0.5380477", "0.53760433", "0.5336554", "0.5321746", "0.5299691", "0.52520025", "0.5248618", "0.5240898", "0.5219589", "0.5202608", "0.5198033", "0.5195962", "0.5170883", "0.5133692", "0.5128285", "0.51246643", "0.5120659", "0.5117634", "0.5113094", "0.51020736", "0.5077908", "0.5074333", "0.5062656", "0.5059081", "0.5058802", "0.50555795", "0.5026696", "0.5025937", "0.49972227", "0.49859428", "0.49851456", "0.49847975", "0.4967207", "0.4958621", "0.4951006", "0.49445385", "0.49432147", "0.49320188", "0.4927584", "0.49252674", "0.49164078", "0.4914648", "0.49066213", "0.49043554", "0.48915204", "0.4883599", "0.488174", "0.4878899", "0.4877542", "0.48754916", "0.48703107", "0.48657566", "0.48631698", "0.48606008", "0.48559728", "0.48506984", "0.48501936", "0.48487642", "0.48418346", "0.48322824", "0.48320255", "0.48254734", "0.48132473", "0.4813096", "0.48130068", "0.4809703", "0.47955266", "0.4789465", "0.47859904", "0.47847673", "0.47825488", "0.4781668", "0.47763354", "0.4774383", "0.4773635", "0.47724608", "0.47708207", "0.47706902", "0.4769254", "0.47657144", "0.47628587", "0.47612777", "0.47547013", "0.47518408", "0.47478813", "0.4743045", "0.47426757" ]
0.74753237
0
This method clears all the LEDs in the DotStar object
def clearleds(self):
    self.buffer = self.emptybuffer[:]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\n for tlight in self.trafficLights:\n self.trafficLights[tlight].reset()", "def clear(tft, oled):\n oled.fill(tft.BLACK)", "def clear(self):\n self._delayvalue = _CFG[\"delay\"]\n self._colormode = _CFG[\"colormode\"]\n self._delete(\"all\")\n self._bgpic = self._createimage(\"\")\n self._bgpicname = \"nopic\"\n self._tracing = 1\n self._updatecounter = 0\n self._turtles = []\n self.bgcolor(\"white\")\n for btn in 1, 2, 3:\n self.onclick(None, btn)\n self.onkeypress(None)\n for key in self._keys[:]:\n self.onkey(None, key)\n self.onkeypress(None, key)\n Myturtle._pen = None", "def off(self):\n for light in self.all:\n GPIO.output(light, 0)", "def all_off():\n Leds.red_left.brightness = 0\n Leds.red_right.brightness = 0\n Leds.green_left.brightness = 0\n Leds.green_right.brightness = 0\n Leds.blue_left.brightness = 0\n Leds.blue_right.brightness = 0", "def turn_all_off(self):\n for led_type in LED:\n self.led_off(led_type)\n logging.info('LED: ALL - Status: 0')", "def clear_all(self):\n self._set_all(0x00, 0x00, 0x00)", "def clear(self):\n self.np.fill(OFF)\n self.np.show()\n return True", "def clear(self):\n black = neo.Color(0,0,0)\n self.set_all(black)\n self.draw()", "def reset(self):\n for i in range(self.shapeRow):\n for j in range(self.shapeColumn):\n self.buttons[i][j].setText(\" \")", "def clear_strip(self):\r\n wlogger.log_info(\"Clear Strip\")\r\n for led in range(self.num_led):\r\n self.set_pixel(led, 0, 0, 0)\r\n self.show()", "def clear(self):\n self._frame.clear()\n self._turtles = []\n self._gpens = []", "def clear_all(cls):\n del cls.buttons[:]", "def clear(self):\n self.initialize()\n self.device_disconnect()", "def clear(self):\n for i in range(len(self.canvas)):\n self.canvas[i] = 0", "def all_off(self):\n self.fill_off()\n self.update()\n self.fill_off()\n self.update()", "def remove_all_lights(self):\n self.RemoveAllLights()\n self._lights.clear()", "def stop_all():\r\n motors.stop_all_motors()\r\n led.set_colour_solid(0)\r\n display.clear()", "def reset(self):\n self.obstacles = []\n self._tick = 0", "def remove_all(self):\n self.initial = None\n self.contour = None\n self.control_points = []", "def clear(self):\n self.state = [[None, None, None],\n [None, None, None],\n [None, None, None]]", "def reset(self):\n for gate in self.gates:\n gate.reset()", "def off_all(self):\n self._set_status(\"off\", \"11111111\")", "def clear(self):\n if self.flag == 0:\n for coord in INDICES:\n self.kill(coord)\n self.chart[coord] = DEAD", "def deleteAll(self):\n for tlight in self.trafficLights:\n del tlight\n del self.trafficLights", "def reset(self):\n for lane in self.lanes.values():\n lane.puck_area.clear_widgets()\n lane.patrons = list()\n lane.disabled = False\n lane.beers = list()\n\n self.message_holder.remove_widget(self.you_lose_label)\n self.message_holder.remove_widget(self.you_win_label)", "def clear(self):\n self.raster_path_line.clear()\n self.labels_path.clear()\n self.shapefile_path.clear()\n self.costumelabels.clear()\n self.layer_name.clear()\n self.class_name.clear()\n self.idfield.clear()", "def __clear(self):\n for i in range(len(self.buttons_list)):\n self.labels_strvar[i].set(\"\")\n if self.buttons_list[i][\"state\"] == DISABLED:\n self.buttons_list[i][\"state\"] = NORMAL\n self.entered_list = []\n return", "def clearHotspots( self ):\n self._hotspots = []", "def eraseAll(self): # remove all robots\n\t\tself.__robotList = []", "def Clear(self) -> None:", "def _clear_drawing(self) -> None:\n self.vertices.clear()\n self.edges.clear()\n 
self.subplot.clear()\n self.selected_element = None\n self.pressed_elements.clear()", "def clear(self):\n self.recorders = set([])\n self.reset()\n\n # Stop any currently running SpiNNaker application\n self.stop()", "def clear_all(self):\n for octet in self.master.children['!registerframe']._octets:\n for button in octet._bits_val:\n button.set(0)\n octet._update_value()\n self.master.children[\"!registerframe\"].update_reg_value()\n return None", "def resetDeviceStates(self):", "def reset(self):\r\n\r\n self.make_board()\r\n\r\n # configure each buttons text option to an empty string\r\n for row in range(3):\r\n for column in range(3):\r\n self.board[row][column][0]['text'] = ''", "def reset(self):\n for layer in self.network:\n layer.clean()", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self) -> None:", "def clear(self):\n self._turtle.clear()", "def clear(self):\n self._turtle.clear()", "def clear_all(cls):\n del cls.text_labels[:]", "def __clearBonuses(self, hp=1):\n if hp:\n self.hpBonuses = [{}, {}, {}, {}]\n else:\n self.kbBonuses = [{}, {}, {}, {}]", "def reset(self, board):", "def clearAnim():\n for node in nuke.selectedNodes():\n # rotopaint\n if node.Class() == \"RotoPaint\":\n rotoCurves = node['curves']\n for knob in node.knobs():\n if nuke.Knob.isAnimated(node[knob]):\n nuke.Knob.clearAnimated(node[knob]) \n print \"clearing animation of: \"+node.name()+\" \"+node[knob].name()\n # other nodes\n if not node.Class() == \"RotoPaint\":\n for knob in node.knobs():\n if nuke.Knob.isAnimated(node[knob]):\n nuke.Knob.clearAnimated(node[knob]) \n print \"clearing animation of: \"+node.name()+\" \"+node[knob].name()", "def _stop_all(self):\n # LEDs\n self.cam_led.off\n self.analysis_led[0].off\n self.analysis_led[1].off\n self.error.off\n \n # motors\n self.motor.stop()\n self.wash.stop()", "def clear():", "def reset ( self ):\n self.hex = ''\n self.r = 0.0\n self.g = 0.0\n self.b = 0.0\n self.h = 0.0\n self.s = 0.0\n self.l = 0.0\n self.a = 1.0\n self.rgb = []\n self.hsl = []\n self.rgba = []\n self.hsla = []\n return self", "def glclear(self):\n self.pixels = [\n [color(self.r, self.g, self.b) for x in range(self.width)]\n for y in range(self.height)\n ]", "def clickClearReferences(self, event):\n self.whiteReference = None\n self.lightBtn.color = '0.85'\n self.darkReference = None\n self.darkBtn.color = '0.85'\n plt.pause(0.3)\n self.axes.autoscale_view()", "def clear(self):\n for row in range(self.rows):\n for col in range(self.cols):\n self.data[row][col] = '.'", "def clear(self):\n for row in range(self.rows):\n for col in range(self.cols):\n self.data[row][col] = '.'", "def clear(self):\n \n self.node_set.clear()\n self.prefix.clear()\n self.suffix.clear()\n self.num_node = 0\n self.edges = 0", "async def clear_all(self) -> None:", "def clear_all(self):\n raise NotImplementedError", "def reset(self, balls=None):\r\n if balls is None:\r\n balls = self.balls\r\n for ball in balls:\r\n ball.reset()", "def clear(self):\n for ob in self.obs:\n ob.clear()\n return", "def clear(self):\n self._nodes = { }\n self._arcs = set()", "def reset():\n Vessel.reset_instances()", "def clear_elements(self):\n\n pass", "def clear(self):\n self.blocks.clear()", "def reset(self):\n for Myturtle in self._turtles:\n Myturtle._setmode(self._mode)\n Myturtle.reset()", "def clear(self):\n ...", "def clear(self):\n for i in range(0, len(self.weights)):\n self.weights[i] = 0", "def clear(self):\n 
self.fill(None)", "def clear(self):\n pass", "def clear(self):\n pass", "def clear(self):\n pass", "def destroy(self):\n\t\tfor team in range(len(self.dots)): #will cycle through each team\n\t\t\tfor i in range(len(self.dots[team])): #will cycle through each member of the team\n\t\t\t\tdot = self.dots[team][i]\n\t\t\t\tdot.removeNode()\n\t\tself.mousePosition.removeNode()\n\t\tself.mapimage.removeNode()\n\t\tself.map.removeNode()", "def clear(self):\n self.clear_actors()\n if self.__charts is not None:\n self._charts.deep_clean()\n self.remove_all_lights()\n self.RemoveAllViewProps()\n self.Modified()\n\n self._scalar_bar_slots = set(range(MAX_N_COLOR_BARS))\n self._scalar_bar_slot_lookup = {}", "def ClearTools(self):\r\n\r\n self.Clear()", "def kill_all(self):\n self.settings['lights_on'] = 12\n self.settings['lights_off'] = 12\n self.settings['overhead_level'] = 0\n self.settings['soil_1'] = 0\n self.settings['soil_2'] = 0\n self.settings['soil_3'] = 0\n self.settings['soil_4'] = 0\n self.scale_overhead_level.set(self.settings['overhead_level'])\n self.scale_smc1.set(self.settings['soil_1'])\n self.scale_smc2.set(self.settings['soil_2'])\n self.scale_smc3.set(self.settings['soil_3'])\n self.scale_smc4.set(self.settings['soil_4'])\n self.active_changes = True # (flag) Once changes are retrieved, we assume that they will be sent to the controller", "def free(self):\n self._cords.free()\n self._colors.free()", "def reset_all(self):\n for i, stop in enumerate(self):\n stop._map = self\n stop.reset()", "def reset_all(self) -> None:\n for metric in self:\n metric.reset()", "def clear(self) -> None:\n for y in range(self.width):\n for x in range(self.height):\n self.set_value(Point(y, x), FieldState.EMPTY)", "def reset(self):\n\n self.ids.score.text = '0'\n self.ids.end_button.text = 'submit'\n self.ids.end_button.disabled = True\n self.ids.end_button.opacity = 0\n self.time = None\n\n board = self.ids.board\n\n for item in board.children:\n if isinstance(item, Space) is True:\n space = game.spacelist[int(item.number)]\n item.text = ''\n item.atom = space.atom\n item.guess = False\n item.correct = False\n item.disabled = False\n\n elif isinstance(item, Marker) is True:\n item.text = ''\n item.disabled = False\n\n for i in range(1, 6):\n self.ids['tracker' + str(i)].color = scheme.white", "def clear(self) -> None:\n ...", "def clr(self):\n self.a = 0.0", "def clearAll(self):\r\n self.metricListWidget.clearSelection()\r\n self.metricListWidget.repaint()", "def reset(self):\n for c in self.children:\n c.reset()\n self.marked = False", "def button_reset(self): \n self.button_1 = arcade.Sprite(settings.button, .7, 0, 0, 0, 0, 50,\n 570)\n self.button_2 = arcade.Sprite(settings.button, .7, 0, 0, 0, 0, 50,\n 75)\n self.button_3 = arcade.Sprite(settings.button, .7, 0, 0, 0, 0, 750,\n 570)\n self.button_4 = arcade.Sprite(settings.button, .7, 0, 0, 0, 0, 750,\n 75)\n a = [1, 2, 3, 4]\n for i in a:\n self.puzzle.remove_value(i)", "def reset(self, fullreset=True):\n self.controlpoints = []\n self.contour = []\n self.ext_energies = []\n self.update()\n if fullreset:\n self.optimized = False", "def full_clear(self):\n self.clear()\n self.class_hooks.clear()", "def Clear(self):\n it = self.ctx.Iterator5(\n self.addr,\n ScType.EdgeDCommonConst,\n ScType.Unknown,\n ScType.EdgeAccessConstPosPerm,\n self.relAddr)\n \n while it.Next():\n self.ctx.DeleteElement(it.Get(1))", "def reset(self):\n self.state.fill(EMPTY)", "def reset(self):\n\n # the 'cached' data to be displayed by the hex view\n self.data = None\n 
self.mask = None\n self.data_size = 0\n self.delta = None\n\n self.address = 0\n self.fade_address = 0\n\n # pinned memory / breakpoint selections\n self._pinned_selections = []", "def Reset(self):\n pass", "def clear_strip(self):\n self.spi.write(bytearray([0] * 4 +\n [255, 0, 0, 0] * self.bmp2led.num_pixels +\n [255] * ((self.bmp2led.num_pixels + 15) //\n 16)))", "def clear(self):\n self.xi[:] = 0\n self.meanlogr[:] = 0\n self.weight[:] = 0\n self.npairs[:] = 0", "def clear(self):\n board.change_grid(self.x, self.y, 0)", "def Clear(self):\n self.output = []\n self.out_byte = 0\n self.out_boff = 0\n self.idx_byte = 0\n self.idx_boff = 0", "def reset(self):\n \n pass", "def clear(self):\n self.nodes = list()\n self.inputs = list()\n self.nodes += [self]" ]
[ "0.7280109", "0.71445733", "0.70629704", "0.7045428", "0.7045238", "0.6974367", "0.6902242", "0.6812371", "0.679938", "0.6789614", "0.6783468", "0.6701193", "0.6695715", "0.66421294", "0.6637276", "0.6621712", "0.6612257", "0.6599522", "0.6587036", "0.6560823", "0.65587413", "0.6557263", "0.6530591", "0.6514564", "0.6512429", "0.6502498", "0.64983493", "0.6476238", "0.6462477", "0.64595515", "0.64412266", "0.6437849", "0.64189225", "0.63954943", "0.6393635", "0.6348698", "0.6306452", "0.62826705", "0.62826705", "0.62826705", "0.62826705", "0.62826705", "0.62826705", "0.62826705", "0.62791294", "0.6277002", "0.6277002", "0.62725645", "0.62681794", "0.6242505", "0.62352467", "0.622942", "0.6224886", "0.6223442", "0.62229514", "0.6216571", "0.62074566", "0.62074566", "0.6201602", "0.6199695", "0.6197135", "0.6196919", "0.61945814", "0.6194384", "0.6193694", "0.618523", "0.61833733", "0.6181445", "0.6181167", "0.617389", "0.61687446", "0.616773", "0.616773", "0.616773", "0.6166", "0.6158737", "0.61482364", "0.61473215", "0.61391616", "0.6129238", "0.6125768", "0.6123486", "0.6119795", "0.6117107", "0.61153907", "0.61082864", "0.61052114", "0.6092857", "0.6086981", "0.6085673", "0.6075046", "0.60723597", "0.607082", "0.6070809", "0.6069781", "0.60671765", "0.60667855", "0.6060278", "0.6058809", "0.6058181" ]
0.7821158
0
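The clearleds entry above and the setled entry that follows appear to belong to the same DotStar-style driver, in which each LED occupies four buffer bytes (start byte, then blue, green, red). A minimal sketch of how the two methods could fit together is given below; the class name, the LED count, the contents of emptybuffer, and the usage lines are illustrative assumptions and are not part of the dataset rows.

# Minimal sketch, assuming a DotStar-style 4-bytes-per-LED buffer.
# Class name, numleds, and emptybuffer layout are assumptions for illustration.
class DotStarSketch:
    def __init__(self, numleds=8):
        self.numleds = numleds
        # one 4-byte frame per LED: start/brightness byte, then blue, green, red
        self.emptybuffer = [255, 0, 0, 0] * numleds
        self.buffer = self.emptybuffer[:]

    def clearleds(self):
        # restore every LED frame to the empty (all-off) state
        self.buffer = self.emptybuffer[:]

    def setled(self, led, red=0, green=0, blue=0):
        # Set the offset for the bytes to be sent over SPI
        offset = led * 4
        self.buffer[offset] = 255  # equals a 1 or 0
        self.buffer[offset + 1] = blue
        self.buffer[offset + 2] = green
        self.buffer[offset + 3] = red

strip = DotStarSketch(numleds=4)
strip.setled(0, red=255)  # first LED full red, others untouched
strip.clearleds()         # everything back to off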
This sets the LED to the specified color
def setled(self, led, red=0, green=0, blue=0):
    # Set the offset for the bytes to be sent over SPI
    offset = led * 4
    self.buffer[offset] = 255  # equals a 1 or 0
    self.buffer[offset + 1] = blue
    self.buffer[offset + 2] = green
    self.buffer[offset + 3] = red
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_color(self, color):\n pass", "def led(color: int, /) -> None:", "def set_color(self, color):\n\t\tpass", "def set_status_led(self, color):\n raise NotImplementedError", "def set_led_color(color):\n requests.post('http://192.168.4.1/pixel', data=json.dumps(color))", "def set_color(self, color):\n self.color = color", "def change_color(self, color):\n self.color = color", "def set_color(self, new_color):\n self.color = new_color", "def set_color(color):\r\n global _current_color\r\n _current_color = color", "def set_color(self, color):\n self._color = color", "def set_color(self, color: str):\n self.color = color", "def set_light_rgb(self, light, color):\n light_kwargs = { \"rgb_color\": color }\n if not self.use_current_brightness:\n light_kwargs[\"brightness\"] = 255\n self.turn_on(light, **light_kwargs)", "def led(color: Tuple[int, int, int], /) -> None:", "def set_color(self, color):\n self.light_color = color\n for f in self.color_change_cb:\n f(self)", "def lamp_set_color(color):\n addresses = lamp_addresses\n\n r = hex(color[0])[2:].zfill(2)\n g = hex(color[1])[2:].zfill(2)\n b = hex(color[2])[2:].zfill(2)\n\n # the 01 here at the beginning of the message means that\n # the lamp will hold the color even when the connection is lost\n value = \"01\" + r + g + b\n\n for address in addresses:\n command = \"gatttool -b {} -t random --char-write --handle=0x0011 --value={}\".format(address, value)\n logging.debug(\"Sending command to RFduino: \" + command)\n os.system(command)", "def set_led_color():\n webserver_node = webserver_publisher_node.get_webserver_node()\n try:\n data = request.json\n if int(data[\"red\"]) < 0 or int(data[\"red\"]) > 255 \\\n or int(data[\"green\"]) < 0 or int(data[\"green\"]) > 255 \\\n or int(data[\"blue\"]) < 0 or int(data[\"blue\"]) > 255:\n return jsonify(success=False, reason=\"Input is not valid\")\n # Convert to PWM\n red = int(data[\"red\"]) * LED_SCALING_FACTOR\n green = int(data[\"green\"]) * LED_SCALING_FACTOR\n blue = int(data[\"blue\"]) * LED_SCALING_FACTOR\n webserver_node.get_logger().info(\"Set LED Color: \"\n f\"Red: {red} \"\n f\"Green: {green} \"\n f\"Blue: {blue}\")\n\n set_led_color_req = SetLedCtrlSrv.Request()\n set_led_color_req.red = red\n set_led_color_req.green = green\n set_led_color_req.blue = blue\n set_led_color_res = call_service_sync(webserver_node.set_led_color_cli,\n set_led_color_req)\n if set_led_color_res and set_led_color_res.error == 0:\n data = {\"success\": True}\n else:\n webserver_node.get_logger().error(\"Set led color service call failed\")\n data = {\n \"reason\": \"Error\",\n \"success\": False\n }\n return jsonify(data)\n\n except Exception as ex:\n webserver_node.get_logger().error(f\"Unable to reach set led color server: {ex}\")\n return jsonify(success=False, reason=\"Error\")", "def setColor(self, color, group=None):\n group = group is None and self.group or group\n r = self.controller.send(self.light.color(milight.color_from_rgb(*color), group))\n logger.debug('Set color to %s (group: %s): %s' % (color, self.group, r))", "def set_color(self, color: str):\n self.color = bytes.fromhex(color.replace('#', ''))", "def setColor(self, color):\n self.__color = color", "def set_color(self, color):\n # type: (Color) -> None\n\n self.color = color", "def setColor(color):\n turtleTmp.color = color\n turtleTmp.penColor(color)", "def _update_color(self, color):\n self.color = color", "def led(red: int, green: int, blue: int, /) -> None:", "def color(self, color):\n #self._color = color\n new_color = 
\"{0}{1}{2}\".format(hex(int(color[0]))[2:].zfill(2),\n hex(int(color[1]))[2:].zfill(2),\n hex(int(color[2]))[2:].zfill(2))\n #self.log.info(\"RASPLes.color(%s : %s -> %s)\" % (self.number, color, new_color))\n #print(\"color(%s -> %s)\" % (self.number, new_color))\n try:\n self.current_color = new_color\n #self.strip.setPixelColor(int(self.number), self.current_color)\n self.strip.setPixelColorRGB(int(self.number), color[0], color[1], color[2])\n\n self.strip.updated = True\n except Exception as e:\n self.log.error(\"led update error\" + str(e))", "def setPixelColor(self, n, color):\n\t\t#print \"pxl %s = %s\" % (n, color)\n\t\tif isinstance(n, slice):\n\t\t\tself.leds[n] = [color]*len(self.leds[n])\n\t\telse:\n\t\t\tif n >= 0 or n <= self.size:\n\t\t\t\tself.leds[n] = color\n\t\t#pprint(self.leds)", "def fill(self, color: int) -> None:\n red = (color >> 16) & 0xFF\n green = (color >> 8) & 0xFF\n blue = color & 0xFF\n for x in range(24):\n offset = unpack_from(\">HHH\", self.ledmap_bytes, x * 6)\n self._is31[offset[self.r_offset]] = red\n self._is31[offset[self.g_offset]] = green\n self._is31[offset[self.b_offset]] = blue", "def fill(self, color: int) -> None:\n red = (color >> 16) & 0xFF\n green = (color >> 8) & 0xFF\n blue = color & 0xFF\n for x in range(24):\n offset = unpack_from(\">HHH\", self.ledmap_bytes, x * 6)\n self._is31[offset[self.r_offset]] = red\n self._is31[offset[self.g_offset]] = green\n self._is31[offset[self.b_offset]] = blue", "def color(self, color):\n\n self.container['color'] = color", "def _set_color(color):\r\n\r\n return FontColor.get_color(color, add_reset=False)", "def set_light_color(self, light_color):\n\n self.light_color = light_color", "def setColor(clr):\n if type(clr) == types.StringType:\n setColorString(clr)\n return \n if type(clr) == types.IntType:\n setColorIndex(clr)\n return\n if type(clr) == types.TupleType:\n setColorRGB(*clr)", "def set_color(self, red, green, blue, white):\n color_specs = [self._red_spec, self._green_spec, \n self._blue_spec, self._white_spec]\n\n for spec, color in zip(color_specs, [red, green, blue, white]):\n driver = DRIVERS[spec.addr]\n driver.set_time_off(spec.pin, color)", "def brighter_switch(turtle, color):\n turtle.fillcolor(color + \"1\")", "async def set_rgb_led(self,\n red=0,\n green=0,\n blue=0,\n save_as_user_led_color=False,\n wait_for_response=True,\n reset_inactivity_timeout=True,\n response_timeout_in_seconds=None):\n command = _create_set_rgb_led_command(red,\n green,\n blue,\n save_as_user_led_color,\n sequence_number=self._get_and_increment_command_sequence_number(),\n wait_for_response=wait_for_response,\n reset_inactivity_timeout=reset_inactivity_timeout)\n\n await self._send_command(command,\n response_timeout_in_seconds=response_timeout_in_seconds)", "def set_color(color='black', index=-1): # (8)\n if index == -1:\n global color_buffer\n color_buffer = deque([color]*NUM_LEDS, maxlen=NUM_LEDS)\n else:\n color_buffer[index] = color", "def _set_color(self, r):\n c = COLORS[self.color]\n r.setLineColor(c[0], c[1], c[2])\n r.setColor(c[0], c[1], c[2])", "def set_led(self, *args, **kw):\n return self.execute_command('set_led', *args, **kw)", "def _set_color(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"color\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, 
namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"color must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"color\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__color = t\n if hasattr(self, '_set'):\n self._set()", "def _set_color(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"color\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"color must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"color\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__color = t\n if hasattr(self, '_set'):\n self._set()", "def _set_color(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"color\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"color must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"color\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__color = t\n if hasattr(self, '_set'):\n self._set()", "def _set_color(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"color\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)\n 
except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"color must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"color\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__color = t\n if hasattr(self, '_set'):\n self._set()", "def setLeds(number: int, red: int, green: int, blue: int):\n pass", "def setColor(self, color):\n color = rgba(color)\n if color != self._color:\n self._color = color\n self._updateColor(self._color)\n self._updated(ItemChangedType.COLOR)", "def set_color_brightness(color, brightness):\n if brightness == 0:\n put_light_state(False, 0, 0, 16000)\n else:\n put_light_state(\n True,\n 254 * color // 100,\n 254 * brightness // 100,\n 16000\n )", "def set_color(self, color):\n with doc_ctrl.open_command():\n doc_ctrl.set_color(self.lbl, color)\n std_events.document_modified.emit()", "async def Turn_On_Lights_With_Color(\n color: str = Path(..., title=\"Color name or hexadecimal string\")\n) -> Dict[str, Any]:\n busylightapi.manager.light_on(ALL_LIGHTS, color)\n return {\n \"action\": \"on\",\n \"light_id\": \"all\",\n \"color\": color,\n }", "def setColor(pnj, color):\r\n\r\n assert isinstance(color, (int, tuple, str))\r\n pnj[\"color\"] = color", "def initColor(self, color=None, brightness=100):\n r = self.controller.send(self.light.on(self.group))\n logger.debug('Turned on lights (group: %s): %s' % (self.group, r))\n r = self.controller.send(self.light.brightness(brightness, self.group))\n logger.debug('Set brightness to %s (group: %s): %s' % (brightness, self.group, r))\n if color:\n self.setColor(color)", "def set_trace_color(color): #py:set_trace_color\n RUR._set_trace_color_(color)", "def change_color(self, color):\r\n if color == \"black\":\r\n self.color = \"white\"\r\n self.canvas.itemconfig(self.ball, fill='white')\r\n else:\r\n self.color = \"black\"\r\n self.canvas.itemconfig(self.ball, fill='black')", "def dimmer_switch(turtle, color):\n turtle.fillcolor(color + \"4\")", "def set_color(self, r=0, g=0, b=0):\n r = clamp(r)\n g = clamp(g)\n b = clamp(b)\n self._state.color = (r, g, b)\n self.send_command(Command.SET_COLOR, [int(r), int(g), int(b)])", "def set_color(self, color, filled):\n for cell in filled:\n self.board[cell[0], cell[1]] = color", "def setColor(self, color):\n for patch in self._patches:\n patch.setColor(color)", "def color(self, color=0):\n if color not in [0, 1, 2, 3, 4, 5, 6, 7]:\n raise ValueError('color must be a positive integer less than and 8 or 0')\n else:\n self._write(self.__class__.__ESC + 'r' + chr(color))", "def setColor(self, color):\n self.point_color = color\n self.side_color = color\n self.area_color = color", "def color(self, color_value):\n self.app.color = color_value", "def _updateColor(self, color):\n primitive = self._getScenePrimitive()\n if len(primitive.children) != 0:\n primitive.children[0].setAttribute('color', color)", "def setNewColor(self, color: QColor):\n self.drawNewColor = color", "def set_color(color, level):\n ok = True\n if color not in ['r', 'g', 'b']:\n ok = False\n\n if len(level) > 3:\n ok = False\n\n if level in ['0', '1']:\n ok = True\n\n if len(level) == 3 and is_float(level) and 
float(level) < 1:\n ok = True\n\n if ok:\n logger.info('set color '+color+' at level '+level)\n pin = get_pin_by_color(color)\n set_gpio(pin, level)\n return False\n else:\n logger.info('set color, unrecognized combination: color '+color+' at level '+level)\n return False", "def set_color_rgb(r, g, b):\r\n global _current_color\r\n _current_color = (r, g, b)", "def set_red_light(self, value):\n self.diffuse_light[0] = value\n self.redraw()", "async def Turn_On_Light_With_Color(\n light_id: int = Path(..., title=\"Numeric light identifier\", ge=0),\n color: str = Path(..., title=\"Color name or hexadecimal string\"),\n) -> Dict[str, Any]:\n busylightapi.manager.light_on(light_id, color)\n return {\n \"action\": \"on\",\n \"light_id\": light_id,\n \"color\": color,\n }", "def setColour(self, col):\n\t\tself.colour = col", "def setColorString(clr):\n dislin.color(clr)", "def set_pixel_rgb(self, led_num, rgb_color, bright_percent=100):\r\n self.set_pixel(led_num, (rgb_color & 0xFF0000) >> 16,\r\n (rgb_color & 0x00FF00) >> 8, rgb_color & 0x0000FF,\r\n bright_percent)", "def setRandomColor():\n setColor(getRandomColor())", "def UseColor(self, use_color):\n self.use_color = use_color", "def set_led(self, value):\n GPIO.output(LED_PIN, value)", "def set_color(objname, rgb):\r\n return f'\\ncmd.set_color(\"{objname}\", {(rgb[0], rgb[1], rgb[2])})'", "def set_colour(self, address, colour):\n idaapi.set_item_color(address, colour)", "def resetColor(self):\n self.setColor(255, 255, 255 ,255)", "def ledFlash(strip, color, t = 1):\r\n utime.sleep(t)\r\n setStrip(strip, color)\r\n utime.sleep(t)\r\n setStrip(strip, LED_COLOR_OFF)", "def led(self, value):\n self._write(MX_LED, value)", "def color(self, value: tuple) -> None:\n if value in Color.PALETTE:\n self._color = value", "def _set_hsv(self, color):\n\n self.qcolor.setHsv(color[0], color[1], color[2], 255)", "def setColor(self, color):\n color = QtGui.QColor(color)\n color.setAlpha(50)\n self.setBrush(QtGui.QBrush(color))\n color.setAlpha(255)\n self.setPen(QtGui.QPen(color, 1.0))", "def goToRGB(self, color: tuple) -> None:\n r, g, b = color\n self._sendi2c('n', [r, g, b])", "def set_color(mask: int, position: int, color: int):\n return mask | (color << (position << 1))", "def setPixelColor(self, n, color):\n self._logger.debug(\"setPixelColor\")", "def set_led(self, on=True):\n if on:\n GPIO.output(self.LED, GPIO.HIGH)\n else:\n GPIO.output(self.LED, GPIO.LOW)", "def color(self, color):\n if color is None:\n raise ValueError(\"Invalid value for `color`, must not be `None`\") # noqa: E501\n\n self._color = color", "def set_rgbColorAtPowerOn(self, ledIndex, count, rgbValue):\n return self.sendCommand(\"SC\" + str(int(ledIndex)) + \",\" + str(int(count)) + \",\" + (\"%x\" % rgbValue))", "def updateColorFor(self, id, color):\n\n # find the good LED strip\n currentStrip = None\n index = 0\n for LEDStrip in self._LEDStrips:\n if LEDStrip._id == id:\n currentStrip = LEDStrip\n if currentStrip == None:\n index += 1\n \n if currentStrip == None:\n return\n\n self._colors[index] = color", "def color(self, new_color):\n if not isinstance(new_color, str):\n raise TypeError('''Color should be a valid str object''')\n else:\n self._color = new_color", "def _set_backpack_led(self, msg):\n # setup color as integer values\n color = [int(x * 255) for x in [msg.r, msg.g, msg.b, msg.a]]\n # create lights object with duration\n light = cozmo.lights.Light(cozmo.lights.Color(rgba=color), on_period_ms=1000)\n # set lights\n self._cozmo.set_all_backpack_lights(light)", "def 
switch_color(color):\n return \"b\" if color == \"w\" else \"w\"", "def set(self, coords, colors):\n if all(isinstance(e, list) for e in coords):\n # unpack list of coordinates\n for e, c in zip(coords, colors):\n self.set(e, c)\n else:\n led_nr = self.pos_to_led_nr(coords)\n #print \"Setting LED at [%d, %d] (nr. %d) to color %s\" % (coords[0], coords[1], led_nr, colors)\n self.strip.setPixelColor(led_nr, colors)", "def _set_all(self, red, green, blue):\n self.blinkt_iface.WriteValue([0x06, 0x01, red, green, blue], ())", "def the_user_changes_the_color_of_the_device(color):\n web_app.change_property_softassert(\"color\",color)", "def changeColor(self):\n self.layer.new_colormap()", "def setDisabledColor(*args):", "def setDisabledColor(*args):", "def set_led(self, led, value):\n if led < 0 or led > 127:\n raise ValueError('LED must be value of 0 to 127.')\n\n # Calculate position in byte buffer and bit offset of desired LED.\n pos = led // 8\n offset = led % 8\n\n if not value:\n # Turn off the specified LED (set bit to zero).\n self.buffer[pos] &= ~(1 << offset)\n else:\n # Turn on the specified LED (set bit to one).\n self.buffer[pos] |= (1 << offset)", "def setSegmentColor(self, color):\n for segment in self.segments:\n segment.color = color", "def setColorIndex(idx):\n dislin.setclr(idx)", "def setColor(val):\n if isinstance(val, str):\n val = val.upper()\n colors = {\"BLACK\": \"\\u001b[30m\",\n \"RED\": \"\\u001b[31m\",\n \"GREEN\": \"\\u001b[32m\",\n \"YELLOW\": \"\\u001b[33m\",\n \"BLUE\": \"\\u001b[34m\",\n \"MAGENTA\": \"\\u001b[35m\",\n \"CYAN\": \"\\u001b[36m\",\n \"WHITE\": \"\\u001b[37m\",\n \"RESET\": \"\\u001b[0m\"}\n if val in colors.keys():\n print(colors[val], end=\"\")\n return 0\n return -1", "def changeColor( self ):\n\t\t\n\t\tx, y = self.position.xy\n\t\tself.color = ( int((x / WINDOW_X) * 128), int((x / WINDOW_X) * 128) + int((y / WINDOW_Y) * 128 ), int((y / WINDOW_Y) * 128))", "def set_color(self, background_color, color):\n self.background_color = background_color\n self.tile_color = color\n self.controller.refresh_board()", "def setFill(self, color):\n self._reconfig(\"fill\", color)" ]
[ "0.79685086", "0.7963265", "0.7882642", "0.7830038", "0.7777075", "0.7772559", "0.7689048", "0.7609751", "0.7561696", "0.7547319", "0.7526898", "0.75140256", "0.74926007", "0.7445177", "0.74438715", "0.7430585", "0.741375", "0.73898387", "0.73771507", "0.7346666", "0.7305123", "0.72747093", "0.72687155", "0.71840864", "0.7130954", "0.7103072", "0.7103072", "0.70966136", "0.7046952", "0.7029563", "0.6967964", "0.6948798", "0.6924422", "0.6887847", "0.6868575", "0.6865929", "0.6848465", "0.6811287", "0.6811287", "0.6811287", "0.6811287", "0.6798989", "0.67960006", "0.6779764", "0.6775324", "0.67571574", "0.6752627", "0.67466164", "0.6742793", "0.6742194", "0.6740286", "0.6725959", "0.672419", "0.6718614", "0.67122793", "0.67021793", "0.66584283", "0.6609771", "0.66043717", "0.6604235", "0.65972656", "0.6593919", "0.6588328", "0.6556946", "0.65535915", "0.65468264", "0.65445507", "0.65379393", "0.6523506", "0.6514353", "0.6507703", "0.64842826", "0.6463011", "0.6457596", "0.64515984", "0.6449057", "0.6389546", "0.6386578", "0.636371", "0.63579094", "0.6357788", "0.6347943", "0.63428676", "0.63385856", "0.63299257", "0.6327272", "0.63087106", "0.6301098", "0.6286001", "0.6285706", "0.6285045", "0.62839437", "0.62839437", "0.62779385", "0.6277856", "0.62774694", "0.6270267", "0.6269983", "0.6268607", "0.62666315" ]
0.76878345
7
This method sends the LED data over SPI
def send(self): self.spi.send(self.startframe + self.buffer)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send(self, data):\n self._serial.write('spi = SPI(2, SPI.SLAVE, baudrate=500000, polarity=0, phase=0)\\r\\n'.encode('utf-8'))\n self._serial.write('data=bytearray({})\\r\\n'.format(data).encode('utf-8'))\n self._serial.write('spi.send(data, timeout=50000)\\r\\n'.encode('utf-8'))\n sleep(1)", "def _data(self, data):\n# \"\"\"Send data to spi bus of display chip, most DC pin need set to HIGH \"\"\"\n# if self._spi == None: raise \"Do not setting SPI\"\n# GPIO.output( self._spi_dc, 1 )\n# self._spi.writebytes( data )\n raise NotImplementedError", "def send_recv(self, data):\n self._serial.write('spi = SPI(2, SPI.SLAVE, baudrate=500000, polarity=0, phase=0)\\r\\n'.encode('utf-8'))\n self._serial.write('data=bytearray({})\\r\\n'.format(data).encode('utf-8'))\n self._serial.write('list(spi.send_recv(data, timeout=50000))\\r\\n'.encode('utf-8'))\n sleep(1)", "def update(self):\n\t\tfor x in range(self.leds):\n\t\t\tself.spi.write(self.buffer[x])\n\t\t\t#self.spi.flush()\n\t\t\t\n\t\tself.spi.write(bytearray(b'\\x00'))\n\t\tself.spi.flush()", "def _send_data(self, data):\n if isinstance(data, int):\n data = [data]\n self._spi_write(_SPI_DATA, data)", "def send_to_port(self):\r\n time.sleep(2)\r\n # ser.write(\"R\".encode())\r\n ser.flush()\r\n ser.write(\"{},{},{},{},{}\".format(self.x_Pos, self.y_Pos, self.t_Tap, self.U_on, self.u_off).encode())\r\n # ser.flush()\r\n # while (1 == 1):\r\n # mydata = ser.readline().lstrip()\r\n # print(mydata.decode('utf-8'))\r\n # value = str(mydata)\r", "def SPIwriteenable(self):\n data=[0x06];\n self.SPItrans(data);", "def data(self, data):\n self._gpio.set_high(self._dc)\n for t in data:\n\t\t self._spi.write([t])", "def SPItrans(self,data):\n self.data=data;\n self.writecmd(0x01,0x00,len(data),data);\n return self.data;", "def send_traffic_data(serialport, pack):\n pack[0] = 0x01\n pack[1] = 0x00\n serialport.write(pack)\n logging.debug(\"Traffic Data - Sent.\")\n logging.debug(str(pack))", "def SPIsetup(self):\n self.writecmd(0x01,0x10,0,self.data); #SPI/SETUP", "def write(self, buffer):\n utils.print_for_unimplemented_functions(SPI.write.__name__)\n telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_SPI)", "def __transfer(self, data: int):\n self.__spi.writebytes(data)", "def send_data(self, data: int):\n self.write_pin(self.DC_PIN, RPi.GPIO.HIGH)\n self.__transfer([data])", "def _command(self, commands):\n# \"\"\"Send command to spi bus of display chip, most DC pin need set to LOW \"\"\"\n# if self._spi == None: raise \"Do not setting SPI\"\n# GPIO.output( self._spi_dc, 0 )\n# self._spi.writebytes( commands )\n raise NotImplementedError", "def send(self, data):\n \n try:\n self.s.send(data)\n LED.blink(2, 0.1, 0x00ff00)\n print(\"Sending data:\")\n print(data)\n except OSError as e:\n if e.errno == 11:\n print(\"Caught exception while sending\")\n print(\"errno: \", e.errno)\n \n LED.off()\n data = self.s.recv(64)\n print(\"Received data:\", data)\n\n return data", "def send_event(ser, led, total, on, off):\n ser.flushInput()\n # Start byte\n ser.write('~')\n # Led id\n ser.write(chr(led))\n # Total time\n ser.write(chr(total))\n # On Time\n ser.write(chr(on))\n # Off Time\n ser.write(chr(off))", "def send_byte(byte_out):\n GPIO.output(clock_pin, 0)\n # set the chip select to write\n GPIO.output(chip_select, 1)\n # send the byte \n values = [(ord(byte_out) >> i) % 2 for i in range(0, 8)]\n GPIO.setup(data_pins, GPIO.OUT)\n GPIO.output(data_pins, values)\n # flash the clock pin\n GPIO.output(clock_pin, 1)\n GPIO.output(clock_pin, 0)", "def 
send_data(self, SPEED, STEER, BRAKE, GEAR):\n GEAR = 2 if SPEED >= 0.0 else 0\n\n # if self.feedbackMsg.AorM == 0:\n # return\n\n if self.doPIControl is True:\n\n current_speed = self.mps2kph(self.feedbackMsg.speed) # kph\n desired_speed = SPEED # kph\n SPEED, BRAKE = self.PIControl(\n currentSpeed=current_speed, desiredSpeed=desired_speed, brake=BRAKE)\n\n SPEED = abs(SPEED) * 10\n if SPEED > 200:\n SPEED = 200\n elif SPEED < 0:\n SPEED = 0\n\n STEER = STEER * 71\n if STEER > 1999:\n STEER = 1999\n if STEER < -1999:\n STEER = -1999\n\n try:\n\n if STEER >= 0:\n self.DATA[8] = int(STEER // 256)\n self.DATA[9] = int(STEER % 256)\n else:\n STEER = -STEER\n self.DATA[8] = int(255 - STEER // 256)\n self.DATA[9] = int(255 - STEER % 256)\n\n self.DATA[5] = GEAR # GEAR\n self.DATA[6] = int(SPEED // 256)\n self.DATA[7] = int(SPEED % 256)\n self.DATA[10] = BRAKE # BREAK\n self.DATA[11] = self.ALIVE\n\n self.ser.write((self.DATA))\n\n self.ALIVE = self.ALIVE + 1\n if self.ALIVE == 256:\n self.ALIVE = 0\n\n except Exception as ex:\n print(ex)", "def send_event(self, led):\n ser.flushInput()\n # Start byte\n ser.write('~')\n # Led id\n ser.write(chr(led))\n # Total time\n ser.write(chr(self.total))\n # On Time\n ser.write(chr(self.on))\n # Off Time\n ser.write(chr(self.off))", "def setled(self, led, red=0, green=0, blue=0):\n\n # Set the offset for the bytes to be sent over SPI\n offset = led * 4\n self.buffer[offset] = 255 # equals a 1 or 0\n self.buffer[offset + 1] = blue\n self.buffer[offset + 2] = green\n self.buffer[offset + 3] = red", "def _send_command(self, command, data=None):\n self._spi_write(_SPI_COMMAND, [command])\n if data is not None:\n self._send_data(data)", "def _write_spi(self, register, data):\n logger.debug(\"Writing to register {0}: {1}\".format(register, data))\n if isinstance(register, str):\n w_data = [self.registers[register]]\n elif isinstance(register, int):\n w_data = [register]\n else:\n raise (ValueError(\"Register must be string name or int address\"))\n # Select write register:\n self._toggle_pin(\"IO_RESET\")\n # self.spi.writebytes(w_data)\n # # Write data to register:\n w_data = [int(x) for x in np.uint8(data)]\n self.spi.writebytes(w_data)\n\n for x in data:\n w_data.append(int(np.uint8(x)))\n self.spi.writebytes(w_data)", "def apa102_send_bytes( clock_pin, data_pin, bytes_ ):\n \n # implementeer deze functie:\n \n # zend iedere byte in bytes:\n assert len(bytes_) == 4\n for byte in bytes_:\n # zend ieder bit in byte:\n# print(byte)\n assert len(byte) == 8\n for bit in byte:\n GPIO.output(data_pin, bit)\n #time.sleep(.1)\n GPIO.output(clock_pin, 1)\n #time.sleep(.1)\n GPIO.output(clock_pin, 0)\n # maak de data pin hoog als het bit 1 is, laag als het 0 is\n # maak de clock pin hoog\n # maak de clock pin laag", "def _spi_cmd(command, data=None):\n spi_cs.write_digital(CS_ACTIVE)\n spi_dc.write_digital(0)\n spi.write(bytearray([command]))\n if data is not None:\n spi_dc.write_digital(1)\n spi.write(bytearray(data))\n spi_cs.write_digital(CS_INACTIVE)", "def send(self, data):\r\n\r\n self._serial_object.write(data)", "def recv(self):\n self._serial.write('spi = SPI(2, SPI.SLAVE, baudrate=500000, polarity=0, phase=0)\\r\\n'.encode('utf-8'))\n self._serial.write('list(spi.recv(16, timeout=50000))\\r\\n'.encode('utf-8'))\n sleep(1)", "def write(self, data):\n try:\n self.arduino.write(data)\n except Exception:\n raise", "def _spi_write(self, dc, values):\n self._gpio.output(self.cs_pin, 0)\n self._gpio.output(self.dc_pin, dc)\n\n if type(values) is str:\n values = 
[ord(c) for c in values]\n\n for byte_value in values:\n self._spi_bus.xfer([byte_value])\n\n self._gpio.output(self.cs_pin, 1)", "def initiate():\n\n log = \"Initiate the SPI communication of the OPC-N3\"\n logger.debug(log)\n\n time.sleep(1)\n log = \"Sending bytes to the sensor...\"\n logger.debug(log)\n spi.writebytes([0x5A, 0x01])\n reading = spi.readbytes(3)\n log = \"Data read after sending bytes are: \" + str(reading)\n logger.debug(log)\n time.sleep(wait_between_bytes)\n\n log = \"Sending bytes to the sensor...\"\n logger.debug(log)\n spi.writebytes([0x5A, 0x03])\n reading = spi.readbytes(9)\n log = \"Bytes read after sending bytes are: \" + str(reading)\n logger.debug(log)\n time.sleep(wait_between_bytes)\n\n # SPI conncetion\n log = \"Sending bytes to the sensor...\"\n logger.debug(log)\n spi.writebytes([0x5A, 0x02, 0x92, 0x07])\n reading = spi.readbytes(2)\n log = \"Bytes read after sending bytes are: \" + str(reading)\n logger.debug(log)\n time.sleep(wait_between_bytes)\n\n return", "def sendCommand(self, data):\n #make sure data has an even number of elements\n if(len(data) % 2 == 1):\n data.append(0)\n\n #Initiate message as an empty list\n message = []\n\n #Fill message by combining two bytes in one register\n for i in range(0, len(data)/2):\n message.append((data[2*i] << 8) + data[2*i+1])\n\n #To do!: Implement try/except\n with self.lock:\n self.client.write_registers(0, message)", "def transmit(self, data):\n # ascii_stream = bytes(data, 'ascii')\n # self.log.info('Encoding bytestream: %s to ascii: %s', data, ascii_stream)\n try:\n self.ser.write(data)\n # Purge scheduler and reboot transmitter\n except serial.SerialTimeoutException as exc:\n self.log.exception(\"Frame write to transmitter timed out: %s\", exc)\n self.log.info(\"Successfully wrote %s to tesseract transmitter\", data)\n # # disconnect handle\n # if bin_frame is None:\n # # delete `SessionQueue` instance from `ReceiverRegister`\n # self.rec_reg.write(ap_index)\n # else:\n # hex_code = binascii.hexlify(bin_frame.tobytes())\n # hardware_encode = self.cache.cache_map(bin_frame,ap_index)\n # transmit_hex = binascii.hexlify(hardware_encode.tobytes())\n # print(str(hex_code) + \" | To Access Point \" + str(ap_index), end='\\r')\n # if os.environ['TCS_ENV'] == 'dev':\n # # TODO: Move these prints to cache logger\n # self.log.debug(\"Binary Frame Data: %s\", bin_frame)\n # self.log.debug(\"Hardware Mapping: %s\", hardware_encode)\n # self.log.debug(\"Decode from AP0: %s\", self.cache._cache[-1][0])\n # self.log.debug(\"Decode from AP1: %s\", self.cache._cache[-1][1])\n # self.log.debug(\"Decode from AP2: %s\", self.cache._cache[-1][2])\n # self.log.debug(\"Decode from AP3: %s\", self.cache._cache[-1][3])", "def write_data(self, data):\n # send data\n for ptr, value in data:\n self._write_byte(ptr, value)\n # set 'data changed'\n self._write_byte(fixed_format['data_changed'][0], 0xAA)\n # wait for station to clear 'data changed'\n while True:\n ack = _decode(self._read_fixed_block(0x0020),\n fixed_format['data_changed'])\n if ack == 0:\n break\n log.debug('waiting for ack')\n time.sleep(6)", "def expanderWrite( self, _data ): # uint8_t\n\t\t#Wire.beginTransmission(_Addr);\n\t\t#printIIC((int)(_data) | _backlightval) # print II\n\t\tself.i2c.writeto( self.address, bytes( [_data | self._backlightval] ))\n\t\t#Wire.endTransmission();", "def send_data(servo: str, degrees: float, subtract: bool, fire: bool):\n data = json.dumps({\n 'servo': servo,\n 'degrees': degrees,\n 'subtract': subtract,\n 'fire': fire\n })\n 
ser.write(bytes(data.strip('\\n').encode('utf-8') + '\\0'.encode('utf-8')))", "def _send_command(self, data):\n # make sure data has an even number of elements\n if(len(data) % 2 == 1):\n data.append(0)\n\n # Initiate message as an empty list\n message = []\n\n # Fill message by combining two bytes in one register\n for i in range(int(len(data)/2)):\n message.append((data[2*i] << 8) + data[2*i+1])\n\n # To do!: Implement try/except\n self.client.write_registers(0x03E8, message, unit=0x0009)", "def Send(self, data):\n # TODO(josephsih): should have a method to check the connection status.\n # Currently, once RN-42 is connected to a remote host, all characters\n # except chr(0) transmitted through the serial port are interpreted\n # as characters to send to the remote host.\n logging.debug('HID device sending %r...', data)\n self.SerialSendReceive(data, msg='BluetoothHID.Send')\n time.sleep(self.send_delay)", "def sndData(self, dat):\r\n\t\t# comando da inviare\r\n\t\tstrCmd=\"\"\r\n\t\tfor byt in dat:\r\n\t\t\tstrCmd+=(\"%c\" %byt)\r\n\t\t# Non uso sndByte per evitare il ritardo self.dlTx\r\n\t\tself.ser.write(strCmd)\r\n\t\tif self.deb:\r\n\t\t\tfor ele in dat:\r\n\t\t\t\tprint \"\\\\x%02X\" %ele,\r\n\t\t\tprint\r\n\t\tsleep(0.001)", "def send(self, output):\n assert output == 0 or output == 1\n GPIO.output(self.d_out, output)", "def send_data(data_string):\n if connection_type == USE_I2C:\n cmd = \"\"\n cmd += chr( SSD1306_ADDRESS )\n cmd += chr( SELECT_DATA_BYTE )\n cmd += data_string\n i2cWrite(cmd, 10, False)\n else:\n print \"Not implemented for that connection type yet.\"", "def __send__(self,val):\n assert(len(val) == 1)\n assert(type(val) == bytes)\n v = int.from_bytes(val,byteorder=\"little\")\n if(self.verbose):\n pc.color_stdout(\"GREEN\")\n print(\">> %s\\t - %s\\t - %d\"% (hex(v),bin(v),v))\n pc.color_stdout(\"RESET\")\n self.port.write(val)", "def command(self, *cmd):\n self._gpio.set_low(self._dc)\n for t in cmd:\n self._spi.write([t])\n #assert(len(cmd) <= 32)\n #self.bus.write_i2c_block_data(self.addr, self.cmd_mode, list(cmd))", "def data(self, c):\n if self._spi is not None:\n # SPI write.\n self._gpio.set_high(self._dc)\n self._spi.write([c])\n else:\n # I2C write.\n control = 0x40 # Co = 0, DC = 0\n self._i2c.write8(control, c)", "def _send(self, value, mode):\n\n # Choose instruction or data mode\n self.output(self._rs_pin, mode)\n\n # If the RW pin is used, set it to low in order to write.\n if self._rw_pin is not None:\n self.output(self._rw_pin, 0)\n\n # Write data out in chunks of 4 or 8 bit\n if self._bus_mode == self.LCD_8BITMODE:\n self._write8bits(value)\n else:\n self._write4bits(value >> 4)\n self._write4bits(value)", "def send_btn_clicked(self):\n command = self.SendLine.text()\n self.Serial.send(command)", "def request_realtime_info(self):\n self.socket_datastream.sendto(b\"!r\", self.ip_port_arduino_datastream)\n self.socket_datastream.sendto(b\"!s\", self.ip_port_arduino_datastream)", "def send(self,cmd):\n bit_list = '{:b}'.format(int(cmd,16))\n self._lead()\n for i in bit_list:\n self.ir_pin.duty(512)\n time.sleep_us(_Const.NEC_BIT_MARK)\n self.ir_pin.duty(0)\n if i == '0':\n time.sleep_us(_Const.NEC_ZERO_SPACE)\n else:\n time.sleep_us(_Const.NEC_ONE_SPACE)\n self._end()", "def sendInstruction(self, instruction):\n # instruction = '!'\n print(f'Sending: {instruction}')\n self.ser.write(instruction.encode(\"ascii\"))\n self.ser.write('\\n'.encode(\"ascii\"))\n\n self.ser.reset_input_buffer()\n\n ser_bytes = self.ser.read(1)\n print(f'Receiving\\nraw 
data: {ser_bytes}')\n\n # decoded_bytes = (ser_bytes.decode(\"ascii\"))\n # print(f'Ascii Value: {decoded_bytes}', flush=True)", "def send_command_with_parameter(self):\n button = self.sender()\n param = self.spins[button].value() if button != self.BtnSetFine else\\\n get_dac_value(self.SpinFine.value(), self.calibr_table, self.state.range)\n\n answer: str = self.UsbHost.send_command(self.state.ser, self.command_dict[button],\n str(self.state.device_id), param)\n if answer != 'Ok':\n error_message(self.error_dict[button])\n self.statusbar.showMessage(answer_translate[answer])\n else:\n self.statusbar.showMessage(self.result_dict[button])\n try:\n # добавляем размерность\n if self.val_labels[button]:\n self.val_labels[button].setText(str(self.spins[button].value()) + self.val_dimensions[button])\n # устанавливаем зависимость цап и сдвига\n self.set_fine_and_dac(button)\n # пересчитываем значение сдвига\n self.LblMoveVal.setText(str(self.SpinFine.value() + self.SpinRough.value())\n + self.val_dimensions[self.BtnSetRough])\n # пересчитываем результирующую частоту\n if self.state.range:\n self.LblResVal.setText(\"%.4f MГц\" % (states_dict[self.state.range][0] +\n float(self.LblMoveVal.text().split()[0])/1000000))\n except KeyError:\n pass\n self.create_log_message(self.command_dict[button], answer, str(param))", "def send_data(self, data):\n self._transport.write(data)", "def send_reg_command(self):\n button = self.sender()\n if button in self.btns.keys():\n state: str = button.text()\n move = None\n else:\n state = self.state.range\n move = (self.SpinRough.value() // 500) * 500\n param = registers.get_reg_str(state, move)\n answer: str = self.UsbHost.send_command(self.state.ser, \"SetAdf14\", str(self.state.device_id),\n param)\n if answer == 'Ok':\n self.statusbar.showMessage(self.result_dict[button])\n # при установлении диапазона, он записывается в интерфейсе везде\n if button in self.btn.keys():\n self.state.range = button.text()\n self.LblFreqVal.setText(button.text())\n self.LblResVal.setText(str(states_dict[button.text()][0] +\n self.SpinFine.value() + self.SpinRough.value()))\n # все кнопки серые, кроме кнопки режима\n for btn in self.btns.keys():\n btn.setStyleSheet(\"\")\n self.btns[btn].setStyleSheet(\"font: 16px\")\n button.setStyleSheet(\"background-color : rgb(70, 210, 00)\")\n self.btns[button].setStyleSheet(\"color: red; font: bold 16px\")\n else:\n error_message(self.error_dict[button])\n self.statusbar.showMessage(answer_translate[answer])\n\n # add commands to log\n param = registers.get_reg_str(state, move).replace(\" \", '\\n')\n param.replace(\" \", \"\\n\")\n self.create_log_message(button.text(), answer, param)\n\n # set sw command\n if answer == 'Ok' and button in self.btn.keys():\n params = '0 1' if button == self.BtnL1 else \"1 0\"\n self.set_sw(params)", "def write_firmware(self, data):\n self.check_validity()\n\n data = list(map(int, data))\n\n return self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_WRITE_FIRMWARE, (data,), '64B', 9, 'B')", "def _write(self, data):\n\n ret = False\n extended_address = SettingsBase.get_setting(self, \"extended_address\")\n addr = (extended_address, 0xe8, 0xc105, 0x11)#prey this works, I can't test it...\n try:\n self.__xbee_manager.xbee_device_xmit(0xe8, data, addr)\n ret = True\n print \"success!\" #\n except:\n print \"(..., 0xc105, 0x11) faild, trying (..., 0, 0)\" #\n try: #\n addr = (extended_address, 0xe8, 0, 0) #\n self.__xbee_manager.xbee_device_xmit(0xe8, data, addr) #\n ret = True #\n print 
\"success!\" #\n except: #\n print \"(..., 0, 0) faild\" #\n pass\n return ret", "def _write_v1(self, data):\n return self.usb_dev.write(self.ep_out, data, self.interface, self.usb_wr_timeout)", "def wr32(add,dat):\r\n global this_fpga_ip\r\n global this_sock\r\n global this_udp_port\r\n \r\n pkt = array.array('L')\r\n pkt.append(1001) #psn\r\n pkt.append(2) #opcode\r\n pkt.append(1) #noo\r\n pkt.append(add) #sa\r\n pkt.append(dat) #dat\r\n \r\n this_sock.sendto(bytes(pkt.tostring()),(this_fpga_ip,this_udp_port))\r\n data, addr = this_sock.recvfrom(10240)\r\n return", "def send(self, value, char_mode=False):\n \n # Sends 8b ``value`` in ``char_mode``.\n # :param value: bytes\n # :param char_mode: character/data mode selector. False (default) for\n # data only, True for character bits.\n # one ms delay to prevent writing too quickly.\n time.sleep(0.001)\n # set character/data bit. (charmode = False)\n self.reset.value = char_mode\n # WRITE upper 4 bits\n self.dl4.value = ((value >> 4) & 1) > 0\n self.dl5.value = ((value >> 5) & 1) > 0\n self.dl6.value = ((value >> 6) & 1) > 0\n self.dl7.value = ((value >> 7) & 1) > 0\n # send command\n self._pulse_enable()\n # WRITE lower 4 bits\n self.dl4.value = (value & 1) > 0\n self.dl5.value = ((value >> 1) & 1) > 0\n self.dl6.value = ((value >> 2) & 1) > 0\n self.dl7.value = ((value >> 3) & 1) > 0\n self._pulse_enable()", "def sendData(self, data):\n self.tx.sendBuffer(data)", "def WriteFLASH_old(self, data):\n # print('program flash start')\n start_addr = 9 << 18\n cmd = self.board_def.CMD_WRITE_MEM\n pad = 0xFFFFFF\n #I need to pack bank into 4 bytes and then only use the 3\n packedPad = struct.pack(\"L\", pad)\n unpackedPad = struct.unpack('4b', packedPad)\n length = len(data)\n packet = struct.pack(\"4bLL\", cmd, unpackedPad[0], unpackedPad[1], unpackedPad[2], start_addr, length)\n #Next I need to send the command\n self.send_data(packet)\n #next read from the socket\n recv_stat, recv_data = self.receive_data()\n if recv_stat != 0x0:\n print ('Ram Write cmd Error stat={}!!!'.format(recv_stat))\n return self.board_def.STAT_ERROR\n\n self.send_data(data)\n #next read from the socket to ensure no errors occur\n self.sockfd.settimeout(1000);\n stat, data = self.receive_data()\n self.sockfd.settimeout(5)\n # print(packet)\n\n # print('program flash end')\n if stat != 0x0:\n print ('Ram Write Error stat={}!!!'.format(stat))\n return self.board_def.STAT_ERROR", "def command(self, c):\n if self._spi is not None:\n # SPI write.\n self._gpio.set_low(self._dc)\n self._spi.write([c])\n else:\n # I2C write.\n control = 0x00 # Co = 0, DC = 0\n self._i2c.write8(control, c)", "def send_command(self, command):\n self.enable_serial_port(self.port)\n time.sleep(.2)\n self.serial_com.write(command.encode() + b'\\r\\n')\n time.sleep(.2)", "def callback_serial_write(data):\n serial_write(data.data)", "def _write(self, register, data):\n\n # data: list of bytes to write to register\n assert register in _registers, '%r is not a valid register. Register must be passed as string.' %register\n assert len(data) == _register_len[register], 'Must pass %r byte(s) to %r register.' 
%(_register_len[register], register)\n\n # send the register we want to write to\n self.spi.writebytes([_registers[register]])\n\n # send the bytes we write to the register\n self.spi.writebytes(data)", "def sn(self):\n\t\tstring = []\n\t\tresp = [0x00]\n\t\tself.spi.transfer([0x10], [0x00], 1)\n\t\ttime.sleep(9e-3)\n\t\tfor i in range(60):\n\t\t\tself.spi.transfer([0x00], resp, 1)\n\t\t\tstring.append(chr(resp[0]))\n\t\ttime.sleep(0.1)\n\t\treturn ''.join(string).strip()", "def write(self, data):\n try:\n self.ser.write(data)\n except SerialException as se:\n log.debug('Serial connection write error: {}'.format(se))", "def s_write(self, data):\n self.s.flushOutput()\n\n if self.s.is_open:\n try:\n self.s.write(data)\n if self.log_output:\n self.logfile.write('\\nIN :' + str(len(data)) + '[' + hexlify(data) + ']' + '\\n')\n except Exception as e:\n print(\"Could not write to port \" + str(e))\n else:\n raise IOError('Comport is not open, use ctl_connect()')", "def leds(self, state):\n if state:\n state = b'\\x01'\n else:\n state = b'\\x00'\n msg = b'\\x0D' + state\n self.__bt.write(msg)", "def send(self, data):", "def write_cmd(self, cmd):\n self._dc_pin.value = False\n with self.spi_device as spi:\n spi.write(bytearray([cmd])) # pylint: disable=no-member", "def _send_data(self, data, time):\n pass", "def send_to_engine(self, wi):\n pass", "def __init__(self, leds):\n self.ledcount = leds\n # create a buffer\n self.buffersize = self.ledcount * 4\n self.buffer = bytearray(self.ledcount * 4)\n self.emptybuffer = bytearray(self.ledcount * 4)\n for i in range(0, self.buffersize, 4):\n self.emptybuffer[i] = 0xff\n self.emptybuffer[i + 1] = 0x0\n self.emptybuffer[i + 2] = 0x0\n self.emptybuffer[i + 3] = 0x0\n # Start frame and endframe for the SPI communication (end frame is not\n # needed)\n self.startframe = bytes([0x00, 0x00, 0x00, 0x00])\n self.endframe = bytes([0xff, 0xff, 0xff, 0xff])\n # initialize SPI (needs to be at 45 MHz in order to maximize the speed.\n # This is the limiting factor for the system's speed)\n self.spi = SPI(1, SPI.MASTER, baudrate=45000000,\n polarity=0, phase=0, bits=8, firstbit=SPI.MSB)\n self.clearleds()", "def use_spi():\n _LIB.oled_click_use_spi()", "def write(self):\n mask = 0\n for pin in self.pins:\n if pin.mode == OUTPUT:\n if pin.value == 1:\n pin_nr = pin.pin_number - self.port_number * 8\n mask |= 1 << pin_nr\n msg = chr(DIGITAL_MESSAGE + self.port_number)\n msg += chr(mask % 128)\n msg += chr(mask >> 7)\n self.board.sp.write(msg)", "def write(self, data: bytes) -> None:\n self.device.write(binascii.unhexlify(data))", "def sendValue(self, value):\n\n print(f'Sending: {value}\\n')\n self.ser.write(bytes([value]))\n self.ser.write('\\n'.encode(\"ascii\"))\n\n self.ser.reset_input_buffer()\n ser_bytes = self.ser.read(1)\n print(f'Receiving\\nraw data: {ser_bytes}')\n\n\n #decoded_bytes = (ser_bytes.decode(\"ascii\"))\n\n #print(f'Ascii Value: {decoded_bytes}', flush=True)", "def send_packet(self, buffer_id, raw_data, out_port, in_port):\n # We tell the switch to take the packet with id buffer_if from in_port \n # and send it to out_port\n # If the switch did not specify a buffer_id, it must have specified\n # the raw data of the packet, so in this case we tell it to send\n # the raw data\n msg = of.ofp_packet_out()\n msg.in_port = in_port\n if buffer_id != -1 and buffer_id is not None:\n # We got a buffer ID from the switch; use that\n msg.buffer_id = buffer_id\n else:\n # No buffer ID from switch -- we got the raw data\n if raw_data is None:\n # No raw_data 
specified -- nothing to send!\n return\n msg.data = raw_data\n \n # Add an action to send to the specified port\n if out_port == of.OFPP_FLOOD:\n # send to all active ports according to STP\n for outPort in self.ports_use:\n if outPort != in_port:\n action = of.ofp_action_output(port=outPort)\n msg.actions.append(action)\n else:\n action = of.ofp_action_output(port=out_port)\n msg.actions.append(action)\n # Send message to switch\n self.connection.send(msg)", "def _send_data_to_wbt(self,nnData):\n\t\tnnData += \"END\\n\"\n\t\tself._conn.send(nnData)", "def send_data(self, data):\r\n try:\r\n self.sock.sendto(data, self.addr)\r\n except Exception:\r\n print(\"Cant't send a package\")", "def send_data(self, str_data):\n try:\n self.s.sendall(str_data.encode())\n except OSError as e:\n print(e)", "def serial_write(data):\n global ser\n if ser.writable():\n ser.write(data)\n else:\n print 'The serial', ser.portstr, 'cannot be written.'", "def sendtoserial(self, module, msg):\n self.send(\"sendserial/{}/{}:{}\\n\".format(self.msg_id, module, msg))\n self.msg_id += 1", "def write(byte, inc=False):\n # Make the D-ports output\n for port in D_ports:\n GPIO.setup(port, GPIO.OUT)\n\n # write the byte as zero-padded binary\n byte_string = '{0:08b}'.format(byte)\n # put the reversed bits on the D-pins\n for i,b in enumerate(byte_string[::-1]):\n set(D_ports[i], int(b))\n # READ to HI (inactive)\n on(READ)\n # pulse WR to LO (active) for 5ms\n pulse_lo(WRITE, length=0.005)\n # OE to LO (active)\n off(READ)\n\n # Put the ports back to read and they should read the byte written\n for port in D_ports:\n GPIO.setup(port, GPIO.IN)\n\n if inc:\n incr()", "def send_serial_command(data):\n print(data)\n serial_command = data\n SERIAL_PARENT.send(serial_command)\n OUTGOING.append(serial_command)", "def led1(self, val):\n data = val & self.LED1_MASK\n self._ftdi.spi_write(self.LED1_ADDR, [data], burst='fixed')", "def _sendSRAM(self, data):\n p = self.makePacket()\n self.makeSRAM(data, p)\n p.send()", "def transmit(self) -> None:\n # Like RadioHead library, turn on high power boost if enabled.\n self.set_boost(_TEST_PA1_BOOST)\n # Enable packet sent interrupt for D0 line.\n self.dio_0_mapping = 0b00\n # Enter TX mode (will clear FIFO!).\n self.operation_mode = TX_MODE", "def wr(self, port, val):\n hw = self.device.peripherals[port]\n hw.ODR.wr(val)", "def main():\n if not bcm2835_init():\n return\n\n bcm2835_spi_begin()\n bcm2835_spi_setBitOrder(BCM2835_SPI_BIT_ORDER_MSBFIRST) # The default\n bcm2835_spi_setDataMode(BCM2835_SPI_MODE0) # The default\n bcm2835_spi_setClockDivider(BCM2835_SPI_CLOCK_DIVIDER_65536) # The default\n bcm2835_spi_chipSelect(BCM2835_SPI_CS0) # The default\n bcm2835_spi_setChipSelectPolarity(BCM2835_SPI_CS0, LOW) # the default\n\n # Send a byte to the slave and simultaneously read a byte back from the slave\n # If you tie MISO to MOSI, you should read back what was sent\n data = bcm2835_spi_transfer(0x23);\n print(\"Read from SPI: %02X\" % data)\n\n bcm2835_spi_end()\n bcm2835_close()", "def _send_command(self, command):\n self._serial_port.write(command + '\\n')\n self._serial_port.flush()", "def send(self, seq_number, *voltage_list):\r\n\r\n timestamp = time.perf_counter()\r\n volt_list = list()\r\n for volt in voltage_list:\r\n volt_list.append(volt)\r\n try:\r\n self.sock.sendto(struct.pack(packet.H2R_PACKET_FORMAT, seq_number, packet.time2int(timestamp), *volt_list),\r\n (self.robot_ip, self.robot_port))\r\n\r\n self.tx_cntr.inc()\r\n\r\n self.tastx_ks[seq_number + 1] = 
timestamp\r\n\r\n try:\r\n tsr_k = self.tsr_ks[seq_number]\r\n tsstx_k = self.tsstx_ks[seq_number]\r\n tssrx_k = self.tssrx_ks[seq_number]\r\n tastx_k = self.tastx_ks[seq_number]\r\n tasrx_k = self.tasrx_ks[seq_number]\r\n taw_k = self.taw_ks[seq_number]\r\n\r\n if packet.time2int(tasrx_k) is not 0:\r\n self.tsr_k_logger.timestamp(timestamp=tsr_k, value=tsr_k)\r\n self.tsstx_k_logger.timestamp(timestamp=tsstx_k, value=tsstx_k)\r\n\r\n self.tssrx_k_logger.timestamp(timestamp=tssrx_k, value=tssrx_k)\r\n self.tastx_k_logger.timestamp(timestamp=tastx_k, value=tastx_k)\r\n\r\n self.tasrx_k_logger.timestamp(timestamp=tasrx_k, value=tasrx_k)\r\n self.taw_k_logger.timestamp(timestamp=taw_k, value=taw_k)\r\n\r\n del self.tsr_ks[seq_number - 1]\r\n del self.tsstx_ks[seq_number - 1]\r\n\r\n del self.tasrx_ks[seq_number - 1]\r\n del self.taw_ks[seq_number - 1]\r\n\r\n except KeyError:\r\n logging.debug(\"Packet not found\")\r\n\r\n except socket.error:\r\n logging.error('Tx error')\r\n return", "def on(self):\n\t\trb0 = [0x00]\n\t\trb1 = [0x00, 0x00]\n\t\tattempts = 0\n\n\t\twhile self.state != ON and attempts < MAX_RETRIES:\n\t\t\tself.spi.transfer([0x03], rb0, 1)\t\t## Send the command byte; response will be written to rb0\n\t\t\ttime.sleep(9e-3) \t\t\t\t\t\t## Sleep for 9 ms\n\t\t\tself.spi.transfer([0x00, 0x01], rb1, 2)\t## Send the following 2 bytes; response will be written to rb1\n\t\t\ttime.sleep(0.1)\n\n\t\t\tif rb0[0] < 0: \t\t\t\t\t\t## Account for implicit unsigned-to-signed \n\t\t\t\trb0[0] += 256\t\t\t\t\t## conversion from the transfer operation\n\n\t\t\tattempts += 1\n\t\t\tprint(f\"[{self.__class__.__name__}::on]\", end=' ')\n\t\t\tif rb0[0] == 0xF3 and rb1[0] == 0x03: \t## Ensure response values are as expected\n\t\t\t\tself.state = ON \n\t\t\t\tprint(\"SUCCESS -- device powered on.\")\n\t\t\telse:\n\t\t\t\tif attempts != MAX_RETRIES:\n\t\t\t\t\tprint(f\"Attempt #{attempts} failed -- retrying after delay ...\")\n\t\t\t\t\ttime.sleep(RETRY_DELAY)\n\t\t\t\telse:\n\t\t\t\t\tprint(\"ERROR -- command failed.\")\n\n\t\treturn self.state == ON", "def spi_controller(\n # ---[ Module Ports]---\n glbl, # global interface, clock, reset, etc.\n spibus, # external SPI bus\n # optional ports\n fifobus=None, # streaming interface, FIFO bus\n mmbus=None, # memory-mapped bus, contro status access\n cso=None, # control-status object\n \n # ---[ Module Parameters ]---\n include_fifo=True, # include aan 8 byte deep FIFO\n):\n clock, reset = glbl.clock, glbl.reset\n if cso is None:\n cso = spi_controller.cso()\n\n # -- local signals --\n ena = Signal(False)\n clkcnt = Signal(modbv(0, min=0, max=2**12))\n bcnt = Signal(intbv(0, min=0, max=8))\n\n # separate tx and rx shift-registers (could be one in the same)\n treg = Signal(intbv(0)[8:]) # tx shift register\n rreg = Signal(intbv(0)[8:]) # rx shift register\n\n x_sck, x_ss, x_mosi, x_miso = Signals(bool(0), 4)\n\n # internal FIFO bus interfaces\n # external FIFO side (FIFO to external SPI bus)\n itx = FIFOBus(size=fifobus.size, width=fifobus.width)\n # internal FIFO side (FIFO to internal bus)\n irx = FIFOBus(size=fifobus.size, width=fifobus.width)\n \n states = enum('idle', 'wait_hclk', 'data_in', 'data_change',\n 'write_fifo', 'end')\n state = Signal(states.idle)\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # memory- mapped registers\n # add the peripheral's regfile to the bus (informational only)\n # @todo: the automatic building of the register files is incomplete\n if mmbus is not None:\n # the register-file 
(rf) will drive all the cso signals\n rf = cso.get_register_file()\n mmbus.add(rf, 'spi')\n\n # FIFO for the wishbone data transfer\n if include_fifo:\n fifo_fast.debug = spi_controller.debug\n fifo_tx_inst = fifo_fast(reset, clock, itx)\n fifo_rx_inst = fifo_fast(reset, clock, irx)\n\n @always_comb\n def rtl_assign():\n cso.tx_fifo_count.next = itx.count\n cso.rx_fifo_count.next = irx.count\n\n if clkcnt > 0:\n ena.next = False\n else:\n ena.next = True\n\n clock_counts = tuple([(2**ii)-1 for ii in range(13)])\n\n @always(clock.posedge)\n def rtl_clk_div():\n if cso.enable and clkcnt != 0 and state != states.idle:\n clkcnt.next = (clkcnt - 1)\n else:\n clkcnt.next = clock_counts[cso.clock_divisor]\n\n @always_seq(clock.posedge, reset=reset)\n def rtl_state_and_more():\n \"\"\"\n Designed to the following timing diagram\n\n SCK CPOL=0 ______/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\ \n CPOL=1 ------\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/---\\___/ \n SS ---\\_______________________________________________________________________ \n CPHA=0 MOSI ...|.0....|.1.....|.2.....|.3.....|.4.....|.5.....|.6.....|.7.....|.0.....| \n MISO ...|.0....|.1.....|.2.....|.3.....|.4.....|.5.....|.6.....|.7.....|.0.....| \n CPHA=1 MOSI ...|....0.....|.1.....|.2.....|.3.....|.4.....|.5.....|.6.....|.7.....|.0...\n MISO ......|.0.....|.1.....|.2.....|.3.....|.4.....|.5.....|.6.....|.7.....|.0...\n \"\"\"\n if not cso.enable:\n state.next = states.idle\n bcnt.next = 0\n treg.next = 0\n \n itx.read.next = False\n irx.write.next = False\n\n x_sck.next = False\n x_ss.next = False\n else:\n if not cso.freeze:\n # ~~~~ Idle state ~~~~\n if state == states.idle:\n bcnt.next = 7\n treg.next = itx.read_data\n x_sck.next = cso.clock_polarity\n irx.write.next = False\n \n if not itx.empty and not irx.full:\n itx.read.next = True\n x_ss.next = False\n if cso.clock_phase: # Clock in on second phase\n state.next = states.wait_hclk\n else: # Clock in on first phase\n state.next = states.data_in\n else:\n itx.read.next = False\n x_ss.next = True\n\n # ~~~~ Wait half clock period for cpha=1 ~~~~\n elif state == states.wait_hclk:\n itx.read.next = False\n irx.write.next = False\n if ena:\n x_sck.next = not x_sck\n state.next = states.data_in\n\n # ~~~~ Clock data in (and out) ~~~~\n elif state == states.data_in:\n itx.read.next = False\n irx.write.next = False\n if ena: # clk div\n x_sck.next = not x_sck\n rreg.next = concat(rreg[7:0], x_miso)\n \n if cso.clock_phase and bcnt == 0:\n irx.write.next = True\n if itx.empty or irx.full:\n state.next = states.end\n else:\n state.next = states.data_change\n else:\n state.next = states.data_change\n\n # ~~~~ Get ready for next byte out/in ~~~~\n elif state == states.data_change:\n itx.read.next = False\n irx.write.next = False\n if ena:\n x_sck.next = not x_sck\n if bcnt == 0: \n if not cso.clock_phase:\n irx.write.next = True\n \n if itx.empty or irx.full:\n state.next = states.end\n else: # more data to transfer\n bcnt.next = 7\n state.next = states.data_in\n itx.read.next = True\n treg.next = itx.read_data\n else:\n treg.next = concat(treg[7:0], intbv(0)[1:])\n bcnt.next = bcnt - 1 \n state.next = states.data_in\n\n # ~~~~ End state ~~~~\n elif state == states.end:\n itx.read.next = False\n irx.write.next = False\n if ena: # Wait half clock cycle go idle\n state.next = states.idle\n\n # Shouldn't happen, error in logic\n else:\n state.next = states.idle\n assert False, \"SPI Invalid State\"\n\n @always_comb\n def 
rtl_fifo_sel():\n \"\"\"\n The `itx` and `irx` FIFO interfaces are driven by different\n logic depending on the configuration. This modules accesses\n the `itx` read side and drives the `irx` write side. The\n `itx` write side is driven by the `cso` or the `fifobus` port.\n The `irx` read side is accessed by the `cso` or the `fifobus`\n port.\n \"\"\"\n if cso.bypass_fifo:\n # data comes from the register file\n cso.tx_empty.next = itx.empty\n cso.tx_full.next = itx.full\n itx.write_data.next = cso.tx_byte\n\n cso.rx_empty.next = irx.empty\n cso.rx_full.next = irx.full\n cso.rx_byte.next = irx.read_data\n cso.rx_byte_valid.next = irx.read_valid\n\n # @todo: if cso.tx_byte write signal (written by bus) drive the\n # @todo: FIFO write signals, same if the cso.rx_byte is accessed\n itx.write.next = cso.tx_write\n irx.read.next = cso.rx_read\n\n else:\n # data comes from external FIFO bus interface\n fifobus.full.next = itx.full\n itx.write_data.next = fifobus.write_data\n itx.write.next = fifobus.write\n\n fifobus.empty.next = irx.empty\n fifobus.read_data.next = irx.read_data\n fifobus.read_valid.next = irx.read_valid\n irx.read.next = fifobus.read\n\n # same for all modes\n irx.write_data.next = rreg\n\n @always_comb\n def rtl_x_mosi():\n # @todo lsb control signal\n x_mosi.next = treg[7]\n\n @always_comb\n def rtl_gate_mosi():\n if cso.loopback:\n spibus.mosi.next = False\n else:\n spibus.mosi.next = x_mosi\n\n @always_comb #(clock.posedge)\n def rtl_spi_sigs():\n spibus.sck.next = x_sck\n if cso.loopback:\n x_miso.next = x_mosi\n else:\n x_miso.next = spibus.miso\n\n @always_comb\n def rtl_slave_select():\n if cso.manual_slave_select:\n spibus.ss.next = ~cso.slave_select\n elif x_ss:\n spibus.ss.next = 0xFF\n else:\n spibus.ss.next = ~cso.slave_select\n\n # myhdl generators in the __debug__ conditionals are not converted.\n if spi_controller.debug:\n @instance\n def mon_state():\n print(\" :{:<8d}: initial state {}\".format(\n now(), str(state)))\n \n while True:\n yield state\n print(\" :{:<8d}: state transition --> {}\".format(\n now(), str(state)))\n \n fbidle = intbv('0000')[4:]\n\n @instance\n def mon_trace():\n while True:\n yield clock.posedge\n ccfb = concat(itx.write, itx.read, irx.write, irx.read)\n if ccfb != fbidle:\n fstr = \" :{:<8d}: tx: w{} r{}, f{} e{}, rx: w{} r{} f{} e{}\"\n print(fstr.format(now(),\n int(itx.write), int(itx.read), int(itx.full), int(itx.empty),\n int(irx.write), int(irx.read), int(irx.full), int(irx.empty),)\n )\n \n @always(clock.posedge)\n def mon_tx_fifo_write():\n if itx.write:\n print(\" WRITE tx fifo {:02X}\".format(int(itx.write_data)))\n if itx.read:\n print(\" READ tx fifo {:02X}\".format(int(itx.read_data)))\n \n @always(clock.posedge)\n def mon_rx_fifo_write():\n if irx.write:\n print(\" WRITE rx fifo {:02X}\".format(int(irx.write_data)))\n \n if irx.read:\n print(\" READ rx fifo {:02X}\".format(int(irx.read_data)))\n\n # return the myhdl generators\n gens = myhdl.instances()\n return gens", "def send( self, value, mode=LCD_RS ): # RegisterSelect bit by default\n\t\thighnib = value & 0xF0\n\t\tlownib=(value<<4) & 0xF0\n\t\tself.write4bits( highnib | mode )\n\t\tself.write4bits( lownib | mode )", "def sendAuto(self):\n if (self.ser is not None):\n if len(self.cmd)>0:\n try:\n # TODO gestion python 2.0 (str) ou 3.0 (encode)\n cmd = self.cmd[0] + \"\\n\"\n self.ser.write(cmd.encode())\n self.t_TX = time.time()\n if self.mutexCmd.tryLock(100):\n del self.cmd[0]\n self.mutexCmd.unlock()\n else:\n print(\"WARN: cmd not send\")\n print(self.t_TX, 
cmd[:-1])\n except Exception as e:\n print(\"ERROR:Serial:sendAuto\",e)", "def write(self, register, value): #good\r\n\t\tself.i2c.write8(register, value)", "def send(self, data):\n starttime = time.time()\n while 1:\n if self._waiting_response==1:\n if time.time() - starttime > self._maxrespdelay:\n break\n _LOGGER.debug(\"Send going to sleep\\n\")\n time.sleep(self._sleeptime)\n else:\n break\n\n currtime = time.time()\n if currtime - self._lastcall > self._maxtime:\n self.reset()\n self._lastcall = currtime\n _LOGGER.debug(\"Sending: %s\", data)\n if not testing:\n self.serial.reset_input_buffer()\n bytessent = self.serial.write(data.encode())\n return bytessent\n else:\n self._waiting_response = 1\n return len(data)", "def write_firmware(self, data):\n data = list(map(int, data))\n\n return self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_WRITE_FIRMWARE, (data,), '64B', 'B')", "def flash_write16(self, addr, data):\n return self.flash_write(addr, data, 16)", "def _command(self, servo_id, instruction, *params):\n length = 3 + len(params)\n #print('length', length)\n \"\"\"\n checksum calculation:\n checksum = ~(ID + length+instruction+parms) if the numbers in the brackets\n are calculated and exceeded 255, then it takes the lowest one byte, \"~\"\n means Negation\n \"\"\"\n checksum = 255 - ((servo_id + length + instruction + sum(params))% 256)\n #print('checksum', checksum)\n packet = [0x55, 0x55, servo_id, length, instruction, *params, checksum]\n #print('packet', packet)\n self._serial.write(bytearray(packet))\n #print('Sending packet', packet)", "def set_switch(self, node_uuid, index, data):\n if data == \"on\":\n self._bus.i2c_acquire()\n try:\n p = self.values['num'].get_data_index(index=index)\n self._bus.pca9685_manager.set_pwm(p, 4096, 0)\n self.values['level'].set_data_index(index=index, data=100)\n except Exception:\n logger.exception('[%s] - Exception when switching on', self.__class__.__name__)\n finally:\n self._bus.i2c_release()\n elif data == \"off\":\n self._bus.i2c_acquire()\n try:\n p = self.values['num'].get_data_index(index=index)\n self._bus.pca9685_manager.set_pwm(p, 0, 4096)\n self.values['level'].set_data_index(index=index, data=0)\n except Exception:\n logger.exception('[%s] - Exception when switching off', self.__class__.__name__)\n finally:\n self._bus.i2c_release()\n else:\n logger.warning(\"[%s] - set_switch unknown data : %s\", self.__class__.__name__, data)" ]
[ "0.79480565", "0.75796175", "0.74145585", "0.71241754", "0.70519936", "0.7026349", "0.69845456", "0.69544286", "0.68782145", "0.68735623", "0.6855078", "0.6780902", "0.67396015", "0.6739508", "0.6665076", "0.66579545", "0.6576727", "0.6573629", "0.6531924", "0.6527273", "0.6477034", "0.64688987", "0.6459149", "0.64537716", "0.64059985", "0.64010686", "0.6366813", "0.6349548", "0.6340872", "0.6185139", "0.61171186", "0.6102041", "0.60558033", "0.6049153", "0.601123", "0.60008967", "0.59840375", "0.5981327", "0.59785175", "0.5946093", "0.5908002", "0.590496", "0.5896874", "0.589377", "0.5854047", "0.5828965", "0.58159703", "0.5780565", "0.5769477", "0.57427156", "0.5742339", "0.5739623", "0.5738773", "0.573436", "0.5723754", "0.5713511", "0.5710283", "0.57084894", "0.57061815", "0.56993264", "0.5694183", "0.5691384", "0.56761533", "0.5674242", "0.5670356", "0.56672037", "0.5651529", "0.5648787", "0.5625144", "0.5614202", "0.56087196", "0.560422", "0.5599468", "0.5597625", "0.5595329", "0.55912566", "0.5587608", "0.55875313", "0.55639285", "0.5554185", "0.5551544", "0.5549665", "0.5546999", "0.5540577", "0.5510476", "0.5502372", "0.55002826", "0.5499179", "0.5495476", "0.5485021", "0.54847836", "0.5467527", "0.5465589", "0.54632926", "0.5463004", "0.5462038", "0.54528844", "0.54502106", "0.54422647", "0.54306906" ]
0.58544874
44
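For context on the row above, which pairs the SPI query with a one-line `send` method, here is a minimal sketch of the kind of strip driver that method could belong to. It is assembled from the pyboard-style `SPI` initialisation and APA102 start-frame layout that appear in this row's own negative passages; the `pyb` import, class name, LED count, and bus number are assumptions added for illustration, not part of the dataset.

```python
# Minimal sketch, not taken from the dataset: an APA102-style strip driver in
# which `send` pushes the start frame plus the pixel buffer out over SPI.
# The `pyb` import, class name, and bus number are assumptions; the SPI
# constructor arguments and frame layout mirror the negatives shown above.
from pyb import SPI  # pyboard-style MicroPython; other ports expose machine.SPI


class Apa102Strip:
    def __init__(self, led_count):
        self.led_count = led_count
        self.startframe = bytes([0x00, 0x00, 0x00, 0x00])  # 32-bit start frame
        # 4 bytes per LED: brightness header, blue, green, red.
        self.buffer = bytearray(4 * led_count)
        for i in range(0, len(self.buffer), 4):
            self.buffer[i] = 0xFF  # full-brightness header, colour off
        self.spi = SPI(1, SPI.MASTER, baudrate=8000000, polarity=0, phase=0,
                       bits=8, firstbit=SPI.MSB)

    def set_led(self, index, red, green, blue):
        offset = index * 4
        self.buffer[offset + 1] = blue
        self.buffer[offset + 2] = green
        self.buffer[offset + 3] = red

    def send(self):
        # Same behaviour as the row's document: one SPI transaction containing
        # the start frame followed by the whole LED buffer.
        self.spi.send(self.startframe + self.buffer)
```

On such a strip, calling `set_led(0, 255, 0, 0)` followed by `send()` would light the first pixel red.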
Extracts function arguments for the decorated function.
def _extract_func_args( obj: str, arg_formats: Dict[str, int], arg_defaults: Dict[str, Any], input_dict: Dict[str, List[tfx_types.Artifact]], output_dict: Dict[str, List[tfx_types.Artifact]], exec_properties: Dict[str, Any], beam_pipeline: Optional[_BeamPipeline] = None, ) -> Dict[str, Any]: result = {} for name, arg_format in arg_formats.items(): if arg_format == function_parser.ArgFormats.INPUT_ARTIFACT: input_list = input_dict.get(name, []) if len(input_list) == 1: result[name] = input_list[0] elif not input_list and name in arg_defaults: # Do not pass the missing optional input. pass else: raise ValueError( ('Expected input %r to %s to be a singleton ValueArtifact channel ' '(got %s instead).') % (name, obj, input_list)) elif arg_format == function_parser.ArgFormats.OUTPUT_ARTIFACT: output_list = output_dict.get(name, []) if len(output_list) == 1: result[name] = output_list[0] else: raise ValueError( ('Expected output %r to %s to be a singleton ValueArtifact channel ' '(got %s instead).') % (name, obj, output_list)) elif arg_format == function_parser.ArgFormats.ARTIFACT_VALUE: input_list = input_dict.get(name, []) if len(input_list) == 1: result[name] = input_list[0].value elif not input_list and name in arg_defaults: # Do not pass the missing optional input. pass else: raise ValueError( ('Expected input %r to %s to be a singleton ValueArtifact channel ' '(got %s instead).') % (name, obj, input_list)) elif arg_format == function_parser.ArgFormats.PARAMETER: if name in exec_properties: result[name] = exec_properties[name] elif name in arg_defaults: # Do not pass the missing optional input. pass else: raise ValueError( ('Expected non-optional parameter %r of %s to be provided, but no ' 'value was passed.') % (name, obj)) elif arg_format == function_parser.ArgFormats.BEAM_PARAMETER: result[name] = beam_pipeline if name in arg_defaults and arg_defaults[name] is not None: raise ValueError('beam Pipeline parameter does not allow default ', 'value other than None.') else: raise ValueError('Unknown argument format: %r' % (arg_format,)) return result
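The document above is TFX-specific. As a generic illustration of the same extract-and-validate pattern, the following hedged sketch (all names invented for the example, not from the dataset) shows a decorator that fills a function's parameters from a dictionary of available values, skipping missing optional parameters and raising on missing required ones.

```python
# Hedged sketch, not the TFX implementation above: extract the wrapped
# function's arguments from a dictionary of available values, honouring
# defaults and rejecting missing required parameters.
import functools
import inspect


def with_extracted_args(available):
    """Call the decorated function with arguments pulled from `available`."""
    def decorator(func):
        signature = inspect.signature(func)

        @functools.wraps(func)
        def wrapper():
            kwargs = {}
            for name, param in signature.parameters.items():
                if name in available:
                    kwargs[name] = available[name]
                elif param.default is not inspect.Parameter.empty:
                    continue  # optional parameter: let its default apply
                else:
                    raise ValueError(
                        'Expected non-optional parameter %r to be provided.' % name)
            return func(**kwargs)
        return wrapper
    return decorator


@with_extracted_args({'examples': 'train.tfrecord'})
def trainer(examples, num_steps=100):
    return examples, num_steps


assert trainer() == ('train.tfrecord', 100)
```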
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_captured_arguments(func):\n captured_arguments = getattr(func, ATTR_NAME)\n if type(captured_arguments) is not _CapturedArguments: # pylint: disable=unidiomatic-typecheck\n # The attribute was not set by tcm, so effectively it does not exist.\n raise AttributeError\n delattr(func, ATTR_NAME)\n return captured_arguments", "def _extract_args(self, func):\n sig = inspect.signature(func)\n\n # Backwards compatibility\n if len(sig.parameters) == 1:\n ((name, parameter),) = sig.parameters.items()\n if (\n parameter.kind is parameter.POSITIONAL_OR_KEYWORD\n and parameter.annotation in (parameter.empty, argparse.Namespace)\n ):\n self._require_namespace = name\n return\n\n for name, parameter in sig.parameters.items():\n if parameter.annotation is argparse.Namespace:\n self._require_namespace = name\n else:\n arg = Argument.from_parameter(name, parameter)\n action = arg.register_with_proxy(self)\n self._args.append((name, action.dest))", "def inspect_args_func(frame):\n args, _, _, values = inspect.getargvalues(frame)\n return {key: values[key] for key in args if key != 'self'}", "def arguments(*args):\n def decorate(func):\n func.arguments = args\n return func\n return decorate", "def arguments_from_call_funccode(f):\n fc = fc_or_c(f.__call__)\n argcount = fc.co_argcount\n args = list(fc.co_varnames[1:argcount])\n if not args:\n raise RuntimeError('Function has variable number of arguments')\n return args", "def extract_args(*args: str, **kwargs) -> Optional[AttributeExtractor]:\n def extract(values: Dict[str, any], fn) -> Dict[Attribute, any]:\n out = {}\n\n for name in args:\n if name not in values:\n logging.warning(\n f\"@trace decorator refers to an argument '{name}' that was not found in the \"\n f\"signature for {fn.__qualname__}! (this attribute will not be added)\")\n else:\n out[Attribute(name, register=False)] = values[name]\n\n for name, value in kwargs.items():\n if name not in values:\n logging.warning(\n f\"@trace decorator refers to an argument '{name}' that was not found in the \"\n f\"signature for {fn.__qualname__}! (this attribute will not be added)\")\n else:\n if isinstance(value, Attribute):\n out[value] = values[name]\n elif isinstance(value, str):\n out[Attribute(value, register=False)] = values[name]\n elif value == Label:\n out[Label(name, register=False)] = values[name]\n elif value == Attribute:\n out[Attribute(name, register=False)] = values[name]\n else:\n logging.warning(\n f\"@trace decorator has invalid mapping for argument '{name}'. 
Expected one of Label, Attribute or str but got {type(value)}\")\n return out\n return extract", "def extractArguments(frame):\n\n\targuments = ([], None, None)\n\ttry:\n\t\tsource = textwrap.dedent(str().join(inspect.getsourcelines(frame)[0]).replace(\"\\\\\\n\", str()))\n\texcept (IOError, TypeError) as error:\n\t\treturn arguments\n\n\ttry:\n\t\tnode = ast.parse(source)\n\texcept:\n\t\treturn arguments\n\n\tif not node.body:\n\t\treturn arguments\n\n\tnode = node.body[0]\n\tif not isinstance(node, ast.FunctionDef):\n\t\treturn arguments\n\n\treturn [arg.id for arg in node.args.args], node.args.vararg, node.args.kwarg", "def GetFunctionParametersAndValues():\n frame = inspect.currentframe().f_back\n args, _, _, values = inspect.getargvalues(frame)\n return ([(i, values[i]) for i in args])", "def arguments_from_funccode(f):\n fc = fc_or_c(f)\n vnames = fc.co_varnames\n nargs = fc.co_argcount\n # bound method and fake function will be None\n args = vnames[1 if is_bound(f) else 0:nargs]\n if not args:\n raise RuntimeError('Function has variable number of arguments')\n return list(args)", "def get_input_arguments(kwargs, function, warn=True):\n np.set_printoptions(threshold=20)\n print('\\narguments to {}:'.format(function.__qualname__))\n params = inspect.signature(function)\n input_kwargs = {}\n not_arguments = {}\n for k, v in kwargs.items():\n if k in params.parameters:\n input_kwargs[k] = v\n print_item(k, v)\n else:\n not_arguments[k] = v\n if warn:\n print('\\nother arguments:')\n for k, v in not_arguments.items():\n #print('{}: {}'.format(k, v))\n print_item(k, v)\n print('\\n')\n return input_kwargs", "def get_input_arguments(kwargs, function, warn=True):\n np.set_printoptions(threshold=20)\n print('\\narguments to {}:'.format(function.__qualname__))\n params = inspect.signature(function)\n input_kwargs = {}\n not_arguments = {}\n for k, v in kwargs.items():\n if k in params.parameters:\n input_kwargs[k] = v\n print_item(k, v)\n else:\n not_arguments[k] = v\n if warn:\n print('\\nother arguments:')\n for k, v in not_arguments.items():\n #print('{}: {}'.format(k, v))\n print_item(k, v)\n print('\\n')\n return input_kwargs", "def as_args(function):\n return lambda x: function(*x)", "def to_args(f, *args, **kwargs):\n kwargs = to_kwargs(f, *args, **kwargs)\n s = inspect.getargspec(f)\n return [kwargs[a] for a in s.args]", "def get_python_function_arguments(f):\n # Note that we only return non-optional arguments (we assume that any optional args are not specified).\n # This allows to, e.g., accept max(a, b, *more, name='') as a binary function\n param_specs = inspect.getfullargspec(f)\n annotations = param_specs.annotations\n arg_names = param_specs.args\n defaults = param_specs.defaults # \"if this tuple has n elements, they correspond to the last n elements listed\n # in args\"\n if defaults:\n arg_names = arg_names[:-len(defaults)]\n return (arg_names, annotations)", "def derive_args(func):\n args = inspect.getfullargspec(func).args\n if args and is_selfish_name(args[0]):\n del args[0]\n return args", "def _get_args(function, varargs=False):\n\n try:\n params = signature(function).parameters\n except ValueError:\n # Error on builtin C function\n return []\n args = [\n key\n for key, param in params.items()\n if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)\n ]\n if varargs:\n varargs = [\n param.name\n for param in params.values()\n if param.kind == param.VAR_POSITIONAL\n ]\n if len(varargs) == 0:\n varargs = None\n return args, varargs\n else:\n return args", "def 
extract_function_metadata(wrapped, instance, args, kwargs, return_value):\n LOGGER.debug(\n \"Extracting function call metadata\", args=args, kwargs=kwargs,\n )\n return {\n \"metadata\": {\"args\": args, \"kwargs\": kwargs},\n }", "def list_kwargs(func):\n \n details = inspect.getargspec(func)\n nopt = len(details.defaults)\n \n return details.args[-nopt:]", "def preprocess_arguments(self, *args, **kwargs):\n return (args, kwargs)", "def func_args(self) -> str:\n\n return self.call_data[10:]", "def as_kwargs(function):\n return lambda x: function(**x)", "def extract_arguments(args, method):\n intersection = lambda list1, list2: [x for x in list1 if x in list2]\n filterByKey = lambda keys, data: {x: data[x] for x in keys if x in data }\n keys = intersection(signature(method).parameters.keys(), args.keys())\n params = filterByKey(keys, args)\n return params", "def extracts(*extract):\n def decorate(func):\n function = to_function(func)\n setattr(function, EXTRACTS, extract)\n return function\n return decorate", "def dump_args(func):\n\n def wrapper(*args, **kwargs):\n func_args = inspect.signature(func).bind(*args, **kwargs).arguments\n func_args_str = ', '.join('{} = {!r}'.format(*item) for item in func_args.items())\n log(f'{func.__module__}.{func.__qualname__} ( {func_args_str} )')\n return func(*args, **kwargs)\n\n return wrapper", "def get_keyword_args(function):\n argspec = inspect.getargspec(function)\n kwargs = argspec.args[len(argspec.args) - len(argspec.defaults):]\n kwargs = {arg: value for arg, value in zip(kwargs, argspec.defaults)}\n return kwargs", "def dumpArgs(func):\n def wrapper(*args, **kwargs):\n func_args = inspect.signature(func).bind(*args, **kwargs).arguments\n func_args_str = ', '.join('{} = {!r}'.format(*item)\n for item in func_args.items())\n print(f'{func.__module__}.{func.__qualname__} ( {func_args_str} )')\n result = func(*args, **kwargs)\n print(f'{func.__module__}.{func.__qualname__} Return Result: \\n {result}')\n return result\n return wrapper", "def test_arguments(self):\n calls = []\n decorator = self.decorator()\n\n @decorator\n def func(a, b, c):\n calls.append((a, b, c))\n\n func(1, 2, c=3)\n self.assertEqual(calls, [(1, 2, 3)])", "def filter_function_arguments(self, fn_node):\n return self.filter_nodes('Argument', parent=fn_node)", "def getargvalues(frame):\r\n args, varargs, varkw = getargs(frame.f_code)\r\n return ArgInfo(args, varargs, varkw, frame.f_locals)", "def kwargsdec(f):\n def wrapper(**kwargs):\n args = inspect.getargspec(f).args\n return f(**{ k: kwargs[k] for k in args})\n return wrapper", "def dump_args(func):\n\n def wrapper(*args, **kwargs):\n func_args = inspect.signature(func).bind(*args, **kwargs).arguments\n func_args_str = ', '.join('{} = {!r}'.format(*item) for item in func_args.items())\n print(f'{func.__module__}.{func.__qualname__} ( {func_args_str} )')\n return func(*args, **kwargs)\n\n return wrapper", "def getargspec(self,obj):\n\n if inspect.isfunction(obj):\n func_obj = obj\n elif inspect.ismethod(obj):\n func_obj = obj.im_func\n else:\n raise TypeError, 'arg is not a Python function'\n args, varargs, varkw = inspect.getargs(func_obj.func_code)\n return args, varargs, varkw, func_obj.func_defaults", "def getFunctionArguments(tokens: List[LEX_Type], last_token: LEX_Type) -> (List[AST_FunctionArgument], List[LEX_Type]):\n if tokens[0].type == \"LineEnd\":\n if tokens[1].type == \"Keyword\":\n return [], tokens\n return getFunctionArguments(tokens[1:], last_token)\n if tokens[0].type == \"ItemLister\":\n if tokens[1].type 
== \"Type\":\n if tokens[2].type == \"Identifier\":\n arguments: List[AST_FunctionArgument]\n last: List[LEX_Type]\n arguments, last = getFunctionArguments(tokens[3:], last_token)\n return [AST_FunctionArgument(tokens[1].value, tokens[2].value)] + arguments, last", "def filter_args(fn, args_tuple):\n sig = inspect.signature(fn)\n flag_var_positional = any([\n inspect.Parameter.VAR_POSITIONAL == value.kind for\n value in sig.parameters.values()])\n if flag_var_positional:\n return args_tuple\n else:\n num_args = len(sig.parameters.items())\n return args_tuple[:num_args]", "def get_kwargs_applicable_to_function(function, kwargs):\n return {\n key: value\n for key, value in kwargs.items()\n if key in inspect.getfullargspec(function).args\n }", "def request_args(args):\n\n def _decorator(fn):\n fn.args = args\n return fn\n\n return _decorator", "def create_args(func):\n # Get a dictionary of the params of the function\n params = dict(inspect.signature(func).parameters)\n # We will always use z for the vector input so delete that from the dict\n del params['z']\n return {k: peturb(v) for k, v in params.items()}", "def getArgs(func):\n # exclude the defaults at the end (hence the [:-1])\n args = list(utils.flatten(inspect.getargspec(func)[:-1]))\n return set(args).difference(set([None]))", "def params(funcarglist):\n def wrapper(function):\n function.funcarglist = funcarglist\n return function\n return wrapper", "def register_args(f):\n\n @wraps(f)\n def inner(*_args, **_kwargs):\n self = _args[0]\n args, kwargs = args_kwargs(f)\n args = dict(zip(args[1::], _args[1::]))\n kwargs.update(_kwargs)\n self.args = args\n self.kwargs = kwargs\n return f(self, *args.values(), **kwargs)\n\n return inner", "def get_fn_arg_contexts(cls, ctx: AntlrTelParser.FnContext) -> List[Any]:\n if len(ctx.children) <= 3:\n # [fn_name,(,)] => 3 children means no args, return empty array\n return []\n else:\n # Skip fnname and '(', step 2 to skip ','\n return ctx.children[2::2]", "def _get_required_args(func):\n module_logger.debug(f\"_get_required_args: func={func}\")\n fas = inspect.getfullargspec(func)\n module_logger.debug(f\"_get_required_args: fas={fas}\")\n len_args = len(fas.args)\n len_args += len(fas.kwonlyargs)\n if fas.kwonlydefaults is not None:\n len_args -= len(fas.kwonlydefaults)\n if fas.defaults is not None:\n len_args -= len(fas.defaults)\n return len_args", "def get_arguments(callable, exclude):\n info = arginfo(callable)\n defaults = info.defaults or []\n defaults = [None] * (len(info.args) - len(defaults)) + list(defaults)\n return {name: default for (name, default) in zip(info.args, defaults)\n if name not in exclude}", "def _get_arguments(self) -> str:\n func = self.node\n\n # Early logic used to iterate over, `func.get_arguments()`, however when there\n # is an unknown type clang will sometimes fail to provide tokens for that\n # argument. 
For example in \"unknown_type foo[]\" the brackets will cause clang\n # to return back no tokens for the argument.\n start = func.location\n end = func.extent.end\n if func.is_definition():\n # When a function is a definition the last child is the compound statement\n # so we need to move prior to the compound statement\n children = list(func.get_children())\n body_start = children[-1].extent.start.offset\n end = cindex.SourceLocation.from_offset(func.tu, start.file, body_start - 1)\n\n extent = cindex.SourceRange.from_locations(start, end)\n non_comment_tokens = (\n t\n for t in cindex.TokenGroup.get_tokens(func.tu, extent=extent)\n if t.kind != cindex.TokenKind.COMMENT\n )\n\n # Even though this will place spaces around all the tokens, the sphinx C domain\n # will provide some formatting to make it look nicer in the final output.\n full_signature = \" \".join(t.spelling for t in non_comment_tokens)\n\n _, _, arguments = full_signature.partition(\"(\")\n arguments = arguments.rstrip(\")\")\n arguments = arguments.strip()\n\n return arguments", "def _getargs(fn_sig):\n params = fn_sig.parameters\n args = []\n for k, v in params.items():\n if (v.kind & v.POSITIONAL_OR_KEYWORD) == v.POSITIONAL_OR_KEYWORD:\n args.append(k)\n else:\n msg = \"%s argument type unsupported in jitclass\" % v.kind\n raise errors.UnsupportedError(msg)\n return args", "def print_args(func):\n def new_func(*args, **kwargs):\n print(args)\n print(kwargs)\n return func(*args, **kwargs)\n return new_func", "def get_arguments(self, args=(), kwargs=None, onlykeys=False, onlyused=False,\n func=None):\n if func is None:\n func = self.__init__\n\n # check what parameters to add\n adds, params, kwargs = _helper_parameters(func=func, args=args, kwargs=kwargs,\n onlykeys=onlykeys, onlyused=onlyused)\n\n _map_parameters = getattr(self, \"_map_parameters\", None)\n for add, key in zip(adds, params):\n if add and key not in kwargs:\n try:\n if _map_parameters is not None and key in _map_parameters:\n mapped_key = _map_parameters[key]\n # if mapped_key is None then it means variable is not\n # assigned in the __init__ of the instance so ignore it\n if mapped_key is not None:\n kwargs[key] = getattr(self, mapped_key)\n else:\n kwargs[key] = getattr(self, key)\n except AttributeError:\n e, msg, traceback = sys.exc_info()\n msg.args = (\n msg.args[0] + \". 
Review @copy_support decorator or \"\n \"BaseCopySupporter class for more info.\",)\n raise_(e, msg, traceback)\n\n if onlykeys:\n return kwargs\n return args, kwargs", "def __parse_function_args(self, buffer):\n\t\targs = []\n\t\ttoken = buffer.read(1)\n\t\twhile token != \"(\": # FIXME don't duplicate code with __read_block\n\t\t\ttoken = buffer.read(1)\n\t\t\tassert token\n\t\tcount = 1\n\t\t\n\t\tdef flusharg(arg, args):\n\t\t\targ = \"\".join(arg)\n\t\t\targ = SpellString(arg).format(self.obj, proxy=self.proxy)\n\t\t\targs.append(arg)\n\t\t\treturn []\n\t\t\n\t\t_arg = []\n\t\twhile count:\n\t\t\ttoken = buffer.read(1)\n\t\t\tif token == \"(\":\n\t\t\t\tcount += 1\n\t\t\telif token == \")\":\n\t\t\t\tcount -= 1\n\t\t\tif not count or not token:\n\t\t\t\t_arg = flusharg(_arg, args)\n\t\t\t\tbreak\n\t\t\t\n\t\t\tif token == \",\" and count == 1:\n\t\t\t\t_arg = flusharg(_arg, args)\n\t\t\t\tcontinue\n\t\t\t\n\t\t\t_arg.append(token)\n\t\t\n\t\treturn args", "def fetch_arguments(op_def, arg, ws):\n return [fetch_argument(op_def, desc, ws) for desc in arg.strings]", "def unpack_args(kwargs):\n return [v for p in zip(list(kwargs.keys()), list(kwargs.values())) for v in p]", "def get_all_arguments(self):\n args, varargs, keyword, defaults = inspect.getargspec(self.exec_obj)\n if args.count('self') > 0:\n args.remove('self')\n return args", "def args_extract(self, args, kwargs):\n # make popable (can't pop tuple of args)\n args = list(args)\n\n def getarg(name, num):\n if args and len(args) > num:\n return args.pop(num)\n elif kwargs.get('files'):\n return kwargs.pop('files')\n return None\n\n # First to not affect data = args.pop(0)\n files = getarg('files', 1)\n data = getarg('data', 0)\n\n # make mutable if something\n if files:\n files = MultiValueDict(files)\n if data:\n data = MultiValueDict(data)\n\n return data, files, args, kwargs", "def log_kwargs(wrapped: Callable):\n\n @functools.wraps(wrapped)\n def wrapper(*args, **kwargs):\n logger.debug(f\"Keyword arguments: {kwargs}\")\n return wrapped(*args, **kwargs)\n\n return wrapper", "def positional_args(func):\n def inner(s, parser, *args, **kwargs):\n clargs = parser.run()\n return func(s, clargs.posn, *args, **kwargs)\n \n if (func.__doc__ != None): inner.__doc__=func.__doc__+\"\\n\\n[decorated by @positional_arguments]\\n\"\n inner.__name__=func.__name__\n return inner", "def build_arg_list(fn, env):\r\n kw = {}\r\n argspec = inspect.getargspec(fn)\r\n\r\n # if there is a **kw argument in the fn definition,\r\n # just pass along the environment\r\n if argspec[2]:\r\n kw = env\r\n #else for each entry in the arglist set the value from the environment\r\n else:\r\n #skip self\r\n argnames = argspec[0][1:]\r\n for name in argnames:\r\n if name in env:\r\n kw[name] = env[name]\r\n return kw", "def get_args( self, **kwargs ):\n args = []\n for at in self.arg_types:\n args.append( kwargs[at] )\n return args", "def get_args(inst):\n if is_estimator(inst):\n args = inspect.getargspec(inst.update).args\n args = [arg for arg in args if arg != 'self' and arg != 'X']\n else:\n args = inspect.getargspec(inst).args\n ignore_args = {'self', 'X', 'y', 'pattern', 'normalizer', 'coef'}\n args = [arg for arg in args if arg not in ignore_args]\n\n return args", "def get_kwd_args(func):\n try:\n sig = inspect.signature(func)\n except AttributeError:\n args, _, _, defaults = inspect.getargspec(func)\n if defaults:\n kwonlyargs = args[-len(defaults):]\n else:\n kwonlyargs = []\n else:\n kwonlyargs = {p.name:p.default for p in sig.parameters.values()\n if 
p.default is not p.empty}\n\n return kwonlyargs", "def arguments(self):\n return parse_arguments(self['data'])", "def filter_kwargs(function, **kwargs):\n\n kwargs = deepcopy(kwargs)\n if sys.version_info[0] >= 3:\n args = function.__code__.co_varnames\n else:\n args = function.func_code.co_varnames\n\n args = set(kwargs.keys()) - set(args)\n for key in args:\n kwargs.pop(key)\n\n return kwargs", "def log_arguments(f):\n\n if hasattr(f, '__qualname__'):\n name = f.__qualname__\n else:\n name = f.__name__\n @functools.wraps(f)\n def log_arguments_wrapper(*args, **kwargs):\n if name.endswith('__init__'):\n assert len(args) >= 1\n args_str = ', '.join(map(str, args[1:]))\n else:\n args_str = ', '.join(map(str, args))\n kwargs_str = ', '.join([f'{k} = {v}' for k, v in kwargs.items()])\n args_kwargs_str = ', '.join([s for s in [args_str, kwargs_str] if s])\n _logger.info(f'{name}({args_kwargs_str})')\n result = f(*args, **kwargs)\n return result\n return log_arguments_wrapper", "def getargspec(func):\n if isinstance(func, partial):\n return inspect.getargspec(func.func)\n else:\n if isinstance(func, type):\n return inspect.getargspec(func.__init__)\n else:\n return inspect.getargspec(func)", "def filter_extra_accepted_kwargs(fun, kwargs, skip_positional=0):\n sig = inspect.signature(fun)\n # the params from signature with up to skip_positional filtered out\n # (less only if there is not enough of positional args)\n params = [(name, param) for i, (name, param) in enumerate(sig.parameters.items())\n if i >= skip_positional or param.kind not in\n [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.POSITIONAL_ONLY]]\n extra = [\n name for (name, param) in params\n if param.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY]\n ]\n return {name: value for name, value in kwargs.items() if name in extra}", "def extract_keywords(func):\n if hasattr(func, 'im_func'):\n func = func.im_func\n\n try:\n return func.func_code.co_varnames[-len(func.func_defaults):]\n except (TypeError, ValueError, IndexError):\n return tuple()", "def get_partial_arguments(self):\n return (), {}", "def get_kwargs():\n\treturn get_kwargs_raw(sys.argv)", "def get_args(self):\r\n return self.args", "def selectArgsFromDict(func, argdict):\n return dict([(i, argdict[i]) for i in getArgs(func) if i in argdict])", "def get_args(self, action):\n\n def args_function(wildcards):\n return {\n \"input\": self._collect_bams(wildcards, wildcards.library_name),\n \"sample_name\": wildcards.library_name,\n \"platform\": \"EXTERNAL\",\n }\n\n assert action == \"run\", \"Unsupported actions\"\n return args_function", "def get_args(self, action):\n\n def args_function(wildcards):\n result = {\n \"input\": {\n \"reads_left\": list(\n sorted(self._collect_reads(wildcards, wildcards.library_name, \"\"))\n )\n },\n \"sample_name\": wildcards.library_name,\n \"platform\": \"ILLUMINA\",\n }\n reads_right = list(\n sorted(self._collect_reads(wildcards, wildcards.library_name, \"right-\"))\n )\n if reads_right:\n result[\"input\"][\"reads_right\"] = reads_right\n return result\n\n assert action == \"run\", \"Unsupported actions\"\n return args_function", "def getArgument(self, *args):\n return _libsbml.FunctionDefinition_getArgument(self, *args)", "def _create_argument_generator(func: PredictorMethod) -> PredictorArgumentGenerator:\n return _InjectedParams.from_func(func).make_arguments", "def get_filter_kwargs(self, *_, **__) -> Dict[str, Any]:", "def parse_args():\n parser = argparse.ArgumentParser( 
description='Required: function-name.' )\n parser.add_argument( '--function', '-f', help='function name required', required=True )\n args_dict = vars( parser.parse_args() )\n return args_dict", "def get_x_args_dict(self):\n return self.__x_args", "def _get_context(argspec, kwargs):\n if argspec.has_kwargs:\n return kwargs\n return dict((arg, kwargs[arg]) for arg in argspec.args if arg in kwargs)", "def arguments(args_to_pop=None) :\n posname, kwname, args = inspect.getargvalues(inspect.stack()[1][0])[-3:]\n posargs = args.pop(posname, [])\n args.update(args.pop(kwname, []))\n if args_to_pop is not None :\n for arg in args_to_pop :\n args.pop(arg)\n return args, posargs", "def good_decorator_accepting_args(decorator): \n def new_decorator(*f, **k):\n g = decorator(*f, **k)\n if 1 == len(f) and isinstance(f[0], types.FunctionType):\n g.__name__ = f[0].__name__\n g.__doc__ = f[0].__doc__\n g.__dict__.update(f[0].__dict__)\n pass\n return g\n \n new_decorator.__name__ = decorator.__name__\n new_decorator.__doc__ = decorator.__doc__\n new_decorator.__dict__.update(decorator.__dict__)\n # Required for Sphinx' automodule.\n new_decorator.__module__ = decorator.__module__\n return new_decorator", "def get_required_kwargs(fun, skip_positional=0):\n sig = inspect.signature(fun)\n # the params from signature with up to skip_positional filtered out\n # (less only if there is not enough of positional args)\n params = [(name, param) for i, (name, param) in enumerate(sig.parameters.items())\n if i >= skip_positional or param.kind not in\n [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.POSITIONAL_ONLY]]\n return [\n name for name, param in params if param.default is inspect.Parameter.empty\n and param.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY]\n ]", "def get_id_args(func, arg):\n\n return \"{} {}\".format(func.__name__, arg)", "def set_func_args(self, *args, **kwargs):\n self._func_args = args \n self._func_kw_args = kwargs", "def get_args(self):\n rqst = self.request\n args = rqst.arguments()\n resp = {}\n for arg in args:\n resp[arg] = repr(rqst.get_all(arg))\n return resp", "def register_args(op_type, args_getter=None):\n def decorated(inner_function):\n return OpSchema.register_args(op_type, inner_function)\n if args_getter is not None:\n return OpSchema.register_args(op_type, args_getter)\n return decorated", "def _get_reproducing_arguments(self):\n reproducing_arguments = {\n 'include': self.include,\n 'exclude': self.exclude,\n 'copy': self.copy,\n }\n args_names = {name: getattr(self, name) for name in self.args_names}\n reproducing_arguments.update(args_names)\n return reproducing_arguments", "def filter_args(func, keys):\n filtered = {}\n sign = list(signature(func).parameters.keys())\n for k, v in {**keys}.items():\n if k in sign:\n filtered[k] = v\n return filtered", "def _get_mock_args(node):\n args = []\n PREFIX_LENGTH = len(\"mock_\")\n\n for arg in node.args.args:\n name = getattr(arg, \"id\", getattr(arg, \"arg\", None))\n if not name.startswith(\"mock_\"):\n continue\n args.append(name[PREFIX_LENGTH:])\n\n return args", "def inspect_arg(node):\n return inspect_ann(node)", "def decorator(arg):\n return lambda: list(arg)", "def get_Callable_args_res(clb):\n try:\n return clb.__args__, clb.__result__\n except AttributeError:\n # Python 3.6\n return clb.__args__[:-1], clb.__args__[-1]", "def wrapper_fun(*args):\n print(\"Hello Decorator\")\n return fun(*args)", "def get_additional_hook_arguments(\n self,\n path_manager: PathManager,\n 
extra_arguments: Optional[Dict[str, str]] = None,\n ) -> List[str]:", "def getPositionalArgs():", "def _handle_func_args(func, *args, **kwargs):\n if not isinstance(func, (types.FunctionType, types.MethodType)):\n raise RuntimeError('fn {} is not function or method'.format(func))\n if kwargs:\n bound_arguments = inspect.signature(func).bind(*args, **kwargs)\n bound_arguments.apply_defaults()\n args = bound_arguments.args\n kwargs = bound_arguments.kwargs\n\n positional_args = 0\n default_args = 0\n has_var = False\n for value in inspect.signature(func).parameters.values():\n if value.kind is inspect.Parameter.VAR_POSITIONAL or value.kind is inspect.Parameter.VAR_KEYWORD:\n has_var = True\n if value.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD:\n if value.default is inspect.Parameter.empty:\n positional_args += 1\n else:\n default_args += 1\n\n if has_var:\n return args, kwargs\n\n if len(args) < positional_args:\n raise TypeError(f\"Function {func.__name__} needs {positional_args} positional argument, but got {len(args)}.\")\n if len(args) > positional_args + default_args:\n raise TypeError(f\"Function {func.__name__} needs {positional_args} positional argument and {default_args} \"\n f\"default argument, total {positional_args + default_args}, but got {len(args)}.\")\n return args, kwargs", "def expand(self) -> List[TOKEN]:\n return [self.function, *self.args]", "def format_args(self, **kwargs: Any) -> str:\n decl = self.declaration\n\n # The logic allows this to be used for both function like and non\n # function like macros.\n # 'SOME_DEFINE'.partition('(')\n # >>> 'SOME_DEFINE', '', ''\n #\n # 'FUNCTION_LIKE(_a, _b)'.partition('(')\n # >>> 'FUNCTION_LIKE', '(', '_a, _b)'\n _, part, args = decl.partition(\"(\")\n return part + args", "def format_arguments(self, **kwargs):\n return kwargs", "def _convert_args(self, expr, args, kwargs):\n assert expr is not None\n\n if not kwargs:\n return args\n\n if kwargs and not isinstance(expr, Function):\n raise Exception(\"can only supply keyword parameters for a \"\n \"relay.Function, found {0}\".format(expr))\n\n params = expr.params\n param_names = [p.name_hint for p in params]\n num_of_args = len(args)\n\n cargs = list(args)[:]\n for i, name in enumerate(param_names):\n if i < num_of_args:\n if kwargs.get(name):\n raise Exception(\n \"duplicate argument supplied in \"\n \"both positional args (at position: {0}), \"\n \"and keyword argument (with name: {1})\".format(i, name))\n else:\n cargs.append(kwargs[name])\n\n if len(cargs) != len(params):\n raise Exception(\n \"insufficient arguments, expected \"\n \"{0}, provided {1}\".format(len(cargs), len(params)))\n\n return tuple(cargs)", "def as_kwargs(self) -> Dict[str, Any]:\n ret = {}\n for arg in self.args.values():\n ret[arg.name] = arg.value\n return ret", "def extend_with_args(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n if request.get_json():\n kwargs.update(request.get_json())\n\n # WARNING: This may get us into trouble when it comes to\n # list-style \"get\" args.\n kwargs.update({k: v for k, v in request.args.iteritems()})\n\n # \"auth\" is special -- it's an authentication token, not an\n # argument.\n if \"auth\" in kwargs:\n kwargs.pop(\"auth\")\n\n # This is JUST for the 1.0 sidebar app.\n if \"access_token\" in kwargs:\n kwargs.pop(\"access_token\")\n\n return f(*args, **kwargs)\n return wrapper", "def filter_kwargs(dict_to_filter, function_to_call):\n\n sig = inspect.signature(function_to_call)\n filter_keys = [param.name for param in sig.parameters.values() if (param.kind 
== param.POSITIONAL_OR_KEYWORD)]\n valid_args = {}\n invalid_args = {}\n\n for key in dict_to_filter:\n if key in filter_keys:\n valid_args[key] = dict_to_filter[key]\n else:\n invalid_args[key] = dict_to_filter[key]\n return valid_args, invalid_args", "def get_json_argument_list():\n list_of_arguments_to_get = [\"finish_time\", \"segmentation_training_samples\", \"patch_count_per_image\", \"learning_rate\", \"batch_k\",\n \"batch_p\", \"flip_augment\", \"standardize\", \"margin\", \"metric\"]\n\n return list_of_arguments_to_get" ]
[ "0.73196673", "0.72977483", "0.70101774", "0.69297165", "0.67886466", "0.6777163", "0.66621184", "0.66599494", "0.6646892", "0.6559392", "0.6559392", "0.6526404", "0.6525048", "0.6484637", "0.6477483", "0.63901955", "0.6309247", "0.6304008", "0.6285241", "0.6279031", "0.62567276", "0.621607", "0.62098473", "0.62020326", "0.61953586", "0.61158305", "0.6105116", "0.6095368", "0.6094999", "0.6067567", "0.6049048", "0.60332817", "0.6026479", "0.6019232", "0.6018263", "0.6003445", "0.5995536", "0.5985236", "0.5939428", "0.5934477", "0.59226364", "0.59124327", "0.588432", "0.5875205", "0.5873027", "0.5867631", "0.5810621", "0.57894284", "0.5778229", "0.5766073", "0.5724669", "0.5719134", "0.5718195", "0.5716646", "0.5683957", "0.56715655", "0.56572706", "0.56542236", "0.56482315", "0.56428885", "0.56397045", "0.56241876", "0.56067866", "0.56052744", "0.55989003", "0.5596854", "0.557872", "0.55626893", "0.5544393", "0.55346817", "0.55298376", "0.552657", "0.5524341", "0.5523359", "0.5521927", "0.55144507", "0.5509959", "0.5504894", "0.550289", "0.5495836", "0.5484896", "0.54782164", "0.54756504", "0.54729974", "0.5463715", "0.54617685", "0.54606265", "0.54422706", "0.5442181", "0.54380006", "0.5437035", "0.5433753", "0.5416578", "0.54160243", "0.541252", "0.54091555", "0.540913", "0.5403992", "0.5402226", "0.53892833", "0.53885156" ]
0.0
-1
Validates and assigns the outputs to the output_dict.
def _assign_returned_values(
    function,
    outputs: Dict[str, Any],
    returned_values: Dict[str, Any],
    output_dict: Dict[str, List[tfx_types.Artifact]],
    json_typehints: Dict[str, Type],  # pylint: disable=g-bare-generic
) -> Dict[str, List[tfx_types.Artifact]]:
  result = copy.deepcopy(output_dict)

  if not isinstance(outputs, dict):
    raise ValueError(
        ('Expected component executor function %s to return a dict of '
         'outputs (got %r instead).') % (function, outputs))

  # Assign returned ValueArtifact values.
  for name, is_optional in returned_values.items():
    if name not in outputs:
      raise ValueError(
          'Did not receive expected output %r as return value from '
          'component executor function %s.' % (name, function))
    if not is_optional and outputs[name] is None:
      raise ValueError('Non-nullable output %r received None return value from '
                       'component executor function %s.' % (name, function))
    try:
      result[name][0].value = outputs[name]
    except TypeError as e:
      raise TypeError(
          ('Return value %r for output %r is incompatible with output type '
           '%r.') % (outputs[name], name, result[name][0].__class__)) from e
    # Handle JsonValue runtime type check.
    if name in json_typehints:
      ret = function_parser.check_strict_json_compat(outputs[name],
                                                     json_typehints[name])
      if not ret:
        raise TypeError(
            ('Return value %r for output %r is incompatible with output type '
             '%r.') % (outputs[name], name, json_typehints[name]))
  return result
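A minimal sketch of the same write-back pattern follows, using a hypothetical FakeValueArtifact in place of a TFX ValueArtifact; the JSON type-hint check performed by the real function is omitted here.

import copy
from typing import Any, Dict, List


class FakeValueArtifact:
  def __init__(self):
    self.value = None


def assign_returned_values_sketch(
    outputs: Dict[str, Any],
    returned_values: Dict[str, bool],   # output name -> is_optional
    output_dict: Dict[str, List[FakeValueArtifact]],
) -> Dict[str, List[FakeValueArtifact]]:
  if not isinstance(outputs, dict):
    raise ValueError('Expected a dict of outputs, got %r' % (outputs,))
  result = copy.deepcopy(output_dict)
  for name, is_optional in returned_values.items():
    if name not in outputs:
      raise ValueError('Missing expected output %r' % name)
    if not is_optional and outputs[name] is None:
      raise ValueError('Non-nullable output %r returned None' % name)
    result[name][0].value = outputs[name]   # singleton channel receives the value
  return result


out = assign_returned_values_sketch(
    outputs={'accuracy': 0.93},
    returned_values={'accuracy': False},
    output_dict={'accuracy': [FakeValueArtifact()]},
)
assert out['accuracy'][0].value == 0.93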
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_output_dictionary(self):\n\n return_dictionary = {}\n\n for output_path in self.provided_outputs:\n return_dictionary[output_path.full_path] = self.get_value(output_path)\n\n return return_dictionary", "def _check_output_observable_dict(\n self, output_observable_dict, outputs, observables):\n if output_observable_dict is None:\n if (len(outputs) == 1) and (len(observables) == 1):\n # Create map of single output to single observable\n output_observable_dict = {outputs[0]: observables[0]}\n else:\n # Assume trivial map\n output_observable_dict = {output: output for output in outputs}\n\n # Check that output-observable map is valid\n for output in outputs:\n if output not in list(output_observable_dict.keys()):\n raise ValueError(\n 'The output <' + str(output) + '> could not be identified '\n 'in the output-observable map.')\n\n observable = output_observable_dict[output]\n if observable not in observables:\n raise ValueError(\n 'The observable <' + str(observable) + '> could not be '\n 'identified in the dataframe.')\n\n return output_observable_dict", "def init_output_dict(self):\n return {\n \"outputs\": torch.FloatTensor(),\n \"pred_probs\": torch.FloatTensor(),\n \"labels\": torch.LongTensor(),\n }", "def finalize_output_dict(self):\n self.output_dict = {\n key: torch.cat(value).numpy() for key, value in self.output_dict.items()\n }", "def finalize_output_dict(self, output_dict):\n return {key: output_dict[key].cpu().numpy() for key in output_dict.keys()}", "def _transform_outputs(self) -> None:\n self.outputs = None if self.outputs == {} else self.outputs", "def _populate_output(self):\n pass", "def collect_output(workdir_path, outputs):\n output_dict = {}\n for output_parameter in outputs:\n if 'id' not in output_parameter:\n exit_validation(\"Error: output without id member\")\n if 'type' not in output_parameter:\n exit_validation(\"Error: output without type member\")\n if output_parameter['type'] != 'File':\n exit_system_error(\"Sorry, I only know about File outputs\")\n if 'outputBinding' in output_parameter:\n binding = output_parameter['outputBinding']\n paths = []\n if 'glob' in binding:\n paths = glob.glob(os.path.join(workdir_path, binding['glob']))\n log(\"Paths after globbing: \" + str(paths))\n if paths != []:\n output_dict[output_parameter['id']] = {\n 'class': 'File',\n 'location': 'file:///' + paths[0]\n }\n return output_dict", "def update_outputs(self, outnames):\n for cname, vnames in partition_names_by_comp(outnames).items():\n if cname is None: # boundary outputs\n self.update_inputs(None, vnames)\n else:\n getattr(self, cname).update_outputs(vnames)\n self.set_valid(vnames, True)", "def _ParseOutputs(self, output_data, output_parameters=None):\n\n try:\n\n output_data.iteritems()\n\n except AttributeError, e:\n\n raise errors.ScheduleError(\"Error parsing outputs, %s\" % e)\n\n\n for output_type, data in output_data.iteritems():\n\n output_name = \"\"\n \n if output_data.has_key('name'):\n\n output_name = data['name']\n\n else:\n \n output_name = \"%s (%s)\" % (self.name, output_type)\n\n\n if self.verbose:\n\n print \"Loading Output '%s' of type '%s'\" % (output_name, output_type)\n\n output = self._output_registry.Create(output_name, output_type, output_data)\n\n\n # After giving initializing data, we give the output parameters\n #\n if output_parameters is not None:\n\n for o in output_parameters:\n\n self.AddOutput(path=o['component_name'],\n parameter=o['field'],\n output_type=o['outputclass'],)\n\n self._outputs.append(output)", "def 
validate_output_values(self, source, **kwargs):\n return self._validate_values(\"output_values\", source, **kwargs)", "def __set_outputs__(self):\n self.__set_in_out_var__(None, 1)", "def get_output_data(\n self,\n inputs: Dict[str, Any]) -> Any:\n return inputs", "def outputs_prepare_hook(\n self,\n outputs: Any,\n ) -> Any:\n return outputs", "def initialize_output_dict(self, label: Optional[str] = None):\n if label is not None or not self._does_output_dict_contain_info():\n for species in self.species_list:\n if label is None or species.label == label:\n if species.label not in self.output:\n self.output[species.label] = dict()\n if 'paths' not in self.output[species.label]:\n self.output[species.label]['paths'] = dict()\n path_keys = ['geo', 'freq', 'sp', 'composite']\n for key in path_keys:\n if key not in self.output[species.label]['paths']:\n self.output[species.label]['paths'][key] = ''\n if 'irc' not in self.output[species.label]['paths'] and species.is_ts:\n self.output[species.label]['paths']['irc'] = list()\n if 'job_types' not in self.output[species.label]:\n self.output[species.label]['job_types'] = dict()\n for job_type in list(set(self.job_types.keys())) + ['opt', 'freq', 'sp', 'composite', 'onedmin']:\n if job_type in ['rotors', 'bde']:\n # rotors could be invalidated due to many reasons,\n # also could be falsely identified in a species that has no torsional modes.\n self.output[species.label]['job_types'][job_type] = True\n else:\n self.output[species.label]['job_types'][job_type] = False\n keys = ['conformers', 'isomorphism', 'convergence', 'restart', 'errors', 'warnings', 'info']\n for key in keys:\n if key not in self.output[species.label]:\n if key == 'convergence':\n self.output[species.label][key] = None\n else:\n self.output[species.label][key] = ''", "def solid_output_values(self) -> Mapping[str, Union[Any, Mapping[str, Any]]]:\n results: Dict[str, Union[Any, Dict[str, Any]]] = {}\n captured = self._step_execution_context.step_output_capture\n\n if captured is None:\n check.failed(\"Outputs were unexpectedly not captured for hook\")\n\n # make the returned values more user-friendly\n for step_output_handle, value in captured.items():\n if step_output_handle.mapping_key:\n if results.get(step_output_handle.output_name) is None:\n results[step_output_handle.output_name] = {\n step_output_handle.mapping_key: value\n }\n else:\n results[step_output_handle.output_name][step_output_handle.mapping_key] = value\n else:\n results[step_output_handle.output_name] = value\n\n return results", "def _output_update(self):\n self._outputtype = self.inputs.outputtype", "def validate(self):\n self.__log('Validating whether all conditions are met.')\n if not self.config['OUT_FOLDER'] or not self.config['OUTPUT_FOLDER']:\n self.__log('The path to the output folder cannot be found.', 'error')\n raise FileNotFoundError\n\n try:\n if '.' 
in self.output_filename:\n self.__log('The output filename should not contain an extension.', 'error')\n raise ValueError\n except TypeError:\n pass\n\n if not self.output_filename:\n self.__log('The output filename has not been specified.', 'warning')\n self.output_filename = self.hash_time()\n i = 0\n while self.output_file_exists():\n self.__log('Adding a unique identifier to current filename.', 'warning')\n self.output_filename = self.output_filename + '-' + i\n i += 1\n self.__log(f'Continuing with file: \"{self.output_filename}\"', 'success')\n\n # Iterate over options to check for required parameters, as to not waste requests\n self.__log('Starting to check if all required parameters are set')\n for key, value in self.options.items():\n if key in self.config['REQUIRED_PARAMETERS'] and not value:\n self.__log(f'Missing a required parameter: {key}', 'error')\n raise MissingRequiredParameterError(key)\n\n self.__log('All validation successful.', 'success')", "def _translate_outputs(self, outputs):\n HOT_TO_CFN_ATTRS = {'description': 'Description',\n 'value': 'Value'}\n\n cfn_outputs = {}\n\n for output_name, attrs in six.iteritems(outputs):\n cfn_output = {}\n\n for attr, attr_value in six.iteritems(attrs):\n cfn_attr = self._translate(attr, HOT_TO_CFN_ATTRS,\n _('\"%s\" is not a valid keyword '\n 'inside an output definition'))\n cfn_output[cfn_attr] = attr_value\n\n cfn_outputs[output_name] = cfn_output\n\n return cfn_outputs", "def get_outputs(self, input_dict: Dict) -> Dict[str, np.ndarray]:\n activation_values = self.session.run(self.activation_names, input_dict)\n return dict(zip(self.sanitized_activation_names, activation_values))", "def set_outputs(self, outputs):\n self.attributes[\"outputs\"] = outputs", "def _parse_output_variables(self):\n self._output_variables_by_name = {}\n self._output_variables_by_type = {}\n for ov in self._output_variables:\n # parse the variable to get individual parts\n parsed_variable = self.parse_variable(ov)\n variable_name = parsed_variable.get('name')\n variable_type = parsed_variable.get('type')\n\n # store the variables in dict by name (e.g. \"status_code\")\n self._output_variables_by_name[variable_name] = {'variable': ov}\n\n # store the variables in dict by name-type (e.g. 
\"status_code-String\")\n self._output_variables_by_type[f'{variable_name}-{variable_type}'] = {'variable': ov}", "def __generate_output_data(self):\n if not len(self.output_data) == 0:\n return\n try:\n self.output_data = s.load(open('output/output_data.p', 'rb'))\n self.class_indices = s.load(open('output/class_indices.p', 'rb'))\n if not self.classes_to_visualise == None:\n self.__filter_output_data(self.classes_to_visualise)\n except:\n self.output_data = generate_output_for_test_data(image_data=self.image_data,\n binary_output=self.binary_output) if self.testing else generate_output_for_train_data(\n image_data=self.image_data, binary_output=self.binary_output)\n self.class_indices = get_all_class_indices(training=False) if self.testing else get_all_class_indices()\n if not self.classes_to_visualise == None:\n self.__filter_output_data(self.classes_to_visualise)\n s.dump([out.tolist() for out in self.output_data], open('output/output_data.p', 'wb'))\n s.dump(self.class_indices, open('output/class_indices.p', 'wb'))\n\n self.legend = get_class_names_for_class_indices(list(set(sorted(self.class_indices))))", "def prepare_multiple_out_parsers(run_dict):\n output_parser_dict = {}\n for run_label, run_name in run_dict.items():\n output_parser_dict[run_label] = OutputParser(run_name, use_most_recent=False)\n return output_parser_dict", "def output_definition(self):\n return {\n 'error': [0.0, 10.0]\n }", "def _format_calc(self, outputs: Dict[str, np.array], system: System):\n results = {p: [] for p in self.required_properties}\n\n for output in outputs:\n for p in self.required_properties:\n # Check for convergence\n if output[p] is None:\n raise QMCalculatorError(\"Errors encountered during computation.\")\n\n results[p].append(torch.from_numpy(output[p]))\n\n for p in self.required_properties:\n results[p] = torch.stack(results[p]).to(system.device, system.dtype)\n\n return results", "def decode_results(self, outputs):\n ...", "def outputs(self):\n pass", "def _create_output_alternatives(self, predictions):\n return {self.head_name: (self._problem_type, predictions)}", "def outputs(self):\n return {\"path_to_validation_pdf\": File_IO(\n self.node.outputs[0])}", "def get_outputs():\n outputs = {}\n for obj in vars(acsploit.output).values():\n if hasattr(obj, 'OUTPUT_NAME'):\n outputs[obj.OUTPUT_NAME] = obj\n\n return outputs", "def postprocess_model_outputs(self, predictions, expected):\n\n for key, val in predictions.items():\n predictions[key] = val.numpy()\n\n for key, val in expected.items():\n expected[key] = val.numpy()\n\n return predictions, expected", "def collect_outputs(self, output_lookup=None):\n\n if output_lookup is None:\n output_lookup = {'cf_mean': self.cf_mean,\n 'cf_mean_ac': self.cf_mean_ac,\n 'cf_profile': self.cf_profile,\n 'cf_profile_ac': self.cf_profile_ac,\n 'annual_energy': self.annual_energy,\n 'energy_yield': self.energy_yield,\n 'gen_profile': self.gen_profile,\n 'ac': self.ac,\n 'dc': self.dc,\n 'clipped_power': self.clipped_power,\n 'system_capacity_ac': self.system_capacity_ac,\n }\n\n super().collect_outputs(output_lookup=output_lookup)", "def postprocess_model_outputs(self, predictions, expected):\n\n predictions = {k: t.numpy() for k, t in predictions.items()}\n\n return predictions, expected", "def build_outputs(self, **inputs):\n print(\"Building all outputs, \", self.name)\n# invscale, _ = self.build_output('invscale', **inputs)\n# loc, _ = self.build_output('loc', invscale=invscale, **inputs)\n# samp, _ = self.build_output('main', invscale=invscale, 
loc=loc)\n self.build_output('invscale', **inputs)\n self.build_output('loc', **inputs)\n self.build_output('main', **inputs)", "def build_outputs(self):\n with tf.variable_scope(\"build_outputs\"):\n\n self.optical_flow = self.__optical_flow_src2_tgt\n self.disp = self.baselineNet.disp_tgt\n self.semantic = self.__semantic\n self.motion_mask = self.prepare_final_motion_mask()", "def collect_outputs(self, output_lookup=None):\n\n if output_lookup is None:\n output_lookup = {'cf_mean': self.cf_mean,\n 'cf_profile': self.cf_profile,\n 'annual_energy': self.annual_energy,\n 'energy_yield': self.energy_yield,\n 'gen_profile': self.gen_profile,\n }\n\n super().collect_outputs(output_lookup=output_lookup)", "def _validate_results(self, task, result):\n assert isinstance(result, dict), \\\n f\"{task} returned a {type(result)} rather than a dict\"\n for k in result:\n assert k in self.provides, \\\n f\"{task} provided unwanted output {k}\"\n for k in self.provides:\n assert k in result, \\\n f\"{task} failed to provide needed output {k}\"", "def validate(cls, output_destination):\n # nothing to check :)\n pass", "def validate(cls, output_destination):\n # nothing to check :)\n pass", "def compute_metrics(self):\n self.finalize_output_dict()\n self.metric_dict = {\n key: value(self.output_dict[\"labels\"], self.output_dict[\"pred_probs\"])\n for key, value in self.metric_fns.items()\n }", "def validation_epoch_end(self, outputs):\n avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\n val_acc = sum([x['n_correct_pred'] for x in outputs]) / sum(x['n_pred'] for x in outputs)\n tensorboard_logs = {'val_loss': avg_loss, 'val_acc': val_acc}\n return {'val_loss': avg_loss, 'log': tensorboard_logs}", "def _process_convert_output_(self, output_data, **kwargs):\n accept_input, current_state, output = output_data\n return (accept_input, current_state, output)", "def _load_results(self, filename):\n cr = CaseReader(filename)\n case = cr.system_cases.get_case(-1)\n loaded_outputs = cr.list_outputs(case=case, explicit=True, implicit=True, values=True,\n units=True, shape=True, out_stream=None)\n\n self.outputs = {'indep': {}, 'states': {}, 'controls': {}, 'control_rates': {},\n 'design_parameters': {}, 'input_parameters': {}, 'ode': {}}\n\n for output_name, options in loaded_outputs:\n\n if output_name.startswith('inputs.'):\n output_name = output_name.replace('inputs.', '')\n\n if output_name == 'time':\n var_type = 'indep'\n var_name = 'time'\n if output_name.startswith('states:'):\n var_type = 'states'\n var_name = output_name.replace('states:', '', 1)\n elif output_name.startswith('controls:'):\n var_type = 'controls'\n var_name = output_name.replace('controls:', '', 1)\n elif output_name.startswith('control_rates:'):\n var_type = 'control_rates'\n var_name = output_name.replace('control_rates:', '', 1)\n elif output_name.startswith('design_parameters:'):\n var_type = 'design_parameters'\n var_name = output_name.replace('design_parameters:', '', 1)\n # elif output_name.startswith('traj_design_parameters:'):\n # var_type = 'traj_design_parameters'\n # var_name = output_name.replace('traj_design_parameters:', '', 1)\n\n val = options['value']\n\n elif output_name.startswith('ode.'):\n var_type = 'ode'\n var_name = output_name.replace('ode.', '')\n\n if len(options['value'].shape) == 1:\n val = options['value'][:, np.newaxis]\n else:\n val = options['value']\n else:\n raise RuntimeError('unexpected output in file {1}: {0}'.format(output_name,\n filename))\n\n self.outputs[var_type][var_name] = {}\n 
self.outputs[var_type][var_name]['value'] = val\n self.outputs[var_type][var_name]['units'] = convert_to_ascii(options['units'])\n self.outputs[var_type][var_name]['shape'] = tuple(val.shape[1:])", "def solid_output_values(self) -> Mapping[str, Union[Any, Mapping[str, Any]]]:\n raise DagsterInvalidPropertyError(_property_msg(\"solid_output_values\", \"method\"))", "def solid_output_values(self) -> Mapping[str, Union[Any, Mapping[str, Any]]]:\n raise DagsterInvalidPropertyError(_property_msg(\"solid_output_values\", \"method\"))", "def _assign_output_states(self):\n for mech in self.terminalMechanisms.mechanisms:\n self.outputStates[mech.name] = mech.outputStates", "def setOutputs(self, output_list):\n self.output_list = output_list", "def fixupProcess(self):\n # Make sure that for each output module the following parameters exist\n # in the PSet returned from the framework:\n # fileName\n # logicalFileName\n # dataset.dataTier\n # dataset.filterName\n if hasattr(self.process, \"outputModules\"):\n outputModuleNames = self.process.outputModules.keys()\n else:\n outputModuleNames = self.process.outputModules_()\n for outMod in outputModuleNames:\n outModRef = getattr(self.process, outMod)\n if not hasattr(outModRef, \"dataset\"):\n outModRef.dataset = cms.untracked.PSet()\n if not hasattr(outModRef.dataset, \"dataTier\"):\n outModRef.dataset.dataTier = cms.untracked.string(\"\")\n if not hasattr(outModRef.dataset, \"filterName\"):\n outModRef.dataset.filterName = cms.untracked.string(\"\")\n if not hasattr(outModRef, \"fileName\"):\n outModRef.fileName = cms.untracked.string(\"\")\n if not hasattr(outModRef, \"logicalFileName\"):\n outModRef.logicalFileName = cms.untracked.string(\"\")\n return", "def outputs(self):\n return super().outputs", "def outputs(self):\n return super().outputs", "def outputs(self):\n return super().outputs", "def outputs(self):\n return super().outputs", "def convert_outputs(self):\n self.out('relaxed_structure', self.ctx.workchain.outputs.output_structure)\n self.out('total_energy', get_total_energy(self.ctx.workchain.outputs.output_parameters))\n self.out('forces', get_forces_from_trajectory(self.ctx.workchain.outputs.output_trajectory))\n self.out('stress', get_stress_from_trajectory(self.ctx.workchain.outputs.output_trajectory))", "def output_from_json(self, output: Dict[str, Any]) -> OutputInfo:", "def evaluate_outputs(self):\n raise NotImplementedError(\n 'evaluate_outputs called but not implemented in the derived class.'\n )", "def _update_output_after_create_node(self):\n # Constants and parameter should not exist for input and output.\n filtered_node = {NodeTypeEnum.CONST.value, NodeTypeEnum.PARAMETER.value}\n for node in self._normal_node_map.values():\n for src_name, input_attr in node.inputs.items():\n src_node = self._get_normal_node(node_name=src_name)\n if src_node.type in filtered_node:\n continue\n\n src_node.add_outputs(node.name, input_attr)", "def validation_epoch_end(self, outputs):\n # outputs contain data for 2 dataloaders - val and test\n for out_idx, outputs_i in enumerate(outputs):\n # validation loss\n val_loss_mean = torch.stack([x['val_loss_b'] for x in outputs_i]).mean()\n\n # predicts\n logits = np.concatenate([x['logits_b'] for x in outputs_i], axis=0)\n labels = np.concatenate([x['labels_b'] for x in outputs_i], axis=0)\n emph_probs = np.concatenate([x['emph_probs_b'] for x in outputs_i], axis=0)\n probs = np.exp(logits) / np.sum(np.exp(logits), axis=2, keepdims=True)\n preds = np.argmax(logits, axis=2)\n\n label_map = {i: label for 
i, label in enumerate(self.labels)}\n out_label_list = [[] for _ in range(labels.shape[0])]\n preds_list = [[] for _ in range(labels.shape[0])]\n probs_list = [[] for _ in range(labels.shape[0])]\n emph_probs_list = [[] for _ in range(labels.shape[0])]\n\n for i in range(labels.shape[0]):\n for j in range(labels.shape[1]):\n if labels[i, j] != self.pad_token_label_id:\n out_label_list[i].append(label_map[labels[i][j]])\n preds_list[i].append(label_map[preds[i][j]])\n probs_list[i].append(probs[i][j][1])\n emph_probs_list[i].append(emph_probs[i][j])\n\n # for validation\n if out_idx == 0:\n # show random example\n rand_idx = np.random.randint(0, len(out_label_list))\n logger.info('True: %s', out_label_list[rand_idx])\n logger.info('Pred: %s', preds_list[rand_idx])\n logger.info('Emph: %s', emph_probs_list[rand_idx])\n logger.info('Prob: %s', probs_list[rand_idx])\n\n # validation score\n val_score = match_m(probs_list, emph_probs_list)\n val_score_mean = np.mean(list(val_score.values()))\n\n # validation accuracy\n val_acc = accuracy_score(out_label_list, preds_list)\n\n # validation f1 score\n val_f1 = f1_score(out_label_list, preds_list)\n\n # score_logs\n score_logs = {\n 'val_score1': val_score[1],\n 'val_score2': val_score[2],\n 'val_score3': val_score[3],\n 'val_score4': val_score[4],\n 'val_score_mean': val_score_mean,\n }\n\n # logs\n logs = {\n 'val_loss': val_loss_mean.detach().numpy(),\n 'val_acc': val_acc,\n 'val_f1': val_f1,\n 'val_score_mean': val_score_mean,\n }\n\n # output dict\n output = {\n 'val_loss': val_loss_mean,\n 'val_f1': val_f1,\n 'val_acc': val_acc,\n 'progress_bar': logs,\n 'log': {**logs, **score_logs},\n }\n\n # write validation results to csv\n csv_dict = {k: v for k, v in vars(self.hparams).items() if k in FIELDNAMES}\n csv_dict.update(score_logs)\n csv_dict.update(logs)\n\n csv_dict['current_epoch'] = self.current_epoch\n csv_dict['global_step'] = self.global_step\n\n write_csv('results.csv', csv_dict)\n self.preds['val'][self.current_epoch][self.global_step] = probs_list\n\n # save prediction for test\n if out_idx == 1:\n self.preds['test'][self.current_epoch][self.global_step] = probs_list\n\n # write to file all val and test predictions\n with open('predicts/' + self.model_id_name + '.pkl', 'wb') as f:\n pickle.dump(self.preds, f)\n\n return output", "def prepare_results(self) -> dict:\n if not hasattr(self, \"results\"):\n raise AttributeError(\n \"Results have not been finalized. 
Please call \"\n \"finalize_results() before saving output.\"\n )\n\n output = {\n \"armory_version\": armory.__version__,\n \"config\": self.config,\n \"results\": self.results,\n \"timestamp\": int(self.time_stamp),\n }\n return output", "def outputs(self, inputs):\n return inputs", "def _map_output_parameters(self, results, algorithm):\n if results is not None:\n\n # update python data objects\n for result_name in results:\n result_type = algorithm.get_type_from_output_name(result_name)\n if result_type is None:\n raise exceptions.PacmanTypeError(\n \"Unrecognised result name {} for algorithm {} with \"\n \"outputs {}\".format(\n result_name, algorithm.algorithm_id,\n algorithm.outputs))\n self._internal_type_mapping[result_type] = results[result_name]\n elif len(algorithm.outputs) != 0:\n raise exceptions.PacmanAlgorithmFailedToGenerateOutputsException(\n \"Algorithm {} did not generate any outputs\".format(\n algorithm.algorithm_id))", "def _compute_outputs(self, *args, **kwargs):\n pass\n # self.outputs = self.model(input_ids=self.input_ids, masked_lm_labels=self.input_ids)\n # self.logits = self.outputs[0][0]\n # self.probs = torch.softmax(self.logits, 1)", "async def collect_final_outputs(self) -> None: # pylint: disable=too-many-branches\n self._become_current()\n\n missing_outputs = False\n assert self.step is not None\n\n did_sleep = False\n\n for pattern in sorted(self.step.output): # pylint: disable=too-many-nested-blocks\n formatted_pattern = fmt_capture(self.kwargs, pattern)\n if is_phony(pattern):\n Invocation.up_to_date[formatted_pattern] = UpToDate(self.name, self.newest_input_mtime_ns + 1)\n continue\n\n try:\n paths = glob_paths(formatted_pattern)\n if not paths:\n Logger.debug(f\"Did not make the optional output(s): {pattern}\")\n else:\n for path in paths:\n self.built_outputs.append(path)\n\n global touch_success_outputs # pylint: disable=invalid-name\n if touch_success_outputs.value:\n if not did_sleep:\n await self.done(asyncio.sleep(1.0))\n did_sleep = True\n Logger.file(f\"Touch the output: {path}\")\n Stat.touch(path)\n\n mtime_ns = Stat.stat(path).st_mtime_ns\n Invocation.up_to_date[path] = UpToDate(self.name, mtime_ns)\n\n if Logger.isEnabledFor(logging.DEBUG):\n if path == formatted_pattern:\n Logger.debug(f\"Has the output: {path} \" f\"time: {_datetime_from_nanoseconds(mtime_ns)}\")\n else:\n Logger.debug(\n f\"Has the output: {pattern} -> {path} \"\n f\"time: {_datetime_from_nanoseconds(mtime_ns)}\"\n )\n\n except NonOptionalException:\n self._become_current()\n Logger.error(f\"Missing the output(s): {pattern}\")\n missing_outputs = True\n break\n\n if missing_outputs:\n self.abort(\"Missing some output(s)\")", "def postprocess(self, inputs: Dict[str, Any], **kwargs) -> Dict[str, Any]:\n return inputs", "def output_data_definitions(self):\n return {\n self.key_outputs: DataDefinition([-1, self.output_size], [torch.Tensor], \"Batch of outputs [BATCH_SIZE x OUTPUT_SIZE]\")\n }", "def _parse_outputs(self, outputs, calculate_confusion_matrix):\n epoch_loss = []\n epoch_accuracy = []\n if calculate_confusion_matrix:\n y_true = []\n y_pred = []\n for step in outputs:\n epoch_loss.append(step['loss'].item())\n epoch_accuracy.append(step['accuracy'].item())\n if calculate_confusion_matrix:\n y_pred.append(step['y_hat'])\n y_true.append(step['y'])\n if calculate_confusion_matrix:\n y_pred = torch.cat(y_pred, 0)\n y_true = torch.cat(y_true, 0)\n confmat = pd.DataFrame(self.confusion_matrix(y_pred, y_true).tolist(\n ), columns=self.project_parameters.classes, 
index=self.project_parameters.classes).astype(int)\n return epoch_loss, epoch_accuracy, confmat\n else:\n return epoch_loss, epoch_accuracy", "def with_output(self, output, output_format):\n\t\tself.variables['output'] = output\n\t\tself.variables['output_format'] = output_format\n\t\treturn self", "def compute_metrics(self, outputs: List[Dict[str, torch.Tensor]]) -> dict:\n distance_pos, distance_neg = [], []\n for minibatch in outputs:\n minibatch = minibatch[\"val_prediction\"]\n src_embedding = minibatch[\"src_sentemb\"]\n ref_embedding = minibatch[\"ref_sentemb\"]\n pos_embedding = minibatch[\"pos_sentemb\"]\n neg_embedding = minibatch[\"neg_sentemb\"]\n\n distance_src_pos = F.pairwise_distance(pos_embedding, src_embedding)\n distance_ref_pos = F.pairwise_distance(pos_embedding, ref_embedding)\n harmonic_distance_pos = (2 * distance_src_pos * distance_ref_pos) / (\n distance_src_pos + distance_ref_pos\n )\n distance_pos.append(harmonic_distance_pos)\n\n distance_src_neg = F.pairwise_distance(neg_embedding, src_embedding)\n distance_ref_neg = F.pairwise_distance(neg_embedding, ref_embedding)\n harmonic_distance_neg = (2 * distance_src_neg * distance_ref_neg) / (\n distance_src_neg + distance_ref_neg\n )\n distance_neg.append(harmonic_distance_neg)\n\n return {\n \"kendall\": self.metrics.compute(\n torch.cat(distance_pos), torch.cat(distance_neg)\n )\n }", "def get_new_input_output_maps(self, mapped_input_output_maps={}):\n inputs = self.get_input_contents()\n mapped_inputs = self.get_mapped_inputs(mapped_input_output_maps)\n mapped_inputs_scope_name = [ip['scope'] + \":\" + ip['name'] for ip in mapped_inputs]\n\n new_inputs = []\n new_input_output_maps = {}\n for ip in inputs:\n ip_scope_name = ip['scope'] + \":\" + ip['name']\n if ip_scope_name not in mapped_inputs_scope_name:\n new_inputs.append(ip)\n\n # to avoid cheking new inputs if there are no new inputs anymore\n if (not new_inputs and 'status' in self.collections[self._primary_input_collection]\n and self.collections[self._primary_input_collection]['status'] in [CollectionStatus.Closed]): # noqa: W503\n self.set_has_new_inputs(False)\n else:\n mapped_keys = mapped_input_output_maps.keys()\n if mapped_keys:\n next_key = max(mapped_keys) + 1\n else:\n next_key = 1\n for ip in new_inputs:\n out_ip = copy.deepcopy(ip)\n out_ip['coll_id'] = self.collections[self._primary_output_collection]['coll_id']\n new_input_output_maps[next_key] = {'inputs': [ip],\n 'outputs': [out_ip]}\n next_key += 1\n\n self.unfinished_points = 1\n\n return new_input_output_maps", "def generate_output_data(self):\n for socket_name, c in self.__taskobject.outputs.items():\n data = Data(self.__project, type=c['type'])\n if c['optional']:\n data.enable(False)\n self.__outputs[socket_name] = {'data': data, 'enable': False}\n else:\n data.enable(True)\n self.__outputs[socket_name] = {'data': data, 'enable': True}", "def process_output(self, state: str, data: SimData, tb_manager: TestbenchManager\n ) -> Tuple[bool, str, Dict[str, Any]]:\n return False, '', {}", "def prep_outputs(self):\n return self._prep_outputs", "def interpret_output(self, batch_output):\n raise NotImplementedError", "def parsed_output(output_elements):\n parsed_output = {}\n for stanza, stanza_value in output_elements.items():\n fake_section = MagicMock()\n fake_section.options = {}\n fake_section.name = stanza\n parsed_output.update({stanza: fake_section})\n for option, value in stanza_value.items():\n fake_setting = MagicMock()\n fake_setting.name = option\n fake_setting.value = value\n 
parsed_output[stanza].options.update({option: fake_setting})\n return parsed_output", "def update_output(self, ):\n input_ids, outputs, grads, adv_tokens = self.batch_output\n\n probs = softmax(outputs, dim=-1)\n probs, labels = torch.max(probs, dim=-1)\n\n tokens = [\n self.tokenizer.convert_ids_to_tokens(input_ids_)\n for input_ids_ in input_ids\n ]\n\n embedding_grads = grads.sum(dim=2)\n \n # norm for each sequence\n norms = torch.norm(embedding_grads, dim=1, p=2) # need check hyperparameter\n \n # normalizing\n for i, norm in enumerate(norms):\n embedding_grads[i] = torch.abs(embedding_grads[i]) / norm\n\n batch_output = []\n \n # check probs, labels shape\n labels = torch.reshape(labels, (1, -1))\n probs = torch.reshape(probs, (1, -1))\n iterator = zip(tokens, probs, embedding_grads, labels)\n\n for example_tokens, example_prob, example_grad, example_label in iterator:\n example_dict = dict()\n # as we do it by batches we has a padding so we need to remove it\n \n example_tokens = [t for t in example_tokens if t != self.tokenizer.pad_token]\n example_dict['tokens'] = example_tokens\n example_dict['grad'] = example_grad.cpu().tolist()[:len(example_tokens)]\n example_dict['label'] = example_label.cpu().tolist()[:len(example_tokens)] # example_label.item()\n example_dict['prob'] = example_prob.cpu().tolist()[:len(example_tokens)] # example_prob.item() \n\n batch_output.append(example_dict)\n\n return batch_output", "def outputs(self):\n\t\treturn {k: v * self.throughput for k, v in self.per_process_outputs.items()}", "def _convert_outputs(outputs, reduction_factor, batch_size):\n\n with tf.variable_scope(\"output_converter\"):\n for key in [\"spec\", \"post_net_spec\", \"stop_token_logits\", \"mag_spec\"]:\n outputs[key] = CentaurDecoder._expand(outputs[key], reduction_factor)\n\n alignments = []\n for sample in range(batch_size):\n alignments.append([outputs[\"alignments\"][0][:, sample, :, :, :]])\n mel_spec = outputs[\"spec\"]\n post_net_spec = outputs[\"post_net_spec\"]\n alignments = alignments\n stop_token_logits = tf.sigmoid(outputs[\"stop_token_logits\"])\n sequence_lengths = outputs[\"lengths\"]\n mag_spec = outputs[\"mag_spec\"]\n stop_token_prediction = outputs[\"stop_token_logits\"]\n return mel_spec, post_net_spec, alignments, stop_token_logits, sequence_lengths, mag_spec, stop_token_prediction", "def calculate(self, assignments):\n # Build a tuple of the relevant input states from the set of\n # assignments given.\n states = tuple([assignments[v] for v in self.inputs])\n\n # Look them up\n try:\n results = self.lookup[states]\n except KeyError:\n raise RuntimeError(\"Error in {} with key {}\".format(self, states))\n\n # Now, construct a mapping over th output variables and return that.\n return dict(zip(self.outputs, results))", "def _postprocess(self, output: Dict[str, np.ndarray]):\n # Slice to remove padding, omitting initial [CLS] and final [SEP]\n slicer = slice(1, output.pop(\"ntok\") - 1)\n output[\"tokens\"] = self.tokenizer.convert_ids_to_tokens(\n output.pop(\"input_ids\")[slicer])\n probas = output.pop(\"probas\")\n\n # Predictions at every position, regardless of masking.\n output[\"pred_tokens\"] = self._get_topk_tokens(probas[slicer]) # pytype: disable=container-type-mismatch\n\n return output", "def _populate_output(self):\n self._store_atomic_queries_table()\n self._store_composite_queries_table()", "def parse_output(param_names, params):\n\tif not os.path.isfile(params.output_file):\n\t\tprint(\"output file does not exist! 
({})\".format(params.output_file))\n\t\tsys.exit(NO_OUTPUT)\n\n\tresults = {}\n\n\twith open(params.output_file, 'r') as out:\n\t\titeration_matches = re.findall(params.iteration_regex, out.read())\n\t\tfor iteration_match in iteration_matches:\n\t\t\titeration = int(iteration_match[0])\n\t\t\tresults[iteration] = [float(iteration_match[m]) for m in range(1,len(iteration_match))]\n\n\treturn results", "def process(self):\n self.output_info = self.attributes.copy()", "def validate_output(self):\n if self.dimension == 2:\n required = SEGMENT_GEO_SIG | self.output_signature\n for rays in [\n self.active_rays,\n self.finished_rays,\n self.stopped_rays,\n self.dead_rays\n ]:\n if bool(rays):\n sig = set(rays.keys())\n if not (sig >= required):\n raise RuntimeError(\n f\"Optical engine failed output signature check. System \" \n f\"signature is {sig}, but needed {required}.\"\n )", "def _init_results(self) -> None:\n pt_bond_dimensions = {}\n for site, pt in enumerate(self._process_tensors):\n if pt is not None:\n pt_bond_dimensions[site] = pt.get_bond_dimensions()\n\n self._results = {\n 'time':[],\n 'norm': [],\n 'bond_dimensions': [],\n 'dynamics': {},\n 'pt_bond_dimensions': pt_bond_dimensions,\n }\n for sites in self._dynamics_sites:\n self._results['dynamics'][sites] = Dynamics(name=f\"site{sites}\")", "def _generate_output(self):\n raise NotImplementedError()", "def organize_outputs(\n command_outputs: list,\n prefix: str,\n multiple_prefixes: bool = False,\n file_output: bool = False,\n ) -> List[dict]:\n organized_outputs = []\n if file_output:\n command_outputs = command_outputs + [\n OutputArgument(\n name=\"EntryID\",\n prefix=\"InfoFile\",\n output_type=dict,\n description=\"The EntryID of the report file.\",\n ),\n OutputArgument(\n name=\"Extension\",\n prefix=\"InfoFile\",\n output_type=str,\n description=\"The extension of the report file.\",\n ),\n OutputArgument(\n name=\"Name\",\n prefix=\"InfoFile\",\n output_type=str,\n description=\"The name of the report file.\",\n ),\n OutputArgument(\n name=\"Info\",\n prefix=\"InfoFile\",\n output_type=str,\n description=\"The info of the report file.\",\n ),\n OutputArgument(\n name=\"Size\",\n prefix=\"InfoFile\",\n output_type=int,\n description=\"The size of the report file.\",\n ),\n OutputArgument(\n name=\"Type\",\n prefix=\"InfoFile\",\n output_type=str,\n description=\"The type of the report file.\",\n ),\n ]\n for output_key in command_outputs:\n context_path = output_key.name\n if prefix:\n context_path = f\"{prefix}.{output_key.name}\"\n if multiple_prefixes:\n if output_key.prefix:\n context_path = f\"{output_key.prefix}.{output_key.name}\"\n elif \".\" in output_key.name:\n context_path = output_key.name\n\n if output_key:\n organized_outputs.append(\n {\n \"contextPath\": context_path,\n \"description\": output_key.description,\n \"type\": MetadataToDict.get_metadata_type(\n output_key.output_type\n ),\n }\n )\n\n return organized_outputs", "def output_data_definitions(self):\n return {\n self.key_outputs: DataDefinition(\n [-1]*(self.num_inputs_dims-1) + [self.input_size],\n [torch.Tensor],\n \"Batch of outputs [DIM 1 x DIM 2 x ... 
x INPUT_SIZE]\")\n }", "def _process_convert_output_(self, output_data, **kwargs):\n if kwargs['always_include_output']:\n return super(Automaton, self)._process_convert_output_(\n output_data, **kwargs)\n accept_input, current_state, output = output_data\n if kwargs['full_output']:\n return (accept_input, current_state)\n else:\n return accept_input", "def check(self):\n self.init()\n self.calculate_output()\n self.compare_outputs_with_expects()", "def load_outputs(self):\n\n # Get all the data series from the CSV files (for every input of the model)\n LoadedOutputs = True\n for o in self.outputs:\n if o.is_measured_output():\n LoadedOutputs = LoadedOutputs and o.read_data_series()\n \n if not LoadedOutputs:\n logger.error(\"An error occurred while loading the outputs\")\n else:\n logger.debug(\"Outputs loaded correctly\")\n \n return LoadedOutputs", "def collectOutput(self, finishedJob, output):\n evaluation = finishedJob.getEvaluation()\n outputDict = evaluation[1]\n # FIXED: writing directly to file is no longer an option!\n #if isinstance(output, Files.File):\n # availExtens = ['xml']\n # outputExtension = output.getExt().lower()\n # if outputExtension not in availExtens:\n # self.raiseAMessage('Metric postprocessor did not recognize extension \".', str(outputExtension), '\". The output will be dumped to a text file')\n # output.setPath(self._workingDir)\n # self.raiseADebug('Write Metric prostprocessor output in file with name: ', output.getAbsFile())\n # self._writeXML(output, outputDict)\n if output.type in ['PointSet', 'HistorySet']:\n self.raiseADebug('Adding output in data object named', output.name)\n rlz = {}\n for key, val in outputDict.items():\n newKey = key.replace(\"|\",\"_\")\n rlz[newKey] = val\n if self.dynamic:\n rlz[self.pivotParameter] = np.atleast_1d(self.pivotValues)\n output.addRealization(rlz)\n # add metadata\n xml = self._writeXML(output, outputDict)\n output._meta['MetricPP'] = xml\n elif output.type == 'HDF5':\n self.raiseAnError(IOError, 'Output type', str(output.type), 'is not yet implemented. 
Skip it')\n else:\n self.raiseAnError(IOError, 'Output type ', str(output.type), ' can not be used for postprocessor', self.name)", "def build(self):\r\n self.dirty = 0\r\n \r\n # Files first\r\n for output in self.files.keys():\r\n params = self.files[output]\r\n if (params[1] != -1):\r\n filename = params[0]\r\n freq = params[1]\r\n if (output == 'energies'):\r\n self.myOutputs.append(OutputEnergies.OutputEnergies(filename, freq, 1,0,1.0,0))\r\n elif (output == 'dcdtrajpos'):\r\n if (os.path.exists(filename)): # Continue\r\n self.myOutputs.append(OutputDCDTrajectory.OutputDCDTrajectory(filename, freq, 1, 1))\r\n else: # Overwrite\r\n self.myOutputs.append(OutputDCDTrajectory.OutputDCDTrajectory(filename, freq, 1, 0))\r\n elif (output == 'dcdtrajvel'):\r\n if (os.path.exists(filename)):\r\n self.myOutputs.append(OutputDCDTrajectoryVel.OutputDCDTrajectoryVel(filename, freq, 1, 1))\r\n else:\r\n self.myOutputs.append(OutputDCDTrajectoryVel.OutputDCDTrajectoryVel(filename, freq, 1, 0))\r\n elif (output == 'xyztrajforce'):\r\n self.myOutputs.append(OutputXYZTrajectoryForce.OutputXYZTrajectoryForce(filename, freq))\r\n elif (output == 'xyztrajpos'):\r\n self.myOutputs.append(OutputXYZTrajectoryPos.OutputXYZTrajectoryPos(filename, freq, 1))\r\n elif (output == 'xyztrajvel'):\r\n self.myOutputs.append(OutputXYZTrajectoryVel.OutputXYZTrajectoryVel(filename, freq))\r\n elif (output == 'gui'):\r\n self.myOutputs.append(OutputFAHGUI.OutputFAHGUI(filename, freq, 52753, 1, \"MDL_3.0\", 0.0, 0))\r\n\r\n if (self.screen != -1):\r\n self.myOutputs.append(OutputScreen.OutputScreen(self.screen))\r\n\r\n\r\n # Now plots\r\n for plot in self.plots.keys():\r\n freq = self.plots[plot]\r\n if (freq != -1):\r\n\r\n # Initialize a plot\r\n if (not self.doMPL): # Gnuplot\r\n self.xyData[plot] = []\r\n self.graphs[plot] = Gnuplot(debug=0)\r\n else: # Matplotlib\r\n self.xData[plot] = []\r\n self.yData[plot] = []\r\n self.figures[plot] = 0\r\n\r\n # Add the function to plot the data,\r\n # and the frequency at which to execute it\r\n self.myPlots.append([self.plotFunctions[plot], freq])", "def add_warnings_and_errors(self, output_data):\n # add the dictionary with warnings and errors\n warnings = self.retrieved.get_object_content(self.node.get_option(\"scheduler_stderr\"))\n # for some reason, errors may be in the stdout, but not the log.lammps\n stdout = self.retrieved.get_object_content(self.node.get_option(\"scheduler_stdout\"))\n errors = [line for line in stdout.splitlines() if line.startswith(\"ERROR\")]\n\n for error in errors:\n self.logger.error(error)\n\n output_data.update({'warnings': warnings})\n output_data.update({'errors': errors})", "def parse_output(self, outputfile, directory):\n out_dictionary = {}\n if os.path.exists(outputfile) and \\\n os.path.getsize(outputfile) > 600: # make sure exists and not empty.\n f = open(outputfile)\n file = f.readlines()\n try:\n rating = float(file[-2].split(\" \")[6].split(\"/\")[0])\n out_dictionary[\"pylint_rating\"] = rating\n f.close()\n except:\n print(\"something went wrong when parsing pylint\")\n\n return out_dictionary", "def calculate_output(self):", "def outputs(self):\n return {\"path_to_dtb_evaluation_result\": File_IO(\n self.node.outputs[0])}", "def _process_output(self, driver_output):\n fs = self._port._filesystem\n failures = self._handle_error(driver_output)\n expected_driver_output = self._expected_driver_output()\n\n # Check the output and save the results.\n start_time = time.time()\n time_for_diffs = {}\n for test_type in self._test_types:\n 
start_diff_time = time.time()\n new_failures = test_type.compare_output(\n self._port, self._filename, self._options, driver_output,\n expected_driver_output)\n # Don't add any more failures if we already have a crash, so we don't\n # double-report those tests. We do double-report for timeouts since\n # we still want to see the text and image output.\n if not driver_output.crash:\n failures.extend(new_failures)\n test_result_writer.write_test_result(\n self._port, self._options.results_directory, self._filename,\n driver_output, expected_driver_output, new_failures)\n time_for_diffs[test_type.__class__.__name__] = (\n time.time() - start_diff_time)\n\n total_time_for_all_diffs = time.time() - start_diff_time\n return TestResult(self._filename, failures, driver_output.test_time,\n total_time_for_all_diffs, time_for_diffs)", "def validation_epoch_end(self, outputs):\n avg_loss = torch.stack([x[\"val_loss\"] for x in outputs]).mean()\n tensorboard_logs = {\"val_loss\": avg_loss}\n return {\"avg_val_loss\": avg_loss, \"log\": tensorboard_logs}", "def validation_epoch_end(self, outputs):\n avg_loss = torch.stack([x[\"val_loss\"] for x in outputs]).mean()\n tensorboard_logs = {\"val_loss\": avg_loss}\n return {\"avg_val_loss\": avg_loss, \"log\": tensorboard_logs}", "def get_outputs(self, inputs):\n \n # Paths\n input_path = self.input_path\n output_path = self.output_path\n\n # Filename changes\n output_extension = stringify(self.executor.output_extension)\n output_prefix = stringify(self.executor.output_prefix) or ''\n\n if self.output:\n # Combine all inputs into one output\n output = output_prefix + change_extension(self.output, output_extension)\n output = join_path(output_path, output)\n \n if self.output_transform:\n output = self.output_transform(output)\n \n if self.run_output:\n if self.run_command:\n verify_type(self.run_command, list)\n run_command = [stringify(v).format(output=output) for v in self.run_command]\n else:\n run_command = [output]\n with current_context() as ctx:\n ctx.current.project.run[self.run_output] = run_command\n \n return True, [Output(output_path, output)]\n elif inputs:\n # Each input matches an output\n \n # Strip prefix\n if self.output_strip_prefix_from:\n with current_context() as ctx:\n _, p = ctx.current.project.get_phase_for(self.output_strip_prefix_from,\n 'output_strip_prefix_from')\n if p:\n output_strip_prefix = p.output_path\n else:\n output_strip_prefix = None\n else:\n output_strip_prefix = stringify(self.output_strip_prefix)\n if output_strip_prefix is None:\n output_strip_prefix = input_path\n if not output_strip_prefix.endswith(os.sep):\n output_strip_prefix += os.sep\n output_strip_prefix_length = len(output_strip_prefix)\n \n outputs = [] \n for the_input in inputs:\n output = the_input\n \n # Strip prefix\n if output.startswith(output_strip_prefix):\n output = output[output_strip_prefix_length:]\n\n # Filename changes\n if output_prefix:\n p, f = os.path.split(output)\n output = join_path(p, output_prefix + f)\n output = change_extension(output, output_extension)\n \n output = join_path(output_path, output)\n\n if self.output_transform:\n output = self.output_transform(output)\n\n outputs.append(Output(output_path, output))\n \n return False, outputs\n else:\n return False, []" ]
[ "0.6837641", "0.67924696", "0.6737341", "0.6717199", "0.65907484", "0.64653313", "0.64050525", "0.6332762", "0.6276232", "0.62465364", "0.62231946", "0.61539495", "0.6134005", "0.61062723", "0.6100173", "0.60465825", "0.60387117", "0.6037164", "0.60108835", "0.60005236", "0.59186393", "0.59162885", "0.5847018", "0.5843437", "0.5810045", "0.5800307", "0.57823455", "0.57765514", "0.5774133", "0.5768928", "0.5762034", "0.5756177", "0.57544506", "0.57348514", "0.57251865", "0.57113135", "0.5708255", "0.5698998", "0.568852", "0.568852", "0.56721234", "0.56593335", "0.56531864", "0.56427693", "0.56383", "0.56383", "0.5631596", "0.5615691", "0.561284", "0.5609294", "0.5609294", "0.5609294", "0.5609294", "0.5600891", "0.55810225", "0.5575506", "0.5570602", "0.5568907", "0.55381984", "0.5536898", "0.553552", "0.55287206", "0.5503354", "0.5502427", "0.55021334", "0.5489169", "0.5487788", "0.5486073", "0.5481114", "0.5468121", "0.5458374", "0.5455963", "0.54531276", "0.5451641", "0.5451387", "0.5450865", "0.5444401", "0.54337955", "0.542382", "0.54232025", "0.5420143", "0.5413334", "0.5404052", "0.54022545", "0.5401567", "0.54004276", "0.53998077", "0.53991646", "0.53884923", "0.53881484", "0.5383326", "0.53778356", "0.53735495", "0.53701454", "0.5365872", "0.53606904", "0.53561497", "0.53515947", "0.53515947", "0.5349471" ]
0.5601338
53
View all the images in the dataset, on a 3 by X grid size.
def view_images(dataset, size):
    images, labels = dataset
    assert images.shape[0] == labels.shape[0]
    num_images = images.shape[0]
    num_cols = 3
    num_rows = np.ceil(num_images / num_cols).astype("int")
    plt.figure(figsize=size)
    for i in range(num_images):
        image = images[i]
        label = labels[i]
        ax = plt.subplot(num_rows, num_cols, i + 1)
        plt.imshow(np.array(image, dtype="float"))
        plt.title("Number: " + str(label))
        plt.axis("off")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_imgs(dataset, n_imgs, plot_size=(15, 15), cmap=None):\n n_cols = int(np.sqrt(n_imgs))\n n_rows = int(np.ceil(np.sqrt(n_imgs)))\n class_idx = dataset.class_to_idx\n idx_class = idx_to_class(class_idx)\n\n fig, axes = plt.subplots(n_rows, n_cols, figsize=plot_size)\n for i, ax in enumerate(axes.flatten()):\n ax.axis('off')\n title = f'Class : {idx_class[dataset.targets[i]]}'\n ax.imshow(dataset.data[i], cmap=cmap)\n ax.set_title(title)\n fig.tight_layout()", "def display_sample_images(self):\n if self.train_dataset is None:\n self.init_datasets()\n\n images, labels = next(self.train_dataset)\n plt.figure(figsize=(5,5))\n for n in range(min(25, images.shape[0])):\n ax = plt.subplot(5,5,n+1)\n plt.imshow(images[n])\n if len(labels.shape) == 1:\n plt.title(self.class_names[int(labels[n])].title())\n else:\n m = np.argmax(labels[n])\n plt.title(self.class_names[int(labels[n, m])].title())\n plt.axis('off')\n\n plt.tight_layout()\n plt.show()", "def PlotImages(x):\r\n # 5.1 Create figure-window and axes\r\n _, ax = plt.subplots(nrows = 2, ncols= 3)\r\n # 5.2\r\n ax[0,0].imshow(x[0, :].reshape(75,75))\r\n ax[0,1].imshow(x[1, :].reshape(75,75))\r\n ax[0,2].imshow(x[2, :].reshape(75,75))\r\n ax[1,0].imshow(x[3, :].reshape(75,75))\r\n ax[1,1].imshow(x[4, :].reshape(75,75))\r\n ax[1,2].imshow(x[5, :].reshape(75,75))\r\n plt.show()", "def show_imagegrid_dataset(dataset,\n num=10,\n shuffle=True,\n classes='auto',\n figsize=None,\n fontsize=20,\n image_attr={'cmap': plt.cm.Greys_r}):\n sample = dataset[0]\n if isinstance(sample, tuple) and len(sample) == 2:\n images_per_class = get_labeled_imagegrid(dataset,\n num=num,\n shuffle=shuffle,\n classes=classes)\n num = min(num, max(map(len, images_per_class.values())))\n classes = list(images_per_class.keys())\n\n if figsize is None:\n figsize = (2 * num, 2 * len(classes))\n fig, axs = plt.subplots(figsize=figsize, nrows=len(classes), ncols=num)\n if len(classes) == 1:\n axs = np.expand_dims(axs, 0)\n if num == 1:\n axs = np.expand_dims(axs, -1)\n for i, (class_name, class_images) in enumerate(images_per_class.items()):\n for j, img in enumerate(class_images):\n show_image(img, axs[i][j], image_attr)\n axs[i][0].set_ylabel(str(class_name), fontsize=fontsize)\n elif isinstance(sample, (Image, torch.Tensor, np.ndarray)):\n image_list = get_imagegrid(dataset,\n num=num,\n shuffle=shuffle)\n num = min(len(image_list), num)\n nrows = math.ceil(math.sqrt(num))\n ncols = math.ceil(num / nrows)\n if figsize is None:\n figsize = (2 * nrows, 2 * ncols)\n fig, axs = plt.subplots(figsize=figsize, nrows=nrows, ncols=ncols)\n axs = axs.flatten()\n for i, img in enumerate(image_list):\n show_image(img, axs[i], image_attr)", "def show_images(images, save=None, size=None):\n assert len(images) > 0, \"images should contain at least 1 element\"\n assert len(images[0].shape) == 3, \"each image should contain 3 elements (c, w,h)\"\n \n fig, ax = plt.subplots(nrows=images[0].shape[0], ncols=len(images))\n \n for i in range(len(images)): \n for j in range(images[0].shape[0]):\n ax[i,j].imshow(images[i][j,:,:], cmap='gray')\n \n plt.show()", "def show_images(imgs, nrows, ncols, figsize=None):\n figsize = (ncols, nrows)\n _, figs = plt.subplots(nrows, ncols, figsize=figsize)\n for i in range(nrows):\n for j in range(ncols):\n figs[i][j].imshow(imgs[i*ncols+j].asnumpy())\n figs[i][j].axes.get_xaxis().set_visible(False)\n figs[i][j].axes.get_yaxis().set_visible(False)\n plt.show()", "def view_image(train_dataloader):\n for (x, target) in train_dataloader:\n 
np.save(\"img.npy\", x)\n print(x.shape)\n exit(0)", "def display_images_in_grid(imgs, row, col):\n if len(imgs) != (row * col):\n raise ValueError(f\"Invalid imgs len:{len(imgs)} col:{row} row:{col}\")\n\n for i, img in enumerate(imgs):\n plot_num = i + 1\n plt.subplot(row, col, plot_num)\n plt.tick_params(labelbottom=False) # remove x axis\n plt.tick_params(labelleft=False) # remove y axis\n plt.imshow(img)\n plt.show()", "def grid(images, cols = 2, save = False, filename = \"\", show = False):\n \n rows = ceil(len(images) / cols)\n \n fig, ax = plt.subplots(rows, 1)\n\n index = 0\n element = []\n for row in range(rows):\n for col in range(cols): \n if index < len(images):\n element.append(images[index])\n index += 1\n \n stack = np.hstack(tuple(element))\n ax[row].axis('off')\n ax[row].imshow(stack)\n element = []\n \n plt.tight_layout()\n \n if save:\n fig.savefig(filename)\n\n if show:\n plt.show(fig)\n \n return 0", "def display_images(digits_im):\n i = 0\n\n for img in digits_im:\n if i < N_NEIGHBOURS:\n # Visualize your data\n im_max = np.max(img)\n img = PIXELS * (np.abs(im_max - img) / im_max)\n res = cv2.resize(img, (DIM, DIM), interpolation=cv2.INTER_CUBIC)\n cv2.imwrite('digit ' + str(i) + '.png', res)\n i += 1\n else:\n break", "def visulize_5(X):\n fig, axes1 = plt.subplots(5,5,figsize=(3,3))\n for j in range(5):\n for k in range(5):\n i = np.random.choice(range(len(X)))\n axes1[j][k].set_axis_off()\n axes1[j][k].imshow(X[:,i].reshape(32, 32, 3))\n plt.show()", "def img_viewer_examples(images, labels, prediction = None, size=0, greyscale=False):\n batchSize = min(size, images.shape[0])\n \n if size == 0:\n batchSize = images.shape[0]\n\n # I CAN TAKE THE BATCH_SIZE from the images size/shape according the sent data type\n no_of_columns = round(math.sqrt(batchSize))\n no_of_rows = math.ceil(batchSize / no_of_columns)\n print(\"batch size {}, no_of_rows {}, no_of_columns {}\".format(batchSize, no_of_rows, no_of_columns))\n fig = plt.figure(figsize=(no_of_columns*1.25, no_of_rows*1.5))\n # (width, height)\n for idx in np.arange(batchSize):\n ax = fig.add_subplot(no_of_rows, no_of_columns,\n idx+1, xticks=[], yticks=[])\n if greyscale:\n ax.imshow(np.squeeze(images[idx]), cmap='gray')\n else:\n ax.imshow(np.squeeze(images[idx]))\n # print out the correct label for each image\n # .item() gets the value contained in a Tensor\n # WAIT FOR TASNEEM TO SEE THE RETURNED DATA TYPE\n if not prediction is None:\n ax.set_title(\"{} ({})\".format(str(prediction[idx]), str(labels[idx])),\n color=(\"green\" if prediction[idx] == labels[idx] else \"red\"))\n else:\n ax.set_title(str(labels[idx]))", "def show_images(imgs, num_rows, num_cols, scale=2):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = plt.subplots(num_rows, num_cols, figsize=figsize)\n for i in range(num_rows):\n for j in range(num_cols):\n axes[i][j].imshow(imgs[i * num_cols + j].asnumpy())\n axes[i][j].axes.get_xaxis().set_visible(False)\n axes[i][j].axes.get_yaxis().set_visible(False)\n return axes", "def imshow_grid(images, shape=[2, 8]):\n fig = plt.figure(1)\n grid = ImageGrid(fig, 111, nrows_ncols=shape, axes_pad=0.05)\n\n size = shape[0] * shape[1]\n for i in range(size):\n grid[i].axis('off')\n grid[i].imshow(images[i]) # The AxesGrid object work as a list of axes.\n\n plt.show()", "def imshow_grid(images, shape=[2, 8]):\n fig = plt.figure(1)\n grid = ImageGrid(fig, 111, nrows_ncols=shape, axes_pad=0.05)\n\n size = shape[0] * shape[1]\n for i in range(size):\n grid[i].axis('off')\n grid[i].imshow(images[i]) # 
The AxesGrid object work as a list of axes.\n\n plt.show()", "def imshow_grid(images, shape=[2, 2], name='default', save=False):\n fig = plt.figure()\n grid = ImageGrid(fig, 111, nrows_ncols=shape, axes_pad=0.05)\n\n size = shape[0] * shape[1]\n for i in range(size):\n grid[i].axis('off')\n img = images[i]\n if img.shape[0]==3:\n img = img.transpose(1, 2, 0)\n img = (img - img.min())/(img.max() - img.min())\n grid[i].imshow(img, vmin=-132, vmax = 164) # The AxesGrid object work as a list of axes.\n\n plt.show()", "def show_imgs(imgs, row, col):\n if len(imgs) != (row * col):\n raise ValueError(\n \"Invalid imgs len:{} col:{} row:{}\".format(len(imgs), row, col))\n\n for i, img in enumerate(imgs):\n plot_num = i+1\n plt.subplot(row, col, plot_num)\n plt.tick_params(labelbottom=False) # x軸の削除\n plt.tick_params(labelleft=False) # y軸の削除\n plt.imshow(img)\n plt.show()", "def show_images(imgs, num_rows, num_cols, scale=2):\n figsize = (num_cols*scale, num_rows*scale)\n _, axes = plt.subplots(num_rows, num_cols, figsize=figsize)\n for i in range(num_rows):\n for j in range(num_cols):\n # show the target image\n axes[i][j].imshow(imgs[i*num_cols+j])\n # set the sub-axis to be invisible\n axes[i][j].axes.get_xaxis().set_visible(False)\n axes[i][j].axes.get_yaxis().set_visible(False)\n # remember to show figure at last\n plt.show()\n return axes", "def show_train_images(train_data, train_labels):\n plt.figure(1, figsize=(8, 8))\n n = 0\n\n for i in range(16):\n n += 1\n # each time random images are loaded\n # r = np.random.randint(0, train_data.shape[0], 1)\n plt.subplot(4, 4, n)\n plt.subplots_adjust(hspace=0.5, wspace=0.5)\n plt.imshow(train_data[i] / 255.)\n plt.title('{}'.format(train_labels[i]))\n plt.xticks([]), plt.yticks([])\n plt.show()", "def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5): #@save\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n ax.imshow(d2l.numpy(img))\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes", "def show_images(images, db):\n images = [int(image) for image in images]\n files = get_img_files(images, db)\n show_files(files)", "def plot_100_images(X, indices=None):\n width, height = IMAGE_WIDTH, IMAGE_HEIGHT\n nrows, ncols = 10, 10\n if indices is None:\n indices = range(X.shape[0])\n indices_to_display = np.random.choice(indices, nrows * ncols)\n\n big_picture = np.zeros((height * nrows, width * ncols))\n\n irow, icol = 0, 0\n for idx in indices_to_display:\n if icol == ncols:\n irow += 1\n icol = 0\n iimg = X[idx].reshape(width, height).T # transpose the data set\n big_picture[irow * height:irow * height + iimg.shape[0],\n icol * width:icol * width + iimg.shape[1]] = iimg\n icol += 1\n plt.imshow(big_picture, cmap=matplotlib.cm.Greys_r)\n\n plt.show()", "def display_data():\n data = read_data_from_file()\n data_size = len(data)\n print('Data size - ' + str(data_size))\n\n\n for i in range(data_size-1, 0, -1):\n image = data[i]\n cv2.imshow('window', image[0])\n print(str(image[1]))\n cv2.waitKey(50)", "def show_image_grid(imgs):\n grd = make_grid(imgs)\n npimg = grd.numpy()\n plt.imshow(np.transpose(npimg, (1,2,0)), interpolation='nearest')\n plt.ion()\n plt.show()", "def plot_n_image(X, n):\n pic_size = int(np.sqrt(X.shape[1]))\n grid_size = int(np.sqrt(n))\n\n first_n_images = X[:n, :]\n\n fig, ax_array = 
plt.subplots(nrows=grid_size, ncols=grid_size,sharey=True, sharex=True, figsize=(8, 8))\n\n for r in range(grid_size):\n for c in range(grid_size):\n ax_array[r, c].imshow(first_n_images[grid_size * r + c].reshape((pic_size, pic_size)))\n plt.xticks(np.array([]))\n plt.yticks(np.array([]))", "def show_torch_imgs(imgs, nrow=8, figsize=(8, 5), axis_off=True , **opt):\n import torchvision\n import torch\n if not torch.is_tensor(imgs):\n # Not a torch tensor. Assume that it is torch.autograd.Variable\n # Try to get the tensor inside the Variable.\n try:\n imgs = imgs.data\n except:\n raise ValueError('Expect input imgs to be a torch Tensor or torch.autograd.Variable.')\n # https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91\n img = torchvision.utils.make_grid(imgs, nrow=nrow, **opt)\n npimg = img.cpu().numpy()\n # make it height x width x channels\n npimg = np.transpose(npimg, (1, 2, 0))\n\n plt.figure(figsize=figsize)\n plt.imshow(npimg, interpolation='nearest')\n if axis_off:\n plt.axis('off')", "def display_imgs(img_dir,img_list):\n for img in img_list:\n display_img(img_dir, img)", "def plot_many_images(images, titles, rows=1, columns=2):\n for i, image in enumerate(images):\n plt.subplot(rows, columns, i + 1)\n plt.imshow(image, \"gray\")\n plt.title(titles[i])\n plt.xticks([]), plt.yticks([]) # Hide tick marks\n plt.show()", "def show_images(images):\n for name, img in images:\n cv2.imshow(name, img)\n\n cv2.waitKey(0)", "def three_sample_images():\n samples = samples_path()\n _truck = np.array(Image.open(os.path.join(samples, \"truck.png\")))\n _deer = np.array(Image.open(os.path.join(samples, \"deer.png\")))\n _frog = np.array(Image.open(os.path.join(samples, \"frog.png\")))\n truck = transforms.ToTensor()(_truck)\n deer = transforms.ToTensor()(_deer)\n frog = transforms.ToTensor()(_frog)\n return torch.stack([truck, deer, frog])", "def displayData(indices_to_display = None):\n width, height = 20, 20\n nrows, ncols = 10, 10\n if not indices_to_display:\n indices_to_display = random.sample(range(X.shape[0]), nrows*ncols)\n \n big_picture = np.zeros((height*nrows,width*ncols))\n \n irow, icol = 0, 0\n for idx in indices_to_display:\n if icol == ncols:\n irow += 1\n icol = 0\n iimg = getDatumImg(X[idx])\n big_picture[irow*height:irow*height+iimg.shape[0], icol*width:icol*width+iimg.shape[1]] = iimg\n icol += 1\n fig = plt.figure(figsize=(6,6))\n\n big_picture = (big_picture * 255).astype(np.int8)\n img = Image.fromarray(big_picture, mode='L')\n plt.imshow(img, cmap = cm.Greys)", "def grid_image(output):\n grid = []\n for data in output:\n grid += [make_grid(data, nrow=5, normalize=True)]\n return grid", "def print_images(i, df):\n \n images_folder_path = \"dataset/petfinder-adoption-prediction/train_images/\"\n plt.imshow(cv2.cvtColor(cv2.imread(images_folder_path+df.filename[i]), cv2.COLOR_BGR2RGB),);\n plt.axis(\"off\");\n plt.show()", "def display_images(filenames):\n for filename in filenames:\n display(Image(filename))", "def show_data(self, samples=5):\n self._samples = samples\n \n idx = 0\n batch = next(iter(self._loader))\n while idx <= self._samples:\n\n (img_batch, label_batch) = batch\n\n fig = plt.figure()\n ax1 = fig.add_subplot(121)\n ax2 = fig.add_subplot(122)\n \n ax1.imshow(np.transpose(img_batch[idx], (1, 2, 0)))\n ax2.imshow(np.transpose(label_batch[idx], (1, 2, 0)).squeeze(dim=2))\n\n plt.show()\n idx += 1", "def visualize_image(images, save_name):\n dim = images.shape[0]\n n_image_rows = int(np.ceil(np.sqrt(dim)))\n n_image_cols = int(np.ceil(dim * 1.0 
/ n_image_rows))\n gs = gridspec.GridSpec(n_image_rows, n_image_cols, top=1., bottom=0.,\n right=1., left=0., hspace=0., wspace=0.)\n\n for g, count in zip(gs, range(int(dim))):\n ax = plt.subplot(g)\n ax.imshow(images[count, :].astype(np.float32).reshape((28, 28)))\n ax.set_xticks([])\n ax.set_yticks([])\n plt.savefig(save_name + '_vis.png')", "def plot_gallery(images , h, w, n_row=3, n_col=6):\n plt.figure(figsize=(1.7 * n_col, 2.3 * n_row))\n plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n for i in range(len(images)):\n plt.subplot(n_row, n_col, i + 1)\n plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)\n\n plt.xticks(())\n plt.yticks(())", "def plot_10_by_10_images(images):\n\n\tn = images.shape[0]\n\n\tq = n // 10\n\tr = n%10\n\tprint n,q,r\n\n\tfig = plt.figure()\n\tplt.ion()\n\n\tfor x in range(q):\n\t\tprint x\n\t\tif not x%10:\n\t\t\tplt.clf()\n\t\tfor y in range(10):\n\t\t\tax = fig.add_subplot(10, 10, 10*y+x%10+1)\n\t\t\tax.matshow(images[10*y+x%10], cmap = mpl.cm.binary)\n\t\t\tplt.xticks(np.array([]))\n\t\t\tplt.yticks(np.array([]))\n\t\tplt.show()\n\t\t_=raw_input(\"Press enter to show next 10\")", "def read_vanhateren_images (n_imgs=5):\n folder_name = r'D:\\VanHateren\\vanhateren_imc' # change this to point to the directory which holds the van hateren data\n # files = listdir(folder_name)\n onlyfiles = [ f for f in listdir(folder_name) if isfile(join(folder_name,f)) ]\n imgs = []\n for i in range(n_imgs):\n filename = join(folder_name, onlyfiles[i])\n with open(filename, 'rb') as handle:\n s = handle.read()\n arr = array.array('H', s)\n arr.byteswap()\n img_i = np.array(arr, dtype='uint16').reshape(1024, 1536)\n imgs.append(img_i) \n return imgs\n #pylab.imshow(img)\n #pylab.show()", "def show_four_images(img1, img2, img3, img4, title):\n shape = (460, 250)\n # Get all images in same size for better display\n img1 = cv2.resize(img1, shape)\n img2 = cv2.resize(img2, shape)\n img3 = cv2.resize(img3, shape)\n img4 = cv2.resize(img4, shape)\n # combined 2 images horizontally\n numpy_horizontal1 = np.hstack((img1, img2))\n # combined the rest 2 images horizontally\n numpy_horizontal2 = np.hstack((img3, img4))\n # now combined all vertically to 1 image and display\n numpy_vertical = np.vstack((numpy_horizontal1, numpy_horizontal2))\n # final thing - show the output:\n show_image(numpy_vertical, title)", "def plot_images(imgs, layout, img_sz = 0.7, suptitle = ''):\n\tnrows, ncols = layout \n\tfig, axes = plt.subplots(nrows, ncols, \n\t\tfigsize = (img_sz * ncols, img_sz * nrows))\n\taxes = axes.ravel()\n\tfig.subplots_adjust(hspace = 0, wspace = 0)\n\tfig.suptitle(suptitle)\n\tfor i, img in enumerate(imgs):\n\t\taxes[i].get_xaxis().set_visible(False)\n\t\taxes[i].get_yaxis().set_visible(False)\n\t\taxes[i].imshow(img)", "def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()", "def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()", "def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' 
'.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()", "def images(self, **kwargs):\n\n raise NotImplementedError", "def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()\n # plt.savefig('./drive/My Drive/Colab Notebooks/TACK/Large/result' + ' '.join(name.split('_')).title() + '.png')", "def load_images(folder_path, num_images):\n imgs = np.zeros(shape=[num_images, 400, 400, 3])\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n\n #imgs[i - 1] = np.asarray(img).reshape(400, 400, 3)\n imgs[i - 1] = img.reshape(400, 400, 3)\n else:\n print('File ' + image_path + ' does not exist')\n return imgs", "def plot_latent_images(self, n):\n\n norm = tfp.distributions.Normal(0, 1)\n grid_x = norm.quantile(np.linspace(0.05, 0.95, n))\n grid_y = norm.quantile(np.linspace(0.05, 0.95, n))\n image_width = self.data.digit_size*n\n image_height = image_width\n image = np.zeros((image_height, image_width))\n\n for i, yi in enumerate(grid_x):\n for j, xi in enumerate(grid_y):\n z = np.array([[xi, yi]])\n x_decoded = self.model.sample(z)\n digit = tf.reshape(x_decoded[0], (self.data.digit_size, self.data.digit_size))\n image[i * self.data.digit_size: (i + 1) * self.data.digit_size,\n j * self.data.digit_size: (j + 1) * self.data.digit_size] = digit.numpy()\n\n plt.figure(figsize=(10, 10))\n plt.imshow(image, cmap='Greys_r')\n plt.axis('Off')\n plt.show()", "def extract_images(dimension = (_HEIGHT, _WIDTH), n = 100, color = True, include = ['00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13']):\n # establish directory routes\n origin = '/Users/jaoming/Active Projects/Shopee Challenge/shopee-product-detection-dataset'\n main_train_folder = '/Users/jaoming/Active Projects/Shopee Challenge/shopee-product-detection-dataset/train/train'\n os.chdir(main_train_folder)\n if color:\n imread_color = cv2.IMREAD_COLOR\n else:\n imread_color = cv2.IMREAD_GRAYSCALE\n\n # setting up the variables \n data, labels = [], []\n for name in include:\n os.chdir(name)\n image_namelist = os.listdir()\n if '.DS_Store' in image_namelist: # removing unnecessary files\n image_namelist.remove('.DS_Store')\n count = 0\n while count < n:\n data.append(cv2.resize(\n cv2.imread(image_namelist[count], imread_color),\n dimension,\n interpolation = cv2.INTER_CUBIC\n ))\n labels.append(int(name))\n count += 1\n os.chdir(main_train_folder)\n\n os.chdir(origin)\n return data, labels", "def plot_images_grid(images, labels, title):\n images = images.cpu()\n labels = labels.cpu()\n \n assert type(images[0]) is torch.Tensor, 'Image to plot is not torch.Tensor'\n image_size = int(np.sqrt(images[0].shape[0]))\n \n fig = plt.figure(figsize=(10,4))\n for idx in range(10):\n ax = fig.add_subplot(2,10/2,idx+1, xticks=[], yticks=[])\n ax.imshow(images[idx].view(image_size, image_size), cmap = 'gray')\n label = labels[idx].item()\n ax.set_title(label)\n #end\n fig.suptitle(title, fontsize = 14)\n plt.show()\n plt.close('all')", "def visualize(base_path, test_dataset, plot_dir, batch_size=4, ):\n device = torch.device('cuda')\n dataset = HeadDataset(test_dataset,\n base_path,\n dataset_param={},\n train=False)\n batch_iterator = 
iter(data.DataLoader(dataset, batch_size,\n shuffle=False,\n num_workers=4,\n collate_fn=coco_collate))\n for ind, (images, targets) in enumerate(tqdm(batch_iterator)):\n images = list(img.to(device) for img in images)\n np_images = [(ims.cpu().numpy()*255.).astype(np.uint8) for ims in images]\n gt_boxes = [gt['boxes'].numpy().astype(np.float64) for gt in targets]\n for np_im, gt_box in zip(np_images, gt_boxes):\n plot_images = plot_ims(np_im, [], gt_box)\n imsave(osp.join(plot_dir, str(ind) + '.jpg'), plot_images)", "def collage(images, cols = 2, save = False, filename = \"\", show = False):\n \n rows = ceil(len(images) / cols)\n \n fig, ax = plt.subplots(rows, cols)\n\n index = 0\n\n for row in range(rows):\n for col in range(cols):\n \n if index < len(images):\n ax[row, col].imshow(images[index])\n \n ax[row, col].axis('off')\n index += 1\n plt.tight_layout()\n \n if save:\n fig.savefig(filename)\n\n if show:\n plt.show(fig)\n \n return 0", "def visualization(data, rows, cols, titles, figsize):\n fig, ax = plt.subplots(nrows=rows, ncols=cols, figsize=figsize)\n # plot image on each subplot\n for i, axi in enumerate(ax.flat):\n # i is in range [0, nrows * ncols)\n # axi is equivalent to ax[rowid][colid]\n axi.imshow(data[i])\n axi.set_title(titles[i])\n plt.tight_layout(True)\n plt.show()", "def load_dataset(path_test, width, height):\n tot_images = 0\n for label in listdir(path_test):\n label_full = join(path_test, label)\n for img_name in listdir(label_full):\n tot_images += 1\n\n # allocate the memory\n # THE DTYPE is float, should be the right one\n all_images = np.zeros((tot_images, width, height, 3))\n\n true_labels = []\n num_images = 0\n for label in listdir(path_test):\n label_full = join(path_test, label)\n for img_name in listdir(label_full):\n # for img_name in listdir(label_full)[:10]:\n img_name_full = join(label_full, img_name)\n print(f\"Opening {img_name_full} {width}\")\n\n image = cv2.imread(img_name_full)\n\n image = cv2.resize(image, (width, height))\n\n # scale the pixel values to [0, 1]\n image = image.astype(\"float\") / 255.0\n\n all_images[num_images, :, :, :] = image\n\n num_images += 1\n true_labels.append(label)\n\n print(f\"All_images.shape {all_images.shape}\")\n\n # cv2.imshow('Resized all_images[0]', all_images[0])\n # cv2.waitKey(0)\n\n return all_images, true_labels", "def show(self):\n \n \n \n \n \n \n r = 4\n f, axarr = plt.subplots(r, r, figsize=(8,8))\n counter = 0\n for i in range(r):\n for j in range(r):\n temp = self.x[counter,:]\n counter += 1\n img = self.x[counter,:]\n axarr[i][j].imshow(img)\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def plot_images(imgs_org, imgs_pred, nb_examples=10):\n if nb_examples == 0:\n return\n\n n = nb_examples\n plt.figure(figsize=(nb_examples * 2, 4))\n for i in range(n):\n # display original\n ax = plt.subplot(2, n, i + 1)\n plt.imshow(imgs_org[i].reshape(84, 84))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n # display reconstruction\n ax = plt.subplot(2, n, i + 1 + n)\n plt.imshow(imgs_pred[i].reshape(84, 84))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n plt.show()", "def displayFaceData(X, rows, cols):\n\twidth, height = 32, 32 # This is the shape of original photo (32*32)\n\tpictures_combined = np.zeros((height*rows, width*cols))\n\t\n\trow, col = 0, 0\n\tfor 
a_picture_index in xrange(rows*cols):\n\t\tif col == cols:\n\t\t\trow += 1\n\t\t\tcol = 0\n\t\ta_picture = ReshapeIntoImage(X[a_picture_index],width)\n\t\tpictures_combined[row*height:(row*height+a_picture.shape[0]), col*width:(col*width+a_picture.shape[1])] = a_picture\n\t\tcol += 1\n\n\tfig = plt.figure(figsize=(10,10))\n\timg = scipy.misc.toimage( pictures_combined )\n\tplt.imshow(img,cmap = cm.Greys_r)\n\tplt.show(block=False)", "def main():\n labels, data = load_image_data()\n print(labels.shape, data.shape)", "def retrieve_images(self, batch_size=5, offset=0):\n\t\treturn self.session.query(Image.id, Image.image_path, Image.quantity, Image.cost) \\\n\t\t\t\t\t\t .order_by(Image.id.desc()).offset(offset).limit(batch_size).all()", "def display_image(df, fname_col, img_dir, n):\n\t# Display some train images\n\tnrows = 1+n//20 \n\tfig, axs = plt.subplots(nrows,20, figsize=(20,1.2*nrows),\n\t\t\t\t\t\t facecolor='w', edgecolor='k')\n\taxs = axs.ravel()\n\n\tfor idx, filename in enumerate (df[fname_col][0:n].values):\n\n\t\tif not os.path.isfile(img_dir+filename):\n\t\t\tlogger.error(\"path {} does not exit\".format(img_dir+filename))\n\t\t\t\t\t\t\n\t\timg = mpimg.imread(img_dir + filename)\n\n\t\taxs[idx].imshow(img)\n\t\taxs[idx].set_axis_off()\n\t \n\tplt.subplots_adjust(wspace=0, hspace=0)\n\tplt.show()", "def loadDataset(dataset):\n # List of images.\n images = []\n\n\n\n # Read all filenames from the dataset.\n for filename in dataset:\n # Read the input image.\n image = cv2.imread(filename)\n\n # Add the current image on the list.\n if image is not None: \n images.append(image)\n else:\n print(\"Could not read file: {}\".format(filename))\n sys.exit()\n\n # Return the images list.\n return images", "def graphical(cls, images):\n images = [(plt.imread(i), i.split(os.path.sep)[-1]) for i in images]\n views = [View(ViewPosition.click(img, path), img, img) for img, path in images]\n return cls(views)", "def show_slice(file_path, x, y, z):\n img = nib.load(file_path)\n img = img.get_fdata()\n print(\"The scan has dimensions {}.\".format(img.shape))\n slice_0 = img[x, :, :]\n slice_1 = img[:, y, :]\n slice_2 = img[:, :, z]\n slices = [slice_0, slice_1, slice_2]\n fig, axes = plt.subplots(1, len(slices), figsize=[12, 4])\n for i, sli in enumerate(slices):\n axes[i].imshow(sli.T, cmap=\"gray\", origin=\"lower\")", "def images(self):\n return self.gameimage_set.all()", "def generate_image_grid(sess, df, filenames,op, op2):\n #x_points = np.arange(0, 1, 1.5).astype(np.float32)\n #y_points = np.arange(0, 1, 1.5).astype(np.float32)\n\n nx, ny = 12, 1\n #plt.subplot()\n gs = gridspec.GridSpec(nx, ny, hspace=1, wspace=0.05)\n # input_x = sess.run(op2, feed_dict={x_input: df[0:24]})\n #\n # plt.imshow(np.array(df[0].tolist()).reshape(28, 28), cmap='gray')\n # plt.show()\n # x = sess.run(op, feed_dict={decoder_input: input_x[0].reshape(1,2)})\n # img = np.array(x.tolist()).reshape(28, 28)\n #\n # plt.imshow(img, cmap='gray')\n # plt.show()\n\n \"\"\" grid \"\"\"\n input_x = sess.run(op2, feed_dict={x_input: df[0:24]})\n for i, g in enumerate(gs):\n\n x = sess.run(op, feed_dict={decoder_input: input_x[i].reshape(1,2)})\n ax = plt.subplot(g)\n img = np.array(x.tolist()).reshape(28, 28)\n ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n #ax.set_aspect('auto')\n ax.set_title(filenames[i])\n plt.show()\n\n for i, g in enumerate(gs):\n\n ax = plt.subplot(g)\n img = np.array(df[i].tolist()).reshape(28, 28)\n ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n 
#ax.set_aspect('auto')\n ax.set_title(filenames[i])\n plt.show()", "def plot_gallery(self,images, titles, h, w, n_row=5, n_col=5):\n plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))\n plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n for i in range(n_row * n_col):\n plt.subplot(n_row, n_col, i + 1)\n plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)\n plt.title(titles[i], size=12)\n plt.xticks(())\n plt.yticks(())\n plt.show()", "def showImages(images):\n idx = 0\n\n while True:\n\n cv2.imshow(\"Image\", images[idx])\n\n if cv2.waitKey(15) & 0xFF == ord(\"d\"):\n if idx+1 >= len(images):\n print(\"This is the last image in the set.\")\n else:\n idx += 1\n print(\"Viewing image no. {0} / {1}\".format(idx+1, len(images)))\n\n if cv2.waitKey(15) & 0xFF == ord(\"a\"):\n if idx-1 < 0:\n print(\"This is the first image in the set.\")\n else:\n idx -= 1\n print(\"Viewing image no. {0} / {1}\".format(idx+1, len(images)))\n\n if cv2.waitKey(15) & 0xFF == ord(\"q\"):\n break", "def displayImages(self):\n\n plt.figure(figsize=(8,6))\n plt.subplot(1,2,1)\n plt.imshow( self.original_image, cmap=\"gray\")\n plt.title(\"Original Image\")\n plt.subplot(1,2,2)\n plt.imshow( self.blurred_image, cmap=\"gray\")\n plt.title(\"Blurred Image\")", "def vis(group, indexlist, save=False):\n ##### Modify these: #####\n # imageloc = '/home/teja/Project_005/toronto/iaprtc12/images/'\n # trainloc = '/home/teja/Project_005/toronto/iaprtc12_2/iaprtc12_train_list.txt'\n # testloc = '/home/teja/Project_005/toronto/iaprtc12_2/iaprtc12_test_list.txt'\n #########################\n\n ##### Modify these: #####\n imageloc = '/home/teja/Programs/convnet/examples/imagenet/'\n trainloc = '/home/teja/Project_005/toronto/iaprtc12_2/iaprtc12_train_list.txt'\n testloc = '/home/teja/Programs/convnet/examples/imagenet/images_list.txt'\n #########################\n\n\n if group == 'train':\n listloc = trainloc\n else:\n listloc = testloc\n f = open(listloc, 'rb')\n ims = []\n for line in f:\n ims.append(line.strip() + '.jpg')\n f.close()\n for i in range(len(indexlist)):\n imloc = imageloc + ims[indexlist[i]]\n im = Image.open(imloc)\n # im.thumbnail((256,256), Image.ANTIALIAS)\n # im.show()\n if save:\n im.save('r' + str(indexlist[i]) + '.jpg')", "def getimgs():", "def makeImage(self):\n\n for row in range(self.height):\n self.makeRow(row)\n self.window.update() # display a row of pixels", "def visualize(**images):\r\n n_images = len(images)\r\n plt.figure(figsize=(20, 8))\r\n for idx, (name, image) in enumerate(images.items()):\r\n plt.subplot(1, n_images, idx + 1)\r\n plt.xticks([])\r\n plt.yticks([])\r\n # get title from the parameter names\r\n plt.title(name.replace('_', ' ').title(), fontsize=20)\r\n plt.imshow(image)\r\n plt.show()", "def plot_image_grid(epoch, generated_images):\n\n fig = plt.figure(figsize=(GRID_SIZE, GRID_SIZE))\n fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.001, hspace=0.001)\n\n for ind in range(GRID_IMGS):\n ax = fig.add_subplot(GRID_SIZE, GRID_SIZE, ind + 1, xticks=[], yticks=[])\n ax.imshow(np.uint8(((generated_images[ind] + 1) / 2) * 255), cmap='gray')\n\n plt.savefig('/content/drive/My Drive/WGAN/train_images/image_iteration_{:05d}.png'.format(epoch + 1))\n plt.close()", "def display(self):\n fig, axes = plt.subplots(1, len(self.views),\n figsize=self._figsize(\n [(self.views[0].image, len(self.views))]),\n squeeze=True)\n for ax, view in zip(axes.ravel(), self.views):\n ax.imshow(view.grey)\n points = self._common_keypoints(view).reshape(-1, 
2)[::-1]\n ax.plot(points[..., 0], points[..., 1], 'r+')\n ax.axis('off')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.set(title=view.position.id)\n fig.tight_layout()\n fig.canvas.draw()\n img_array = np.array(fig.canvas.renderer._renderer)\n plt.close('all')\n return img_array", "def display_some_traffic_sign_examples(path):\n\n images_paths = glob.glob(path + \"\\\\*.png\") # image paths\n plt.figure(figsize = (10, 10))\n\n for i in range(25):\n # select a random index in the dataset\n idx = np.random.randint(0, len(images_paths) - 1)\n img = imread(images_paths[idx])\n\n # subplot the dataset examples\n plt.subplot(5, 5, i + 1)\n plt.tight_layout(rect = [0, 0, 1, 0.95])\n plt.imshow(img)\n\n plt.suptitle(\"Dataset Examples\")\n plt.show()", "def plot_gallery(images, titles, n_row=3, n_col=4):\n plt.figure(figsize=(2.5 * n_col, 2.4 * n_row))\n plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35, wspace = .35)\n for i in range(n_row * n_col):\n plt.subplot(n_row, n_col, i + 1)\n #plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)\n # image = images[i].reshape(h, w)\n # img = []\n # for j in range(len(image)):\n # img.append(list(image[j]))\n # #print(img[:5])\n plt.imshow(images[i])\n #plt.imshow(images[i])\n plt.title(titles[i], size=12)\n plt.xticks(())\n plt.yticks(())", "def plot_gallery(images, titles, h, w, n_row=3, n_col=4):\n pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))\n pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n for i in range(n_row * n_col):\n pl.subplot(n_row, n_col, i + 1)\n pl.imshow(images[i].reshape((h, w)), cmap=pl.cm.gray)\n pl.title(titles[i], size=12)\n pl.xticks(())\n pl.yticks(())", "def process_images(self, images, mode='train'):\n if self.dataset_name == 'dataset1':\n return images[:5000]\n elif self.dataset_name == 'dataset2':\n return np.add(images, 745)\n elif self.dataset_name == 'dataset3':\n # concatenate three images into three-digit image\n if mode == 'train':\n return np.concatenate((images[:40000], images[10000:50000],\n images[20000:60000]), axis=1)\n elif mode == 'test':\n return np.concatenate((images[:8000], images[1000:9000],\n images[2000:10000]), axis=1)\n elif self.dataset_name == 'dataset4':\n # merge two images into one\n if mode == 'train':\n return images[:50000] + images[-50000:]\n elif mode == 'test':\n return images[:9000] + images[-9000:]\n else:\n return images", "def _showphotos(self, btn):\n global layout\n global curdir\n\n # Create the layouts.\n layout = GridLayout(cols=5, padding=0, spacing=0, size_hint=(1, None))\n layout.bind(minimum_height=layout.setter(\"height\"))\n\n foldername = btn\n\n # Args is combined with \"curdir\" to load the thumbnails, and add them to the Gridlayout.\n if foldername == \"\":\n pass\n else:\n for filename in sorted(glob(join(curdir, \"thumb\", foldername, \"*\"))):\n try:\n canvas = BoxLayout(size_hint=(1, None))\n im = Image(source=filename)\n canvas.add_widget(im)\n layout.add_widget(canvas)\n\n except Exception:\n print(\"Pictures: Unable to load <%s>\" % filename)\n\n return layout", "def crop_images(dataset_dir):\n data = []\n for folder in os.listdir(dataset_dir):\n path = os.path.join(dataset_dir, folder, \"*.png\")\n data.extend(glob(path))\n\n for index, filePath in enumerate(data):\n print ('{}/{}'.format(index, len(data)))\n\n img = scipy.misc.imread(filePath).astype(np.uint8)\n img = scipy.misc.imresize(img, 0.25, interp='bilinear', mode=None)\n 
scipy.misc.imsave('/data/vllab1/dataset/CITYSCAPES/CITY_test/fine_image/' + filePath.split('/')[-1], img)", "def plotImages(images_arr):\n \n fig, axes = plt.subplots(1, 5, figsize=(20,20))\n axes = axes.flatten()\n for img, ax in zip( images_arr, axes):\n ax.imshow(img)\n ax.axis('off')\n plt.tight_layout()\n plt.show()\n \n return", "def images_example(path='train_images.pickle'):\n patch_size = (8, 8)\n\n with open('train_images.pickle', 'rb') as f:\n train_pictures = pickle.load(f)\n\n patches = sample_patches(train_pictures, psize=patch_size, n=20000)\n\n plt.figure()\n plt.imshow(train_pictures[0])\n plt.title(\"Picture Example\")\n\n plt.figure()\n for i in range(4):\n plt.subplot(2, 2, i + 1)\n plt.imshow(patches[:, i].reshape(patch_size), cmap='gray')\n plt.title(\"Patch Example\")\n plt.show()", "def build_dataset(self):\n print(\"reading data of images currently , please wait......\")\n x_train, y_train, _ = get_images(self.train_directory)\n x_test, y_test, _ = get_images(self.test_directory)\n x_train, y_train = image_subset(self.num_classes, x_train, y_train)\n x_test, y_test = image_subset(self.num_classes, x_test, y_test)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n self.x_train = x_train / 255\n self.x_test = x_test / 255\n self.y_train = utils.to_categorical(y_train, self.num_classes)\n self.y_test = utils.to_categorical(y_test, self.num_classes)", "def display_images(images, imageConversion=cv.COLOR_BGR2RGB, titles=[], columns=4, rows=None, show=True):\n if not show:\n return\n if imageConversion is not None:\n images = [cv.cvtColor(img, imageConversion) for img in images]\n\n # append filtered image\n if rows is None:\n rows = ceil(float(len(images)) / columns)\n\n try:\n for i in xrange(len(images)):\n plt.subplot(rows,columns,i+1),plt.imshow(images[i],'gray')\n if titles:\n plt.title(titles[i])\n plt.xticks([]),plt.yticks([])\n plt.show()\n except:\n logging.exception(\"Could not plot / show images. 
Saving instead.\")\n save_plt_figure(plt, \"img_show\")", "def plotgrid(data,d=10,shape=(30,30)):\n ion()\n gray()\n clf()\n for i in range(min(d*d,len(data))):\n subplot(d,d,i+1)\n row = data[i]\n if shape is not None: row = row.reshape(shape)\n imshow(row)\n ginput(1,timeout=0.1)", "def plot_sample_imgs(get_imgs_fun, img_shape, plot_side=5, savepath=None):\n f, axarr = plt.subplots(plot_side, plot_side)\n samples = get_imgs_fun(plot_side*plot_side)\n for row in range(plot_side):\n for col in range(plot_side):\n axarr[row, col].imshow(samples[plot_side*row+col].reshape(img_shape))\n axarr[row, col].set_title('')\n axarr[row, col].axis('off')\n if savepath:\n f.savefig(savepath)\n plt.close()\n else:\n plt.show()", "def visualize(**images):\n n_images = len(images)\n plt.figure(figsize=(20,8))\n for idx, (name, image) in enumerate(images.items()):\n plt.subplot(1, n_images, idx + 1)\n plt.xticks([]); \n plt.yticks([])\n # get title from the parameter names\n plt.title(name.replace('_',' ').title(), fontsize=20)\n plt.imshow(image)\n plt.savefig('sample_gt_pred_2_max.jpeg')\n plt.show()", "def init_datasets(self, display_samples = False):\n print(\"==> Loading images from \", self.img_dir)\n self.image_data_gen = ImageDataGenerator(\n rescale=1./255,\n #rotation_range=30,\n #shear_range=30,\n #width_shift_range=.15,\n #height_shift_range=.15,\n #zoom_range=0.5,\n validation_split=0.2)\n\n self.train_dataset = self.image_data_gen.flow_from_directory(\n batch_size=BATCH_SIZE,\n directory=self.img_dir,\n shuffle=True,\n target_size=self.img_size,\n class_mode='sparse',\n subset='training')\n\n self.val_dataset = self.image_data_gen.flow_from_directory(\n batch_size=BATCH_SIZE,\n directory=self.img_dir,\n shuffle=True,\n target_size=self.img_size,\n class_mode='sparse',\n subset='validation')\n\n if display_samples:\n self.display_sample_images()", "def plotRandImages(train_data, output):\n rint_array = np.random.randint(0,train_data.shape[0],size=4)\n\n fig = plt.figure(figsize=(8,8))\n plt.subplot(2,2,1)\n for ii,rint in enumerate(rint_array):\n plt.subplot(2,2,ii+1)\n img = train_data[rint].reshape((20,20)).T\n #print \" output: \",output[rint]\n plt.imshow(img,aspect='auto',interpolation='nearest',\n origin='lower')\n plt.title('Image of %d'%output.T[rint])\n plt.show()\n\n return", "def display(self):\n nrow = 2\n ncol = len(self.views) + 1\n rows = [(self.views[0].original, len(self.views)),\n (self.views[0].image, len(self.views) + 1)]\n fig, axes = plt.subplots(nrows=nrow, ncols=ncol,\n figsize=self._figsize(rows),\n squeeze=True)\n originals = [(v.position.id, v.original) for v in self.views] + [\n ('combined', np.median(np.stack([v.original for v in self.views]), axis=0))]\n warped = [(v.position.id, v.image) for v in self.views] + [\n ('combined', self.image)]\n for ax, (title, img) in zip(axes.ravel(), originals + warped):\n ax.imshow(img)\n ax.axis('off')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.set(title=title)\n fig.tight_layout()\n fig.canvas.draw()\n img_array = np.array(fig.canvas.renderer._renderer)\n plt.close('all')\n return img_array", "def plot_gallery(images, titles, h, w, n_row=3, n_col=4):\n plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))\n plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n for i in range(n_row * n_col):\n plt.subplot(n_row, n_col, i + 1)\n plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)\n plt.title(titles[i], size=12)\n plt.xticks(())\n plt.yticks(())\n plt.show()\n 
plt.savefig(\"feature_{}.png\".format(50))", "def visualize(imgobjs, cols=4, collated=True, size=None):\n\n ## Separate into list of single instance image objects\n imgs = []\n if isinstance(imgobjs, list):\n for io in imgobjs:\n imgs += images._create_img_list(io)\n else:\n imgs = images._create_img_list(imgobjs)\n\n ## Grid layout settings. Sets N, N_rows, N_cols\n N = len(imgs)\n assert N > 0\n if not size:\n size = [0, 0] # H, W\n for img in imgs:\n _, _, H, W = get_dimensions(img)\n size[0] += H\n size[1] += W\n size = [int(d/len(imgs)) for d in size]\n else:\n assert len(size) == 2\n\n N_cols = cols if cols else 4\n if N < 4:\n N_cols = N\n N_rows = math.ceil(N/N_cols)\n print(f\"Cols: {N_cols}, Rows: {N_rows}\")\n\n ## Display Figure\n figure = plt.figure(figsize=(15, 10))\n for i in range(N):\n dims = images.get_dimensions(imgs[i])[1:]\n title = f\"[Image {i+1}/{N}]\"\n if isinstance(imgs[i], str):\n title = f\"[Image {i+1}/{N}] {files.get_filename(imgs[i])}\"\n title += f\"\\n shape{dims}\"\n img = images.to_np(imgs[i], size=size, color='rgb')\n subplt = figure.add_subplot(N_rows, N_cols, i+1)\n subplt.set_title(title, fontsize=10)\n subplt.axis('off')\n plt.imshow(img)\n figure.tight_layout()\n # plt.subplots_adjust(wspace=.25, hspace=.5)\n plt.show()", "def display_dataset(path, save, dset='sum'):\n # List datasets\n files_surf = os.listdir(path[0])\n files_surf.sort()\n files_deep = os.listdir(path[1])\n files_deep.sort()\n files_calc = os.listdir(path[2])\n files_calc.sort()\n\n # Corrected names\n files = os.listdir(r'Y:\\3DHistoData\\Subvolumes_2mm')\n files.sort()\n\n k = 0\n # Loop for displaying images\n for fsurf, fdeep, fcalc in zip(files_surf, files_deep, files_calc):\n # Load images\n im_surf = loadh5(path[0], fsurf, dset)\n im_deep = loadh5(path[1], fdeep, dset)\n im_calc = loadh5(path[2], fcalc, dset)\n # Create figure\n fig = plt.figure(dpi=300)\n ax1 = fig.add_subplot(131)\n ax1.imshow(im_surf, cmap='gray')\n plt.title(fsurf + ', Surface')\n ax2 = fig.add_subplot(132)\n ax2.imshow(im_deep, cmap='gray')\n plt.title('Deep')\n ax3 = fig.add_subplot(133)\n ax3.imshow(im_calc, cmap='gray')\n plt.title('Calcified')\n if save is not None:\n while files[k] == 'Images' or files[k] == 'MeanStd':\n k += 1\n\n # Save figure\n if not os.path.exists(save):\n os.makedirs(save, exist_ok=True)\n plt.tight_layout()\n fig.savefig(os.path.join(save, files[k]), bbox_inches=\"tight\", transparent=True)\n plt.close()\n\n # Save h5\n if not os.path.exists(save + '\\\\MeanStd\\\\'):\n os.makedirs(save + '\\\\MeanStd\\\\', exist_ok=True)\n\n h5 = h5py.File(save + \"\\\\MeanStd\\\\\" + files[k] + '.h5', 'w')\n h5.create_dataset('surf', data=im_surf)\n h5.create_dataset('deep', data=im_deep)\n h5.create_dataset('calc', data=im_calc)\n h5.close()\n else:\n plt.show()\n k += 1", "def matplotlibDisplayMulti(imgs, titles=None, colorFlag='gray'):\n if titles is None:\n titles = []\n for i in range(len(imgs)):\n titles.append(\"IMAGE \" + str(i))\n for i in range(len(imgs)):\n plt.subplot(1, len(imgs), 1+i)\n plt.imshow(imgs[i], colorFlag)\n plt.title(titles[i])\n plt.xticks([])\n plt.yticks([])\n plt.show()", "def load_images(self, tmx):\n for image_data in tmx.images:\n if image_data:\n image, _, _ = image_data\n self.load_image(image)", "def show_dataset(folder, n=10):\n volumes = volume_paths(folder)\n for i, v in enumerate(volumes):\n print(load_pages(v)[0][0:500])\n if i >= n:\n break", "def get_images(self, page_number):", "def gather_images():\n # Import an empty image\n null_img = 
Image.open('assests/null/null.png')\n null_img = ImageTk.PhotoImage(null_img.resize((100,100), Image.ANTIALIAS))\n\n # Import image and icon for X\n X_img = Image.open('assests/X_Assets/X.png')\n X_icon = ImageTk.PhotoImage(X_img.resize((15, 12), Image.ANTIALIAS))\n X_img = ImageTk.PhotoImage(X_img.resize((95, 80), Image.ANTIALIAS))\n\n # Import horizontally striked X\n X_hor = Image.open('assests/X_Assets/X_hor.png')\n X_hor = ImageTk.PhotoImage(X_hor.resize((95, 80), Image.ANTIALIAS))\n\n # Import vertically striked X\n X_vert = Image.open('assests/X_Assets/X_vert.png')\n X_vert = ImageTk.PhotoImage(X_vert.resize((95, 80), Image.ANTIALIAS))\n\n # Import diagonally strikedX\n X_diag = Image.open('assests/X_Assets/X_diag.png')\n X_diag = ImageTk.PhotoImage(X_diag.resize((95, 80), Image.ANTIALIAS))\n\n # Import another diagonally striked X\n X_diag2 = Image.open('assests/X_Assets/X_diag2.png')\n X_diag2 = ImageTk.PhotoImage(X_diag2.resize((95, 80), Image.ANTIALIAS))\n\n # Import image and icon for O\n O_img = Image.open('assests/O_Assets/O.png')\n O_icon = ImageTk.PhotoImage(O_img.resize((14, 14), Image.ANTIALIAS))\n O_img = ImageTk.PhotoImage(O_img.resize((90, 90), Image.ANTIALIAS))\n\n # Import horizontally striked O\n O_hor = Image.open('assests/O_Assets/O_hor2.png')\n O_hor = ImageTk.PhotoImage(O_hor.resize((90, 90), Image.ANTIALIAS))\n\n # Import vertically striked O\n O_vert = Image.open('assests/O_Assets/O_vert2.png')\n O_vert = ImageTk.PhotoImage(O_vert.resize((90, 90), Image.ANTIALIAS))\n\n # Import diagonally striked O\n O_diag = Image.open('assests/O_Assets/O_diag.png')\n O_diag = ImageTk.PhotoImage(O_diag.resize((90, 90), Image.ANTIALIAS))\n\n # Import another diagonally striked O\n O_diag2 = Image.open('assests/O_Assets/O_diag2.png')\n O_diag2 = ImageTk.PhotoImage(O_diag2.resize((90, 90), Image.ANTIALIAS))\n\n return (null_img, X_icon, X_img, X_hor, X_vert, X_diag, X_diag2, O_icon, O_img, O_hor, O_vert, O_diag, O_diag2)", "def plot_random_generated_images(self):\n dimensions=(10, 10)\n figsize=(10, 10)\n n_samples=100\n \n (X, _), _ = self.generate_generator_prediction_samples(n_samples)\n \n self.grid_plot(X, dimensions=dimensions, figsize=figsize)", "def plot_examples(n_samples, images, start_image=0):\n n_cols = len(images)\n index = 0\n for j in range(0+start_image, n_samples+start_image):\n for col_id, img in enumerate(images):\n img_to_plot = img[j:j+1, :, :, :]\n img_to_plot = np.squeeze(img_to_plot)\n plt.subplot(n_samples, n_cols, index + col_id + 1)\n plt.imshow(img_to_plot, cmap='gray', vmin=0, vmax=1)\n plt.axis('off')\n index += n_cols" ]
[ "0.6872789", "0.6706518", "0.66048354", "0.65576744", "0.6477456", "0.6438162", "0.64273864", "0.6386546", "0.6381494", "0.63184965", "0.6311449", "0.62808603", "0.6278003", "0.6259482", "0.6259482", "0.62534124", "0.6243781", "0.6227257", "0.6224085", "0.6216505", "0.6158989", "0.61508155", "0.61446035", "0.61427486", "0.61373985", "0.6089885", "0.6060406", "0.6059162", "0.6047537", "0.60443485", "0.60263276", "0.6013175", "0.6011418", "0.59987587", "0.5993068", "0.5985406", "0.5968301", "0.59430104", "0.5929085", "0.5920354", "0.59108514", "0.59048563", "0.59048563", "0.59048563", "0.59023833", "0.58959174", "0.5891544", "0.5885532", "0.5877551", "0.58716077", "0.58641285", "0.5863476", "0.5861947", "0.5860006", "0.5841318", "0.5837511", "0.58219117", "0.58169425", "0.5814045", "0.5812875", "0.5807936", "0.5807744", "0.5784084", "0.5775105", "0.57567185", "0.5756346", "0.5755834", "0.5752832", "0.5752499", "0.5747984", "0.57474875", "0.5744836", "0.5743422", "0.57373273", "0.5737096", "0.57312435", "0.5728227", "0.5727724", "0.57121646", "0.57119465", "0.5706884", "0.5705012", "0.57045", "0.5701718", "0.56953335", "0.56890804", "0.5686475", "0.568529", "0.56784683", "0.5675977", "0.56746984", "0.5670269", "0.56687915", "0.5667783", "0.5661418", "0.5658541", "0.56561935", "0.5656007", "0.56541514", "0.5649831" ]
0.760682
0
Normalises and reshapes the images in the dataset.
def normalise(dataset):
    # Scale images to the [0, 1] range
    dataset = dataset.astype("float32") / 255
    # Make sure images have shape (28, 28, 1)
    return np.expand_dims(dataset, -1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize_dataset(self):", "def normalize_images(data, blend_cat, Args):\n im = data['X_train']['blend_image']\n std = np.std(im)\n mean = np.mean(im)\n data['X_train']['blend_image'] = (im - mean) / std\n data['X_val']['blend_image'] = (data['X_val']['blend_image'] - mean) / std\n data['X_train'] = normalize_other_inputs(data['X_train'], Args)\n data['X_val'] = normalize_other_inputs(data['X_val'], Args)\n for key in data['Y_train'].keys():\n data['Y_train'][key] = (data['Y_train'][key] - mean) / std\n data['Y_val'][key] = (data['Y_val'][key] - mean) / std\n blend_cat['std'] = std\n blend_cat['mean'] = mean\n return data", "def flatten_image(data):\n\t# print(img.shape[0])\n\t# print(img.shape[1])\n\t# cv2.imshow('image',img)\n\t# gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\t# plt.imshow(gray)\n\t# plt.show()\n\t# X_normalized = preprocessing.normalize(img, norm='l2')\n\t\n\t# s = img.shape[0] * img.shape[1]\n\t# img_wide = img.reshape((1, s,-1))\t\n\t# img_wide = np.rollaxis(X_normalized, axis=1, start=0)\n\t# plt.imshow(img_wide[0])\n\t# plt.show()\n\t# print(X_normalized)\n\tnsamples, nx, ny = data.shape\n\td2_train_dataset = data.reshape((nsamples,nx*ny))\n\treturn d2_train_dataset", "def normalize_data(img):\n nor = np.linalg.norm(img, axis = 1)\n nor = np.reshape(nor, (len(img), 1))\n img = np.divide(img, nor)\n return img", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def normalise(image):", "def normalize_data(data):\n if data.element_spec[0].shape[2] == 1:\n data = data.map(lambda x, y: (tf.image.grayscale_to_rgb(\n tf.image.resize(x, [32, 32])), y))\n else:\n data = data.map(lambda x, y: (tf.image.resize(x, [32, 32]), y))\n normalization_layer = tf.keras.layers.experimental.preprocessing.Rescaling(1. 
/ 255)\n normalized_ds = data.map(lambda x, y: (normalization_layer(x), y))\n return normalized_ds", "def preprocess(imgs):\n imgs_p = np.ndarray((len(imgs), img_rows, img_cols), dtype=np.float32)\n for i in range(len(imgs)):\n imgs_p[i] = imgs[i].reshape((img_rows, img_cols))/255.\n\n imgs_p = imgs_p[..., np.newaxis]\n\n # Perform data normalization\n mean = imgs_p.mean()\n std = imgs_p.std()\n imgs_p -= mean\n imgs_p /= std\n\n return imgs_p", "def normalize_image(self):\n # The image normalization is identical to Cloud TPU ResNet.\n self._image = tf.image.convert_image_dtype(self._image, dtype=tf.float32)\n offset = tf.constant(DATASET_MEAN)\n offset = tf.expand_dims(offset, axis=0)\n offset = tf.expand_dims(offset, axis=0)\n self._image -= offset\n\n scale = tf.constant(DATASET_VAR)\n scale = tf.expand_dims(scale, axis=0)\n scale = tf.expand_dims(scale, axis=0)\n self._image /= scale", "def unnormalize(images, mean, std):\n \n unnorm_images = images * std + mean\n \n \n return unnorm_images", "def set_normalization(self, dataloader):\n mean = 0\n square = 0\n for (data_in, _) in dataloader:\n mean += data_in.mean()\n square += data_in.pow(2).mean()\n\n mean /= len(dataloader)\n square /= len(dataloader)\n std = np.sqrt(square - mean ** 2)\n\n # The input data should be roughly normally distributed after\n # passing through net_fixed.\n self.scale_in.bias.data.fill_(- mean / std)\n self.scale_in.weight.data.fill_(1 / std)", "def normalize(self):\n d = learning_utils.convert_data_to_2d(self._data)\n d = learning_utils.normalize_2d(d)\n self._data = learning_utils.convert_data_to_1d(d)", "def normalization(imgs):\n\n imgs = np.asarray(imgs).astype(np.float32)\n imgs = np.expand_dims(imgs / 255, axis=-1)\n return imgs", "def denormalize(img, dataset=\"imagenet\"):\r\n if dataset == \"cifar10\":\r\n c_std = [0.247, 0.243, 0.261]\r\n c_mean = [0.4914, 0.4822, 0.4466]\r\n elif dataset == \"imagenet\":\r\n c_std = [0.229, 0.224, 0.225]\r\n c_mean = [0.485, 0.456, 0.406]\r\n for i in [0, 1, 2]:\r\n img[i] = img[i] * c_std[i] + c_mean[i]\r\n return img", "def normalize(dataset):\n minVals = dataset.min(axis=0)\n maxVals = dataset.max(axis=0)\n factors = maxVals-minVals\n num = dataset.shape[0]\n norm_data = (dataset - np.tile(minVals,(num,1)))/np.tile(factors,(num,1)) \n return norm_data", "def _normalize_images(self, images: th.Tensor) -> th.Tensor:\n output = ((images+2)/4 - self._norm_mean)/self._norm_std\n return output", "def denormalize(img, means, stds, resize_to_original=False):\n\n img = np.moveaxis(img, 0, 2)\n img = img*stds + means\n img = np.clip(img, 0, 255).astype('uint8')\n\n if resize_to_original:\n # revert def preprocess_image()\n img = img[:,(img_w//4): (img_w - img_w//4),:]\n img = cv2.copyMakeBorder( img, img.shape[0], 0,0,0, cv2.BORDER_CONSTANT) #, borderType)\n img = cv2.resize(img, (img_orig_w, img_orig_h))\n \n return img", "def normalize_ds(dataset):\n dataset = copy.copy(dataset)\n\n dim_dataset = dataset.shape\n\n for n_row in range(dim_dataset[0]):\n k = dataset[n_row,:]\n k_norm =(k - np.min(k))/(np.max(k) - np.min(k))\n dataset[n_row,:] = k_norm\n\n return dataset", "def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - 
self.min[col]) if col != \"Bias\" else 1", "def reshape_normalise(img):\n\t# The image shape is expected to match the input of VGG19\n\timg = np.resize(img, (1, CONFIG.IMAGE_HEIGHT, CONFIG.IMAGE_WIDTH, CONFIG.COLOR_CHANNELS)).astype('float32')\n\timg -= CONFIG.MEAN_PIXEL\n\treturn img", "def _normalize(\n ds: xr.Dataset,\n *,\n dim: Sequence[str],\n kind: str = ADDITIVE,\n) -> xr.Dataset:\n if \"norm\" in ds:\n norm = ds.norm\n else:\n norm = ds.data.mean(dim=dim)\n norm.attrs[\"_group_apply_reshape\"] = True\n\n return xr.Dataset(\n dict(data=apply_correction(ds.data, invert(norm, kind), kind), norm=norm)\n )", "def reshape_dataset(self, dataset, params):\n assert hasattr(params, \"vectorize_data\"), (\n \"Model params must set vectorize_data.\")\n for key in dataset.keys():\n if dataset[key] is None:\n continue\n dataset[key].images = dp.reshape_data(dataset[key].images, params.vectorize_data)[0]\n dataset[key].shape = dataset[key].images.shape\n return dataset", "def normalize_images(x_images, mean_value):\n x_flat = np.zeros((x_images.shape[0], 784))\n for k in range(0, x_images.shape[0]):\n img = x_images[k, ...] - mean_value\n img = cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX).astype(np.float32)\n x_flat[k, ...] = np.reshape(img, [-1])\n\n return x_flat", "def preprocess_image(self, batched_inputs):\n images = [x[\"image\"].float().to(self.device) for x in batched_inputs]\n images = [self.normalizer(img) for img in images]\n images = ImageList.from_tensors(images, self.backbone.size_divisibility)\n return images", "def test_normalize(dummy_input):\n # Test the 2D image: H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n\n # Test the 3D image: H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n assert np.abs(np.mean(_image)-0) < 1e-8\n assert np.abs(np.std(_image)-1) < 1e-8", "def normalize_data(data=None):\n # Data pre-processing\n n = data.shape[0]\n for i in range(n):\n xx = data[i,:,:]\n xx -= np.mean(xx) # Centering in 0\n xx /= np.linalg.norm(xx) # Normalizing to 1\n data[i] = xx # Affect value\n return data", "def img_normalize(image, label):\n mean, std = ds_stats\n image -= tf.constant(mean, shape=[1, 1, num_channels], dtype=image.dtype)\n image /= tf.constant(std, shape=[1, 1, num_channels], dtype=image.dtype)\n return image, label", "def normalize(self, image, transpose=False, data_type=None):\n return normalize(image, self.mean, self.std, transpose)", "def pre_process_data(input_path: list, cuts: int, shape: int = 32, normalize: bool = True) -> list:\n images = []\n images_uncut = []\n for files_path in input_path:\n\n files = os.listdir(files_path) # TODO paths\n for f in files:\n file_path = f'{files_path}/{f}'\n im_uncut = cv2.imread(file_path)\n im_uncut = cv2.cvtColor(im_uncut, cv2.COLOR_RGB2GRAY)\n images_uncut.append(cv2.resize(im_uncut, (shape * cuts, shape * cuts)))\n x = np.array(images_uncut)\n\n if normalize:\n x_mean = np.mean(x, axis=(0, 1, 2))\n x_std = np.std(x, axis=(0, 1, 2))\n x = (x - x_mean) / (x_std + 1e-9)\n\n for im in x:\n height = im.shape[0]\n width = 
im.shape[1]\n frac_h = height // cuts\n frac_w = width // cuts\n i = 0\n image = []\n for h in range(cuts):\n for w in range(cuts):\n crop = im[h * frac_h:(h + 1) * frac_h, w * frac_w:(w + 1) * frac_w]\n crop_rehaped = cv2.resize(crop, (shape, shape))\n image.append([crop_rehaped, i, number_to_angle(i, cuts), neighbours(i, cuts)])\n i = i + 1\n images.append(image)\n # return np.array(images) # todo back to array\n return images", "def normalize_data(self):\n self.x_mean, self.x_std = du.get_mean_std(self.x_train)\n self.x_train = du.normalize(self.x_train, self.x_mean, self.x_std)\n if self.x_test is not None and self.y_test is not None:\n self.x_test = du.normalize(self.x_test, self.x_mean, self.x_std)\n self.normalized_data = True", "def normalize_images(image_sitk):\n\n max = 400\n min = -1000\n\n image_np = sitk.GetArrayFromImage(image_sitk)\n\n # Normalization\n image_np = (image_np - min)/(max - min)\n image_np[image_np > 1] = 1\n image_np[image_np < 0] = 0\n\n # Convert back to SITK\n out_image_sitk = sitk.GetImageFromArray(image_np)\n out_image_sitk.CopyInformation(image_sitk)\n\n return out_image_sitk", "def preprocess_image(self, batched_inputs):\n images = [x.to(self.device) for x in batched_inputs]\n norms = [self.normalizer(x) for x in images]\n size = (norms[0].shape[1],norms[0].shape[2])\n images = ImageList.from_tensors(norms, self.backbone.size_divisibility)\n return images, size", "def normalize(image, label):\n image -= settings.DATASET_MEAN\n image /= settings.DATASET_STD\n\n return image, label", "def prepare(dataset):\n dataset = dataset.reshape(dataset.shape[0], 1, 28, 28)\n dataset = dataset.astype('float32')\n dataset /= 255\n return dataset", "def normalize(dataset):\n return normalize_standard_deviation(normalize_mean(dataset))", "def run(self):\n self.run_tasks()\n self.images = np.array(self.images)\n self.shapes.extend(self.images.shape[-2:])\n\n self.images = np.reshape(self.images, self.shapes)", "def normalize(img, mean, std, data_format='CHW'):\n _assert_image_tensor(img, data_format)\n\n mean = paddle.to_tensor(mean, place=img.place)\n std = paddle.to_tensor(std, place=img.place)\n\n if _is_channel_first(data_format):\n mean = mean.reshape([-1, 1, 1])\n std = std.reshape([-1, 1, 1])\n\n return (img - mean) / std", "def rescale_data(self):\n\n # Dividing every array of simulated data vectors by the mean of that array.\n '''# Didnt work\n for key in self.data.keys():\n self.data[key] /= np.mean(self.data[key])\n '''\n\n self.rescaled = True\n\n # Mean normalization\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.mean(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Median normalization\n \"\"\" didnt work, still dividing by large number \n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Divide by median\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.median(self.data[key]))\n \"\"\"\n\n # Take logarithm of data\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] = np.log10(self.data[key])\n \"\"\"\n\n # Scale by length of vector\n \"\"\"\n for key in self.data.keys():\n self.data[key] /= np.linalg.norm(self.Cl_noiseless)\n \"\"\"\n\n \n # Scale by negative of the natural logarithm \n for key in self.data.keys():\n self.data[key] = -1 * np.log(self.data[key]) \n \n \"\"\"\n # Scale 
by subtracting the mean and dividing by std\n std = np.nanstd(self.data['data'])\n mean = np.nanmean(self.data['data'])\n for key in self.data.keys():\n # self.data[key] -= np.log(self.Cl_noiseless) # -1* # scale this same way\n # self.data[key] -= self.Cl_noiseless # -1* # scale this same way\n self.data[key] -= mean \n self.data[key] /= std\n \"\"\"", "def normalize(img):\n # TODO: implement this function.\n min_img = min([min(i) for i in img])\n max_img = max([max(i) for i in img])\n\n for i in range(len(img)):\n \tfor j in range(len(img[0])):\n \t\timg[i][j] = ((img[i][j] - min_img) / (max_img - min_img))\n #raise NotImplementedError\n return img", "def preprocess_image(self, batched_inputs):\n images = [x[\"image\"].to(self.device) for x in batched_inputs]\n images_aug = [x[\"image_color\"].to(self.device) for x in batched_inputs]\n\n images = [self.normalizer(x) for x in images]\n images_aug = [self.normalizer(x) for x in images_aug]\n\n images = ImageList.from_tensors(images,\n self.backbone.size_divisibility)\n images_aug = ImageList.from_tensors(images_aug,\n self.backbone.size_divisibility)\n return images, images_aug", "def preprocess(img):\n \n scaler=StandardScaler() ## scaler object to perform preprocessing\n img=scaler.fit_transform(img) ## zero-center and normalize\n \n return img", "def normalize(self):\n self._data /= self.norm()", "def normalise(self):\n return self / self.mean(axis=1).reshape(self.shape[0], 1)", "def unNormalizeData(normalizedData, data_mean, data_std, dimensions_to_ignore):\n T = normalizedData.shape[0]\n D = data_mean.shape[0]\n\n origData = np.zeros((T, D), dtype=np.float32)\n dimensions_to_use = []\n for i in range(D):\n if i in dimensions_to_ignore:\n continue\n dimensions_to_use.append(i)\n dimensions_to_use = np.array(dimensions_to_use)\n\n origData[:, dimensions_to_use] = normalizedData\n\n # potentially ineficient, but only done once per experimentdata_conversions\n stdMat = data_std.reshape((1, D))\n stdMat = np.repeat(stdMat, T, axis=0)\n meanMat = data_mean.reshape((1, D))\n meanMat = np.repeat(meanMat, T, axis=0)\n origData = np.multiply(origData, stdMat) + meanMat\n return origData", "def normalizeData(meanAndStd, dataset):\n\n for i in range(len(dataset)):\n for j in range(len(dataset[i])-1):\n mean = meanAndStd[j][\"mean\"]\n std = meanAndStd[j][\"std\"]\n dataset[i][j] = (dataset[i][j] - mean)/std", "def pre_process(self, images: Union[np.ndarray, List]) -> np.ndarray:\n images = validate_image(images)\n image_sizes = []\n image_arr = []\n for image in images:\n image_sizes.append(image.shape)\n image = resize(image,\n height=self.in_h,\n width=self.in_w)\n image = normalize(image)\n image_arr.append(image)\n image_arr = np.array(image_arr)\n return image_arr, image_sizes", "def _images_and_boxes_preprocessing(self, imgs, boxes):\r\n # Image [0, 255] -> [0, 1].\r\n imgs = imgs.float()\r\n imgs = imgs / 255.0\r\n\r\n height, width = imgs.shape[2], imgs.shape[3]\r\n # The format of boxes is [x1, y1, x2, y2]. 
The input boxes are in the\r\n # range of [0, 1].\r\n boxes[:, [0, 2]] *= width\r\n boxes[:, [1, 3]] *= height\r\n boxes = transform.clip_boxes_to_image(boxes, height, width)\r\n\r\n if self._split == \"train\":\r\n # Train split\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs,\r\n min_size=self._jitter_min_scale,\r\n max_size=self._jitter_max_scale,\r\n boxes=boxes,\r\n )\r\n imgs, boxes = transform.random_crop(imgs, self._crop_size, boxes=boxes)\r\n\r\n # Random flip.\r\n imgs, boxes = transform.horizontal_flip(0.5, imgs, boxes=boxes)\r\n elif self._split == \"val\":\r\n # Val split\r\n # Resize short side to crop_size. Non-local and STRG uses 256.\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs, min_size=self._crop_size, max_size=self._crop_size, boxes=boxes\r\n )\r\n\r\n # Apply center crop for val split\r\n imgs, boxes = transform.uniform_crop(\r\n imgs, size=self._crop_size, spatial_idx=1, boxes=boxes\r\n )\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\r\n elif self._split == \"test\":\r\n # Test split\r\n # Resize short side to crop_size. Non-local and STRG uses 256.\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs, min_size=self._crop_size, max_size=self._crop_size, boxes=boxes\r\n )\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\r\n else:\r\n raise NotImplementedError(\"{} split not supported yet!\".format(self._split))\r\n\r\n # Do color augmentation (after divided by 255.0).\r\n if self._split == \"train\" and self._use_color_augmentation:\r\n if not self._pca_jitter_only:\r\n imgs = transform.color_jitter(\r\n imgs, img_brightness=0.4, img_contrast=0.4, img_saturation=0.4\r\n )\r\n\r\n imgs = transform.lighting_jitter(\r\n imgs,\r\n alphastd=0.1,\r\n eigval=np.array(self._pca_eigval).astype(np.float32),\r\n eigvec=np.array(self._pca_eigvec).astype(np.float32),\r\n )\r\n\r\n # Normalize images by mean and std.\r\n imgs = transform.color_normalization(\r\n imgs,\r\n np.array(self._data_mean, dtype=np.float32),\r\n np.array(self._data_std, dtype=np.float32),\r\n )\r\n\r\n if self._use_bgr:\r\n # Convert image format from RGB to BGR.\r\n # Note that Kinetics pre-training uses RGB!\r\n imgs = imgs[:, [2, 1, 0], ...]\r\n\r\n boxes = transform.clip_boxes_to_image(boxes, self._crop_size, self._crop_size)\r\n\r\n return imgs, boxes", "def _compute_normalization(self, normalize=True):\n if normalize:\n if self._img_norm is None:\n if np.sum(self._data) == 0:\n self._img_norm = 1\n else:\n self._img_norm = self._compute_raw_image_norm()\n\n if self._img_norm != 0.0 and np.isfinite(self._img_norm):\n self._data /= (self._img_norm * self._normalization_correction)\n self._normalization_status = 0\n else:\n self._normalization_status = 1\n self._img_norm = 1\n warnings.warn('Overflow encountered while computing '\n 'normalization constant. 
Normalization '\n 'constant will be set to 1.', NonNormalizable)\n else:\n self._normalization_status = 2", "def normalize(\n ds: xr.Dataset,\n *,\n dim: str,\n kind: str = ADDITIVE,\n) -> xr.Dataset:\n\n if \"norm\" in ds:\n norm = invert(ds.norm, kind)\n else:\n norm = invert(ds.data.mean(dim=dim), kind)\n\n return xr.Dataset(dict(data=apply_correction(ds.data, norm, kind)))", "def scale_and_normalize_images(images, means, scales, invert_channels, normalize_to_unit_scale):\n means = torch.tensor(means, dtype=torch.float32)[None, :, None, None] # [1, 3, 1, 1]\n scales = torch.tensor(scales, dtype=torch.float32)[None, :, None, None] # [1. 3. 1. 1]\n if normalize_to_unit_scale:\n images = images / 255.\n\n images = (images - means) / scales\n if invert_channels:\n return images.flip(dims=[1])\n else:\n return images", "def normalize(\n self,\n image: np.ndarray,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> np.ndarray:\n image = rescale(image=image, scale=1 / 127.5, data_format=data_format, input_data_format=input_data_format)\n image = image - 1\n return image", "def normalize_batch(batch, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):\n # normalize using imagenet mean and std\n batch = batch.clone()\n mean = torch.tensor(mean).view(-1, 1, 1)\n std = torch.tensor(std).view(-1, 1, 1)\n # if your image data is scaled to scale 0-255, uncomment the line below\n # batch.div_(255.0)\n return (batch - mean) / std", "def normalize_datasets(train, test):\n columns = train.columns[:-1]\n train[columns] = (train[columns] - train[columns].mean()) / (train[columns].max() - train[columns].min())\n test[columns] = (test[columns] - test[columns].mean()) / (test[columns].max() - test[columns].min())\n\n return train, test", "def _preprocess(self, data, normalize=False) -> np.ndarray:\n \n preprocessor = StandardScaler() if not normalize else Normalizer()\n\n data = preprocessor.fit_transform(data)\n \n return data", "def fit(self, dataset, labels):\n self.dataset = dataset\n self.labels = labels\n self.normalization_n = []\n self.normalization_d = []\n self.first_title = list(self.dataset.keys())[0]\n for ind in range(len(self.dataset[self.first_title])):\n self.normalize_features(self.dataset, ind)", "def _normalize_patches(patches):\n patches = array_ops.concat(patches, 0)\n mean, variance = nn.moments(patches, [1, 2, 3], keep_dims=True)\n patches = (patches - mean) / math_ops.sqrt(variance)\n return array_ops.reshape(patches, [array_ops.shape(patches)[0], -1])", "def normalize_data(X_genesets):\n normalized_datasets = []\n for Xg in X_genesets:\n averages = np.average(Xg, axis=0)\n Xg_centered = Xg - np.repeat(averages, Xg.shape[0], axis=0)\n std_devs = np.sqrt(np.var(Xg_centered, axis=0))\n Xg_normalized = np.divide(Xg, np.repeat(std_devs, Xg.shape[0], axis=0))\n normalized_datasets.append(Xg_normalized)\n return normalized_datasets", "def dimension_postprocess(self, chunked_data, original_data, scale=1, padding=True):\r\n\r\n assert len(original_data.shape) == 2, \"data dimension expected to be (xline ,samp_point)\"\r\n assert len(chunked_data.shape) == 3, \"Chunked data dimension expected to be (batch_size, xline, samp_point)\"\r\n\r\n if padding:\r\n if original_data.shape[0] < self.rows:\r\n new_images = []\r\n for data in chunked_data:\r\n new_images.append(data[0:scale * original_data.shape[0], :])\r\n chunked_data = np.array(new_images)\r\n\r\n if original_data.shape[1] < self.cols:\r\n new_images 
= []\r\n for data in chunked_data:\r\n new_images.append(data[:, 0:scale * original_data.shape[1]])\r\n chunked_data = np.array(new_images)\r\n\r\n new_shape = (\r\n original_data.shape[0] * scale,\r\n original_data.shape[1] * scale\r\n )\r\n reconstruction = np.zeros(new_shape)\r\n x_chunks, y_chunks = self.get_chunks(original_data)\r\n\r\n i = 0\r\n s = scale\r\n for x in x_chunks:\r\n for y in y_chunks:\r\n prior_fill = reconstruction != 0\r\n chunk = np.zeros(new_shape)\r\n chunk[x[0] * s:x[1] * s, y[0] * s:y[1] * s] += chunked_data[i]\r\n chunk_fill = chunk != 0\r\n reconstruction += chunk\r\n reconstruction[prior_fill & chunk_fill] = reconstruction[prior_fill & chunk_fill] / 2\r\n i += 1\r\n return reconstruction", "def normalize_data(batch_data):\n B, N, C = batch_data.shape\n normal_data = np.zeros((B, N, C))\n for b in range(B):\n pc = batch_data[b]\n centroid = np.mean(pc, axis=0)\n pc = pc - centroid\n m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))\n pc = pc / m\n normal_data[b] = pc\n return normal_data", "def normalize_data(self):\n\t\tfull_matrix = self.balance_clases()\n\t\ttexture_matrix = Normalizer().fit_transform(X=full_matrix[:,range(0,24)])\n\n\t\treturn texture_matrix", "def add_image_normalization(self):\n self.methods.append(self._normalize_image)\n self.args.append(None)", "def cast_and_normalise_images(images):\n images = (tf.cast(images, tf.float32) / 255.0) - 0.5\n return images", "def normalize_features(self, data_dict, ind):\n pre_norm_list = []\n for title in data_dict:\n pre_norm_list.append(data_dict[title][ind])\n if self.normalization_method == 'min_max':\n mini, maxi, norm_list = normalize.min_max_normalize(pre_norm_list)\n self.normalization_n.append(mini)\n self.normalization_d.append(maxi - mini)\n elif self.normalization_method == 'z_score':\n mean, var, norm_list = normalize.z_score_normalize(pre_norm_list)\n self.normalization_n.append(mean)\n self.normalization_d.append(var)\n elif self.normalization_method == 'none':\n norm_list = pre_norm_list[:]\n self.normalization_n.append(0)\n self.normalization_d.append(1)\n for i, title in enumerate(data_dict):\n data_dict[title][ind] = norm_list[i]", "def _images_and_boxes_preprocessing(self, imgs, boxes, gt_boxes=None):\n # Image [0, 255] -> [0, 1].\n imgs = imgs.float()\n imgs = imgs / 255.0\n\n height, width = imgs.shape[2], imgs.shape[3]\n # The format of boxes is [x1, y1, x2, y2]. The input boxes are in the\n # range of [0, 1].\n # boxes[:, [0, 2]] *= width\n # boxes[:, [1, 3]] *= height\n boxes = transform.clip_boxes_to_image(boxes, height, width)\n\n if self._split == \"train\":\n # Train split\n imgs, boxes = transform.random_short_side_scale_jitter(\n imgs,\n min_size=self._jitter_min_scale,\n max_size=self._jitter_max_scale,\n boxes=boxes,\n )\n imgs, boxes = transform.random_crop(\n imgs, self._crop_size, boxes=boxes\n )\n\n # Random flip.\n imgs, boxes = transform.horizontal_flip(0.5, imgs, boxes=boxes)\n elif self._split == \"val\":\n # Val split\n # Resize short side to crop_size. Non-local and STRG uses 256.\n imgs, boxes = transform.random_short_side_scale_jitter(\n imgs,\n min_size=self._crop_size,\n max_size=self._crop_size,\n boxes=boxes,\n )\n\n # Apply center crop for val split\n imgs, boxes = transform.uniform_crop(\n imgs, size=self._crop_size, spatial_idx=1, boxes=boxes\n )\n\n if self._test_force_flip:\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\n elif self._split == \"test\":\n # Test split\n # Resize short side to crop_size. 
Non-local and STRG uses 256.\n imgs, boxes = transform.random_short_side_scale_jitter(\n imgs,\n min_size=self._crop_size,\n max_size=self._crop_size,\n boxes=boxes,\n )\n\n if self._test_force_flip:\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\n else:\n raise NotImplementedError(\n \"{} split not supported yet!\".format(self._split)\n )\n\n # Do color augmentation (after divided by 255.0).\n if self._split == \"train\" and self._use_color_augmentation:\n if not self._pca_jitter_only:\n imgs = transform.color_jitter(\n imgs,\n img_brightness=0.4,\n img_contrast=0.4,\n img_saturation=0.4,\n )\n\n imgs = transform.lighting_jitter(\n imgs,\n alphastd=0.1,\n eigval=np.array(self._pca_eigval).astype(np.float32),\n eigvec=np.array(self._pca_eigvec).astype(np.float32),\n )\n\n # Normalize images by mean and std.\n imgs = transform.color_normalization(\n imgs,\n np.array(self._data_mean, dtype=np.float32),\n np.array(self._data_std, dtype=np.float32),\n )\n\n if not self._use_bgr:\n # Convert image format from BGR to RGB.\n # Note that Kinetics pre-training uses RGB!\n imgs = imgs[:, [2, 1, 0], ...]\n\n boxes = transform.clip_boxes_to_image(\n boxes, self._crop_size, self._crop_size\n )\n\n return imgs, boxes", "def denormalize_detections(detections, resized_size, scale, pad):\n detections[:, 0] = (detections[:, 0] * resized_size - pad[0]) * scale\n detections[:, 1] = (detections[:, 1] * resized_size - pad[1]) * scale\n detections[:, 2] = (detections[:, 2] * resized_size - pad[0]) * scale\n detections[:, 3] = (detections[:, 3] * resized_size - pad[1]) * scale\n\n detections[:, 4::2] = (detections[:, 4::2] * resized_size - pad[1]) * scale\n detections[:, 5::2] = (detections[:, 5::2] * resized_size - pad[0]) * scale\n return detections", "def _postprocess(img):\n img = _scale_to_zero_one(img)\n img = img.reshape(1, -1) # to avoid a scikit-learn deprecation warning later\n return img", "def preprocess_data():\n le = preprocessing.LabelEncoder()\n # Reshape and normalize pixel values to be between 0 and 1\n train_images_reshaped = train_images.reshape(len(train_images), 1024, 1024, 1)/255.\n test_images_reshaped = test_images.reshape(len(test_images), 1024, 1024, 1)/255.\n\n return train_images_reshaped, test_images_reshaped, le.fit_transform(train_labels), le.fit_transform(test_labels)", "def preprocess(self):\n \n file_name_list = os.listdir(self.image_dir)\n random.seed(1234)\n random.shuffle(file_name_list)\n \n for i,d in enumerate(self.domains):\n self.attr2idx[d]=i \n\n for i, file_name in enumerate(file_name_list):\n if (file_name.startswith('X_')):\n continue\n \n parts = file_name.split(\"-\")\n label = int(parts[0])\n if label not in self.domains:\n continue\n img_name = file_name\n\n count=self.get_sample_count(label)\n if count<self.valid_set_size:\n # create holdout set on the fly\n utils.copy_file(self.image_dir,self.valid_set_dir,img_name)\n else:\n self.dataset.append([img_name, self.attr2idx[label]])\n \n self.increment_sample_count(label)\n\n print(\"Sample count per domain: \"+str(self.sample_count)+\" (including holdout set, holdout size per domain is: \"+str(self.valid_set_size)+\")\")\n print('Finished preprocessing the dataset...')", "def normalize_image(img):\n arr = np.array(img)\n new_img = Image.fromarray(normalize(arr).astype('uint8'),'L')\n return new_img", "def normalize(self):\n self.image = rescale_intensity(self.image, out_range=(0, 255))", "def image_preprocess(image, image_size, mean_rgb, stddev_rgb):\n input_processor = 
dataloader.DetectionInputProcessor(image, image_size)\n input_processor.normalize_image(mean_rgb, stddev_rgb)\n input_processor.set_scale_factors_to_output_size()\n image = input_processor.resize_and_crop_image()\n image_scale = input_processor.image_scale_to_original\n return image, image_scale", "def reshape_and_normalize_image(image):\n # Reshape image to mach expected input of VGG16\n image = np.reshape(image, ((1,) + image.shape))\n # Substract the mean to match the expected input of VGG16\n image = image - CONFIG.MEANS\n \n return image", "def processImage(imgs):\r\n imgs = imgs.astype(np.float32)\r\n for i, img in enumerate(imgs):\r\n m = img.mean()\r\n s = img.std()\r\n imgs[i] = (img - m) / s\r\n return imgs", "def _preprocess_data(self, name, directory):\n if name.endswith('data'):\n for path in glob(str(directory / '**/*.jpg'), recursive=True):\n try:\n with Image.open(path) as img:\n if not name.startswith('feature'):\n img = img.rotate(-90, 0, 1)\n img = img.resize(self.input_shape)\n except (ValueError, OSError):\n print(\"Couldn't open {}\".format(path))\n else:\n path = Path(path)\n filename = path.name.split('img-')[1]\n target = (path.parent / filename).with_suffix('.image.png')\n img.save(target, 'PNG')\n os.remove(str(path))\n elif name.endswith('targets'):\n for path in glob(str(directory / '**/*.mat'), recursive=True):\n try:\n mat = spio.loadmat(path)['depthMap']\n img = spmisc.toimage(mat).resize(self.target_shape)\n except ValueError:\n print(\"Couldn't open {}\".format(path))\n else:\n path = Path(path)\n name = path.name[path.name.index('-') + 1:]\n target = (path.parent / name).with_suffix('.depth.png')\n img.save(target, 'PNG')\n os.remove(str(path))", "def imnormalize_tensor(self, img, mean, std, to_rgb=True):\n mean = np.float32(mean.reshape(1, -1))\n stdinv = 1 / np.float32(std.reshape(1, -1))\n if to_rgb:\n img = img[:, :, [2, 1, 0]]\n img = torch.sub(img, torch.tensor(mean).cuda())\n img = torch.mul(img, torch.tensor(stdinv).cuda())\n return img", "def initial_resizing(fr_raw_data_path, fr_data_path, dim=300):\n with h5py.File(fr_raw_data_path, 'r') as data:\n images = resize_array(np.asarray(data['images'].value), dim=dim)\n labels = data['labels'].value\n \n with h5py.File(fr_data_path, 'w') as f:\n f.create_dataset('images', data=images)\n\n with h5py.File(fr_raw_data_path, 'r') as data: \n f.copy(data['fri_data'], 'fri_data')\n f.copy(data['frii_data'], 'frii_data')\n f.copy(data['labels'], 'labels')", "def _preprocessing(self, input_image):\n if self.resize:\n input_image = self._np_resize_image(input_image,\n self.input_size,\n dtype='int')\n image = self._np_transpose(input_image)\n image = self._np_normalize(image)\n image = self._np_flip_n_cat(image)\n return image", "def run_phot_normalization(setup, **params):\n\n log = logs.start_stage_log( setup.red_dir, 'postproc_phot_norm', version=VERSION )\n\n xmatch = crossmatch.CrossMatchTable()\n xmatch.load(params['crossmatch_file'],log=log)\n\n # Identify the datasets to be used as the primary reference in each\n # filter:\n xmatch.id_primary_datasets_per_filter()\n log.info('Identified datasets to be used as the primary references in each filter: '\\\n +repr(xmatch.reference_datasets))\n\n # Add columns to the dataset Table to hold the photometric calibration\n # parameters\n ndset = len(xmatch.datasets)\n ncol = len(xmatch.datasets.colnames)\n if 'norm_a0' not in xmatch.datasets.colnames:\n xmatch.datasets.add_column(np.zeros(ndset), name='norm_a0', index=ncol+1)\n 
xmatch.datasets.add_column(np.zeros(ndset), name='norm_a1', index=ncol+2)\n xmatch.datasets.add_column(np.zeros(ndset), name='norm_covar_0', index=ncol+3)\n xmatch.datasets.add_column(np.zeros(ndset), name='norm_covar_1', index=ncol+4)\n xmatch.datasets.add_column(np.zeros(ndset), name='norm_covar_2', index=ncol+5)\n xmatch.datasets.add_column(np.zeros(ndset), name='norm_covar_3', index=ncol+6)\n log.info('Expanded xmatch.datasets table for normalization parameters')\n\n # Extract list of filters from xmatch.images['filter'] column\n filter_list = np.unique(xmatch.images['filter'].data)\n log.info('Identified list of filters to process: '+repr(filter_list))\n\n # Read data from quadrant 1\n # Reading in the timeseries data for all four quadrants is at the very\n # edge of the memory limits on the machines available, so it is preferable\n # to calibrate the quadrant's data separately. However, there are sufficient\n # stars in each quantant to be able to determine the photometric calibration\n # from a single quadrant, and apply it to the rest of the image.\n log.info('Loading the timeseries photometry from quadrant 1')\n file_path = path.join(setup.red_dir, params['field_name']+'_quad1_photometry.hdf5')\n phot_data = hd5_utils.read_phot_from_hd5_file(file_path, return_type='array')\n log.info('-> Completed photometry load')\n\n # Identify constant stars in the dataset\n constant_stars = find_constant_stars(xmatch, phot_data, log)\n star = 1\n\n # Normalize the photometry of each dataset to that of the reference\n # image in the primary reference dataset in that filter\n #for filter in filter_list:\n for filter in filter_list:\n\n # Plot an RMS diagram of the lightcurves for all stars in this filter,\n # prior to normalization, for comparison\n image_index = np.where(xmatch.images['filter'] == filter)[0]\n phot_data_filter = phot_data[:,image_index,:]\n (mag_col, mag_err_col) = field_photometry.get_field_photometry_columns('corrected')\n qc_col = 16\n\n plot_multisite_rms(params, phot_data_filter, mag_col, mag_err_col, qc_col,\n 'rms_prenorm_'+str(filter)+'.png', log)\n\n # Extract the reference image photometry for the primary-ref dataset\n # for this filter\n ref_datacode = xmatch.reference_datasets[filter]\n sitecode = get_site_code(ref_datacode)\n log.info('Reference dataset in '+filter+' is '+ref_datacode+', sitecode='+sitecode)\n\n ref_phot = np.zeros((len(xmatch.stars),2))\n ref_phot[:,0] = xmatch.stars['cal_'+filter.replace('p','')+'_mag_'+sitecode]\n ref_phot[:,1] = xmatch.stars['cal_'+filter.replace('p','')+'_magerr_'+sitecode]\n\n # Extract the lightcurves for all other datasets in this filter in turn\n dataset_index = np.where(xmatch.datasets['dataset_filter'] == filter)[0]\n\n for idset in dataset_index:\n dset_datacode = xmatch.datasets['dataset_code'][idset]\n dset_sitecode = get_site_code(dset_datacode)\n\n # If the dataset is the reference dataset, replicate the photometric\n # measurements from the corrected columns to the normalized columns,\n # since no normalization is required - this ensures the full\n # lightcurve can be accessed from the normalization columns.\n if dset_datacode == ref_datacode:\n log.info('Replicating primary reference photometry from dataset '\\\n +dset_datacode+' to the normalized photometry columns')\n image_index = np.where(xmatch.images['dataset_code'] == dset_datacode)[0]\n (mag_col, mag_err_col) = field_photometry.get_field_photometry_columns('corrected')\n (norm_mag_col, norm_mag_err_col) = 
field_photometry.get_field_photometry_columns('normalized')\n for i in image_index:\n phot_data[:,i,norm_mag_col] = phot_data[:,i,mag_col]\n phot_data[:,i,norm_mag_err_col] = phot_data[:,i,mag_err_col]\n\n # Normalize any dataset that isn't the same as the reference dataset\n else:\n log.info('Normalizing dataset '+dset_datacode+', sitecode='+dset_sitecode)\n image_index = np.where(xmatch.images['dataset_code'] == dset_datacode)[0]\n\n ## Dset created to hold all stars in field, not quadrant - \n # normalization is calculated from whole field.\n dset_phot = np.zeros((len(xmatch.stars),2))\n dset_phot[:,0] = xmatch.stars['cal_'+filter.replace('p','')+'_mag_'+dset_sitecode]\n dset_phot[:,1] = xmatch.stars['cal_'+filter.replace('p','')+'_magerr_'+dset_sitecode]\n\n # Calculate their weighted offset relative to the primary-ref\n # dataset for the filter\n (fit, covar_fit) = calc_phot_normalization(ref_phot, dset_phot,\n constant_stars, log,\n diagnostics=True, ref=sitecode,\n dset=dset_sitecode, f=filter)\n\n # Store the fit results for this dataset\n xmatch = store_dataset_phot_normalization(idset, xmatch, fit, covar_fit, log)\n\n # Apply the normalization calibration to the dataset's reference\n # image photometry, and store the results in the xmatch.stars table\n log.info('Applying normalization to the datasets reference image photometry')\n cal_phot = apply_phot_normalization_single_frame(fit, covar_fit, dset_phot,\n 0, 1, log,\n diagnostics=True, ref=sitecode,\n dset=dset_sitecode, f=filter)\n xmatch.stars['norm_'+filter.replace('p','')+'_mag_'+dset_sitecode] = cal_phot[:,0]\n xmatch.stars['norm_'+filter.replace('p','')+'_magerr_'+dset_sitecode] = cal_phot[:,1]\n\n # Apply the photometry calibration to the timeseries data\n # for this dataset\n (mag_col, mag_err_col) = field_photometry.get_field_photometry_columns('corrected')\n (norm_mag_col, norm_mag_err_col) = field_photometry.get_field_photometry_columns('normalized')\n phot_data = normalize_timeseries_photometry(phot_data, image_index,\n fit, covar_fit,\n mag_col, mag_err_col,\n norm_mag_col, norm_mag_err_col,\n log)\n\n # Plot a second RMS diagram of the lightcurves for all stars in this\n # filter, post normalization, for comparison\n image_index = np.where(xmatch.images['filter'] == filter)[0]\n phot_data_filter = phot_data[:,image_index,:]\n (mag_col, mag_err_col) = field_photometry.get_field_photometry_columns('normalized')\n plot_multisite_rms(params, phot_data_filter, mag_col, mag_err_col, qc_col,\n 'rms_postnorm_'+str(filter)+'.png', log)\n\n\n fig = plt.figure(3,(10,10))\n (norm_mag_col, norm_mag_err_col) = field_photometry.get_field_photometry_columns('normalized')\n idx = np.where(phot_data[star,:,norm_mag_col] > 0.0)[0]\n plt.errorbar(phot_data[star,idx,0], phot_data[star,idx,norm_mag_col],\n yerr=phot_data[star,idx,norm_mag_err_col], fmt='none', color='k')\n (xmin,xmax,ymin,ymax) = plt.axis()\n ymin = max(ymin,14.0)\n ymax = min(ymax,22.0)\n plt.axis([xmin,xmax,ymax,ymin])\n plt.xlabel('HJD')\n plt.ylabel('Mag')\n plt.savefig('Star_'+str(star)+'_lc_norm.png')\n plt.close(3)\n\n # Output updated crossmatch table\n xmatch.save(params['crossmatch_file'])\n\n # Output the photometry for quadrant 1:\n output_quadrant_photometry(params, setup, 1, phot_data, log)\n\n logs.close_log(log)\n\n status = 'OK'\n report = 'Completed successfully'\n return status, report", "def norm_and_stack(images):\n imagestack = np.dstack(tuple([cv2.imread(image, cv2.IMREAD_UNCHANGED) for image in images]))\n mean = np.mean(imagestack)\n std = 
np.std(imagestack)\n new_im = (imagestack - mean)/std \n \n return new_im, mean, std", "def normalize_all_data_in_dict(data: Data_dict_type, normalizers: Tuple[object, ...]) -> Data_dict_type:\n for key, item in data.items():\n values, sample_rate = item\n # save old shape and reshape data to supported format for normalizer\n old_shape = values.shape\n values = values.reshape((-1, values.shape[-1]))\n # normalize data\n for normalizer in normalizers:\n values = normalizer.transform(values)\n # Reshape data back to old shape\n values = values.reshape(old_shape)\n data[key] = (values, sample_rate)\n return data", "def normalize(imgs, idx=[0]):\n tmp = [img for img in imgs]\n for i in idx:\n tmp[i] = cv2.normalize(imgs[i], None, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32FC1)\n return tmp", "def applyMorphologicalCleaning(self, image):", "def un_normalize(tensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):\n mean = torch.FloatTensor(mean).view(1,3,1,1)\n std = torch.FloatTensor(std).view(1,3,1,1)\n \n image = tensor.cpu().detach()\n image = image*std+mean\n image = image.numpy()\n \n image = np.transpose(image, (0,2,3,1))\n \n #print(np.max(image))\n #print(np.min(image))\n return image", "def normalize_train_data(train_data, hter=False):\n feats = train_data[:, :-1]\n labels = train_data[:, -1]\n if hter:\n labels_pw = labels\n else:\n labels_pw = labels / feats[:, 1]\n scaler = pp.StandardScaler()\n scaler.fit(feats)\n norm_feats = scaler.transform(feats)\n return np.concatenate((norm_feats, labels_pw[:, None]), axis=1), scaler", "def _reshape(self, data):\n\n\t\td = np.zeros((32,32,3))\n\t\td_r = data[0:1024].reshape(32,32)\n\t\td_g = data[1024:2048].reshape(32,32)\n\t\td_b = data[2048:].reshape(32,32)\n\n\t\tfor h in range(32):\n\t\t for w in range(32):\n\t\t for c in range(3):\n\n\t\t if c == 0 : d[h,w,c] = d_r[h,w]\n\t\t elif c == 1 : d[h,w,c] = d_g[h,w]\n\t\t else : d[h,w,c] = d_b[h,w]\n\n\t\tarray = np.array(d, dtype=np.uint8)\n\t\timg = Image.fromarray(array)\n\t\ttemp = img.resize(size = (64,64))\n\t\td = image.img_to_array(temp)\n\n\t\t#plt.imshow(d)\n\t\t#plt.show()\n\t\treturn d", "def preprocess(self):\n print(\"processing content images...\")\n for dir_item in self.selectedContent:\n join_path = Path(self.content_image_dir,dir_item.replace('/','_'))\n if join_path.exists():\n print(\"processing %s\"%dir_item,end='\\r')\n images = join_path.glob('*.%s'%(self.subffix))\n for item in images:\n self.content_dataset.append(item)\n else:\n print(\"%s dir does not exist!\"%dir_item,end='\\r')\n label_index = 0\n print(\"processing style images...\")\n for class_item in self.selectedStyle:\n images = Path(self.style_image_dir).glob('%s/*.%s'%(class_item, self.subffix))\n for item in images:\n self.art_dataset.append([item, label_index])\n label_index += 1\n random.seed(self.random_seed)\n random.shuffle(self.content_dataset)\n random.shuffle(self.art_dataset)\n # self.dataset = images\n print('Finished preprocessing the Art Works dataset, total image number: %d...'%len(self.art_dataset))\n print('Finished preprocessing the Content dataset, total image number: %d...'%len(self.content_dataset))", "def norm_data(self):\n if (self.nrows, self.ncolumns) < self.data.shape:\n self.data = self.data[0:self.nrows, 0:self.ncolumns]\n if self.data.dtype != np.float64:\n self.data = self.data.astype(np.float64)\n self.meanval = self.data.mean()\n self.stdval = self.data.std()", "def _generate_dataset(self):\n # create train images\n train_path = os.path.join(self.root_dir, \"shapes\", 
\"train\", \"good\")\n os.makedirs(train_path, exist_ok=True)\n for i in range(self.num_train):\n result = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=self.train_shapes,\n generate_mask=False,\n )\n image = result[\"image\"]\n imsave(os.path.join(train_path, f\"{i:03}.png\"), image, check_contrast=False)\n\n # create test images\n for test_category in self.test_shapes:\n test_path = os.path.join(self.root_dir, \"shapes\", \"test\", test_category)\n mask_path = os.path.join(self.root_dir, \"shapes\", \"ground_truth\", test_category)\n os.makedirs(test_path, exist_ok=True)\n os.makedirs(mask_path, exist_ok=True)\n # anomaly and masks. The idea is to superimpose anomalous shapes on top of correct ones\n for i in range(self.num_test):\n correct_shapes = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=self.train_shapes,\n generate_mask=False,\n )\n result = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=[test_category],\n generate_mask=True,\n )\n correct_shapes = correct_shapes[\"image\"]\n image, mask = result[\"image\"], result[\"mask\"]\n image = np.minimum(image, correct_shapes) # since 255 is white\n imsave(os.path.join(test_path, f\"{i:03}.png\"), image, check_contrast=False)\n imsave(os.path.join(mask_path, f\"{i:03}_mask.png\"), mask, check_contrast=False)\n # good test\n test_good = os.path.join(self.root_dir, \"shapes\", \"test\", \"good\")\n os.makedirs(test_good, exist_ok=True)\n for i in range(self.num_test):\n result = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=self.train_shapes,\n )\n image = result[\"image\"]\n imsave(os.path.join(test_good, f\"{i:03}.png\"), image, check_contrast=False)", "def make_flat_avg(images, out):\n image = Image(avg_images(images, out))\n image.normalise()\n return out", "def __normalizeData__(self,tensors = None):\n if(tensors is None):\n tensors = self.__tensors__\n new_tensors=[]\n for tensor in tensors:\n new_tensors.append( [ [1/(1+x) for x in tensor[0] ] , tensor[1],tensor[2],tensor[3] ])\n return new_tensors", "def normalize(batch_img: np.ndarray) -> np.ndarray:\n batch_img = batch_img.astype('float32')\n return batch_img / 127.5 - 1", "def _reshape(self, data):\n\n\t\t\td = np.zeros((32,32,3))\n\t\t\td_r = data[0:1024].reshape(32,32)\n\t\t\td_g = data[1024:2048].reshape(32,32)\n\t\t\td_b = data[2048:].reshape(32,32)\n\n\t\t\tfor h in range(32):\n\t\t\t for w in range(32):\n\t\t\t for c in range(3):\n\n\t\t\t if c == 0 : d[h,w,c] = d_r[h,w]\n\t\t\t elif c == 1 : d[h,w,c] = d_g[h,w]\n\t\t\t else : d[h,w,c] = d_b[h,w]\n\n\t\t\tarray = np.array(d, dtype=np.uint8)\n\t\t\timg = Image.fromarray(array)\n\t\t\ttemp = img.resize(size = (64,64))\n\t\t\td = image.img_to_array(temp)\n\n\t\t\t#plt.imshow(d)\n\t\t\t#plt.show()\n\t\t\treturn d", "def unnormalize(self, image, transpose=False):\n return unnormalize(image, self.mean, self.std, transpose)", "def scale_dataset(ds):\n for i in range(0,ds.dims):\n fmax = ds.data[0][i]\n for j in range(1,len(ds)):\n curr = ds.data[j][i]\n if curr > fmax:\n fmax = curr \n if fmax > 0:\n for j in range(0,len(ds)):\n ds.data[j][i] /= fmax", "def specific_normalization(df):\n # Need to scale some vars. 
This is done using a StandardScaler from sklearn package\n scaler = StandardScaler()\n df['Pclass'] = df['Pclass'].astype('float64')\n df['Family'] = df['Family'].astype('float64')\n # .reshape(-1, 1) is mandatory otherwise an exception is thrown (as 'data has a single feature')\n df['Pclass'] = scaler.fit_transform(df['Pclass'].values.reshape(-1, 1))\n df['Family'] = scaler.fit_transform(df['Family'].values.reshape(-1, 1))\n\n return df", "def normalize_labels(self):\n self.y_mean, self.y_std = du.get_mean_std(self.y_train)\n self.y_train = du.normalize(self.y_train, self.y_mean, self.y_std)\n if self.x_test is not None and self.y_test is not None:\n self.y_test = du.normalize(self.y_test, self.y_mean, self.y_std)", "def process(self, data):\n # get truncation parameters from config or container defaults\n self._get_params(type(data))\n\n if self.weight_dataset is None:\n self.weight_dataset = [None] * len(self.dataset)\n\n for dset, wgt in zip(self.dataset, self.weight_dataset):\n old_shape = data[dset].local_shape\n val = np.ndarray.reshape(data[dset][:], data[dset][:].size)\n if wgt is None:\n if np.iscomplexobj(data[dset]):\n data[dset][:].real = bit_truncate_fixed(\n val.real, self.fixed_precision\n ).reshape(old_shape)\n data[dset][:].imag = bit_truncate_fixed(\n val.imag, self.fixed_precision\n ).reshape(old_shape)\n else:\n data[dset][:] = bit_truncate_fixed(\n val, self.fixed_precision\n ).reshape(old_shape)\n else:\n if data[dset][:].shape != data[wgt][:].shape:\n raise pipeline.PipelineRuntimeError(\n \"Dataset and weight arrays must have same shape ({} != {})\".format(\n data[dset].shape, data[wgt].shape\n )\n )\n invvar = np.ndarray.reshape(data[wgt][:], data[dset][:].size)\n if np.iscomplexobj(data[dset]):\n data[dset][:].real = bit_truncate_weights(\n val.real,\n invvar * 2.0 / self.variance_increase,\n self.fixed_precision,\n ).reshape(old_shape)\n data[dset][:].imag = bit_truncate_weights(\n val.imag,\n invvar * 2.0 / self.variance_increase,\n self.fixed_precision,\n ).reshape(old_shape)\n else:\n data[dset][:] = bit_truncate_weights(\n val, invvar / self.variance_increase, self.fixed_precision\n ).reshape(old_shape)\n\n return data", "def _data_generation(self, batch_data):\n # Initialization\n batch_x = []\n batch_y = defaultdict(list)\n\n for ind, item_data in batch_data.iterrows():\n img_path = os.path.join(self.img_dir, \"images\", \"rgb\", item_data[\"name\"])\n img = cv2.imread(img_path)\n try:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n except Exception as error:\n print(img_path)\n print(error)\n not_valid_mask = self.read_masks_borders(item_data[\"name\"])\n img[not_valid_mask] = 0\n\n # getmasks\n targets = np.zeros((img.shape[0], img.shape[1], len(self.classes)))\n for i, c in enumerate(self.classes):\n mask_path = os.path.join(self.img_dir, \"labels\", c, item_data[\"name\"])\n mask = cv2.imread(\n mask_path.replace(\".jpg\", \".png\"), cv2.IMREAD_GRAYSCALE\n )\n mask[not_valid_mask[:, :, 0]] = 0\n mask = mask > 0\n targets[:, :, i] = mask\n\n res = self.reshape_func(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n if self.do_aug:\n res = self.aug(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n\n for i, c in enumerate(self.classes):\n batch_y[c].append(targets[:, :, i])\n\n batch_x.append(img)\n\n batch_x = np.array(batch_x, np.float32)\n batch_y = {k: np.array(v, np.float32) for k, v in batch_y.items()}\n batch_y = {k: np.expand_dims(v, axis=-1) for k, v in batch_y.items()}\n\n return (\n 
imagenet_utils.preprocess_input(batch_x, \"channels_last\", mode=\"tf\"),\n batch_y\n )", "def unnormalize_multivariate_data(normed_data, scaling_values):\n data = np.zeros(normed_data.shape, dtype=normed_data.dtype)\n for i in range(normed_data.shape[-1]):\n data[:, :, :, i] = normed_data[:, :, :, i] * scaling_values.loc[i, \"std\"] + scaling_values.loc[i, \"mean\"]\n return data" ]
[ "0.69572496", "0.67912114", "0.67600954", "0.6746723", "0.6741553", "0.6741553", "0.6712757", "0.667788", "0.66753983", "0.66424495", "0.6628572", "0.6604109", "0.657797", "0.65456814", "0.6539488", "0.64729387", "0.6469001", "0.64613545", "0.6418981", "0.63990253", "0.6396894", "0.63954324", "0.63640827", "0.6331304", "0.6283619", "0.6265867", "0.6249871", "0.624411", "0.62260234", "0.6211367", "0.6192262", "0.61917", "0.6155012", "0.6153514", "0.61427623", "0.61223453", "0.6076508", "0.6069619", "0.6050932", "0.60416406", "0.60411507", "0.603168", "0.6028816", "0.5989901", "0.59816337", "0.5975304", "0.59629685", "0.59586686", "0.5923289", "0.5920414", "0.59184664", "0.5917082", "0.59088665", "0.58951575", "0.58924943", "0.587319", "0.58715814", "0.58714926", "0.5856627", "0.5847649", "0.5844181", "0.5841126", "0.5828049", "0.5824001", "0.581776", "0.5812312", "0.5807249", "0.5802737", "0.5778224", "0.5770125", "0.5767544", "0.5764253", "0.57605", "0.57120025", "0.5702945", "0.57022", "0.5699028", "0.5694016", "0.56916636", "0.56868047", "0.56855875", "0.5674896", "0.56708866", "0.56551486", "0.565431", "0.5653328", "0.5650138", "0.56439793", "0.5637077", "0.5625967", "0.5621813", "0.56208646", "0.56174517", "0.5614007", "0.56125826", "0.5607665", "0.56040967", "0.55962735", "0.55935323", "0.55892307" ]
0.7404679
0
Randomly chooses some data from the dataset.
def choice(dataset, size):
    x, y = dataset
    assert x.shape[0] == y.shape[0]
    num_datapoints = x.shape[0]
    mask = np.zeros(num_datapoints).astype("bool")
    mask[:size] = True
    np.random.default_rng().shuffle(mask)
    return x.compress(mask, axis=0), y.compress(mask, axis=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getRandom(self):\n return random.choice(self.data)", "def data_source():\n dataset = [0.1, 0.2, 0.3, 0.4, 0.5]\n while True:\n time.sleep(2)\n yield random.choice(dataset)", "def randomRow(self):\r\n l = []\r\n for row in self.data:\r\n l.append(row)\r\n return random.choice(l)", "def data_feeder_2():\n return random.sample(range(100), 10)", "def Chose_rand():\r\n total_list=list(range(1,467681))\r\n select=13788\r\n random_selected= random.sample(total_list,select)\r\n return (random_selected)", "def selectRandomFromList(ldata):\n\treturn ldata[randint(0, len(ldata)-1)]", "def bootstrap_sample(data):\n return [random.choice(data) for _ in data]", "def getRandom(self):\n \n return self.data[random.randint(0, len(self.data) - 1)]", "def sample(self):\n return self.items[self.np_random.choice(len(self.items))]", "def gen_data(self, amount):\n\n return random.choices(self.indices, weights=self.weights, k=amount)", "def sample_train_data(dataset ,target,data_len, resp = True ):\r\n np.random.seed(222)\r\n ixes = np.random.choice(dataset.index, data_len, replace = False)\r\n print(ixes)\r\n under_df = dataset.iloc[ixes]\r\n if resp==True:\r\n under_target = target.iloc[ixes]\r\n return under_df, under_target\r\n else:\r\n return under_df", "def post(self, s):\n return np.random.choice(self.sample_list)", "def generate_test_set(data, pts): \n test_set = np.asarray(random.sample(data, pts))\n \n return test_set", "def dataset_choice(self):\n # while running:\n\n # select the dataset file for this cycle\n dataset = self.which_dataset()\n # print('A2. dataset = ', dataset)\n\n # send to list making function\n self.data_list = self.dataparsing(dataset)\n\n # how long to read a dataset file for this cycle\n dataset_choice_dur = (random.randrange(6000, 26000) / 1000) * self.glob_speed\n if self.debug_choose:\n print(f'A4 dataset choice duration = {dataset_choice_dur} seconds')\n\n # wait for this process to timeout 6-26 seconds\n # time.sleep(dataset_choice_dur)\n for _ in range (int(dataset_choice_dur) * 100):\n if config.affect_interrupt:\n continue\n else:\n time.sleep(0.01)", "def runRandom(self):\n \n # Implementing Random Selection\n\n N = self.myDS.shape[0] \n d = self.myDS.shape[1] \n self.opt_selected = []\n total_reward = 0\n for n in range(0, N):\n ad = random.randrange(d)\n self.opt_selected.append(ad)\n reward = self.myDS.values[n, ad]\n total_reward = total_reward + reward\n \n return total_reward", "def sample(self):\n return gc.rand_state.choice(self.domain)", "def getRandom(self):\r\n return self.data[rnd.randrange(self.len)]", "def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling", "def __sample(self, data, los, n: Optional[int], random: bool):\n if n is None:\n n = len(data)\n else:\n n = min(len(data), n)\n # Uniform random sampling from our data array\n indices = list(range(len(data)))\n if random:\n np.random.shuffle(indices)\n indices = indices[:n]\n data = torch.Tensor(data[indices])\n los = torch.Tensor(los[indices])\n if self.device != 'cpu' and 'cuda' in self.device.type:\n data = data.cuda()\n los = los.cuda()\n return data, los", "def fit(self, data):\n self.seed = random.choice(range(100))", "def rand_data():\n # 100 examples, with seq_len=10, each holding 300 features\n return torch.randn((100, 10, 300))", "def getRandom(self) -> int:\n return random.choice(list(self.set))", "def sample_data(self):\n print(\"Start data sampling...\")\n positives, negatives = [], []\n groups = self.group_data()\n for group in 
groups:\n positives.extend(list(combinations(group, 2)))\n for _ in range(len(positives)):\n group1, group2 = sample(groups, 2)\n negatives.append((sample(group1, 1)[0], sample(group2, 1)[0]))\n print(f\"\\x1b[32mSuccessfully completed data sampling ({len(positives)} x 2).\\x1b[0m\")\n return [negatives, positives]", "def random(self=None, sample=100, min=0, max=100):\r\n\t\treturn DataStatistics([randint(min, max) for i in range(sample)])", "def rand(self):\n raise NotImplementedError", "def randomize(data):\r\n permutation = np.random.permutation(data.shape[0])\r\n shuffled_data = data[permutation, :]\r\n # shuffled_y = y[permutation]\r\n return shuffled_data", "def getRandom(self):\n return random.choice(self.table.keys())", "def getRandom(self):\n return random.choice(self.ls)", "def select_data(datadir, device):\n images, labels = dataloader.select_n_random('train', datadir, n=2)\n images, labels = images.to(device), labels.to(device)\n return images.float(), labels", "def subsample(self, dataset):\n sample_idx = np.random.choice(\n dataset.shape[0], self.sample_size, replace=True)\n sample = dataset[sample_idx,...]\n return sample", "def __call__(self, num_actions):\n return np.random.choice(num_actions)", "def get_train_data(df):\n\n srch_order = []\n cat0 = df[df.category == 5].index\n cat1 = df[df.category == 1].index\n cat2 = df[df.category == 0].index\n amount = int(len(df) * .04)\n print(\"amount of rows selected: \", amount)\n\n cat2_selec = np.random.choice(cat2, amount, replace=False)\n\n cat012 = np.concatenate((cat0, cat1, cat2_selec))\n\n df_selection = df.loc[cat012]\n\n return df_selection", "def get_data(generator, random, bench_id):\n x_train, y_train, x_test, y_test = generator(random, bench_id)\n x_train = np.c_[np.ones(len(x_train)), x_train]\n x_test = np.c_[np.ones(len(x_test)), x_test]\n return x_train, y_train, x_test, y_test", "def shuffle_dataset(self):\n # TODO explain approached used for selecting training and test data\n labels = self.dataset.label.unique()\n good_jobs = self.dataset[self.dataset.label == \"Good\"]\n bad_jobs = self.dataset[self.dataset.label == \"Bad\"]\n\n # TODO n>2 probablly won't work the way it's supposed to currently\n if len(labels) == 2:\n # oversample\n resize = max(len(good_jobs.label),len(bad_jobs.label))\n # undersample\n resize = min(len(good_jobs.label), len(bad_jobs.label))\n good_jobs_re = good_jobs.sample(resize)\n bad_jobs_re = bad_jobs.sample(resize)\n dataset = pd.concat([good_jobs_re, bad_jobs_re])\n elif len(labels) == 3:\n neutral_jobs = self.dataset[self.dataset.label == \"Neutral\"]\n # oversample\n resize = max(len(good_jobs.label), len(bad_jobs.label),len(neutral_jobs.label))\n # undersample\n resize = min(len(good_jobs.label), len(bad_jobs.label),len(neutral_jobs.label))\n\n good_jobs_re = good_jobs.sample(resize, replace=True)\n bad_jobs_re = bad_jobs.sample(resize, replace=True)\n neutral_jobs_re = bad_jobs.sample(resize, replace=True)\n dataset = pd.concat([good_jobs_re, bad_jobs_re,neutral_jobs_re])\n elif len(labels) == 4:\n neutral_jobs = self.dataset[self.dataset.label == \"Neutral\"]\n ideal_jobs = self.dataset[self.dataset.label == \"Ideal\"]\n\n # middle of the road approach\n resize = int(mean([len(good_jobs.label), len(bad_jobs.label),len(neutral_jobs.label),len(ideal_jobs.label)]))\n good_jobs_re = good_jobs.sample(resize, replace=True)\n bad_jobs_re = bad_jobs.sample(resize, replace=True)\n neutral_jobs_re = bad_jobs.sample(resize, replace=True)\n ideal_jobs_re = 
ideal_jobs.sample(resize,replace=True)\n dataset = pd.concat([good_jobs_re, bad_jobs_re,neutral_jobs_re,ideal_jobs_re])\n\n train,test = train_test_split(dataset,test_size=0.25,stratify = dataset.label,shuffle=True)\n #test = self.dataset[~self.dataset.isin(train)].dropna()\n #test = self.dataset[(~dataset.label.isin(self.dataset.label))&(~dataset.description.isin(self.dataset.description))]\n #0tr_hashes = [hash(tuple(d)) for d in train.description]\n #ytest = [val for iter,val in self.dataset.iterrows() if hash(tuple(val.description)) not in tr_hashes]\n\n self.y_train,self.y_test = train.label.values,test.label.values\n self.X_train,self.X_test = train.description.values,test.description.values", "def _get_dataset_node(self, nodes):\n if not nodes:\n raise WNoNodesFound()\n return random.choice(nodes)", "def selectRandomFromDict(ddata):\n\tdkeys = list(ddata.keys())\n\tdk = selectRandomFromList(dkeys)\n\tel = (dk, ddata[dk])\n\treturn el", "def __call__(self):\n return random.choice(self.fakers)", "def select_n_random(data, labels, n=100):\n assert len(data) == len(labels)\n\n # TODO: sort this out for 3D data\n # p1 = torch.randperm(len(data))\n # sample_labels = labels[p1][:n]\n # sample_data = data[p1][:n]\n return data[:n], labels[:n]", "def random_sample(self, n):\n indices = random.sample(xrange(np.shape(self.data)[0]), n)\n table = DataTable(self.data[indices], self.dims, self.legends, self.tags.copy())\n return table", "def touching_choice(self,p):\n choose = random.sample(part,2)\n\n return choose", "def totem_random():\n random_head()\n random_head()\n random_head()", "def split_data(data, prob):\n results = [], []\n for row in data:\n results[0 if random.random() < prob else 1].append(row)\n return results", "def sample(data, k):\n\n # create random number generator\n r = Random()\n r.seed()\n\n # load all n items into dictionary\n n = len(data)\n data_dict = {i: data[i] for i in range(n)}\n samples = []\n\n for i in range(k):\n # select random item\n rand_i = r.randrange(0, n - 1) if n > 1 else 0 # randrange fails if start==stop\n samples.append(data_dict[rand_i])\n\n # replace selected item with last item and decrement number of items\n # to prevent duplicates\n data_dict[rand_i] = data_dict[n - 1]\n n -= 1\n\n return samples", "def _select(self):\n sel = []\n\n # choose randomly while favouring fit individuals\n lp = len(self.population) // 2\n for _ in range(lp):\n idx1 = idx2 = int(math.sqrt(random.randrange(lp**2+1)))\n while idx1 == idx2:\n idx2 = int(math.sqrt(random.randrange(lp**2+1)))\n\n p1, p2 = self.population[idx1], self.population[idx2]\n sel.append((p1, p2))\n\n return sel", "def load_data_set(file):\n df = pd.read_csv(file)\n msk = np.random.rand(len(df)) < 0.8\n return df[msk], df[~msk]", "def draw_bs_sample(data):\n return rg.choice(data, size=len(data))", "def getRandom(self) -> int:\n # print(self.ind)\n return choice(self.items)", "def randomLeggings():\n return random.choice(LEGGINGS)", "def draw_bs_sample(data):\n return np.random.choice(data, size=len(data))", "def getRandom(self) -> int:\n return random.choice(list(self.d.keys()))", "def split_data(data, prob):\n\tresults = [], []\n\tfor row in data:\n\t\tresults[0 if random.random() < prob else 1].append(row)\n\treturn results", "def split_data(data, prob):\n\tresults = [], []\n\tfor row in data:\n\t\tresults[0 if random.random() < prob else 1].append(row)\n\treturn results", "def getRandom(self) -> int:\n return choice(self.arr)", "def getRandom(self) -> int:\n return choice(self.arr)", "def 
getRandom(self) -> int:\n return choice(self.arr)", "def some_simple_data(length=1000000):\n data = list(range(length))\n random.shuffle(data)\n return data", "def random_pick(id_list):\n return random.choice(id_list)", "def random(self, af=False):\n rank = randrange(self.order())\n return self.coset_unrank(rank, af)", "def sample(self):\n # return [v.sample() for v in self.variables]\n return self.domain[gc.rand_state.choice(len(self.domain))]", "def rand(self):\n raise NotImplementedError(\"Not implemented yet.\")", "def sample_personas(self):\n persona = self.personas_list\n n = constants.CURATED_PERSONA_CHOICES\n logging.info(\n f'Randomly choosing {n} personas from {len(persona)} available ones.'\n )\n if self.persona_replacement:\n return random.sample(persona, k=n)\n else:\n return [persona.pop() for _ in range(n)]", "def sample(self, seed=None):\n raise NotImplementedError()", "def getRandom(self) -> int:\n return choice(self.array)", "def _get_random_bandit(self)-> Bandit:\n return np.random.choice(self.bandits)", "def touching_choice(self,p):\n\n part = ['head', 'foot1', 'foot2', 'foot3', 'foot4', 'back', 'stomach', 'tail']\n if len(self.select[p]) == 0:\n return random.sample(part,2)\n elif len(self.select[p]) == 1:\n part.remove(self.select[p][0])\n c = random.sample(part,1)\n return [self.select[p][0], c[0]]\n else:\n return random.sample(self.select[p],2)", "def randomSubData(self, number): \n if number < 0 or number > self.__numExamples: \n raise ValueError(\"Random subset size must be between 0 and \" + str(self.__numExamples))\n \n self.__exampleIndices = array(sample(list(range(0, self.__numExamples)), number))", "def generate_data(data, model, samples, targeted=True, target_num=9, start=0, inception=False, seed=3, handpick=False ):\n random.seed(seed)\n inputs = []\n targets = []\n labels = []\n true_ids = []\n sample_set = []\n\n data_d = data.test_data\n labels_d = data.test_labels\n\n if handpick:\n if inception:\n deck = list(range(0, 1500))\n else:\n deck = list(range(0, 10000))\n random.shuffle(deck)\n print('Handpicking')\n\n while (len(sample_set) < samples):\n rand_int = deck.pop()\n pred = model.model.predict(data_d[rand_int:rand_int + 1])\n\n if inception:\n pred = np.reshape(pred, (labels_d[0:1].shape))\n\n if (np.argmax(pred, 1) == np.argmax(labels_d[rand_int:rand_int + 1], 1)):\n sample_set.append(rand_int)\n print('Handpicked')\n else:\n sample_set = random.sample(range(0, 10000), samples)\n\n for i in sample_set:\n if targeted:\n if inception:\n seq = random.sample(range(1, 1001), target_num)\n else:\n seq = range(labels_d.shape[1])\n\n for j in seq:\n if (j == np.argmax(labels_d[start + i])) and (inception == False):\n continue\n inputs.append(data_d[start + i])\n targets.append(np.eye(labels_d.shape[1])[j])\n labels.append(labels_d[start + i])\n true_ids.append(start + i)\n else:\n inputs.append(data_d[start + i])\n targets.append(labels_d[start + i])\n labels.append(labels_d[start + i])\n true_ids.append(start + i)\n\n inputs = np.array(inputs)\n targets = np.array(targets)\n labels = np.array(labels)\n true_ids = np.array(true_ids)\n return inputs, targets, labels, true_ids", "def pick(self, mess, args):\n return random.choice(args)", "def random_item(self):\n if self.sample_negative_items_empirically:\n # just pick something someone rated!\n # TODO: choose a user randomly\n u = self.uniform_user()\n i = random.choice(self.data[u].indices)\n else:\n i = random.randint(0, self.num_items - 1)\n return i", "def _random_subset(seq,m):\n 
targets=random.sample(seq,m)\n return targets", "def randomHelmet():\n return random.choice(HELMETS)", "def default_selection(random, population, args):\r\n return population", "def getRandom(self) -> int:\n # 此处可以优化\n datas = list(self.data.keys())\n pos = self.rand.randint(0, len(datas) - 1)\n val = datas[pos]\n return val", "def random_item(self):\n if self.sample_negative_items_empirically:\n # just pick something someone rated!\n u = self.uniform_user()\n i = random.choice(self.data[u].indices)\n else:\n i = random.randint(0,self.num_items-1)\n return i", "def random_item(self):\n if self.sample_negative_items_empirically:\n # just pick something someone rated!\n u = self.uniform_user()\n i = random.choice(self.dataModel.getItemIDsFromUid(u))\n else:\n i = random.randint(0,self.num_items-1)\n return i", "def shuffle_data(data):\n idx = np.arange(len(data))\n np.random.shuffle(idx)\n return data[idx, ...]", "def get_Sample(self, values, probabilities):\r\n return choices(values,probabilities)\r\n # return np.random.choice(values,p=probabilities)\r", "def data(i):\n m = i*100\n return [random.randint(0, m) for j in range(i)]", "def computer_random():\r\n ci = random.sample(range(1,43),5)\r\n return ci", "def getRandom(self) -> int:\n return random.choice(self.store_list)", "def test_0():\n sync.gen_multi_fake_data()#default is only one randomly selected data set\n sync.main(testing=True)", "def rs():\n return random.choice([-1,1])", "def rs():\n return random.choice([-1,1])", "def getRandom(self) -> int:\n return random.choice(self.arr)", "def getRandom(self):\n return self.nums[random.randint(0, len(self.nums) - 1)]\n\n # Your RandomizedSet object will be instantiated and called as such:\n # obj = RandomizedSet()\n # param_1 = obj.insert(val)\n # param_2 = obj.remove(val)\n # param_3 = obj.getRandom()", "def spinit(list):\n return (random.choice(list))", "def getRandom(self) -> int:\n if self.data:\n return self.data[random.randrange(len(self.data))]\n else:\n return None", "def computer_generate(self):\n return choice[random.randrange(3)]", "def getRandom(self) -> int:\n return random.choice(self.array)", "def __randomize_data(self):\n times = map(lambda (t, _): t, self.data)\n coords = map(lambda (_, c): c, self.data)\n return zip(times, map(DataGen.__randomize_coord, coords))", "def updateCalculatedDataModelRandomly(self):\n model = self._calculatedDataModel.asDataModel()\n for column_index in range(1, model.columnCount()):\n for row_index in range(model.rowCount()):\n index = model.index(row_index, column_index)\n value = random.randrange(100)\n role = Qt.DisplayRole\n model.setData(index, value, role)", "def test_get_random_data_basic(self):\n cdata = random_data(num_features=2,\n num_samples=4,\n labels=None)\n self.assertEqual(cdata.num_features, 2)\n self.assertEqual(cdata.num_samples, 4)", "def random_choice(var_name):\r\n return random.choice(var_name)", "def sample(self):\n sampleIndices = self.random_state.choice(len(self.X), int(len(self.X)*self.sample_ratio), replace=False)\n\n return self.X[sampleIndices]\n pass", "def sample_data(_,\n val,\n sampling_strategy=spec.SamplingStrategy.UNDERSAMPLE,\n side=0):\n\n if sampling_strategy == spec.SamplingStrategy.UNDERSAMPLE:\n random_sample_data = random.sample(val, side)\n elif sampling_strategy == spec.SamplingStrategy.OVERSAMPLE:\n random_sample_data = random.choices(val, k=side)\n else:\n raise ValueError(\"Invalid value for sampling_strategy variable!\")\n\n for item in random_sample_data:\n yield item", "def 
option():\n\toption = df[\"option\"]\n\tprint(\"You can \" + option.sample())", "def randomize_soy(soy_data):\n\n print('[ INFO ]: Randomizing soy data...')\n\n np.random.seed(4)\n soy_data['rand'] = np.random.rand(len(soy_data))\n soy_data = soy_data.sort_values(by=['rand']).reset_index()\n soy_data = soy_data.drop(['rand'], axis=1)\n\n return soy_data", "def sample_random_architecture(self, dataset_api=None):\n channels = np.random.choice(self.channel_candidates, size=len(self.channels)).tolist()\n self.set_channels(channels)", "def _sample_mini_dataset(dataset, num_classes, num_shots):\n shuffled = list(dataset)\n random.shuffle(shuffled)\n for class_idx, class_obj in enumerate(shuffled[:num_classes]):\n for sample in class_obj.sample(num_shots):\n yield (sample, class_idx)", "def pick_one(self):\n index = 0\n r = random.random()\n while r >= 0:\n r = r - self.normalised_fitness[index]\n index += 1\n index -= 1\n return self.population[index]" ]
[ "0.73725927", "0.7179318", "0.70049816", "0.6915993", "0.68809456", "0.6742614", "0.66800225", "0.6568194", "0.65536875", "0.65441173", "0.6533348", "0.652268", "0.65050435", "0.64911234", "0.6435912", "0.64282745", "0.6402564", "0.6368382", "0.63608557", "0.6303325", "0.6302801", "0.62998617", "0.62880856", "0.6250222", "0.624839", "0.6236472", "0.62349945", "0.6219508", "0.62085956", "0.61937565", "0.6189435", "0.61823297", "0.6164767", "0.6163929", "0.615513", "0.61492723", "0.61439776", "0.6142531", "0.6142359", "0.6141548", "0.61395425", "0.61358666", "0.61274236", "0.6121665", "0.61080223", "0.61031336", "0.60894674", "0.60773265", "0.6075797", "0.60743284", "0.6069612", "0.6069612", "0.60661846", "0.60661846", "0.60661846", "0.6051552", "0.60457903", "0.6045385", "0.6040441", "0.6039906", "0.6039411", "0.6036293", "0.60356456", "0.6024902", "0.60219955", "0.60195047", "0.60191023", "0.60077506", "0.6006434", "0.6002031", "0.59929985", "0.59919196", "0.59907913", "0.5990753", "0.5982621", "0.59733653", "0.597189", "0.596615", "0.59644896", "0.59607834", "0.59520566", "0.5949235", "0.5949235", "0.5946438", "0.59451884", "0.5939399", "0.5936389", "0.59337395", "0.5932308", "0.59195036", "0.59171987", "0.591682", "0.59149593", "0.5910622", "0.5910393", "0.5905551", "0.5898947", "0.58953226", "0.58860445", "0.58803594" ]
0.65095335
12
__init__(self, filename) Create a configparser object and store config values in class variables for later retrieval
def __init__(self, filename="config.ini"):
    if not os.path.isfile(filename):
        self.set_default_config(filename)
    self.config = configparser.ConfigParser()
    self.config.read(filename)
    self.filename = filename
    self.database_name = self.config.get('config', 'database_name', fallback='manga.db')
    self.volume_limit = self.config.getint('config', 'volume_limit', fallback=128)
    self.series_per_page = self.config.getint('config', 'series_per_page', fallback=0)
    self.compact_list = self.config.getboolean('config', 'compact_list', fallback=False)
    self.show_empty_series = self.config.getboolean('config', 'show_empty_series', fallback=False)
    self.default_to_gui = self.config.getboolean('config', 'default_to_gui', fallback=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, filename, dirname='~'):\n self.config = configparser.ConfigParser()\n\n expanded_dirname = os.path.expanduser(dirname)\n self.configuration_filename = os.path.join(expanded_dirname, filename)\n if os.path.isfile(self.configuration_filename):\n self.config.read(self.configuration_filename)", "def __init__(self):\n\n self.path = os.path.dirname(os.path.realpath(__file__)) + '/config.ini'\n self.config = configparser.ConfigParser()\n self.config.read(self.path)", "def __init__(self):\n self.filename = pathlib.Path(__file__).parent.absolute().__str__() + '/../../data/config.ini'\n self.data = ConfigParser()\n self.data.read(self.filename)", "def __init__(self, ini_file):\n self.config = configparser.ConfigParser()\n self.config.read(ini_file)", "def __init__(self, ini_file):\n self.config = configparser.ConfigParser()\n self.config.read(ini_file)\n #print(self.config)", "def __init__(self, config_file):\n \n self.log = logging.getLogger(__name__)\n\n self.parser = ConfigParser.ConfigParser()\n if os.path.exists(config_file) and os.path.isfile(config_file):\n self.parser.read(config_file)\n self.log.debug(\"opened configuration '%s'\" % config_file)\n else:\n raise ConfigError(\"Config file missing\", \"File '%s' doesn't exist.\" % (config_file))\n\n self.config_file = config_file\n self.check_config()", "def __init__(self, config_file_name=\"config.json\"):\n self.config_file_name = config_file_name\n self._config = self._open_config_file()", "def __init__(self, filepath=None):\n self.path = self.__default_filepath if filepath is None else filepath\n self.parser = configparser.ConfigParser()\n if self.__section_default not in self.parser.sections():\n self.parser.add_section(self.__section_default)\n\n self.parser.read(self.path)", "def __init__(self, filename):\n self.cfg_spec = ConfigObj(config_spec_text.splitlines(), list_values=False)\n self.cfg_filename = filename\n valid = Validator()\n if not os.path.exists(self.cfg_filename):\n #no configuration file found\n logger.info(\"File %s not found, so creating one from you from defaults\" % self.cfg_filename)\n cfg = ConfigObj(configspec=self.cfg_spec, stringify=True, list_values=True)\n cfg.filename = self.cfg_filename\n test = cfg.validate(valid, copy=True)\n cfg.write()\n self.cfg = ConfigObj(self.cfg_filename, configspec=self.cfg_spec)\n rtn = self.cfg.validate(valid, preserve_errors=True)\n if type(rtn) == types.BooleanType and rtn:\n logger.info(\"Config file validated\")\n self.tested = True\n else:\n self.tested = False\n res = flatten_errors(self.cfg, rtn)\n self.errortxt = ''\n for row in res:\n self.errortxt += 'In Section %s, key %s has error: %s' % (row[0], row[1], row[2])\n logger.error(self.errortxt)", "def __init__(self, path_to_config_file):\n self.file_path = path_to_config_file", "def __init__(self, file_handle):\n config = ConfigParser.ConfigParser()\n config.readfp(file_handle)\n self.database_address_ = config.get('General', 'database_address')\n self.google_developer_key_ = config.get('Google', 'developer_key')\n self.google_cref_ = config.get('Google', 'cref')", "def __init__(self, __file):\n\n\t\tself.fileName = __file\n\t\tif (os.path.isfile(self.fileName)):\n\t\t\t# config.ini found, load it\n\t\t\tself.config.read(self.fileName)\n\t\t\tself.default = False\n\t\telse:\n\t\t\t# config.ini not found, generate a default one\n\t\t\tself.generateDefaultConfig()\n\t\t\tself.default = True", "def __init__(self, config_file=None):\n self.file = config_file\n self.parser = SafeConfigParser()\n if 
isinstance(self.file, (str, list)):\n self.parser.read(self.file)\n else: # assume file object was given instead\n self.parser.read_file(self.file)\n self._flask_cache = None\n self._assets_cache = None\n self._gridrealm_cache = None", "def __init__(self, config_file=None):\n\t\tself.options = {}\n\n\t\tif config_file:\n\t\t\tself.set_file(config_file)", "def __init__(self, config_file = 'config.yaml'):\n\n self.name = ''\n self.img_dir = ''\n self.out_dir = ''\n self.cam_file = ''\n self.options_file = ''\n self.output_xml_file = ''\n\n # If there is an options file, it will overwrite the defaults \n if config_file is not None:\n self.load(config_file)", "def __init__(self, config_file, verbose):\r\n self.loadConfig(config_file)\r\n self.verbose = verbose", "def __init__(self, filename=None):\n if filename:\n if not os.path.exists(filename):\n raise Exception(\"No configuration found at %s\" % filename)\n super(Configuration, self).__init__(filename)", "def __init__(self, config_file):\n with open(config_file, 'r') as file:\n self.config = json.load(file)\n self.set_config(self.config)", "def __init__(self):\n # Read configuration into dictionary\n self.directories = general.config_directories()\n self.config = general.read_yaml_files(self.directories)", "def __init__(self, file):\n self.file = file\n self.config = self.__analyze_config()", "def __init__(self, config_file):\n Config = ConfigParser.ConfigParser()\n Config.read(config_file)\n \n self.port_id = Config.get(\"SerialPortSection\", \"ComPort\")\n self.baud_rate = Config.get(\"SerialPortSection\", \"BaudRate\")\n self. timeout = Config.get(\"SerialPortSection\", \"TimeOut\")\n self.config_file = config_file", "def __init__(self):\n ConfigParser.RawConfigParser.OPTCRE = re.compile(r'(?P<option>[^=\\s][^=]*)\\s*(?P<vi>[=])\\s*(?P<value>.*)$')\n self.CONFIG = ConfigParser.ConfigParser()\n self.CONFIG.read(os.path.join(os.path.dirname(__file__)))\n self.IPS = []", "def __init__(self):\n\t\t\n\t\tsettings = configparser.SafeConfigParser(allow_no_value=True)\n\t\tlist=settings.read('data/settings.cfg')\n\t\tif not 'data/settings.cfg' in list:\n\t\t\tprint('no configuration file present.. making one')\n\t\t\tself.makeConfigFile(settings)\n\t\t\tshare = ['']\n\t\t\tself.nodes = []\n\t\telse:\n\t\t\tshare, nodes = self.openConfig(settings)\n\t\t\tself.nodes = nodes\n\t\t\n\t\t\n\t\tself.files = self.loadFiles(share)\t\t\n\t\tself.share = share\n\t\tself.kill= False\n\t\tself.downloads = {}\n\t\tself.currentVersion = (0,2,1)\n\t\tself.totalDownloads = 0\n\t\tself.current = 0\n\t\tself.config = settings", "def __init__(self, filename=None):\r\n self._specs = self._load_spec(filename)\r\n self._properties = {}\r\n self._names = []\r\n self._defaults = {}", "def __init__(self, config_file: str = \"config.json\"):\n path_to_config = (Path(sys.modules[self.__module__].__file__).parent\n / config_file)\n with open(path_to_config, \"r\") as f:\n self.options = json.load(f)", "def __init__(self, filepath):\n try:\n config_file_r = open(filepath)\n self.sim_parametres = yaml.load(config_file_r, Loader=yaml.FullLoader)\n except:\n raise Exception(\"Le fichier de configuration n'a pas été atteint ou n'a pas pu être lu. 
Veuillez vérifier \"\n \"qu'il n'y ait aucune erreur de syntaxe.\")", "def __init__(self, config_file_name=\"config.json\"):\n with open(config_file_name, \"r\") as config:\n f = dict(json.load(config))\n for key, value in f.items():\n setattr(self, key, value)", "def __init__(self, filename):\n if filename is None:\n self.config = toml.load('configs/default.conf')\n return\n self.config = toml.load(filename)\n self.config['directory'] = {}\n self.config['directory']['root'] = os.path.dirname(\n os.path.realpath(__file__))\n self.config['directory']['datasets'] = os.path.join(\n self.config['directory']['root'], 'datasets')", "def __init__(self):\n cfg = ConfigParser.ConfigParser()\n\n if sys.executable == sys.argv[0]: # Windows binary\n self.VISIONEGG_SYSTEM_DIR = os.curdir\n self.VISIONEGG_USER_DIR = os.curdir\n else:\n # non-standard VisionEgg installations\n try:\n self.VISIONEGG_SYSTEM_DIR = os.environ['VISIONEGG_SYSTEM_DIR']\n except KeyError:\n self.VISIONEGG_SYSTEM_DIR = os.path.split(__file__)[0]\n user_dir = os.path.expanduser(\"~\")\n self.VISIONEGG_USER_DIR = os.path.join(user_dir,\"VisionEgg\")\n\n # See if there's an environment variable for the config file\n if 'VISIONEGG_CONFIG_FILE' in os.environ.keys():\n configFile = os.environ['VISIONEGG_CONFIG_FILE']\n else:\n # Is there one in VISIONEGG_USER_DIR?\n configFile = os.path.join(self.VISIONEGG_USER_DIR,\"VisionEgg.cfg\")\n if not os.path.isfile(configFile):\n configFile = os.path.join(self.VISIONEGG_SYSTEM_DIR,\"VisionEgg.cfg\")\n if not os.path.isfile(configFile):\n configFile = None # No file, use defaults specified in environment variables then here\n\n if configFile:\n cfg.read(configFile)\n else:\n # pretend we have a config file\n cfg.add_section('General')\n for key in defaults.keys():\n cfg.set('General',key,str(defaults[key]))\n if sys.platform == 'darwin':\n cfg.add_section('darwin')\n for key in extra_darwin_defaults.keys():\n cfg.set('darwin',key,str(extra_darwin_defaults[key]))\n\n # Do the general stuff first\n # Set the default values\n for name in defaults.keys():\n if name in os.environ.keys():\n value = os.environ[name]\n else:\n value = defaults[name]\n if isinstance(defaults[name], int):\n\t\tif value == 'False':\n\t\t value = 0\n\t\telif value == 'True':\n\t\t value = 1\n setattr(self,name,int(value))\n elif isinstance(defaults[name], float):\n setattr(self,name,float(value))\n else:\n setattr(self,name,value)\n\n # Get the values from the configFile\n general_options = cfg.options('General')\n\n self._delayed_configuration_log_warnings = [] # chick and egg problem\n # set defaults from config file\n for option in general_options:\n name = option.upper()\n if name not in defaults.keys():\n self._delayed_configuration_log_warnings.append(\n \"While reading %s: The variable \\\"%s\\\" is not (anymore) a Vision Egg variable.\"%(os.path.abspath(configFile),option))\n continue\n value = cfg.get('General',option)\n if name in os.environ.keys():\n value = os.environ[name]\n if isinstance(defaults[name], int):\n\t\tif value == 'False':\n\t\t value = 0\n\t\telif value == 'True':\n\t\t value = 1\n setattr(self,name,int(value))\n elif isinstance(defaults[name], float):\n setattr(self,name,float(value))\n else:\n setattr(self,name,value)\n\n # Do platform specific stuff\n # Set the default values\n platform_name = sys.platform\n extra_name = \"extra_%s_defaults\"%(platform_name,)\n if extra_name in globals().keys():\n extra_defaults = globals()[extra_name]\n for name in extra_defaults.keys():\n 
setattr(self,name,extra_defaults[name])\n\n # Get the values from the configFile\n platform_options = cfg.options(platform_name)\n for option in platform_options:\n name = option.upper()\n if name not in extra_defaults.keys():\n raise KeyError(\"No Vision Egg configuration variable \\\"%s\\\"\"%option)\n value = cfg.get(platform_name,option)\n if name in os.environ.keys():\n value = os.environ[name]\n if isinstance(extra_defaults[name], int):\n\t\t if value == 'False':\n\t\t value = 0\n \t\t elif value == 'True':\n\t\t value = 1\n setattr(self,name,int(value))\n elif isinstance(extra_defaults[name], float):\n setattr(self,name,float(value))\n else:\n setattr(self,name,value)\n\n if(configFile):\n self.VISIONEGG_CONFIG_FILE = os.path.abspath(configFile)\n else:\n self.VISIONEGG_CONFIG_FILE = None", "def __init__(self, settings):\n self._read_config(settings)", "def __init__(self, conf_file_location: str, template_dir: str, target_dir: str, hard_reset: bool):\n self.config: Config = yaml_loader.load(conf_file_location, Config)\n self.massage_config_file()\n self.config_dict: Dict = as_dict(self.config)\n self.template_dir = template_dir\n self.target_dir = target_dir\n self.hard_reset = hard_reset", "def __init__(self, config_file='/etc/sfa/ldap_config.py'):\n\n try:\n execfile(config_file, self.__dict__)\n\n self.config_file = config_file\n # path to configuration data\n self.config_path = os.path.dirname(config_file)\n except IOError:\n raise IOError, \"Could not find or load the configuration file: %s\" \\\n % config_file", "def __init__(self, default_cfg='default.ini', argv=None):\n\n # Read the default settings\n self.config = configparser.ConfigParser()\n self.config.read(default_cfg)\n # Check for a command line config argument\n if argv is not None and '--config_fn' in argv:\n # Get the config filename\n config_fn = argv[argv.index('--config_fn') + 1]\n argv.remove('--config_fn')\n argv.remove(config_fn)\n # Read the new config file\n new_cfg = configparser.ConfigParser()\n new_cfg.read(config_fn)\n # Overwrite the default values\n self._overwrite_with_config(new_cfg)\n if argv is not None:\n self._parse_arguments(argv)\n self._update()", "def __init__(self, rootdir, filename, clean=False):\n self._autoconfig_filename = rootdir + filename\n self._rootdir = rootdir\n self._metadata = {}\n self._nodes = {}\n self._vpp_devices_node = {}\n self._hugepage_config = \"\"\n self._clean = clean\n self._loadconfig()\n self._sockfilename = \"\"", "def __init__(self, file):\n self.__config = file\n with open(self.__config) as json_file:\n data = json.load(json_file)\n self.__data = data", "def __init__(self, name, defaults = {} ):\n self.defaults = defaults\n self.filename = os.path.expanduser(name)+\".ini\"\n self.conf = {}\n self.reset()\n if os.path.exists(self.filename):\n self.load()", "def __init__(self, paths=('',), global_paths=()):\n config = RawConfigParser()\n\n def _search(paths, filename):\n for path in paths:\n path = os.path.join(path, filename)\n if os.path.exists(path):\n self.paths.append(path)\n try:\n config.read(path)\n except ConfigParser.ParsingError as e:\n raise ValidationError('%s: %s' % (path, e))\n\n self.paths = []\n _search(global_paths, self.GLOBAL_FILENAME)\n _search(paths, self.FILENAME)\n\n self.config = config\n\n self._validate()", "def __init__(self):\n\n if Config._instance:\n raise Exception('Config singleton is already instantiated. 
User Config.get_instance() obtain it.')\n\n parser = configparser.ConfigParser()\n parser.read('C:\\\\Users\\\\Akatosh\\\\PythonProjects\\\\note-it\\\\config\\\\config.ini')\n\n self.sections = {}\n\n for section in parser:\n self.sections[section] = _Section(parser[section])\n\n Config._instance = self", "def __init__(self, config_directory: Optional[pathlib.Path] = None):\n self._config_parser = configparser.ConfigParser()\n # Preserve case for keys.\n self._config_parser.optionxform = lambda x: x\n\n if config_directory is None:\n self._config_filepath = pathlib.Path(_KFP_CONFIG_FILE)\n else:\n self._config_filepath = config_directory / _KFP_CONFIG_FILE\n\n try:\n with open(str(self._config_filepath), 'r') as f:\n self._config_parser.read_file(f)\n except IOError:\n warnings.warn('No existing KFP Config file found')\n\n if not self._config_parser.has_section(_COMPONENTS_SECTION):\n self._config_parser.add_section(_COMPONENTS_SECTION)\n\n self._components = {}", "def _init_config(self, configPath=None):\n # TODO: The SafeConfigParser class has been renamed to ConfigParser in Python 3.2.\n # This alias will be removed in future versions.\n # We still use SafeConfigParser for backwards compatibility with Python 2.\n self.config = SafeConfigParser()\n # Make option names case sensitive\n self.config.optionxform = str\n\n if configPath and os.path.isdir(configPath):\n configDir = configPath\n else:\n configDir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'conf')\n\n # List filenames in configDir alphabetically\n _, _, configFiles = next(os.walk(configDir))\n configFiles = sorted(configFiles, key=str.lower)\n\n # Read configuration pipeline\n for f in configFiles:\n with open(os.path.join(configDir, f)) as configFile:\n self.config.readfp(configFile)\n self._store_config_pass()\n\n if configPath and os.path.isfile(configPath):\n self.config.read(configPath)\n self._store_config_pass()\n\n appSection = 'application'\n self.appName = self._get_option_value(appSection, 'appName')\n self.appResource = self._get_option_value(appSection, 'appResource')\n self.appArgs = []\n appArgs = self._get_option_value(appSection, 'appArgs')\n if appArgs:\n self.appArgs = appArgs.split(' ')\n self.mainClass = self._get_option_value(appSection, 'mainClass')", "def __init__(self):\n\n self.root_path = os.path.dirname(os.path.abspath(__file__))[:-5]\n self.config_path = os.path.join(self.root_path, \"files\\\\CONFIG.txt\")\n self.metrics_path = os.path.join(self.root_path, \"files\\\\metrics.txt\")\n\n self.setup_metrics_file()\n\n if self.check_configuration() is False:\n self.setup_configuration_file()", "def __init__(self, file):\n\n self.read(file)\n\n for key in [\"sqlite3dir\", \"htmldir\"]:\n print(key)\n if not hasattr(self, \"_{:s}\".format(key)):\n raise ValueError(\"misspecification in config file for \\\"{:s}\\\"\".format(key) + \\\n \" in [observations] section\")", "def __init__(self, name=None):\n self.name = name or \"default\"\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise ColinConfigException(\"Config file '{}' cannot be loaded.\".format(config_path))", "def __init__(self, configfile_name):\n assert os.path.exists(configfile_name), \\\n 'Config file %s does not exist' % (configfile_name)\n\n correct_param_load = self.load_config_file(configfile_name)\n assert correct_param_load,\\\n 'Config params could not be loaded from 
file'\n\n self.fullpath_input_configfile = configfile_name\n self.word_det_rfc = None\n self.reg_coeffs = None\n self.bb_reg = None\n self.img_files = None", "def __init__(self, config_file):\n defaults = {'kmsauth_user_key': None, 'logging_level_option': 'INFO'}\n ConfigParser.RawConfigParser.__init__(self, defaults=defaults)\n self.read(config_file)\n\n if not self.has_section(SECTION):\n raise ValueError(\n \"Missing {0} configuration section.\".format(SECTION)\n )\n\n for option in ['kmsauth_key', 'kmsauth_to_context']:\n if not self.has_option(SECTION, option):\n raise ValueError(\"{0} not set.\".format(option))", "def load_config(cls, config_file = None):\n config = ConfigParser()\n \n files = [\"/etc/imp.cfg\", os.path.expanduser(\"~/.imp.cfg\"), \".wm\", \".imp\"]\n if config_file is not None:\n files.append(config_file)\n \n config.read(files)\n cls.__instance = config", "def _new():\n\treturn ConfigParser(\n\tdelimiters = ('=',),\n\tcomment_prefixes = ('#', ';'),\n\tdefault_section = 'default',\n\tallow_no_value = False,\n\tstrict = False,\n\tinterpolation = ExtendedInterpolation(),\n\tdefaults = {\n\t\t'debug': False,\n\t\t'datadir': path.join(path.expanduser('~'), '.local', 'rosshm'),\n\t\t'log.level': 'warn',\n\t\t'core.enable': True,\n\t\t'db.driver': 'sqlite',\n\t\t'db.name': 'rosshmdb',\n\t\t'db.config': '',\n\t\t'static.enable': True,\n\t\t'web.enable': True,\n\t},\n)", "def __init__(self, filename='', complement='', silent=True, config='config.txt'):\n # load preferences\n if config is not None:\n if config == 'config.txt':\n config = os.path.join(os.path.dirname(os.path.realpath(__file__)), config)\n if Graph.CONFIG_GRAPH is not None and Graph.CONFIG_FILENAME == config:\n self._config = Graph.CONFIG_GRAPH\n else:\n self._config = Graph(config, complement={'readas': 'database'},\n config=None)\n if self.CONFIG_FILENAME is None:\n Graph.CONFIG_FILENAME = config\n Graph.CONFIG_GRAPH = self._config\n # actually load the file\n self.reset(filename, complement=complement, silent=silent)", "def __init__(self, *args, **kwargs):\r\n super().__init__()\r\n self._cfg = ConfigDict() # current configuration\r\n self._default_config = ConfigDict() # default configuration\r\n self._temp_config = OrderedDict() # temporary configuration\r\n self._path = Path() # current configuration path\r\n self._default_path = Path() # default configuration path\r\n self._conversion_dict = None\r\n self._auto_cast = None\r\n self._write_flags = None\r\n self._force_load = None\r\n self._load_empty = None\r\n self._ask_path = None\r\n self._search_in_default_config = None\r\n self._init_count = 0\r\n self._policies = defaultdict(bool) # by default every modification is forbidden # WIP\r\n if args or kwargs:\r\n self.init(*args, **kwargs)\r\n logger.debug(\"Config object created.\")", "def __init__(self, filename):\n\n parser = Parser(filename=filename)\n self.uuid = parser.segregated(parser.read(),'UUID')\n self.id = parser.segregated(parser.read(),'ID')\n self.rate = parser.segregated(parser.read(),'RATE')\n self.gpio = parser.segregated(parser.read(),'GPIO')\n self.ddl = parser.segregated(parser.read(),'DATA_DELIVERY_LOCATION')", "def readConfigFile(self):\n self.config_obj = ConfigParser.ConfigParser()\n self.config_obj.readfp(open(self.configfile))\n\n # Set the log file\n if (not self.args_obj.log_file and self.config_obj.has_option('DEFAULT','logfile')):\n self.logfile = self.config_obj.get('DEFAULT', 'logfile')\n\n # Set the baud rate\n if (not self.args_obj.baud_rate and 
self.config_obj.has_option('DEFAULT','baud')):\n self.baudrate = self.config_obj.get('DEFAULT', 'baud')\n\n # Set the device port \n if (not self.args_obj.device and self.config_obj.has_option('DEFAULT','device')):\n self.device = self.config_obj.get('DEFAULT', 'device')\n\n # Set the connection timeout\n if (not self.args_obj.timeout and self.config_obj.has_option('DEFAULT','timeout')):\n self.timeout = self.config_obj.get('DEFAULT','timeout')\n\n if DEBUG:\n print('(DEBUG) Config Options:')\n self.pp.pprint(self.config_obj.sections())", "def __init__(self, filename=None, use_argv=True):\n self._init_filename = filename\n if use_argv:\n self.options, self.args = [self.get_parser().parse_args()] * 2\n else:\n self.options = self.args = None\n self._wrapped = self.load(file=self.settings_file)\n # build a special dynamic section for things the user wants,\n # ie, things that have been passed into the option\n # parser but are not useful in the .ini\n if not self.get_section('user'):\n self['user'] = {}\n if self.options is not None:\n self['user']['shell'] = self.options.shell and 'true' or ''\n else:\n self['user']['shell'] = ''", "def __init__ (self, fileName : String):\n\n Logging.trace(\">>: %r\", fileName)\n\n self._keyToValueMap = {}\n self._keyToStringValueMap = {}\n visitedFileNameSet = set()\n lineList = []\n isOkay = self._readFile(\"\", fileName, lineList, visitedFileNameSet)\n self._parseConfiguration(lineList)\n\n Logging.trace(\"<<: %s\",\n _reprOfStringToValueMap(self._keyToValueMap))", "def __init__(self):\n\n # open json config file that reads in information\n config_path = open(\"config.json\", \"r\")\n config_json = config_path.read()\n config_dict = json.loads(config_json)\n\n # assign object variables\n self.project_id = config_dict[\"project-id\"]\n self.bucket_name = config_dict[\"bucket-name\"]\n self.location_id = config_dict[\"key-location\"]\n self.key_ring_id = config_dict[\"key-ring-id\"]\n self.crypto_key_id = config_dict[\"crypto-key-id\"]\n self.service_account_email = config_dict[\"service-account-email\"]\n\n # close the file\n config_path.close()", "def __init__(self):\n self.options, self.args = self.get_parser().parse_args()\n\n # special case\n self.done=False\n if self.options.encode:\n print generate_password_hash(self.options.encode)\n self.doit()\n self.done=True\n return\n\n self._settings = {}\n if self.options.config:\n _file = self.options.config\n else:\n report(\"You did not pass in a config file with --config, assuming you want %s\"%self.default_file)\n _file = self.default_file\n self._settings.update(self.load(file=_file))\n\n # a few command line options are allowed to override the .ini\n if self.options.port:\n self._settings.update({'flask.port':self.options.port})\n\n # build a special section for things the user wants,\n # ie, things that have been passed into the option\n # parser but are not useful in the .ini\n self._settings.update({'user.shell' : self.options.shell and 'true' or ''})\n self._settings.update({'user.encode_password':self.options.encode})\n def prepare(k,v):\n \"\"\" allow pythonic comments in the .ini files,\n and strip any trailing whitespace.\n\n TODO: move this to ConfigParser subclass.\n \"\"\"\n self._settings[k]=v.strip()\n if '#' in v:\n self._settings[k]=v[:v.find('#')]\n\n [ prepare(k,v) for k,v in self._settings.items() ]\n\n self.doit()", "def __init__(self):\n\t\tConfigFile.__init__(self)\n\t\tself.created_by = None\n\t\tself.created_on = None\n\t\tself.definition = None\n\t\tself.note = 
None\n\t\tself.rejected_by = None\n\t\tself.rejected_on = None\n\t\tself.replaced_by = None\n\t\tself.status = 'elaboration'\n\t\tself.status_reason = None\n\t\tself.term = None\n\t\tself.todo = None\n\t\tself._valid_file_extension = 'def'", "def __init__(self, custom_file=None):\n self.home = os.path.abspath(os.path.expanduser('~'))\n # Static Defaults\n defaults = \\\n {\n 'cfg_sn_username' : '',\n 'cfg_sn_password' : '',\n 'cfg_nt_ext' : 'txt',\n 'cfg_nt_path' : os.path.join(self.home, 'Simplenote'),\n 'cfg_nt_trashpath' : '.trash',\n 'cfg_nt_filenamelen' : '60',\n 'cfg_log_level' : 'info'\n }\n\n cp = configparser.SafeConfigParser(defaults)\n if custom_file is not None:\n self.configs_read = cp.read([custom_file])\n else:\n self.configs_read = cp.read([os.path.join(self.home, '.snsync')])\n\n cfg_sec = 'snsync'\n\n if not cp.has_section(cfg_sec):\n cp.add_section(cfg_sec)\n\n self.configs = collections.OrderedDict()\n\n #\n # Environment Varialbles over-ride config file settings.\n # Config files are cfg_abc\n # Envs are sn_abc\n #\n\n if os.environ.get('sn_username') is None:\n val_sn_username = cp.get(cfg_sec, 'cfg_sn_username', raw=True)\n else:\n val_sn_username = os.environ.get('sn_username')\n self.configs['sn_username'] = [val_sn_username, 'Simplenote Username']\n\n if os.environ.get('sn_password') is None:\n val_sn_passowrd = cp.get(cfg_sec, 'cfg_sn_password', raw=True)\n else:\n val_sn_passowrd = os.environ.get('sn_password')\n self.configs['sn_password'] = [val_sn_passowrd, 'Simplenote Password']\n\n if os.environ.get('sn_nt_ext') is None:\n val_sn_nt_ext = cp.get(cfg_sec, 'cfg_nt_ext')\n else:\n val_sn_nt_ext = os.environ.get('sn_nt_ext')\n self.configs['cfg_nt_ext'] = [val_sn_nt_ext, 'Note file extension']\n\n if os.environ.get('sn_nt_path') is None:\n val_sn_nt_path = cp.get(cfg_sec, 'cfg_nt_path')\n else:\n val_sn_nt_path = os.environ.get('sn_nt_path')\n self.configs['cfg_nt_path'] = [val_sn_nt_path, 'Note storage path']\n\n if os.environ.get('sn_nt_trashpath') is None:\n val_sn_nt_trashpath = cp.get(cfg_sec, 'cfg_nt_trashpath')\n else:\n val_sn_nt_trashpath = os.environ.get('sn_nt_trashpath')\n self.configs['cfg_nt_trashpath'] = [val_sn_nt_trashpath, 'Note Trash Bin Folder for deleted notes']\n\n if os.environ.get('sn_nt_filenamelen') is None:\n val_sn_nt_filenamelen = cp.get(cfg_sec, 'cfg_nt_filenamelen')\n else:\n val_sn_nt_filenamelen = os.environ.get('sn_nt_filenamelen')\n self.configs['cfg_nt_filenamelen'] = [val_sn_nt_filenamelen, 'Length of Filename']\n\n if os.environ.get('sn_log_level') is None:\n val_sn_log_level = cp.get(cfg_sec, 'cfg_log_level')\n else:\n val_sn_log_level = os.environ.get('sn_log_level')\n self.configs['cfg_log_level'] = [val_sn_log_level, 'snsync log level']\n\n # Dynamic Defaults\n if os.environ.get('sn_db_path') is None:\n if cp.has_option(cfg_sec, 'cfg_db_path'):\n val_sn_db_path = cp.get(cfg_sec, 'cfg_db_path')\n else:\n val_sn_db_path = os.path.join(cp.get(cfg_sec, 'cfg_nt_path'), '.snsync.sqlite')\n else:\n val_sn_db_path = os.environ.get('sn_db_path')\n self.configs['cfg_db_path'] = [val_sn_db_path, 'snsync database location']\n\n if os.environ.get('sn_log_path') is None:\n if cp.has_option(cfg_sec, 'cfg_log_path'):\n val_sn_log_path = cp.get(cfg_sec, 'cfg_log_path')\n else:\n val_sn_log_path = os.path.join(cp.get(cfg_sec, 'cfg_nt_path'), '.snsync.log')\n else:\n val_sn_log_path = os.environ.get('sn_log_path')\n self.configs['cfg_log_path'] = [val_sn_log_path, 'snsync log location']", "def __init__(self, int_conf_file):\n self.fname = 
int_conf_file\n with open(int_conf_file) as fin:\n self.cfg = cfg = json.load(fin)\n self.probes = cfg['all_probes']\n self.state = None\n self.queue = None\n self.notifiers = {}\n self.notif_cfg = cfg.get('notifiers', [])\n self.users = users = cfg.get('users') or {}\n for name, userinfo in users.items():\n if 'name' not in userinfo:\n userinfo['name'] = name", "def load_config(self, config_file):\n self.config = ConfigParser.ConfigParser()\n self.config.read(config_file)", "def __init__(self,filename):\n\n\t\tself.filename = filename", "def __init__(self, fileName = \"netspy.conf\"):\n\t\tself.options = {'port' : '', 'server_port' : '', 'server_ip': '', 'service_list': [] }\n\t\tself.fileName = fileName\n\t\tself.loadFromFile(self.fileName)", "def build_config_parser(filename='GradientOneAuthConfig.txt'):\n cfg = ConfigParser(dict_type=dict)\n cfg.optionxform = str\n cfgfile = None\n try:\n cfgfile = find_file(filename)\n except IOError:\n raise ValueError(\"Could not find a {} file. Please download \"\n \"one for this machine.\".format(filename))\n try:\n cfg.read(cfgfile)\n except IOError:\n raise ValueError(\"Could not read the {} file. Please download a \"\n \"valid config file for this machine.\"\n .format(filename))\n return cfg", "def create_from_config_file(cls, filename: str):\n return cls(load_yaml_config(filename))", "def __init__(self, filepath):\n self.filepath = filepath", "def __init__(self, *args, **kwargs):\n # This is a non-positional argument parser that can be used for\n # --config processing\n self.parser = argparse.ArgumentParser(*args, **kwargs)\n self.parser.add_argument(\"--config\", metavar=\"FILE\",\n help=\"specify a configuration file\")\n self.parser.add_argument(\"--log\", metavar=\"FILE\",\n help=\"specify a log file\")\n self.parser.add_argument(\"--log-level\", metavar=\"LEVEL\",\n choices=[\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\",\n \"CRITICAL\"],\n default=\"INFO\",\n help=\"{DEBUG,INFO,WARNING,ERROR,CRITICAL} \"\n \"(default=INFO)\")\n\n # Save in case they are needed for reinitialization\n self.kwargs = kwargs\n self.kwargs['add_help'] = False\n self.kwargs['parents'] = [self.parser]\n argparse.ArgumentParser.__init__(self, *args, **self.kwargs)", "def __init__(self, filename):\n self.filename = filename", "def __init__(self):\n with open('config.json', encoding='UTF-8') as json_data_file:\n self.config = json.load(json_data_file)\n self._get_credential()\n self.file_tree = [{}] * 100", "def __init__(self, cooper_config_file):\n\t\t# Determine if the default config file will be used or a user-defined file\n\t\ttry:\n\t\t\tif cooper_config_file is None:\n\t\t\t\tprint(\"[+] Using the default config file: {}\".format(self.cooper_config_file))\n\t\t\telse:\n\t\t\t\tself.cooper_config_file = cooper_config_file\n\t\t\t\tprint(\"[+] Alternate config file identified: {}\".format(self.cooper_config_file))\n\t\texcept Exception as err:\n\t\t\tprint(\"[!] \")\n\t\t\tprint(\"L.. Details: {}\".format())\n\t\t# Open the config file for parsing\n\t\ttry:\n\t\t\tself.config_parser = configparser.ConfigParser()\n\t\t\tself.config_parser.read(self.cooper_config_file)\n\t\texcept Exception as err:\n\t\t\tprint(\"[!] Could not open the config file -- make sure it exists and is readable.\")\n\t\t\tprint(\"L.. 
Details: {}\".format(err))\n\t\t# Parse the config file's values\n\t\ttry:\n\t\t\tself.landing_page_url_replacement = self.config_section_map(\"Replacement URLs\")[\"landing_page_url_replacement\"]\n\t\t\tself.landing_page_form_action = self.config_section_map(\"Replacement URLs\")[\"landing_page_form_action\"]\n\t\t\tself.email_replacement_url = self.config_section_map(\"Replacement URLs\")[\"email_replacement_url\"]\n\t\t\tself.email_tracker_url = self.config_section_map(\"Replacement URLs\")[\"email_tracker_url\"]\n\t\t\tself.path_to_chromedriver = self.config_section_map(\"Browser\")[\"driver_path\"]\n\t\t\tif self.config_section_map(\"Browser\")[\"user_agent\"] == \"\":\n\t\t\t\tself.user_agent = \"(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6\"\n\t\t\telse:\n\t\t\t\tself.user_agent = self.config_section_map(\"Browser\")[\"user_agent\"]\n\t\texcept Exception as err:\n\t\t\tprint(\"[!] Failed to read all values from the config file! Exiting...\")\n\t\t\tprint(\"L.. Details: {}\".format(err))\n\t\t\tsys.exit()", "def __init__(self, appname):\n self.exepath = '%s' % (os.path.dirname(os.path.realpath(__file__)))\n self.cnfgfile = '%s/versions.cfg' % self.exepath\n self.static_path = '%s/app/static' % self.exepath\n self.config = ConfigParser.RawConfigParser()", "def __init__(self, filename):\r\n self._results = SpecParser(filename).parse()", "def __init__(self, filename):\n self._filename = filename\n pass", "def __init__(self, mbox_file, configs):\n self.filename = mbox_file\n if os.path.isfile(mbox_file):\n self.mbox = mailbox.mbox(mbox_file)\n else:\n raise IOError(\"Can't find that file\")\n\n self.config = dict()\n cfg_file = file(configs, 'r')\n for line in cfg_file:\n if line[0] != '#':\n parsed = line.strip().split(\"=\")\n if len(parsed) != 2:\n raise ValueError(\"Bad config file.\")\n else:\n self.config[parsed[0]] = parsed[1]\n self.messages = list()", "def init_config(filename):\n config = configparser.ConfigParser()\n try:\n with open(filename, 'r') as config_file:\n config.read(config_file)\n except EnvironmentError:\n print(\"FATAL ERROR: Failed to open config file at \" + filename)\n sys.exit(1)\n return config", "def __init__(self, conf=None):\n # set interpolation to None so you can supply filename template\n # that contain % to config.set\n conf = ConfigParser(strict=False,\n inline_comment_prefixes=(';',),\n interpolation=None) if (conf is None) else conf\n super().__init__(conf)\n self._cycle = None\n self._logger = logging.getLogger('metplus')\n # config.logger is called in wrappers, so set this name\n # so the code doesn't break\n self.logger = self._logger\n\n # get the OS environment and store it\n self.env = os.environ.copy()\n\n # add section to hold environment variables defined by the user\n self.add_section('user_env_vars')", "def __init__(self, cfg=None, **kwargs):\n self.__dir = KITConfig.configDir\n self.__cfgFile = \"\"\n\n self.__cfg = {}\n self.__default = KITConfig.defaultConfig\n\n self.__setupLogger()\n\n if cfg is not None:\n self.__cfgFile = cfg\n self.load(cfg)", "def __init__(self, config_path: str = \"config.json\"):\n # Change here if you want to relocate you config file\n self.config = {}\n self.load_configuration(config_path)\n self.app_name = self.config.get('app_name', self.APP_NAME)", "def __init__(self, filename, registry):\n self.filename = filename\n self.registry = registry", "def __init__(self, filename):\r\n\r\n self.filename = filename", "def __init__(self, settings_file_name):\n with 
open(settings_file_name, 'r') as f:\n # load config file\n self.settings = yaml.load(f)\n\n # get key values\n sit_names = self.settings[HNF.Consts.SIT_NAMES]\n row_action_names = self.settings[HNF.Consts.ROW_ACT_NAMES]\n column_action_names = self.settings[HNF.Consts.COL_ACT_NAMES]\n name = self.settings[HNF.Consts.NAME]\n\n # init HNG object\n self.HNFOut = HNF.HNFInstance(sit_names, row_action_names, column_action_names, name)\n\n # set the values found in the settings\n self.__initFromFile()\n\n # calc the summary and expected utility\n self.HNFOut.initSummaryBelief()\n self.HNFOut.initExpectedUtility()\n self.HNFOut.calcHypergameExpectedUtility()\n self.HNFOut.calcModelingOpponentUtility()", "def __init__(self, filename=None):\n if filename:\n dict.__init__(self, self.read_file(filename))\n else:\n dict.__init__(self)\n self.new_plist()", "def __init__(self, filename=None):\n self._filename = filename", "def __init__(self, filename):\n self.from_file(filename)\n self.parse_cell()\n self.parse_atom()\n self.apply_symops()", "def __init__(self, cfg_path):\n\t\tself.cfg_path = cfg_path\n\t\tself.cfg_root = self.load_cfg(self.cfg_path)", "def __init__(self):\n if not os.path.isfile(CONFIG_FILENAME):\n first_time_run()\n raise SystemExit()\n\n \"\"\"\n Init file is present, read and parse it:\n \"\"\"\n conf = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation(), inline_comment_prefixes='#')\n conf.read(CONFIG_FILENAME)\n \"\"\"\n Process certain paths:\n \"\"\"\n\n path = conf['Paths']\n self.ffc_dir = path['FlatFieldCalDir']\n self.capture_dir = path['CaptureDir']\n self.image_dir = path['ImageDir']\n\n \"\"\"\n Process options\n \"\"\"\n\n self.cal_auto_save = conf.getboolean('Options', 'CalAutoSave', fallback=True)\n self.cal_auto_load = conf.getboolean('Options', 'CalAutoLoad', fallback=True)\n self.sound_on_capture = conf.getboolean('Options', 'SoundOnCapture', fallback=True)\n self.exp_init1 = conf.getint('Options', 'ExpInit1', fallback=100)\n self.exp_init2 = conf.getint('Options', 'ExpInit2', fallback=100)\n self.black_correct = conf.getboolean('Options', 'BlackCorrect', fallback=True)\n # Setup square window, default of full-screen height\n self.tiff_seq_x_window = conf.getint('Options', 'TiffSeqXWindow', fallback=cameras.FRAME_HEIGHT)\n self.tiff_seq_y_window = conf.getint('Options', 'TiffSeqYWindow', fallback=cameras.FRAME_HEIGHT)\n self.tiff_seq_rebin = conf.getint('Options', 'TiffSeqRebin', fallback = 2)", "def __init__(self):\n self.storefn = Config.getConfigFnPath()\n\n # Load the configuration file file\n self.load()", "def __init__(self, environment):\n with open('config.json') as f:\n self.config = eval(f.read())\n self.config = self.config[environment]", "def __init__(self, config):\n\n self.root = config.root\n self.pidfile = config.pidfile\n self.log_conf = config.logging", "def _get_config(self, unit, filename):\n file_contents = unit.file_contents(filename)\n config = ConfigParser.ConfigParser()\n config.readfp(io.StringIO(file_contents))\n return config", "def __init__(self, config_path, normalize=False):\n self.config = {}\n _config_dict = {}\n self._config_path = Utils.expand_path(config_path)\n self.update = None\n self.normalize = normalize", "def load_configuration(self) -> None:\n config_file = self.default_config_file\n if self.config_file:\n config_file = self.config_file\n self.config = configparser.ConfigParser(delimiters=\"=\")\n # mypy is unhappy with us assigning to a method - (monkeypatching?)\n self.config.optionxform = lambda 
option: option # type: ignore\n self.config.read(config_file)", "def __init__(self, configfile='settings.cfg'):\n \n self.configfile = configfile\n \n # Load parameters from config file\n config = ConfigParser.RawConfigParser()\n config.read(self.configfile)\n \n # Set parameters to default if not in config file \n self.title=config.get('Settings','title') if config.has_option(\n 'Settings','title') else 'REDPy Catalog'\n self.filename=config.get('Settings','filename') if config.has_option(\n 'Settings','filename') else 'redpytable.h5'\n self.groupName=config.get('Settings','groupName') if config.has_option(\n 'Settings','groupName') else 'default'\n self.groupDesc=config.get('Settings','groupDesc') if config.has_option(\n 'Settings','groupDesc') else 'Default Test Run'\n self.nsta=config.getint('Settings','nsta') if config.has_option(\n 'Settings','nsta') else 8 \n self.station=config.get('Settings','station') if config.has_option(\n 'Settings','station') else 'SEP,YEL,HSR,SHW,EDM,STD,JUN,SOS'\n self.channel=config.get('Settings','channel') if config.has_option(\n 'Settings','channel') else 'EHZ,EHZ,EHZ,EHZ,EHZ,EHZ,EHZ,EHZ'\n self.network=config.get('Settings','network') if config.has_option(\n 'Settings','network') else 'UW,UW,UW,UW,UW,UW,UW,UW'\n self.location=config.get('Settings','location') if config.has_option(\n 'Settings','location') else '--,--,--,--,--,--,--,--'\n self.samprate=config.getfloat('Settings','samprate') if config.has_option(\n 'Settings','samprate') else 100.\n self.nstaC=config.getint('Settings','nstaC') if config.has_option(\n 'Settings','nstaC') else 5\n self.printsta=config.getint('Settings','printsta') if config.has_option(\n 'Settings','printsta') else 2\n self.server=config.get('Settings','server') if config.has_option(\n 'Settings','server') else 'IRIS'\n self.port=config.getint('Settings','port') if config.has_option(\n 'Settings','port') else 16017\n self.nsec=config.getint('Settings','nsec') if config.has_option(\n 'Settings','nsec') else 3600\n self.lwin=config.getfloat('Settings','lwin') if config.has_option(\n 'Settings','lwin') else 7.\n self.swin=config.getfloat('Settings','swin') if config.has_option(\n 'Settings','swin') else 0.8\n self.trigon=config.getfloat('Settings','trigon') if config.has_option(\n 'Settings','trigon') else 3.\n self.trigoff=config.getfloat('Settings','trigoff') if config.has_option(\n 'Settings','trigoff') else 2.\n self.kurtmax=config.getfloat('Settings','kurtmax') if config.has_option(\n 'Settings','kurtmax') else 80.\n self.kurtfmax=config.getfloat('Settings','kurtfmax') if config.has_option(\n 'Settings','kurtfmax') else 150.\n self.oratiomax=config.getfloat('Settings','oratiomax') if config.has_option(\n 'Settings','oratiomax') else 0.06\n self.kurtwin=config.getfloat('Settings','kurtwin') if config.has_option(\n 'Settings','kurtwin') else 5.\n self.winlen=config.getint('Settings','winlen') if config.has_option(\n 'Settings','winlen') else 1024\n self.fmin=config.getfloat('Settings','fmin') if config.has_option(\n 'Settings','fmin') else 1.\n self.fmax=config.getfloat('Settings','fmax') if config.has_option(\n 'Settings','fmax') else 10.\n self.filomin=config.getfloat('Settings','filomin') if config.has_option(\n 'Settings','filomin') else 1.\n self.filomax=config.getfloat('Settings','filomax') if config.has_option(\n 'Settings','filomax') else 2.5\n self.fiupmin=config.getfloat('Settings','fiupmin') if config.has_option(\n 'Settings','fiupmin') else 5.\n self.fiupmax=config.getfloat('Settings','fiupmax') if 
config.has_option(\n 'Settings','fiupmax') else 10.\n self.telefi=config.getfloat('Settings','telefi') if config.has_option(\n 'Settings','telefi') else -1.\n self.teleok=config.getint('Settings','teleok') if config.has_option(\n 'Settings','teleok') else 1 \n self.cmin=config.getfloat('Settings','cmin') if config.has_option(\n 'Settings','cmin') else 0.7\n self.ncor=config.getint('Settings','ncor') if config.has_option(\n 'Settings','ncor') else 4\n self.minorph=config.getfloat('Settings','minorph') if config.has_option(\n 'Settings','minorph') else 0.05\n self.maxorph=config.getfloat('Settings','maxorph') if config.has_option(\n 'Settings','maxorph') else 7.\n self.minplot=config.getint('Settings','minplot') if config.has_option(\n 'Settings','minplot') else 3\n self.dybin=config.getfloat('Settings','dybin') if config.has_option(\n 'Settings','dybin') else 1.\n self.hrbin=config.getfloat('Settings','hrbin') if config.has_option(\n 'Settings','hrbin') else 1.\n self.recplot=config.getfloat('Settings','recplot') if config.has_option(\n 'Settings','recplot') else 14.\n \n # Derived Settings\n self.ptrig=1.5*self.winlen/self.samprate\n self.atrig=3*self.winlen/self.samprate\n self.mintrig=self.winlen/self.samprate\n self.wshape = int((self.ptrig + self.atrig)*self.samprate) + 1", "def __init__(self, path_to_the_file):", "def __init__(self, path_to_the_file):", "def initialize_from_config(self):", "def __init__(self, defaults=None):\n self._configFileName = None\n if defaults is None:\n self._defaults = ConfigDict()\n else:\n self._defaults = ConfigDict(copy.deepcopy(defaults))\n self.importedFiles = []\n self.includedFiles = []\n self.missingFiles = []", "def __init__(self, environment='develop'):\n\n cwd = path.dirname(path.abspath(__file__))\n config_dir = path.join(cwd, 'configs')\n\n config_files = []\n for (root, _, file_names) in walk(config_dir):\n for file_name in file_names:\n config_files.append(path.join(root, file_name))\n config_files = sorted(config_files)\n\n for config_file in config_files:\n config = anyconfig.load(config_file)\n for key in config:\n self[key] = config[key]\n\n if environment in config_file:\n break", "def __init__(self, filename=None):\n self.content = dict()\n if filename and os.path.exists(filename):\n self.parse(filename)\n elif filename:\n self.new(filename)", "def __init__(self, config: str) -> None:\n self.configuration = config", "def __init__(self, config: str) -> None:\n self.configuration = config", "def read_config(self, config_filename):" ]
[ "0.8556377", "0.8332688", "0.83071285", "0.83033603", "0.80947435", "0.80304974", "0.79447347", "0.7944664", "0.7938114", "0.79321945", "0.7806958", "0.77945864", "0.7792556", "0.777831", "0.7728286", "0.7725837", "0.77198017", "0.7715732", "0.7694171", "0.768226", "0.76366454", "0.7546511", "0.75385946", "0.75208086", "0.74739563", "0.74458665", "0.73782194", "0.73237413", "0.7302583", "0.72780174", "0.72639775", "0.7252057", "0.72406894", "0.7188388", "0.71810293", "0.7151967", "0.713625", "0.71339357", "0.7132767", "0.7095283", "0.7095253", "0.70670336", "0.7056573", "0.70538825", "0.7051931", "0.7040554", "0.70366794", "0.7032364", "0.7027211", "0.7008821", "0.7001761", "0.69970655", "0.6974757", "0.6970606", "0.69627404", "0.6956133", "0.69378626", "0.693469", "0.6905583", "0.6901588", "0.6898062", "0.6897036", "0.68939626", "0.68938994", "0.6881193", "0.6867148", "0.6865259", "0.6853059", "0.68496436", "0.6843709", "0.6839659", "0.68377066", "0.6837166", "0.68365985", "0.68356586", "0.6830307", "0.68239677", "0.68226403", "0.68214494", "0.6821217", "0.68175465", "0.6813035", "0.68124145", "0.6788772", "0.6775822", "0.6774666", "0.6761457", "0.67582625", "0.6744345", "0.67383134", "0.6724691", "0.6720834", "0.6720834", "0.6708601", "0.67050993", "0.6700591", "0.6695053", "0.6688623", "0.6688623", "0.6687822" ]
0.792374
10
set_property(self, prop_name, prop_value) Set a config property to a new value. Checks to ensure that prop_name refers to a valid property, and prop_value is a valid value for that property
def set_property(self, prop_name, prop_value):
    if prop_name == "database_name":
        if (prop_value and isinstance(prop_value, str) and prop_value[-3:] == ".db"):
            self.config["config"]["database_name"] = prop_value
            self.database_name = prop_value
    elif prop_name == "volume_limit":
        if isinstance(prop_value, int) and prop_value > 0:
            self.config["config"]["volume_limit"] = str(prop_value)
            self.volume_limit = prop_value
    elif prop_name == "series_per_page":
        if isinstance(prop_value, int) and prop_value >= 0:
            self.config["config"]["series_per_page"] = str(prop_value)
            self.series_per_page = prop_value
    elif prop_name == "compact_list":
        if ((isinstance(prop_value, int) and prop_value in [0, 1]) or isinstance(prop_value, bool)):
            self.config["config"]["compact_list"] = str(prop_value)
            self.compact_list = prop_value
    elif prop_name == "show_empty_series":
        if ((isinstance(prop_value, int) and prop_value in [0, 1]) or isinstance(prop_value, bool)):
            self.config["config"]["show_empty_series"] = str(prop_value)
            self.show_empty_series = prop_value
    elif prop_name == "default_to_gui":
        if ((isinstance(prop_value, int) and prop_value in [0, 1]) or isinstance(prop_value, bool)):
            self.config["config"]["default_to_gui"] = str(prop_value)
            self.default_to_gui = prop_value

    with open(self.filename, 'w') as config_ini:
        self.config.write(config_ini)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SetProp(self, name, value):\n self._props[name] = value\n self._changed(name, value)", "def set_prop(prop, value, config_type=\"\", config_path=\"\"):\n\n path = get_config_path(config_type, config_path)\n set_property_value(prop, value, path)", "def set_property(self, name, value):\n self.properties[name] = value", "def do_set_property(self, spec, value):\n attribute = self.find_attribute(spec.name)\n if attribute is not None and isinstance(attribute, property):\n attribute.fset(self, value)\n else:\n raise ValueError(\"No such property\", spec.name)", "def set_property(self, key, value):\n if not key:\n raise Exception('A key should be provided.')\n if not value:\n raise Exception('A value should be provided.')\n self._properties[key] = value", "def setprop(name, value):\n _slp.setprop(name, value)", "def set_property(self, name, prop_name, prop_value, ignore_items=False):\n valid_props = ['base_text', '_no_valid_items', '_no_valid_values']\n if prop_name not in valid_props:\n raise ValueError(\"'prop_name' must be one of {}\".format(valid_props))\n self._set_property(name, prop_name, prop_value, ignore_items)\n return None", "def SetProperty(self, propertyname, value):\n if self.serviceimplementation == 'basic':\n flag = self.vbLet\n if isinstance(value, datetime.datetime):\n value = SFScriptForge.SF_Basic.CDateToUnoDateTime(value)\n flag += self.flgDateArg\n if repr(type(value)) == \"<class 'pyuno'>\":\n flag += self.flgUno\n return self.EXEC(self.objectreference, flag, propertyname, value)", "def set_role_property(self, obj, property_name, property_value): # pylint: disable=no-self-use\n if isinstance(obj, dict):\n if 'properties' in obj:\n obj = obj['properties']\n obj[property_name] = property_value\n else:\n if hasattr(obj, 'properties'):\n obj = obj.properties\n obj.property_name = property_value", "def setProp(self, name, value):\n ret = libxml2mod.xmlSetProp(self._o, name, value)\n if ret is None:raise treeError('xmlSetProp() failed')\n __tmp = xmlAttr(_obj=ret)\n return __tmp", "def set_property_value(prop, value, path):\n\n if not isfile(path):\n raise ConfigFileNotCreatedException(path)\n\n content = \"\"\n updated = False\n\n try:\n with open(path, \"r+\") as f:\n for line in f:\n m = _search_pattern.search(line)\n\n if m:\n result = m.groupdict()\n\n # check if the found property matches with the wanted property\n if result[\"key\"] == prop:\n content += \"{}={}\\n\".format(prop, value)\n updated = True\n continue\n\n content += line\n\n # if the last character is a newline, remove it\n if len(content) and content[-1] == \"\\n\":\n content = content[:-1]\n\n # create a new entry if we property does not yet exist\n if not updated:\n content += \"\\n{}={}\".format(prop, value)\n\n # overwrite old config file content\n f.seek(0)\n f.truncate()\n f.write(content)\n except PermissionError:\n raise ConfigFilePermissionErrorException(path)", "def set_user_property(self, property, value):\n self._user_properties.update({property: value})", "def set(self, attr_name, value):\n\n get_logger().debug('Going to set XML attribute property: %s', attr_name)\n\n if self.textproperty is not None:\n # TODO: potentially programming error\n raise MarshallingError()\n\n try:\n # pylint: disable=unnecessary-dunder-call\n self.attributes[attr_name].__set__(self.obj, value)\n get_logger().debug('Set XML attribute property: %s', attr_name)\n except AttributeError as ex:\n get_logger().error('XML property %s: %s', attr_name, str(ex))\n except KeyError:\n get_logger().debug('Not an XML attribute 
property: %s', attr_name)", "def set_property(self, key: str, value: Any):\n if not key:\n raise ValueError(\"Key is required\")\n\n if value is None:\n raise ValueError(\"Value is required\")\n\n self._node[\"app_data\"][\"properties\"][key] = value", "def add_property(self, name, value=None):\n self.properties[name] = value", "def set_property(key, value):\n return impl.set_property(**locals())", "def set(self, database_name, property_, value):\n return isempty(self._send_command(database_name, \"%s=%s\" % (property_, value)))", "def set_property(self, key, value):\n self.properties[key] = value\n return self.properties[key]", "def prop_set(prop, value, extra_args=None, cibfile=None):\n return item_create(\n item=\"property\",\n item_id=\"{}={}\".format(prop, value),\n item_type=None,\n create=\"set\",\n extra_args=extra_args,\n cibfile=cibfile,\n )", "def do_set_property(self, pspec, val):\n # FIXME: need an asynchronous API to set these properties,\n # particularly 'private'\n\n if pspec.name == 'name':\n self._name = val\n elif pspec.name == 'color':\n self._color = val\n elif pspec.name == 'tags':\n self._tags = val\n elif pspec.name == 'private':\n self._private = val\n else:\n raise ValueError('Unknown property %r', pspec.name)\n\n self._publish_properties()", "def set_property(self, name, value, persist_changes=True):\n self._properties_metadata[name] = {'readonly': not persist_changes}\n self._properties[name] = value", "def set_property(wellorcontainer, property_name, value):\n wells = convert_to_wellgroup(wellorcontainer)\n \n if not isinstance(value, str):\n value = str(value)\n \n for well in wells:\n assert isinstance(well, Well)\n well.properties[property_name] = value", "def setDynamicProperty(self, widget, property_name, property_value):\n\n\t\twidget.setProperty(property_name, property_value)\n\t\twidget.style().unpolish(widget)\n\t\twidget.style().polish(widget)", "def put_prop(self, obj_type, obj_id, prop_name, value):\n ierr = exolib.py_expp(self.exoid, obj_type, obj_id, prop_name, value)\n if ierr:\n raise ExodusIIWriterError(\"Error putting prop value\")", "def addPropertie(self, propname, value):\n if isinstance(propname, types.IntType) or isinstance(propname, types.StringType):\n self.properties[propname] = value\n else:\n raise Exception(\"la propiedad debe ser de tipo int o string\")", "def __setattr__(self, name, value):\n if self.serviceimplementation == 'basic':\n if name in ('serviceproperties', 'localProperties', 'internal_attributes', 'propertysynonyms',\n 'forceGetProperty'):\n pass\n elif name[0:2] == '__' or name in self.internal_attributes or name in self.localProperties:\n pass\n elif name in self.serviceproperties or name in self.propertysynonyms:\n if name in self.propertysynonyms: # Reset real name if argument provided in lower or camel case\n name = self.propertysynonyms[name]\n if self.internal: # internal = True forces property local setting even if property is read-only\n pass\n elif self.serviceproperties[name] is True: # True == Editable\n self.SetProperty(name, value)\n return\n else:\n raise AttributeError(\n \"type object '\" + self.objecttype + \"' has no editable property '\" + name + \"'\")\n else:\n raise AttributeError(\"type object '\" + self.objecttype + \"' has no property '\" + name + \"'\")\n object.__setattr__(self, name, value)\n return", "def changeProperty(self, node, name, propertyName, value, setIfNotExist=False):", "def set_property_value(self, name, value, dry_run=False):\n raise DAVError(\n HTTP_FORBIDDEN, 
err_condition=PRECONDITION_CODE_ProtectedProperty\n )", "def set_property(self,obj_property,value):\n existing_properties = self.get_property(obj_property)\n if existing_properties is None:\n if type(value) == list:\n if len(value) == 1:\n self.redis_server.hset(self.frbr_key,\n obj_property,\n value[0])\n else:\n new_redis_key = \"%s:%s\" % (self.frbr_key,\n obj_property)\n for row in value:\n self.redis_server.sadd(new_redis_key,\n row)\n self.redis_server.hset(self.frbr_key,\n obj_property,\n new_redis_key)\n else:\n self.redis_server.hset(self.frbr_key,\n obj_property,\n value)", "def setProperty(self, path, key, value):\n \n try:\n self._client.propset(key, value, self._workingCopyPath + path)\n self.checkin(path)\n except ClientError, error:\n raise SubversionError(error)", "def add_property_setting(self, key, prop):\n\n setting = self.settings().new_property(key, prop)\n self._add_to_list_field(\"settings\", setting)", "def run_setprop(self, expanded, unexpanded) :\n\t\toptions, args = self.getopt([\"name=\", \"value=\"], expanded)\n\t\tif (options is None) and (args is None) :\n\t\t\treturn -1\t# message was already displayed in self.getopt()\n\t\tif not args :\n\t\t\treturn self.errormessage(\"Needs at least one object to change this property\")\n\t\tif not (options.has_key(\"name\") and options.has_key(\"value\")) :\n\t\t\treturn self.errormessage(\"You must supply a property name and value\")\n\t\tproperty = options[\"name\"]\n\t\tif options.has_key(\"value\") :\n\t\t\tpropvalue = options[\"value\"]\n\t\t\ttry :\n\t\t\t\t# maybe it's a list in a string, e.g. \"['e', 'f']\"\n\t\t\t\t# or something like that\n\t\t\t\tnewvalue = eval(propvalue)\n\t\t\t\tif (type(newvalue) != type(0)) and (type(newvalue) != type(0.0)) :\n\t\t\t\t\t# we mustn't convert numeric to string\n\t\t\t\t\tpropvalue = newvalue\n\t\t\texcept NameError :\n\t\t\t\tpass\t# normal string\n\t\telse :\n\t\t\tpropvalue = \"\"\n\n\t\tstatus = 0\n\t\tfor arg in args :\n\t\t\tobject = self.toObject(self.__context, arg)\n\t\t\tif object is not None :\n\t\t\t\tif not self.HasPerms(object, 'Manage properties') :\n\t\t\t\t\tstatus = status - 1\n\t\t\t\telif hasattr(object, 'hasProperty') :\n\t\t\t\t\tif not object.hasProperty(property) :\n\t\t\t\t\t\tstatus = status + self.errormessage(\"Object %s has no property %s\" % (self.ObjectPath(object), property))\n\t\t\t\t\telse :\n\t\t\t\t\t\t# in the following lines the absence of a _updateProperty\n\t\t\t\t\t\t# attribute indicates an object without properties (e.g. 
a method)\n\t\t\t\t\t\t# which indicates an object for which setting properties is a nonsense\n\t\t\t\t\t\tif hasattr(object, \"_updateProperty\") :\n\t\t\t\t\t\t\tobject._updateProperty(property, propvalue)\n\t\t\t\t\t\t\tself.htmlmessage(\"Object %s property %s was modified to %s\" % (self.ObjectPath(object), property, str(propvalue)))\n\t\treturn status", "def put_prop(self, obj_type, obj_id, prop_name, value):\n ierr = exolib.py_expp(self.exoid, obj_type, obj_id, prop_name, value)\n if ierr:\n raise ExodusIIWriterError(\"Error putting prop\")", "def persist_cluster_config_environment_prop(cluster_config, property_name, value, property_name_check=True):\n\n if property_name_check is True:\n valid_props = [\"cbs_ssl_enabled\", \"xattrs_enabled\", \"sg_lb_enabled\", \"sync_gateway_version\", \"server_version\",\n \"no_conflicts_enabled\", \"sync_gateway_ssl\", \"sg_use_views\", \"number_replicas\",\n \"delta_sync_enabled\", \"x509_certs\", \"hide_product_version\", \"cbs_developer_preview\", \"disable_persistent_config\",\n \"server_tls_skip_verify\", \"disable_tls_server\", \"disable_admin_auth\", \"trace_logs\"]\n if property_name not in valid_props:\n raise ProvisioningError(\"Make sure the property you are trying to change is one of: {}\".format(valid_props))\n\n # Write property = value in the cluster_config.json\n cluster_config_json = \"{}.json\".format(cluster_config)\n with open(cluster_config_json) as f:\n cluster = json.loads(f.read())\n\n cluster[\"environment\"][property_name] = value\n with open(cluster_config_json, \"w\") as f:\n json.dump(cluster, f, indent=4)\n\n # Write [section] property = value in the cluster_config\n config = CustomConfigParser()\n config.read(cluster_config)\n config.set('environment', property_name, str(value))\n\n with open(cluster_config, 'w') as f:\n config.write(f)", "def set(settingName, value):\n if settingName not in _loaded:\n raise ValueError(\"Specified configuration setting \\\"%s\\\" does not exist\" % settingName)\n \n _loaded[settingName] = value", "def set(node_key:str, property_name:str, value):\r\n node_names = split_node_key(node_key)\r\n node = root\r\n for node_name in node_names:\r\n node = node.nodes[node_name]\r\n node.properties[property_name] = value", "def set(self, key: str, value) -> 'Property':\n\n if not hasattr(self, key):\n if key in LIMB_KEY_LOOKUP:\n key = LIMB_KEY_LOOKUP[key]\n else:\n raise KeyError('\"{}\" not a valid Property key'.format(key))\n setattr(self, key, value)\n\n return self", "def set_property(self, client, key, value):\r\n client.setProperty(key, value)\r\n return True", "def properties_set(self, properties):\n self._put('properties', properties)", "def XPSetWidgetProperty(inWidget, inProperty, inValue):\n pass", "def SetWirelessProperty(self, networkid, prop, value):\n if (prop.strip()).endswith(\"script\"):\n print \"Setting script properties through the daemon is not\" \\\n + \" permitted.\"\n return False\n self.LastScan[networkid][prop] = misc.Noneify(value)", "def _update_property(self, field, value):\n if value is not None:\n self.properties[field] = value\n elif field in self.properties:\n del self.properties[field]", "def set(self, prop, value):\r\n\r\n prop_parts = prop.split(\".\")\r\n if self.copy_dict:\r\n new_dict = copy.deepcopy(self.obj)\r\n else:\r\n new_dict = self.obj\r\n pointer = None\r\n parts_length = len(prop_parts) - 1\r\n for i, part in enumerate(prop_parts):\r\n if pointer is None and i == parts_length:\r\n new_dict[part] = value\r\n elif pointer is None:\r\n pointer = 
new_dict.get(part)\r\n elif i == parts_length:\r\n pointer[part] = value\r\n else:\r\n pointer = pointer.get(part)\r\n return new_dict", "def __setitem__(self, name, value):\n datastore_types.ValidateProperty(name, value)\n dict.__setitem__(self, name, value)", "def config_set(self, name, value):\n self.redis_config[name] = value", "def object_property_name(self, object_property_name):\n\n self._object_property_name = object_property_name", "def _set_property(self, name, prop_name, prop_value, ignore_items=False):\n prop_update = {prop_name: prop_value}\n for n in name:\n collection = 'masks' if self.is_array(n) else 'columns'\n if not 'properties' in self._meta[collection][n]:\n self._meta[collection][n]['properties'] = {}\n self._meta[collection][n]['properties'].update(prop_update)\n if ignore_items: continue\n for s in self.sources(n):\n self._set_property(s, prop_name, prop_value)\n return None", "def property_name(self, name: str) -> None:\n name = str(name)\n if len(name) > 100:\n name = name[:100]\n self.prop_name = name", "def autoprops_generated_setter(self, **kwargs):\n setattr(self, private_property_name, kwargs[property_name])", "def set(self, key, value):\n if key == key.upper():\n if key not in self.config:\n self.__log(f'Error when setting configuration variable \"{key}\", it does not exist', 'error')\n raise KeyError\n self.__log(f'Setting configuration variable \"{key}\" to \"{value}\"')\n self.config[key] = value\n else:\n if key not in self.options:\n self.__log(f'Error when setting option \"{key}\", it does not exist.', 'error')\n raise KeyError\n self.__log(f'Setting option \"{key}\" to \"{value}\"')\n self.options[key] = value", "def property_id(self, property_id):\n\n self._property_id = property_id", "def SetWiredProperty(self, prop, value):\n if self.WiredNetwork:\n if (prop.strip()).endswith(\"script\"):\n print \"Setting script properties through the daemon\" \\\n + \" is not permitted.\"\n return False\n self.WiredNetwork[prop] = misc.Noneify(value)\n return True\n else:\n print 'SetWiredProperty: WiredNetwork does not exist'\n return False", "def migrate_property ( self, name, property, property_info, class_dict ):\n get = _property_method( class_dict, '_get_' + name )\n set = _property_method( class_dict, '_set_' + name )\n val = _property_method( class_dict,\n '_validate_' + name )\n if ((get is not None) or (set is not None) or (val is not None)):\n old_get, old_set, old_val = property_info\n return Property( get or old_get, set or old_set, val or old_val,\n True, **property.__dict__ )\n return property", "def __process_property(self, element, property_name):\n try:\n expected_value = element.properties[property_name]\n except KeyError:\n return\n\n value_changed = self.__check_property_changed(element, property_name)\n if isinstance(expected_value, svgelements.svgelements.Color):\n if value_changed or expected_value.opacity:\n self.path_began = False\n elif not value_changed:\n return\n\n if property_name == 'fill':\n if value_changed:\n self.generator.fill_color(expected_value)\n if expected_value.opacity:\n self.generator.fill()\n elif property_name == 'linecap':\n self.generator.line_cap(expected_value)\n elif property_name == 'linejoin':\n self.generator.line_join(expected_value)\n elif property_name == 'miterlimit':\n self.generator.miter_limit(expected_value)\n elif property_name == 'stroke':\n if value_changed:\n self.generator.stroke_color(expected_value)\n if expected_value.opacity:\n self.generator.stroke()\n elif property_name == 
'stroke_width':\n self.generator.stroke_width(expected_value)\n elif property_name == 'transform':\n self.__save(element)\n self.generator.transform(expected_value[0], expected_value[1],\n expected_value[2], expected_value[3],\n expected_value[4], expected_value[5])\n\n self.properties[-1][property_name] = expected_value", "def set(key: str, value: Any):\n _check_key(key)\n config = _read_config()\n config[key] = value\n _write_config(config)", "def __setattr__(self, name, value):\n # Can't set namespace variables\n if name.startswith('_'):\n raise ValueError('Settings cannot start with an underscore')\n\n if name in self._settings:\n # Set an existing setting's value\n if isinstance(value, Setting):\n raise ValueError('Settings cannot be redefined')\n self._settings[name].set(value)\n else:\n # Create a new setting\n if not isinstance(value, Setting):\n raise ValueError(\n 'Settings must be defined before they can be assigned',\n )\n self._settings[name] = value", "def _write_node_property(self, new_property, property_name):\n self.graph.vertex_properties[property_name] = new_property", "def __setattr__(self,name,value):\n # Assignment during initialisation\n if not self.__dict__.has_key('_PreferencesSection__initialised'):\n self.__dict__[name]=value\n return self.__dict__[name]\n # Assignment after initialisation\n if name not in ['_PreferencesSection__section','_PreferencesSection__options',\n '_PreferencesSection__config','_PreferencesSection__initialised','__get_option','__set_option']:\n # Set option value\n return self.set_option(name,value)\n else:\n # Call original __setattr__ method\n raise Exception('(EVOGTK - Preferences Helper) Trying to set protected \"%s\" property' % name)", "def update(name, value, config_dir=None):\n if name not in Config.__ALLOWED:\n msg = f'Cannot update configuration; value \"{name}\" is not allowed.'\n raise ConfigurationError(msg)\n config_dir = Config.resolve_config_dir(config_dir)\n config_dat, config_file = Config.get_config_file(\n config_dir,\n round_trip_load=True,\n quiet=True,\n )\n config_dat.update({name: value})\n Config.write_config_file(config_dat, config_file)\n if Config.is_set:\n Config.__conf[name] = value", "def _setPropertyValue(self, name, value, typeString = ''):\n method = getattr(self.__class__, \"_setPropertyValue\" + getTypeString(value))\n return method(self, name, value, typeString)", "def test_set_property_success(self):\r\n self.config.option1 = 9001\r\n self.assertEqual(self.config.values['option1'], 9001)\r\n\r\n self.config.option2 = 'bar'\r\n self.assertEqual(self.config.values['option2'], 'bar')", "def setProperty(self, newProp, objectID=0):\n\n # Set the new property to container\n key = (newProp.getPropertyID(),\n newProp.getObjectID(),\n newProp.getTime())\n self.properties.set_value(key, newProp)", "def set(self, name, value):\n pass", "async def set_property(self, product_type: ProductType, serial_no: str, name: str, value: Any) -> None:\n await self._send_message_get_response(OutgoingMessage(OutgoingMessageType.set_property, domain=product_type.name, serial_no=serial_no, name=name, value=value))", "def set_value(self, section_name: str, attr_name: str, value: str) -> None:\n config = ConfigParser(allow_no_value=True)\n if value is not None:\n value = str(value)\n if self.is_section_exist(section_name=section_name) is True:\n if self.is_attr_exist(section_name=section_name, attr_name=attr_name) is True:\n config.read(self.connection_string)\n for section in config.sections():\n if section.lower().replace(' ', 
'_') == section_name.lower().replace(' ', '_'):\n for attr in config[section]:\n if attr.lower().replace(' ', '_') == attr_name.lower().replace(' ', '_'):\n config.set(section=section, option=attr, value=value)\n with open(file=self.connection_string, mode='w') as file:\n config.write(fp=file)\n else:\n self.add_attr(section_name=section_name, attr_name=attr_name, value=value)", "def test_set_new_property():\n\n value = '1'\n contents = (\"[info]\\n\"\n \"real = not_real\")\n\n testutils.deploy_config_raw(contents)\n\n prop.set_prop('info', 'sdk', value)\n assert prop.get_prop('info', 'sdk') == value\n\n testutils.undeploy()", "def set_value(self, _, prop_value_pair):\n (prop, val) = prop_value_pair\n\n _, _, obj = self.popup_data\n if not isinstance(obj, value_model.Value):\n raise TypeError(\"Expected %s\" % type(value_model.Value))\n\n if not prop == obj.parent:\n raise ValueError(\"Property '%s' is not the parent of '%s'\" % (prop, obj))\n\n # To enable undo redo for this we need a bit of trickery\n new_prop = prop.clone(keep_id=True)\n create_pseudo_values([new_prop])\n if new_prop.pseudo_values[obj.index].pseudo_values != obj.pseudo_values:\n raise ValueError(\"Cannot find replacement value\")\n\n # Update the value in the new property\n new_prop.pseudo_values[obj.index].pseudo_values = val\n\n # Lets replace the old property with the new and updated one\n cmd = commands.ReplaceObject(obj=prop, repl=new_prop)\n self.execute(cmd)\n\n # Reset the view to make sure the changes are properly displayed.\n self.select_object(new_prop)\n self.reset_value_view(None)", "def setValue(self, valueName, valueSetting):\n\t\tself.settings[valueName][0] = valueSetting", "def test_set_existing_property():\n\n value = 'new'\n\n contents = (\"[Info]\\n\"\n \"sdk = old\")\n\n testutils.deploy_config_raw(contents)\n\n prop.set_prop('info', 'sdk', value)\n assert prop.get_prop('info', 'sdk') == value\n\n testutils.undeploy()\n\n return 0", "def update(self, **kwargs):\n for key, value in kwargs.items():\n try:\n is_property = isinstance(getattr(self.__class__, key), property)\n except AttributeError:\n continue\n\n if is_property:\n setattr(self, key, value)", "def write_property(self, td, name, value, timeout=None):\n\n raise NotImplementedError()", "def SetParserProp(self, prop, value):\n ret = libxml2mod.xmlTextReaderSetParserProp(self._o, prop, value)\n return ret", "def set_config(self, attr, value):\n setattr(self.config, attr, value)", "def set_config(self, attr, value):\n setattr(self.config, attr, value)", "def setconfig(self, key, value):\n self.config[key] = value", "def __setitem__(self, name : str, value : object) -> None:\n self._client.set_config(name, value)", "def SetProperties(self, prop_lst):\n # Parses Property list, ignoring all bad values\n for prop in prop_lst:\n if len(prop) != 2:\n continue\n else:\n if not isinstance(prop[0], basestring) or not \\\n isinstance(prop[1], basestring):\n continue\n else:\n self.SetProperty(prop[0], prop[1])\n return True", "def set(self, properties):\n raise NotImplementedError", "def set_attribute(self, name, value):\n setattr(self, '%s__' % name, value_or_none(value))", "def declare_property(self, name): # noqa: E501 # The links in the docstring above are too long to wrap to 120 chars.\n\n self.forced_properties.add(name)", "def set(self, attribute_name, value_name):\n self.__set[attribute_name] = value_name\n return self", "def set_attribute(self, name, value):\n\n pass", "def __setattr__(self, name, value):\n self.set(**{name: value})", "def 
set_property(self, vtk_property):\n self.vtk_act.SetProperty(vtk_property)", "def set(self, name, val, section=section_default):\n if section not in self.config.sections():\n self.config.add_section(section)\n\n self.config.set(section, name, val)\n self.save()", "def set_system_config_property(connection, config_key, value):\n\n body = {\n 'key': config_key,\n 'value': value,\n }\n response = connection.post_obj_as_json('system/config', body).json()\n config_value = response.get('result')\n if config_value is None:\n raise SAPCliError(\"gCTS response does not contain 'result'\")\n\n return config_value", "def __setattr__(self, key, value):\n self._config.__setitem__(key, value)", "def propset(self, name, value, *args):\r\n d = py.path.local.mkdtemp() \r\n try: \r\n p = d.join('value') \r\n p.write(value) \r\n self._svn('propset', name, '--file', str(p), *args)\r\n finally: \r\n d.remove()", "def set_variable(self, name, value):\n self.send_to_backend('set', name=name, value=value)\n self.refresh_variable(name)", "def setProperty(*args):", "def setProperty(*args):", "def setProperty(*args):", "def setProperty(*args):", "def setProperty(*args):", "def __property_config__(self, model_class, property_name):\n super(MemcacheReferenceProperty, self).__property_config__(model_class, property_name)\n\n if self.reference_class is _SELF_REFERENCE:\n self.reference_class = self.data_type = model_class\n\n if self.collection_name is None:\n self.collection_name = '%s_set' % (model_class.__name__.lower())\n existing_prop = getattr(self.reference_class, self.collection_name, None)\n if existing_prop is not None:\n if not (isinstance(existing_prop, db._ReverseReferenceProperty) and\n existing_prop._prop_name == property_name and\n existing_prop._model.__name__ == model_class.__name__ and\n existing_prop._model.__module__ == model_class.__module__):\n raise DuplicatePropertyError('Class %s already has property %s '\n % (self.reference_class.__name__,\n self.collection_name))\n setattr(self.reference_class,\n self.collection_name,\n db._ReverseReferenceProperty(model_class, property_name))", "def __setattr__ (self, name, value):\n\t\ttry:\n\t\t\tself.__dict__[name] # Do not delete this line (it verifies the existence of an attribute)\n\t\t\t# Positioning of the existing attribute\n\t\t\tself.__dict__[name] = value\n\t\texcept KeyError:\n\t\t\t# The attribute does not exist is probably value of the structure\n\t\t\tself.__dict__[\"value\"][name] = value", "def setProperty(self,prop=None):\n objects = self.check()\n if objects:\n if prop is None:\n res = askItems([['property',0]],\n caption = 'Set Property Number for Selection (negative value to remove)')\n if res:\n prop = int(res['property'])\n if prop < 0:\n prop = None\n for o in objects:\n if hasattr(o,'setProp'):\n o.setProp(prop)\n self.draw()", "def set(self, name, value, autosave=True, override=True):\n if self._is_section(name) and not isinstance(value, _Section):\n raise SectionError(\"Cannot override section with value.\")\n if isinstance(value, _Section):\n if override:\n self._config_obj[name] = {}\n for key, _value in value._config_obj.items():\n self[name].set(key, _value, autosave, override)\n elif name not in self._config_obj or override:\n self._config_obj[name] = value\n if autosave:\n self.save()", "def setValue(self, name, value):\n values = self.__get('values')\n values[name] = value\n self.__set('values', values)", "def add_property(self, property):\n # check input data type\n if not isinstance(property, dict):\n raise TypeError\n # 
check data types of keys in dict\n if not all([isinstance(key, str) for key in property.keys()]):\n raise TypeError\n # check that values are lists of strings\n for key, value in property.items():\n if isinstance(value, str):\n property[key] = [value]\n elif not isinstance(value, list):\n raise TypeError\n else:\n if not all([isinstance(val, str) for val in value]):\n raise TypeError\n\n # add properties to the dict\n for key, value in property.items():\n if key not in self.properties:\n self.properties[key] = value\n else:\n self.properties[key] += value\n self.properties[key] = list(set(self.properties[key]))" ]
[ "0.7744824", "0.7589677", "0.7511228", "0.71817976", "0.7180543", "0.71250933", "0.70207524", "0.6867484", "0.6805842", "0.66848767", "0.6684025", "0.6615753", "0.64751947", "0.6462285", "0.6445133", "0.6340753", "0.6315831", "0.6308148", "0.62673914", "0.6257923", "0.6245488", "0.622961", "0.6123482", "0.6040454", "0.59751207", "0.59010977", "0.58931476", "0.58642673", "0.58554286", "0.5852848", "0.58253753", "0.581846", "0.5810388", "0.5737535", "0.57112765", "0.56629163", "0.55839056", "0.55604166", "0.554588", "0.55406266", "0.553353", "0.5528683", "0.5444581", "0.5422219", "0.541916", "0.5406026", "0.53901714", "0.5364961", "0.53643215", "0.53580654", "0.5345323", "0.53442943", "0.5338469", "0.5336238", "0.5332302", "0.5330189", "0.5325056", "0.5272518", "0.52641696", "0.52329105", "0.5224203", "0.52205557", "0.5202875", "0.51943207", "0.51921034", "0.51887286", "0.5163683", "0.516288", "0.5157594", "0.5145322", "0.51066256", "0.51038504", "0.5099274", "0.5099274", "0.5096015", "0.5090443", "0.5087344", "0.50820506", "0.50810754", "0.5068598", "0.5063478", "0.50585455", "0.50545233", "0.505417", "0.504435", "0.5028018", "0.502171", "0.5013491", "0.4992848", "0.49889082", "0.49889082", "0.49889082", "0.49889082", "0.49889082", "0.49831074", "0.49786824", "0.49781156", "0.49749288", "0.49575105", "0.4956257" ]
0.7238388
3
set_default_config() Saves default config to desired filename
def set_default_config(self, filename):
    if os.path.isfile(filename):
        os.remove(filename)

    config = configparser.ConfigParser()
    default_cfg = {'config': {'database_name': 'manga.db',
                              'volume_limit': 128,
                              'series_per_page': 0,
                              'compact_list': 0,
                              'show_empty_series': False,
                              'default_to_gui': True}}
    config.read_dict(default_cfg)
    with open(filename, 'w') as config_ini:
        config.write(config_ini)

    # Reset class variables for config object as well
    self.config = config
    self.filename = filename
    self.database_name = 'manga.db'
    self.volume_limit = 128
    self.series_per_page = 0
    self.compact_list = False
    self.show_empty_series = False
    self.default_to_gui = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def writeDefaultConfig(conffile='default.conf', defaults={}):\n if not os.path.exists(conffile):\n cp = createConfigParserfromDict(defaults, ['base'])\n cp.write(open(conffile,'w'))\n return conffile", "def write_default_config():\n # TODO: BROKEN!\n config_path = pathlib.Path(xdg.BaseDirectory.xdg_config_home) / \"awiesm_bc\"\n config_file = config_path / DEFAULT_CONFIG_FILENAME\n if not os.path.isdir(config_path):\n os.makedirs(config_path)\n\n if not os.path.isfile(config_file):\n # TODO: write file\n pass", "def write_default_config(self):\n if self.config_file:\n config_file = self.config_file\n else:\n config_file = os.path.join(self.config_dir, self.config_file_name + '.py')\n \n if os.path.exists(config_file) and not self.answer_yes:\n answer = ''\n def ask():\n prompt = \"Overwrite %s with default config? [y/N]\" % config_file\n try:\n return raw_input(prompt).lower() or 'n'\n except KeyboardInterrupt:\n print('') # empty line\n return 'n'\n answer = ask()\n while not answer.startswith(('y', 'n')):\n print(\"Please answer 'yes' or 'no'\")\n answer = ask()\n if answer.startswith('n'):\n return\n \n config_text = self.generate_config_file()\n if isinstance(config_text, bytes):\n config_text = config_text.decode('utf8')\n print(\"Writing default config to: %s\" % config_file)\n ensure_dir_exists(os.path.abspath(os.path.dirname(config_file)), 0o700)\n with open(config_file, mode='w') as f:\n f.write(config_text)", "def write_starter_configfile(\n basedir=None,\n default_configfile_name=DEFAULT_CONFIGFILE_NAME,\n default_config_yaml=DEFAULT_CONFIG_YAML,\n):\n if not basedir:\n basedir = Path.cwd()\n configfile_pathname = Path(basedir) / default_configfile_name\n if os.path.exists(configfile_pathname):\n raise ConfigError(\n f\"Found existing {str(configfile_pathname)} - delete to re-generate.\"\n )\n with open(configfile_pathname, \"w\", encoding=\"utf-8\") as outfile:\n outfile.write(default_config_yaml)\n print(f\"Wrote config defaults (for editing) to: {str(configfile_pathname)}\")", "def __create_default_config(self):\n if not os.path.exists(self.__configfile):\n path=os.path.dirname(self.__configfile)\n try:\n os.makedirs(path)\n except:\n pass\n if os.path.exists(path):\n self.save(defaults=True)", "def set_as_default (self):\n\t\ttry:\n\t\t\tself.config.set('Global', 'Default', self.currentAccount.data['name'])\n\t\texcept ConfigParser.NoSectionError:\n\t\t\tself.setup_config()\n\t\t\tself.config.set('Global', 'Default', self.currentAccount.data['name'])\n\t\tself.config.write(open(self.configFile, 'w'))", "def createConfig():\n\twith open(configPath, 'w', encoding='utf-8') as file:\n\t\tjson.dump(default_config, file, indent=3)", "def add_default_settings_config(self):\n config = {\n mconst.DEF_SETTINGNAME_default_logfilename: mconst.DEF_SETTINGVAL_default_logfilename_defaultvalue,\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)", "def save_config(conf, default):\n print()\n if yes_no('Would you like to save your configuration?'):\n name = simple_response(\n 'What would you like to name your configuration?')\n path = ask_path(\n 'Please enter the path you would like your configuration saved to',\n default=default)\n file_path = os.path.join(path, name)\n if file_path.find('.json') == -1:\n file_path += '.json'\n with open(file_path, 'w+') as f:\n json.dump(conf, f, indent=4)", "def create_default(cls, env: str, config_f: Path) -> None:\n # create default file\n _config = Config()\n _config.save(env)\n log.info(f\"Created config file at 
{config_f}\")", "def run(self):\n write_config(self.filename)\n print('Wrote default config to', self.filename)", "def dump_default_config():\n output = \"PythiaPlotter_config.py\"\n log.info(\"Dumping config to %s\", output)\n import pythiaplotter.default_config as dc\n shutil.copy(dc.__file__.replace(\".pyc\", \".py\"), output)", "def _write_default_config(self, force=False):\n\t\t\n\t\tif self.configfilepath is not None:\n\t\t\tlogger.debug(\"You use the existing config file %s, I don't have to write one.\" % \\\n (self._get_config_filepath()))\n\t\t\treturn\n\t\t\n\t\tif force or not os.path.exists(self._get_config_filepath()):\t\n\t\t\tp = subprocess.Popen([self.sexpath, \"-dd\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\t\tout, err = p.communicate()\n\t\t\tif err != \"\":\n\t\t\t\tlogger.warning(\"Ouch, SExtractor complains :\")\n\t\t\t\tlogger.warning(err)\n\t\t\tf = open(self._get_config_filepath(), 'w')\n\t\t\tf.write(out.decode(encoding='UTF-8'))\n\t\t\tf.close()\n\t\t\tlogger.debug(\"Wrote %s\" % (self._get_config_filepath()))\n\t\telse:\n\t\t\tlogger.debug(\"Default config file already exists, I don't overwrite it.\")", "def init_default_config_file_if_nonexistent(package, default_config):\n config_file = os.path.join(conf.PAULA_USER_CONFIG_DIR, package + conf.CONFIG_EXTENSION)\n debug('config_file = ' + config_file)\n if not os.path.exists(config_file):\n debug('making: \\\"' + config_file+ '\\\"')\n shutil.copyfile(default_config, config_file)", "def load(filename, defaultConfig={}, writeIfNonExistent=True, quitIfNonExistent=False):\n global _loaded\n assert filename and isinstance(filename, basestring)\n\n #all keys and values in defaultConfig must be strings\n #(to make it match what would be read in by ConfigParser)\n #lets error out if we find a key or value that isn't a string\n for (key, value) in defaultConfig.iteritems():\n if not isinstance(key, basestring):\n raise KeyError(\"Invalid defaultConfig: key \\\"%s\\\" is not a string\" % str(key))\n if not isinstance(value, basestring):\n raise ValueError(\"Invalid defaultConfig: value of key \\\"%s\\\" (\\\"%s\\\") is not a string\" \n % (str(key), str(value)))\n\n _loaded = defaultConfig.copy()\n \n configFileExists = os.path.exists(filename)\n \n #If we just want to \"load\" the default configuration\n if writeIfNonExistent and not configFileExists:\n log.msg(\"Configuration file \\\"%s\\\" does not exist...\" % filename, lvl='w', ss='ss_configfile')\n try:\n write(filename)\n except:\n raise\n \n #if quitIfNonExistent is set, keep going to the next block of code\n #so we can quit out properly\n if not quitIfNonExistent:\n return\n\n if quitIfNonExistent and not configFileExists:\n #print different messages depending on if the default\n #config file was written or not\n if writeIfNonExistent:\n log.msg(\"Quitting as configuration file did not exist. 
Please \"\n \"edit the default configuration that has been written \"\n \"to \\\"%s\\\" to suit your environment and start the program \"\n \"again.\" % filename, lvl='c', ss='ss_configfile')\n sys.exit(0)\n else:\n log.msg(\"Quitting as configuration file \\\"%s\\\" does not exist.\"\n \" Please create this file and try again.\" % filename)\n sys.exit(1)\n\n log.msg(\"Reading configuration from \\\"%s\\\"...\" % filename, lvl='i', ss='ss_configfile')\n _loaded = simpleReadConfigFile(filename, useLogging=True,\n level=consts.LOGLVL_DEBUG3)\n \n log.msg(\"ACTIVE CONFIGURATION: \" + str(_loaded),\n lvl='d2', ss='ss_configfile')", "def setup(self):\n\n default_config = self.read()\n\n self.write(default_config)", "def _set_config():\n\n\tdebug_msg = \"load default config yaml file\"\n\tlogger.debug(debug_msg)\n\n\tconfig_file_parser(paths.CONFIG_FILE, override_options=True)", "def default_config_file(self):\n return DEFAULT_CONFIG_FILEPATH", "def antenny_config_make_default(self):\n return self.antenny_config.save_as_default_config()", "def servo_make_default(self):\n self.servo_config.save_as_default_config()", "def save(self,filename=None,defaults=False):\n # Check filename or use default filename\n if not filename:\n if self.__configfile:\n filename=self.__configfile\n else:\n raise Exception(_('EVOGTK: Need a filename for saving preferences'))\n # Set widget values on config parser\n for section in self.__optionstruct:\n for option in self.__optionstruct[section]:\n widgets=self.__optionstruct[section][option][1]\n # Get default value\n value=vars(self)[section].get_option(option)\n if not defaults and widgets:\n # Use widget value\n val=self.__guidata.__getattr__(widgets[0])\n if val:\n value=val\n # Create section in file if not exists\n if not self.__config.has_section(section):\n self.__config.add_section(section)\n value=vars(self)[section].set_option(option,value)\n # Write config to file\n fd=open(filename,'wb')\n self.__config.write(fd)\n fd.close()", "def generateDefaultConfig(self):\n\n\t\t# Open config.ini in write mode\n\t\tf = open(self.fileName, \"w\")\n\n\t\t# Set keys to config object\n\t\tself.config.add_section(\"db\")\n\t\tself.config.set(\"db\", \"host\", \"localhost\")\n\t\tself.config.set(\"db\", \"username\", \"root\")\n\t\tself.config.set(\"db\", \"password\", \"\")\n\t\tself.config.set(\"db\", \"database\", \"ripple\")\n\t\tself.config.set(\"db\", \"pingtime\", \"600\")\n\n\t\tself.config.add_section(\"server\")\n\t\tself.config.set(\"server\", \"server\", \"tornado\")\n\t\tself.config.set(\"server\", \"host\", \"0.0.0.0\")\n\t\tself.config.set(\"server\", \"port\", \"5001\")\n\t\tself.config.set(\"server\", \"localizeusers\", \"1\")\n\t\tself.config.set(\"server\", \"outputpackets\", \"0\")\n\t\tself.config.set(\"server\", \"outputrequesttime\", \"0\")\n\t\tself.config.set(\"server\", \"timeoutlooptime\", \"100\")\n\t\tself.config.set(\"server\", \"timeouttime\", \"100\")\n\n\t\tself.config.add_section(\"flask\")\n\t\tself.config.set(\"flask\", \"threaded\", \"1\")\n\t\tself.config.set(\"flask\", \"debug\", \"0\")\n\t\tself.config.set(\"flask\", \"logger\", \"0\")\n\n\t\tself.config.add_section(\"ci\")\n\t\tself.config.set(\"ci\", \"key\", \"changeme\")\n\n\t\t# Write ini to file and close\n\t\tself.config.write(f)\n\t\tf.close()", "def write_config(config_filename, config=default_config):\n comments=\"\"\n if config==default_config:\n comments+=\"# %s \\n\" % version.version_string\n comments+=\"# This is the default config file\\n\"\n comments+=\"# Generated on 
%s \\n\" % time.strftime('%x %X %Z')\n comments+=\"# Visit %s for more informations \\n\" % version.project_url\n c=SafeConfigParser()\n f=open(config_filename,'w')\n f.write(comments+\"\\n\")\n for section in config.items():\n name=section[0]\n values=section[1]\n c.add_section(name)\n for value in values.items():\n c.set(name, value[0], value[1])\n c.write(f)\n f.flush()\n f.close()", "def save_config(self, new_config, filename=None):\n self.cfg.update(new_config)\n if filename is None:\n self.cfg.filename = self.cfg_filename\n else:\n self.cfg.filename = filename\n self.cfg.write()\n logger.info(\"Config file %s written out\" % self.cfg.filename)", "def write_config(self, filename):\n self.config.filename = filename\n self.config.write()", "def set_config_all_to_defaults():\n logging.debug(\"Creating default config\")\n for section in all_defaults:\n set_config_section_to_defaults(section)\n global config_changed\n config_changed = True", "def save_default_connections_file():\n if os.path.exists(DEFAULT_CONNECTIONS_FILE):\n if os.path.exists(DEFAULT_CONNECTIONS_FILE_BAK):\n os.remove(DEFAULT_CONNECTIONS_FILE_BAK)\n os.rename(DEFAULT_CONNECTIONS_FILE, DEFAULT_CONNECTIONS_FILE_BAK)", "def create_default_config(self, parser):\n parser.add_section('irc')\n parser.set('irc', 'channels', '')\n \n # create the full path, and the file\n try:\n os.makedirs(self.config_dir_path, mode=0700)\n except OSError:\n pass\n file_resource = open(self.config_file_path, 'w')\n parser.write(file_resource)", "def create_default_config():\n import codecs\n config = ConfigParser.SafeConfigParser()\n config.readfp(StringIO(DEFAULT_CONFIG))\n\n # Load user settings\n filename = get_user_config_filename()\n if not os.path.exists(filename):\n from wizard import setup_wizard\n setup_wizard(config)\n else:\n try:\n fi = codecs.open(filename, 'r', encoding='utf-8')\n config.readfp(fi)\n finally:\n fi.close()\n return config", "def overrideCurrentConfiguration(self):\n if not self._currentConfiguration:\n self.saveNewConfiguration()\n else:\n self._saveToFilePath(self._currentConfiguration)", "def save_defaults(self, overwrite=False):\r\n for (section, option), value in self.defaults.iteritems():\r\n if value is None:\r\n continue\r\n if section not in self.__config:\r\n self.__config[section] = {}\r\n if overwrite or option not in self.__config[section]:\r\n self.__config[section][option] = value\r\n self.save()", "def get_default_config_file() -> Path:\n return get_path_to_pyflow() / \"pyflow\" / \"conf\" / CONFIG_FILE", "def write_default_config(output_config: Pathlike, feature_type: str):\n create_default_feature_extractor(feature_type).to_yaml(output_config)", "def SaveConfig(self):\n config_value = getattr(self, APPDATA)\n path_value = config_value.AbsolutePaths[0]\n default_cfg_file = os.path.join(path_value, CONFIG_FILE_NAME)\n temp_file = default_cfg_file + '.TEMP'\n if os.path.exists(default_cfg_file):\n json.dump(type(self)._CURRENT_CONFIG,\n open(temp_file.lower(),\n mode='w'),\n cls=ConfigEncoder,\n sort_keys=False,\n indent=4)\n EnsureBackup(temp_file, default_cfg_file)\n else:\n if not os.path.isdir(path_value):\n os.mkdir(path_value)\n json.dump(type(self)._CURRENT_CONFIG,\n open(default_cfg_file.lower(),\n mode='w'),\n cls=ConfigEncoder,\n sort_keys=False,\n indent=4)", "def build_default_cfg():\n with open(Daemon.CONFIG_FILEPATH, 'wb') as fo:\n json.dump(Daemon.DEF_CONF, fo, skipkeys=True, ensure_ascii=True, indent=4)\n return Daemon.DEF_CONF", "def initConfig(defaults, filepath=None):\n result = 
False\n if filepath is None:\n filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"res/\", \"config.ini\")\n if not os.path.exists(filepath):\n config = ConfigParser.ConfigParser(defaults)\n result = Helpers.saveConfig(config, filepath)\n return result", "def get_default_config(self):\n return config.read(pathlib.Path(__file__).parent / \"ext.conf\")", "def create_default_sensor_config_file(self, sensor_config_file_path):\n data = dict()\n data[constants.SensorConfigurationKeys.LIDAR] = \\\n constants.DEFAULT_LIDAR_CONFIGURATION\n data[constants.SensorConfigurationKeys.LIDAR_OVERLAY] = \\\n constants.DEFAULT_LIDAR_OVERLAY_CONFIGURATION\n with open(sensor_config_file_path, \"w\") as outfile:\n json.dump(data, outfile, indent=4,)", "def set_config_defaults(config):\n new_config = config.copy()\n\n new_config.setdefault(\"window_title\", \"Materials Cloud Tool\")\n new_config.setdefault(\n \"page_title\",\n \"<PLEASE SPECIFY A PAGE_TITLE AND A WINDOW_TITLE IN THE CONFIG FILE>\",\n )\n\n new_config.setdefault(\"custom_css_files\", {})\n new_config.setdefault(\"custom_js_files\", {})\n new_config.setdefault(\"templates\", {})\n\n return new_config", "def set_last_config(self, config_name):\n with open(FILEPATH_MANAGER.get_last_config_file_path(), 'w') as f:\n f.write(config_name + \"\\n\")", "def default(path: str = 'setings.INI'):\n Setings._delete_setings(path)\n Setings._create_default_setting(path)", "def save_config(**kwargs):\n if kwargs == {}:\n kwargs = config._config\n current_config = _load_config()\n current_config.update(**kwargs)\n # write to disk\n fname = _get_config_fname()\n if fname is None:\n raise RuntimeError('config filename could not be determined')\n if not op.isdir(op.dirname(fname)):\n os.mkdir(op.dirname(fname))\n with open(fname, 'w') as fid:\n json.dump(current_config, fid, sort_keys=True, indent=0)", "def set_default_profile(self):\n profile = textwrap.dedent(\n \"\"\"\n config:\n boot.autostart: \"true\"\n description: Default LXD profile\n devices:\n eth0:\n name: eth0\n nictype: bridged\n parent: conjureup1\n type: nic\n eth1:\n name: eth1\n nictype: bridged\n parent: conjureup0\n type: nic\n root:\n path: /\n pool: default\n type: disk\n name: default\n \"\"\")\n with NamedTemporaryFile(mode='w', encoding='utf-8',\n delete=False) as tempf:\n utils.spew(tempf.name, profile)\n out = utils.run_script(\n 'cat {} |conjure-up.lxc profile edit default'.format(\n tempf.name))\n if out.returncode != 0:\n raise Exception(\"Problem setting default profile: {}\".format(\n out))", "def load_config_with_defaults(config_path):\n config = neat.Config(\n neat.DefaultGenome,\n neat.DefaultReproduction,\n neat.DefaultSpeciesSet,\n neat.DefaultStagnation,\n config_path\n )\n return config", "def load_default_config_file(filename: str) -> str:\n try:\n raw_contents = resource_string(__name__, 'configuration/' + filename)\n except FileNotFoundError:\n raise BscanConfigError(\n 'Unable to find default configuration file `' + filename + '`')\n return raw_contents.decode('utf-8')", "def config_skeleton():\n config = Config()\n config.set_to_default()\n config.save()", "def saveCurrentConfig():\n cf = ConfigParser.ConfigParser()\n cf.add_section(\"dir_config\")\n cf.set(\"dir_config\", \"7zpath\", SW_CONFIG['7zpath'])\n cf.set(\"dir_config\", \"sharefolder\", SW_CONFIG['sharefolder'])\n cf.set(\"dir_config\", \"distpath\", SW_CONFIG['distpath'])\n cf.add_section(\"sw_config\")\n cf.set(\"sw_config\", \"version\", SW_CONFIG['sw_version'])\n cf.set(\"sw_config\", 
\"startup\", SW_CONFIG['startup'])\n cf.add_section(\"run_config\")\n cf.set(\"run_config\", \"pop\", RUN_CONFIG['pop'])\n cf.set(\"run_config\", \"backup\", RUN_CONFIG['backup'])\n cf.add_section(\"hook_config'\")\n for k, v in HOOK_CONFIG:\n cf.set(\"hook_config\", k, v)\n fp = open(CONFIG_FILE, \"w\")\n cf.write(fp)\n fp.close()", "def setup_configuration_file(self):\n\n with open(self.config_path, \"w+\") as f_config:\n\n f_config.write(get_configuration_file_form())", "def generate_config(args):\n default_config = resource_string('webrpg', 'scripts/templates/default_config.txt').decode('utf-8')\n if args.sqla_connection_string:\n default_config = default_config.replace('%(sqlalchemy_url)s', args.sqla_connection_string)\n else:\n default_config = default_config.replace('%(sqlalchemy_url)s', get_user_parameter('SQL Alchemy Connection String', 'sqlite:///%(here)s/pyire_test.db'))\n\n with open(args.filename, 'w') as out_f:\n out_f.write(default_config)", "def save():\n\n env.config.save(env.config_file)", "def default_page_config(self, default_page_config):\n\n self._default_page_config = default_page_config", "def test_config_create_file_with_default_dict(get_root, get_empty_config, monkeypatch):\n path = os.path.join(get_root, 'res', 'non_existent.yml')\n test_dict = {'this': 'test', 'test': 'de'}\n monkeypatch.setattr(DeviceConfig, 'minimal_essential_conf', test_dict)\n cfg = get_empty_config(DeviceConfig, path)\n\n with open(path, 'r') as fh:\n content = yaml.load(fh, Loader=get_yaml_loader())\n\n assert cfg.config_path == path, \"config path is incorrect\"\n assert cfg.minimal_essential_conf == test_dict, \"bad minimal running\"\n assert content == test_dict, \"config not written to file\"\n assert cfg.data == test_dict, \"default config not loaded from file\"", "def save_config(self):\n config.save_config(self.config, self.config_file)", "def set_default_paths(args):\n filename = os.path.join(os.path.expanduser('~'), '.gfail_defaults')\n if os.path.exists(filename):\n D = ConfigObj(filename)\n else:\n D = {}\n if args.data_path is not None:\n if args.data_path == 'reset':\n D.pop('data_path')\n else:\n # check that it's a valid path\n if os.path.exists(args.data_path):\n D.update({'data_path': args.data_path})\n else:\n print('Path given for data_path does not exist: %s'\n % args.data_path)\n if args.output_filepath is not None:\n if args.output_filepath == 'reset':\n D.pop('output_filepath')\n else:\n # check that it's a valid path\n if os.path.exists(args.output_filepath):\n D.update({'output_filepath': args.output_filepath})\n else:\n print('Path given for output_filepath does not exist: %s'\n % args.output_filepath)\n if args.config_filepath is not None:\n if args.config_filepath == 'reset':\n D.pop('config_filepath')\n else:\n # check that it's a valid path\n if os.path.exists(args.config_filepath):\n D.update({'config_filepath': args.config_filepath})\n else:\n print('Path given for config_filepath does not exist: %s'\n % args.config_filepath)\n if args.popfile is not None:\n if args.popfile == 'reset':\n D.pop('popfile')\n else:\n # check that it's a valid path\n if os.path.exists(args.popfile):\n D.update({'popfile': args.popfile})\n else:\n print('Path given for population file does not exist: %s'\n % args.popfile)\n if args.trimfile is not None:\n if args.trimfile == 'reset':\n D.pop('trim')\n else:\n # check that it's a valid path and that it's a shapefile\n if os.path.exists(args.trimfile):\n filename4, fileextension = os.path.splitext(args.trimfile)\n if fileextension 
== '.shp':\n D.update({'trimfile': args.trimfile})\n else:\n print('Ocean trimming file is not a shapefile: %s'\n % args.trimfile)\n else:\n print('Path given for ocean trimming file does not exist: %s'\n % args.trimfile)\n if args.pdl_config is not None:\n if args.pdl_config == 'reset':\n D.pop('pdl_config')\n else:\n # check that it's a valid path\n if os.path.exists(args.pdl_config):\n D.update({'pdl_config': args.pdl_config})\n else:\n print('Path given for pdl config file does not exist: %s'\n % args.pdl_config)\n if args.log_filepath is not None:\n if args.log_filepath == 'reset':\n D.pop('log_filepath')\n else:\n # check that it's a valid path\n if os.path.exists(args.log_filepath):\n D.update({'log_filepath': args.log_filepath})\n else:\n print('Path given for log file does not exist: %s'\n % args.log_filepath)\n if args.dbfile is not None:\n if args.dbfile == 'reset':\n D.pop('dbfile')\n else:\n # check that it's a valid path (file itself doesnt have to exist)\n if os.path.exists(os.path.dirname(args.dbfile)):\n D.update({'dbfile': args.dbfile})\n else:\n print('Path given for database file does not exist: %s'\n % args.dbfile)\n\n print('New default paths set.\\n')\n\n if D:\n C = ConfigObj(D)\n C.filename = filename\n C.write()\n list_default_paths()\n else:\n print('no defaults set because no paths were input\\n')", "def saveConfig(self, name=None):\n\n configDir = self.mwGlob['configDir']\n\n if self.config.get('profileName', '') == 'config':\n if 'reference' in self.config:\n del self.config['reference']\n\n # default saving for reference\n if name is None:\n name = self.config.get('reference', 'config')\n\n fileName = configDir + '/' + name + '.cfg'\n with open(fileName, 'w') as outfile:\n json.dump(self.config,\n outfile,\n sort_keys=True,\n indent=4)\n # if we save a reference first, we have to save the config as well\n if name != 'config':\n fileName = configDir + '/config.cfg'\n with open(fileName, 'w') as outfile:\n json.dump(self.config,\n outfile,\n sort_keys=True,\n indent=4)\n return True", "def restore_default_connections_file():\n if os.path.exists(DEFAULT_CONNECTIONS_FILE_BAK):\n if os.path.exists(DEFAULT_CONNECTIONS_FILE):\n os.remove(DEFAULT_CONNECTIONS_FILE)\n os.rename(DEFAULT_CONNECTIONS_FILE_BAK, DEFAULT_CONNECTIONS_FILE)", "def bootstrap_default():\n\treturn default_configuration", "def init_config(args=None):\n if args is None:\n args = parser_args()\n\n # Set the experiment directory\n exp_dir = args.exp_dir\n if exp_dir is None:\n exp_dir = 'experiment/' + args.model_name + '/' + osp.splitext(osp.basename(args.default_config_path))[0]\n\n # copy file\n dst_config_path = osp.join(exp_dir, osp.basename(args.default_config_path))\n src_copy_to_dst(args.default_config_path, dst_config_path)\n\n # overwrite\n if args.ow_config_path != 'None':\n print('ow_config_path is: {}'.format(args.ow_config_path))\n overwrite_config_file(dst_config_path, ow_file=args.ow_config_path)\n if args.ow_str != 'None':\n print('ow_str is: {}'.format(args.ow_str))\n overwrite_config_file(dst_config_path, ow_str=args.ow_str)\n\n # import config\n cfg = import_file(dst_config_path).cfg\n\n # Set log experiment dir\n cfg.log.exp_dir = exp_dir\n return cfg", "def save_defaults(self):\n\n pass", "def saveNewConfiguration(self):\n selection = tk.filedialog. 
\\\n asksaveasfilename(title=\"Save CHUM configuration\")\n if selection:\n self._currentConfiguration = selection\n self._saveToFilePath(selection)", "def saveSettignsAsDefault(self, config = None):\n if config:\n resp = self.setPineAPSettings(config)\n if (resp['error']):\n return resp\n return self.request('saveAsDefault')", "def save_config(self, filename: str=None):\n if not filename:\n filename = self.config_file\n with open(filename, \"w\") as file_object:\n json.dump(self.config, file_object, indent=4, sort_keys=True)", "def save_configuration(config):\n with open(cwd + '/configuration.pickle', 'wb') as handle:\n pickle.dump(config, handle, protocol=pickle.HIGHEST_PROTOCOL)", "def default():\n raise NotImplementedError(\"Pvwattsv7 default file no longer exists!\")", "def test_get_config(default_config, tmp_path):\n abcconfig.write_config(default_config, configpath=tmp_path)\n config = abcconfig.get_config(configpath=tmp_path)\n assert config == default_config", "def default_configfile():\n dirname=None\n if os.getenv(\"HOME\"):\n dirname=os.getenv(\"HOME\")\n elif os.getenv(\"USERPROFILE\"):\n dirname=os.getenv(\"USERPROFILE\")\n\n else:\n raise FattyException(\"No HOME or USERPROFILE variable set, unable to determine default config file\")\n\n return os.path.join(dirname,\".fattybugs\")", "def __init__(self, name, defaults = {} ):\n self.defaults = defaults\n self.filename = os.path.expanduser(name)+\".ini\"\n self.conf = {}\n self.reset()\n if os.path.exists(self.filename):\n self.load()", "def set_default_paths(args):\n filename = os.path.join(os.path.expanduser('~'), '.gfail_defaults')\n if os.path.exists(filename):\n D = ConfigObj(filename)\n else:\n D = {}\n if args.data_path is not None:\n if args.data_path == 'reset':\n D.pop('data_path')\n else:\n # check that it's a valid path\n if os.path.exists(args.data_path):\n D.update({'data_path': args.data_path})\n else:\n print('Path given for data_path does not exist: %s' % args.data_path)\n if args.output_filepath is not None:\n if args.output_filepath == 'reset':\n D.pop('output_filepath')\n else:\n # check that it's a valid path\n if os.path.exists(args.output_filepath):\n D.update({'output_filepath': args.output_filepath})\n else:\n print('Path given for output_filepath does not exist: %s' % args.output_filepath)\n if args.config_filepath is not None:\n if args.config_filepath == 'reset':\n D.pop('config_filepath')\n else:\n # check that it's a valid path\n if os.path.exists(args.config_filepath):\n D.update({'config_filepath': args.config_filepath})\n else:\n print('Path given for config_filepath does not exist: %s' % args.config_filepath)\n if args.mapconfig is not None:\n if args.mapconfig == 'reset':\n D.pop('mapconfig')\n else:\n # check that it's a valid path\n if os.path.exists(args.mapconfig):\n D.update({'mapconfig': args.mapconfig})\n else:\n print('Path given for mapconfig does not exist: %s' % args.mapconfig)\n if args.mapdata_filepath is not None:\n if args.mapdata_filepath == 'reset':\n D.pop('mapdata_filepath')\n else:\n # check that it's a valid path\n if os.path.exists(args.mapdata_filepath):\n D.update({'mapdata_filepath': args.mapdata_filepath})\n else:\n print('Path given for mapdata_filepath does not exist: %s' % args.mapdata_filepath)\n\n if D:\n C = ConfigObj(D)\n C.filename = filename\n C.write()\n list_default_paths()\n else:\n print('no defaults set because no paths were input')", "def _create_default_config(self):\n self.options.setdefault('options.admin_passwd', '')\n 
sys.path.append(self.openerp_dir)\n sys.path.extend([egg.location for egg in self.ws])\n from openerp.tools.config import configmanager\n configmanager(self.config_path).save()", "def reload_default(self, write=True, backup=True, how='right', how_section=None, sections=None):\r\n # self._cfg = self.default_config.deepcopy()\r\n self._path = self._default_path.copy()\r\n dico = self.default_config.deepcopy()\r\n sections = {sections} if isinstance(sections, str) else sections\r\n n_dico = dico if sections is None else {k: dico[k] for k in sections & dico.keys()}\r\n self.merge(n_dico, how=how, how_section=how_section, inplace=True)\r\n if write:\r\n self.save_config(overwrite=True, backup=backup)\r\n logger.info(\"Configuration reloaded and saved to '{}'.\".format(self._path))\r\n else:\r\n logger.info(\"Configuration reloaded.\")", "def saveConfig():\n with open(_CONFIG_FNM, 'w') as configfile:\n CONFIG_DICT.write(configfile,\n space_around_delimiters=True)", "def save_config_file(self):\n wkdir = Path(self.config_dict[\"outputdir\"])\n config_filename = str(wkdir / f\"{self.config_dict['name']}.json\")\n save_config(self.config_dict, config_filename)", "def set_default_save_location(self):\n home = os.path.expanduser(\"~\")\n self.path = home+'\\\\Desktop\\\\'\n filename = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M\")+'.csv'\n self.full_file_path = self.path+filename\n self.settings['csv_save_path'] = self.full_file_path\n self.firstopened = True", "def save(self):\r\n with open(self.filename, 'wb') as configfile:\r\n self.write(configfile)", "def save_config() -> None:\n with open(_config_file, \"w\", newline=\"\") as config_file:\n json.dump(_config, config_file, indent=4)\n config_file.truncate()", "def UnsetWiredDefault(self):\n config = ConfigParser.ConfigParser()\n config.read(self.wired_conf)\n profileList = config.sections()\n for profile in profileList:\n if config.has_option(profile, \"default\"):\n if misc.to_bool(config.get(profile, \"default\")):\n config.set(profile, \"default\", False)\n config.write(open(self.wired_conf, \"w\"))\n self.SaveWiredNetworkProfile(profile)", "def add_defaults(cls, defaults):\n defaults.wallet = bittensor.Config()\n defaults.wallet.name = os.getenv('BT_WALLET_NAME') if os.getenv('BT_WALLET_NAME') != None else 'default'\n defaults.wallet.hotkey = os.getenv('BT_WALLET_HOTKEY') if os.getenv('BT_WALLET_HOTKEY') != None else 'default'\n defaults.wallet.path = os.getenv('BT_WALLET_PATH') if os.getenv('BT_WALLET_PATH') != None else '~/.bittensor/wallets/'", "def save(self):\n file = open(self.path, 'w')\n self.config.write(file)\n file.close()", "def get_default_config_filename():\n if 'PYWREN_CONFIG_FILE' in os.environ:\n config_filename = os.environ['PYWREN_CONFIG_FILE']\n # FIXME log this\n\n elif os.path.exists(\".pywren_config\"):\n config_filename = os.path.abspath('.pywren_config')\n\n else:\n config_filename = get_default_home_filename()\n\n return config_filename", "def antenny_config_load_default(self):\n return self.antenny_config.load_default_config()", "def default(config_data=None): \n if not config_data:\n if 'PYWREN_CONFIG' in os.environ:\n config_data = json.loads(os.environ.get('PYWREN_CONFIG'))\n else:\n config_filename = get_default_config_filename()\n if config_filename is None:\n raise ValueError(\"could not find configuration file\")\n \n config_data = load(config_filename)\n \n # Apply defualt values\n if 'storage_backend' not in config_data:\n config_data['storage_backend'] = DEFAULT_STORAGE_BACKEND \n if 'pywren' not in 
config_data:\n config_data['pywren'] = dict()\n config_data['pywren']['storage_bucket'] = COS_BUCKET_DEFAULT\n config_data['pywren']['storage_prefix'] = COS_PREFIX_DEFAULT\n elif 'storage_bucket' not in config_data['pywren']:\n config_data['pywren']['storage_bucket'] = COS_BUCKET_DEFAULT\n elif 'storage_prefix' not in config_data['pywren']:\n config_data['pywren']['storage_prefix'] = COS_PREFIX_DEFAULT\n if 'action_name' not in config_data['ibm_cf']:\n config_data['ibm_cf']['action_name'] = CF_ACTION_NAME_DEFAULT\n \n return config_data", "def test_write_config(default_config, tmp_path):\n testpath = Path(tmp_path, \"write_config\")\n testpath.mkdir()\n abcconfig.write_config(default_config, configpath=testpath)\n assert Path(testpath, \"config.yml\").exists()", "def save_default_environment(\n environment=None,\n cwd=None\n):\n env_file = get_local_default_file(cwd=cwd)\n with open(env_file, 'w') as f_out:\n f_out.write(f'{str(environment)}\\n')\n return True", "def default_configfile(self):\r\n config = None\r\n for path in self.searchpaths:\r\n if os.path.exists(path):\r\n config = path\r\n break\r\n if config is None and self.require_configfile:\r\n self.usage('No config file found at default paths (%s); '\r\n 'use the -c option to specify a config file '\r\n 'at a different path' % ', '.join(self.searchpaths))\r\n return config", "def save_config(cp, cfile):\n cp['DEFAULT'] = {\n 'last': datetime.datetime.utcnow().replace(tzinfo=pytz.utc).isoformat()\n\n }\n with open(cfile, 'w') as cfh:\n cp.write(cfh)", "def default_config_yaml(cls):\n return DEFAULT_CONFIG", "def get_config_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, configs.DEFAULT_FILENAME_CONFIG)", "def save_config(config, filename=None):\n if filename is None:\n filename = CONFIG_FN\n with open(filename, \"w\", encoding=\"utf-8\") as fh:\n json.dump(\n config,\n fh,\n sort_keys=True,\n indent=4,\n separators=(\",\", \": \"),\n )", "def save():\n print(\"Saving config file..\")\n\n res = yaml.round_trip_dump(_conf, indent=2, block_seq_indent=1)\n\n with open(__config_file, 'w', encoding='utf-8') as stream:\n stream.write(res)", "def setup_logging(save_dir, log_config='logger/logger_config.json', default_level=logging.INFO):\n log_config = Path(log_config)\n if log_config.is_file():\n config = read_json(log_config)\n # modify logging paths based on run config\n for _, handler in config['handlers'].items():\n if 'filename' in handler:\n handler['filename'] = str(save_dir / handler['filename'])\n\n logging.config.dictConfig(config)\n else:\n print(\"Warning: logging configuration file is not found in {}.\".format(log_config), file=sys.stderr)\n logging.basicConfig(level=default_level)", "def test_set_default_config(qibuild_action, build_worktree):\n qibuild_action(\"add-config\", \"foo\", \"--default\")\n assert build_worktree.default_config == \"foo\"", "def add_config(config_name, params, config_file=None, make_default=True):\n\n if config_file is None:\n config_file = pkgrs.resource_filename('latools', 'latools.cfg')\n cf = configparser.ConfigParser()\n cf.read(config_file)\n\n # if config doesn't already exist, create it.\n if config_name not in cf.sections():\n cf.add_section(config_name)\n # iterate through parameter dict and set values\n for k, v in params.items():\n cf.set(config_name, k, v)\n # make the parameter set default, if requested\n if make_default:\n cf.set('DEFAULT', 'default_config', config_name)\n\n cf.write(open(config_file, 'w'))\n\n return", "def 
_write_config(self, config_path: Path):\n with open(config_path, \"w\") as f:\n json.dump(self.config_overrides, f)", "def save(config, filename=None):\n filename = add_directory(filename or 'configure.json')\n directory = os.path.dirname(filename)\n if not os.path.exists(directory):\n os.makedirs(directory, 0o700)\n with open(filename, \"w\") as f:\n json.dump(config, f, indent=2, sort_keys=True)", "def saveConfig(self):\n newPath = self.newFolderPath.text()\n config.set(\"saveLocation\", str(newPath))\n config.save()\n self.reloadSettings()", "def init_cfg(input_path, quiet):\n config_file = getConfigPath(input_path)\n\n if not os.path.exists(config_file) or quiet:\n confirm_overwrite = True\n else:\n confirm_overwrite = click.confirm(\n '\\n{} already exists. Do you want to overwrite it?'.format(config_file))\n if confirm_overwrite:\n configHandler(config_file).resetConfig()\n click.secho('{} created'.format(config_file), fg='green')", "def init():\n for config_filename in _config_utils.CONFIG_FILENAMES:\n if os.path.isfile(config_filename):\n config_filepath = os.path.abspath(config_filename)\n click.echo(\"found existing config file {}\".format(config_filepath))\n return\n\n config_filepath = _config_utils.create_empty_config_file('.')\n click.echo(\"initialized empty config file {}\".format(config_filepath))", "def save():\n with open(CONFIG_FILE, 'w') as f:\n json.dump(config, f, indent=4, sort_keys=True)", "async def setconfigfile(self, ctx, *, config_file):\n self.settings.setConfigFile(config_file)\n await ctx.send(inline('Done'))", "def update_gui_defaults(GUI_defaults):\n # failure flag\n config_import_error = False\n \n # xml source directory\n src_dir = os.getcwd() + '\\\\src'\n \n # attempt to parse the xml file and get it's root\n try:\n tree = ET.parse(src_dir + '\\\\pySCPI_config.xml')\n root = tree.getroot()\n \n except (IOError, ET.ParseError):\n # parsing failed for some reason\n config_import_error = True\n GUI_defaults.log_error('*** pySCPI_config.xml is'\n 'missing or corrupt ***')\n # end try\n \n # import the default values from the xml file\n if not config_import_error:\n \n # list of tags to look for\n config_tags = ['default_filename', 'default_delay', \n 'default_length', 'default_dp']\n \n # iterate through tags\n for tag in config_tags:\n # find each tag\n config_element = root.findall(tag)\n \n # if there is only one of a tag\n if len(config_element) == 1:\n # convert it to text\n config_text = config_element[0].text\n \n # update the appropriate field\n if tag == 'default_filename':\n GUI_defaults.update_filename(config_text)\n \n elif tag == 'default_delay':\n GUI_defaults.update_delay(config_text)\n \n elif tag == 'default_length':\n GUI_defaults.update_length(config_text)\n \n elif tag == 'default_dp':\n GUI_defaults.update_dp(config_text)\n # end if\n \n else:\n GUI_defaults.log_error('*** There is the wrong number '\n 'of ' + tag + ' declarations in '\n 'pySCPI_config.xml ***') \n # end if\n # end for\n \n # find the default addresses\n address_elements = root.findall('addresses')\n \n # if there are addresses\n if (len(address_elements) == 1) and (len(address_elements[0]) > 0):\n for element in address_elements[0]:\n # add each address to the list\n GUI_defaults.add_address(element.tag, element.get('address'))\n # end for\n \n else:\n GUI_defaults.log_error('*** No addresses were provided in '\n 'pySCPI_config.xml ***') \n # end if\n \n # find the default commands\n command_elements = root.findall('default_commands')\n \n # if there are commands\n if 
(len(command_elements) == 1) and (len(command_elements[0]) > 0):\n for command in command_elements[0]:\n # add each command to the list\n GUI_defaults.add_command(command.text)\n # end for\n\n else:\n GUI_defaults.log_error('*** No commands were provided in '\n 'pySCPI_config.xml ***') \n # end if \n # end if" ]
[ "0.7795459", "0.76893884", "0.7577117", "0.7335307", "0.7192635", "0.71420926", "0.71333855", "0.7080361", "0.70510966", "0.7046068", "0.70410866", "0.7012563", "0.70065206", "0.693917", "0.6721765", "0.6714416", "0.67111033", "0.66713166", "0.66688925", "0.66178155", "0.642376", "0.6404036", "0.6403735", "0.6389879", "0.63797796", "0.6355066", "0.6335641", "0.62977684", "0.62890416", "0.6239152", "0.6233682", "0.62206256", "0.62126535", "0.6203658", "0.6157822", "0.613148", "0.6110743", "0.6109495", "0.6098885", "0.6098818", "0.60904056", "0.60874736", "0.6082604", "0.60788214", "0.60492504", "0.6029501", "0.60184664", "0.60085815", "0.6005163", "0.59972996", "0.59685814", "0.5964843", "0.59616107", "0.59338665", "0.59214944", "0.5920631", "0.5920102", "0.5912273", "0.5909892", "0.58940035", "0.58847433", "0.5882714", "0.5878263", "0.587364", "0.58727247", "0.58666164", "0.58595276", "0.58583367", "0.585136", "0.5836644", "0.5836457", "0.5827649", "0.58229125", "0.58087885", "0.5805702", "0.5779824", "0.57765377", "0.57652736", "0.5762971", "0.5762074", "0.57607126", "0.5747282", "0.5730954", "0.57270944", "0.57243234", "0.572119", "0.57088244", "0.5703823", "0.56999815", "0.56993675", "0.5694747", "0.5692062", "0.56918406", "0.5684825", "0.5683432", "0.56805843", "0.56718624", "0.56697226", "0.56669277", "0.56637967" ]
0.67744064
14