query: string (9 to 9.05k characters)
document: string (10 to 222k characters)
negatives: list (19 to 20 items)
metadata: dict
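Each row below pairs a natural-language query with a matching document and a list of negatives; the per-row metadata names which columns form the training triplet. As a minimal sketch, assuming rows are available as plain Python dicts with exactly these field names (the helper name iter_triplets and the dict-shaped row are assumptions made for illustration, not part of the dataset), the triplet columns listed in the metadata could be expanded into (anchor, positive, negative) tuples like this:

from typing import Dict, Iterator, List, Tuple

def iter_triplets(row: Dict) -> Iterator[Tuple[str, str, str]]:
    # The row's metadata names the columns that form each triplet, e.g.
    # {"objective": {"triplet": [["query", "document", "negatives"]]}}.
    # Field names come from the schema above; the dict-shaped row and the
    # helper name are assumptions made for illustration.
    for anchor_col, positive_col, negative_col in row["metadata"]["objective"]["triplet"]:
        anchor = row[anchor_col]
        positive = row[positive_col]
        negatives: List[str] = row[negative_col]
        for negative in negatives:
            yield (anchor, positive, negative)

Since each row's negatives list holds 19 to 20 items, one row expands into 19 to 20 such triplets.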
Return all image assets associated with a facet and the forms to associate more.
def facet_image_assets(self): self.object = self.get_object() images = self.object.get_facet_images() org_images = self.object.organization.get_org_image_library() uploadform = ImageAssetForm() return {'images': images, 'org_images': org_images, 'uploadform': uploadform}
[ "def facet_document_assets(self):\r\n\r\n self.object = self.get_object()\r\n documents = self.object.get_facet_documents()\r\n org_documents = self.object.organization.get_org_document_library()\r\n uploadform = DocumentAssetForm()\r\n return {'documents': documents, 'org_documen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all document assets associated with a facet and the forms to associate more.
def facet_document_assets(self): self.object = self.get_object() documents = self.object.get_facet_documents() org_documents = self.object.organization.get_org_document_library() uploadform = DocumentAssetForm() return {'documents': documents, 'org_documents': org_document...
[ "def facet_image_assets(self):\r\n\r\n self.object = self.get_object()\r\n images = self.object.get_facet_images()\r\n org_images = self.object.organization.get_org_image_library()\r\n uploadform = ImageAssetForm()\r\n return {'images': images, 'org_images': org_images, 'uploadfor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all audio assets associated with a facet and the forms to associate more.
def facet_audio_assets(self): self.object = self.get_object() audio = self.object.get_facet_audio() org_audio = self.object.organization.get_org_audio_library() uploadform = AudioAssetForm() return {'audio': audio, 'org_audio': org_audio, 'uploadform': uploadform}
[ "def facet_document_assets(self):\r\n\r\n self.object = self.get_object()\r\n documents = self.object.get_facet_documents()\r\n org_documents = self.object.organization.get_org_document_library()\r\n uploadform = DocumentAssetForm()\r\n return {'documents': documents, 'org_documen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all video assets associated with a facet and the forms to associate more.
def facet_video_assets(self): self.object = self.get_object() videos = self.object.get_facet_video() org_videos = self.object.organization.get_org_video_library() uploadform = VideoAssetForm() return {'videos': videos, 'org_videos': org_videos, 'uploadform': uploadform}
[ "def facet_image_assets(self):\r\n\r\n self.object = self.get_object()\r\n images = self.object.get_facet_images()\r\n org_images = self.object.organization.get_org_image_library()\r\n uploadform = ImageAssetForm()\r\n return {'images': images, 'org_images': org_images, 'uploadfor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Query the given host for the given kf_id
def run_kf_id_query(ctx, kf_id, key): host = ctx.obj["host"] for e in yield_entities_from_kfids(host, [kf_id], show_progress=False): entity_handler(e, key)
[ "def get_host_by_id(self, host_id):\n raise NotImplementedError('override me')", "def test_clusterhost_get_by_id(self):\n # 1. Get host sucessfully\n url = '/clusterhosts/1'\n return_value = self.test_client.get(url)\n self.assertEqual(200, return_value.status_code)\n hos...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Query the given host using the given filter
def run_filter_query(ctx, endpoint, filter, key): host = ctx.obj["host"] filter_dict = literal_eval(filter) for e in yield_entities_from_filter( host, endpoint, filter_dict, show_progress=False ): entity_handler(e, key)
[ "def search_host(self, host):\n collection = self._get_collection('host')\n host = collection.find_one({'url': {'$regex': 'http(s)?://' + host}})\n return host", "def scantarget(host,status_filter):\n user_agent = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) Ap...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tries to parse out an ``Emoji`` from the input text. The emoji can be either a custom or a unicode emoji. If the parsing fails, the function returns `None`.
def parse_emoji(text): parsed = EMOJI_RP.fullmatch(text) if (parsed is not None): animated, name, emoji_id = parsed.groups() animated = (animated is not None) emoji_id = int(emoji_id) return Emoji._create_partial(emoji_id, name, animated) try: return UNICODE_...
[ "def parse_reaction(text):\n try:\n emoji = UNICODE_TO_EMOJI[text]\n except KeyError:\n parsed = REACTION_RP.fullmatch(text)\n if parsed is None:\n emoji = None\n else:\n name, emoji_id = parsed.groups()\n emoji_id = int(emoji_id)\n emoji...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterates over all the custom emojis in the text as they appear. This function is an iterable generator.
def _iter_parse_custom_emojis(text): for groups in EMOJI_RP.findall(text): animated, name, emoji_id = groups animated = (True if animated else False) emoji_id = int(emoji_id) yield Emoji._create_partial(emoji_id, name, animated)
[ "def _iter_parse_all_emojis(text):\n for groups in EMOJI_ALL_RP.findall(text):\n \n unicode_value, unicode_name, custom_animated, custom_name, custom_emoji_id = groups\n if unicode_value:\n yield UNICODE_TO_EMOJI[unicode_value]\n continue\n \n if unicode_n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterates over all emojis in the text as they appear. This function is an iterable generator.
def _iter_parse_all_emojis(text): for groups in EMOJI_ALL_RP.findall(text): unicode_value, unicode_name, custom_animated, custom_name, custom_emoji_id = groups if unicode_value: yield UNICODE_TO_EMOJI[unicode_value] continue if unicode_name: ...
[ "def _iter_parse_custom_emojis(text):\n for groups in EMOJI_RP.findall(text):\n \n animated, name, emoji_id = groups\n animated = (True if animated else False)\n emoji_id = int(emoji_id)\n \n yield Emoji._create_partial(emoji_id, name, animated)", "def extract_emojis(t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses out every custom emoji from the given text.
def parse_custom_emojis(text): if text is None: return set() return {*_iter_parse_custom_emojis(text)}
[ "def find_custom_emojis(text):\n emoji_list = []\n data = regex.findall(r\"<(a?):([a-zA-Z0-9\\_]+):([0-9]+)>\", text)\n for a, emoji_name, emoji_id in data:\n emoji_list.append(f\"<{a}:{emoji_name}:{emoji_id}>\")\n\n return emoji_list", "def _iter_parse_custom_emojis(text):\n for groups in E...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses emojis of the given `text` with the given `parser`. Returns them ordered based on their appearance in the text.
def _parse_emojis_ordered(text, parser): emojis_ordered = [] if (text is not None): emojis_unique = set() for emoji in parser(text): if emoji not in emojis_unique: emojis_ordered.append(emoji) emojis_unique.add(emoji) return emojis_or...
[ "def parse_all_emojis_ordered(text):\n return _parse_emojis_ordered(text, _iter_parse_all_emojis)", "def parse_custom_emojis_ordered(text):\n return _parse_emojis_ordered(text, _iter_parse_custom_emojis)", "def parse_custom_emojis(text):\n if text is None:\n return set()\n \n return {*_ite...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses out every custom emoji from the given text. Returns them ordered based on their appearance in the text.
def parse_custom_emojis_ordered(text): return _parse_emojis_ordered(text, _iter_parse_custom_emojis)
[ "def parse_all_emojis_ordered(text):\n return _parse_emojis_ordered(text, _iter_parse_all_emojis)", "def parse_custom_emojis(text):\n if text is None:\n return set()\n \n return {*_iter_parse_custom_emojis(text)}", "def _parse_emojis_ordered(text, parser):\n emojis_ordered = []\n if (te...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses out every emoji from the given text. Returns them ordered based on their appearance in the text.
def parse_all_emojis_ordered(text): return _parse_emojis_ordered(text, _iter_parse_all_emojis)
[ "def _parse_emojis_ordered(text, parser):\n emojis_ordered = []\n if (text is not None):\n emojis_unique = set()\n \n for emoji in parser(text):\n if emoji not in emojis_unique:\n emojis_ordered.append(emoji)\n emojis_unique.add(emoji)\n \n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses out an emoji from the given reaction string.
def parse_reaction(text): try: emoji = UNICODE_TO_EMOJI[text] except KeyError: parsed = REACTION_RP.fullmatch(text) if parsed is None: emoji = None else: name, emoji_id = parsed.groups() emoji_id = int(emoji_id) emoji = Emoji._creat...
[ "def parse_emoji(text):\n parsed = EMOJI_RP.fullmatch(text)\n if (parsed is not None):\n animated, name, emoji_id = parsed.groups()\n animated = (animated is not None)\n emoji_id = int(emoji_id)\n return Emoji._create_partial(emoji_id, name, animated)\n \n try:\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the concentration of DPX at each quenching step.
def calc_dpx_concs(dpx_vol_steps, dpx_stock_conc=0.1, starting_well_vol=102.): # The cumulative volumes of stock DPX added at each step dpx_vols_added = np.cumsum(dpx_vol_steps) # The number of points in the standard curve num_dilutions = len(dpx_vols_added) dpx_concs = np.zeros(num_dilutions) ...
[ "def concentration_final(self) -> float:\n return self.cell_per_well / (self.volume_per_well / 1000)", "def _calc_concentration(self):\n total_mol_conc = self._pressure \\\n / (self.gas_constant * self._temperature)\n return self._mole_fraction * total_mol_conc", "def cdf(self, x...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the TTA output logits out of the inputs and transforms, to the tta_dir
def get_tta_logits(dataset, args, net, X, y, tta_size, num_classes): tta_transforms = get_tta_transforms(dataset, args.gaussian_std, args.soft_transforms, args.clip_inputs) tta_dataset = TTADataset( torch.from_numpy(X), torch.from_numpy(y), tta_size, transform=tta_transforms) ...
[ "def transformer(inputs):\n X = inputs\n n_files = X[0]\n total_mb = X[1]\n # apply power transformer normalization to continuous vars\n x = np.array([[n_files], [total_mb]]).reshape(1, -1)\n pt = PowerTransformer(standardize=False)\n pt.lambdas_ = np.array([-1.51, -0.12])\n xt = pt.transfor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensures that useless nodes are forbidden.
def test_useless_nodes( assert_errors, parse_ast_tree, code, statement, default_options, mode, ): tree = parse_ast_tree(mode(code.format(statement))) visitor = StatementsWithBodiesVisitor(default_options, tree=tree) visitor.run() assert_errors(visitor, [UselessNodeViolation])
[ "def test_useless_try_nodes(\n assert_errors,\n assert_error_text,\n parse_ast_tree,\n code,\n statement,\n default_options,\n mode,\n):\n tree = parse_ast_tree(mode(code.format(statement)))\n\n visitor = StatementsWithBodiesVisitor(default_options, tree=tree)\n visitor.run()\n\n as...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensures that useless loop nodes are forbidden.
def test_useless_loop_nodes( assert_errors, parse_ast_tree, code, statement, default_options, mode, ): tree = parse_ast_tree(mode(code.format(statement))) visitor = StatementsWithBodiesVisitor(default_options, tree=tree) visitor.run() assert_errors(visitor, [UselessNodeViolatio...
[ "def test_useless_nodes(\n assert_errors,\n parse_ast_tree,\n code,\n statement,\n default_options,\n mode,\n):\n tree = parse_ast_tree(mode(code.format(statement)))\n\n visitor = StatementsWithBodiesVisitor(default_options, tree=tree)\n visitor.run()\n\n assert_errors(visitor, [Useles...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensures that useless try nodes are forbidden.
def test_useless_try_nodes( assert_errors, assert_error_text, parse_ast_tree, code, statement, default_options, mode, ): tree = parse_ast_tree(mode(code.format(statement))) visitor = StatementsWithBodiesVisitor(default_options, tree=tree) visitor.run() assert_errors(visitor...
[ "def susceptible(self):\r\n self.state = NodeState.SUSCEPTIBLE", "def _detect_illegal_cycles(node: NodeBase, visited_nodes: List[NodeBase], low_priority_edge_found: bool = False):\n if node in visited_nodes:\n if low_priority_edge_found:\n return\n else:\n raise Illeg...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new PythonPlugin object. tool: the tool associated with this plugin.
def __init__(self, tool: ghidra.framework.plugintool.PluginTool): ...
[ "def instantiatePlugin(pluginClass: java.lang.Class, tool: ghidra.framework.plugintool.PluginTool) -> object:\n ...", "def load_tool(tool_module: str) -> HammerTool:\n mod = importlib.import_module(tool_module)\n tool_class = getattr(mod, \"tool\")\n tool: HammerTool = tool_class()\n tool.packa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of possible command completion values. cmd: current command line (without prompt). Returns: a list of possible command completion values; could be empty if there aren't any.
def getCompletions(self, cmd: unicode) -> List[ghidra.app.plugin.core.console.CodeCompletion]: ...
[ "def autocomplete(self):\n if self.completion_env_var_name not in os.environ:\n return\n cwords = os.environ['COMP_WORDS'].split()[1:]\n cword = int(os.environ['COMP_CWORD'])\n try:\n current = cwords[cword-1]\n except IndexError:\n current = ''\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle a change in one of our options. options: the options handle. optionName: name of the option changed. oldValue: the old value. newValue: the new value.
def optionsChanged(self, options: ghidra.framework.options.ToolOptions, optionName: unicode, oldValue: object, newValue: object) -> None: ...
[ "def on_option_change(self, event):\n\t\telement = event.GetEventObject()\n\t\t_id = element.GetId()\n\t\tvar_name = self.var_ids[_id]\n\t\tif var_name == 'time_index' or var_name == 'pl_index':\n\t\t\tval = int(element.GetValue().split(\" \")[0])\n\t\telif var_name == 'preset':\n\t\t\tval = element.GetValue()\n\t\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unwraps the IMDB suggestions result from the JSONP callback. Basically we ignore the JSONP wrapper; what is interesting is the suggestions JSON.
def _unwrap_jsonp(suggestions: str) -> [dict]: return json.loads(suggestions.split("(", 1)[1].strip(")"))
[ "def handle_suggest_response(query, callback):\n def inner_callback(response):\n try:\n result = json.loads(response.body)\n except StandardError as e:\n log.error('Error handling search response: %s - %s' % (str(e),\n response.body))\n callback...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filter only movies in IMDB suggestions.
def _filter_movies(suggestions: [dict]) -> [dict]: return list( filter(lambda s: _has_attr('q', s) and ('feature' == s.get('q') or 'TV movie' == s.get('q')), suggestions['d']) )
[ "def movie_suggestions(self, movie_id):\n self.endpoint = 'movie_suggestions.json'\n self.payload = {'movie_id': movie_id}\n return self.__make_request()", "def search_videos(self, search_term):\n results = []\n for video in self._video_library.get_all_videos():\n if ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A newly joined node is catching up and sends catchup requests to other nodes, but one of the nodes does not reply, so the newly joined node cannot complete the process until the timeout; it then requests the missing transactions.
def testNodeRequestingTxns(reduced_catchup_timeout_conf, txnPoolNodeSet, nodeCreatedAfterSomeTxns): looper, newNode, client, wallet, _, _ = nodeCreatedAfterSomeTxns new_node_ledger = newNode.ledgerManager.ledgerRegistry[DOMAIN_LEDGER_ID] old_size = len(new_node_ledger.ledger) ...
[ "def retry_notifier():\n if graph_copy.has_node(target.name):\n ready_nodes.append(target.name)\n produced_event.set()", "def queuing_requests(self):\r\n\r\n for node in self.want_enter_to_cs:\r\n self.send_requests_to_root(node)", "def rejo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load team info, subset to relevant columns, and remap column names
def team_info(self): df_team = pd.read_csv(datadir / 'TEAM.csv.gz') team_cols = { 'gid': 'game_id', 'tname': 'team', #'pts': 'tm_pts', 'ry': 'tm_rush_yds', 'ra': 'tm_rush_att', 'py': 'tm_pass_yds', 'pa': 'tm_pass_att', ...
[ "def _load_teams(self):\n self.teams = list(np.unique(self.input_df[[\"HomeTeam\", \"AwayTeam\"]].values.ravel('F')))\n self.results_df = pd.DataFrame(self.teams, columns=['team'])", "def _load_team_cities(self, country):\n url = SoccerParser.team_cities_lookup[country]['url']\n table_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute expected points added by each QB per play
def qb_points(self): # load required datasets df_play = pd.read_csv(datadir / 'PLAY.csv.gz') df_sched = pd.read_csv(datadir / 'SCHEDULE.csv.gz') df_rush = pd.read_csv(datadir / 'RUSH.csv.gz') df_drive = pd.read_csv(datadir / 'DRIVE.csv.gz') # Merge schedule and play data...
[ "def q_expected(self):\n total = 0.0\n for a in self.pre:\n if self.atom_state[a] == ATOM_ENABLED:\n total += self.usecount * self.Q[a]\n else:\n for a2 in (a, a.negate()):\n total += self.frequencies[a2] * self.Q[a2]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute home and away teams' days rested
def rest_days(self, games): game_dates = pd.concat([ games[["date", "team_home"]].rename( columns={"team_home": "team"}), games[["date", "team_away"]].rename( columns={"team_away": "team"}), ]).sort_values("date") game_dates['date_prev'] =...
[ "def day_night_duration(\n self,\n daybreak: datetime.time = datetime.time(NORMAL_DAY_START_H),\n nightfall: datetime.time = datetime.time(NORMAL_DAY_END_H),\n ) -> Tuple[datetime.timedelta, datetime.timedelta]:\n daytotal = datetime.timedelta()\n nighttotal = datetime.timedelt...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Keep track of previous quarterback for each game and each team.
def previous_quarterback(self, games): game_dates = pd.concat([ games[["date", "team_home", "qb_home"]].rename( columns={"team_home": "team", "qb_home": "qb"}), games[["date", "team_away", "qb_away"]].rename( columns={"team_away": "team", "qb_away": "qb"})...
[ "def test_previous(self):\n with mn.model() as m:\n mn.stock('Foo', 1, 0)\n LastFoo = mn.previous('LastFoo', 'Foo')\n\n self.assertEqual(LastFoo[''], 0)\n m.step()\n self.assertEqual(LastFoo[''], 0)\n m.step()\n self.assertEqual(LastFoo[''], 1)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run notebook, convert, save and return PDF path.
def export_notebook_to_pdf(self, nb_path: str, nb_params: Dict) -> str: # type: ignore report_file = Path(nb_path) if not report_file.is_file(): raise ValueError("Notebook path does not point to a file.") nb_params = self._parse_params(nb_params) report_filename, report_e...
[ "def run(self, nb_path: str, nb_params: Dict) -> str: # type: ignore[override]\n return self.export_notebook_to_pdf(nb_path, nb_params)", "def test_to_pdf_with_nb_path(self):\n nb = nbformat.v4.new_notebook()\n text = \"\"\"\\\n This is an auto-generated notebook.\"\"\"\n nb['c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Task to export the notebook into a pdf.
def run(self, nb_path: str, nb_params: Dict) -> str: # type: ignore[override] return self.export_notebook_to_pdf(nb_path, nb_params)
[ "def write_pdf(self, submission_path):\n ...", "def test_to_pdf_with_nb_path(self):\n nb = nbformat.v4.new_notebook()\n text = \"\"\"\\\n This is an auto-generated notebook.\"\"\"\n nb['cells'] = [nbformat.v4.new_markdown_cell(text)]\n with open(TEST_FILES_PATH + 'test-nb...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a matrix to a pickled form. Format is "<rowcount>,<columncount>,<bits>", e.g., [[True, False, True], [True, True, True]] -> "2,3,101111"
def to_value(matrix): h = len(matrix) w = 0 if h == 0 else len(matrix[0]) return ",".join( ( str(h), str(w), "".join(["".join(["1" if v else "0" for v in row]) for row in matrix]), ) )
[ "def matrix2string(matrix):\n\tlines = ''\n\tfor entry in matrix:\n\t\ts = ''\n\t\tfor j, field in enumerate(entry):\n\t\t\tif j > 0:\n\t\t\t\ts += ','\n\t\t\ts += '\"' + str(field) + '\"'\n\t\tlines += s + '\\n'\n\treturn lines", "def serialize(puzzle):\n assert(len(puzzle)==9)\n assert(all([len(row)==9 fo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Node label to use when rendering patterns as graphs using dot.
def dot_label(self) -> str:
[ "def node_label(node):\n return NODE_LABELS[type(node)]", "def as_dot(self):\r\n node_labels = ''\r\n for (id, node) in self._node_list(0):\r\n node_labels += '{0}[label=\"{1}\"];\\n'.format(id, node)\r\n\r\n (_, tree) = self._as_dot(0)\r\n\r\n return 'graph ast {{\\n{0}\\n{1}\\n}}'.format(nod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compares two predicates and returns true if they are equivalent
def is_equivalent(self, other: "NodePredicate") -> bool:
[ "def __eq__(self, other):\n if len(self.tensors) != len(other.tensors):\n return False\n fermions = tuple(pos for pos, tensor in enumerate(self.tensors)\n if tensor.statistics == fermion)\n for match in match_tensor_lists(self.tensors, other.tensors):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines whether a NodePredicate matches another NodePredicate
def matches_predicate(self, predicate_node: "NodePredicate") -> bool:
[ "def containsNode(*args, **kwargs):\n \n pass", "def __eq__(self, other): # overload the == operator\n return isinstance(other, SearchNode) and self.getPriority() == other.getPriority()", "def isEqualToNode(self, other):\n is_lower = self.nodeName.lower() == other.nodeName.lower()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a single Artwork by its id
def artworks_id_get(id): # noqa: E501 return query_manager.get_resource(id=id, rdf_type_uri=ARTWORK_TYPE_URI, rdf_type_name=ARTWORK_TYPE_NAME, kls=Artwork)
[ "def get_artikel_by_id(self, id):\n with ArtikelMapper() as mapper:\n return mapper.find_by_id(id)", "def detail_view(request, id):\n\n # To get object\n #artwork = ArtWork.objects.filter(id=id).first()\n #print(artwork)\n\n # To get query set\n #artwork = ArtWork.objects.filter(i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the dark current based on the dark data. It takes the filename of the Light data and searches for the matching integration time in the dark data directory, finds the required dark data, subtracts off the offset and smear, and computes the dark current
def calculate_dark_current(image, temp, i, int_time): dark_data_dir = r'F:\TEMPO\Data\GroundTest\FPS\FPA_Gain_vs_Temp' tem_file = temp +r'_PT_Dark\Script_Data\saved_quads' data_file = os.path.join(dark_data_dir, tem_file) data_path_name_split = image.split('_') all_int_files = [each for each in os.l...
[ "def getdarkcurrent(self):\n darkrate = 0.005 # electrons/s\n \n try:\n darkcurrent = self.header['DARKTIME'] * darkrate\n \n except:\n str = \"#############################################\\n\"\n str += \"# ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The underlying assumption in smear subtraction is that the dark current in the storage region is really small and hence neglected from the analysis. Typically, Csmear = tFT / (ti + tFT) * (AVG[C(w)] - DCStor * tRO), where tFT = 8 ms
def perform_smear_subtraction(active_quad, int_time): frame_transfer = 8.333 smear_factor = (frame_transfer / (int_time+ frame_transfer))* np.mean(active_quad, axis=0) #print(active_quad.shape) #print(smear_factor.shape) smear_subtracted_quad = active_quad - smear_factor[None, :] return smear_su...
[ "def calculate_shear(self,B31c = 0):\n logger.debug('Calculating magnetic shear...')\n \n # Shorthand introduced: we also have to ransform to 1/B**2 expansion parameters, taking into account the \n # difference in the definition of the radial coordinate. In the work of Rodriguez et al.,\n # Phys. Plasma...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Configures streaming execution mode.
def as_streaming_execution(self): self._is_stream = True return self
[ "def __init__(__self__, *,\n stream_mode: 'StreamModeDetailsStreamMode'):\n pulumi.set(__self__, \"stream_mode\", stream_mode)", "def set_stream_on(self, session, params=None):\n if params is None:\n params = {}\n\n force = params.get('force', False)\n if for...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the parallelism for all operations.
def set_parallelism(self, parallelism): self._parallelism = parallelism return self
[ "def set_parallelism(self, parallelism):\n self._j_execution_environment.setParallelism(parallelism)", "def set_parallelism_factor(self, factor):\n self.parallelism_factor = factor", "def set_thread_count(self, threads):\n self.parallelism = threads\n self.info.parallelism = threads"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Util func to check if the object is a FunctionBuilder
def isFunctionBuilder(obj): if isclass(obj) and not isabstract(obj): return issubclass(obj, FunctionBuilder) return False
[ "def _isScoringFunction(obj):\n if inspect.isclass(obj):\n if callable(obj):\n sig = inspect.getfullargspec(obj.__call__)\n if len(sig.args) == 2:\n if \"return\" in sig.annotations and 'smiles' in sig.annotations:\n if sig.annotations['return'] == d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a dictionary of all functions from a dictionary (or configparser).
def create_function_dict(self, conf): all_funcs = process_args(conf, factory=self, str_keys=['type', 'path']) funcs_dict = {} for k, v in all_funcs.items(): if isinstance(v, dict): f_type = v.pop('type...
[ "def import_parser_funcs(self):\n self.business_configs['funcs'] = {}\n for business_function, values in self.business_configs.items():\n try:\n if business_function == 'funcs':\n continue\n modname, funcname = values['parser'].rsplit('.', 1)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add KafkaManager-related command-line arguments to the given parser.
def add_cli_arguments(self, parser): super(Application, self).add_cli_arguments(parser) add_kafka_manager_api_cli_arguments(parser)
[ "def add_custom_cli_args(self, cli_parser):\n pass", "def add_cmdline_args(parser):\n group = parser.add_argument_group(\"Model\")\n\n # Model\n group.add_argument(\"--model\", type=str, required=True)\n\n # Config\n group.add_argument(\"--config_path\", type=str, required=True)\n\n # Mod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scrape access request data from the access request page for the specified request status.
def get_request_data(self, status: RequestStatus = RequestStatus.ALL) -> List[dict]: SEE_MORE_BTN_XPATH = "//a[text()='See More']" # filter page to only include requests of the given status self._browser.click_element_by_xpath(self._status_btn_xpath(status)) self._browser.wait_until_ele...
[ "async def access_details_handler(request: aiohttp.web.Request) -> aiohttp.web.Response:\n access_details = {}\n access_details = await request.app[\"db_conn\"].get_access_container_details(\n request.match_info[\"user\"],\n request.query[\"owner\"],\n request.match_info[\"container\"],\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that the given URL is a valid URL. Since the URL is not entered by the user, it is always valid.
def _validate_url(self, url): return
[ "def validateURL(url):", "def validate_url(url):\n\n if not url:\n raise SystemError(\"validate_url() was given an empty URL\")\n\n protocol = \"http://\"\n protocol_error_message = ValueError(\"A URL beginning with \" \\\n \"'http://' is required\")\n\n if len(url) < len(protocol):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the xpath for the button to load request data for the given status
def _status_btn_xpath(status: RequestStatus): return f'//div[@id="notifications-affix"]//a[./span[text()="{status.value}"]]'
[ "def get_status(self):\n status_elem = self.tds[self.STATUS_COL]\n status_icon = status_elem.find_element_by_tag(\"mat-icon\")\n if status_icon is None:\n # Check for spinner\n if status_elem.find_element_by_tag(\"mat-spinner\") is None:\n raise ValueError(\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load the bolometric light curves from lbolfile.
def load(name, path='.', tcol='time', cols=('L_ubvri', 'L_bol'), is_lum=False): fname = os.path.join(path, f'{name}.lbol') # print(f"Loading {fname}") d, header = read_obs_table_header(fname) # print(header) time = np.array(d[tcol]) cut = time > 0 t = time[cut] res = SetLightCurve(f'L_bo...
[ "def load_LSRK(file,lstype='2S',has_emb=False):\n from numpy import sum\n #Read in coefficients\n f=open(file,'r')\n coeff=[]\n for line in f:\n coeff.append(float(line))\n f.close()\n if has_emb:\n f=open(file+'.bhat','r')\n bhat=[]\n for line in f:\n bha...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepare proxied file for HTTP
def __prepareFileForHTTP( self, lfn, key ): global HTTP_PATH res = self.__prepareSecurityDetails() if not res['OK']: return res # Clear the local cache getFileDir = "%s/%s" % ( HTTP_PATH, key ) os.makedirs(getFileDir) # Get the file to the cache from DIRAC.DataManageme...
[ "def __setHTTPProxy():\n\n global proxyHandler\n\n if not conf.proxy: \n if conf.hostname in ('localhost', '127.0.0.1') or conf.ignoreProxy:\n proxyHandler = urllib2.ProxyHandler({})\n return\n\n debugMsg = \"setting the HTTP proxy to pass by all HTTP requests\"\n logger.debug(d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to send files to clients. fileID is the local file name in the SE. token is used for access rights confirmation.
def transfer_toClient( self, fileID, token, fileHelper ): file_path = "%s/%s" % ( BASE_PATH, fileID ) result = fileHelper.getFileDescriptor( file_path, 'r' ) if not result['OK']: result = fileHelper.sendEOF() # check if the file does not really exist if not os.path.exists(file_path): ...
[ "def fileTransferToClient(id, filename, client):\r\n path = (r\"C:\\Cyber\\SafeBoxOnlline\\\\\" + id + r\"\\\\\" + filename)\r\n size = os.path.getsize(path)\r\n if size == 0:\r\n client.send(b\"empty file\")\r\n else:\r\n with open(path, \"rb\") as f:\r\n file_data = f.read(102...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to receive file from clients. fileID is the local file name in the SE. fileSize can be X bytes or -1 if unknown. token is used for access rights confirmation.
def transfer_fromClient( self, fileID, token, fileSize, fileHelper ): if not self.__checkForDiskSpace( BASE_PATH, fileSize ): return S_ERROR('Not enough disk space') file_path = "%s/%s" % ( BASE_PATH, fileID ) if not os.path.exists( os.path.dirname( file_path ) ): os.makedirs( os.path.dirname( ...
[ "def receive_file_from_socket(self):\n pass", "def fileTransferToClient(id, filename, client):\r\n path = (r\"C:\\Cyber\\SafeBoxOnlline\\\\\" + id + r\"\\\\\" + filename)\r\n size = os.path.getsize(path)\r\n if size == 0:\r\n client.send(b\"empty file\")\r\n else:\r\n with open(pa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the directory dpath can accommodate 'size' volume of data
def __checkForDiskSpace( dpath, size ): dsize = (getDiskSpace(dpath)-1)*1024*1024 maxStorageSizeBytes = 1024*1024*1024 return ( min(dsize, maxStorageSizeBytes) > size )
[ "def check_disk_usage():\n du = shutil.disk_usage(\"/\")\n free = du.free / du.total * 100\n return free < 20", "def check_free_space_in_dir(path, size):\n from ..utils.console import human_file_size\n\n space = get_free_space_in_dir(path)\n if space < size:\n raise IOError(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" formatter(...) formatter( fname, fout = None, space_count = 2, kargs, special = 0, EXCEPTION = True ) Given a correct filename fname, this program autoformats the program file. This function formats source code, in a similar fashion to Python, in which "proper" matching spacing is applied for each line between an ...
def formatter( fname, fout = None, space_count = 2, *kargs, special = 0, EXCEPTION = True, DEBUG = False ): import sys if special == None: special = 0 # Prevent user from accessing 16 if special & 16: special ^= 16 shift = 0 shift_delay = 0 #For 4 cond_shift = 0 #For 16 c...
[ "def format_file(filename, args, standard_out):\n encoding = detect_encoding(filename)\n with open_with_encoding(filename, encoding=encoding) as input_file:\n source = input_file.read()\n formatted_source = format_code(\n source,\n preferred_quote=args.quote)\n\n if sour...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" lcount(...) lcount( fname , fout = None, width = 6, kargs, code = "UTF8" ) Writes the line number of each line into the output text file.
def lcount( fname , fout = None, width = 5, *kargs, code = "UTF-8" ) : import sys #Files file_in = open(fname, "r", 1, code) fout = (fname + '_counted.txt') if (fout == None) else fout file_out = open(fout, "w" , 1, code) print("%s starting with %s. Output is %s." % (sys._getframe(0).f_code.co_nam...
[ "def count_lines(filename):\n pass", "def mapcount(self):\n f = open(self.__filename, \"r+\")\n buf = mmap.mmap(f.fileno(), 0)\n lines = 0\n readline = buf.readline\n while readline():\n lines += 1\n return lines", "def linecountinfile(file_or_filename):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" rspace_killer(...) rspace_killer ( fname, fout = None ) Removes excess white space on the right
def rspace_killer ( fname, fout = None ) : import sys fin = open(source,"r") fout = source + '_wk.txt' if ( fout == None ) else fout dest = open(fout,"w") print("%s starting with %s. Output is %s." % (sys._getframe(0).f_code.co_name , fname, fout) ) for line in fin : fout.write( line.rstr...
[ "def _write_clean(file, outfile, run = False):\r\n\tif run:\r\n\t\tfile = open(f\"../lyrics/{file}.txt\", \"r\").readlines()\r\n\t\twrite_clean_file(file, outfile)", "def safe_cleanup(file):\n remove_duplicates(file)\n remove_isolated_articles(file)\n lines_altered = clean_article_titles(file)\n while...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepare the inputs for the simulator. The signature follows that given in `elfi.tools.external_operation`. This function appends kwinputs with unique and descriptive filenames and writes an input file for the bdm executable.
def prepare_inputs(*inputs, **kwinputs): alpha, delta, tau, N = inputs meta = kwinputs['meta'] # Organize the parameters to an array. The broadcasting works nicely with constant # arguments. param_array = np.row_stack(np.broadcast(alpha, delta, tau, N)) # Prepare a unique filename for parallel...
[ "def _populate_inputs(self):\n\n self.inputs = Bunch(outfile=None,\n infile=None)", "def createNewInput(self,currentInputFiles,oriInputFiles,samplerType,**Kwargs): \n import DecayParser\n import FissionYieldParser\n import QValuesParser\n import MaterialParser\n im...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the path to the C++ source code.
def get_sources_path(): return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'cpp')
[ "def source_path(path):\n if path is None:\n return path\n for extension in ['$py.class', '.pyc', '.pyo']:\n if path.endswith(extension):\n return ''.join([path[:-len(extension)], '.py'])\n return path", "def compile_path(self):\n if not self.precompile:\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the example model used in Lintusaari et al. 2016. Here we infer alpha using the summary statistic T1. We expect the executable `bdm` to be available in the working directory.
def get_model(alpha=0.2, delta=0, tau=0.198, N=20, seed_obs=None): if seed_obs is None and N == 20: y = np.zeros(N, dtype='int16') data = np.array([6, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1], dtype='int16') y[0:len(data)] = data else: y = BDM(alpha, delta, tau, N, random_state=np.random.R...
[ "def ModelOne(patient):\n\n # import model using pickle de-serializer\n with open('./data/10featint_model400_15b.b', 'rb') as f:\n deployed_model = pickle.load(f)\n\n # import complete dataset\n final_features_raw_wid, final_features_raw, active_all = fns.import_features()\n\n # get normalizin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
active and inactive should be lists of usernames
def assertUserState(self, active, inactive): for usernames, is_active in [(active, True), (inactive, False)]: self.assertItemsEqual( usernames, [user.raw_username for user in CommCareUser.by_domain(self.domain, is_active=is_active)] )
[ "def active_user():\n return [user for user in session['user'] if user['active']][0]", "def getActiveUsers(self):\n if self.activeUserCount == 0:\n logger.info(\"Empty room %s\" % self.roomName)\n return list()\n \n userList = list()\n for user in self.activeUse...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sample from discrete distribution ps over set xs
def inverse_cdf_sample(xs,ps): r = random.random() acc = 0 for x,p in zip(xs,ps): acc += p if acc > r: return x
[ "def discrete_sample(prob, rng):\n return (np.cumsum(prob) > rng.rand()).argmax()", "def sample_from_D(self, m):\n xs = np.random.uniform(0, 1, m)\n xs.sort()\n probabilities = numpy.random.random(len(xs))\n A = [(0, 0.2), (0.4, 0.6), (0.8, 1)]\n sample = np.ndarray((m, 2))\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute partition function for Gq model by direct enumeration
def compute_partition(ks,q): return sum(falling_fac(q,j)*esp(ks,j) for j in range(q+1))
[ "def get_partition_function(self, T):\n cython.declare(Q=cython.double)\n if self.conformer is not None and len(self.conformer.modes) > 0:\n Q = self.conformer.get_partition_function(T)\n else:\n raise SpeciesError('Unable to calculate partition function for transition sta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given ks, the roots of a polynomial P(x) = a_n x^n + ... + a_1 x + a_0, compute the sequence of coefficients a_n ... a_0
def compute_coefficients_ref(ks): coeffs = [1] for k in ks: coeffs = zipWith(lambda x,y:x+y,coeffs+[0],[0]+[-k*c for c in coeffs]) return coeffs
[ "def LagrangePolynomial( k ): # Inputs arrays\r\n assert len(x) == len(f_x) , \" x and f not same size \"\r\n sum_ = 0\r\n for i in range( n ): \r\n product = 1 # Reset the product for each i\r\n for j in range( len(x) ):\r\n if i!=j:\r\n produc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute log probability of k out of N successes at probability p
def log_dbinom(k,N,p): return log(choose(N,k)) + k*log(p) + (N-k)*log(1-p)
[ "def log_likelihood(p):\n\tp_full = np.append(p, [1.0 - sum(p)]) # one parameter for the probability of each review score\n\tprobability_list = binom.pmf(review_frequencies, nbr_reviews, p_full)\n\tlog_probability_sum = np.sum(np.log(probability_list))\n\t\n\tif np.isnan(log_probability_sum):\n\t\treturn -np.inf\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return max in window of radius k over circular array of scores
def max_in_window_ref(scores,k): G = len(scores) max_scores = np.empty(G) for i in verbose_gen(xrange(G),10000): m = None for j in xrange(-k,k+1): if scores[(i+j) % G] > m: m = scores[(i+j) % G] max_scores[i] = m return max_scores
[ "def max_in_window(scores,k):\n max_scores = np.copy(scores)\n for j in verbose_gen(xrange(-k,k+1)):\n max_scores = np.maximum(max_scores,np.roll(scores,j))\n return max_scores", "def maxScore(self, cardPoints: list[int], k: int) -> int:\n maxLen = len(cardPoints) - k\n minSum = floa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return max in window of radius k over circular array of scores
def max_in_window(scores,k): max_scores = np.copy(scores) for j in verbose_gen(xrange(-k,k+1)): max_scores = np.maximum(max_scores,np.roll(scores,j)) return max_scores
[ "def max_in_window_ref(scores,k):\n G = len(scores)\n max_scores = np.empty(G)\n for i in verbose_gen(xrange(G),10000):\n m = None\n for j in xrange(-k,k+1):\n if scores[(i+j) % G] > m:\n m = scores[(i+j) % G]\n max_scores[i] = m\n return max_scores", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct a CertificateType object.
def __init__(self, value=CertificateTypeEnum.X_509): super(CertificateType, self).__init__(value, Tags.CERTIFICATE_TYPE)
[ "def create_certificate(self, name=None, request_type=None, subject_dn=None,\n source_container_ref=None, ca_id=None, profile=None,\n request_data=None):\n name = name or self.generate_random_name()\n order = self._clients.barbican().orders.create_ce...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct a DigestValue object.
def __init__(self, value=b''): super(DigestValue, self).__init__(value, Tags.DIGEST_VALUE)
[ "def test05_digest_value(self):\n d = Digest()\n d.want_digest = 'md5'\n self.assertEqual(d.digest_value(b'hello'), 'md5=XUFAKrxLKna5cZ2REBfFkg==')", "def load_digest(cls, alg: HashAlgorithm, digest: ByteString) -> MessageDigest:\n return MessageDigest(bytes(digest), cls(alg))", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct a Digest object.
def __init__(self, hashing_algorithm=None, digest_value=None, key_format_type=None): super(Digest, self).__init__(Tags.DIGEST) if hashing_algorithm is None: self.hashing_algorithm = HashingAlgorithm() else: self.hashing_...
[ "def load_digest(cls, alg: HashAlgorithm, digest: ByteString) -> MessageDigest:\n return MessageDigest(bytes(digest), cls(alg))", "def _get_digest_auth(self) -> httpx.DigestAuth:\n username = \"\" if self._username is None else self._username\n return httpx.DigestAuth(username, self._password...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read the data encoding the Digest object and decode it into its constituent parts.
def read(self, istream): super(Digest, self).read(istream) tstream = BytearrayStream(istream.read(self.length)) self.hashing_algorithm.read(tstream) self.digest_value.read(tstream) self.key_format_type.read(tstream) self.is_oversized(tstream) self.validate()
[ "def readData(self, det):\n f = open(self.file, \"rb\")\n fortran.skip(f) # Skip header\n for _ in range(2 * det):\n fortran.skip(f) # Detector Header & Data\n fortran.skip(f) # Detector Header\n data = fortran.read(f)\n f.close()\n return data", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write the data encoding the Digest object to a stream.
def write(self, ostream): tstream = BytearrayStream() self.hashing_algorithm.write(tstream) self.digest_value.write(tstream) self.key_format_type.write(tstream) self.length = tstream.length() super(Digest, self).write(ostream) ostream.write(tstream.buffer)
[ "def send_message(stream, message, digest, pickle_dep=pickle):\n if hasattr(stream, 'getsockname'):\n stream = FileLikeSocket(stream)\n\n if not callable(digest):\n raise ValueError('digest must be callable')\n\n serialized = pickle_dep.dumps(message, pickle_dep.HIGHEST_PROTOCOL)\n data_di...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct a Digest object from provided digest values.
def create(cls, hashing_algorithm=HashingAlgorithmEnum.SHA_256, digest_value=b'', key_format_type=KeyFormatTypeEnum.RAW): algorithm = HashingAlgorithm(hashing_algorithm) value = DigestValue(bytearray(digest_value)) format_type = KeyFormatType(key_form...
[ "def load_digest(cls, alg: HashAlgorithm, digest: ByteString) -> MessageDigest:\n return MessageDigest(bytes(digest), cls(alg))", "def parse_digests(digests):\n def _atom(orig):\n s = orig\n if s.startswith(\"hash://\"):\n s = os.path.split(s[len(\"hash://\"):])[1]\n if '...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct an ApplicationNamespace object.
def __init__(self, value=None): super(ApplicationNamespace, self).__init__( value, Tags.APPLICATION_NAMESPACE)
[ "def CreateAbsentNamespace (cls):\n rv = Namespace(None)\n rv.__absentNamespaceID = cls.__absentNamespaceID\n cls.__absentNamespaceID += 1\n\n return rv", "def create_namespace(self):\n name = 'namespace-{random_string}'.format(random_string=random_str(5))\n\n namespace =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct an ApplicationData object.
def __init__(self, value=None): super(ApplicationData, self).__init__(value, Tags.APPLICATION_DATA)
[ "def create(cls, application_namespace, application_data):\n namespace = ApplicationNamespace(application_namespace)\n data = ApplicationData(application_data)\n return ApplicationSpecificInformation(\n application_namespace=namespace, application_data=data)", "def app_data():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct an ApplicationSpecificInformation object.
def __init__(self, application_namespace=None, application_data=None): super(ApplicationSpecificInformation, self).__init__( Tags.APPLICATION_SPECIFIC_INFORMATION) if application_namespace is None: self.application_namespace = ApplicationNamespace() else: sel...
[ "def create(cls, application_namespace, application_data):\n namespace = ApplicationNamespace(application_namespace)\n data = ApplicationData(application_data)\n return ApplicationSpecificInformation(\n application_namespace=namespace, application_data=data)", "def __init__(self, v...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read the data encoding the ApplicationSpecificInformation object and decode it into its constituent parts.
def read(self, istream): super(ApplicationSpecificInformation, self).read(istream) tstream = BytearrayStream(istream.read(self.length)) self.application_namespace.read(tstream) self.application_data.read(tstream) self.is_oversized(tstream) self.validate()
[ "def decode(self, data):\n\t\traise NotImplementedError()", "def __init__(self, application_namespace=None, application_data=None):\n super(ApplicationSpecificInformation, self).__init__(\n Tags.APPLICATION_SPECIFIC_INFORMATION)\n\n if application_namespace is None:\n self.appl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write the data encoding the ApplicationSpecificInformation object to a stream.
def write(self, ostream): tstream = BytearrayStream() self.application_namespace.write(tstream) self.application_data.write(tstream) self.length = tstream.length() super(ApplicationSpecificInformation, self).write(ostream) ostream.write(tstream.buffer)
[ "def write(self, data, metadata):\n raise NotImplementedError", "def _encode_to_stream(self, output_stream, data, options=None, **kwargs):\n output_stream.write(self._encode(data, options=options, **kwargs))", "def serialize(self, data):\n if isinstance(data[0], dict):\n self.wri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct an ApplicationSpecificInformation object from provided data and namespace values.
def create(cls, application_namespace, application_data): namespace = ApplicationNamespace(application_namespace) data = ApplicationData(application_data) return ApplicationSpecificInformation( application_namespace=namespace, application_data=data)
[ "def __init__(self, application_namespace=None, application_data=None):\n super(ApplicationSpecificInformation, self).__init__(\n Tags.APPLICATION_SPECIFIC_INFORMATION)\n\n if application_namespace is None:\n self.application_namespace = ApplicationNamespace()\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Juan's code. Perform a biclustering, plot a heatmap with dendrograms on each axis.
def plot_bicluster(data, row_linkage, col_linkage, x_label, y_label, row_nclusters=10, col_nclusters=3): fig = plt.figure(figsize=(10, 10)) # Compute and plot row-wise dendrogram # `add_axes` takes a "rectangle" input to add a subplot to a figure. # The figure is considered to ...
[ "def myDendrogram(shopping_data):\r\n dataframe = shopping_data.values\r\n Z = hierarchy.linkage(dataframe, 'centroid')\r\n plt.figure()\r\n plt.xlabel('Node ID')\r\n plt.ylabel('Euclidean distance')\r\n plt.title('Agglomerative Clustering')\r\n dn = hierarchy.dendrogram(Z)\r\n plt.show()", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a grid ('quilt') out of a bunch of input patches in patch_data. PARAMS
def form_quilt(quilt_w, patch_data): n_patches = len(patch_data) quilt_ls = [] for i in range(0, len(patch_data), quilt_w): # start the row to have something to hstack to q_row = patch_data[i] for j in range(i+1, i+quilt_w): q_row = np.hstack((q_row, patch_data[j...
[ "def create_patches_test_data(imgs, patch_size, stride, padding):\n # Extract patches from input images\n img_patches = [img_crop(imgs[i], patch_size, patch_size, stride, padding) for i in range(len(imgs))]\n\n # Linearize list of patches, code from tf_aerial_images.py\n img_patches = np.asarray([img_pa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the reconstruction error of the array `XV`, which should be just np.dot(X, V). Both XV and original should be 0-1 normalized. PARAMS
def get_reconstruction_error(XV, original, alpha): err_arr = [] loss_arr = [] for i in range(len(XV)): err = np.sqrt(np.sum((XV[i]-original[i])**2)) err_arr.append(err) err_arr = np.array(err_arr) # Get the stats err_mean = np.average(err_arr) err_sd = np.sqrt(np...
[ "def reconstruction_error(self, X):\n \n Z = self.reconstruct_input(X) \n L = self.loss(X,Z)\n return T.mean(L)", "def compute_reconstruction_error(self, X0, X0RM=None):\n if X0RM is None:\n _,X0RM,_,_,_=self.mean_field_approximate_inference(X0,NMF=self.NMF,rand_in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Locate where cuda is installed on the system. Return a dict with values 'home', 'nvcc', 'sdk', 'include' and 'lib' and values giving the absolute path to each directory. Looks for the CUDAHOME environment variable. If not found, searches $PATH for 'cuda' and 'nvcc'.
def locate_cuda(): home = None if platform.architecture()[0]=='64bit': arch = '64' else: arch = '' if 'CUDAHOME' in os.environ: home = os.environ['CUDAHOME'] nvcc = pjoin(home, 'bin', 'nvcc') else: # Otherwise search PATH for cuda and nvcc for element...
[ "def locate_cuda():\n \n # first check if the CUDAHOME env variable is in use\n if 'CUDAHOME' in os.environ:\n home = os.environ['CUDAHOME']\n nvcc = join(home, 'bin', 'nvcc')\n else:\n # otherwise, search the PATH for NVCC\n nvcc = find_in_path('nvcc', os.environ['PATH'])\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Locate where the HDF5 libraries are located on the system. Returns a dict with keys 'home', 'lib' and 'include' and values giving the absolute path to each directory. Looks for the HDF5_BASE environment variable.
def locate_hdf5(): home = None if platform.architecture()[0]=='64bit': arch = '64' else: arch = '' if 'HDF5_BASE' in os.environ: home = os.environ['HDF5_BASE'] else: raise EnvironmentError('Unable to locate the HDF libraries on ' 'this system. Set the...
[ "def _find_home():\n\n # this is used below to make fix up encoding issues that sometimes crop up\n # in py2.x but not in py3.x\n if PY2:\n decodepath = lambda pth: pth.decode(sys.getfilesystemencoding())\n elif PY3:\n decodepath = lambda pth: pth\n\n # First find the home directory - t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Locate where the DIY libraries are located on the system. Returns a dict with keys 'home', 'lib' and 'include' and values giving the absolute path to each directory. Looks for the DIY_BASE environment variable.
def locate_diy(): home = None if platform.architecture()[0]=='64bit': arch = '64' else: arch = '' if 'DIY_BASE' in os.environ: home = os.environ['DIY_BASE'] else: raise EnvironmentError('Unable to locate the DIY libraries on ' 'this system. Set the DIY...
[ "def libdirfind():\n libdir = DEWELIBDIR\n if libdir and os.path.exists(libdir):\n return libdir\n elif libdir:\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),\n libdir)\n\n thisdir = os.path.abspath(os.path.dirname(__file__))\n libdir =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
generates license file for the project
def generate_license() -> None: license_result = os.system(f"lice {LICENSE} -o '{ORGANIZATION}' -p {REPO_NAME} > {PROJECT_DIRECTORY}/LICENSE") if license_result: # it means that return code is not 0, print exception print(license_result)
[ "def license(outfile=sys.stdout):\n # Looks like we're maintaining this in parallel with the LICENSE\n # file. I'd like to avoid that, but I don't see how. The MIT\n # license text itself won't change, but the copyright years will\n # from time to time, and the copyright holder could as well.\n license_str =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Is the policy different between rew_loc and prev_diff_rew_loc? An important question is the following: what happens when you are in the previous reward location?
def policy_changed_with_rew_loc(pk_ctr,state_seq,rew_loc,prev_diff_rew_loc,if_is_rew_loc=False): if prev_diff_rew_loc is not None: same_as_prev_pol = (((state_seq[pk_ctr]-rew_loc)>0)== #direction to reward with location ((state_seq[pk_ctr]-prev_diff_rew_loc)>0)) #direc...
[ "def get_reward(self):\n\n # Premise is sound, as we want to reward highest when sim.pose x,y,z is \n # essentially equal target_pos x,y,z (making the product of discount rate\n # and pose diff essentially 0 -- therefore, reward would be close to 1).\n #reward = 1.-.3*(abs(self.sim.pose[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Searches the given html file for images which are referenced in the standard way (src attribute on img tag) and returns them as they are written.
def find_referenced_images(html): with open(html, 'r') as infile: soup = BeautifulSoup(infile.read(), 'html.parser') return set(img['src'] for img in soup.find_all('img'))
[ "def find_img_tags(document):\n return document.findAll('img')", "def embed_images_in_html(html_file):\n with open(html_file, 'r') as f:\n s = f.read()\n\n s1 = sub('<img src=\"([a-zA-Z0-9_/\\.]*)\" ', _embed_png, s)\n\n with open(html_file, 'w') as f:\n f.write(s1)", "def get_specimen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Analyzes the given folder produced by the model manager, storing the results in folder/analysis/html
def text_analyze(settings: AnalysisSettings, folder: str): with open(os.path.join(folder, 'hparams', 'misc.json')) as infile: misc = json.load(infile) with np.load(os.path.join(folder, 'results.npz')) as infile: results = dict(infile.items()) in_html_folder = os.path.join(os.path....
[ "def results_analysis(dataset, model_folder='results/models/model_85/'):\n # load atlas\n atlas_tsv, _ = load_atlas(custom_tsv=True)\n # load classification\n df_classification = pd.read_csv(os.path.join(model_folder, 'predictions', dataset, 'df_classification.csv'))\n for target in ['disease', 'sex'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load data from a file and convert to binary using the given cutoff, keeping only the first dim_feature columns. (For MNIST data, the last column is the label and should be ignored.)
def get_data(filename, dim_feature, cutoff=0.5): data = np.loadtxt(filename, dtype=float, delimiter=",") data_labels = data[:, dim_feature] data = data[:, xrange(dim_feature)] ## convert to binary data = np.greater_equal(data, cutoff).astype(int) return (data_labels, data)
[ "def extract_sklearn_features_categorical(categories, categories_to_val_map,\n dataset):\n dataset_binary = []\n for row in dataset.iterrows():\n row = list(row[1][categories])\n row_binary = binarize_categorical_row(\n categories, categories_to_val_map, row)\n dataset_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
y is n X d, x is n X p. Returns y_res, an n X d matrix such that the ith column of y_res contains the residuals of the ith column of y after regressing out x.
def regress_out(y, x): regr = linear_model.LinearRegression(True) n = y.shape[0] # if x is a n X 1 vector in the format (n,), change it's size to (n,1) if x.ndim == 1: x = x.reshape(-1,1) if get_dim(y) == 2 : d = y.shape[1] y_res = zeros([n,d]...
[ "def regress(X, y):\n w = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y))\n return w", "def regression(Y, X):\n # Add a constant column to X\n X = numpy.hstack((numpy.array([[1]*X.shape[0]]).T, X))\n (coeffs, residuals, rank, s) = numpy.linalg.lstsq(X, numpy.transpose(Y))\n return coeffs", "def residu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the Amino Acid Substitution classification type.
def classification_type(self) -> ClassificationType: return ClassificationType.AMINO_ACID_SUBSTITUTION
[ "def classify(self):\n\n if self.annotation_type == None or self.annotation_type not in self._annotation_classifications:\n return None\n\n try:\n classification = self._annotation_classifications[self.annotation_type][self.attributes['assertion']]\n except KeyError:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the exact match token type candidates.
def exact_match_candidates(self) -> List[List[str]]: return [ ['AminoAcidSubstitution'], ['GeneSymbol', 'AminoAcidSubstitution'], ['HGVS', 'AminoAcidSubstitution'], ['ReferenceSequence', 'AminoAcidSubstitution'] ]
[ "def get_token_types(self):\r\n \r\n # With help from: https://deplinenoise.wordpress.com/2012/01/04/python-tip-regex-based-tokenizer/\r\n SCANNER = re.compile(r'''\r\n (\\s+) | # whitespace\r\n (//)[^\\n]* | # comments\r\n 0[xX]([0-9A-Fa-f]+) ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if calc_countdown returns days, hours and minutes.
def test_calc_countdown(self): # FIXME: This will break when there is 0 seconds/hours/days left pattern = re.compile(r"\d* days, \d* hours, \d* minutes") countdown = calc_countdown(self.job) assert pattern.match(countdown)
[ "def can_countdown():\n if get_current_round(g) != \"day\":\n return False, 'It is not day.'\n elif not is_player_alive(g, user_id):\n return False, 'You are not in the game.'\n # get list of all alive\n # get list of votes\n # if list of votes == all alive - 1\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the place id for a place with the given name and location. Returns the place id as a string for the candidate with the highest probability, or an empty string if no candidate is found.
def get_place_id(name: str, location: Optional[str]) -> Optional[str]: place_id = None result = '' if location is None: result = gmaps.find_place(name, 'textquery') else: result = gmaps.find_place(name + ', ' + location, 'textquery') if len(result['candidates']) >= 1: place_i...
[ "def return_location(research):\n with urllib.request.urlopen(research, timeout=4) as url:\n data = json.loads(url.read().decode())\n\n data_place_id = data[\"results\"][0][\"place_id\"]\n\n return data_place_id", "def choose_place_name_to_put_token(self):\n prob = collections.Counter()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all links from a list of URLs
def get_all_links(url_list): full_link_list = [] skipped_urls = [] for idx, url in enumerate(url_list): # progress_bar(idx+1, len(url_list)) try: link_list = get_list_of_links(url) except (UnicodeError, IndexError): skipped_urls.append(url) link_list = [] full_link_list = full_link_list + link_list ...
[ "def _extract_url_links(base_url, html):\n print('extract url links')\n soup = BeautifulSoup(html, \"html.parser\")\n anchors = soup.find_all('a')\n print('anchors: ', anchors)\n links = []\n for anchor in anchors:\n href = anchor.get('href')\n link = urljoin(base_url, href)\n links...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make AhoCorasick automaton from a list of strings
def init_automaton(string_list): A = ahocorasick.Automaton() for idx, s in enumerate(string_list): A.add_word(s, (idx, s)) return A
[ "def parse_string_to_automata(string):\n\tlist_of_states = []\n\tno_rubish = find_between(string, '\\[', '\\]')\n\tfor i in no_rubish.split(','):\n\t\tstate_strings = find_between(i, '/').split(' ')\n\t\tlist_of_states.append(State(i.strip()[0], int(state_strings[0]), int(state_strings[1]), int(state_strings[2]), i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return 1 if company URL, 0 otherwise
def get_reward(url, A_company, company_urls): if sum(check_strings(A_company, company_urls, url)) > 0: return 1 return 0
[ "def abnormal_URL(self):\n\n servers = whois.whois(self._url).name_servers\n safe_URL = [True for server in servers if server.split('.')[1].lower() in ('citizenhawk', 'ultimatesoftware')]\n return 2 if len(safe_URL) == 0 else 0", "def is_dataone_url(url):\n\n res = url.find('dataone.org')\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns false or a decomposition of the GeneratorId specific to the AWS CIS Foundations Benchmark ruleset
def is_cis_ruleset(self): # GeneratorId identifies the specific compliance matched. Examples: # arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0/rule/1.3 genid_regex = re.search( '^arn:.*?:ruleset/cis-aws-foundations-benchmark/v/(?P<version>.*?)/rule/(?P<rule>.*?)...
[ "def is_well_generated(self):\n return True", "def test_combination_id(self):\n self._test_combination(labels=False)", "def test(mfcc, correctID, models, k=5):\n bestModel = -1\n print(\"TODO\")\n\n return 1 if (bestModel == correctID) else 0", "def sagittalFlag(): \n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns false or a decomposition of the GeneratorId specific to the AWS Foundational Security Best Practices ruleset
def is_aws_fsbp_ruleset(self): # Generator Id example: # aws-foundational-security-best-practices/v/1.0.0/CloudTrail.1 genid_regex = re.search( '^aws-foundational-security-best-practices/v/(?P<version>.*?)/(?P<rule>.*?)$', self.generator_id) if not genid_regex: ...
[ "def is_well_generated(self):\n return True", "def _generate_expected_sg_rules(self, prs):\n # Get all the needed cidrs:\n prs = self._gbp_plugin.get_policy_rule_set(self._context, prs)\n\n providing_ptg_cidrs = self._get_cidrs_from_ptgs(\n prs['providing_policy_targ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the class. applogger_name determines the log stream name in CW Logs. Use notification_type='SHARR' and security_standard='APP' for app-level logs. For the orchestrator, specify security_standard='APP' for the general log, otherwise specify security_standard and controlid. ex. SHARRNotification('APP', None, 'SHARR') > lo...
def __init__(self, security_standard, controlid=None, notification_type='ORCHESTRATOR'): from applogger import LogHandler self.__security_standard = security_standard self.__notification_type = notification_type applogger_name = self.__notification_type + '-' + self.__security_standard ...
[ "def initialize_logger(logger, log_name=None):\n \n # Set up general logger and formatting\n logger.setLevel(logging.DEBUG) # listen to everything\n formatter = logging.Formatter('%(asctime)s [%(name)-10s] [%(threadName)-12s] %(message)s [in %(pathname)s:%(lineno)d]')\n \n\n # If file_name provid...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send notifications to the application CW Logs stream and sns
def notify(self): if self.send_to_sns: publish_to_sns('SO0111-SHARR_Topic', self.severity + ':' + self.message, AWS_REGION) self.applogger.add_message( self.severity + ': ' + self.message ) if self.logdata: for line in self.logdata: s...
[ "def send_notifications():\n CONFIG = create_app().config\n r = Redis(db=1)\n amz = boto.sns.connect_to_region(\"us-west-2\",\n aws_access_key_id=CONFIG[\"AWS_ACCESS_KEY\"],\n aws_secret_access_key=CONFIG[\"AWS_SECRET_KEY\"])\n\n keys = r.hkeys('prkng:push')\n if not keys:\n retu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add chunks of data from a stream to a queue until the stream is empty.
def _enqueue_output(stream, queue): for line in iter(lambda: stream.read(4096), b''): queue.put(line) stream.close()
[ "async def next_chunk():\n async for chunk_bytes in stream:\n return chunk_bytes", "def fill_queue(sock_udp, q_data, count_packets, bytes_per_packet):\n for _ in range(count_packets):\n buf, _ = sock_udp.recvfrom(bytes_per_packet)\n q_data.put(buf)\n cleanup_ifcs()", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }