query
stringlengths
9
9.05k
document
stringlengths
10
222k
negatives
listlengths
19
20
metadata
dict
Identifies genes that are significantly enriched for insertions (CTGs). This function takes a DataFrame of insertions, coming from multiple samples, and identifies if any genes are more frequently affected by an insertion than would be expected by chance. These genes are called Commonly Targeted Genes (CTGs). CTGs are ...
def test_ctgs( insertions, # type: List[Insertion] reference, # type: Reference gene_ids=None, # type: Set[str] chromosomes=None, # type: Set[str] pattern=None, # type: str per_sample=True, # type: bool window=None #type: Tuple[int, int] ): # Default t...
[ "def genes_GT():\n df1=pd.read_csv(config['geneInfo'], sep=\" \")\n df1=df1[df1.chr == '22']\n df2=pd.read_csv(config['counts'], sep=\" \")\n genes=df1.merge(df2.gene_id, on=\"gene_id\")\n return list(set(genes['gene_id']))", "def joint_genotypes(variant_df, all_have_gt_samples=None, any_has_gt_sam...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Subsets insertions for given gene windows.
def _subset_to_windows( insertions, # type: List[Insertion] gene_windows # type: Dict[str, Tuple[str, int, int]] ): # type: (...) -> List[Insertion] # Create lookup trees. trees = { chrom: IntervalTree.from_tuples((i[1:]) for i in chrom_int) for chrom, chrom_int in itertools....
[ "def make_windows(annotations, window_size, step):\n annotations_bin_keys = {\"gene_name\", \"gene_chrom\", \"gene_start\", \"gene_end\", \"gene_strand\", \"gene_region_end\", \"gene_region_start\"}\n annotations_bin = {k: [] for k in annotations_bin_keys}\n annotations_bin[\"bin_start\"] = []\n annotat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests a given genomic region for enrichment in insertions.
def test_region( insertions, # type: List[Insertion] reference_seq, # type: pyfaidx.Fasta region, # type: Tuple[str, int, int] pattern=None, # type: Optional[str] intervals=None, # type: Optional[Iterable[Tuple[str, int, int]]] total=None, # type: Optional[int] ...
[ "def test_build_genomic_regions(self):\n\n CDS = pybedtools.BedTool(\"\"\"chr1\\t7700\\t7900\\tfoo\\t0\\t+\\n\n chr1\\t7999\\t8500\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n UTR5 = pybedtools.BedTool(\"\"\"chr1\\t7499\\t7700\\tfoo\\t0\\t+\\n\"\"\", from_string = Tr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Counts occurrences of pattern within given genomic region.
def count_region( reference_seq, # type: pyfaidx.Fasta region, # type: Tuple[str, int, int] pattern=None # type: Optional[str] ): # type: (...) -> int chrom, start, end = region seq = reference_seq[chrom][int(start):int(end)] return _count_sequence(seq, regex=_build_regex(patte...
[ "def approx_pattern_count(pattern: str, genome: str, d: int) -> int:\n return len(approx_pattern_matching(pattern, genome, d))", "def CountOccurrences(pattern, bwt, starts, occ_counts_before):\n # Implement this function yourself\n return 0", "def PatternCount(text, pattern):\n l_p = len(pattern)\n l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Counts occurrences of pattern in sequence.
def _count_sequence(sequence, regex=None): # type: (pyfaidx.Sequence, Pattern[str]) -> int if regex is None: count = len(sequence) else: count = sum((1 for _ in regex.finditer(str(sequence)))) return count
[ "def count_pattern(sentence, pattern):\n n = len(pattern)\n counter = 0\n for i in range(len(sentence) - n + 1):\n if sentence[i:i+n] == pattern:\n counter += 1\n\n return counter", "def PatternCount(text, pattern):\n l_p = len(pattern)\n l_t = len(text)\n tot = 0\n for i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merges overlapping genomic intervals.
def merge_genomic_intervals(intervals): # type: (Iterable[Tuple[str, int, int]]) -> Iterable[Tuple[str, int, int]] # Group intervals by chromosome. grouped_intervals = itertools.groupby( sorted(intervals), operator.itemgetter(0)) # Now yield merged intervals per chromosome. for chrom, grp ...
[ "def merge_ranges():", "def mergeOverlapping(intervals) :\n ## We sort the interval to allow for easy merging:\n slist = sorted(intervals,key = lambda val : val.lowerBound)\n retlist = []\n curr = None\n for i in range(len(slist)) :\n if curr is None :\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read CSV in folder "general" in database. Also used in setup.py
def open_general(file, setup=False): try: if setup is False: p = datapath(True, 'general', file) df = _pd.read_csv(p + '.csv') elif setup is True: p = datapath(True, 'general', file) df = _pd.read_csv(p + '.py') else: df = None # n...
[ "def read_csv_file(self):\n pass", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines whether the discrepancy has been sufficiently resolved; used as return value for fix_discrepancy.
def discrepancy_resolved(self): # If there's a discrepancy and distance change matches the existing data, we're good. if self.distance_change == self.existing_data: return True # If recommend_updates, i.e., if self.distance_change == self.new_data, we'll update the data and we're goo...
[ "def checkIfRecovered(self, person) -> bool:\n # Days that the person showing symptom is around 17.8 days ~= 18 days \n if (person.getInfectedLength() >= 23):\n return True\n return False", "def is_solved(value):\n return len(value) == 1", "def check_initial_confidence(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run when the palette is closed
def on_palette_close(self): pass
[ "def _on_close(self):\n self.shell_obj.closed()", "def panel_close_callback(self, panel, data):\n # We better make sure the presets are stored and saved\n self.store_preset()\n\n # Calling destroy() here, crashes LightWave (v11.0), so I have it\n # commented out, and relies on o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function is run when the palette is executed. Useful to gather initial data and send to html page
def on_palette_execute(self, palette: adsk.core.Palette): pass
[ "def __init__(self,palette_to_use='default'):\n palettes = {'default':self.initialize_default_palette,'gmd_paper':self.initialize_gmd_paper_palette}\n palettes[palette_to_use]()", "def _on_palette_change(self, palette_data: dict) -> None:\n # set the color from the metadata\n color = s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds the selection spec.
def build_selection_spec(client_factory, name): sel_spec = client_factory.create('ns0:SelectionSpec') sel_spec.name = name return sel_spec
[ "def build_selection_spec(name):\n sel_spec = vmodl.query.PropertyCollector.SelectionSpec()\n sel_spec.name = name\n return sel_spec", "def _make_select(self):\n conditions = []\n values = []\n picklist = None\n if self.selection_dict:\n select_d = self.selection_di...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds the traversal spec object.
def build_traversal_spec(client_factory, name, spec_type, path, skip, select_set): traversal_spec = client_factory.create('ns0:TraversalSpec') traversal_spec.name = name traversal_spec.type = spec_type traversal_spec.path = path traversal_spec.skip = skip traversa...
[ "def build_traversal_spec(name, type_, path, skip, select_set):\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec()\n traversal_spec.name = name\n traversal_spec.type = type_\n traversal_spec.path = path\n traversal_spec.skip = skip\n traversal_spec.selectSet = select_set\n return ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds the Recursive Traversal Spec to traverse the object managed object hierarchy.
def build_recursive_traversal_spec(client_factory): visit_folders_select_spec = build_selection_spec(client_factory, "visitFolders") # For getting to hostFolder from datacenter dc_to_hf = build_traversal_spec(client_factory, "dc_to_hf", "Datacenter", ...
[ "def build_recursive_traversal_spec():\n visit_folders_select_spec = build_selection_spec('visitFolders')\n # Next hop from Datacenter\n dc_to_hf = build_traversal_spec('dc_to_hf',\n vim.Datacenter,\n 'hostFolder',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds the Property Spec.
def build_property_spec(client_factory, type="VirtualMachine", properties_to_collect=["name"], all_properties=False): property_spec = client_factory.create('ns0:PropertySpec') property_spec.all = all_properties property_spec.pathSet = properties_to_collec...
[ "def build_property_spec(type_=vim.VirtualMachine,\n properties_to_collect=None, all_properties=False):\n if not properties_to_collect:\n properties_to_collect = ['name']\n\n property_spec = vmodl.query.PropertyCollector.PropertySpec()\n property_spec.all = all_properties\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds the Property Filter Spec.
def build_property_filter_spec(client_factory, property_specs, object_specs): property_filter_spec = client_factory.create('ns0:PropertyFilterSpec') property_filter_spec.propSet = property_specs property_filter_spec.objectSet = object_specs return property_filter_spec
[ "def build_property_filter_spec(property_specs, object_specs):\n property_filter_spec = vmodl.query.PropertyCollector.FilterSpec()\n property_filter_spec.propSet = property_specs\n property_filter_spec.objectSet = object_specs\n return property_filter_spec", "def get_prop_filter_spec(client_factory, o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the properties of the Managed object specified.
def get_object_properties(vim, collector, mobj, type, properties): client_factory = vim.client.factory if mobj is None: return None usecoll = collector if usecoll is None: usecoll = vim.get_service_content().propertyCollector property_filter_spec = client_factory.create('ns0:P...
[ "def getProperties( cls ):\n\t\timport inspect\n\t\tfrom basicproperty.basic import BasicProperty\n\t\tdef isABasicProperty( object ):\n\t\t\t\"\"\"Predicate which checks to see if an object is a property\"\"\"\n\t\t\treturn isinstance( object, BasicProperty )\n\t\treturn dict(getmembers( cls, isABasicProperty)).v...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds the Property Filter Spec Object.
def get_prop_filter_spec(client_factory, obj_spec, prop_spec): prop_filter_spec = \ client_factory.create('ns0:PropertyFilterSpec') prop_filter_spec.propSet = prop_spec prop_filter_spec.objectSet = obj_spec return prop_filter_spec
[ "def build_property_filter_spec(property_specs, object_specs):\n property_filter_spec = vmodl.query.PropertyCollector.FilterSpec()\n property_filter_spec.propSet = property_specs\n property_filter_spec.objectSet = object_specs\n return property_filter_spec", "def build_property_filter_spec(client_fact...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the list of properties for the collection of objects of the type specified.
def get_properties_for_a_collection_of_objects(vim, type, obj_list, properties): client_factory = vim.client.factory if len(obj_list) == 0: return [] prop_spec = get_prop_spec(client_factory, type, properties) lst_obj_specs = [] for obj i...
[ "def get_all_properties_type():\n\n results = client.db.property_types.find({})\n return send_result(data=list(results))", "def getProperties( cls ):\n\t\timport inspect\n\t\tfrom basicproperty.basic import BasicProperty\n\t\tdef isABasicProperty( object ):\n\t\t\t\"\"\"Predicate which checks to see if an ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run `code` with profiler. Used by ``%prun`` and ``%run p``.
def _run_with_profiler(self, code, opts, namespace): # Fill default values for unspecified options: opts.merge(Struct(D=[''], l=[], s=['time'], T=[''])) prof = profile.Profile() try: prof = prof.runctx(code, namespace, namespace) sys_exit = '' e...
[ "def runcode(self, code):\n try:\n buf = io.StringIO()\n with redirect_stdout(buf):\n exec(code, self.locals)\n result = self._result_from_stdout(buf)\n if result is None:\n result = self._result_from_code(code)\n self._last...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
read feature file, find out mass shift then correct
def feature_file_mass_correction(feature_filename: str): output_feature_filename = feature_filename + '.mass_corrected' ppm_shift = [] with open(feature_filename, 'r') as f: reader = csv.reader(f, delimiter=',') header = next(reader) seq_index = header.index("seq") mz_index =...
[ "def read_msp(infile_name,feat_lim_file=\"\",\n\t\t\t sum_feats=False,selected_features=[],\n\t\t\t max_dist=275,step_size=0.005,feat_bins=[],\n\t\t\t top_peaks=50,windowed_mode=False):\n\n\tinfile = open(infile_name)\n\n\tif len(feat_lim_file) > 0:\n\t\tselected_features = [float(f.strip()) for f in open(feat_lim_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Factory method to create a cache object from github/spilchen/baseball_id_db This is called as part of package initialization and so can be refered to via the Lookup variable. >>> from baseball_id import Lookup >>> Lookup.from_yahoo_ids([10794, 9542, 7578])
def create(cls): ssl._create_default_https_context = ssl._create_unverified_context c = lookup.Cache('https://raw.githubusercontent.com/spilchen/baseball_id_db/main/master.csv') return c
[ "def create_fake(cls):\n source = pkg_resources.open_text('baseball_id', 'sample.master.csv',\n encoding='iso-8859-1')\n c = lookup.Cache(source)\n return c", "def load_by_ids(cls,ids):\n if not ids or ids[0] == '':\n return None\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Factory method to create a fake data source This refers to a static data file that is in the current package. This function exists for testing purposes as it avoids network traffic to get the actual uptodate ID mapping.
def create_fake(cls): source = pkg_resources.open_text('baseball_id', 'sample.master.csv', encoding='iso-8859-1') c = lookup.Cache(source) return c
[ "def make_test_data(self):\n import data", "def _create_data_source(metadata):\n factory = metadata.get_callable()\n src = factory()\n engine = tools.get_engine()\n engine.add_source(src)\n return src", "def test_data_source_soaps_id_dynamic_datas_get(self):\n pass", "def test_fac...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The extracter moves files. Arguments input_folder and output_folder are set through GUI. Based on the values in the column called column_name in the spreadsheet, files are copied from input_folder to output_folder. Here, these are the gilbert_numbers in the spreadsheet fed from main(). The are matched to the file names...
def extracter(spreadsheet, column_name): print header, "Running the extracter." root=Tkinter.Tk() root.withdraw() root.update() input_folder=tkFileDialog.askdirectory(title="Inputfolder: Please choose a directory that contains your corpus files") root=Tkinter.Tk() root.withdraw() root.update() output_fold...
[ "def move_input_files():\n\n back_calc_dir = r\"C:\\Users\\Ginger\\Dropbox\\NatCap_backup\\Forage_model\\Forage_model\\model_results\\regional_properties\\back_calc_2014_total\"\n orig_input_dir = r\"C:\\Users\\Ginger\\Dropbox\\NatCap_backup\\Forage_model\\CENTURY4.6\\Kenya\\input\\regional_properties\\Worldc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Matches gilbert_sentences as contained in the inputfile to user input. Input can be numeric (e.g. entries in inputfile[items] that are = 2). Input can be characters (e.g. all entries in inputfile[transcription] that contain 'ɪ'). If output_csv is set, the resulting data will be written to a csv file. If move_data is se...
def main(inputfile, column, search_term, move_data=True, output_csv=False): parser = argparse.ArgumentParser() print header, "Running the sentence finder", header with open(inputfile, "r") as inputspread: inputdata=pandas.read_csv(inputspread, encoding="utf-8") for c in inputdata.columns: parser.add_argument("-...
[ "def process_file(path_in, path_out, threshold):\n infile = open(path_in, \"r\", encoding=\"utf-8\")\n outfile = open(path_out, \"w\", encoding=\"utf-8\")\n csv_reader = csv.reader(infile)\n csv_writer = csv.writer(outfile)\n for i, line in enumerate(csv_reader):\n try:\n text_id, t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build or update a Ticker metrics using a Quotecast object. Only the metrics which can be converted to float are supported. But that should be enough to handle all the real use cases.
def build_ticker_from_quotecast( quotecast: Quotecast, references: Dict[int, List[str]] = None, ticker: Ticker = None, ) -> Ticker: if references is None: references = dict() if ticker is None: ticker = Ticker() # SETUP PRODUCTS & METRICS ...
[ "def _update_metrics(self):\n raise NotImplementedError", "def update_metrics(self, round_num: int, metrics_to_append: Dict[str, Any]):\n raise NotImplementedError", "def add_ticker_to_df(self, ticker):\n\n new_stock = TickerData(ticker=ticker, use_early_replacements=self.use_early_replacements...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rebuild the request from history (self.__references).
def rebuild_request(self) -> Quotecast.Request: references = self.references request = Quotecast.Request() for vwd_id, metric in references.values(): request.subscriptions[vwd_id].append(metric) return request
[ "def action_rebuild(self, *args):\n\t\tself.get_active_image()._history._rebuild_from_history()", "def rebuild(self):\n _logger.info( \"Rebuilding the API Caches...\" )\n\n # fill out the data structures\n self._buildApiTypesList()\n #_buildMayaTypesList()\n \n self._buil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
check to see whether an id is for a group
def is_group(id): return id.startswith('G')
[ "def group_exists(groupid):", "def has_group():", "def group_exists(self, group_name):", "def check_uuid(self, obj, groupid):\n if self.get_uuid(obj) == groupid:\n return True", "def test_get_group__valid_id(self):\n\n self.assertEqual(entities.Group(self.config_dict['groups'][0]['id'],...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
check to see whether an id is for a user
def is_user(id): return id.startswith('U')
[ "def _check_id(self, user_id):\n add_params = {'user_ids': user_id}\n response = self._execute_requests('users.get', add_params)\n\n if 'error' in response:\n # проверка id на существование\n result = response['error']['error_msg']\n logger.error(f\"{result} '{u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
broadcast a new user joining the group
def user_joined_group(cls, group, user): text = "{} joined the group chat".format(user.username) cls._broadcast_group(group, None, group, text)
[ "def cli(ctx, group, user):\n return ctx.gi.users.add_to_group(group, user)", "def join(self):\n channel = self.data[0]\n user_pseudonym = VALIDATED_USERS.get_pseudonym(SOCKET_TO_USERID.get(self.source, None))\n\n if user_pseudonym and self.target:\n target_server = self.target...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
broadcast a user leaving the group
def user_left_group(cls, group, user): text = "{} left the group chat".format(user.username) cls._broadcast_group(group, None, group, text)
[ "def leave_group(self):\n\t\tself.sendMessage(ID_CTRL + \"LEAVE\", True)\n\t\tself.joinstate = 0\n\t\tself.createstate = 0\n\t\tself.__key = None", "def on_leave(self, room, user):\n pass", "def leave_group(self) -> Result:\n return self._execute_command('leaveGroup')", "async def user_removed_f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start an oef node.
def _start_oef_node(self, network_node):
[ "def launch_oef():\n script_path = os.path.join(\"scripts\", \"oef\", \"launch.py\")\n configuration_file_path = os.path.join(\"scripts\", \"oef\", \"launch_config.json\")\n print(\"Launching new OEF Node...\")\n subprocess.Popen(\n [\"python3\", script_path, \"-c\", configuration_file_path, \"--...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that a generated protocol's serialisation + deserialisation work correctly.
def test_generated_protocol_serialisation(self): # create a message reply_message = {1: "number one", 2: "number two", 7: "number seven"} # message 1 message = TwoPartyNegotiationMessage( message_id=1, dialogue_reference=(str(0), ""), target=0, ...
[ "def test_generated_protocol_serialisation_ct(self):\n # create a message with pt content\n some_dict = {1: True, 2: False, 3: True, 4: False}\n data_model = TProtocolMessage.DataModel(\n bytes_field=b\"some bytes\",\n int_field=42,\n float_field=42.7,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that a generated protocol could be used in exchanging messages between two agents.
def test_generated_protocol_end_to_end(self): # AEA components ledger_apis = LedgerApis({}, FETCHAI) wallet_1 = Wallet({FETCHAI: FETCHAI_PRIVATE_KEY_FILE}) wallet_2 = Wallet({FETCHAI: FETCHAI_PRIVATE_KEY_FILE}) identity_1 = Identity( name="my_aea_1", add...
[ "def test_trackProtocols(self):\n f = EventFeedLineFactory()\n p1 = f.buildProtocol(None)\n p2 = f.buildProtocol(None)\n \n self.assertNotIn(p1, f.connected_protocols)\n self.assertNotIn(p2, f.connected_protocols)\n\n p1.makeConnection(StringTransport())\n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test _specification_type_to_python_type method unsupported type.
def test__specification_type_to_python_type_unsupported_type(self): with self.assertRaises(TypeError): _specification_type_to_python_type("unsupported_type")
[ "def test__specification_type_to_python_type_unsupported_type(self):\n with self.assertRaises(ProtocolSpecificationParseError):\n _specification_type_to_python_type(\"unsupported_type\")", "def test_value_error_for_computing_missing_type():\n with pytest.raises(ValueError):\n compute_t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test _union_sub_type_to_protobuf_variable_name method tuple.
def test__union_sub_type_to_protobuf_variable_name_tuple(self, mock): _union_sub_type_to_protobuf_variable_name("content_name", "Tuple") mock.assert_called_once()
[ "def test__union_sub_type_to_protobuf_variable_name_tuple(self, mock):\n pytest.skip()\n _union_sub_type_to_protobuf_variable_name(\"content_name\", \"Tuple[str, ...]\")\n mock.assert_called_once()", "def _union_sub_type_to_protobuf_variable_name(\n content_name: str, content_type: str\n) ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test _includes_custom_type method positive result.
def test__includes_custom_type_positive(self, *mocks): content_type = "Union[str]" result = self.protocol_generator._includes_custom_type(content_type) self.assertTrue(result) content_type = "Optional[str]" result = self.protocol_generator._includes_custom_type(content_type) ...
[ "def _includes_custom_type(content_type: str) -> bool:\n\n if content_type.startswith(\"Optional\"):\n sub_type = _get_sub_types_of_compositional_types(content_type)[0]\n result = _includes_custom_type(sub_type)\n elif content_type.startswith(\"Union\"):\n sub_types = _get_sub_types_of_co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a text to a format ROUGE understands. The text is assumed to contain one sentence per line.
def convert_text_to_rouge_format(text, title="dummy title"): sentences = text.split("\n") sent_elems = [ "<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>" "{text}</a>".format(i=i, text=sent) for i, sent in enumerate(sentences, start=1) if sent != ''] html = """<html> <head> <title...
[ "def nltk_text(self, text):\n text = nltk.Text(word_tokenize(text))\n return text", "def process_text(model_name: str, text: str) -> spacy.tokens.Doc:\r\n nlp = load_model(model_name)\r\n return nlp(text)", "def normalize(self, text: str) -> str:", "def format_ocr_text(self, page):\n \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cross Entropy Calculates the cross entropy of two discrete distributions x and y.
def cross_entropy(x, y, bins, xy_probabilities=False): # calculate probabilities if probabilities == False if xy_probabilities: # same bins for x and y -> same length of x and y if xy_probabilities == True assert len(x) == len(y) # if x does not sum up to 1, raise an error if not...
[ "def joint_entropy(x: np.array, y: np.array):\n # Note the dimensions of X and Y should be same\n xy = np.c_[x, y] # [[x1,y1], [x2,y2]...[xn,yn]]\n h_xy = entropy(xy)\n return h_xy", "def cross_entropy(p1, p2):\n xh = 0\n\n # TODO -- Calculate cross-entropy value H(p1, p2) in nats\n for x in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""Joint Entropy Calculates the joint entropy of two discrete distributions x and y. This is the combined Entropy of X added to the conditional Entropy of x given y.
def joint_entropy(x, y, bins): # assert array length assert len(x) == len(y) # get the bins, x and y get their own bins in case of joint entropy bins = get_2D_bins(x, y, bins) # get the joint histogram joint_hist = np.histogram2d(x, y, bins)[0] # calculate the joint probability and add a ...
[ "def joint_entropy(x: np.array, y: np.array):\n # Note the dimensions of X and Y should be same\n xy = np.c_[x, y] # [[x1,y1], [x2,y2]...[xn,yn]]\n h_xy = entropy(xy)\n return h_xy", "def H_2(x, y, ns):\n\n if (len(x) != len(y)):\n print(\"H_2 warning : sequences of different lengths, using...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r"""KullbackLeibler Divergence Calculates the KullbackLeibler Divergence between two discrete distributions x and y. X is considered to be an empirical discrete distribution while y is considered to be the real discrete distribution of the underlying population.
def kullback_leibler(x, y, bins, xy_probabilities=False): if xy_probabilities: # if x does not sum up to 1, raise an error if not np.isclose(sum(x),1,atol=0.0001): raise ValueError('Probabilities in vector x do not sum up to 1.') # if y does not sum up to 1, raise an error ...
[ "def kl_divergence(x,y):\n\tassert (isinstance(x, BayesNet) and isinstance(y, BayesNet)), 'Must pass in BayesNet objects.'\n\tassert (x==y), 'Passed-in BayesNet objects are not structurally equal.'\n\n\tdistance = np.sum( x.flat_cpt() * np.log( x.flat_cpt() / y.flat_cpt() ) )\n\treturn distance", "def kl_divergen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Main method to get dependent review IDs of a specific review request on the ReviewBoard.
def main(): parameters = parse_parameters() review_request_url = "%s/api/review-requests/%s/" % (REVIEWBOARD_URL, parameters.review_id) handler = ReviewBoardHandler() review_request = handler.api(review_request_url)["review_request"] review_id...
[ "def __extract_review_ids(self, soup):\n try:\n id_tags = soup.find_all('div', attrs={'class':'review', 'itemprop':'reviews'})\n review_ids = [int(re.sub('review_', '', tag.get('id'))) for tag in id_tags]\n return review_ids\n except:\n raise", "def get_re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initalize with a usersupplied list of segments.
def __init__(self, segments, lemma = None, case = None): self.segments = segments if isinstance(self.segments, str): self.segments = [Segment.new_segment(s) for s in self.segments] self.lemma = lemma self.case = case
[ "def construct_segments(self):\n for strand in self.strand_list:\n strand.construct_segment()", "def set_segments(self, segments):\n self.send_command(Command.SET_SEGMENT_COUNT, [segments])", "def form_segment(self, node_oid):\n # init empty segment and stuff\n new_segment = S...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a WordForm of the given CV shape with random segments.
def random_segs(cls, shape, lemma = None, case = None): # For each C or V segment in `shape`, initialize a random Segment of the # appropriate type. Initialize a new WordForm with all these Segments. return cls([Segment(seg_type = seg) for seg in shape], lemma, case)
[ "def create_word(self):\r\n\r\n template = self.word_constructions.get()\r\n word = \"\"\r\n for c in template:\r\n if c == \"v\":\r\n letter = self.get_letter(100)\r\n else:\r\n letter = self.get_letter(0)\r\n word += letter\r\n\r\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the suffix vowel.
def add_suffix(self, suffix): # Append the suffix vowel to this WordForm. self.segments.append(Segment.new_segment(suffix))
[ "def _replace_suffix(self, word, suffix, replacement):\n ...", "def add_letter_suffix(self, letter_char, suffix_char):\n letter = self.add_letter(letter_char)\n suffix = self.add_letter(suffix_char)\n letter.add_suffix(suffix)", "def add_suffix(word, suffix):\n suffix, sep, rest =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Entrench at the level of the WordForm.
def entrench_word(self, cloud, paradigms, informativity, categorization, unique_base): # Entrench within the WordForm's own cloud. Iterate over positions in # the WordForm (up to three Segments). for pos, seg in enumerate(self.segments): if pos < 3: ...
[ "def set_level(self,level):\r\n \r\n self.level = level", "def level_up(self, elemental) -> None:\n elemental.add_exp(elemental.exp_to_level)", "def _page_update_higher_textequiv_levels(level, pcgts):\n regions = pcgts.get_Page().get_TextRegion()\n if level != 'region':\n for r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add noise to the nonsuffix segments in the WordForm.
def add_noise(self): self.segments = deepcopy(self.segments) # Iterate through each of the first three Segments in the WordForm. for i in range(3): # Add noise to each Segment. self.segments[i].add_noise()
[ "def _update_noise(self, nsig):\n self.integrator.noise.nsig = numpy.array([nsig, ])", "def add_noise(self):\n \n self.vis_freq = self.skyvis_freq + self.vis_noise_freq", "def remove_noise(text):\n\n text = text.split()\n word = [word for word in text if word not in [\n 'pertai...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an Frame object, will return the bytes of that Frame's file. If provided, will also scale the size of the image and convert to the required format.
def convert_frames(frame, img_format: str, scale=None) -> bytes: path = frame.filename with open(path, "rb") as image_file: im = Image.open(image_file) converted_img = BytesIO() if scale: _LOGGER.debug("Scaling the image") (width, height) = (int(im.width * scale...
[ "def get_frame(self):\n success, image = self.streamer.read()\n ret, jpeg = cv2.imencode('.jpg', image)\n return jpeg.tobytes()", "def get_frame(self):\n if self.stream.isOpened():\n success, image = self.stream.read()\n if image is None:\n image = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a dictionary, changes the key from snake case to lower camel case.
def lower_camel_casify_dict_keys(d: dict) -> dict: return {to_camel_case(key): value for key, value in d.items()}
[ "def dict_keys_snake_to_camel_case(snake_dict: dict) -> dict:\n\n camel_dict = dict()\n\n for key, val in snake_dict.items():\n if isinstance(key, str):\n camel_dict[snake_to_camel_case(key)] = val\n else:\n camel_dict[key] = val\n\n return camel_dict", "def convert_ca...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure IPCMessageSubscriber.connect gets wrapped by salt.utils.asynchronous.SyncWrapper.
async def test_ipc_connect_sync_wrapped(io_loop, tmp_path): if salt.utils.platform.is_windows(): socket_path = ports.get_unused_localhost_port() else: socket_path = str(tmp_path / "noexist.ipc") subscriber = salt.utils.asynchronous.SyncWrapper( salt.transport.ipc.IPCMessageSubscriber...
[ "def _adapter_connect(self):\r\n error = super(AsyncoreConnection, self)._adapter_connect()\r\n if not error:\r\n self.socket = PikaDispatcher(self.socket, None,\r\n self._handle_events)\r\n self.ioloop = self.socket\r\n self._on...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Receives a list and a search term. Use a loop to go through the list and see if the string is there. if it is return "string found". if not, return "string not found"
def search_for_string(lst_str, stringy): if stringy in lst_str: return "Found string" else: return "string not found"
[ "def search(self, q):\n for x in self.strings:\n if q in x:\n return True\n \n return False\n\n\n pass", "def grep(lst, term):\n if isinstance(term, list):\n term = \"\".join([f\"(?=.*{x})\" for x in term])\n\n matches = [i for (i, s) in enume...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
andExpr = relationalExpr { "and" relationalExpr }
def andExpr( ): #DOUBLE CHECK THIS tok = tokens.peek( ) if debug: print("andExpr: ", tok) left = relationalExpr( ) #does the left side of the grammar tok = tokens.peek( ) while tok == "and": #checks to see if there is the token "and" and will preform what is inside the curly bracket since it is a series tokens...
[ "def and_expression(cls, tree):\n if len(tree.children) == 1:\n assert tree.child(0).data == 'cmp_expression'\n return cls.cmp_expression(tree.child(0))\n\n assert tree.child(1).type == 'AND'\n op = tree.child(1)\n return cls.build_binary_expression(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
relationalExpr = addExpr [ relation addExpr ]
def relationalExpr( ):#MAKE SURE I USED THE RIGHT LOGIC FOR THIS tok = tokens.peek( ) if debug: print("relationalExpr: ", tok) left = addExpr( ) expr = "" tok = tokens.peek( ) if tok in relations: rel = relation( ) # expecting a relation to start off right = expression( ) # if there is a relation we expect ...
[ "def relation( ):\n \n tok = tokens.peek( )\n if syntaxDebug: print (\"relation: \", tok)\n left = addExpr( )\n tok = tokens.peek( )\n while str(tok) in tokens.relational:\n op = tok\n tokens.next()\n \n right = relation()\n left = BinaryExpr(op, left, right)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
factor = number | '(' expression ')'
def factor( ): tok = tokens.peek( ) if debug: print ("Factor: ", tok) if re.match( Lexer.number, tok ): expr = Number(tok) tokens.next( ) tok = tokens.peek( ) return expr if tok == "(": tokens.next( ) # or match( tok ) expr = addExpr( )#might need to change to expression( ) tokens.peek( ) tok = ma...
[ "def factor( ):\n \n tok = tokens.peek( )\n if syntaxDebug: print (\"Factor: \", tok)\n if tok == \"(\":\n tokens.next()\n expr = addExpr()\n tokens.next()\n return expr\n if re.match(tokens.number, tokens.peek()):\n expr = Number(tok)\n tokens.next( )\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
term = factor { ('' | '/') factor }
def term( ): tok = tokens.peek( ) if debug: print ("Term: ", tok) left = factor( ) tok = tokens.peek( ) while tok == "*" or tok == "/": tokens.next() right = factor( ) left = BinaryExpr( tok, left, right ) tok = tokens.peek( ) return left
[ "def term( ):\n \n tok = tokens.peek( )\n if syntaxDebug: print (\"Term: \", tok)\n left = factor( )\n tok = tokens.peek( )\n while tok == \"*\" or tok == \"/\":\n op = tok\n tokens.next()\n \n right = term( )\n left = BinaryExpr(op, left, right)\n tok = t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
addExpr = term { ('+' | '') term }
def addExpr( ): tok = tokens.peek( ) if debug: print ("addExpr: ", tok) left = term( ) tok = tokens.peek( ) while tok == "+" or tok == "-": tokens.next() right = term( ) left = BinaryExpr( tok, left, right ) tok = tokens.peek( ) return left
[ "def addExpr( ):\n tok = tokens.peek( )\n if syntaxDebug: print (\"addExpr: \", tok)\n \n left = term( )\n tok = tokens.peek( )\n while tok == \"+\" or tok == \"-\":\n op = tok\n tokens.next()\n \n right = addExpr( )\n left = BinaryExpr(op, left, right)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
whileStatement = "while" expression block
def parseWhileStatement( ): # parse rountine for while and uses the while class to print out the appropriate string tok = tokens.peek( ) if debug: print( "whileStatement: ", tok ) start = match( "while" ) expr = expression( ) blk = parseBlock( ) tok = tokens.peek( ) whileString = whileStatement( start, expr, bl...
[ "def _parse_while_statement(self):\n self._match('TK_WHILE')\n self._parse_expression()\n self._match('TK_DO')\n self._parse_statement()", "def _analyse_stmt_While(self, statement: ast.While, *, next: CFNode) -> CFNode:\n # Analyse the else branch.\n else_node = self._ana...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ifStatement = "if" expression block [ "else" block ]
def parseIfStatement( ): # parse rountine for the if and uses the if class to print out the appropriate string tok = tokens.peek( ) if debug: print( "ifStatement: ", tok ) start = match( "if" ) expr = expression( ) blk = parseBlock( ) elseblk = None tok = tokens.peek( ) if tok == "else": match( "else" ) el...
[ "def stmt_if(executor, stmt):\n e = Expression()\n result = e.eval(stmt._tokens, symbols=executor._symbols)\n if not result:\n executor.goto_next_line()", "def test_if_elseif_paren_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif (foo and bar) or foo and (bar or (foo and bar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
assign = ident "=" expression eoln
def parseAssign( ): # parse rountine for the assign and uses the assign class to print out the appropriate string tok = tokens.peek( ) if debug: print( "assign: ", tok ) if re.match( Lexer.identifier, tok ): ident = VarRef( tok ) else: error( "Invalid identifier" ) tok = tokens.next( ) equals = match( "=" )...
[ "def handle_assignment(stmt):\n\n identifier = ast.Name(id=stmt[0][1], ctx=ast.Store())\n value = Parser.handle_arithmetic(stmt[2:])\n return ast.Assign(targets=[identifier], value=value)", "def _analyse_stmt_Assign(self, statement: ast.Assign, *, next: CFNode) -> CFNode:\n return self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
statement = ifStatement | whileStatement | assign
def statement( ): # parse rountin for statement that makes sure the token is one of the following, eventually there will be an error caught tok = tokens.peek( ) if debug: print( "statement: ", tok ) if tok == "if": stat = parseIfStatement( ) return stat elif tok == "while": stat = parseWhileStatement( ) re...
[ "def stmt_if(executor, stmt):\n e = Expression()\n result = e.eval(stmt._tokens, symbols=executor._symbols)\n if not result:\n executor.goto_next_line()", "def _analyse_stmt_Assign(self, statement: ast.Assign, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
stmtList = { statement }
def stmtList( ): tok = tokens.peek( ) if debug: print( "stmtList: ", tok ) stat = statement( ) return stat
[ "def add_statements(self, stmts):\n for stmt in stmts:\n self.statements.append(stmt)", "def get_statement_list(self, insupdel=0):\n #NOTE: statement = [record, {...}]\n result = []\n try:\n if insupdel == StatementType.INSERT:\n statements = self.s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns ssh username for connecting to cluster workers.
def get_ssh_user(): return getpass.getuser()
[ "def get_ssh_user(self):\n if self.configuration.get(\"pg_ssh_user\"):\n return \"%s@\" % self.configuration.get(\"pg_ssh_user\")\n else:\n return \"%s@\" % DEFAULT_SSH_USER", "def master_username(self) -> str:\n return pulumi.get(self, \"master_username\")", "def exec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns ssh key to connecting to cluster workers. If the env var TUNE_CLUSTER_SSH_KEY is provided, then this key will be used for syncing across different nodes.
def get_ssh_key(): path = os.environ.get("TUNE_CLUSTER_SSH_KEY", os.path.expanduser("~/ray_bootstrap_key.pem")) if os.path.exists(path): return path return None
[ "def ssh_key(self) -> str:\n return pulumi.get(self, \"ssh_key\")", "def cluster_key(self):\n node = self.get_node()\n try:\n key = node.oget(\"cluster\", \"secret\")\n return self.prepare_key(key)\n except Exception as exc:\n pass\n import uuid\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
writes uuids and extras of given nodes to a file (json). This is useful for import/export because currently extras are lost. Therefore this can be used to save and restore the extras on the nodes.
def export_extras(nodes, filename='node_extras.txt'): #outstring = ''#' node uuid | extras \n' outdict = {} for node in nodes: if isinstance(node, int): #pk node = load_node(node) elif isinstance(node, basestring): #uuid node = load_node(node) if not isinsta...
[ "def write(node, filepath):\n data = read(node)\n\n if not data:\n return\n\n with open(filepath, 'w') as f:\n json.dump(data, f, indent=4, sort_keys=True)\n\n return data", "def import_extras(filename):\n\n all_extras = {}\n\n # read file\n #inputfile = open(filename, 'r')\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
reads in nodes uuids and extras from a file and aplies them to nodes in the DB. This is useful for import/export because currently extras are lost. Therefore this can be used to save and restore the extras on the nodes.
def import_extras(filename): all_extras = {} # read file #inputfile = open(filename, 'r') #lines = inputfile.readlines() #for line in lines[1:]: # splitted = line.split(' | ') # uuid = splitted[0].rstrip(' ') # extras = splitted[1].rstrip(' ') # #extras = dict(extras) ...
[ "def export_extras(nodes, filename='node_extras.txt'):\n\n #outstring = ''#' node uuid | extras \\n'\n outdict = {}\n for node in nodes:\n if isinstance(node, int): #pk\n node = load_node(node)\n elif isinstance(node, basestring): #uuid\n node = load_node(node)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method deletes all AiiDA nodes in the DB, which have a extra trash=True And all their children. Could be advanced to a garbage collector. Be careful to use it.
def delete_trash(): #query db for marked trash q = QueryBuilder() nodes_to_delete_pks = [] q.append(Node, filters = {'extras.trash': {'==' : True} } ) res = q.all() for node in res: nodes_to_delete_pks.append(node[0].dbnode.pk) pri...
[ "def _delete_all(self):\n logging.info(\"Remove all nodes and relations from database.\")\n self.graph.delete_all()\n return", "def DeleteAllItems(self):\r\n\r\n self.DeleteRoot()", "def clear_db():\n humans = Human4j.nodes.all()\n for h in humans:\n h.delete()\n bino...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns a list of node uuids for a given group as, name, pk, uuid or group object
def get_nodes_from_group(group, return_format='uuid'): from aiida.orm import Group from aiida.common.exceptions import NotExistent nodes = [] g_nodes = [] try: group_pk = int(group) except ValueError: group_pk = None group_name = group if group_pk is not None: ...
[ "def get_groups_uuid(\n export_data: Dict[str, Dict[int, dict]], silent: bool\n) -> Dict[str, List[str]]:\n EXPORT_LOGGER.debug(\"GATHERING GROUP ELEMENTS...\")\n groups_uuid = defaultdict(list)\n # If a group is in the exported data, we export the group/node correlation\n if GROUP_ENTITY_NAME in exp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function returns the default anchors given the image shapes and the anchors per grid point. The grid has width and height equal to the final's layer output.
def set_anchors(mc): H, W, C = _get_output_shape(mc) B = mc.ANCHOR_PER_GRID X = np.array(mc.INITIAL_ANCHOR_SHAPES) X[:,0] *= mc.IMAGE_WIDTH X[:,1] *= mc.IMAGE_HEIGHT anchor_shapes = np.reshape( # it refers to the anchor width and height [X] * H * W, (H, W, B, 2) ) center_x = np.reshape( ...
[ "def generate_anchors(img_params, hyper_params):\n anchor_count = hyper_params[\"anchor_count\"]\n stride = hyper_params[\"stride\"]\n height, width, output_height, output_width = img_params\n #\n grid_x = np.arange(0, output_width) * stride\n grid_y = np.arange(0, output_height) * stride\n #\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of created posts for the given author
def create_multiple_posts(author, num, ptext = TEXT, visibility = ACL_DEFAULT): posts = [] for i in range(num): posts.append(Post.objects.create(content = ptext, author = author, visibility=visibility)) return posts
[ "def get_posts(from_users):\r\n articles = list()\r\n load_dotenv()\r\n resp = requests.get(URL, auth=(os.getenv('NEWSGROUP_USR'), os.getenv('NEWSGROUP_PASS')))\r\n if resp.ok:\r\n content = BeautifulSoup(resp.content, 'html.parser')(\"tr\")\r\n id_pattern = re.compile(r'id=(\\d+)')\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test to ensure that all authors added to relationship are in the returned data Called after a retrieve relationship test has passed
def authors_in_relation(context, data, authors): guids = [a.id for a in authors] guids = map( lambda x: str(x).replace('-', ''), guids) for guid in guids: context.assertTrue(unicode(guid) in data)
[ "def test_item_add_authors(self):\n\n actual_item = Item.objects.get(id=101)\n user_first = CustomUser.objects.get(id=101)\n user_second = CustomUser.objects.get(id=102)\n users = [user_first, user_second]\n actual_item.update_authors(authors_add=users)\n expected_item = It...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create Friends and Friends of Friends and associated posts
def create_friends(friend, friendors, create_post = True, visibility = ACL_DEFAULT): for friendor in friendors: friend.add_friend(friendor) friendor.add_friend(friend) # FriendRelationship.objects.create(friendor = friendor, friend = friend) if create_post: Post.objects....
[ "def add_friend(request, pk):\n new_friend = User.objects.get(pk=pk)\n Friend.make_friend(request.user, new_friend)\n return redirect('posts:posts-list')", "def create_friends(user, existing_friends):\n #ToDo Add error handling\n bulk_insert = []\n existing_friend_ids = []\n for friend in exi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes post author, comment author and creates a post and associated comment
def create_post_with_comment(pauthor, cauthor, visibility, ptext, ctext): post = Post.objects.create(content = ptext, author = pauthor, visibility=visibility) comment = Comment.objects.create(comment = ctext, post = post, author = cauthor) return (post, comment)
[ "def create_comment(post, author, content):\n return Comment.objects.create(post=post, author=author, content=content)", "def create(cls, author, raw_comment, parent):\n\n html_comment = mistune.markdown(raw_comment)\n # todo: any exceptions possible?\n comment = cls(author=author,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes response.data and confirms no repeated guids (No repeated posts)
def assertNoRepeatGuids(context, posts): guids = [p['guid'] for p in posts] context.assertTrue(len(set(guids)) == len(posts), "Some guids repeated")
[ "def check_duplicates():\n print \"Building data set...\\n\"\n\n rows = db.links.find()\n seen = set()\n count = 0\n for row in rows:\n value = hashlib.md5(row['body'].encode('utf8')).hexdigest()\n if value in seen:\n count += 1\n print row['category'], row['_id']\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compares a list of authors against a list of displaynames
def cross_check(context, authors, poscom): displaynames = [x['author']['displayname'] for x in poscom] for author in authors: if author.user.username not in displaynames: context.assertFalse(True, "%s not in list" %author.user.username)
[ "def compare_authors(query_author, rg_author):\n \n # Checks if rg_author has any special non-ASCII characters. Translates query_author based on that and sets the author's first and last name strings.\n # Still doesn't address if one half of name uses UTF-8 only characters and the other half doesn't) but u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cross checks a list of authors against post
def assertAuthorsInPosts(context, authors, posts): cross_check(context, authors, posts)
[ "def cross_check(context, authors, poscom):\n displaynames = [x['author']['displayname'] for x in poscom]\n\n for author in authors:\n if author.user.username not in displaynames:\n context.assertFalse(True, \"%s not in list\" %author.user.username)", "def author_ManyToMany_entry_check(): ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cross checks a list of authors against comments
def assertAuthorsInComments(context, authors, comments): cross_check(context, authors, comments)
[ "def cross_check(context, authors, poscom):\n displaynames = [x['author']['displayname'] for x in poscom]\n\n for author in authors:\n if author.user.username not in displaynames:\n context.assertFalse(True, \"%s not in list\" %author.user.username)", "def assertAuthorsInPosts(context, aut...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a list of cachedauthors and adds them to the author follower list
def create_cached_author_followers(author, followers): for f in followers: author.followers.add(f)
[ "def set_authors(self, authors):\n\t\tself.authors = authors", "def add_all_followers(twitter, users):\n for u in users:\n #print(\"Outside: Requesting followers for screen_name %s\" % u['screen_name'])\n if u['protected'] != True:\n response = get_followers(twitter, u['screen_name'])\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates an interior node with the given operator (a token), and left and right operands (other nodes).
def __init__(self, opToken, leftOper, rightOper): self.operator = opToken self.leftOperand = leftOper self.rightOperand = rightOper
[ "def __create_internal_node_by_operator(operator: PatternStructure, sliding_window: timedelta, parent: Node = None):\n operator_type = operator.get_top_operator()\n if operator_type == SeqOperator:\n return SeqNode(sliding_window, parent)\n if operator_type == AndOperator:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the expression in prefix form.
def prefix(self): return str(self.operator) + " " + self.leftOperand.prefix() + " " + self.rightOperand.prefix()
[ "def generate_prefix_expression(self, prefix):\n list_prefixes = [prefix]\n\n for index in range(1, len(prefix)):\n expr = \"{}{}\".format('|^', prefix[index:])\n list_prefixes.append(expr)\n\n return ''.join(list_prefixes)", "def prefix_to_infix(self, expr):\n p,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the expression in infix form (fully parenthesized).
def infix(self): return "(" + self.leftOperand.infix() + " " + str(self.operator) + " " + self.rightOperand.infix() + ")"
[ "def trans_infix_prefix(expression):\n expression = expression.replace(' ', '')\n symbol_priority = {'*': 10, '/': 10, '+': 5, '-': 5, '(': 0, ')': 0}\n symbol_stack = []\n new_expression = ''\n\n for i in range(len(expression) - 1, -1, -1):\n item = expression[i]\n\n if item.isdigit():...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns all possible velocity dispersons from all particles found in the data set. A particle filter can be passed using "filter" which is a list
def compute_velocity_dispersion(data, types = None, fields = None, filter = None): types_to_fields = {'x': 'particle_velocity_x', 'y': 'particle_velocity_y', 'z': 'particle_velocity_z', 'r': 'particle_velocity_spherical_radius', ...
[ "def get_velocities(self):\n\n return np.array([p.velocity for p in self.particles])", "def __repr__(self):\n print(\"Particle Filter (each element: [particle_x, particle_y, particle_weight])\")\n print(self.all_particles_coordinates())", "def particle_filter(Y, initial, transition, emissio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This api does not return xml
def xml(self): raise NotImplementedError('This api does not return xml')
[ "def content_api_xml(url, request):\n headers = {'content-type': 'application/xml'}\n content = 'xml string'\n return response(status_code=200,\n content=content,\n headers=headers,\n request=request)", "def get_document_xml...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns whether erorr is NOAUTH
def noauth(self): try: # some endpoints dont return json return self.json['response'].get('error_id') == 'NOAUTH' except: return False
[ "def unauthorized():\n return HttpError(401)", "def check_unauthorized_response(response: HTTPResponse) -> bool:\n return response.status_code == 403", "def send_not_authenticate_resp():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify can select Maven option
def test_should_choose_maven(self): search_str = "//*[text()='Maven Project']" els = self.driver.find_elements_by_xpath(search_str) self.assertGreater(len(els), 0, 'Maven project is not found!') els[0].click()
[ "def check():\n if not has('cpan-outdated', 'cpanm'):\n return False\n return True", "def can_install_project(self):\n return True", "def test_validate_project(self):\n pass", "def validate_project():\n conf = get_config() \n # TODO: Run checks on partitioning paramters", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks that all transformers in self.transformer_list are compatible with methods fit, transform and fit_transform.
def _check_transformers(self): assert all([hasattr(trf, "fit") for trf in self.transformer_list]), "At least one transformer object is not " \ "compatible with 'fit' method." assert all([hasattr(trf, "transform") for trf in sel...
[ "def _validate_transformer(\n self,\n ) -> None:\n if not (\n hasattr(self.transformer, \"fit\") # noqa: WPS421\n and hasattr(self.transformer, \"transform\") # noqa: WPS421\n and hasattr(self.transformer, \"fit_transform\") # noqa: WPS421\n ):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deduce correct spark dtype from pandas dtype for column col of pandas dataframe df
def infer_spark_dtype(df, col): logger = logging.getLogger(__name__ + ".infer_spark_dtype") pd_dtype = df.dtypes[col] # get a sample from column col sample = df[col].dropna() if sample.shape[0] == 0: logger.warning("column %s of dtype %s containing nulls found" % (col, pd_dtype)) ...
[ "def get_data_type(df, col):\n if col not in df.columns:\n raise KeyError(f'Column \"{col:s}\" not in input dataframe.')\n dt = dict(df.dtypes)[col]\n\n if hasattr(dt, \"type\"):\n # convert pandas types, such as pd.Int64, into numpy types\n dt = type(dt.type())\n\n try:\n # ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run a command and echo it first
def run_cmd(call, cmd, *, echo=True, **kwargs): if echo: print('$> ' + ' '.join(map(pipes.quote, cmd))) return call(cmd, **kwargs)
[ "def echo(what):\n if '\\n' not in what:\n cmd_print(f'echo {shlex.quote(what)}')\n return CommandOutput(what + '\\n')", "def run_single_process(command):\n try:\n print command\n call(command, shell=1)\n sys.stdout.write('.')\n sys.stdout.flush()\n except KeyboardIn...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the last commit to modify the given paths
def last_modified_commit(*paths, **kwargs): return check_output([ 'git', 'log', '-n', '1', '--pretty=format:%h', '--', *paths ], **kwargs).decode('utf-8')
[ "def get_last_commit(self, repo):\n return self.get_commits(repo, 1)[0]", "def last_commit(self, tree, path):\n raise RepositoryError(\"Abstract Repository\")", "def last_commit(repopath):\n command = \"cd %s; git log -1i --date=iso\" % repopath\n out = run.command(command)\n if out:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the last modified date (as a string) for the given paths
def last_modified_date(*paths, **kwargs): return check_output([ 'git', 'log', '-n', '1', '--pretty=format:%cd', '--date=iso', '--', *paths ], **kwargs).decode('utf-8')
[ "def _get_last_modified_date(path):\n last_date = 0\n root_dir, subdirs, files = os.walk(path).next()\n # get subdirs and remove hidden ones\n subdirs = [s for s in subdirs if not s.startswith('.')]\n for subdir in subdirs:\n for root, _, _ in os.walk(join(path, subdir)):\n base = o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return whether the given paths have been changed in the commit range Used to determine if a build is necessary
def path_touched(*paths, commit_range): return check_output([ 'git', 'diff', '--name-only', commit_range, '--', *paths ]).decode('utf-8').strip() != ''
[ "def check_if_changed():\n if not len(os.listdir(images_path)) == 0:\n current_staging_hashes = get_all_path_hashes(staging_path)\n head_path = get_wit_path(keyword=get_current_commit_id())\n head_hashes = get_all_path_hashes(head_path)\n else:\n return True\n changed = False\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get docker build args dict, rendering any templated args.
def render_build_args(options, ns): build_args = options.get('buildArgs', {}) for key, value in build_args.items(): build_args[key] = value.format(**ns) return build_args
[ "def render_build_args(image_options, ns):\n build_args = image_options.get('buildArgs', {})\n for key, value in build_args.items():\n build_args[key] = value.format(**ns)\n return build_args", "def dockerargs(self) -> Dict:\n return (\n {'PIP_REQUIREMENTS': self.requirements}\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cached getter for docker client
def docker_client(): return docker.from_env()
[ "async def get_docker_client(self) -> \"DockerClient\":", "def docker(self) -> DockerClient:\n return self.__docker", "def docker_client() -> DockerClient:\n return docker.from_env()", "def highlevel_docker_client(self):\n \n return utils.init_docker_client(self.client_kwargs, self.tls...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return whether an image needs pushing
def image_needs_pushing(image): d = docker_client() try: d.images.get_registry_data(image) except docker.errors.APIError: # image not found on registry, needs pushing return True else: return False
[ "def isImageAvailable(self) -> bool:\n if not self.GUIFeatures:\n if self.imageReturnQueue.empty():\n return False\n else:\n return True\n\n else:\n if len(self.images)==0:\n return False\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return whether an image needs building Checks if the image exists (ignores commit range), either locally or on the registry.
def image_needs_building(image): d = docker_client() # first, check for locally built image try: d.images.get(image) except docker.errors.ImageNotFound: # image not found, check registry pass else: # it exists locally, no need to check remote return False ...
[ "def image_needs_pushing(image):\n d = docker_client()\n try:\n d.images.get_registry_data(image)\n except docker.errors.APIError:\n # image not found on registry, needs pushing\n return True\n else:\n return False", "def check_image(self, tag):\n image_name = self.b...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update name/values.yaml with modifications
def build_values(name, values_mods): values_file = os.path.join(name, 'values.yaml') with open(values_file) as f: values = yaml.load(f) for key, value in values_mods.items(): parts = key.split('.') mod_obj = values for p in parts: mod_obj = mod_obj[p] pr...
[ "def _write_values(self, app_name, chart_dir, values):\n\n data = self._get_values(app_name, chart_dir)\n new_data = {**data, **values}\n new_raw = yaml.dump(new_data)\n\n values_path = \"%s/%s/values.yaml\" % (chart_dir, app_name)\n with open(values_path, mode=\"w\") as values_fi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Publish helm chart index to github pages
def publish_pages(name, paths, git_repo, published_repo, extra_message=''): version = last_modified_commit(*paths) checkout_dir = '{}-{}'.format(name, version) check_call([ 'git', 'clone', '--no-checkout', git_remote(git_repo), checkout_dir], echo=False, ) check_call(['git', ...
[ "def publish_pages(chart_name, chart_version, chart_repo_github_path, chart_repo_url, extra_message=''):\n\n # clone the Helm chart repo and checkout its gh-pages branch,\n # note the use of cwd (current working directory)\n checkout_dir = '{}-{}'.format(chart_name, chart_version)\n check_call(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the domain restrictions.
def add_domains_restriction(self, domain_restriction):
    """Store the domain restrictions and derive the sizing attributes.

    Parameters
    ----------
    domain_restriction
        The domain restriction data to attach to this instance.
        (Exact structure not visible here; presumably consumed by
        ``_get_size_var`` — TODO confirm.)
    """
    # NOTE(review): attribute name carries a typo ("restricion");
    # kept as-is because other code in the class may read it.
    self._domain_restricion = domain_restriction
    # _size_var must be assigned before _nr_of_bits, which sums it
    # (see _get_nr_of_bits).
    self._size_var = self._get_size_var()
    self._nr_of_bits = self._get_nr_of_bits()
[ "def prepare_domain_restrictions(self):\n for index, restriction in enumerate(self._domain_restrictions):\n self.add_specific_domain_restriction(index+1, restriction)", "async def setjradd(self, ctx, domain):\n allowedDomains = await self.config.guild(ctx.guild).allowedDomains()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the number of bits needed for an item.
def _get_nr_of_bits(self):
    """Return the total bit width of an item.

    Computed as the sum of the per-variable bit sizes held in
    ``self._size_var``.
    """
    sizes = self._size_var
    return sum(sizes)
[ "def getBitSize(self) -> int:\n return self._bitSize", "def get_item_size(item_type):\n\n if item_type == \"Boolean\":\n return 1\n elif item_type == \"Unsigned_8\":\n return 8\n elif item_type == \"Unsigned_16\":\n return 16\n elif item_type == \"Unsigned_32\":\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a random genome.
def get_random(self):
    """Return a random genome.

    Builds an all-ones string spanning the item's total bit width
    (the sum of ``self._size_var``) and passes it through
    ``utils.randomise_a_string`` to randomize it.
    """
    width = sum(self._size_var)
    return utils.randomise_a_string("1" * width)
[ "def get_random_genome(self):\n return random.choice(self.genomes)", "def generate_random_genome (size):\n return Genome(\"\".join([str(randint(0, 3)) for i in range(size)]))", "def generate():\n s = random_data.random_bytes(100)\n return generate_from_string(s)", "def get_random(self, miRormRNA):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new block cipher, configured in CTR mode.
def __init__(self, block_cipher, initial_counter_block, prefix_len, counter_len, little_endian): if len(initial_counter_block) == prefix_len + counter_len: self.nonce = _copy_bytes(None, prefix_len, initial_counter_block) """Nonce; not available if there is a fixed suff...
[ "def _create_ctr_cipher(factory, **kwargs):\n\n cipher_state = factory._create_base_cipher(kwargs)\n\n counter = kwargs.pop(\"counter\", None)\n nonce = kwargs.pop(\"nonce\", None)\n initial_value = kwargs.pop(\"initial_value\", None)\n if kwargs:\n raise TypeError(\"Invalid parameters for CTR...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Instantiate a cipher object that performs CTR encryption/decryption.
def _create_ctr_cipher(factory, **kwargs): cipher_state = factory._create_base_cipher(kwargs) counter = kwargs.pop("counter", None) nonce = kwargs.pop("nonce", None) initial_value = kwargs.pop("initial_value", None) if kwargs: raise TypeError("Invalid parameters for CTR mode: %s" % str(kwa...
[ "def __init__(self, block_cipher, initial_counter_block,\n prefix_len, counter_len, little_endian):\n\n if len(initial_counter_block) == prefix_len + counter_len:\n self.nonce = _copy_bytes(None, prefix_len, initial_counter_block)\n \"\"\"Nonce; not available if there is...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }