query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: list (lengths 19 to 20)
metadata: dict
Check that inner properties are stored as UTF-8
def _check_utf8(self, m, t, i): for term in six.itervalues(t): # six.text_type is str in Py3, unicode in Py2 self.assertIsInstance(term.id, six.text_type) self.assertIsInstance(term.name, six.text_type) self.assertIsInstance(term.desc, six.text_type) for ...
[ "def test_encoding(self):\n b = self.locator.get_model('b').model\n self.assertEqual(b['unicode'],\n self.unicode_data)", "def test_json_unicode(self):\n unicode_string = u\"東西\"\n encoded_and_decoded_string = json.loads(json.dumps(unicode_string))\n self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
load one or more results if there are tasks completed
def load_part_result(self): # while self.all_alive(): # when all the subp is alive # self.signal.wait(timeout=100) # check the subp every 100s self.signal.wait() # while not self.result_buffer.empty(): # empty() is unreliable!!! # while self.result_buffer.qsize() > 0: # q...
[ "def get_results(self):\n for t in self.task:\n print t.get()", "def load_all_runs(self) -> Sequence[RunResult]:", "def _perform_wait_any(self):\n\n if len(self._results_waiting) > 0:\n return self._extract_result()\n\n all_results = []\n for client in itervalue...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Match rows of a 2D array based on norm. Returns index of best match and a bool to indicate not being able to find a row. Assumes that nearest neighbours are similar in both images, i.e. not too many particles have moved out of FoV
def match_row(array,row): best_match = 0 best_difference = 100000 success = True for i in range(len(array[:,0])): #Loop over rows to find best match difference = np.sum((array[i,:]-row)**2) if difference < best_difference: best_difference = difference best_...
[ "def _match_when_rows_are_non_empty():\n # Matches for each column\n with tf.name_scope('non_empty_gt_boxes'):\n matches = tf.argmax(similarity_matrix, axis=-1, output_type=tf.int32)\n\n # Get logical indices of ignored and unmatched columns as tf.int64\n matched_vals = tf.reduce_max(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes 2 sets of locations and sorts them into a nice format for the affine magnifier, using the scikit-learn kNN implementation. This also sorts the issue of different numbers of features being detected. Will return 2 lists, possibly empty if no good matches are found...
def sort_locations(loc1,loc2): distancelist = [] #List for both arrays for locations in [loc1,loc2]: nbrs = NearestNeighbors(n_neighbors=4, algorithm='ball_tree').fit(np.array(locations)) distances, indices = nbrs.kneighbors(np.array(locations)) distancelist.append(distances) newloc1...
[ "def accelerated_matching(features1, features2, x1, y1, x2, y2):\n\n #############################################################################\n # TODO: YOUR CODE HERE #\n #############################################################################\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes img and locations; extracts windows around nanoparticles wholly contained within the field of view. windowsize = square box size
def combine_windows(image,nanoparticles,windowsize): oversampledshape = (windowsize*10,windowsize*10) total = np.zeros(oversampledshape) counter = 0 for point in nanoparticles: print(point) blobimage = image[int(point[1])-windowsize:int(point[1])+windowsize,int(point[0])-win...
[ "def display_image_with_windows(image):\n windows = Perspective_grid(image.shape[1], image.shape[0])\n\n plt.subplots()\n for enlargement in range(2, 4):\n image_copy = np.copy(image)\n color = [0, 255, 0]\n for window in windows:\n if window[2] - window[0] + 1 == 64 * enlar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads parameters from a readme file in comma-separated format (image, param \n). Ignores lines which can't be read.
def read_params(folderpath): f = open(folderpath+"readme.txt", 'r') x = f.readlines() f.close() imagelist, paramlist = [], [] for line in x: line = line.strip() separated = line.split(",") try: imagelist.append(int(separated[0])) paramlist.append(float...
[ "def read_ini_file( ):\n pkeys = {}\n for p in __para_name: pkeys[__para_name[p].lower()] = p\n set_defaults()\n iniFile = __param[init_file]\n cfg = ConfigParser()\n if iniFile and cfg.read(iniFile):\n for sect in cfg.sections():\n plist = [pname for pname in cfg[sect] if pname ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Standard jobs should run at most daily
def allowed_stdjob_schedule(build_schedule, data): # non standard jobs are not validated by this rule if not is_standard_job(data): return True raw_schedule = build_schedule # This check is about the interval, so the H values # in the input cron expression should be translated # to a si...
[ "def test_daily_fantasy_scoring(self):\n pass", "def runAllJobs():\n minSeconds = appConf.getint(\"TrendCron\", \"minSeconds\")\n\n enabled = db.PlaceJob.selectBy(enabled=True)\n queued = enabled.filter(jobs.orCondition())\n\n print(\"Starting PlaceJob cron_jobs\")\n print(f\" queued items:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create finite element. Arguments: family (string): the finite element family; cell: the geometric cell; degree (int): the polynomial degree (optional); form_degree (int): the form degree (FEEC notation, used when field is viewed as k-form); quad_scheme: the quadrature scheme (optional); variant: hint for the local basis function va...
def __init__(self, family, cell=None, degree=None, form_degree=None, quad_scheme=None, variant=None): # Note: Unfortunately, dolfin sometimes passes None for # cell. Until this is fixed, allow it: ...
[ "def __init__(self, family, domain=None, degree=None, quad_scheme=None,\n form_degree=None):\n if domain is None:\n cell = None\n else:\n domain = as_domain(domain)\n cell = domain.cell()\n ufl_assert(cell is not None, \"Missing cell in given...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct a new FiniteElement object with some properties replaced with new values.
def reconstruct(self, family=None, cell=None, degree=None): if family is None: family = self.family() if cell is None: cell = self.cell() if degree is None: degree = self.degree() return FiniteElement(family, cell, degree, quad_scheme=self.quadrature_s...
[ "def reconstruct(self, **kwargs):\n kwargs[\"family\"] = kwargs.get(\"family\", self.family())\n kwargs[\"domain\"] = kwargs.get(\"domain\", self.domain())\n kwargs[\"degree\"] = kwargs.get(\"degree\", self.degree())\n kwargs[\"quad_scheme\"] = kwargs.get(\"quad_scheme\", self.quadrature...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate incremental calculation tasks of different frequencies.
def generate_tasks(update_time_l, update_time_r=None, freq=None, **kwargs): dates = dates_after_updateddate(update_time_l, update_time_r, freq, **kwargs) tasks = {} if freq in {"d", "w"}: for k, v in zip(dates["statistic_date"], dates["fund_id"]): tasks.setdefault(k, set()).add(v) el...
[ "def generate_parallel_tasks(name_prefix, num_of_tasks, deps):\n tasks = []\n for t_id in range(num_of_tasks):\n run_this = PythonOperator(\n task_id=f\"{name_prefix}_{t_id}\",\n python_callable=print_context,\n )\n run_this << deps\n tasks.append(run_this)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Match objects with time series to standard time series, and apply the strategy to its other attributes.
def match_by_std(obj, **kwargs): kw_used = ("key_tm", "key_used", "date_s", "date_e", "intervals", "freq", "shift", "extend", "apply") key_tm, key_used, date_s, date_e, intervals, freq, shift, extend, apply = meta.get_kwargs_used(kw_used, **kwargs) if isinstance(date_e, (dt.date, dt.datetime, float)): ...
[ "def stationarize(self, time_series):", "def _create_init_time_series(self, ts_data):\n\n # Avoid changing ts_data outside function\n ts_data_used = ts_data.copy()\n\n if self.model_name == '1_region':\n expected_columns = {'demand', 'wind'}\n elif self.model_name == '6_regi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Collect all available metadata.
def get_all_metadata(self): metadata = {} for key in self.METADATA_KEYS: try: val = self.get_metadata(key) except MissingMetadataError: pass else: metadata[key] = val return metadata
[ "def get_metadata(self):\n self.METADATA = []\n for file in self.METADATA_FILE_NAMES:\n self.METADATA.append(pd.read_csv(file))", "def get_metadata(self):\n\n tree = lxml.etree.parse(self.manifest)\n\n self.get_remotes(tree)\n self.get_projects(tree)", "def _scrape_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the given event from the trace and return a
def parse_event(self, event):
[ "def parse_event(self, event):\n # how do I do what event it is without a type\n if \"type\" not in event:\n return\n # look for chat messages\n if (event[\"type\"] == \"message\") & (\"text\" in event):\n print(event)\n # grab message info\n t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the regex to parse the fields part of the event line.
def _get_fields_regex(cls, event, fields, positional_field, greedy_field): # pylint: disable=unused-argument fields = fields.keys() - {positional_field} if fields: def combine(fields): return r'(?:{})+'.format( '|'.join(fields) ) ...
[ "def _get_regex(self, event, fields, positional_field, greedy_field):\n fields = self._get_fields_regex(event, fields, positional_field, greedy_field)\n header = self._get_header_regex(event)\n return r'{header} *{fields}'.format(header=header, fields=fields, **self.PARSER_REGEX_TERMINALS)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the regex for the header of the event.
def _get_header_regex(cls, event): blank = cls.PARSER_REGEX_TERMINALS['blank'] regex_map = dict( __comm=r'.+', __pid=cls.PARSER_REGEX_TERMINALS['integer'], __cpu=cls.PARSER_REGEX_TERMINALS['integer'], __timestamp=cls.PARSER_REGEX_TERMINALS['floating'], ...
[ "def _get_regex(self, event, fields, positional_field, greedy_field):\n fields = self._get_fields_regex(event, fields, positional_field, greedy_field)\n header = self._get_header_regex(event)\n return r'{header} *{fields}'.format(header=header, fields=fields, **self.PARSER_REGEX_TERMINALS)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the full regex to parse the event line. This includes both the header and the fields part.
def _get_regex(self, event, fields, positional_field, greedy_field): fields = self._get_fields_regex(event, fields, positional_field, greedy_field) header = self._get_header_regex(event) return r'{header} *{fields}'.format(header=header, fields=fields, **self.PARSER_REGEX_TERMINALS)
[ "def _get_header_regex(cls, event):\n blank = cls.PARSER_REGEX_TERMINALS['blank']\n regex_map = dict(\n __comm=r'.+',\n __pid=cls.PARSER_REGEX_TERMINALS['integer'],\n __cpu=cls.PARSER_REGEX_TERMINALS['integer'],\n __timestamp=cls.PARSER_REGEX_TERMINALS['floa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build an instance from a path to a text file.
def from_txt_file(cls, path, **kwargs): with open(path, 'rb') as f: return cls(lines=f, **kwargs)
[ "def __init__(self, path=\"\", text=\"\"):\n\n if path is not \"\" and text is not \"\":\n print('Error! Only one between file path and text must be provided')\n exit(1)\n\n eng = spacy.load('en', disable=['ner'])\n tokenizer = English().Defaults.create_tokenizer(eng)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filter the lines to select the ones with events. Also eagerly parse events from them to avoid the extra memory consumption from line storage, and to speed up parsing by acting as a pipeline on a lazy stream of lines.
def _eagerly_parse_lines(self, lines, skeleton_regex, event_parsers, events, time=None): # Recompile all regex so that they work on bytes rather than strings. # This simplifies the rest of the code while allowing the raw output # from a process to be fed def encode(string): ...
[ "def __filter_event_type__(trace_events, event_type):\n filtered = []\n for line in trace_events:\n if line[0] == event_type:\n filtered.append(line)\n return filtered", "def _filter_log_lines(log_entry):\n final_log_events = []\n for event in log_entry[\"logEvents\"]:\n me...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
State of the trace object that might impact the output of a dataframe
def trace_state(self): return None
[ "def trace_off(self): #Funciona\n self._trace=False", "def __trace_lines(self, frame, event, arg):\n curr_execution_log = {\"step\": self.step, \"timestamp\": time.time(), \"line_num\": self.curr_line, \"actions\": []}\n self.results[\"execution_log\"].append(curr_execution_log)\n\n if...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Same as ``(trace.start, trace.end)``. This is handy to pass to functions expecting a window tuple.
def window(self): return (self.start, self.end)
[ "def tuple(self):\n return self.start.coordinates[0], self.start.coordinates[1], self.end.coordinates[0], self.end.coordinates[1]", "def get_launch_window(self, idx):\n start = self.results[idx][\"window_start\"]\n end = self.results[idx][\"window_end\"]\n return (start, end)", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
JSON converts the original tuples into lists, so we need to convert it back.
def _coerce_json(cls, x): if isinstance(x, str): return x elif isinstance(x, Sequence): return tuple(map(cls._coerce_json, x)) else: return x
[ "def test_tuple_encoding(self):\n self.assertEqual(self.conn._encode_json({'hi': (1, 2, 3)}),\n '{\"hi\": [1, 2, 3]}')", "def _json_convert(set_):\n if isinstance(set_, set):\n return list(set_)\n else:\n raise TypeError(\"Not a set\")", "def convert_to_json_fo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filename of the data file in the swap.
def data_filename(self): return f'{self.name}.{self.cache_desc_nf._fmt}'
[ "def get_filename(self) -> str:\n return self._filename", "def current_filename(self):\n return self.dr.fileName()", "def get_data_filename(kind):\n # TODO assert that the file exisits\n return get_data_dir() + '/' + kind + '.csv'", "def filename(self):\n return get_product_filename(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the metadata mapping with the given ``metadata`` mapping and write it back to the swap area.
def update_metadata(self, metadata): if metadata: self._metadata.update(metadata) self.to_swap_dir()
[ "def _insert_metadata(self, metadata):\n kwargs = {\n 'output': self.output,\n }\n\n for key, metadata_key in LOSS_MAP_METADATA_KEYS:\n kwargs[key] = metadata.get(metadata_key)\n\n self.metadata = models.LossMap(**kwargs)\n\n self.metadata.save()", "def upd...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reload the persistent state from the given ``swap_dir``.
def from_swap_dir(cls, swap_dir, **kwargs): if swap_dir: try: return cls._from_swap_dir(swap_dir=swap_dir, **kwargs) except (FileNotFoundError, TraceCacheSwapVersionError, json.decoder.JSONDecodeError): pass return cls(swap_dir=swap_dir, **kwargs)
[ "def load(self, dir_path, file_name=None):\n if file_name is None:\n file_name = find_latest_model(dir_path)\n self.load_state_dict(torch.load(os.path.join(dir_path, file_name)))", "def reload(self):\n with self.lock:\n self.db = _load_json(self.path, driver=self.driver)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Equivalent to `pd.read_parquet(...)` but also loads the metadata back into the dataframe's attrs
def _data_from_parquet(path): data = pd.read_parquet(path) # Load back LISA metadata into "df.attrs", as they were written in # _data_to_parquet() if isinstance(data, pd.DataFrame): schema = pyarrow.parquet.read_schema(path) attrs = schema.metadata.get(b'lisa', '...
[ "def read(\n cls, file_path: str, unflatten_kwargs: dict = None, **read_kwargs\n ) -> pd.DataFrame:\n return pd.read_parquet(path=file_path, **read_kwargs)", "def read_metadata_table(path: str) -> Optional[pd.DataFrame]:\n FILE_EXT_TO_PD_READ_FUNC = {\n '.tab': pd.read_table,\n '...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scrub the swap area to remove old files if the storage size limit is exceeded.
def scrub_swap(self): # TODO: Load the file information from __init__ by discovering the swap # area's content to avoid doing it each time here if self._swap_size > self.max_swap_size and self.swap_dir: stats = { dir_entry.name: dir_entry.stat() for di...
[ "def _clean_cache(self):\n cache_size = self.current_size\n max_size = Config.get_config().getint(\"cache\", \"maxCacheSize\", fallback=\"10737418240\")\n if cache_size <= max_size:\n return\n for key in sorted(self.objects, key=lambda k: self.objects[k][\"last_accessed\"]):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch an entry from the cache or the swap.
def fetch(self, cache_desc, insert=True): try: return self._cache[cache_desc] except KeyError as e: # pylint: disable=raise-missing-from try: path = self._cache_desc_swap_path(cache_desc) # If there is no swap, bail out except (...
[ "def _cache_get(self, key):\r\n entry = None\r\n if self.cache is not None:\r\n entry = self.cache.get(key)\r\n\r\n return entry or (None, 0, 0)", "def __getitem__(self, key):\n\n # check for slycat path\n self.check_fs_path()\n\n # is item in cache?\n if key in self:\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Evict the given descriptor from memory.
def evict(self, cache_desc): self.write_swap(cache_desc) try: del self._cache[cache_desc] except KeyError: pass
[ "def _unregisterFd(self, fd):\n\n bench = self._fds[fd]\n\n self._fds.pop(fd)\n self._poller.unregister(fd)\n\n return bench", "def unregister(self, fd):\r\n self.read.discard(fd)\r\n self.write.discard(fd)\r\n self.error.discard(fd)", "def evict(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write the given descriptor to the swap area if that would be faster to reload the data rather than recomputing it. If the descriptor is not in the cache or if there is no swap area, ignore it.
def write_swap(self, cache_desc, force=False, write_meta=True): try: data = self._cache[cache_desc] except KeyError: pass else: if force or self._should_evict_to_swap(cache_desc, data): self._write_swap(cache_desc, data, write_meta)
[ "def evict(self, cache_desc):\n self.write_swap(cache_desc)\n\n try:\n del self._cache[cache_desc]\n except KeyError:\n pass", "def _cache_descriptor(self,force=False):\n if not self._descriptor or force:\n response = requests.get('%s/describe/%s/%s/%s....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempt to write all cached data to the swap.
def write_swap_all(self): for cache_desc in self._cache.keys(): self.write_swap(cache_desc)
[ "def write(self):\r\n try:\r\n with open(self.cachefile, 'wb') as open_cache:\r\n pickle.dump(self.cache, open_cache)\r\n logging.debug('Cache file entries written: filename:cnt: %s:%s', \r\n self.cachefile, len(self.cachefile))\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clear cache entries referencing a given event.
def clear_event(self, event, raw=None): self._cache = { cache_desc: data for cache_desc, data in self._cache.items() if not ( cache_desc.get('event') == event and ( raw is None or cache_desc.get('raw') ==...
[ "def garbageCollector(self):\n tcutoff = self.latest_event - TimeSpan(self.expirationtime)\n for evID in self.event_dict.keys():\n evt = self.cache.get(seiscomp3.DataModel.Event, evID)\n if self.event_dict[evID]['timestamp'] < tcutoff:\n self.event_dict.pop(evID)",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the possible source events of a given event. For normal events, this will just be a list with the event itself in it. For meta events, this will be the list of source events hosting that meta event.
def get_event_sources(cls, event): try: prefix, _ = event.split('@', 1) except ValueError: return [event] try: return sorted(cls._META_EVENT_SOURCE[prefix].keys()) except KeyError: return [event]
[ "def GetSources(self, event_object):\n if self.DATA_TYPE != event_object.data_type:\n raise errors.WrongFormatter(u'Unsupported data type: {0:s}.'.format(\n event_object.data_type))\n\n source_long = getattr(event_object, u'source_long', u'UNKNOWN')\n source_append = getattr(event_object, u's...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the PIDs of all tasks with the specified name. The same PID can have different task names, mainly because once a task is generated it inherits the parent name and then its name is updated to represent what the task really is.
def get_task_name_pids(self, name, ignore_fork=True): pids = self._task_name_map[name] if ignore_fork: pids = [ pid for pid in pids # Only keep the PID if its last name was the name we are # looking for. if self...
[ "def by_name(self, name):\n name_like = \"%{}%\".format(name)\n with self.cursor() as cursor:\n cursor.execute(\"SELECT TASK, NAME, DESCRIPTION FROM\"\n \" TASK WHERE NAME LIKE ?\", (name_like,))\n return [Task.map_row(row) for row in cursor.fetchall()]"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all the names of the task(s) with the specified PID, in appearance order. The same PID can have different task names, mainly because once a task is generated it inherits the parent name and then its name is updated to represent what the task really is.
def get_task_pid_names(self, pid): return self._task_pid_map[pid]
[ "def get_task_names_for_shot(self, shot_name):\n\n tasks_for_shot = self.get_tasks_for_shot(shot_name)\n if not tasks_for_shot:\n return\n\n return [task.name for task in tasks_for_shot]", "def _short_task_names(self):\n return [name[len(self.task_prefix):] for name in self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open the parsed trace using the most appropriate native viewer.
def show(self): path = self.trace_path if not path: raise ValueError('No trace file is backing this Trace instance') if path.endswith('.dat'): cmd = 'kernelshark' else: cmd = 'xdg-open' return os.popen(f"{cmd} {shlex.quote(path)}")
[ "def trace_open(self, tracefile=None, **kwargs):\n if tracefile is None:\n tracefile = self.tracefile\n self.dprint('DBG1', \"trace_open [%s]\" % tracefile)\n self.pktt = Pktt(tracefile, **kwargs)\n return self.pktt", "def _view(self, filepath, format, quiet):\n metho...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Turn a bitmask (like cpu_mask) formatted by trace-cmd in non-raw mode into a list of integers, one for each bitmask position that is set. ``mask`` is a string with comma-separated hex numbers like "000001,12345,..."
def _expand_bitmask_field(mask): numbers = mask.split(b',') # hex number, so 4 bit per digit nr_bits = len(numbers[0]) * 4 def bit_pos(number): # Little endian number = int(number, base=16) return ( i for i in range(nr...
[ "def mask2list(bitmask, number=1):\n if not bitmask:\n return []\n this = []\n if bitmask & 1:\n this = [number]\n return this + mask2list(bitmask >> 1, number + 1)", "def bitpos_from_mask(mask, lsb_pos=0, increment=1):\n out = []\n while mask:\n if mask & 0x01:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Combine two event checkers into one that checks the presence of both.
def __and__(self, other): return AndTraceEventChecker([self, other])
[ "def __or__(self, other):\n return OrTraceEventChecker([self, other])", "def check_events(self, event:Event):\n pass", "def eventcheckin():", "def testSameEvent(self):\n event_a = event.EventObject()\n event_b = event.EventObject()\n event_c = event.EventObject()\n event_d = event.Ev...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Combine two event checkers into one that checks the presence of either of them.
def __or__(self, other): return OrTraceEventChecker([self, other])
[ "def check_events(self, event:Event):\n pass", "def incompatible_calendar_event(calendar_event1, calendar_event2):\r\n\r\n if calendar_event1.typ == calendar_event2.typ:\r\n if calendar_event1.pointwise_equal(calendar_event2):\r\n return False\r\n else:\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build an instance of the class, converting ``str`` to ``TraceEventChecker``.
def from_events(cls, events, **kwargs): def make_event(e): if isinstance(e, TraceEventCheckerBase): return e else: return TraceEventChecker(e) return cls(map(make_event, events), **kwargs)
[ "def build(cls, rule):\n\n if isinstance(rule, cls):\n return rule\n\n rule = parse(rule)\n assert isinstance(rule, dict), f'Not a valid rule: {rule}'\n type = get_event_class_by_type(rule.pop('type') if 'type' in rule else 'Event')\n\n args = {}\n for key, value...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator for methods that require some given trace events.
def requires_events(*events, **kwargs): return AndTraceEventChecker.from_events(events, **kwargs)
[ "def require_event(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if 'event_id' not in kwargs:\n raise errors.BadRequest('The request requires an event ID')\n event = validate_and_get_current_event(kwargs['event_id'])\n\n kwargs['event'] = event\n del ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge in a configuration source.
def add_merged_src(self, src, conf, optional_events=False, **kwargs): if not isinstance(conf, self.__class__): conf = self.__class__(conf=conf) def merge_conf(key, val, path): new = _merge_conf(key, val, path) try: existing = get_nested_key(self, pat...
[ "def mergeConfig(self, *args, **kwargs):\n other = cherrypy.lib.reprconf.Config(*args, **kwargs)\n # Top-level keys are namespaces to merge, second level should get replaced\n for k, v in other.items():\n mergeFrom = self.get(k, {})\n mergeFrom.update(v)\n self[k] = mergeFrom", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for creating the TMS pulse current injectors for L5 neurons.
def pulse_l5_place(onset): tms_pulse = [] pulse_axon = h.TMSpulse_bi(0.5, sec=h.soma) pulse_axon.onset = onset tms_pulse.append(pulse_axon) return tms_pulse
[ "def pulse_l5_inject(tms_pulse, scale, intensity):\n\n\n gamma = 0.091755*intensity-8.05707\n\n # generate amplitude from L5 TMS induced current amplitude distribution\n tms_pulse[0].imax = gamma*np.random.exponential(scale)\n return tms_pulse", "def pulse_simple(self):\r\n # All time are in us...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for setting up the TMS pulse injectors for an L5 cell
def pulse_l5_inject(tms_pulse, scale, intensity): gamma = 0.091755*intensity-8.05707 # generate amplitude from L5 TMS induced current amplitude distribution tms_pulse[0].imax = gamma*np.random.exponential(scale) return tms_pulse
[ "def pulse_simple(self):\r\n # All time are in us\r\n t1_laser = 100 # First time to turn ON the laser\r\n t2_laser = 650 # Last time to turn ON the laser\r\n dt_laser = 30 # Pulse duration of the laser \r\n dt_trig = 10 # Duration of the pulse for the trigger\r\n dt_pulse...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether ``EmbedProvider.clean_copy`` works as intended.
def test__EmbedProvider__clean_copy(): user = User.precreate(202303310004, name = 'koishi') name = user.mention url = 'https://orindance.party/' field = EmbedProvider(name = name, url = url) copy = field.clean_copy() _assert_fields_set(copy) vampytest.assert_is_not(field, copy) ...
[ "def test__EmbedProvider__copy():\n name = 'orin'\n url = 'https://orindance.party/'\n \n field = EmbedProvider(name = name, url = url)\n copy = field.copy()\n _assert_fields_set(copy)\n vampytest.assert_is_not(field, copy)\n \n vampytest.assert_eq(field, copy)", "def test__EmbedProvide...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether ``EmbedProvider.copy`` works as intended.
def test__EmbedProvider__copy(): name = 'orin' url = 'https://orindance.party/' field = EmbedProvider(name = name, url = url) copy = field.copy() _assert_fields_set(copy) vampytest.assert_is_not(field, copy) vampytest.assert_eq(field, copy)
[ "def test__EmbedProvider__copy_with__0():\n name = 'orin'\n url = 'https://orindance.party/'\n \n field = EmbedProvider(name = name, url = url)\n copy = field.copy_with()\n _assert_fields_set(copy)\n vampytest.assert_is_not(field, copy)\n \n vampytest.assert_eq(field, copy)", "def test_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether ``EmbedProvider.copy_with`` works as intended.
def test__EmbedProvider__copy_with__0(): name = 'orin' url = 'https://orindance.party/' field = EmbedProvider(name = name, url = url) copy = field.copy_with() _assert_fields_set(copy) vampytest.assert_is_not(field, copy) vampytest.assert_eq(field, copy)
[ "def test__EmbedProvider__copy_with__1():\n old_name = 'orin'\n old_url = 'https://orindance.party/'\n \n new_name = 'rin'\n new_url = 'https://www.astil.dev/'\n \n field = EmbedProvider(name = old_name, url = old_url)\n copy = field.copy_with(\n name = new_name,\n url = new_ur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether ``EmbedProvider.copy_with`` works as intended.
def test__EmbedProvider__copy_with__1(): old_name = 'orin' old_url = 'https://orindance.party/' new_name = 'rin' new_url = 'https://www.astil.dev/' field = EmbedProvider(name = old_name, url = old_url) copy = field.copy_with( name = new_name, url = new_url, ) _a...
[ "def test__EmbedProvider__copy_with__0():\n name = 'orin'\n url = 'https://orindance.party/'\n \n field = EmbedProvider(name = name, url = url)\n copy = field.copy_with()\n _assert_fields_set(copy)\n vampytest.assert_is_not(field, copy)\n \n vampytest.assert_eq(field, copy)", "def test_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether ``EmbedProvider.contents`` works as intended.
def test__EmbedProvider__contents(): name = 'orin' url = 'https://orindance.party/' for field, expected_output in ( (EmbedProvider(), set()), (EmbedProvider(name = name), {name}), (EmbedProvider(url = url), set()), (EmbedProvider(name = name, url = url), {name}), ): ...
[ "def test__EmbedProvider__iter_contents():\n name = 'orin'\n url = 'https://orindance.party/'\n \n for field, expected_output in (\n (EmbedProvider(), set()),\n (EmbedProvider(name = name), {name}),\n (EmbedProvider(url = url), set()),\n (EmbedProvider(name = name, url = url)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether ``EmbedProvider.iter_contents`` works as intended.
def test__EmbedProvider__iter_contents(): name = 'orin' url = 'https://orindance.party/' for field, expected_output in ( (EmbedProvider(), set()), (EmbedProvider(name = name), {name}), (EmbedProvider(url = url), set()), (EmbedProvider(name = name, url = url), {name}), ...
[ "def test__EmbedProvider__contents():\n name = 'orin'\n url = 'https://orindance.party/'\n \n for field, expected_output in (\n (EmbedProvider(), set()),\n (EmbedProvider(name = name), {name}),\n (EmbedProvider(url = url), set()),\n (EmbedProvider(name = name, url = url), {na...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A function that returns a dictionary from a list of lists, of any depth
def get_dict(self, a_list): d = {} for k in a_list: if (isinstance(k, list)): if isinstance(k[1], list) and k[0]: d.update({k[0]: self.get_dict(k[1])}) elif k[0]: d.update({k[0]: k[1]}) return d
[ "def _list_depth_to_dict(self, li):\n dict_ret = {}\n if self._depth(li) == 1:\n return self._handle_key_value(dict_ret, li[0], li[1])\n else:\n for sub_item in li[1]:\n tmp_dict = _list_depth_to_dict(self, sub_item)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Connect to Twitter firehose, and yield each line of the HTTP response.
def stream_lines(username, password, filter): if filter: address = 'http://stream.twitter.com/1/statuses/filter.json?track=' + urllib.quote(filter) else: address = 'http://stream.twitter.com/1/statuses/sample.json' scheme, host, path, p, query, f = urlparse.urlparse(address) credentials...
[ "async def test_stream_yes():\n max_lines = 1000\n i = 0\n async with TestClient(stream_yes) as client:\n response = await client.get(\"/stream_yes\", stream=True)\n async for line in response.iter_content(2):\n if i > max_lines:\n break\n line = line.deco...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the formula id for the MathML equation if the source is Wikipedia
def parse_formula_id_wiki(document_id, mathml): index = mathml.index('id="') formula_id = "?" if(index != -1): # find document id start = index + len('id="') end = start while (not mathml[start:end+1].endswith('"')): end += 1 doc_id = mathml[start:end] ...
[ "def parse_formula_id(document_id, mathml):\n index = mathml.index('id=\"math.')\n formula_id = \"?\"\n if(index != -1):\n # find document id\n start = index + len('id=\"math.')\n end = start\n while \".\" not in mathml[start:end+1]:\n end += 1\n doc_id = mathm...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the formula id for the MathML equation
def parse_formula_id(document_id, mathml): index = mathml.index('id="math.') formula_id = "?" if(index != -1): # find document id start = index + len('id="math.') end = start while "." not in mathml[start:end+1]: end += 1 doc_id = mathml[start:end] ...
[ "def parse_formula_id_wiki(document_id, mathml):\n index = mathml.index('id=\"')\n formula_id = \"?\"\n if(index != -1):\n # find document id\n start = index + len('id=\"')\n end = start\n while (not mathml[start:end+1].endswith('\"')):\n end += 1\n doc_id = ma...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saves the formulas from the file into their separate files in the directory
def save_formulas(file, directory, wikipedia=False): (__, content) = MathDocument.read_doc_file(file) while len(content) != 0: (start, end) = MathExtractor.next_math_token(content) if(start != -1): file_name = os.path.splitext(os.path.basename(file))[0] ext = os.path.spli...
[ "def perform(self):\n for k, v in self.formulas.items():\n value = self.format_and_eval_string(v)\n self.write_in_database(k, value)", "def write(self):\n\n # Write file lines according to gaussian requirements\n with open(self.filepath, 'w') as file:\n # file...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This sends the message via the given asyncio StreamWriter.
async def send(self, writer: asyncio.StreamWriter) -> None: writer.write(self.encode()) await writer.drain()
[ "async def writer_worker(self):\n try:\n while True:\n data = await self.inbound_queue.get()\n print('SOCKET > ', data)\n self.writer.write(data.encode())\n await self.writer.drain()\n finally:\n self.writer = None", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function reads exactly one message from the asyncio.StreamReader and returns it as a Message. A MessageException is raised when the magic value check fails.
async def read_message(reader: asyncio.StreamReader) -> Message: mv = await reader.read(n=2) if mv != MAGIC_VALUE: if mv == b'': raise EOFError() else: raise MessageException('Magic value check failed') _type = await reader.read(n=1) type = MessageType.get(_type)...
[ "def read_message(message):\n try:\n received_message = json.loads(message)\n return defenition_answer(received_message)\n except:\n create_answer(400)", "def _read_message(data, msg):\n if msg.type in IGNORED_MESSAGES:\n data = _ignore(data, msg)\n elif msg.type == 'time_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a list of previously defined companies. SQL select of the full table (defined_company_types), which contains a list of company names and predefined company types. The list of defined companies is returned as a dictionary with the description as key and a list containing the proper company name and the type as the second entr...
def defined_companies(self): sql_st = ''' SELECT * FROM defined_company_types ''' cur = self.conn.cursor() defined_types = cur.execute(sql_st).fetchall() defined_comp_types = dict() for record in defined_types: defined_comp_types[recor...
[ "def get_companies(self) -> List[Company]:\n companies = []\n data = self.get_data('''SELECT * FROM Company''', None)\n for company in data:\n companies.append(\n Company(\n company[0],\n company[1],\n company[2]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the company type given the company name and location. The module checks if the company name is in the defined company name table; if it is not, the google_search function is run in order to construct a search query using the Google API, to find the company type by sending the location of the user the d...
def company_type(self,comp_name,lat,lng): dc = self.defined_companies() comp_type = '' goog_details = [] if comp_name != None: for company,tags in dc.comp_types_.items(): if company in comp_name: comp_type = tags[1] goog_deta...
[ "def search_company(query):\n lookup = requests.get(SEARCH_QUERY, params={'query': query, 'limit': 10})\n if 200 <= lookup.status_code < 300:\n if len(lookup.json()) == 0:\n return None # Nothing found\n else:\n # Create dict with company name as key\n company_d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For each transaction, finds the type based on company name and location. SQL select from the geo_expense_data table; for each transaction the company type lookup is launched. If the response is from a Google query, then each result of possible companies matching the query is evaluated based on the distance between visited loca...
def data_retriever(self): sql_st = ''' SELECT * FROM geo_expense_data ''' cur = self.conn.cursor() geo_comp_data = cur.execute(sql_st).fetchall() for record in geo_comp_data: geo_expense_id = record[0] year = record[2] ...
[ "def query_city():\n\n try: \n locations = pd.read_sql(\"\"\"\n SELECT DISTINCT(event_city)\n FROM ticket_sales;\n \"\"\",\n con=engine)\n \n # removes enclosing brackets of dataframe elements using list slicing and trans...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the latitude and longitude for a user on a given day. SQL select statement for table goog_locations, which returns the latitude and longitude of all places visited by the user that day.
def locations_visited(self,year,month,day): sql_st = ''' SELECT lat, lng FROM goog_locations WHERE (goog_locations.yr = ?) and (goog_locations.mnth = ?) and (goog_locations.dy = ?) ''' cur = self.conn.cursor() ...
[ "def get_locations_for(username):\n locations = []\n with sql.connect(database_locations) as cur:\n if username == 'admin':\n res = cur.execute(f\"\"\"\n SELECT DISTINCT * \n From Location \n ORDER B...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function to write to SQL table exp_comp_type. The query writes the results of the company type search to the database. Parameter
def data_writer(self,sql_record): sql_st = ''' INSERT OR IGNORE INTO exp_comp_type(geo_expense_id,goog_name,comp_type,address,placeid,goog_lat,goog_lng) VALUES (?,?,?,?,?,?,?) ''' cur = self.conn.cursor() cur.execute(sql_st,sql_record) self.conn.commit()
[ "def exp_type_loc_table(self):\n\n sql_st = '''\n INSERT INTO exp_type_loc(yr, mnth, dy, general_name,goog_name, comp_type, country, city, state, postcode, lat,lng,goog_lat,goog_lng, value)\n SELECT yr, mnth, dy, general_name, goog_name, comp_type, country, city, state, postcode, la...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Aggregates SQL tables containing company type and transaction data. Insert & join SQL statement, creating table exp_type_loc, which contains the transaction data and the predicted company name and company type.
def exp_type_loc_table(self): sql_st = ''' INSERT INTO exp_type_loc(yr, mnth, dy, general_name,goog_name, comp_type, country, city, state, postcode, lat,lng,goog_lat,goog_lng, value) SELECT yr, mnth, dy, general_name, goog_name, comp_type, country, city, state, postcode, lat,lng,goo...
[ "def summary_table(self):\n\n sql_st = '''\n INSERT OR IGNORE INTO expenses(yr, mnth, dy, general_name, comp_type,value)\n SELECT yr, mnth, dy, general_name, comp_type,value\n FROM exp_type_loc\n '''\n\n cur = self.conn.cursor()\n cur.execute(sql_st)\n self.co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Primary aggregation launcher. Launches data_retriever and exp_type_loc_table in order to retrieve the company type given a name, and then aggregates company type and location into a table.
def company_info_loader(self): self.data_retriever() self.exp_type_loc_table()
[ "def exp_type_loc_table(self):\n\n sql_st = '''\n INSERT INTO exp_type_loc(yr, mnth, dy, general_name,goog_name, comp_type, country, city, state, postcode, lat,lng,goog_lat,goog_lng, value)\n SELECT yr, mnth, dy, general_name, goog_name, comp_type, country, city, state, postcode, la...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates summary transaction information to display in an online table. Inserts summary data of expenses to be displayed online.
def summary_table(self): sql_st = ''' INSERT OR IGNORE INTO expenses(yr, mnth, dy, general_name, comp_type,value) SELECT yr, mnth, dy, general_name, comp_type,value FROM exp_type_loc ''' cur = self.conn.cursor() cur.execute(sql_st) self.conn.commit()
[ "def createSummaryTable(self):\n\n\n # create an empty dataframe with the column headings (must create a dummy row)\n summary = pd.DataFrame(\n [['Dummy', '00-00-00', np.nan, np.nan,np.nan ,np.nan, np.nan, np.nan, np.nan, np.nan]],\n columns=['Asset ID', 'Purchase date', 'Purchas...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
_setSubmitter_ Allow dynamic changing of Submitter plugin. Sets the SubmitterName to the value provided.
def setSubmitter(self, submitterName): self.args['SubmitterName'] = submitterName return
[ "def get_submitter():\n return submit.Submitter(\n remove_messages=False,\n send_submissions=False,)", "def __call__(cls, _name, *args, **kwargs):\n\n try:\n return ASubmitter[str(_name).lower()](**kwargs)\n except KeyError as e:\n raise ValueError(\"This submi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
_invokeSubmitter_ Invoke the submission plugin for the spec provided for normal 1submit jobs
def invokeSubmitter(self, jobCache, jobToSubmit, jobSpecId, jobSpecInstance, specToCacheMap = {}): # // # // Retrieve the submitter plugin and invoke it #// submitter = retrieveSubmitter(self.args['SubmitterName']) try: submitter( ...
[ "def submit(self):\n \n print 'Submitting the job'\n runner = Runner(self)\n runner.start()", "def submit(self):\n \n # TODO: send job to scheduler ", "def __call__(cls, _name, *args, **kwargs):\n\n try:\n return ASubmitter[str(_name).lower()](**kwargs...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
_checkJobState_ Check JobStates DB for jobSpecId prior to submission. Check the job is resubmittable. Return cache dir, or None if the job shouldn't be submitted.
def checkJobState(self, jobSpecId): # // # // Should we actually submit the job? #// The Racers settings in the JobStates DB define how many # //times the same identical job can be submitted in parallel # // So we check to see how many jobs have been submitted #// for...
[ "def get_proper_state(job, state):\n\n if job.serverstate == \"finished\" or job.serverstate == \"failed\":\n pass\n elif job.serverstate == \"\" and state != \"finished\" and state != \"failed\":\n job.serverstate = 'starting'\n elif state == \"finished\" or state == \"failed\" or state == \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method creates a list of pivot column names for the new pivoted table.
def _create_piv_col_names(self, add_col_nm_suffix, prefix, suffix): prefix = prefix + "_" if prefix else "" suffix = "_" + suffix if suffix else "" if add_col_nm_suffix: piv_col_names = ["{0}{1}_{2}{3}".format(prefix, self._clean_col_name(piv_col_val), self.values_col.lower(), suffi...
[ "def get_column_names(self):\r\n return [column.key for column in self.table.columns]", "def rename_columns(self):\r\n self.columns = [self._date, self._net_purchase, self._gross_sale, self._tax, self._margin]\r\n self.all_data.columns = self.columns", "def colnames(self):\n return l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds the select statement part of the query.
def _add_select_statement(self): query = "select " + "".join([index_col + ", " for index_col in self.index_col]) + "\n" return query
[ "def add_select(self):\n if self.query_model.select_all == True:\n select_string = \"SELECT * \\n\"\n self.query_string += select_string\n return\n elif len(self.query_model.select_columns) > 0 or len(self.query_model.auto_generated_select_columns) ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds the case statement part of the query.
def _add_case_statement(self): case_query = self.function.format("case when {0} = \"{1}\" then {2} else {3} end") + " as {4},\n" query = "".join([case_query.format(self.pivot_col, piv_col_val, self.values_col, self.not_eq_default, piv_col_name) ...
[ "def visitCaseFunctionCall(self, ctx: MySqlParser.CaseFunctionCallContext) -> SQLToken:\n branches = []\n for func_alternative in ctx.caseFuncAlternative():\n branches.append(self.visit(func_alternative))\n if ctx.ELSE():\n branches.append((None, self.visit(ctx.functionArg...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds the from statement part of the query.
def _add_from_statement(self): query = "from {0}\n".format(self.table_name) return query
[ "def add_from(self):\n if self.query_model.parent_query_model is None:\n if len(self.query_model.from_clause) > 0:\n from_clause = \"FROM \"\n for graph in self.query_model.from_clause:\n from_clause += '<{}>, '.format(graph)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds the group by part of the query.
def _add_group_by_statement(self): query = "group by " + "".join(["{0},".format(x) for x in range(1, len(self.index_col) + 1)]) return query[:-1]
[ "def add_groupby(self):\n if self.query_model.groupBy_columns is not None and len(self.query_model.groupBy_columns) > 0:\n groupby_clause = \" GROUP BY \"\n for col_name in self.query_model.groupBy_columns:\n groupby_clause += \"?\" + col_name + \" \"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the query to create the pivoted table.
def generate_query(self): self.query = self._add_select_statement() +\ self._add_case_statement() +\ self._add_from_statement() +\ self._add_group_by_statement() return self.query
[ "def _make_create_table_q(self):\n self._get_column_types()\n\n # Convert column types back to strings for use in the create table\n # statement\n types= ['{name} {raw_type}'.format(**x) for x in self.columns]\n args = {'table': self.table, 'columns': (', ').join(types)}\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes the query to a text file.
def write_query(self, output_file): text_file = open(output_file, "w") text_file.write(self.generate_query()) text_file.close()
[ "def writeToQueryFile(string, fName = \"../queryOutput/queryResult.txt\"):\n with open(fName, 'a+') as f:\n f.write(string)", "def writeResultToFile(results, filename='all_searches.txt'):\n with open(filename, 'w') as f:\n for query in results:\n f.writelines(query.__repr__() + '\\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Show random reminder immediately
def show_random_once(message): chat_id = message.from_user.id if chat_id not in threads.keys(): threads[chat_id] = MyThread(chat_id, users, bot) bot.send_message(chat_id, random.choice(users.select(chat_id, 'reminders')))
[ "def randomise(self):\n self.timer = self.period * random.random()", "async def random_day(self, ctx):\n # Note: day command invokes this command\n await ctx.embed_reply(random.choice(calendar.day_name))", "async def reminder() -> None:\n\n r: tasks.Loop = pack.reminder\n if r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the period for the message sender
def set_period(message): chat_id = message.from_user.id if chat_id not in threads.keys(): threads[chat_id] = MyThread(chat_id, users, bot) current_period = int(users.select(chat_id, 'period')) bot.send_message(chat_id, f'Current message period is *{current_period} seconds*...
[ "def set_time_based_notification(domain_class, state, roles, time):", "def update_period_secs(self):\n return 1", "def reliable_time_period_notifications(self):\n pass", "def with_delay(self, delay: float) -> \"WSMessage\":\n self.delay = delay\n return self", "def teleopPeriodic...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Control the buffer size of _encoder. Flush if necessary
def _refresh_buffer(self): if len(self._encoder) > self._buffer_size: self.flush()
[ "def setWriteBufferSize(self, size: int) -> None:\n ...", "def adjust_buffers(self):\n #self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1048576)\n #self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1048576)\n pass", "def set_buffer_size(self, buffer_size):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Round number of repeats based on depth multiplier.
def round_repeats(repeats, depth_coefficient): return int(math.ceil(depth_coefficient * repeats))
[ "def depth(self) -> float:", "def _get_num_to_fold(stretch: float, ngates: int) -> int:\n return int(round(ngates * (stretch - 1.0) / 2.0))", "def depth(self):\n return self._depth * 10", "def improved_score_depth_n(game, player, max_depth=5):\n def _bfs_score(p):\n location = game.get_pla...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method will discover visual data and will return paths and type
def discover(cls, path: str) -> Tuple[List[VisualData], str]:
[ "def draw_visual(self):\n visual = []\n for native_geometry in self._iter_geometry(self.model, \"visual\"):\n visual.append(native_geometry)\n for tool in self.attached_tool_models.values():\n for native_geometry in self._iter_geometry(tool, \"visual\"):\n v...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a set of buttons in Kupu, translate them to a set for TinyMCE toolbar
def translateButtonsFromKupu(self, context, buttons): return_buttons = [] for button in buttons: if button == 'save-button': try: if not context.checkCreationFlag(): return_buttons.append('save') except AttributeErr...
[ "def workbenchButtons(workbench):\n clearList(menuList)\n clearList(buttonList)\n\n g = None\n uid = None\n actions = cpc.actionList()\n base = p.GetGroup(\"User\").GetGroup(workbench)\n cpc.defaultGroup(base)\n if base.GetBool(\"default\", 0):\n uid = base.GetString(\"default\")\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return valid (X)HTML elements and their attributes that can be used within TinyMCE
def getValidElements(self): # Get safe html transform safe_html = getattr(getToolByName(self, 'portal_transforms'), 'safe_html') if safe_html.get_parameter_value('disable_transform'): return {'*': ['*']} COMMON_ATTRS = CORE_ATTRS | I18N_ATTRS valid_elements = { ...
[ "def as_markup(self):\r\n return self.__markup_safe", "def build(element_unparsed, owner):\n\n short_tags = ['area', 'base', 'basefont', 'br', 'embed', 'hr', 'input',\n 'img', 'link', 'param', 'meta']\n\n required = {\n 'a': {'href': ''},\n 'base': {'href': ''},\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Connect to Mongo and return a new SocketInfo. Can raise ConnectionFailure or CertificateError. Note that the pool does not keep a reference to the socket; you must call return_socket() when you're done with it.
def connect(self): return SocketInfo(FakeSocket(self._db), self, None, self.address)
[ "def try_connect(self):\n sock = RecvBufferedSocket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))\n try:\n sock.connect((self._host, self._port))\n except (socket.error, socket.gaierror) as e:\n logger.debug('Encountered socket exception {!r} when attempting connect to nailgun'.format(e))\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes a dictionary to strategies.ini. Stores it as a config object.
def writeSettings(dictToWrite): config_object["Strategy's"] = dictToWrite with open('FileStorage.ini', 'w') as conf: config_object.write(conf)
[ "def store(self):\n\n cfg = ConfigParser.SafeConfigParser()\n for setting in self:\n cfg.add_section(setting)\n for option, value in self[setting].items():\n cfg.set(setting, option, str(value))\n\n with open(self.file_name, \"w\") as cfg_file:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads and returns a dictionary of settings from strategies.ini
def readSettings(): config_object.read("FileStorage.ini") return config_object["Strategy's"]
[ "def load_settings():\n settings = {}\n with open('settings.ini', 'r') as f:\n lines = f.readlines()\n for line in lines:\n if '//' not in line and line != '\\n': # ignoring comments and blank lines\n setting = line.split(\"=\")\n settings[setting[0]] = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Locates strategy files in the "Strategies" folder and returns them. Returns a list containing the full names of all files. Strategy files must end with strategy.py.
def findStrategies(): listOfStrategyFiles = [] print(os.path.normpath(os.path.realpath(__file__) + os.sep + os.pardir + os.sep + os.pardir + os.sep + 'strategies')) #os.chdir(os.path.dirname(os.path.realpath(__file__)) + os.sep + 'strategies') os.chdir(os.path.normpath(os.path.realpath(__file__) + os.s...
[ "def py_files(self):\n base = dirname(self.fnam) # Paths are relative to config.\n return [abspath(join(base, c.py_path)) for c in self.conversions]", "def list_py(path = None):\n if(path == None):\n path =os.getcwd()\n return [fname for fname in os.listdir(path)\n if os.pat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cleans provided dictionary and reorganizes running algorithms accordingly
def cleanDictionary(OldDictionary): newPositionNumber = 0 cleanedDictionary = {} for strategy in OldDictionary: #Rearange Running Algoritms listOfRunningAlgos = algoHandler.getRunningAlgos() for algorithm in listOfRunningAlgos: oldStrategyNumber = re.sub('\D', '', str(str...
[ "def clean(self):\n self.num_rules = 0\n self.lut = dict()\n self.bigrams = dict()\n self.root = None", "def Map_clean(aMap):\n # First get the full buckets\n for i, bucket in enumerate(aMap):\n if bucket != []:\n aMap[i] = []", "def force_clean(self, caller=True): # TODO: c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Disables all current strategies and running algorithms. Called when the program shuts down (and, still to do, in configurable emergencies)
def disableStrategies(): stratDictionary = readSettings() position = 0 for strategy in stratDictionary: listOfSettings = literal_eval(stratDictionary["strategy"+str(position)]) if listOfSettings[3] == 1: listOfSettings[3] = 0 stratDictionary["strategy"+str(position)] = st...
[ "def shutdown(self) -> None:\n for integration in self.get_integrations():\n integration.disable_integration()\n\n self._integration_classes = {}\n self._integration_configs = {}\n self._integration_instances = {}\n self._needs_recalc = False\n\n try:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resize an image to the given img_size by first rescaling it and then applying a central crop to fit the given dimensions.
def resize_and_crop(image, img_size): source_size = np.array(image.shape[:2], dtype=float) target_size = np.array(img_size, dtype=float) # Scale scale = np.amax(target_size / source_size) inter_size = np.round(source_size * scale).astype(int) image = cv2.resize(image, (inter_size[1], inter_size...
[ "def crop_resize(img, size):\n short_edge = min(img.shape[:2])\n yy = int((img.shape[0] - short_edge) / 2)\n xx = int((img.shape[1] - short_edge) / 2)\n crop_img = img[yy: yy + short_edge, xx: xx + short_edge]\n resized_img = transform.resize(crop_img, (size, size))\n resized_img *= 255\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Store the input and output of a contest problem on disk. The files are organized as numerical increments, each in their own contest/problem directory. This lets us keep them separated and track which test cases produced errors.
def store(contest: str, problem: str, io: List[Tuple[str, str]]): directory = '{}/.cf-samples/{}/{}'.format( os.path.expanduser('~'), contest, problem) if not os.path.exists(directory): os.makedirs(directory) for i, (inp, out) in enumerate(io): with open('{}/{}.in'.format(directory, ...
[ "def create_files(contest_id, problem):\n # create source file\n\n # create directory with contest_id\n dir_name = str(contest_id)\n if not os.path.exists(dir_name):\n print \"Create directory: \", dir_name\n os.mkdir(dir_name)\n\n # create file if not exists\n src_file = SRC_FORMAT....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the cosine similarity score between a tweet and a doc
def cos_dist(self,doc,tweet,col): MAX_TAKE = 10 v1 = [(x,self.__tf__(x,doc)*self.__idf__(x,col)) for x in set(doc.split())] v2 = [(x,self.__tf__(x,tweet)*self.__idf__(x,col)) for x in set(tweet.split())] v2.sort(key=lambda x:x[1],reverse=True) # determine how many words to compar...
[ "def cosine_score(self):\n for i in self.all_results: \n length = 0\n for j in self.all_results[i]:\n\n length += self.all_results[i][j] ** 2\n length = math.sqrt(length)\n \n for j in self.all_results[i]:\n self.all_res...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the doc in the collection with the highest cosine similarity score
def find_max_match(self,tweet,col): best_doc = None best_score = 0 for d in col: score = self.cos_dist(d['text'],tweet,col) if score > best_score: best_score = score best_doc = d return best_doc,best_score
[ "def _get_most_similar_vector(self, query):\n most_similar = \"\"\n most_similar_distance = self.MAX_COSINE_DISTANCE\n for values, vector in self._document_vectors.items():\n distance = spatial.distance.cosine(query, vector)\n if distance < most_similar_distance:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
When given an instance, return the instance 'Name' from the Name tag. If regexp is defined, only return a Name/Value matching regexp.
def get_instance_name(instance, regexp): instancename = '' try: p = re.compile(regexp, re.I) try: for t in instance["Tags"]: if t['Key'] == 'Name': if regexp == '': instancename += t['Value'] + ' ' else: ...
[ "def get_name(self):\n return self.pattern.name", "def instance_name(self):\n return self.name[0].lower() + self.name[1:]", "def process_tvshow_name(tvshow_name):\n tvshow_match = None\n for item in get_tvshow_items():\n if tvshow_match is not None:\n break \n n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
When given an instance ID, try to determine the platform. If regexp is defined, look in tags matching regexp.
def get_platform(instance): platformname = '' try: p = re.compile('(rhel|ubuntu)', re.I) try: for t in instance["Tags"]: if t['Key'] == 'EMR': platformname = 'AWS Linux' elif t['Key'] == 'OS': platformname = t['V...
[ "def get_instance_name(instance, regexp):\n instancename = ''\n try:\n p = re.compile(regexp, re.I)\n try:\n for t in instance[\"Tags\"]:\n if t['Key'] == 'Name':\n if regexp == '':\n instancename += t['Value'] + ' '\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find all instances in EC2 that match `tag` and generate output that can be added to /etc/hosts.
def main(pattern, debug, verbose, hosts, platform, subnet, region): ec2 = boto3.client('ec2', region_name=region) try: response = ec2.describe_instances() if subnet: print("SubnetId,VpcId,NI-Id,SourceDestCheck,PublicDns,PublicIp,Primary,PrivateIp,PrivateDns") if hosts: ...
[ "def ec2_list_instances(tag_key, tag_value):\n instance_list = []\n ec2 = boto3.client(\"ec2\")\n paginator = ec2.get_paginator(\"describe_instances\")\n page_iterator = paginator.paginate(\n Filters=[\n {\"Name\": \"tag:\" + tag_key, \"Values\": [tag_value]},\n {\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Call dReal. OS-specific
def call_dReal(verifier): path = verifier.path file = verifier.file_name client = docker.from_env() volume_dict = {path: {'bind': '/data', 'mode': 'ro'}} container = client.containers.run("dreal/dreal4", " dreal data/" + file + " --model", ...
[ "def cli():\n\n if os.geteuid() != 0:\n click.echo(\"Root privileges are required for this operation\")\n sys.exit(1)\n\n # Load the helper class\n helper = UtilHelper()\n\n if not helper.check_pddf_mode():\n click.echo(\"PDDF mode should be supported and enabled for this platform f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run a task on a different queue
def test_another_queue(self): tasks.print_task2('hello another') with run_kuyruk(queue='another_queue') as worker: worker.expect('another_queue') worker.expect('hello another') worker.expect('Task is processed')
[ "def get_queue(self, task_name):", "def main():\n queue1 = queue.Queue()\n t1 = threading.Thread(target=put_queue, args=[queue1])\n t2 = threading.Thread(target=get_queue, args=[queue1])\n t1.start()\n t2.start()", "def _apply_queue(self, args, thisTask, cmd_args, payload, setup):\n if not...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Respawn a new worker if one dies. This test also covers the broker disconnect case, because when the connection drops the master worker will raise an unhandled exception. This exception will cause the worker to exit. After the worker exits, the master will spawn a new worker.
def test_respawn(self): def get_worker_pids(): pids = get_pids('kuyruk: worker') assert len(pids) == 2 return pids with run_kuyruk(process='master') as master: master.expect('Start consuming') master.expect('Start consuming') pids_...
[ "def test_dead_master(self):\n tasks.print_task('hello world')\n with run_kuyruk(terminate=False) as worker:\n worker.expect('hello world')\n worker.kill()\n worker.expect_exit(-signal.SIGKILL)\n wait_until(not_running, timeout=TIMEOUT)", "def test_master_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Failed tasks are saved to Redis
def test_save_failed(self): tasks.raise_exception() with run_kuyruk(save_failed_tasks=True) as worker: worker.expect('ZeroDivisionError') worker.expect('No retry left') worker.expect('Saving failed task') worker.expect('Saved') worker.expect('T...
[ "def test_save_failed_class_task(self):\n cat = tasks.Cat(1, 'Felix')\n\n cat.raise_exception()\n with run_kuyruk(save_failed_tasks=True) as worker:\n worker.expect('raise Exception')\n worker.expect('Saving failed task')\n worker.expect('Saved')\n\n asse...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Failed tasks are saved to Redis (class tasks)
def test_save_failed_class_task(self): cat = tasks.Cat(1, 'Felix') cat.raise_exception() with run_kuyruk(save_failed_tasks=True) as worker: worker.expect('raise Exception') worker.expect('Saving failed task') worker.expect('Saved') assert is_empty('k...
[ "def test_save_failed_arg_class(self):\n cat = tasks.Cat(1, 'Felix')\n\n tasks.jump_fail(cat)\n with run_kuyruk(save_failed_tasks=True) as worker:\n worker.expect('ZeroDivisionError')\n worker.expect('Saving failed task')\n worker.expect('Saved')\n\n asse...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Failed tasks are saved to Redis (arg class)
def test_save_failed_arg_class(self): cat = tasks.Cat(1, 'Felix') tasks.jump_fail(cat) with run_kuyruk(save_failed_tasks=True) as worker: worker.expect('ZeroDivisionError') worker.expect('Saving failed task') worker.expect('Saved') assert is_empty('k...
[ "def test_save_failed_class_task(self):\n cat = tasks.Cat(1, 'Felix')\n\n cat.raise_exception()\n with run_kuyruk(save_failed_tasks=True) as worker:\n worker.expect('raise Exception')\n worker.expect('Saving failed task')\n worker.expect('Saved')\n\n asse...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }