query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: list (lengths 19 to 20)
metadata: dict
Updates the status of `pin` to the HIGH or LOW value specified by `new_status`. This function is mostly here to make status updates easier to change if the way that happens should ever become something more complicated than just writing a pin, since this function currently just does what pigpio's wri...
def __update_status(self, pin, new_status): self.pi.write(pin, new_status)
[ "def update_flash_status(self, new_status):\n self.__update_status(self.FLASH_STATUS_PIN, new_status)", "def pin_update(self, mailboxno, new_pin):\n if not self.authenticated:\n return False\n params = {\n 'type': self.response_type,\n 'func': 'pinupdate',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the FLASH_STATUS_PIN to the new_status value
def update_flash_status(self, new_status): self.__update_status(self.FLASH_STATUS_PIN, new_status)
[ "def __update_status(self, pin, new_status):\n self.pi.write(pin, new_status)", "def update_flash_error_status(self, new_status):\n self.__update_status(self.FLASH_ERROR_STATUS_PIN, new_status)", "def update_boot_status(host_id, boot_status):\n boot_status_string = \"%s\" % (boot_status)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the CLEAR_MODE_STATUS_PIN to the new_status value
def update_clear_mode_status(self, new_status): self.__update_status(self.CLEAR_MODE_STATUS_PIN, new_status)
[ "def __update_status(self, pin, new_status):\n self.pi.write(pin, new_status)", "def update_flash_status(self, new_status):\n self.__update_status(self.FLASH_STATUS_PIN, new_status)", "def _update_car_status(self, content):\n LOG.info(\"[car control] Recevied status_ctrl data %s.\", content...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the DECK_EMPTY_STATUS_PIN to the new_status value
def update_deck_empty_status(self, new_status): self.__update_status(self.DECK_EMPTY_STATUS_PIN, new_status)
[ "def __update_status(self, pin, new_status):\n self.pi.write(pin, new_status)", "def update_flash_status(self, new_status):\n self.__update_status(self.FLASH_STATUS_PIN, new_status)", "def update_clear_mode_status(self, new_status):\n self.__update_status(self.CLEAR_MODE_STATUS_PIN, new_sta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the FLASH_ERROR_STATUS_PIN to the new_status value
def update_flash_error_status(self, new_status): self.__update_status(self.FLASH_ERROR_STATUS_PIN, new_status)
[ "def update_flash_status(self, new_status):\n self.__update_status(self.FLASH_STATUS_PIN, new_status)", "def __update_status(self, pin, new_status):\n self.pi.write(pin, new_status)", "def ppu_status_changed(self, is_error, text):\n self.emit('ppuStatusChanged', (is_error, text))", "def u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies overrides to a benchmark.
def _apply_overrides( benchmark, n_runs: Optional[int] = None, only_problems: str = "", exclude_problems: str = "$", only_algorithms: str = "", exclude_algorithms: str = "$", output_dir: Optional[Path] = None, ) -> None: if n_runs is not None: benchmark._n_runs = n_runs l...
[ "def _UpdateBenchmarkSpecWithFlags(benchmark_spec):\n benchmark_spec.max_sentences = FLAGS.robertammlm_max_sentences\n benchmark_spec.nproc_per_node = FLAGS.robertammlm_nproc_per_node\n benchmark_spec.log_interval = FLAGS.robertammlm_log_interval\n benchmark_spec.profiler = FLAGS.robertammlm_profiler\n benchma...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filters the items of a dictionary based on an include/exclude regexp pair. Returns `True` if the size of the dictionary changed.
def _include_exclude( dictionary: dict, include_pattern: str, exclude_pattern: str, ) -> bool: incl, excl = re.compile(include_pattern), re.compile(exclude_pattern) keys = list(dictionary.keys()) for k in keys: if excl.match(k) or not incl.match(k): del dictionary[k] retu...
[ "def match_filters(self, sub_entry: dict) -> bool:\n for attribute, keep in self.map_filter.items():\n if attribute in sub_entry.keys():\n if not keep(sub_entry[attribute]):\n return False\n return True", "def filter_dic(bites=bites, bites_done=exclude_bi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
From the XML file containing the list of FFParam objects and the forcefield template, load these items.
def loadFFtpl(fftpl_file): from xml.etree.ElementTree import parse root = parse(fftpl_file).getroot() params=[] for ff_param in root.find('FFParams'): x=FFParam() params.append(x.fromElementTreeElement(ff_param)) template=root.find('FFTemplate').text return params,template
[ "def extractFields(self, dxlFileContent):\n \n extractedFields = []\n fields = dxlFileContent.getElementsByTagName(\"field\")\n \n for field in fields:\n dico = {}\n settings = {}\n dico['type'] = 'PlominoField'\n dico['id'], dico['title...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that returns the probability vector
def prob_vector(self) -> list: return self.__ps__
[ "def to_vector(self):\n self.prob[self.prob == 0] += 1e-5\n self.prob[self.prob == 1] -= 1e-5\n self.prob /= self.prob.sum()\n\n return np.array([\n *np.log(self.prob / (1 - self.prob)),\n *self.means.ravel(),\n *np.ravel([c[np.triu_indices(self.n_dim)] f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate n events of a discrete distribution using a loaded coin
def gen_loaded_coin(distribution: DiscreteDistribution, n: int): result = [] for i in range(0, n): tot = 0 ind = 0 for p in distribution.prob_vector(): v = r.random() if v <= p / (1 - tot): result.append(ind) break tot +...
[ "def gen_roulette(distribution: DiscreteDistribution, n: int):\n # compute the accumulated probability of each event\n prob_cum = []\n tot = 0\n for p in distribution.prob_vector():\n prob_cum.append(tot)\n tot += p\n # generate the n events\n result = []\n for i in range(0, n):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate n events of a discrete distribution using the roulette-wheel method
def gen_roulette(distribution: DiscreteDistribution, n: int): # compute the accumulated probability of each event prob_cum = [] tot = 0 for p in distribution.prob_vector(): prob_cum.append(tot) tot += p # generate the n events result = [] for i in range(0, n): v = r.r...
[ "def gen_loaded_coin(distribution: DiscreteDistribution, n: int):\n result = []\n for i in range(0, n):\n tot = 0\n ind = 0\n for p in distribution.prob_vector():\n v = r.random()\n if v <= p / (1 - tot):\n result.append(ind)\n break\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Field's name as defined in FIT global profile. If name was not found in global profile, a string of the form `unknown_<def_num>` is created.
def name(self): return self.field.name if self.field else 'unknown_%d' % self.def_num
[ "def name(field: BaseField) -> str:\n return field.NAME", "def field_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"field_name\")", "def _get_field_name(self):\n return self.field_name", "def user_name_field(self) -> pulumi.Input[str]:\n return pulumi.get(self, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Field's name as defined in FIT global profile. If name was not found in global profile, ``self.def_num`` is returned (`int`). This value is compatible with `is_named`.
def name_or_num(self): return self.field.name if self.field else self.def_num
[ "def name(self):\n return self.field.name if self.field else 'unknown_%d' % self.def_num", "def _get_field_name(self):\n return self.field_name", "def field_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"field_name\")", "def name(field: BaseField) -> str:\n retu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Field's definition number (`int`)
def def_num(self): # prefer to return the def_num on the field since field_def may be None # if this field is dynamic return self.field.def_num if self.field else self.field_def.def_num
[ "def name_or_num(self):\n return self.field.name if self.field else self.def_num", "def name(self):\n return self.field.name if self.field else 'unknown_%d' % self.def_num", "def getNumFields(self) -> \"int\":\n return _coin.SoFieldData_getNumFields(self)", "def get_num_of_plain_fields(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Flag to indicate whether this field has been generated through expansion
def is_expanded(self): return not self.field_def
[ "def _needs_expansion(value):\n return Config.RE_HAS_VAR_REF.match(value) is not None", "def is_expansion(self):\n return is_set_bit(self.char_status, 5)", "def is_well_generated(self):\n return True", "def is_temp_field(self: Fdef) -> bool:\n self._resolve_if_needed()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if this field has the specified name (`str`) or definition number (`int`)
def is_named(self, name_or_num): if self.field: if name_or_num in (self.field.def_num, self.field.name): return True if self.parent_field: if name_or_num in (self.parent_field.def_num, self.parent_field.name): return True if self.field_de...
[ "def name_or_num(self):\n return self.field.name if self.field else self.def_num", "def check_name_field(self):\n num_pattern = re.compile(r'\\d', re.I | re.M)\n white_space = re.compile(r'^\\s')\n if num_pattern.search(self.ar_first_name):\n raise exceptions.ValidationError...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepare output folder to receive images and bounding box data.
def _prepare_output_path(self): self._image_dir = os.path.join(self._output_dir, 'images') self._annotation_dir = os.path.join(self._output_dir, 'annotations') self._resized_dir = os.path.join(self._output_dir, 'resized') if not os.path.exists(self._output_dir): os.makedirs...
[ "def prepareOutput():\r\n\r\n os.removedirs(\"output\")\r\n os.mkdir(\"output\")", "def prepare_visualization_directory():\n src = html_source_path\n dst = os.path.abspath(os.path.expanduser(\"~/.netwulf/\"))\n\n # always copy source files to the subdirectory\n copy_tree(src, dst)", "def prepr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copy annotation XML, trainval files, and label_map.pbtxt to deep_detection.
def _copy_annotation_to_deep_detection(self): label_map_src = os.path.join(self._output_dir, 'label_map.pbtxt') label_map_dest = os.path.join(self._detection_annotation_dir, 'label_map.pbtxt') train_val_src = os.path.join(self._output_dir, 'trainval.txt') train_val_dest = os.path.join(s...
[ "def setup_annotations(self):\n sbd_path = self.sbd_path\n target_path = pjoin(self.root, \"SegmentationClass/pre_encoded\")\n if not os.path.exists(target_path):\n os.makedirs(target_path)\n #print ('sbd_path = ', sbd_path)\n path = pjoin(sbd_path, \"dataset/train.txt\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copy resized images to deep_detection.
def _copy_resized_images_to_deep_detection(self): resized_image_files = glob(os.path.join(self._resized_dir, '*.jpg')) resized_image_files_dest = os.path.join(self._detection_dir, 'images') if os.path.exists(resized_image_files_dest): filelist = glob(os.path.join(os.path.join(resiz...
[ "def crop_and_resize(imgs, shape=(32, 16, 3)):\n height, width, channels = shape\n imgs_resized = np.empty([len(imgs), height, width, channels])\n for i, img in enumerate(imgs):\n cropped = img[55:135, :, :]\n imgs_resized[i] = imresize(cropped, shape)\n #imgs_resized[i] = cv2.resize(i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes one or more features and calculates the centroid using the mean of all vertices. This lessens the effect of small islands and artifacts when calculating the centroid of a set of polygons.
def centroid(features, options=None): if not options: options = {} coords = get_coords_from_features(features) if get_input_dimensions(coords) == 1: coords = [coords] x_sum = 0 y_sum = 0 length = 0 x_sum, y_sum, length = reduce(reduce_coords, coords, [x_sum, y_sum, lengt...
[ "def centroid(points):\n return np.mean(points, axis=0)", "def calc_centroid(self, points):", "def centroid(vertices):\n return (vertices[0] + vertices[1] + vertices[2]) / 3", "def compute_centroid(self):\n u_m = self.__u ** self.__m\n\n sum_data_weights = np.dot(u_m, self.__obs)\n if self.__ob...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Acquires a new WSS URL using the rtm.connect API method
def issue_new_wss_url(self) -> str: try: api_response = self.web_client.rtm_connect() return api_response["url"] except SlackApiError as e: if e.response["error"] == "ratelimited": delay = int(e.response.headers.get("Retry-After", "30")) # Tier1 ...
[ "def getRGWConnection():\n\n connection = RGWAdmin(\n access_key = parser.get('ceph-admin', 'access_key'),\n secret_key = parser.get('ceph-admin', 'secret_key'),\n server = parser.get('ceph-admin', 'server'), \n ) \n\n return connection", "def connect_via_lightstream(self):\n print(\"Starting c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Disconnects the current session.
def disconnect(self): self.current_session.disconnect()
[ "def disconnect():\n\n if login_session['provider'] == 'google':\n return gdisconnect()", "def at_disconnect(self):\r\n if self.logged_in:\r\n sessid = self.sessid\r\n player = self.player\r\n _GA(player.dbobj, \"unpuppet_object\")(sessid)\r\n uaccount ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new ir.Set from another ir.Set. The new Set inherits source Set's scope, schema item, expression, and, if preserve_scope_ns is set, path_id. If preserve_scope_ns is False, the new Set's path_id will be namespaced with the currently active scope namespace.
def new_set_from_set( ir_set: irast.Set, *, preserve_scope_ns: bool=False, path_id: Optional[irast.PathId]=None, stype: Optional[s_types.Type]=None, rptr: Optional[irast.Pointer]=None, ctx: context.ContextLevel) -> irast.Set: if path_id is None: path_id = ir_s...
[ "def new_set_from_set(\n ir_set: irast.Set, *,\n preserve_scope_ns: bool=False,\n ctx: context.ContextLevel) -> irast.Set:\n path_id = ir_set.path_id\n if not preserve_scope_ns:\n path_id = path_id.merge_namespace(ctx.path_id_namespace)\n result = new_set(\n path_id=path_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an intersection of source_set with type stype.
def type_intersection_set( source_set: irast.Set, stype: s_types.Type, *, optional: bool, ctx: context.ContextLevel, ) -> irast.Set: arg_type = get_set_type(source_set, ctx=ctx) result = schemactx.apply_intersection(arg_type, stype, ctx=ctx) if result.stype is arg_type: return ...
[ "def get_sources_by_type(self, source_type):\r\n\t\tif not source_type:\r\n\t\t\treturn self.sources\r\n\t\telse:\r\n\t\t\tmeth_name = \"get_%s_sources\" % source_type\r\n\t\t\treturn getattr(self, meth_name)()", "def interset(genotypes):\n\tsnplist = map(lambda x: getsnps(x), genotypes)\n\tprint len(snplist)\n\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return ir.Set for a pointer defined as a computable.
def computable_ptr_set( rptr: irast.Pointer, *, unnest_fence: bool=False, hoist_iterators: bool=False, same_computable_scope: bool=False, ctx: context.ContextLevel, ) -> irast.Set: ptrcls = typegen.ptrcls_from_ptrref(rptr.ptrref, ctx=ctx) source_set = rptr.source source_scls = get_se...
[ "def computable_ptr_set(\n rptr: irast.Pointer, *,\n unnest_fence: bool=False,\n ctx: context.ContextLevel) -> irast.Set:\n ptrcls = rptr.ptrcls\n\n # Must use an entirely separate context, as the computable\n # expression is totally independent from the surrounding query.\n subctx ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper to see if the string contains any python formatting. >>> tests = [ ... u"This does not have any python formating", ... u"%(This)s however does.", ... u"%d also has", ... u"%s also has", ... u"%s and %s and %s and %s and %s and %s and, also have", ... u"%(hex)E also has" ... ]
def check_python_format(key): # Let's try for one string try: m = key % 'a' return True except TypeError: pass # Let's try for a single float try: m = key % 1.0 return True except TypeError: pass # The above failed, let's see if there are several python format...
[ "def contains_any_py_chars(input_str):\n # return any(c in PYTHON for c in list(input_str.lower()))\n return re.search(r'[python]', input_str.lower()) # good example of search()", "def contains_any_py_chars(input_str):\n #PY = 'PYTHON'\n count = [letter for letter in input_str.upper() if letter in 'P...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes the Prometheus manager with configuration from the Prometheus yml file
def __init__(self, namespace, config): with open("prometheus.yml") as f: prometheus_yml = list(safe_load_all(f)) self.redis_metrics = prometheus_yml[0] self.bps_metrics = prometheus_yml[1] self.__config = config self.__namespace = namespace self.__cus...
[ "def setup(self) -> None:\n # read config from container, use service name as default prefix\n service_name = self.container.service_name\n config = self.container.config.get(\"PROMETHEUS\", {})\n service_config = config.get(service_name, {})\n prefix = service_config.get(\"prefix...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Processing of automatic sun scans for monitoring purposes of the radar system.
def process_sunscan(procstatus, dscfg, radar_list=None): if procstatus != 1: return None, None pwrh_field = None pwrv_field = None zdr_field = None sun_hit_method = dscfg.get('sun_hit_method', 'PSR') sun_position = dscfg.get('sun_position', 'MF') n_noise_bins = dscfg.get('n_noise_...
[ "def _fillscan(scan, radar, index=0):\n\n startray = radar.sweep_start_ray_index['data'][index]\n stopray = radar.sweep_end_ray_index['data'][index]\n sweep_times = radar.time['data'][startray:stopray+1]\n\n # Dataset-specific 'where'\n scan.elangle = radar.elevation[\"data\"][startray] * dr\n sca...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the destination IP from the RX pcap file
def get_pcap_info(file_prefix): exec_dir = BuiltIn().get_variable_value("${EXECDIR}") rx_pcapfile = '{0}/{1}/{2}_rx.pcap' \ .format(exec_dir, con.TLDK_TESTCONFIG, file_prefix) packets = rdpcap(rx_pcapfile) count = len(packets) ### the first packet pkt = pack...
[ "def srcip(self) :\n\t\ttry :\n\t\t\treturn self._srcip\n\t\texcept Exception as e:\n\t\t\traise e", "def getFirstSourceIPAddress(pkts):\n for pkt in pkts:\n for layer in findLayers(pkt):\n if layer == 'TCP':\n return pkt[IP].src", "def _get_dest(self, instruction):\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Execute the udpfwd test on the dut_node.
def exec_the_udpfwd_test(dut_node, dut_if, file_prefix, \ dest_ip, is_ipv4=True): pci_address = Topology.get_interface_pci_addr(dut_node, dut_if) ssh = SSH() ssh.connect(dut_node) if is_ipv4: cmd = 'cd {0}/{4} && ./run_tldk.sh {0}/{5}/{2}_rx.pcap ' \ ...
[ "def forward(local_port, pid):\n return _adb_command(\"forward tcp:{} jdwp:{}\".format(local_port, pid))", "def udp_client(client, vs_name, **kwargs):\n addr_type = kwargs.get('addr_type', 'V4')\n vip_id = kwargs.get('vip_id', 0)\n\n port = kwargs.get('port', 8000)\n data = kwargs.get('data', 512)\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
After executing the udpfwd command, use this to get the test result.
def get_the_test_result(dut_node, file_prefix): ssh = SSH() ssh.connect(dut_node) cmd = 'cd {0}; sudo /usr/sbin/tcpdump -nnnn -vvv -r ./{2}/{1}_tx.pcap' \ ' | grep \'udp sum ok\' | wc -l' \ .format(con.REMOTE_FW_DIR, file_prefix, con.TLDK_TESTCONFIG) (ret_code,...
[ "def test_udp_traceroute(self):\n def finished(packets, port):\n log.debug(\"Finished running UDP traceroute test on port %s\" % port)\n answered, unanswered = packets\n self.report['hops_' + str(port)] = []\n for snd, rcv in answered:\n report = {'t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
convert string to fileIO
def s_to_fio(s: str) -> fileIO: fio = IO.BytesIO(s.encode('utf8')) fio.seek(0) return fio
[ "def get_file_obj(path: str) -> TextIOWrapper:\n return open(path, 'r')", "def test_convert_file_type(self):\n\n data = \"\"\"id,name,surname\n1,Adam,Kowalski\n2,Seth,McFarlane\"\"\"\n\n with patch('builtins.open', mock_open(read_data=data)):\n result = zadanie.convert_file('foo')\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
convert bytes to fileIO
def b_to_fio(b: bytes) -> fileIO: fio = IO.BytesIO(b) fio.seek(0) return fio
[ "def s_to_fio(s: str) -> fileIO:\n fio = IO.BytesIO(s.encode('utf8'))\n fio.seek(0)\n return fio", "def b64s_to_fio(b64s: str) -> fileIO:\n fio = IO.BytesIO(base64.b64decode(b64s.encode('utf8')))\n fio.seek(0)\n return fio", "def fio_to_b(fio: fileIO) -> bytes:\n fio.seek(0)\n b = fio.re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
convert base 64 string to fileIO
def b64s_to_fio(b64s: str) -> fileIO: fio = IO.BytesIO(base64.b64decode(b64s.encode('utf8'))) fio.seek(0) return fio
[ "def fio_to_b64s(fio: fileIO) -> str:\n fio.seek(0)\n b64s = base64.b64encode(fio.read()).decode('utf8')\n fio.seek(0)\n return b64s", "def convertToBase64(raw_bytes):\n return raw_bytes.encode('base64')", "def _encode_file_base64_(self, file_path):\n encoded_file = base64.b64encode(open(f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
convert fileIO to bytes
def fio_to_b(fio: fileIO) -> bytes: fio.seek(0) b = fio.read() fio.seek(0) return b
[ "def read_file_as_bytes(path: str) -> bytes:\n with open(path, \"rb\") as f:\n return f.read()", "def bytes(self):\n assert self.hasfilename(), \"Invalid filename\"\n with open(self.filename(), 'rb') as f:\n data = io.BytesIO(f.read())\n return str(data.read()).encode('UT...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
convert fileIO to string
def fio_to_s(fio: fileIO) -> str: fio.seek(0) s = fio.read().decode('utf8') fio.seek(0) return s
[ "def next_bytes_as_str(file: BinaryIO) -> str:\n return next_bytes_as_binary(file).decode()", "def binaryFileToString(filename):\n with open(filename, 'rb') as source:\n fileContent = source.read()\n return fileContent", "def file_to_string(file_name):\n with open(file_name, 'r') as f:\n tex...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
convert fileIO to base 64 string
def fio_to_b64s(fio: fileIO) -> str: fio.seek(0) b64s = base64.b64encode(fio.read()).decode('utf8') fio.seek(0) return b64s
[ "def _encode_file_base64_(self, file_path):\n encoded_file = base64.b64encode(open(file_path, 'rb').read())\n return self._base64_to_str(encoded_file)\n # return str(encoded_file)[2:-1]", "def b64s_to_fio(b64s: str) -> fileIO:\n fio = IO.BytesIO(base64.b64decode(b64s.encode('utf8')))\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
create a zip fileIO from filenames and data. This function creates a fileIO with a zip file containing all the input files, specified by their filenames, with their contents given as base 64 strings.
def create_zip_fio(names_s: list, datas_b64s: list) -> fileIO: # Create empty bytesIO out_fio = IO.BytesIO() # Open it as a zip with ZipFile(out_fio, 'w') as f: # Write each data to a file called name for name, data in zip(names_s, datas_b64s): f.writestr(name, b64s_to_b(data...
[ "def create_zip_file(files):\n\n with tempfile.NamedTemporaryFile() as temp_file:\n with zipfile.ZipFile(temp_file.name, \"w\") as zip_file:\n for filename, contents in files.items():\n zip_file.writestr(filename, contents)\n yield temp_file.name", "def zip_response(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
generator that yields files from zip
def files_from_zip(zip_fio: fileIO): # Open zip file to read with ZipFile(zip_fio, 'r') as f: # Extract list of fullpath filenames names = f.namelist() for name in names: # Extract name and extension nameext = nameext_from_path(name) # If it's not a di...
[ "def test_iter_files(self):\n handle = self.make_archive(io.BytesIO())\n\n self.assertEqual(\n sorted(self.iter_files(handle)),\n sorted(self.source_fs.walk.files())\n )", "def open_zip(path_or_file, *args, **kwargs):\n with closing(zipfile.ZipFile(path_or_file, *args, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
check if fileIO is a zip
def is_zip(zip_fio: fileIO) -> bool: try: ZipFile(zip_fio, 'r') return True except: return False
[ "def bytes_are_zip_file(content: bytes):\n return zipfile.is_zipfile(io.BytesIO(content))", "def is_zip(self, document):\n fileName, fileExtension = os.path.splitext(document)\n if fileExtension == \".zip\":\n return True\n return False", "def is_zip_file(self):\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
extract name and extension from path
def nameext_from_path(path: str) -> str: nameext = os.path.split(path)[-1] return nameext
[ "def path_ext(path: str) -> str:\n return os.path.splitext(path)[1]", "def file_ext(path):\n\tpath = DataItem(path).rr('\\?.*?$').split('.')\n\text = path[-1].lower() if len(path)>1 else None\n\n\treturn ext", "def splitext(path):\r\n i = 0\r\n n = -1\r\n for c in path:\r\n if c == '.': n = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Makes sure the passed dataframes are converted to numpy arrays if they are not already. Also checks that the passed df's are either ndarrays or pandas DataFrames, and raises an exception if they aren't.
def ensure_numpy(self, *args): npdarrays = [] for arg in args: if not isinstance(arg,(pandas.core.frame.DataFrame, np.ndarray)): raise Exception("Wrong type", type(arg)) elif isinstance(arg,pandas.core.frame.DataFrame): npdarrays.append(arg.t...
[ "def _validateDataFrame(self, df):\n #if the df is a standard DataFrame\n if type(df) == pd.DataFrame:\n self._logger.info('Using regular dataframe')\n\n if df.empty:\n self._logger.error('Empty dataframe')\n raise EmptyDataError('DataFrame is empty'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
finds which features to remove based on the low and high cutoff points
def fit(self, x, y): mutual_info = self.MI(x, y) #print(mutual_info) self.features_to_remove = [int(attr) for attr in mutual_info if mutual_info[attr] <= self.low or mutual_info[attr] >= self.high] #print(self.features_to_remove)
[ "def _get_current_features_to_remove(self, shap_importance_df, columns_to_keep=None):\n\n # Bounding the variable.\n num_features_to_remove = 0\n\n # If columns_to_keep is not None, exclude those columns and\n # calculate features to remove.\n if columns_to_keep is not None:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes the selected features and returns the resulting array
def transform(self, x): x_np = self.ensure_numpy(x) return np.delete(x_np, self.features_to_remove, axis=1)
[ "def remove_features(X: np.ndarray):\n print('Removing features...')\n all_feature_names = read_feature_names()\n alives_features = read_useful_feature_names()\n print('all_features: {}'.format(len(all_feature_names)))\n print('good_features: {}'.format(len(alives_features)))\n \"\"\" Validity che...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The default configuration for sessions that run on this server.
def default_session_config(self) -> tensorflow.core.protobuf.config_pb2.ConfigProto:
[ "def get_default_config(self):\n config = super(UsersCollector, self).get_default_config()\n config.update({\n 'path': 'users',\n 'utmp': None,\n })\n return config", "def set_defaults(self):\n self._config[\"DEFAULT\"] = Config.Default\n\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Device filters for remote tasks in the cluster.
def cluster_device_filters(self) -> tensorflow.core.protobuf.device_filters_pb2.ClusterDeviceFilters:
[ "def FilterDevices(devices, cluster_prefix):\n return [d for d in devices if d.physical_cluster.startswith(cluster_prefix)]", "def filter_task(self, task, feature, date_ranges):\n all_source_times = list()\n if self.filter_product is not None and self.filter_product != {}:\n for sr in ta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(array) -> int Takes in an array of integers and returns the int that appears an odd number of times. There will always be _only one_ integer that appears an odd number of times. We use .pop to pop the last value out of the set. We can also use `next(iter(set(list)))` to extract the value.
def find_it(n): return set([i for i in n if n.count(i) % 2]).pop()
[ "def find_odd_occurring(alist):\r\n\tans=0\r\n\tfor element in alist:\r\n\t\tans^= element\r\n\treturn ans", "def odd_int1(list1):\n\tcount_elements = {i: list1.count(i) for i in list1}\n\t\n\tfor i in count_elements:\n\t\tif count_elements[i] % 2 == 0:\n\t\t\treturn i", "def odd_int3(list1):\n\t\n\twhile len(l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create bins from min_gc value to max_gc value in increments of 0.05 (for GC content table)
def get_bins(tbl): logging.info('GC table generation') logging.info(tbl) min_gc = np.min(tbl['gc']) max_gc = np.max(tbl['gc']) start = round(min_gc - np.mod(min_gc, 0.05), 2) stop = round(max_gc + 0.1 - np.mod(max_gc, 0.05), 2) all_bins = np.arange(start, stop, step=0.05) return all_bin...
[ "def gen_binc_binl(min_value, max_value, bin_width):\n \n bin_list = np.arange(min_value, max_value, bin_width)\n \n bin_centres = bin_list[:-1] + bin_width\n \n return bin_list, bin_centres", "def _create_bins(self):\n min_conf = self.data[self.conf].min()\n max_conf = self.data[s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates table of collapsed coverage per interval
def get_coverage_per_interval(tbl): # Coverage per interval Graph comes from unfiltered Bam, Pool A Targets unfiltered_boolv = (tbl['method'] == UNFILTERED_COLLAPSING_METHOD) # Filter out MSI & Fingerprinting intervals exon_boolv = ['exon' in y for y in tbl[WALTZ_INTERVAL_NAME_COLUMN]] relevant_cov...
[ "def get_coverage_per_interval_exon_level(tbl):\n total_boolv = (tbl['method'] == DUPLEX_COLLAPSING_METHOD)\n final_tbl = tbl[total_boolv]\n return final_tbl", "def _format_cov_table(coverage_data: Dict[str, Any]) -> List[str]:\n col_key_map = {\n 'Statements': 'num_statements',\n 'Missi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Exon-level Coverage per Interval Graph comes from Duplex Bam, Pool A Targets
def get_coverage_per_interval_exon_level(tbl): total_boolv = (tbl['method'] == DUPLEX_COLLAPSING_METHOD) final_tbl = tbl[total_boolv] return final_tbl
[ "def get_coverage_per_interval(tbl):\n # Coverage per interval Graph comes from unfiltered Bam, Pool A Targets\n unfiltered_boolv = (tbl['method'] == UNFILTERED_COLLAPSING_METHOD)\n\n # Filter out MSI & Fingerprinting intervals\n exon_boolv = ['exon' in y for y in tbl[WALTZ_INTERVAL_NAME_COLUMN]]\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copy the fragmentsizes.txt files from the Waltz output folders, and create a combined table for all bam types Fragment Sizes graph comes from Unfiltered Bam, Pool A Targets
def copy_fragment_sizes_files(args): fragment_sizes_files = [ (args.standard_waltz_pool_a, 'Standard_A'), (args.unfiltered_waltz_pool_a, 'Unfiltered_A'), (args.simplex_waltz_pool_a, 'Simplex_A'), (args.duplex_waltz_pool_a, 'Duplex_A'), (args.standard_waltz_po...
[ "def calculate_sizes(filename):\n\n global ram_size\n global xip_rom_size\n global xip_ram_size\n\n objdump_command = \"objdump -h \" + filename\n objdump_output = subprocess.check_output(objdump_command,\n shell=True).splitlines()\n\n for line in objdum...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DMP-specific format for coverage_per_interval_table file
def reformat_exon_targets_coverage_file(coverage_per_interval_table): for method in coverage_per_interval_table[METHOD_COLUMN].unique(): subset = coverage_per_interval_table[coverage_per_interval_table['method'] == method] subset = subset.pivot('interval_name', SAMPLE_ID_COLUMN, 'peak_coverage') ...
[ "def reformat_coverage_files(coverage_table):\n coverage_table_A_targets = coverage_table[coverage_table['pool'] == POOL_A_LABEL]\n coverage_table_B_targets = coverage_table[coverage_table['pool'] == POOL_B_LABEL]\n coverage_table_A_targets = coverage_table_A_targets.pivot(SAMPLE_ID_COLUMN, 'method', 'aver...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Output coverage files in DMP-specific DB format for upload
def reformat_coverage_files(coverage_table): coverage_table_A_targets = coverage_table[coverage_table['pool'] == POOL_A_LABEL] coverage_table_B_targets = coverage_table[coverage_table['pool'] == POOL_B_LABEL] coverage_table_A_targets = coverage_table_A_targets.pivot(SAMPLE_ID_COLUMN, 'method', 'average_cove...
[ "def coverage_report():\n sh(\"coverage combine\")\n sh(\"coverage report\")\n sh(\"coverage html\")\n info(\"WRITTEN TO: build/coverage.html/\")\n # -- DISABLED: sh(\"coverage xml\")", "def coverage_wrapper(dset_id, filtered_reads, utrfile_path):\n\n ## XXX This is a sin: I'm introducing a once...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads the file with the share ids and returns them as a list of strings.
def _read_share_keys(): print("Read indexed shares") with open(args.sharefile) as f: keys = f.readlines() # Filter out commented lines and look for #END tag # to skip the following shares keys = [k.replace('\n', '') for k in keys] if '#END' in keys: idx = keys.index('#END') ...
[ "def list_file_share_internals(self, file_share):\n files_list = list()\n generator = self.file_srv.list_directories_and_files(file_share)\n for file_or_dir in generator:\n files_list.append(file_or_dir.name)\n return files_list", "def list_file_shares(self):\n file_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extracts the currency out of the description of a share
def _extract_currency(description): try: pattern = '<br>Currency: ' start_idx = description.index(pattern)+len(pattern) return description[start_idx : start_idx+3] # The currency string always has length 3 except: return "unknown"
[ "def get_share_price(ticker=''):\n try:\n earnings_url = 'https://finance.yahoo.com/q/ks?s=' + ticker.lower() + '+Key+Statistics'\n request = requests.get(earnings_url, timeout=timeout)\n soup = bs4.BeautifulSoup(request.text, 'html.parser')\n # TODO replace magic string with reasonab...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extracts the ISIN out of the description of a share
def _extract_isin(description): try: pattern = '<br>ISIN: ' start_idx = description.index(pattern)+len(pattern) end_idx = description[start_idx:].index('<')+start_idx return description[start_idx : end_idx] except: return "unknown"
[ "def naive_extract_shares(share):\n return share[0][1], share[1][1]", "def issn(self):\n return self._entry.get('prism:issn')", "def extract_issn(self) -> ISSN:\n return ISSN(self._id[3:10])", "def _getSubtitleNumber(entry):\n return entry['SN']", "def __get_instrument_details(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inserts the data in the temporary share table.
def _insert_in_tmp_share_table(data): description = data['description'] values = ( data['database_code'] + "/" + data['dataset_code'], data['name'], _extract_isin(description), "Quandl", _extract_currency(description), description, data['oldest_available_d...
[ "def _move_temp_data_to_data(key):\n sql=\"\"\"\n SELECT\n %s as key,\n c.day as day,\n c.date as date,\n tmp.value as value\n\tFROM\n stocksearch.\"Calendar\" as c\n\tLEFT OUTER JOIN\n\t\t(select * from \"TmpShareData\" where key=%s) as tmp\n\t ON tmp.date = c.date\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fills the calendar table
def _fill_calendar_table(): sql=""" /*Create calendar*/ INSERT INTO stocksearch."Calendar" SELECT row_number() OVER (ORDER BY date) as day, date FROM "TmpShareData" GROUP BY date HAVING count(key) > 0 ORDER...
[ "def fill_calendar_fields(self, calendar_data):\n s = self.selenium\n for field in calendar_data:\n # click on calendar button\n xpath = self.elements['pages']['listing']['datepicker'] % {\n \"field\" : field\n }\n s.click(xpath)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Moves the data from the temporary share data table to the real share data table. Here we do also the gap filling.
def _move_temp_data_to_data(key): sql=""" SELECT %s as key, c.day as day, c.date as date, tmp.value as value FROM stocksearch."Calendar" as c LEFT OUTER JOIN (select * from "TmpShareData" where key=%s) as tmp ON tmp.date = c.date ORDER BY c.day; "...
[ "def unbind_data(self):\n self.trajectory_df = pd.DataFrame()", "def pop(self):\n self.df = self.df[1:]", "def data_reset(self):\n # ic()\n self.arches.clear()\n self.arch_ids.clear()\n self.data_1d.clear()\n self.data_2d.clear()\n self.new_scan = True", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates the temporary tables to save the downloaded data
def _create_tmp_tables(): sql = """ CREATE TEMPORARY TABLE "TmpShare" ( key character varying(50) COLLATE pg_catalog."default" NOT NULL, name character varying(50) COLLATE pg_catalog."default", isin character varying(50) COLLATE pg_catalog."default", "dataSource" character v...
[ "def create_table(self):\n table_path = os.path.join(self.opts[\"data_dir\"], self.table_name())\n self.output_file = open_fw(table_path, encoding=self.encoding)\n self.output_file.write(u'<?xml version=\"1.0\" encoding=\"UTF-8\"?>')\n self.output_file.write(u'\\n<root>')\n self.t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the LDProofVCDetail instance.
def __init__( self, credential: Optional[Union[dict, VerifiableCredential]], options: Optional[Union[dict, LDProofVCDetailOptions]], ) -> None: self.credential = credential self.options = options
[ "def __init__(self, cb, model_unique_id, os_product_id=None, initial_data=None):\n super(Vulnerability, self).__init__(cb, model_unique_id, initial_data)\n\n if model_unique_id is not None and initial_data is None:\n # os_product_id required if CVE occurs in more than one OS/Product\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes the surface interaction for the ccx solver.
def ccx(self): res = ["*SURFACE INTERACTION,NAME=%s" % self.name] res.append("*SURFACE BEHAVIOR,PRESSURE-OVERCLOSURE=%s" % self.int_type) if self.int_type == 'EXPONENTIAL': res.append("%f,%f" % (self.c0, self.p0)) elif self.int_type == 'LINEAR': res.append("%e" % ...
[ "def output(self, fp: 'FILE *') -> \"void\":\n return _coin.SbVec3d_output(self, fp)", "def writeSurfaceTecplot(self, fileName, surfaceName=\"default\", fromDVGeo=None):\n\n p0, p1, p2 = self._getSurfaceVertices(surfaceName, fromDVGeo)\n\n f = open(fileName, \"w\")\n f.write('TITLE = \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes the contact pair for the ccx solver.
def ccx(self): res = [] end = '' int_name = self.surf_int.name m_name = self.master_comp.name s_name = self.slave_comp.name if self.surf_to_surf: end = ',TYPE=SURFACE TO SURFACE' line = "*CONTACT PAIR,INTERACTION=%s%s" % (int_name, end) res.app...
[ "def Save_Contacts(self):\n text_file = open(\"contactbook.txt\",\"w\")\n for i in range(self.Get_ContactList_Length()):\n if i == self.Get_ContactList_Length(): # (If this is the last entry in the list, dont include a \\n at the end)\n text_file.write(self.__contactList[i].G...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns context variables required by apps that use Django's authentication system. If there is no 'user' attribute in the request, uses AnonymousUser (from django.contrib.auth).
def auth(request): if hasattr(request, 'user'): user = request.user else: user = AnonymousUser() return { 'user': user, 'perms': PermWrapper(user), }
[ "def auth(request):\n import warnings\n warnings.warn(\n \"The context processor at `django.core.context_processors.auth` is \" \\\n \"deprecated; use the path `django.contrib.auth.context_processors.auth` \" \\\n \"instead.\",\n DeprecationWarning\n )\n #from django.contrib....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the min and max range of the provided array that excludes outliers following the IQR rule. This function computes the inter-quartile range (IQR), defined by Q3 - Q1, i.e. the difference between the percentiles for 75% and 25% of the distribution. The region without outliers is defined by [Q1 - 1.5*IQR, Q3 + 1.5*IQR].
def auto_range_iqr(data_array: np.ndarray, cutoff_percentile: Union[ Tuple[Number, Number], Number]=DEFAULT_PERCENTILE ) -> Tuple[float, float]: if isinstance(cutoff_percentile, tuple): t = cutoff_percentile[0] b = cutoff_percentile[1] ...
[ "def remove_outlier_IQR(df, q1=0.25, q3=0.75):\r\n Q1 = df.quantile(q1)\r\n Q3 = df.quantile(q3)\r\n IQR = Q3 - Q1\r\n df_final = df[~((df < (Q1 - 1.5 * IQR)) | (df > (Q3 + 1.5 * IQR)))]\r\n return df_final", "def generate_outlier_bounds_iqr(df, column, multiplier=1.5):\n q1 = df[column].quantil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Workaround for a missing setter for the extend property of a matplotlib colorbar. The colorbar object in matplotlib has no setter method, and setting the colorbar extend does not take effect. Calling a subsequent update will cause a runtime error because of the internal implementation of the rendering of the colorba...
def _set_colorbar_extend(colorbar: matplotlib.pyplot.colorbar, extend: str): colorbar.extend = extend colorbar._inside = colorbar._slice_dict[extend]
[ "def refresh_colorbar(self, cb_min, cb_max, width = None, height = None, xMin = None, yMin = None):\n\n if width is None:\n width = self.width\n else:\n self.width = width\n\n# FIXME: Until now, if you want to refresh the colorbar, a new QPainter\n# object has ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies limits to colorscale and updates extend. This function applies the limits `new_lim` to the heatmap plot associated with the provided `colorbar`, updates the colorbar limits, and also adds the colorbar clipping indicators in form of small triangles on the top and bottom of the colorbar, according to where the li...
def apply_color_scale_limits(colorbar: matplotlib.pyplot.colorbar, new_lim: Tuple[Optional[float], Optional[float]], data_lim: Optional[Tuple[float, float]]=None, data_array: Optional[np.ndarray]=None, co...
[ "def clim(fig=1, subplot=0, vmin=-1, vmax=1):\n #bewhere colorbars with get_axes method\n ax = plt.figure(fig).axes[subplot]\n plt.sca(ax)\n plt.sci(ax.images[0])\n plt.clim((vmin,vmax))", "def colorbar_only(vmin,vmax,outname='colorbar.png',figsize=(4,1),\n cbsize=[0.05,0.5,0.9,0.2...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Only accumulate weights in the given list of keys.
def accumulate_by_keys(keys, g_ema, g_train, decay=0.999): dict_trn = dict(g_train.named_parameters()) dict_ema = dict(g_ema.named_parameters()) for k in keys: assert k in dict_ema, "key %s is not in the param dict of G_ema." % k dict_ema[k].data.mul_(decay).add_(dict_trn[k].data, alpha=1 -...
[ "def aggregate_weights(self, clients_params):", "def add_weights(self, key, weights):\n if self.num_structures != len(weights):\n raise AttributeError(\n \"Length of weights must match number of structures \"\n f\"{len(weights)} != {self.num_structures}.\"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize an instance. Initialize the data and the columns to deal with specific data.
def __init__(self, data, columns:list): self.data = data.loc[:, columns] self.columns = columns
[ "def __init__(self, data_dict=None):\n if data_dict is None:\n data_dict = {}\n\n impute = data_dict.get('imputation')\n aggregate = data_dict.get('aggregation')\n\n self._data_type = ColumnType[data_dict.get('dataType') or 'string']\n self._role = Role[data_dict.get('r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Split long recordings into sub-files depending on the seconds specified per recording and the sampling frequency of the recorded file. Returns a folder containing the split recordings, located in the path of the read file.
def split_recordings(path,recording_file,file_extension,fs,seconds_split): N = (fs * seconds_split) + 1 # number of data points per file for a chosen seconds_split data_path = os.path.join(path, recording_file + file_extension) write_path = os.path.join(path, recording_file + " split_folder") i...
[ "def split_audio(path, audio_file, logfile):\n \n # Parse logfile\n log = pd.read_csv(logfile)\n rel_time = log['Relative Time'][2:].tolist()\n rel_time.pop(1)\n for i in range(0, len(rel_time) - 1):\n t1 = timestring_to_seconds(rel_time[i])\n t2 = timestring_to_seconds(rel_time[i+1]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function retrieves all the sentiments of the profile's friends' posts and outputs them in a list of tuples.
def facebook_status1(): f = Facebook(license='',throttle=1) me = f.profile() my_friends = f.search(me[0], type=FRIENDS, count=5) statuses = [] status_sentiments = [] for i in range(len(my_friends)): #loop through friends retry = True while retry: try: ...
[ "def get_friends_tweets(self):\n tweets = []\n for friend in self.friends:\n for tweet in tweepy.Cursor(self.API.user_timeline).items():\n tweets.append(tweet._json)\n print(tweets,\"\\n\")\n \n return tweets", "def get_general_feed(user):\n followin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function gets the first element of each tuple in a list of sentiment tuples, which is the feelings polarity for the sentiment analysis.
def get_feelings(sentiment_tuple_list): feelings = [x[0] for x in sentiment_tuple_list] #gets the first element of each tuple return feelings
[ "def get_subjectivity(sentiment_tuple_list): \n subjectivity = [x[1] for x in sentiment_tuple_list] #gets the second element of each tuple\n return subjectivity", "def get_weight_from_sentiments(sentiment):\n #TODO: Make sure best formula\n polarity, subjectivity = sentiment\n return polarity",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is the unit test for get_feelings. It prints out all the sentiment tuples then the first element of each tuple for easy comparison.
def get_feelings_unit_test(): all_sentiment = facebook_search() print 'All sentiment tuples: ' + str(all_sentiment) print '' feelings = get_feelings(all_sentiment) print 'Feelings Values: ' + str(feelings)
[ "def get_feelings(sentiment_tuple_list): \n feelings = [x[0] for x in sentiment_tuple_list] #gets the first element of each tuple\n return feelings", "def get_subjectivity_unit_test(): \n all_sentiment = facebook_search() \n print 'All sentiment tuples' + all_sentiment\n print ''\n subjec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function gets the second element of each tuple in a list of sentiment tuples, which is the subjectivity for the sentiment analysis.
def get_subjectivity(sentiment_tuple_list): subjectivity = [x[1] for x in sentiment_tuple_list] #gets the second element of each tuple return subjectivity
[ "def get_feelings(sentiment_tuple_list): \n feelings = [x[0] for x in sentiment_tuple_list] #gets the first element of each tuple\n return feelings", "def get_subjectivity_unit_test(): \n all_sentiment = facebook_search() \n print 'All sentiment tuples' + all_sentiment\n print ''\n subjec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is the unit test for get_subjectivity. It prints out all the sentiment tuples then the second element of each tuple for easy comparison.
def get_subjectivity_unit_test(): all_sentiment = facebook_search() print 'All sentiment tuples: ' + str(all_sentiment) print '' subjectivity = get_subjectivity(all_sentiment) print 'Subjectivity Values: ' + str(subjectivity)
[ "def get_subjectivity(sentiment_tuple_list): \n subjectivity = [x[1] for x in sentiment_tuple_list] #gets the second element of each tuple\n return subjectivity", "def analyzeSubjectivity(self):\n return self.blob.sentiment.subjectivity", "def get_subjectivity(text):\n res = []\n blob = T...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Connects to Exchange server and generates an Account object
def connect_exchange(username, password, server, mail_box, auto_discover=False, access_type=DELEGATE): # Setting up credentials, configuration and returning account creds = Credentials(username=username, password=password) config = Configuration(server=server, credentials=creds) account = Account(p...
[ "def create_account():\n eth_account = Account()\n account = eth_account.create(get_random_bytes(32))\n return (account.address, account.privateKey)", "def ews_config_setup(cls, user, domain, password):\n try:\n config = Configuration(\n server='outlook.office365.com/EWS/...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stores a DataFrame object in a buffer and transforms the content into bytes for sending as an attachment
def buffer_dataframe(name, df): # Creating a buffer for storing bytes buffer = io.BytesIO() # Returning file extension file_name, file_ext = os.path.splitext(name) # Saving file on buffer according to its extension try: if file_ext in ['.csv', '.txt']: df.to_cs...
[ "def _dataframe_to_pybytes(df):\n table = pa.Table.from_pandas(df)\n sink = pa.BufferOutputStream()\n writer = pa.RecordBatchStreamWriter(sink, table.schema)\n writer.write_table(table)\n writer.close()\n return sink.getvalue().to_pybytes()", "def df2bytes(dataframe):\n return '\\n'.join(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Formats a mail string body using the HTMLBody class. In addition, the function can receive a DataFrame object and transform it using the pretty_html_table package into a custom table to be sent in the mail body.
def format_html_body(string_mail_body, mail_signature='', **kwargs): # Extracting parameters from kwargs df = kwargs['df'] if 'df' in kwargs else None color = kwargs['color'] if 'color' in kwargs else 'blue_light' font_size = kwargs['font_size'] if 'font_size' in kwargs else 'medium' font_famil...
[ "def MailHTMLReport(self, recipient, subject=None):\n dt = rdfvalue.RDFDatetime().Now().Format(\"%Y-%m-%dT%H-%MZ\")\n subject = subject or \"%s - %s\" % (self.REPORT_NAME, dt)\n report_text = self.AsHtmlTable()\n\n email_alerts.SendEmail(recipient, self.EMAIL_FROM, subject,\n s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute conformity scores based on validation set. This is only applicable to quantile regression problems. The scores are used to conformalize new quantile predictions.
def compute_conformity_score(y_val_pred: np.ndarray, y_val: np.ndarray, quantile_levels: list): num_samples = y_val.shape[0] y_val = y_val.reshape(-1) assert y_val_pred.shape[0] == num_samples assert y_val_pred.shape[1] == len(quantile_levels) conformalize_list = [] for i, q in enumerate(quant...
[ "def cv_score(iterableFolds):\n dblCorrectTotal = dblWeightTotal = 0.0\n for cvf in iterableFolds:\n rslt = evaluate_classification(cvf)\n dblCorrect,dblIncorrect = weight_correct_incorrect(rslt)\n dblCorrectTotal += dblCorrect\n dblWeightTotal += dblCorrect + dblIncorrect\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates an Incident. Fetches the maximum identifier value for the org and increments it by one. If two incidents are created for the Organization at the same time then an integrity error will be thrown, and we'll retry again several times. I prefer to lock optimistically here since if we're creating multiple Incidents ...
def create(self, organization, **kwargs): with transaction.atomic(): result = self.filter(organization=organization).aggregate(models.Max('identifier')) identifier = result['identifier__max'] if identifier is None: identifier = 1 else: ...
[ "def _createIncident(self, event, incident, cursor):\n assert type(event) is Event\n assert incident.number is None\n incident.number = self._nextIncidentNumber(event, cursor)\n self._importIncident(event, incident, cursor)", "def create_org(self):\n return self.client().post('/...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the current end of the incident. Either the date it was closed, or the current time if it's still open.
def current_end_date(self): return self.date_closed if self.date_closed else timezone.now()
[ "def end_time(self):\n # TODO: use pd.Timestamp instead\n return self.time[-1].to_pydatetime()", "def end_date(self):\n return self.end.date()", "def end_date(self):\r\n return self._end_date", "def get_end_date(self):\n if self.current_company:\n return 'Presente...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns values matching the snuba format, a list of dicts with 'time' and 'count' keys.
def snuba_values(self): return {'data': [{'time': time, 'count': count} for time, count in self.values]}
[ "def get_count_timeseries():\n\n\n count_data = (db.session.query(CountItem)\n .join(UserCountType)\n .join(UserCondition)\n .filter(UserCountType.is_tracked==True,\n CountItem.count > 0,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function is for calculating the square of the Euclidean distance between two sequences. If the square distance is greater than the "bestsofar" square distance, we should assume that distance is infinity.
def squareEuclidean(query: list, seq: list, seqMean: float, seqStd: float, order: list, bestSoFar: float): distance = 0 for i in order: d = (seq[i] - seqMean) / seqStd - query[i] distance = distance + d*d if (distance > bestSoFar): distance = float('inf') b...
[ "def euclidean_distance(a, b):\n return euclidean_distance_sqrt(a, b)", "def euclidean_distance(a, b):\n ### BEGIN SOLUTION\n return np.sqrt(np.sum((a - b) ** 2))\n ### END SOLUTION", "def euclideanDistance(timeSeries1, timeSeries2):\n squaredDiffs = [ (t1-t2)**2 for (t1,t2) in zip(timeS...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function calculates the LB_LowResED lower bound. If the running squared distance exceeds the "bestSoFar" squared distance, the distance is treated as infinity.
def LB_LowResED(query: list, seq: list, blockLen: int, order: list, bestSoFar: float): distance = 0 for i in order: if (i == len(seq[0]) - 1): d = 0 else: # Shifting window d = max( query[1][i] - max(seq[0][i], seq[0][i+1]), ...
[ "def low_LZSN(self, tol = 0.5):\n\n if self.parameters.lzsn < tol:\n print('The watershed PERLNDs have LZSN values that are below ' +\n 'typical values. Try increasing these parameters.\\n')\n return True\n \n return False", "def test_bcs_chsh_lower_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function calculates the LB_Kim lower bound between two sequences. If the running squared distance exceeds the "bestSoFar" squared distance, the distance is treated as infinity. LB_Kim says the front, back, top, bottom are lower bounds. But the original UCR Suite (written in C++) suggests that z-normalizing the top an...
def LB_Kim(query: list, seq: list, seqMean: float, seqStd: float, cumLB: list, bestSoFar: float): # 1st point at front and back sfront0 = (seq[0] - seqMean) / seqStd dfront0 = sfront0 - query[0] dfront0 = dfront0 * dfront0 sback0 = (seq[len(seq)-1] - seqMean) / seqStd dback0 = sback0 -...
[ "def get_k(self, b_mirror):\n\n klist = self.__get_k_list__(b_mirror)\n print (\"K-list: {}\".format(klist))\n\n if len(klist) > 0:\n return np.max(klist)\n else:\n return np.NaN\n\n # # print (\"Valid Line? {} = {}\".format(self.valid, self.valid_code))\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Discover, connect to, and read from a scale.
def test_read_from_scale(self, mock_usb_core): mock_device = DummyDevice() mock_device.is_kernel_driver_active.return_value = True mock_usb_core.find.return_value = mock_device notify_event = Event() blk = ScaleEvents(notify_event) cfg = {} self.configure_block(bl...
[ "def read_scale(hid_num=0, debug=False):\n scale_device_name = '/dev/hidraw{}'.format(hid_num)\n if os.path.exists(scale_device_name):\n with open(scale_device_name, 'rb') as scale:\n #the scale outputs 6 bytes at a time:\n byte = scale.read(6)\n while(byte != ''):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get list of metals from a panel and a boolean column.
def get_metals_from_panel( panel_csv_file: Optional[Union[str, Path]], usedcolumn: str = "ilastik", metalcolumn: str = "Metal Tag", sort_channels=True, ): metals = None if panel_csv_file is not None: panel = pd.read_csv(panel_csv_file) if panel.shape[1] > 1: selecte...
[ "def atom_cols(self, va_as_specie=False):\n everything=self._master.columns.get_level_values(\"atomic\").unique()\n if va_as_specie:\n return everything\n else:\n return [x for x in everything if \"Va\" not in x]", "def get_conditions_list_from_mat(spm):\n\n return [s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save the given assembly source to disk at asm_filename. asm_source (str) Full assembly source code. asm_filename (str) Filename to which to save the generated assembly.
def write_asm(asm_source, asm_filename): try: with open(asm_filename, "w") as s_file: s_file.write(asm_source) except IOError: comment = f"could not write output file '{asm_filename}'" issue_collector.add(ErrorIssue(comment))
[ "def write_python_source_code(source_file_path, src):\n dirname = os.path.dirname(source_file_path)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n with open(source_file_path, 'w') as outfile:\n outfile.write(src)", "def save_asm_file(self, file_name, include_data=False):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search for the given library file in common directories. If found, returns the path. Otherwise, returns None.
def find_library(file): search_paths = [pathlib.Path("/usr/local/lib/x86_64-linux-gnu"), pathlib.Path("/lib/x86_64-linux-gnu"), pathlib.Path("/usr/lib/x86_64-linux-gnu"), pathlib.Path("/usr/local/lib64"), pathlib.Path("/lib64"), ...
[ "def find_library_or_err(file):\n path = find_library(file)\n if not path:\n comment = f\"could not find {file}\"\n issue_collector.add(ErrorIssue(comment))\n return None\n else:\n return path", "def find_libfile(path):\n path = os.path.realpath(path)\n for root, _, name...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search for the crt0.o, crt1.o, or crt2.o files on the system. The crt1.o, crti.o, and crtn.o objects comprise the core CRT (C RunTime) objects required to enable basic C programs to start and run. crt1.o provides the _start symbol that the runtime linker, ld.so.1, jumps to in order to pass control to the executable, and is...
def find_crtnum(): for file in ["crt2.o", "crt1.o", "crt0.o"]: crt = find_library(file) if crt: return crt comment = "could not find crt0.o, crt1.o, or crt2.o for linking" issue_collector.add(ErrorIssue(comment)) return None
[ "def find_ca_cert_files():\n # Widely used locations for CA certificate files\n well_known_ca_cert_locations = [\n # Ubuntu\n '/etc/ssl/certs/ca-certificates.crt',\n # RedHat\n '/etc/pki/tls/certs/ca-bundle.crt',\n ]\n # Load all of the above locations that we can find\n f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search the given library file and return path if found. If not found, add an error to the error collector and return None.
def find_library_or_err(file): path = find_library(file) if not path: comment = f"could not find {file}" issue_collector.add(ErrorIssue(comment)) return None else: return path
[ "def find_library(file):\n search_paths = [pathlib.Path(\"/usr/local/lib/x86_64-linux-gnu\"),\n pathlib.Path(\"/lib/x86_64-linux-gnu\"),\n pathlib.Path(\"/usr/lib/x86_64-linux-gnu\"),\n pathlib.Path(\"/usr/local/lib64\"),\n pathlib.Path(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assemble the given assembly file into an object file.
def assemble(asm_name, obj_name): try: subprocess.check_call(["as", "-o", obj_name, asm_name]) except subprocess.CalledProcessError: comment = "assembler returned non-zero status" issue_collector.add(ErrorIssue(comment))
[ "def setup_assembly_file(self):\n\t\t# Add comment lines just to make the parser work harder\n\t\tcontent = '# Comment Line\\n' + json.dumps(self._assembly_desc, indent=4) + \\\n\t\t\t'\\n# Comment Line\\n'\n\t\tcreate_repo_with_files(os.path.join(self.tempfolder, 'gitrepos',\n\t\t\t\t'assemblies'), {'testassembly....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Step to get hypervisors.
def get_hypervisors(self, check=True): hypervisors = list(self._client.list()) if check: assert_that(hypervisors, is_not(empty())) return hypervisors
[ "def find_all():\n return ItopapiPrototype.find_all(ItopapiHypervisor)", "def getPlugsVisited(*args, **kwargs):\n \n pass", "def train_decision_tree():\n return train_decision_tree_service()", "def main():\n start = \"http://www.harkavagrant.com/archive.php\"\n pagelinks = getPageLin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Step to get hypervisor capacity. This method calculates the maximum number of instances that can be booted on the hypervisor with the chosen flavor.
def get_hypervisor_capacity(self, hypervisor, flavor, check=True): if hypervisor.vcpus < flavor.vcpus: capacity = 0 elif flavor.disk > 0: capacity = min( hypervisor.disk_available_least // flavor.disk, hypervisor.free_ram_mb // flavor.ram) ...
[ "def capacity(self) -> int:\n return sys.maxsize", "def max_guest_cpu_count(self):\n ret = self._get_attr(\"maxGuestCPUCount\")\n return ret", "def capacity(self):\n return", "def capacity(cls):\n return WellPlate96.size() // PreyStoragePlate.capacity()", "def max_instance...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Step to get any hypervisor except those occupied by the given servers.
def get_another_hypervisor(self, servers, check=True): busy_hypervisors = [ getattr(server, config.SERVER_ATTR_HYPERVISOR_HOSTNAME) for server in servers] for hypervisor in self.get_hypervisors(): if hypervisor.hypervisor_hostname not in busy_hypervisors: ...
[ "def fetch_servers_on_hypervisor(self, hypervisor):\n opts = {\n 'host': hypervisor,\n 'all_tenants': True,\n }\n try:\n msg = ('Fetch Server list on %s' % hypervisor)\n LOG.info(msg)\n servers = self.nova_client.servers.list(detailed=False...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to process unlock-car data before sending it to the server.
def unlockCar(choice, username, password): data_to_transfer = "{},{},{}".format(choice,username,password) return data_to_transfer
[ "def unlockCarFaceAuth(choice, status):\n data_to_transfer = \"{},{}\".format(choice,status)\n\n return data_to_transfer", "def handle_data(self):\n self.client.settimeout(5) # should really be enough\n loops = 0\n payload = None\n self.buffer = bytearray()\n\n # only all...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to process unlock-car data before sending it to the server.
def unlockCarFaceAuth(choice, status): data_to_transfer = "{},{}".format(choice,status) return data_to_transfer
[ "def unlockCar(choice, username, password):\n data_to_transfer = \"{},{},{}\".format(choice,username,password)\n\n return data_to_transfer", "def handle_data(self):\n self.client.settimeout(5) # should really be enough\n loops = 0\n payload = None\n self.buffer = bytearray()\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Expect l to be a crossover function. Generates two random graphs and checks that l(g1, g2) does not error out and returns a graph of the same order (number of vertices).
def test_crossover_function(l): g1 = graphs.RandomGNP(20, .5) g2 = graphs.RandomGNP(20, .5) child_graph = l(g1, g2) assert child_graph.order() == 20
[ "def test_mutation_function(l):\n g = graphs.RandomGNP(20, .5)\n mutant_graph = l(g)\n #print l.__name__\n #print mutant_graph.order()\n assert mutant_graph.order() == 20", "def crossover_tests():\n crossovers = [FUN.cr4,FUN.cr5,FUN.cr6,FUN.cr7,FUN.cr8]\n #These are the crossover functions wh...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks that remove_extra_edges does not affect the independence number.
def test_remove_extra_edges(): g = graphs.RandomGNP(20, .5) r = g r, _ = FUN.remove_extra_edges(r) assert len(r.independent_set()) == len(g.independent_set())
[ "def removeUnusedEdges():\n\n mod = False\n for e in graphEdges.keys(): # use key as iterator so we can modify the dict\n if graphEdges[e].amount <= 0:\n del graphEdges[e]\n mod = True\n return mod", "def check(pos, unlink):\n if unlink.any():\n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs all the crossover tests.
def crossover_tests(): crossovers = [FUN.cr4,FUN.cr5,FUN.cr6,FUN.cr7,FUN.cr8] #These are the crossover functions which preserve the order of the graph. for c in crossovers: test_crossover_function(c) test_cr4()
[ "def test_all_scenarios(self):\n\n exr_bash = self.prep_exr()\n percents = [1, 50, 90]\n # TODO: Don't use a for loop, use the trials kwarg\n for i in range(0, 2):\n Simulator().run(attack_types=Attack.runnable_attacks,\n adopt_policies=list(Non_Defa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }