content
stringlengths
22
815k
id
int64
0
4.91M
def store_dns_info(matchobj):
    """Record a DNS query parsed from a tcpdump line into the global cache.

    Example lines the caller's regex matches:
        02:28:21.105740 IP 172.30.253.88.(53418) > (1.1.1.1.53: 18184)+ A? (blah.com). (26)
        02:28:21.177084 IP 172.30.253.88.40745 > 1.1.1.1.53: 25446+ AAAA? blah.com. (26)
    """
    # Groups 1+2 jointly identify the query; group 3 is the queried domain.
    query_key = matchobj.group(1) + matchobj.group(2)
    dns_cache['queries'][query_key] = matchobj.group(3)
32,400
def edit_schedule(request):
    """Edit the automatic-updates schedule from a POSTed form, then redirect."""
    if request.method == "POST":
        schedule = models.UpdateSchedule.objects.get()

        def _parse_hours(raw):
            # "0 6 12" -> [0, 6, 12]; blank tokens are ignored.
            return [int(token.strip()) for token in raw.split(" ") if token.strip() != ""]

        schedule.text = json.dumps({
            models.YoutubeChannel.PRIORITY_LOW: _parse_hours(request.POST["low"]),
            models.YoutubeChannel.PRIORITY_MEDIUM: _parse_hours(request.POST["medium"]),
            models.YoutubeChannel.PRIORITY_HIGH: _parse_hours(request.POST["high"]),
        })
        schedule.save()
    return redirect("notifpy:settings")
32,401
def infection_formula(name_model, infectious_number, classroom_volume, classroom_ach):
    """Calculate the infection probability with and without a mask.

    :param name_model: model selector; only "wells_riley" is implemented.
    :param infectious_number: number of infectious people in the room.
    :param classroom_volume: room volume.
    :param classroom_ach: air changes per hour of the room.
    :return: tuple (rate_with_mask, rate_without_mask).
    """
    if name_model != "wells_riley":
        # Future Work: add further infection models here.
        return 0.0, 0.0
    # Wells-Riley: P = 1 - exp(-I*q*p*t / (V * ACH)); masks scale the
    # denominator by the combined filtration efficiencies.
    effect_mask = 1.0 / ((1.0 - config.EXHALATION_FILTRATION_EFFICIENCY)
                         * (1.0 - config.RESPIRATION_FILTRATION_EFFICIENCY))
    exponent = (infectious_number * config.QUANTUM_GENERATION_RATE
                * config.PULMONARY_VENTILATIION_RATE * (config.LESSON_TIME / 60))
    rate_with_mask = 1.0 - math.exp(-exponent / (classroom_volume * classroom_ach * effect_mask))
    rate_without_mask = 1.0 - math.exp(-exponent / (classroom_volume * classroom_ach))
    return rate_with_mask, rate_without_mask
32,402
def RetryWithBackoff(opts, fn, args=None, kwargs=None):
    """Repeatedly call `fn`, backing off between attempts.

    `fn` must follow the suggested interface: it returns a tuple
    <status, err> where
        status - backoff status (RETRY_BREAK / RETRY_RESET / RETRY_CONTINUE)
        err    - error that happened in the function, propagated to the caller.

    :param opts: dict with 'backoff', 'max_backoff', 'max_attempts',
        'constant_factor' keys (normalized via update_opts).
    :raises RetryMaxAttemptsError: when max_attempts is exceeded.
    """
    args = args or ()
    kwargs = kwargs or {}
    update_opts(opts)
    count = 0
    backoff = opts['backoff']
    while True:
        count += 1
        status, err_or_rv = fn(*args, **kwargs)
        # Fixed: these were Python 2 print statements (syntax errors under
        # Python 3, which the rest of this codebase targets).
        print(status, err_or_rv)
        if status == RETRY_BREAK:
            return err_or_rv
        if status == RETRY_RESET:
            # Start the backoff schedule over.
            backoff = opts['backoff']
            count = wait = 0
        if status == RETRY_CONTINUE:
            if opts['max_attempts'] > 0 and count >= opts['max_attempts']:
                raise RetryMaxAttemptsError(
                    opts['max_attempts'], reason=err_or_rv)
            # Jittered wait to avoid thundering herds.
            wait = (backoff + backoff * retry_jitter) * opts['constant_factor']
            print("RETRIED IN ... %s" % wait)
        if backoff > opts['max_backoff']:
            backoff = opts['max_backoff']
        gevent.sleep(wait)
32,403
def export(args):
    """Export artifacts selected by the parsed CLI arguments.

    :param args: mapping of CLI flags ('deb', 'docs', 'iso') to booleans;
        the first truthy flag wins.
    """
    if args['deb']:
        print("run export deb packages")
    elif args['docs']:
        print("run export sources documents")
    elif args['iso']:
        print("run export iso images")
    else:
        # Fixed broken grammar (was: "You must specific that export").
        print("You must specify what to export")
32,404
def fill_missing_ids(filename, id_map, highest_id):
    """Open the workbook and assign fresh incremental ids to rows whose id is None.

    :param filename: Name of the worksheet file to be edited
    :param id_map: The id_map generated by build_id_dict()
    :param highest_id: The highest id used by any sheet
    :return: None (the workbook is saved in place)
    """
    logger.info(f'Opening workbook (unknown) to fill any missing ids...')
    workbook = openpyxl.load_workbook(filename, data_only=True)
    for sheet_name, rows in id_map.items():
        for row, current_id in rows.items():
            if current_id is not None:
                continue
            worksheet = workbook[sheet_name]
            id_column = get_id_column_index(filename, worksheet)
            # openpyxl columns are 1-based, hence the +1.
            cell = worksheet.cell(row=row, column=id_column+1)
            highest_id += 1
            cell.value = highest_id
            logger.info(f'Overwriting cell id value in cell {row}, {id_column+1}')
            add_overwrite_msg_to_workbook(workbook, changed_sheet=sheet_name,
                                          changed_cell=cell.coordinate)
    workbook.save(filename)
    workbook.close()
    logger.info(f'Saved and closed workbook (unknown)')
32,405
def test_non_routable_igmp_pkts(do_test, ptfadapter, setup, fanouthost, tx_dut_ports, pkt_fields,
                                igmp_version, msg_type, ports_info):
    """
    @summary: Create an IGMP non-routable packets.

    Builds an IGMP packet of the parametrized version/message type and checks
    (via do_test) that the DUT does not route it.
    """
    # IGMP Types:
    # 0x11 = Membership Query
    # 0x12 = Version 1 Membership Report
    # 0x16 = Version 2 Membership Report
    # 0x17 = Leave Group

    # IP destination address according to the RFC 2236:
    # Message Type                  Destination Group
    # ------------                  -----------------
    # General Query                 ALL-SYSTEMS (224.0.0.1)
    # Group-Specific Query          The group being queried
    # Membership Report             The group being reported
    # Leave Message                 ALL-ROUTERS (224.0.0.2)

    # TODO: fix this workaround as of now current PTF and Scapy versions do not support creation of IGMP packets
    # Temporaly created hex of IGMP packet layer by using scapy version 2.4.3.
    # Example how to get HEX of specific IGMP packets:
    # v3_membership_query = IGMPv3(type=0x11, mrcode=0, chksum=None)/scapy.contrib.igmpv3.IGMPv3mq(gaddr="224.0.0.1",
    # srcaddrs=["172.16.11.1", "10.0.0.59"], qrv=1, qqic=125, numsrc=2)
    # gr_obj = scapy.contrib.igmpv3.IGMPv3gr(rtype=1, auxdlen=0, maddr="224.2.2.4", numsrc=2, srcaddrs=["172.16.11.1",
    # "10.0.0.59"]).build()
    # v3_membership_report = IGMPv3(type=0x22, mrcode=0, chksum=None)/scapy.contrib.igmpv3.IGMPv3mr(res2=0x00, numgrp=1,
    # records=[gr_obj]).build()
    # The rest packets are build like "simple_igmp_packet" function from PTF testutils.py

    # FIXME: Need some sort of configuration for EOS and SONiC fanout hosts to
    # not drop IGMP packets before they reach the DUT
    if not fanouthost:
        pytest.skip("Test case requires explicit fanout support")

    from scapy.contrib.igmp import IGMP
    Ether = testutils.scapy.Ether
    IP = testutils.scapy.IP

    if "vlan" in tx_dut_ports[ports_info["dut_iface"]].lower() and msg_type == "membership_report":
        pytest.skip("Test case is not supported on VLAN interface")

    igmp_proto = 0x02
    multicast_group_addr = "224.1.1.1"
    ethernet_dst = "01:00:5e:01:01:01"
    ip_dst = {"general_query": "224.0.0.1",
              "membership_report": multicast_group_addr}
    # v1/v2 entries are scapy IGMP layers; v3 entries are pre-built raw hex
    # payloads (see the workaround note above).
    igmp_types = {"v1": {"general_query": IGMP(type=0x11, gaddr="224.0.0.1"),
                         "membership_report": IGMP(type=0x12, gaddr=multicast_group_addr)},
                  "v2": {"membership_report": IGMP(type=0x16, gaddr=multicast_group_addr),
                         "leave_group": IGMP(type=0x17, gaddr=multicast_group_addr)},
                  "v3": {"general_query": "\x11\x00L2\xe0\x00\x00\x01\x01}\x00\x02\xac\x10\x0b\x01\n\x00\x00;",
                         "membership_report": "\"\x009\xa9\x00\x00\x00\x01\x01\x00\x00\x02\xe0\x02\x02\x04\xac\x10\x0b\x01\n\x00\x00;"}
                  }

    if igmp_version == "v3":
        pkt = testutils.simple_ip_packet(
            eth_dst=ethernet_dst,
            eth_src=ports_info["src_mac"],
            ip_src=pkt_fields["ipv4_src"],
            ip_dst=ip_dst[msg_type],
            ip_ttl=1,
            ip_proto=igmp_proto
        )
        # Replace the generated payload with the raw IGMPv3 blob.
        del pkt["Raw"]
        pkt = pkt / igmp_types[igmp_version][msg_type]
    else:
        eth_layer = Ether(src=ports_info["src_mac"], dst=ethernet_dst)
        ip_layer = IP(src=pkt_fields["ipv4_src"], )
        igmp_layer = igmp_types[igmp_version][msg_type]
        # igmpize() fills in TTL / destination addresses per the IGMP type.
        assert igmp_layer.igmpize(ip=ip_layer, ether=eth_layer), "Can't create IGMP packet"
        pkt = eth_layer/ip_layer/igmp_layer

    log_pkt_params(ports_info["dut_iface"], ethernet_dst, ports_info["src_mac"],
                   pkt.getlayer("IP").dst, pkt_fields["ipv4_src"])
    do_test("L3", pkt, ptfadapter, ports_info, setup["dut_to_ptf_port_map"].values(), tx_dut_ports)
32,406
def noaa_api_formatter(raw, metrics=None, country_aggr=False):
    """Format the output of the NOAA API to the task-geo Data Model.

    Arguments:
        raw(pandas.DataFrame): Data to be formatted.
        metrics(list[str]): Optional. List of metrics requested; valid metric
            values are:
            TMIN: Minimum temperature.
            TMAX: Maximum temperature.
            TAVG: Average of temperature.
            SNOW: Snowfall (mm).
            SNWD: Snow depth (mm).
            PRCP: Precipitation
        country_aggr(bool): When True, only an aggregate for each date/country
            will be returned.

    Returns:
        pandas.DataFrame
    """
    if metrics is None:
        metrics = [metric.lower() for metric in DEFAULT_METRICS if metric in raw.columns]

    data = raw.copy()
    data.columns = [column.lower() for column in data.columns]
    column_order = [
        'latitude', 'longitude', 'elevation', 'country', 'name', 'date', 'station']
    column_order.extend(metrics)

    data.date = pd.to_datetime(data.date)
    # Temperatures are plain floats.
    for temperature_col in ('tmax', 'tavg', 'tmin'):
        if temperature_col in data.columns:
            data[temperature_col] = data[temperature_col].astype(float)
    # Depth columns come in mm; convert to meters and treat missing as zero.
    for depth_col in ('snwd', 'prcp'):
        if depth_col in data.columns:
            data[depth_col] = data[depth_col].astype(float) / 1000
            data[depth_col].fillna(0, inplace=True)

    # First two characters of the station id are the FIPS country code.
    data['country'] = data.station.str.slice(0, 2).apply(fips_to_name)
    data = data[column_order]

    if country_aggr:
        aggregations = {}
        if 'tmin' in metrics:
            aggregations['tmin'] = np.min
        if 'tmax' in metrics:
            aggregations['tmax'] = np.max
        agg_columns = list(aggregations.keys())
        return data.groupby(['country', 'date'])[agg_columns].aggregate(aggregations).reset_index()
    return data
32,407
def crop_wav(wav, center, radius):
    """Crop `wav` to the window [center - radius, center + radius], zero-padding
    indices that fall outside the signal.

    :param wav: 1-D numpy array.
    :param center: index of the crop center.
    :param radius: half-width of the crop window.
    :return: array of length radius * 2 + 1.
    """
    left_border = center - radius
    right_border = center + radius + 1
    # Fixed: pad each side independently, so windows that overhang BOTH ends
    # (e.g. radius larger than the signal) are handled — the original only
    # padded one side and tripped its own length assertion in that case.
    pad_left = max(0, -left_border)
    pad_right = max(0, right_border - len(wav))
    if pad_left == 0 and pad_right == 0:
        # Fully inside: return the plain slice (preserves dtype).
        cropped_wav = wav[left_border: right_border]
    else:
        core = wav[max(0, left_border): min(len(wav), right_border)]
        cropped_wav = np.concatenate([np.zeros(pad_left), core, np.zeros(pad_right)])
    assert len(cropped_wav) == radius * 2 + 1
    return cropped_wav
32,408
def create_setup(filename_pyx, vobj_name, model_name, setup_filename, obj_directory):
    """Write a setup.py that compiles the Cython Verilator wrapper.

    :param filename_pyx: path of the .pyx source to compile.
    :param vobj_name: unused here; kept for interface compatibility.
    :param model_name: name of the Verilator model.
    :param setup_filename: destination path of the generated setup.py.
    :param obj_directory: directory whose *.cpp files are compiled in.
    """
    # Quote every generated C++ source for the template's sources list.
    cpp_sources = ', '.join(
        "'{}'".format(path)
        for path in glob.glob(os.path.join(obj_directory, '*.cpp')))
    setup_code = TEMPLATE_PYX.format(
        model_name=model_name,
        filename_pyx=filename_pyx,
        verilator_include=VERILATOR_INCLUDE_DIR,
        sources=cpp_sources,
    )
    with open(setup_filename, 'w') as setup_file:
        setup_file.write(setup_code)
32,409
def markdown(text: str) -> str:
    """Escape markdown special symbols by backslash-prefixing each match of MD_RE."""
    escaped = MD_RE.sub(r'\\\1', text)
    return escaped
32,410
def import_sample(sample_name, db):
    """Return the sample_id for `sample_name`, inserting a new row if absent.

    :param sample_name: name to look up in the `sample` table.
    :param db: open DB-API connection.
    :return: integer sample_id (existing or freshly inserted).
    """
    cur = db.cursor()
    cur.execute('select sample_id from sample where sample_name=?', (sample_name, ))
    existing = cur.fetchone()
    if existing is not None:
        return existing[0]
    # Not present yet: insert and use the auto-generated row id.
    cur.execute('insert into sample (sample_name) values (?)', (sample_name, ))
    return cur.lastrowid
32,411
def change_background_color_balck_digit(images, old_background, new_background, new_background2=None, p=1):
    """Recolor the zero-valued background of a batch of digit images.

    :param images: BCHW tensor; backgrounds are assumed to be zeros
        (old_background must be [0]).
    :param old_background: must equal [0]; only black backgrounds supported.
    :param new_background: RGB color (sequence or tensor) for the background.
    :param new_background2: optional second color; when given, a random
        p-fraction of the batch gets this color and the rest get
        `new_background` (only supported for greyscale inputs).
    :param p: fraction of the batch recolored with `new_background2`.
    :return: recolored tensor — note the greyscale path returns only the
        colored background (`bg`), not images + bg; presumably intentional
        since the digit is black — TODO confirm.
    """
    if new_background2 is None:
        assert old_background == [0]
        if not torch.is_tensor(new_background):
            new_background = torch.tensor(new_background, dtype=images.dtype)
        # Normalize 0-255 colors down to [0, 1] when images are already in [0, 1].
        if images.max() <= 1 and new_background.max() > 1:
            new_background /= 255
        if images.size(1) == 1 and len(new_background) == 3:
            # Greyscale input + RGB target color: broadcast to 3 channels.
            images = images.expand(-1, 3, -1, -1)
        else:
            assert images.size(1) == len(new_background)
            # raise NotImplementedError(images.size(), new_background)
        images = images.clone()
        new_background = new_background.view(-1, 1, 1)
        n = images.size(0)
        ch = images.size(1)
        # Heuristic: if more than one channel per image sums to zero on
        # average, the input is already single-channel-colored, not greyscale.
        if (images.view(n, ch, -1).sum(2) == 0).sum(1).sum() > n:
            # when input is already colored (digit or background)
            non_zero_ch_idx = torch.nonzero(images[0].view(ch, -1).sum(1)).squeeze()
            # torch.nonzero(images[0].view(n,ch,-1).sum(2))
            non_zero_chnls = images[:, non_zero_ch_idx]
            if len(non_zero_chnls.shape) == 3:
                non_zero_chnls = non_zero_chnls.unsqueeze(1)
            else:
                non_zero_chnls = non_zero_chnls[:, 0].unsqueeze(1)
            # More zero pixels than max-valued pixels => the digit (foreground)
            # carries the color; otherwise the background does.
            if torch.sum(non_zero_chnls.view(n, -1) == 0) > torch.sum(non_zero_chnls.view(n, -1) == 1):
                # digit was previously colored
                bg_ratio = images.max() - non_zero_chnls
                bg = bg_ratio * new_background
                return images + bg
            else:
                # background is previously colored
                bg = (non_zero_chnls.expand(-1, 3, -1, -1) * new_background)
                images *= images.max() - new_background
                return images + bg
        else:
            # when input is greyscale
            bg_ratio = images.max() - images
            bg = bg_ratio * new_background
            # imgs = images + bg
            # print(images[:, 0, :, :].std().item(),images[:, 1, :, :].std().item(),images[:, 2, :, :].std().item())
            # print(imgs[:, 0, :, :].std().item(), imgs[:, 1, :, :].std().item(), imgs[:, 2, :, :].std().item())
            return bg  # imgs
    else:
        assert old_background == [0]
        if not torch.is_tensor(new_background):
            new_background = torch.tensor(new_background, dtype=images.dtype)
        if images.max() <= 1 and new_background.max() > 1:
            new_background /= 255
        if not torch.is_tensor(new_background2):
            new_background2 = torch.tensor(new_background2, dtype=images.dtype)
        if images.max() <= 1 and new_background2.max() > 1:
            new_background2 /= 255
        if images.size(1) == 1 and len(new_background) == 3:
            images = images.expand(-1, 3, -1, -1)
        else:
            assert images.size(1) == len(new_background)
            # raise NotImplementedError(images.size(), new_background)
        images = images.clone()
        new_background = new_background.view(-1, 1, 1)
        new_background2 = new_background2.view(-1, 1, 1)
        n = images.size(0)
        ch = images.size(1)
        if (images.view(n, ch, -1).sum(2) == 0).sum(1).sum() > n:
            # Two-color mode is not implemented for already-colored inputs;
            # everything after the raise below is unreachable (kept as-is).
            raise NotImplementedError
            # when input is already colored (digit or background)
            non_zero_ch_idx = torch.nonzero(images[0].view(ch, -1).sum(1)).squeeze()
            # torch.nonzero(images[0].view(n,ch,-1).sum(2))
            non_zero_chnls = images[:, non_zero_ch_idx]
            if len(non_zero_chnls.shape) == 3:
                non_zero_chnls = non_zero_chnls.unsqueeze(1)
            else:
                non_zero_chnls = non_zero_chnls[:, 0].unsqueeze(1)
            if torch.sum(non_zero_chnls.view(n, -1) == 0) > torch.sum(non_zero_chnls.view(n, -1) == 1):
                # digit was previously colored
                bg_ratio = images.max() - non_zero_chnls
                bg = bg_ratio * new_background
                return images + bg
            else:
                # background is previously colored
                bg = (non_zero_chnls.expand(-1, 3, -1, -1) * new_background)
                images *= images.max() - new_background
                return images + bg
        else:
            # when input is greyscale
            bg_ratio = images.max() - images
            # Randomly assign color 2 to the first p-fraction of the shuffled
            # batch and color 1 to the remainder.
            idxs = torch.randperm(len(bg_ratio))
            n_imgs = int(p * len(bg_ratio))
            bg_ratio[idxs[:n_imgs]] *= new_background2
            bg_ratio[idxs[n_imgs:]] *= new_background
            # imgs = images + bg
            # print(images[:, 0, :, :].std().item(),images[:, 1, :, :].std().item(),images[:, 2, :, :].std().item())
            # print(imgs[:, 0, :, :].std().item(), imgs[:, 1, :, :].std().item(), imgs[:, 2, :, :].std().item())
            return bg_ratio
32,412
def main(args=None):
    """Console script for {{cookiecutter.repo_name}}."""
    parsed = get_parser().parse_args(args)
    # Placeholder behaviour until real CLI logic is added.
    print(
        "Replace this message by putting your code into "
        "{{cookiecutter.project_slug}}.cli.main"
    )
    print(
        "See argparse tutorial at https://docs.python.org/3/howto/argparse.html"
    )
32,413
def render_sprites(sprites, scales, offsets, backgrounds, name="render_sprites"):
    """Render a scene composed of sprites on top of a background.

    A scene is composed by scaling the sprites by `scales` and offsetting them
    by `offsets` (using spatial transformers), then merging sprites and
    background using per-sprite alpha and importance channels. Sprites are
    organized into `flights`; each flight may use a different sprite-map shape
    and sprite count. Coordinates place (0, 0) at the image top-left and
    (1, 1) at the bottom-right; a sprite with scale (1, 1) and offset (0, 0)
    occupies the whole output image. Bilinear interpolation is used for the
    spatial-transformer sections.

    Args:
        sprites: list (length n_flights) of tensors of shape
            (batch_size, sprite_height_i, sprite_width_i, n_channels+2);
            the final two channels are alpha and importance.
        scales: tensor `[batch_size, n_sprites, 2]`, (y, x) scale per sprite.
        offsets: tensor `[batch_size, n_sprites, 2]`, (y, x) top-left corner.
        backgrounds: tensor `[batch_size, output_height, output_width, n_channels]`.
        name: optional op name.

    Returns:
        Stitched images, shape `(batch_size, output_height, output_width,
        n_channels)` — same as `backgrounds`.

    Raises:
        ImportError: if the compiled wrapper is missing at call time.
    """
    with ops.name_scope(name, "render_sprites", [sprites, scales, offsets, backgrounds]):
        def _to_tensors(values, label):
            # Convert each flight's value to a tensor with a per-flight name.
            return [ops.convert_to_tensor(v, name="{}_flight_{}".format(label, i))
                    for i, v in enumerate(values)]

        sprites_tensor_list = _to_tensors(sprites, "sprites")
        scales_tensor_list = _to_tensors(scales, "scales")
        offsets_tensor_list = _to_tensors(offsets, "offsets")
        backgrounds_tensor = ops.convert_to_tensor(backgrounds, name="backgrounds")

        lib = render_sprites_so()
        return lib.render_sprites(
            sprites_tensor_list, scales_tensor_list, offsets_tensor_list, backgrounds_tensor)
32,414
def load_shared_library(dll_path, lib_dir):
    """Return the loaded shared library object from `dll_path`, after adding
    `lib_dir` to the PATH.

    :raises ImportError: if the file is missing or ctypes fails to load it.
    """
    # add lib path to the front of the PATH env var
    update_path_environment(lib_dir)

    if not exists(dll_path):
        # Fixed grammar (was "does not exists"); modernized %-locals() formatting.
        raise ImportError(f'Shared library does not exist: {dll_path!r}')

    if not isinstance(dll_path, bytes):
        # ensure that the path is not Unicode...
        dll_path = fsencode(dll_path)

    lib = ctypes.CDLL(dll_path)
    if lib and lib._name:
        return lib
    raise ImportError(
        f'Failed to load shared library with ctypes: {dll_path!r} and lib_dir: {lib_dir!r}')
32,415
def test_yadis_user(client):
    """Check GET user-specific XRDS response"""
    response = client.get(f'{_OPENID_URI_PATH_PREFIX}jbloggs')
    # The endpoint must answer OK and embed the user id in the XRDS LocalID.
    assert response.status_code == 200
    assert b"jbloggs</LocalID>" in response.data
32,416
def smartAppend(table, name, value):
    """Append `value` to the list at `table[name]`, creating the list if needed.

    :param table: dict mapping names to lists (mutated in place).
    :param name: key to append under.
    :param value: item to append.
    """
    # setdefault replaces the membership test + double lookup of the original.
    table.setdefault(name, []).append(value)
32,417
def reply_garage_status(client, userdata, message):
    """
    Callback method for when the latest status of the garage is requested.
    """
    # Fixed: the original used bitwise `|`, which binds tighter than `==`,
    # so e.g. `sensor.is_held | sensor.value == 0` compared
    # (is_held | value) == 0 — only true when BOTH were zero. Logical `or`
    # with explicit comparisons is what was intended.
    if sensor.is_active or sensor.value == 1:
        lps("OPEN")
    elif sensor.is_held or sensor.value == 0:
        lps("CLOSED")
    else:
        lps("UNKNOWN")
32,418
def apply_strategy_profile(player_configuration, strategy_profile):
    """
    Applies a strategy profile to a list of players

    :param player_configuration: List of players (each mutated in place).
    :param strategy_profile: Profile to apply, keyed by player name.
    :return: None
    """
    for reporter in player_configuration:
        strategy_config = strategy_profile[reporter['name']]
        # A mixed profile nests several configs under 'strategy_configs'.
        # (Idiom fix: membership test on the dict, not on .keys().)
        if 'strategy_configs' in strategy_config:
            reporter[simmodel.STRATEGY_KEY] = simutils.MixedEmpiricalInflationStrategy(
                mixed_strategy_config=strategy_config)
        else:
            reporter[simmodel.STRATEGY_KEY] = simutils.EmpiricalInflationStrategy(
                strategy_config=strategy_config)
32,419
def preprocess_img(image):
    """Preprocess the image to adapt it to network requirements.

    Args:
        image: (W, H, 3) numpy array in BGR channel order.

    Returns:
        (1, W, H, 3) array, RGB, mean-centered, ready to input to the network.
    """
    # Reverse the channel axis: BGR -> RGB.
    rgb = image[:, :, ::-1]
    # Subtract the BSDS500 per-channel mean colors.
    centered = np.subtract(rgb, np.array((104.00699, 116.66877, 122.67892), dtype=np.float32))
    # Add the leading batch dimension: (W,H,3) -> (1,W,H,3).
    return np.expand_dims(centered, axis=0)
32,420
def tag_filter_matcher(
    conjunction=None,
    tag_key1_state=None,
    tag_value1_state=None,
    tag_key2_state=None,
    tag_value2_state=None,
    resource_inventory=None,
    filter_tags=None,
    tag_dict=None,
    resource_name=None,
    resource_arn=None,
):
    """Updates the passed resource_inventory dictionary with ARN & name of all
    resources matching the user-selected filter tag keys & values.
    User-selected filter tag keys & tag key:value combinations are AND'ed or
    OR'ed based on value of conjunction.
    """
    # Helper naming convention: the four-letter suffix encodes the truth-table
    # row (tag_key1_state, tag_value1_state, tag_key2_state, tag_value2_state)
    # with t = selected, f = not selected.

    def _intersection_union_invalid(tag_dict, resource_name, resource_arn):
        # Invalid selection (a value without its key): nothing can match.
        resource_inventory.clear()

    def _intersection_union_fftt(tag_dict, resource_name, resource_arn):
        # Only key2:value2 selected — exact match on the pair.
        if tag_dict.get(filter_tags.get("tag_key2")) == filter_tags.get("tag_value2"):
            resource_inventory[resource_arn] = resource_name

    def _intersection_union_ttff(tag_dict, resource_name, resource_arn):
        # Only key1:value1 selected — exact match on the pair.
        if tag_dict.get(filter_tags.get("tag_key1")) == filter_tags.get("tag_value1"):
            resource_inventory[resource_arn] = resource_name

    def _intersection_tfff(tag_dict, resource_name, resource_arn):
        # Key1 selected without value: presence of the key suffices.
        if filter_tags.get("tag_key1") in tag_dict:
            resource_inventory[resource_arn] = resource_name

    def _intersection_fftf(tag_dict, resource_name, resource_arn):
        # Key2 selected without value: presence of the key suffices.
        if filter_tags.get("tag_key2") in tag_dict:
            resource_inventory[resource_arn] = resource_name

    def _intersection_tftf(tag_dict, resource_name, resource_arn):
        # Both keys (no values): both keys must be present.
        if (
            filter_tags.get("tag_key1") in tag_dict
            and filter_tags.get("tag_key2") in tag_dict
        ):
            resource_inventory[resource_arn] = resource_name

    def _intersection_tftt(tag_dict, resource_name, resource_arn):
        # Both keys present AND key2's value must match.
        if (
            filter_tags.get("tag_key1") in tag_dict
            and filter_tags.get("tag_key2") in tag_dict
        ):
            if tag_dict.get(filter_tags.get("tag_key2")) == filter_tags.get(
                "tag_value2"
            ):
                resource_inventory[resource_arn] = resource_name

    def _intersection_tttf(tag_dict, resource_name, resource_arn):
        # Both keys present AND key1's value must match.
        if (
            filter_tags.get("tag_key1") in tag_dict
            and filter_tags.get("tag_key2") in tag_dict
        ):
            if tag_dict.get(filter_tags.get("tag_key1")) == filter_tags.get(
                "tag_value1"
            ):
                resource_inventory[resource_arn] = resource_name

    def _intersection_tttt(tag_dict, resource_name, resource_arn):
        # Both key:value pairs must match exactly.
        if tag_dict.get(filter_tags.get("tag_key1")) == filter_tags.get(
            "tag_value1"
        ) and tag_dict.get(filter_tags.get("tag_key2")) == filter_tags.get(
            "tag_value2"
        ):
            resource_inventory[resource_arn] = resource_name

    def _intersection_ffff(tag_dict, resource_name, resource_arn):
        # No filter selected: everything matches.
        resource_inventory[resource_arn] = resource_name

    def _union_tfff_tftf_fftf(tag_dict, resource_name, resource_arn):
        # Keys only (OR): either key present matches.
        if (
            filter_tags.get("tag_key1") in tag_dict
            or filter_tags.get("tag_key2") in tag_dict
        ):
            resource_inventory[resource_arn] = resource_name

    def _union_tttf(tag_dict, resource_name, resource_arn):
        # key1:value1 OR bare key2.
        if filter_tags.get("tag_key1") in tag_dict:
            if tag_dict[filter_tags.get("tag_key1")] == filter_tags.get("tag_value1"):
                resource_inventory[resource_arn] = resource_name
        elif filter_tags.get("tag_key2") in tag_dict:
            resource_inventory[resource_arn] = resource_name

    def _union_tftt(tag_dict, resource_name, resource_arn):
        # key2:value2 OR bare key1.
        if filter_tags.get("tag_key2") in tag_dict:
            if tag_dict[filter_tags.get("tag_key2")] == filter_tags.get("tag_value2"):
                resource_inventory[resource_arn] = resource_name
        elif filter_tags.get("tag_key1") in tag_dict:
            resource_inventory[resource_arn] = resource_name

    def _union_tttt(tag_dict, resource_name, resource_arn):
        # Either full key:value pair matches.
        if tag_dict.get(filter_tags.get("tag_key1")) == filter_tags.get(
            "tag_value1"
        ) or tag_dict.get(filter_tags.get("tag_key2")) == filter_tags.get("tag_value2"):
            resource_inventory[resource_arn] = resource_name

    def _union_ffff(tag_dict, resource_name, resource_arn):
        # No filter selected: everything matches.
        resource_inventory[resource_arn] = resource_name

    # "AND" Truth table check for tag_key1, tag_value1, tag_key2, tag_value2
    intersection_combos = {
        (False, False, False, True): _intersection_union_invalid,
        (False, True, False, False): _intersection_union_invalid,
        (False, True, False, True): _intersection_union_invalid,
        (True, False, False, True): _intersection_union_invalid,
        (True, True, False, True): _intersection_union_invalid,
        (False, True, True, False): _intersection_union_invalid,
        (False, False, True, False): _intersection_fftf,
        (False, False, True, True): _intersection_union_fftt,
        (True, False, False, False): _intersection_tfff,
        (True, True, False, False): _intersection_union_ttff,
        (True, False, True, False): _intersection_tftf,
        (True, False, True, True): _intersection_tftt,
        (True, True, True, False): _intersection_tttf,
        (True, True, True, True): _intersection_tttt,
        (False, False, False, False): _intersection_ffff,
    }

    # "OR" Truth table check for tag_key1, tag_value1, tag_key2, tag_value2
    union_combos = {
        (False, False, False, True): _intersection_union_invalid,
        (False, True, False, False): _intersection_union_invalid,
        (False, True, False, True): _intersection_union_invalid,
        (False, True, True, True): _intersection_union_invalid,
        (True, True, False, True): _intersection_union_invalid,
        (False, False, True, False): _union_tfff_tftf_fftf,
        (False, False, True, True): _intersection_union_fftt,
        (True, False, False, False): _union_tfff_tftf_fftf,
        (True, False, True, False): _union_tfff_tftf_fftf,
        (True, False, True, True): _union_tftt,
        (True, True, False, False): _intersection_union_ttff,
        (True, True, True, False): _union_tttf,
        (True, True, True, True): _union_tttt,
        (False, False, False, False): _union_ffff,
    }

    # Dispatch on the four selection states through the matching truth table.
    if conjunction == "AND":
        intersection_combos[
            (
                tag_key1_state,
                tag_value1_state,
                tag_key2_state,
                tag_value2_state,
            )
        ](
            tag_dict,
            resource_name,
            resource_arn,
        )
    elif conjunction == "OR":
        union_combos[
            (
                tag_key1_state,
                tag_value1_state,
                tag_key2_state,
                tag_value2_state,
            )
        ](
            tag_dict,
            resource_name,
            resource_arn,
        )
    else:
        _intersection_union_invalid(tag_dict, resource_name, resource_arn)
32,421
def run(reload, once_time, debug):
    """Starts the bot.

    reload: restart the bot subprocess whenever its code changes.
    once_time: run a single process that flags itself for reload on change.
    debug: forwarded to the bot's settings.
    """
    click.clear()
    if reload:
        # Re-invoke ourselves as a child process with --once-time instead of
        # -r/--reload, so the child exits when its sources change.
        args = sys.argv[1:]
        if "-r" in args:
            args.remove("-r")
        if "--reload" in args:
            args.remove("--reload")
        args.append("--once-time")
        # I tried to do smth for stopping flood.
        # The best solution I can create is doing
        # click.clear() before process start
        #
        # print("> All prints you see will be changed to logger later.")
        # prev_out = None
        # proc = subprocess.run(["bot", *args], stderr=subprocess.STDOUT)
        # prev_out = proc.stderr
        # while True:
        #     print("Run")
        #     proc = subprocess.run(["bot", *args], stderr=subprocess.STDOUT)
        #     if prev_out == proc.stderr and prev_out is not None:
        #         proc = subprocess.run(["bot", *args], capture_output=True)
        #     else:
        #         proc = subprocess.run(["bot", *args], stderr=subprocess.STDOUT)
        #
        #     prev_out = proc.stderr
        #     print("Reload...")
        while True:
            click.secho("Listen", fg="green")
            proc = subprocess.run(["bot", *args])
            click.secho(
                "Found some changes in bot's code. Reload...", fg="yellow"
            )
    elif once_time:
        # Your bot project path
        sys.path.append(os.getcwd())

        class AllEventsHandler(PatternMatchingEventHandler):
            def on_any_event(self, event):
                # NOTE(review): attribute name 'reaload_now' (sic) must match
                # whatever the bot polls — do not fix the spelling here alone.
                self.bot.reaload_now = True

        event_handler = AllEventsHandler(
            ignore_patterns=["__pycache__", "*.pyc"], ignore_directories=True
        )
        # Bot's import
        import src
        config = attrdict.AttrMap(toml.load("config.toml"))
        URL = (
            config.api.URL
            if "URL" in config.api
            else "https://api.vk.com/method/"
        )
        settings = dict(
            token=config.api.token,
            group_id=config.api.group_id,
            version=config.api.version,
            owner=config.api.owner,
            wait=config.longpoll.wait,
            debug=debug,
            URL=URL,
            config=config,
        )
        # Collect every Reaction/Signal defined at module level in src.
        reactions = []
        signals = []
        for var in src.__dict__.values():
            if isinstance(var, vq.Reaction):
                reactions.append(var)
            elif isinstance(var, vq.Signal):
                signals.append(var)
        reactions = vq.ReactionsList(reactions)
        signals = vq.SignalsList(signals)
        bot = vq.Bot(reactions=reactions, signals=signals, **settings)
        AllEventsHandler.bot = bot
        # Watch the working directory and run the bot until it stops.
        observer = Observer()
        observer.schedule(event_handler, ".", recursive=True)
        observer.start()
        bot.run()
        observer.stop()
        observer.join()
    else:
        # Your bot project path
        sys.path.append(os.getcwd())
        # Bot's import
        import src
        config = attrdict.AttrMap(toml.load("config.toml"))
        # (translated) All these constructs are terribly hacky.
        # Global changes will come in 1.0
        URL = (
            config.api.URL
            if "URL" in config.api
            else "https://api.vk.com/method/"
        )
        settings = dict(
            token=config.api.token,
            group_id=config.api.group_id,
            version=config.api.version,
            owner=config.api.owner,
            wait=config.longpoll.wait,
            URL=URL,
            debug=debug,
            config=config,
        )
        reactions = []
        signals = []
        for var in src.__dict__.values():
            if isinstance(var, vq.Reaction):
                reactions.append(var)
            elif isinstance(var, vq.Signal):
                signals.append(var)
        reactions = vq.ReactionsList(reactions)
        signals = vq.SignalsList(signals)
        bot = vq.Bot(reactions=reactions, signals=signals, **settings)
        bot.run()
32,422
def test_client_exclude_fails_missing_session(server, client):
    """Should fail if a wrong access token is used."""
    # NOTE(review): the test name says "missing session" but a session id IS
    # set below and the mock answers 401 — presumably this exercises the
    # invalid-token path; confirm the intended scenario.
    server.add(
        responses.POST,
        "https://example.com/api/panel/syncSendCommand",
        status=401,
    )
    client._session_id = "test"
    # Pre-acquire the client lock; presumably mirrors mid-operation state —
    # TODO confirm why this is needed for the failure path.
    client._lock.acquire()
    with pytest.raises(InvalidToken):
        client.exclude([1])
    # Exactly one HTTP call must have been made against the mock.
    assert len(server.calls) == 1
32,423
def main(key, root):
    """
    Execution: rewrite every <img src> in the site's HTML files to a signed
    Thumbor URL.

    key: str, Secret to encrypt the Thumbor URL
    root: Path, Website root

    NOTE(review): relies on a module-level `args` object (thumbor_site, width,
    height, smart) that is not a parameter — confirm it is parsed before this
    function runs.
    """
    crypto = CryptoURL(key)
    for i in root.glob('**/*.html'):
        file = Path(i)
        html = BeautifulSoup(file.read_text(), 'html.parser')
        imgs = html.find_all('img')
        for img in imgs:
            src = img['src']
            # Do not process SVGs because doing it is useless
            if not src.endswith('.svg'):
                if src.startswith('/'):
                    # Some links are already absolute paths,
                    # so do not make them absolute again
                    src = src[1:]
                else:
                    # Get the path of the image relative to the website root
                    src = (str(file.parent.relative_to(root)) + '/' + src)
                img['src'] = process_url(
                    crypto, args.thumbor_site, root, src,
                    args.width, args.height, args.smart)
                print(img['src'])
        # Write the rewritten HTML back over the original file.
        i.write_text(str(html))
32,424
def _parse_ec_record(e_rec):
    """
    This parses an ENSDF electron capture + b+ record

    Parameters
    ----------
    e_rec : re.MatchObject
        regular expression MatchObject

    Returns
    -------
    en : float
        b+ endpoint energy in keV
    en_err : float
        error in b+ endpoint energy
    ib : float
        b+ branch intensity
    dib : float
        error in b+ branch intensity
    ie : float
        ec branch intensity
    die : float
        error in ec branch intensity
    logft : float
        logft of the decay
    dft : float
        error in logft
    tti : float
        presumably the total transition intensity (groups 10/11) — confirm
        against the ENSDF format manual
    dtti : float
        error in tti
    """
    en, en_err = _get_val_err(e_rec.group(2), e_rec.group(3))
    ib, dib = _get_val_err(e_rec.group(4), e_rec.group(5))
    ie, die = _get_val_err(e_rec.group(6), e_rec.group(7))
    logft, dft = _get_val_err(e_rec.group(8), e_rec.group(9))
    tti, dtti = _get_val_err(e_rec.group(10), e_rec.group(11))
    return en, en_err, ib, dib, ie, die, logft, dft, tti, dtti
32,425
def setup(bot):
    """
    Mandatory function to add the Cog to the bot.
    """
    cog = AntistasiLogWatcherCog(bot)
    bot.add_cog(attribute_checker(cog))
32,426
def process_node_properties(node, node_type, intermine_model, rdf_prefixes, prefixes_used, pos):
    """
    Process the properties of a graph node, collecting (predicate, object)
    pairs and recording which RDF prefixes were used.

    :param node: mapping of property names to values for one graph node
    :param node_type: model class name used to resolve property paths
    :param intermine_model: mapping from paths to model info dicts ('term')
    :param rdf_prefixes: known RDF prefixes, searched via find_rdf_prefix
    :param prefixes_used: set, mutated — prefixes seen so far
    :param pos: list, mutated — collected (predicate, object) pairs
    :return: None
    """
    for key, value in sorted(node.items()):
        print('KEY-VALUE: %s,%s' % (key, value))
        if key == 'class':
            # 'a' is the RDF shorthand predicate for rdf:type.
            term = intermine_model.get(node_type).get('term')
            p, o = 'a', term
        else:
            path = '%s.%s' % (node_type, key)
            print('Looking for path [%s]' % path)
            # NOTE(review): this rebinds the `node` parameter — harmless only
            # because node.items() was snapshotted above; confirm intent.
            node = intermine_model.get(path)
            term = node.get('term') if node else None
            p, o = term, value
            print('Term was [%s]' % term)
        if term:
            prefix, _ = find_rdf_prefix(term, rdf_prefixes)
            if prefix:
                prefixes_used.add(prefix)
            pos.append((p, o))
32,427
def skpTime(time):
    """
    Returns a datetime.time (UTC) for the hour at which the unit generated
    the frame. (Translated from Spanish.)

    >>> time = '212753.00'
    >>> datetime.time(int(time[0:2]), int(time[2:4]), int(time[4:6]), int(time[-2]))
    datetime.time(21, 27, 53)
    >>>
    """
    import datetime
    # NOTE(review): time[-2] is a single digit of the fractional-seconds part,
    # passed as the microsecond argument — presumably the full fraction was
    # intended (e.g. int(time[-2:]) scaled to microseconds); confirm against
    # the sentence format before changing.
    return datetime.time(int(time[0:2]), int(time[2:4]), int(time[4:6]), int(time[-2]),
                         tzinfo=timezone('UTC'))
32,428
def calc_max_moisture_set_point(bpr, tsd, t):
    """
    (76) in ISO 52016-1:2017

    Gabriel Happle, Feb. 2018

    :param bpr: Building Properties
    :type bpr: BuildingPropertiesRow
    :param tsd: Time series data of building
    :type tsd: dict
    :param t: time step / hour of the year
    :type t: int
    :return: max moisture set point (kg/kg_dry_air)
    :rtype: double
    """
    # from bpr get set point for humidification
    phi_int_set_dhu = bpr.comfort['RH_max_pc']
    t_int = tsd['T_int'][t]
    p_sat_int = calc_saturation_pressure(t_int)
    # Humidity ratio from the vapour partial pressure (0.622 = Mw/Ma).
    partial_vapour = phi_int_set_dhu / 100 * p_sat_int
    x_set_max = 0.622 * partial_vapour / (P_ATM - partial_vapour)
    return x_set_max
32,429
def test_constructed_board_has_correct_open_cell(sol_board):
    """The freshly built board's open cell sits in the bottom-right corner."""
    expected_coords = (3, 3)
    assert sol_board.open_cell_coords == expected_coords
32,430
def compress_pub_key(pub_key: bytes) -> bytes:
    """Convert an uncompressed SEC public key to its compressed form.

    The prefix byte encodes the parity of the y coordinate (0x03 odd,
    0x02 even), followed by the 32-byte x coordinate.
    """
    prefix = b"\x03" if pub_key[-1] & 1 else b"\x02"
    return prefix + pub_key[1:33]
32,431
def is_tensor(blob):
    """Return True if *blob* is a tensor object (an instance of TensorBase)."""
    return isinstance(blob, TensorBase)
32,432
def transform_dlinput(
        tlist=None, make_tensor=True, flip_prob=0.5,
        augment_stain_sigma1=0.5, augment_stain_sigma2=0.5):
    """Build the input-transform pipeline for a DL model.

    Parameters
    ----------
    tlist: None or list
        Names of augmentations to apply ('hflip', 'augment_stain').
        Pass None in testing mode (no augmentation).
    make_tensor: bool
        Append a PIL -> tensor conversion at the end of the pipeline.
    flip_prob
        Probability for the horizontal flip.
    augment_stain_sigma1
    augment_stain_sigma2
        H&E stain-augmentation parameters.
    """
    available = {
        'hflip': tvdt.RandomHorizontalFlip(prob=flip_prob),
        'augment_stain': tvdt.RandomHEStain(
            sigma1=augment_stain_sigma1, sigma2=augment_stain_sigma2),
    }
    names = tlist if tlist is not None else []
    pipeline = [available[name] for name in names]
    if make_tensor:
        pipeline.append(tvdt.PILToTensor(float16=False))
    return tvdt.Compose(pipeline)
32,433
def do_rollouts(env,
                num_episodes: int,
                max_episode_length: Optional[int] = None,
                action_fn: Optional[Callable[[np.ndarray], np.ndarray]] = None,
                render_mode: Optional[str] = None):
    """Performs rollouts with the given environment.

    Args:
        env: A Gym-style environment (reset/step/render/action_space API).
        num_episodes: The number of episodes to do rollouts for.
        max_episode_length: The maximum length of an episode.
        action_fn: The function to use to sample actions for steps. If None,
            uses random actions.
        render_mode: The rendering mode. If None, no rendering is performed.

    Yields:
        Trajectory containing:
            observations: The observations for the episode.
            rewards: The rewards for the episode.
            total_reward: The total reward during the episode.
            infos: The auxiliary information during the episode.
            renders: Rendered frames during the episode.
            durations: The running execution durations.
    """
    # If no action function is given, use random actions from the action space.
    if action_fn is None:
        action_fn = lambda _: env.action_space.sample()
    # Maintain a dictionary of execution durations.
    durations = collections.defaultdict(float)

    # Define a function to maintain a running average of durations.
    # NOTE: averages accumulate across episodes, keyed by total_steps for the
    # per-step timings and by episode for 'reset'.
    def record_duration(key: str, iteration: int, value: float):
        durations[key] = (durations[key] * iteration + value) / (iteration + 1)

    total_steps = 0
    for episode in range(num_episodes):
        episode_start = time.time()
        obs = env.reset()
        record_duration('reset', episode, time.time() - episode_start)
        done = False
        episode_actions = []
        # The initial observation is included, so episode_obs has one more
        # entry than episode_actions/episode_rewards.
        episode_obs = [obs]
        episode_rewards = []
        episode_total_reward = 0
        episode_info = collections.defaultdict(list)
        episode_renders = []
        while not done:
            step_start = time.time()
            # Get the action for the current observation.
            action = action_fn(obs)
            action_time = time.time()
            record_duration('action', total_steps, action_time - step_start)
            # Advance the environment with the action.
            obs, reward, done, info = env.step(action)
            step_time = time.time()
            record_duration('step', total_steps, step_time - action_time)
            # Render the environment if needed.
            if render_mode is not None:
                render_result = env.render(render_mode)
                record_duration('render', total_steps, time.time() - step_time)
                if render_result is not None:
                    episode_renders.append(render_result)
            # Record episode information.
            episode_actions.append(action)
            episode_obs.append(obs)
            episode_rewards.append(reward)
            episode_total_reward += reward
            for key, value in info.items():
                episode_info[key].append(value)
            total_steps += 1
            # Truncate the episode once the observation count hits the cap.
            if (max_episode_length is not None
                    and len(episode_obs) >= max_episode_length):
                done = True
        # Combine the information into a trajectory.
        trajectory = Trajectory(
            actions=np.array(episode_actions),
            observations=np.array(episode_obs),
            rewards=np.array(episode_rewards),
            total_reward=episode_total_reward,
            infos={key: np.array(value) for key, value in episode_info.items()},
            renders=np.array(episode_renders) if episode_renders else None,
            durations=dict(durations),
        )
        yield trajectory
32,434
def _(text):
    """Collapse every run of whitespace in *text* to a single space."""
    # str.split() with no argument already discards leading/trailing
    # whitespace, so no explicit strip() is needed.
    return ' '.join(text.split())
32,435
def mean_IOU_primitive_segment(matching, predicted_labels, labels, pred_prim, gt_prim):
    """
    Primitive-type IOU, calculated at the segment level.

    Predicted segments are first matched with ground-truth segments, then
    IOU is computed over the matched segments only.

    :param matching: per-batch (rows, cols) index arrays matching predicted
        segment ids to ground-truth segment ids
    :param predicted_labels: B x N, predicted label id per point
    :param labels: B x N, ground-truth label id per point
    :param pred_prim: B x K, predicted primitive type per predicted segment
    :param gt_prim: B x N, ground-truth primitive type per point
    :return: (mean segment IOU, mean primitive-type accuracy,
        [gt, pred] primitive-type pairs of the last batch element)
    """
    batch_size = labels.shape[0]
    IOU = []
    IOU_prim = []
    for b in range(batch_size):
        iou_b = []
        iou_b_prim = []
        iou_b_prims = []
        rows, cols = matching[b]
        for r, c in zip(rows, cols):
            pred_indices = predicted_labels[b] == r
            gt_indices = labels[b] == c

            # use only matched segments for evaluation
            if (np.sum(gt_indices) == 0) or (np.sum(pred_indices) == 0):
                continue

            # also remove the gt labels that are very small in number
            if np.sum(gt_indices) < 100:
                continue

            iou = np.sum(np.logical_and(pred_indices, gt_indices)) / (
                    np.sum(np.logical_or(pred_indices, gt_indices)) + 1e-8)
            iou_b.append(iou)

            # evaluation of primitive type prediction performance
            gt_prim_type_k = gt_prim[b][gt_indices][0]
            # FIX: the original wrapped this lookup in a bare try/except that
            # dropped into the ipdb debugger; let indexing errors propagate.
            predicted_prim_type_k = pred_prim[b][r]

            iou_b_prim.append(gt_prim_type_k == predicted_prim_type_k)
            iou_b_prims.append([gt_prim_type_k, predicted_prim_type_k])

        # mean IOU over this shape
        IOU.append(np.mean(iou_b))
        IOU_prim.append(np.mean(iou_b_prim))
    return np.mean(IOU), np.mean(IOU_prim), iou_b_prims
32,436
def get_tmp_directory_path():
    """Get the path to the tmp dir.

    Creates the tmp dir (if it doesn't already exist) next to this file.
    Uses os.makedirs(..., exist_ok=True) to avoid the check-then-create
    race of an exists()/mkdir() pair.

    :return: str -- abs path to the tmp dir
    """
    tmp_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'tmp')
    os.makedirs(tmp_directory, exist_ok=True)
    return tmp_directory
32,437
def _infer_title(ntbk, strip_title_header=True):
    """Infer a title from notebook metadata.

    Looks in ``metadata['title']`` first; failing that, checks whether the
    first line of the first cell is an H1 header and uses it as the title,
    optionally removing that header line from the notebook content.
    """
    title = ntbk.metadata.get('title')

    if title is None:
        # Fall back to an H1 header on the first line of the first cell.
        lines = ntbk.cells[0].source.lstrip().split('\n')
        if lines[0].startswith('# '):
            header = lines.pop(0)
            title = header.strip('# ')
            if strip_title_header is True:
                ntbk.cells[0].source = '\n'.join(lines)

    return title
32,438
def stable_hash(value):
    """Return a stable (process-independent) integer hash of *value*.

    The value is stringified, UTF-8 encoded and MD5-hashed; the digest is
    interpreted as a big-endian integer.
    """
    digest = hashlib.md5(str(value).encode('utf-8')).digest()
    return int.from_bytes(digest, 'big')
32,439
def test_api_page_delete_admin():
    """Can a user delete /api/v1/pages/<page_id> if admin"""
    app = create_ctfd()
    with app.app_context():
        gen_page(app.db, title="title", route="/route", content="content")
        with login_as_user(app, "admin") as client:
            resp = client.delete("/api/v1/pages/2", json="")
            assert resp.status_code == 200
            assert resp.get_json().get("data") is None
    destroy_ctfd(app)
32,440
def minimal_community(community_owner):
    """Minimal community data as dict coming from the external world."""
    access = {"visibility": "public"}
    metadata = {"title": "Title", "type": "topic"}
    return {"id": "comm_id", "access": access, "metadata": metadata}
32,441
def retrieve_descriptions(gene, descriptions, empties):
    """Given single gene name, grab possible descriptions from NCBI and
    prompt user to select one.

    NOTE(review): Python 2 code -- relies on raw_input() and on filter()
    returning a list (len()/indexing on desc_list). Confirm before running
    under Python 3.

    :param gene: gene symbol to query against NCBI's gene database
    :param descriptions: dict of gene -> accepted description (mutated)
    :param empties: list of genes with no usable description (mutated)
    :return: the descriptions dict, or None when the gene is skipped early
    """
    # Perform ESearch and grab list of IDs
    query = gene + '[Gene Name]'
    handle = Entrez.esearch(db='gene', term=query, retmax=100, retmode='xml')
    record = Entrez.read(handle)
    handle.close()
    idlist = ','.join(record["IdList"])
    # Ensure you have results, exit if not
    if idlist == '':
        print('No records for {}, skipping...\n'.format(gene))
        empties.append(gene)
        return
    # Generate summary from UID list
    handle = Entrez.esummary(db='gene', id=idlist)
    record = Entrez.read(handle)
    handle.close()
    # Grab description, counter for unique values
    desc_cnt = Counter()
    doc_sums = record[u'DocumentSummarySet'][u'DocumentSummary']
    for i in range(len(doc_sums)):
        # Prefer the official nomenclature name; fall back to the first
        # entry of the '|'-separated OtherDesignations field.
        if doc_sums[i][u'NomenclatureName'] != '':
            desc = doc_sums[i][u'NomenclatureName']
        else:
            desc = doc_sums[i][u'OtherDesignations'].split('|')[0]
        desc_cnt[desc] += 1
    # Create list from counter keys for indexing purposes
    desc_list = filter(None, desc_cnt)
    if len(desc_cnt) > 1:
        print('{} has {} unique descriptions from {} results. These are:'.format(
            gene, len(desc_list), len(doc_sums)))
        ans_range = range(len(desc_list))
        for i in ans_range:
            print ('{}: {} [{}/{}]'.format(i+1, desc_list[i],
                                           desc_cnt[desc_list[i]], len(doc_sums)))
        # Take user input to accept/reject a description
        while True:
            ans = raw_input('Which do you accept? [{}-{}/N]: '.format(
                min(ans_range)+1, max(ans_range)+1))
            # Check if int or str entered
            try:
                ans = int(ans)-1
                if ans in ans_range:
                    print('Accepting #{}.\n'.format(ans+1))
                    descriptions[gene] = desc_list[ans]
                    break
                else:
                    print('{} is outside acceptable range. Try again.'.format(
                        ans))
            except:
                # Non-integer input: treat as accept/reject keyword.
                if ans in ['N', 'n', 'no', 'No']:
                    print('Skipping this gene.\n')
                    break
                else:
                    print('Invalid input, try again.')
    # If there's only one unique description, accept/reject
    elif len(desc_cnt) == 1:
        desc_list2 = list(desc_cnt)
        desc = desc_list2[0]
        if desc == '':
            print('{} has empty description.'.format(gene))
            empties.append(gene)
            return
        print('{} only has one unique description from {} results.'.format(
            gene, len(doc_sums)))
        print('This is:\n{}'.format(desc))
        while True:
            ans = raw_input('Accept? Y/N: ')
            if ans in ['Y', 'y', 'yes', 'Yes']:
                print('Description accepted.\n')
                descriptions[gene] = desc
                break
            elif ans in ['N', 'n', 'no', 'No']:
                print('Skipping this gene.\n')
                empties.append(gene)
                break
            else:
                print('Invalid input, try again.')
    return(descriptions)
32,442
def readCmd():
    """ Parses out a single character contained in '<>'
    i.e. '<1>' returns int(1)
    returns the single character as an int, or returns -1 if it fails"""
    recvInProgress = False
    # Poll the serial port for up to 10 seconds.
    timeout = time.time() + 10
    while time.time() < timeout:
        try:
            rc = ser.read().decode("utf-8")
        except(UnicodeDecodeError):
            # Ignore bytes that are not valid UTF-8 and keep reading.
            continue
        if recvInProgress == True:
            if rc != '>':
                # NOTE(review): only the most recent character is kept, so a
                # multi-character payload silently loses all but its last char.
                cmd = rc
            else:
                #while(ser.in_waiting != 0):
                #    ser.read()
                try:
                    return int(cmd)
                except:
                    # Covers both a non-numeric payload and the empty '<>'
                    # case, where `cmd` is unbound and raises NameError.
                    print("Bad command parse")
                    return -1
        elif rc == '<':
            recvInProgress = True
    print("Timeout on readCmd")
    return -1
32,443
def map_ground_truth(bounding_boxes, anchor_boxes, threshold=0.5): """ Assign a ground truth object to every anchor box as described in SSD paper :param bounding_boxes: :param anchor_boxes: :param threshold: :return: """ # overlaps shape: (bounding_boxes, anchor_boxes) overlaps = jaccard_overlap(bounding_boxes, anchor_boxes) # best_bbox_overlaps and best_bbox_ids shape: (bounding_boxes) # best_bbox_overlaps: IoU of overlap with the best anchor box for every ground truth box # best_bbox_ids: indexes of anchor boxes best_bbox_overlaps, best_bbox_ids = overlaps.max(1) # overlaps and bbox_ids shape: (anchor_boxes) # IoU and indexes of bounding boxes with the best overlap for every anchor box overlaps, bbox_ids = overlaps.max(0) # Combine the two: # best_bbox_overlaps takes precedence overlaps[best_bbox_ids] = 2 for bbox_id, anchor_id in enumerate(best_bbox_ids): bbox_ids[anchor_id] = bbox_id # Check for the threshold and return binary mask and bbox ids for each anchor is_positive = overlaps > threshold return is_positive, bbox_ids
32,444
def create_jumpbox(username, network, image_name='jumpBox-Ubuntu18.04.ova'):
    """Make a new jumpbox so a user can connect to their lab

    :Returns: Dictionary

    :param username: The user the jumpbox is deployed for
    :type username: String

    :param network: The name of the network the jumpbox connects to
    :type network: string

    :param image_name: The OVA file (under VLAB_JUMPBOX_IMAGES_DIR) to deploy
    :type image_name: String

    :Raises: ValueError -- when *network* does not exist in vCenter
    """
    with vCenter(host=const.INF_VCENTER_SERVER, user=const.INF_VCENTER_USER, \
                 password=const.INF_VCENTER_PASSWORD) as vcenter:
        ova = Ova(os.path.join(const.VLAB_JUMPBOX_IMAGES_DIR, image_name))
        try:
            network_map = vim.OvfManager.NetworkMapping()
            network_map.name = ova.networks[0]
            try:
                network_map.network = vcenter.networks[network]
            except KeyError:
                raise ValueError('No such network named {}'.format(network))
            the_vm = virtual_machine.deploy_from_ova(vcenter, ova, [network_map],
                                                     username, 'jumpBox', logger)
        finally:
            # Always release the OVA handle, even if the deploy failed.
            ova.close()
        _setup_jumpbox(vcenter, the_vm, username)
        # VMTools will be ready long before the full network stack is up.
        # Pause for a moment here so we can return an IP
        time.sleep(70)
        return virtual_machine.get_info(vcenter, the_vm)
32,445
def MakeFrame(ea, lvsize, frregs, argsize):
    """
    Make function frame

    @param ea: any address belonging to the function
    @param lvsize: size of function local variables
    @param frregs: size of saved registers
    @param argsize: size of function arguments

    @return: ID of function frame or -1
             If the function did not have a frame, the frame
             will be created. Otherwise the frame will be modified.
    """
    func = idaapi.get_func(ea)
    if func is None:
        return -1
    # Try creating a fresh frame first; fall back to resizing an existing one.
    if idaapi.add_frame(func, lvsize, frregs, argsize):
        return func.frame
    if idaapi.set_frame_size(func, lvsize, frregs, argsize):
        return func.frame
    return -1
32,446
def mip_solver(f, strides, arch, part_ratios, global_buf_idx, A, Z, compute_factor=10, util_factor=-1,
               traffic_factor=1):
    """CoSA mixed integer programming(MIP) formulation.

    :param f: per-problem-dimension lists of prime factors
    :param strides: (stride_h, stride_w) of the layer
    :param arch: architecture description (mem_entries, S spatial sizes)
    :param part_ratios: per-memory, per-variable buffer partition ratios
    :param global_buf_idx: index of the global buffer level
    :param A: variable-relevance matrix (dimension j relevant to variable v)
    :param Z: per-variable, per-memory bypass/keep matrix
    :param compute_factor, util_factor, traffic_factor: objective weights
    :return: (factor_config, spatial_config, outer_perm_config, milp_runtime)
    """
    logging.info(f"LAYER {f}")
    num_vars = len(A[0])
    num_mems = len(Z[0])

    m = Model("mip")

    cost = []
    constraints = []
    org = ['spatial', 'temporal']

    M = []
    # ignore DRAM cap
    for i in range(num_mems - 1):
        mem_cap = arch.mem_entries[i]
        mem_cap_arr = []
        for j in range(num_vars):
            var_mem_cap = mem_cap * part_ratios[i][j]
            mem_cap_arr.append(var_mem_cap)
        M.append(mem_cap_arr)

    # log friendly M
    M_log = []
    for i, mem in enumerate(M):
        M_v = []
        for bound in mem:
            if bound == 0:
                # turn 0 to 1 for taking the log
                bound = 1
            M_v.append(bound)
        M_log.append(M_v)

    # spatial constraints
    S = arch.S

    # set the levels to be equal to the number of factors + 4 memory levels
    perm_levels = 0
    for j, f_j in enumerate(f):
        perm_levels += len(f_j)
    gb_start_level = global_buf_idx
    total_levels = num_mems - 1 + perm_levels
    logging.info(f"total {total_levels} levels")

    x = {}
    # x_jn_jn
    for i in range(total_levels):
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                for k in range(2):
                    name = "X({},{},{},{})".format(i, j, n, k)
                    x[(i, j, n, k)] = m.addVar(vtype=GRB.BINARY, name=name)
                # sum for each sub factor spatial and temp must be less than 1
                # NOT equals to one
                spatial_temp_sum = 0
                for k in range(2):
                    name = "X({},{},{},{})".format(i, j, n, k)
                    spatial_temp_sum += x[(i, j, n, k)]
                m.addConstr(spatial_temp_sum <= 1, "spatial_temp_sum_{}_{}_{}".format(i, j, n))

    # j, n is the loop level
    # each mapper must have a mapping
    i = 0
    x_row_sums = []
    x_col_sums = []
    # for i in range(total_levels):
    for i in range(gb_start_level, gb_start_level + perm_levels):
        row_sum = 0
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                for k in range(2):
                    name = "X({},{},{},{})".format(i, j, n, k)
                    row_sum += x[(i, j, n, k)]
        m.addConstr(row_sum <= 1, "row_sum_{}".format(i))
        x_row_sums.append(row_sum)

    for j, f_j in enumerate(f):
        for n, f_jn in enumerate(f_j):
            col_sum = 0
            for i in range(total_levels):
                for k in range(2):
                    name = "X({},{},{},{})".format(i, j, n, k)
                    col_sum += x[(i, j, n, k)]
            # assume perm can be interleaved in diff perm level
            m.addConstr(col_sum == 1, "col_sum_{}_{}".format(j, n))
            x_col_sums.append(col_sum)

    # make sure v is one for all outer loop level, once a correlation exists
    # add another relation var v - f, 3 - 7*n loop-level
    s = {}
    y = {}
    for v in range(num_vars):
        for i in range(gb_start_level, gb_start_level + perm_levels):
            row_sum = 0
            y[(v, i)] = m.addVar(lb=0, ub=1, vtype=GRB.INTEGER, name="y({},{})".format(v, i))
            for j, f_j in enumerate(f):
                for n, f_jn in enumerate(f_j):
                    row_sum += x[(i, j, n, 1)] * A[j][v]
            if i > gb_start_level:
                m.addConstr(y[(v, i)] >= y[(v, i - 1)], "y_v_i_sv_{}_{}".format(v, i))
                # can be ==
                m.addConstr(y[(v, i)] >= row_sum, "y_v_i_row_sum_{}_{}".format(v, i))
            else:
                # can be ==
                m.addConstr(y[(v, i)] == row_sum, "y_v_i_row_sum_{}_{}".format(v, i))
            s[(v, i)] = row_sum

    ## exhausively list all scenarios where p or q is inside current mem
    zz = {}
    prefix = 0
    for var in [2, 3]:
        for mem_level in [3]:
            zz[(var, mem_level)] = m.addVar(lb=0, ub=1, vtype=GRB.INTEGER,
                                            name="zz({},{},{})".format(prefix, var, mem_level))
            x_sums = 0
            for n, prime_factor in enumerate(f[var]):
                for inner_mem_level_i in range(mem_level + 1):
                    for k in range(2):
                        filter_in = x[(inner_mem_level_i, var, n, k)]
                        m.addConstr(zz[(var, mem_level)] >= filter_in,
                                    "zz_x_sum_{}_{}_{}_{}_{}_{}".format(prefix, var, n, mem_level,
                                                                        inner_mem_level_i, k))
                        x_sums += filter_in
            m.addConstr(zz[(var, mem_level)] <= x_sums, "z_x_sum_{}_{}_{}".format(prefix, var, mem_level))

    l = {}
    for v in range(num_vars):
        for i in range(gb_start_level, gb_start_level + perm_levels):
            row_sum = 0
            for j, f_j in enumerate(f):
                for n, f_jn in enumerate(f_j):
                    row_sum += np.log2(f[j][n]) * (x[(i, j, n, 1)])
            l[(v, i)] = row_sum

    # Add spatial constraints
    spatial_tile = 0
    for i in range(gb_start_level, gb_start_level + perm_levels):
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                spatial_tile += np.log2(f[j][n]) * x[(i, j, n, 0)]
    m.addConstr(spatial_tile <= np.log2(S[gb_start_level]), "spatial_tile_gb_{}".format(prefix))

    for i in range(gb_start_level):
        spatial_tile = 0
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                spatial_tile += np.log2(f[j][n]) * x[(i, j, n, 0)]
        m.addConstr(spatial_tile <= np.log2(S[i]), f"spatial_tile_{prefix}_{i}")

    for i in range(gb_start_level + perm_levels, total_levels):
        spatial_tile = 0
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                spatial_tile += np.log2(f[j][n]) * x[(i, j, n, 0)]
        m.addConstr(spatial_tile <= np.log2(S[i - perm_levels + 1]), f"spatial_tile_{i - perm_levels + 1}")

    # Add inner gb buffer constraints
    buf_util = {}
    for v in range(num_vars):
        for i in range(num_mems):
            buf_util[(i, v)] = 0

    for v in range(num_vars):
        for i_ in range(gb_start_level + perm_levels):
            for i in range(num_mems):
                for j, f_j in enumerate(f):
                    for n, f_jn in enumerate(f_j):
                        factor = 1
                        if v == 1 and j == 2:
                            factor = strides[0]
                        if v == 1 and j == 3:
                            factor = strides[1]
                        if i_ > gb_start_level and i_ < gb_start_level + perm_levels:
                            Z_const = Z[v][i][gb_start_level]
                        else:
                            Z_const = Z[v][i][i_]
                        buf_util[(i, v)] += np.log2(factor * f[j][n]) * (x[(i_, j, n, 0)] + x[i_, j, n, 1]) * A[j][
                            v] * Z_const
                        # use the i for the cur mem for relationship
                        # only add once
                        if i == 3 and j in [0, 1] and v == 1:
                            buf_util[(i, v)] += (x[(i_, j, n, 0)] + x[(i_, j, n, 1)]) * (1 - zz[(j + 2, i)]) * np.log2(
                                f[j][n])
                            buf_util[(i, v)] += (x[(i_, j, n, 0)] + x[(i_, j, n, 1)]) * zz[(j + 2, i)] * np.log2(2)

    for v in range(num_vars):
        # excluding DRAM
        for i in range(num_mems - 1):
            if M_log[i][v] > 0:
                m.addConstr(buf_util[(i, v)] <= np.log2(M_log[i][v]), f"buffer_size_{i}_{v}")

    # get compute cost
    inner_gb_cycles = 0
    for i in range(gb_start_level):
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                inner_gb_cycles += np.log2(f[j][n]) * (x[(i, j, n, 1)])

    gb_cycles = 0
    for i in range(gb_start_level, gb_start_level + perm_levels):
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                gb_cycles += np.log2(f[j][n]) * (x[(i, j, n, 1)])

    dram_cycles = 0
    for i in range(gb_start_level + perm_levels, total_levels):
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                dram_cycles += np.log2(f[j][n]) * (x[(i, j, n, 1)])

    total_compute = inner_gb_cycles + gb_cycles + dram_cycles
    gb_compute = inner_gb_cycles + gb_cycles

    # get traffic cost
    spatial_cost = {}
    for v in range(num_vars):
        size = 0
        for i in range(gb_start_level, gb_start_level + perm_levels):
            for j, f_j in enumerate(f):
                for n, f_jn in enumerate(f_j):
                    size += np.log2(f[j][n]) * (x[(i, j, n, 0)])
        spatial_cost[v] = size

    data_size = {}
    for v in range(num_vars):
        size = 0
        for i in range(gb_start_level):
            for j, f_j in enumerate(f):
                for n, f_jn in enumerate(f_j):
                    # TRICK prioritize spatial
                    factors = 0.8 + 0.04 * i
                    size += factors * np.log2(f[j][n]) * (x[(i, j, n, 0)] + x[i, j, n, 1]) * A[j][v]
        data_size[v] = size

    gb_traffic = {}
    for v in range(num_vars):
        size = 0
        for i in range(gb_start_level, gb_start_level + perm_levels):
            size += l[(v, i)] * y[(v, i)]
        gb_traffic[v] = size

    # use the last level gb y for DRAM
    dram_traffic = {}
    for v in range(num_vars):
        corr = y[(v, gb_start_level + perm_levels - 1)]
        i = gb_start_level + perm_levels
        # DRAM
        size = 0
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                size += np.log2(f[j][n]) * (x[(i, j, n, 1)])  # * corr
        dram_traffic[v] = size

    total_util = 0
    for i in range(gb_start_level):
        # for each memory and each variable there is a constraint
        for v in range(num_vars):
            # make weight util more important since it directly comes from dram
            factor = 1.01 if i == 2 else 1
            total_util += buf_util[(i, v)] * factor

    total_traffic = 0
    for v in range(num_vars):
        # TRICKS
        if v == 0:
            # encode dram latency for weights
            factor = 1.01
        else:
            factor = 1
        total_traffic += 0.99 * data_size[v] + 0.99 * spatial_cost[v] + gb_traffic[v] + dram_traffic[v] * factor

    # ========================== user-defined objective function ==========================
    # FIX: this assignment was commented out in the original, leaving
    # `cosa_obj` undefined at m.setObjective() below (NameError).
    cosa_obj = total_util * util_factor + total_compute * compute_factor + total_traffic * traffic_factor

    max_it = m.addVar(vtype=GRB.CONTINUOUS, name="max_it")
    its = []
    its.append(m.addVar(vtype=GRB.CONTINUOUS, name="a"))
    m.addConstr(its[-1] == total_traffic, "total_traffic")
    its.append(m.addVar(vtype=GRB.CONTINUOUS, name="b"))
    m.addConstr(its[-1] == total_compute, "total_compute")
    m.addConstr(max_it == max_(its), name="max_it_constr")

    total_util_var = m.addVar(vtype=GRB.CONTINUOUS, name="total_util_var")
    total_comp_var = m.addVar(vtype=GRB.CONTINUOUS, name="total_comp_var")
    total_traf_var = m.addVar(vtype=GRB.CONTINUOUS, name="total_traf_var")
    # cycle count = total max 3 * all log factors variables
    m.addConstr(total_util_var == total_util, "total_util_constraint")
    m.addConstr(total_comp_var == total_compute, "total_comp_constraint")
    m.addConstr(total_traf_var == total_traffic, "total_traf_constraint")

    m.ModelSense = GRB.MINIMIZE
    m.setObjective(cosa_obj, GRB.MINIMIZE)

    # optimize for the objective function
    milp_time = 0
    begin_time = time.time()
    m.optimize()
    end_time = time.time()
    milp_runtime = end_time - begin_time

    # output all constraints and variables
    m.write("debug.lp")

    result_dict = {}
    for variable in m.getVars():
        # logging.debug("Variable %s: Value %s" % (variable.varName, variable.x))
        assert (variable.varName not in result_dict)
        result_dict[variable.varName] = variable.x

    logging.debug('Obj: %g' % m.objVal)

    all_x = np.zeros((total_levels, perm_levels, 2))
    for i in range(total_levels):
        level_idx = 0
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                for k in range(2):
                    name = "X({},{},{},{})".format(i, j, n, k)
                    val = result_dict[name]
                    all_x[i, level_idx, k] = val
                level_idx += 1
    np.set_printoptions(precision=0, suppress=True)

    var_outer_perm_config = [-1] * perm_levels
    outer_perm_config = [-1] * perm_levels
    x_arr = np.zeros((perm_levels, perm_levels, 2))
    for i in range(gb_start_level, gb_start_level + perm_levels):
        level_idx = 0
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                for k in range(2):
                    name = "X({},{},{},{})".format(i, j, n, k)
                    val = result_dict[name]
                    x_arr[i - gb_start_level, level_idx, k] = val
                name = "X({},{},{},{})".format(i, j, n, 1)
                val = result_dict[name]
                if val == 1:
                    var_outer_perm_config[i - gb_start_level] = j
                level_idx += 1
    logging.info(f'var_outer_perm_config: {var_outer_perm_config}')

    y_arr = np.zeros((num_vars, perm_levels))
    for v in range(num_vars):
        for i in range(gb_start_level, gb_start_level + perm_levels):
            row_sum = 0
            val = result_dict["y({},{})".format(v, i)]
            y_arr[v, i - gb_start_level] = val

    # Merge the permutation, taking the first appearance of a prob to be the
    merge_outer_perm_config = []
    for i, var in enumerate(var_outer_perm_config):
        if var != -1 and var not in merge_outer_perm_config:
            merge_outer_perm_config.append(var)

    for i in range(len(f)):
        if i not in merge_outer_perm_config:
            merge_outer_perm_config.append(i)

    logging.info("var idx as the value {}".format(var_outer_perm_config))
    logging.info("merged var idx as the value {}".format(merge_outer_perm_config))

    outer_perm_config = [1] * len(f)
    for i, var in enumerate(merge_outer_perm_config):
        outer_perm_config[var] = i

    logging.info("ordering idx as the value {}".format(outer_perm_config))

    # init factor_config
    # DRAM is the last level
    factor_config = []
    spatial_config = []
    dram_level = -1
    for j, f_j in enumerate(f):
        sub_factor_config = []
        sub_spatial_config = []
        for n, f_jn in enumerate(f_j):
            sub_factor_config.append(dram_level)
            sub_spatial_config.append(0)
        factor_config.append(sub_factor_config)
        spatial_config.append(sub_spatial_config)

    for i in range(gb_start_level):
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                if f[j][n] == 1:
                    factor_config[j][n] = num_mems - 1
                    spatial_config[j][n] = 0
                    continue
                for k in range(2):
                    name = "X({},{},{},{})".format(i, j, n, k)
                    val = result_dict[name]
                    if val >= 0.9:
                        factor_config[j][n] = i
                        if k == 0:
                            spatial_config[j][n] = 1

    for i in range(gb_start_level + perm_levels, total_levels):
        for j, f_j in enumerate(f):
            for n, f_jn in enumerate(f_j):
                if f[j][n] == 1:
                    factor_config[j][n] = num_mems - 1
                    spatial_config[j][n] = 0
                    continue
                for k in range(2):
                    name = "X({},{},{},{})".format(i, j, n, k)
                    val = result_dict[name]
                    if val >= 0.9:
                        if k == 0:
                            raise ValueError('Invalid Mapping')
                        factor_config[j][n] = i - perm_levels + 1

    # set to -1 for not specified
    for j, f_j in enumerate(f):
        for n, f_jn in enumerate(f_j):
            for i in range(gb_start_level, gb_start_level + perm_levels):
                for k in range(2):
                    name = "X({},{},{},{})".format(i, j, n, k)
                    val = result_dict[name]
                    if val >= 0.9:
                        factor_config[j][n] = gb_start_level
                        if k == 0:
                            spatial_config[j][n] = 1

    logging.info(f"prime factors: {f}")
    logging.info(f"factor configs: {factor_config}")
    logging.info(f"spatial configs: {spatial_config}")

    return (factor_config, spatial_config, outer_perm_config, milp_runtime)
32,447
def get_image_ids(idol_id):
    """Return every image id belonging to the given idol, as {'ids': [...]}."""
    c.execute("SELECT id FROM groupmembers.imagelinks WHERE memberid=%s", (idol_id,))
    id_list = [row[0] for row in c.fetchall()]
    return {'ids': id_list}
32,448
def sortarai(datablock, s, Zdiff, **kwargs):
    """
    sorts data block in to first_Z, first_I, etc.

    Parameters
    _________
    datablock : Pandas DataFrame with Thellier-Tellier type data
    s : specimen name
    Zdiff : if True, take difference in Z values instead of vector difference
            NB: this should always be False
    **kwargs :
        version : data model.  if not 3, assume data model = 2.5

    Returns
    _______
    araiblock : [first_Z, first_I, ptrm_check, ptrm_tail, zptrm_check, GammaChecks]
    field : lab field (in tesla)
    """
    # Column names differ between MagIC data model 3 and 2.5.
    if 'version' in list(kwargs.keys()) and kwargs['version'] == 3:
        dec_key, inc_key = 'dir_dec', 'dir_inc'
        Mkeys = ['magn_moment', 'magn_volume', 'magn_mass', 'magnitude']
        meth_key = 'method_codes'
        temp_key, dc_key = 'treat_temp', 'treat_dc_field'
        dc_theta_key, dc_phi_key = 'treat_dc_field_theta', 'treat_dc_field_phi'
        # convert dataframe to list of dictionaries
        datablock = datablock.to_dict('records')
    else:
        dec_key, inc_key = 'measurement_dec', 'measurement_inc'
        Mkeys = ['measurement_magn_moment', 'measurement_magn_volume',
                 'measurement_magn_mass', 'measurement_magnitude']
        meth_key = 'magic_method_codes'
        temp_key, dc_key = 'treatment_temp', 'treatment_dc_field'
        dc_theta_key, dc_phi_key = 'treatment_dc_field_theta', 'treatment_dc_field_phi'
    first_Z, first_I, zptrm_check, ptrm_check, ptrm_tail = [], [], [], [], []
    field, phi, theta = "", "", ""
    starthere = 0
    Treat_I, Treat_Z, Treat_PZ, Treat_PI, Treat_M = [], [], [], [], []
    ISteps, ZSteps, PISteps, PZSteps, MSteps = [], [], [], [], []
    GammaChecks = []  # comparison of pTRM direction acquired and lab field
    # Pick the first moment column present in the first record.
    # NOTE(review): if none of Mkeys is present, momkey stays unbound and the
    # later float(rec[momkey]) raises NameError -- confirm inputs always
    # carry a moment column.
    rec = datablock[0]
    for key in Mkeys:
        if key in list(rec.keys()) and rec[key] != "":
            momkey = key
            break
    # first find all the steps
    for k in range(len(datablock)):
        rec = datablock[k]
        temp = float(rec[temp_key])
        methcodes = []
        tmp = rec[meth_key].split(":")
        for meth in tmp:
            methcodes.append(meth.strip())
        if 'LT-T-I' in methcodes and 'LP-TRM' not in methcodes and 'LP-PI-TRM' in methcodes:
            Treat_I.append(temp)
            ISteps.append(k)
            # Lab field and its direction are taken from the first in-field step.
            if field == "":
                field = float(rec[dc_key])
            if phi == "":
                phi = float(rec[dc_phi_key])
                theta = float(rec[dc_theta_key])
        # stick first zero field stuff into first_Z
        if 'LT-NO' in methcodes:
            Treat_Z.append(temp)
            ZSteps.append(k)
        if 'LT-T-Z' in methcodes:
            Treat_Z.append(temp)
            ZSteps.append(k)
        if 'LT-PTRM-Z' in methcodes:
            Treat_PZ.append(temp)
            PZSteps.append(k)
        if 'LT-PTRM-I' in methcodes:
            Treat_PI.append(temp)
            PISteps.append(k)
        if 'LT-PTRM-MD' in methcodes:
            Treat_M.append(temp)
            MSteps.append(k)
        if 'LT-NO' in methcodes:
            dec = float(rec[dec_key])
            inc = float(rec[inc_key])
            st = float(rec[momkey])
            first_I.append([273, 0., 0., 0., 1])
            first_Z.append([273, dec, inc, st, 1])  # NRM step
    for temp in Treat_I:  # look through infield steps and find matching Z step
        if temp in Treat_Z:  # found a match
            istep = ISteps[Treat_I.index(temp)]
            irec = datablock[istep]
            methcodes = []
            tmp = irec[meth_key].split(":")
            for meth in tmp:
                methcodes.append(meth.strip())
            # take last record as baseline to subtract
            brec = datablock[istep - 1]
            zstep = ZSteps[Treat_Z.index(temp)]
            zrec = datablock[zstep]
            # sort out first_Z records
            if "LP-PI-TRM-IZ" in methcodes:
                ZI = 0
            else:
                ZI = 1
            dec = float(zrec[dec_key])
            inc = float(zrec[inc_key])
            st = float(zrec[momkey])
            first_Z.append([temp, dec, inc, st, ZI])
            # sort out first_I records
            try:
                idec = float(irec[dec_key])
                iinc = float(irec[inc_key])
                istr = float(irec[momkey])
            except TypeError as ex:
                raise Exception('Malformed data of some sort for dec/inc/moment in measurement: {}. You must fix this before proceeding.\n Bad record: {}'.format(irec.get('measurement', ''), irec))
            # Vector-subtract the zero-field component to isolate the pTRM.
            X = dir2cart([idec, iinc, istr])
            BL = dir2cart([dec, inc, st])
            I = []
            for c in range(3):
                I.append((X[c] - BL[c]))
            if I[2] != 0:
                iDir = cart2dir(I)
                if Zdiff == 0:
                    first_I.append([temp, iDir[0], iDir[1], iDir[2], ZI])
                else:
                    first_I.append([temp, 0., 0., I[2], ZI])
                gamma = angle([iDir[0], iDir[1]], [phi, theta])
            else:
                first_I.append([temp, 0., 0., 0., ZI])
                gamma = 0.0
            # put in Gamma check (infield trm versus lab field)
            if 180. - gamma < gamma:
                gamma = 180. - gamma
            GammaChecks.append([temp - 273., gamma])
    for temp in Treat_PI:  # look through infield steps and find matching Z step
        step = PISteps[Treat_PI.index(temp)]
        rec = datablock[step]
        dec = float(rec[dec_key])
        inc = float(rec[inc_key])
        st = float(rec[momkey])
        brec = datablock[step - 1]  # take last record as baseline to subtract
        pdec = float(brec[dec_key])
        pinc = float(brec[inc_key])
        pint = float(brec[momkey])
        X = dir2cart([dec, inc, st])
        prevX = dir2cart([pdec, pinc, pint])
        I = []
        for c in range(3):
            I.append(X[c] - prevX[c])
        dir1 = cart2dir(I)
        if Zdiff == 0:
            ptrm_check.append([temp, dir1[0], dir1[1], dir1[2]])
        else:
            ptrm_check.append([temp, 0., 0., I[2]])
    # in case there are zero-field pTRM checks (not the SIO way)
    for temp in Treat_PZ:
        step = PZSteps[Treat_PZ.index(temp)]
        rec = datablock[step]
        dec = float(rec[dec_key])
        inc = float(rec[inc_key])
        st = float(rec[momkey])
        brec = datablock[step - 1]
        pdec = float(brec[dec_key])
        pinc = float(brec[inc_key])
        pint = float(brec[momkey])
        X = dir2cart([dec, inc, st])
        prevX = dir2cart([pdec, pinc, pint])
        I = []
        for c in range(3):
            I.append(X[c] - prevX[c])
        dir2 = cart2dir(I)
        zptrm_check.append([temp, dir2[0], dir2[1], dir2[2]])
    # get pTRM tail checks together -
    for temp in Treat_M:
        # tail check step - just do a difference in magnitude!
        step = MSteps[Treat_M.index(temp)]
        rec = datablock[step]
        st = float(rec[momkey])
        if temp in Treat_Z:
            step = ZSteps[Treat_Z.index(temp)]
            brec = datablock[step]
            pint = float(brec[momkey])
            # X=dir2cart([dec,inc,st])
            # prevX=dir2cart([pdec,pinc,pint])
            # I=[]
            # for c in range(3):I.append(X[c]-prevX[c])
            # d=cart2dir(I)
            # ptrm_tail.append([temp,d[0],d[1],d[2]])
            # difference - if negative, negative tail!
            ptrm_tail.append([temp, 0, 0, st - pint])
        else:
            print(
                s, ' has a tail check with no first zero field step - check input file! for step', temp - 273.)
    #
    # final check
    #
    if len(first_Z) != len(first_I):
        print(len(first_Z), len(first_I))
        print(" Something wrong with this specimen! Better fix it or delete it ")
        input(" press return to acknowledge message")
    araiblock = (first_Z, first_I, ptrm_check, ptrm_tail,
                 zptrm_check, GammaChecks)
    return araiblock, field
32,449
def GetRemoteBuildPath(build_revision, target_platform='chromium',
                       target_arch='ia32', patch_sha=None):
  """Compute the url to download the build from."""

  def _root_folder(platform):
    """Return the Google Cloud Storage root folder for the host/platform."""
    if IsWindowsHost():
      use_x64 = Is64BitWindows() and target_arch == 'x64'
      return 'Win x64 Builder' if use_x64 else 'Win Builder'
    if IsLinuxHost():
      return 'android_perf_rel' if platform == 'android' else 'Linux Builder'
    if IsMacHost():
      return 'Mac Builder'
    raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

  base_filename = GetZipFileName(build_revision, target_arch, patch_sha)
  return '%s/%s' % (_root_folder(target_platform), base_filename)
32,450
def _extract_then_dump(hex_string: str) -> str:
    """Decompress hex-encoded content and JSON-serialize its paragraph list."""
    raw = bytes.fromhex(hex_string)
    paragraphs = universal_extract_paragraphs(unpack(raw))
    return json.dumps(paragraphs)
32,451
def sso_redirect_url(nonce, secret, email, external_id, username, name,
                     avatar_url, is_admin, **kwargs):
    """Build the Discourse SSO login redirect URL.

    nonce: returned by sso_validate()
    secret: the secret key you entered into Discourse sso secret
    email: email address of the user who logged in
    external_id: the internal id of the logged in user
    username: username of the logged in user
    name / avatar_url / is_admin: forwarded user attributes
    Extra keyword arguments are included in the signed payload as-is.

    return value: URL to redirect users back to discourse, now logged in
    as ``username``
    """
    kwargs.update({
        'nonce': nonce,
        'email': email,
        'external_id': external_id,
        'username': username,
        'name': name,
        'avatar_url': avatar_url,
        'avatar_force_update': 'true',
        'admin': is_admin,
    })
    # BUG FIX: base64.encodestring() was deprecated since 3.1 and removed in
    # Python 3.9; base64.encodebytes() is the drop-in replacement and
    # produces byte-identical (newline-wrapped) output.
    return_payload = base64.encodebytes(urlencode(kwargs).encode())
    # Discourse verifies the HMAC-SHA256 of the base64 payload with the
    # shared secret.
    signature = hmac.new(secret.encode(), return_payload,
                         digestmod=hashlib.sha256)
    query_string = urlencode({'sso': return_payload,
                              'sig': signature.hexdigest()})
    return '/session/sso_login?%s' % query_string
32,452
def normalized_cluster_entropy(cluster_labels, n_clusters=None):
    """Shannon entropy of the cluster sizes, normalized by log(n_clusters).

    Args:
        cluster_labels (list/np.ndarray): Cluster labels.
        n_clusters (int, optional): Number of clusters used for the
            normalizer. Defaults to the number of distinct labels.

    Returns:
        float: Shannon entropy / log(n_clusters); 0.0 when there are fewer
        than two clusters (a single cluster has zero entropy, and the
        normalizer log(1) would otherwise cause a division by zero).
    """
    if n_clusters is None:
        n_clusters = len(np.unique(cluster_labels))
    # BUG FIX: guard the degenerate single-cluster case (log(1) == 0).
    if n_clusters < 2:
        return 0.0
    counts = np.unique(cluster_labels, return_counts=True)[1]
    return entropy(counts) / np.log(n_clusters)
32,453
def step_impl(context):
    """Wait for the Signup page to load, then verify its title.

    :type context: behave.runner.Context
    """
    wait = WebDriverWait(context.driver, 10)
    wait.until(EC.title_contains("Signup"))
    assert_that(context.driver.title, contains_string("Signup"))
32,454
def ingest_data(data, schema=None, date_format=None, field_aliases=None):
    """
    data: Array of Dictionary objects
    schema: PyArrow schema object or list of column names
    date_format: Pandas datetime format string (with schema only)
    field_aliases: dict mapping Json field names to desired schema names

    return: a PyArrow Batch
    """
    # Dispatch on the schema/alias combination; the order of these guards
    # matters — a list schema combined with aliases must win over a plain
    # list schema.
    if isinstance(schema, list) and isinstance(field_aliases, dict):
        return _convert_data_with_column_names_dict(data, field_aliases)
    if isinstance(schema, dict):
        return _convert_data_with_column_names_dict(data, schema)
    if isinstance(schema, list):
        return _convert_data_with_column_names(data, schema)
    if isinstance(schema, pa.Schema):
        return _convert_data_with_schema(
            data, schema, date_format=date_format, field_aliases=field_aliases)
    return _convert_data_without_schema(data)
32,455
def build_messages(missing_scene_paths, update_stac):
    """Build queue messages for the given missing scene paths.

    :param missing_scene_paths: iterable of scene path strings
    :param update_stac: flag copied verbatim into every message
    :return: dict with ``message_list`` (one message per valid path) and
        ``failed`` (error strings for paths with no derivable product id)
    """
    message_list = []
    error_list = []
    for path in missing_scene_paths:
        # Product id is the last path component after stripping slashes.
        landsat_product_id = str(path.strip("/").split("/")[-1])
        if not landsat_product_id:
            error_list.append(
                f"It was not possible to build product ID from path {path}"
            )
            # BUG FIX: skip this path — previously a message with an empty
            # product id was still enqueued alongside the recorded error.
            continue
        message_list.append(
            {
                "Message": {
                    "landsat_product_id": landsat_product_id,
                    "s3_location": str(path),
                    "update_stac": update_stac,
                }
            }
        )
    return {"message_list": message_list, "failed": error_list}
32,456
def DecrementPatchNumber(version_num, num):
    """Helper function for `GetLatestVersionURI`.

    DecrementPatchNumber('68.0.3440.70', 6) => '68.0.3440.64'

    Args:
      version_num(string): version number to be decremented
      num(int): the amount that the patch number need to be reduced

    Returns:
      string: decremented version number

    Raises:
      ValueError: if decrementing would make the patch number negative.
    """
    version_num_list = version_num.split('.')
    patch = int(version_num_list[-1]) - num
    # An `assert` is stripped under `python -O`; validate explicitly instead.
    if patch < 0:
        raise ValueError('patch number cannot be negative')
    version_num_list[-1] = str(patch)
    return '.'.join(version_num_list)
32,457
def hi_means(steps, edges):
    """Apply k-means hierarchically, halving the number of centers each step.

    :param edges: initial data points (array with a ``shape`` attribute)
    :param steps: number of halving iterations to run
    :returns: a tuple of two arrays, ``kmeans_history`` containing a number
        of arrays of varying lengths, and ``labels_history``, an array of
        length equal to edges.shape[0]
    """
    centers_per_step = []
    labels_per_step = []
    current = edges
    for _ in xrange(steps):
        # Each iteration clusters the previous centers into half as many.
        result = nkm.kmeans(current.shape[0] / 2, current)
        current = result[0]
        centers_per_step.append(result[0])
        labels_per_step.append(result[1])
    return np.array(centers_per_step), np.array(labels_per_step)
32,458
def tag_item(tag_name, link_flag=False):
    """
    Returns Items tagged with tag_name
    ie. tag-name: django will return items tagged django.

    :param tag_name: tag to filter items by (sent as ``tag`` in the payload)
    :param link_flag: forwarded to ``render`` — presumably toggles link
        output; TODO confirm against ``render``'s signature
    """
    # NOTE: Python 2 print-statement syntax; this module targets Python 2.
    print C3 % ("\n_TAGGED RESULTS_")
    # Mutates the module-level PAYLOAD dict before POSTing it.
    PAYLOAD["tag"] = tag_name
    res = requests.post(
        GET_URL, data=json.dumps(PAYLOAD), headers=HEADERS, verify=False)
    # status == 2 is the service's "tag not found" code — abort the CLI.
    if res.json()['status'] == 2:
        print C3 % ("Invalid tag: Tag not found!")
        exit()
    return render(res.json()['list'], link_flag=link_flag)
32,459
def movie_info(tmdb_id):
    """Renders salient movie data from external API."""
    print("Fetching movie info based on tmdb id...")
    result = TmdbMovie.get_movie_info_by_id(tmdb_id)

    if not result['success']:
        print("Error!")
        status = result['status_code']
        if status == 404:
            # Movie id unknown to TMDB.
            abort(404)
        # Some other error, e.g. 429: too many requests.
        err_message = f"TMDB API query failed; HTTP response = {status}"
        return render_template("errors/misc-error.html",
                               err_message=err_message)

    movie = result['movie']

    # List membership is only checked for authenticated users, keeping the
    # anonymous path fast (no per-user queries).
    on_user_list = False
    film_list_item_id = None

    # Search-engine queries for the movie, keyed by engine display name.
    search_engines = {
        'Google': movie.get_query('google'),
        'DuckDuckGo': movie.get_query('duckduckgo')
    }

    if current_user.is_authenticated:
        print(f"Checking whether '{movie.title}' on user list...")
        film = FilmListItem.query.filter_by(
            tmdb_id=tmdb_id, user_id=current_user.id).first()
        if film is not None:
            on_user_list = True
            film_list_item_id = film.id
        print(f"On user list? {on_user_list}, id: {film_list_item_id}")

    return render_template("movie.html",
                           movie=movie,
                           on_user_list=on_user_list,
                           search_engines=search_engines)
32,460
def diabetic(y, t, ui, dhat):
    """Expanded Bergman Minimal model with meal and insulin inputs for an
    insulin-dependent type-I diabetic.

    States (6): In non-diabetic patients, the body maintains the blood
    glucose level at a range between about 3.6 and 5.8 mmol/L (64.8 and
    104.4 mg/dL with 1:18 conversion between mmol/L and mg/dL).

    :param y: state vector [g, x, i, q1, q2, g_gut]
    :param t: time step (unused; kept for ODE-solver call signatures)
    :param ui: Insulin infusion rate (mU/min)
    :param dhat: Meal disturbance (mmol/L-min)
    :return: change in states (converted to per-hour rates)
    """
    g = y[0]      # blood glucose (mg/dL)
    x = y[1]      # remote insulin (micro-u/ml)
    i = y[2]      # plasma insulin (micro-u/ml)
    q1 = y[3]     # S1
    q2 = y[4]     # S2
    g_gut = y[5]  # gut blood glucose (mg/dl)

    # Model parameters.
    gb = 291.0     # (mg/dL) basal blood glucose
    p1 = 3.17e-2   # 1/min
    p2 = 1.23e-2   # 1/min
    si = 2.9e-2    # 1/min * (mL/micro-U)
    ke = 9.0e-2    # 1/min, insulin elimination from plasma
    kabs = 1.2e-2  # 1/min, t max,G inverse
    kemp = 1.8e-1  # 1/min, t max,I inverse
    f = 8.00e-1    # L
    vi = 12.0      # L, insulin distribution volume (unused in equations)
    vg = 12.0      # L, glucose distribution volume

    dydt = np.array([
        # (1) glucose dynamics with meal absorption and disturbance terms
        -p1 * (g - gb) - si * x * g + f * kabs / vg * g_gut + f / vg * dhat,
        # (2) remote insulin compartment dynamics
        p2 * (i - x),
        # (3) plasma insulin concentration
        -ke * i + ui,
        # two-part insulin absorption model dS1/dt
        ui - kemp * q1,
        # two-part insulin absorption model dS2/dt
        -kemp * (q2 - q1),
        # gut glucose
        kemp * q2 - kabs * g_gut,
    ])
    # Convert from per-minute to per-hour rates.
    return dydt * 60
32,461
def get_spacy_sentences(doc_text):
    """Split the given document text into spaCy sentence spans.

    :param doc_text: Text to tokenize
    :return: list of spacy sentences
    """
    nlp = _get_spacy_nlp()
    return [sentence for sentence in nlp(doc_text).sents]
32,462
def get_recommendations(commands_fields, app_pending_changes):
    """
    :param commands_fields: list of ``[command_name, [field, ...]]`` pairs
    :param app_pending_changes: dict mapping field name -> change metadata
        (must provide ``user`` and ``updated`` keys)
    :return: List of object describing command to run

    >>> cmd_fields = [
    ...     ['cmd1', ['f1', 'f2']],
    ...     ['cmd2', ['prop']],
    ... ]
    >>> app_fields = {
    ...     'f2': {'field': 'f2', 'user': 'api', 'updated': '00:00'}
    ... }
    >>> from pprint import pprint
    >>> pprint(get_recommendations(cmd_fields, app_fields))
    [{'command': 'cmd1', 'field': 'f2', 'updated': '00:00', 'user': 'api'}]
    """
    recommended_cmds = []
    for cmd in commands_fields:
        cmd_name, cmd_fields = cmd[0], cmd[1]
        # Recommend each command at most once: the first pending field wins.
        for field in cmd_fields:
            if field in app_pending_changes:  # direct membership; .keys() was redundant
                change = app_pending_changes[field]
                recommended_cmds.append({
                    'command': cmd_name,
                    'field': field,
                    'user': change['user'],
                    'updated': change['updated'],
                })
                break
    return recommended_cmds
32,463
def file_based_convert_examples_to_features(
        examples, label_list, max_seq_length, tokenizer, output_file):
    """Convert a set of `InputExample`s to a TFRecord file.

    Args:
        examples: iterable of `InputExample`s to serialize.
        label_list: list of all labels (forwarded to convert_single_example).
        max_seq_length: maximum token sequence length per example.
        tokenizer: tokenizer forwarded to convert_single_example.
        output_file: path of the TFRecord file to write.
    """

    def create_int_feature(values):
        """Wrap an int sequence in a tf.train.Feature."""
        return tf.train.Feature(
            int64_list=tf.train.Int64List(value=list(values)))

    writer = tf.python_io.TFRecordWriter(output_file)
    try:
        for (ex_index, example) in enumerate(examples):
            if ex_index % 10000 == 0:
                tf.logging.info("Writing example %d of %d" %
                                (ex_index, len(examples)))

            feature = convert_single_example(ex_index, example, label_list,
                                             max_seq_length, tokenizer)

            features = collections.OrderedDict()
            features["input_ids"] = create_int_feature(feature.input_ids)
            features["input_mask"] = create_int_feature(feature.input_mask)
            features["segment_ids"] = create_int_feature(feature.segment_ids)
            features["label_ids"] = create_int_feature([feature.label_id])

            tf_example = tf.train.Example(
                features=tf.train.Features(feature=features))
            writer.write(tf_example.SerializeToString())
    finally:
        # BUG FIX: close the writer even if converting an example raises,
        # avoiding a leaked file handle / unflushed TFRecord file.
        writer.close()
32,464
def main():
    """Digital bit output example.

    Interactively selects a DAQ device, configures its first digital port
    (or its individual bits) for output, then repeatedly prompts for a
    numeric value and writes it out bit by bit until the user enters a
    non-numeric value or interrupts.
    """
    daq_device = None
    dio_device = None
    port_to_write = None
    port_info = None
    interface_type = InterfaceType.ANY
    # Index into the device's port-type list; 0 selects the first port.
    port_types_index = 0
    try:
        # Get descriptors for all of the available DAQ devices.
        devices = get_daq_device_inventory(interface_type)
        number_of_devices = len(devices)
        if number_of_devices == 0:
            raise RuntimeError('Error: No DAQ devices found')
        print('Found', number_of_devices, 'DAQ device(s):')
        for i in range(number_of_devices):
            print(' [', i, '] ', devices[i].product_name, ' (',
                  devices[i].unique_id, ')', sep='')
        descriptor_index = input('\nPlease select a DAQ device, enter a number'
                                 + ' between 0 and '
                                 + str(number_of_devices - 1) + ': ')
        descriptor_index = int(descriptor_index)
        if descriptor_index not in range(number_of_devices):
            raise RuntimeError('Error: Invalid descriptor index')
        # Create the DAQ device from the descriptor at the specified index.
        daq_device = DaqDevice(devices[descriptor_index])
        # Get the DioDevice object and verify that it is valid.
        dio_device = daq_device.get_dio_device()
        if dio_device is None:
            raise RuntimeError('Error: The device does not support digital '
                               'output')
        # Establish a connection to the DAQ device.
        descriptor = daq_device.get_descriptor()
        print('\nConnecting to', descriptor.dev_string, '- please wait...')
        # For Ethernet devices using a connection_code other than the default
        # value of zero, change the line below to enter the desired code.
        daq_device.connect(connection_code=0)
        # Get the port types for the device(AUXPORT, FIRSTPORTA, ...)
        dio_info = dio_device.get_info()
        port_types = dio_info.get_port_types()
        # Clamp the index so a device with fewer ports still works.
        if port_types_index >= len(port_types):
            port_types_index = len(port_types) - 1
        port_to_write = port_types[port_types_index]
        # Get the port I/O type and the number of bits for the first port.
        port_info = dio_info.get_port_info(port_to_write)
        # If the port is bit configurable, then configure the individual bits
        # for output; otherwise, configure the entire port for output.
        if port_info.port_io_type == DigitalPortIoType.BITIO:
            # Configure all of the bits for output for the first port.
            for bit_number in range(port_info.number_of_bits):
                dio_device.d_config_bit(port_to_write, bit_number,
                                        DigitalDirection.OUTPUT)
        elif port_info.port_io_type == DigitalPortIoType.IO:
            # Configure the entire port for output.
            dio_device.d_config_port(port_to_write, DigitalDirection.OUTPUT)
        # Largest value representable with the port's bit width.
        max_port_value = pow(2.0, port_info.number_of_bits) - 1
        print('\n', descriptor.dev_string, ' ready', sep='')
        print(' Function demonstrated: dio_device.d_bit_out()')
        print(' Port: ', port_to_write.name)
        print(' Port I/O type: ', port_info.port_io_type.name)
        print(' Bits: ', port_info.number_of_bits)
        try:
            input('\nHit ENTER to continue\n')
        except (NameError, SyntaxError):
            pass
        system('clear')
        try:
            while True:
                try:
                    reset_cursor()
                    print('Active DAQ device: ', descriptor.dev_string, ' (',
                          descriptor.unique_id, ')\n', sep='')
                    clear_eol()
                    data = input('Enter a value between 0 and '
                                 + '{:.0f}'.format(max_port_value)
                                 + ' (or non-numeric character to exit): ')
                    # Write the entered value one bit at a time, LSB first.
                    for bit_number in range(port_info.number_of_bits):
                        bit_value = (int(data) >> bit_number) & 1
                        dio_device.d_bit_out(port_to_write, bit_number,
                                             bit_value)
                        print('Bit(', bit_number, ') Data: ', bit_value)
                    sleep(0.1)
                except (ValueError, NameError, SyntaxError):
                    # Non-numeric input (int() failing) ends the loop.
                    break
        except KeyboardInterrupt:
            pass
    except RuntimeError as error:
        print('\n', error)
    finally:
        if daq_device:
            if dio_device and port_to_write and port_info:
                # before disconnecting, set the port back to input
                if (port_info.port_io_type == DigitalPortIoType.IO
                        or port_info.port_io_type == DigitalPortIoType.BITIO):
                    dio_device.d_config_port(port_to_write,
                                             DigitalDirection.INPUT)
            if daq_device.is_connected():
                daq_device.disconnect()
            daq_device.release()
32,465
def hello():
    """Return a friendly HTTP greeting."""
    greeting = 'Hello World!!!'
    return greeting
32,466
def test_land_initialisation():
    """
    For all colors test that lands have given color and enter untapped.
    """
    for land_color in ertai.colors:
        land = ertai.BasicLand(color=land_color)
        assert land.color == land_color
        assert land.tapped is False
32,467
def merge_day_month_year(data, day, month, year):
    """Merge separate day/month/year columns of ``data`` into a single date.

    Not implemented yet. Raising instead of silently returning ``None``
    makes accidental use fail loudly rather than propagate a no-op.

    :raises NotImplementedError: always, until this is implemented.
    """
    raise NotImplementedError("merge_day_month_year is not implemented yet")
32,468
def _findStress(
    syllables: Union[List[Syllable], List[List[str]]]
) -> Tuple[List[int], List[int]]:
    """Find the syllable and phone indices for stress annotations.

    Primary-stressed positions ("ˈ") are pushed to the front of the result
    lists; secondary-stressed positions ("ˌ") are appended at the back.
    """
    normalized = [_toSyllable(entry) for entry in syllables]

    syllableIndices: List[int] = []
    phoneIndices: List[int] = []
    for sylIdx, syl in enumerate(normalized):
        for phnIdx, phn in enumerate(syl.phonemes):
            if "ˈ" in phn:
                # Primary stress: record at the front and stop scanning
                # this syllable.
                syllableIndices.insert(0, sylIdx)
                phoneIndices.insert(0, phnIdx)
                break
            if "ˌ" in phn:
                # Secondary stress: append; multiple matches per syllable
                # are possible since the scan continues.
                syllableIndices.append(sylIdx)
                phoneIndices.append(phnIdx)

    return syllableIndices, phoneIndices
32,469
def test_discogs_ok_wantlist(mocker):
    """test_discogs_ok_default

    Test if Discogs can obtain a wantlist.

    Args:
        mocker (pytest_mock.plugin.MockerFixture): Mocker.
    """
    # Canned API responses in the order the client is expected to request
    # them: identity, wantlist page (fetched twice), release details, and
    # price suggestions.
    response_0 = {
        'id': 1234567,
        'username': 'dummy',
        'resource_url': 'https://api.discogs.com/users/dummy',
        'consumer_name': 'dummy'}
    response_1 = {
        'pagination': {
            'page': 1, 'pages': 1, 'per_page': 100, 'items': 1, 'urls': {}},
        'wants': [{
            'id': 3099920,
            'resource_url': 'https://api.discogs.com/users/dummy/wants/3099920',
            'rating': 0,
            'date_added': '2018-09-25T02:12:31-07:00',
            'basic_information': {
                'id': 3099920,
                'master_id': 42496,
                'master_url': 'https://api.discogs.com/masters/42496',
                'resource_url': 'https://api.discogs.com/releases/3099920',
                'title': 'Lunar Womb',
                'year': 2006,
                'formats': [{
                    'name': 'Vinyl',
                    'qty': '1',
                    'text': 'Red',
                    'descriptions': [
                        'LP', 'Album', 'Limited Edition', 'Reissue',
                        'Remastered']}],
                'labels': [{
                    'name': '20 Buck Spin',
                    'catno': 'spin:004',
                    'entity_type': '1',
                    'entity_type_name': 'Label',
                    'id': 42593,
                    'resource_url': 'https://api.discogs.com/labels/42593'}],
                'artists': [{
                    'name': 'The Obsessed',
                    'anv': '',
                    'join': '',
                    'role': '',
                    'tracks': '',
                    'id': 311946,
                    'resource_url': 'https://api.discogs.com/artists/311946'}],
                'thumb': 'https://img.discogs.com/dummy.jpg',
                'cover_image': 'https://img.discogs.com/dummy.jpg',
                'genres': ['Rock'],
                'styles': ['Stoner Rock', 'Doom Metal', 'Heavy Metal']},
            'notes': ''}]}
    response_2 = {
        'id': 3099920,
        'status': 'Accepted',
        'year': 2006,
        'resource_url': 'https://api.discogs.com/releases/3099920',
        'uri': 'https://www.discogs.com/The-Obsessed-Lunar-Womb/release/3099920',
        'artists': [],
        'artists_sort': 'Obsessed, The',
        'labels': [],
        'series': [],
        'companies': [],
        'formats': [],
        'data_quality': 'Needs Vote',
        'community': {
            'have': 73,
            'want': 120,
            'rating': {'count': 13, 'average': 4.46},
            'submitter': {
                'username': 'dummy',
                'resource_url': 'https://api.discogs.com/users/dummy'},
            'contributors': [],
            'data_quality': 'Needs Vote',
            'status': 'Accepted'},
        'format_quantity': 1,
        'date_added': '2011-09-11T03:51:00-07:00',
        'date_changed': '2014-12-05T09:07:02-08:00',
        'num_for_sale': 1,
        'lowest_price': 60,
        'master_id': 42496,
        'master_url': 'https://api.discogs.com/masters/42496',
        'title': 'Lunar Womb',
        'country': 'US',
        'released': '2006',
        'notes': 'Pressing Info:\r\n\r\n1000 Black\r\n300 Red\r\n',
        'released_formatted': '2006',
        'identifiers': [],
        'videos': [],
        'genres': ['Rock'],
        'styles': ['Stoner Rock', 'Doom Metal', 'Heavy Metal'],
        'tracklist': [],
        'extraartists': [],
        'images': [],
        'thumb': '',
        'estimated_weight': 230}
    response_3 = {
        'Mint (M)': {'currency': 'EUR', 'value': 27.31},
        'Near Mint (NM or M-)': {'currency': 'EUR', 'value': 24.44},
        'Very Good Plus (VG+)': {'currency': 'EUR', 'value': 18.69},
        'Very Good (VG)': {'currency': 'EUR', 'value': 12.94},
        'Good Plus (G+)': {'currency': 'EUR', 'value': 7.19},
        'Good (G)': {'currency': 'EUR', 'value': 4.31},
        'Fair (F)': {'currency': 'EUR', 'value': 2.87},
        'Poor (P)': {'currency': 'EUR', 'value': 1.44}}
    # Expected wantlist structure assembled by Discogs.get_wantlist().
    result = {
        'username': 'dummy',
        'wantlist': {
            'The Obsessed': {
                3099920: {
                    'album': 'Lunar Womb',
                    'year': 2006,
                    'id': 3099920,
                    'instance_id': 3099920,
                    'format': 'VINYL',
                    'quantity': '1',
                    'catno': 'SPIN:004',
                    'styles': 'Stoner Rock, Doom Metal, Heavy Metal',
                    'url': 'https://api.discogs.com/releases/3099920',
                    'have': 73,
                    'want': 120,
                    'uri': 'https://www.discogs.com/The-Obsessed-Lunar-Womb/release/3099920',
                    'notes': 'Pressing Info:\r\n\r\n1000 Black\r\n300 Red\r\n',
                    'num_for_sale': 1,
                    'lowest_price': '60.00',
                    'prices': {
                        'mint': '27.31',
                        'near_mint': '24.44',
                        'very_good_plus': '18.69',
                        'very_good': '12.94',
                        'good_plus': '7.19',
                        'good': '4.31',
                        'fair': '2.87',
                        'poor': '1.44'}}}}}
    # Older patch target kept for reference (private __request method):
    # mocker.patch(
    #     'discogs2xlsx.discogs.Discogs._Discogs__request',
    #     side_effect=[
    #         response_0, response_1, response_1, response_2, response_3])
    mocker.patch(
        'discogs2xlsx.discogs.Discogs.DiscogsRequests.request',
        side_effect=[
            response_0, response_1, response_1, response_2,
            response_3])
    l = Logger(level=Logger.Level.NONE)
    d = Discogs(key='dummy', logger=l)
    w = d.get_wantlist(details=True, prices=True)
    assert isinstance(d, Discogs)
    assert w == result
32,470
def add_flair_specific_reply(replyable, response_info):
    """Add an author's flair-specific reply to the comment/submission.

    :param replyable: The comment/submission on reddit
    :param response_info: ResponseInfo containing hero_id and link for response
    :return: None
    """
    create_and_add_reply(
        replyable=replyable,
        response_url=response_info.link,
        hero_id=response_info.hero_id,
    )
32,471
def test_import_validation_fail(tmp_trestle_dir: pathlib.Path) -> None: """Validation failed.""" # catalog data dup_cat = { 'uuid': '525f94af-8007-4376-8069-aa40179e0f6e', 'metadata': { 'title': 'Generic catalog created by trestle.', 'last-modified': '2020-12-11T02:04:51.053+00:00', 'version': '0.0.0', 'oscal-version': oscal.OSCAL_VERSION }, 'back-matter': { 'resources': [ { 'uuid': 'b1101385-9e36-44a3-ba03-98b6ebe0a367' }, { 'uuid': 'b1101385-9e36-44a3-ba03-98b6ebe0a367' } ] } } catalog_data = parser.parse_dict(dup_cat, 'trestle.oscal.catalog.Catalog') repo = Repository(tmp_trestle_dir) with pytest.raises(TrestleError, match=r'Validation .* did not pass'): repo.import_model(catalog_data, 'imported')
32,472
def gen_dummy_object(class_title, doc):
    """
    Create a dummy object based on the definitions in the API Doc.
    :param class_title: Title of the class whose object is being created.
    :param doc: ApiDoc.
    :return: A dummy object of class `class_title`.
    """

    def _random_string():
        """Return a random 6-character uppercase/digit string."""
        return ''.join(random.choice(
            string.ascii_uppercase + string.digits) for _ in range(6))

    object_ = {
        "@type": class_title
    }
    expanded_base_url = DocUrl.doc_url
    for class_path in doc.collections:
        if class_title == doc.collections[class_path]["collection"].name:
            members = list()
            manages_class_titles = list()
            # BUG FIX: doc.collections is keyed by class_path, not by the
            # collection's name (class_title).
            collection_manages = doc.collections[class_path]["collection"].manages
            if isinstance(collection_manages, dict):
                # only one manages block
                manages_class = collection_manages['object'].split(
                    expanded_base_url)[1]
                manages_class_titles.append(manages_class)
            elif isinstance(collection_manages, list):
                # multiple manages block
                for manages_block in collection_manages:
                    # BUG FIX: read each block, not the enclosing list
                    # (indexing the list with 'object' raised TypeError).
                    manages_class = manages_block['object'].split(
                        expanded_base_url)[1]
                    manages_class_titles.append(manages_class)
            # Populate the collection with three random members.
            for _ in range(3):
                member_class = random.choice(manages_class_titles)
                member = gen_dummy_object(member_class, doc)
                member_id = crud.insert(object_=member, session=get_session(),
                                        collection=False)
                from hydrus.data.helpers import get_path_from_type
                member_class_path = get_path_from_type(member_class)
                member_api_path = f'/{get_api_name()}/{member_class_path}/{member_id}'
                members.append({
                    "@id": member_api_path,
                    "@type": member_class,
                })
            object_['members'] = members
            return object_
    for class_path in doc.parsed_classes:
        if class_title == doc.parsed_classes[class_path]["class"].title:
            for prop in doc.parsed_classes[class_path]["class"].supportedProperty:
                # Skip read-only properties.
                if prop.write is False:
                    continue
                if isinstance(prop.prop, HydraLink):
                    object_[prop.title] = _random_string()
                elif expanded_base_url in prop.prop:
                    # Nested class-typed property: recurse.
                    prop_class = prop.prop.split(expanded_base_url)[1]
                    object_[prop.title] = gen_dummy_object(prop_class, doc)
                else:
                    type_ = prop.kwargs.get('range')
                    if type_ is not None:
                        object_[prop.title] = random.randint(50, 100)
                    else:
                        object_[prop.title] = _random_string()
            return object_
32,473
def gather_info(arguments) -> Info:
    """Gather info."""
    if arguments.integration:
        info = {"domain": arguments.integration}
    elif arguments.develop:
        # Developer mode: skip all interactive prompts.
        print("Running in developer mode. Automatically filling in info.")
        print()
        info = {"domain": "develop"}
    else:
        domain_question = {
            "domain": {
                "prompt": "What is the domain?",
                "validators": [
                    CHECK_EMPTY,
                    [
                        "Domains cannot contain spaces or special characters.",
                        lambda value: value == slugify(value),
                    ],
                ],
            }
        }
        info = _gather_info(domain_question)

    manifest = COMPONENT_DIR / info["domain"] / "manifest.json"
    info["is_new"] = not manifest.exists()
    if not info["is_new"]:
        return _load_existing_integration(info["domain"])

    if arguments.develop:
        info.update(
            {
                "name": "Develop Hub",
                "codeowner": "@developer",
                "requirement": "aiodevelop==1.2.3",
                "oauth2": True,
            }
        )
    else:
        info.update(gather_new_integration(arguments.template == "integration"))

    return Info(**info)
32,474
def get_git_version():
    """
    Get the version from git.

    Returns the stripped output of ``git describe --tags`` (bytes).
    """
    cmd = ['git', 'describe', '--tags']
    return subprocess.check_output(cmd).strip()
32,475
def test_gamma_map_vol_sphere():
    """Gamma MAP with a sphere forward and volumic source space."""
    evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0),
                          proj=False)
    evoked.resample(50, npad=100)
    evoked.crop(tmin=0.1, tmax=0.16)  # crop to window around peak

    cov = read_cov(fname_cov)
    cov = regularize(cov, evoked.info, rank=None)

    # Build a sphere BEM and a volumetric source space, MEG-only forward.
    info = evoked.info
    sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
    src = mne.setup_volume_source_space(subject=None, pos=30., mri=None,
                                        sphere=(0.0, 0.0, 0.0, 0.08),
                                        bem=None, mindist=5.0,
                                        exclude=2.0, sphere_units='m')
    fwd = mne.make_forward_solution(info, trans=None, src=src, bem=sphere,
                                    eeg=False, meg=True)

    alpha = 0.5
    # loose orientations are invalid for a volume source space.
    pytest.raises(ValueError, gamma_map, evoked, fwd, cov, alpha,
                  loose=0, return_residual=False)
    pytest.raises(ValueError, gamma_map, evoked, fwd, cov, alpha,
                  loose=0.2, return_residual=False)

    stc = gamma_map(evoked, fwd, cov, alpha, tol=1e-4,
                    xyz_same_gamma=False, update_mode=2,
                    return_residual=False)
    assert_array_almost_equal(stc.times, evoked.times, 5)

    # Compare orientation obtained using fit_dipole and gamma_map
    # for a simulated evoked containing a single dipole
    stc = mne.VolSourceEstimate(50e-9 * np.random.RandomState(42).randn(1, 4),
                                vertices=stc.vertices[:1],
                                tmin=stc.tmin,
                                tstep=stc.tstep)
    evoked_dip = mne.simulation.simulate_evoked(fwd, stc, info, cov, nave=1e9,
                                                use_cps=True)

    dip_gmap = gamma_map(evoked_dip, fwd, cov, 0.1, return_as_dipoles=True)

    # Keep the dipole with the largest amplitude and check its position
    # lies on a source-space grid point.
    amp_max = [np.max(d.amplitude) for d in dip_gmap]
    dip_gmap = dip_gmap[np.argmax(amp_max)]
    assert (dip_gmap[0].pos[0] in src[0]['rr'][stc.vertices])

    # Orientations from fit_dipole and gamma_map should (anti-)align.
    dip_fit = mne.fit_dipole(evoked_dip, cov, sphere)[0]
    assert (np.abs(np.dot(dip_fit.ori[0], dip_gmap.ori[0])) > 0.99)
32,476
def get_mem_access_map(program, numpy_types=True, count_redundant_work=False,
                       subgroup_size=None):
    """Count the number of memory accesses in a loopy kernel.

    :arg knl: A :class:`loopy.LoopKernel` whose memory accesses are to be
        counted.

    :arg numpy_types: A :class:`bool` specifying whether the types in the
        returned mapping should be numpy types instead of
        :class:`loopy.LoopyType`.

    :arg count_redundant_work: Based on usage of hardware axes or other
        specifics, a kernel may perform work redundantly. This :class:`bool`
        flag indicates whether this work should be included in the count.
        (Likely desirable for performance modeling, but undesirable for code
        optimization.)

    :arg subgroup_size: An :class:`int`, :class:`str` ``'guess'``, or
        *None* that specifies the sub-group size. An OpenCL sub-group is an
        implementation-dependent grouping of work-items within a work-group,
        analogous to an NVIDIA CUDA warp. subgroup_size is used, e.g., when
        counting a :class:`MemAccess` whose count_granularity specifies that
        it should only be counted once per sub-group. If set to *None* an
        attempt to find the sub-group size using the device will be made, if
        this fails an error will be raised. If a :class:`str` ``'guess'`` is
        passed as the subgroup_size, get_mem_access_map will attempt to find
        the sub-group size using the device and, if unsuccessful, will make a
        wild guess.

    :return: A :class:`ToCountMap` of **{** :class:`MemAccess` **:**
        :class:`islpy.PwQPolynomial` **}**.

        - The :class:`MemAccess` specifies the characteristics of the memory
          access.

        - The :class:`islpy.PwQPolynomial` holds the number of memory
          accesses with the characteristics specified in the key (in terms
          of the :class:`loopy.LoopKernel` *inames*).

    Example usage::

        # (first create loopy kernel and specify array data types)

        params = {'n': 512, 'm': 256, 'l': 128}
        mem_map = get_mem_access_map(knl)

        f32_s1_g_ld_a = mem_map[MemAccess(
                            mtype='global',
                            dtype=np.float32,
                            lid_strides={0: 1},
                            gid_strides={0: 256},
                            direction='load',
                            variable='a',
                            count_granularity=CountGranularity.WORKITEM)
                        ].eval_with_dict(params)
        f32_s1_g_st_a = mem_map[MemAccess(
                            mtype='global',
                            dtype=np.float32,
                            lid_strides={0: 1},
                            gid_strides={0: 256},
                            direction='store',
                            variable='a',
                            count_granularity=CountGranularity.WORKITEM)
                        ].eval_with_dict(params)

        # (now use these counts to, e.g., predict performance)

    """
    from loopy.preprocess import preprocess_program, infer_unknown_types

    program = infer_unknown_types(program, expect_completion=True)
    program = preprocess_program(program)

    access_map = ToCountMap()
    callables_count = program.callables_table.callables_count

    for func_id, in_knl_callable in program.callables_table.items():
        if isinstance(in_knl_callable, CallableKernel):
            knl = in_knl_callable.subkernel
            knl_access_map = get_mem_access_map_for_single_kernel(
                knl, program.callables_table, numpy_types,
                count_redundant_work, subgroup_size)

            # FIXME: didn't see any easy way to multiply
            for _ in range(callables_count[func_id]):
                access_map += knl_access_map
        elif isinstance(in_knl_callable, ScalarCallable):
            # Scalar callables perform no array accesses.
            pass
        else:
            # BUG FIX: corrected typo in error message ("callabke types").
            raise NotImplementedError("Unknown callable type %s." % (
                type(in_knl_callable).__name__))

    return access_map
32,477
def __is_geotagging_input(question_input, _):
    """Validates the specified geotagging input configuration.

    A geotagging input configuration contains the following optional fields:
    - location: a string that specifies the input's initial location.

    Args:
        question_input (dict): An input configuration to validate.

    Returns:
        <bool, str|None>: A pair containing the value True if the specified
            configuration is valid, False otherwise; as well as an error
            message in case it is invalid.
    """
    location = question_input.get("location")
    if location is None:
        # 'location' is optional; absence is valid.
        return (True, None)

    message = "A geotagging input's 'location' field must be a non-empty string."
    try:
        empty = is_empty_string(location)
    except TypeError:
        # Non-string values (is_empty_string raising) are invalid too.
        return (False, message)
    if empty:
        return (False, message)
    return (True, None)
32,478
def _be_num_input(num_type, than, func=_ee_num_input, text='',
                  error_text="Enter number great or equal than ",
                  error_text_format_bool=True,
                  error_text_format="Enter number great or equal than {}",
                  pause=True, pause_text_bool=True,
                  pause_text='Press Enter...', clear=True,
                  error_text_input="Enter number!", pause_input=True,
                  pause_input_text_bool=True, pause_input_text=True,
                  clear_input=True, error_text_bool=True,
                  error_text_input_bool=True, sleep_bool=True, sleep_time=1,
                  sleep_text_bool=True, sleep_format_text_bool=True,
                  sleep_text="Sleeping time",
                  sleep_format_text="Sleep for {} seconds!",
                  sleep_bool_input=True, sleep_time_input=1,
                  sleep_text_bool_input=True,
                  sleep_format_text_bool_input=True,
                  sleep_text_input="Sleeping time",
                  sleep_format_text_input="Sleep for {} seconds!"):
    """Prompt for a number that must be greater than or equal to ``than``.

    Thin wrapper that forwards every option unchanged to ``func``
    (``_ee_num_input`` by default), adding only the comparison selector.

    :param num_type: type the input is converted to (e.g. int or float)
    :param than: lower bound the entered number must satisfy
    :param func: function that performs the actual prompting loop
    :param text: prompt text shown on input
    :param error_text: text shown when the bound check fails
    :param error_text_format_bool: show the formatted error text instead
        of the plain one
    :param error_text_format: format string for the bound-check error
    :param pause / pause_text_bool / pause_text: pause behaviour after an
        out-of-range value (and ``*_input`` variants after a parse error)
    :param clear / clear_input: clear the console after each attempt
    :param error_text_input: text shown when parsing the input fails
    :param error_text_bool / error_text_input_bool: toggle the respective
        error messages
    :param sleep_* / sleep_*_input: sleep duration and messaging after the
        respective error kinds
    :return: the validated number
    """
    # eq='<=<=' selects the "greater-or-equal" comparison mode in
    # _ee_num_input — TODO confirm against _ee_num_input's docs.
    return func(num_type=num_type, eq='<=<=', than=than, text=text,
                error_text=error_text,
                error_text_format_bool=error_text_format_bool,
                error_text_format=error_text_format, pause=pause,
                pause_text_bool=pause_text_bool, pause_text=pause_text,
                clear=clear, error_text_input=error_text_input,
                pause_input=pause_input,
                pause_input_text_bool=pause_input_text_bool,
                pause_input_text=pause_input_text, clear_input=clear_input,
                error_text_bool=error_text_bool,
                error_text_input_bool=error_text_input_bool,
                sleep_bool_input=sleep_bool_input,
                sleep_time_input=sleep_time_input,
                sleep_text_bool_input=sleep_text_bool_input,
                sleep_format_text_bool_input=sleep_format_text_bool_input,
                sleep_text_input=sleep_text_input,
                sleep_format_text_input=sleep_format_text_input,
                sleep_bool=sleep_bool, sleep_time=sleep_time,
                sleep_text_bool=sleep_text_bool,
                sleep_format_text_bool=sleep_format_text_bool,
                sleep_text=sleep_text, sleep_format_text=sleep_format_text)
32,479
def createParetoFig(_pareto_df, _bestPick):
    """
    Initalize figure and axes objects using pyplot for pareto curve

    Parameters
    ----------
    _pareto_df : Pandas DataFrame
        DataFrame from Yahoo_fin that contains all the relevant options data
    _bestPick : Pandas Series
        Option data for the best pick given the user input settings

    Returns
    -------
    pareto_fig : matplotlib figure object
        figure used to plot the stockPareto data from the _pareto_df input
    pareto_ax : matplotlib axes object
        axes object holding the scatter of _pareto_df plus crosshair lines
        marking the best pick
    """
    # Build the figure/axes pair the scatter will be drawn onto.
    curve_fig = Figure(figsize=(6, 6), dpi=100)
    curve_ax = curve_fig.add_subplot(111)
    curve_ax.set_title('Pareto Curve of Available Options in DOW JONES Index')

    # Scatter first, then override the column-derived axis labels.
    _pareto_df.plot.scatter(x='POP', y='Potential Gain Multiple Contracts', ax=curve_ax)
    curve_ax.set_xlabel('Probability of Profit (%)')
    curve_ax.set_ylabel('Potential Gain ($)')

    # Dashed green crosshair highlighting the selected best option.
    for draw_line, coordinate in (
            (curve_ax.axvline, _bestPick['POP']),
            (curve_ax.axhline, _bestPick['Potential Gain Multiple Contracts'])):
        draw_line(coordinate, color='green', ls='--')

    return curve_fig, curve_ax
32,480
def _try_to_add_secondary_path(context: DependencyContext, dependency: Dependency, key: str, name: str,
                               path_set: DependencyPathSet, signatures: Optional[Dict[str, str]] = None):
    """
    Attempt to resolve a secondary artifact (e.g., sources or javadoc) and,
    when the resolution succeeds, register it on the given path set.

    :param context: the current dependency context in play.
    :param dependency: the dependency we are to resolve.
    :param key: the key by which the secondary path will be known.
    :param name: the name of the secondary path.
    :param path_set: the path set to add a successfully isolated path to.
    :param signatures: the set of signatures to verify against (if any).
    """
    local_path = context.to_local_path(dependency, name, signatures)
    # Silently skip when the artifact could not be localized.
    if not local_path:
        return
    path_set.add_secondary_path(key, local_path)
32,481
def Plot_SNR(var_x,sample_x,var_y,sample_y,SNRMatrix,
             fig=None,ax=None,
             display=True,dl_axis=False,lb_axis=False,
             smooth_contours=False,
             cfill=True,
             display_cbar=True,x_axis_label=True,y_axis_label=True,
             logLevels_min=-1.0,logLevels_max=0.0,
             hspace=0.15,wspace=0.1,
             contour_kwargs={},contourf_kwargs={},
             xticklabels_kwargs={},xlabels_kwargs={},
             yticklabels_kwargs={},ylabels_kwargs={}):
    """Plots the SNR contours from calcSNR

    Parameters
    ----------
    fig : object
        matplotlib figure object on which to collate the individual plots
    ax : object
        matplotlib axes object on which to plot the individual plot
    var_x : str
        x-axis variable
    sample_x : array
        samples at which SNRMatrix was calculated corresponding to the x-axis variable
    var_y : str
        y-axis variable
    sample_y : array
        samples at which SNRMatrix was calculated corresponding to the y-axis variable
    SNRMatrix : array-like
        the matrix at which the SNR was calculated corresponding to the
        particular x and y-axis variable choices

    display : bool, optional
        Option to turn off display if saving multiple plots to a file
    dl_axis : bool, optional
        Option to turn on the right hand side labels of luminosity distance
    lb_axis : bool, optional
        Option to turn on the right hand side labels of lookback time
    smooth_contours : bool, optional
        Option to interpolate contours to a finer mesh size to appear smooth
        instead of tiered contours
    cfill : bool, optional
        Option to use filled contours or not, default is True
    display_cbar : bool, optional
        Option to display the colorbar on the axes object
    x_axis_label : bool, optional
        Option to display the x axis label
    y_axis_label : bool, optional
        Option to display the y axis label
    logLevels_min : float, optional
        Sets the minimum log level of the colorbar; the default -1.0 is a
        sentinel meaning "use log10(1) = 0"
    logLevels_max : float, optional
        Sets the maximum log level of the colorbar; the default 0.0 is a
        sentinel meaning "use the log maximum value of SNRMatrix"
    hspace : float, optional
        Sets the horizontal space between axes objects, default is 0.15
    wspace : float, optional
        Sets the horizontal space between axes objects, default is 0.1
    contour_kwargs : dict, optional
        Sets additional kwargs taken by contour in matplotlib
    contourf_kwargs : dict, optional
        Sets additional kwargs taken by contourf in matplotlib
    xticklabels_kwargs : dict, optional
        Sets additional kwargs taken by xticklabel in matplotlib
    xlabels_kwargs : dict, optional
        Sets additional kwargs taken by xlabel in matplotlib
    yticklabels_kwargs : dict, optional
        Sets additional kwargs taken by yticklabel in matplotlib
    ylabels_kwargs : dict, optional
        Sets additional kwargs taken by ylabel in matplotlib

    Notes
    -----
    NOTE(review): the ``*_kwargs`` parameters use mutable ``{}`` defaults and
    are mutated below (colors/linewidths/cmap are inserted), so defaults are
    shared between calls and caller-supplied dicts are modified in place --
    consider switching to ``None`` sentinels.
    """
    # Reuse the supplied figure/axes pair when both are given; otherwise
    # create a fresh pair.
    if fig is not None:
        if ax is not None:
            pass
        else:
            fig,ax = plt.subplots()
    else:
        fig,ax = plt.subplots()

    # Fill in default styling only when the caller has not set it
    # (NOTE: this mutates the caller's dicts / the shared defaults).
    if 'colors' not in contour_kwargs.keys():
        contour_kwargs['colors'] = 'k'
    if 'linewidths' not in contour_kwargs.keys():
        contour_kwargs['linewidths'] = 2.0
    if 'cmap' not in contourf_kwargs.keys():
        contourf_kwargs['cmap'] = 'viridis'

    # All contour levels are expressed in log10(SNR).
    logSNR = np.log10(SNRMatrix)
    if logLevels_min == -1.0:
        # Sentinel default: start the levels at SNR = 1 (kept as a length-1
        # array so the np.concatenate/np.linspace calls below work).
        logLevels_min = np.log10(np.array([1.]))
    if logLevels_max == 0.0:
        # Sentinel default: top level from the data itself.
        logLevels_max = np.ceil(np.amax(logSNR))
    if logLevels_max < logLevels_min:
        # NOTE(review): message mentions 5 but the default minimum level is
        # SNR = 1 -- confirm which threshold is intended.
        raise ValueError('All SNRs are lower than 5.')
    # Extra labelled levels at SNR = 3, 10, 31 between the decades.
    logLevels_add = np.log10(np.array([3.,10.,31.]))
    print_logLevels = np.concatenate((logLevels_min,logLevels_add,np.arange(2.,logLevels_max+1.)))
    if smooth_contours:
        # Fine level grid so filled contours look continuous; [:,0] relies on
        # logLevels_min being the length-1 array set above.
        logLevels = np.linspace(logLevels_min,logLevels_max,100)[:,0]
    else:
        logLevels = print_logLevels

    ylabel_min = min(sample_y)
    ylabel_max = max(sample_y)
    xlabel_min = min(sample_x)
    xlabel_max = max(sample_x)

    #Set whether log or linearly spaced axes
    if xlabel_max < 0.0 or xlabel_min < 0.0 or var_x in ['n_p','T_obs']:
        # Negative ranges and the discrete variables n_p/T_obs get a linear axis.
        xaxis_type = 'lin'
        x_labels = np.linspace(xlabel_min,xlabel_max,xlabel_max-xlabel_min+1)
    else:
        x_log_range = np.log10(xlabel_max) - np.log10(xlabel_min)
        if x_log_range >= 2.:
            # Two or more decades: log axis with one tick per decade.
            xaxis_type = 'log'
            x_labels = np.logspace(np.log10(xlabel_min),np.log10(xlabel_max),np.log10(xlabel_max)-np.log10(xlabel_min)+1)
        else:
            # Narrow range: linear ticks scaled to the leading power of ten.
            xaxis_type = 'lin'
            x_scale = 10**round(np.log10(xlabel_min))
            x_labels = np.arange(round(xlabel_min/x_scale),round(xlabel_max/x_scale)+1,1)*x_scale

    # Same axis-type selection for y.
    if ylabel_max < 0.0 or ylabel_min < 0.0 or var_y in ['n_p','T_obs']:
        yaxis_type = 'lin'
        y_labels = np.linspace(ylabel_min,ylabel_max,ylabel_max-ylabel_min+1)
    else:
        y_log_range = np.log10(ylabel_max) - np.log10(ylabel_min)
        if y_log_range >= 2.:
            yaxis_type = 'log'
            y_labels = np.logspace(np.log10(ylabel_min),np.log10(ylabel_max),np.log10(ylabel_max)-np.log10(ylabel_min)+1)
        else:
            yaxis_type = 'lin'
            y_scale = 10**round(np.log10(ylabel_min))
            y_labels = np.arange(round(ylabel_min/y_scale),round(ylabel_max/y_scale)+1,1)*y_scale

    #Set axis scales based on what data sampling we used
    # Log axes are drawn by plotting log10 of the samples on a linear axis;
    # with cfill the labelled line contours are drawn on top of the fill.
    if yaxis_type == 'lin' and xaxis_type == 'log':
        if cfill == False:
            CS1 = ax.contour(np.log10(sample_x),sample_y,logSNR,print_logLevels,**contour_kwargs)
        else:
            CS1 = ax.contourf(np.log10(sample_x),sample_y,logSNR,logLevels,**contourf_kwargs)
            ax.contour(np.log10(sample_x),sample_y,logSNR,print_logLevels,**contour_kwargs)
        ax.set_xlim(np.log10(xlabel_min),np.log10(xlabel_max))
        ax.set_ylim(ylabel_min,ylabel_max)
    elif yaxis_type == 'log' and xaxis_type == 'lin':
        if cfill == False:
            CS1 = ax.contour(sample_x,np.log10(sample_y),logSNR,print_logLevels,**contour_kwargs)
        else:
            CS1 = ax.contourf(sample_x,np.log10(sample_y),logSNR,logLevels,**contourf_kwargs)
            ax.contour(sample_x,np.log10(sample_y),logSNR,print_logLevels,**contour_kwargs)
        ax.set_xlim(xlabel_min,xlabel_max)
        ax.set_ylim(np.log10(ylabel_min),np.log10(ylabel_max))
    elif yaxis_type == 'lin' and xaxis_type == 'lin':
        if cfill == False:
            CS1 = ax.contour(sample_x,sample_y,logSNR,print_logLevels,**contour_kwargs)
        else:
            CS1 = ax.contourf(sample_x,sample_y,logSNR,logLevels,**contourf_kwargs)
            ax.contour(sample_x,sample_y,logSNR,print_logLevels,**contour_kwargs)
        ax.set_xlim(xlabel_min,xlabel_max)
        ax.set_ylim(ylabel_min,ylabel_max)
    else:
        if cfill == False:
            CS1 = ax.contour(np.log10(sample_x),np.log10(sample_y),logSNR,print_logLevels,**contour_kwargs)
        else:
            CS1 = ax.contourf(np.log10(sample_x),np.log10(sample_y),logSNR,logLevels,**contourf_kwargs)
            ax.contour(np.log10(sample_x),np.log10(sample_y),logSNR,print_logLevels,**contour_kwargs)
        ax.set_xlim(np.log10(xlabel_min),np.log10(xlabel_max))
        ax.set_ylim(np.log10(ylabel_min),np.log10(ylabel_max))

    # Delegate tick/label formatting for each axis.
    Get_Axes_Labels(ax,'x',var_x,x_labels,xlabels_kwargs,xticklabels_kwargs)
    Get_Axes_Labels(ax,'y',var_y,y_labels,ylabels_kwargs,yticklabels_kwargs)

    if not x_axis_label:
        ax.set_xticklabels('')
        ax.set_xlabel('')
    if not y_axis_label:
        ax.set_yticklabels('')
        ax.set_ylabel('')

    #If true, display luminosity distance on right side of plot
    if dl_axis:
        if var_y != 'z':
            raise ValueError('Sorry, we can only plot luminosity distance when redshift is on the y axis.')
        #Set other side y-axis for luminosity distance scalings
        ax2 = ax.twinx()
        #Set axis scales based on what data sampling we used
        # Re-draw the line contours on the twin so its limits match ax.
        if yaxis_type == 'lin' and xaxis_type == 'log':
            ax2.contour(np.log10(sample_x),sample_y,logSNR,print_logLevels,**contour_kwargs)
        elif yaxis_type == 'log' and xaxis_type == 'lin':
            ax2.contour(sample_x,np.log10(sample_y),logSNR,print_logLevels,**contour_kwargs)
        else:
            ax2.contour(np.log10(sample_x),np.log10(sample_y),logSNR,print_logLevels,**contour_kwargs)
        # Decade-spaced luminosity distances spanning the redshift range.
        dists_min = cosmo.luminosity_distance(ylabel_min).to('Gpc')
        dists_min = np.ceil(np.log10(dists_min.value))
        dists_max = cosmo.luminosity_distance(ylabel_max).to('Gpc')
        dists_max = np.ceil(np.log10(dists_max.value))
        dists = np.arange(dists_min,dists_max)
        dists = 10**dists*u.Gpc
        # Invert D_L(z) to place each distance tick at its redshift.
        distticks = [z_at_value(cosmo.luminosity_distance,dist) for dist in dists]
        #Set other side y-axis for lookback time scalings
        ax2.set_yticks(np.log10(distticks))
        #ax2.set_yticklabels(['%f' %dist for dist in distticks],fontsize = axissize)
        ax2.set_yticklabels([r'$10^{%i}$' %np.log10(dist) if np.abs(int(np.log10(dist))) > 1 else '{:g}'.format(dist) for dist in dists.value])
        ax2.set_ylabel(r'$D_{L}$ [Gpc]')
        #cbar = fig.colorbar(CS1,cax=cbar_ax,ax=(ax,ax2),pad=0.01,ticks=print_logLevels)
    elif lb_axis:
        if var_y != 'z':
            raise ValueError('Sorry, we can only plot lookback time when redshift is on the y axis.')
        #Set other side y-axis for lookback time scalings
        ax2 = ax.twinx()
        #Set axis scales based on what data sampling we used
        if yaxis_type == 'lin' and xaxis_type == 'log':
            ax2.contour(np.log10(sample_x),sample_y,logSNR,print_logLevels,**contour_kwargs)
        elif yaxis_type == 'log' and xaxis_type == 'lin':
            ax2.contour(sample_x,np.log10(sample_y),logSNR,print_logLevels,**contour_kwargs)
        else:
            ax2.contour(np.log10(sample_x),np.log10(sample_y),logSNR,print_logLevels,**contour_kwargs)
        # Fixed set of cosmic ages to mark on the right-hand axis.
        ages1 = np.array([13.5,13,10,5,1])*u.Gyr
        ages2 = np.array([500,100,10,1])*u.Myr
        ages2 = ages2.to('Gyr')
        ages = np.hstack((ages1.value,ages2.value))
        ages = ages*u.Gyr
        # Invert age(z) to place each age tick at its redshift.
        ageticks = [z_at_value(cosmo.age,age) for age in ages]
        #Set axes limits
        ax2.set_yticks(np.log10(ageticks))
        ax2.set_yticklabels(['{:g}'.format(age) for age in ages.value])
        ax2.set_ylabel(r'$t_{\rm cosmic}$ [Gyr]')
        ax2.yaxis.set_label_coords(1.2,.5)

    if display_cbar:
        if lb_axis or dl_axis:
            # Leave room on the right for the twin axis, colorbar further out.
            fig.subplots_adjust(right=0.8)
            cbar_ax = fig.add_axes([0.9, 0.15, 0.025, 0.7])
            #Make colorbar
            if cfill == False:
                #Make colorbar
                # Line contours carry no fill, so build the colorbar manually.
                norm= colors.Normalize(vmin=logLevels_min, vmax=logLevels_max)
                tick_levels = np.linspace(float(logLevels_min),logLevels_max,len(print_logLevels))
                cbar = mpl.colorbar.ColorbarBase(cbar_ax,ax=(ax,ax2),pad=0.01,cmap=CS1.cmap,norm=norm,boundaries=tick_levels,
                    ticks=tick_levels,spacing='proportional')
            else:
                cbar = fig.colorbar(CS1,cax=cbar_ax,ax=(ax,ax2),pad=0.01)
        else:
            fig.subplots_adjust(right=0.8)
            cbar_ax = fig.add_axes([0.82, 0.15, 0.025, 0.7])
            if cfill == False:
                #Make colorbar
                norm= colors.Normalize(vmin=logLevels_min, vmax=logLevels_max)
                tick_levels = np.linspace(float(logLevels_min),logLevels_max,len(print_logLevels))
                cbar = mpl.colorbar.ColorbarBase(cbar_ax,cmap=CS1.cmap,norm=norm,boundaries=tick_levels,
                    ticks=tick_levels,spacing='proportional')
            else:
                #Make colorbar
                cbar = fig.colorbar(CS1, cax=cbar_ax,ticks=print_logLevels)
        #cbar.set_label(r'$SNR$')
        # Show decades as powers of ten, small levels as plain integers.
        cbar.ax.set_yticklabels([r'$10^{%i}$' %x if int(x) > 1 else r'$%i$' %(10**x) for x in print_logLevels],**yticklabels_kwargs)

    if display:
        #fig.tight_layout()
        fig.subplots_adjust(hspace=hspace,wspace=wspace)
        plt.show()
32,482
def rem4(rings, si):
    """Find whether the silicon atom lies inside any 4-membered ring.

    For each ring the two longest member-to-member separations (the
    diagonals) are discarded, leaving the ring's edges.  The ring's area is
    then compared with the sum of the triangle areas formed by ``si`` and
    each edge: the two agree exactly only when ``si`` lies inside the ring.

    :param rings: iterable of rings, each a sequence of member coordinates
        accepted by the ``distance``/``triarea``/``ringarea`` helpers
    :param si: coordinates of the silicon atom
    :return: ``"n"`` if ``si`` is inside some ring, ``"y"`` otherwise
    """
    for ring in rings:
        # All pairwise separations between ring members, with the index
        # pair recorded alongside each distance.
        pair_dists = []
        pairs = []
        for a in range(len(ring) - 1):
            for b in range(a + 1, len(ring)):
                pair_dists.append(distance(ring[a], ring[b]))
                pairs.append([a, b])
        # BUGFIX: the original also appended the bare int ``len(ring)`` to
        # the pair list; it survived the deletions below and made
        # ``locations[n][0]`` raise TypeError in the area loop.
        # Drop the two longest separations (the diagonals), keeping edges.
        for _ in range(2):
            longest = pair_dists.index(max(pair_dists))
            del pairs[longest]
            del pair_dists[longest]
        # Sum the areas of the triangles formed by si and each edge.
        triangles = 0
        for a, b in pairs:
            triangles += triarea(ring[a], ring[b], si)
        # NOTE(review): exact float equality -- presumably the helpers
        # return values that match bit-for-bit when si is inside; consider
        # a tolerance (math.isclose) if they do real arithmetic.
        if ringarea(ring) == triangles:
            return "n"
    return "y"
32,483
def ilogit(x):
    """Return the inverse logit, exp(x) / (1 + exp(x))."""
    odds = exp(x)
    return odds / (1.0 + odds)
32,484
def get_out_of_bounds_func(limits, bounds_check_type="cube"):
    """Return a function producing a boolean array, True for param rows that are out of bounds.

    :param limits: pair ``(lower, upper)`` of per-parameter bounds,
        broadcastable against the parameter rows
    :param bounds_check_type: bounds-check strategy; only ``"cube"`` is
        currently supported
    :return: callable mapping a parameter array to a boolean out-of-bounds mask
    :raises ValueError: if ``bounds_check_type`` is not ``"cube"``
    """
    if bounds_check_type == "cube":

        def out_of_bounds(params):
            """ "cube" bounds_check_type; checks each parameter independently"""
            # A row is in bounds only when every component lies within its
            # [lower, upper] interval; negate to flag out-of-bounds rows.
            # BUGFIX: np.alltrue was deprecated and removed in NumPy 2.0;
            # np.all is the supported spelling with identical semantics.
            in_bounds = np.logical_and(limits[0] <= params, params <= limits[1])
            return ~np.all(in_bounds, axis=-1)

    else:
        raise ValueError(
            f'Only "cube" bounds checks are currently supported; You selected {bounds_check_type}'
        )
    return out_of_bounds
32,485
def inspectors_for_each_mode(lead_type="lead_inspector") -> Dict[str, Set[str]]:
    """
    Group the (deputy) lead inspectors of every organisation by submode.

    :param lead_type: organisation attribute to collect; must be
        ``"lead_inspector"`` or ``"deputy_lead_inspector"``
    :return: mapping of submode descriptor to the set of inspectors
    :raises ValueError: for any other ``lead_type``
    """
    if lead_type not in ["lead_inspector", "deputy_lead_inspector"]:
        raise ValueError("Can only query for lead_inspector and deputy_lead_inspector attributes.")
    grouped = {}
    for submode in Submode.objects.all():
        # Collect the inspector attribute across the submode's organisations,
        # then drop organisations that have none assigned.
        leads = {getattr(org, lead_type) for org in submode.organisation_set.all()}
        leads.discard(None)
        grouped[submode.descriptor] = leads
    return grouped
32,486
def add_shortcut_to_desktop_for_module(name):
    """
    Adds a shortcut on a module which includes a script.

    @param      name        name of the module
    @return                 shortcut was added or not

    Only the ``spyder`` module is currently supported; any other name
    raises ``NotImplementedError``.
    """
    if name != "spyder":
        raise NotImplementedError(
            "nothing implemented for module: {0}".format(name))

    from .link_shortcuts import add_shortcut_to_desktop, suffix
    from .module_install import ModuleInstall

    module = ModuleInstall("spyder", "exe", script="spyder.bat")
    script_path = module.Script
    # No script on disk means there is nothing to link to.
    if not os.path.exists(script_path):
        return False
    version = suffix()
    label = name + "." + version
    shortcut = add_shortcut_to_desktop(script_path, label, label)
    return os.path.exists(shortcut)
32,487
def is_hdf_file(f):
    """Checks if the given file object is recognized as a HDF file.

    :type f: str | tables.File
    :param f: The file object. Either a str object holding the file name
        or a HDF file instance.
    :return bool: True when ``f`` is a ``.hdf``/``.h5`` file name or a
        PyTables ``File`` instance, False otherwise.
    """
    # Decide on the extension alone for plain file names; this also lets
    # the string check work when PyTables is not installed.
    if isinstance(f, str):
        return f.endswith(('.hdf', '.h5'))
    # Import lazily: PyTables is only needed to recognize File instances.
    import tables
    return isinstance(f, tables.File)
32,488
def dummy_receivers(request, dummy_streamers):
    """Provides `acquire.Receiver` objects for dummy devices.

    Either constructs by giving source ID, or by mocking user input
    (the mocked input selects the streamer by its position in the list).
    """
    built = {}
    for position, (_, _, source_id, _) in enumerate(dummy_streamers):
        # The receiver prompt is answered with the streamer's index.
        with mock.patch('builtins.input', side_effect=str(position)):
            built[source_id] = request.param(source_id=source_id,
                                             autostart=False)

    def teardown():
        # Stop every receiver and drop our reference to it.
        for _, receiver in built.items():
            receiver.stop()
            del receiver

    request.addfinalizer(teardown)
    return built
32,489
def test_parse_one_histogram(p_check, mocked_prometheus_scraper_config):
    """Parse a single Prometheus histogram family and compare it with the
    expected ``HistogramMetricFamily``.  The payload corresponds to this
    protobuf structure:

    name: "etcd_disk_wal_fsync_duration_seconds"
    help: "The latency distributions of fsync called by wal."
    type: HISTOGRAM
    metric {
      histogram {
        sample_count: 4
        sample_sum: 0.026131671
        bucket { cumulative_count: 2  upper_bound: 0.001 }
        bucket { cumulative_count: 2  upper_bound: 0.002 }
        bucket { cumulative_count: 2  upper_bound: 0.004 }
        bucket { cumulative_count: 2  upper_bound: 0.008 }
        bucket { cumulative_count: 4  upper_bound: 0.016 }
        bucket { cumulative_count: 4  upper_bound: 0.032 }
        bucket { cumulative_count: 4  upper_bound: 0.064 }
        bucket { cumulative_count: 4  upper_bound: 0.128 }
        bucket { cumulative_count: 4  upper_bound: 0.256 }
        bucket { cumulative_count: 4  upper_bound: 0.512 }
        bucket { cumulative_count: 4  upper_bound: 1.024 }
        bucket { cumulative_count: 4  upper_bound: 2.048 }
        bucket { cumulative_count: 4  upper_bound: 4.096 }
        bucket { cumulative_count: 4  upper_bound: 8.192 }
        bucket { cumulative_count: 4  upper_bound: inf }
      }
    }
    """
    # Raw text-format exposition as a Prometheus endpoint would serve it.
    text_data = (
        '# HELP etcd_disk_wal_fsync_duration_seconds The latency distributions of fsync called by wal.\n'
        '# TYPE etcd_disk_wal_fsync_duration_seconds histogram\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="0.001"} 2\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="0.002"} 2\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="0.004"} 2\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="0.008"} 2\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="0.016"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="0.032"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="0.064"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="0.128"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="0.256"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="0.512"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="1.024"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="2.048"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="4.096"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="8.192"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_bucket{le="+Inf"} 4\n'
        'etcd_disk_wal_fsync_duration_seconds_sum 0.026131671\n'
        'etcd_disk_wal_fsync_duration_seconds_count 4\n'
    )

    # Reference metric family built by hand from the same data.
    expected_etcd_metric = HistogramMetricFamily(
        'etcd_disk_wal_fsync_duration_seconds', 'The latency distributions of fsync called by wal.'
    )
    expected_etcd_metric.add_metric(
        [],
        buckets=[
            ('0.001', 2.0),
            ('0.002', 2.0),
            ('0.004', 2.0),
            ('0.008', 2.0),
            ('0.016', 4.0),
            ('0.032', 4.0),
            ('0.064', 4.0),
            ('0.128', 4.0),
            ('0.256', 4.0),
            ('0.512', 4.0),
            ('1.024', 4.0),
            ('2.048', 4.0),
            ('4.096', 4.0),
            ('8.192', 4.0),
            ('+Inf', 4.0),
        ],
        sum_value=0.026131671,
    )

    # Iter on the generator to get all metrics
    response = MockResponse(text_data, text_content_type)
    check = p_check
    metrics = [k for k in check.parse_metric_family(response, mocked_prometheus_scraper_config)]

    # Exactly one family must come out of the parser, matching the expected
    # one field by field (samples compared order-independently).
    assert 1 == len(metrics)
    current_metric = metrics[0]
    assert expected_etcd_metric.documentation == current_metric.documentation
    assert expected_etcd_metric.name == current_metric.name
    assert expected_etcd_metric.type == current_metric.type
    assert sorted(expected_etcd_metric.samples, key=lambda i: i[0]) == sorted(
        current_metric.samples, key=lambda i: i[0]
    )
32,490
def get_package_formats():
    """Get the list of available package formats and parameters."""
    # pylint: disable=fixme
    # HACK: This obviously isn't great, and it is subject to change as
    # the API changes, but it'll do for now as a interim method of
    # introspection to get the parameters we need.
    def get_parameters(cls):
        """Build parameters for a package format."""
        # Create a dummy instance so we can check if a parameter is required.
        # As with the rest of this function, this is obviously hacky. We'll
        # figure out a way to pull this information in from the API later.
        placeholder_kwargs = {attr: 'dummy' for attr in cls.swagger_types}
        probe = cls(**placeholder_kwargs)

        collected = {}
        for attr_name, attr_type in six.iteritems(cls.swagger_types):
            # Second doc line carries the description when present,
            # otherwise fall back to the first.
            doc_lines = getattr(cls, attr_name).__doc__.strip().split('\n')
            summary = (doc_lines[1] if doc_lines[1] else doc_lines[0]).strip()
            # Required attributes reject None assignment on the probe.
            try:
                setattr(probe, attr_name, None)
                is_required = False
            except ValueError:
                is_required = True
            collected[cls.attribute_map.get(attr_name)] = {
                'type': attr_type,
                'help': summary,
                'required': is_required
            }
        return collected

    return {
        name.replace('PackagesUpload', '').lower(): get_parameters(member)
        for name, member in inspect.getmembers(cloudsmith_api.models)
        if name.startswith('PackagesUpload')
    }
32,491
def construct_reverse_protocol(splitting="OVRVO"):
    """Run the steps in the reverse order, and for each step, use the
    time-reverse of that kernel."""
    dt_by_step = make_step_length_dict(splitting)
    # One reversed transition density per step, walking the splitting backwards.
    return [
        partial(reverse_kernel(step_mapping[step]), dt=dt_by_step[step])
        for step in reversed(splitting)
    ]
32,492
def tabinv(xarr, x):
    """
    Find the effective index in xarr of each element in x.

    The effective index for each element j in x is the value i such that
    :math:`xarr[i] <= x[j] <= xarr[i+1]`, to which is added an interpolation
    fraction based on the size of the intervals in xarr.

    Parameters
    ----------
    xarr : array-like
        The array of values to search
    x : float or array-like
        Value (or list of values) to look for in xarr

    Returns
    -------
    ieff : ndarray
        Effective index for each element of x (clamped to 0 at the low end;
        values past the end of ``xarr`` are reported as ``len(xarr)``)

    Raises
    ------
    ValueError
        If ``xarr`` has fewer than 2 elements or is not monotonic.
    """
    npoints = len(xarr)
    if npoints <= 1:
        raise ValueError("Search array must contain at least 2 elements")
    # NOTE(review): np.searchsorted below assumes ascending order, yet a
    # monotonically DEcreasing array passes this check -- confirm whether
    # decreasing inputs ever occur.
    if not (np.all(np.diff(xarr) >= 0) or (np.all(np.diff(xarr) <= 0))):
        raise ValueError("Search array must be monotonic")

    # BUGFIX: previously only non-sequence scalars were wrapped, so plain
    # list/tuple inputs crashed at the fancy-indexing step below.
    # np.atleast_1d covers scalars, lists, tuples and ndarrays uniformly.
    x = np.atleast_1d(x)

    # ieff contains values j1, ..., jn such that
    # ji = x where xarr[x-1] <= ji < xarr[x]
    # If no position is found, ji = len(xarr)
    ieff = np.searchsorted(xarr, x, side='right').astype(np.float64)

    # Interpolate only where a right-hand neighbour xarr[neff+1] exists.
    # NOTE(review): values falling in the last interval (searchsorted index
    # len(xarr)-1) are left un-interpolated by this condition -- presumably
    # acceptable for the callers, but worth confirming.
    g = np.where((ieff >= 0) & (ieff < (len(xarr) - 1)))
    # BUGFIX: was `len(g[0] > 0)` -- the length of a boolean array, not a
    # comparison (it happened to be truthy in the same cases, but only by
    # accident).
    if len(g) > 0 and len(g[0]) > 0:
        neff = ieff[g].astype(np.int32)
        x0 = xarr[neff].astype(np.float64)
        diff = x[g] - x0
        ieff[g] = neff + diff / (xarr[neff + 1] - x0)
    # Clamp negative effective indices (x below xarr[0]) to 0.
    ieff = np.where(ieff > 0., ieff, 0.)
    return ieff
32,493
def getldapconfig():
    """Return the current LDAP configuration.

    Re-reads the configuration file at ``srv_path`` until the ``Ldap``
    section with all four options is available, waiting 0.4s between
    attempts (the file may still be in the process of being written).

    :return: tuple ``(address, username, password, base)``; the stored
        ``$percent`` placeholder in the password is expanded back to ``%``.
    """
    # Retry in a loop: the original recursed on failure, which both risked
    # RecursionError on long waits and used a bare ``except`` that swallowed
    # every exception (including KeyboardInterrupt).
    while True:
        cfg = configparser.ConfigParser()
        cfg.read(srv_path)
        try:
            return (cfg.get('Ldap', 'ldap_address'),
                    cfg.get('Ldap', 'ldap_username'),
                    cfg.get('Ldap', 'ldap_password').replace("$percent", "%"),
                    cfg.get('Ldap', 'ldap_base'))
        except configparser.Error:
            # Section/option not present yet -- wait briefly and retry.
            sleep(0.4)
32,494
def rename_dict_key(_old_key, _new_key, _dict):
    """ renames a key in a dict without losing the order """
    renamed = {}
    for current_key, value in _dict.items():
        # Substitute the key in place; every other entry is copied verbatim,
        # so insertion order is preserved.
        renamed[_new_key if current_key == _old_key else current_key] = value
    return renamed
32,495
def api_browse_use_case() -> use_cases.APIBrowseUseCase:
    """Get use case instance."""
    use_case = use_cases.APIBrowseUseCase(items_repository)
    return use_case
32,496
def treeIntersectIds(node, idLookup, sampleSet, lookupFunc=None):
    """For each leaf in node, attempt to look up its label in idLookup; replace if found.
    Prune nodes with no matching leaves.  Store new leaf labels in sampleSet.
    If lookupFunc is given, it is passed two arguments (label, idLookup) and returns
    a possible empty list of matches.

    ``node`` is a dict with a 'kids' list (internal node) or a 'label'
    (leaf); it is modified in place where possible.  Returns the pruned
    node, or None when no leaf under it matched.
    """
    if (node['kids']):
        # Internal node: prune
        # Recurse into each child; keep only subtrees that survived.
        prunedKids = []
        for kid in (node['kids']):
            kidIntersected = treeIntersectIds(kid, idLookup, sampleSet, lookupFunc)
            if (kidIntersected):
                prunedKids.append(kidIntersected)
        if (len(prunedKids) > 1):
            node['kids'] = prunedKids
        elif (len(prunedKids) == 1):
            # Single surviving child: collapse this node into it.
            node = prunedKids[0]
        else:
            # No surviving children: prune this whole subtree.
            node = None
    else:
        # Leaf: lookup, prune if not found
        label = node['label']
        # Custom lookup takes precedence; otherwise use the dict directly.
        if (lookupFunc):
            matchList = lookupFunc(node['label'], idLookup)
        elif label in idLookup:
            matchList = idLookup[label]
        else:
            matchList = []
        if (not matchList):
            logging.info("No match for leaf '" + label + "'")
            node = None
        else:
            # Ambiguous matches are only warned about; the first match wins.
            if (len(matchList) != 1):
                logging.warn("Non-unique match for leaf '" + label + "': ['" + "', '".join(matchList) + "']")
            else:
                logging.debug(label + ' --> ' + matchList[0]);
            node['label'] = matchList[0]
            sampleSet.add(matchList[0])
    return node
32,497
def get_descriptive_verbs(tree, gender):
    """
    Returns a list of verbs describing pronouns of the given gender in the given dependency tree.

    :param tree: dependency tree for a document, output of **generate_dependency_tree**
    :param gender: `Gender` to search for usages of
    :return: List of verbs as strings
    """
    # POS tags of the verb forms we collect (base, past, non-3rd/3rd person
    # present). Membership test replaces the original repetitive `or` chain.
    verb_tags = {"VB", "VBD", "VBP", "VBZ"}
    verbs = []
    for sentence in tree:
        for governor, relation, dependent in sentence:
            # Keep the governing verb when its nsubj dependent is one of the
            # gender's identifier pronouns.
            if (relation == "nsubj"
                    and governor[1] in verb_tags
                    and dependent[0] in gender.identifiers):
                verbs.append(governor[0])
    return verbs
32,498
def client():
    """Return a client instance"""
    # Fixed test address used by the suite.
    instance = Client('192.168.1.1')
    return instance
32,499