content
stringlengths
22
815k
id
int64
0
4.91M
def content_loss_func(sess, model):
    """Content loss function defined in the paper (Gatys et al., neural style).

    Args:
        sess: active TensorFlow session used to evaluate the content features.
        model: dict of layer name -> tensor; the 'conv4_2' activations are used.

    Returns:
        Scalar tensor: the content loss between the content image features (p,
        evaluated once via sess.run) and the variable image features (x).
    """
    def _content_loss(p, x):
        # N is the number of filters at the layer
        N = p.shape[3]
        # M is the height * width of the feature map at the layer
        M = p.shape[1] * p.shape[2]
        # The paper's loss is (1/(4*N*M)) * SUM of squared differences.
        # The original used reduce_mean here, which divides by N*M a second
        # time on top of the 1/(4*N*M) prefactor.
        return (1 / (4 * N * M)) * tf.reduce_sum(tf.pow(x - p, 2))
    return _content_loss(sess.run(model["conv4_2"]), model["conv4_2"])
29,900
def Extract_from_DF_kmeans(dfdir,num,mode=True):
    """
    Load the plane DataFrame and, for each client_ip, write out the sequence
    of matching index numbers (k-means cluster labels) for that IP.

    When ``mode`` is False the sequences are rebuilt and overwritten even if
    they already exist; when True an existing name list short-circuits the run.
    """
    flag = exists("Database/KMeans/km_full_"+dfdir+"_database_name")  # whether the name list already exists
    if(flag and mode):return
    plane_df = joblib.load("./DFs/"+dfdir)
    result_df = joblib.load("./DFs/Results/KMeans/Result_km_"+str(num)+"_full_Input_"+dfdir+"_continuous")
    iplist=list(set(plane_df["client_ip"]))  # client IPs present in the loaded DF (duplicates removed via set)
    joblib.dump(iplist,"./List/iplist_"+dfdir)  # dump iplist: used when feeding each sequence to anomaly detection
    database = []  # sequences are appended here and dumped at the end
    database_name = []
    if(not(flag)):database_name = []  # one name per sequence; naming rule: (client_ip)_(server_ip)
    for ip in iplist:
        result_list = list(result_df.loc[list(plane_df[plane_df["client_ip"]==ip].index)].values.flatten())  # build the per-client-IP sequence
        database.append(result_list)
        database_name.append(ip)
        #if(len(list(set(result_list)))>1):print(" "+ip+"_"+sip+" : "+str(result_list))
    joblib.dump(database,"Database/KMeans/km_"+str(num)+"_full_"+dfdir+"_database")
    if(not(flag)):joblib.dump(database_name,"Database/KMeans/km_full_"+dfdir+"_database_name")
    return [database,database_name]
29,901
def get_workspace(workspace_id: Optional[str] = None,
                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkspaceResult:
    """
    Resource schema for AWS::IoTTwinMaker::Workspace

    :param str workspace_id: The ID of the workspace.
    """
    # Build the invoke arguments and make sure options carry a version.
    __args__ = {'workspaceId': workspace_id}
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()

    response = pulumi.runtime.invoke('aws-native:iottwinmaker:getWorkspace',
                                     __args__, opts=opts,
                                     typ=GetWorkspaceResult).value

    # Repackage the raw invoke result into the awaitable wrapper type.
    return AwaitableGetWorkspaceResult(
        arn=response.arn,
        creation_date_time=response.creation_date_time,
        description=response.description,
        role=response.role,
        s3_location=response.s3_location,
        tags=response.tags,
        update_date_time=response.update_date_time)
29,902
def find_open_port():
    """Return a TCP port number that was free at the time of the call.

    Binds a socket to port 0 so the OS picks an ephemeral free port, then
    closes the socket (the original leaked it) so the caller can bind it.

    Returns:
        int: an available port number. Note there is an inherent race: the
        port could be taken between this call and the caller's bind.
    """
    with socket.socket() as sock:
        sock.bind(('', 0))
        _host, port = sock.getsockname()
    return port
29,903
def split_list_round_robin(data: tp.Iterable, chunks_num: int) -> tp.List[list]:
    """Divide iterable into `chunks_num` lists"""
    # Pair each item with a bucket index cycling 0..chunks_num-1.
    buckets: tp.List[list] = [[] for _ in range(chunks_num)]
    for bucket_idx, item in zip(itertools.cycle(range(chunks_num)), data):
        buckets[bucket_idx].append(item)
    return buckets
29,904
def calc_Q_loss_FH_d_t(Q_T_H_FH_d_t, r_up):
    """Heat loss of the hot-water floor heating system.

    Args:
        Q_T_H_FH_d_t(ndarray): Treated heating load of the hot-water heating system [MJ/h]
        r_up(ndarray): Upward heat-release ratio of the dwelling unit's hot-water floor heating [-]

    Returns:
        ndarray: Heat loss of the hot-water floor heating
    """
    # Thin wrapper: forwards the load under the name Q_T_H_rad to the shared
    # floor-heating helper.
    return hwfloor.get_Q_loss_rad(Q_T_H_rad=Q_T_H_FH_d_t, r_up=r_up)
29,905
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry and close the API session it owns.

    Returns:
        bool: True when every platform unloaded successfully.
    """
    _LOGGER.debug("__init__ async_unload_entry")
    unload_ok = all(
        await asyncio.gather(
            *(
                hass.config_entries.async_forward_entry_unload(entry, component)
                for component in PLATFORMS
            )
        )
    )
    if unload_ok:
        config_data = hass.data[DOMAIN].pop(entry.data["unique_id"])
        # Use the CONF_API constant for both the membership test and the
        # lookup; the original tested the literal "api" but indexed with
        # CONF_API, which silently diverges if the constant is ever renamed.
        if CONF_API in config_data:
            energy_api = config_data[CONF_API]
            await energy_api.close()
    return unload_ok
29,906
def main(hgt_results_fp, genbank_fp, method, ncbi_nr, darkhorse_low_lpi,
         darkhorse_high_lpi, darkhorse_output_fp=None):
    """ Parsing functions for various HGT detection tool outputs.
    """
    # Delegate the actual parsing, then emit the result on stdout.
    parsed = parse_output(
        hgt_results_fp=hgt_results_fp,
        method=method,
        genbank_fp=genbank_fp,
        low_lpi=darkhorse_low_lpi,
        high_lpi=darkhorse_high_lpi,
        output_fp=darkhorse_output_fp,
    )
    sys.stdout.write(parsed)
29,907
def t06_ManyGetPuts(C, pks, crypto, server):
    """Many clients upload many files and their contents are checked."""
    n_clients = 10
    clients = [C("c" + str(idx)) for idx in range(n_clients)]
    # Shadow key/value store per client to verify against later.
    expected = [dict() for _ in range(n_clients)]

    for _ in range(200):
        owner = random.randint(0, n_clients - 1)
        name = str("%08x" % random.randint(0, 100))
        value = str("%08x" % random.randint(0, 100))
        clients[owner].upload(name, value)
        expected[owner][name] = value

    # verify integrity
    good = total = 0
    for client, kv in zip(clients, expected):
        for key, want in kv.items():
            if client.download(key) == want:
                good += 1
            total += 1
    return float(good) / total
29,908
def multi_lightness_function_plot(functions=None, **kwargs):
    """
    Plots given *Lightness* functions.

    Parameters
    ----------
    functions : array_like, optional
        *Lightness* functions to plot.
    \\*\\*kwargs : \\*\\*
        Keywords arguments.

    Returns
    -------
    bool
        Definition success.

    Raises
    ------
    KeyError
        If one of the given *Lightness* function is not found in the factory
        *Lightness* functions.

    Examples
    --------
    >>> fs = ('CIE 1976', 'Wyszecki 1964')
    >>> multi_lightness_function_plot(fs)  # doctest: +SKIP
    True
    """
    if functions is None:
        functions = ('CIE 1976', 'Wyszecki 1964')

    # Sample luminance over [0, 100] for every requested function.
    samples = np.linspace(0, 100, 1000)
    for i, function in enumerate(functions):
        # Swap: resolve the callable from the registry, keep the name for
        # labelling / error reporting.
        function, name = LIGHTNESS_METHODS.get(function), function
        if function is None:
            raise KeyError(
                ('"{0}" "Lightness" function not found in factory '
                 '"Lightness" functions: "{1}".').format(
                    name, sorted(LIGHTNESS_METHODS.keys())))
        pylab.plot(samples,
                   [function(x) for x in samples],
                   label=u'{0}'.format(name),
                   linewidth=2)

    # Common plot cosmetics handed to the shared display helpers.
    settings = {
        'title': '{0} - Lightness Functions'.format(', '.join(functions)),
        'x_label': 'Luminance Y',
        'y_label': 'Lightness L*',
        'x_tighten': True,
        'legend': True,
        'legend_location': 'upper left',
        'x_ticker': True,
        'y_ticker': True,
        'grid': True,
        'limits': [0, 100, 0, 100]}
    settings.update(kwargs)

    bounding_box(**settings)
    aspect(**settings)
    return display(**settings)
29,909
def delete_notification(request):
    """
    Deletes the Notification identified by the POSTed primary key.

    (The original docstring incorrectly said "Creates a Notification model
    based on uer input"; the view deletes one.)
    """
    print request.POST  # Python 2 debug print of the incoming POST payload
    # Notification's PK
    Notification.objects.get(pk=int(request.POST["pk"])).delete()
    # Empty JSON body signals success to the AJAX caller.
    return JsonResponse({})
29,910
def parse_query_value(query_str):
    """Return a Delorean datetime for the query string, or None on failure.

    Supported forms:
      * 'now'            -> current time in tz
      * 'y', 'yy', ...   -> midnight, N days ago (one day per 'y')
      * 't', 'tt', ...   -> midnight, today plus (len - 1) days
      * numeric          -> UNIX timestamp (>= 1e12 treated as milliseconds)
      * anything else    -> parsed as a free-form datetime string
    """
    try:
        query_str = str(query_str).strip('"\' ')
        if query_str == 'now':
            d = Delorean(timezone=tz)
        elif query_str.startswith('y'):
            # 'y' is yesterday; each extra 'y' goes one more day back.
            d = Delorean(Delorean(timezone=tz).midnight)
            d -= timedelta(days=len(query_str))
        elif query_str.startswith('t'):
            # 't' is today's midnight; each extra 't' goes one day forward.
            d = Delorean(Delorean(timezone=tz).midnight)
            d += timedelta(days=len(query_str) - 1)
        else:
            # Parse datetime string or timestamp
            try:
                ts = float(query_str)
                # Heuristic: values this large are millisecond epochs.
                if ts >= 1000000000000:
                    ts /= 1000
                d = epoch(float(ts))
                d.shift(tz)
            except ValueError:
                d = parse(str(query_str), tz, dayfirst=False)
    except (TypeError, ValueError):
        d = None
    return d
29,911
def build_model():
    """
    Build the model

    :return: the compiled Keras regression model

    NOTE(review): reads the module-level ``train_dataset`` global to size the
    input layer — confirm it is defined before this is called.
    """
    # Two hidden ReLU layers, single linear output (regression).
    model = keras.Sequential([
        layers.Dense(64, activation='relu',
                     input_shape=[len(train_dataset.keys())]),
        layers.Dense(64, activation='relu'),
        layers.Dense(1)
    ])

    optimizer = tf.keras.optimizers.RMSprop(0.001)

    # MSE loss; track MAE and MSE during training.
    model.compile(loss='mse',
                  optimizer=optimizer,
                  metrics=['mae', 'mse'])
    return model
29,912
def detect(stream):
    """Return True if the given byte string is a loadable OpenDocument file.

    Note: the original docstring said "excel file", but the check is done
    with ``opendocument.load`` (ODF), so that is what is actually detected.

    Args:
        stream: raw file contents as bytes.

    Returns:
        bool: True when parsing succeeds, False otherwise. (The original
        fell off the end and implicitly returned None on failure.)
    """
    try:
        opendocument.load(BytesIO(stream))
        return True
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are not swallowed; any parse failure means "not this format".
        return False
29,913
def test_invalid_json():
    """Test Invalid response Exception

    Mocks the IDEX ticker endpoint to return non-JSON (HTML) and asserts the
    client raises IdexRequestException when decoding the response.
    """
    loop = asyncio.get_event_loop()
    with aioresponses() as m:
        # Respond with an HTML fragment instead of the expected JSON body.
        m.post('https://api.idex.market/returnTicker', body='<head></html>')

        async def _run_test():
            client = await AsyncClient.create(api_key)
            with pytest.raises(IdexRequestException):
                await client.get_tickers()

        loop.run_until_complete(_run_test())
29,914
def create_region_file(filename, reg_file):
    """Convert an OSM coordinate dump into a DS9 fk5 region file.

    Args:
        filename: path to a whitespace-delimited text file whose first two
            columns are used as coordinates.
        reg_file: output region-file path; overwritten if it already exists.
    """
    # Read the OSM file; only the first two columns are used.
    osm_data = np.loadtxt(filename)
    # atleast_2d generalizes the original to single-row input files, where
    # loadtxt returns a 1-D array and 2-D slicing would fail.
    coords = np.atleast_2d(osm_data)[:, 0:2]

    # Opening with 'w' truncates, so the original's exists()/remove() dance
    # was redundant. The context manager also guarantees the handle is closed
    # on error (the original leaked it) and avoids shadowing builtin 'file'.
    with open(reg_file, 'w') as out:
        out.write('global color=green dashlist=8 3 width=1 '
                  'font="helvetica 10 normal roman" select=1 highlite=1 dash=0 '
                  'fixed=0 edit=1 move=1 delete=1 include=1 source=1\nfk5\n')
        for lon, lat in coords:
            out.write('point(%10.6f, %10.6f) # point=x\n' % (lon, lat))
29,915
def new_default_channel():
    """Create new gRPC channel from settings."""
    # Resolve host and port from the configured service bind URL.
    parsed = urlparse(format_url(settings.SERVICE_BIND))
    host, port = parsed.hostname, parsed.port
    return Channel(host=host, port=port)
29,916
def iou(bbox1, bbox2):
    """
    Calculates the intersection-over-union of two bounding boxes.

    Args:
        bbox1 (numpy.array, list of floats): bounding box in format x1,y1,x2,y2.
        bbox2 (numpy.array, list of floats): bounding box in format x1,y1,x2,y2.

    Returns:
        float: intersection-over-union of bbox1, bbox2, in [0, 1].
        (The original docstring claimed ``int`` and the no-overlap branch
        returned the int 0 while the other branch returned a float.)
    """
    bbox1 = [float(x) for x in bbox1]
    bbox2 = [float(x) for x in bbox2]
    (x0_1, y0_1, x1_1, y1_1) = bbox1
    (x0_2, y0_2, x1_2, y1_2) = bbox2
    # get the overlap rectangle
    overlap_x0 = max(x0_1, x0_2)
    overlap_y0 = max(y0_1, y0_2)
    overlap_x1 = min(x1_1, x1_2)
    overlap_y1 = min(y1_1, y1_2)
    # check if there is an overlap; return a float for a consistent type
    if overlap_x1 - overlap_x0 <= 0 or overlap_y1 - overlap_y0 <= 0:
        return 0.0
    # if yes, calculate the ratio of the overlap to each ROI size and the unified size
    size_1 = (x1_1 - x0_1) * (y1_1 - y0_1)
    size_2 = (x1_2 - x0_2) * (y1_2 - y0_2)
    size_intersection = (overlap_x1 - overlap_x0) * (overlap_y1 - overlap_y0)
    size_union = size_1 + size_2 - size_intersection
    return size_intersection / size_union
29,917
def test_agent_proxy_wait_running_timeout(nsproxy, timeout):
    """
    Check that the `wait_for_running` method times out if the agent is
    not running after the specified number of seconds.
    """
    AgentProcess('agent').start()
    time0 = time.time()
    with pytest.raises(TimeoutError) as error:
        Proxy('agent').wait_for_running(timeout=timeout)
    elapsed = time.time() - time0
    assert 'Timed out' in str(error.value)
    # Must block at least `timeout` seconds, but not much longer
    # (0.5 s slack for process scheduling).
    assert elapsed >= timeout
    assert elapsed < timeout + .5
29,918
def seed_request():
    """
    Adds request seeds to the database.

    Builds twelve Request fixtures with a spread of creation dates, target
    dates, priorities, product areas and resolved flags, then commits them
    in one transaction.

    NOTE(review): ``today`` uses ``datetime.utcnow()`` while the relative
    dates use ``datetime.now()`` — mixed naive clock sources; confirm this
    is intentional.
    """
    today = datetime.utcnow()
    yesterday = datetime.now() - timedelta(1)
    before_yesterday = datetime.now() - timedelta(2)
    way_before_yesterday = datetime.now() - timedelta(3)
    future_day = datetime.now() + timedelta(20)

    # Create seeds for request.
    requests = [
        Request(
            title='Add option for clearing transactions or archiving them'
            ' transactions',
            description='People want to be able to clear old'
            ' transaction list to reduce UI clutter. This will require'
            ' the frontend and backend team working together to create'
            ' a new user experience',
            product_area=ProductArea.CLAIMS,
            target_date=future_day,
            created_at=today,
            priority=2,
            staff_id=2,
            client_id=3,
            id=12,
            resolved=True,
        ),
        Request(
            title='Improve customer care services to reduce client churn',
            description='The current customer care services are reported to '
            ' be abysmal with representatives dropping calls on customer or '
            ' being rather unpleasant. ',
            product_area=ProductArea.POLICIES,
            target_date=today,
            created_at=way_before_yesterday,
            priority=1,
            staff_id=1,
            client_id=1,
        ),
        Request(
            title='Fix issue with the customisation section. It hangs'
            ' sometimes and breaks immersion',
            description='People want to be able to clear old'
            ' transaction list to reduce UI clutter',
            product_area=ProductArea.CLAIMS,
            target_date=today,
            created_at=before_yesterday,
            priority=7,
            staff_id=3,
            client_id=3,
        ),
        Request(
            title='Create a new option to make payment vis Visa to open door'
            ' to new clients',
            description='The current customer care services are reported to'
            ' be abysmal with representatives dropping calls on customer or'
            ' being rather unpleasant. ',
            product_area=ProductArea.POLICIES,
            target_date=future_day,
            created_at=before_yesterday,
            priority=2,
            staff_id=3,
            client_id=1,
        ),
        Request(
            title='Add PayPal payment support and improve payment systems',
            description='People want to be able to purchase'
            ' using PayPal. We need to also imptove the existing PayStack'
            ' support to allow new implementation and get rid of old crust',
            product_area=ProductArea.BILLING,
            target_date=future_day,
            created_at=before_yesterday,
            priority=1,
            staff_id=2,
            client_id=2,
            resolved=True,
        ),
        Request(
            title='Remove old UI feature that prevents clients from making'
            ' withdrawals on Sunday',
            description='People want to be able to purchase'
            ' using his PayPal.',
            product_area=ProductArea.BILLING,
            target_date=future_day,
            created_at=before_yesterday,
            priority=3,
            staff_id=2,
            client_id=2,
        ),
        Request(
            title='Add a chat section. We need to be able discuss about'
            ' products and interests',
            description='People want to be able to clear old'
            ' transaction list to reduce UI clutter',
            product_area=ProductArea.CLAIMS,
            target_date=today,
            created_at=yesterday,
            priority=5,
            staff_id=2,
            client_id=3,
        ),
        Request(
            title='Add shopping cart to make it easy for clients to keep logs'
            ' of interested items',
            description='People want to be able to purchase'
            ' using their PayPal. We need',
            product_area=ProductArea.BILLING,
            target_date=future_day,
            created_at=yesterday,
            priority=1,
            staff_id=2,
            client_id=3,
            resolved=True,
        ),
        Request(
            title='Add a ratings system to make it possible to rate sellers'
            ' based on their past services.',
            description='The current customer care services are reported to'
            ' be abysmal with representatives dropping calls on customer or'
            ' being rather unpleasant. ',
            product_area=ProductArea.POLICIES,
            target_date=today,
            created_at=yesterday,
            priority=6,
            staff_id=2,
            client_id=2,
        ),
        Request(
            title='Make it possible for users to wipe all their data on the'
            ' platform if they choose to do so',
            description='People want to be able to purchase'
            ' using PayPal. We need to also imptove the existing PayStack'
            ' support to allow new implementation and get rid of old crust',
            product_area=ProductArea.CLAIMS,
            target_date=future_day,
            created_at=yesterday,
            priority=4,
            staff_id=2,
            client_id=3,
            resolved=True,
        ),
        Request(
            title='Implement payment sharing feature to allow clients share'
            ' payment of items',
            description='The current customer care services are reported to'
            ' be abysmal with representatives dropping calls on customer or'
            ' being rather unpleasant. ',
            product_area=ProductArea.POLICIES,
            target_date=future_day,
            created_at=today,
            priority=3,
            staff_id=2,
            client_id=3,
        ),
        Request(
            title='Create a portfolio section for allowing sellers to showcase'
            ' their experience in the sale of a particular good',
            description='People want to be able to purchase'
            ' using PayPal. We need to also imptove the existing PayStack'
            ' support to allow new implementation and get rid of old crust',
            product_area=ProductArea.BILLING,
            target_date=future_day,
            created_at=today,
            priority=4,
            staff_id=2,
            client_id=2,
            resolved=True,
        ),
    ]

    # Save to the database.
    db.session.add_all(requests)
    db.session.commit()
29,919
def set_keymap_settings(keymap):
    """Activate certain keymap through Gnome, changing settings directly without delay.

    The new keymap will be changed for all windows immediately.  The keymap
    parameter must be directly suitable for Gnome, e.g. 'fi', 'us', or
    'fi\\tnodeadkeys' (where the separator is a tab.  Use get_keymap_list() to
    get a list of keymaps (both Gnome name and user friently name).

    XXX: for practical reasons we also accept gnome escaped names
    (e.g. 'fi\\tnodeadkeys' -> 'fi:nodeadkeys') because they are easier for the
    web UI to deal with.  Should be fixed properly.
    """
    if ':' in keymap:
        _log.warning('colon(s) in keymap, caller is not apparently using gnomename')
        # NOTE(review): string.replace is the deprecated Python 2 module-level
        # function (equivalent to keymap.replace(':', '\t')).
        keymap = string.replace(keymap, ':', '\t')

    # running gconf needs environment to be exactly correct, at least the
    # following need to be correctly set:
    #   1. user ID (and group ID)
    #   2. HOME environment variable
    #   3. DISPLAY environment variable
    #
    # without these the commands may well succeed but do nothing.

    myenv = dict(os.environ)
    myenv['HOME'] = '/home/%s' % constants.ADMIN_USER_NAME
    myenv['DISPLAY'] = ':0.0'

    # gconf-editor is best for checking these out
    run_command([constants.CMD_SUDO, '-u', constants.ADMIN_USER_NAME,
                 constants.CMD_GCONFTOOL2, '--set',
                 '/desktop/gnome/peripherals/keyboard/general/defaultGroup',
                 '--type', 'int', '0'],
                retval=runcommand.FAIL, env=myenv)
    run_command([constants.CMD_SUDO, '-u', constants.ADMIN_USER_NAME,
                 constants.CMD_GCONFTOOL2, '--set',
                 '/desktop/gnome/peripherals/keyboard/kbd/layouts',
                 '--type', 'list', '--list-type', 'string',
                 '[%s]' % keymap],
                retval=runcommand.FAIL, env=myenv)
    run_command([constants.CMD_SUDO, '-u', constants.ADMIN_USER_NAME,
                 constants.CMD_GCONFTOOL2, '--set',
                 '/desktop/gnome/peripherals/keyboard/general/groupPerWindow',
                 '--type', 'bool', 'false'],
                retval=runcommand.FAIL, env=myenv)
29,920
def get_version():
    """
    Obtain the version of the ITU-R P.1511 recommendation currently being used.

    Returns
    -------
    version: int
        Version currently being used.
    """
    # Delegates to the active model object selected elsewhere in this module.
    return __model.__version__
29,921
def sample_filepaths(filepaths_in, filepaths_out, intensity):
    """
    `filepaths_in` is a list of filepaths for in-set examples.
    `filepaths_out` is a list of lists, where `filepaths_out[i]` is a list of
    filepaths corresponding to the ith out-of-set class.
    `intensity` is the number of in-set examples as a proportion of the total
    number of examples: `intensity = N_in / (N_in + N_out)`. We can rearrange
    this to get `N_out = N_in * ((1 / intensity) - 1)`, which we use to set
    `n_left_to_sample`. An intensity of 0.5 gives `N_in = N_out`.
    """
    # Work on a deep copy so the caller's per-class lists are not mutated
    # when sampled paths are removed below.
    filepaths_out_copy = copy.deepcopy(filepaths_out)
    filepaths_out_sampled = []
    inds_to_sample_from = range(len(filepaths_out))
    n_left_to_sample = int(len(filepaths_in) * ((1 / intensity) - 1))
    while n_left_to_sample > 0:
        # NOTE(review): this compares against len(filepaths_out) (the total
        # class count) rather than len(inds_to_sample_from); confirm that is
        # intended after the candidate set has been narrowed once.
        if n_left_to_sample < len(filepaths_out):
            inds_to_sample_from = np.random.choice(
                inds_to_sample_from, n_left_to_sample, replace=False)
        # Draw one path per selected class, without replacement within a class.
        for i in inds_to_sample_from:
            sample = np.random.choice(filepaths_out_copy[i])
            filepaths_out_copy[i].remove(sample)
            filepaths_out_sampled.append(sample)
        n_left_to_sample -= len(inds_to_sample_from)
    return np.random.permutation(filepaths_in + filepaths_out_sampled)
29,922
def makekey(s):
    """Generates a bitcoin private key from a secret ``s``.

    The secret bytes are hashed with SHA-256 and the digest is used as the
    raw key material.  (Fixes the original's "enerates" typo.)
    """
    return CBitcoinSecret.from_secret_bytes(sha256(s).digest())
29,923
def shape_broadcast(shape1: tuple, shape2: tuple) -> tuple:
    """
    Broadcast two shapes to create a new union shape.

    Args:
        shape1 (tuple) : first shape
        shape2 (tuple) : second shape

    Returns:
        tuple : broadcasted shape

    Raises:
        IndexingError : if cannot broadcast
    """
    # Reject empty shapes up front, reporting the first offender.
    for candidate in (shape1, shape2):
        if not candidate:
            raise IndexingError(f"Shape must have at least one dimension: {candidate}")

    # Align the shapes from the right (trailing dimensions first).
    rev1 = list(reversed(shape1))
    rev2 = list(reversed(shape2))
    longest = max(len(rev1), len(rev2))

    merged_rev = []
    for dim in range(longest):
        if dim >= len(rev1):
            # shape1 exhausted: take shape2's dimension unchanged.
            merged_rev.append(rev2[dim])
        elif dim >= len(rev2):
            # shape2 exhausted: take shape1's dimension unchanged.
            merged_rev.append(rev1[dim])
        else:
            a, b = rev1[dim], rev2[dim]
            merged = max(a, b)
            # Each side must either match the merged size or be 1.
            if (a != merged and a != 1) or (b != merged and b != 1):
                raise IndexingError(
                    f"The size of tensor a ({a}) must match the size "
                    f"of tensor b ({b}) at non-singleton dimension {dim}"
                )
            merged_rev.append(merged)

    return tuple(reversed(merged_rev))
29,924
def hash_array(kmer):
    """Return a hash of a numpy array.

    Uses the 32-bit xxHash digest of the array's raw buffer; the hash covers
    only the byte contents, not the shape or dtype.
    """
    return xxhash.xxh32_intdigest(kmer.tobytes())
29,925
def GenerateDiskTemplate(
    lu, template_name, instance_uuid, primary_node_uuid, secondary_node_uuids,
    disk_info, file_storage_dir, file_driver, base_index,
    feedback_fn, full_disk_params):
    """Generate the entire disk layout for a given template type.

    Builds objects.Disk devices for every entry in ``disk_info`` according to
    ``template_name`` (diskless, DRBD8, or one of the single-node templates),
    allocating names, minors and UUIDs through the LU's config.
    """
    vgname = lu.cfg.GetVGName()
    disk_count = len(disk_info)
    disks = []

    CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), template_name)

    if template_name == constants.DT_DISKLESS:
        # Nothing to generate for a diskless instance.
        pass
    elif template_name == constants.DT_DRBD8:
        # DRBD needs exactly one secondary node; two minors per disk
        # (data + meta), alternating primary/secondary.
        if len(secondary_node_uuids) != 1:
            raise errors.ProgrammerError("Wrong template configuration")
        remote_node_uuid = secondary_node_uuids[0]
        minors = lu.cfg.AllocateDRBDMinor(
            [primary_node_uuid, remote_node_uuid] * len(disk_info),
            instance_uuid)

        (drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
                                                           full_disk_params)
        drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]

        # Each disk gets a "<prefix>_data" and "<prefix>_meta" LV name pair.
        names = []
        for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                                   for i in range(disk_count)]):
            names.append(lv_prefix + "_data")
            names.append(lv_prefix + "_meta")
        for idx, disk in enumerate(disk_info):
            disk_index = idx + base_index
            data_vg = disk.get(constants.IDISK_VG, vgname)
            meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
            disk_dev = _GenerateDRBD8Branch(lu, primary_node_uuid,
                                            remote_node_uuid,
                                            disk[constants.IDISK_SIZE],
                                            [data_vg, meta_vg],
                                            names[idx * 2:idx * 2 + 2],
                                            "disk/%d" % disk_index,
                                            minors[idx * 2],
                                            minors[idx * 2 + 1])
            disk_dev.mode = disk[constants.IDISK_MODE]
            disk_dev.name = disk.get(constants.IDISK_NAME, None)
            disks.append(disk_dev)
    else:
        # All remaining templates are single-node.
        if secondary_node_uuids:
            raise errors.ProgrammerError("Wrong template configuration")

        name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
        if name_prefix is None:
            names = None
        else:
            names = _GenerateUniqueNames(lu, ["%s.disk%s" %
                                              (name_prefix, base_index + i)
                                              for i in range(disk_count)])

        # logical_id_fn(idx, disk_index, disk) -> template-specific logical id.
        if template_name == constants.DT_PLAIN:
            def logical_id_fn(idx, _, disk):
                vg = disk.get(constants.IDISK_VG, vgname)
                return (vg, names[idx])
        elif template_name in (constants.DT_FILE, constants.DT_SHARED_FILE):
            # NOTE(review): this lambda references the free variable ``idx``
            # (not its ``disk_index`` parameter); it works only because it is
            # called inside the loop below where ``idx`` is in scope via late
            # binding — confirm this is intentional.
            logical_id_fn = \
                lambda _, disk_index, disk: (file_driver,
                                             "%s/%s" % (file_storage_dir,
                                                        names[idx]))
        elif template_name == constants.DT_BLOCK:
            logical_id_fn = \
                lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
                                               disk[constants.IDISK_ADOPT])
        elif template_name == constants.DT_RBD:
            logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
        elif template_name == constants.DT_EXT:
            def logical_id_fn(idx, _, disk):
                provider = disk.get(constants.IDISK_PROVIDER, None)
                if provider is None:
                    raise errors.ProgrammerError("Disk template is %s, but '%s'"
                                                 " is not found",
                                                 constants.DT_EXT,
                                                 constants.IDISK_PROVIDER)
                return (provider, names[idx])
        else:
            raise errors.ProgrammerError("Unknown disk template '%s'" %
                                         template_name)

        dev_type = template_name

        for idx, disk in enumerate(disk_info):
            params = ExtractDiskParams(disk, template_name)
            disk_index = idx + base_index
            size = disk[constants.IDISK_SIZE]
            feedback_fn("* disk %s, size %s" %
                        (disk_index, utils.FormatUnit(size, "h")))
            disk_dev = objects.Disk(dev_type=dev_type, size=size,
                                    logical_id=logical_id_fn(idx, disk_index,
                                                             disk),
                                    iv_name="disk/%d" % disk_index,
                                    mode=disk[constants.IDISK_MODE],
                                    params=params,
                                    spindles=disk.get(constants.IDISK_SPINDLES))
            disk_dev.name = disk.get(constants.IDISK_NAME, None)
            disk_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
            disks.append(disk_dev)

    return disks
29,926
def check_pc_overlap(pc1, pc2, min_point_num):
    """
    Check if the bounding boxes of the 2 given point clouds overlap.

    Returns:
        (overlap, pc_merged): whether the clouds are considered the same
        object, and the merged cloud (or the larger/original one when the
        merge is rejected).
    """
    b1 = get_pc_bbox(pc1)
    b2 = get_pc_bbox(pc2)

    b1_c = Polygon(b1)
    b2_c = Polygon(b2)
    inter_area = b1_c.intersection(b2_c).area
    union_area = b1_c.area + b2_c.area - inter_area
    # For two sufficiently large boxes require IoU > 0.5; otherwise any
    # intersection at all counts as overlap.
    # NOTE(review): the thresholds (11, 0.5, 2..12, 4.6, 1, 15) look like
    # car-sized heuristics in square meters — confirm units.
    if b1_c.area > 11 and b2_c.area > 11:
        overlap = (inter_area / union_area) > 0.5
    elif inter_area > 0:
        overlap = True
    else:
        overlap = False
    pc_merged = pc2
    if overlap:
        # Fit a minimum-area rectangle around the union of both footprints.
        bbox_min = MinimumBoundingBox.MinimumBoundingBox(
            np.concatenate((pc1[:, 0:2], pc2[:, 0:2]), axis=0)
        )
        l01 = bbox_min.length_parallel
        l02 = bbox_min.length_orthogonal
        area = l01 * l02
        # shape doesn't look like car bbox
        if ((area < 2 or area > 12) or
                ((l01 > 4.6 or l02 > 4.6)) or
                ((l01 < 1 or l02 < 1)) or
                union_area > 15):
            # Reject the merge and keep the larger of the two clouds.
            if b1_c.area > b2_c.area:
                pc_merged = pc1
            else:
                pc_merged = pc2
        else:
            # Append only the points of pc1 that are not already in pc2
            # (exact coordinate match).
            idx_overlap = np.zeros((len(pc1)))
            for i in range(len(pc1)):
                diff = pc2 - pc1[i]
                diff = np.sum(diff ** 2, axis=1)
                if 0 in diff:
                    idx_overlap[i] = 1
            pc_merged = np.concatenate((pc_merged, pc1[idx_overlap == 0]),
                                       axis=0)
            if not is_car(pc_merged, min_point_num):
                overlap = False
    return overlap, pc_merged
29,927
def build_for_lambda(c, no_clean=False):
    """
    Build executable code for deployment on AWS Lambda.

    Copies the handler sources and support directories into the dist folder,
    then runs the packaging container against it.

    :param c: invoke context used to run shell commands
    :param no_clean: when True, skip cleaning the dist directory first
    :return: None
    """
    if not no_clean:
        clean(c)

    c.run("mkdir -p {}".format(DIST_PATH))

    # Individual files to ship with the bundle.
    file_list = [
        'requirements.txt',
        'constraints.txt',
        'lambda_function.py',
        'lambda_handler/director_of_system.py',
        'lambda_handler/notifier.py',
        'lambda_handler/worker_of_item.py',
    ]
    for file in file_list:
        c.run("cp -p {file} {path}".format(file=file, path=DIST_PATH))

    # Whole directories copied recursively.
    directory_list = [
        'bin',
        'find_sale_in_wish_list',
    ]
    for directory in directory_list:
        c.run("cp -pR {directory} {path}".format(directory=directory,
                                                 path=DIST_PATH))

    # Package inside the headless-chrome Lambda build image.
    with c.cd(DIST_PATH):
        c.run('docker run --rm -v "${PWD}":/var/task lambda_headless_chrome')
29,928
def apply_hypercube(cube: DataCube, context: dict) -> DataCube: """Reduce the time dimension for each tile and compute min, mean, max and sum for each pixel over time. Each raster tile in the udf data object will be reduced by time. Minimum, maximum, mean and sum are computed for each pixel over time. Args: udf_data (UdfData): The UDF data object that contains raster and vector tiles Returns: This function will not return anything, the UdfData object "udf_data" must be used to store the resulting data. """ # The list of tiles that were created array: xarray.DataArray = cube.get_array() result = xarray.concat( [array.min(dim='t'), array.max(dim='t'), array.sum(dim='t'), array.mean(dim='t')], dim='bands' ) return DataCube(result)
29,929
def main():
    """Exercise get_bucket_acl()"""
    # Assign this value before running the program
    test_bucket_name = 'BUCKET_NAME'

    # Set up logging
    logging.basicConfig(level=logging.DEBUG,
                        format='%(levelname)s: %(asctime)s: %(message)s')

    # Retrieve the current bucket ACL
    acl = get_bucket_acl(test_bucket_name)
    if acl is None:
        exit(-1)

    # Map each grantee type to the Grantee field that identifies it.
    identifier_fields = {
        'CanonicalUser': 'DisplayName',
        'AmazonCustomerByEmail': 'EmailAddress',
        'Group': 'URI',
    }

    # Output the bucket ACL grantees and permissions
    for grant in acl['Grants']:
        grantee = grant['Grantee']
        field = identifier_fields.get(grantee['Type'])
        grantee_identifier = grantee[field] if field else 'Unknown'
        logging.info(f'Grantee: {grantee_identifier}, '
                     f'Permissions: {grant["Permission"]}')
29,930
def cli(env, identifier):
    """Delete an image."""
    image_mgr = SoftLayer.ImageManager(env.client)
    # Accept either a numeric ID or a name; resolve to a concrete image ID.
    image_id = helpers.resolve_id(image_mgr.resolve_ids, identifier, 'image')
    image_mgr.delete_image(image_id)
29,931
def dateIsBefore(year1, month1, day1, year2, month2, day2):
    """Returns True if year1-month1-day1 is before year2-month2-day2.
    Otherwise, returns False.

    Uses Python's lexicographic tuple comparison, which collapses the
    original's three levels of nested year/month/day if/else chains into a
    single expression with identical results.
    """
    return (year1, month1, day1) < (year2, month2, day2)
29,932
def all_stat(x, stat_func=np.mean, upper_only=False, stat_offset=3):
    """
    Generate a matrix containing stat_func evaluated over every sub-window
    x[start:stop] with stop >= start + stat_offset.

    stat_func is any function that takes a sequence and returns a scalar.
    When upper_only is False each value is mirrored into the lower triangle;
    when True only the upper triangle is populated.
    """
    n = len(x)
    # Too short to form any window of the required minimum width.
    if n < stat_offset:
        return np.zeros([])

    stat = np.zeros((n, n))
    for start in range(n):
        for stop in range(start + stat_offset, n):
            value = stat_func(x[start:stop])
            stat[start, stop] = value
            if not upper_only:
                stat[stop, start] = value
    return stat
29,933
def PyCallable_Check(space, w_obj):
    """Determine if the object o is callable.  Return 1 if the object is callable
    and 0 otherwise.  This function always succeeds."""
    # The CPython API returns a C int, so coerce the interp-level truth value.
    return int(space.is_true(space.callable(w_obj)))
29,934
def test_Circular_Aperture_PTP_short(display=False, npix=512, oversample=4,
                                     include_wfe=True, display_proper=False):
    """ Tests plane-to-plane propagation at short distances, by comparison
    of the results from propagate_ptp and propagate_direct calculations

    This test also now include wavefront error, so as to demonstrate
    consistent sign conventions for phase aberrations in both methods.
    """
    # test short distance propagation, as discussed in issue #194
    # (https://github.com/mperrin/poppy/issues/194)
    wf_direct = fresnel.FresnelWavefront(
        2 * u.um, wavelength=10e-9*u.m, npix=npix, oversample=oversample)
    wf_direct *= optics.CircularAperture(radius=800 * 1e-9*u.m)
    if include_wfe:
        # Add a small Zernike phase aberration (1 nm in one term).
        wf_direct *= poppy.wfe.ZernikeWFE(radius=800 * 1e-9*u.m,
                                          coefficients=(0, 0, 0, 0, 0, 0, 1e-9))

    wf_fresnel = wf_direct.copy()

    z = 12. * u.um

    # Calculate same result using 2 different algorithms:
    wf_direct.propagate_direct(z)
    wf_fresnel.propagate_fresnel(z)

    # The results have different pixel scales so we need to resize
    # in order to compare them
    scalefactor = (wf_direct.pixelscale / wf_fresnel.pixelscale).decompose().value
    zoomed_direct = zoom(wf_direct.intensity, scalefactor)
    print(f"Rescaling by {scalefactor} to match pixel scales")

    n = zoomed_direct.shape[0]
    center_npix = npix * oversample / 2
    print(f"center npix: {center_npix}")
    # Crop the fresnel result to the same array size as the zoomed direct one.
    cropped_fresnel = wf_fresnel.intensity[
        int(center_npix - n / 2):int(center_npix + n / 2),
        int(center_npix - n / 2):int(center_npix + n / 2)]

    # Zooming also shifts the centroids, so we have to re-align before we can
    # compare pixel values. In theory we could figure out this offset directly
    # from the pixel scales and how zoom works, which would be more elegant.
    # However for current purposes it is sufficient to brute force it by
    # registering the images together
    # NOTE(review): the `and False` makes the first branch unreachable, so the
    # FFT-registration path is always taken — confirm this is intentional.
    if not include_wfe and False:
        # For in-focus images, we can just measure the centroids empirically
        # to align; zooming shifted the centroids, find new centers
        cent = fwcentroid.fwcentroid(zoomed_direct, halfwidth=8)
        cent2 = fwcentroid.fwcentroid(cropped_fresnel, halfwidth=8)
        center_offset = np.asarray(cent) - np.asarray(cent2)
        print(f"After rescaling, found center offset = {center_offset}")
    else:
        # For defocused images, after rescaling we can register via FFT
        # correlation
        import skimage
        # upsample_factor of 50 or more is required to get sufficiently good
        # alignment to pass the test criterion below
        center_offset, error, diffphase = skimage.registration.phase_cross_correlation(
            zoomed_direct, cropped_fresnel, upsample_factor=50)
        print(f"Offset from skimage: {center_offset}")

    shifted_fresnel = shift(cropped_fresnel, (center_offset[1], center_offset[0]))

    # work around different normalizations; in some sense this is a bug in
    # propagate_direct that they are not consistent
    normalization = zoomed_direct.max() / shifted_fresnel.max()
    zoomed_direct /= normalization
    print(f"Making consistent normalization with scale factor {normalization}")

    diff = shifted_fresnel - zoomed_direct

    if display:
        boxhalfsize = npix // 4
        zoomed_crop = zoomed_direct[n//2-boxhalfsize:n//2+boxhalfsize,
                                    n//2-boxhalfsize:n//2+boxhalfsize]
        shifted_crop = shifted_fresnel[n//2-boxhalfsize:n//2+boxhalfsize,
                                       n//2-boxhalfsize:n//2+boxhalfsize]
        diff_crop = diff[n//2-boxhalfsize:n//2+boxhalfsize,
                         n//2-boxhalfsize:n//2+boxhalfsize]
        plt.imshow(zoomed_crop)
        plt.colorbar()
        plt.title("From propagate_direct\nrescaled to match scale of propagate_fresnel")
        plt.figure()
        plt.imshow(shifted_crop)
        plt.colorbar()
        plt.title("From propagate_fresnel\nshifted to align to propagate_direct")
        plt.figure()
        plt.imshow(diff_crop)
        plt.colorbar()
        plt.title("Difference of those two, after normalization")

    maxreldiff = diff.max() / shifted_fresnel.max()
    assert maxreldiff < 1e-3, (
        f"Pixel values different more than expected; "
        f"max relative difference is {maxreldiff}")
29,935
def save_nii(obj, outfile, data=None, is_nii=False):
    """ save a nifti object """
    if is_nii:
        # Already a NIfTI image: write it out directly.
        obj.to_filename(outfile)
        return
    # Otherwise rebuild a Nifti1Image around the (possibly overridden) data,
    # reusing the source object's affine and header.
    if data is None:
        data = obj.get_data()
    nib.Nifti1Image(data, obj.affine, obj.header).to_filename(outfile)
29,936
def _multiclass_metric_evaluator(metric_func: Callable[..., float], n_classes: int, y_test: np.ndarray, y_pred: np.ndarray, **kwargs) -> float: """Calculate the average metric for multiclass classifiers.""" metric = 0 for label in range(n_classes): metric += metric_func(y_test[:, label], y_pred[:, label], **kwargs) metric /= n_classes return metric
29,937
def TestResumeWatcher():
    """Unpause the cluster watcher and verify it reports itself as running."""
    cluster_master = qa_config.GetMasterNode()
    AssertCommand(["gnt-cluster", "watcher", "continue"])
    info_cmd = ["gnt-cluster", "watcher", "info"]
    info_output = GetCommandOutput(cluster_master.primary,
                                   utils.ShellQuoteArgs(info_cmd))
    AssertMatch(info_output, r"^.*\bis not paused\b.*")
29,938
def insert_phots_into_database(framedir, frameglob='rsub-*-xtrns.fits', photdir=None, photglob='rsub-*-%s.iphot', maxframes=None, overwrite=False, database=None): """ This makes photometry index rows in the postgresql database. Intended for use when the sqlite3 databases get out of hand. """ # open a database connection if database: cursor = database.cursor() closedb = False else: database = pg.connect(user=PGUSER, password=PGPASSWORD, database=PGDATABASE, host=PGHOST) cursor = database.cursor() closedb = True # first, figure out the directories if not photdir: photdir = framedir # start work here try: if isinstance(framedir, list): framelist = framedir else: # first, find all the frames framelist = glob.glob(os.path.join(os.path.abspath(framedir), frameglob)) # restrict to maxframes max frames if maxframes: framelist = framelist[:maxframes] # turn off table logging and drop indexes for speed cursor.execute('drop index if exists photindex_iphots_rjd_idx') cursor.execute('drop index if exists photindex_iphots_objectid_idx') starttime = time.time() # go through all the frames for ix, frame in enumerate(framelist): print('%sZ: inserting %d frame %s into pg database' % (datetime.utcnow().isoformat(), ix, frame)) # generate the names of the associated phot and sourcelist files frameinfo = FRAMEREGEX.findall(os.path.basename(frame)) framekey = '%s-%s_%s' % (frameinfo[0][0], frameinfo[0][1], frameinfo[0][2]) photsearch = photglob % ('%s-%s_%s' % (frameinfo[0][0], frameinfo[0][1], frameinfo[0][2])) originalframe = '%s-%s_%s.fits' % (frameinfo[0][0], frameinfo[0][1], frameinfo[0][2]) photmatch = glob.glob(os.path.join(os.path.abspath(photdir), photsearch)) originalframe = os.path.join(os.path.abspath(framedir), originalframe) # check these files exist, and populate the dict if they do if (photmatch and os.path.exists(photmatch[0]) and os.path.exists(originalframe)): phot = photmatch[0] # get the JD from the FITS file. 
# NOTE: this is the ORIGINAL FITS frame, since the subtracted # one contains some weird JD header (probably inherited from the # photref frame) framerjd = get_header_keyword(originalframe, 'JD') # now get the phot file and read it photf = open(phot, 'rb') photo = StringIO() for line in photf: hatid = line.split()[0] photo.write('%.5f,%s,%s,%s' % (framerjd, hatid, framekey, line)) photf.close() photo.seek(0) # do a fast insert using pg's copy protocol cursor.copy_from(photo,'photindex_iphots',sep=',') photo.close() # if some associated files don't exist for this frame, ignore it else: print('WRN! %sZ: ignoring frame %s, ' 'photometry for this frame is not available!' % (datetime.utcnow().isoformat(), frame)) # now we're all done with frame inserts # regenerate the indexes and reset table logging for durability print('%sZ: recreating indexes' % (datetime.utcnow().isoformat())) cursor.execute('create index on photindex_iphots(rjd)') cursor.execute('create index on photindex_iphots(objectid)') cursor.execute('analyze photindex_iphots') # commit the transaction database.commit() print('%sZ: done, time taken: %.2f minutes' % (datetime.utcnow().isoformat(), (time.time() - starttime)/60.0)) returnval = (framedir, True) # catch the overwrite = False scenario except pg.IntegrityError as e: database.rollback() message = ('failed to insert photometry from %s ' 'into DB because some of it exists already ' 'and overwrite = False' % framedir) print('EXC! %sZ: %s\n%s' % (datetime.utcnow().isoformat(), message, format_exc()) ) returnval = (framedir, False) # if everything goes wrong, exit cleanly except Exception as e: database.rollback() message = 'failed to insert photometry from %s into DB' % framedir print('EXC! %sZ: %s\nexception was: %s' % (datetime.utcnow().isoformat(), message, format_exc()) ) returnval = (framedir, False) finally: cursor.close() if closedb: database.close() return returnval
29,939
def idxs_of_duplicates(lst):
    """Returns the indices of duplicate values.

    All indices of any value that occurs more than once are included,
    grouped by value in first-seen order.
    """
    positions = {}
    for index, item in enumerate(lst):
        if item in positions:
            positions[item].append(index)
        else:
            positions[item] = [index]

    duplicate_indices = []
    for index_list in positions.values():
        if len(index_list) > 1:
            duplicate_indices += index_list
    return duplicate_indices
29,940
def test_get_fitness_value(case_data):
    """
    Test :meth:`.FitnessCalculator.get_fitness_value`.

    Parameters
    ----------
    case_data : :class:`.CaseData`
        A test case. Holds the fitness calculator to test and the
        correct fitness value.

    Returns
    -------
    None : :class:`NoneType`

    """
    calculator = case_data.fitness_calculator
    molecule = case_data.molecule
    expected = case_data.fitness_value
    _test_get_fitness_value(
        fitness_calculator=calculator,
        molecule=molecule,
        fitness_value=expected,
    )
29,941
async def store_rekey(
    handle: StoreHandle,
    wrap_method: str = None,
    pass_key: str = None,
) -> StoreHandle:
    """Replace the wrap key on a Store."""
    # Lower-case the method name when one is given; falsy values
    # (None or '') pass through unchanged, matching the FFI contract.
    method = wrap_method
    if method:
        method = method.lower()
    return await do_call_async(
        "askar_store_rekey",
        handle,
        encode_str(method),
        encode_str(pass_key),
    )
29,942
def prepare_bitbucket_data(data, profile_data, team_name):
    """
    Prepare bitbucket data by extracting information needed.

    If the data contains a next page for this team/organisation, keep
    fetching pages until the last one is reached.

    :param data: a requests Response for the first page
    :param profile_data: accumulator passed through append_bitbucket_data
    :param team_name: the bitbucket team/organisation name
    :return: the accumulated profile_data

    Fix: the original called ``.json()`` on each response up to three
    times, re-parsing the body on every call; parse each response once
    and reuse the payload.
    """
    payload = data.json()
    profile_data = append_bitbucket_data(payload, profile_data, team_name)
    next_link = payload.get('next')
    while next_link:
        payload = requests.get(next_link).json()
        profile_data = append_bitbucket_data(payload, profile_data, team_name)
        next_link = payload.get('next')
    return profile_data
29,943
def add(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.add <numpy.add>`.

    See its docstring for more information.
    """
    if not (x1.dtype in _numeric_dtypes and x2.dtype in _numeric_dtypes):
        raise TypeError("Only numeric dtypes are allowed in add")
    # Invoke the type-promotion rules purely to raise on disallowed
    # dtype combinations; the result is intentionally discarded.
    _result_type(x1.dtype, x2.dtype)
    lhs, rhs = Array._normalize_two_args(x1, x2)
    return Array._new(np.add(lhs._array, rhs._array))
29,944
def aslist(l):
    """Convenience function to wrap single items and lists, and return lists
    unchanged."""
    return l if isinstance(l, list) else [l]
29,945
def form_03(request_data):
    """
    Statistical form 066/u-02 ("Statistical card of a patient discharged from
    hospital"), Appendix No. 5 to Order No. 413 of the Russian Ministry of
    Health, 30 December 2002.

    Builds the report as a ReportLab PDF and returns the PDF bytes.

    request_data keys used:
    - "dir_pk": primary key of the hospitalisation direction
    - "hospital": a Hospitals instance describing the issuing hospital
    """
    num_dir = request_data["dir_pk"]
    direction_obj = Napravleniya.objects.get(pk=num_dir)
    hosp_nums_obj = hosp_get_hosp_direction(num_dir)
    hosp_nums = f"- {hosp_nums_obj[0].get('direction')}"

    ind_card = direction_obj.client
    patient_data = ind_card.get_data_individual()

    hospital: Hospitals = request_data["hospital"]
    hospital_name = hospital.safe_short_title
    hospital_address = hospital.safe_address
    hospital_kod_ogrn = hospital.safe_ogrn

    # Russian locale is needed for date formatting in the report
    if sys.platform == 'win32':
        locale.setlocale(locale.LC_ALL, 'rus_rus')
    else:
        locale.setlocale(locale.LC_ALL, 'ru_RU.UTF-8')

    pdfmetrics.registerFont(TTFont('PTAstraSerifBold', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Bold.ttf')))
    pdfmetrics.registerFont(TTFont('PTAstraSerifReg', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Regular.ttf')))

    buffer = BytesIO()
    doc = SimpleDocTemplate(
        buffer, pagesize=A4, leftMargin=25 * mm, rightMargin=5 * mm, topMargin=6 * mm, bottomMargin=10 * mm, allowSplitting=1, title="Форма {}".format("066/у-02")
    )
    width, height = portrait(A4)
    styleSheet = getSampleStyleSheet()
    style = styleSheet["Normal"]
    style.fontName = "PTAstraSerifReg"
    style.fontSize = 12
    style.leading = 15
    style.spaceAfter = 0.5 * mm

    styleBold = deepcopy(style)
    styleBold.fontName = "PTAstraSerifBold"

    styleCenter = deepcopy(style)
    styleCenter.alignment = TA_CENTER
    styleCenter.fontSize = 12
    styleCenter.leading = 15
    styleCenter.spaceAfter = 1 * mm

    styleCenterBold = deepcopy(styleBold)
    styleCenterBold.alignment = TA_CENTER
    styleCenterBold.fontSize = 12
    styleCenterBold.leading = 15
    styleCenterBold.face = 'PTAstraSerifBold'
    styleCenterBold.borderColor = black

    styleJustified = deepcopy(style)
    styleJustified.alignment = TA_JUSTIFY
    styleJustified.spaceAfter = 4.5 * mm
    styleJustified.fontSize = 12
    styleJustified.leading = 4.5 * mm

    objs = []

    styleT = deepcopy(style)
    styleT.alignment = TA_LEFT
    styleT.fontSize = 10
    styleT.leading = 4.5 * mm
    styleT.face = 'PTAstraSerifReg'

    print_district = ''
    if SettingManager.get("district", default='True', default_type='b'):
        if ind_card.district is not None:
            print_district = 'Уч: {}'.format(ind_card.district.title)

    # Header block: hospital requisites on the left, form codes on the right
    opinion = [
        [
            Paragraph(
                '<font size=11>{}<br/>Адрес: {}<br/>ОГРН: {} <br/><u>{}</u> </font>'.format(hospital_name, hospital_address, hospital_kod_ogrn, print_district), styleT
            ),
            Paragraph(
                '<font size=9 >Код формы по ОКУД:<br/>Код организации по ОКПО: 31348613<br/>' 'Медицинская документация<br/>форма № 066/у-02</font>', styleT
            ),
        ],
    ]

    tbl = Table(opinion, 2 * [90 * mm])
    tbl.setStyle(
        TableStyle(
            [
                ('GRID', (0, 0), (-1, -1), 0.75, colors.white),
                ('LEFTPADDING', (1, 0), (-1, -1), 80),
                ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ]
        )
    )

    objs.append(tbl)
    space_symbol = '&nbsp;'

    # Children are identified by birth certificate, adults by passport
    if patient_data['age'] < SettingManager.get("child_age_before", default='15', default_type='i'):
        patient_data['serial'] = patient_data['bc_serial']
        patient_data['num'] = patient_data['bc_num']
    else:
        patient_data['serial'] = patient_data['passport_serial']
        patient_data['num'] = patient_data['passport_num']

    card_num_obj = patient_data['card_num'].split(' ')
    p_card_num = card_num_obj[0]

    # Sex is rendered with the form's numeric code (male=1, female=2)
    sex = patient_data['sex']
    if sex == 'м':
        sex = f'{sex} - 1'
    if sex == 'ж':
        sex = f'{sex} - 2'

    doc_patient = f"{patient_data['type_doc']}, {patient_data['serial']} - {patient_data['num']}"
    polis_data = f"{patient_data['oms']['polis_serial']} {patient_data['oms']['polis_num']}"

    ############################################################################################################
    # Fetch the data of the primary reception (the very first hosp direction)
    hosp_first_num = hosp_nums_obj[0].get('direction')
    primary_reception_data = primary_reception_get_data(hosp_first_num)
    hospitalized = ''
    if primary_reception_data['what_time_hospitalized'] and primary_reception_data['plan_hospital']:
        if primary_reception_data['what_time_hospitalized'].lower().replace(' ', '') == 'впервые':
            hospitalized = "первично - 1"
        if primary_reception_data['what_time_hospitalized'].lower().replace(' ', '') == 'повторно':
            hospitalized = "повторно - 2"
        if primary_reception_data['plan_hospital'].lower().replace(' ', '') == 'да':
            hospitalized = f"{hospitalized}; в плановом порядке -4"
        if primary_reception_data['extra_hospital'].lower().replace(' ', '') == 'да':
            hospitalized = f"{hospitalized}; по экстренным показаниям - 3"

    # Department - taken from the service title of the main direction
    hosp_depart = hosp_nums_obj[0].get('research_title')

    # take the very last direction from hosp_dirs
    hosp_last_num = hosp_nums_obj[-1].get('direction')
    # discharge time, discharge date, main diagnosis (description),
    # complication of the main diagnosis, concomitant diagnosis
    date_value, time_value, outcome, result_hospital = '', '', '', ''
    hosp_extract_data = hosp_extract_get_data(hosp_last_num)
    days_count = '__________________________'
    doc_fio = ''
    if hosp_extract_data:
        if hosp_extract_data['result_hospital']:
            result_hospital = hosp_extract_data['result_hospital']
        if hosp_extract_data['outcome']:
            outcome = hosp_extract_data['outcome']
        if hosp_extract_data['date_value']:
            date_value = hosp_extract_data['date_value']
        if hosp_extract_data['time_value']:
            time_value = hosp_extract_data['time_value']
        days_count = hosp_extract_data['days_count']
        doc_fio = hosp_extract_data['doc_fio']

    # Title page: numbered items 1-24 of the statistical card
    title_page = [
        Indenter(left=0 * mm),
        Spacer(1, 8 * mm),
        Paragraph(
            '<font fontname="PTAstraSerifBold" size=13>СТАТИСТИЧЕСКАЯ КАРТА ВЫБЫВШЕГО ИЗ СТАЦИОНАРА<br/> '
            'круглосуточного пребывания, дневного стационара при больничном<br/> учреждении, дневного стационара при'
            ' амбулаторно-поликлиническом<br/> учреждении, стационара на дому<br/>'
            'N медицинской карты {} {}</font>'.format(p_card_num, hosp_nums),
            styleCenter,
        ),
        Spacer(1, 2 * mm),
        Spacer(1, 2 * mm),
        Spacer(1, 2 * mm),
        Paragraph('1. Код пациента: ________ 2. Ф.И.О.: {}'.format(patient_data['fio']), style),
        Paragraph('3. Пол: {} {}4. Дата рождения {}'.format(sex, space_symbol * 24, patient_data['born']), style),
        Paragraph('5. Документ, удостов. личность: (название, серия, номер) {} {}'.format(space_symbol * 2, doc_patient), style),
        Paragraph('6. Адрес: регистрация по месту жительства: {}'.format(patient_data['main_address']), style),
        Paragraph('7. Код территории проживания: ___ Житель: город - 1; село - 2.', style),
        Paragraph('8. Страховой полис (серия, номер):{}'.format(polis_data), style),
        Paragraph('Выдан: {}'.format(patient_data['oms']['polis_issued']), style),
        Paragraph('9. Вид оплаты:______________', style),
        Paragraph('10. Социальный статус: {}'.format(primary_reception_data['social_status']), style),
        Paragraph('11. Категория льготности: {}'.format(primary_reception_data['category_privilege']), style),
        Paragraph('12. Кем направлен больной: {}'.format(primary_reception_data['who_directed']), style),
        Paragraph('13. Кем доставлен: _________________________________ Код______ Номер наряда__________', style),
        Paragraph('14. Диагноз направившего учреждения: {}'.format(primary_reception_data['diagnos_who_directed']), style),
        Paragraph('14.1 Состояние при поступлении: {}'.format(primary_reception_data['state']), style),
        Paragraph('15. Диагноз приемного отделения:{}'.format(primary_reception_data['diagnos_entered']), style),
        Paragraph('16. Доставлен в состоянии опьянения: Алкогольного — 1; Наркотического — 2.', style),
        Paragraph('17. Госпитализирован по поводу данного заболевания в текущем году: {}'.format(hospitalized), style),
        Paragraph('18.Доставлен в стационар от начала заболевания(получения травмы): {}'.format(primary_reception_data['time_start_ill']), style),
        Paragraph('19. Травма: {}'.format(primary_reception_data['type_trauma']), style),
        Paragraph('20. Дата поступления в приемное отделение:______________ Время__________', style),
        Paragraph(
            '21. Название отделения: <u>{}</u>; дата поступления: <u>{}</u>; время: <u>{}</u>'.format(
                hosp_depart, primary_reception_data['date_entered_value'], primary_reception_data['time_entered_value']
            ),
            style,
        ),
        Paragraph('Подпись врача приемного отделения ______________ Код __________', style),
        Paragraph('22. Дата выписки (смерти): {}; Время {}'.format(date_value, time_value), style),
        Paragraph('23. Продолжительность госпитализации (койко - дней): {}'.format(days_count), style),
        Paragraph('24. Исход госпитализации: {}'.format(outcome), style),
        Paragraph('24.1. Результат госпитализации: {}'.format(result_hospital), style),
    ]

    # Item 25: sick-leave certificate data
    closed_bl_result = closed_bl(hosp_nums_obj[0].get('direction'))
    title_page.append(
        Paragraph(
            f"25. Листок нетрудоспособности: открыт <u>{closed_bl_result['start_date']}</u> закрыт: <u>{closed_bl_result['end_date']}</u>"
            f" к труду: <u>{closed_bl_result['start_work']}</u>",
            style,
        )
    )
    title_page.append(Paragraph(f"25.1. Номере ЛН : <u>{closed_bl_result['num']}</u>", style))
    title_page.append(Paragraph(f"25.2. Выдан кому : {closed_bl_result['who_get']}", style))
    title_page.append(Paragraph('25.3. По уходу за больным Полных лет: _____ Пол: {}'.format(sex), style))
    title_page.append(Paragraph('26. Движение пациента по отделениям:', style))
    objs.extend(title_page)

    styleTB = deepcopy(style)
    styleTB.fontSize = 8.7
    styleTB.alignment = TA_CENTER
    styleTB.leading = 3.5 * mm
    styleTC = deepcopy(style)
    styleTC.fontSize = 9.5
    styleTC.alignment = TA_LEFT
    styleTCright = deepcopy(styleTC)
    styleTCright.alignment = TA_RIGHT
    styleTCcenter = deepcopy(styleTC)
    styleTCcenter.alignment = TA_CENTER

    # Item 26 table: per-department movement of the patient
    opinion = [
        [
            Paragraph('N', styleTB),
            Paragraph('Код отделения', styleTB),
            Paragraph('Профиль коек', styleTB),
            Paragraph('Код врача', styleTB),
            Paragraph('Дата поступления', styleTB),
            Paragraph('Дата выписки, перевода', styleTB),
            Paragraph('Код диагноза по МКБ', styleTB),
            Paragraph('Код медицинского стандарта', styleTB),
            Paragraph('Код прерванного случая', styleTB),
            Paragraph('Вид оплаты', styleTB),
        ]
    ]
    patient_movement = hosp_patient_movement(hosp_nums_obj)
    x = 0
    for i in patient_movement:
        x += 1
        doc_code = ''
        if i['doc_confirm_code']:
            doc_code = str(i['doc_confirm_code'])
        tmp_data = [
            [
                Paragraph(str(x), styleTB),
                Paragraph('', styleTB),
                Paragraph(i['bed_profile_research_title'], styleTB),
                Paragraph(doc_code, styleTB),
                Paragraph(i['date_entered_value'], styleTB),
                Paragraph(i['date_oute'], styleTB),
                Paragraph(i['diagnos_mkb'], styleTB),
                Paragraph('', styleTB),
                Paragraph('', styleTB),
                Paragraph('ОМС', styleTB),
            ],
        ]

        opinion.extend(tmp_data.copy())

    # build the table structure for the movement data
    tbl_act = Table(opinion, repeatRows=1, colWidths=(7 * mm, 15 * mm, 30 * mm, 20 * mm, 21 * mm, 21 * mm, 20 * mm, 14 * mm, 14 * mm, 20 * mm))
    tbl_act.setStyle(
        TableStyle(
            [
                ('GRID', (0, 0), (-1, -1), 1.0, colors.black),
                ('BOTTOMPADDING', (0, 0), (-1, -1), 1.5 * mm),
                ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
            ]
        )
    )
    objs.append(tbl_act)

    objs.append(Spacer(1, 2 * mm))
    objs.append(
        Paragraph('27. Хирургические операции(обозначить: основную операцию, использование спец.аппаратуры):', style),
    )
    # Item 27 table: surgical operations
    opinion = [
        [
            Paragraph('Дата, Час', styleTB),
            Paragraph('Код <br/>хирурга', styleTB),
            Paragraph('Код отделения', styleTB),
            Paragraph('наименование операции', styleTB),
            Paragraph('код операции', styleTB),
            Paragraph('наименование осложнения', styleTB),
            Paragraph('Код ослонения', styleTB),
            Paragraph('Анестезия (код врача)', styleTB),
            Paragraph('энд.', styleTB),
            Paragraph('лазер.', styleTB),
            Paragraph('криог.', styleTB),
            Paragraph('Вид оплаты', styleTB),
        ]
    ]
    patient_operation = hosp_get_operation_data(num_dir)
    operation_result = []
    for i in patient_operation:
        # 12-column row; untouched columns stay as empty strings
        operation_template = [''] * 12
        operation_template[0] = Paragraph(i['date'] + '<br/>' + i['time_start'] + '-' + i['time_end'], styleTB)
        operation_template[1] = Paragraph(str(i['doc_code']), styleTB)
        operation_template[3] = Paragraph(f"{i['name_operation']} <br/><font face=\"PTAstraSerifBold\" size=\"8.7\">({i['category_difficult']})</font>", styleTB)
        operation_template[4] = Paragraph('{}'.format(i['code_operation'] + '<br/>' + i['plan_operation']), styleTB)
        operation_template[7] = Paragraph('{}'.format(i['anesthesia method'] + '<br/> (' + i['code_doc_anesthesia'] + ')'), styleTB)
        operation_template[5] = Paragraph(i['complications'], styleTB)
        operation_template[11] = Paragraph(" ОМС", styleTB)
        operation_result.append(operation_template.copy())
    opinion.extend(operation_result)

    tbl_act = Table(opinion, repeatRows=1, colWidths=(22 * mm, 12 * mm, 11 * mm, 26 * mm, 26 * mm, 20 * mm, 10 * mm, 15 * mm, 7 * mm, 7 * mm, 7 * mm, 16 * mm))
    tbl_act.setStyle(
        TableStyle(
            [
                ('GRID', (0, 0), (-1, -1), 1.0, colors.black),
                ('BOTTOMPADDING', (0, 0), (-1, -1), 1.5 * mm),
                ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
            ]
        )
    )
    objs.append(tbl_act)
    objs.append(Spacer(1, 2 * mm))
    space_symbol = '&nbsp;'

    objs.append(
        Paragraph('28. Обследован: RW {} AIDS '.format(space_symbol * 10), style),
    )
    objs.append(Spacer(1, 2 * mm))
    objs.append(
        Paragraph('29. Диагноз стационара(при выписке):', style),
    )
    # Item 29 table: discharge diagnoses (clinical and pathological rows)
    opinion = [
        [
            Paragraph('Клинический заключительный', styleTB),
            Paragraph('Основное заболевание', styleTB),
            Paragraph('Код МКБ', styleTB),
            Paragraph('Осложнение', styleTB),
            Paragraph('Код МКБ', styleTB),
            Paragraph('Сопутствующее заболевание', styleTB),
            Paragraph('Код МКБ', styleTB),
        ]
    ]
    hosp_last_num = hosp_nums_obj[-1].get('direction')
    hosp_extract_data = hosp_extract_get_data(hosp_last_num)
    opinion_diagnos = []
    if hosp_extract_data:
        opinion_diagnos = [
            [
                Paragraph('', styleTB),
                Paragraph(hosp_extract_data['final_diagnos'], styleTB),
                Paragraph(hosp_extract_data['final_diagnos_mkb'], styleTB),
                Paragraph(hosp_extract_data['other_diagnos'], styleTB),
                Paragraph(hosp_extract_data['other_diagnos_mkb'], styleTB),
                Paragraph(hosp_extract_data['near_diagnos'].replace('<', '&lt;').replace('>', '&gt;'), styleTB),
                Paragraph(hosp_extract_data['near_diagnos_mkb'], styleTB),
            ]
        ]
    opinion.extend(opinion_diagnos)
    opinion_pathologist = [
        [
            Paragraph('Патологоанатомический ', styleTB),
            Paragraph('', styleTB),
            Paragraph('', styleTB),
            Paragraph('', styleTB),
            Paragraph('', styleTB),
            Paragraph('', styleTB),
            Paragraph('', styleTB),
        ]
    ]
    opinion.extend(opinion_pathologist)
    tbl_act = Table(opinion, repeatRows=1, colWidths=(28 * mm, 45 * mm, 15 * mm, 30 * mm, 15 * mm, 30 * mm, 15 * mm))
    tbl_act.setStyle(
        TableStyle(
            [
                ('GRID', (0, 0), (-1, -1), 1.0, colors.black),
                ('BOTTOMPADDING', (0, 0), (-1, -1), 1.5 * mm),
                ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
                ('SPAN', (0, 0), (0, 1)),
            ]
        )
    )
    objs.append(tbl_act)
    objs.append(Spacer(1, 2 * mm))
    objs.append(
        Paragraph('30.В случае смерти указать основную причину:______________________________________________________________' 'Код МКБ', style),
    )
    objs.append(Spacer(1, 20 * mm))

    objs.append(
        Paragraph(
            '31. Дефекты догоспитального этапа: несвоевременность госпитализации - 1; недостаточный объем клинико - диагностического обследования - 2; '
            'неправильная тактика лечения - 3; несовпадение диагноза - 4.',
            style,
        ),
    )
    objs.append(Spacer(1, 7 * mm))
    objs.append(
        Paragraph('Подпись лечащего врача ({}) ____________________________'.format(doc_fio), style),
    )
    objs.append(Spacer(1, 7 * mm))
    objs.append(
        Paragraph('Подпись заведующего отделением', style),
    )

    # Page callbacks are intentionally empty (no headers/footers)
    def first_pages(canvas, document):
        canvas.saveState()
        canvas.restoreState()

    def later_pages(canvas, document):
        canvas.saveState()
        canvas.restoreState()

    doc.build(objs, onFirstPage=first_pages, onLaterPages=later_pages)

    pdf = buffer.getvalue()
    buffer.close()

    return pdf
29,946
def getKeyList(rootFile, pathSplit):
    """Get the list of keys of the directory (rootFile, pathSplit);
    if (rootFile, pathSplit) is not a directory then get the key in a list."""
    if not isDirectory(rootFile, pathSplit):
        return [getKey(rootFile, pathSplit)]
    changeDirectory(rootFile, pathSplit)
    return ROOT.gDirectory.GetListOfKeys()
29,947
def get_args():
    """Parse command line arguments and return namespace object"""
    arg_parser = argparse.ArgumentParser(description='Transcode some files')
    arg_parser.add_argument('-c', dest="config", action="store", required=True)
    arg_parser.add_argument('-l', dest="limit", action="store", type=int,
                            default=None)
    arg_parser.add_argument('-p', dest="processes", action="store", type=int)
    arg_parser.add_argument('-q', dest="quiet", action="store_true")
    return arg_parser.parse_args()
29,948
def query_from_json(query_json: Any, client: cl.Client = None):
    """
    Convert a dictionary or JSON string of Query to a Query object.

    :param query_json: A dictionary or json string that contains the keys
        of a Query.
    :type query_json: Any
    :param client: An IBM PAIRS client.
    :type client: ibmpairs.client.Client
    :rtype: ibmpairs.query.Query
    :raises Exception: if not a dict or a str.
    """
    result = Query.from_json(query_json)
    # Resolve the effective client (explicit argument wins over the global)
    result.client = common.set_client(input_client=client,
                                      global_client=cl.GLOBAL_PAIRS_CLIENT)
    return result
29,949
def _write(info, directory, format, name_format): """ Writes the string info Args: directory (str): Path to the directory where to write format (str): Output format name_format (str): The file name """ #pylint: disable=redefined-builtin file_name = name_format file_path = os.path.join( directory, f"{file_name}.{format}") os.makedirs(os.path.dirname(file_path), exist_ok=True) with open(file_path,'w',encoding='utf-8') as f: f.write(info) return file_path
29,950
def fetch_validation_annotations():
    """ Returns the validation annotations

        Returns:
            complete_annotations: array of annotation data - [n_annotations, 4]
                                  row format is [T, X, Y, Z]
    """
    rows = [annotation for annotation in _annotation_generator()
            if annotation[0] in VAL_TIMEPOINTS]
    data = np.asarray(rows)

    # scale z down to expected range
    data *= [1, 1, 1, 0.2]

    return data
29,951
def enough_data(train_data, test_data, verbose=False):
    """Check if train and test sets have any elements."""
    for label, frame in (("training", train_data), ("testing", test_data)):
        if frame.empty:
            if verbose:
                print(f'Empty {label} data\n')
            return False
    return True
29,952
def test_authorized_rpc_call2(volttron_instance, build_two_test_agents):
    """Tests an agent with two capability calling a method that requires those
    same two capabilites
    """
    agent1, agent2 = build_two_test_agents
    # Add another required capability on top of the one the fixture set up
    agent1.vip.rpc.allow(agent1.foo, 'can_call_foo2')
    # Grant the caller both required capabilities
    volttron_instance.add_capabilities(agent2.publickey, ['can_call_foo', 'can_call_foo2'])
    # brief pause so the capability change propagates before the RPC call
    gevent.sleep(.1)
    result = agent2.vip.rpc.call(agent1.core.identity, 'foo', 42).get(timeout=2)
    assert result == 42
29,953
def main(argv):
    """Go Main Go

    With one argument (an hour), plot for "now" — or the previous UTC day
    at 12z when the hour is 12 — plus two historical re-runs; with four
    arguments (year month day hour) plot that explicit timestamp.
    """
    if len(argv) == 2:
        hr = int(argv[1])
        if hr == 12:
            # Run for the previous UTC day
            ts = datetime.datetime.utcnow() - datetime.timedelta(days=1)
            ts = ts.replace(tzinfo=pytz.utc).replace(hour=12, minute=0,
                                                     second=0, microsecond=0)
        else:
            ts = datetime.datetime.utcnow()
            ts = ts.replace(tzinfo=pytz.utc).replace(hour=0, minute=0,
                                                     second=0, microsecond=0)
        makeplot(ts)
        # Run a day, a week ago ago as well
        # NOTE(review): the subtraction is cumulative, so this re-plots at
        # ts-1 day and ts-6 days (not ts-7) — confirm that is intended
        for d in [1, 5]:
            ts -= datetime.timedelta(days=d)
            makeplot(ts, 'a')
    else:
        # Explicit timestamp: year, month, day, hour from the command line
        ts = datetime.datetime(int(argv[1]), int(argv[2]), int(argv[3]),
                               int(argv[4]))
        ts = ts.replace(tzinfo=pytz.utc)
        makeplot(ts, 'a')
29,954
def parse_args(args):
    """
    Parse the arguments to this application, then return the constructed namespace
    argument.

    Discovers all registered communication adapters and exposes each one as a
    subcommand whose arguments come from the adapter's get_arguments() dict.

    :param args: list of arguments to parse
    :return: namespace argument
    """
    parser = argparse.ArgumentParser(
        description="Connects data from F prime flight software to the GDS tcp server"
    )
    # Setup this parser to handle MiddleWare arguments
    fprime_gds.executables.cli.MiddleWareParser.add_args(parser)
    # Add a parser for each adapter
    subparsers = parser.add_subparsers(
        help="Type of adapter used for processing", dest="subcommand"
    )
    for (
        adapter_name
    ) in fprime_gds.common.adapters.base.BaseAdapter.get_adapters().keys():
        adapter = fprime_gds.common.adapters.base.BaseAdapter.get_adapters()[
            adapter_name
        ]
        # Check adapter real quick before moving on
        # (skip adapters that don't expose a callable get_arguments)
        if not hasattr(adapter, "get_arguments") or not callable(
            getattr(adapter, "get_arguments", None)
        ):
            LOGGER.error(
                "'%s' does not have 'get_arguments' method, skipping.", (adapter_name)
            )
            continue
        subparse = subparsers.add_parser(adapter_name)
        # Add arguments for the parser
        # (each key is the *args tuple, each value the **kwargs dict)
        for argument in adapter.get_arguments().keys():
            subparse.add_argument(*argument, **adapter.get_arguments()[argument])
    args = parser.parse_args(args)
    try:
        # Post-process/validate the parsed arguments and set up logging
        extras = fprime_gds.executables.cli.refine(parser, args)
        fprime_gds.common.logger.configure_py_log(extras["logs"], "comm-adapter.log")
    except ValueError as exc:
        print("[ERROR] {}".format(exc), file=sys.stderr)
        parser.print_help(sys.stderr)
        sys.exit(-1)
    return args
29,955
def calculate_tidal_offset(TIDE, GM, R, refell):
    """
    Calculates the spherical harmonic offset for a tide system to change
    from a tide free state where there is no permanent direct and
    indirect tidal potentials

    Arguments
    ---------
    TIDE: output tidal system ('mean_tide' or 'zero_tide')
    R: average radius used in gravity model
    GM: geocentric graviational constant used in gravity model
    refell: reference ellipsoid name

    Returns
    -------
    deltaC20: offset for changing from tide free system

    Raises
    ------
    ValueError: if TIDE is not a recognized tide system (previously this
        surfaced as an UnboundLocalError on `conv`)
    """
    #-- get ellipsoid parameters for refell
    ellip = ref_ellipsoid(refell)
    #-- standard gravitational acceleration
    gamma = 9.80665
    trans = (-0.198*gamma*R**3)/(np.sqrt(5.0)*GM*ellip['a']**2)
    #-- load love number for degree 2 from PREM (Han and Wahr, 1995)
    k2 = -0.30252982142510
    #-- conversion for each tidal system
    if (TIDE == 'mean_tide'):
        conv = (1.0 + k2)
    elif (TIDE == 'zero_tide'):
        conv = k2
    else:
        raise ValueError(f"Unknown tide system: {TIDE}")
    #-- return the C20 offset
    return conv*trans
29,956
def get_questions(set_id, default_txt=None):
    """Method to get set of questions list.

    Returns (code, text) pairs for the answer set, served from cache when
    available. If default_txt is given, a ('', default_txt) placeholder is
    prepended and a plain list is returned; otherwise the lazy values_list
    queryset is returned via the try/else clause. On any error, () is
    returned.
    """
    try:
        cache_key = 'question_list_%s' % (set_id)
        cache_list = cache.get(cache_key)
        if cache_list:
            v_list = cache_list
            print('FROM Cache %s' % (cache_key))
        else:
            # NOTE(review): this caches the lazy queryset object, not its
            # evaluated rows — confirm the cache backend pickles it as intended
            v_list = ListAnswers.objects.filter(
                answer_set_id=set_id, is_void=False)
            cache.set(cache_key, v_list, 300)
        my_list = v_list.values_list(
            'answer_code', 'answer').order_by('the_order')
        if default_txt:
            initial_list = ('', default_txt)
            final_list = [initial_list] + list(my_list)
            return final_list
    except Exception as e:
        print('error - %s' % (e))
        return ()
    else:
        # runs only when no exception occurred and default_txt was falsy
        return my_list
29,957
def _export_photo_uuid_applescript(
    uuid,
    dest,
    filestem=None,
    original=True,
    edited=False,
    live_photo=False,
    timeout=120,
    burst=False,
):
    """ Export photo to dest path using applescript to control Photos
        If photo is a live photo, exports both the photo and associated .mov file

        uuid: UUID of photo to export
        dest: destination path to export to; must be an existing directory
        filestem: (string) if provided, exported filename will be named stem.ext
                  where ext is extension of the file exported by photos (e.g.
                  .jpeg, .mov, etc). If not provided, file will be named with
                  whatever name Photos uses. If filestem.ext exists, it will be
                  overwritten
        original: (boolean) if True, export original image; default = True
        edited: (boolean) if True, export edited photo; default = False
                If photo not edited and edited=True, will still export the
                original image; caller must verify image has been edited
        *Note*: must be called with either edited or original but not both,
                will raise error if called with both edited and original = True
        live_photo: (boolean) if True, export associated .mov live photo;
                default = False
        timeout: timeout value in seconds; export will fail if applescript run
                time exceeds timeout
        burst: (boolean) set to True if file is a burst image to avoid Photos
               export error

        Returns: list of paths to exported file(s) or None if export failed

        Note: For Live Photos, if edited=True, will export a jpeg but not the
        movie, even if photo has not been edited. This is due to how Photos
        Applescript interface works.
    """

    # setup the applescript to do the export
    export_scpt = AppleScript(
        """
        on export_by_uuid(theUUID, thePath, original, edited, theTimeOut)
            tell application "Photos"
                set thePath to thePath
                set theItem to media item id theUUID
                set theFilename to filename of theItem
                set itemList to {theItem}
                if original then
                    with timeout of theTimeOut seconds
                        export itemList to POSIX file thePath with using originals
                    end timeout
                end if
                if edited then
                    with timeout of theTimeOut seconds
                        export itemList to POSIX file thePath
                    end timeout
                end if
                return theFilename
            end tell
        end export_by_uuid
        """
    )

    dest = pathlib.Path(dest)
    # BUG FIX: the original tested `not dest.is_dir` against the *bound
    # method object* (always truthy), so this guard could never fire;
    # it must be called.
    if not dest.is_dir():
        raise ValueError(f"dest {dest} must be a directory")

    if not original ^ edited:
        raise ValueError("edited or original must be True but not both")

    # photos are exported to a scratch dir first, then copied/renamed into dest
    tmpdir = tempfile.TemporaryDirectory(prefix="osxphotos_")

    # export original
    filename = None
    try:
        filename = export_scpt.call(
            "export_by_uuid", uuid, tmpdir.name, original, edited, timeout
        )
    except Exception as e:
        logging.warning("Error exporting uuid %s: %s" % (uuid, str(e)))
        return None

    if filename is not None:
        # need to find actual filename as sometimes Photos renames JPG to jpeg on export
        # may be more than one file exported (e.g. if Live Photo, Photos exports both .jpeg and .mov)
        # TemporaryDirectory will cleanup on return
        filename_stem = pathlib.Path(filename).stem
        files = glob.glob(os.path.join(tmpdir.name, "*"))
        exported_paths = []
        for fname in files:
            path = pathlib.Path(fname)
            if len(files) > 1 and not live_photo and path.suffix.lower() == ".mov":
                # it's the .mov part of live photo but not requested, so don't export
                logging.debug(f"Skipping live photo file {path}")
                continue
            if len(files) > 1 and burst and path.stem != filename_stem:
                # skip any burst photo that's not the one we asked for
                logging.debug(f"Skipping burst photo file {path}")
                continue
            if filestem:
                # rename the file based on filestem, keeping original extension
                dest_new = dest / f"{filestem}{path.suffix}"
            else:
                # use the name Photos provided
                dest_new = dest / path.name
            logging.debug(f"exporting {path} to dest_new: {dest_new}")
            _copy_file(str(path), str(dest_new))
            exported_paths.append(str(dest_new))
        return exported_paths
    else:
        return None
29,958
def get_mms_operation(workspace, operation_id):
    """Fetch a single operation payload from MMS by its identifier.

    :param workspace: Workspace used to authenticate the MMS request.
    :param str operation_id: Identifier of the operation to look up.
    :return: The JSON-decoded body of the MMS response.
    :rtype: dict
    """
    endpoint = '/operations/' + operation_id
    response = make_mms_request(workspace, 'GET', endpoint, None)
    return response.json()
29,959
def _check_data_nan(data): """Ensure data compatibility for the series received by the smoother. (Without checking for inf and nans). Returns ------- data : array Checked input. """ data = np.asarray(data) if np.prod(data.shape) == np.max(data.shape): data = data.ravel() if data.ndim > 2: raise ValueError( "The format of data received is not appropriate. " "Pass an objet with data in this format (series, timesteps)") if data.ndim == 0: raise ValueError( "Pass an object with data in this format (series, timesteps)") if data.dtype not in [np.float16, np.float32, np.float64, np.int8, np.int16, np.int32, np.int64]: raise ValueError("data contains not numeric types") return data
29,960
def function(row, args):
    """Execute a named function function(arg, arg...)
    @param row: the HXL data row
    @param args: the arguments parsed (the first one is the function name)
    @returns: the result of executing the function on the arguments
    """
    name, params = args[0], args[1:]
    handler = FUNCTIONS.get(name)
    if handler is None:
        logger.error("Unknown function %s", name)
        return ''
    return handler(row, params, True)
29,961
def setup_client(public_key: str, secret_key: str, **_):
    """Configure the global Culqi client with the given application keys."""
    # Extra keyword arguments are accepted and deliberately ignored.
    culqipy.secret_key = secret_key
    culqipy.public_key = public_key
29,962
def currencyrates():
    """
    Print a shell-friendly set of variables holding today's currency rates
    for $EXCHANGERATES, a semicolon-separated list of currency exchange
    series names from riksbanken.se, using daily average aggregation.

    :return: none
    """
    wanted = os.environ.get("EXCHANGERATES", 'SEKEURPMI;SEKUSDPMI')
    series = [dict(groupid=foreign_rates_groupid, seriesid=sid)
              for sid in wanted.split(';')]
    query = dict(languageid='en',
                 min=False, max=True, ultimo=False,
                 aggregateMethod='D',
                 avg=True,
                 dateto=date.today(),
                 datefrom=date.today() - timedelta(days=7),
                 searchGroupSeries=series)
    result = client.service.getInterestAndExchangeRates(searchRequestParameters=query)

    def avg(entry):
        # Average only over rows that actually carry a value.
        vals = [row['value'] for row in entry['resultrows'] if row['value'] is not None]
        return '{0:.4f}'.format(sum(vals) / len(vals))

    print(";".join("{}={}".format(str(s['seriesid']).strip(), avg(s))
                   for s in result['groups'][0]['series']))
29,963
def extract_rfc2822_addresses(text):
    """Returns a list of valid RFC2822 addresses that can be found in
    ``source``, ignoring malformed ones and non-ASCII ones.
    """
    if not text:
        return []
    encoded = tools.ustr(text).encode('utf-8')
    candidates = address_pattern.findall(encoded)
    return filter(try_coerce_ascii, candidates)
29,964
def set_edge_font_size_mapping(table_column, table_column_values=None, sizes=None, mapping_type='c', default_size=None,
                               style_name=None, network=None, base_url=DEFAULT_BASE_URL):
    """Map table column values to sizes to set the edge font size.

    Args:
        table_column (str): Name of Cytoscape table column to map values from
        table_column_values (list): List of values from Cytoscape table to be used in mapping
        sizes (list): List of size values to map to ``table_column_values``
        mapping_type (str): discrete or passthrough (d,p); default is discrete
        default_size (int): Size value to set as default
        style_name (str): name for style
        network (SUID or str or None): Name or SUID of a network or view. Default is the
            "current" network active in Cytoscape.
        base_url (str): Ignore unless you need to specify a custom domain, port or version
            to connect to the CyREST API. Default is http://127.0.0.1:1234 and the latest
            version of the CyREST API supported by this version of py4cytoscape.

    Returns:
        str: ''

    Raises:
        CyError: if table column doesn't exist, table column values doesn't match values list,
            or invalid style name, network or mapping type, or if invalid size
        requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape
            returns an error

    Examples:
        >>> set_edge_font_size_mapping('PassthruCol', mapping_type='p', default_size=20, style_name='galFiltered Style')
        ''

    See Also:
        :meth:`gen_edge_size_map`
    """
    # Validate the size list before touching any style state.
    verify_dimensions('size', sizes)

    # Apply the requested default first so the mapping layers on top of it.
    if default_size is not None:
        style_defaults.set_edge_font_size_default(default_size, style_name=style_name,
                                                  base_url=base_url)

    return _update_visual_property('EDGE_LABEL_FONT_SIZE', table_column,
                                   table_column_values=table_column_values,
                                   range_map=sizes, mapping_type=mapping_type,
                                   style_name=style_name, network=network,
                                   base_url=base_url, table='edge')
29,965
def compute_time_overlap(appointment1, appointment2):
    """Return True if two same-day appointments overlap in time.

    :param appointment1: appointment with ``date_``, ``time_range_``,
        ``start_time_`` and ``end_time_`` attributes
    :param appointment2: second appointment; must be on the same date
    :return: True when the time ranges overlap, False otherwise
    """
    assert appointment1.date_ == appointment2.date_

    print("Checking for time overlap on \"{}\"...".
          format(appointment1.date_))
    print("Times to check: {}, {}".
          format(appointment1.time_range_, appointment2.time_range_))

    latest_start = max(appointment1.start_time_, appointment2.start_time_)
    earliest_end = min(appointment1.end_time_, appointment2.end_time_)

    # BUG FIX: the original used (earliest_end - latest_start).seconds, which
    # is always non-negative even for a negative timedelta (the sign lives in
    # .days), so disjoint ranges were reported as overlapping.
    # total_seconds() preserves the sign.
    delta = (earliest_end - latest_start).total_seconds()
    overlap = max(0, delta)

    if overlap == 0:
        print("No time overlap.")
        return False

    print("\033[93mFound time overlap.\033[0m")
    return True
29,966
def set_up_prior(data, params):
    """
    Function to create prior distribution from data

    Parameters
    ----------
    data: dict
        catalog dictionary containing bin endpoints, log interim prior,
        and log interim posteriors
    params: dict
        dictionary of parameter values for creation of prior; if
        params['prior_mean'] == 'sample', the prior mean is drawn from the
        Gaussian-process prior instead of taken from the interim prior

    Returns
    -------
    (prior, prior_var): tuple
        prior distribution as chippr.mvn object, and its covariance matrix
    """
    zs = data['bin_ends']
    log_nz_intp = data['log_interim_prior']
    print('reading implicit prior '+str(log_nz_intp))
    log_z_posts = data['log_interim_posteriors']

    z_difs = zs[1:] - zs[:-1]
    z_mids = (zs[1:] + zs[:-1]) / 2.
    n_bins = len(z_mids)
    n_pdfs = len(log_z_posts)

    # Squared-exponential covariance: a = amplitude, b = inverse wavelength,
    # c = white-noise level for random fluctuations.
    a = 1.
    b = 5.
    c = 1.e-2
    prior_var = np.eye(n_bins)
    for k in range(n_bins):
        prior_var[k] = a * np.exp(-0.5 * b * (z_mids[k] - z_mids) ** 2)
    prior_var += c * np.eye(n_bins)

    prior_mean = log_nz_intp
    prior = mvn(prior_mean, prior_var)
    # BUG FIX: the original used `is 'sample'`, an identity comparison that is
    # not guaranteed to be true for equal strings; use equality instead.
    if params['prior_mean'] == 'sample':
        prior_mean = prior.sample_one()
        prior = mvn(prior_mean, prior_var)

    return (prior, prior_var)
29,967
def is_nonincreasing(arr):
    """Return True if the sequence is non-increasing, i.e. every element is
    less than or equal to its predecessor.

    :param arr: sequence of mutually comparable items
    :return: bool (empty and single-element sequences are non-increasing)
    """
    # Generator expression: no intermediate list, and all() short-circuits
    # on the first violation.
    return all(x >= y for x, y in zip(arr, arr[1:]))
29,968
def restore_missing_features(nonmissing_X, missing_features):
    """Insert columns corresponding to missing features.

    Parameters
    ----------
    nonmissing_X : array-like, shape (n_samples, n_nonmissing)
        Array containing data with missing features removed.

    missing_features : array-like, shape (n_missing,)
        Array containing the column indices in the original data
        that correspond to missing features. May be None (treated as
        "nothing missing").

    Returns
    -------
    X : array-like, shape (n_samples, n_features)
        Array with missing features inserted as NaN columns.
    """
    if missing_features is None:
        missing_features = []

    n_samples, n_nonmissing = nonmissing_X.shape

    n_missing = len(missing_features)
    n_features = n_missing + n_nonmissing

    # Ensure missing indices are consistent with the
    # inferred number of features.
    if len(missing_features) > 0:
        if min(missing_features) < 0 or max(missing_features) >= n_features:
            raise ValueError(
                'Missing features are inconsistent with '
                'number of features in complete data')

    if is_dask_array(nonmissing_X):
        # Rebuild column by column, lazily, preserving chunked evaluation.
        cols = []
        idx = 0
        for i in range(n_features):
            if i in missing_features:
                # FIX: np.nan instead of np.NaN -- the np.NaN alias was
                # removed in NumPy 2.0.
                cols.append(dask.array.full((n_samples, 1), np.nan))
            else:
                cols.append(nonmissing_X[:, idx].reshape((n_samples, 1)))
                idx += 1
        X = dask.array.hstack(cols)
    else:
        nonmissing_features = [i for i in range(n_features)
                               if i not in missing_features]
        X = np.full((n_samples, n_features), np.nan)
        X[:, nonmissing_features] = nonmissing_X

    return X
29,969
def fontifyPythonNode(node):
    """
    Syntax color the given node containing Python source code.

    @return: C{None}
    """
    # Extract the raw Python source text from the LaTeX node.
    source = cStringIO.StringIO()
    latex.getLatexText(node, source.write,
                       entities={'lt': '<', 'gt': '>', 'amp': '&'})
    stripped = cStringIO.StringIO(source.getvalue().strip() + '\n')

    # Run the htmlizer over it to produce syntax-colored HTML.
    colored = cStringIO.StringIO()
    htmlizer.filter(stripped, colored, writer=htmlizer.SmallerHTMLWriter)
    colored.seek(0)

    # Swap the colored markup in for the original node.
    replacement = microdom.parse(colored).documentElement
    replacement.setAttribute("class", "python")
    node.parentNode.replaceChild(replacement, node)
29,970
def get_sample_type_from_recipe(recipe):
    """Look up the sample type for a project recipe.

    Args:
        recipe: Recipe of the project

    Returns:
        dict: Sample type mapping for the project, e.g.
        { TYPE: "RNA" }, { TYPE: "DNA" } or { TYPE: "WGS" }
    """
    sample_type = find_mapping(recipe_type_mapping, recipe)
    return sample_type
29,971
def _is_download_necessary(path, response):
    """Decide whether a file needs to be (re-)downloaded.

    Three criteria are applied:

    1. A missing file is always downloaded.
    2. If the response header carries a last-modified date newer than the
       locally recorded one, the file is downloaded again.
    3. Without a last-modified header, file sizes are compared; a mismatch
       triggers a download.
    """
    path_yaml = path.with_suffix(".yaml")

    if path_yaml.exists():
        recorded = yaml.safe_load(path_yaml.read_text())["last_modified"]
        last_modified_offline = pd.to_datetime(recorded)
    else:
        last_modified_offline = None

    header_date = response.headers.get("last-modified", None)
    last_modified_online = pd.to_datetime(header_date)

    # Record the online modification date for the next comparison.
    path.with_suffix(".yaml").write_text(
        yaml.dump({"last_modified": header_date})
    )

    if not path.exists():
        return True, f"The file {path.name} does not exist."

    if (
        last_modified_online is not None
        and last_modified_online > last_modified_offline
    ):
        return True, f"{path.name} has been modified online."

    if last_modified_online is None:
        file_size_offline = path.stat().st_size
        file_size_online = int(response.headers.get("content-length", 0))
        if file_size_online != file_size_offline:
            return True, f"File sizes differ for {path.name}"
        return False, f"File {path.name} is already downloaded."

    return False, f"File {path.name} is already downloaded."
29,972
def hash64(s):
    """Return a short, stable hash of *s* as a lowercase hex string.

    The value is the SHA-1 digest of the UTF-8 encoded input, reduced
    modulo 10**8 and rendered in hexadecimal (at most 7 hex digits —
    despite the name, this is not a full 64-bit hash).
    """
    digest = hashlib.sha1(s.encode("utf-8")).hexdigest()
    return format(int(digest, 16) % 10 ** 8, "x")
29,973
def test_import(sfinit):
    """
    Import every available class for this module.

    If any import fails then the module itself has some code error
    (e.g., syntax errors, inheritance errors).
    """
    sfinit
    for name in config.NAMES:
        # Only the module lists are needed; package names are irrelevant here.
        for module_list in return_modules()[name].values():
            for module in module_list:
                config.custom_import(name, module)()
29,974
def course_runs():
    """Fixture creating three CourseRuns in the database"""
    runs = CourseRunFactory.create_batch(3)
    return runs
29,975
def _response(data=None, status_code=None): """Build a mocked response for use with the requests library.""" response = MagicMock() if data: response.json = MagicMock(return_value=json.loads(data)) if status_code: response.status_code = status_code response.raise_for_status = MagicMock() return response
29,976
def test_baked_django_with_custom_issue_template_files(cookies):
    """Test Django project has custom ISSUE templates generated correctly.

    Checks that each generated issue template carries the expected
    "assignees" entry and that post_gen deleted the standard template.
    """
    default_django = cookies.bake()
    template_dir = default_django.project_path / ".github/ISSUE_TEMPLATE"

    for template in ("bug-report.md", "chore.md",
                     "documentation-request.md", "feature-request.md"):
        lines = (template_dir / template).read_text().splitlines()
        assert 'assignees: "imAsparky"' in lines

    assert "ISSUE_TEMPLATE.md" not in os.listdir(
        default_django.project_path / ".github"
    )
29,977
def findtailthreshold(v, figpath=None): """ function [f,mns,sds,gmfit] = findtailthreshold(v,wantfig) <v> is a vector of values <wantfig> (optional) is whether to plot a diagnostic figure. Default: 1. Fit a Gaussian Mixture Model (with n=2) to the data and find the point that is greater than the median and at which the posterior probability is equal (50/50) across the two Gaussians. This serves as a nice "tail threshold". To save on computational load, we take a random subset of size 1000000 if there are more than that number of values. We also use some discretization in computing our solution. return: <f> as the threshold <mns> as [A B] with the two means (A < B) <sds> as [C D] with the corresponding std devs <gmfit> with the gmdist object (the order might not be the same as A < B) example: from numpy.random import randn f, mns, sds, gmfit = findtailthreshold(np.r_[randn(1000), 5+3*randn(500)], figpath='test.png') """ # internal constants numreps = 3 # number of restarts for the GMM maxsz = 1000000 # maximum number of values to consider nprecision = 500 # linearly spaced values between median and upper robust range # inputs if figpath is None: wantfig = 0 else: wantfig = 1 # quick massaging of input v2 = v[np.isfinite(v)] if len(v2) > maxsz: print('warning: too big, so taking a subset') v2 = picksubset(v2, maxsz) # fit mixture of two gaussians gmfit = gmdist(n_components=2, n_init=numreps).fit(v2.reshape(-1, 1)) # figure out a nice range rng = robustrange(v2.flatten())[0] # evaluate posterior allvals = np.linspace(np.median(v2), rng[1], num=nprecision) checkit = gmfit.predict_proba(allvals.reshape(-1, 1)) # figure out crossing np.testing.assert_equal( np.any(checkit[:, 0] > .5) and np.any(checkit[:, 0] < .5), True, err_msg='no crossing of 0.5 detected') ix = np.argmin(np.abs(checkit[:, 0]-.5)) # return it f = allvals[ix] # prepare other outputs mns = gmfit.means_.flatten() sds = np.sqrt(gmfit.covariances_.flatten()) if mns[1] < mns[0]: mns = mns[[1, 0]] sds = 
sds[[1, 0]] # start the figure if wantfig: # make figure plt.plot(allvals, checkit) plt.plot([allvals[ix], allvals[ix]], plt.ylim(), 'k-', linewidth=2) plt.title('Posterior Probabilities') plt.savefig(figpath) plt.close('all') return f, mns, sds, gmfit
29,978
async def make_request_and_envelope_response(
    app: web.Application,
    method: str,
    url: URL,
    headers: Optional[Dict[str, str]] = None,
    data: Optional[bytes] = None,
) -> web.Response:
    """
    Helper to forward a request to the catalog service.

    The upstream JSON payload is wrapped into an envelope: on success under
    ``data``, on a 4xx upstream error under ``error`` (keeping the upstream
    status code). Upstream 5xx responses and connection/timeout failures are
    translated into a 503 ``HTTPServiceUnavailable``.
    """
    session = get_client_session(app)

    try:
        async with session.request(method, url, headers=headers, data=data) as resp:
            payload = await resp.json()

            try:
                resp.raise_for_status()
                resp_data = wrap_as_envelope(data=payload)

            except ClientResponseError as err:
                # 5xx: escalate to the outer handler -> 503 for our caller.
                if 500 <= err.status:
                    raise err
                # 4xx: forward the upstream error payload in the envelope.
                # NOTE(review): assumes the upstream error body has an
                # "errors" key -- confirm against the catalog API schema.
                resp_data = wrap_as_envelope(error=payload["errors"])

            return web.json_response(resp_data, status=resp.status, dumps=json_dumps)

    except (asyncio.TimeoutError, ClientConnectionError, ClientResponseError) as err:
        logger.warning(
            "Catalog service errors upon request %s %s: %s", method, url.relative(), err
        )
        raise web.HTTPServiceUnavailable(
            reason="catalog is currently unavailable"
        ) from err
29,979
def tprint(string, indent=4):
    """Print *string* prefixed with *indent* spaces.

    :param string: object to print (converted with str())
    :param int indent: number of leading spaces (default 4)
    """
    # Call print as a function so the code runs on both Python 2 and 3
    # (the original used the Python 2-only print statement).
    print(indent * ' ' + str(string))
29,980
def _app_node(app_id, existing=True):
    """Returns node path given app id.

    :param app_id: application identifier
    :param existing: when False, a '#' suffix marks a not-yet-created node
    """
    node = os.path.join(z.SCHEDULED, app_id)
    return node if existing else node + '#'
29,981
def make_dirs(path):
    """
    Create the directory (and any missing parents) if it does not exist.

    :param str path: Directory path.
    """
    # exist_ok=True already makes this a no-op for an existing directory,
    # so the former `os.path.isdir()` pre-check was redundant and racy
    # (TOCTOU between the check and the makedirs call).
    os.makedirs(path, mode=0o777, exist_ok=True)
29,982
def get_provincial_miif_sets(munis):
    """
    collect set of indicator values for each province, MIIF category and year
    returns dict of the form
    {
      'cash_coverage': {
        'FS': {
          'B1': {
            '2015': [{'result': ...}]
          }
        }
      }
    }
    """
    nested = lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    prov_sets = defaultdict(nested)

    by_dev_cat = lambda muni: muni['municipality.miif_category']
    by_prov = lambda muni: muni['municipality.province_code']
    munis_by_dev_cat = sorted(munis, key=by_dev_cat)

    for calculator in get_indicator_calculators(has_comparisons=True):
        name = calculator.indicator_name
        for dev_cat, cat_group in groupby(munis_by_dev_cat, by_dev_cat):
            # groupby requires its input sorted by the same key
            for prov_code, prov_group in groupby(sorted(cat_group, key=by_prov), by_prov):
                for muni in prov_group:
                    for period in muni[name]['values']:
                        if period['result'] is not None:
                            prov_sets[name][prov_code][dev_cat][period['date']].append(period)
    return prov_sets
29,983
def create_kernel(radius=2, invert=False):
    """Build a square array containing a filled circle of the given radius.

    :param radius: circle radius; the array has side 2*radius+1
    :param invert: when True the circle is 0 on a background of 1,
        otherwise 1 on a background of 0
    :return: (2*radius+1, 2*radius+1) numpy array
    """
    side = 2 * radius + 1
    background = 1.0 if invert else 0.0
    kernel = np.full((side, side), background)
    yy, xx = np.ogrid[-radius:radius + 1, -radius:radius + 1]
    inside = xx ** 2 + yy ** 2 <= radius ** 2
    kernel[inside] = 1.0 - background
    return kernel
29,984
def edimax_get_power(ip_addr="192.168.178.137"):
    """Query an Edimax smart plug for its current power reading.

    Protocol source (German, "Quelle"):
    http://sun-watch.net/index.php/eigenverbrauch/ipschalter/edimax-protokoll/

    :param ip_addr: IP address of the smart plug
    :return: current power draw as a float (watts, per the device protocol)
    """
    # XML command understood by the plug's smartplug.cgi endpoint.
    req = """<?xml version="1.0" encoding="UTF8"?><SMARTPLUG id="edimax"><CMD id="get">
<NOW_POWER><Device.System.Power.NowCurrent>
</Device.System.Power.NowCurrent><Device.System.Power.NowPower>
</Device.System.Power.NowPower></NOW_POWER></CMD></SMARTPLUG>
"""
    # NOTE(review): credentials are the device's factory defaults, hard-coded.
    r = requests.post("http://{0}:10000/smartplug.cgi".format(ip_addr),
                      auth=("admin","1234"), data=req)
    soup = BeautifulSoup(r.text, features="xml")
    power = soup.find(name="Device.System.Power.NowPower").get_text()
    # Debug output of the raw XML response (Python 2 print statement).
    print r.text
    return float(power)
29,985
def get_class_inst_data_params_n_optimizer(nr_classes, nr_instances, device):
    """Returns class and instance level data parameters and their corresponding optimizers.

    Args:
        nr_classes (int): number of classes in dataset.
        nr_instances (int): number of instances in dataset.
        device (str): device on which data parameters should be placed.

    Returns:
        class_parameters (torch.Tensor): class level data parameters.
        inst_parameters (torch.Tensor): instance level data parameters
        optimizer_class_param (SparseSGD): Sparse SGD optimizer for class parameters
        optimizer_inst_param (SparseSGD): Sparse SGD optimizer for instance parameters
    """
    def _make_params_and_optimizer(count):
        # log(1.0) == 0: parameters start at 1 after later exponentiation.
        params = torch.tensor(np.ones(count) * np.log(1.0),
                              dtype=torch.float32,
                              requires_grad=True,
                              device=device)
        optimizer = SparseSGD([params],
                              lr=0.1,
                              momentum=0.9,
                              skip_update_zero_grad=True)
        return params, optimizer

    class_parameters, optimizer_class_param = _make_params_and_optimizer(nr_classes)
    inst_parameters, optimizer_inst_param = _make_params_and_optimizer(nr_instances)
    return class_parameters, inst_parameters, optimizer_class_param, optimizer_inst_param
29,986
def inc(x):
    """Return the value incremented by one."""
    successor = x + 1
    return successor
29,987
def regionError(df, C, R):
    """Detects if a selected region is not part of one of the selected countries

    Parameters:
    -----------
    df : Pandas DataFrame
        the original dataset
    C : str list
        list of selected countries (None defaults to ['USA'])
    R : str list
        list of selected regions

    Returns
    -----------
    bool
        True if the error is detected, False otherwise
    """
    # FIX: `== None` replaced by the identity check recommended by PEP 8.
    if C is None:
        C = ['USA']
    # Set membership is O(1) per lookup vs O(n) on the original list.
    available_regions = set(regions_of_country(df, C)) | {'All_regions', 'All'}
    # FIX: the original fell off the end and returned None when no bad
    # region was found; any() returns an explicit False.
    return any(region not in available_regions for region in R)
29,988
def lherzolite():
    """
    Elastic constants of lherzolite rock (GPa) from
    Peselnick et al. (1974), in Voigt notation

    - Abbreviation: ``'LHZ'``

    Returns:
        (tuple): tuple containing:
            * C (np.ndarray): Elastic stiffness matrix (shape ``(6, 6)``),
              e.g. C[0, 0] = 187.4, C[1, 1] = 211.25, C[5, 5] = 66.83
            * rho (float): Density (3270 kg/m^3)
    """
    rho = 3270.
    # Full symmetric stiffness matrix written out explicitly.
    C = np.array([
        [187.4,   63.71,  63.87,   0.78,   2.02,  -3.2],
        [63.71,  211.25,  64.5,   -3.07,   0.87,  -5.78],
        [63.87,   64.5,  190.,     0.38,   2.38,  -0.12],
        [0.78,    -3.07,   0.38,  67.9,   -2.12,   1.6],
        [2.02,     0.87,   2.38,  -2.12,  63.12,  -0.55],
        [-3.2,    -5.78,  -0.12,   1.6,   -0.55,  66.83]])
    return C, rho
29,989
def int_to_bit(x_int, nbits, base=2):
    """Turn x_int representing numbers into a bitwise (lower-endian) tensor."""
    expanded = tf.expand_dims(x_int, axis=-1)
    # Digit i is (x // base**i) % base, least-significant first.
    digits = [
        tf.floormod(
            tf.floordiv(tf.to_int32(expanded), tf.to_int32(base)**i),
            tf.to_int32(base))
        for i in range(nbits)
    ]
    return tf.to_float(tf.concat(digits, axis=-1))
29,990
def create_ldap_external_user_directory_config_content(server=None, roles=None, role_mappings=None, **kwargs):
    """Create LDAP external user directory configuration file content.
    """
    # Build the list of <ldap> child entries, then wrap it in the
    # user_directories structure expected by create_xml_config_content.
    ldap_entries = []
    if server:
        ldap_entries.append({"server": server})
    if roles:
        ldap_entries.append({"roles": [{role: None} for role in roles]})
    if role_mappings:
        ldap_entries.extend({"role_mapping": mapping} for mapping in role_mappings)

    entries = {"user_directories": {"ldap": ldap_entries}}
    return create_xml_config_content(entries, **kwargs)
29,991
def fasta_iter(fh):
    """
    Given a fasta file handle, yield tuples of (header, sequence).

    The leading ">" and the description (everything after the first space)
    are stripped from the header; sequence lines are joined into one string.

    From: https://www.biostars.org/p/710/
    Updated: 11/09/2018
    Version: 0.2
    """
    # ditch the boolean (x[0]) and just keep the header or sequence since
    # we know they alternate.
    faiter = (x[1] for x in groupby(fh, lambda line: line[0] == ">"))
    for header_group in faiter:
        # FIX: the builtin next() works on both Python 2 and 3; the original
        # .next() method call is Python 2 only.
        # drop the ">" and description
        header = next(header_group)[1:].strip().split(' ')[0]
        # join all sequence lines to one.
        seq = "".join(s.strip() for s in next(faiter))
        yield header, seq
29,992
def intersection(lst1, lst2):
    """!
    \details Finds hashes common to both lists and stores their location in
    both documents. Similarity is measured as
    sim(A,B) = |common hashes| / min(|hashes in lst1|, |hashes in lst2|).
    \param lst1 : 1st list whose elements are of the form [hash, start location, end location]
    \param lst2 : 2nd list whose elements are of the form [hash, start location, end location]
    \return l3 : list whose elements are of the form
        [common hash, [start location in 1, end location in 1], [start location in 2, end location in 2]]
    \return sim : similarity measure evaluated
    """
    locations1 = {entry[0]: entry[1:] for entry in lst1}
    locations2 = {entry[0]: entry[1:] for entry in lst2}
    common = list(set(locations1) & set(locations2))
    l3 = [[h, locations1[h], locations2[h]] for h in common]
    sim = len(l3) / min(len(locations1), len(locations2))
    return l3, sim
29,993
def apply_colour_to_surface(colour: pygame.Color,
                            shape_surface: pygame.surface.Surface,
                            rect: Union[pygame.Rect, None] = None):
    """
    Apply a colour to a shape surface by multiplication blend. This works
    best when the shape surface is predominantly white.

    :param colour: The colour to apply.
    :param shape_surface: The shape surface to apply the colour to.
    :param rect: A rectangle to apply the colour inside of.
    """
    if rect is None:
        fill_size, destination = shape_surface.get_size(), (0, 0)
    else:
        fill_size, destination = rect.size, rect
    fill_surface = pygame.surface.Surface(fill_size,
                                          flags=pygame.SRCALPHA,
                                          depth=32)
    fill_surface.fill(colour)
    shape_surface.blit(fill_surface, destination,
                       special_flags=pygame.BLEND_RGBA_MULT)
29,994
def allrad2(F_nm, hull, N_sph=None, jobs_count=1):
    """Loudspeaker signals of All-Round Ambisonic Decoder 2.

    Parameters
    ----------
    F_nm : ((N_sph+1)**2, S) numpy.ndarray
        Matrix of spherical harmonics coefficients of spherical function(S).
    hull : LoudspeakerSetup
    N_sph : int
        Decoding order, defaults to hull.characteristic_order.
    jobs_count : int or None, optional
        Number of parallel jobs, 'None' employs 'cpu_count'.

    Returns
    -------
    ls_sig : (L, S) numpy.ndarray
        Loudspeaker L output signal S.

    References
    ----------
    Zotter, F., & Frank, M. (2018). Ambisonic decoding with panning-invariant
    loudness on small layouts (AllRAD2). In 144th AES Convention.

    Examples
    --------
    .. plot::
        :context: close-figs

        ls_setup = spa.decoder.LoudspeakerSetup(ls_x, ls_y, ls_z)
        ls_setup.pop_triangles(normal_limit=85, aperture_limit=90,
                               opening_limit=150)
        ls_setup.ambisonics_setup(update_hull=True)

        spa.plots.decoder_performance(ls_setup, 'ALLRAD2')

    """
    # Both the ambisonics hull and the kernel hull must have been prepared.
    if not hull.ambisonics_hull:
        raise ValueError('Run LoudspeakerSetup.ambisonics_setup() first!')
    if hull.kernel_hull:
        kernel_hull = hull.kernel_hull
    else:
        raise ValueError('Run LoudspeakerSetup.ambisonics_setup() first!')
    if N_sph is None:
        N_sph = hull.characteristic_order

    # Infer the input order from the number of SH coefficients.
    N_sph_in = int(np.sqrt(F_nm.shape[0]) - 1)
    assert(N_sph == N_sph_in)  # for now
    if N_sph_in > kernel_hull.N_kernel:
        warn("Undersampling the sphere. Needs higher N_Kernel.")

    # virtual t-design loudspeakers
    J = len(kernel_hull.points)
    # virtual speakers expressed as phantom sources (Kernel)
    G_k = allrap2(src=kernel_hull.points, hull=hull, N_sph=N_sph,
                  jobs_count=jobs_count)

    # tapering already applied in kernel, sufficient?

    # virtual Ambisonic decoder
    _k_azi, _k_colat, _k_r = utils.cart2sph(kernel_hull.points[:, 0],
                                            kernel_hull.points[:, 1],
                                            kernel_hull.points[:, 2])
    # band-limited Dirac
    Y_bld = sph.sh_matrix(N_sph, _k_azi, _k_colat, SH_type='real')

    # ALLRAD2 Decoder: 4*pi/J normalizes for the J-point spherical sampling.
    D = 4 * np.pi / J * G_k.T @ Y_bld
    # loudspeaker output signals
    ls_sig = D @ F_nm

    return ls_sig
29,995
def run_algorithms(window, size, rows, algorithm, maze_type):
    """Runs the maze window, where the chosen algorithm can be executed.

    :param window: pygame window surface to draw on
    :param size: board size in pixels
    :param rows: number of rows/columns in the grid
    :param algorithm: one of "a*", "breadth first", "depth first",
        "dijkstra's", "best-first"
    :param maze_type: one of "Random", "Swirl", "Imperfect", "Simple";
        any other value leaves the grid empty
    """
    grid = assets.board.make_grid(rows, size)

    # Pre-populate the grid with the requested maze layout.
    if maze_type == "Random":
        grid = assets.maze.completely_random(grid)
    if maze_type == "Swirl":
        grid = assets.maze.basic_swirl(grid)
    if maze_type == "Imperfect":
        grid = assets.maze.imperfect(grid)
    if maze_type == "Simple":
        grid = assets.maze.simple_maze(grid)

    start = None
    end = None

    run = True
    # Keeps track of whether algorithm has been started, so user input can be disabled for its duration
    started = False

    while run:
        assets.board.draw_board(window, grid, rows, size)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quit()

            # Escape returns to the caller (menu).
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    run = False

            # If algorithm has started, does not allow the user to give commands
            if started:
                continue

            # Changing nodes
            if pygame.mouse.get_pressed()[0]:  # left click
                pos = pygame.mouse.get_pos()
                row, col = assets.board.get_clicked_position(pos, rows, size)
                node = grid[row][col]

                # First click places the start, second the end, the rest barriers.
                if not start and node != end and not node.is_hard_barrier:
                    start = node
                    start.make_start()
                elif not end and node != start and not node.is_hard_barrier:
                    end = node
                    end.make_end()
                elif node != end and node != start and not node.is_hard_barrier:
                    node.make_barrier()

            elif pygame.mouse.get_pressed()[2]:  # right click
                pos = pygame.mouse.get_pos()
                row, col = assets.board.get_clicked_position(pos, rows, size)
                node = grid[row][col]
                if not node.is_hard_barrier:
                    node.reset()

                # Reset start and end if they are deleted
                if node == start:
                    start = None
                if node == end:
                    end = None

            if event.type == pygame.KEYDOWN:
                # Pressing c resets all nodes
                if event.key == pygame.K_c:
                    start = None
                    end = None
                    for row in grid:
                        for node in row:
                            if not node.is_hard_barrier:
                                node.reset()

            # Starts the chosen algorithm
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_SPACE and start and end and not started:
                    started = True
                    # Update possible neighbours for every node
                    for row in grid:
                        for node in row:
                            node.update_neighbours(grid)

                    # Which algorithm to use:
                    # Lambda function allows function with these parameters to be called over and over without having to define a whole other function
                    if algorithm == "a*":
                        assets.algorithms.a_star_algorithm(lambda: assets.board.draw_board(window, grid, rows, size), grid, start, end)
                    if algorithm == "breadth first":
                        assets.algorithms.breadth_first_search(lambda: assets.board.draw_board(window, grid, rows, size), grid, start, end)
                    if algorithm == "depth first":
                        assets.algorithms.depth_first_search(lambda: assets.board.draw_board(window, grid, rows, size), grid, start, end)
                    if algorithm == "dijkstra's":
                        assets.algorithms.dijkstras(lambda: assets.board.draw_board(window, grid, rows, size), grid, start, end)
                    if algorithm == "best-first":
                        assets.algorithms.best_first(lambda: assets.board.draw_board(window, grid, rows, size), grid, start, end)

                    started = False
29,996
def match_histogram(reference, image, ref_mask=None, img_mask=None):
    """Match the histogram of the T2-like anatomical with the EPI."""
    import os
    import numpy as np
    import nibabel as nb
    from nipype.utils.filemanip import fname_presuffix
    from skimage.exposure import match_histograms

    img_nii = nb.load(image)
    img_data = np.asanyarray(img_nii.dataobj)
    ref_data = np.asanyarray(nb.load(reference).dataobj)

    def _load_mask(mask_path, like):
        # No mask file means "use every voxel".
        if mask_path is None:
            return np.ones_like(like, dtype=bool)
        return np.asanyarray(nb.load(mask_path).dataobj) > 0

    ref_sel = _load_mask(ref_mask, ref_data)
    img_sel = _load_mask(img_mask, img_data)

    out_file = fname_presuffix(image, suffix="_matched", newpath=os.getcwd())

    # Match only inside the masks; voxels outside are left untouched.
    img_data[img_sel] = match_histograms(
        img_data[img_sel],
        ref_data[ref_sel],
    )

    img_nii.__class__(
        img_data,
        img_nii.affine,
        img_nii.header,
    ).to_filename(out_file)

    return out_file
29,997
def main(input_data_path, output_data_path, window):
    """
    Convert the Volumetric CT data and mask (in NIfTI format) to a dataset of 2D
    images in tif and masks in bitmap for the brain extraction.

    Parameters
    ----------
    input_data_path : str
        Directory containing `info.csv`, `ct_scans/` and `masks/`.
    output_data_path : str
        Directory where per-volume `ct/` and `mask/` slice folders are written.
    window : tuple or None
        (center, width) CT window applied for soft-tissue contrast; None skips
        windowing.
    """
    # open data info dataframe
    info_df = pd.read_csv(os.path.join(input_data_path, 'info.csv'), index_col=0)
    # make output directory; exist_ok avoids the check-then-create race
    os.makedirs(output_data_path, exist_ok=True)
    # iterate over volumes to extract data
    output_info = []
    # NOTE: loop variables renamed from `id`/`slice` so builtins are not shadowed
    for n, vol_id in enumerate(info_df.id.values):
        # read nii volume
        ct_nii = nib.load(os.path.join(input_data_path, f'ct_scans/{vol_id}.nii'))
        mask_nii = nib.load(os.path.join(input_data_path, f'masks/{vol_id}.nii.gz'))
        # get np.array
        ct_vol = ct_nii.get_fdata()
        mask_vol = skimage.img_as_bool(mask_nii.get_fdata())
        # rotate 90° counter clockwise for head pointing upward
        ct_vol = np.rot90(ct_vol, axes=(0, 1))
        mask_vol = np.rot90(mask_vol, axes=(0, 1))
        # window the ct volume to get better contrast of soft tissues
        if window is not None:
            ct_vol = window_ct(ct_vol, win_center=window[0], win_width=window[1], out_range=(0, 1))

        if mask_vol.shape != ct_vol.shape:
            print(f'>>> Warning! The ct volume of patient {vol_id} does not have '
                  f'the same dimension as the ground truth. CT ({ct_vol.shape}) vs Mask ({mask_vol.shape})')

        # make patient directories (idempotent)
        os.makedirs(os.path.join(output_data_path, f'{vol_id:03}/ct/'), exist_ok=True)
        os.makedirs(os.path.join(output_data_path, f'{vol_id:03}/mask/'), exist_ok=True)

        # iterate over axial slices and save each one
        n_slices = ct_vol.shape[2]
        for slice_idx in range(n_slices):
            ct_slice_fn = f'{vol_id:03}/ct/{slice_idx+1}.tif'
            # save CT slice
            skimage.io.imsave(os.path.join(output_data_path, ct_slice_fn),
                              ct_vol[:, :, slice_idx], check_contrast=False)
            is_low = bool(skimage.exposure.is_low_contrast(ct_vol[:, :, slice_idx]))
            # save mask if some brain on slice
            if np.any(mask_vol[:, :, slice_idx]):
                mask_slice_fn = f'{vol_id:03}/mask/{slice_idx+1}_Seg.bmp'
                skimage.io.imsave(os.path.join(output_data_path, mask_slice_fn),
                                  skimage.img_as_ubyte(mask_vol[:, :, slice_idx]),
                                  check_contrast=False)
            else:
                mask_slice_fn = 'None'
            # add info to output list
            output_info.append({'volume': vol_id, 'slice': slice_idx + 1,
                                'ct_fn': ct_slice_fn, 'mask_fn': mask_slice_fn,
                                'low_contrast_ct': is_low})
            print_progessbar(slice_idx, n_slices,
                             Name=f'Volume {vol_id:03} {n+1:03}/{len(info_df.id):03}',
                             Size=20, erase=False)

    # Make dataframe of outputs
    output_info_df = pd.DataFrame(output_info)
    # save df
    output_info_df.to_csv(os.path.join(output_data_path, 'slice_info.csv'))
    print('>>> Slice informations saved at ' + os.path.join(output_data_path, 'slice_info.csv'))
    # save patient df
    info_df.to_csv(os.path.join(output_data_path, 'volume_info.csv'))
    print('>>> Volume informations saved at ' + os.path.join(output_data_path, 'volume_info.csv'))
29,998
def outfeed(token, xs):
    """Outfeeds value `xs` to the host. Experimental.

    `token` is used to sequence infeed and outfeed effects.
    """
    # Flatten the pytree to leaves; the tree structure itself is discarded.
    leaves, _ = pytree.flatten(xs)
    return outfeed_p.bind(token, *leaves)
29,999