Columns: content (string, lengths 22 to 815k), id (int64, 0 to 4.91M)
def get_dataset(dataset_id: Optional[str] = None,
                location: Optional[str] = None,
                project: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatasetResult:
    """
    Gets any metadata associated with a dataset.
    """
    __args__ = dict()
    __args__['datasetId'] = dataset_id
    __args__['location'] = location
    __args__['project'] = project
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('google-native:healthcare/v1:getDataset', __args__, opts=opts, typ=GetDatasetResult).value

    return AwaitableGetDatasetResult(
        name=__ret__.name,
        time_zone=__ret__.time_zone)
10,200
def is_email_available() -> bool:
    """
    Returns whether email services are available on this instance (i.e. settings are in place).
    """
    return bool(settings.EMAIL_HOST)
10,201
def open_link(url):
    """ Takes a QtCore.QUrl """
    ok = QtGui.QDesktopServices.openUrl(url)
    if not ok:
        detail = ["Could not find location:", unicode(url.toString())]
        Error.show_error("Failed to open URL", "\n\n".join(detail))
10,202
def sql2label(sql, num_cols):
    """encode sql"""
    # Because this is a classification task, labels start from 0,
    # so sel_num and cond_num are decremented by 1 here and incremented by 1 in the prediction phase.
    cond_conn_op_label = sql.cond_conn_op
    sel_num_label = sql.sel_num - 1
    # the new dataset has cond_num = 0, do not -1
    cond_num_label = len(sql.conds) + len(sql.having)

    sel_label = np.zeros(num_cols, dtype='int32')
    sel_agg_label = np.zeros((num_cols, SQL.num_agg_ops), dtype='int32')
    for col_id, agg_op in zip(sql.sel, sql.agg):
        assert col_id < num_cols, f"select col_id({col_id}) >= num_cols({num_cols}): {sql}"
        sel_agg_label[col_id][agg_op] = 1
        sel_label[col_id] = 1

    # len(SQL.op_sql_dict) is outside the valid op ID range, which means "no OP" by default
    cond_op_label = np.ones(num_cols, dtype='int32') * len(SQL.op_sql_dict)
    having_agg_label = np.zeros((num_cols, SQL.num_agg_ops), dtype='int32')
    for col_id, cond_op, _ in sql.conds:
        assert col_id < num_cols, f"where col_id({col_id}) >= num_cols({num_cols}): {sql}"
        cond_op_label[col_id] = cond_op
    for agg, col_id, cond_op, _ in sql.having:
        assert col_id < num_cols, f"having col_id({col_id}) >= num_cols({num_cols}): {sql}"
        cond_op_label[col_id] = cond_op
        having_agg_label[col_id][agg] = 1

    order_col_label = np.zeros(num_cols, dtype='int32')
    order_agg_label = np.zeros((num_cols, SQL.num_agg_ops), dtype='int32')
    order_direction_label = sql.order_direction
    for agg, order_col in sql.order_by:
        order_col_label[order_col] = 1
        order_agg_label[order_col][agg] = 1

    group_num_label = sql.group_num
    having_num_label = len(sql.having)
    group_col_label = np.zeros(num_cols, dtype='int32')
    for col_id in sql.group_by:
        assert col_id < num_cols, f"group_by col_id({col_id}) >= num_cols({num_cols}): {sql}"
        group_col_label[col_id] = 1

    return sel_num_label, cond_num_label, cond_conn_op_label, \
        sel_agg_label, sel_label, cond_op_label, \
        order_col_label, order_agg_label, order_direction_label, \
        group_num_label, having_num_label, group_col_label, having_agg_label
10,203
def retrieveToken(verbose: bool = False, save: bool = False, **kwargs) -> str:
    """LEGACY retrieve token directly following the importConfigFile or Configure method."""
    token_with_expiry = token_provider.get_token_and_expiry_for_config(config.config_object, **kwargs)
    token = token_with_expiry['token']
    config.config_object['token'] = token
    config.config_object['date_limit'] = time.time() + token_with_expiry['expiry'] / 1000 - 500
    config.header.update({'Authorization': f'Bearer {token}'})
    if verbose:
        print(f"token valid till : {time.ctime(time.time() + token_with_expiry['expiry'] / 1000)}")
    return token
10,204
def format_str_for_write(input_str: str) -> bytes:
    """Format a string for writing to SteamVR's stream."""
    if len(input_str) < 1:
        return "".encode("utf-8")
    if input_str[-1] != "\n":
        return (input_str + "\n").encode("utf-8")
    return input_str.encode("utf-8")
10,205
def do(createMap=False):
    """
    Creates maps for all recorded experiments.

    :param createMap:
    :return:
    """
    experimentLog = Experiments.getExperimentLog()
    for log in experimentLog:
        exp = Experiments()
        exp.package = log[0]
        exp.time = log[1]
        exp.test_case = log[2]
        exp.log_folder = log[3]
        logger.info("%s %s %s analyzing", log[0], log[1], log[2])
        logger.debug("%s extracting URLs", log[0])
        extractURLs(exp)
        if createMap:
            prep_generate_map(exp)
10,206
def reverse_result(func):
    """The recursive function `get_path` returns results in the reverse of the desired order.
    This decorator just reverses those results before returning them to the caller.
    """
    @wraps(func)
    def inner(*args, **kwargs):
        result = func(*args, **kwargs)
        if result is not None:
            return result[::-1]
    return inner
10,207
def convert_from_opencorpora_tag(to_ud, tag: str, text: str):
    """
    Convert tags from the OpenCorpora format to Universal Dependencies.

    :param to_ud: converter.
    :param tag: tag in OpenCorpora format.
    :param text: token.
    :return: tag in UD format.
    """
    ud_tag = to_ud(str(tag), text)
    pos = ud_tag.split()[0]
    gram = ud_tag.split()[1]
    return pos, gram
10,208
def reg_to_float(reg):
    """convert reg value to Python float"""
    st = struct.pack(">L", reg)
    return struct.unpack(">f", st)[0]
10,209
def wvelocity(grid, u, v, zeta=0): """ Compute "true" vertical velocity Parameters ---------- grid : seapy.model.grid, The grid to use for the calculations u : ndarray, The u-field in time v : ndarray, The v-field in time zeta : ndarray, optional, The zeta-field in time Returns ------- w : ndarray, Vertical Velocity """ grid=seapy.model.asgrid(grid) u=np.ma.array(u) v=np.ma.array(v) zeta=np.ma.array(zeta) # Check the sizes while u.ndim < 4: u=u[np.newaxis, ...] while v.ndim < 4: v=v[np.newaxis, ...] while zeta.ndim < 3: zeta=zeta[np.newaxis, ...] # Get omega W, z_r, z_w, thick_u, thick_v=omega(grid, u, v, zeta, scale=True, work=True) # Compute quasi-horizontal motions (Ui + Vj)*GRAD s(z) vert=z_r * 0 # U-contribution wrk=u * (z_r[:, :, :, 1:] - z_r[:, :, :, :-1]) * \ (grid.pm[:, 1:] - grid.pm[:, :-1]) vert[:, :, :, 1:-1]=0.25 * (wrk[:, :, :, :-1] + wrk[:, :, :, 1:]) # V-contribution wrk = v * (z_r[:, :, 1:, :] - z_r[:, :, :-1, :]) * \ (grid.pn[1:, :] - grid.pn[:-1, :]) vert[:, :, 1:-1, :] += 0.25 * (wrk[:, :, :-1, :] + wrk[:, :, 1:, :]) # Compute barotropic velocity [ERROR IN FORMULATION RIGHT NOW] wrk = np.zeros((vert.shape[0], vert.shape[2], vert.shape[3])) ubar = np.sum(u * thick_u, axis=1) / np.sum(thick_u, axis=1) vbar = np.sum(v * thick_v, axis=1) / np.sum(thick_v, axis=1) # wrk[:, 1:-1, 1:-1] = (ubar[:, 1:-1, :-1] - ubar[:, 1:-1, 1:] + # vbar[:, :-1, 1:-1] - vbar[:, 1:, 1:-1]) # Shift vert from rho to w wvel = z_w * 0 # First two layers slope = (z_r[:, 0, :, :] - z_w[:, 0, :, :]) / \ (z_r[:, 1, :, :] - z_r[:, 0, :, :]) wvel[:, 0, :, :] = 0.375 * (vert[:, 0, :, :] - slope * (vert[:, 1, :, :] - vert[:, 0, :, :])) + \ 0.75 * vert[:, 0, :, :] - \ 0.125 * vert[:, 1, :, :] wvel[:, 1, :, :] = W[:, 1, :, :] + wrk + \ 0.375 * vert[:, 0, :, :] + \ 0.75 * vert[:, 1, :, :] - 0.125 * vert[:, 2, :, :] # Middle of the grid wvel[:, 2:-2, :, :] = W[:, 2:-2, :, :] + \ wrk[:, np.newaxis, :, :] + \ 0.5625 * (vert[:, 1:-2, :, :] + vert[:, 2:-1, :, :]) - \ 0.0625 * (vert[:, :-3, :, :] + vert[:, 3:, :, :]) # Upper two layers slope = (z_w[:, -1, :, :] - z_r[:, -1, :, :]) / \ (z_r[:, -1, :, :] - z_r[:, -2, :, :]) wvel[:, -1, :, :] = wrk + 0.375 * (vert[:, -1, :, :] + slope * (vert[:, -1, :, :] - vert[:, -2, :, :])) + \ 0.75 * vert[:, -1, :, :] - \ 0.0625 * vert[:, -2, :, :] wvel[:, -2, :, :] = W[:, -2, :, :] + 0.375 * vert[:, -1, :, :] + \ wrk + 0.75 * vert[:, -2, :, :] - \ 0.125 * vert[:, -3, :, :] # No gradient at the boundaries wvel[:, :, 0, :] = wvel[:, :, 1, :] wvel[:, :, -2:, :] = wvel[:, :, -3:-2, :] wvel[:, :, :, 0] = wvel[:, :, :, 1] wvel[:, :, :, -2:] = wvel[:, :, :, -3:-2] return wvel
10,210
def test_bltz():
    """Test the bltz instruction."""
    bltz_regex = Instructions["bltz"].regex
    assert re.match(bltz_regex, "bltz $zero, $t1, label") is None
    assert re.match(bltz_regex, "bltz $zero, label") is not None
    assert re.match(bltz_regex, "bltz $zero") is None
    assert re.match(bltz_regex, "bltz $zero, $t1, $s1") is None
10,211
def _as_nested_lists(vertices):
    """ Convert a nested structure such as an ndarray into a list of lists. """
    out = []
    for part in vertices:
        if hasattr(part[0], "__iter__"):
            verts = _as_nested_lists(part)
            out.append(verts)
        else:
            out.append(list(part))
    return out
10,212
def test_aspect_ratio(DSHAPE):
    """Tests aspect ratio computation."""
    # VMEC value
    file = Dataset(str(DSHAPE["vmec_nc_path"]), mode="r")
    AR_vmec = float(file.variables["aspect"][-1])
    file.close()

    # DESC value
    eq = EquilibriaFamily.load(load_from=str(DSHAPE["desc_h5_path"]))[-1]
    AR_desc = eq.aspect_ratio

    assert abs(AR_vmec - AR_desc) < 5e-3
10,213
def markdown(code: str) -> str:
    """Convert markdown to HTML using markdown2."""
    return markdown2.markdown(code, extras=markdown_extensions)
10,214
async def post_notification(request): """ Create a new notification to run a specific plugin :Example: curl -X POST http://localhost:8081/fledge/notification -d '{"name": "Test Notification", "description":"Test Notification", "rule": "threshold", "channel": "email", "notification_type": "one shot", "enabled": false}' curl -X POST http://localhost:8081/fledge/notification -d '{"name": "Test Notification", "description":"Test Notification", "rule": "threshold", "channel": "email", "notification_type": "one shot", "enabled": false, "rule_config": {}, "delivery_config": {}}' """ try: notification_service = ServiceRegistry.get(s_type=ServiceRecord.Type.Notification.name) _address, _port = notification_service[0]._address, notification_service[0]._port except service_registry_exceptions.DoesNotExist: raise web.HTTPNotFound(reason="No Notification service available.") try: data = await request.json() if not isinstance(data, dict): raise ValueError('Data payload must be a valid JSON') name = data.get('name', None) description = data.get('description', None) rule = data.get('rule', None) channel = data.get('channel', None) notification_type = data.get('notification_type', None) enabled = data.get('enabled', None) rule_config = data.get('rule_config', {}) delivery_config = data.get('delivery_config', {}) retrigger_time = data.get('retrigger_time', None) try: if retrigger_time: if float(retrigger_time) > 0 and float(retrigger_time).is_integer(): pass else: raise ValueError except ValueError: raise ValueError('Invalid retrigger_time property in payload.') if name is None or name.strip() == "": raise ValueError('Missing name property in payload.') if description is None: raise ValueError('Missing description property in payload.') if rule is None: raise ValueError('Missing rule property in payload.') if channel is None: raise ValueError('Missing channel property in payload.') if notification_type is None: raise ValueError('Missing notification_type property in payload.') if utils.check_reserved(name) is False: raise ValueError('Invalid name property in payload.') if utils.check_reserved(rule) is False: raise ValueError('Invalid rule property in payload.') if utils.check_reserved(channel) is False: raise ValueError('Invalid channel property in payload.') if notification_type not in NOTIFICATION_TYPE: raise ValueError('Invalid notification_type property in payload.') if enabled is not None: if enabled not in ['true', 'false', True, False]: raise ValueError('Only "true", "false", true, false are allowed for value of enabled.') is_enabled = "true" if ((type(enabled) is str and enabled.lower() in ['true']) or ( (type(enabled) is bool and enabled is True))) else "false" storage = connect.get_storage_async() config_mgr = ConfigurationManager(storage) curr_config = await config_mgr.get_category_all_items(name) if curr_config is not None: raise ValueError("A Category with name {} already exists.".format(name)) try: # Get default config for rule and channel plugins url = '{}/plugin'.format(request.url) try: # When authentication is mandatory we need to pass token in request header auth_token = request.token except AttributeError: auth_token = None list_plugins = json.loads(await _hit_get_url(url, auth_token)) r = list(filter(lambda rules: rules['name'] == rule, list_plugins['rules'])) c = list(filter(lambda channels: channels['name'] == channel, list_plugins['delivery'])) if len(r) == 0 or len(c) == 0: raise KeyError rule_plugin_config = r[0]['config'] delivery_plugin_config = c[0]['config'] except KeyError: 
raise ValueError("Invalid rule plugin {} and/or delivery plugin {} supplied.".format(rule, channel)) # Verify if rule_config contains valid keys if rule_config != {}: for k, v in rule_config.items(): if k not in rule_plugin_config: raise ValueError("Invalid key {} in rule_config {} supplied for plugin {}.".format(k, rule_config, rule)) # Verify if delivery_config contains valid keys if delivery_config != {}: for k, v in delivery_config.items(): if k not in delivery_plugin_config: raise ValueError( "Invalid key {} in delivery_config {} supplied for plugin {}.".format(k, delivery_config, channel)) # First create templates for notification and rule, channel plugins post_url = 'http://{}:{}/notification/{}'.format(_address, _port, urllib.parse.quote(name)) await _hit_post_url(post_url) # Create Notification template post_url = 'http://{}:{}/notification/{}/rule/{}'.format(_address, _port, urllib.parse.quote(name), urllib.parse.quote(rule)) await _hit_post_url(post_url) # Create Notification rule template post_url = 'http://{}:{}/notification/{}/delivery/{}'.format(_address, _port, urllib.parse.quote(name), urllib.parse.quote(channel)) await _hit_post_url(post_url) # Create Notification delivery template # Create configurations notification_config = { "description": description, "rule": rule, "channel": channel, "notification_type": notification_type, "enable": is_enabled, } if retrigger_time: notification_config["retrigger_time"] = retrigger_time await _update_configurations(config_mgr, name, notification_config, rule_config, delivery_config) audit = AuditLogger(storage) await audit.information('NTFAD', {"name": name}) except ValueError as ex: raise web.HTTPBadRequest(reason=str(ex)) except Exception as e: raise web.HTTPInternalServerError(reason=str(e)) else: return web.json_response({'result': "Notification {} created successfully".format(name)})
10,215
def test_send_file_to_router(monkeypatch, capsys): """ . """ # pylint: disable=unused-argument @counter_wrapper def get_commands(*args, **kwargs): """ . """ return "commands" @counter_wrapper def add_log(log: Log, cursor=None): """ . """ assert ( log.message == "Adding command set /tmp/foo.sh to router" ), "Log has correct file name" monkeypatch.setattr(deploy_helper, "generate_bash_commands", get_commands) monkeypatch.setattr(db, "add_deployment_log", add_log) monkeypatch.setattr( deploy_helper, "write_data_to_router_file", lambda *args, **kwargs: False ) with pytest.raises(ValueError): deployment.send_file_to_router( "before", "after", None, ["commands"], {}, "/tmp/foo.sh" ) assert get_commands.counter == 1, "Commands generated" assert add_log.counter == 1, "Log added" printed = capsys.readouterr() assert printed.out == "Failed to write /tmp/foo.sh to router\n", "Error printed" monkeypatch.setattr( deploy_helper, "write_data_to_router_file", lambda *args, **kwargs: True ) deployment.send_file_to_router( "before", "after", None, ["commands"], {}, "/tmp/foo.sh" ) assert get_commands.counter == 2, "Commands generated" assert add_log.counter == 2, "Log added"
10,216
def sem_get(epc):
    """Property value request 'Get'"""
    global tid_counter
    frame = sem.GET_FRAME_DICT['get_' + epc]
    tid_counter = tid_counter + 1 if tid_counter + 1 != 65536 else 0  # TID count-up
    frame = sem.change_tid_frame(tid_counter, frame)
    res = y3.udp_send(1, ip6, True, y3.Y3_UDP_ECHONET_PORT, frame)
10,217
def serialize_entities_graph(graph: GeNeG):
    """ Serialize the complete entity graph as individual files. """
    utils.get_logger().info('GeNeG: Serializing the entity graph as individual files..')
    _write_lines_to_file(_get_lines_metadata(graph), 'results.geneg_entities.metadata')
    _write_lines_to_file(_get_lines_instance_types(graph), 'results.geneg_entities.instances_types')
    _write_lines_to_file(_get_lines_instances_labels(graph), 'results.geneg_entities.instances_labels')
    _write_lines_to_file(_get_lines_instances_metadata_resources(graph), 'results.geneg_entities.instances_metadata_resources')
    _write_lines_to_file(_get_lines_instances_event_mapping(graph), 'results.geneg_entities.instances_event_mapping')
    _write_lines_to_file(_get_lines_event_relations(graph), 'results.geneg_entities.event_relations')
    _write_lines_to_file(_get_lines_wiki_relations(graph), 'results.geneg_entities.wiki_relations')
    utils.get_logger().info('GeNeG: Completed serialization.\n')
10,218
def smiles2mol(smiles):
    """Convert SMILES string into rdkit.Chem.rdchem.Mol.

    Args:
        smiles: str, a SMILES string.

    Returns:
        mol: rdkit.Chem.rdchem.Mol
    """
    smiles = canonicalize(smiles)
    mol = Chem.MolFromSmiles(smiles)
    if mol is None:
        return None
    Chem.Kekulize(mol)
    return mol
10,219
def parallel_execute(objects, func, get_name, msg, get_deps=None): """Runs func on objects in parallel while ensuring that func is ran on object only after it is ran on all its dependencies. get_deps called on object must return a collection with its dependencies. get_name called on object must return its name. """ objects = list(objects) stream = get_output_stream(sys.stderr) writer = ParallelStreamWriter(stream, msg) for obj in objects: writer.initialize(get_name(obj)) q = setup_queue(objects, func, get_deps, get_name) done = 0 errors = {} error_to_reraise = None returned = [None] * len(objects) while done < len(objects): try: obj, result, exception = q.get(timeout=1) except Empty: continue # See https://github.com/docker/compose/issues/189 except thread.error: raise ShutdownException() if exception is None: writer.write(get_name(obj), 'done') returned[objects.index(obj)] = result elif isinstance(exception, APIError): errors[get_name(obj)] = exception.explanation writer.write(get_name(obj), 'error') else: errors[get_name(obj)] = exception error_to_reraise = exception done += 1 for obj_name, error in errors.items(): stream.write("\nERROR: for {} {}\n".format(obj_name, error)) if error_to_reraise: raise error_to_reraise return returned
10,220
def process_config(config_path, script_dir):
    """Process the user's config file and set defaults.

    Processes the user config file, sets defaults, prints an overview
    and writes the processed config parameters to a temporary config file.

    Args:
        config_path(str): path to user config file
        script_dir(str): path of scripts directory
    """
    # read parameters from config file
    config_obj = yaml.safe_load(open(config_path, 'r'))
    config_vars = set_config_defaults(config_obj)
    config_vars["script_dir"] = script_dir
    config_vars["usr_cfg_path"] = config_path
    config_vars["cfg_path"] = config_vars.get('output_path') + 'tmp/tmp.cfg.yml'

    # write config to file and to console
    write_cfg2file(config_vars)
    write_run_overview(config_path, config_vars)
10,221
def build_term_map(deg, blocklen):
    """
    Builds term map (degree, index) -> term

    :param deg:
    :param blocklen:
    :return:
    """
    term_map = [[0] * comb(blocklen, x, True) for x in range(deg + 1)]
    for dg in range(1, deg + 1):
        for idx, x in enumerate(term_generator(dg, blocklen - 1)):
            term_map[dg][idx] = x
    return term_map
10,222
def gen_sets():
    """ List of names of all available problem generators """
    return registered_gens.keys()
10,223
async def _ensure_system_is(status: TecStatus) -> None:
    """Ensure that the TEC subsystem status is `status` and raise otherwise.

    :raises TecStatusError: System is not `status`.
    """
    tec_status = await get_tec_status()  # type: TecStatus
    if tec_status != status:
        raise TecStatusError(
            "TEC system is {}, refusing to do thing.".format(repr(tec_status)))
10,224
def is_valid_currency(currency_: str) -> bool:
    """
    is_valid_currency: check whether the given currency is valid.

    @currency_(str): currency code
    return(bool): True if currency_ has an entry in both FROM_CNY and TO_CNY
    """
    return currency_ in FROM_CNY and currency_ in TO_CNY
10,225
def load_gene_prefixes() -> List[Tuple[str, str, str]]:
    """Returns FamPlex gene prefixes as a list of rows.

    Returns
    -------
    list
        List of lists corresponding to rows in gene_prefixes.csv.
        Each row has three columns [Pattern, Category, Notes].
    """
    return _load_csv(GENE_PREFIXES_PATH)
10,226
def create_page_panels_base(num_panels=0, layout_type=None, type_choice=None, page_name=None): """ This function creates the base panels for one page it specifies how a page should be layed out and how many panels should be in it :param num_panels: how many panels should be on a page if 0 then the function chooses, defaults to 0 :type num_panels: int, optional :param layout_type: whether the page should consist of vertical, horizontal or both types of panels, defaults to None :type layout_type: str, optional :param type_choice: If having selected vh panels select a type of layout specifically, defaults to None :type type_choice: str, optional :param page_name: A specific name for the page :type page_name: str, optional :return: A Page object with the panels initalized :rtype: Page """ # TODO: Skew panel number distribution # Page dimensions turned to coordinates topleft = (0.0, 0.0) topright = (cfg.page_width, 0.0) bottomleft = (0.0, cfg.page_height) bottomright = cfg.page_size coords = [ topleft, topright, bottomright, bottomleft ] if layout_type is None: layout_type = np.random.choice(["v", "h", "vh"]) # Panels encapsulated and returned within page if page_name is None: page = Page(coords, layout_type, num_panels) else: page = Page(coords, layout_type, num_panels, name=page_name) # If you want only vertical panels if layout_type == "v": max_num_panels = 4 if num_panels < 1: num_panels = np.random.choice([3, 4]) page.num_panels = num_panels else: page.num_panels = num_panels draw_n_shifted(num_panels, page, "v") # If you want only horizontal panels elif layout_type == "h": max_num_panels = 5 if num_panels < 1: num_panels = np.random.randint(3, max_num_panels+1) page.num_panels = num_panels else: page.num_panels = num_panels draw_n_shifted(num_panels, page, "h") # If you want both horizontal and vertical panels elif layout_type == "vh": max_num_panels = 8 if num_panels < 1: num_panels = np.random.randint(2, max_num_panels+1) page.num_panels = num_panels else: page.num_panels = num_panels if num_panels == 2: # Draw 2 rectangles # vertically or horizontally horizontal_vertical = np.random.choice(["h", "v"]) draw_two_shifted(page, horizontal_vertical) if num_panels == 3: # Draw 2 rectangles # Vertically or Horizontally horizontal_vertical = np.random.choice(["h", "v"]) draw_two_shifted(page, horizontal_vertical) next_div = invert_for_next(horizontal_vertical) # Pick one and divide it into 2 rectangles choice_idx = choose(page) choice = page.get_child(choice_idx) draw_two_shifted(choice, next_div) if num_panels == 4: horizontal_vertical = np.random.choice(["h", "v"]) # Possible layouts with 4 panels if type_choice is None: type_choice = np.random.choice(["eq", "uneq", "div", "trip", "twoonethree"]) # Draw two rectangles if type_choice == "eq": draw_two_shifted(page, horizontal_vertical, shift=0.5) next_div = invert_for_next(horizontal_vertical) # Divide each into 2 rectangles equally shift_min = 25 shift_max = 75 shift = np.random.randint(shift_min, shift_max) shift = shift/100 draw_two_shifted(page.get_child(0), next_div, shift) draw_two_shifted(page.get_child(1), next_div, shift) # Draw two rectangles elif type_choice == "uneq": draw_two_shifted(page, horizontal_vertical, shift=0.5) next_div = invert_for_next(horizontal_vertical) # Divide each into 2 rectangles unequally draw_two_shifted(page.get_child(0), next_div) draw_two_shifted(page.get_child(1), next_div) elif type_choice == "div": draw_two_shifted(page, horizontal_vertical, shift=0.5) next_div = invert_for_next(horizontal_vertical) # 
Pick one and divide into 2 rectangles choice1_idx = choose(page) choice1 = page.get_child(choice1_idx) draw_two_shifted(choice1, next_div) # Pick one of these two and divide that into 2 rectangles choice2_idx = choose(choice1) choice2 = choice1.get_child(choice2_idx) next_div = invert_for_next(next_div) draw_two_shifted(choice2, next_div) # Draw three rectangles elif type_choice == "trip": draw_n(3, page, horizontal_vertical) # Pick one and divide it into two choice_idx = choose(page) choice = page.get_child(choice_idx) next_div = invert_for_next(horizontal_vertical) draw_two_shifted(choice, next_div) # Draw two rectangles elif type_choice == "twoonethree": draw_two_shifted(page, horizontal_vertical) # Pick one and divide it into 3 rectangles choice_idx = choose(page) choice = page.get_child(choice_idx) next_div = invert_for_next(horizontal_vertical) draw_n_shifted(3, choice, next_div) if num_panels == 5: # Draw two rectangles horizontal_vertical = np.random.choice(["h", "v"]) # Possible layouts with 5 panels if type_choice is None: type_choice = np.random.choice(["eq", "uneq", "div", "twotwothree", "threetwotwo", "fourtwoone"]) if type_choice == "eq" or type_choice == "uneq": draw_two_shifted(page, horizontal_vertical, shift=0.5) next_div = invert_for_next(horizontal_vertical) # Pick one and divide it into two then choice_idx = choose(page) choice = page.get_child(choice_idx) draw_two_shifted(choice, next_div) # Divide each into 2 rectangles equally if type_choice == "eq": shift_min = 25 shift_max = 75 shift = np.random.randint(shift_min, shift_max) set_shift = shift / 100 else: # Divide each into 2 rectangles unequally set_shift = None next_div = invert_for_next(next_div) draw_two_shifted(choice.get_child(0), next_div, shift=set_shift) draw_two_shifted(choice.get_child(1), next_div, shift=set_shift) # Draw two rectangles elif type_choice == "div": draw_two_shifted(page, horizontal_vertical, shift=0.5) next_div = invert_for_next(horizontal_vertical) # Divide both equally draw_two_shifted(page.get_child(0), next_div) draw_two_shifted(page.get_child(1), next_div) # Pick one of all of them and divide into two page_child_chosen = np.random.choice(page.children) choice_idx, left_choices = choose_and_return_other( page_child_chosen ) choice = page_child_chosen.get_child(choice_idx) next_div = invert_for_next(next_div) draw_two_shifted(choice, horizontal_vertical=next_div, shift=0.5 ) # Draw two rectangles elif type_choice == "twotwothree": draw_two_shifted(page, horizontal_vertical, shift=0.5) next_div = invert_for_next(horizontal_vertical) # Pick which one gets 2 and which gets 3 choice_idx, left_choices = choose_and_return_other(page) choice = page.get_child(choice_idx) other = page.get_child(left_choices[0]) # Divide one into 2 next_div = invert_for_next(horizontal_vertical) draw_two_shifted(choice, next_div) # Divide other into 3 draw_n(3, other, next_div) # Draw 3 rectangles (horizontally or vertically) elif type_choice == "threetwotwo": draw_n(3, page, horizontal_vertical) next_div = invert_for_next(horizontal_vertical) choice1_idx, left_choices = choose_and_return_other(page) choice2_idx = np.random.choice(left_choices) choice1 = page.get_child(choice1_idx) choice2 = page.get_child(choice2_idx) # Pick two and divide each into two draw_two_shifted(choice1, next_div) draw_two_shifted(choice2, next_div) # Draw 4 rectangles vertically elif type_choice == "fourtwoone": draw_n(4, page, horizontal_vertical) # Pick one and divide into two choice_idx = choose(page) choice = 
page.get_child(choice_idx) next_div = invert_for_next(horizontal_vertical) draw_two_shifted(choice, next_div) if num_panels == 6: # Possible layouts with 6 panels if type_choice is None: type_choice = np.random.choice(["tripeq", "tripuneq", "twofourtwo", "twothreethree", "fourtwotwo"]) horizontal_vertical = np.random.choice(["v", "h"]) # Draw 3 rectangles (V OR H) if type_choice == "tripeq" or type_choice == "tripuneq": draw_n_shifted(3, page, horizontal_vertical) # Split each equally if type_choice == "tripeq": shift = np.random.randint(25, 75) shift = shift/100 # Split each unequally else: shift = None next_div = invert_for_next(horizontal_vertical) for panel in page.children: draw_two_shifted(panel, next_div, shift=shift) # Draw 2 rectangles elif type_choice == "twofourtwo": draw_two_shifted(page, horizontal_vertical) # Split into 4 one half 2 in another next_div = invert_for_next(horizontal_vertical) draw_n_shifted(4, page.get_child(0), next_div) draw_two_shifted(page.get_child(1), next_div) # Draw 2 rectangles elif type_choice == "twothreethree": # Split 3 in each draw_two_shifted(page, horizontal_vertical) next_div = invert_for_next(horizontal_vertical) for panel in page.children: # Allow each inital panel to grow to up to 75% of 100/n n = 3 shifts = [] choice_max = round((100/n)*1.5) choice_min = round((100/n)*0.5) for i in range(0, n): shift_choice = np.random.randint( choice_min, choice_max ) choice_max = choice_max + ((100/n) - shift_choice) shifts.append(shift_choice) to_add_or_remove = (100 - sum(shifts))/len(shifts) normalized_shifts = [] for shift in shifts: new_shift = shift + to_add_or_remove normalized_shifts.append(new_shift/100) draw_n_shifted(3, panel, next_div, shifts=normalized_shifts ) # Draw 4 rectangles elif type_choice == "fourtwotwo": draw_n_shifted(4, page, horizontal_vertical) # Split two of them choice1_idx, left_choices = choose_and_return_other(page) choice2_idx = np.random.choice(left_choices) choice1 = page.get_child(choice1_idx) choice2 = page.get_child(choice2_idx) next_div = invert_for_next(horizontal_vertical) draw_two_shifted(choice1, next_div) draw_two_shifted(choice2, next_div) if num_panels == 7: # Possible layouts with 7 panels types = ["twothreefour", "threethreetwotwo", "threefourtwoone", "threethreextwoone", "fourthreextwo"] if type_choice is None: type_choice = np.random.choice(types) # Draw two split 3-4 - HV # Draw two rectangles if type_choice == "twothreefour": horizontal_vertical = np.random.choice(["h", "v"]) draw_two_shifted(page, horizontal_vertical, shift=0.5) # Pick one and split one into 4 rectangles choice_idx, left_choices = choose_and_return_other(page) choice = page.get_child(choice_idx) other = page.get_child(left_choices[0]) next_div = invert_for_next(horizontal_vertical) draw_n_shifted(4, choice, next_div) # Some issue with the function calls and seeding n = 3 shifts = [] choice_max = round((100/n)*1.5) choice_min = round((100/n)*0.5) for i in range(0, n): shift_choice = np.random.randint(choice_min, choice_max) choice_max = choice_max + ((100/n) - shift_choice) shifts.append(shift_choice) to_add_or_remove = (100 - sum(shifts))/len(shifts) normalized_shifts = [] for shift in shifts: new_shift = shift + to_add_or_remove normalized_shifts.append(new_shift/100) # Pick another and split into 3 rectangles draw_n_shifted(3, other, next_div, shifts=normalized_shifts) # Draw three rectangles elif type_choice == "threethreetwotwo": draw_n(3, page, "h") # Pick one and split it into 3 rectangles choice_idx, left_choices = 
choose_and_return_other(page) choice = page.get_child(choice_idx) draw_n_shifted(3, choice, "v") # Split the other two into 2 rectangles draw_two_shifted(page.get_child(left_choices[0]), "v") draw_two_shifted(page.get_child(left_choices[1]), "v") # Draw 3 rectangles elif type_choice == "threefourtwoone": draw_n(3, page, "h") # Pick two of three rectangles and let one be choice_idx, left_choices = choose_and_return_other(page) choice = page.get_child(choice_idx) other_idx = np.random.choice(left_choices) other = page.get_child(other_idx) # Of the picked split one into 4 rectangles draw_n_shifted(4, choice, "v") # Split the other into 2 rectangles draw_two_shifted(other, "v") # Draw 3 rectangles elif type_choice == "threethreextwoone": draw_n(3, page, "h") # Pick two and leave one choice_idx, left_choices = choose_and_return_other(page) choice = page.get_child(choice_idx) other = page.get_child(left_choices[0]) # Of the picked split one into 3 draw_n_shifted(3, choice, "v") # Some issue with the function calls and seeding n = 3 shifts = [] choice_max = round((100/n)*1.5) choice_min = round((100/n)*0.5) for i in range(0, n): shift_choice = np.random.randint(choice_min, choice_max) choice_max = choice_max + ((100/n) - shift_choice) shifts.append(shift_choice) to_add_or_remove = (100 - sum(shifts))/len(shifts) normalized_shifts = [] for shift in shifts: new_shift = shift + to_add_or_remove normalized_shifts.append(new_shift/100) # Split the other into 3 as well draw_n_shifted(3, other, "v", shifts=normalized_shifts) # Draw 4 split 3x2 - HV # Draw 4 rectangles elif type_choice == "fourthreextwo": horizontal_vertical = np.random.choice(["h", "v"]) draw_n(4, page, horizontal_vertical) # Choose one and leave as is choice_idx, left_choices = choose_and_return_other(page) # Divide the rest into two next_div = invert_for_next(horizontal_vertical) for panel in left_choices: draw_two_shifted(page.get_child(panel), next_div) if num_panels == 8: # Possible layouts for 8 panels types = ["fourfourxtwoeq", "fourfourxtwouneq", "threethreethreetwo", "threefourtwotwo", "threethreefourone"] if type_choice is None: type_choice = np.random.choice(types) # Draw 4 rectangles # equal or uneqal 4-4x2 if type_choice == types[0] or type_choice == types[1]: # panels = draw_n_shifted(4, *coords, "h") draw_n(4, page, "h") # Equal if type_choice == "fourfourxtwoeq": shift_min = 25 shift_max = 75 shift = np.random.randint(shift_min, shift_max) set_shift = shift/100 # Unequal else: set_shift = None # Drivide each into two for panel in page.children: draw_two_shifted(panel, "v", shift=set_shift) # Where three rectangles need to be drawn if type_choice in types[2:]: draw_n(3, page, "h") # Draw 3 rectangles then if type_choice == "threethreethreetwo": # Choose one and divide it into two choice_idx, left_choices = choose_and_return_other(page) choice = page.get_child(choice_idx) draw_two_shifted(choice, "v") # Divide the rest into 3 for panel in left_choices: # Some issue with the function calls and seeding n = 3 shifts = [] choice_max = round((100/n)*1.5) choice_min = round((100/n)*0.5) for i in range(0, n): shift_choice = np.random.randint( choice_min, choice_max ) choice_max = choice_max + ((100/n) - shift_choice) shifts.append(shift_choice) to_add_or_remove = (100 - sum(shifts))/len(shifts) normalized_shifts = [] for shift in shifts: new_shift = shift + to_add_or_remove normalized_shifts.append(new_shift/100) draw_n_shifted(3, page.get_child(panel), "v", shifts=normalized_shifts ) # Draw 3 rectangles then elif type_choice == 
"threefourtwotwo": # Choosen one and divide it into 4 choice_idx, left_choices = choose_and_return_other(page) choice = page.get_child(choice_idx) draw_n_shifted(4, choice, "v") for panel in left_choices: draw_two_shifted(page.get_child(panel), "v") # Draw 3 3-4-1 - H # Draw three rectangles then elif type_choice == "threethreefourone": # Choose two and leave one as is choice_idx, left_choices = choose_and_return_other(page) choice = page.get_child(choice_idx) other_idx = np.random.choice(left_choices) other = page.get_child(other_idx) # Divide one into 3 rectangles draw_n_shifted(3, choice, "v") # Some issue with the function calls and seeding n = 4 shifts = [] choice_max = round((100/n)*1.5) choice_min = round((100/n)*0.5) for i in range(0, n): shift_choice = np.random.randint( choice_min, choice_max ) choice_max = choice_max + ((100/n) - shift_choice) shifts.append(shift_choice) to_add_or_remove = (100 - sum(shifts))/len(shifts) normalized_shifts = [] for shift in shifts: new_shift = shift + to_add_or_remove normalized_shifts.append(new_shift/100) # Divide the other into 4 rectangles draw_n_shifted(4, other, "v", shifts=normalized_shifts) return page
10,227
def uri2dict(uri):
    """Take a license uri and convert it into a dictionary of values."""
    if uri.startswith(LICENSES_BASE) and uri.endswith('/'):
        base = LICENSES_BASE
        license_info = {}
        raw_info = uri[len(base):]
        raw_info = raw_info.rstrip('/')
        info_list = raw_info.split('/')

        if len(info_list) not in (1, 2, 3):
            raise InvalidURIError("Invalid Creative Commons URI: <%s>" % uri)

        retval = dict(code=info_list[0])
        if len(info_list) > 1:
            retval['version'] = info_list[1]
        if len(info_list) > 2:
            retval['jurisdiction'] = info_list[2]

        # XXX perform any validation on the dict produced?
        return retval

    elif uri.startswith(CC0_BASE) and uri.endswith('/'):
        base = CC0_BASE
        retval = {'code': 'CC0', 'jurisdiction': None}
        retval['version'] = uri.rstrip('/').split('/')[-1]
        return retval

    elif uri.startswith(PUBLICDOMAIN_MARK_BASE) and uri.endswith('/'):
        base = PUBLICDOMAIN_MARK_BASE
        retval = {'code': 'mark', 'jurisdiction': None}
        retval['version'] = uri.rstrip('/').split('/')[-1]
        return retval

    else:
        raise InvalidURIError("Invalid Creative Commons URI: <%s>" % uri)
10,228
def Reboot() -> None:
    """Reboots the device"""
    try:
        _MPuLib.Reboot.restype = c_int32
        CTS3Exception._check_error(_MPuLib.Reboot())
    finally:
        CloseCommunication()
10,229
def test_clean_str(text, language='english'): """ Method to pre-process an text for training word embeddings. This is post by Sebastian Ruder: https://s3.amazonaws.com/aylien-main/data/multilingual-embeddings/preprocess.py and is used at this paper: https://arxiv.org/pdf/1609.02745.pdf """ """ Cleans an input string and prepares it for tokenization. :type text: unicode :param text: input text :return the cleaned input string """ text = text.lower() # replace all numbers with 0 text = re.sub(r"[-+]?[-/.\d]*[\d]+[:,.\d]*", ' 0 ', text) # English-specific pre-processing if language == 'english': text = re.sub(r"\'s", " \'s", text) text = re.sub(r"\'ve", " \'ve", text) text = re.sub(r"n\'t", " n\'t", text) text = re.sub(r"\'re", " \'re", text) text = re.sub(r"\'d", " \'d", text) text = re.sub(r"\'ll", " \'ll", text) elif language == 'french': # French-specific pre-processing text = re.sub(r"c\'", " c\' ", text) text = re.sub(r"l\'", " l\' ", text) text = re.sub(r"j\'", " j\' ", text) text = re.sub(r"d\'", " d\' ", text) text = re.sub(r"s\'", " s\' ", text) text = re.sub(r"n\'", " n\' ", text) text = re.sub(r"m\'", " m\' ", text) text = re.sub(r"qu\'", " qu\' ", text) elif language == 'spanish': # Spanish-specific pre-processing text = re.sub(r"¡", " ", text) elif language == 'chinese': pass text = re.sub(r'[,:;\.\(\)-/"<>]', " ", text) # separate exclamation marks and question marks text = re.sub(r"!+", " ! ", text) text = re.sub(r"\?+", " ? ", text) text = re.sub(r"\s+", " ", text) return text.strip()
10,230
def plot_wpm(output): """Reads `output` and plots typing speeds uniformly apart. Adds a trendline. """ df = pd.read_csv( output, header=None, names=["timestamp", "wpm", "accuracy", "actual_duration", "duration", "hash"], ) if len(df) < 2: print( "More data is needed, before analysing is possible. " + "A minimum of 2 tests is required." ) return df.timestamp = pd.to_datetime(df.timestamp) # df = df.set_index(df.timestamp) min_wpm = None gdf = defaultdict(lambda: [[], pd.DataFrame()]) for index, row in df.iterrows(): h = row["hash"] indexes, hdf = gdf[h] indexes.append(index) hdf = hdf.append(row) gdf[h] = indexes, hdf if min_wpm is None or row["wpm"] < min_wpm: min_wpm = row["wpm"] # grouped = sorted(gdf.items(), key=lambda x: x[1][1]['wpm'].mean(), # reverse=True) grouped = gdf.items() fig, ax = plt.subplots() colors = cycle(sns.color_palette()) for h, (indexes, hdf) in grouped: if h in known_hashes: h = known_hashes[h] x = indexes y = hdf.wpm color = next(colors) ax.plot(x, y, color=color, lw=3, label=h) # ax.fill_between(x, y, min_wpm, facecolor=color, label=h) trendline = np.poly1d(np.polyfit(x, y, 1))(x) ax.plot(x, trendline, "-", lw=4, color="white") ax.plot(x, trendline, "--", lw=2, color=color, label="trendline") ax.plot(df.accuracy, color="white", lw=4, alpha=0.5) ax.plot(df.accuracy, color=next(colors), lw=1.5, label="accuracy [%]", alpha=0.5) ax.set_title("typing speed per typing test") ax.set_xlabel("") ax.set_ylabel("typing speed [wpm]") ax.legend() # ticks = plt.yticks()[0] # plt.yticks(np.arange(0, ticks[-1], 10)) plt.xticks(df.index, df.timestamp.dt.date, rotation=90) # label only every 50th tick for i, label in enumerate(ax.xaxis.get_ticklabels()): if i % math.ceil(len(df) / 50): label.set_visible(False) show_diagram()
10,231
def run_venv_script(venv, script, fLOG=None, file=False, is_cmd=False, skip_err_if=None, platform=None, **kwargs): # pragma: no cover """ Runs a script on a vritual environment (the script should be simple). @param venv virtual environment @param script script as a string (not a file) @param fLOG logging function @param file is script a file or a string to execute @param is_cmd if True, script is a command line to run (as a list) for python executable @param skip_err_if do not pay attention to standard error if this string was found in standard output @param platform platform (``sys.platform`` by default) @param kwargs others arguments for function @see fn run_cmd. @return output The function does not work from a virtual environment. """ from ..loghelper import run_cmd if fLOG is None: from ..loghelper import noLOG fLOG = noLOG def filter_err(err): lis = err.split("\n") lines = [] for li in lis: if "missing dependencies" in li: continue if "' misses '" in li: continue lines.append(li) return "\n".join(lines).strip(" \r\n\t") if is_virtual_environment(): raise NotImplementedErrorFromVirtualEnvironment() if platform is None: platform = sys.platform if platform.startswith("win"): exe = os.path.join(venv, "Scripts", "python") else: exe = os.path.join(venv, "bin", "python") if is_cmd: cmd = " ".join([exe] + script) out, err = run_cmd(cmd, wait=True, fLOG=fLOG, **kwargs) err = filter_err(err) if len(err) > 0 and (skip_err_if is None or skip_err_if not in out): raise VirtualEnvError( "unable to run cmd at {2}\n--CMD--\n{3}\n--OUT--\n{0}\n[pyqerror]" "\n{1}".format(out, err, venv, cmd)) return out else: script = ";".join(script.split("\n")) if file: if not os.path.exists(script): raise FileNotFoundError(script) cmd = " ".join([exe, "-u", '"{0}"'.format(script)]) else: cmd = " ".join([exe, "-u", "-c", '"{0}"'.format(script)]) out, err = run_cmd(cmd, wait=True, fLOG=fLOG, **kwargs) err = filter_err(err) if len(err) > 0: raise VirtualEnvError( "Unable to run script at {2}\n--CMD--\n{3}\n--OUT--\n{0}\n" "[pyqerror]\n{1}".format(out, err, venv, cmd)) return out
10,232
def calc_iou(boxes1, boxes2, scope='iou'):
    """calculate ious

    Args:
        boxes1: 5-D tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL, 4] ====> (x_center, y_center, w, h)
        boxes2: 5-D tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL, 4] ===> (x_center, y_center, w, h)
    Return:
        iou: 4-D tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
    """
    with tf.variable_scope(scope):
        # transform (x_center, y_center, w, h) to (x1, y1, x2, y2)
        boxes1_t = tf.stack([boxes1[..., 0] - boxes1[..., 2] / 2.0,
                             boxes1[..., 1] - boxes1[..., 3] / 2.0,
                             boxes1[..., 0] + boxes1[..., 2] / 2.0,
                             boxes1[..., 1] + boxes1[..., 3] / 2.0],
                            axis=-1)

        boxes2_t = tf.stack([boxes2[..., 0] - boxes2[..., 2] / 2.0,
                             boxes2[..., 1] - boxes2[..., 3] / 2.0,
                             boxes2[..., 0] + boxes2[..., 2] / 2.0,
                             boxes2[..., 1] + boxes2[..., 3] / 2.0],
                            axis=-1)

        # calculate the left up point & right down point
        lu = tf.maximum(boxes1_t[..., :2], boxes2_t[..., :2])
        rd = tf.minimum(boxes1_t[..., 2:], boxes2_t[..., 2:])

        # intersection
        intersection = tf.maximum(0.0, rd - lu)
        inter_square = intersection[..., 0] * intersection[..., 1]

        # calculate the boxs1 square and boxs2 square
        square1 = boxes1[..., 2] * boxes1[..., 3]
        square2 = boxes2[..., 2] * boxes2[..., 3]

        union_square = tf.maximum(square1 + square2 - inter_square, 1e-10)

    return tf.clip_by_value(inter_square / union_square, 0.0, 1.0)
10,233
def not_before(cert):
    """
    Gets the naive datetime of the certificate's 'not_before' field.
    This field denotes the first date in time at which the given certificate is valid.

    :param cert:
    :return: Datetime
    """
    return cert.not_valid_before
10,234
def get_data_from_dict_for_2pttype(type1, type2, datadict):
    """
    Given strings identifying the type of 2pt data in a fits file and a dictionary
    of 2pt data (i.e. the blinding factors), returns the data from the dictionary
    matching those types.
    """
    # spectra type codes in fits file, under hdutable.header['quant1'] and quant2
    galaxy_position_fourier = "GPF"
    galaxy_shear_emode_fourier = "GEF"
    galaxy_shear_bmode_fourier = "GBF"
    galaxy_position_real = "GPR"
    galaxy_shear_plus_real = "G+R"
    galaxy_shear_minus_real = "G-R"

    if type1 == galaxy_position_fourier and type2 == galaxy_position_fourier:
        yfromdict = datadict['gal_gal_cl']
        xfromdict = datadict['gal_gal_l']
    elif (type1 == galaxy_shear_emode_fourier and type2 == galaxy_position_fourier) or \
         (type2 == galaxy_shear_emode_fourier and type1 == galaxy_position_fourier):
        yfromdict = datadict['gal_shear_cl']
        xfromdict = datadict['gal_shear_l']
    elif type1 == galaxy_shear_emode_fourier and type2 == galaxy_shear_emode_fourier:
        yfromdict = datadict['shear_shear_cl']
        xfromdict = datadict['shear_shear_l']
    elif type1 == galaxy_position_real and type2 == galaxy_position_real:
        yfromdict = datadict['gal_gal_xi']
        xfromdict = datadict['gal_gal_theta']
    elif (type1 == galaxy_shear_plus_real and type2 == galaxy_position_real) or \
         (type2 == galaxy_shear_plus_real and type1 == galaxy_position_real):
        yfromdict = datadict['gal_shear_xi']
        xfromdict = datadict['gal_shear_theta']
    elif type1 == galaxy_shear_plus_real and type2 == galaxy_shear_plus_real:
        yfromdict = datadict['shear_shear_xip']
        xfromdict = datadict['shear_shear_theta']
    elif type1 == galaxy_shear_minus_real and type2 == galaxy_shear_minus_real:
        yfromdict = datadict['shear_shear_xim']
        xfromdict = datadict['shear_shear_theta']
    else:
        print("Spectra type {0:s} - {1:s} not recognized.".format(type1, type2))
    return xfromdict, yfromdict
10,235
def simple_unweighted_distance(g, source, return_as_dicts=True):
    """Returns the unweighted shortest path length between nodes and source."""
    dist_dict = nx.shortest_path_length(g, source)
    if return_as_dicts:
        return dist_dict
    else:
        return np.fromiter((dist_dict[ni] for ni in g), dtype=int)
10,236
def A070939(i: int = 0) -> int:
    """Length of binary representation of n."""
    return len(f"{i:b}")
10,237
def feed_pump(pin: int, water_supply_time: int = FEED_PUMP_DEFAULT_TIME) -> bool:
    """feed water

    Parameters
    ----------
    pin : int
        target gpio (BCM)
    water_supply_time : int
        water feeding time

    Returns
    -------
    bool
        Was water feeding successful?
    """
    is_running = gpio_read(pin)
    if is_running:
        return False

    # pump on
    gpio_write(pin, 1)
    try:
        publish_device_state()
    except:
        gpio_write(pin, 0)
        return False

    time.sleep(water_supply_time)

    # pump off
    gpio_write(pin, 0)
    publish_device_state()
    return True
10,238
def add_player(url):
    """Add a player to the sc2monitor by Battle.net URL."""
    kwargs = {}
    kwargs['db'] = '{protocol}://{user}:{passwd}@{host}/{db}'.format(
        **db_credentials)
    controller = Controller(**kwargs)
    controller.add_player(url=url)
10,239
def app_nav(context):
    """Renders the main nav, topnav on desktop, sidenav on mobile"""
    url_name = get_url_name(context)
    namespace = get_namespace(context)
    cache_id = "{}:{}x".format(context['request'].user.username, context.request.path)
    cache_key = make_template_fragment_key('app_nav', [cache_id])
    context['app_nav_cache_id'] = cache_id

    # Only bother doing this work if we don't have a cached template render
    if not cache.get(cache_key):
        # Build an app list for the page and user
        app_list = []
        for app in APP_LIST:
            # Check we have access
            if app['access'](context.request.user):
                # Set active flag if namespace matches
                app['active'] = (app['app'] == namespace)
                # Add to returned list
                app_list.append(app)

        context['app_list'] = app_list
        context['app'] = namespace
        if namespace:
            context['page_title'] = get_page_title(
                get_module_nav_list(namespace, url_name, context.request.user), context)

    return context
10,240
def centre_to_zeroes(cartesian_point, centre_point):
    """Converts centre-based coordinates to be in relation to the (0,0) point.

    PIL likes to do things based on (0,0), and in this project I'd like to keep
    the origin at the centre point.

    Parameters
    ----------
    cartesian_point : (numeric)
        x, y coordinates in terms of the centre
    centre_point : (numeric)
        x, y coordinates of the centre
    """
    x = cartesian_point[0] + centre_point[0]
    y = centre_point[1] - cartesian_point[1]
    return x, y
10,241
def check_write_defaults(querier, varenv): """ It is painful to deal with $user, $permission, $authority and $attribution all the time, so this function verifies them and the sets them to member variables. """ if not varenv.get_user_guid(): raise MQLAccessError( None, 'You must specify a valid user to write', user=None) # must check authority before permission as authority affects the check_permission() call later if varenv.get('$authority'): if not varenv.get('$privileged') is Authority: # ***************************************************************************************************************** raise MQLAccessError( None, 'user %(user)s cannot use authority %(authority)s without scope.Authority', user=varenv.get_user_id(), authority=varenv.get('$authority')) # ***************************************************************************************************************** varenv.authority_guid = querier.lookup.lookup_guid( varenv.get('$authority'), varenv) else: varenv.authority_guid = None if varenv.get('$permission'): permission_guid = querier.lookup.lookup_guid( varenv.get('$permission'), varenv) if not check_permission(querier, varenv, permissionguid=permission_guid): # ***************************************************************************************************************** raise MQLAccessError( None, 'User %(user)s cannot create with permission %(permission)s', user=varenv.get_user_id(), permission=permission_guid) # ***************************************************************************************************************** # permission checks out OK (this implies the user checked out OK too) varenv.default_permission_guid = permission_guid else: # ***************************************************************************************************************** raise MQLAccessError( None, 'You must specify a default permission to write with', permission=None) # ***************************************************************************************************************** if varenv.get('$attribution'): attribution_guid = querier.lookup.lookup_guid( varenv.get('$attribution'), varenv) if not check_attribution_to_user(querier, varenv, attribution_guid): # ***************************************************************************************************************** raise MQLAccessError( None, 'User %(user)s cannot attribute to a node %(attribution)s that they did not create, or is not of type /type/attribution', user=varenv.get_user_id(), attribution=varenv.get('$attribution')) # ***************************************************************************************************************** # attribution checks out OK varenv.attribution_guid = attribution_guid else: varenv.attribution_guid = varenv.get_user_guid()
10,242
def get_all_stack_names(cf_client=boto3.client("cloudformation")):
    """
    Get all stack names

    Args:
        cf_client: boto3 CF client

    Returns:
        list of StackName
    """
    LOGGER.info("Attempting to retrieve stack information")
    response = cf_client.describe_stacks()
    LOGGER.info("Retrieved stack information: %s", response)
    return [stack["StackName"] for stack in response["Stacks"]]
10,243
def match_date(date, date_pattern): """ Match a specific date, a four-tuple with no special values, with a date pattern, four-tuple possibly having special values. """ # unpack the date and pattern year, month, day, day_of_week = date year_p, month_p, day_p, day_of_week_p = date_pattern # check the year if year_p == 255: # any year pass elif year != year_p: # specific year return False # check the month if month_p == 255: # any month pass elif month_p == 13: # odd months if (month % 2) == 0: return False elif month_p == 14: # even months if (month % 2) == 1: return False elif month != month_p: # specific month return False # check the day if day_p == 255: # any day pass elif day_p == 32: # last day of the month last_day = calendar.monthrange(year + 1900, month)[1] if day != last_day: return False elif day_p == 33: # odd days of the month if (day % 2) == 0: return False elif day_p == 34: # even days of the month if (day % 2) == 1: return False elif day != day_p: # specific day return False # check the day of week if day_of_week_p == 255: # any day of the week pass elif day_of_week != day_of_week_p: # specific day of the week return False # all tests pass return True
10,244
def get_heating_features(df, fine_grained_HP_types=False): """Get heating type category based on HEATING_TYPE category. heating_system: heat pump, boiler, community scheme etc. heating_source: oil, gas, LPC, electric. Parameters ---------- df : pandas.DataFrame Dataframe that is updated with heating features. fine_grained_HP_types : bool, default=False If True, get different heat pump types (air sourced, ground sourced etc.). If False, return "heat pump" as heating type category. Return --------- df : pandas.DataFrame Updated dataframe with heating system and source.""" # Collections heating_system_types = [] heating_source_types = [] # Get heating types heating_types = df["MAINHEAT_DESCRIPTION"] # Get specific and general heating category for each entry for heating in heating_types: # Set default value system_type = "unknown" source_type = "unknown" # If heating value exists if not (pd.isnull(heating) and isinstance(heating, float)): # Lowercase heating = heating.lower() other_heating_system = [ ("boiler and radiator" in heating), ("boiler & radiator" in heating), ("boiler and underfloor" in heating), ("boiler & underfloor" in heating), ("community scheme" in heating), ("heater" in heating), # not specified heater ] # Different heat pump types # -------------------------- if "ground source heat pump" in heating: system_type = "ground source heat pump" source_type = "electric" elif "air source heat pump" in heating: system_type = "air source heat pump" source_type = "electric" elif "water source heat pump" in heating: system_type = "water source heat pump" source_type = "electric" elif "heat pump" in heating: system_type = "heat pump" source_type = "electric" # Electric heaters # -------------------------- elif "electric storage heaters" in heating: system_type = "storage heater" source_type = "electric" elif "electric underfloor heating" in heating: system_type = "underfloor heating" source_type = "electric" # Warm air # -------------------------- elif "warm air" in heating: system_type = "warm air" source_type = "electric" # Boiler and radiator / Boiler and underfloor / Community scheme / Heater (unspecified) # -------------------------- elif any(other_heating_system): # Set heating system dict heating_system_dict = { "boiler and radiator": "boiler and radiator", "boiler & radiator": "boiler and radiator", "boiler and underfloor": "boiler and underfloor", "boiler & underfloor": "boiler and underfloor", "community scheme": "community scheme", "heater": "heater", # not specified heater (otherwise handeld above) } # Set heating source dict heating_source_dict = { "gas": "gas", ", oil": "oil", # with preceeding comma (!= "boiler") "lpg": "LPG", "electric": "electric", } # If heating system word is found, save respective system type for word, system in heating_system_dict.items(): if word in heating: system_type = system # If heating source word is found, save respective source type for word, source in heating_source_dict.items(): if word in heating: source_type = source # Don't differentiate between heat pump types if not fine_grained_HP_types: if "heat pump" in system_type: system_type = "heat pump" # Save heating system type and source type heating_system_types.append(system_type) heating_source_types.append(source_type) # Add heating system and source to df df["HEATING_SYSTEM"] = heating_system_types df["HEATING_SOURCE"] = heating_source_types return df
10,245
def integrate_eom(initial_conditions, t_span, design_params, SRM1, SRM2):
    """Numerically integrates the zero gravity equations of motion.

    Args:
        initial_conditions (np.array()): Array of initial conditions. Typically set to an array of zeros.
        t_span (np.array()): Time vector (s) over which to integrate the equations of motion.
        design_params (np.array()): Array of design parameters. [r1, r2, d1, d2, Ixx, Iyy, Izz]
            where r1 and r2 are the radial locations of the solid rocket motors (m), d1 and d2 are
            the longitudinal locations of the two motors (m), and Ixx, Iyy, and Izz are the inertia
            values (kg-m^2).
        SRM1 (SolidRocketMotor()): First solid rocket motor organized into a class.
        SRM2 (SolidRocketMotor()): Second solid rocket motor organized into a class.

    Returns:
        (np.array()): Numerical solutions for wx, wy, wz, psi, theta, and phi.
    """
    return odeint(euler_eom, initial_conditions, t_span, args=(design_params, SRM1, SRM2))
10,246
def activation_sparse(net, transformer, images_files):
    """
    Analyze the sparsity of the activation bottom/top blobs.
    Args:
        net: the instance of Caffe inference
        transformer: the Caffe data transformer
        images_files: sparse dataset
    Returns:
        none
    """
    print("\nAnalyze the sparse info of the Activation:")
    # run float32 inference on sparse dataset to analyze activations
    for i, image in enumerate(images_files):
        net_forward(net, image, transformer)
        # analyze bottom/top blob
        for layer in sparse_layer_lists:
            blob = net.blobs[layer.bottom_blob_name].data[0].flatten()
            layer.analyze_bottom_blob(blob)
            blob = net.blobs[layer.top_blob_name].data[0].flatten()
            layer.analyze_top_blob(blob)
    # calculate top blob and flag the sparse channels in each layer
    for layer in sparse_layer_lists:
        layer.sparse_bottom_blob()
        layer.sparse_top_blob()
    return None
10,247
def save(obr, destination=False): """Save the current state of the image.""" if destination: obr.save(destination) else: main = Tk() filename = asksaveasfilename(initialfile="image", defaultextension=".jpg", filetypes=[ ("JPEG", ".jpg"), ("PNG", ".png"), ("BMP", ".bmp"), ("TIF", ".tif")]) if filename != "": obr.save(filename) main.destroy()
10,248
def cfg_load(filename): """Load a config yaml file.""" return omegaconf2namespace(OmegaConf.load(filename))
10,249
def char_to_num(x: str) -> int: """Converts a character to a number :param x: Character :type x: str :return: Corresponding number :rtype: int """ total = 0 for i in range(len(x)): total += (ord(x[::-1][i]) - 64) * (26 ** i) return total
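# Quick sanity checks for char_to_num: it treats the string as a base-26
# "spreadsheet column" label, so "A" -> 1, "Z" -> 26, "AA" -> 27, "AB" -> 28.
assert char_to_num("A") == 1
assert char_to_num("Z") == 26
assert char_to_num("AA") == 27
assert char_to_num("AB") == 28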
10,250
def time_it(f: Callable): """ Timer decorator: shows how long execution of function took. :param f: function to measure :return: / """ def timed(*args, **kwargs): t1 = time.time() res = f(*args, **kwargs) t2 = time.time() log("\'", f.__name__, "\' took ", round(t2 - t1, 3), " seconds to complete.", sep="") return res return timed
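# Illustrative use of the time_it decorator; note that the wrapped function
# calls the module's log() helper when it runs, so that must be importable here.
@time_it
def slow_sum(n: int) -> int:
    return sum(range(n))

# Calling slow_sum(10_000_000) would print something like:
# 'slow_sum' took 0.31 seconds to complete.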
10,251
def has_prefix(sub_s, dictionary): """ :param sub_s: (str) A substring that is constructed by neighboring letters on a 4x4 square grid :return: (bool) If there is any words with prefix stored in sub_s """ s = '' for letter in sub_s: s += letter for words in dictionary: if words.startswith(s): return True return False
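# Small illustration of has_prefix with a toy dictionary list: the prefix
# ['a', 'p'] matches "apple"/"application" while ['z', 'q'] matches nothing.
toy_dictionary = ["apple", "application", "banana"]
assert has_prefix(['a', 'p'], toy_dictionary) is True
assert has_prefix(['z', 'q'], toy_dictionary) is False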
10,252
def search_paths_for_executables(*path_hints): """Given a list of path hints returns a list of paths where to search for an executable. Args: *path_hints (list of paths): list of paths taken into consideration for a search Returns: A list containing the real path of every existing directory in `path_hints` and its `bin` subdirectory if it exists. """ executable_paths = [] for path in path_hints: if not os.path.isdir(path): continue path = os.path.abspath(path) executable_paths.append(path) bin_dir = os.path.join(path, 'bin') if os.path.isdir(bin_dir): executable_paths.append(bin_dir) return executable_paths
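# Hedged usage sketch: build a throwaway directory with a bin/ subfolder and
# confirm both appear in the returned search paths; missing hints are skipped.
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    os.mkdir(os.path.join(tmp, 'bin'))
    paths = search_paths_for_executables(tmp, '/nonexistent/hint')
    assert paths == [os.path.abspath(tmp), os.path.join(os.path.abspath(tmp), 'bin')]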
10,253
def tf_center_crop(images, sides): """Crops central region""" images_shape = tf.shape(images) top = (images_shape[1] - sides[0]) // 2 left = (images_shape[2] - sides[1]) // 2 return tf.image.crop_to_bounding_box(images, top, left, sides[0], sides[1])
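# Minimal check for tf_center_crop (assumes TensorFlow is installed): a batch
# of one 8x8x3 image cropped with sides=(4, 4) keeps the central 4x4 patch.
import tensorflow as tf

batch = tf.reshape(tf.range(1 * 8 * 8 * 3, dtype=tf.float32), (1, 8, 8, 3))
cropped = tf_center_crop(batch, sides=(4, 4))
print(cropped.shape)  # (1, 4, 4, 3)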
10,254
def get_video_features(image_volume, occlusion_volume, max_level, file): """ this function retrieves a spatio-temporal pyramid of features for the purpose of video inpainting :param image_volume: :param occlusion_volume: :param max_level: :param file: :return: feature pyramid """ pass
10,255
def convert_timezone(time_in: datetime.datetime) -> datetime.datetime:
    """
    Convert an automatically generated UTC datetime into local time.
    :param time_in: a UTC time as datetime.datetime
    :return: still a datetime.datetime, but converted to the local time zone
    """
    time_utc = time_in.replace(tzinfo=pytz.timezone("UTC"))
    time_local = time_utc.astimezone(pytz.timezone(settings.TIME_ZONE))
    return time_local
10,256
def gpupdate_install(): """ Install the Update Tool. """ file = '\\gpupdate.zip' url = SERVER + '/GPU/gpupdate.zip' download(url, file) unzip(file, 'GPUpdate') return
10,257
def add_aws_cluster(name, cluster):
    """Add an aws cluster to config."""
    config = get_config()
    config.set("general", "provider", "aws")
    config.set("aws", "current-cluster", name)
    section = "aws.{}".format(name)
    if not config.has_section(section):
        config.add_section(section)
    config.set(section, "cluster", cluster.endpoint)
    write_config(config)
10,258
def convert_bool( key: str, val: bool, attr_type: bool, attr: dict[str, Any] = {}, cdata: bool = False ) -> str: """Converts a boolean into an XML element""" if DEBUGMODE: # pragma: no cover LOG.info( f'Inside convert_bool(): key="{str(key)}", val="{str(val)}", type(val) is: "{type(val).__name__}"' ) key, attr = make_valid_xml_name(key, attr) if attr_type: attr["type"] = get_xml_type(val) attrstring = make_attrstring(attr) return f"<{key}{attrstring}>{str(val).lower()}</{key}>"
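# Rough usage sketch for convert_bool; the exact output assumes the module's
# make_valid_xml_name/make_attrstring helpers leave a plain key and an empty
# attribute dict untouched, which is an assumption rather than a guarantee.
xml = convert_bool("active", True, attr_type=False)
print(xml)  # expected: <active>true</active>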
10,259
def plot_model(model, to_file='model.png', show_shapes=False, show_dtype=False, show_layer_names=True, rankdir='TB', expand_nested=False, dpi=96, layer_range=None, show_layer_activations=False): """Converts a Keras model to dot format and save to a file. Example: ```python input = tf.keras.Input(shape=(100,), dtype='int32', name='input') x = tf.keras.layers.Embedding( output_dim=512, input_dim=10000, input_length=100)(input) x = tf.keras.layers.LSTM(32)(x) x = tf.keras.layers.Dense(64, activation='relu')(x) x = tf.keras.layers.Dense(64, activation='relu')(x) x = tf.keras.layers.Dense(64, activation='relu')(x) output = tf.keras.layers.Dense(1, activation='sigmoid', name='output')(x) model = tf.keras.Model(inputs=[input], outputs=[output]) dot_img_file = '/tmp/model_1.png' tf.keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True) ``` Args: model: A Keras model instance to_file: File name of the plot image. show_shapes: whether to display shape information. show_dtype: whether to display layer dtypes. show_layer_names: whether to display layer names. rankdir: `rankdir` argument passed to PyDot, a string specifying the format of the plot: 'TB' creates a vertical plot; 'LR' creates a horizontal plot. expand_nested: Whether to expand nested models into clusters. dpi: Dots per inch. layer_range: input of `list` containing two `str` items, which is the starting layer name and ending layer name (both inclusive) indicating the range of layers for which the plot will be generated. It also accepts regex patterns instead of exact name. In such case, start predicate will be the first element it matches to `layer_range[0]` and the end predicate will be the last element it matches to `layer_range[1]`. By default `None` which considers all layers of model. Note that you must pass range such that the resultant subgraph must be complete. show_layer_activations: Display layer activations (only for layers that have an `activation` property). Returns: A Jupyter notebook Image object if Jupyter is installed. This enables in-line display of the model plots in notebooks. """ dot = model_to_dot( model, show_shapes=show_shapes, show_dtype=show_dtype, show_layer_names=show_layer_names, rankdir=rankdir, expand_nested=expand_nested, dpi=dpi, layer_range=layer_range, show_layer_activations=show_layer_activations) to_file = path_to_string(to_file) if dot is None: return _, extension = os.path.splitext(to_file) if not extension: extension = 'png' else: extension = extension[1:] # Save image to disk. dot.write(to_file, format=extension) # Return the image as a Jupyter Image object, to be displayed in-line. # Note that we cannot easily detect whether the code is running in a # notebook, and thus we always return the Image if Jupyter is available. if extension != 'pdf': try: from IPython import display return display.Image(filename=to_file) except ImportError: pass
10,260
def catalog_info(EPIC_ID=None, TIC_ID=None, KIC_ID=None): """Takes EPIC ID, returns limb darkening parameters u (linear) and a,b (quadratic), and stellar parameters. Values are pulled for minimum absolute deviation between given/catalog Teff and logg. Data are from: - K2 Ecliptic Plane Input Catalog, Huber+ 2016, 2016ApJS..224....2H - New limb-darkening coefficients, Claret+ 2012, 2013, 2012A&A...546A..14C, 2013A&A...552A..16C""" if (EPIC_ID is None) and (TIC_ID is None) and (KIC_ID is None): raise ValueError("No ID was given") if (EPIC_ID is not None) and (TIC_ID is not None): raise ValueError("Only one ID allowed") if (EPIC_ID is not None) and (KIC_ID is not None): raise ValueError("Only one ID allowed") if (TIC_ID is not None) and (KIC_ID is not None): raise ValueError("Only one ID allowed") # KOI CASE (Kepler K1) if KIC_ID is not None: Teff, logg, radius, radius_min, radius_max, mass, mass_min, mass_max = catalog_info_KIC( KIC_ID ) # EPIC CASE (Kepler K2) if EPIC_ID is not None: Teff, logg, radius, radius_min, radius_max, mass, mass_min, mass_max = catalog_info_EPIC( EPIC_ID ) # TESS CASE if TIC_ID is not None: Teff, logg, radius, radius_min, radius_max, mass, mass_min, mass_max = catalog_info_TIC( TIC_ID ) ld = numpy.genfromtxt( path.join(tls_constants.resources_dir, "ld_claret_tess.csv"), skip_header=1, delimiter=",", dtype="f8, int32, f8, f8", names=["logg", "Teff", "a", "b"], ) else: # Limb darkening is the same for K1 (KIC) and K2 (EPIC) ld = numpy.genfromtxt( path.join(tls_constants.resources_dir, "JAA546A14limb1-4.csv"), skip_header=1, delimiter=",", dtype="f8, int32, f8, f8, f8", names=["logg", "Teff", "u", "a", "b"], ) if logg is None: logg = 4 warnings.warn("No logg in catalog. Proceeding with logg=4") if Teff is None: Teff = 6000 warnings.warn("No Teff in catalog. Proceeding with Teff=6000") """From here on, K2 and TESS catalogs work the same: - Take Teff from star catalog and find nearest entry in LD catalog - Same for logg, but only for the Teff values returned before - Return stellar parameters and best-match LD """ nearest_Teff = ld["Teff"][(numpy.abs(ld["Teff"] - Teff)).argmin()] idx_all_Teffs = numpy.where(ld["Teff"] == nearest_Teff) relevant_lds = numpy.copy(ld[idx_all_Teffs]) idx_nearest = numpy.abs(relevant_lds["logg"] - logg).argmin() a = relevant_lds["a"][idx_nearest] b = relevant_lds["b"][idx_nearest] mass = numpy.array(mass) mass_min = numpy.array(mass_min) mass_max = numpy.array(mass_max) radius = numpy.array(radius) radius_min = numpy.array(radius_min) radius_max = numpy.array(radius_max) if mass == 0.0: mass = numpy.nan if mass_min == 0.0: mass_min = numpy.nan if mass_max == 0.0: mass_max = numpy.nan if radius == 0.0: radius = numpy.nan if radius_min == 0.0: radius_min = numpy.nan if radius_max == 0.0: radius_max = numpy.nan return ((a, b), mass, mass_min, mass_max, radius, radius_min, radius_max)
10,261
def classifier_fn_from_tfhub(output_fields, inception_model, return_tensor=False): """Returns a function that can be as a classifier function. Copied from tfgan but avoid loading the model each time calling _classifier_fn Args: output_fields: A string, list, or `None`. If present, assume the module outputs a dictionary, and select this field. inception_model: A model loaded from TFHub. return_tensor: If `True`, return a single tensor instead of a dictionary. Returns: A one-argument function that takes an image Tensor and returns outputs. """ if isinstance(output_fields, six.string_types): output_fields = [output_fields] def _classifier_fn(images): output = inception_model(images) if output_fields is not None: output = {x: output[x] for x in output_fields} if return_tensor: assert len(output) == 1 output = list(output.values())[0] return tf.nest.map_structure(tf.compat.v1.layers.flatten, output) return _classifier_fn
10,262
def get_rate_limit(client): """ Get the Github API rate limit current state for the used token """ query = '''query { rateLimit { limit remaining resetAt } }''' response = client.execute(query) json_response = json.loads(response) return json_response['data']['rateLimit']
10,263
def dns(args):
    """Create/Delete dns entries"""
    delete = args.delete
    name = args.name
    net = args.net
    domain = net
    ip = args.ip
    config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone,
                     namespace=args.namespace)
    k = config.k
    if delete:
        if config.type == 'kvm':
            common.pprint("No deletion on kvm yet for %s" % name, color='blue')
        else:
            common.pprint("Deleting Dns entry for %s..." % name, color='green')
            k.delete_dns(name, domain)
    else:
        common.pprint("Creating Dns entry for %s..." % name, color='green')
        k.reserve_dns(name=name, nets=[net], domain=domain, ip=ip)
10,264
def pret(f): """ Decorator which prints the result returned by `f`. >>> @pret ... def f(x, y): return {'sum': x + y, 'prod': x * y} >>> res = f(2, 3) ==> @pret(f) -- {'prod': 6, 'sum': 5} """ @functools.wraps(f) def g(*args, **kwargs): ret = f(*args, **kwargs) _pdeco("pret", f.__name__, "{retstr}".format( retstr=tstr(pprint.pformat(ret), 120, "<... truncated>"), )) return ret return g
10,265
def parse_arguments() -> tuple[str, str, bool]: """Return the command line arguments.""" current_version = get_version() description = f"Release Quality-time. Current version is {current_version}." epilog = """preconditions for release: - the current folder is the release folder - the current branch is master - the workspace has no uncommitted changes - the generated data model documentation is up-to-date - the change log has an '[Unreleased]' header - the change log contains no release candidates""" parser = ArgumentParser(description=description, epilog=epilog, formatter_class=RawDescriptionHelpFormatter) allowed_bumps_in_rc_mode = ["rc", "rc-major", "rc-minor", "rc-patch", "drop-rc"] # rc = release candidate allowed_bumps = ["rc-patch", "rc-minor", "rc-major", "patch", "minor", "major"] bumps = allowed_bumps_in_rc_mode if "rc" in current_version else allowed_bumps parser.add_argument("bump", choices=bumps) parser.add_argument( "-c", "--check-preconditions-only", action="store_true", help="only check the preconditions and then exit" ) arguments = parser.parse_args() return current_version, arguments.bump, arguments.check_preconditions_only
10,266
def test_empty_conf():
    """Test to check a nonexistent path"""
    with pytest.raises(Exception):
        ConfUltimate.load([])
10,267
def toggle(knob): """ "Inverts" some flags on the selected nodes. What this really does is set all of them to the same value, by finding the majority value and using the inverse of that.""" value = 0 n = nuke.selectedNodes() for i in n: try: val = i.knob(knob).value() if val: value += 1 else: value -= 1 except: pass status = value < 0 for i in n: if not nuke.exists(i.name()+"."+knob): continue knobbie = i.knob(knob) knobbie_str = i.name()+"."+knob size = nuke.animation(knobbie_str, "size") if size is not None and int(size) > 0: knobbie.setKeyAt(nuke.frame()) knobbie.setValue(status) else: knobbie.setValue(status) nuke.modified(True)
10,268
def process_submission(problem_id: str, participant_id: str, file_type: str, submission_file: InMemoryUploadedFile, timestamp: str) -> STATUS_AND_OPT_ERROR_T: """ Function to process a new :class:`~judge.models.Submission` for a problem by a participant. :param problem_id: Problem ID for the problem corresponding to the submission :param participant_id: Participant ID :param file_type: Submission file type :param submission_file: Submission file :param timestamp: Time at submission :returns: A 2-tuple - 1st element indicating whether the processing has succeeded, and 2nd element providing a ``ValidationError`` if processing is unsuccessful. """ problem = models.Problem.objects.filter(code=problem_id) if not problem.exists(): return (False, ValidationError('Problem with code = {} not found' .format(problem_id))) problem = problem[0] if file_type not in problem.file_exts.split(','): return (False, ValidationError({'file_type': ['Accepted file types: \"{}\"' .format(', '.join(problem.file_exts.split(',')))]})) participant = models.Person.objects.filter(email=participant_id.lower()) if not participant.exists(): return (False, ValidationError('Person with email = {} not found' .format(participant_id.lower()))) participant = participant[0] try: sub = problem.submission_set.create(participant=participant, file_type=file_type, submission_file=submission_file, timestamp=timestamp) sub.save() # Catch any weird errors that might pop up during the creation except Exception as other_err: print_exc() return (False, ValidationError(str(other_err))) testcases = models.TestCase.objects.filter(problem=problem) if not os.path.exists(os.path.join('content', 'tmp')): os.makedirs(os.path.join('content', 'tmp')) # NB: File structure here # PROBLEM_ID # SUBMISSION_ID # FILE_FORMAT # TIME_LIMIT # MEMORY_LIMIT # TESTCASE_1 # TESTCASE_2 # .... with open(os.path.join('content', 'tmp', 'sub_run_' + str(sub.pk) + '.txt'), 'w') as f: f.write('{}\n'.format(problem.pk)) f.write('{}\n'.format(sub.pk)) f.write('{}\n'.format(file_type)) f.write('{}\n'.format(int(problem.time_limit.total_seconds()))) f.write('{}\n'.format(problem.memory_limit)) for testcase in testcases: f.write('{}\n'.format(testcase.pk)) try: for testcase in testcases: models.SubmissionTestCase.objects.create(submission=sub, testcase=testcase, verdict='R', memory_taken=0, time_taken=timedelta(seconds=0)) # Catch any weird errors that might pop up during the creation except Exception as other_err: print_exc() return (False, ValidationError(other_err)) else: return (True, None)
10,269
def eq_text_partially_marked( ann_objs, restrict_types=None, ignore_types=None, nested_types=None): """Searches for spans that match in string content but are not all marked.""" # treat None and empty list uniformly restrict_types = [] if restrict_types is None else restrict_types ignore_types = [] if ignore_types is None else ignore_types nested_types = [] if nested_types is None else nested_types # TODO: check that constraints are properly applied matches = SearchMatchSet("Text marked partially") text_type_ann_map = _get_text_type_ann_map( ann_objs, restrict_types, ignore_types, nested_types) max_length_tagged = max([len(s) for s in text_type_ann_map] + [0]) # TODO: faster and less hacky way to detect missing annotations text_untagged_map = {} for ann_obj in ann_objs: doctext = ann_obj.get_document_text() # TODO: proper tokenization. # NOTE: this will include space. #tokens = re.split(r'(\s+)', doctext) try: tokens = _split_and_tokenize(doctext) tokens = _split_tokens_more(tokens) except BaseException: # TODO: proper error handling print("ERROR: failed tokenization in %s, skipping" % ann_obj._input_files[ 0], file=sys.stderr) continue # document-specific map offset_ann_map = _get_offset_ann_map([ann_obj]) # this one too sentence_num = _get_offset_sentence_map(doctext) start_offset = 0 for start in range(len(tokens)): for end in range(start, len(tokens)): s = "".join(tokens[start:end]) end_offset = start_offset + len(s) if len(s) > max_length_tagged: # can't hit longer strings, none tagged break if s not in text_type_ann_map: # consistently untagged continue # Some matching is tagged; this is considered # inconsistent (for this check) if the current span # has no fully covering tagging. Note that type # matching is not considered here. start_spanning = offset_ann_map.get(start_offset, set()) # NOTE: -1 needed, see _get_offset_ann_map() end_spanning = offset_ann_map.get(end_offset - 1, set()) if len(start_spanning & end_spanning) == 0: if s not in text_untagged_map: text_untagged_map[s] = [] text_untagged_map[s].append( (ann_obj, start_offset, end_offset, s, sentence_num[start_offset])) start_offset += len(tokens[start]) # form match objects, grouping by text for text in text_untagged_map: assert text in text_type_ann_map, "INTERNAL ERROR" # collect tagged and untagged cases for "compressing" output # in cases where one is much more common than the other tagged = [] untagged = [] for type_ in text_type_ann_map[text]: for ann_obj, ann in text_type_ann_map[text][type_]: #matches.add_match(ann_obj, ann) tagged.append((ann_obj, ann)) for ann_obj, start, end, s, snum in text_untagged_map[text]: # TODO: need a clean, standard way of identifying a text span # that does not involve an annotation; this is a bit of a hack tm = TextMatch(start, end, s, snum) #matches.add_match(ann_obj, tm) untagged.append((ann_obj, tm)) # decide how to output depending on relative frequency freq_ratio_cutoff = 3 cutoff_limit = 5 if (len(tagged) > freq_ratio_cutoff * len(untagged) and len(tagged) > cutoff_limit): # cut off all but cutoff_limit from tagged for ann_obj, m in tagged[:cutoff_limit]: matches.add_match(ann_obj, m) for ann_obj, m in untagged: matches.add_match(ann_obj, m) print("(note: omitting %d instances of tagged '%s')" % (len(tagged) - cutoff_limit, text)) elif (len(untagged) > freq_ratio_cutoff * len(tagged) and len(untagged) > cutoff_limit): # cut off all but cutoff_limit from tagged for ann_obj, m in tagged: matches.add_match(ann_obj, m) for ann_obj, m in untagged[:cutoff_limit]: 
matches.add_match(ann_obj, m) print("(note: omitting %d instances of untagged '%s')" % (len(untagged) - cutoff_limit, text)) else: # include all for ann_obj, m in tagged + untagged: matches.add_match(ann_obj, m) return matches
10,270
def wait_for_status(status_key, status, get_client, object_id, interval: tobiko.Seconds = None, timeout: tobiko.Seconds = None, error_ok=False, **kwargs): """Waits for an object to reach a specific status. :param status_key: The key of the status field in the response. Ex. provisioning_status :param status: The status to wait for. Ex. "ACTIVE" :param get_client: The tobiko client get method. Ex. _client.get_loadbalancer :param object_id: The id of the object to query. :param interval: How often to check the status, in seconds. :param timeout: The maximum time, in seconds, to check the status. :param error_ok: When true, ERROR status will not raise an exception. :raises TimeoutException: The object did not achieve the status or ERROR in the check_timeout period. :raises UnexpectedStatusException: The request returned an unexpected response code. """ for attempt in tobiko.retry(timeout=timeout, interval=interval, default_timeout=( CONF.tobiko.octavia.check_timeout), default_interval=( CONF.tobiko.octavia.check_interval)): response = get_client(object_id, **kwargs) if response[status_key] == status: return response if response[status_key] == octavia.ERROR and not error_ok: message = ('{name} {field} was updated to an invalid state of ' 'ERROR'.format(name=get_client.__name__, field=status_key)) raise octavia.RequestException(message) # it will raise tobiko.RetryTimeLimitError in case of timeout attempt.check_limits() LOG.debug(f"Waiting for {get_client.__name__} {status_key} to get " f"from '{response[status_key]}' to '{status}'...")
10,271
def can_hold_bags(rule: str, bag_rules: dict) -> dict: """ Returns a dict of all bags that can be held by given bag color :param rule: Color of a given bag :param bag_rules: Dictionary of rules :type rule: str :type bag_rules: dict :return: """ return bag_rules[rule]
10,272
def eWriteNameArray(handle, name, numValues, aValues): """Performs Modbus operations that writes values to a device. Args: handle: A valid handle to an open device. name: The register name to write an array to. numValues: The size of the array to write. aValues: List of values to write. This list needs to be at least size numValues. Raises: TypeError: name is not a string. LJMError: An error was returned from the LJM library call. Note: If numValues is large enough, this functions will automatically split writes into multiple packets based on the current device's effective data packet size. Using both non-buffer and buffer registers in one function call is not supported. """ if not isinstance(name, str): raise TypeError("Expected a string instead of " + str(type(name)) + ".") cNumVals = ctypes.c_int32(numValues) cVals = _convertListToCtypeArray(aValues, ctypes.c_double) cErrorAddr = ctypes.c_int32(-1) error = _staticLib.LJM_eWriteNameArray(handle, name.encode("ascii"), cNumVals, ctypes.byref(cVals), ctypes.byref(cErrorAddr)) if error != errorcodes.NOERROR: errAddr = cErrorAddr.value if errAddr == -1: errAddr = None raise LJMError(error, errAddr)
10,273
def fix_levers_on_same_level(same_level, above_level): """ Input: 3D numpy array with malmo_object_to_index mapping Returns: 3D numpy array where 3 channels represent object index, color index, state index for minigrid """ lever_idx = malmo_object_to_index['lever'] condition = above_level == lever_idx minimap_array = np.where(condition, above_level, same_level) return minimap_array
10,274
def test_words(): """Tests basic functionality of word segmentation.""" language = common.Common words = language.words(u"") assert words == [] words = language.words(u"test sentence.") assert words == [u"test", u"sentence"] # Let's test Khmer with zero width space (\u200b) words = language.words(u"ផ្ដល់​យោបល់") assert words == [u"ផ្ដល់", u"យោបល់"] words = language.words(u"This is a weird test .") assert words == [u"This", u"is", u"a", u"weird", u"test"] words = language.words(u"Don't send e-mail!") assert words == [u"Don't", u"send", u"e-mail"] words = language.words(u"Don’t send e-mail!") assert words == [u"Don’t", u"send", u"e-mail"]
10,275
def persist_to_json_file(activations, filename): """ Persist the activations to the disk :param activations: activations (dict mapping layers) :param filename: output filename (JSON format) :return: None """ with open(filename, 'w') as w: json.dump(fp=w, obj=OrderedDict({k: v.tolist() for k, v in activations.items()}), indent=2, sort_keys=False)
10,276
def iv_plot(df, var_name=None, suffix='_dev'): """Returns an IV plot for a specified variable""" p_suffix = suffix.replace('_','').upper() sub_df = df if var_name is None else df.loc[df.var_name==var_name, ['var_cuts_string'+suffix, 'ln_odds'+suffix, 'resp_rate'+suffix, 'iv'+suffix]] sub_df['resp_rate_trend'+suffix] = _trend(sub_df['resp_rate'+suffix]) iv_val = round(sub_df['iv'+suffix].sum(), 4) f, ax = plt.subplots() ax2 = ax.twinx() sns.lineplot(x='var_cuts_string'+suffix, y='resp_rate'+suffix, data=sub_df, color='red', ax=ax) sns.lineplot(x='var_cuts_string'+suffix, y='resp_rate_trend'+suffix, data=sub_df, color='red', linestyle='--', ax=ax) sns.lineplot(x='var_cuts_string'+suffix, y='ln_odds'+suffix, data=sub_df, color='darkgreen', ax=ax2) ax.set_xticklabels(list(sub_df['var_cuts_string'+suffix]), rotation=45, ha='right') ax.set(xlabel='Variable Bins', ylabel=f'Resp Rate ({p_suffix})', title=f'IV of {var_name} ({iv_val})') ax2.set(ylabel=f'Log Odds ({p_suffix})') ax.legend(handles=[l for a in [ax, ax2] for l in a.lines], labels=[f'Resp Rate ({p_suffix})', f'Resp Rate Trend ({p_suffix})', f'Log Odds ({p_suffix})'], loc=0) return f
10,277
def is_alive(pid): """Return whether a process is running with the given PID.""" return LocalPath('/proc').join(str(pid)).isdir()
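# Quick check for is_alive: the current interpreter's PID should be reported
# as running; note this relies on a /proc filesystem (i.e. Linux).
import os

assert is_alive(os.getpid())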
10,278
def inheritable_thread_target(f: Callable) -> Callable: """ Return thread target wrapper which is recommended to be used in PySpark when the pinned thread mode is enabled. The wrapper function, before calling original thread target, it inherits the inheritable properties specific to JVM thread such as ``InheritableThreadLocal``. Also, note that pinned thread mode does not close the connection from Python to JVM when the thread is finished in the Python side. With this wrapper, Python garbage-collects the Python thread instance and also closes the connection which finishes JVM thread correctly. When the pinned thread mode is off, it return the original ``f``. .. versionadded:: 3.2.0 Parameters ---------- f : function the original thread target. Notes ----- This API is experimental. It is important to know that it captures the local properties when you decorate it whereas :class:`InheritableThread` captures when the thread is started. Therefore, it is encouraged to decorate it when you want to capture the local properties. For example, the local properties from the current Spark context is captured when you define a function here instead of the invocation: >>> @inheritable_thread_target ... def target_func(): ... pass # your codes. If you have any updates on local properties afterwards, it would not be reflected to the Spark context in ``target_func()``. The example below mimics the behavior of JVM threads as close as possible: >>> Thread(target=inheritable_thread_target(target_func)).start() # doctest: +SKIP """ from pyspark import SparkContext if isinstance(SparkContext._gateway, ClientServer): # type: ignore[attr-defined] # Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on. # NOTICE the internal difference vs `InheritableThread`. `InheritableThread` # copies local properties when the thread starts but `inheritable_thread_target` # copies when the function is wrapped. properties = ( SparkContext._active_spark_context._jsc.sc() # type: ignore[attr-defined] .getLocalProperties() .clone() ) @functools.wraps(f) def wrapped(*args: Any, **kwargs: Any) -> Any: try: # Set local properties in child thread. SparkContext._active_spark_context._jsc.sc().setLocalProperties( # type: ignore[attr-defined] properties ) return f(*args, **kwargs) finally: InheritableThread._clean_py4j_conn_for_current_thread() return wrapped else: return f
10,279
def build_grad(verts, edges, edge_tangent_vectors): """ Build a (V, V) complex sparse matrix grad operator. Given real inputs at vertices, produces a complex (vector value) at vertices giving the gradient. All values pointwise. - edges: (2, E) """ edges_np = toNP(edges) edge_tangent_vectors_np = toNP(edge_tangent_vectors) # TODO find a way to do this in pure numpy? # Build outgoing neighbor lists N = verts.shape[0] vert_edge_outgoing = [[] for i in range(N)] for iE in range(edges_np.shape[1]): tail_ind = edges_np[0, iE] tip_ind = edges_np[1, iE] if tip_ind != tail_ind: vert_edge_outgoing[tail_ind].append(iE) # Build local inversion matrix for each vertex row_inds = [] col_inds = [] data_vals = [] eps_reg = 1e-5 for iV in range(N): n_neigh = len(vert_edge_outgoing[iV]) lhs_mat = np.zeros((n_neigh, 2)) rhs_mat = np.zeros((n_neigh, n_neigh + 1)) ind_lookup = [iV] for i_neigh in range(n_neigh): iE = vert_edge_outgoing[iV][i_neigh] jV = edges_np[1, iE] ind_lookup.append(jV) edge_vec = edge_tangent_vectors[iE][:] w_e = 1. lhs_mat[i_neigh][:] = w_e * edge_vec rhs_mat[i_neigh][0] = w_e * (-1) rhs_mat[i_neigh][i_neigh + 1] = w_e * 1 lhs_T = lhs_mat.T lhs_inv = np.linalg.inv(lhs_T @ lhs_mat + eps_reg * np.identity(2)) @ lhs_T sol_mat = lhs_inv @ rhs_mat sol_coefs = (sol_mat[0, :] + 1j * sol_mat[1, :]).T for i_neigh in range(n_neigh + 1): i_glob = ind_lookup[i_neigh] row_inds.append(iV) col_inds.append(i_glob) data_vals.append(sol_coefs[i_neigh]) # build the sparse matrix row_inds = np.array(row_inds) col_inds = np.array(col_inds) data_vals = np.array(data_vals) mat = scipy.sparse.coo_matrix( (data_vals, (row_inds, col_inds)), shape=( N, N)).tocsc() return mat
10,280
def join_label_groups(grouped_issues, grouped_prs, issue_label_groups, pr_label_groups): """Combine issue and PR groups in to one dictionary. PR-only groups are added after all issue groups. Any groups that are shared between issues and PRs are added according to the order in the issues list of groups. This results in "label-groups" remaining in the same order originally specified even if a group does not have issues in it. Otherwise, a shared group may end up at the end of the combined dictionary and not in the order originally specified by the user. """ issue_group_names = [x['name'] for x in issue_label_groups] pr_group_names = [x['name'] for x in pr_label_groups] shared_groups = [] for idx, group_name in enumerate(issue_group_names): if len(pr_group_names) > idx and group_name == pr_group_names[idx]: shared_groups.append(group_name) else: break label_groups = OrderedDict() # add shared groups first for group_name in shared_groups: # make sure to copy the issue group in case it is added to label_groups[group_name] = grouped_issues.get(group_name, [])[:] # add any remaining issue groups for group_name, group in grouped_issues.items(): if group_name in shared_groups: continue label_groups[group_name] = group[:] # add any remaining PR groups (extending any existing groups) for group_name, group in grouped_prs.items(): label_groups.setdefault(group_name, []).extend(group) return label_groups
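# Tiny illustration of join_label_groups: the shared "bug" group keeps its
# position, the issue-only "task" group follows, and the PR-only "refactor"
# group is appended last. The label dicts only need a 'name' key here.
from collections import OrderedDict

grouped_issues = OrderedDict([("bug", ["issue-1"]), ("task", ["issue-2"])])
grouped_prs = OrderedDict([("bug", ["pr-1"]), ("refactor", ["pr-2"])])
issue_groups = [{"name": "bug"}, {"name": "task"}]
pr_groups = [{"name": "bug"}, {"name": "refactor"}]

combined = join_label_groups(grouped_issues, grouped_prs, issue_groups, pr_groups)
# -> OrderedDict([('bug', ['issue-1', 'pr-1']), ('task', ['issue-2']), ('refactor', ['pr-2'])])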
10,281
def meth_browser(meth_data, window, gtf=False, bed=False, simplify=False, split=False, outfile=None, dotsize=4, static=False, binary=False, minqual=20): """ meth_Data is a list of Methylation objects from the import_methylation submodule annotation is optional and is a gtf or bed file if the traces are to be --split per sample (or include raw data as flagged in data.split), then show one line per sample and one for the annotation, with methrows = number of datasets if no splitting is needed, then 4/5 of the browser is used for overlayed samples and one for gtf annotation the trace to be used for annotation is thus always num_methrows + 1 """ meth_traces = plots.methylation(meth_data, dotsize=dotsize, binary=binary, minqual=minqual) logging.info("Prepared methylation traces.") if split or meth_traces.split: num_methrows = len(meth_data) logging.info(f'Making browser in split mode, with {num_methrows} modification rows.') annot_row = num_methrows + 1 annot_axis = f'yaxis{annot_row}' fig = utils.create_subplots(num_methrows, split=True, names=meth_traces.names, annotation=bool(bed or gtf)) for y, (sample_traces, sample_type) in enumerate(meth_traces, start=1): logging.info(f"Adding traces of type {sample_type} at height {y}") for meth_trace in sample_traces: fig.append_trace(trace=meth_trace, row=y, col=1) if sample_type == 'nanopolish_freq': fig["layout"][f"yaxis{y}"].update(title="Modified <br> frequency", range=[0, 1]) fig["layout"].update(showlegend=False) fig["layout"].update(legend=dict(orientation='h')) elif sample_type in ['nanopolish_call', 'nanopolish_phased']: fig["layout"][f"yaxis{y}"].update(title="Reads", tickformat=',d') fig["layout"].update(showlegend=False) elif sample_type == 'nanocompore': fig["layout"][f"yaxis{y}"].update(title="-log10(pval)") fig["layout"].update(legend=dict(orientation='h')) elif sample_type == 'ont-cram': fig["layout"][f"yaxis{y}"].update(title="Reads", tickformat=',d') elif sample_type == 'bedgraph': fig["layout"][f"yaxis{y}"].update(title="Value") else: sys.exit(f"ERROR: unrecognized data type {sample_type}") else: logging.info('Making browser in overlaying mode.') num_methrows = 4 annot_row = 5 annot_axis = 'yaxis2' fig = utils.create_subplots(num_methrows, split=False, annotation=bool(bed or gtf)) for meth_trace in meth_traces.traces: for trace in meth_trace: fig.append_trace(trace=trace, row=1, col=1) fig["layout"].update(legend=dict(orientation='h')) if meth_traces.types[0] == 'nanopolish_freq': fig["layout"]['yaxis'].update(title="Modified <br> frequency", range=[0, 1]) elif meth_traces.types[0] == 'bedgraph': fig["layout"]["yaxis"].update(title="Value") else: sys.exit(f"ERROR: unexpectedly not splitting for input of type {sample_type}") logging.info("Prepared modification plots.") if bed: for annot_trace in plots.bed_annotation(bed, window): fig.append_trace(trace=annot_trace, row=annot_row, col=1) y_max = -2 if gtf: annotation_traces, y_max = plots.gtf_annotation(gtf, window, simplify) for annot_trace in annotation_traces: fig.append_trace(trace=annot_trace, row=annot_row, col=1) if bed or gtf: fig["layout"][annot_axis].update(range=[-2, y_max + 1], showgrid=False, zeroline=False, showline=False, ticks='', showticklabels=False) logging.info("Prepared annotation plots.") fig["layout"]["xaxis"].update(tickformat='g', separatethousands=True, range=[window.begin, window.end]) fig["layout"].update(barmode='overlay', title="Nucleotide modifications", hovermode='closest', plot_bgcolor='rgba(0,0,0,0)') if num_methrows > 10: for i in 
fig['layout']['annotations']: i['font']['size'] = 10 utils.create_browser_output(fig, outfile, window) if static: import plotly.io as pio pio.write_image(fig, static, engine="kaleido")
10,282
def healpix_ijs_neighbours(istar, jstar, nside): """Gets the healpix i, jstar neighbours for a single healpix pixel. Parameters ---------- istar : array Healpix integer i star index. jstar : array Healpix integer i star index. nside : int Healpix nside. Returns ------- istar_neigh : array Neighbour healpix integer i star index. jstar_neigh : array Neighbour healpix integer j star index. """ if jstar - istar + 1 == 2*nside: istar_neigh = [istar, istar + 1, istar + 1, istar + nside, istar + nside, istar - nside, istar + 1 - nside, istar+2*nside] jstar_neigh = [jstar - 1, jstar - 1, jstar, jstar - 1 + nside, jstar + nside, jstar - nside, jstar - nside, jstar+2*nside] elif istar - jstar + 1 == 2*nside: istar_neigh = [istar, istar - 1, istar - 1, istar - nside, istar - nside, istar + nside, istar - 1 + nside, istar-2*nside] jstar_neigh = [jstar + 1, jstar + 1, jstar, jstar + 1 - nside, jstar - nside, jstar + nside, jstar + nside, jstar-2*nside] elif jstar - istar + 1 == nside and istar % nside == 0: istar_neigh = [istar - 1, istar, istar + 1, istar - 1, istar + 1, istar, istar + 1] jstar_neigh = [jstar - 1, jstar - 1, jstar - 1, jstar, jstar, jstar + 1, jstar + 1] elif istar - jstar + 1 == nside and jstar % nside == 0: istar_neigh = [istar - 1, istar, istar - 1, istar + 1, istar - 1, istar, istar + 1] jstar_neigh = [jstar - 1, jstar - 1, jstar, jstar, jstar + 1, jstar + 1, jstar + 1] elif istar % nside == 0 and jstar + 1 - nside*(np.floor(istar/nside) + 1) > 0: istar_neigh = [istar, istar + 1, istar + 1, istar, istar + 1, istar - ((jstar+1)-nside*np.floor(jstar/nside)), istar - ((jstar)-nside*np.floor(jstar/nside)), istar - ((jstar-1)-nside*np.floor(jstar/nside))] jstar_neigh = [jstar - 1, jstar - 1, jstar, jstar + 1, jstar + 1, nside*np.floor(jstar/nside)-1, nside*np.floor(jstar/nside)-1, nside*np.floor(jstar/nside)-1] elif jstar % nside == 0 and istar + 1 - nside*(np.floor(jstar/nside) + 1) > 0: jstar_neigh = [jstar, jstar + 1, jstar + 1, jstar, jstar + 1, jstar - ((istar+2)-nside*np.floor(istar/nside)), jstar - ((istar+1)-nside*np.floor(istar/nside)), jstar - ((istar)-nside*np.floor(istar/nside))] istar_neigh = [istar - 1, istar - 1, istar, istar + 1, istar + 1, nside*np.floor(istar/nside)-1, nside*np.floor(istar/nside)-1, nside*np.floor(istar/nside)-1] elif (jstar + 1 - nside) % nside == 0 and jstar + 1 - nside*(np.floor(istar/nside) + 1) > 0: jstar_neigh = [jstar, jstar - 1, jstar - 1, jstar, jstar - 1, jstar + nside*(np.floor(istar/nside)+1)-istar, jstar + nside*(np.floor(istar/nside)+1)-istar-1, jstar + nside*(np.floor(istar/nside)+1)-istar+1] istar_neigh = [istar - 1, istar - 1, istar, istar + 1, istar + 1, nside*(np.floor(istar/nside)+1), nside*(np.floor(istar/nside)+1), nside*(np.floor(istar/nside)+1)] elif (istar + 1 - nside) % nside == 0 and istar + 1 - nside*(np.floor(jstar/nside) + 1) > 0: istar_neigh = [istar, istar - 1, istar - 1, istar, istar - 1, istar + nside*(np.floor(jstar/nside)+1)-jstar, istar + nside*(np.floor(jstar/nside)+1)-jstar-1, istar + nside*(np.floor(jstar/nside)+1)-jstar+1] jstar_neigh = [jstar - 1, jstar - 1, jstar, jstar + 1, jstar + 1, nside*(np.floor(jstar/nside)+1), nside*(np.floor(jstar/nside)+1), nside*(np.floor(jstar/nside)+1)] else: istar_neigh = [istar - 1, istar, istar + 1, istar - 1, istar + 1, istar - 1, istar, istar + 1] jstar_neigh = [jstar - 1, jstar - 1, jstar - 1, jstar, jstar, jstar + 1, jstar + 1, jstar + 1] istar_neigh = np.array(istar_neigh) jstar_neigh = np.array(jstar_neigh) cond = np.where(istar_neigh + jstar_neigh > 9*nside-1)[0] 
istar_neigh[cond] = istar_neigh[cond] - 4*nside jstar_neigh[cond] = jstar_neigh[cond] - 4*nside cond = np.where(istar_neigh + jstar_neigh < nside-1)[0] istar_neigh[cond] = istar_neigh[cond] + 4*nside jstar_neigh[cond] = jstar_neigh[cond] + 4*nside istar_neigh = np.unique(istar_neigh) jstar_neigh = np.unique(jstar_neigh) return istar_neigh, jstar_neigh
10,283
def micropub_blog_endpoint_POST(blog_name: str): """The POST verb for the micropub blog route Used by clients to change content (CRUD operations on posts) If this is a multipart/form-data request, note that the multiple media items can be uploaded in one request, and they should be sent with a `name` of either `photo`, `video`, or `audio`. (multipart/form-data POST requests can send more than one attachment with the same `name`.) This is in contrast to the media endpoint, which expects a single item with a `name` of simply `file`. """ blog: HugoBase = current_app.config["APPCONFIG"].blog(blog_name) content_type = request.headers.get("Content-type") if not content_type: raise MicropubInvalidRequestError("No 'Content-type' header") request_body, request_files = process_POST_body(request, content_type) current_app.logger.debug( f"/{blog_name}: all headers before calling authentiate_POST: {request.headers}" ) verified = authenticate_POST(request.headers, request_body, blog) auth_test = request.headers.get("X-Interpersonal-Auth-Test") # Check for the header we use in testing, and return a success message if auth_test: return jsonify({"interpersonal_test_result": "authentication_success"}) contype_test = request_body.get("interpersonal_content-type_test") # Check for the value we use in testing, and return a success message if contype_test: return jsonify( { "interpersonal_test_result": contype_test, "content_type": content_type, "uploaded_file_count": len(listflatten(request_files.values())), } ) # Per spec, missing 'action' should imply create action = request_body.get("action", "create") # Ahh yes, the famous CUUD. # These are all actions supported by the spec: # supported_actions = ["delete", "undelete", "update", "create"] # But I don't support them all right now. # TODO: Support delete, undelete, and update actions supported_actions = ["create"] if action not in verified["scopes"]: raise MicropubInsufficientScopeError(action) if action not in supported_actions: raise MicropubInvalidRequestError(f"'{action}' action not supported") actest = request_body.get("interpersonal_action_test") if actest: return jsonify({"interpersonal_test_result": actest, "action": action}) if action == "create": if content_type == "application/json": mf2obj = request_body elif content_type == "application/x-www-form-urlencoded": mf2obj = form_body_to_mf2_json(request_body) elif content_type.startswith("multipart/form-data"): mf2obj = form_body_to_mf2_json(request_body) # Multipart forms contain attachments. # Upload the attachments, then append the URIs to the mf2 object. # We want to append, not replace, the attachments - # if the post includes a photo URI and also some photo uploads, # we need to keep both. # (Not sure if that actually happens out in the wild, but maybe?) # mtype will be one of 'photo', 'video', 'audio'. for mtype in request_files: mitems = request_files[mtype] added = blog.add_media(mitems) if mtype not in mf2obj["properties"]: mf2obj["properties"][mtype] = [] mf2obj["properties"][mtype] += [a.uri for a in added] else: raise MicropubInvalidRequestError( f"Unhandled 'Content-type': '{content_type}'" ) new_post_location = blog.add_post_mf2(mf2obj) resp = Response("") resp.headers["Location"] = new_post_location resp.status_code = 201 return resp else: return json_error(500, f"Unhandled action '{action}'")
10,284
def create_parser(config: YAMLConfig) -> ArgumentParser: """ Automatically creates a parser from all of the values specified in a config file. Will use the dot syntax for nested dictionaries. Parameters ---------- config: YAMLConfig Config object Returns ------- ArgumentParser Parser loaded up with all of the values specified in the config """ key_pairs = config.keys() parser = ArgumentParser( description=f""" This argument parser was autogenerated from the config file. This allows you to overwrite specific YAML values on the fly. The options listed here do not entail an exhaustive list of the things that you can configure. For more information on possible kwargs, refer to the class definition of the object in question. """ ) parser.add_argument(f"config_file", help="YAML config file") for k in key_pairs: current = config.access(k) parser.add_argument(f"--{k}", type=type(current)) return parser
10,285
def detect_version(conn): """ Detect the version of the database. This is typically done by reading the contents of the ``configuration`` table, but before that was added we can guess a couple of versions based on what tables exist (or don't). Returns ``None`` if the database appears uninitialized, and raises :exc:`RuntimeError` is the version is so ancient we can't do anything with it. """ try: with conn.begin(): db_version = conn.scalar(text( "SELECT version FROM configuration")) except exc.ProgrammingError: with conn.begin(): packages_exists = bool(conn.scalar(text( "SELECT 1 FROM pg_catalog.pg_tables " "WHERE schemaname = 'public' AND tablename = 'packages'"))) with conn.begin(): statistics_exists = bool(conn.scalar(text( "SELECT 1 FROM pg_catalog.pg_views " "WHERE schemaname = 'public' AND viewname = 'statistics'"))) with conn.begin(): files_exists = bool(conn.scalar(text( "SELECT 1 FROM pg_catalog.pg_tables " "WHERE schemaname = 'public' AND tablename = 'files'"))) if not packages_exists: # Database is uninitialized return None elif not files_exists: # Database is too ancient to upgrade raise RuntimeError("Database version older than 0.4; cannot upgrade") elif not statistics_exists: return "0.4" else: return "0.5" else: return db_version
10,286
def nIonDotBHmodel2(z): """Ionization model 2 from BH2007: constant above z=6. """ return ((z < 6) * nIonDotLowz(z) + (z >= 6) * nIonDotLowz(6))
10,287
def p_procedure(t): """ procedure : ID LPAREN procedure_expression_list RPAREN | ID LPAREN RPAREN """ pass
10,288
def answer(input): """ >>> answer("1234") 1234 """ lines = input.split('\n') for line in lines: return int(line)
10,289
def attach_model_to_base( config: Configurator, ModelClass: type, Base: type, ignore_reattach: bool = True ): """Dynamically add a model to chosen SQLAlchemy Base class. More flexibility is gained by not inheriting from SQLAlchemy declarative base and instead plugging in models during the pyramid configuration time. Directly inheriting from SQLAlchemy Base class has non-undoable side effects. All models automatically pollute SQLAlchemy namespace and may e.g. cause problems with conflicting table names. This also allows @declared_attr to access Pyramid registry. :param ModelClass: SQLAlchemy model class :param Base: SQLAlchemy declarative Base for which model should belong to :param ignore_reattach: Do nothing if ``ModelClass`` is already attached to base. Base registry is effectively global. ``attach_model_to_base()`` may be called several times within the same process during unit tests runs. Complain only if we try to attach a different base. """ def register(): if ignore_reattach: if "_decl_class_registry" in ModelClass.__dict__: assert ( ModelClass._decl_class_registry == Base._decl_class_registry ), "Tried to attach to a different Base" return instrument_declarative(ModelClass, Base._decl_class_registry, Base.metadata) # TODO: Fire some events or does SQLA do it? discriminator = ("sqlalchemy-model", Base, ModelClass) intr = config.introspectable( "sqlalchemy models", discriminator, ModelClass.__name__, "sqlalchemy model" ) intr["Base"] = Base intr["Class"] = ModelClass config.action(discriminator, callable=register, introspectables=(intr,))
10,290
def get_leading_states(contributions):
    """
    Return state contributions and names as lists in descending order of contribution amount
    :param contributions:
    :return:
    """
    contributions['state'] = contributions['clean_fips'].apply(get_state)
    states = contributions.groupby('state')
    state_sums = states.sum()
    ordered_sums = state_sums.sort_values('clean_contribution', ascending=False)['clean_contribution']
    names = list(ordered_sums.index)
    values = list(ordered_sums)
    unwanted = ['NO_STATE_NAME', 'american samoa', 'northern mariana islands', 'guam', 'virgin islands',
                'puerto rico']
    state_contributions = []
    state_names = []
    for i in range(0, len(values)):
        amount = values[i]
        name = names[i]
        if name not in unwanted:
            state_contributions.append(amount)
            state_names.append(name)
    return state_contributions, state_names
10,291
def convertFoot(tweakParameters, kneePosition, anklePosition, footPosition): """ This method takes the position of the foot guides coming out of the embedding algorithm (knee, ankle and foot) and maps them to foot joints that fits what HumanIK expects. """ pass
10,292
def plot_lr(list_lr, list_steps):
    """
    Utility function to track and plot learning rate
    :param list_lr: list of learning rates tracked during training
    :param list_steps: list of steps/iterations
    :return:
    """
    plt.figure(figsize=(8, 6))
    plt.plot(list_steps, list_lr)
    plt.title('Learning rate by training iterations')
    plt.ylabel('learning rate')
    plt.xlabel('Iterations')
    plt.show()
10,293
def contacts_per_person_normal_self_20(): """ Real Name: b'contacts per person normal self 20' Original Eqn: b'30' Units: b'contact/Day' Limits: (None, None) Type: constant b'' """ return 30
10,294
def copy_file(src, dst): """ Tried to copy utf-8 text files line-by-line to avoid getting CRLF characters added on Windows. If the file fails to be decoded with utf-8, we revert to a regular copy. """ try: with io.open(src, "r", encoding="utf-8") as fh_src: with io.open(dst, "w", encoding="utf-8", newline="\n") as fh_dst: for line in fh_src: fh_dst.write(line) except UnicodeDecodeError: # Leave any other files alone. shutil.copy(src, dst) shutil.copymode(src, dst) repo = get_repo(dst) if repo: repo.index.add([dst])
10,295
def __compute_partition_gradient(data, fit_intercept=True):
    """
    Compute hetero regression gradient for:
    gradient = ∑d*x, where d is the fore_gradient, which differs between algorithms
    Parameters
    ----------
    data: DTable, include fore_gradient and features
    fit_intercept: bool, whether the model has an intercept. Default True

    Returns
    ----------
    numpy.ndarray
        hetero regression model gradient
    """
    feature = []
    fore_gradient = []

    for key, value in data:
        feature.append(value[0])
        fore_gradient.append(value[1])
    feature = np.array(feature)
    fore_gradient = np.array(fore_gradient)

    gradient = []
    if feature.shape[0] <= 0:
        return 0
    for j in range(feature.shape[1]):
        feature_col = feature[:, j]
        gradient_j = fate_operator.dot(feature_col, fore_gradient)
        gradient.append(gradient_j)
    if fit_intercept:
        bias_grad = np.sum(fore_gradient)
        gradient.append(bias_grad)
    return np.array(gradient)
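# NumPy-only sketch of the same formula (outside FATE's DTable machinery):
# for a feature matrix X (n x p) and per-sample fore-gradients d (length n),
# the gradient is X.T @ d, with sum(d) appended when an intercept is fitted.
import numpy as np

X = np.array([[1.0, 2.0], [3.0, 4.0]])
d = np.array([0.5, -1.0])
grad = np.concatenate([X.T @ d, [d.sum()]])
print(grad)  # [-2.5 -3.  -0.5]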
10,296
def calc_nsd(x, n=21): """ Estimate Noise Standard Deviation of Data. Parameters ---------- x : 1d-ndarray Input data. n : int Size of segment. Returns ------- result : float Value of noise standard deviation. """ x_diff = np.diff(x, n=2) x_frag = np.array_split(x_diff, len(x_diff) // n) cursor = np.argmin([np.std(i, ddof=1) for i in x_frag]) for i in range(n * (cursor + 1), len(x_diff)): i_frag = x_diff[i-n:i-1] i_frag_avg = np.mean(i_frag) i_frag_std = np.std(i_frag, ddof=1) if np.abs(x_diff[i] - i_frag_avg) > 3 * i_frag_std: x_diff[i] = i_frag_avg for i in range(0, n * cursor - 1)[::-1]: if n * cursor - 1 < 0: break i_frag = x_diff[i+1:i+n] i_frag_avg = np.mean(i_frag) i_frag_std = np.std(i_frag, ddof=1) if np.abs(x_diff[i] - i_frag_avg) > 3 * i_frag_std: x_diff[i] = i_frag_avg return np.std(x_diff, ddof=1) / 6 ** 0.5
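# Hedged sanity check for calc_nsd: for pure white noise the estimate should
# land close to the true standard deviation (0.5 here), up to sampling error.
import numpy as np

rng = np.random.default_rng(0)
noisy_signal = rng.normal(0.0, 0.5, size=2000)
print(calc_nsd(noisy_signal, n=21))  # roughly 0.5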
10,297
def mixed_args(fixed1, fixed2, var1, var2, kwarg1=None, kwarg2=None): """ Test function for mix of fixed and live args | str --> None Copy paste following to test: foo, bar, kwarg1 = barfoo, kwarg2 = foobar """ print('Builtin: ' + kwarg2 + ' ' + kwarg1 + ' ' + var2 + ' ' + var1 + ' ' + fixed2 + ' ' + fixed1)
10,298
def deflection_from_kappa_grid_adaptive(kappa_high_res, grid_spacing, low_res_factor, high_res_kernel_size): """ deflection angles on the convergence grid with adaptive FFT the computation is performed as a convolution of the Green's function with the convergence map using FFT The grid is returned in the lower resolution grid :param kappa_high_res: convergence values for each pixel (2-d array) :param grid_spacing: pixel size of high resolution grid :param low_res_factor: lower resolution factor of larger scale kernel. :param high_res_kernel_size: int, size of high resolution kernel in units of degraded pixels :return: numerical deflection angles in x- and y- direction """ kappa_low_res = image_util.re_size(kappa_high_res, factor=low_res_factor) num_pix = len(kappa_high_res) * 2 if num_pix % 2 == 0: num_pix += 1 #if high_res_kernel_size % low_res_factor != 0: # assert ValueError('fine grid kernel size needs to be a multiplicative factor of low_res_factor! Settings used: ' # 'fine_grid_kernel_size=%s, low_res_factor=%s' % (high_res_kernel_size, low_res_factor)) kernel_x, kernel_y = deflection_kernel(num_pix, grid_spacing) grid_spacing_low_res = grid_spacing * low_res_factor kernel_low_res_x, kernel_high_res_x = kernel_util.split_kernel(kernel_x, high_res_kernel_size, low_res_factor, normalized=False) f_x_high_res = scp.fftconvolve(kappa_high_res, kernel_high_res_x, mode='same') / np.pi * grid_spacing ** 2 f_x_high_res = image_util.re_size(f_x_high_res, low_res_factor) f_x_low_res = scp.fftconvolve(kappa_low_res, kernel_low_res_x, mode='same') / np.pi * grid_spacing_low_res ** 2 f_x = f_x_high_res + f_x_low_res kernel_low_res_y, kernel_high_res_y = kernel_util.split_kernel(kernel_y, high_res_kernel_size, low_res_factor, normalized=False) f_y_high_res = scp.fftconvolve(kappa_high_res, kernel_high_res_y, mode='same') / np.pi * grid_spacing ** 2 f_y_high_res = image_util.re_size(f_y_high_res, low_res_factor) f_y_low_res = scp.fftconvolve(kappa_low_res, kernel_low_res_y, mode='same') / np.pi * grid_spacing_low_res ** 2 f_y = f_y_high_res + f_y_low_res return f_x, f_y
10,299