content (string, 22 to 815k characters) | id (int64, 0 to 4.91M)
def get_restricted_area(path1, path2, restricted_pos1, restricted_pos2, time_step):
    """Computes the restricted area and the start- and end-time steps for both agents.

    * start time-step: The first time step where an agent occupies a position within the restricted area.
    * end time-step: The last time step where an agent occupies a position within the restricted area.

    :param path1: Path (previous solution) from the first agent.
    :param path2: Path (previous solution) from the second agent.
    :param restricted_pos1: The first position which agent one would occupy within the restricted area.
    :param restricted_pos2: The first position which agent two would occupy within the restricted area.
    :param time_step: The time step where the agents would collide.
    :return: The positions included within the restricted area, the start time steps for both agents
        and the end time steps for both agents.
    """
    sub_sequence1 = find_stop_position(path1[:time_step + 2][::-1], restricted_pos1)[::-1]
    sub_sequence2 = find_stop_position(path2[:time_step + 2][::-1], restricted_pos2)

    restricted_area = list(dict.fromkeys(sub_sequence1)) + list(dict.fromkeys(sub_sequence2))

    # Determine time step where agent enters restricted area
    fst_enter_r = find_stop_position(
        list(zip(path1, range(len(path1))))[:time_step + 2], restricted_pos1
    )[-1][1]
    snd_enter_r = find_stop_position(
        list(zip(path2, range(len(path2))))[:time_step + 2], restricted_pos2
    )[-1][1]
    start_time_steps = [fst_enter_r, snd_enter_r]

    # Determine how long the agent remains within the restricted area
    end_time_steps = []
    for path, r, enter in [
        (path1, restricted_area, fst_enter_r),
        (path2, restricted_area[::-1], snd_enter_r)
    ]:
        path_idx = 0
        for idx in range(len(restricted_area)):
            # Agent might wait in the restricted area because of other constraints
            while path_idx < len(path[enter:]) \
                    and path[enter:][path_idx] == path[enter:][path_idx - 1]:
                path_idx += 1

            # The last position of the agent is within the restricted area
            if path_idx >= len(path[enter:]) - 1:
                path_idx = len(path[enter:])
                break

            if path[enter:][path_idx] != r[idx]:
                break
            path_idx += 1

        end_time_steps.append(path_idx)

    end_time_steps[0] += start_time_steps[0]
    end_time_steps[1] += start_time_steps[1]

    return restricted_area, start_time_steps, end_time_steps
12,600
def execute_config(config_subparser: argparse.ArgumentParser, argv: List[str]) -> int:
    """
    Boolean logic of config subparser triggering.
    """
    args = config_subparser.parse_args(argv[1:])

    if args.show_settings:
        print(settings_msg)
        return 0

    if args.turn_log_on:
        config['LOG-SETTINGS']['logging_turned_on'] = args.turn_log_on.capitalize()
        with open(settings_file, 'w') as fp:
            config.write(fp)
        log_state = config.getboolean('LOG-SETTINGS', 'logging_turned_on')
        if log_state:
            print('Logging is activated.')
        else:
            print('Logging is deactivated.')
        return 0

    if args.log_name:
        old_logger_path = get_logger_path()
        config['LOG-SETTINGS']['logger_filename'] = args.log_name
        with open(settings_file, 'w') as fp:
            config.write(fp)
        new_logger_path = get_logger_path()
        os.rename(old_logger_path, new_logger_path)
        print(f"The new log filename is {config.get('LOG-SETTINGS', 'logger_filename')!r}.")
        return 0

    if args.log_location:
        old_logger_path = get_logger_path()
        log_location = args.log_location
        if '~' in args.log_location:
            log_location = os.path.expanduser(args.log_location)
        if not os.path.isdir(log_location):
            print(f'The given path {args.log_location!r} is not a valid directory!')
            return 1
        config['LOG-SETTINGS']['logger_location'] = log_location
        with open(settings_file, 'w') as fp:
            config.write(fp)
        new_logger_path = get_logger_path()
        os.rename(old_logger_path, new_logger_path)
        print(f"The new log location is {config.get('LOG-SETTINGS', 'logger_location')!r}.")
        return 0

    if args.set_search_value:
        if args.set_search_value == ' ':
            config['VALUE-SETTINGS']['search_value'] = "' '"
            with open(settings_file, 'w') as fp:
                config.write(fp)
            print(f"The new search-value is {config.get('VALUE-SETTINGS', 'search_value')}.")
        else:
            config['VALUE-SETTINGS']['search_value'] = args.set_search_value
            with open(settings_file, 'w') as fp:
                config.write(fp)
            print(f"The new search-value is {config.get('VALUE-SETTINGS', 'search_value')!r}.")
        return 0

    if args.set_new_value == '':
        config['VALUE-SETTINGS']['new_value'] = "''"
        with open(settings_file, 'w') as fp:
            config.write(fp)
        print(f"The new 'new-value' is {config.get('VALUE-SETTINGS', 'new_value')}.")
        return 0

    if args.set_new_value:
        config['VALUE-SETTINGS']['new_value'] = args.set_new_value
        with open(settings_file, 'w') as fp:
            config.write(fp)
        print(f"The new 'new-value' is {config.get('VALUE-SETTINGS', 'new_value')!r}.")
        return 0

    config_subparser.print_help()
    return 1
12,601
def FR_highpass(freq: np.ndarray, hp_freq: float, trans_width: float) -> np.ndarray:
    """Frequency response for highpass filter

    Parameters
    ----------
    ``freq``: np.ndarray
        frequency array
    ``hp_freq``: float
        highpass frequency
    ``trans_width``: float
        width of the transition region between bands

    Returns
    -------
    np.ndarray with values in [0, 1]
    """
    sigma = trans_width / 6.
    return 1 / (1 + np.exp((hp_freq - freq) / sigma))
12,602
def _rexec(params):
    """Start a subprocess shell to execute the specified command and return its output.

    params - a one element list ["/bin/cat /etc/hosts"]
    """
    # check that params is a list
    if not isinstance(params, list) or len(params) == 0:
        return "Parameter must be a not empty list"
    command = params[0]
    try:
        subprocess.check_call(command, shell=True)
        out = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read()
        return '\n' + out.decode()
    except Exception as e:
        print(e)
        return "{\"msg\":\"Invalid command.\"}"
12,603
def test_control_failures(tmp_path: pathlib.Path) -> None:
    """Test various failure modes."""
    part = common.Part(name='foo')
    assert ControlIOWriter._get_label(part) == ''

    assert ControlIOReader._strip_to_make_ncname('1a@foo') == 'afoo'
    with pytest.raises(TrestleError):
        ControlIOReader._strip_to_make_ncname('1@')

    with pytest.raises(TrestleError):
        ControlIOReader._indent('')

    with pytest.raises(TrestleError):
        ControlIOReader._indent(' foo')
12,604
def _get_compose_template(manifest):
    """
    Build the service entry for each one of the functions in the given context.

    Each docker-compose entry will depend on the same image and it's just a
    static definition that gets built from a template. The template is in the
    artifacts folder.
    """
    artifact = get_artifact('compose-template.yml')

    def build_section(label):
        return [
            {
                'name': name,
                'image': _get_docker_image(manifest, sls_section),
                'volumes': _get_volumes(manifest, sls_section)
            }
            for name, sls_section in manifest.get(label, {}).items()
        ]

    # Load the jinja template and build the sls functions and layers.
    return Template(artifact).render(
        functions=build_section('functions'),
        layers=build_section('layers')
    )
12,605
def _ParsePackageNode(package_node):
    """Parses a <package> node from the dexdump xml output.

    Returns:
      A dict in the format:
        {
          'classes': {
            <class_1>: {
              'methods': [<method_1>, <method_2>]
            },
            <class_2>: {
              'methods': [<method_1>, <method_2>]
            },
          }
        }
    """
    classes = {}
    for child in package_node:
        if child.tag == 'class':
            classes[child.attrib['name']] = _ParseClassNode(child)
    return {'classes': classes}
12,606
def greet(lang):
    """This function is for printing a greeting in some selected
    languages: Spanish, Swedish, and German"""
    if lang == 'es':
        return 'Hola'
    elif lang == 'ge':
        return 'Hallo'
    elif lang == 'sv':
        return 'Halla'
    else:
        return 'Hello'
12,607
def add_hook(
    show_original=False,
    show_transformed=False,
    predictable_names=False,
    verbose_finder=False,
):
    """Creates and adds the import hook in sys.meta_path"""
    callback_params = {
        "show_original": show_original,
        "show_transformed": show_transformed,
        "predictable_names": predictable_names,
    }
    hook = import_hook.create_hook(
        transform_source=transform_source,
        callback_params=callback_params,
        hook_name=__name__,
        verbose_finder=verbose_finder,
    )
    return hook
12,608
def correlate_two_dicts(xdict, ydict, subset_keys=None):
    """Find values with the same key in both dictionaries and return two arrays of corresponding values"""
    x, y, _ = correlate_two_dicts_verbose(xdict, ydict, subset_keys)
    return x, y
12,609
def find_gateways(unicast_gateway, session, apic) -> tuple: """Search for ACI Gateways and get configurations""" get_gateway = get_subnets(session, apic) aps = [] epgs = [] l3Outs = [] gateways = [] location, bridge_domain, uni_route, scope, unkwn_uni, tenant, bd_vrf, iplearn = None, "DoesntExist", None, None, None, None, None, None try: # Locate subnet in ACI, get scope, map location for fvSubnet in get_gateway.iter("fvSubnet"): ip = fvSubnet.get("ip") gateways.append(ip) if unicast_gateway in ip: location = fvSubnet.get("dn") scope = fvSubnet.get("scope") break # Find BD, check to see if unicast routing is enable and unknown unicast setting is for fvBD in get_gateway.iter("fvBD"): bds = fvBD.get("name") iplearn = fvBD.get("ipLearning") mtu = fvBD.get("mtu") learn_limit = fvBD.get("limitIpLearnToSubnets") mac = fvBD.get("mac") if location.rfind(bds) != -1: bridge_domain = bds uni_route = fvBD.get("unicastRoute") unkwn_uni = fvBD.get("unkMacUcastAct") # Find vrf associated with BD for fvRsCtx in get_gateway.iter("fvRsCtx"): vrf = fvRsCtx.get("tnFvCtxName") location = fvRsCtx.get("dn") if location.rfind(bridge_domain) != -1: bd_vrf = vrf # Find tenant, ap, and epgs, save to list for fvRtBd in get_gateway.iter("fvRtBd"): dn = fvRtBd.get("dn") if dn.rfind(bridge_domain) != -1: tenant = dn.split("/")[1].strip("tn-") aps.append(dn.split("/")[5].strip("ap-")) epgs.append(dn.split("/")[6].strip("epg-").strip("]")) # Find L3outs, save to list for fvRsBDToOut in get_gateway.iter("fvRsBDToOut"): dn = fvRsBDToOut.get("dn") if dn.rfind(bridge_domain) != -1: l3Outs.append(dn.split("/")[3].strip("rsBDToOut-")) # Find L3outs, save to list for ipLearning in get_gateway.iter("ipLearning"): iplearn = ipLearning.get("ipLearning") except AttributeError: pass # Set variables from conditions if aps: join_aps = ', '.join(aps) else: join_aps = None if epgs: join_epgs = ', '.join(epgs) else: join_epgs = None if l3Outs: join_l3outs = ', '.join(l3Outs) else: join_l3outs = None if not bd_vrf: bd_vrf = None if not unicast_gateway: bridge_domain = 0 # Return to user input return bridge_domain, uni_route, scope, unkwn_uni, tenant, join_aps, join_epgs, join_l3outs, bd_vrf, iplearn, mtu, learn_limit, mac, gateways
12,610
def test_list_date_time_max_length_4_nistxml_sv_iv_list_date_time_max_length_5_4(mode, save_output, output_format):
    """
    Type list/dateTime is restricted by facet maxLength with value 10.
    """
    assert_bindings(
        schema="nistData/list/dateTime/Schema+Instance/NISTSchema-SV-IV-list-dateTime-maxLength-5.xsd",
        instance="nistData/list/dateTime/Schema+Instance/NISTXML-SV-IV-list-dateTime-maxLength-5-4.xml",
        class_name="NistschemaSvIvListDateTimeMaxLength5",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
12,611
def disable_servo(pin):
    """
    Attempt to disable the specified servo by turning off the PWM signal.

    Note that this method does NOT work for digital servos--they will
    continue to run until they are powered off.
    """
    set_pwm(pin, 0)
12,612
def hamming_dist(y_true, y_pred):
    """
    Calculate the Hamming distance between a given predicted label and
    the true label. Assumes inputs are torch Variables!

    Args:
        y_true (autograd.Variable): The true label
        y_pred (autograd.Variable): The predicted label
    Returns:
        (float): The Hamming distance between the two vectors
    """
    # Make sure y_pred is rounded to 0/1
    y_pred = torch.round(y_pred)
    result = torch.mean(torch.abs(y_true - y_pred), dim=1)
    result = torch.mean(result, dim=0)
    return float(result.data.cpu().numpy())
12,613
def task_mo():
    """Create binary wheel distribution"""
    return {
        "actions": [
            """pybabel compile -D todo -i frontend/po/eng/LC_MESSAGES/todo.po -o frontend/po/eng/LC_MESSAGES/todo.mo"""
        ],
        "file_dep": ["frontend/po/eng/LC_MESSAGES/todo.po"],
        "targets": ["frontend/po/eng/LC_MESSAGES/todo.mo"],
    }
12,614
def tuple_action_to_int(
    action: Tuple[int, int], slot_based: bool, end_trial_action: bool
) -> int:
    """Converts tuple action to integer."""
    stone, potion = action
    num_special_actions = 2 if end_trial_action else 1
    if stone < 0:
        return stone + num_special_actions
    if slot_based:
        potions_and_cauldron = MAX_POTIONS + 1
    else:
        potions_and_cauldron = PerceivedPotion.num_types + 1
    return stone * potions_and_cauldron + potion + 1 + num_special_actions
12,615
def PDM(signal=50, angle=0, n_points=1000, motion_slow=0, motion_size=75, box_size=8, point_size=0.05, point_speed=1, ITI=1000): """ Pattern Detection in Motion """ angle_rad = np.radians(angle) y_movement = np.sin(np.radians(angle))*point_speed x_movement = np.cos(np.radians(angle))*point_speed random_rad_angle = np.random.uniform(0, 360, int(n_points*(100-signal)/100)) random_y_movement = np.sin(np.radians(random_rad_angle))*point_speed random_x_movement = np.cos(np.radians(random_rad_angle))*point_speed # Generate points circle_r = n.Coordinates.to_pygame(distance_x=box_size/2) circle_x = n.Coordinates.to_pygame(x=0) circle_y = n.Coordinates.to_pygame(y=0) signal_x = [] signal_y = [] random_x = [] random_y = [] for point in range(int(n_points*signal/100)): alpha = 2 * np.pi * np.random.random() r = circle_r * np.random.random() x = r * np.cos(alpha) + circle_x y = r * np.sin(alpha) + circle_y signal_x.append(x) signal_y.append(y) for point in range(int(n_points*(100-signal)/100)): alpha = 2 * np.pi * np.random.random() r = circle_r * np.random.random() x = r * np.cos(alpha) + circle_x y = r * np.sin(alpha) + circle_y random_x.append(x) random_y.append(y) signal_x = np.array(signal_x) signal_y = np.array(signal_y) random_x = np.array(random_x) random_y = np.array(random_y) # Mask box_size = n.Coordinates.to_pygame(distance_y = box_size) x = n.screen_width/2-box_size/2 y = (n.screen_height-box_size)/2 # Preparation n.newpage("black", auto_refresh=False) # n.newpage("grey", auto_refresh=False) pygame.draw.circle(n.screen, n.color("grey"), (int(n.screen_width/2), int(n.screen_height/2)), int(abs(box_size)/2), 0) n.write("+", color="white", size=1.5) n.refresh() n.time.wait(ITI) # Movement time_start = datetime.datetime.now() for i in range(motion_size): n.newpage("black", auto_refresh=False) # n.newpage("grey", auto_refresh=False) pygame.draw.circle(n.screen, n.color("grey"), (int(n.screen_width/2), int(n.screen_height/2)), int(abs(box_size)/2), 0) for point in range(len(signal_x)): pygame.draw.circle(n.screen, n.color("black"), (int(signal_x[point]), int(signal_y[point])), 3, 0) # n.circle(x=half1_x[point], y=half1_y[point], size=point_size, fill_color="black") for point in range(len(random_x)): pygame.draw.circle(n.screen, n.color("black"), (int(random_x[point]), int(random_y[point])), 3, 0) # n.circle(x=half2_x[point], y=half2_y[point], size=point_size, fill_color="black") signal_x += x_movement signal_y -= y_movement random_x -= random_x_movement random_y += random_y_movement # TODO: ensure that points stay in the mask area (and transport them from one side to another if needed) n.refresh() if motion_slow > 0: n.time.wait(motion_slow) # Save duration = datetime.datetime.now()-time_start parameters = {"Angle": angle, "Angle_Radian": angle_rad, "Signal": signal, "n_Points": n_points, "Box_Size": box_size, "Motion_Size": motion_size, "Point_Size": point_size, "Point_Speed": point_speed, "Mask_Corrdinates": (int(n.screen_width/2), int(n.screen_height/2)), "Mask_Size": int(abs(box_size)/2), "ITI": ITI, "Movement_Duration": duration} return(parameters)
12,616
def warpImage(imIn, pointsIn, pointsOut, delaunayTri):
    """
    Warp an image.

    Parameters:
    ===========
        imIn: input image
        pointsIn: input points
        pointsOut: output points
        delaunayTri: Delaunay triangles

    Returns:
    ============
        imgOut: the warped image
    """
    h, w, ch = imIn.shape
    imOut = np.zeros(imIn.shape, dtype=imIn.dtype)

    for j in range(0, len(delaunayTri)):
        tin = []
        tout = []

        for k in range(0, 3):
            pIn = pointsIn[delaunayTri[j][k]]
            pIn = constrainPoint(pIn, w, h)

            pOut = pointsOut[delaunayTri[j][k]]
            pOut = constrainPoint(pOut, w, h)

            tin.append(pIn)
            tout.append(pOut)

        warpTriangle(imIn, imOut, tin, tout)

    return imOut
12,617
def test_check_fields(): """Check to make sure the right fields have been created. Check that the sizes of at least one are also correct (they are at node so if one is the right size, then they all should be) """ mg0 = RasterModelGrid((10, 10), xy_spacing=(1, 1)) mg0.add_field( "topographic__elevation", mg0.node_x ** 2 + mg0.node_y ** 2, at="node" ) _FlowDirector(mg0, "topographic__elevation") assert sorted(list(mg0.at_node.keys())) == [ "flow__sink_flag", "topographic__elevation", ] assert np.size(mg0.at_node["topographic__elevation"]) == mg0.number_of_nodes mg1 = RasterModelGrid((10, 10), xy_spacing=(1, 1)) mg1.add_field( "topographic__elevation", mg1.node_x ** 2 + mg1.node_y ** 2, at="node" ) _FlowDirectorToMany(mg1, "topographic__elevation") assert sorted(list(mg1.at_node.keys())) == [ "flow__sink_flag", "topographic__elevation", ] assert np.size(mg1.at_node["topographic__elevation"]) == mg1.number_of_nodes mg2 = RasterModelGrid((10, 10), xy_spacing=(1, 1)) mg2.add_field( "topographic__elevation", mg2.node_x ** 2 + mg2.node_y ** 2, at="node" ) _FlowDirectorToOne(mg2, "topographic__elevation") assert sorted(list(mg2.at_node.keys())) == [ "flow__link_to_receiver_node", "flow__receiver_node", "flow__sink_flag", "topographic__elevation", "topographic__steepest_slope", ] assert np.size(mg2.at_node["topographic__elevation"]) == mg2.number_of_nodes mg3 = RasterModelGrid((10, 10), xy_spacing=(1, 1)) mg3.add_field( "topographic__elevation", mg3.node_x ** 2 + mg3.node_y ** 2, at="node" ) FlowDirectorMFD(mg3, "topographic__elevation") assert sorted(list(mg3.at_node.keys())) == [ "flow__link_to_receiver_node", "flow__receiver_node", "flow__receiver_proportions", "flow__sink_flag", "topographic__elevation", "topographic__steepest_slope", ] assert np.size(mg3.at_node["topographic__elevation"]) == mg3.number_of_nodes mg4 = RasterModelGrid((10, 10), xy_spacing=(1, 1)) mg4.add_field( "topographic__elevation", mg4.node_x ** 2 + mg4.node_y ** 2, at="node" ) FlowDirectorDINF(mg4, "topographic__elevation") assert sorted(list(mg4.at_node.keys())) == [ "flow__link_to_receiver_node", "flow__receiver_node", "flow__receiver_proportions", "flow__sink_flag", "topographic__elevation", "topographic__steepest_slope", ] assert np.size(mg4.at_node["topographic__elevation"]) == mg4.number_of_nodes mg5 = RasterModelGrid((10, 10), xy_spacing=(1, 1)) mg5.add_field( "topographic__elevation", mg5.node_x ** 2 + mg5.node_y ** 2, at="node" ) FlowDirectorSteepest(mg5, "topographic__elevation") assert sorted(list(mg5.at_node.keys())) == [ "flow__link_to_receiver_node", "flow__receiver_node", "flow__sink_flag", "topographic__elevation", "topographic__steepest_slope", ] assert np.size(mg5.at_node["topographic__elevation"]) == mg5.number_of_nodes mg6 = RasterModelGrid((10, 10), xy_spacing=(1, 1)) mg6.add_field( "topographic__elevation", mg6.node_x ** 2 + mg6.node_y ** 2, at="node" ) FlowDirectorD8(mg6, "topographic__elevation") assert sorted(list(mg6.at_node.keys())) == [ "flow__link_to_receiver_node", "flow__receiver_node", "flow__sink_flag", "topographic__elevation", "topographic__steepest_slope", ] assert np.size(mg6.at_node["topographic__elevation"]) == mg6.number_of_nodes
12,618
def rmchars(value):
    """Remove special characters from alphanumeric values except for period (.)
    and negative (-) characters.

    :param value: Alphanumeric value
    :type value: string
    :returns: Alphanumeric value stripped of any special characters
    :rtype: string

    >>> import utils
    >>> utils.rmchars(value = "*6.5_")
    '6.5'
    >>> utils.rmchars(value = "ICE")
    'ICE'
    >>> utils.rmchars(value = "-4.2")
    '-4.2'
    >>> utils.rmchars(value = "%&!@#8.32&#*;")
    '8.32'
    """
    value = re.sub("[^A-Za-z0-9.-]+", "", value)
    return value
12,619
def test_find_missing(source, target, result):
    """Test that function find_missing returns the missing element between 2 lists."""
    from missing_element import find_missing_counter
    assert find_missing_counter(source, target) == result
12,620
def ema(x):
    """
    [Definition]
        Exponentially weighted moving average over a window of length `period`
    [Category]
        Technical indicator
    """
    return 'ema(%s,%s)' % (x, pe.gen_param('ema', 'period'))
12,621
def tokenized(phrase: str) -> Set[str]:
    """Split a phrase into tokens and remove stopwords."""
    return set(normalize(phrase).split()) - STOPWORDS
12,622
def synthesize(pipeline_in, net, dev, res_alloc, output_dir, prefix="", override_ibits=0): """ Create an FPGA accelerator given a QNN and compute resource allocator. Returns an ExternalExecutionLayer wrapping the compiled simulation executable. pipeline_in : list of input layers res_alloc : function that takes in a pipeline and returns PE/SIMD annotated copy output_dir : where the generated code will be placed prefix : prefix for the generated files (unused) """ # before applying any transforms, pick up pipeline input precision # unless it is specified as override if override_ibits != 0: pipeline_ibits = override_ibits else: pipeline_ibits = pipeline_in[0].ibits # turn pipeline into a form synthesizable by the FPGA backend pipeline = convert(pipeline_in, net, dev, res_alloc, pipeline_ibits) # create output dir if it does not exist if not os.path.exists(output_dir): os.makedirs(output_dir) # collect parameters (side effect: file generation, no return values) map(lambda x: x.codegen_params(output_dir), pipeline) # collect globals (include statements etc.) glob = map(lambda x: x.codegen_globals(), pipeline) glob = "".join(i for i in glob) glob = indent(glob, 0) # collect variable declarations and other preparation decls = map(lambda x: x.codegen_declarations(), pipeline) decls = "".join(i for i in decls) decls = indent(decls, 1) # collect architecture instantiation code arch = map(lambda x: x.codegen_architecture(), pipeline) arch = "".join(i for i in arch) arch = indent(arch, 1) # get input/output stream declarations instream_decl = pipeline[0].getInStreamDecl() outstream_decl = pipeline[-1].getOutStreamDecl() # generate code for single i/o (useful for simulation) singleiodecls = "\n" + instream_decl.replace("&","") + ";" singleiodecls += "\n" + outstream_decl.replace("&","") + ";" singleiodecls = indent(singleiodecls, 1) single2instream = pipeline[0].codegen_single2instream("singleInStrm", "inStream") single2instream = indent(single2instream, 1) outstream2single = pipeline[-1].codegen_outstream2single("outStream", "singleOutStrm") outstream2single = indent(outstream2single, 1) memresources = determine_memory_resources(pipeline) memresources = indent(memresources,0) numInElems = pipeline[0].getNumInputElems() numOutElems = pipeline[-1].getNumOutputElems() # put generated text into template ret = docompute_template ret = ret.replace("$MEMRESOURCES$", memresources) ret = ret.replace("$GLOBALS$", glob) ret = ret.replace("$INSTREAM$", instream_decl) ret = ret.replace("$OUTSTREAM$", outstream_decl) ret = ret.replace("$DECLS$", decls) ret = ret.replace("$ARCH$", arch) # emit code with open(output_dir + "/docompute.cpp", "w") as f: f.write(ret) # emit wrapper ret = wrapper_template ret = ret.replace("$INSTREAM$", instream_decl) ret = ret.replace("$OUTSTREAM$", outstream_decl) ret = ret.replace("$SINGLEIODECLS$", singleiodecls) ret = ret.replace("$SINGLE2INSTREAM$", single2instream) ret = ret.replace("$OUTSTREAM2SINGLE$", outstream2single) ret = ret.replace("$IN_ELEMS$", str(numInElems)) ret = ret.replace("$OUT_ELEMS$", str(numOutElems)) with open(output_dir + "/wrapper.h", "w") as f: f.write(ret) # emit and run compile script for simulation sim_compile_script = sim_compile_script_template sim_compile_script = sim_compile_script.replace("$GENSRCDIR$", output_dir) script_fn = output_dir + "/simcompile.sh" with open(script_fn, "w") as f: f.write(sim_compile_script) # emit script for on-device emu with MLBP mlbp_script = ondevice_compile_script_template mlbp_script = mlbp_script.replace("$GENSRCDIR$", 
output_dir) script_fn = output_dir + "/mlbpcompile.sh" with open(script_fn, "w") as f: f.write(mlbp_script) # emit script for HLS synthesis hls_script = Template(open(finnroot + "/backend/fpga/scripts/hls-syn-template.tcl").read()) # TODO part and clkperiod should come from selected device hls_script = hls_script.substitute({ "config_proj_name" : "hls_syn", "config_hwsrcdir" : output_dir, "config_bnnlibdir" : finnroot + "/backend/fpga/hls", "config_proj_part" : dev.part, "config_clkperiod" : float(1000/dev.frequency), "config_toplevelfxn" : "BlackBoxJam" }) with open(output_dir + "/hls_syn.tcl", "w") as f: f.write(hls_script) # emit script for Verilator emu compilation after synthesis shutil.copy2(finnroot + "/backend/fpga/scripts/hwemu.sh", output_dir+"/hwemu.sh") # emit BNN-PYNQ bitfile and standalone executable scripts shutil.copy2(finnroot + "/backend/fpga/scripts/make_pynq_standalone_exe.sh", output_dir+"/make_pynq_standalone_exe.sh") shutil.copy2(finnroot + "/backend/fpga/scripts/make_pynq_bitfile.sh", output_dir+"/make_pynq_bitfile.sh") print "Outputting to: ", output_dir ret = backend_util.FPGABackendProduct(output_dir, pipeline, dev) return ret
12,623
async def server_error(request, exc):
    """
    Return an HTTP 500 page.
    """
    template = '500.html'
    context = {'request': request}
    return templates.TemplateResponse(template, context, status_code=500)
12,624
def return_post():
    """
    Returns the post-processing plugins.

    :param: None
    :return: POST_PROCESSING_PLUGINS
    """
    return POST_PROCESSING_PLUGINS
12,625
def redis_uri() -> typing.Optional[str]:
    """Connection URI for Redis server."""
    value = os.environ.get("REDIS_URI")
    if not value:
        log.warning('Optional environment variable "REDIS_URI" is missing')
    return value
12,626
def numpy_to_b64str(img):
    """
    Converts a numpy array into a base 64 string

    Args:
        img (np.array):

    Returns:
        str: base 64 representation of the numpy array/image.
    """
    img = img[..., ::-1]  # flip for cv conversion
    _, img = cv2.imencode('.jpg', img)  # strips header
    image_base64 = base64.b64encode(img)
    base64_string = image_base64.decode('utf-8')  # convert to string
    return base64_string
12,627
def test_top_images_dataset_init_missing_files(top_images_root, subpath, error_pattern):
    """Test TopImagesDataset.__init__ dies when files are missing."""
    path = top_images_root / subpath
    if path.is_dir():
        shutil.rmtree(path)
    else:
        assert path.is_file()
        path.unlink()
    with pytest.raises(FileNotFoundError, match=error_pattern):
        datasets.TopImagesDataset(top_images_root)
12,628
def lennard_jones(r, epsilon, sigma, index=(12, 6)):
    """
    General pair potential resembling a Lennard Jones model.

    Default index values are for a typical LJ potential, also called
    12-6 potential.

    Parameters
    ----------
    r : float or np.ndarray
        Distance between interacting particles. It can be a float or a
        numpy array containing a set of particle-particle distances.
    epsilon : float
        Dispersion energy, i.e. depth of the potential well.
    sigma : float
        Distance at which the potential energy is zero.
    index : tuple, optional
        Power indexes for repulsive and attractive terms. The default is (12, 6).

    Returns
    -------
    float or np.ndarray
        Potential energies at the corresponding distances.
    """
    sig_r = sigma / r
    return 4 * epsilon * (m.pow(sig_r, index[0]) - m.pow(sig_r, index[1]))
12,629
def check_mine_detonation(bot):
    """ Check if a bot stepped on a mine, and if so, detonate it. """
    for mine in arena_globals.mines:
        # Don't want bot detonating its own mine
        if bot is not mine.owner_bot:
            mine_rect = pygame.Rect(mine.x, mine.y, MINE_SIZE, MINE_SIZE)
            bot_rect = pygame.Rect(bot.x, bot.y, BOT_SIZE, BOT_SIZE)
            if bot_rect.colliderect(mine_rect):
                mine.detonate(MINE_SIZE, BOT_SIZE)
12,630
def get_unique_name(x, mult=0, extra=''):
    """
    Returns a unique key composed of inchikey and multiplicity

    >>> mol = get_mol('[O][O]')
    >>> get_unique_name(mol)
    'MYMOFIZGZYHOMD-UHFFFAOYSA-N3'
    """
    mol = get_mol(x, make3D=True)
    if mult == 0:
        mult = mol.spin
    return mol.write("inchikey").strip() + str(mult) + extra
12,631
def get_individual_user(user_id: int) -> JSONResponse:
    """
    Lists all information belonging to one user.

    :param user_id: the id of the user
    :return: status code and response data
    """
    user = _get_db()["users"].find_one({"user_id": user_id})
    return JSONResponse(status_code=status.HTTP_200_OK, content=dumps(user))
12,632
def portfolio_averages( df: pd.DataFrame, groupvar: str, avgvars: Union[str, List[str]], ngroups: int = 10, byvars: Optional[Union[str, List[str]]] = None, cutdf: pd.DataFrame = None, wtvar: Optional[str] = None, count: Union[str, bool] = False, portvar: str = "portfolio", avgonly: bool = False, ) -> Union[pd.DataFrame, Tuple[pd.DataFrame, pd.DataFrame]]: """ Creates portfolios and calculates equal- and value-weighted averages of variables within portfolios. If ngroups=10, then will form 10 portfolios, with portfolio 1 having the bottom 10 percentile of groupvar, and portfolio 10 having the top 10 percentile of groupvar. :Notes: Resets index and drops in output data, so don't use if index is important (input data not affected) :param df: input data :param groupvar: name of variable in df to form portfolios on :param avgvars: variables to be averaged :param ngroups: number of portfolios to form :param byvars: name of variable(s) in df, finds portfolios within byvars. For example if byvars='Month', would take each month and form portfolios based on the percentiles of the groupvar during only that month :param cutdf: optionally determine percentiles using another dataset :param wtvar: name of variable in df to use for weighting in weighted average :param count: pass variable name to get count of non-missing of that variable within groups. :param portvar: name of portfolio variable in the output dataset :param avgonly: True to return only averages, False to return (averages, individual observations with portfolios) :return: """ ports = portfolio( df, groupvar, ngroups=ngroups, byvars=byvars, cutdf=cutdf, portvar=portvar ) if byvars: assert isinstance(byvars, (str, list)) if isinstance(byvars, str): byvars = [byvars] by = [portvar] + byvars avgs = averages(ports, avgvars, byvars=by, wtvar=wtvar, count=count) else: avgs = averages(ports, avgvars, byvars=portvar, wtvar=wtvar, count=count) if avgonly: return avgs else: return avgs, ports
12,633
def TestQuery(): """Runs a test query against the measurement-lab BigQuery database. Returns: (string) The query results formatted as an HTML page. """ # Certify BigQuery access credentials. credentials = AppAssertionCredentials( scope='https://www.googleapis.com/auth/bigquery') http = credentials.authorize(httplib2.Http(memcache)) service = build('bigquery', 'v2', http=http) job_runner = service.jobs() # Run a query against the BigQuery database. logging.debug('Query: %s' % TEST_QUERY) jobdata = {'configuration': {'query': {'query': TEST_QUERY}}} insert = job_runner.insert(projectId=PROJECT_ID, body=jobdata).execute() logging.debug('Response: %s' % insert) currentRow = 0 queryReply = job_runner.getQueryResults( projectId=PROJECT_ID, jobId=insert['jobReference']['jobId'], startIndex=currentRow).execute() results = queryReply while 'rows' in queryReply and currentRow < queryReply['totalRows'] : currentRow += len(queryReply['rows']) queryReply = job_runner.getQueryResults( projectId=PROJECT_ID, jobId=queryReply['jobReference']['jobId'], startIndex=currentRow).execute() if 'schema' not in results or 'fields' not in results['schema']: if 'schema' in queryReply and 'fields' in queryReply['schema']: results['schema'] = queryReply['schema'] if 'rows' in queryReply: results['rows'].extend(queryReply['rows']) # Format the results as an HTML page. body = '<h2>The Query</h2><pre>%s</pre>\n<hr>\n' % TEST_QUERY tablerows = '<tr>' for field in results['schema']['fields']: tablerows += '<th>%s</th>' % field['name'] for row in results['rows']: tablerows += '</tr><tr>' for value in row['f']: tablerows += '<td>%s</td>' % value['v'] tablerows += '</tr>' body += '<table border=1>\n%s\n</table>\n' % tablerows return '<!DOCTYPE html><html><body>%s</body></html>' % body
12,634
def get_zero_crossing_rate(y, get_mean=True):
    """
    Compute the Zero Crossing Rate (ZCR)

    :param y: np.ndarray [shape=(n,)]
        Audio time series
    :param get_mean: bool
        Whether to instead return the mean of ZCR over all frames
    :return: np.ndarray [shape=(1,t)] or float
        ZCR for each frame, or the mean ZCR
    """
    zcrs = librosa.feature.zero_crossing_rate(y=y)
    if get_mean:
        return zcrs.mean()
    else:
        return zcrs
12,635
def parse_arguments():
    """ Parse the command line arguments of the program. """
    parser = argparse.ArgumentParser(description='Train or test the CRNN model.')

    parser.add_argument(
        "--train",
        action="store_true",
        help="Define if we train the model"
    )
    parser.add_argument(
        "--test",
        action="store_true",
        help="Define if we test the model"
    )
    parser.add_argument(
        "-ttr",
        "--train_test_ratio",
        type=float,
        nargs="?",
        help="How the data will be split between training and testing",
        default=0.70
    )
    parser.add_argument(
        "-m",
        "--model_path",
        type=str,
        nargs="?",
        help="The path where the pretrained model can be found or where the model will be saved",
        required=True
    )
    parser.add_argument(
        "-ex",
        "--examples_path",
        type=str,
        nargs="?",
        help="The path to the file containing the examples (training samples)",
        required=True
    )
    parser.add_argument(
        "-bs",
        "--batch_size",
        type=int,
        nargs="?",
        help="Size of a batch",
        default=64
    )
    parser.add_argument(
        "-it",
        "--iteration_count",
        type=int,
        nargs="?",
        help="How many iteration in training",
        default=10
    )
    parser.add_argument(
        "-miw",
        "--max_image_width",
        type=int,
        nargs="?",
        help="Maximum width of an example before truncating",
        default=2000
    )
    parser.add_argument(
        "-mtl",
        "--max_text_length",
        type=int,
        nargs="?",
        help="Max text length in character",
        default=200
    )

    return parser.parse_args()
12,636
def UF9(x):
    """ adapted from https://github.com/Project-Platypus/Platypus/blob/master/platypus/problems.py """
    nvars = len(x)
    count1 = 0
    count2 = 0
    count3 = 0
    sum1 = 0.0
    sum2 = 0.0
    sum3 = 0.0
    E = 0.1

    for j in range(3, nvars+1):
        yj = x[j-1] - 2.0*x[1]*math.sin(2.0*math.pi*x[0] + j*math.pi/nvars)

        if j % 3 == 1:
            sum1 += yj**2
            count1 += 1
        elif j % 3 == 2:
            sum2 += yj**2
            count2 += 1
        else:
            sum3 += yj**2
            count3 += 1

    yj = (1.0 + E) * (1.0 - 4.0*(2.0*x[0] - 1.0)**2)
    yj = max(yj, 0.0)

    f1 = 0.5*(yj + 2.0*x[0])*x[1] + 2.0*sum1/count1
    f2 = 0.5*(yj - 2.0*x[0] + 2.0)*x[1] + 2.0*sum2/count2
    f3 = 1.0 - x[1] + 2.0*sum3/count3

    return np.array([f1, f2, f3])
12,637
def run_command(cmd_str, stdin=None, stdout_devnull=False):
    """ run command """
    cmd = shlex.split(cmd_str)
    try:
        if stdout_devnull:
            # for pg_ctl command
            with open(os.devnull, 'w') as devnull:
                res = subprocess.run(cmd, stdout=devnull)
        else:
            res = subprocess.run(cmd, check=True, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, stdin=stdin)
    except subprocess.CalledProcessError as e:
        logger.critical(traceback.format_exc())
        logger.info('Command: {} '.format(cmd_str))
        logger.info('Stdout: {}'.format(e.stdout.decode("utf8")))
        logger.info('Stderr: {}'.format(e.stderr.decode("utf8")))
        sys.exit(1)
    return res
12,638
def antiderivate(values, ax_val, index, Nper, is_aper, is_phys, is_freqs): """Returns the anti-derivate of values along given axis values is assumed to be periodic and axis is assumed to be a linspace Parameters ---------- values: ndarray array to derivate ax_val: ndarray axis values index: int index of axis along which to derivate Nper: int number of periods to replicate is_aper: bool True if values is anti-periodic along axis is_phys: bool True if physical quantity (time/angle/z) is_freqs: bool True if frequency axis Returns ------- values: ndarray anti-derivate of values """ if is_freqs: dim_array = np.ones((1, values.ndim), int).ravel() dim_array[index] = -1 axis_reshaped = ax_val.reshape(dim_array) values = values / (axis_reshaped * 2 * 1j * np.pi) elif is_phys: if ax_val.size > 1: # Swap axis to always have integration axis on 1st position values = np.swapaxes(values, index, 0) if Nper is None: # Taking input values values_full = values ax_full = ax_val else: # Add last point to axis ax_full = np.concatenate( ( ax_val, np.array([ax_val[-1] + ax_val[1] - ax_val[0]]), ) ) # Get values on a full (anti-)period shape = list(values.shape) shape[0] = shape[0] + 1 values_full = np.zeros(shape, dtype=values.dtype) values_full[:-1, ...] = values # Add first sample at the end of values to integrate on last interval # Last value is the same as (respectively the opposite of) the first value # in case of periodicity (respectively anti-periodicity) values_full[-1, ...] = (-1) ** int(is_aper) * values[0, ...] # Anti-derivate along axis values = np.roll( scp_int.cumulative_trapezoid(values_full, x=ax_full, axis=0), shift=1, axis=0, ) # Integration constant is given by removing average value values = values - np.mean(values, axis=0) # Get N first values and swap axes back to origin values = np.swapaxes(values, 0, index) else: raise Exception("Cannot anti-derivate along axis if axis size is 1") else: raise AxisError("Derivation only available for time/angle/z/freqs") return values
12,639
def check_tensor_occupied_memory(t):
    """ this is a reminder function. """
    print(sys.getsizeof(t.storage()))
12,640
def renumber_labels(label_img):
    """ Re-number nuclei in a labeled image so the nuclei numbers are unique and consecutive. """
    new_label = 0
    for old_label in np.unique(label_img):
        if not old_label == new_label:
            label_img[label_img == old_label] = new_label
        new_label += 1
    return label_img
12,641
def solve(): """ PART 1: You've sneaked into another supply closet - this time, it's across from the prototype suit manufacturing lab. You need to sneak inside and fix the issues with the suit, but there's a guard stationed outside the lab, so this is as close as you can safely get. As you search the closet for anything that might help, you discover that you're not the first person to want to sneak in. Covering the walls, someone has spent an hour starting every midnight for the past few months secretly observing this guard post! They've been writing down the ID of the one guard on duty that night - the Elves seem to have decided that one guard was enough for the overnight shift - as well as when they fall asleep or wake up while at their post (your puzzle input). For example, consider the following records, which have already been organized into chronological order: [1518-11-01 00:00] Guard #10 begins shift [1518-11-01 00:05] falls asleep [1518-11-01 00:25] wakes up [1518-11-01 00:30] falls asleep [1518-11-01 00:55] wakes up [1518-11-01 23:58] Guard #99 begins shift [1518-11-02 00:40] falls asleep [1518-11-02 00:50] wakes up [1518-11-03 00:05] Guard #10 begins shift [1518-11-03 00:24] falls asleep [1518-11-03 00:29] wakes up [1518-11-04 00:02] Guard #99 begins shift [1518-11-04 00:36] falls asleep [1518-11-04 00:46] wakes up [1518-11-05 00:03] Guard #99 begins shift [1518-11-05 00:45] falls asleep [1518-11-05 00:55] wakes up Timestamps are written using year-month-day hour:minute format. The guard falling asleep or waking up is always the one whose shift most recently started. Because all asleep/awake times are during the midnight hour (00:00 - 00:59), only the minute portion (00 - 59) is relevant for those events. Visually, these records show that the guards are asleep at these times: Date ID Minute 000000000011111111112222222222333333333344444444445555555555 012345678901234567890123456789012345678901234567890123456789 11-01 #10 .....####################.....#########################..... 11-02 #99 ........................................##########.......... 11-03 #10 ........................#####............................... 11-04 #99 ....................................##########.............. 11-05 #99 .............................................##########..... The columns are Date, which shows the month-day portion of the relevant day; ID, which shows the guard on duty that day; and Minute, which shows the minutes during which the guard was asleep within the midnight hour. (The Minute column's header shows the minute's ten's digit in the first row and the one's digit in the second row.) Awake is shown as ., and asleep is shown as #. Note that guards count as asleep on the minute they fall asleep, and they count as awake on the minute they wake up. For example, because Guard #10 wakes up at 00:25 on 1518-11-01, minute 25 is marked as awake. If you can figure out the guard most likely to be asleep at a specific time, you might be able to trick that guard into working tonight so you can have the best chance of sneaking in. You have two strategies for choosing the best guard/minute combination. Strategy 1: Find the guard that has the most minutes asleep. What minute does that guard spend asleep the most? In the example above, Guard #10 spent the most minutes asleep, a total of 50 minutes (20+25+5), while Guard #99 only slept for a total of 30 minutes (10+10+10). 
Guard #10 was asleep most during minute 24 (on two days, whereas any other minute the guard was asleep was only seen on one day). While this example listed the entries in chronological order, your entries are in the order you found them. You'll need to organize them before they can be analyzed. What is the ID of the guard you chose multiplied by the minute you chose? (In the above example, the answer would be 10 * 24 = 240.) -------------------------------------------------------------------------------------------------------------------- PART 2: Strategy 2: Of all guards, which guard is most frequently asleep on the same minute? In the example above, Guard #99 spent minute 45 asleep more than any other guard or minute - three times in total. (In all other cases, any guard spent any minute asleep at most twice.) What is the ID of the guard you chose multiplied by the minute you chose? (In the above example, the answer would be 99 * 45 = 4455.) """ with open(_INPUT_FILE) as file: logs = file.read().splitlines() # Clean and sort input log_dict = {} for log in logs: log = log.replace('[', '').replace(']', '') date = log[:16] movement = log[17:] date = datetime.strptime(date, '%Y-%m-%d %H:%M') log_dict[date] = movement # noinspection PyTypeChecker ordered_dict = OrderedDict(sorted(log_dict.items())) guard_id = None start_time = None log_dict = {} for date, movement in ordered_dict.items(): if movement.startswith('Guard'): guard_id = movement.split(' ')[1].replace('#', '') if guard_id not in log_dict: log_dict[guard_id] = dict(duration=0, minutes={}) elif movement.startswith('falls'): start_time = date else: end_time = date duration = (end_time - start_time).total_seconds() / 60.0 log_dict[guard_id]['duration'] += duration for i in range(0, int(duration)): minute_date = (start_time + timedelta(minutes=i)).minute if minute_date in log_dict[guard_id]['minutes']: log_dict[guard_id]['minutes'][minute_date] += 1 else: log_dict[guard_id]['minutes'][minute_date] = 1 maximum_duration = 0 maximum_guard = None for key, value in log_dict.items(): if value['duration'] > maximum_duration: maximum_duration = value['duration'] maximum_guard = key log_minutes = log_dict[maximum_guard]['minutes'] sort_minutes = [(k, log_minutes[k]) for k in sorted(log_minutes, key=log_minutes.get, reverse=True)] for key, value in sort_minutes: print('Part 1: Guard * Maximum minute: {}'.format(int(maximum_guard) * int(key))) break maximum_duration = 0 maximum_minute = 0 maximum_guard = None for key, value in log_dict.items(): log_minutes = value['minutes'] sort_minutes = [(k, log_minutes[k]) for k in sorted(log_minutes, key=log_minutes.get, reverse=True)] if sort_minutes: if sort_minutes[0][1] > maximum_duration: maximum_duration = sort_minutes[0][1] maximum_minute = sort_minutes[0][0] maximum_guard = key print('Part 2: Any Guard * Maximum minute: {}'.format(int(maximum_guard) * int(maximum_minute)))
12,642
async def densenet_xgboost_action_localization( files: List[UploadFile] = File(...), weights_densenet: Optional[str] = "denseXgB_model_mylayer", weights_xgboost: Optional[str] = "recognition_xgboost_prev_frames", classNames: Optional[str] = "classes", save_upload_to_file: bool = False, ) -> Any: """ Get densenet_xgboost action localization result for the video file. """ # Obtain the model paths model_path_densenet = Path(f"model_weights/densenet_xgboost/densenet/{weights_densenet}.h5") model_path_xgboost = Path(f"model_weights/densenet_xgboost/xgboost/{weights_xgboost}.joblib") model_path_classes = Path(f"model_weights/densenet_xgboost/classes/{classNames}.txt") if ( not os.path.isfile(model_path_densenet) or not os.path.isfile(model_path_xgboost) or not os.path.isfile(model_path_classes) ): raise HTTPException( status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, detail="Model weights not available." ) for file in files: try: # Obtain the video path if file.content_type in ['video/mp4']: if save_upload_to_file: video_path = Path(f'uploads/video/{file.filename}') video_path.parent.mkdir(parents=True, exist_ok=True) deps.save_upload_file(upload_file=file, destination=video_path) else: video_path = deps.save_upload_file_tmp(upload_file=file) else: raise HTTPException( status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE, detail="Please upload only .mp4 files." ) model = DenseNet_XGBoost( input_video_path=video_path, model_path_densenet=model_path_densenet, model_path_xgboost=model_path_xgboost, model_path_classes=model_path_classes, video_name=file.filename, ) save_path = model.predict() print(video_path) finally: if not save_upload_to_file: Path.unlink(video_path) # Delete the temp file return { "model_weights_rgb": weights_densenet, "model_weights_of": weights_xgboost, "classNames": classNames, "results_path": save_path }
12,643
def callback():
    """ Extract the OAuth code from the callback and exchange it for an access token. """
    smart_client = _get_smart()
    try:
        smart_client.handle_callback(request.url)
    except Exception as e:
        return """<h1>Authorization Error</h1><p>{}</p><p><a href="/logout">Start over</a></p>""".format(e)
    logging.debug("Got an access token, returning home")
    return redirect('/')
12,644
def __virtual__():
    """
    Return virtual name of the module.

    :return: The virtual name of the module.
    """
    return __virtualname__
12,645
def after_b(session: Session, operation: Operation, model: SQLClass):
    """
    after update it is important to call 'merge' if model is changed in this
    function since model is not part of the session
    """
    print("***************before_name:", operation.command, model, session)
    print(model.comment)
    if operation.command != 'd':
        model.comment_after = f"processed_after_{operation.command}: {model.id}"
        if operation.command == 'u':
            session.merge(model)
    print(model.comment)
12,646
def getsamplev3(qcode):
    """Get a sample object of a given identifier in API V3 style

    Returns:
        A sample (v3) object
    """
    scrit = SampleSearchCriteria()
    scrit.withCode().thatEquals(qcode)
    fetch_opt = SampleFetchOptions()
    fetch_opt.withProperties()
    fetch_opt.withSpace()
    result = api.searchSamples(sessionToken, scrit, fetch_opt)

    samples = []
    for sample in result.getObjects():
        samples.append(sample)

    if len(samples) > 1:
        raise mtbutils.MTBdropboxerror('More than one sample found with identifier {}'.format(qcode))

    return samples[0]
12,647
def test_metadata(base_pkg, field, value):
    """Test metadata is available on base package."""
    assert getattr(base_pkg, f"__{field}__") is not None
12,648
def k892_distribution(mass):
    """Calculate normalized relativistic Breit-Wigner distribution value for K(892) at given mass"""
    if k892_distribution.norm is None:
        k892_distribution.norm = _norm(_k892_distribution_unnormalized)
    return _k892_distribution_unnormalized(mass) / k892_distribution.norm
12,649
def ProcessMoleculesUsingSingleProcess(Mols, PAINSPatternMols, Writer, WriterFiltered):
    """Process and filter molecules using a single process."""

    NegateMatch = OptionsInfo["NegateMatch"]
    OutfileFilteredMode = OptionsInfo["OutfileFilteredMode"]

    Compute2DCoords = OptionsInfo["OutfileParams"]["Compute2DCoords"]
    SetSMILESMolProps = OptionsInfo["OutfileParams"]["SetSMILESMolProps"]

    MiscUtil.PrintInfo("\nFiltering molecules...")

    (MolCount, ValidMolCount, RemainingMolCount) = [0] * 3
    FirstMol = True
    for Mol in Mols:
        MolCount += 1

        if Mol is None:
            continue

        if RDKitUtil.IsMolEmpty(Mol):
            MolName = RDKitUtil.GetMolName(Mol, MolCount)
            MiscUtil.PrintWarning("Ignoring empty molecule: %s" % MolName)
            continue

        ValidMolCount += 1

        if FirstMol:
            FirstMol = False
            if SetSMILESMolProps:
                if Writer is not None:
                    RDKitUtil.SetWriterMolProps(Writer, Mol)
                if WriterFiltered is not None:
                    RDKitUtil.SetWriterMolProps(WriterFiltered, Mol)

        MolMatched = DoesMoleculeContainsPAINSPattern(Mol, PAINSPatternMols)
        if MolMatched == NegateMatch:
            RemainingMolCount += 1
            WriteMolecule(Writer, Mol, Compute2DCoords)
        else:
            if OutfileFilteredMode:
                WriteMolecule(WriterFiltered, Mol, Compute2DCoords)

    return (MolCount, ValidMolCount, RemainingMolCount)
12,650
def UDiv(a: BitVec, b: BitVec) -> BitVec:
    """Create an unsigned division expression.

    :param a:
    :param b:
    :return:
    """
    return _arithmetic_helper(a, b, z3.UDiv)
12,651
def _pressure_level_widths(tro3_cube, ps_cube, top_limit=0.0):
    """Create a cube with pressure level widths.

    This is done by taking a 2D surface pressure field as lower bound.

    Parameters
    ----------
    tro3_cube : iris.cube.Cube
        `Cube` containing `mole_fraction_of_ozone_in_air`.
    ps_cube : iris.cube.Cube
        `Cube` containing `surface_air_pressure`.
    top_limit : float
        Pressure in Pa.

    Returns
    -------
    iris.cube.Cube
        `Cube` of same shape as `tro3_cube` containing pressure level widths.
    """
    pressure_array = _create_pressure_array(tro3_cube, ps_cube, top_limit)

    data = _apply_pressure_level_widths(pressure_array)
    p_level_widths_cube = tro3_cube.copy(data=data)
    p_level_widths_cube.rename('pressure level widths')
    p_level_widths_cube.units = ps_cube.units

    return p_level_widths_cube
12,652
def load_model_configurations(sender):
    """
    Iterates through setting MODELS_CRUD_EVENT searching for the sender model configurations.

    :param sender: Django Model
    :return dict
    """
    for model_config in settings.MODELS_CRUD_EVENT:
        model = model_config['model']
        app, model = model.rsplit('.', 1)
        model = apps.get_app_config(app).get_model(model)
        if sender == model:
            return model_config
    return None
12,653
def _write_csv_file(file_name, write_data_points, write_attributes=None,
                    as_lat_long=False, delimiter=','):
    """Write a .csv file."""
    points = write_data_points
    pointattributes = write_attributes

    fd = open(file_name, 'w')

    if as_lat_long:
        titlelist = "latitude" + delimiter + "longitude" + delimiter
    else:
        titlelist = "x" + delimiter + "y" + delimiter

    if pointattributes is not None:
        for title in list(pointattributes.keys()):
            titlelist = titlelist + title + delimiter
        titlelist = titlelist[0:-len(delimiter)]  # remove the last delimiter

    fd.write(titlelist + "\n")

    # <x/lat> <y/long> [attributes]
    for i, vert in enumerate(points):
        if pointattributes is not None:
            attlist = ","
            for att in list(pointattributes.keys()):
                attlist = attlist + str(pointattributes[att][i]) + delimiter
            attlist = attlist[0:-len(delimiter)]  # remove the last delimiter
            attlist.strip()
        else:
            attlist = ''

        fd.write(str(vert[0]) + delimiter + str(vert[1]) + attlist + "\n")

    fd.close()
12,654
def get_user_by_private_or_public_nickname(nickname: str) -> Optional[User]:
    """
    Gets the user by his (public) nickname, based on the option, whether his nickname is public or not

    :param nickname: Nickname of the user
    :return: Current user or None
    """
    user: User = get_user_by_case_insensitive_nickname(nickname)
    public_user: User = get_user_by_case_insensitive_public_nickname(nickname)

    if not user or not public_user:
        return None

    settings: Settings = user.settings
    if not settings:
        return None

    if settings.should_show_public_nickname and user:
        return user
    elif not settings.should_show_public_nickname and public_user:
        return public_user

    return None
12,655
def copy_file(
    file: File,
    path: Union[Path, str],
    ledger: Ledger,
    overwrite: bool = False,
    description: Optional[str] = None,
) -> None:
    """
    Copy the file from src into dst.

    Args:
        file: File object representing the file that will be copied.
        path: Path to the destination of the copied file.
        ledger: Book keeper to keep track of the generated files.
        overwrite: If False, copy the file if it does not already exist in the \
            target path. If True, overwrite the target file if it is already present.
        description: Description of the file that will be recorded by the Ledger.

    Returns:
        None.
    """
    target = path / file.dst
    if not os.path.isfile(str(target)) or overwrite:
        ledger.register(file, description=description)
        copyfile(file.src, target)
    else:
        _logger.debug(f"file {file.dst} already exists in {target}")
12,656
def expose(window, context, name, monitor): """REST HTTP/HTTPS API to view tuples from a window on a stream. Embeds a Jetty web server to provide HTTP REST access to the collection of tuples in `window` at the time of the last eviction for tumbling windows, or last trigger for sliding windows. Example with a sliding window:: import streamsx.endpoint as endpoint s = topo.source([{'a': 'Hello'}, {'a': 'World'}, {'a': '!'}]).as_json() endpoint.expose(window=s.last(3).trigger(1), context='sample', name='view', monitor='endpoint-out') The URL containing "**context**/**name**" for the sample above ends with: ``/sample/view/tuples`` **URL mapping** The URL contains the following parts: ``https://<base-url>/<prefix>/<context>/<name>/<postfix>`` For a web-server in a job its URLs are exposed with **prefix** path: * jobname/ - When a job name was explictly set. Job names should be simple mapping to a single path element. * streams/jobs/jobid/ - When a job name was not explicitly set. Example URLs within the cluster for application-name of "em" in project "myproject" are * with a web-server in job named "transit" with context "sample" and name "view": ``https://em.myproject.svc:8443/transit/sample/view/tuples`` * with a web-server in job 7: ``https://em.myproject.svc:8443/streams/jobs/7/sample/view/tuples`` * retrieve information for job named "transit" with context "sample" and name "view": ``https://em.myproject.svc:8443/transit/sample/view/ports/info`` Args: window(Window): Windowed stream of tuples that will be viewable using a HTTP GET request. context(str): Defines an URL context path. URL contains ``context``/``name``. name(str): Sink name in the Streams context. This name is part of the URL. monitor(str): The name of the endpoint-monitor that provides the ssl configuration for this endpoint. If it is None, the connection uses plain HTTP Returns: streamsx.topology.topology.Sink: Stream termination. """ _add_toolkit_dependency(window.topology, '[4.3.0,5.0.0)') sslAppConfigName = None if monitor is not None: sslAppConfigName = monitor + '-streams-certs' _op = _HTTPTupleView(window, context=context, name=name, sslAppConfigName=sslAppConfigName) return streamsx.topology.topology.Sink(_op)
12,657
def get_order_args():
    """
    Get order arguments, return a dictionary
    { <VIEW_NAME>: (ORDER_COL, ORDER_DIRECTION) }

    Arguments are passed like: _oc_<VIEW_NAME>=<COL_NAME>&_od_<VIEW_NAME>='asc'|'desc'
    """
    orders = {}
    for arg in request.args:
        re_match = re.findall('_oc_(.*)', arg)
        if re_match:
            order_direction = request.args.get('_od_' + re_match[0])
            if order_direction in ('asc', 'desc'):
                orders[re_match[0]] = (request.args.get(arg), order_direction)
    return orders
12,658
def certreport(req, *opts): """ Generate a report of the certificates (optionally limited by expiration time or key size) found in the selection. :param req: The request :param opts: Options (not used) :return: always returns the unmodified working document **Examples** .. code-block:: yaml - certreport: error_seconds: 0 warning_seconds: 864000 error_bits: 1024 warning_bits: 2048 For key size checking this will report keys with a size *less* than the size specified, defaulting to errors for keys smaller than 1024 bits and warnings for keys smaller than 2048 bits. It should be understood as the minimum key size for each report level, as such everything below will create report entries. Remember that you need a 'publish' or 'emit' call after certreport in your plumbing to get useful output. PyFF ships with a couple of xslt transforms that are useful for turning metadata with certreport annotation into HTML. """ if req.t is None: raise PipeException("Your pipeline is missing a select statement.") if not req.args: req.args = {} if type(req.args) is not dict: raise PipeException("usage: certreport {warning: 864000, error: 0}") error_seconds = int(req.args.get('error_seconds', "0")) warning_seconds = int(req.args.get('warning_seconds', "864000")) error_bits = int(req.args.get('error_bits', "1024")) warning_bits = int(req.args.get('warning_bits', "2048")) seen = {} for eid in req.t.xpath("//md:EntityDescriptor/@entityID", namespaces=NS, smart_strings=False): for cd in req.t.xpath("md:EntityDescriptor[@entityID='%s']//ds:X509Certificate" % eid, namespaces=NS, smart_strings=False): try: cert_pem = cd.text cert_der = base64.b64decode(cert_pem) m = hashlib.sha1() m.update(cert_der) fp = m.hexdigest() if not seen.get(fp, False): entity_elt = cd.getparent().getparent().getparent().getparent().getparent() seen[fp] = True cdict = xmlsec.utils.b642cert(cert_pem) keysize = cdict['modulus'].bit_length() cert = cdict['cert'] if keysize < error_bits: annotate_entity(entity_elt, "certificate-error", "keysize too small", "%s has keysize of %s bits (less than %s)" % (cert.getSubject(), keysize, error_bits)) log.error("%s has keysize of %s" % (eid, keysize)) elif keysize < warning_bits: annotate_entity(entity_elt, "certificate-warning", "keysize small", "%s has keysize of %s bits (less than %s)" % (cert.getSubject(), keysize, warning_bits)) log.warn("%s has keysize of %s" % (eid, keysize)) notafter = cert.getNotAfter() if notafter is None: annotate_entity(entity_elt, "certificate-error", "certificate has no expiration time", "%s has no expiration time" % cert.getSubject()) else: try: et = datetime.strptime("%s" % notafter, "%y%m%d%H%M%SZ") now = datetime.now() dt = et - now if total_seconds(dt) < error_seconds: annotate_entity(entity_elt, "certificate-error", "certificate has expired", "%s expired %s ago" % (cert.getSubject(), -dt)) log.error("%s expired %s ago" % (eid, -dt)) elif total_seconds(dt) < warning_seconds: annotate_entity(entity_elt, "certificate-warning", "certificate about to expire", "%s expires in %s" % (cert.getSubject(), dt)) log.warn("%s expires in %s" % (eid, dt)) except ValueError as ex: annotate_entity(entity_elt, "certificate-error", "certificate has unknown expiration time", "%s unknown expiration time %s" % (cert.getSubject(), notafter)) req.store.update(entity_elt) except Exception as ex: log.debug(traceback.format_exc()) log.error(ex)
12,659
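# A small sketch (not from the original PyFF pipeline) of the expiry arithmetic that
# certreport above performs: X.509 notAfter strings such as "260101120000Z" are parsed
# with the "%y%m%d%H%M%SZ" format and compared against the warning/error windows.
from datetime import datetime

not_after = "260101120000Z"  # hypothetical certificate expiry
remaining = (datetime.strptime(not_after, "%y%m%d%H%M%SZ") - datetime.now()).total_seconds()

if remaining < 0:            # default error_seconds = 0
    print("certificate-error: certificate has expired")
elif remaining < 864000:     # default warning_seconds = 10 days
    print("certificate-warning: certificate about to expire")
else:
    print("certificate ok")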
async def test_ssdp_not_supported(hass: HomeAssistantType, fritz: Mock): """Test starting a flow from discovery with unsupported device.""" fritz().get_device_elements.side_effect = HTTPError("Boom") result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_SSDP}, data=MOCK_SSDP_DATA ) assert result["type"] == RESULT_TYPE_FORM assert result["step_id"] == "confirm" result = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={CONF_PASSWORD: "whatever", CONF_USERNAME: "whatever"}, ) assert result["type"] == RESULT_TYPE_ABORT assert result["reason"] == "not_supported"
12,660
def version_info(): # pragma: no cover """ Get version of nameko_kafka package as tuple """ return tuple(map(int, __version__.split('.')))
12,661
def StrokePathCommandAddCapType(builder, capType): """This method is deprecated. Please switch to AddCapType.""" return AddCapType(builder, capType)
12,662
def process_images(dummy_request): """Downloads and processes all images uploaded before resize logic fix deployment""" global n_global_resized media_bucket = storage_client.bucket(MEDIA_BUCKET) process_global_images(db_pool, media_bucket) process_user_images(db_pool, media_bucket) return f"Done! \n\n resized, replaced: \nGlobal: {n_global_resized}\n User: {n_user_resized}"
12,663
def _filename_pattern(ext): """Returns an re matching native or tfrecord files of format `ext`.""" return r".*\.{}(\.tfrecord)?(\.gz)?".format(ext)
12,664
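# Quick illustration of the pattern returned by _filename_pattern above; the file
# names are hypothetical and the function itself is assumed to be in scope.
import re

pattern = re.compile(_filename_pattern("jsonl"))
assert pattern.match("data/train.jsonl")
assert pattern.match("data/train.jsonl.tfrecord.gz")
assert not pattern.match("data/train.csv")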
def show_images():
    """ Show samples from each class
    """
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    num_train, img_channels, img_rows, img_cols = X_train.shape
    num_test, _, _, _ = X_test.shape
    num_classes = len(np.unique(y_train))

    class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
                   'dog', 'frog', 'horse', 'ship', 'truck']

    fig = plt.figure(figsize=(8, 3))
    for i in range(num_classes):
        ax = fig.add_subplot(2, 5, 1 + i, xticks=[], yticks=[])
        idx = np.where(y_train[:]==i)[0]
        x_idx = X_train[idx,::]
        img_num = np.random.randint(x_idx.shape[0])
        im = np.transpose(x_idx[img_num,::], (1, 2, 0))
        ax.set_title(class_names[i])
        plt.imshow(im)
    plt.show()
12,665
def assemble_remote_url():
    """
    Assemble the target server URL, i.e. generate the value of parse.remote_url
    :rtype: str
    """
    if parse.is_external_domain:
        # the request is for an external domain
        scheme = 'https://' if parse.is_https else 'http://'
        return urljoin(scheme + parse.remote_domain, parse.remote_path_query)
    else:
        # the request is for the main domain, or a domain treated as an alias of it
        return urljoin(target_scheme + target_domain, parse.remote_path_query)
12,666
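# Illustration (not from the original module) of the urljoin call that
# assemble_remote_url above relies on: the scheme plus remote domain is joined
# with the original path and query string. Domain and path are hypothetical.
from urllib.parse import urljoin

print(urljoin("https://example.org", "/search?q=test&page=2"))
# -> https://example.org/search?q=test&page=2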
def test_add_colormap(attribute): """Test directly adding a vispy Colormap object""" shape = (10, 2) np.random.seed(0) data = 20 * np.random.random(shape) annotations = {'point_type': _make_cycled_properties([0, 1.5], shape[0])} color_kwarg = f'{attribute}_color' colormap_kwarg = f'{attribute}_colormap' args = {color_kwarg: 'point_type', colormap_kwarg: 'viridis'} layer = Points(data, properties=annotations, **args) setattr(layer, f'{attribute}_colormap', get_colormap('gray')) layer_colormap = getattr(layer, f'{attribute}_colormap') assert 'unnamed colormap' in layer_colormap.name
12,667
def test_runClean() -> None: """Run `buildnis` with the argument `--clean` to remove generated build data""" with pytest.raises(expected_exception=SystemExit) as excp: runBuildnis(["--clean"]) assert excp.value.args[0] == 0
12,668
def register_blueprints(app): """Register Flask blueprints.""" app.register_blueprint(public.views.blueprint) app.register_blueprint(drawbot.views.blueprint) app.register_blueprint(user.views.blueprint) return None
12,669
def figure() -> None: """Helper to create new figure""" plt.close() plt.figure(figsize=(10, 3))
12,670
def write_attribute(xml_elem, elem: str=None, attrib: str=None, txt: str=None): """ Write new text to a xml attribute. Elem can be used to refer to a subelement of the current xml_elem Args: xml_elem: The current xml element elem (str): The requested element tag name attrib (str): The attribute name txt (str): The new text for the element Returns: xml_elem: The modified xml element """ if xml_elem is not None: if elem is not None: xml_elem = try_get_single_element_from_xml(elem=elem, xml_elem=xml_elem) if xml_elem is not None: xml_elem.set(attrib, txt) return xml_elem
12,671
def _ndarray_feature(x: np.ndarray) -> tf.train.Feature:
    """Create an ndarray feature stored as bytes."""
    x_bytes = x.tobytes()  # tobytes() replaces the deprecated ndarray.tostring()
    feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=[x_bytes]))
    return feature
12,672
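# A hedged usage sketch for _ndarray_feature above: wrapping the bytes feature in a
# tf.train.Example record. The feature name "embedding" is hypothetical, and the
# function itself is assumed to be in scope.
import numpy as np
import tensorflow as tf

x = np.arange(6, dtype=np.float32).reshape(2, 3)
example = tf.train.Example(
    features=tf.train.Features(feature={"embedding": _ndarray_feature(x)})
)
serialized = example.SerializeToString()  # bytes, ready to write to a TFRecord file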
def get_img_compliance_level(profile): """ Try to figure out the IIIF Image API compliance level given the `profile` value from a info.json. """ patt_iiif = re.compile('level([0-2])\.json$') patt_stan = re.compile('#level([0-2])$') def get_from_str(s): m = None if 'http://iiif.io/api/image/2/' in s: m = patt_iiif.search(s) elif 'http://library.stanford.edu/iiif/image-api/' in s: m = patt_stan.search(s) if m: return int(m.group(1)) return -1 lvl = -1 if type(profile) == str: lvl = get_from_str(profile) elif type(profile) == list: for p in [x for x in profile if type(x) == str]: found = get_from_str(p) if found != -1: lvl = found break if lvl == -1: log('Could not find compliance level in info.json.') return lvl
12,673
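# Example input (a hypothetical info.json profile list) for get_img_compliance_level
# above; with a matching profile entry the module-level log() helper is never called.
profile = [
    "http://iiif.io/api/image/2/level2.json",
    {"formats": ["gif", "pdf"], "qualities": ["color"]},
]
print(get_img_compliance_level(profile))  # -> 2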
def _extract_protocol_layers(deserialized_data): """ Removes unnecessary values from packets dictionaries. :param deserialized_data: Deserialized data from tshark. :return: List of filtered packets in dictionary format. """ packets_filtered = [] for packet in deserialized_data: packets_filtered.append(packet["_source"]["layers"]) return packets_filtered
12,674
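# Tiny illustration for _extract_protocol_layers above, using a made-up fragment of
# `tshark -T json` output (one packet with two protocol layers).
packets = [
    {"_source": {"layers": {"eth": {"eth.type": "0x0800"}, "ip": {"ip.ttl": "64"}}}}
]
print(_extract_protocol_layers(packets))
# -> [{'eth': {'eth.type': '0x0800'}, 'ip': {'ip.ttl': '64'}}]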
def collect_examples(): """ Collects test designs that have readme files and converts them for documentation examples. """ # Base directory for tests tests_dir = os.path.abspath( os.path.join( os.path.dirname(__file__), "../tests" ) ) # Base directory for examples examples_dir = os.path.abspath( os.path.join( os.path.dirname(__file__), "examples" ) ) # Skip if the directory already exist if os.path.isdir(examples_dir): return print("Collecting test designs and including them in \"examples\" section") # Create the directory os.makedirs(examples_dir, exist_ok=True) # Look for all subdirectories that have "readme.rst" file inside tests = [] for d in os.listdir(tests_dir): # Only directories d_name = os.path.join(tests_dir, d) if os.path.isdir(d_name): # Must contain the readme file f_name = os.path.join(d_name, README_NAME) if os.path.isfile(f_name): tests.append((d, d_name,)) # Process each test check_ignore = ignore_patterns("*.v", "golden.*.xml") for test_name, test_src in tests: test_rel = os.path.relpath(test_src, tests_dir) test_dst = os.path.join(examples_dir, test_rel) print("", test_name) # Copy files copytree(test_src, test_dst, ignore=check_ignore) # Build XMLs for verilog giles process_verilog_files(test_src, test_dst) # Build examples.rst tname = os.path.join(os.path.dirname(__file__), TEMPLATE_NAME) fname = os.path.join(examples_dir, INDEX_NAME) with open(tname, "r") as fsrc, open(fname, "w") as fdst: # Copy for line in fsrc: fdst.write(line) # Append included tests tests = sorted(tests, key=lambda t:t[0]) for test_name, _ in tests: fdst.write(" {}/{}\n".format(test_name, README_NAME))
12,675
def calculate_affinity(
    adata: AnnData,
    level: int = 1,
    block_key: Optional[str] = 'nsbm',
    group_by: Optional[str] = None,
    state: Optional = None,
    neighbors_key: Optional[str] = 'neighbors',
    adjacency: Optional[sparse.spmatrix] = None,
    directed: bool = False,
    use_weights: bool = False,
    obsp: Optional[str] = None,
    back_prob: bool = False,
    copy: bool = False
) -> Optional[AnnData]:
    """\
    Calculate cell affinity given a partition scheme. It can be used for
    partitions calculated using schist or for any partition scheme, given
    for example by cell annotations.

    Parameters
    ----------
    adata:
        The AnnData object. Should have been already processed with schist
    level:
        The level to calculate affinity. This parameter is effective
        only for Nested partitions
    block_key:
        The prefix for partitions. This parameter is ignored if the state
        is not gt.NestedBlockState
    group_by:
        The key for group names used for calculations. Setting this will override
        level and block_key. This is effective only for NestedBlockState partitions
    state:
        Optionally calculate affinities on this state.
    neighbors_key
        Use neighbors connectivities as adjacency.
        If not specified, leiden looks .obsp['connectivities'] for connectivities
        (default storage place for pp.neighbors).
        If specified, leiden looks
        .obsp[.uns[neighbors_key]['connectivities_key']] for connectivities.
    adjacency
        Sparse adjacency matrix of the graph, defaults to neighbors connectivities.
    directed
        Whether to treat the graph as directed or undirected.
    use_weights
        If `True`, edge weights from the graph are used in the computation
        (placing more emphasis on stronger edges).
    copy:
        Return a new object or do everything in place

    Returns
    -------
    Depending on `copy`, returns or updates `adata` with affinity values
    in adata.obsm[f'CA_{block_key}_level_{level}']
    """
    matrix_key = f'CA_{block_key}_level_{level}'  # the default name of the matrix
    if group_by:
        logg.info(f'Calculating cell affinity to {group_by}')
    else:
        logg.info(f'Calculating cell affinity to level {level}')

    if not state:
        # if no state is provided, use the default to retrieve graph
        if 'schist' in adata.uns and 'blocks' in adata.uns['schist'][f'{block_key}']:
            params = adata.uns['schist'][f'{block_key}']['params']
            if 'neighbors_key' in params:
                neighbors_key = params['neighbors_key']
            if 'use_weights' in params:
                use_weights = params['use_weights']
            if 'deg_corr' in params:
                deg_corr = params['deg_corr']
            state = state_from_blocks(adata,
                                      state_key=block_key,
                                      neighbors_key=neighbors_key,
                                      adjacency=adjacency,
                                      directed=directed,
                                      use_weights=use_weights,
                                      deg_corr=deg_corr
                                      )
            g = state.g
        elif not neighbors_key:
            # no state and no adjacency provided, raise an error
            raise ValueError("A state or an adjacency matrix should be given. "
                             "Otherwise a graph cannot be computed")
        else:
            # get the graph from the adjacency
            adjacency = _choose_graph(adata, obsp, neighbors_key)
            g = get_igraph_from_adjacency(adjacency, directed=directed)
            g = g.to_graph_tool()
            gt.remove_parallel_edges(g)
            state = gt.BlockState(g)
    else:
        g = state.g

    if group_by:
        matrix_key = f'CA_{group_by}'
        # if groups are given, we generate a new BlockState and work on that
        if group_by in adata.obs.columns and adata.obs[group_by].dtype.name == 'category':
            partitions = adata.obs[group_by].cat.codes.values
            state = gt.BlockState(g, b=partitions)
            if back_prob:
                ca_matrix = get_cell_back_p(state)
            else:
                ca_matrix = get_cell_loglikelihood(state, as_prob=True)
        else:
            raise ValueError(f"{group_by} should be a categorical entry in adata.obs")
    else:
        # use precomputed blocks and states
        if type(state) == gt.NestedBlockState:
            if back_prob:
                p0 = get_cell_back_p(state, level=0)
            else:
                p0 = get_cell_loglikelihood(state, level=0, as_prob=True)
            group_col = None
            if group_by and group_by in adata.obs.columns:
                group_col = group_by
            else:
                g_name = f'{block_key}_level_{level}'
                if g_name in adata.obs.columns:
                    group_col = g_name
            if not group_col:
                raise ValueError("The provided groups or level/blocks do not exist")
            g0 = pd.Categorical(state.project_partition(0, 0).a)
            cross_tab = pd.crosstab(g0, adata.obs[group_col], normalize='index')
            ca_matrix = (p0 @ cross_tab).values
        elif type(state) == gt.PPBlockState:
            if back_prob:
                ca_matrix = get_cell_back_p(state)
            else:
                ca_matrix = get_cell_loglikelihood(state, as_prob=True)
            matrix_key = 'CA_ppbm'

    adata.obsm[matrix_key] = ca_matrix

    return adata if copy else None
12,676
async def scrape_website(client):
    """
    :param client: client bot is connected to
    :return: only if there's an issue

    Type '!scrape' to restart the scraping process.

    Note: this function is executed on bot_ready, so I have to work around
    not having a convenient guild object.
    """
    debug_channel = utils.get_bot_commands_channel(client)
    await debug_channel.send(f"Started web scraping.")
    print(f"Web scraper starting...")

    # This loop runs until the scraper is turned off or availability cannot be fetched.
    while True:

        # During this web scraping, first check whether any commands were
        # issued to force stop this functionality.
        if scraper.issued_off:
            game_lab_channel = utils.get_game_lab_channel(client)
            print(f"Successfully turned off web scraper")
            await debug_channel.send(f"Successfully turned off scraper.\n\nPlease go to {game_lab_channel.mention} and verify this action by comparing its edited timestamp.")
            scraper.issued_off = False
            scraper.is_scraping = False
            return

        # Secondly, check if the embeds exist.
        # It's possible someone may have deleted them mid-process.
        if not await validators.validate_pc_availability_embeds(client):
            print(f"...web scraping ending prematurely - embeds are missing! (This can be restarted with !scrape)")
            await debug_channel.send(f"ERROR: Machine availability panels must first exist in the channel `#{debug_channel}`! You can add these panels by entering `!gamelab` inside the channel, then start auto-updating PC availability with `!scrape`.")
            return

        scraper.is_scraping = True
        pc_statuses = await _get_scraped_pc_availability()
        if pc_statuses is None:
            print("Game Lab Availability is offline. Unable to get PC statuses. Restart bot with !restart.")
            break
        print(f"Updating PC availability with the following statuses:\n\t{pc_statuses}")
        await update_machine_availability_embed(client, pc_statuses)
        print(f"Trying again in 5 seconds")
        await asyncio.sleep(5)
    return None
12,677
def _check_spot_bid(spot_bid, spot_history): """ Prevents users from potentially over-paying for instances Note: this checks over the whole region, not a particular zone :param spot_bid: float :type spot_history: list[SpotPriceHistory] :raises UserError: if bid is > 2X the spot price's average >>> from collections import namedtuple >>> FauxHistory = namedtuple( "FauxHistory", [ "price", "availability_zone" ] ) >>> spot_data = [ FauxHistory( 0.1, "us-west-2a" ), \ FauxHistory( 0.2, "us-west-2a" ), \ FauxHistory( 0.3, "us-west-2b" ), \ FauxHistory( 0.6, "us-west-2b" ) ] >>> # noinspection PyProtectedMember >>> _check_spot_bid( 0.1, spot_data ) >>> # noinspection PyProtectedMember # >>> Box._check_spot_bid( 2, spot_data ) Traceback (most recent call last): ... UserError: Your bid $ 2.000000 is more than double this instance type's average spot price ($ 0.300000) over the last week """ average = mean([datum.price for datum in spot_history]) if spot_bid > average * 2: logger.warn("Your bid $ %f is more than double this instance type's average " "spot price ($ %f) over the last week", spot_bid, average)
12,678
def numpy_dtypes_for_minmax(request): """ Fixture of numpy dtypes with min and max values used for testing cummin and cummax """ dtype = request.param min_val = ( np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min ) max_val = ( np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max ) return (dtype, min_val, max_val)
12,679
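# A short standalone illustration (outside pytest) of the iinfo/finfo lookups the
# fixture above performs, shown for one integer and one float dtype.
import numpy as np

for dtype in (np.int32, np.float64):
    info = np.iinfo(dtype) if np.dtype(dtype).kind == "i" else np.finfo(dtype)
    print(np.dtype(dtype).name, info.min, info.max)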
def solve(topics): """Solve.""" a_words, b_words = get_dicts(topics) candidates = [] original = [] duplicates = [] for a, b in topics: # print(a, b) # print(a_words[a], b_words[b]) if not (a_words[a] == 1 or b_words[b] == 1): candidates.append((a, b)) else: original.append((a, b)) a_words_org, b_words_org = get_dicts(original) while len(candidates) > 0: l_candidates = [] for a, b in candidates: if a_words_org[a] >= 1 and b_words_org[b] >= 1: duplicates.append((a, b)) else: l_candidates.append((a, b)) candidates = l_candidates[:] # print(candidates) return len(candidates)
12,680
def mock_jobflow_settings(memory_jobstore): """Mock the jobflow settings to use our specific jobstore (with data store).""" from unittest import mock from jobflow.settings import JobflowSettings settings = JobflowSettings(JOB_STORE=memory_jobstore) with mock.patch("jobflow.SETTINGS", settings): yield
12,681
def get_package_plugin(package_type): """ Get a plugin for a specific package Parameters ---------- package_type: str The package type to fetch Returns ------- InvirtualEnvPlugin: The invirtualenv plugin for the specific package_type """ for plugin in installed_plugins(): if package_type in plugin.package_formats: return plugin
12,682
def _type_annotation( node: ast.AST, atok: asttokens.ASTTokens ) -> Tuple[Optional[TypeAnnotation], Optional[Error]]: """Parse the type annotation.""" if isinstance(node, ast.Name): return AtomicTypeAnnotation(identifier=Identifier(node.id), node=node), None elif isinstance(node, ast.Constant): if not isinstance(node.value, str): return ( None, Error( node.value, f"Expected a string literal " f"if the type annotation is given as a constant, " f"but got: " f"{node.value!r} (as {type(node.value)})", ), ) return AtomicTypeAnnotation(identifier=Identifier(node.value), node=node), None elif isinstance(node, ast.Subscript): if not isinstance(node.value, ast.Name): return ( None, Error( node.value, f"Expected a name to define " f"a subscripted type annotation," f"but got: {atok.get_text(node.value)}", ), ) # NOTE (mristin, 2022-01-22): # There were breaking changes between Python 3.8 and 3.9 in ``ast`` module. # Relevant to this particular piece of parsing logic is the deprecation of # ``ast.Index`` and ``ast.ExtSlice`` which is replaced with their actual value # and ``ast.Tuple``, respectively. # # Hence we need to switch on Python version and get the underlying slice value # explicitly. # # See deprecation notes just at the end of: # https://docs.python.org/3/library/ast.html#ast.AST if isinstance(node.slice, ast.Slice): return ( None, Error( node.slice, f"Expected an index to define a subscripted type annotation, " f"but got a slice: {atok.get_text(node.slice)}", ), ) # noinspection PyUnresolvedReferences if (sys.version_info < (3, 9) and isinstance(node.slice, ast.ExtSlice)) or ( sys.version_info >= (3, 9) and isinstance(node.slice, ast.Tuple) and any(isinstance(elt, ast.Slice) for elt in node.slice.elts) ): return ( None, Error( node.slice, f"Expected an index to define a subscripted type annotation, " f"but got an extended slice: {atok.get_text(node.slice)}", ), ) # NOTE (mristin, 2022-01-22): # Please see the note about the deprecation of ``ast.Index`` above. index_node = None # type: Optional[ast.AST] if sys.version_info < (3, 9): # noinspection PyUnresolvedReferences if isinstance(node.slice, ast.Index): index_node = node.slice.value else: return ( None, Error( node.slice, f"Expected an index to define a subscripted type annotation, " f"but got: {atok.get_text(node.slice)}", ), ) else: index_node = node.slice assert index_node is not None subscripts = [] # type: List[TypeAnnotation] if isinstance(index_node, ast.Tuple): for elt in index_node.elts: subscript_annotation, error = _type_annotation(node=elt, atok=atok) if error is not None: return None, error assert subscript_annotation is not None subscripts.append(subscript_annotation) elif isinstance(index_node, (ast.Name, ast.Subscript, ast.Constant)): subscript_annotation, error = _type_annotation(node=index_node, atok=atok) if error is not None: return None, error assert subscript_annotation is not None subscripts.append(subscript_annotation) else: return ( None, Error( index_node, f"Expected a tuple, a name, a subscript or a string literal " f"for a subscripted type annotation, " f"but got: {atok.get_text(index_node)}", ), ) return ( SubscriptedTypeAnnotation( identifier=Identifier(node.value.id), subscripts=subscripts, node=node, ), None, ) else: return ( None, Error( node, f"Expected either atomic type annotation (as name or string literal) " f"or a subscripted one (as a subscript), " f"but got: {atok.get_text(node)} (as {type(node)})", ), )
12,683
def validate_basic_message(msg): """Validate basic messages. This example just uses basic assertions but you could easily use a schema library to get more sophisticated validators. """ assert msg.type == TYPE assert "~l10n" in msg assert "sent_time" in msg assert "content" in msg return msg
12,684
def _filter_event_queryset(queryset, params, srs=None): """ Filter events queryset by params (e.g. self.request.query_params in EventViewSet) """ # Filter by string (case insensitive). This searches from all fields # which are marked translatable in translation.py val = params.get('text', None) if val: val = val.lower() # Free string search from all translated fields fields = EventTranslationOptions.fields # and these languages languages = [x[0] for x in settings.LANGUAGES] qset = Q() for field in fields: for lang in languages: kwarg = {field + '_' + lang + '__icontains': val} qset |= Q(**kwarg) queryset = queryset.filter(qset) val = params.get('last_modified_since', None) # This should be in format which dateutil.parser recognizes, e.g. # 2014-10-29T12:00:00Z == 2014-10-29T12:00:00+0000 (UTC time) # or 2014-10-29T12:00:00+0200 (local time) if val: dt = parse_time(val, is_start=False) queryset = queryset.filter(Q(last_modified_time__gte=dt)) val = params.get('start', None) if val: dt = parse_time(val, is_start=True) queryset = queryset.filter(Q(end_time__gt=dt) | Q(start_time__gte=dt)) val = params.get('end', None) if val: dt = parse_time(val, is_start=False) queryset = queryset.filter(Q(end_time__lt=dt) | Q(start_time__lte=dt)) val = params.get('bbox', None) if val: bbox_filter = build_bbox_filter(srs, val, 'position') places = Place.geo_objects.filter(**bbox_filter) queryset = queryset.filter(location__in=places) # Filter by data source, multiple sources separated by comma val = params.get('data_source', None) if val: val = val.split(',') queryset = queryset.filter(data_source_id__in=val) # Negative filter by data source, multiple sources separated by comma val = params.get('data_source!', None) if val: val = val.split(',') queryset = queryset.exclude(data_source_id__in=val) # Filter by location id, multiple ids separated by comma val = params.get('location', None) if val: val = val.split(',') queryset = queryset.filter(location_id__in=val) # Filter by keyword id, multiple ids separated by comma val = params.get('keyword', None) if val: val = val.split(',') queryset = queryset.filter(keywords__pk__in=val) # Filter only super or sub events if recurring has value val = params.get('recurring', None) if val: val = val.lower() if val == 'super': queryset = queryset.filter(is_recurring_super=True) elif val == 'sub': queryset = queryset.filter(is_recurring_super=False) val = params.get('max_duration', None) if val: dur = parse_duration_string(val) cond = 'end_time - start_time <= %s :: interval' queryset = queryset.extra(where=[cond], params=[str(dur)]) val = params.get('min_duration', None) if val: dur = parse_duration_string(val) cond = 'end_time - start_time >= %s :: interval' queryset = queryset.extra(where=[cond], params=[str(dur)]) val = params.get('publisher', None) if val: queryset = queryset.filter(publisher__id=val) return queryset
12,685
def process_user(enrollment, section): """Handle getting assignments for a single user Args: enrollment (canvasapi.enrollment.Enrollment): Canvas <Enrollment> object section (canvasapi.section.Section): Canvas <Section> object Returns: [list]: formatted list for writing to the CSV """ missing = get_user_missing(section, enrollment.user["id"]) login = course.get_user(enrollment.user["id"]).login_id regex = re.compile("@") if regex.search(login) is None: email = f"{login}@elkhart.k12.in.us" else: email = login return [ enrollment.user["sortable_name"], email, section.name, enrollment.last_activity_at, len(missing), ", ".join(missing), ]
12,686
def op(name, data, bucket_count=None, display_name=None, description=None, collections=None): """Create a histogram summary op. Arguments: name: A unique name for the generated summary node. data: A `Tensor` of any shape. Must be castable to `float64`. bucket_count: Optional positive `int`. The output will have this many buckets, except in two edge cases. If there is no data, then there are no buckets. If there is data but all points have the same value, then there is one bucket whose left and right endpoints are the same. display_name: Optional name for this summary in TensorBoard, as a constant `str`. Defaults to `name`. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[Graph Keys.SUMMARIES]`. Returns: A TensorFlow summary op. """ if display_name is None: display_name = name summary_metadata = metadata.create_summary_metadata( display_name=display_name, description=description) with tf.name_scope(name): tensor = _buckets(data, bucket_count=bucket_count) return tf.summary.tensor_summary(name='histogram_summary', tensor=tensor, collections=collections, summary_metadata=summary_metadata)
12,687
def fix_target_not_found(issue: issues.TargetNotFound, tree: Tree, patches: Patches, opt: options.Options) -> None: """Fix a `TargetNotFound` issue. The result of this function's successful run is a patch added to `patches`. """ if opt.mode == '-i': __fix_target_not_found_i(issue, tree, patches, opt) elif opt.mode == '-a': object_index = tree.names object_name = Path(issue.link.get_href()).name __fix_a(issue, tree, object_name, object_index, patches)
12,688
def doc_iter(limit=None): """iterate over all documents (doc = single paragraph)""" from itertools import islice for path in islice(list_data_files(),limit): yield from json_to_docs(path)
12,689
def read_as_str(file):
    """
    Read a file and return its contents as a string.
    """
    try:
        with open(file, 'r') as f:
            return f.read()
    except IOError:
        return ""
12,690
def test_normalize_text(name: str, expected_output: str) -> None: """It normalizes the text.""" assert converter.normalize_text(name) == expected_output
12,691
def has_xml_header(filepath):
    """
    Return True if the first line of the file is <?xml
    :param filepath:
    :return:
    """
    try:
        with open(filepath, 'r', errors='ignore') as f:
            return f.readline().lstrip().startswith('<?xml')
    except IOError:
        return False
12,692
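# Quick check for has_xml_header above using a throwaway file; the function is
# assumed to be in scope.
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".xml", delete=False) as tmp:
    tmp.write('<?xml version="1.0"?>\n<root/>\n')
print(has_xml_header(tmp.name))  # -> True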
def shikaku(givens): """Solver for Shikaku minipuzzles.""" sym = grilops.make_number_range_symbol_set(0, SIZE * SIZE - 1) sg = grilops.SymbolGrid(LATTICE, sym) rc = grilops.regions.RegionConstrainer( LATTICE, solver=sg.solver, rectangular=True ) shifter = Shifter(sg.solver) for p in LATTICE.points: sg.solver.add(sg.cell_is(p, rc.region_id_grid[p])) given = givens[p.y][p.x] if given > 0: given = shifter.given(p, given) sg.solver.add(rc.parent_grid[p] == grilops.regions.R) sg.solver.add(rc.region_size_grid[p] == given) else: sg.solver.add(rc.parent_grid[p] != grilops.regions.R) assert sg.solve() sg.print() print() shifter.print_shifts() print() return shifter.eval_binary()
12,693
def parse_arguments(args_to_parse): """Parse the command line arguments. Parameters ---------- args_to_parse: list of str Arguments to parse (split on whitespaces). """ description = "PyTorch implementation of CNN's for Human Activity Recognition" default_config = get_config_section([CONFIG_FILE], "Preset") parser = argparse.ArgumentParser(description=description, formatter_class=FormatterNoDuplicate) # Learning options training = parser.add_argument_group('Training specific options') training.add_argument('-d', '--dataset', help="Path to training data.", default=default_config['dataset'], choices=DATASETS) training.add_argument('-b', '--batch-size', type=int, default=default_config['batch_size'], help='Batch size for training.') training.add_argument('--lr', type=float, default=default_config['lr'], help='Learning rate.') training.add_argument('-e', '--epochs', type=int, default=default_config['epochs'], help='Maximum number of epochs to run for.') training.add_argument('-s', '--is_standardized', type=bool, default=default_config['is_standardized'], help='Whether to standardize the data.') # Model Options model = parser.add_argument_group('Model specific options') model.add_argument('-m', '--model-type', default=default_config['model'], choices=MODELS, help='Type of encoder to use.') # General options general = parser.add_argument_group('General options') general.add_argument('-n', '--name', type=str, default=default_config['name'], help="Name of the model for storing and loading purposes.") # Evaluation options evaluation = parser.add_argument_group('Evaluation specific options') evaluation.add_argument('--is-eval-only', action='store_true', default=default_config['is_eval_only'], help='Whether to only evaluate using precomputed model `name`.') evaluation.add_argument('--no-test', action='store_true', default=default_config['no_test'], help="Whether or not to compute the test losses.`") args = parser.parse_args(args_to_parse) return args
12,694
def test_create_key_pair_file(tmpdir): """Tests create_key_pair_file""" from apyfal._utilities import create_key_pair_file import apyfal._utilities as _utl tmp_dir = tmpdir.dirpath() ssh_dir = tmp_dir.join('.ssh') key_pair = 'key_pair' key_content = 'key_content' # Mock SSH path utl_ssh_dir = _utl.SSH_DIR _utl.SSH_DIR = str(ssh_dir) # Tests try: assert not ssh_dir.check(dir=True) # Not existing file create_key_pair_file(key_pair, key_content) assert ssh_dir.check(dir=True) assert ssh_dir.join(key_pair + '.pem').check(file=True) assert ssh_dir.join(key_pair + '.pem').read('rt') == key_content # File with same content exists create_key_pair_file(key_pair, key_content) assert not ssh_dir.join(key_pair + '_2.pem').check(file=True) # File with different content exists key_content = 'another_key_content' create_key_pair_file(key_pair, key_content) assert ssh_dir.join(key_pair + '_2.pem').check(file=True) assert ssh_dir.join(key_pair + '_2.pem').read('rt') == key_content # Restore os.path.expanduser finally: _utl.SSH_DIR = utl_ssh_dir
12,695
def invert_qgniw(qh,phi,phih,k,l,f0):
    """ Calculate the streamfunction given the potential vorticity.
        The algorithm is:
            1) Calculate wave potential vorticity
            2) Invert for wave, pw, and vortex streamfunctions, pv.
            3) Calculate the geostrophic streamfunction, p = pv+pw.
    """
    wv2 = k**2 + l**2
    wv2i = 1./wv2
    wv2i[0,0] = 0

    phih = np.fft.fft2(phi)
    phix, phiy = np.fft.ifft2(1j*k*phih), np.fft.ifft2(1j*l*phih)
    jach = np.fft.fft2((1j*(np.conj(phix)*phiy - np.conj(phiy)*phix)).real)
    jach[0,0] = 0

    # the wavy PV
    phi2 = np.abs(phi)**2
    gphi2h = -wv2*np.fft.fft2(phi2)
    qwh = 0.5*(0.5*gphi2h + jach)/f0

    # invert for psi
    pw = np.fft.ifft2((wv2i*qwh)).real
    pv = np.fft.ifft2(-(wv2i*qh)).real
    p = pv+pw
    ph = np.fft.fft2(p)

    return ph
12,696
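# A hedged usage sketch for invert_qgniw above: build doubly periodic wavenumber
# grids with np.fft.fftfreq and invert a random PV field. Grid size, domain length
# and f0 are arbitrary choices, not values taken from the original model.
import numpy as np

n, L, f0 = 64, 2 * np.pi, 1.0e-4
kk = 2 * np.pi * np.fft.fftfreq(n, d=L / n)
k, l = np.meshgrid(kk, kk)

rng = np.random.default_rng(0)
phi = rng.standard_normal((n, n)) + 1j * rng.standard_normal((n, n))
qh = np.fft.fft2(rng.standard_normal((n, n)))

ph = invert_qgniw(qh, phi, np.fft.fft2(phi), k, l, f0)
print(ph.shape)  # -> (64, 64)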
def package_ref_key(package_name, ref): """Returns ndb.Key corresponding to particular PackageRef.""" assert is_valid_package_ref(ref), ref return ndb.Key(PackageRef, ref, parent=package_key(package_name))
12,697
def conv2d_backprop_input(dout, x_size, weight, stride=1, pad=0): """Backpropagation input for conv2d.""" filter_num, _, filter_h, filter_w = weight.shape dout = dout.transpose(0, 2, 3, 1).reshape(-1, filter_num) col_w = weight.reshape(filter_num, -1).T dcol = np.dot(dout, col_w.T) dx = col2im(dcol, x_size, filter_h, filter_w, stride, pad) return dx
12,698
def get_feeds_from_url(url: str) -> list: """ Try to parse the URL and find any RSS feeds in the webpage Adapted from: https://gist.github.com/alexmill/9bc634240531d81c3abe """ logger.info(f"Attempting to find RSS feeds from {url}...") # If the URL itself is a proper RSS feed, just return it if is_rss_feed(url): logger.debug("URL is already a proper RSS feed") return [url] html = get_html(url) possible_feeds = get_feeds_from_links(html) + get_feeds_from_atags(url, html) return [url for url in set(possible_feeds) if is_rss_feed(url)]
12,699