Dataset columns: content (string, lengths 22 to 815k); id (int64, 0 to 4.91M)
def rot_permutated_geoms(geo, saddle=False, frm_bnd_key=[], brk_bnd_key=[], form_coords=[]): """ convert an input geometry to a list of geometries corresponding to the rotational permuations of all the terminal groups """ gra = graph(geo, remove_stereo=True) term_atms = {} all_hyds = [] neighbor_dct = automol.graph.atom_neighbor_keys(gra) # determine if atom is a part of a double bond unsat_atms = automol.graph.unsaturated_atom_keys(gra) if not saddle: rad_atms = automol.graph.sing_res_dom_radical_atom_keys(gra) res_rad_atms = automol.graph.resonance_dominant_radical_atom_keys(gra) rad_atms = [atm for atm in rad_atms if atm not in res_rad_atms] else: rad_atms = [] gra = gra[0] for atm in gra: if gra[atm][0] == 'H': all_hyds.append(atm) for atm in gra: if atm in unsat_atms and atm not in rad_atms: pass else: if atm not in frm_bnd_key and atm not in brk_bnd_key: #if atm not in form_coords: nonh_neighs = [] h_neighs = [] neighs = neighbor_dct[atm] for nei in neighs: if nei in all_hyds: h_neighs.append(nei) else: nonh_neighs.append(nei) if len(nonh_neighs) < 2 and len(h_neighs) > 1: term_atms[atm] = h_neighs geo_final_lst = [geo] for atm in term_atms: hyds = term_atms[atm] geo_lst = [] for geom in geo_final_lst: geo_lst.extend(_swap_for_one(geom, hyds)) geo_final_lst = geo_lst return geo_final_lst
21,900
def wasserstein_loss(y_true, y_pred):
    """For more detail:
    https://github.com/keras-team/keras-contrib/blob/master/examples/improved_wgan.py"""
    return K.mean(y_true * y_pred)
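A minimal NumPy illustration of what this Keras loss computes, assuming the usual WGAN convention of labelling real samples +1 and fake samples -1 (the scores below are made up for the example):

import numpy as np

# Critic scores for a batch, and WGAN-style labels (+1 real, -1 fake).
y_true = np.array([1.0, 1.0, -1.0, -1.0])
y_pred = np.array([0.8, 0.3, -0.5, 0.1])

# Same quantity as K.mean(y_true * y_pred): the signed critic score gap.
loss = np.mean(y_true * y_pred)
print(loss)  # (0.8 + 0.3 + 0.5 - 0.1) / 4 = 0.375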
21,901
def call_and_transact(
    contract_function: ContractFunction,
    transaction_params: Optional[TxParams] = None,
) -> HexBytes:
    """Executes contract_function.{call, transaction}(transaction_params) and returns txhash"""
    # First 'call' might raise an exception
    contract_function.call(transaction_params)
    return contract_function.transact(transaction_params)
21,902
def bidirectional_rnn_model(input_dim, units, output_dim=29):
    """Build a bidirectional recurrent network for speech

    Params:
        input_dim (int): Length of the input sequence.
        units: output dimensions of the GRU
        output_dim: output dimensions of the dense connected layers

    Returns:
        returns the RNN acoustic model

    Code Attribution:
        This function contains code that was updated and leveraged from the
        Udacity Natural Language Processing Nano Degree Training material.
    """
    # Main acoustic input
    input_data = Input(name='the_input', shape=(None, input_dim))
    # TODO: Add bidirectional recurrent layer
    bidir_rnn = Bidirectional(GRU(units, return_sequences=True,
                                  implementation=2, name="bidir_rnn"))(input_data)
    # TODO: Add a TimeDistributed(Dense(output_dim)) layer
    time_dense = TimeDistributed(Dense(output_dim))(bidir_rnn)
    # Add softmax activation layer
    y_pred = Activation('softmax', name='softmax')(time_dense)
    # Specify the model
    model = Model(inputs=input_data, outputs=y_pred)
    model.output_length = lambda x: x
    print(model.summary())
    return model
21,903
def test_setitem(): """Test __setitem__.""" sl = setlist('abc') sl[0] = 'd' assert sl == setlist('dbc') sl[0] = 'd' assert sl == setlist('dbc') sl[1] = 'e' assert sl == setlist('dec') sl[2] = 'f' assert sl == setlist('def') with pytest.raises(IndexError): sl[3] = 'g' sl[0], sl[1] = 'h', 'i' assert sl == setlist('hif') sl = setlist(range(10)) sl[0] = 'a' assert sl == setlist(['a'] + list(range(1, 10))) sl[9] = 'b' assert sl == setlist(['a'] + list(range(1, 9)) + ['b']) sl[-1] = 'c' assert sl == setlist(['a'] + list(range(1, 9)) + ['c']) with pytest.raises(IndexError): sl[-11] = 'd' assert sl == setlist(['a'] + list(range(1, 9)) + ['c']) with pytest.raises(IndexError): sl[10] = 'd' assert sl == setlist(['a'] + list(range(1, 9)) + ['c']) with pytest.raises(TypeError): sl[0:2] = 1 sl = setlist(range(10)) with pytest.raises(ValueError): sl[0:2] = [8, 9] with pytest.raises(ValueError): sl[-1:0:-2] = ['a', 'b']
21,904
def compress_sparql(text: str, prefix: str, uri: str) -> str:
    """
    Compress given SPARQL query by replacing all instances of the given uri
    with the given prefix.

    :param text: SPARQL query to be compressed.
    :param prefix: prefix to use as replace.
    :param uri: uri instance to be replaced.
    :return: compressed SPARQL query.
    """
    bordersremv = lambda matchobj: prefix + ":" + re.sub(f"[<>]|({uri})", "", matchobj.group(0))
    return re.sub(f"<?({uri}).*>?", bordersremv, text)
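A small usage sketch; the DBpedia prefix and query below are illustrative and not part of the original project:

import re

query = "SELECT ?x WHERE { ?x <http://dbpedia.org/ontology/birthPlace> ?y }"
compressed = compress_sparql(query, prefix="dbo", uri="http://dbpedia.org/ontology/")
print(compressed)  # SELECT ?x WHERE { ?x dbo:birthPlace ?y }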
21,905
def easter(g_year):
    """Return fixed date of Easter in Gregorian year g_year."""
    century = quotient(g_year, 100) + 1
    shifted_epact = mod(14 + 11 * mod(g_year, 19) -
                        quotient(3 * century, 4) +
                        quotient(5 + (8 * century), 25), 30)
    adjusted_epact = ((shifted_epact + 1)
                      if ((shifted_epact == 0) or
                          ((shifted_epact == 1) and (10 < mod(g_year, 19))))
                      else shifted_epact)
    paschal_moon = (fixed_from_gregorian(gregorian_date(g_year, APRIL, 19)) -
                    adjusted_epact)
    return kday_after(SUNDAY, paschal_moon)
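The snippet above relies on calendrical helpers (quotient, mod, fixed_from_gregorian, kday_after) from its host library. For comparison, a self-contained sketch of the same computation — the anonymous Gregorian computus (Meeus/Jones/Butcher algorithm) — using only the standard library:

import datetime

def gregorian_easter(year: int) -> datetime.date:
    """Gregorian Easter Sunday via the anonymous (Meeus/Jones/Butcher) computus."""
    a = year % 19
    b, c = divmod(year, 100)
    d, e = divmod(b, 4)
    f = (b + 8) // 25
    g = (b - f + 1) // 3
    h = (19 * a + b - d - g + 15) % 30
    i, k = divmod(c, 4)
    l = (32 + 2 * e + 2 * i - h - k) % 7
    m = (a + 11 * h + 22 * l) // 451
    month, day = divmod(h + l - 7 * m + 114, 31)
    return datetime.date(year, month, day + 1)

print(gregorian_easter(2024))  # 2024-03-31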
21,906
def _with_generator_error_translation(code_to_exception_class_func, func):
    """Same wrapping as above, but for a generator"""
    @funcy.wraps(func)
    def decorated(*args, **kwargs):
        """Execute a function, if an exception is raised, change its type if necessary"""
        try:
            for x in func(*args, **kwargs):
                yield x
        except grpc.RpcError as exc:
            raise_exception_from_grpc_exception(code_to_exception_class_func, exc)
    return decorated
21,907
def test_link_ts8():
    """Test linking input ts8 files"""
    link_ts8_files(LINK_OPTIONS_TS8)
21,908
async def test_failed_update_and_reconnection( hass: HomeAssistant, aioclient_mock: AiohttpClientMocker ): """Test failed update and reconnection.""" mock_responses(aioclient_mock) assert await async_setup_component( hass, SENSOR_DOMAIN, {SENSOR_DOMAIN: ONE_SENSOR_CONFIG} ) aioclient_mock.clear_requests() mock_responses(aioclient_mock, error=True) next_update = dt_util.utcnow() + timedelta(seconds=3) async_fire_time_changed(hass, next_update) await hass.async_block_till_done() assert hass.states.get("sensor.efergy_728386").state == STATE_UNAVAILABLE aioclient_mock.clear_requests() mock_responses(aioclient_mock) next_update = dt_util.utcnow() + timedelta(seconds=30) async_fire_time_changed(hass, next_update) await hass.async_block_till_done() assert hass.states.get("sensor.efergy_728386").state == "1628"
21,909
def openei_api_request(
    data,
):
    """Query the OpenEI.org API.

    Args:
        data (dict or OrderedDict): key-value pairs of parameters to post to the API.

    Returns:
        dict: the json response
    """
    # define the OpenEI API URL, then construct a GET-style URL as a string to
    # hash to look up/save to cache
    url = "https://openei.org/services/api/content_assist/recommend"
    prepared_url = requests.Request("GET", url, params=data).prepare().url
    cached_response_json = get_from_cache(prepared_url)

    if cached_response_json is not None:
        # found this request in the cache, just return it instead of making a
        # new HTTP call
        return cached_response_json
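The prepared URL is what keys the cache. A quick standalone illustration of that construction with a made-up parameter (requests only builds the URL here; nothing is sent):

import requests

url = "https://openei.org/services/api/content_assist/recommend"
prepared_url = requests.Request("GET", url, params={"s": "solar"}).prepare().url
print(prepared_url)
# https://openei.org/services/api/content_assist/recommend?s=solar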
21,910
def boring_stuff(axarr, edir): """ Axes, titles, legends, etc. Yeah yeah ... """ for i in range(2): for j in range(3): if i == 0 and j == 0: axarr[i,j].set_ylabel("Loss Training MBs", fontsize=ysize) if i == 0 and j == 1: axarr[i,j].set_ylabel("Loss Validation Set", fontsize=ysize) else: axarr[i,j].set_ylabel("Average Return", fontsize=ysize) axarr[i,j].set_xlabel("Training Minibatches", fontsize=xsize) axarr[i,j].tick_params(axis='x', labelsize=tick_size) axarr[i,j].tick_params(axis='y', labelsize=tick_size) axarr[i,j].legend(loc="best", prop={'size':legend_size}) axarr[i,j].legend(loc="best", prop={'size':legend_size}) axarr[0,0].set_title(edir+", Training Losses", fontsize=title_size) axarr[0,1].set_title(edir+", Validation Losses", fontsize=title_size) axarr[0,0].set_yscale('log') axarr[0,1].set_yscale('log')
21,911
def cloudtrail_cleanup(): """Function to clean up CloudTrail Logs""" logging.info("Cleaning up CloudTrail Logs.") try: logging.info("Cleaning up CloudTrail Logs created by Assisted Log Enabler for AWS.") trail_list: list = [] removal_list: list = [] logging.info("DescribeTrails API Call") cloudtrail_trails = cloudtrail.describe_trails() for trail in cloudtrail_trails['trailList']: trail_list.append(trail['TrailARN']) logging.info("Listing CloudTrail trails created by Assisted Log Enabler for AWS.") print("Full trail list") print(trail_list) for removal_trail in trail_list: logging.info("Checking tags for trails created by Assisted Log Enabler for AWS.") logging.info("ListTags API Call") trail_tags = cloudtrail.list_tags( ResourceIdList=[removal_trail] ) for tag_lists in trail_tags['ResourceTagList']: for key_info in tag_lists['TagsList']: print(key_info) if key_info['Key'] == 'workflow' and key_info['Value'] == 'assisted-log-enabler': removal_list.append(removal_trail) print("Trails to be removed") print(removal_list) for delete_trail in removal_list: logging.info("Deleting trails created by Assisted Log Enabler for AWS.") logging.info("DeleteTrail API Call") cloudtrail.delete_trail( Name=delete_trail ) logging.info(delete_trail + " has been deleted.") time.sleep(1) except Exception as exception_handle: logging.error(exception_handle)
21,912
def generate_content(vocab, length):
    """Generate a random passage.

    Pass in a dictionary of words from a text document and a specified length
    (number of words) to return a randomized string.
    """
    new_content = []
    pair = find_trigram(vocab)
    while len(new_content) < length:
        third = find_trigram(vocab, pair)
        trigram = (pair + " " + third).split()
        new_content.extend(trigram)  # unpack trigrams and add to content
        next_one = find_trigram(vocab, trigram[1] + " " + trigram[2])
        if len(next_one.split()) > 1:
            pair = next_one
        else:
            next_two = find_trigram(vocab, trigram[2] + " " + next_one)
            pair = next_one + " " + next_two
    return " ".join(new_content)
21,913
def transform_generic(inp: dict, out, met: ConfigurationMeta) -> list: """ handle_generic is derived from P -> S, where P and S are logic expressions. This function will use a generic method to transform the logic expression P -> S into multiple mathematical constraints. This is done by first converting r into a logic expression Ç, then Ç is converted into CNF and last into constraints. """ support_variable_name = met.support_variable_name P = None if inp['condition'] and inp['condition']['sub_conditions']: P = "" evaluated_sub_conditions = [] for sub_condition in inp['condition']['sub_conditions']: if sub_condition['relation'] == "ALL": concat = " & ".join(sub_condition['components']) elif sub_condition.relation == "ANY": concat = " | ".join(sub_condition['components']) else: raise Exception(f"Not implemented for relation type: '{sub_condition.relation}'") if not concat == '': evaluated_sub_conditions.append(f"({concat})") if inp['condition']['relation'] == "ALL": P = " & ".join(evaluated_sub_conditions) elif inp['condition']['relation'] == "ANY": P = " | ".join(evaluated_sub_conditions) else: raise Exception(f"Not implemented for relation type: '{inp['condition']['relation']}'") cmps = inp['consequence']['components'] if inp['consequence']['rule_type'] in ["REQUIRES_ALL", "PREFERRED"]: S = " & ".join(cmps) elif inp['consequence']['rule_type'] == "REQUIRES_ANY": S = " | ".join(cmps) elif inp['consequence']['rule_type'] == "FORBIDS_ALL": _cmps = [f"~{x}" for x in cmps] S = " & ".join(_cmps) elif inp['consequence']['rule_type'] == "REQUIRES_EXCLUSIVELY": if P == None: return transform_exactly_one(inp=inp, out=out, met=met) condition = [] for i in range(len(cmps)): clause = [f"{cmps[j]}" if i == j else f"~{cmps[j]}" for j in range(len(cmps))] condition.append(" & ".join(clause)) S = " | ".join([f"({x})" for x in condition]) else: raise Exception(f"Not implemented for rule type '{inp['consequence']['rule_type']}'") expression = S if not P else f"({P}) >> ({S})" constraints = fake_expression_to_constraints( expression=expression, support_variable_name=support_variable_name, ) _constraints = [] for constraint, support_vector_value in constraints: constraint[support_variable_name] = support_vector_value _constraints.append(constraint) return _constraints
21,914
def test_pass_by_reference() -> None: """Test whether pass-by-reference arguments work correctly.""" inc = TestNamespace() incArgRef = inc.addArgument("R") incArgVal = inc.emit_load(incArgRef) incAdd = AddOperator(incArgVal, IntLiteral(1)) inc.emit_store(incArgRef, incAdd) incCode = inc.createCodeBlock(None) outer = TestNamespace() outerA = outer.addRegister("a") initA = IntLiteral(100) outer.emit_store(outerA, initA) outer.inlineBlock(incCode, args(R=outerA.bits)) outer.inlineBlock(incCode, args(R=outerA.bits)) outer.inlineBlock(incCode, args(R=outerA.bits)) outerRet = outer.addVariable("ret") finalA = outer.emit_load(outerA) outer.emit_store(outerRet, finalA) code = createSimplifiedCode(outer) retVal, retWidth = getRetVal(code) correct = (Store(retVal, outerA.bits.storage),) assertNodes(code.nodes, correct) assertRetVal(code, 103) assert retWidth == 8
21,915
def generate_mprocess_from_name( c_sys: CompositeSystem, mprocess_name: str, is_physicality_required: bool = True ) -> MProcess: """returns MProcess object specified by name. Parameters ---------- c_sys : CompositeSystem CompositeSystem of MProcess. mprocess_name : str name of the MProcess. is_physicality_required: bool = True whether the generated object is physicality required, by default True Returns ------- MProcess MProcess object. """ # check mprocess name single_mprocess_names = mprocess_name.split("_") mprocess_name_list = get_mprocess_names_type1() + get_mprocess_names_type2() for single_mprocess_name in single_mprocess_names: if single_mprocess_name not in mprocess_name_list: raise ValueError( f"mprocess_name is out of range. mprocess_name={single_mprocess_name}" ) # generate mprocess hss = generate_mprocess_hss_from_name(mprocess_name, c_sys) mprocess = MProcess( hss=hss, c_sys=c_sys, is_physicality_required=is_physicality_required ) return mprocess
21,916
def convert_polydata_to_image_data(poly, ref_im, reverse=True): """ Convert the vtk polydata to imagedata Args: poly: vtkPolyData ref_im: reference vtkImage to match the polydata with Returns: output: resulted vtkImageData """ from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk # Have to copy to create a zeroed vtk image data ref_im_zeros = vtk.vtkImageData() ref_im_zeros.DeepCopy(ref_im) ref_im_zeros.GetPointData().SetScalars(numpy_to_vtk(np.zeros(vtk_to_numpy(ref_im_zeros.GetPointData().GetScalars()).shape))) ply2im = vtk.vtkPolyDataToImageStencil() ply2im.SetTolerance(0.05) ply2im.SetInputData(poly) ply2im.SetOutputSpacing(ref_im.GetSpacing()) ply2im.SetInformationInput(ref_im_zeros) ply2im.Update() stencil = vtk.vtkImageStencil() stencil.SetInputData(ref_im_zeros) if reverse: stencil.ReverseStencilOn() stencil.SetStencilData(ply2im.GetOutput()) stencil.Update() output = stencil.GetOutput() return output
21,917
def matplot(x, y, f, vmin=None, vmax=None, ticks=None, output='output.pdf', xlabel='X', \ ylabel='Y', diverge=False, cmap='viridis', **kwargs): """ Parameters ---------- f : 2D array array to be plotted. extent: list [xmin, xmax, ymin, ymax] Returns ------- Save a fig in the current directory. To be deprecated. Please use imshow. """ fig, ax = plt.subplots(figsize=(4,3)) set_style() if diverge: cmap = "RdBu_r" else: cmap = 'viridis' xmin, xmax = min(x), max(x) ymin, ymax = min(y), max(y) extent = [xmin, xmax, ymin, ymax] cntr = ax.imshow(f.T, aspect='auto', cmap=cmap, extent=extent, \ origin='lower', vmin=vmin, vmax=vmax, **kwargs) ax.set_aspect('auto') ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) fig.colorbar(cntr, ticks=ticks) ax.xaxis.set_ticks_position('bottom') # fig.subplots_adjust(wspace=0, hspace=0, bottom=0.14, left=0.14, top=0.96, right=0.94) if output is not None: fig.savefig(output, dpi=1200) return fig, ax
21,918
def createNewClasses(df, sc, colLabel):
    """
    Divide the data into classes

    Parameters
    ----------
    df: Dataframe
        Spark Dataframe
    sc: SparkContext object
        SparkContext object
    colLabel: List
        Items that are considered labels

    Return
    ----------
    colCat: List
        Items that are considered categorical
    colNum: List
        Items that are considered numerical values
    """
    rdd = sc.parallelize(df.dtypes)
    # Parenthesize the type test so label columns are excluded regardless of type
    colCat = rdd.map(lambda i: i[0] if ((i[1] == 'string' or i[1] == 'boolean') and i[0] not in colLabel) else None)\
                .filter(lambda i: i is not None).collect()
    colNum = rdd.map(lambda i: i[0] if (i[1] == 'double' and i[0] not in colLabel) else None)\
                .filter(lambda i: i is not None).collect()
    print(f"Label: {colLabel} \nCategories: {colCat}\nNumerical: {colNum}")
    return colCat, colNum
21,919
def scan_usb(device_name=None): """ Scan for available USB devices :param device_name: The device name (MX6DQP, MX6SDL, ...) or USB device VID:PID value :rtype list """ if device_name is None: objs = [] devs = RawHid.enumerate() for cls in SDP_CLS: for dev in devs: for value in cls.DEVICES.values(): if dev.vid == value[0] and dev.pid == value[1]: objs += [cls(dev)] return objs else: if ':' in device_name: vid, pid = device_name.split(':') devs = RawHid.enumerate(int(vid, 0), int(pid, 0)) return [SdpBase(dev) for dev in devs] else: for cls in SDP_CLS: if device_name in cls.DEVICES: vid = cls.DEVICES[device_name][0] pid = cls.DEVICES[device_name][1] devs = RawHid.enumerate(vid, pid) return [cls(dev) for dev in devs] return []
21,920
def start_bme280_sensor(args): """Main program function, parse arguments, read configuration, setup client, listen for messages""" global status_topic, read_loop i2c_address = bme280.I2C_ADDRESS_GND # 0x76, alt is 0x77 options = Options() if args.daemon: file_handle = open(args.log_file, "w") else: file_handle = sys.stdout client = mqtt.Client(args.clientid) mqtt_conf = configparser.ConfigParser() mqtt_conf.read(args.config) options.section = args.section options.root_topic = mqtt_conf.get(args.section, 'topic') topics = Topics(options.root_topic, args.section) status_topic = options.root_topic + '/' + "LWT" if mqtt_conf.has_option(args.section, 'address'): i2c_address = int(mqtt_conf.get(args.section, 'address'), 0) if mqtt_conf.has_option(args.section, 'mode'): options.mode = mqtt_conf.get(args.section, 'mode') if mqtt_conf.has_option(args.section, 'toffset'): options.toffset = float(mqtt_conf.get(args.section, 'toffset')) if mqtt_conf.has_option(args.section, 'hoffset'): options.hoffset = float(mqtt_conf.get(args.section, 'hoffset')) if mqtt_conf.has_option(args.section, 'poffset'): options.poffset = float(mqtt_conf.get(args.section, 'poffset')) if mqtt_conf.has_option(args.section, 'elevation'): options.elevation = float(mqtt_conf.get(args.section, 'elevation')) if mqtt_conf.has_option(args.section, 'format'): options.format = mqtt_conf.get(args.section, 'format') if (mqtt_conf.has_option(args.section, 'username') and mqtt_conf.has_option(args.section, 'password')): username = mqtt_conf.get(args.section, 'username') password = mqtt_conf.get(args.section, 'password') client.username_pw_set(username=username, password=password) host = mqtt_conf.get(args.section, 'host') port = int(mqtt_conf.get(args.section, 'port')) client.on_connect = on_connect # client.on_disconnect = on_disconnect client.connect(host, port, 60) client.loop_start() # Initialise the BME280 bus = SMBus(1) sensor = bme280.BME280(i2c_addr=i2c_address, i2c_dev=bus) # print("pre setup = {0}".format(sensor._is_setup)) #sensor.setup(mode=options.mode, temperature_standby=SENSOR_STANDBY) # Sync to sleep() call (in ms), when in normal mode sensor.setup(mode=options.mode) #print("post setup = {0}".format(sensor._is_setup)) sensor_data = SensorData() # Initialize a sensor_data object to hold the information first_read = True # problems with the first read of the data? seems ok in forced mode. read_loop = True curr_datetime = datetime.datetime.now() str_datetime = curr_datetime.strftime("%Y-%m-%d %H:%M:%S") print("{0}: pid: {1:d}, bme280 sensor started on 0x{2:x}, mode: {3:s}, toffset: {4:0.1f} C, hoffset: {5:0.1f} %, poffset: {6:0.2f} hPa". 
format(str_datetime, os.getpid(), i2c_address, options.mode, options.toffset, options.hoffset, options.poffset), file=file_handle) file_handle.flush() while read_loop: curr_time = time.time() my_time = int(round(curr_time)) sensor_data.temperature = sensor.get_temperature() sensor_data.humidity = sensor.get_humidity() sensor_data.pressure = sensor.get_pressure() if not first_read and sensor_data.pressure < 800: curr_datetime = datetime.datetime.now() str_datetime = curr_datetime.strftime("%Y-%m-%d %H:%M:%S") print("{0}: pid: {1:d} bme280 sensor fault - reset".format(str_datetime, os.getpid())) sensor._is_setup = False sensor.setup(mode=options.mode) time.sleep(SLEEP_TIME) continue if my_time % 60 == 0: if not first_read: publish_mqtt(client, sensor_data, options, topics, file_handle, args.verbose) first_read = False done_time = time.time() # print("difference = {0}".format(done_time - curr_time)) time.sleep(SLEEP_TIME) curr_datetime = datetime.datetime.now() str_datetime = curr_datetime.strftime("%Y-%m-%d %H:%M:%S") print("{0}: pid: {1:d}, bme280 sensor interrupted".format(str_datetime, os.getpid()), file=file_handle) client.publish(status_topic, "Offline", retain=True) client.disconnect()
21,921
def _boolrelextrema( data, comparator, axis=0, order: tsutils.IntGreaterEqualToOne = 1, mode="clip" ): """Calculate the relative extrema of `data`. Relative extrema are calculated by finding locations where comparator(data[n],data[n+1:n+order+1]) = True. Parameters ---------- data: ndarray comparator: function function to use to compare two data points. Should take 2 numbers as arguments axis: int, optional axis over which to select from `data` order: int, optional How many points on each side to require a `comparator`(n,n+x) = True. mode: string, optional How the edges of the vector are treated. 'wrap' (wrap around) or 'clip' (treat overflow as the same as the last (or first) element). Default 'clip'. See numpy.take Returns ------- extrema: ndarray Indices of the extrema, as boolean array of same shape as data. True for an extrema, False else. See Also -------- argrelmax, argrelmin Examples -------- >>> testdata = np.array([1,2,3,2,1]) >>> _boolrelextrema(testdata, np.greater, axis=0).tolist() [False, False, True, False, False] """ datalen = data.shape[axis] locs = np.arange(0, datalen) results = np.ones(data.shape, dtype=bool) main = data.take(locs) for shift in range(1, order + 1): plus = np.take(data, locs + shift, axis=axis, mode=mode) results &= comparator(main, plus) minus = np.take(data, locs - shift, axis=axis, mode=mode) results &= comparator(main, minus) if ~results.any(): return results return results
21,922
def parse_description(offer_markup):
    """ Searches for description in offer markup

    :param offer_markup: Body from offer page markup
    :type offer_markup: str
    :return: Description of offer
    :rtype: str
    """
    html_parser = BeautifulSoup(offer_markup, "html.parser")
    # strip non-breaking spaces, newlines and carriage returns
    return html_parser.find(id="textContent").text.replace("\xa0", "").replace("\n", " ").replace("\r", "").strip()
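A quick illustration with a made-up markup snippet (requires beautifulsoup4):

offer_markup = '<html><body><div id="textContent">Spacious flat\nnear the park</div></body></html>'
print(parse_description(offer_markup))  # "Spacious flat near the park"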
21,923
def return_args(): """Return a parser object.""" _parser = ArgumentParser(add_help=True, description=( "Translate msgid's from a POT file with Google Translate API")) _parser.add_argument('-f', '--file', action='store', required=True, help="Get the POT file name.") _parser.add_argument('-o', '--output_file', action='store', required=True, help="Get name to save the new PO file.") _parser.add_argument('-t', '--translate', action='store', required=True, help="Get language to translate to.") _parser.add_argument('-i', '--imprecise', action='store_true', help="Save translated texts as fuzzy(draft).") _parser.add_argument('-e', '--error', action='store_true', help="Print translate errors if exist.") _parser.add_argument('-p', '--print_process', action='store_true', help="Print translate process.") return _parser
21,924
def get_df_tau(plot_dict, gen_err): """ Return a dataframe of the kendall tau's coefficient for different methods """ # tau, p_value = compute_tau(result_dict[err], plot_dict['avg_clusters'], inverse=True) # taus, pvalues, names, inverses = [tau], [p_value], ['cc'], ['True'] taus, pvalues, names, inverses = [], [], [], [] for key, value in plot_dict.items(): value = np.array(value) # if key in ['ranks', 'stable_ranks', 'avg_clusters', 'modularity']: # continue for i in range(value.shape[1]): if key == "Schatten": if i == 0: # Schatten 1-norm, no inversion inverse_flag = False elif i == 1: continue # skip trivial 2-norm else: inverse_flag = True else: inverse_flag = True tau, p_value = compute_tau(gen_err, value[:, i], inverse=inverse_flag) taus.append(tau) pvalues.append(p_value) names.append(key + "_" + str(i + 1)) inverses.append(inverse_flag) kendal_cor = pd.DataFrame( {"metric": names, "kendall_tau": taus, "pvalue": pvalues, "inverse": inverses} ) return kendal_cor
21,925
def traverse(graph, priorities):
    """Return a sequence of all the nodes in the graph by greedily choosing
    high 'priority' nodes before low 'priority' nodes."""
    reachable = PriorityContainer()
    visited = {}

    # start by greedily choosing the highest-priority node
    current_node = max(priorities.items(), key=lambda i: i[1])[0]
    visited_count = 0

    while current_node:
        # visit node
        visited[current_node] = visited_count
        visited_count += 1

        # update visit-able nodes
        for neighbor in graph[current_node]['neighbors']:
            if neighbor not in reachable and neighbor not in visited:
                reachable.put((priorities[neighbor], neighbor))

        try:
            current_priority, current_node = reachable.get(False)
        except Queue.Empty:
            current_priority = current_node = None

    return visited
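PriorityContainer and Queue are not included in the snippet. Judging from the calls used (put, get(False), the "in" operator, Queue.Empty) and the "high priority first" docstring, a minimal stand-in could look like the sketch below; this is an assumption about the intended interface, not the original class:

import queue

Queue = queue  # the snippet catches Queue.Empty; alias the stdlib module under that name

class PriorityContainer:
    """Assumed interface: highest-priority-first queue with membership tests on node names."""

    def __init__(self):
        self._pq = queue.PriorityQueue()
        self._members = set()

    def put(self, item):
        priority, node = item
        self._pq.put((-priority, node))  # negate so the highest priority comes out first
        self._members.add(node)

    def get(self, block=True):
        neg_priority, node = self._pq.get(block)  # raises queue.Empty when block=False and empty
        self._members.discard(node)
        return -neg_priority, node

    def __contains__(self, node):
        return node in self._members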
21,926
def build_dataset(dataset_name, set_name, root_path, transforms=None): """ :param dataset_name: the name of dataset :param root_path: data is usually located under the root path :param set_name: "train", "valid", "test" :param transforms: :return: """ if "cameo_half_year" in dataset_name: _, data_type, max_length, depth, profile_type = dataset_name.split("-") max_length = int(max_length) depth = int(depth) dataset = CAMEO_HALF_YEAR(root=root_path, data_type=data_type, transform=transforms, max_length_limit=max_length, depth=depth, profile_type=profile_type) else: raise Exception("Can not build unknown image dataset: {}".format(dataset_name)) return dataset
21,927
def process_quote_data():
    """
    Write fetched quote data as a CSV.
    """
    quote_data = api.get_quote_data(",".join(SYMBOLS))
    field_names = list(quote_data[0].keys())
    lib.write_csv(CSV_OUT_QUOTE_DATA, quote_data, field_names)
21,928
def remove_characters(text, characters_to_remove=None): """ Remove various auxiliary characters from a string. This function uses a hard-coded string of 'undesirable' characters (if no such string is provided), and removes them from the text provided. Parameters: ----------- text : str A piece of text to remove characters from. characters_to_remove : str A string of 'undesirable' characters to remove from the text. Returns: -------- text : str A piece of text with undesired characters removed. """ # chars = "\\`*_{}[]()<>#+-.!$%@" if characters_to_remove is None: characters_to_remove = "\\`*_{}[]()<>#+!$%@" for c in characters_to_remove: if c in text: text = text.replace(c, '') return text
21,929
def changePrev(v, pos, findPat, changePat, bodyFlag=1):
    """
    changePrev: use string.rfind() to change text in a Leo outline.

    v           the vnode to start the search.
    pos         the position within the body text of v to start the search.
    findPat     the search string.
    changePat   the replacement string.
    bodyFlag    true: change body text. false: change headline text.

    returns a tuple (v, pos) showing where the change occurred.
    returns (None, 0) if no further match in the outline was found.

    Note: if (v, pos) is a tuple returned previously from changePrev,
    changePrev(v, pos - len(findPat), findPat, changePat)
    changes the next matching string.
    """
    n = len(findPat)
    v, pos = findPrev(v, pos, findPat, bodyFlag)
    if v is None:
        return None, 0
    if bodyFlag:
        s = v.bodyString()
        # s[pos:pos+n] = changePat
        s = s[:pos] + changePat + s[pos+n:]
        v.setBodyStringOrPane(s)
    else:
        s = v.headString()
        # s[pos:pos+n] = changePat
        s = s[:pos] + changePat + s[pos+n:]
        v.setHeadStringOrHeadline(s)
    return v, pos
21,930
def put_mint(update: Update, context: CallbackContext) -> int: """ Returns the token data to user """ session_uuid = context.user_data['session_uuid'] user = get_chat_info(update) creator_username = user['username'] # Start DB Session to get addr session = Session() sesh_exists = session.query(Tokens).filter( Tokens.session_uuid == session_uuid).scalar() is not None if sesh_exists: # Add a check for the UTXO, bail if not found token_data = session.query(Tokens).filter( Tokens.session_uuid == session_uuid).one() logging.info(f'Searching for the UTXOs in address: {token_data}') bot_payment_addr = token_data.bot_payment_addr update.message.reply_text("Checking for confirmed transactions.") utxo = check_wallet_utxo(bot_payment_addr) if utxo: sesh = {'session_uuid': session_uuid} update.message.reply_text("OK, I found the Transaction!") update.message.reply_text( "Please grab a coffee as I build your NFT " "I'll ssend it back to you with your change in ADA." ) update.message.reply_text("Initiating NFT Minting process....") minted = mint(**sesh) if minted: update.message.reply_text( f"Holey Baloney! \n your token is minted, @{creator_username}." ) update.message.reply_text( f"The token should arrive in your wallet any second now." ) update.message.reply_text( f"Thank you for using the *NFT-TELEGRAM-BOT*. \n Have a Daedalus day." ) return ConversationHandler.END else: update.message.reply_text( f"Something failed, please try not to panic, " f"but you may have hit a bug. Sorry." ) return ConversationHandler.END else: update.message.reply_text( f"Sorry, but there is no UTXO to use yet. " f"Transaction not found." f"Please try running /MINT again in a few moments." ) return ConversationHandler.END update.message.reply_text( f"Sorry, but there is no PRE_MINT session yet. " f"Please try /start again in a few moments." ) return ConversationHandler.END
21,931
def handle_edge_event_3sides(evt, step, skel, queue, immediate): """Handle a collapse of a triangle with 3 sides collapsing. It does not matter whether the 3-triangle has wavefront edges or not. Important: The triangle vertices should collapse to 1 point. The following steps are performed: - stop the 3 kinetic vertices of the triangle - optionally make a new skeleton node - schedule all neighbours, if any, for immediate processing (these also collapse to the same point) """ now = evt.time t = evt.triangle logging.info("* edge 3sides :: tri>> #{} [{}]".format(id(t), t.info)) logging.debug(evt.side) assert len(evt.side) == 3 # we stop the vertices always at the same geometric location # This means that the triangle collapse leads to 1 point sk_node, newly_made = stop_kvertices(t.vertices, step, now) if newly_made: skel.sk_nodes.append(sk_node) # get neighbours around collapsing triangle, if any, and schedule them for n in t.neighbours: if n is not None and n.event is not None and n.stops_at is None: n.neighbours[n.neighbours.index(t)] = None schedule_immediately(n, now, queue, immediate) # we "remove" the triangle itself t.stops_at = now
21,932
def craft_one_type(sess, model, X, Y, dataset, attack, batch_size): """ TODO :param sess: :param model: :param X: :param Y: :param dataset: :param attack: :param batch_size: :return: """ if attack == 'fgsm': # FGSM attack print('Crafting fgsm adversarial samples...') X_adv = fast_gradient_sign_method( sess, model, X, Y, eps=ATTACK_PARAMS[dataset]['eps'], clip_min=CLIP_MIN, clip_max=CLIP_MAX, batch_size=batch_size ) elif attack in ['bim-a', 'bim-b']: # BIM attack print('Crafting %s adversarial samples...' % attack) its, results = basic_iterative_method( sess, model, X, Y, eps=ATTACK_PARAMS[dataset]['eps'], eps_iter=ATTACK_PARAMS[dataset]['eps_iter'], clip_min=CLIP_MIN, clip_max=CLIP_MAX, batch_size=batch_size ) if attack == 'bim-a': # BIM-A # For each sample, select the time step where that sample first # became misclassified X_adv = np.asarray([results[its[i], i] for i in range(len(Y))]) else: # BIM-B # For each sample, select the very last time step X_adv = results[-1] elif attack == 'jsma': # JSMA attack print('Crafting jsma adversarial samples. This may take > 5 hours') X_adv = saliency_map_method( sess, model, X, Y, theta=1, gamma=0.1, clip_min=CLIP_MIN, clip_max=CLIP_MAX ) elif attack == 'cw-l2': # C&W attack print('Crafting %s examples. This takes > 5 hours due to internal grid search' % attack) image_size = ATTACK_PARAMS[dataset]['image_size'] num_channels = ATTACK_PARAMS[dataset]['num_channels'] num_labels = ATTACK_PARAMS[dataset]['num_labels'] cw_attack = CarliniL2(sess, model, image_size, num_channels, num_labels, batch_size=batch_size) X_adv = cw_attack.attack(X, Y) elif attack == 'cw-lid': # C&W attack to break LID detector print('Crafting %s examples. This takes > 5 hours due to internal grid search' % attack) image_size = ATTACK_PARAMS[dataset]['image_size'] num_channels = ATTACK_PARAMS[dataset]['num_channels'] num_labels = ATTACK_PARAMS[dataset]['num_labels'] cw_attack = CarliniLID(sess, model, image_size, num_channels, num_labels, batch_size=batch_size) X_adv = cw_attack.attack(X, Y) _, acc = model.evaluate(X_adv, Y, batch_size=batch_size, verbose=0) print("Model accuracy on the adversarial test set: %0.2f%%" % (100 * acc)) np.save(os.path.join(PATH_DATA, 'Adv_%s_%s.npy' % (dataset, attack)), X_adv) l2_diff = np.linalg.norm( X_adv.reshape((len(X), -1)) - X.reshape((len(X), -1)), axis=1 ).mean() print("Average L-2 perturbation size of the %s attack: %0.2f" % (attack, l2_diff))
21,933
def trait_colors(rows):
    """Make tags for HTML colorizing text."""
    backgrounds = defaultdict(lambda: next(BACKGROUNDS))
    for row in rows:
        for trait in row['traits']:
            key = trait['trait']
            if key not in ('heading',):
                _ = backgrounds[key]
    return backgrounds
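BACKGROUNDS is not defined in the snippet; the defaultdict(lambda: next(BACKGROUNDS)) idiom suggests it is an endless iterator of CSS colours, so each newly seen trait key grabs the next colour and keeps it. A minimal sketch of that setup (the colour values and rows are placeholders):

from collections import defaultdict
from itertools import cycle

# Assumed: an endless cycle of background colours for highlighting.
BACKGROUNDS = cycle(['#ffdddd', '#ddffdd', '#ddddff', '#ffffdd'])

rows = [{'traits': [{'trait': 'sex'}, {'trait': 'size'}, {'trait': 'sex'}]}]
colors = trait_colors(rows)
print(dict(colors))  # {'sex': '#ffdddd', 'size': '#ddffdd'}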
21,934
def test():
    """Run the unit tests"""
    import unittest
    tests = unittest.TestLoader().discover("tests")
    unittest.TextTestRunner(verbosity=2).run(tests)
21,935
def main(): """ NAME foldtest_magic.py DESCRIPTION does a fold test (Tauxe, 2010) on data INPUT FORMAT pmag_specimens format file, er_samples.txt format file (for bedding) SYNTAX foldtest_magic.py [command line options] OPTIONS -h prints help message and quits -f sites formatted file [default for 3.0 is sites.txt, for 2.5, pmag_sites.txt] -fsa samples formatted file -fsi sites formatted file -exc use criteria to set acceptance criteria (supported only for data model 3) -n NB, set number of bootstraps, default is 1000 -b MIN, MAX, set bounds for untilting, default is -10, 150 -fmt FMT, specify format - default is svg -sav saves plots and quits -DM NUM MagIC data model number (2 or 3, default 3) OUTPUT Geographic: is an equal area projection of the input data in original coordinates Stratigraphic: is an equal area projection of the input data in tilt adjusted coordinates % Untilting: The dashed (red) curves are representative plots of maximum eigenvalue (tau_1) as a function of untilting The solid line is the cumulative distribution of the % Untilting required to maximize tau for all the bootstrapped data sets. The dashed vertical lines are 95% confidence bounds on the % untilting that yields the most clustered result (maximum tau_1). Command line: prints out the bootstrapped iterations and finally the confidence bounds on optimum untilting. If the 95% conf bounds include 0, then a pre-tilt magnetization is indicated If the 95% conf bounds include 100, then a post-tilt magnetization is indicated If the 95% conf bounds exclude both 0 and 100, syn-tilt magnetization is possible as is vertical axis rotation or other pathologies """ if '-h' in sys.argv: # check if help is needed print(main.__doc__) sys.exit() # graceful quit kappa = 0 dir_path = pmag.get_named_arg("-WD", ".") nboot = int(float(pmag.get_named_arg("-n", 1000))) # number of bootstraps fmt = pmag.get_named_arg("-fmt", "svg") data_model_num = int(float(pmag.get_named_arg("-DM", 3))) if data_model_num == 3: infile = pmag.get_named_arg("-f", 'sites.txt') orfile = 'samples.txt' site_col = 'site' dec_col = 'dir_dec' inc_col = 'dir_inc' tilt_col = 'dir_tilt_correction' dipkey, azkey = 'bed_dip', 'bed_dip_direction' crit_col = 'criterion' critfile = 'criteria.txt' else: infile = pmag.get_named_arg("-f", 'pmag_sites.txt') orfile = 'er_samples.txt' site_col = 'er_site_name' dec_col = 'site_dec' inc_col = 'site_inc' tilt_col = 'site_tilt_correction' dipkey, azkey = 'sample_bed_dip', 'sample_bed_dip_direction' crit_col = 'pmag_criteria_code' critfile = 'pmag_criteria.txt' if '-sav' in sys.argv: plot = 1 else: plot = 0 if '-b' in sys.argv: ind = sys.argv.index('-b') untilt_min = int(sys.argv[ind+1]) untilt_max = int(sys.argv[ind+2]) else: untilt_min, untilt_max = -10, 150 if '-fsa' in sys.argv: orfile = pmag.get_named_arg("-fsa", "") elif '-fsi' in sys.argv: orfile = pmag.get_named_arg("-fsi", "") if data_model_num == 3: dipkey, azkey = 'bed_dip', 'bed_dip_direction' else: dipkey, azkey = 'site_bed_dip', 'site_bed_dip_direction' else: if data_model_num == 3: orfile = 'sites.txt' else: orfile = 'pmag_sites.txt' orfile = pmag.resolve_file_name(orfile, dir_path) infile = pmag.resolve_file_name(infile, dir_path) critfile = pmag.resolve_file_name(critfile, dir_path) df = pd.read_csv(infile, sep='\t', header=1) # keep only records with tilt_col data = df.copy() data = data[data[tilt_col].notnull()] data = data.where(data.notnull(), "") # turn into pmag data list data = list(data.T.apply(dict)) # get orientation data if data_model_num == 3: # often 
orientation will be in infile (sites table) if os.path.split(orfile)[1] == os.path.split(infile)[1]: ordata = df[df[azkey].notnull()] ordata = ordata[ordata[dipkey].notnull()] ordata = list(ordata.T.apply(dict)) # sometimes orientation might be in a sample file instead else: ordata = pd.read_csv(orfile, sep='\t', header=1) ordata = list(ordata.T.apply(dict)) else: ordata, file_type = pmag.magic_read(orfile) if '-exc' in sys.argv: crits, file_type = pmag.magic_read(critfile) SiteCrits = [] for crit in crits: if crit[crit_col] == "DE-SITE": SiteCrits.append(crit) #break # get to work # PLTS = {'geo': 1, 'strat': 2, 'taus': 3} # make plot dictionary if not set_env.IS_WIN: pmagplotlib.plot_init(PLTS['geo'], 5, 5) pmagplotlib.plot_init(PLTS['strat'], 5, 5) pmagplotlib.plot_init(PLTS['taus'], 5, 5) if data_model_num == 2: GEOrecs = pmag.get_dictitem(data, tilt_col, '0', 'T') else: GEOrecs = data if len(GEOrecs) > 0: # have some geographic data num_dropped = 0 DIDDs = [] # set up list for dec inc dip_direction, dip for rec in GEOrecs: # parse data dip, dip_dir = 0, -1 Dec = float(rec[dec_col]) Inc = float(rec[inc_col]) orecs = pmag.get_dictitem( ordata, site_col, rec[site_col], 'T') if len(orecs) > 0: if orecs[0][azkey] != "": dip_dir = float(orecs[0][azkey]) if orecs[0][dipkey] != "": dip = float(orecs[0][dipkey]) if dip != 0 and dip_dir != -1: if '-exc' in sys.argv: keep = 1 for site_crit in SiteCrits: crit_name = site_crit['table_column'].split('.')[1] if crit_name and crit_name in rec.keys() and rec[crit_name]: # get the correct operation (<, >=, =, etc.) op = OPS[site_crit['criterion_operation']] # then make sure the site record passes if op(float(rec[crit_name]), float(site_crit['criterion_value'])): keep = 0 if keep == 1: DIDDs.append([Dec, Inc, dip_dir, dip]) else: num_dropped += 1 else: DIDDs.append([Dec, Inc, dip_dir, dip]) if num_dropped: print("-W- Dropped {} records because each failed one or more criteria".format(num_dropped)) else: print('no geographic directional data found') sys.exit() pmagplotlib.plot_eq(PLTS['geo'], DIDDs, 'Geographic') data = np.array(DIDDs) D, I = pmag.dotilt_V(data) TCs = np.array([D, I]).transpose() pmagplotlib.plot_eq(PLTS['strat'], TCs, 'Stratigraphic') if plot == 0: pmagplotlib.draw_figs(PLTS) Percs = list(range(untilt_min, untilt_max)) Cdf, Untilt = [], [] plt.figure(num=PLTS['taus']) print('doing ', nboot, ' iterations...please be patient.....') for n in range(nboot): # do bootstrap data sets - plot first 25 as dashed red line if n % 50 == 0: print(n) Taus = [] # set up lists for taus PDs = pmag.pseudo(DIDDs) if kappa != 0: for k in range(len(PDs)): d, i = pmag.fshdev(kappa) dipdir, dip = pmag.dodirot(d, i, PDs[k][2], PDs[k][3]) PDs[k][2] = dipdir PDs[k][3] = dip for perc in Percs: tilt = np.array([1., 1., 1., 0.01*perc]) D, I = pmag.dotilt_V(PDs*tilt) TCs = np.array([D, I]).transpose() ppars = pmag.doprinc(TCs) # get principal directions Taus.append(ppars['tau1']) if n < 25: plt.plot(Percs, Taus, 'r--') # tilt that gives maximum tau Untilt.append(Percs[Taus.index(np.max(Taus))]) Cdf.append(float(n) / float(nboot)) plt.plot(Percs, Taus, 'k') plt.xlabel('% Untilting') plt.ylabel('tau_1 (red), CDF (green)') Untilt.sort() # now for CDF of tilt of maximum tau plt.plot(Untilt, Cdf, 'g') lower = int(.025*nboot) upper = int(.975*nboot) plt.axvline(x=Untilt[lower], ymin=0, ymax=1, linewidth=1, linestyle='--') plt.axvline(x=Untilt[upper], ymin=0, ymax=1, linewidth=1, linestyle='--') tit = '%i - %i %s' % (Untilt[lower], Untilt[upper], 'Percent Unfolding') 
print(tit) plt.title(tit) if plot == 0: pmagplotlib.draw_figs(PLTS) ans = input('S[a]ve all figures, <Return> to quit \n ') if ans != 'a': print("Good bye") sys.exit() files = {} for key in list(PLTS.keys()): files[key] = ('foldtest_'+'%s' % (key.strip()[:2])+'.'+fmt) pmagplotlib.save_plots(PLTS, files)
21,936
def webhook(): """ CI with GitHub & PythonAnywhere Author : Aadi Bajpai https://medium.com/@aadibajpai/deploying-to-pythonanywhere-via-github-6f967956e664 """ try: event = request.headers.get('X-GitHub-Event') # Get payload from GitHub webhook request payload = request.get_json() x_hub_signature = request.headers.get('X-Hub-Signature') # Check if signature is valid if not github.is_valid_signature(x_hub_signature, request.data): abort(401) if event == "ping": return json.dumps({'msg': 'Ping Successful!'}) if event != "push": return json.dumps({'msg': "Wrong event type"}) repo = git.Repo(my_directory) branch = payload['ref'][11:] # Checking that branch is a non staging deployments if my_directory != "/home/stagingapi/mysite": if branch != 'master': return json.dumps({'msg': 'Not master; ignoring'}) repo.git.reset('--hard') origin = repo.remotes.origin try: origin.pull(branch) utility.write("tests/gitstats.txt", f'{branch} ,' + str(payload["after"])) return f'Updated PythonAnywhere successfully with branch: {branch}' except Exception: origin.pull('master') utility.write("tests/gitstats.txt", f'{branch} ,' + str(payload["after"])) return 'Updated PythonAnywhere successfully with branch: master' except Exception as error_message: return utility.handle_exception( "Github Update Server", {error_message})
21,937
def regenerate_browse_image(dataset_directory):
    """
    Regenerate the browse image for a given dataset path.

    (TODO: This doesn't regenerate package checksums yet. It's mostly useful for development.)

    :param dataset_directory:
    :return:
    """
    dataset_metadata = serialise.read_dataset_metadata(dataset_directory)

    product_type = dataset_metadata.product_type
    dataset_driver = drivers.PACKAGE_DRIVERS[product_type]

    # Clear existing browse metadata, so we can create updated info.
    dataset_metadata.browse = None
    dataset_metadata = create_dataset_browse_images(dataset_driver, dataset_metadata, dataset_directory)

    serialise.write_dataset_metadata(dataset_directory, dataset_metadata)
21,938
def cli_parser() -> argparse.Namespace: """ Parser for the command line interface. """ fw_parser = argparse.ArgumentParser( fromfile_prefix_chars="@", description="FileWriter Starter" ) fw_parser.add_argument( "-f", "--filename", metavar="filename", type=str, required=True, help="Name of the output file, e.g., `<filename>.nxs`.", ) fw_parser.add_argument( "-j", "--job-id", metavar="job_id", type=str, help="The job identifier of the currently running file-writer job. " "The job identifier should be a valid UUID.", ) fw_parser.add_argument( "-c", "--config", metavar="json_config", type=str, required=True, help="Path to JSON config file.", ) fw_parser.add_argument( "-b", "--broker", metavar="kafka_broker", type=str, default="localhost:9092", help="Kafka broker port.", ) fw_parser.add_argument( "-t", "--command-status-topic", metavar="consume_topic", type=str, required=True, help="Name of the Kafka topic to listen to" " commands and send status to.", ) fw_parser.add_argument( "-p", "--job-pool-topic", metavar="job_pool_topic", type=str, required=True, help="The Kafka topic that the available file-writers" " are listening to for write jobs.", ) fw_parser.add_argument( "--timeout", metavar="ack_timeout", type=float, default=5, help="How long to wait for timeout on acknowledgement.", ) fw_parser.add_argument( "--stop", metavar="stop_writing", type=float, help="How long the file will be written.", ) args = fw_parser.parse_args() return args
21,939
def pension_drawdown(months, rate, monthly_drawdown, pension_pot):
    """
    Returns the balance left in the pension pot after drawing an income
    for the given nr of months
    """
    return monthly_growth(months, rate, -monthly_drawdown, pension_pot)
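monthly_growth is not included in the snippet. Assuming it compounds a starting balance at a monthly rate while adding a fixed monthly contribution (so a drawdown is just a negative contribution), a hypothetical closed-form sketch would be:

def monthly_growth(months, rate, monthly_contribution, starting_balance):
    """Hypothetical helper: balance after compounding monthly and adding a fixed amount each month.
    Assumes rate != 0."""
    factor = (1 + rate) ** months
    return starting_balance * factor + monthly_contribution * (factor - 1) / rate

# Draw 1,000 per month from a 200,000 pot growing at 0.3% per month for 12 months.
print(pension_drawdown(12, 0.003, 1000, 200_000))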
21,940
def bytesToUInt(bytestring):
    """Unpack 4 byte string to unsigned integer, assuming big-endian byte order"""
    return _doConv(bytestring, ">", "I")
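_doConv is not shown; given the ">" and "I" arguments it presumably wraps struct.unpack. A standalone equivalent:

import struct

def bytes_to_uint(bytestring):
    # ">I" = big-endian unsigned 32-bit integer
    return struct.unpack(">I", bytestring)[0]

print(bytes_to_uint(b"\x00\x00\x01\x00"))  # 256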
21,941
def safe_mkdirs(path):
    """
    This makes new directories if needed.

    Args:
        path (str): The full path of the would be directory.
    """
    if not os.path.exists(path):
        os.makedirs(path)
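Note that the exists-then-create pattern has a small race window if two processes call it at once; on Python 3.2+ the same behaviour can be had atomically with the exist_ok flag:

import os

def safe_mkdirs(path):
    """Create the directory (and any parents) if it does not already exist."""
    os.makedirs(path, exist_ok=True)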
21,942
def fractal_se(p_x, p_y, img):
    """ if possible, place pixel's SE fractal on image """
    if (
        # S
        ((p_y + 1) < len(img) and img[p_y + 1][p_x] != "1") and
        # SW
        ((p_x - 1) >= 0 and img[p_y + 1][p_x - 1] != "1") and
        # E
        ((p_x + 1) < len(img) and img[p_y][p_x + 1] != "1") and
        # NE
        ((p_y - 1) >= 0 and img[p_y - 1][p_x + 1] != "1") and
        # SE
        img[p_y + 1][p_x + 1] != "1"
    ):
        img[p_y + 1][p_x + 1] = "SE"
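A tiny grid demo (the 3x3 image of "0" strings is made up; all five neighbour checks pass, so the south-east cell is marked):

img = [["0", "0", "0"],
       ["0", "0", "0"],
       ["0", "0", "0"]]
fractal_se(1, 1, img)
print(img[2][2])  # "SE"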
21,943
def use(*authenticator_classes):
    """ A decorator to attach one or more :class:`Authenticator`'s to the decorated class.

    Usage:
        from thorium import auth

        @auth.use(BasicAuth, CustomAuth)
        class MyEngine(Endpoint):
            ...

        OR

        @auth.use(BasicAuth)
        @auth.use(CustomAuth)
        class MyEngine(Endpoint):
            ...

    :param authenticator_classes: One or more :class:`Authenticator` class definitions.
    """
    def wrapped(cls):
        if not cls._authenticator_classes:
            cls._authenticator_classes = []
        cls._authenticator_classes.extend(authenticator_classes)
        return cls
    return wrapped
21,944
def list_standard_models():
    """Return a list of all the StandardCellType classes available for this simulator."""
    standard_cell_types = [obj for obj in globals().values()
                           if isinstance(obj, type) and issubclass(obj, standardmodels.StandardCellType)]
    for cell_class in list(standard_cell_types):  # iterate over a copy so removal is safe
        try:
            create(cell_class)
        except Exception as e:
            print("Warning: %s is defined, but produces the following error: %s" % (cell_class.__name__, e))
            standard_cell_types.remove(cell_class)
    return [obj.__name__ for obj in standard_cell_types]
21,945
def test_distinct_multi_columns(schools):
    """Test getting distinct values for multiple columns."""
    boro_grade = distinct(schools, columns=['borough', 'grade'])
    assert len(boro_grade) == 37
    assert boro_grade[('K', '09-12')] == 14
    assert sum(v for v in boro_grade.values()) == 100
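The test treats distinct() as returning a mapping from value tuples to row counts. The original implementation is not part of the snippet; over a pandas DataFrame it could be approximated as the sketch below (an assumption, not the project's code):

from collections import Counter

def distinct(df, columns):
    """Hypothetical sketch: count occurrences of each distinct combination of the given columns."""
    return Counter(tuple(row) for row in df[columns].itertuples(index=False))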
21,946
def AchievableTarget(segments,target,Speed): """ The function checks if the car can make the required curvature to reach the target, taking into account its speed Return [id, radius, direction} id = 1 -> achievable else id =0 direction = 1 -> right direction = -1 -> left """ Rminamaxlat=Speed**2/parameters.Max_accelerationlateral Rminepsilonmax=parameters.tsb*Speed**2/(parameters.epsilonmax*pi/180)+parameters.Car_length/(parameters.epsilonmax*pi/180) Rmin=max(Rminamaxlat,Rminepsilonmax) Rmax=abs(CurvatureRadius(target))/3 xp=target[0] yp=target[1] Ns=len(segments) #coeficient K=0 if xp!=0: K=yp/xp #Calculating which way the car will turn direction=1 #right if yp<0: direction=-1 #left #If the radius of curvature is greater than the minimum possible then the objective is not reachable if Rmin>Rmax: return(0,Rmax,direction) #Adding possible radius values between the minimum and the maximum in the list R [] R=[] Nr=100 i=0 while i<Nr: R.append(Rmax-i*(Rmax-Rmin)/(Nr-1)) i+=1 #Checking all posible radius i=0 while i<Nr: r=R[i] yc=direction*r #If the car and the segment are aligned then the arc is a straight line without problems if yp==0: return(1,Rmax,1) if xp!=0: xinter=(-2*K*yc)/(1+K**2) yinter=K*xinter j=0 while (j<Ns and IntersectionArc([xinter,yinter],segments[j])!=1): j+=1 if j==Ns: return(1,r,direction) return(0,r,direction) xinter=0 yinter=direction*2*r theta=180 j=0 while (j<Ns and IntersectionArc([xinter,yinter],segments[j])!=1): j+=1 if j==Ns: return(1,r,direction) return(0,r,direction) i+=1
21,947
def handle_generic_response(response):
    """
    Handle generic response: output warning and suggested next steps

    This is reserved for unhandled status codes. Output a snippet of the
    response text to aid the user with debugging.

    Parameters
    ----------
    response: requests.Response
        The response from the HTTP request
    """
    assert isinstance(response, requests.Response)
    url = response.request.url
    print(f'Unhandled HTTP status code: {response.status_code} for request {url}')
    print(response.headers)
    print(response.text[:2000])
21,948
def test_equals(): """ Verify that a parameter set and a covering array that contains a "don't care" value is correctly converted to a data frame, where the "don't care" value becomes Pandas' pd.NA value. """ p1 = Parameter("Colour", [RED, GREEN]) p2 = Parameter("Pet", [BIRD, CAT, DOG, FISH]) p3 = Parameter("Speed", [FAST, SLOW]) p4 = Parameter("Music", [EIGHTIES, TWENTIES]) parameter_set = ParameterSet([p1, p2, p3, p4]) # Covering array from... # generator = RecursiveGenerator(parameter_set, 2) # covering_array = generator.generate_covering_array() covering_array = np.array( [ [1, 1, 1, 1], [1, 2, 2, 1], [2, 1, 2, 1], [2, 2, 1, 0], [2, 2, 2, 2], [1, 1, 1, 2], [1, 3, 1, 1], [2, 3, 2, 2], [1, 4, 1, 1], [2, 4, 2, 2], ] ) # configs1 = ConfigurationSet(parameter_set, covering_array) # configs2 = ConfigurationSet(parameter_set, covering_array) configs1 = ConfigurationSet( parameter_set=parameter_set, covering_array=covering_array ) configs2 = ConfigurationSet( parameter_set=parameter_set, covering_array=covering_array ) assert configs1 is not configs2 assert configs1 == configs2
21,949
def read_silixa_files_routine_v4( filepathlist, timezone_netcdf='UTC', silent=False, load_in_memory='auto'): """ Internal routine that reads Silixa files. Use dtscalibration.read_silixa_files function instead. The silixa files are already timezone aware Parameters ---------- load_in_memory filepathlist timezone_netcdf silent Returns ------- """ from xml.etree import ElementTree import dask # translate names tld = { 'ST': 'st', 'AST': 'ast', 'REV-ST': 'rst', 'REV-AST': 'rast', 'TMP': 'tmp'} # Open the first xml file using ET, get the name space and amount of data xml_tree = ElementTree.parse(filepathlist[0]) namespace = get_xml_namespace(xml_tree.getroot()) logtree = xml_tree.find('./{0}wellLog'.format(namespace)) logdata_tree = logtree.find('./{0}logData'.format(namespace)) # Amount of datapoints is the size of the logdata tree nx = len(logdata_tree) sep = ':' ns = {'s': namespace[1:-1]} # Obtain metadata from the first file attrs = read_silixa_attrs_singlefile(filepathlist[0], sep) # Add standardised required attributes attrs['isDoubleEnded'] = attrs['customData:isDoubleEnded'] double_ended_flag = bool(int(attrs['isDoubleEnded'])) attrs['forwardMeasurementChannel'] = attrs[ 'customData:forwardMeasurementChannel'] if double_ended_flag: attrs['backwardMeasurementChannel'] = attrs[ 'customData:reverseMeasurementChannel'] else: attrs['backwardMeasurementChannel'] = 'N/A' chFW = int(attrs['forwardMeasurementChannel']) - 1 # zero-based if double_ended_flag: chBW = int(attrs['backwardMeasurementChannel']) - 1 # zero-based else: # no backward channel is negative value. writes better to netcdf chBW = -1 # obtain basic data info if double_ended_flag: data_item_names = [ attrs['logCurveInfo_{0}:mnemonic'.format(x)] for x in range(0, 6)] else: data_item_names = [ attrs['logCurveInfo_{0}:mnemonic'.format(x)] for x in range(0, 4)] nitem = len(data_item_names) ntime = len(filepathlist) # print summary if not silent: print( '%s files were found, each representing a single timestep' % ntime) print( '%s recorded vars were found: ' % nitem + ', '.join(data_item_names)) print('Recorded at %s points along the cable' % nx) if double_ended_flag: print('The measurement is double ended') else: print('The measurement is single ended') # obtain timeseries from data timeseries_loc_in_hierarchy = [ ('wellLog', 'customData', 'acquisitionTime'), ('wellLog', 'customData', 'referenceTemperature'), ('wellLog', 'customData', 'probe1Temperature'), ('wellLog', 'customData', 'probe2Temperature'), ('wellLog', 'customData', 'referenceProbeVoltage'), ('wellLog', 'customData', 'probe1Voltage'), ('wellLog', 'customData', 'probe2Voltage'), ( 'wellLog', 'customData', 'UserConfiguration', 'ChannelConfiguration', 'AcquisitionConfiguration', 'AcquisitionTime', 'userAcquisitionTimeFW')] if double_ended_flag: timeseries_loc_in_hierarchy.append( ( 'wellLog', 'customData', 'UserConfiguration', 'ChannelConfiguration', 'AcquisitionConfiguration', 'AcquisitionTime', 'userAcquisitionTimeBW')) timeseries = { item[-1]: dict(loc=item, array=np.zeros(ntime, dtype=np.float32)) for item in timeseries_loc_in_hierarchy} # add units to timeseries (unit of measurement) for key, item in timeseries.items(): if f'customData:{key}:uom' in attrs: item['uom'] = attrs[f'customData:{key}:uom'] else: item['uom'] = '' # Gather data arr_path = 's:' + '/s:'.join(['wellLog', 'logData', 'data']) @dask.delayed def grab_data_per_file(file_handle): """ Parameters ---------- file_handle Returns ------- """ with open_file(file_handle, mode='r') as f_h: eltree = 
ElementTree.parse(f_h) arr_el = eltree.findall(arr_path, namespaces=ns) if not len(arr_el) == nx: raise ValueError( 'Inconsistent length of x-dimension' + '\nCheck if files are mixed up, or if the number of ' + 'data points vary per file.') # remove the breaks on both sides of the string # split the string on the comma arr_str = [arr_eli.text.split(',') for arr_eli in arr_el] return np.array(arr_str, dtype=float) data_lst_dly = [grab_data_per_file(fp) for fp in filepathlist] data_lst = [ da.from_delayed(x, shape=(nx, nitem), dtype=float) for x in data_lst_dly] data_arr = da.stack(data_lst).T # .compute() # Check whether to compute data_arr (if possible 25% faster) data_arr_cnk = data_arr.rechunk({0: -1, 1: -1, 2: 'auto'}) if load_in_memory == 'auto' and data_arr_cnk.npartitions <= 5: if not silent: print('Reading the data from disk') data_arr = data_arr_cnk.compute() elif load_in_memory: if not silent: print('Reading the data from disk') data_arr = data_arr_cnk.compute() else: if not silent: print('Not reading the data from disk') data_arr = data_arr_cnk data_vars = {} for name, data_arri in zip(data_item_names, data_arr): if name == 'LAF': continue if tld[name] in dim_attrs: data_vars[tld[name]] = ( ['x', 'time'], data_arri, dim_attrs[tld[name]]) else: raise ValueError( 'Dont know what to do with the' + ' {} data column'.format(name)) # Obtaining the timeseries data (reference temperature etc) _ts_dtype = [(k, np.float32) for k in timeseries] _time_dtype = [ ('filename_tstamp', np.int64), ('minDateTimeIndex', '<U29'), ('maxDateTimeIndex', '<U29')] ts_dtype = np.dtype(_ts_dtype + _time_dtype) @dask.delayed def grab_timeseries_per_file(file_handle): """ Parameters ---------- file_handle Returns ------- """ with open_file(file_handle, mode='r') as f_h: eltree = ElementTree.parse(f_h) out = [] for k, v in timeseries.items(): # Get all the timeseries data if 'userAcquisitionTimeFW' in v['loc']: # requires two namespace searches path1 = 's:' + '/s:'.join(v['loc'][:4]) val1 = eltree.findall(path1, namespaces=ns) path2 = 's:' + '/s:'.join(v['loc'][4:6]) val2 = val1[chFW].find(path2, namespaces=ns) out.append(val2.text) elif 'userAcquisitionTimeBW' in v['loc']: # requires two namespace searches path1 = 's:' + '/s:'.join(v['loc'][:4]) val1 = eltree.findall(path1, namespaces=ns) path2 = 's:' + '/s:'.join(v['loc'][4:6]) val2 = val1[chBW].find(path2, namespaces=ns) out.append(val2.text) else: path = 's:' + '/s:'.join(v['loc']) val = eltree.find(path, namespaces=ns) out.append(val.text) # get all the time related data startDateTimeIndex = eltree.find( 's:wellLog/s:minDateTimeIndex', namespaces=ns).text endDateTimeIndex = eltree.find( 's:wellLog/s:maxDateTimeIndex', namespaces=ns).text if isinstance(file_handle, tuple): file_name = os.path.split(file_handle[0])[-1] else: file_name = os.path.split(file_handle)[-1] tstamp = np.int64(file_name[10:-4]) out += [tstamp, startDateTimeIndex, endDateTimeIndex] return np.array(tuple(out), dtype=ts_dtype) ts_lst_dly = [grab_timeseries_per_file(fp) for fp in filepathlist] ts_lst = [ da.from_delayed(x, shape=tuple(), dtype=ts_dtype) for x in ts_lst_dly] ts_arr = da.stack(ts_lst).compute() for name in timeseries: if name in dim_attrs: data_vars[name] = (('time',), ts_arr[name], dim_attrs[name]) else: data_vars[name] = (('time',), ts_arr[name]) # construct the coordinate dictionary coords = { 'x': ('x', data_arr[0, :, 0], dim_attrs['x']), 'filename': ('time', [os.path.split(f)[1] for f in filepathlist]), 'filename_tstamp': ('time', ts_arr['filename_tstamp'])} 
maxTimeIndex = pd.DatetimeIndex(ts_arr['maxDateTimeIndex']) dtFW = ts_arr['userAcquisitionTimeFW'].astype('timedelta64[s]') if not double_ended_flag: tcoords = coords_time( maxTimeIndex, timezone_netcdf=timezone_netcdf, dtFW=dtFW, double_ended_flag=double_ended_flag) else: dtBW = ts_arr['userAcquisitionTimeBW'].astype('timedelta64[s]') tcoords = coords_time( maxTimeIndex, timezone_netcdf=timezone_netcdf, dtFW=dtFW, dtBW=dtBW, double_ended_flag=double_ended_flag) coords.update(tcoords) return data_vars, coords, attrs
21,950
def mutual_information(co_oc, oi, oj, n):
    """
    Pointwise mutual information of two terms, computed from corpus counts.

    :param co_oc: Number of co-occurrences of the terms oi and oj in the corpus
    :param oi: Number of occurrences of the term oi in the corpus
    :param oj: Number of occurrences of the term oj in the corpus
    :param n: Total number of words in the corpus
    :return: log2 of the observed co-occurrence count divided by the count
        expected if oi and oj were independent, i.e. (oi * oj) / n
    """
    e = (oi * oj)/n
    return math.log2(co_oc/e)
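# Hedged usage sketch (not from the source): made-up corpus counts showing the
# kind of value the function returns.
# "new york": 500 co-occurrences; "new": 2,000; "york": 700; corpus of 1,000,000 words.
import math  # required by mutual_information itself

print(round(mutual_information(500, 2000, 700, 1_000_000), 2))  # ~8.48 -> strongly associated pair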
21,951
def random(population: pd.DataFrame, num_parents_per_nationality: Dict[str, int]) -> pd.DataFrame:
    """Selects parents of the next generation randomly

    Args:
        population (pd.DataFrame): Current population dataframe.
        num_parents_per_nationality (Dict[str, int]): Dictionary indicating how many parents should come from each nation.

    Returns:
        df (pd.DataFrame): Parents of next generation.
    """
    samples = []
    national_origins = np.unique(population['birth_nation'])
    for nation in national_origins:
        tdf = population.loc[population['birth_nation'] == nation]
        # TODO see effect of setting random state
        tdf = tdf.sample(n=num_parents_per_nationality[nation], random_state=123)
        samples.append(tdf)
    # DataFrame.append was removed in pandas 2.0; concatenate the per-nation samples instead
    df = pd.concat(samples) if samples else pd.DataFrame()
    return df
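# Hedged usage sketch (not from the source): selecting parents from a tiny synthetic
# population; the column names mirror the ones the function expects.
import pandas as pd

population = pd.DataFrame({
    'birth_nation': ['Mexico', 'Mexico', 'India', 'India', 'India'],
    'fitness': [0.1, 0.9, 0.4, 0.7, 0.2],
})
parents = random(population, {'Mexico': 1, 'India': 2})
print(parents['birth_nation'].value_counts())  # India: 2, Mexico: 1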
21,952
def interpolate_GLMdenoise_to_fsaverage_prior(freesurfer_sub, prf_props, save_stem, GLMdenoise_path=None, plot_class=0, plot_bootstrap=0, target_varea=1, interp_method='linear'): """interpolate a scanning session's GLMdenoise models results to fsaverage space In order to combine data across subjects, we need them to have equivalent vertices (that is, vertices we can consider 'the same' and average together). We follow the method done by Benson et al, 2019's analysis of the retinotopic data in the Human Connectome Project: interpolate each subject's results to the locations in fsaverage, in the visual field (the Benson et al, 2014 retinotopic atlas defines the retinotopic coordinates for fsaverage). For the subject's retinotopic information, you should almost certainly pass the outputs of the Bayesian retinotopy, as a dictionary. For the paths used in this project, the following is how to create this dictionary (setting the BIDS_DIR and subject variables beforehand): ``` template = (f'{BIDS_dir}/derivatives/prf_solutions/{subject}/bayesian_posterior/' '{hemi}.inferred_{data}.mgz') prf_props = {} for h in ['lh', 'rh']: prf_props[h] = {} names = zip(['varea', 'eccen', 'angle'], ['visual_area', 'eccentricity', 'polar_angle']) for k, prop in names: prf_props[h][prop] = ny.load(template.format(hemi=h, data=k)) ``` The following steps are taken: - grab and shape the 'models' field from the GLMdenoise results.mat file, add to the prf_props dict - for each hemisphere: - add all properties from the prf_props dict to the neuropythy mesh - grab the fsaverage retinotopic prior (from the neuropythy package) - for each bootstrap: - interpolate the amplitude estimates for all models from the subject's retinotopic space to the fsaverage one - insert all these interpolated estimates into a properly-sized array - concatenate this array across hemispheres and save as an hdf5 file (the array is now the right size for the GLMdenoise results field, but doesn't look like quite right because the GLMdenoise results field also contains the fitted HRF) The main output is: - save_stem+"_models.hdf5": a HDF5 file containing the array (as field 'models') with shape (num_bootstraps, num_classes, 1, num_vertices, 1) containing the subject/session's amplitude estimates (for each bootstrap and class) interpolate to the fsaverage retinotopic prior space. It has this shape because that's the shape of the GLMdenoise output, and we'll want to mimic that. We use a HDF5 file because this will be very large, and a HDF5 file is more compact than a .npy file We also produce several outputs to help check what's going on. The first two are plots which show the same amplitude estimates, one in the subject's original retinotopic space, and one interpolated to the fsaverage retinotopic prior space. These two should look like they're conveying the same information, just sampling at different locations. - save_stem+"_models_b{plot_bootstrap}_c{plot_class}_space-subject.png": a plot showing the amplitude estimates for the stimulus class `plot_class` and the bootstrap `plot_bootstrap` as a scatter plot, with x, y locations coming from the subject's pRFs and the values from the output of GLMdenoise - save_stem+"_models_b{plot_bootstrap}_c{plot_class}_space-prior.png": a plot showing the amplitude estimates for the stimulus class `plot_class` and the bootstrap `plot_bootstrap` as a scatter plot, with x, y locations coming from the fsaverage pRF prior and the interpolated values. 
We then produce four outputs to examine any voxels that have zero amplitudes. GLMdenoise shouldn't produce voxels that have an amplitude estimate of exactly zero, so this is often a sign that something has gotten messed up. For each of the following, if there are no voxels with zero amplitude, we create a text file (replacing the .png extension with .txt) that contains the string "No voxels have amplitude zero" instead of the plot. - save_stem+"_zero_check_b{plot_bootstrap}_coords-polar_space-subject": a seaborn pairplot showing the polar angle and eccentricity locations of all voxels that have any zero amplitudes prior to interpolation. - save_stem+"_zero_check_b{plot_bootstrap}_coords-cartesian_space-subject": a seaborn pairplot showing the x and y locations of all voxels that have any zero amplitudes prior to interpolation. - save_stem+"_zero_check_b{plot_bootstrap}_coords-polar_space-prior": a seaborn pairplot showing the polar angle and eccentricity locations of all voxels that have any zero amplitudes after interpolation - save_stem+"_zero_check_b{plot_bootstrap}_coords-polar_space-prior": a seaborn pairplot showing the x and y locations of all voxels that have any zero amplitudes after interpolation. The expectation is: - There should never be any voxels with amplitude zero prior to interpolation (so none of the `space-subject` plots should be created) - if `interp_method='linear'`, the only voxels with amplitude zero after interpolation should be at the extremes of the visual field (so along the visual meridian and far periphery / with min and max possible eccentricity values) - if `interp_method='nearest'`, no voxels should have amplitude zero after interpolation Parameters ---------- freesurfer_sub : str The freesurfer subject to use. This can be either the name (e.g., wlsubj045; in which case the environmental variable SUBJECTS_DIR must be set) or a path to the freesurfer folder. It will be passed directly to neuropythy.freesurfer_subject, so see the docstring of that function for more details prf_props : dict dictionary containing the arrays with prf properties to add to the neuropythy freesurfer subject. This should contain two keys, 'lh' and 'rh', corresponding to the left and right hemispheres, respectively. Each of those should have a dictionary containing identical keys, which should be some subset of 'visual_area', 'eccentricity', and 'polar_angle'. If any of those are not included in prf_props, we will use the corresponding property from the freesurfer directory (and if they aren't present there, this function will fail). The intended use is that this will contain the results of the Bayesian retinotopy, which we'll use as the pRF parameters in subject-space. save_stem : str the stem of the path to save things at (i.e., should not end in the extension) GLMdenoise_path : str or None, optional path to the results.mat file created by GLMdenoise for this subject/session. If None, we assume prf_props already contains the 'models_bootstrap_{i:02d}' keys plot_class : int, optional we create a plot showing the amplitudes for one class, one bootstrap. this specifies which class to plot. plot_bootstrap : int, optional we create a plot showing the amplitudes for one class, one bootstrap. this specifies which bootstrap to plot. target_varea : int, optional The visual area we're interpolating. 
because we interpolate in the visual field, we can only do one visual area at a time (because otherwise they'll interfere with each other) interp_method : {'nearest', 'linear'}, optional whether to use linear or nearest-neighbor interpolation. See the docstring of `neuropythy.mesh.interpolate` for more details Returns ------- interp_all : np.array the numpy array containing the interpolated amplitude estimates, of shape (num_bootstraps, num_classes, 1, num_vertices, 1). note that num_vertices here is the number of vertices in the entire fsaverage brain, not just `target_varea` (but all vertices not in that visual area will be 0). """ sub = ny.freesurfer_subject(freesurfer_sub) if GLMdenoise_path is not None: prf_props = add_GLMdenoise_field_to_props(GLMdenoise_path, prf_props) num_bootstraps = len([b for b in prf_props['lh'].keys() if 'bootstrap' in b]) if num_bootstraps != 100: raise Exception(f"There should be 100 bootstraps, but there are {num_bootstraps}!") priors = {} idx = {} submesh = {} for hemi in ['lh', 'rh']: priors[hemi] = dict(zip(['x', 'y', 'varea', 'polar_angle', 'eccentricity'], get_fsaverage_coords(hemi, target_varea))) # we need to figure out which vertices correspond to our # targeted visual area for constructing the overall array (which # should mimic the results of GLMdenoise run on the full # brain). we grab the first element of np.where because this is # a 1d array idx[hemi] = np.where(priors[hemi]['varea'] == target_varea)[0] if hemi == 'lh': mesh = sub.lh.with_prop(**prf_props['lh']) else: mesh = sub.rh.with_prop(**prf_props['rh']) submesh[hemi] = mesh.white_surface.submesh(mesh.white_surface.mask(('visual_area', target_varea))) # grab the vmin and vmax, for the target varea, in the plotted # bootstrap, across both hemispheres and all classes. We use 1st and # 99th percnetile because the min/max are often much larger than the # rest of the distribution vmin = min(np.percentile(submesh['lh'].properties[f'models_bootstrap_{plot_bootstrap:02d}'][:, plot_class], 1), np.percentile(submesh['rh'].properties[f'models_bootstrap_{plot_bootstrap:02d}'][:, plot_class], 1)) vmax = max(np.percentile(submesh['lh'].properties[f'models_bootstrap_{plot_bootstrap:02d}'][:, plot_class], 99), np.percentile(submesh['rh'].properties[f'models_bootstrap_{plot_bootstrap:02d}'][:, plot_class], 99)) interpolated_all = [] zero_check_data = {'submesh': {}, 'interpolated': {}, 'original': {}} for hemi in ['lh', 'rh']: # this should be of shape (num_bootstraps, num_classes, 1, # num_vertices, 1), in order to mimic the output of # GLMdenoise. num_vertices will be different between the two # hemispheres, everything else will be the same. 
Note that we # use priors[hemi][varea] to get the number of vertices, NOT # prf_props[hemi]['models_bootstrap_00'], because we want the # number in fsaverage-space, not in subject-space _, num_classes = prf_props[hemi]['models_bootstrap_00'].shape interpolated_hemi = np.zeros((num_bootstraps, num_classes, 1, priors[hemi]['varea'].shape[0], 1)) x, y = ny.as_retinotopy(submesh[hemi], 'geographical') submesh_tmp = submesh[hemi].copy(coordinates=[x, y]) zero_check_data['submesh'][hemi] = submesh_tmp.with_prop(x=x, y=y).properties # neuropythy's interpolate can only work with 2d arrays, so we # need to do each bootstrap separate for i in range(num_bootstraps): interp_models = submesh_tmp.interpolate([priors[hemi]['x'], priors[hemi]['y']], f'models_bootstrap_{i:02d}', method=interp_method) # for now, there's a bug where neuropythy isn't putting # inserting NaNs in the extrapolated locations, so we do # that manually. they'll be exactly 0 interp_models[interp_models.sum(1)==0] = np.nan interpolated_hemi[i, :, 0, idx[hemi], 0] = interp_models if i == plot_bootstrap: fig = plot_amplitudes(x, y, submesh_tmp.properties[f'models_bootstrap_{i:02d}'], hemi, f'bootstrap {i}', 'subject', plot_class, vmin=vmin, vmax=vmax) fig.savefig(save_stem + f"_models_{hemi}_b{i:02d}_c{plot_class:02d}_space-subject.png") fig = plot_amplitudes(priors[hemi]['x'], priors[hemi]['y'], interp_models, hemi, f'bootstrap {i}', 'fsaverage', plot_class, vmin=vmin, vmax=vmax) fig.savefig(save_stem + f"_models_{hemi}_b{i:02d}_c{plot_class:02d}_space-prior.png") zero_check_data['interpolated'][hemi] = interp_models zero_check_data['original'][hemi] = submesh_tmp.properties[f'models_bootstrap_{i:02d}'] interpolated_all.append(interpolated_hemi) for a, p, s, n in zip([zero_check_data['original'], zero_check_data['interpolated']], [zero_check_data['submesh'], priors], ['subject', 'prior'], ['zero', 'nan']): for v, c in zip([['polar_angle', 'eccentricity'], ['x', 'y']], ['polar', 'cartesian']): fig = plot_zero_check(a, p, v, nan_check=(n == 'nan')) if not isinstance(fig, str): fig.savefig(save_stem + f"_{n}_check_b{i:02d}_coords-{c}_space-{s}.png") else: print(fig) print(fig, file=open(save_stem + f"_zero_check_b{i:02d}_coords-{c}_space-{s}.txt", 'w')) # concatenate into one array (vertices are on dimension 3) interpolated_all = np.concatenate(interpolated_all, 3) # and save with h5py.File(save_stem + '_models.hdf5', 'w') as f: f.create_dataset('results/models', data=interpolated_all, compression='gzip') return interpolated_all
21,953
def is_dark(color: str) -> bool:
    """
    Whether the given color (a 6-digit hex string without '#') is dark or bright

    Taken from https://github.com/ozh/github-colors
    """
    luminance = 0.2126 * int(color[0:2], 16) + 0.7152 * int(color[2:4], 16) + 0.0722 * int(color[4:6], 16)
    return luminance / 255 <= 0.65
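# Hedged usage sketch (not from the source): the luminance check on two well-known
# GitHub language colours.
print(is_dark('3572A5'))  # Python blue        -> True  (light text reads well on it)
print(is_dark('f1e05a'))  # JavaScript yellow  -> False (dark text reads well on it)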
21,954
def test_null_identifiers_go_to_the_right_case(multiple_identifier_target, stu, cases): """ If an identifying column can be null, then there is no way to associate it with a case unless there is another non-null identifying column. """ multiple_identifier_target.load_actual( [ { "id": stu["c1stu1"]["id"], "uuid": stu["c1stu1"]["uuid"], "first_name": "Buffy", }, {"id": stu["c2stu2"]["id"], "uuid": None, "first_name": "Willow"}, ] ) actual = multiple_identifier_target.case_data(cases[1]) expected = markdown_to_df( """ | id | uuid | first_name | | - | - | - | | stu2 | {NULL} | Willow | """ ) assert_frame_equal(actual, expected)
21,955
async def consume(queue: asyncio.Queue, es: AsyncElasticsearch) -> NoReturn: """Consume and run a job from the shared queue.""" while True: # Wait for a job from the producers job = await queue.get() logging.info(f"Starting the '{job.name}' job") # Execute the job function = getattr(jobs, job.name) await function(job.start_time_ms, es, **job.params) logging.info(f"Finished running the '{job.name}' job")
21,956
def get_date_input_examples(FieldClass) -> list: """ Generate examples for a valid input value. :param FieldClass: InputField :return: List of input examples. """ r = [] for f in FieldClass.input_formats: now = datetime.now() r.append(now.strftime(f)) return r
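# Hedged usage sketch (not from the source): `DummyField` is a stand-in for the
# InputField class the function expects; only an `input_formats` attribute is required.
class DummyField:
    input_formats = ['%Y-%m-%d', '%d.%m.%Y', '%Y-%m-%dT%H:%M']

print(get_date_input_examples(DummyField))
# e.g. ['2024-05-08', '08.05.2024', '2024-05-08T14:30']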
21,957
def fail(value, context_info=None, *, src_exception=None, err_condition=None): """Wrapper to raise (and log) DAVError.""" if isinstance(value, Exception): e = as_DAVError(value) else: e = DAVError( value, context_info, src_exception=src_exception, err_condition=err_condition, ) _logger.debug("Raising DAVError {}".format(e.get_user_info())) raise e
21,958
def sve_logistic():
    """SVEs of the logistic kernel for Lambda = 10, 42 and 10_000"""
    print("Precomputing SVEs for logistic kernel ...")
    return {
        10: sparse_ir.compute_sve(sparse_ir.LogisticKernel(10)),
        42: sparse_ir.compute_sve(sparse_ir.LogisticKernel(42)),
        10_000: sparse_ir.compute_sve(sparse_ir.LogisticKernel(10_000))
    }
21,959
def post_team_iteration(id, team, organization=None, project=None, detect=None): # pylint: disable=redefined-builtin """Add iteration to a team. :param id: Identifier of the iteration. :type: str :param team: Name or ID of the team. :type: str """ organization, project = resolve_instance_and_project(detect=detect, organization=organization, project=project) client = get_work_client(organization) team_context = TeamContext(project=project, team=team) team_setting_iteration = TeamSettingsIteration(id=id) try: team_iteration = client.post_team_iteration(iteration=team_setting_iteration, team_context=team_context) return team_iteration except AzureDevOpsServiceError as ex: _handle_empty_backlog_iteration_id(ex=ex, client=client, team_context=team_context)
21,960
def JoinTypes(types): """Combine a list of types into a union type, if needed. Leaves singular return values alone, or wraps a UnionType around them if there are multiple ones, or if there are no elements in the list (or only NothingType) return NothingType. Arguments: types: A list of types. This list might contain other UnionTypes. If so, they are flattened. Returns: A type that represents the union of the types passed in. Order is preserved. """ queue = collections.deque(types) seen = set() new_types = [] while queue: t = queue.popleft() if isinstance(t, pytd.UnionType): queue.extendleft(reversed(t.type_list)) elif isinstance(t, pytd.NothingType): pass elif t not in seen: new_types.append(t) seen.add(t) if len(new_types) == 1: return new_types.pop() elif any(isinstance(t, pytd.AnythingType) for t in new_types): return pytd.AnythingType() elif new_types: return pytd.UnionType(tuple(new_types)) # tuple() to make unions hashable else: return pytd.NothingType()
21,961
def main(): """Populate MMap datasets."""
21,962
def scale_bar_and_direction(ax,arrow_location=(0.86,0.08),scalebar_location=(0.88,0.05),scalebar_distance=25,zorder=20):
    """Draw a scale bar and direction arrow

    Parameters
    ----------
    ax : axes
        axis to draw the scale bar and north arrow on
    arrow_location : tuple
        location of the north arrow in axis coordinates
        (i.e. 0.5 is the middle of the plot)
    scalebar_location : tuple
        center of the scale bar in axis coordinates
    scalebar_distance : int
        length of the scale bar in km
    zorder : int
        drawing order of the scale bar and arrow
    """
    # lat-lon limits
    scale_bar(ax, scalebar_location, scalebar_distance, color='k', zorder=zorder)

    ax.text(*arrow_location, transform=ax.transAxes, s='N', fontsize=14, zorder=zorder)
    arrow_location = numpy.asarray(arrow_location) + numpy.asarray((0.008, -0.03))
    # arrow_location[1] = arrow_location[1] - 0.02
    ax.arrow(*arrow_location, 0, 0.02, length_includes_head=True,
             head_width=0.01, head_length=0.04, overhang=0.2, transform=ax.transAxes,
             facecolor='k', zorder=zorder)
21,963
def calc_nominal_strike(traces: np.ndarray): """ Gets the start and ending trace of the fault and ensures order for largest lon value first Parameters ---------- traces: np.ndarray Array of traces of points across a fault with the format [[lon, lat, depth],...] """ # Extract just lat and lon for the start and end of the traces trace_start, trace_end = [traces[0][0], traces[0][1]], [ traces[-1][0], traces[-1][1], ] # Ensures correct order if trace_start[0] < trace_end[0]: return np.asarray([trace_end]), np.asarray([trace_start]) else: return np.asarray([trace_start]), np.asarray([trace_end])
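# Hedged usage sketch (not from the source): two fault trace points, with the
# larger-longitude end returned first as the function guarantees.
import numpy as np

traces = np.array([[172.5, -43.5, 0.0], [173.1, -43.2, 0.0]])
start, end = calc_nominal_strike(traces)
print(start)  # [[173.1 -43.2]] -- largest-longitude end first
print(end)    # [[172.5 -43.5]]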
21,964
def data_split(config_path: Text) -> None: """Split dataset into train/test. Args: config_path {Text}: path to config """ config = load_config(config_path) dataset = pd.read_csv(config.featurize.features_path) train_dataset, test_dataset = train_test_split( dataset, test_size=config.data_split.test_size, random_state=config.base.random_state ) train_csv_path = config.data_split.train_path test_csv_path = config.data_split.test_path train_dataset.to_csv(train_csv_path, index=False) test_dataset.to_csv(test_csv_path, index=False) print(f'Train data saved to: {train_csv_path}') print(f'Train data shape: {train_dataset.shape}') print(f'Test data saved to: {test_csv_path}') print(f'Test data shape: {test_dataset.shape}')
21,965
def make_tree_plot(df_summary, param_names=None, info_path=InfoPath(), tree_params: TreePlotParams = TreePlotParams(), summary_params=SummaryParams()): """ Make tree plot of parameters. """ info_path = InfoPath(**info_path.__dict__) tree_plot_data = extract_tree_plot_data( df_summary, param_names=param_names, summary_params=summary_params) fig, ax = tree_plot(tree_plot_data, params=tree_params) info_path.base_name = info_path.base_name or 'summary' info_path.extension = info_path.extension or 'pdf' the_path = get_info_path(info_path) fig.savefig(the_path, dpi=info_path.dpi) plt.close(fig)
21,966
def create_category_freq_plots(freq_dict, iri2label_dict, args={'subonto':'all'}, categories=['gender', 'sexual orientation', 'race', 'disability', 'religion']):
    """Create frequency plots of the annotation categories for one (sub)ontology.

    For every subannotation in freq_dict, export a plot comparing how often each
    category class (gender, sexual orientation, race, disability, religion) occurs
    in the selected subontology (args['subonto'])."""
    print('Creating plots of category frequency terms in all S')
    t0 = time.time()
    o_path = os.path.join(RES_DIR, 'freq_plots_category')
    n_tag = get_tag_n_prot_attr()
    if not os.path.isdir(o_path):
        os.mkdir(o_path)
    IRI_categories = get_iri_from_label_list(categories, iri2label_dict)
    index_dict = {'categories':IRI_categories}
    subonto = args['subonto']
    for subannot in freq_dict.keys():
        title = '{}_{}'.format(subannot, subonto)
        export_freq_plot(index_dict, freq_dict[subannot][subonto], iri2label_dict, o_path, title, n_tag)
    print("Executed in %s seconds." % str(time.time()-t0))
    return
21,967
def merge_options(custom_options, **default_options): """ Utility function to merge some default options with a dictionary of custom_options. Example: custom_options = dict(a=5, b=3) merge_options(custom_options, a=1, c=4) --> results in {a: 5, b: 3, c: 4} """ merged_options = default_options merged_options.update(custom_options) return merged_options
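# Hedged usage sketch (not from the source): the call from the docstring, run as code.
custom_options = dict(a=5, b=3)
print(merge_options(custom_options, a=1, c=4))  # {'a': 5, 'c': 4, 'b': 3}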
21,968
def test_exception_negative_count(marker_trackerstore: TrackerStore): """Tests an exception is thrown when an invalid count is given.""" with pytest.raises(RasaException): MarkerTrackerLoader(marker_trackerstore, STRATEGY_SAMPLE_N, -1)
21,969
def build_wall(game: Board, player: Player) -> float:
    """
    Encourage the player to move to the middle row and column of the board
    (index 3), which increases the chance of creating a partition later in
    the game.
    """
    position = game.get_player_location(player)
    blanks = game.get_blank_spaces()
    # blank spaces remaining in the middle column / middle row
    blank_vertical = [loc for loc in blanks if loc[1] == 3]
    blank_horizontal = [loc for loc in blanks if loc[0] == 3]
    vertical = len(blank_vertical)
    horizontal = len(blank_horizontal)
    if position == (3, 3):
        return max(vertical, horizontal)
    elif position[0] == 3:
        return horizontal
    elif position[1] == 3:
        return vertical
    else:
        return 0
21,970
def get_menu_from_hzu_navigation():
    """
    Fetch the HTML of the navigation bar on the Huizhou University (HZU) homepage.

    :return: a ul tag element, or None on failure
    """
    try:
        html = urlopen("https://www.hzu.edu.cn/")
    except HTTPError as e:
        print(e)
        print('The page does not exist, or an error occurred while fetching it.')
        return None
    except URLError as e:
        print(e)
        print("The URL is wrong or could not be opened.")
        return None
    try:
        bs = BeautifulSoup(html.read(), 'html.parser')
        return bs.find(id='naver').find('ul', {'class': {'wp-menu'}})
    except AttributeError as e:
        print(e)
        print('A tag element does not exist, or a bad URL (non-existent server) caused html.read() to fail.')
        return None
21,971
def test_find_by_id(session, client, jwt): """Assert that user find by id is working as expected.""" user = User.find_by_id(1) if not user: user2 = User.create_from_jwt_token(TEST_TOKEN, 'PS12345') user = User.find_by_id(user2.id) assert user assert user.id assert user.username == 'username_TEST1' assert user.iss == 'issuer_TEST1' assert user.sub == 'subject_TEST1' assert user.firstname == 'given_name_TEST1' assert user.lastname == 'family_name_TEST1'
21,972
def calc_user_withdraw_fee(user_id, amount):
    """Withdrawal fee policy: the first withdrawal of the day is free,
    any further withdrawal on the same day costs 2.00."""
    withdraw_logs = dba.query_user_withdraw_logs(user_id, api_x.utils.times.utctoday())
    if len(withdraw_logs) > 0:
        return Decimal('2.00')
    return Decimal('0.00')
21,973
def test_exception_handling_captures_calctypeerror(err_class, capfd): """Test exception handling captures input. Args: capfd: pytest stdout/stderr capture data """ from app.cli import _exception_handler import app.calculator as calc Error = getattr(calc, err_class) def f(n1, n2): raise Error("Test") _exception_handler(f, None, None) out, err = capfd.readouterr() assert "Test" in out
21,974
def get_last_row(dbconn, tablename, n=1, uuid=None): """ Returns the last `n` rows in the table """ return fetch(dbconn, tablename, n, uuid, end=True)
21,975
def checkAtomicElementAxes(project): """Check Atomic Element axes: - Are all defined axes used? - Are all used axes defined? - Are all axis values within the defined range? """ glyphSet = project.deepComponentGlyphSet compoGlyphSet = project.atomicElementGlyphSet yield from _checkComponentAxes(glyphSet, compoGlyphSet)
21,976
def get_start(period, reference_date: Optional[FlexDate] = None, strfdate="%Y-%m-%d") -> FlexDate:
    """
    Returns the first day of the given period for the reference_date.

    Period can be one of the following (plural forms are accepted too):
        {'decade', 'year', 'quarter', 'month', 'fortnight', 'week', 'day'}

    If reference_date is an instance of str, returns a string.
    If reference_date is an instance of datetime.date, returns a datetime.date instance.
    If reference_date is an instance of SmartDate, returns a SmartDate instance.
    If no reference_date is given, returns a SmartDate instance.

    Examples
    --------
    >>> # when no reference is given assume that it is datetime.date(2018, 5, 8)
    >>> get_start('month')
    SmartDate(2018, 5, 1)

    >>> get_start('quarter', '2017-05-15')
    '2017-04-01'

    >>> get_start('year', datetime.date(2017, 12, 12))
    datetime.date(2017, 1, 1)
    """
    start_functions = {
        "decade": _get_decade_start,
        "year": _get_year_start,
        "quarter": _get_quarter_start,
        "month": _get_month_start,
        "fortnight": _get_fortnight_start,
        "week": _get_week_start,
        "day": _get_day_start,
        "decades": _get_decade_start,
        "years": _get_year_start,
        "quarters": _get_quarter_start,
        "months": _get_month_start,
        "fortnights": _get_fortnight_start,
        "weeks": _get_week_start,
        "days": _get_day_start,
    }

    return start_functions[period](reference_date or SmartDate.today(), strfdate)
21,977
def prepare_lc_df(star_index, frame_info, magmatch, magx): """Prepare cleaned light curve data Add mag, mag_err, magx, and magx_err to info Remove nan values or too bright values in magx Args: star_index (int): index of the star frame_info (DataFrame): info data magmatch (array): raw photometry array magx (array): corrected photometry array Returns: lc (array): light curve data """ lc = frame_info.copy() lc = lc.assign(mag=magmatch[star_index, :, 0]) lc = lc.assign(mag_err=magmatch[star_index, :, 1]) lc = lc.assign(magx=magx[star_index, :, 0]) lc = lc.assign(magx_err=magx[star_index, :, 1]) lc = lc[~np.isnan(lc.magx) & (lc.magx > 1)] return lc
21,978
def _filter_nones(centers_list): """ Filters out `None` from input list Parameters ---------- centers_list : list List potentially containing `None` elements Returns ------- new_list : list List without any `None` elements """ return [c for c in centers_list if c is not None]
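# Hedged usage sketch (not from the source): only None is dropped; other falsy
# values such as 0 or '' are kept.
print(_filter_nones([(1.0, 2.0), None, (3.5, 0.0), None]))  # [(1.0, 2.0), (3.5, 0.0)]
print(_filter_nones([0, None, '']))                         # [0, '']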
21,979
def test_bbox_deltas_2d():
    """
    Test that `deltas` property returns the correct value for an example 2D
    region.
    """
    minp = [5, 2]
    maxp = [7, 83]
    expected = [2, 81]
    numpy.testing.assert_allclose(AxisAlignedBoundingBox(minp, maxp).deltas, expected)
21,980
def pseudo_shuffle_mat(ref_var, mat, replace=False, debug=False): """ Shuffles the data but keeps the time information (i.e. shuffles the velocity while keeping the time information intact) :param np.array ref_var: shape: n_accelerations :param np.array mat: shape: (n_trials, n_accelerations) :return: shuffled_mat :rtype: np.array """ shuffled_mat = np.zeros(mat.shape) n_accs, n_samples = mat.shape # TODO: use replace=True for velocity as well seeds = np.random.choice(np.arange(len(ref_var)), n_samples, replace=replace) for i in range(n_samples): seed = seeds[i] trial_vms = mat[:, i] shuffled_vms = np.hstack((trial_vms[seed:], trial_vms[:seed])) # FIXME: could be done with np.roll (TEST:) if debug: plt.plot(shuffled_vms) # (with fake traces) shuffled_mat[:, i] = shuffled_vms.copy() if debug: plt.show() return shuffled_mat
21,981
def CleanupDjangoSettings():
  """Removes incompatible entries from the django settings module."""
  # Ensure this module is installed as an application.
  apps = getattr(settings, "INSTALLED_APPS", ())
  found = False
  for app in apps:
    if app.endswith("appengine_django"):
      found = True
      break
  if not found:
    logging.warn("appengine_django module is not listed as an application!")
    apps += ("appengine_django",)
    setattr(settings, "INSTALLED_APPS", apps)
    logging.info("Added 'appengine_django' as an application")

  # Ensure the database backend is appropriately configured.
  dbe = getattr(settings, "DATABASE_ENGINE", "")
  if dbe != "appengine":
    settings.DATABASE_ENGINE = "appengine"
    logging.warn("DATABASE_ENGINE is not configured as 'appengine'. "
                 "Value overridden!")
  for var in ["NAME", "USER", "PASSWORD", "HOST", "PORT"]:
    val = getattr(settings, "DATABASE_%s" % var, "")
    if val:
      setattr(settings, "DATABASE_%s" % var, "")
      logging.warn("DATABASE_%s should be blank. Value overridden!" % var)

  # Remove incompatible middleware modules.
  mw_mods = list(getattr(settings, "MIDDLEWARE_CLASSES", ()))
  disallowed_middleware_mods = (
    'django.middleware.doc.XViewMiddleware',)
  if VERSION < (0, 97, None):
    # Sessions are only supported with Django 0.97.
    disallowed_middleware_mods += (
      'django.contrib.sessions.middleware.SessionMiddleware',)
  for modname in mw_mods[:]:
    if modname in disallowed_middleware_mods:
      # Currently only the CommonMiddleware has been ported. As other base
      # modules are converted, remove from the disallowed_middleware_mods
      # tuple.
      mw_mods.remove(modname)
      logging.warn("Middleware module '%s' is not compatible. Removed!" % modname)
  setattr(settings, "MIDDLEWARE_CLASSES", tuple(mw_mods))

  # Remove incompatible application modules
  app_mods = list(getattr(settings, "INSTALLED_APPS", ()))
  disallowed_apps = (
    'django.contrib.contenttypes',
    'django.contrib.sites',)
  if VERSION < (0, 97, None):
    # Sessions are only supported with Django 0.97.
    disallowed_apps += ('django.contrib.sessions',)
  for app in app_mods[:]:
    if app in disallowed_apps:
      app_mods.remove(app)
      logging.warn("Application module '%s' is not compatible. Removed!" % app)
  setattr(settings, "INSTALLED_APPS", tuple(app_mods))

  # Remove incompatible session backends.
  session_backend = getattr(settings, "SESSION_ENGINE", "")
  if session_backend.endswith("file"):
    logging.warn("File session backend is not compatible. Overridden "
                 "to use db backend!")
    setattr(settings, "SESSION_ENGINE", "django.contrib.sessions.backends.db")
21,982
def exec_in_subprocess(func, *args, poll_interval=0.01, timeout=None, **kwargs): """ Execute a function in a fork Args: func (:obj:`types.FunctionType`): function * args (:obj:`list`): list of positional arguments for the function poll_interval (:obj:`float`, optional): interval to poll the status of the subprocess timeout (:obj:`float`, optional): maximum execution time in seconds **kwargs (:obj:`dict`, optional): dictionary of keyword arguments for the function Returns: :obj:`object`: result of the function """ context_instance = multiprocessing.get_context('fork') queue = context_instance.Queue() process = Process(target=subprocess_target, args=[queue, func] + list(args), kwargs=kwargs) process.start() start_time = time.time() while process.exception is None: time.sleep(poll_interval) if timeout is not None and (time.time() - start_time) > timeout: raise TimeoutError('Execution did not complete in {} s.'.format(timeout)) if process.exception: raise process.exception results = queue.get() return results
21,983
def nroot_real_matplotlib(n, res=101):
    """ Plot the Riemann surface for the real part of the n'th root function. """
    x = np.linspace(-1, 1, res)
    X, Y = np.meshgrid(x, x, copy=False)
    Z = X + 1.0j * Y
    r = np.absolute(Z)
    theta = np.angle(Z)
    rroot = r**(1./n)
    theta /= n
    real = rroot * np.cos(theta)
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    ax.plot_surface(X[x>=0], Y[x>=0], real[x>=0])
    ax.plot_surface(X[x<0], Y[x<0], real[x<0])
    for i in range(1, n):
        theta += 2. * np.pi / n
        real = rroot * np.cos(theta)
        ax.plot_surface(X[x>=0], Y[x>=0], real[x>=0])
        ax.plot_surface(X[x<0], Y[x<0], real[x<0])
    plt.show()
21,984
def tour_de_jeu(minesweeper): """ Function called at each turn of play. Asks the player the coordinates of a cell and the type of movement that he wants to do. :param minesweeper: minesweeper game :minesweeper type: Minesweeper :return: None :CU: None """ r=input('Your play x,y,C (C=(R)eveal,(S)et,(U)nset):').split(',') C=r[2] x=int(r[0]) y=int(r[1]) if C=='R' or C=='r': try: if not minesweeper.grid[y][x].is_revealed(): minesweeper.reveal_all_cells_from(x,y) else: print('This cell has already been revealed') except IndexError: print("This cell doesn't exist") elif C=='S' or C=='s': try: minesweeper.grid[y][x].set_hypothetic() except IndexError: print("This cell doesn't exist") elif C=='U' or C=='u': try: minesweeper.grid[y][x].unset_hypothetic() except IndexError: print("This cell doesn't exist") else: print("This choice doesn't exist") tour_de_jeu(minesweeper)
21,985
def hold_numpy_printoptions(**kwargs): """ Temporarily set the numpy print options. See https://docs.scipy.org/doc/numpy/reference/generated/numpy.set_printoptions.html :param kwargs: Print options (see link) """ opts = np.get_printoptions() np.set_printoptions(**kwargs) yield np.set_printoptions(**opts)
21,986
def format_code(c, check=False): """Format code""" BLACK_CMD = ( f"black {apply_paths(CODE, TESTS, ADDITIONAL)} " f"--line-length {LINE_LENGTH}" ) if check: result = c.run(BLACK_CMD + " --check") if result.return_code != 0: exit(1) else: c.run(BLACK_CMD)
21,987
def get_cv_score_table(clf): """ Get a table (DataFrame) of CV parameters and scores for each combination. :param clf: Cross-validation object (GridSearchCV) :return: """ # Create data frame df = pd.DataFrame(list(clf.cv_results_['params'])) # Add test scores df['rank'] = clf.cv_results_['rank_test_score'] df['test_mean'] = clf.cv_results_['mean_test_score'] df['test_sd'] = clf.cv_results_['std_test_score'] # Add scores over training data df['train_mean'] = clf.cv_results_['mean_train_score'] df['train_sd'] = clf.cv_results_['std_train_score'] # Add time metrics (s) df['fit_time_mean'] = clf.cv_results_['mean_fit_time'] df['fit_time_sd'] = clf.cv_results_['std_fit_time'] df['score_time_mean'] = clf.cv_results_['mean_score_time'] df['score_time_sd'] = clf.cv_results_['std_score_time'] return df
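# Hedged usage sketch (not from the source): a small grid search over SVC hyper-parameters;
# return_train_score=True is needed because the table reads mean/std train scores.
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
clf = GridSearchCV(SVC(), {'C': [0.1, 1, 10], 'kernel': ['linear', 'rbf']},
                   cv=3, return_train_score=True)
clf.fit(X, y)
print(get_cv_score_table(clf).sort_values('rank').head())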
21,988
def test_environment_from_manifest(): """Test scenarios for loading manifest files.""" # load an invalid manifest (bad schema) manifest_path = get_resource(os.path.join("manifests", "test.yaml"), False) env = from_manifest(manifest_path) assert not env.get_valid() # make sure we can't double-load the manifest assert not env.load_manifest_with_cache(manifest_path) # load an invalid manifest (bad content) manifest_path = get_resource( os.path.join("manifests", "invalid.yaml"), False ) env = from_manifest(manifest_path) assert not env.get_valid() assert env.compile("a") == (False, False) env.clean_cache()
21,989
def message(self, update, context): """Receive message, generate response, and send it back to the user.""" max_turns_history = self.chatbot_params.get('max_turns_history', 2) giphy_prob = self.chatbot_params.get('giphy_prob', 0.1) giphy_max_words = self.chatbot_params.get('giphy_max_words', 10) if 'turns' not in context.chat_data: context.chat_data['turns'] = [] turns = context.chat_data['turns'] user_message = update.message.text return_gif = False if '@gif' in user_message: # Return gif return_gif = True user_message = user_message.replace('@gif', '').strip() if max_turns_history == 0: context.chat_data['turns'] = [] # A single turn is a group of user messages and bot responses right after turn = { 'user_messages': [], 'bot_messages': [] } turns.append(turn) turn['user_messages'].append(user_message) logger.debug(f"{update.effective_message.chat_id} - User: {user_message}") # Merge turns into a single prompt (don't forget EOS token) prompt = "" from_index = max(len(turns) - max_turns_history - 1, 0) if max_turns_history >= 0 else 0 for turn in turns[from_index:]: # Each turn begins with user messages for user_message in turn['user_messages']: prompt += clean_text(user_message) + self.generation_pipeline.tokenizer.eos_token for bot_message in turn['bot_messages']: prompt += clean_text(bot_message) + self.generation_pipeline.tokenizer.eos_token # Generate bot messages bot_messages = generate_responses( prompt, self.generation_pipeline, seed=self.seed, debug=self.debug, **self.generator_kwargs ) if len(bot_messages) == 1: bot_message = bot_messages[0] else: bot_message = pick_best_response( prompt, bot_messages, self.ranker_dict, debug=self.debug ) turn['bot_messages'].append(bot_message) logger.debug(f"{update.effective_message.chat_id} - Bot: {bot_message}") # Return response as text update.message.reply_text(bot_message) if len(bot_message.split()) <= giphy_max_words and random.random() < giphy_prob: return_gif = True if return_gif: # Also return the response as a GIF gif_url = translate_message_to_gif(bot_message, **self.chatbot_params) context.bot.send_animation(update.effective_message.chat_id, gif_url)
21,990
async def test_setup_depose_user(hass): """Test set up and despose user.""" notify_auth_module = await auth_mfa_module_from_config(hass, {"type": "notify"}) await notify_auth_module.async_setup_user("test-user", {}) assert len(notify_auth_module._user_settings) == 1 await notify_auth_module.async_setup_user("test-user", {}) assert len(notify_auth_module._user_settings) == 1 await notify_auth_module.async_depose_user("test-user") assert len(notify_auth_module._user_settings) == 0 await notify_auth_module.async_setup_user("test-user2", {"secret": "secret-code"}) assert len(notify_auth_module._user_settings) == 1
21,991
def test_extra_pol_setup(): """Test reading in an ms file with extra polarization setups (not used in data).""" uvobj = UVData() testfile = os.path.join( DATA_PATH, "X5707_1spw_1scan_10chan_1time_1bl_noatm.ms.tar.gz" ) import tarfile with tarfile.open(testfile) as tf: new_filename = os.path.join(DATA_PATH, tf.getnames()[0]) tf.extractall(path=DATA_PATH) uvobj.read(new_filename, file_type="ms") # delete the untarred folder shutil.rmtree(new_filename)
21,992
def show_image_matrix(images, titles=None, suptitle=None): """Displays a matrix of images in matplotlib.""" rows = len(images) columns = len(images[0]) fig = plt.figure(figsize=(columns + 1, rows + 1)) # Avoid large blank margins fig.set_dpi(images[0][0].shape[0]) # Preserve original image size if suptitle: fig.suptitle(suptitle) for i, row in enumerate(images): for j, image in enumerate(row): ax = fig.add_subplot(rows, columns, i * columns + j + 1) ax.axis('off') if image is None: image = np.ones(images[0][0].shape) # Placeholder ax.imshow(image, cmap='gray', interpolation='none', vmin=0, vmax=1) if titles: ax.set_title(titles[i][j], fontsize=6) plt.margins(0, 0) plt.show()
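# Hedged usage sketch (not from the source): a 2x2 grid of random grayscale patches,
# with one empty slot rendered via the white placeholder.
import numpy as np

rng = np.random.default_rng(0)
imgs = [[rng.random((64, 64)), rng.random((64, 64))],
        [rng.random((64, 64)), None]]
titles = [['noise A', 'noise B'], ['noise C', 'empty']]
show_image_matrix(imgs, titles=titles, suptitle='Random patches')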
21,993
def model_handle_check(model_type):
    """
    Checks the model_type and model_handle on the decorated API function. model_type is an
    argument to this decorator; the wrapper intercepts the model_handle keyword argument and
    checks that it is present in the MODEL_REGISTER, so the API function must accept
    model_handle.

    Args:
        model_type: the "type" of the model, as specified in the MODEL_REGISTER

    Returns:
        wrapped api function
    """

    def decorator(api_func):
        @wraps(api_func)
        def wrapper(*args, model_handle, **kwargs):
            if model_handle not in MODEL_REGISTER:
                return make_response(
                    jsonify(
                        {"error": f"{model_handle} not found in registered models"}
                    ),
                    404,
                )

            if (
                model_handle in MODEL_REGISTER
                and MODEL_REGISTER[model_handle]["type"] != model_type
            ):
                return make_response(
                    jsonify({"error": f"{model_handle} model is not an {model_type}"}),
                    412,
                )

            return api_func(*args, model_handle=model_handle, **kwargs)

        return wrapper

    return decorator
21,994
def train_model_mixed_data(type_tweet, split_index, custom_tweet_data = pd.Series([]), stop_words = "english"):
    """
    Prepares count-vectorized data for a Bayes model. Modified train_model() with custom splitting of data.

    :param type_tweet: DataFrame with 'tweet' and 'class' columns
    :param split_index: row index at which to split the data into train and test parts
    :param custom_tweet_data: if provided, this is used instead of test data for prediction
    :param stop_words: unused here; the vectorizer keeps all stopwords (see comment below)
    :return: training_data, testing_data, label_train, label_test
    """
    data_train = type_tweet['tweet'][:split_index]
    label_train = type_tweet['class'][:split_index]
    data_test = type_tweet['tweet'][split_index:]
    label_test = type_tweet['class'][split_index:]

    # probably better to not remove any stopwords
    count_vector = CountVectorizer(stop_words=[])

    # Fit training data and return a matrix
    training_data = count_vector.fit_transform(data_train)

    # Transform testing data and return a matrix.
    if not custom_tweet_data.empty:
        testing_data = count_vector.transform(custom_tweet_data)
    else:
        testing_data = count_vector.transform(data_test)

    return training_data, testing_data, label_train, label_test
21,995
def _fit_curves(ns, ts): """Fit different functional forms of curves to the times. Parameters: ns: the value of n for each invocation ts: the measured run time, as a (len(ns), reps) shape array Returns: scores: normalised scores for each function coeffs: coefficients for each function names: names of each function fns: the callable for each function in turn. """ # compute stats med_times = np.median(ts, axis=1) # fit and score complexities scores = [] coeffs = [] names = [] fns = [] ns = np.array(ns) ts = np.array(med_times) for c_name, c_fn in complexities.items(): res = scipy.optimize.minimize_scalar( complexity_fit, bracket=[1e-5, 1e5], args=(c_fn, ns, ts) ) scores.append(res.fun) coeffs.append(res.x) names.append(c_name) fns.append(c_fn) scores = 1.0 / np.sqrt(np.array(scores)) tot_score = np.sum(scores) scores = scores / tot_score return scores, coeffs, names, fns
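# Hedged sketch (not from the source): plausible definitions for the module-level
# `complexities` and `complexity_fit` names the function relies on, plus a call on
# synthetic O(n log n) timings.
import numpy as np
import scipy.optimize

complexities = {
    'O(n)': lambda n: n,
    'O(n log n)': lambda n: n * np.log(n),
    'O(n^2)': lambda n: n ** 2,
}

def complexity_fit(coeff, c_fn, ns, ts):
    # squared error between the scaled candidate curve and the measured times
    return float(np.sum((coeff * c_fn(ns) - ts) ** 2))

rng = np.random.default_rng(0)
ns = np.array([100, 200, 400, 800])
ideal = 1e-6 * ns * np.log(ns)
ts = ideal[:, None] * (1.0 + 0.05 * rng.random((len(ns), 5)))  # 5 noisy repetitions
scores, coeffs, names, fns = _fit_curves(ns, ts)
print(names[int(np.argmax(scores))])  # expected to favour 'O(n log n)'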
21,996
def isolate_integers(string): """Isolate positive integers from a string, returns as a list of integers.""" return [int(s) for s in string.split() if s.isdigit()]
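# Hedged usage sketch (not from the source): only whitespace-separated tokens made
# purely of digits are kept, so negatives and decimals are ignored.
print(isolate_integers("ran 3 trials in 42 s, error -7, rate 0.5"))  # [3, 42]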
21,997
def start_thread(func): """Start a thread.""" thread = threading.Thread(target=func) thread.start()
21,998
def extractAFlappyTeddyBird(item): """ # A Flappy Teddy Bird """ vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or 'preview' in item['title'].lower(): return None if 'The Black Knight who was stronger than even the Hero' in item['title']: return buildReleaseMessageWithType(item, 'The Black Knight Who Was Stronger than Even the Hero', vol, chp, frag=frag, postfix=postfix) return False
21,999