content: string (length 35 to 762k) · sha1: string (length 40) · id: int64 (0 to 3.66M)
def archived_minute(dataSet, year, month, day, hour, minute):
    """
    Input: a dataset and a specific minute
    Output: a list of ride details at that minute, or -1 if no ride occurred during that minute
    """
    year = str(year)
    month = str(month)
    day = str(day)
    # Convert hour and minute into zero-padded two-digit strings
    hour = "%02d" % hour
    minute = "%02d" % minute
    timeStamp = month + '/' + day + '/' + year + ' ' + hour + ':' + minute + ':' + '00'
    if timeStamp in dataSet:
        return dataSet[timeStamp]
    return -1
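A quick usage sketch for the lookup above; the sample dictionary and ride details here are made up for illustration (note that only the hour and minute are zero-padded when the key is built):

rides = {"6/1/2015 09:05:00": ["ride_1", "ride_2"]}
assert archived_minute(rides, 2015, 6, 1, 9, 5) == ["ride_1", "ride_2"]
assert archived_minute(rides, 2015, 6, 1, 9, 6) == -1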
e550cb8ae5fbcfcc2a0b718dc2e4f3372f100015
3,637,300
import numpy as np


def Rq(theta, vect):
    """Return a 3x3 matrix representing a rotation of angle theta about the vect axis.

    Parameters
    ----------
    theta : float
        Rotation angle in radians.
    vect : list of float or array
        Vector about which the rotation happens.
    """
    I = np.matrix(np.identity(3))
    Q = np.matrix(np.zeros((3, 3)))
    Q[0, 1] = -vect[2]
    Q[0, 2] = vect[1]
    Q[1, 2] = -vect[0]
    Q[1, 0] = -Q[0, 1]
    Q[2, 0] = -Q[0, 2]
    Q[2, 1] = -Q[1, 2]
    # Rodrigues' rotation formula: R = I + sin(theta) * Q + (1 - cos(theta)) * Q^2
    res = I + np.sin(theta) * Q + (1 - np.cos(theta)) * Q ** 2
    return res
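A minimal sanity check of Rq, assuming only numpy: a 90-degree rotation about the z-axis should map the x unit vector onto the y unit vector.

import numpy as np

R = Rq(np.pi / 2, [0, 0, 1])
v = R @ np.array([1.0, 0.0, 0.0])
print(np.round(np.asarray(v).ravel(), 6))  # -> [0. 1. 0.]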
069ef6568df170179a728b59dfb6a03dca0fbb75
3,637,301
def ExpsMaintPol():
    """Maintenance expense per policy"""
    return asmp.ExpsMaintPol.match(prod, polt, gen).value
778d77d860378deeceee33e20a996e667430f851
3,637,302
def inputRead(c, inps):
    """
    Reads the tokens in the input channels (Queues) given by the list inps,
    using the token rates defined by the list c. It outputs a list where each
    element is a list of the read tokens.

    Parameters
    ----------
    c : [int]
        List of token consumption rates.
    inps : [Queue]
        List of channels.

    Returns
    ----------
    inputs : [List]
        List of token lists.
    """
    if len(c) != len(inps):
        raise Exception("Token consumption list and Queue list have different sizes")
    inputs = []
    for i in range(len(c)):
        aux = []
        for j in range(c[i]):
            aux.append(inps[i].get())
        inputs.append(aux)
    return inputs
ea70548f7da4fae66fe5196734bbf39deb255537
3,637,303
def _split_schema_abstract(s):
    """
    split the schema abstract into fields

    >>> _split_schema_abstract("a b c")
    ['a', 'b', 'c']
    >>> _split_schema_abstract("a(a b)")
    ['a(a b)']
    >>> _split_schema_abstract("a b[] c{a b}")
    ['a', 'b[]', 'c{a b}']
    >>> _split_schema_abstract(" ")
    []
    """
    r = []
    w = ''
    brackets = []
    for c in s:
        if c == ' ' and not brackets:
            if w:
                r.append(w)
            w = ''
        else:
            w += c
            if c in _BRACKETS:
                brackets.append(c)
            elif c in _BRACKETS.values():
                if not brackets or c != _BRACKETS[brackets.pop()]:
                    raise ValueError("unexpected " + c)
    if brackets:
        raise ValueError("brackets not closed: %s" % brackets)
    if w:
        r.append(w)
    return r
ba1fba44979074b34adf87173a1277c212bd93e8
3,637,304
import time


def myfn(n):
    """Print hello world.

    Prints one "hello world" per second, n times in total.
    """
    if n == 1:
        print("hello world!")
        return
    else:
        print("hello world!")
        time.sleep(1)  # the docstring promises a one-second interval
        return myfn(n - 1)
4405e8b4c591c435d43156283c0d8e2aa9860055
3,637,305
def name(ea, **flags):
    """Return the name defined at the address specified by `ea`.

    If `flags` is specified, then use the specified value as the flags.
    """
    ea = interface.address.inside(ea)

    # figure out what default flags to use
    fn = idaapi.get_func(ea)

    # figure out which name function to call
    if idaapi.__version__ < 6.8:
        # if get_true_name is going to return the function's name instead of a
        # real one, then leave it as unnamed.
        if fn and interface.range.start(fn) == ea and not flags:
            return None
        aname = idaapi.get_true_name(ea) or idaapi.get_true_name(ea, ea)
    else:
        aname = idaapi.get_ea_name(ea, flags.get('flags', idaapi.GN_LOCAL))

    # return the name at the specified address, or None
    return utils.string.of(aname) or None
7c0d938f5f4112f08749e1f412403d0da7ebf4d1
3,637,306
import numpy as np
import pandas as pd


def estimate_distance(row: pd.DataFrame, agent_x: float, agent_y: float):
    """
    Helper function to estimate the distance from the AGENT to the other vehicles.
    This function should be applied row by row.

    Args:
        row: (pd.DataFrame) a single row with "center_x" and "center_y" values
        agent_x: (float) x coordinate of agent
        agent_y: (float) y coordinate of agent
    Returns:
        (pd.DataFrame) the row with an added "distance" value
    """
    # Euclidean distance between the vehicle center and the agent position
    row["distance"] = np.sqrt(
        (row["center_x"] - agent_x) ** 2 + (row["center_y"] - agent_y) ** 2
    )
    return row
c6d3dd9dbdcdde06baea95c8e0e56794d80aa0de
3,637,307
from .plot_methods import plot_spinpol_bands
from .bokeh_plots import bokeh_spinpol_bands


def spinpol_bands(kpath, eigenvalues_up, eigenvalues_dn, backend=None, data=None, **kwargs):
    """
    Plot the provided data for a bandstructure (spin-polarized). Non-weighted,
    weighted, as a line plot or scatter plot, color-mapped or fixed colors are
    all possible options.

    :param kpath: data for the kpoints path (flattened to 1D)
    :param eigenvalues_up: data for the eigenvalues for spin-up
    :param eigenvalues_dn: data for the eigenvalues for spin-down
    :param data: source for the data of the plot (optional) (a pandas DataFrame, for example)
    :param backend: name of the backend to use (uses a default if None is given)

    Kwargs are passed on to the backend plotting functions:

        - ``matplotlib``: :py:func:`~masci_tools.vis.plot_methods.plot_spinpol_bands()`
        - ``bokeh``: :py:func:`~masci_tools.vis.bokeh_plots.bokeh_spinpol_bands()`

    :returns: Figure object for the used plotting backend
    """
    plot_funcs = {PlotBackend.mpl: plot_spinpol_bands, PlotBackend.bokeh: bokeh_spinpol_bands}
    backend = PlotBackend.from_str(backend)
    return plot_funcs[backend](kpath, eigenvalues_up, eigenvalues_dn, data=data, **kwargs)
60df91e2a06b4ff2e943efebc4ff936fb55164dd
3,637,308
def valid_config_and_get_dates():
    """
    Validate the parameters of the config file and return the configured
    vaccine reservation dates.
    :return:
    """
    if config.global_config.getConfigSection("cookie") == "":
        raise Exception("Please configure the post-login cookie first; see README.MD for how to obtain it")
    if config.global_config.getConfigSection("date") == "":
        raise Exception("Please configure the reservation date first")
    valid_dates = get_dates()
    if len(valid_dates) == 0:
        raise Exception("The reservation date is missing or invalid (it must be today or later); please reconfigure it")
    return valid_dates
42310c1fa60331e2ae238f4c6ab5ab23940536b3
3,637,309
import requests


def check_internet_connection():
    """Checks if there is a working internet connection."""
    url = 'http://www.google.com/'
    timeout = 5
    try:
        requests.get(url, timeout=timeout)
        return True
    except requests.ConnectionError:
        return False
5f587e6077377196d2c89b39f5be5d6a2747e093
3,637,310
def wall_filter(points, img):
    """
    Filters away points that are inside walls.
    Works by checking where the refractive index is not 1.
    """
    deletion_mask = img[points[:, 0], points[:, 1]] != 1
    filtered_points = points[~deletion_mask]
    return filtered_points
05a34602e8a555eb1f1739f5de910a71514a92ae
3,637,311
import requests


def navigateResults(results):
    """Navigate all links, returning a list containing the urls and corresponding pages.

    results: [String] - List with links to be visited

    Return: {list}[{tuple}({String}url, {String}content)]
    """
    global BASE_ADDR
    ret = []
    for i in results:
        page = requests.get("%s%s" % (BASE_ADDR, i), verify=False)
        ret.append(["%s%s" % (BASE_ADDR, i), page.text])
    return ret
566da85528c7af46b29076c2b96eaf90f083becc
3,637,312
def _get_matching_signature(oper, args):
    """
    Search for the first operation signature matched by a list of arguments.

    Args:
        oper: Operation in which to search for a signature
        args: Candidate list of argument expressions

    Returns:
        Matching signature, or None if not found
    """
    # Search the corresponding signature
    return next((s for s in oper.signatures if _is_matching_arguments(s, args)), None)
21efb39c19f664ba0d79bcdbd1ab042bcaafffce
3,637,313
def format_size(num: int) -> str:
    """Format byte-sizes.

    :param num: Size given as number of bytes.

    .. seealso:: http://stackoverflow.com/a/1094933
    """
    for x in ['bytes', 'KB', 'MB', 'GB']:
        if num < 1024.0 and num > -1024.0:
            return "%3.1f%s" % (num, x)
        num /= 1024.0
    return "%3.1f%s" % (num, 'TB')
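A few example calls showing the expected formatting:

print(format_size(512))          # -> 512.0bytes
print(format_size(123456))       # -> 120.6KB
print(format_size(5 * 1024**3))  # -> 5.0GB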
349649fc1ee6069b2cfba1ac8aa3745aafc9c7fc
3,637,314
def hub_payload(hub):
    """Create response payload for a hub."""
    if hasattr(hub, "librarySectionID"):
        media_content_id = f"{HUB_PREFIX}{hub.librarySectionID}:{hub.hubIdentifier}"
    else:
        media_content_id = f"{HUB_PREFIX}server:{hub.hubIdentifier}"
    payload = {
        "title": hub.title,
        "media_class": MEDIA_CLASS_DIRECTORY,
        "media_content_id": PLEX_URI_SCHEME + media_content_id,
        "media_content_type": hub.type,
        "can_play": False,
        "can_expand": True,
    }
    return BrowseMedia(**payload)
e446ea607db6a665c94fab774ef46db70e1ed76f
3,637,315
def roq_transform(pressure, loading):
    """Rouquerol transform function."""
    return loading * (1 - pressure)
b69d83579cdb904cc7e3625a371e1f6c0573e44b
3,637,316
import numpy as np
from PIL import Image


def _cms_inmem(file_names):
    """
    Computes mean and standard deviation in an offline fashion. This is
    possible only when the dataset can be allocated in memory.

    Parameters
    ----------
    file_names : List of String
        List of file names of the dataset

    Returns
    -------
    mean : double
    std : double
    """
    img = np.zeros([file_names.size] + list(np.array(Image.open(file_names[0]).convert('RGB')).shape))

    # Load all samples
    for i, sample in enumerate(file_names):
        img[i] = np.array(Image.open(sample).convert('RGB'))

    # Per-channel mean and std, rescaled from [0, 255] to [0, 1]
    mean = np.array([np.mean(img[:, :, :, 0]), np.mean(img[:, :, :, 1]), np.mean(img[:, :, :, 2])]) / 255.0
    std = np.array([np.std(img[:, :, :, 0]), np.std(img[:, :, :, 1]), np.std(img[:, :, :, 2])]) / 255.0
    return mean, std
88f856bbe33ec0e819b81dc7f5ad5930db45beea
3,637,317
import numpy as np


def ddphi_spherical_zm(dd, ps_zm, r_e, lat, time_chunk=None):
    """
    Calculates the gradient in the meridional direction in a spherical system.
    It takes and returns xarray.DataArrays.

    inputs:
    dd        data xarray.DataArray with (latitude, time, level) or
              (latitude, time), or combinations thereof
    ps_zm     xr.DataArray, surface pressure with dimensions matching dd; no
              copying to additional dimensions needed. The 2nd dimension
              should be latitude if there are more than 2 dims.
    r_e       earth radius used in the spherical gradient
    lat       np.array, latitude values in degrees, same size as dd.latitude

    returns:
    xr.DataArray with the same dimensions as dd
    """
    # ensure correct chunks
    rechunk_dic = dict()
    for k in dd.dims:
        rechunk_dic[k] = dd[k].size
    if time_chunk is not None:
        rechunk_dic['time'] = time_chunk
    dd = dd.chunk(rechunk_dic)

    lat_radiens = lat * np.pi / 180.0
    cos_phi = np.cos(lat_radiens)

    if ps_zm is None:
        print('no ps weight lat gradient')
        ps_dummy = dd.isel(level=1) * 0 + 1
        grad_matrix = ps_dummy * r_e * cos_phi**2 * dd
    else:
        print('ps weight lat gradient')
        rechunk_dic = dict()
        for k in ps_zm.dims:
            rechunk_dic[k] = ps_zm[k].size
        if time_chunk is not None:
            rechunk_dic['time'] = time_chunk
        ps_zm = ps_zm.chunk(rechunk_dic)
        grad_matrix = ps_zm * r_e * cos_phi**2 * dd

    if lat.size != grad_matrix.shape[1]:
        grad_matrix = grad_matrix.T
    if lat.size != grad_matrix.shape[1]:
        raise ValueError('the 2nd dimension is not the same size as the latitude. '
                         'Make sure the input arrays have coordinates like '
                         '(time, latitude, level) or (time, latitude)')

    # central differences along latitude, normalized by the mean grid spacing
    grad_matrix_dphi = -grad_matrix.differentiate('latitude', edge_order=2) \
        / (4.0 * np.diff(lat_radiens).mean())

    if ps_zm is None:
        factor = r_e**2 * cos_phi**2
    else:
        factor = ps_zm * r_e**2 * cos_phi**2

    dd_return = grad_matrix_dphi / factor
    return dd_return
5cae237e3a1dc9646a0a288a6164880c482a82be
3,637,318
from methods import read_magmom_comp_data
import os
import itertools

import numpy as np
import pandas as pd


def process_group_magmom_comp(
    name=None,
    group=None,
    write_atoms_objects=False,
    verbose=False,
):
    """
    """
    #| - process_group_magmom_comp
    # #####################################################
    group_w_o = group

    # #####################################################
    out_dict = dict()
    out_dict["df_magmoms_comp"] = None
    out_dict["good_triplet_comb"] = None
    out_dict["job_ids"] = None

    job_ids_list = list(set(group.job_id_max.tolist()))

    #| - Reading data
    # #########################################################
    df_jobs = get_df_jobs()

    # #########################################################
    df_atoms_sorted_ind = get_df_atoms_sorted_ind()
    df_atoms_sorted_ind = df_atoms_sorted_ind.set_index("job_id")

    # #########################################################
    df_job_ids = get_df_job_ids()
    df_job_ids = df_job_ids.set_index("job_id")

    assert name is not None, "Must pass name to read previous data"
    magmom_comp_data_prev = read_magmom_comp_data(name=name)
    if magmom_comp_data_prev is not None:
        pair_wise_magmom_comp_data_prev = \
            magmom_comp_data_prev["pair_wise_magmom_comp_data"]
    #__|

    if write_atoms_objects:
        #| - Write atoms objects
        df_i = pd.concat([
            df_job_ids,
            df_atoms_sorted_ind.loc[
                group_w_o.job_id_max.tolist()
            ]
        ], axis=1, join="inner")

        # #########################################################
        df_index_i = group_w_o.index.to_frame()
        compenv_i = df_index_i.compenv.unique()[0]
        slab_id_i = df_index_i.slab_id.unique()[0]
        active_sites = [i for i in df_index_i.active_site.unique() if i != "NaN"]
        active_site_i = active_sites[0]
        folder_name = compenv_i + "__" + slab_id_i + "__" + str(int(active_site_i))

        # #########################################################
        for job_id_i, row_i in df_i.iterrows():
            job_id = row_i.name
            atoms = row_i.atoms_sorted_good
            ads = row_i.ads
            file_name = ads + "_" + job_id + ".traj"
            print("Is this saving to the right place d9sf")
            root_file_path = os.path.join("__temp__", folder_name)
            print(os.getcwd(), root_file_path)
            if not os.path.exists(root_file_path):
                os.makedirs(root_file_path)
            file_path = os.path.join(root_file_path, file_name)
            atoms.write(file_path)
        #__|

    # #####################################################
    #| - Getting good triplet combinations
    all_triplet_comb = list(itertools.combinations(
        group_w_o.job_id_max.tolist(), 3))

    good_triplet_comb = []
    for tri_i in all_triplet_comb:
        df_jobs_i = df_jobs.loc[list(tri_i)]

        # Triplet must not contain duplicate ads
        # Must strictly be a *O, *OH, and *bare triplet
        ads_freq_dict = CountFrequency(df_jobs_i.ads.tolist())
        tmp_list = list(ads_freq_dict.values())
        any_repeat_ads = [True if i > 1 else False for i in tmp_list]
        if not any(any_repeat_ads):
            good_triplet_comb.append(tri_i)
    #__|

    # #####################################################
    #| - MAIN LOOP
    if verbose:
        print(
            "Number of viable triplet combinations:",
            len(good_triplet_comb)
        )

    data_dict_list = []
    pair_wise_magmom_comp_data = dict()
    for tri_i in good_triplet_comb:
        #| - Process triplets
        data_dict_i = dict()
        if verbose:
            print("tri_i:", tri_i)
        all_pairs = list(itertools.combinations(tri_i, 2))
        df_jobs_i = df_jobs.loc[list(tri_i)]
        sum_norm_abs_magmom_diff = 0.

        for pair_i in all_pairs:
            # reuse previously computed pair data when available
            if (magmom_comp_data_prev is not None) and \
                    (pair_i in list(pair_wise_magmom_comp_data_prev.keys())):
                magmom_data_out = pair_wise_magmom_comp_data_prev[pair_i]
            else:
                #| - Process pairs
                row_jobs_0 = df_jobs.loc[pair_i[0]]
                row_jobs_1 = df_jobs.loc[pair_i[1]]

                ads_0 = row_jobs_0.ads
                ads_1 = row_jobs_1.ads

                # #############################################
                if set([ads_0, ads_1]) == set(["o", "oh"]):
                    job_id_0 = df_jobs_i[df_jobs_i.ads == "o"].iloc[0].job_id
                    job_id_1 = df_jobs_i[df_jobs_i.ads == "oh"].iloc[0].job_id
                elif set([ads_0, ads_1]) == set(["o", "bare"]):
                    job_id_0 = df_jobs_i[df_jobs_i.ads == "bare"].iloc[0].job_id
                    job_id_1 = df_jobs_i[df_jobs_i.ads == "o"].iloc[0].job_id
                elif set([ads_0, ads_1]) == set(["oh", "bare"]):
                    job_id_0 = df_jobs_i[df_jobs_i.ads == "bare"].iloc[0].job_id
                    job_id_1 = df_jobs_i[df_jobs_i.ads == "oh"].iloc[0].job_id
                else:
                    print("Whoops, something went wrong here")

                # #############################################
                row_atoms_i = df_atoms_sorted_ind.loc[job_id_0]
                atoms_0 = row_atoms_i.atoms_sorted_good
                magmoms_sorted_good_0 = row_atoms_i.magmoms_sorted_good
                was_sorted_0 = row_atoms_i.was_sorted

                # #############################################
                row_atoms_i = df_atoms_sorted_ind.loc[job_id_1]
                atoms_1 = row_atoms_i.atoms_sorted_good
                magmoms_sorted_good_1 = row_atoms_i.magmoms_sorted_good
                was_sorted_1 = row_atoms_i.was_sorted

                # #############################################
                magmom_data_out = get_magmom_diff_data(
                    ads_atoms=atoms_1,
                    slab_atoms=atoms_0,
                    ads_magmoms=magmoms_sorted_good_1,
                    slab_magmoms=magmoms_sorted_good_0,
                )
                #__|

            pair_wise_magmom_comp_data[pair_i] = magmom_data_out
            tot_abs_magmom_diff = magmom_data_out["tot_abs_magmom_diff"]
            norm_abs_magmom_diff = magmom_data_out["norm_abs_magmom_diff"]

            if verbose:
                print("  ", "pair_i: ", pair_i, ": ",
                      np.round(norm_abs_magmom_diff, 3), sep="")

            sum_norm_abs_magmom_diff += norm_abs_magmom_diff

        # #################################################
        data_dict_i["job_ids_tri"] = set(tri_i)
        data_dict_i["sum_norm_abs_magmom_diff"] = sum_norm_abs_magmom_diff
        # #################################################
        data_dict_list.append(data_dict_i)
        #__|
    #__|

    # #####################################################
    df_magmoms_i = pd.DataFrame(data_dict_list)

    # #####################################################
    out_dict["df_magmoms_comp"] = df_magmoms_i
    out_dict["good_triplet_comb"] = good_triplet_comb
    out_dict["pair_wise_magmom_comp_data"] = pair_wise_magmom_comp_data
    out_dict["job_ids"] = job_ids_list
    # #####################################################
    return out_dict
    #__|
d0e466b7282234c4dde326666ddbf1a51b3370bb
3,637,319
import numpy as np


def precompute_dgmatrix(set_gm_minmax, res=0.1, adopt=True):
    """Precompute the MODIT grid matrix for normalized gammaL.

    Args:
        set_gm_minmax: set of gm_minmax for different parameters
            [Nsample, Nlayers, 2], where 2 = min, max
        res: grid resolution. res=0.1 (default) means one grid point per digit
        adopt: if True, min and max grid points are placed at the min and max
            values of x. In this case the grid width does not need to be
            exactly res.

    Returns:
        grid for DIT (Nlayer x NDITgrid)
    """
    set_gm_minmax = np.array(set_gm_minmax)
    lminarray = np.min(set_gm_minmax[:, :, 0], axis=0)  # min
    lmaxarray = np.max(set_gm_minmax[:, :, 1], axis=0)  # max
    dlog = np.max(lmaxarray - lminarray)
    gm = []
    Ng = (dlog / res).astype(int) + 2
    Nlayer = len(lminarray)
    for i in range(0, Nlayer):
        lxmin = lminarray[i]
        lxmax = lmaxarray[i]
        if not adopt:
            grid = np.logspace(lxmin, lxmin + (Ng - 1) * res, Ng)
        else:
            grid = np.logspace(lxmin, lxmax, Ng)
        gm.append(grid)
    gm = np.array(gm)
    return gm
b007c4ec9f1aec9af364abc40ed903e9db66482c
3,637,320
def get_image_urls(ids):
    """Map ids to image URLs."""
    return [f"http://127.0.0.1:8000/{id}" for id in ids]
a70cd4eea39ea277c82ccffac2e9b7d68dd7c801
3,637,321
def partition(n: int) -> int:
    """Pure Python partition function, ported to Python from SageMath.

    A000041 implemented by Peter Luschny.

        @CachedFunction
        def A000041(n):
            if n == 0: return 1
            S = 0; J = n-1; k = 2
            while 0 <= J:
                T = A000041(J)
                S = S+T if is_odd(k//2) else S-T
                J -= k if is_odd(k) else k//2
                k += 1
            return S
    """
    if n in _p.keys():
        return _p[n]
    if not n:
        return 1
    sum, j, k = EMPTY_SUM, dec(n), 2
    while j >= 0:
        t = partition(j)
        if k // 2 % 2:
            sum += t
        else:
            sum -= t
        if k % 2:
            j -= k
        else:
            j -= k // 2
        k += 1
    _p[n] = sum
    return sum
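The snippet leans on three module-level names it does not define; a minimal sketch of plausible definitions (these are assumptions, not the original module's code):

_p = {}          # memoization cache, assumed to be a plain dict
EMPTY_SUM = 0    # assumed neutral element for the running sum

def dec(n):
    return n - 1  # assumed decrement helper

# With those in place, partition(10) returns 42, matching OEIS A000041.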
e30279029e8fd3ec012020fcf9e088822577e3d3
3,637,322
from socket import ntohs


def port_to_host_int(port: int) -> int:
    """Convert a port from network byte order to host byte order (little endian on most machines).

    Args:
        port (int): the big endian port to be converted

    Returns:
        int: the host byte order representation of the port
    """
    return ntohs(port)
69506efe702826ca55e013cfe7f064c61994beb9
3,637,323
import numpy as np


def inv_median(a):
    """
    Inverse of the median of array a.

    This can be used as the `scale` argument of ccdproc.combine when
    combining flat frames. See CCD Data Reduction Guide Sect. 4.3.1.
    """
    return 1 / np.median(a)
3612b6b334781c44677c3be91d876b69ec6cd6d3
3,637,324
def mpdisted(dask_client, T_A, T_B, m, percentage=0.05, k=None, normalize=True):
    """
    Compute the z-normalized matrix profile distance (MPdist) measure between
    any two time series with a distributed dask cluster.

    The MPdist distance measure considers two time series to be similar if
    they share many subsequences, regardless of the order of matching
    subsequences. MPdist concatenates and sorts the output of an AB-join and
    a BA-join and returns the value of the `k`th smallest number as the
    reported distance. Note that MPdist is a measure and not a metric.
    Therefore, it does not obey the triangular inequality, but the method is
    highly scalable.

    Parameters
    ----------
    dask_client : client
        A Dask Distributed client that is connected to a Dask scheduler and
        Dask workers. Setting up a Dask distributed cluster is beyond the
        scope of this library. Please refer to the Dask Distributed
        documentation.
    T_A : ndarray
        The first time series or sequence for which to compute the matrix profile
    T_B : ndarray
        The second time series or sequence for which to compute the matrix profile
    m : int
        Window size
    percentage : float, default 0.05
        The percentage of distances that will be used to report `mpdist`. The
        value is between 0.0 and 1.0. This parameter is ignored when `k` is
        not `None`.
    k : int
        Specify the `k`th value in the concatenated matrix profiles to
        return. When `k` is not `None`, then the `percentage` parameter is
        ignored.
    normalize : bool, default True
        When set to `True`, this z-normalizes subsequences prior to computing
        distances. Otherwise, this function gets re-routed to its
        complementary non-normalized equivalent set in the
        `@core.non_normalized` function decorator.

    Returns
    -------
    MPdist : float
        The matrix profile distance

    Notes
    -----
    `DOI: 10.1109/ICDM.2018.00119 \
    <https://www.cs.ucr.edu/~eamonn/MPdist_Expanded.pdf>`__

    See Section III
    """
    return _mpdist(T_A, T_B, m, percentage, k, dask_client=dask_client, mp_func=stumped)
5ba6455228901e528909a63764a5a9d4fbdef2b3
3,637,325
def naive_pipeline_2() -> Pipeline:
    """Generate pipeline with NaiveModel(2)."""
    pipeline = Pipeline(model=NaiveModel(2), transforms=[], horizon=7)
    return pipeline
a14cef26c6970260435ddd5a17762e8dfa98e51a
3,637,326
import torch


def caption_image_batch(encoder, decoder, images, word_map, device, max_length):
    """
    Encodes a batch of images and captions them with greedy decoding.

    :param encoder: encoder model
    :param decoder: decoder model
    :param images: batch of images
    :param word_map: word map
    :param device: torch device to run decoding on
    :param max_length: maximum caption length before decoding is cut off
    :return: caption lengths for the batch
    """
    # Encode
    encoder_out = encoder(images)  # (batch_size, enc_image_size, enc_image_size, encoder_dim)
    batch_size = encoder_out.size(0)
    encoder_dim = encoder_out.size(3)

    # Flatten encoding
    encoder_out = encoder_out.view(batch_size, -1, encoder_dim)  # (batch_size, num_pixels, encoder_dim)

    # Tensor to store the previous word at each step; initially <start>
    k_prev_words = torch.LongTensor([[word_map['<start>']]] * batch_size)  # (batch_size, 1)

    # Tensor to store the sequences; initially just <start>
    seqs = k_prev_words  # (batch_size, 1)

    # Set of indices of completed sequences
    complete_seqs = set()

    # Start decoding
    step = 1
    h, c = decoder.init_hidden_state(encoder_out)

    # sequences are counted as complete once they hit <end>
    while len(complete_seqs) < batch_size:
        embeddings = decoder.embedding(k_prev_words.to(device)).squeeze(1)  # (batch_size, embed_dim)
        awe, alpha = decoder.attention(encoder_out, h)  # (batch_size, encoder_dim), (batch_size, num_pixels)
        gate = decoder.sigmoid(decoder.f_beta(h))  # gating scalar, (batch_size, encoder_dim)
        awe = gate * awe
        h, c = decoder.decode_step(torch.cat([embeddings, awe], dim=1), (h, c))  # (batch_size, decoder_dim)
        scores = decoder.fc(h)  # (batch_size, vocab_size)
        _, next_word_inds = scores.max(1)
        next_word_inds = next_word_inds.cpu()

        # Append the new words to the sequences
        seqs = torch.cat([seqs, next_word_inds.unsqueeze(1)], dim=1)  # (batch_size, step+1)

        # Which sequences are incomplete (didn't reach <end>)?
        incomplete_inds = [ind for ind, next_word in enumerate(next_word_inds)
                           if next_word != word_map['<end>']]
        complete_inds = set(range(batch_size)) - set(incomplete_inds)
        complete_seqs.update(complete_inds)

        k_prev_words = next_word_inds.unsqueeze(1)

        # Break if things have been going on too long
        if step > max_length:
            break
        step += 1

    # Terminate every sequence with <end> and report the length up to that token
    k_end_words = torch.LongTensor([[word_map['<end>']]] * batch_size)  # (batch_size, 1)
    seqs = torch.cat([seqs, k_end_words], dim=1)  # (batch_size, step+1)
    seq_length = [s.tolist().index(word_map['<end>']) for s in seqs]
    return seq_length
192def399d6b05947df7bac06e90836771a22dda
3,637,327
import numpy as np
from scipy import optimize


def wasserstein_distance(p, q, C):
    """Wasserstein distance computed via linear programming.

    p.shape = (m,), q.shape = (n,), C.shape = (m, n).
    p and q must be normalized probability distributions.
    """
    p = np.array(p)
    q = np.array(q)
    # build the equality constraints: row sums of the transport plan equal p,
    # column sums equal q
    A_eq = []
    for i in range(len(p)):
        A = np.zeros_like(C)
        A[i, :] = 1.0
        A_eq.append(A.reshape((-1,)))
    for i in range(len(q)):
        A = np.zeros_like(C)
        A[:, i] = 1.0
        A_eq.append(A.reshape((-1,)))
    A_eq = np.array(A_eq)
    b_eq = np.concatenate([p, q], axis=0)
    C = np.array(C).reshape((-1,))
    # drop the last (redundant) equality constraint so the system has full rank
    return optimize.linprog(
        c=C,
        A_eq=A_eq[:-1],
        b_eq=b_eq[:-1],
        method="interior-point",
        options={"cholesky": False, "sym_pos": True}
    ).fun
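A small sanity check of the linear program (assuming scipy is available): moving half of the mass across a unit-cost gap should give a distance of 0.5.

p = [0.5, 0.5]
q = [1.0, 0.0]
C = [[0.0, 1.0],
     [1.0, 0.0]]
print(wasserstein_distance(p, q, C))  # -> ~0.5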
3a1e1836f5ec9975b30dafff06d8a4eaee90e482
3,637,328
import numpy as np


def median_cutoff_points(ventricular_rate, ponset, toffset):
    """Calculate the median cutoff start and end points."""
    ponset = 0 if np.isnan(ponset) else int(ponset)
    toffset = 600 if np.isnan(toffset) else int(toffset)

    # limit the onset and offset to be in the range of 0-600 and
    # take some margin of 10ms (5*2) on the start and end indices
    margin = 5
    ponset = max(ponset - margin, 0)
    toffset = min(toffset, 600)

    if np.isnan(ventricular_rate):
        end = 600
    else:
        # calculate the average number of points between the QRS complexes
        rr_interval = (1 * 60 * 1000 / 2) / ventricular_rate
        # say that the end of a beat would be around the onset of the P wave
        # plus the avg. duration of one beat
        end = min(ponset + margin + rr_interval, 600)
        # if the GE measured T wave offset is larger than our calculated beat
        # endpoint, take the measured T wave offset
        end = max(end, toffset)

    if not np.isinf(end):
        end = int(end)

    return ponset, end
28de4a6ec172a052cb3a819d21ae8371ba68a469
3,637,329
from matplotlib.colors import LinearSegmentedColormap


def make_colormap(color_palette, N=256, gamma=1.0):
    """
    Create a linear colormap from a color palette.

    Parameters
    ----------
    color_palette : str, list, or dict
        A color string, list of color strings, or color palette dict
    N : int
        Number of entries in the colormap lookup table
    gamma : float
        Gamma correction applied to the segment interpolation

    Returns
    -------
    cmap : LinearSegmentedColormap
        A colormap object based on color_palette using linear segments.
    """
    colors = extract_palette(color_palette)
    rgb = map(hex2rgb, colors)
    # pass gamma through instead of hard-coding 1.0
    return LinearSegmentedColormap.from_list('custom', list(rgb), N=N, gamma=gamma)
78fee496532616d3d4a8d7bee6ee5ec7637750f5
3,637,330
import numpy as np


def frac_correct(t):
    """Compute fraction correct and confidence interval of trials t."""
    assert np.all(t.outcome.values < 2)
    frac = t.outcome.mean()
    conf = confidence(frac, len(t))
    return frac, conf
82ed3ae7e6e9a34933ff7c07a2ca54faf563b235
3,637,331
def run_location(tokens, description):
    """Identifies the indices of matching text in the lines.

    Arguments:
        tokens (list): A list of strings, serialized from the GUI.
        description (CourseDescription): The course to be matched against.

    Returns:
        list: List of lists of index positions.
    """
    indices = run_text(tokens[1], description)
    result = []
    if tokens[0] == 'before':
        for line_indices in indices:
            result.append([start for start, end in line_indices])
    elif tokens[0] == 'after':
        for line_indices in indices:
            result.append([end for start, end in line_indices])
    return result
b2f7867319620f8a46c07727494cc8dbf85d5084
3,637,332
from functools import reduce


def SUM(r, expression=lambda trx: None):  # todo: None ok? cause error = good?
    """Sum expression"""
    # if no expression is given but the relation has exactly 1 attribute, use it
    if expression.__code__.co_consts == (None,) and len(r._heading) == 1:
        expression = lambda trx: trx[r._heading[0]]
    return reduce(lambda x, y: x + y, (expression(tr) for tr in r._scan()), 0)
5bfba45f88d06187635c906ce4a0e1490888ed16
3,637,333
import numpy as np
from past.utils import old_div


def domean(data, start, end, calculation_type):
    """
    Gets average direction using Fisher or principal component analysis (line
    or plane) methods.

    Parameters
    ----------
    data : nested list of data: [[treatment, dec, inc, int, quality], ...]
    start : step being used as start of fit (often temperature minimum)
    end : step being used as end of fit (often temperature maximum)
    calculation_type : string describing type of calculation to be made
        'DE-BFL' (line), 'DE-BFL-A' (line-anchored), 'DE-BFL-O'
        (line-with-origin), 'DE-BFP' (plane), 'DE-FM' (Fisher mean)

    Returns
    -------
    mpars : dictionary with the keys "specimen_n", "measurement_step_min",
        "measurement_step_max", "specimen_mad", "specimen_dec", "specimen_inc"
    """
    mpars = {}
    datablock = []
    start0, end0 = start, end
    # indata = [rec.append('g') if len(rec) < 6 else rec for rec in indata]
    # the statement above doesn't work!
    indata = []
    for rec in data:
        if len(rec) < 6:
            rec.append('g')
        indata.append(rec)
    if indata[start0][5] == 'b':
        print("Can't select 'bad' point as start for PCA")
    flags = [x[5] for x in indata]
    bad_before_start = flags[:start0].count('b')
    bad_in_mean = flags[start0:end0 + 1].count('b')
    start = start0 - bad_before_start
    end = end0 - bad_before_start - bad_in_mean
    datablock = [x for x in indata if x[5] == 'g']
    if indata[start0] != datablock[start]:
        print('problem removing bad data in pmag.domean start of datablock shifted:\noriginal: %d\nafter removal: %d' % (
            start0, indata.index(datablock[start])))
    if indata[end0] != datablock[end]:
        print('problem removing bad data in pmag.domean end of datablock shifted:\noriginal: %d\nafter removal: %d' % (
            end0, indata.index(datablock[end])))
    mpars["calculation_type"] = calculation_type
    rad = old_div(np.pi, 180.)
    if end > len(datablock) - 1 or end < start:
        end = len(datablock) - 1
    control, data, X, Nrec = [], [], [], float(end - start + 1)
    cm = [0., 0., 0.]
    #
    # get cartesian coordinates
    #
    fdata = []
    for k in range(start, end + 1):
        if calculation_type == 'DE-BFL' or calculation_type == 'DE-BFL-A' or calculation_type == 'DE-BFL-O':
            # best-fit line
            data = [datablock[k][1], datablock[k][2], datablock[k][3]]
        else:
            data = [datablock[k][1], datablock[k][2], 1.0]  # unit weight
        fdata.append(data)
        cart = dir2cart(data)
        X.append(cart)
    if calculation_type == 'DE-BFL-O':  # include origin as point
        X.append([0., 0., 0.])
    if calculation_type == 'DE-FM':  # for fisher means
        fpars = fisher_mean(fdata)
        mpars["specimen_direction_type"] = 'l'
        mpars["specimen_dec"] = fpars["dec"]
        mpars["specimen_inc"] = fpars["inc"]
        mpars["specimen_alpha95"] = fpars["alpha95"]
        mpars["specimen_n"] = fpars["n"]
        mpars["specimen_r"] = fpars["r"]
        mpars["measurement_step_min"] = indata[start0][0]
        mpars["measurement_step_max"] = indata[end0][0]
        mpars["center_of_mass"] = cm
        mpars["specimen_dang"] = -1
        return mpars
    #
    # get center of mass for principal components (DE-BFL or DE-BFP)
    #
    for cart in X:
        for l in range(3):
            cm[l] += old_div(cart[l], Nrec)
    mpars["center_of_mass"] = cm
    #
    # transform to center of mass (if best-fit line)
    #
    if calculation_type != 'DE-BFP':
        mpars["specimen_direction_type"] = 'l'
    if calculation_type == 'DE-BFL' or calculation_type == 'DE-BFL-O':
        # not for planes or anchored lines
        for k in range(len(X)):
            for l in range(3):
                X[k][l] = X[k][l] - cm[l]
    else:
        mpars["specimen_direction_type"] = 'p'
    #
    # put in T matrix
    #
    T = np.array(Tmatrix(X))
    #
    # get sorted evals/evects
    #
    t, V = tauV(T)
    if t == []:
        mpars["specimen_direction_type"] = "Error"
        print("Error in calculation")
        return mpars
    v1, v3 = V[0], V[2]
    if t[2] < 0:
        t[2] = 0  # make positive
    if calculation_type == 'DE-BFL-A':
        Dir, R = vector_mean(fdata)
        mpars["specimen_direction_type"] = 'l'
        mpars["specimen_dec"] = Dir[0]
        mpars["specimen_inc"] = Dir[1]
        mpars["specimen_n"] = len(fdata)
        mpars["measurement_step_min"] = indata[start0][0]
        mpars["measurement_step_max"] = indata[end0][0]
        mpars["center_of_mass"] = cm
        s1 = np.sqrt(t[0])
        MAD = old_div(np.arctan(old_div(np.sqrt(t[1] + t[2]), s1)), rad)
        if np.iscomplexobj(MAD):
            MAD = MAD.real
        # I think this is how it is done - I never anchor the "PCA" - check
        mpars["specimen_mad"] = MAD
        return mpars
    if calculation_type != 'DE-BFP':
        #
        # get control vector for principal component direction
        #
        rec = [datablock[start][1], datablock[start][2], datablock[start][3]]
        P1 = dir2cart(rec)
        rec = [datablock[end][1], datablock[end][2], datablock[end][3]]
        P2 = dir2cart(rec)
        #
        # get right direction along principal component
        #
        for k in range(3):
            control.append(P1[k] - P2[k])
        # changed by rshaar: control is taken as the center of mass
        # control = cm
        dot = 0
        for k in range(3):
            dot += v1[k] * control[k]
        if dot < -1:
            dot = -1
        if dot > 1:
            dot = 1
        if np.arccos(dot) > old_div(np.pi, 2.):
            for k in range(3):
                v1[k] = -v1[k]
    # get right direction along principal component
    s1 = np.sqrt(t[0])
    Dir = cart2dir(v1)
    MAD = old_div(np.arctan(old_div(np.sqrt(t[1] + t[2]), s1)), rad)
    if np.iscomplexobj(MAD):
        MAD = MAD.real
    if calculation_type == "DE-BFP":
        Dir = cart2dir(v3)
        MAD = old_div(
            np.arctan(np.sqrt(old_div(t[2], t[1]) + old_div(t[2], t[0]))), rad)
        if np.iscomplexobj(MAD):
            MAD = MAD.real
    #
    # get angle with center of mass
    #
    CMdir = cart2dir(cm)
    Dirp = [Dir[0], Dir[1], 1.]
    dang = angle(CMdir, Dirp)
    mpars["specimen_dec"] = Dir[0]
    mpars["specimen_inc"] = Dir[1]
    mpars["specimen_mad"] = MAD
    mpars["specimen_n"] = len(X)
    mpars["specimen_dang"] = dang[0]
    mpars["measurement_step_min"] = indata[start0][0]
    mpars["measurement_step_max"] = indata[end0][0]
    return mpars
09e3539e4705995e8721976412977e13575add19
3,637,334
from flask import send_from_directory


def download_data(dataset: str):
    """
    Downloads a dataset as a .csv file.

    :param dataset: The name of the dataset to download.
    """
    return send_from_directory(app.config['DATA_FOLDER'], dataset, as_attachment=True)
cc4cd03f38ecfbfc3b602a2ed323482203ef319f
3,637,335
def _split_features_target(feature_matrix, problem_name):
    """Split the features and labels.

    Args:
        feature_matrix (pd.DataFrame): a dataframe consisting of both feature
            values and target values.
        problem_name (str): the name of the problem.

    Returns:
        tuple: features (pd.DataFrame) and target (pd.Series).
    """
    features = feature_matrix.copy().reset_index(drop=True)
    if problem_name.lower() in features.columns:
        features.pop(problem_name.lower())
    target = features.pop(TARGET_NAME[problem_name])
    return features, target
363dd1a9956f97c3c3520a074cca3c7c57f1517f
3,637,336
def descope_queue_name(scoped_name):
    """Descope Queue name with '.'.

    Returns the queue name from the scoped name, which is of the form
    project-id.queue-name.
    """
    return scoped_name.split('.')[1]
24de78d12399e0894f495cd5c472b10c2315e4af
3,637,337
import os


def SearchBeamPosition(DriverType=None):
    """A factory for SearchBeamPosition classes."""

    DriverInstance = DriverFactory.Driver(DriverType)

    class SearchBeamPositionWrapper(DriverInstance.__class__):
        def __init__(self):
            DriverInstance.__class__.__init__(self)
            self.set_executable("dials.search_beam_position")

            self._sweep_filename = None
            self._spot_filename = None
            self._optimized_filename = None
            self._phil_file = None
            self._image_range = None

        def set_sweep_filename(self, sweep_filename):
            self._sweep_filename = sweep_filename

        def set_spot_filename(self, spot_filename):
            self._spot_filename = spot_filename

        def set_phil_file(self, phil_file):
            self._phil_file = phil_file

        def set_image_range(self, image_range):
            self._image_range = image_range

        def get_optimized_experiments_filename(self):
            return self._optimized_filename

        def run(self):
            logger.debug("Running %s", self.get_executable())

            self.clear_command_line()
            self.add_command_line(self._sweep_filename)
            self.add_command_line(self._spot_filename)
            nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
            self.set_cpu_threads(nproc)
            self.add_command_line("nproc=%i" % nproc)
            if self._image_range:
                self.add_command_line("image_range=%d,%d" % self._image_range)
            if self._phil_file is not None:
                self.add_command_line(self._phil_file)

            self._optimized_filename = os.path.join(
                self.get_working_directory(), "%d_optimised.expt" % self.get_xpid()
            )
            self.add_command_line("output.experiments=%s" % self._optimized_filename)

            self.start()
            self.close_wait()
            self.check_for_errors()
            self.get_all_output()

            assert os.path.exists(self._optimized_filename), self._optimized_filename

    return SearchBeamPositionWrapper()
dd2315b859b858e69ec4f90e04e852ca0c3aac62
3,637,338
from IPython.display import Image


def movie(function, movie_name="movie.gif", play_range=None, loop=0,
          optimize=True, duration=100, embed=False, mp4=True):
    """
    Make a movie from a function.

    function has signature: function(index) and should return a PIL.Image.
    """
    frames = []
    for index in range(*play_range):
        frames.append(function(index))
    if frames:
        frames[0].save(movie_name, save_all=True, append_images=frames[1:],
                       optimize=optimize, loop=loop, duration=duration)
        if mp4 is False:
            return Image(url=movie_name, embed=embed)
        else:
            return gif2mp4(movie_name)
ac1705c4dae278a58af4f745d676c20570639567
3,637,339
def get_rounded_reward_2(duration: float) -> float:
    """
    Helper function to round the reward.

    :param duration: unrounded duration
    :return: rounded duration, two decimal places
    """
    return round(get_reward(duration), 2)
ad1e97788504e6b4173853dbd7e82e9b407f7d0b
3,637,340
def fitness_order(order):
    """Fitness function of an order of cities."""
    score = 0
    cacher = str(order)
    if cacher in cache:
        return cache[cacher]
    for i in range(len(order) - 1):
        score += distance_map[(order[i], order[i + 1])]
    # close the tour: distance from the first city to the last
    score += distance_map[(order[0], order[-1])]
    cache[cacher] = score
    return score
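A minimal usage sketch; `cache` and `distance_map` are module-level globals that the snippet assumes, so the values here are illustrative:

cache = {}
distance_map = {
    ('a', 'b'): 1, ('b', 'c'): 2,
    ('a', 'c'): 3,
}
print(fitness_order(('a', 'b', 'c')))  # 1 + 2 + 3 = 6 (tour closed back to 'a')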
e2af48535bf2219ec4cb6ddf0972c7dcc7ef363a
3,637,341
from typing import Optional

import _ba
from ba import _lang


def get_human_readable_user_scripts_path() -> str:
    """Return a human readable location of user-scripts.

    This is NOT a valid filesystem path; it may be something like "(SD Card)".
    """
    app = _ba.app
    path: Optional[str] = app.python_directory_user
    if path is None:
        return '<Not Available>'

    # On newer versions of android, the user's external storage dir is
    # probably only visible to the user's processes and thus not really useful
    # printed in its entirety; lets print it as <External Storage>/myfilepath.
    if app.platform == 'android':
        ext_storage_path: Optional[str] = (
            _ba.android_get_external_storage_path())
        if (ext_storage_path is not None
                and app.python_directory_user.startswith(ext_storage_path)):
            path = ('<' + _lang.Lstr(resource='externalStorageText').evaluate() +
                    '>' + app.python_directory_user[len(ext_storage_path):])
    return path
f5d78fed6db03947f1bb4135391aeeb7a130031c
3,637,342
import numpy as np


def rotated_positive_orthogonal_basis(
    angle_x=np.pi / 3, angle_y=np.pi / 4, angle_z=np.pi / 5
):
    """Get a rotated orthogonal basis.

    If X, Y, Z are the rotation matrices of the passed angles, the resulting
    basis is Z * Y * X.

    Parameters
    ----------
    angle_x :
        Rotation angle around the x-axis (Default value = np.pi / 3)
    angle_y :
        Rotation angle around the y-axis (Default value = np.pi / 4)
    angle_z :
        Rotation angle around the z-axis (Default value = np.pi / 5)

    Returns
    -------
    np.ndarray
        Rotated orthogonal basis
    """
    # rotate axes to produce a more general test case
    r_x = tf.rotation_matrix_x(angle_x)
    r_y = tf.rotation_matrix_y(angle_y)
    r_z = tf.rotation_matrix_z(angle_z)
    r_tot = np.matmul(r_z, np.matmul(r_y, r_x))
    return r_tot
8848d1d11f3e83727ed90957f012869f27991285
3,637,343
from numpy import array
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical


def create_sequences(tokenizer, max_length, descriptions, photos_features, vocab_size):
    """
    Build one set of LSTM inputs from the given list of image captions and
    the image features.

    Args:
        tokenizer: keras.preprocessing.text.Tokenizer, converts between
            English words and integers
        max_length: length of the longest caption in the training set
        descriptions: dict, key is the image name (without the .jpg suffix),
            value is a list of several different descriptions of that image
        photos_features: dict, key is the image name (without the .jpg
            suffix), value is a numpy array of image features
        vocab_size: number of vocabulary words in the training set

    Returns:
        tuple:
            first element: numpy array whose elements are image features,
                each itself a numpy.array
            second element: numpy array whose elements are caption prefixes,
                each itself a numpy.array
            third element: numpy array whose elements are the next word of a
                caption (generated from the image features and the caption
                prefix), each also a numpy.array

    Examples:
        from pickle import load
        tokenizer = load(open('tokenizer.pkl', 'rb'))
        max_length = 6
        descriptions = {'1235345': ['startseq one bird on tree endseq',
                                    "startseq red bird on tree endseq"],
                        '1234546': ['startseq one boy play water endseq',
                                    "startseq one boy run across water endseq"]}
        photo_features = {'1235345': [0.434, 0.534, 0.212, 0.98],
                          '1234546': [0.534, 0.634, 0.712, 0.28]}
        vocab_size = 7378
        print(create_sequences(tokenizer, max_length, descriptions, photo_features, vocab_size))
        (array([[ 0.434,  0.534,  0.212,  0.98 ],
                [ 0.434,  0.534,  0.212,  0.98 ],
                [ 0.434,  0.534,  0.212,  0.98 ],
                [ 0.434,  0.534,  0.212,  0.98 ],
                [ 0.434,  0.534,  0.212,  0.98 ],
                [ 0.434,  0.534,  0.212,  0.98 ],
                [ 0.434,  0.534,  0.212,  0.98 ],
                [ 0.434,  0.534,  0.212,  0.98 ],
                [ 0.434,  0.534,  0.212,  0.98 ],
                [ 0.434,  0.534,  0.212,  0.98 ],
                [ 0.534,  0.634,  0.712,  0.28 ],
                [ 0.534,  0.634,  0.712,  0.28 ],
                [ 0.534,  0.634,  0.712,  0.28 ],
                [ 0.534,  0.634,  0.712,  0.28 ],
                [ 0.534,  0.634,  0.712,  0.28 ],
                [ 0.534,  0.634,  0.712,  0.28 ],
                [ 0.534,  0.634,  0.712,  0.28 ],
                [ 0.534,  0.634,  0.712,  0.28 ],
                [ 0.534,  0.634,  0.712,  0.28 ],
                [ 0.534,  0.634,  0.712,  0.28 ],
                [ 0.534,  0.634,  0.712,  0.28 ]]),
         array([[  0,   0,   0,   0,   0,   2],
                [  0,   0,   0,   0,   2,  59],
                [  0,   0,   0,   2,  59, 254],
                [  0,   0,   2,  59, 254,   6],
                [  0,   2,  59, 254,   6, 134],
                [  0,   0,   0,   0,   0,   2],
                [  0,   0,   0,   0,   2,  26],
                [  0,   0,   0,   2,  26, 254],
                [  0,   0,   2,  26, 254,   6],
                [  0,   2,  26, 254,   6, 134],
                [  0,   0,   0,   0,   0,   2],
                [  0,   0,   0,   0,   2,  59],
                [  0,   0,   0,   2,  59,  16],
                [  0,   0,   2,  59,  16,  82],
                [  0,   2,  59,  16,  82,  24],
                [  0,   0,   0,   0,   0,   2],
                [  0,   0,   0,   0,   2,  59],
                [  0,   0,   0,   2,  59,  16],
                [  0,   0,   2,  59,  16, 165],
                [  0,   2,  59,  16, 165, 127],
                [  2,  59,  16, 165, 127,  24]]),
         array([[ 0.,  0.,  0., ...,  0.,  0.,  0.],
                [ 0.,  0.,  0., ...,  0.,  0.,  0.],
                [ 0.,  0.,  0., ...,  0.,  0.,  0.],
                ...,
                [ 0.,  0.,  0., ...,  0.,  0.,  0.],
                [ 0.,  0.,  0., ...,  0.,  0.,  0.],
                [ 0.,  0.,  0., ...,  0.,  0.,  0.]]))
    """
    X1, X2, y = list(), list(), list()
    for key, desc_list in descriptions.items():
        for desc in desc_list:
            seq = tokenizer.texts_to_sequences([desc])[0]
            for i in range(1, len(seq)):
                in_seq, out_seq = seq[:i], seq[i]
                # pad in_seq so that its length equals max_length
                in_seq = pad_sequences([in_seq], maxlen=max_length)[0]
                out_seq = to_categorical([out_seq], num_classes=vocab_size)[0]
                X1.append(photos_features[key][0])
                X2.append(in_seq)
                y.append(out_seq)
    return array(X1), array(X2), array(y)
31cffba7fdce229bd264e87291e57ed386498f3f
3,637,344
def avatar_uri(instance, filename):
    """upload_to handler for Channel.avatar"""
    return generate_filepath(filename, instance.name, "_avatar", "channel")
f880f49055fbdc30f6a045f2f7916c077d22452c
3,637,345
def readMoveBaseGoalsFromFile(poses_file):
    """Read and return MoveBaseGoals for the robot-station and patrol-poses.

    If the contents of the file do not obey the syntax rules of
    _readPosesFromFile(), or if no patrol-poses were found, an IOError
    exception is raised.
    """
    patrol_poses, station_pose = _readPosesFromFile(poses_file)
    _assertNumPatrolPoses(patrol_poses)
    patrol_goals = [_createMoveBaseGoalFromPose(x) for x in patrol_poses]
    station_goal = None if station_pose is None else _createMoveBaseGoalFromPose(station_pose)
    return patrol_goals, station_goal
09085a9bc569e78050f3f10552aa160e9a9324bb
3,637,346
import configparser


def get_config(section=None):
    """Load the local config file."""
    run_config = configparser.ConfigParser()
    run_config.read(get_repo_dir() + 'config.ini')
    if len(run_config) == 1:
        # only the DEFAULT section exists, i.e. no file was read
        run_config = None
    elif section is not None:
        run_config = run_config[section]
    return run_config
934dfad58fd674b58ccb4ddfeb9d62edbaba6e84
3,637,347
import os


def system_info(x):
    """Get system info."""
    # note: the x argument is unused
    return list(os.uname())
4231e967190daee2ae7c6d9823878bcb0a957bc1
3,637,348
def get_parent(running_list, i, this_type, parent_type):
    """Get the description of an industry group's parent.

    OSHA industry descriptions are provided in ordered lists; this function
    identifies the parent industry group based on information provided by the
    groups preceding it.
    """
    prior = running_list[i - 1]
    if clean_desc(prior.full_desc)[1] == parent_type:
        # If the type of the previous group is a parent type then set the
        # parent description to the previous element's description
        parent_desc = str(prior.full_desc)
    elif clean_desc(prior.full_desc)[1] == this_type:
        # Else if the previous group is the more granular type then set the
        # parent description to the previous element's parent description
        parent_desc = str(prior.parent_desc)
    else:
        # Otherwise raise a value error
        err_msg = 'Unexpected code type: ' + str(prior)
        raise ValueError(err_msg)
    return parent_desc
f307900a6220f4d6f14ed0c1924d8f03a40a8a1d
3,637,349
import torch


def rebalance_binary_class(label, mask=None, base_w=1.0):
    """Binary-class rebalancing."""
    # fraction of positive labels in the tensor
    weight_factor = label.float().sum() / torch.prod(torch.tensor(label.size()).float())
    weight_factor = torch.clamp(weight_factor, min=1e-2)
    alpha = 1.0
    # up-weight the (rarer) positive class; negative class keeps weight 1
    weight = alpha * label * (1 - weight_factor) / weight_factor + (1 - label)
    return weight_factor, weight
5adf3a21e4cc4b9e7bf129ecf31cfe37ab7a305a
3,637,350
def from_base(num_base: int, dec: int) -> float:
    """Returns the value in e.g. ETH (taking e.g. wei as input)."""
    return float(num_base / (10 ** dec))
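For example, converting wei to ETH (dec = 18):

print(from_base(1_500_000_000_000_000_000, 18))  # -> 1.5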
447a07b3e282e5104f8dcd50639c658f3013ec7a
3,637,351
def make_auth_header(auth_token):
    """Make the authorization headers to communicate with endpoints which
    implement Auth0 authentication API.

    Args:
        auth_token (dict): a dict obtained from the Auth0 domain oauth
            endpoint, containing the signed JWT (JSON Web Token), its expiry,
            the scopes granted, and the token type.

    Returns:
        headers (dict): A dict representing the headers with the necessary
            token information to talk to Auth0 authentication-required
            endpoints.
    """
    token_type = auth_token['token_type']
    access_token = auth_token['access_token']
    headers = {
        "Content-type": "application/json",
        "Authorization": "{token_type} {access_token}".format(
            token_type=token_type, access_token=access_token
        ),
    }
    return headers
e7c9b93cfbda876668068fb871d3abaf06157204
3,637,352
import os


def get_file_in_archive(relative_path, subpath, url, force_extract=False):
    """
    Download a zip file, unpack it, and get the local address of a file
    within this zip (so that you can open it, etc).

    :param relative_path: Local name for the extracted folder. (The zip file
        will be named this with the appropriate zip extension.)
    :param subpath: Path of the file relative to the zip folder.
    :param url: Url of the zip file to download.
    :param force_extract: Force the zip file to re-extract (rather than just
        reusing the extracted folder).
    :return: The full path to the file on your system.
    """
    local_folder_path = get_archive(relative_path=relative_path, url=url, force_extract=force_extract)
    local_file_path = os.path.join(local_folder_path, subpath)
    assert os.path.exists(local_file_path), \
        'Could not find the file "%s" within the extracted folder: "%s"' % (subpath, local_folder_path)
    return local_file_path
6ddb6d170a1ba0466594e095277bbd3a1fc71d63
3,637,353
from typing import List
from typing import Optional
import os
import ssl

import nats


async def create_nats_client(servers: List[str]) -> Optional[NatsClient]:
    """
    Create a NATS client for any NATS server or NATS cluster configured to
    accept this installation's NKey.

    :param servers: List of one or more NATS servers in the same NATS cluster.
    :return: a connected NATS client instance
    """
    settings = get_settings()
    client = await nats.connect(
        verbose=True,
        servers=servers,
        nkeys_seed=os.path.join(
            settings.connect_config_directory, settings.nats_nk_file
        ),
        tls=get_ssl_context(ssl.Purpose.SERVER_AUTH),
        allow_reconnect=settings.nats_allow_reconnect,
        max_reconnect_attempts=settings.nats_max_reconnect_attempts,
    )
    logger.info("Created NATS client")
    logger.debug(f"Created NATS client for servers = {servers}")
    return client
e74b55bf1d44353e1938f5408c5a7982adf4563f
3,637,354
from itertools import compress
from typing import Tuple


def _drop_additional_columns(
    pdf: PandasDataFrame,
    column_names: Tuple,
    additional_columns: Tuple,
) -> PandasDataFrame:
    """Removes additional columns from a pandas DataFrame."""
    # ! columns has to be a list
    to_drop = list(compress(column_names, additional_columns))
    return pdf.drop(columns=to_drop)
86a41647ae9bed9a18b428610f73af36105702b8
3,637,355
import numpy as np
from cc3d import connected_components


def get_cc3d(mask, top=1):
    """
    26-connected neighbor components.

    :param mask:
    :param top: top K connected components
    :return: relabeled mask, or 'invalid' if fewer than top components exist
    """
    msk = connected_components(mask.astype('uint8'))
    indices, counts = np.unique(msk, return_counts=True)
    # drop the background label (0)
    indices = indices[1:]
    counts = counts[1:]
    if len(counts) < top:
        return 'invalid'
    labels = indices[np.argpartition(counts, -top)[-top:]]
    # temporarily relabel the top components to 501..500+top so the original
    # labels cannot collide with the new ones
    for i in range(top):
        msk[msk == labels[i]] = 501 + i
    mn = 501
    mx = 501 + top - 1
    msk[msk < mn] = 500
    msk[msk > mx] = 500
    msk = msk - 500
    return msk
e91d5f2617ba526ebc27eaca93d8fed7e0145d0b
3,637,356
def dscp_class(bits_0_2, bit_3, bit_4):
    """
    Takes values of DSCP bits and computes the DSCP class.

    Bits 0-2 decide the major class; bits 3-4 decide the drop precedence.

    :param bits_0_2: int: decimal value of bits 0-2
    :param bit_3: int: value of bit 3
    :param bit_4: int: value of bit 4
    :return: DSCP class name
    """
    bits_3_4 = (bit_3 << 1) + bit_4
    if bits_3_4 == 0:
        dscp_cl = "cs{}".format(bits_0_2)
    elif (bits_0_2, bits_3_4) == (5, 3):
        dscp_cl = "ef"
    else:
        dscp_cl = "af{}{}".format(bits_0_2, bits_3_4)
    return dscp_cl
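Worked examples of the bit arithmetic above:

print(dscp_class(3, 0, 0))  # bits 3-4 == 0       -> cs3
print(dscp_class(4, 1, 0))  # bits_3_4 = 0b10 = 2 -> af42
print(dscp_class(5, 1, 1))  # (5, 3) special case -> ef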
79e9881e413a5fcbbbaab110e7b3346a2dbcaa53
3,637,357
import numpy as np
from PIL import Image


def load_data(impaths_all, test=False):
    """
    Load data with corresponding masks and segmentations.

    :param impaths_all: Paths of images to be loaded
    :param test: Boolean, part of test set?
    :return: Numpy arrays of images, masks and segmentations
    """
    # Save all images, masks and segmentations
    images = []
    masks = []
    segmentations = []

    # Load as numpy array and normalize between 0 and 1
    for im_path in impaths_all:
        images.append(np.array(Image.open(im_path)) / 255.)
        mask_path = im_path.replace('images', 'mask').replace('.png', '_mask.gif')
        masks.append(np.array(Image.open(mask_path)) / 255.)
        if not test:
            seg_path = im_path.replace('images', '1st_manual').replace('training.png', 'manual1.gif')
        else:
            seg_path = im_path.replace('images', '1st_manual').replace('test.png', 'manual1.gif')
        segmentations.append(np.array(Image.open(seg_path)) / 255.)

    return (np.array(images),
            np.expand_dims(np.array(masks), axis=-1),
            np.expand_dims(np.array(segmentations), axis=-1))
b1d8f1b135eab0f122370aaa98f8cbbe6f2f1be7
3,637,358
def f_assert_seq0_gte_seq1(value_list):
    """Check that the first element of the list is greater than or equal to the second."""
    if not value_list[0] >= value_list[1]:
        raise FeatureProcessError('%s f_assert_seq0_gte_seq1 Error' % value_list)
    return value_list
225e60a565ffa81ec373dea7f8097ee6619a8b01
3,637,359
import tempfile


def download_file(res):
    """
    Download a file into a temporary file.

    :param res: Response object
    :return: downloaded file location
    """
    LOGGER.debug("Chunked file download started")
    with tempfile.NamedTemporaryFile(delete=False) as file:
        for chunk in res.iter_content(chunk_size=config.CHUNK_SIZE):
            if chunk:
                file.write(chunk)
    LOGGER.debug("File stored as %s", file.name)
    return file.name
84eac5fd06b3bfca6fec43eff79dcc996a2ac13d
3,637,360
def extract_units(name_andor_units):
    """Extracts the number of academic credit units the course is worth.

    Returns NaN if the number of units is variable.
    """
    start = name_andor_units.rindex('(') + 1
    end = name_andor_units.index(' ', start)
    units = name_andor_units[start:end]
    try:
        return float(units)
    except ValueError:
        if '/' in units or '-' in units:
            LOGGER.debug("Encountered variable units string %s; using NaN", repr(units))
            return NaN
        LOGGER.error("Encountered unparseable units string %s", repr(units))
        raise
63a8a1e497a9840f0f351e4235dd6409230a6405
3,637,361
from typing import List


def decorate_diff_with_color(contents: List[str]) -> str:
    """Inject the ANSI color codes into the diff."""
    for i, line in enumerate(contents):
        if line.startswith("+++") or line.startswith("---"):
            line = f"\033[1;37m{line}\033[0m"  # bold white, reset
        elif line.startswith("@@"):
            line = f"\033[36m{line}\033[0m"  # cyan, reset
        elif line.startswith("+"):
            line = f"\033[32m{line}\033[0m"  # green, reset
        elif line.startswith("-"):
            line = f"\033[31m{line}\033[0m"  # red, reset
        contents[i] = line
    return '\n'.join(contents)
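A short usage sketch with a hand-written diff (the escape codes only render in an ANSI-capable terminal):

diff = [
    "--- a/example.txt",
    "+++ b/example.txt",
    "@@ -1 +1 @@",
    "-old line",
    "+new line",
]
print(decorate_diff_with_color(diff))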
55821622b1e7d7f545fa4ad34abebd108f27528d
3,637,362
from typing import Dict, Text


def _combine_multipliers(first: Dict[Text, float],
                         second: Dict[Text, float]) -> Dict[Text, float]:
    """Combines operation weight multiplier dicts. Modifies the first dict."""
    for name in second:
        first[name] = first.get(name, 1.0) * second[name]
    return first
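For example, combining two weight-multiplier dicts (note that the first dict is modified in place):

first = {"conv": 2.0}
second = {"conv": 0.5, "matmul": 3.0}
print(_combine_multipliers(first, second))  # -> {'conv': 1.0, 'matmul': 3.0}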
e9db49daad0463e42a9231a2459fac5b4d14e181
3,637,363
def scale_to_one(iterable):
    """
    Scale an iterable of numbers proportionally such that the highest number
    equals 1.

    Example:
    >>> scale_to_one([5, 4, 3, 2, 1])
    [1.0, 0.8, 0.6, 0.4, 0.2]
    """
    m = max(iterable)
    return [v / m for v in iterable]
92cfc7ef586ecfea4300aeedabe2410a247610f7
3,637,364
def fixed_size_of_type_in_bits(type_ir, ir):
    """Returns the fixed, known size for the given type, in bits, or None.

    Arguments:
        type_ir: The IR of a type.
        ir: A complete IR, used to resolve references to types.

    Returns:
        size if the size of the type can be determined, otherwise None.
    """
    array_multiplier = 1
    while type_ir.HasField("array_type"):
        if type_ir.array_type.WhichOneof("size") == "automatic":
            return None
        else:
            assert type_ir.array_type.WhichOneof("size") == "element_count", (
                'Expected array size to be "automatic" or "element_count".')
            element_count = type_ir.array_type.element_count
            if not is_constant(element_count):
                return None
            else:
                array_multiplier *= constant_value(element_count)
        assert not type_ir.HasField("size_in_bits"), (
            "TODO(bolms): implement explicitly-sized arrays")
        type_ir = type_ir.array_type.base_type
    assert type_ir.HasField("atomic_type"), "Unexpected type!"
    if type_ir.HasField("size_in_bits"):
        size = constant_value(type_ir.size_in_bits)
    else:
        type_definition = find_object(type_ir.atomic_type.reference, ir)
        size_attr = get_attribute(type_definition.attribute, _FIXED_SIZE_ATTRIBUTE)
        if not size_attr:
            return None
        size = constant_value(size_attr.expression)
    return size * array_multiplier
a3aaf62ef37eab29011fe6bc4f17e4f694145766
3,637,365
def insecure(path):
    """Find an insecure path, at or above this one."""
    return first(search_parent_paths(path), insecure_inode)
685e66392f2adf8c9447e5cacf883de68c3bbe2d
3,637,366
import gc
from collections import defaultdict

import numpy as np


def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
    """
    Estimate the influence of the number of features on prediction time.

    Parameters
    ----------
    estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
    n_features : list of feature-space dimensionalities to test (int)
    percentile : percentile at which to measure the speed (int [0-100])

    Returns:
    --------
    percentiles : dict(estimator_name, dict(n_features, percentile_perf_in_us))
    """
    percentiles = defaultdict(defaultdict)
    for n in n_features:
        print("benchmarking with %d features" % n)
        X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
        for cls_name, estimator in estimators.items():
            estimator.fit(X_train, y_train)
            gc.collect()
            runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
            percentiles[cls_name][n] = 1e6 * np.percentile(runtimes, percentile)
    return percentiles
16d386385f4fde04670e37e76abe13b8c22a487c
3,637,367
def make_author_list(res):
    """Takes a list of author names and returns a cleaned list of author names."""
    try:
        r = [", ".join([clean_txt(x['family']).capitalize(),
                        clean_txt(x['given']).capitalize()])
             for x in res['author']]
    except KeyError:
        print("No 'author' key, using 'Unknown Author'. You should edit the "
              "markdown file to change the name and citation key.")
        r = ["Unknown Authors"]
    return r
e1459514928d87a3e688b6957437452d02e50987
3,637,368
import numpy as np


def backproject_points_np(p, fx=None, fy=None, cx=None, cy=None, K=None):
    """
    p.shape = (nr_points, xyz)
    """
    if K is not None:
        fx = K[0, 0]
        fy = K[1, 1]
        cx = K[0, 2]
        cy = K[1, 2]
    # perspective division (true divide), then apply the intrinsics
    u = ((p[:, 0] / p[:, 2]) * fx) + cx
    v = ((p[:, 1] / p[:, 2]) * fy) + cy
    return np.stack([v, u]).T
a06031de2f03d6e80ae149024a5bb2bea96f6b76
3,637,369
import os
import sys
import platform


def configure_dexter_substitutions():
    """Configure substitutions for host platform and return list of dependencies."""
    # Produce dexter path, lldb path, and combine into the %dexter substitution
    # for running a test.
    dexter_path = os.path.join(config.cross_project_tests_src_root,
                               'debuginfo-tests', 'dexter', 'dexter.py')
    dexter_test_cmd = '"{}" "{}" test'.format(sys.executable, dexter_path)
    if lldb_path is not None:
        dexter_test_cmd += ' --lldb-executable "{}"'.format(lldb_path)
    tools.append(ToolSubst('%dexter', dexter_test_cmd))

    # For testing other bits of dexter that aren't under the "test" subcommand,
    # have a %dexter_base substitution.
    dexter_base_cmd = '"{}" "{}"'.format(sys.executable, dexter_path)
    tools.append(ToolSubst('%dexter_base', dexter_base_cmd))

    # Set up commands for DexTer regression tests.
    # Builder, debugger, optimisation level and several other flags differ
    # depending on whether we're running a unix like or windows os.
    if platform.system() == 'Windows':
        # The Windows builder script uses lld.
        dependencies = ['clang', 'lld-link']
        dexter_regression_test_builder = '--builder clang-cl_vs2015'
        dexter_regression_test_debugger = '--debugger dbgeng'
        dexter_regression_test_cflags = '--cflags "/Zi /Od"'
        dexter_regression_test_ldflags = '--ldflags "/Zi"'
    else:
        # Use lldb as the debugger on non-Windows platforms.
        dependencies = ['clang', 'lldb']
        dexter_regression_test_builder = '--builder clang'
        dexter_regression_test_debugger = "--debugger lldb"
        dexter_regression_test_cflags = '--cflags "-O0 -glldb"'
        dexter_regression_test_ldflags = ''

    # Typical command would take the form:
    # ./path_to_py/python.exe ./path_to_dex/dexter.py test --fail-lt 1.0 -w --builder clang --debugger lldb --cflags '-O0 -g'
    # Exclude build flags for %dexter_regression_base.
    dexter_regression_test_base = ' '.join(
        # "python", "dexter.py", test, fail_mode, debugger
        ['"{}"'.format(sys.executable),
         '"{}"'.format(dexter_path),
         'test',
         '--fail-lt 1.0 -w',
         dexter_regression_test_debugger])
    tools.append(ToolSubst('%dexter_regression_base', dexter_regression_test_base))

    # Include build flags for %dexter_regression_test.
    dexter_regression_test_build = ' '.join([
        dexter_regression_test_base,
        dexter_regression_test_builder,
        dexter_regression_test_cflags,
        dexter_regression_test_ldflags])
    tools.append(ToolSubst('%dexter_regression_test', dexter_regression_test_build))

    return dependencies
a3d6be2c9c997af55d626b1b5e5aee0ee246466e
3,637,370
import warnings

import numpy as np


def recover(D, gamma=None):
    """Recover low-rank and sparse part, using Alg. 4 of [2].

    Note: gamma is lambda in Alg. 4.

    Parameters
    ---------
    D : numpy ndarray, shape (N, D)
        Input data matrix.

    gamma : float, default = None
        Weight on sparse component. If 'None', then
        gamma = 1/sqrt(max(D, N)) as shown in [1] to be the optimal choice
        under a set of suitable assumptions.

    Returns
    -------
    LL : numpy array, shape (N, D)
        Low-rank part of data

    SP : numpy array, shape (N, D)
        Sparse part of data

    n_iter : int
        Number of iterations until convergence.
    """
    n, m = D.shape
    if gamma is None:
        gamma = 1/np.sqrt(np.amax([n, m]))

    # the following lines implement line 1 of Alg. 4
    Y = np.sign(D)
    l2n = np.linalg.norm(Y, ord=2)
    l2i = np.linalg.norm(np.asarray(Y).ravel(), ord=np.inf)
    dual_norm = np.amax([l2n, l2i])
    Y = Y/dual_norm

    # line 4 of Alg. 4
    A_hat = np.zeros(D.shape)
    E_hat = np.zeros(D.shape)

    D_fro = np.linalg.norm(D, ord='fro')

    # cf. section "Choosing Parameters" of [2]
    proj_tol = 1e-06*D_fro
    term_tol = 1e-07
    iter_max = 1e+03

    num_svd = 0      # track # of SVD calls
    m = 0.5/l2n      # \mu in Alg. 4
    r = 6            # \rho in Alg. 4
    sv = 5
    svp = sv

    k = 0
    converged = False
    while not converged:
        primal_converged = False
        sv = sv+np.round(n*0.1)
        primal_iter = 0
        while not primal_converged:
            # implement line 10 in Alg. 4
            T_tmp = D-A_hat+1/m*Y
            E_tmp = (np.maximum(T_tmp-gamma/m, 0) +
                     np.minimum(T_tmp+gamma/m, 0))

            # line 7 of Alg. 4
            U, S, V = np.linalg.svd(D-E_tmp+1/m*Y, full_matrices=False)

            # line 8 of Alg. 4
            svp = len(np.where(S > 1/m)[0])
            if svp < sv:
                sv = np.amin([svp+1, n])
            else:
                sv = np.amin([svp + np.round(0.05*n), n])

            # shrink the singular values (np.mat is avoided as it has been
            # removed from recent NumPy releases)
            A_tmp = U[:, 0:svp] @ np.diag(S[0:svp]-1/m) @ V[0:svp, :]

            # check convergence of inner optimization
            if (np.linalg.norm(A_hat-A_tmp, ord='fro') < proj_tol and
                    np.linalg.norm(E_hat-E_tmp, ord='fro') < proj_tol):
                primal_converged = True
            A_hat = A_tmp
            E_hat = E_tmp
            primal_iter = primal_iter+1
            num_svd = num_svd+1

        # line 13 of Alg. 4
        Z = D-A_hat-E_hat

        Y = Y+m*Z
        m = r*m

        # evaluate stopping criteria
        stop_crit = np.linalg.norm(Z, 'fro')/D_fro
        if stop_crit < term_tol:
            converged = True

        # some information about the iteration
        non_zero = len(np.where(np.asarray(np.abs(E_hat)).ravel() > 0)[0])
        message = ["[iter: %.4d]" % k,
                   "#svd=%.4d" % num_svd,
                   "rank(P)=%.4d" % svp,
                   "|C|_0=%.4d" % non_zero,
                   "crit=%.4g" % stop_crit]
        print(' '.join(message))
        k = k+1

        # handle non-convergence
        if not converged and k > iter_max:
            warnings.warn("terminate after max. iter.", UserWarning)
            converged = True
    return (A_hat, E_hat, k)
83a269006a7cc6c48b1db520c813929886eedf7b
3,637,371
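A small synthetic check of the routine above: a rank-1 matrix plus a few sparse spikes should separate cleanly. The sizes and corruption level are arbitrary.

import numpy as np

rng = np.random.RandomState(0)
L = np.outer(rng.randn(50), rng.randn(40))    # rank-1 low-rank part
S = np.zeros_like(L)
idx = rng.choice(L.size, 20, replace=False)   # a few sparse corruptions
S.flat[idx] = 10 * rng.randn(20)
L_hat, S_hat, n_iter = recover(L + S)
print("low-rank error:", np.linalg.norm(L_hat - L, 'fro'))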
import matplotlib.pyplot as plt def plot_Reff(Reff: dict, dates=None, ax_arg=None, truncate=None, **kwargs): """ Given summary statistics of Reff as a dictionary, plot the distribution over time """ plt.style.use("seaborn-poster") if ax_arg is None: fig, ax = plt.subplots(figsize=(12, 9)) else: fig, ax = ax_arg color_cycle = ax._get_lines.prop_cycler curr_color = next(color_cycle)["color"] if dates is None: dates = range(len(Reff["mean"])) if truncate is None: ax.plot(dates, Reff["mean"], color=curr_color, **kwargs) ax.fill_between( dates, Reff["lower"], Reff["upper"], alpha=0.4, color=curr_color ) ax.fill_between(dates, Reff["bottom"], Reff["top"], alpha=0.4, color=curr_color) else: ax.plot( dates[truncate[0] : truncate[1]], Reff["mean"][truncate[0] : truncate[1]], color=curr_color, **kwargs ) ax.fill_between( dates[truncate[0] : truncate[1]], Reff["lower"][truncate[0] : truncate[1]], Reff["upper"][truncate[0] : truncate[1]], alpha=0.4, color=curr_color, ) ax.fill_between( dates[truncate[0] : truncate[1]], Reff["bottom"][truncate[0] : truncate[1]], Reff["top"][truncate[0] : truncate[1]], alpha=0.4, color=curr_color, ) # plt.legend() # grid line at R_eff =1 ax.set_yticks( [1], minor=True, ) ax.set_yticks([0, 2, 3], minor=False) ax.set_yticklabels([0, 2, 3], minor=False) ax.yaxis.grid(which="minor", linestyle="--", color="black", linewidth=2) ax.tick_params(axis="x", rotation=90) return fig, ax
30db12546c6648c2e81609bd2292dc9be2fa6eea
3,637,372
def get_stylesheet():
    """Generate the URL of the Pygments theme stylesheet."""
    return "{static_url}/code_pygments/css/{theme}.css".format(
        static_url=core_config['ASSETS_URL'],
        theme=module_config['PYGMENTS_THEME'])
e5f766a414d6fac32b361dfbb54da929bff4458d
3,637,373
def create_volume(ctxt, host='test_host', display_name='test_volume', display_description='this is a test volume', status='available', migration_status=None, size=1, availability_zone='fake_az', volume_type_id=None, replication_status='disabled', replication_extended_status=None, replication_driver_data=None, consistencygroup_id=None, **kwargs): """Create a volume object in the DB.""" vol = {} vol['size'] = size vol['host'] = host vol['user_id'] = ctxt.user_id vol['project_id'] = ctxt.project_id vol['status'] = status vol['migration_status'] = migration_status vol['display_name'] = display_name vol['display_description'] = display_description vol['attach_status'] = 'detached' vol['availability_zone'] = availability_zone if consistencygroup_id: vol['consistencygroup_id'] = consistencygroup_id if volume_type_id: vol['volume_type_id'] = volume_type_id for key in kwargs: vol[key] = kwargs[key] vol['replication_status'] = replication_status vol['replication_extended_status'] = replication_extended_status vol['replication_driver_data'] = replication_driver_data return db.volume_create(ctxt, vol)
b638e98ab88f0e65c37503dee80bbec2250aab0e
3,637,374
def duration_of_treatment_30(): """ Real Name: b'duration of treatment 30' Original Eqn: b'10' Units: b'Day' Limits: (None, None) Type: constant b'' """ return 10
510b8e114007c9e64f866c98bfce9d2d86fa7bfe
3,637,375
import numpy as np


def input_pkgidx(g_dim):
    """
    Let the user specify the parking spot indices.
    Returns a 1*pk_dim np.array 'pk_g_idx' where pk_dim is the number of spots.
    """
    # np.int was removed from recent NumPy releases; plain int is equivalent.
    pk_dim = int(input('Please specify the num of parking spots:'))
    while pk_dim >= g_dim:
        print('Too many parking spots!')
        pk_dim = int(input('Please specify the num of parking spots:'))
    pk_g_idx = -np.ones(pk_dim, dtype=int)
    for idx in range(pk_dim):
        print('Input as grid index ranging from 0 to', g_dim - 1)
        spot_idx = int(input())
        while (spot_idx < 0) or (spot_idx >= g_dim):
            print('Invalid input!')
            print('Input as grid index ranging from 0 to', g_dim - 1)
            spot_idx = int(input())
        while spot_idx in pk_g_idx:
            print('Repeated input!')
            print('Input as grid index ranging from 0 to', g_dim - 1)
            spot_idx = int(input())
            while (spot_idx < 0) or (spot_idx >= g_dim):
                print('Invalid input!')
                print('Input as grid index ranging from 0 to', g_dim - 1)
                spot_idx = int(input())
        pk_g_idx[idx] = spot_idx
    pk_g_idx.sort()
    return pk_g_idx
acba8fdbeb1d0fdcb67d28b64fa313489fd597df
3,637,376
def get_total_count(data): """ Retrieves the total count from a Salesforce SOQL query. :param dict data: data from the Salesforce API :rtype: int """ return data['totalSize']
7cb8696c36449425fbcfa944f1f057d063972888
3,637,377
from optparse import OptionValueError


def _check_hex(dummy_option, opt, value):
    """
    Checks if a value is given as a decimal integer or in hexadecimal
    representation. Returns the converted value or raises an exception
    on error.
    """
    try:
        if value.lower().startswith("0x"):
            return int(value, 16)
        else:
            return int(value)
    except ValueError:
        raise OptionValueError(
            "option {0:s}: invalid integer or hexadecimal value: {1:s}.".format(opt, value))
6acf63f42b79ba30a55fc1666bdd2c1f65124fd3
3,637,378
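A sketch of wiring the checker above into optparse as a custom option type, using the documented TYPES/TYPE_CHECKER extension hooks; the option name is illustrative.

from optparse import Option, OptionParser

# Hypothetical Option subclass registering a "hex" type backed by _check_hex.
class HexOption(Option):
    TYPES = Option.TYPES + ("hex",)
    TYPE_CHECKER = dict(Option.TYPE_CHECKER, hex=_check_hex)

parser = OptionParser(option_class=HexOption)
parser.add_option("--addr", type="hex")
opts, _ = parser.parse_args(["--addr", "0x1F"])
print(opts.addr)  # 31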
def get_challenge():
    """returns the ChallengeSetting object, from cache if cache is enabled"""
    challenge = cache_mgr.get_cache('challenge')
    if not challenge:
        challenge, _ = ChallengeSetting.objects.get_or_create(pk=1)

        # check the WattDepot URL to ensure it doesn't end with '/'
        if challenge.wattdepot_server_url:
            while challenge.wattdepot_server_url.endswith('/'):
                challenge.wattdepot_server_url = challenge.wattdepot_server_url[:-1]

        # create the admin
        create_admin_user()

        cache_mgr.set_cache('challenge', challenge, 2592000)
    return challenge
f874b90624bbd9c4d4fc5e18efd6363d461fab0f
3,637,379
from telethon.tl.types import MessageEntityMentionName


async def get_user_from_event(event):
    """ Get the user from an argument or the replied-to message.

    Always returns a (user_obj, extra) tuple; both are None on failure.
    """
    args = event.pattern_match.group(1).split(" ", 1)
    extra = None
    if event.reply_to_msg_id:
        previous_message = await event.get_reply_message()
        user_obj = await event.client.get_entity(previous_message.sender_id)
        extra = event.pattern_match.group(1)
        return user_obj, extra
    user = args[0] if args else None
    if len(args) == 2:
        extra = args[1]
    if user and user.isnumeric():
        user = int(user)
    if not user:
        await event.edit("`Pass the user's username, id or reply!`")
        return None, None
    if event.message.entities is not None:
        probable_user_mention_entity = event.message.entities[0]
        if isinstance(probable_user_mention_entity, MessageEntityMentionName):
            user_id = probable_user_mention_entity.user_id
            user_obj = await event.client.get_entity(user_id)
            return user_obj, extra
    try:
        user_obj = await event.client.get_entity(user)
    except (TypeError, ValueError) as err:
        await event.edit(str(err))
        return None, None
    return user_obj, extra
4cd659637603e9910aa5fb00e855767ae8252c2e
3,637,380
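A hedged sketch of using the helper above inside a Telethon event handler; the command pattern is made up and `client` is assumed to be a connected TelegramClient defined elsewhere.

from telethon import events

# Hypothetical command handler built on get_user_from_event.
@client.on(events.NewMessage(pattern=r"\.whois(?: |$)(.*)"))
async def whois(event):
    user_obj, extra = await get_user_from_event(event)
    if user_obj is not None:
        await event.edit("ID: `{}` extra: `{}`".format(user_obj.id, extra))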
def migrate_data(conn: redis.StrictRedis, data: dict) -> list: """ Uploads the given data to the given redis database connection """ pipe = conn.pipeline() for key, value in data.items(): command_and_formatter = TYPE_TO_PUT_COMMAND[value["type"]] command = command_and_formatter[0] formatter = command_and_formatter[1] redis_method = getattr(pipe, command) formatted_values = formatter(value["value"]) arguments = [key] + formatted_values redis_method(*arguments) return pipe.execute()
7e54d3bd0c5e302d3e2f173a53bf6cd6a9a6f6fb
3,637,381
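The routine above leans on a `TYPE_TO_PUT_COMMAND` table that is not shown. One plausible shape for it, pairing a redis-py pipeline method name with a formatter that flattens the stored value into positional arguments, might be:

# Hypothetical lookup assumed by migrate_data: value type -> (pipeline
# method name, formatter producing the positional arguments after the key).
TYPE_TO_PUT_COMMAND = {
    "string": ("set", lambda v: [v]),
    "list": ("rpush", lambda v: list(v)),
    "set": ("sadd", lambda v: list(v)),
}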
def dummy_location(db, create_location): """Give you a dummy default location.""" loc = create_location(u'Test') db.session.flush() return loc
ec6ffa3b42e07c88b8224ee2aaaf000853a4169f
3,637,382
from pathlib import Path def get_resources_path() -> Path: """ Convenience method to return the `resources` directory in this project """ return alpyne._ROOT_PATH.joinpath("resources")
35b90856e00fbee8aeb373350cef77596a5f2a71
3,637,383
import subprocess import os def is_running_in_container(): # type: () -> bool """ Determines if we're running in an lxc/docker container. """ out = subprocess.check_output('cat /proc/1/sched', shell=True) out = out.decode('utf-8').lower() checks = [ 'docker' in out, '/lxc/' in out, out.split()[0] not in ('systemd', 'init',), os.path.exists('/.dockerenv'), os.path.exists('/.dockerinit'), os.getenv('container', None) is not None ] return any(checks)
5cef531a2b8bc989f5fa0aedde548ca1272bc494
3,637,384
import os

import numpy as np


def load_imagenet_val(num=None):
    """
    Load a handful of validation images from ImageNet.

    Inputs:
    - num: Number of images to load (max of 25)

    Returns:
    - X: numpy array with shape [num, 224, 224, 3]
    - y: numpy array of integer image labels, shape [num]
    - class_names: dict mapping integer label to class name
    """
    imagenet_fn = 'datasets/imagenet_val_25.npz'
    if not os.path.isfile(imagenet_fn):
        print('file %s not found' % imagenet_fn)
        assert False, 'Need to download imagenet_val_25.npz in datasets folder'
    # allow_pickle is required on modern NumPy because label_map is a
    # pickled dict.
    f = np.load(imagenet_fn, allow_pickle=True)
    X = f['X']
    y = f['y']
    class_names = f['label_map'].item()
    if num is not None:
        X = X[:num]
        y = y[:num]
    return X, y, class_names
c5d94290b7df90d9abc3a76041fea72aad66e864
3,637,385
import math

import numpy as np


def sk_rot_mx(rot_vec):
    """
    Use Rodrigues' rotation formula (via the half-angle quaternion) to
    transform the rotation vector into a rotation matrix.

    :param rot_vec: axis-angle vector whose norm is the rotation angle
    :return: 3x3 rotation matrix
    """
    theta = np.linalg.norm(rot_vec)
    vector = np.array(rot_vec) * math.sin(theta / 2.0) / theta
    a = math.cos(theta / 2.0)
    b = -vector[0]
    c = -vector[1]
    d = -vector[2]
    return np.array(
        [
            [a * a + b * b - c * c - d * d,
             2 * (b * c + a * d),
             2 * (b * d - a * c)],
            [2 * (b * c - a * d),
             a * a + c * c - b * b - d * d,
             2 * (c * d + a * b)],
            [2 * (b * d + a * c),
             2 * (c * d - a * b),
             a * a + d * d - b * b - c * c],
        ]
    )
9ba2abfd877d87423db02b224fed30ec59dc90f7
3,637,386
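A quick sanity check of the conversion above: the result should be orthogonal with unit determinant. The axis-angle vector is arbitrary.

import numpy as np

rot_vec = [0.0, 0.0, np.pi / 2]           # 90-degree rotation about z
R = sk_rot_mx(rot_vec)
print(np.allclose(R @ R.T, np.eye(3)))    # orthogonality -> True
print(np.isclose(np.linalg.det(R), 1.0))  # proper rotation -> True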
from shapely.ops import split


def split_line(line, points, tolerance=1e-9):
    """Split a line at a point or multipoint, within some tolerance.

    Relies on a `snap_line` helper assumed to be defined elsewhere in
    this module.
    """
    to_split = snap_line(line, points, tolerance)
    return list(split(to_split, points))
46a4ae55ff655c864154d37108689d81ad77daf1
3,637,387
import os


def analyze_data() -> bool:
    """
    Reads the XML files in the data directory and copies out the extracted data.

    Returns
    -------
    True: bool
        Whether the copy was successful.
    """
    # `Path`, `copy_file` and `read_xml_file` are assumed to be project-level
    # helpers; `Path` here carries config constants, not pathlib.Path.
    path_to_file = Path.PATH_TO_PROGRAM + Path.DATA_DIRECTORY
    files_list = os.listdir(path_to_file)
    for file in files_list:
        copy_file(read_xml_file(path_to_file + file))
    return True
4ad7ae4d5d88e976c440071c283f93ef78d424c1
3,637,388
import copy


def cross_validation(docs, values, k):
    """
    docs: Dict with text lists separated by value
    values: Target values of the texts
    k: Number of folds for cross validation
    """
    # `learn` and `classify` are assumed to be defined in this module.
    group_size = {}
    confusion_matrix = []
    m = {'true': {}, 'false': {}}
    for value in values:
        group_size[value] = len(docs[value]) // k  # integer fold size
        m['true'][value] = 0
        m['false'][value] = 0
    for i in range(k):
        training = copy.deepcopy(docs)
        confusion_matrix.insert(i, copy.deepcopy(m))
        for value in values:
            begin = i * group_size[value]
            end = (i + 1) * group_size[value]
            test = training[value][begin:end]
            del training[value][begin:end]
            probabilities, vocabulary = learn(training, values)
            for doc in test:
                prob_value = classify(doc, probabilities, vocabulary, values)
                if value == prob_value:
                    confusion_matrix[i]['true'][value] += 1
                else:
                    confusion_matrix[i]['false'][prob_value] += 1
    return confusion_matrix
625a45edf45dc88db6e8c3b5342604891f899ebc
3,637,389
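A hedged usage sketch; `learn` and `classify` are assumed defined elsewhere in the module, and the documents here are placeholders.

# Hypothetical call: two classes, three-fold cross validation.
docs = {
    'pos': ['great movie', 'loved it', 'excellent plot'] * 3,
    'neg': ['terrible film', 'hated it', 'awful acting'] * 3,
}
matrices = cross_validation(docs, values=['pos', 'neg'], k=3)
for i, m in enumerate(matrices):
    print('fold', i, m['true'], m['false'])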
from urllib.parse import quote_plus as uquote_plus


def quote_plus(url, safe='/', encoding=None, errors=None):
    """Wrapper for urllib.parse.quote_plus"""
    return uquote_plus(url, safe=safe, encoding=encoding, errors=errors)
159a5e1e25bf35ee08b14f6dca871a4d0bb7f411
3,637,390
def averageSeriesWithWildcards(requestContext, seriesList, *position): #XXX
    """
    Call averageSeries after inserting wildcards at the given position(s).

    Example:

    .. code-block:: none

      &target=averageSeriesWithWildcards(host.cpu-[0-7].cpu-{user,system}.value, 1)

    This would be the equivalent of
    ``target=averageSeries(host.*.cpu-user.value)&target=averageSeries(host.*.cpu-system.value)``

    """
    if isinstance(position, int):
        positions = [position]
    else:
        positions = position

    result = []
    matchedList = {}
    for series in seriesList:
        newname = '.'.join(part
                           for i, part in enumerate(series.name.split('.'))
                           if i not in positions)
        if newname not in matchedList:
            matchedList[newname] = []
        matchedList[newname].append(series)
    for name in matchedList:
        result.append( averageSeries(requestContext, (matchedList[name]))[0] )
        result[-1].name = name
    return result
479be75db3498a1882c8a27d1a13de85102c52a6
3,637,391
from numpy import absolute, sqrt


def errore_ddp_digitale(V):
    """
    Computes the error of a voltage measurement from the digital multimeter,
    assuming the correct range was selected. The voltage must be given in
    volts.
    """
    V = absolute(V)
    if V < 0.2:
        return sqrt(V**2 * 25e-6 + 1e-8)
    if V < 2:
        return sqrt(V**2 * 25e-6 + 1e-6)
    if V < 20:
        return sqrt(V**2 * 25e-6 + 1e-4)
    if V < 200:
        return sqrt(V**2 * 25e-6 + 1e-2)
    print("Only values below 200 V are supported")
    return
d6504dfce600f5d9af2af33115c2e07b8033cf03
3,637,392
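For example, a 1.5 V reading falls in the 2 V range, so the error evaluates as sqrt((1.5 * 0.005)**2 + (0.001)**2), about 7.6 mV:

err = errore_ddp_digitale(1.5)
print(err)  # ~0.0076 V: 0.5% of the reading plus one digit on the 2 V range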
import subprocess
import sys


def run_command(cmd, get_output=False, tee=True, custom_env=None):
    """
    Runs a command.

    Args:
        cmd (str): The command to run.
        get_output (bool): If true, run_command will return the stdout output. Default: False.
        tee (bool): If true, captures output (if get_output is true) as well as prints output to stdout. Otherwise, does not print to stdout.
        custom_env (dict): If provided, environment variables to run the command with, replacing the inherited environment. Default: None.
    """
    print("Running command: {:}".format(cmd))
    if not get_output:
        return subprocess.check_call(cmd, shell=True)
    else:
        output = []
        if custom_env is not None:
            print("Overriding Environment")
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, env=custom_env)
        else:
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
        for line in iter(p.stdout.readline, b""):
            line = line.decode("utf-8")
            if tee:
                sys.stdout.write(line)
                sys.stdout.flush()
            output.append(line.rstrip("\n"))
        ret = p.wait()
        if ret == 0:
            return output
        else:
            raise subprocess.CalledProcessError(ret, cmd)
a7db34c9284ea6c332ff72202c4e8c64d9bdadbc
3,637,393
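A quick sketch of calling the helper above on a POSIX shell:

lines = run_command("echo hello && echo world", get_output=True)
print(lines)  # ['hello', 'world'] (after the teed output)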
import os def list_profiles_in(path): """list profiles in a given root directory""" files = os.listdir(path) profiles = [] for f in files: full_path = os.path.join(path, f) if os.path.isdir(full_path) and f.startswith('profile_'): profiles.append(f.split('_',1)[-1]) return profiles
1e377b2a686d3c63e569c57af7415a9604b3709e
3,637,394
def extractLine(shape, z = 0):
    """
    Extracts a direction vector from a shape: the difference between its
    first two exterior points, with the given z component.
    """
    x = shape.exteriorpoints()[0][0] - shape.exteriorpoints()[1][0]
    y = shape.exteriorpoints()[0][1] - shape.exteriorpoints()[1][1]
    return (x, y, z)
c61021b1e3dc6372d9d7554a7033bbd3ab128343
3,637,395
import logging


def get_fragility_model_04(fmodel, fname):
    """
    :param fmodel: a fragilityModel node
    :param fname: path of the fragility file
    :returns: an :class:`openquake.risklib.scientific.FragilityModel` instance
    """
    logging.warning('Please upgrade %s to NRML 0.5', fname)
    node05 = convert_fragility_model_04(fmodel, fname)
    node05.limitStates.text = node05.limitStates.text.split()
    return get_fragility_model(node05, fname)
62633b156f18c6e722321cf937ea06741aa7a65f
3,637,396
def substring_in_list(substr_to_find, list_to_search): """ Returns a boolean value to indicate whether or not a given substring is located within the strings of a list. """ result = [s for s in list_to_search if substr_to_find in s] return len(result) > 0
77521a1c5d487fa110d5adecb884dd298d2515e5
3,637,397
import cv2
import numpy as np


def downsample_image(image: np.ndarray, scale: int) -> np.ndarray:
    """Downsamples the image by an integer factor to prevent artifacts."""
    if scale == 1:
        return image

    height, width = image.shape[:2]
    if height % scale > 0 or width % scale > 0:
        raise ValueError(f'Image shape ({height},{width}) must be divisible by the'
                         f' scale ({scale}).')

    out_height, out_width = height // scale, width // scale
    # The interpolation flag must be passed by keyword; the third positional
    # argument of cv2.resize is the destination array, not the interpolation.
    resized = cv2.resize(image, (out_width, out_height),
                         interpolation=cv2.INTER_AREA)
    return resized
7d011bda8dc2fccc9782e621bb61d7ab68992640
3,637,398
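A small sketch exercising the helper above on a synthetic image; the shape and scale are arbitrary (but divisible, as the function requires).

import numpy as np

image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
small = downsample_image(image, scale=4)
print(small.shape)  # (120, 160, 3)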
def getQueryString( bindings, variableName ):
    """
    Collects a bunch of data about the bindings.

    Returns properly formatted strings for updating, inserting, and querying
    the SQLite table specified in the bindings dictionary. Also returns the
    table name and a string that lists the columns (properly formatted for
    use in an SQLite query).

    variableName is the name to use for the SQLiteC++ Statement variable in
    the generated methods.
    """
    table = ''
    columns = []
    queryData = []
    insertData = []
    updateData = []
    whereClaus = []
    bindData = []
    index = 0
    for b in bindings:
        # Process table
        if (b['type'] == 'table'):
            table = b['table']
        # Process column
        elif (b['type'] == 'column'):
            columns.append( b['column'] )

            # Process query data
            if (b['variableType'] == 'string'):
                text = '{variable} = std::string( {query}.getColumn({index}).getText() );'
                text = text.format(variable = b['variable'], index = index, query = variableName)
                queryData.append( text )
            else:
                text = '{variable} = {query}.getColumn({index});'
                text = text.format(variable = b['variable'], index = index, query = variableName)
                queryData.append( text )
            index = index + 1

            # Process insert data
            if (b['variableType'] == 'string' or b['variableType'] == 'char*'):
                insertData.append( "\"'\" << " + b['variable'] + " << \"'\"" )
            else:
                insertData.append( b['variable'] )

            # Process id
            if (b.get('id')):
                whereClaus.append( b['column'] + ' = ?' )
                text = 'query.bind({index}, {variableName});'
                text = text.format(index = len(whereClaus), variableName = b['variable'])
                bindData.append( text )

    # Process update data
    for i in range(0, len(columns)):
        t = columns[i] + '=" << ' + insertData[i]
        updateData.append(t)

    columns = ', '.join( columns )
    updateData = ' << ", '.join( updateData )
    insertData = ' << \", " << '.join( insertData )
    queryData = '\n'.join( queryData )
    whereClaus = ' AND '.join( whereClaus )
    bindData = '\n\t'.join( bindData )
    return { 'table': table, 'updateData': updateData, 'columns': columns,
             'insertData': insertData, 'queryData': queryData,
             'whereClaus': whereClaus, 'bindData': bindData }
9cc81601cde229cc5f5bf53ef73997efc515ed2b
3,637,399
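A hypothetical bindings list for the generator above; the table, columns, and variable names are illustrative.

# Illustrative input: one table entry plus two column entries.
bindings = [
    {'type': 'table', 'table': 'users'},
    {'type': 'column', 'column': 'name', 'variable': 'mName',
     'variableType': 'string', 'id': True},
    {'type': 'column', 'column': 'age', 'variable': 'mAge',
     'variableType': 'int'},
]
parts = getQueryString(bindings, 'query')
print(parts['columns'])     # name, age
print(parts['whereClaus'])  # name = ?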