Dataset schema: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def pwr_y(x, a, b, e):
    """
    Calculate the Power Law relation with a deviation term.

    Parameters
    ----------
    x : numeric
        Input to Power Law relation.
    a : numeric
        Constant.
    b : numeric
        Exponent.
    e : numeric
        Deviation term.

    Returns
    -------
    numeric
        Output of Power Law relation.

    Notes
    -----
    Power Law relation: :math:`y = a x^b + e`
    """
    return a * x**b + e
e736d9bb2e4305ef0dc0a360143a611b805f7612
3,648,400
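A quick numeric check of pwr_y above; the coefficient values are arbitrary, chosen only for illustration:

# Works for scalars and numpy arrays alike.
y = pwr_y(x=2.0, a=3.0, b=0.5, e=0.1)
print(y)  # 3.0 * 2.0**0.5 + 0.1, about 4.343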
def file_update_projects(file_id):
    """Page that allows users to interact with a single TMC file"""
    this_file = TMCFile.query.filter_by(uid=file_id).first()
    project_form = AssignProjectsToFile()

    if project_form.validate_on_submit():
        data = dict(
            (key, request.form.getlist(key)
             if len(request.form.getlist(key)) > 1
             else request.form.getlist(key)[0])
            for key in request.form.keys()
        )

        pid_list = []
        for k in data:
            if "project_" in k:
                pid_list.append(int(k.replace("project_", "")))

        # Make sure all selected projects are associated
        for pid in pid_list:
            project = Project.query.filter_by(uid=pid).first()
            if project not in this_file.project_ids:
                this_file.project_ids.append(project)

        # Remove association with unchecked projects
        for project in this_file.project_ids:
            if project.uid not in pid_list:
                this_file.project_ids.remove(project)

        db.session.commit()
        flash("Updated project associations", "success")

    return redirect(url_for("single_file_bp.single_file", file_id=file_id))
172d0caccb3e7ba39282cb6860fb80fca0a050bb
3,648,401
import numpy as np


def find_optimal_cut(edge, edge1, left, right):
    """Computes the index corresponding to the optimal cut such that applying
    the function compute_blocks() to the sub-blocks defined by the cut reduces
    the cost function compared to the case when compute_blocks() is applied to
    the whole matrix. If a cutting point cannot be found, the algorithm returns
    the result from compute_blocks().

    Parameters
    ----------
    edge : ndarray
        sparsity pattern profile of the matrix
    edge1 : ndarray
        conjugated sparsity pattern profile of the matrix
    left : int
        size of the leftmost diagonal block
    right : int
        size of the rightmost diagonal block

    Returns
    -------
    blocks : list
        optimal block sizes
    sep : int
        index of the optimal cut
    right_block, left_block : int
        sizes of the blocks adjacent to the cut
    """
    unique_indices = np.arange(left, len(edge) - right + 1)

    blocks = []
    seps = []
    sizes = []
    metric = []
    size = len(edge)

    for j1, item1 in enumerate(unique_indices):
        seps.append(item1)
        item2 = size - item1

        edge_1 = edge[:item1]
        edge_2 = (edge1 - np.arange(len(edge1)))[item2:] + np.arange(item1)
        edge_3 = edge1[:item2]
        edge_4 = (edge - np.arange(len(edge)))[item1:] + np.arange(item2)

        block1 = compute_blocks(left, (edge1 - np.arange(len(edge)))[item2],
                                edge_1, edge_2)
        block2 = compute_blocks(right, (edge - np.arange(len(edge1)))[item1],
                                edge_3, edge_4)

        block = block1 + block2[::-1]
        blocks.append(block)
        metric.append(np.sum(np.array(block) ** 3))
        sizes.append((block1[-1], block2[-1]))

    if len(metric) == 0:
        return [left, right], np.nan, 0, 0
    else:
        best = np.argmin(np.array(metric))
        blocks = blocks[best]
        blocks = [item for item in blocks if item != 0]
        sep = seps[best]
        right_block, left_block = sizes[best]
        return blocks, sep, right_block, left_block
63120c904a71b6dc40d75df6db19a5bdb619f9e2
3,648,402
import networkx as nx  # assumption: nx is the standard networkx alias used below


def seq_to_networkx(header, seq, constr=None):
    """Convert sequence tuples to networkx graphs."""
    graph = nx.Graph()
    graph.graph['id'] = header.split()[0]
    graph.graph['header'] = header
    for id, character in enumerate(seq):
        graph.add_node(id, label=character, position=id)
        if id > 0:
            graph.add_edge(id - 1, id, label='-')
    assert len(graph) > 0, 'ERROR: generated empty graph. Perhaps wrong format?'
    graph.graph['sequence'] = seq
    if constr is not None:
        graph.graph['constraint'] = constr
    return graph
7c44b3aa0fb30637eda9bc7e960db1e3d65e7907
3,648,403
def add_vertex_edge_for_load_support(network, sup_dic, load_dic, bars_len, key_removed_dic):
    """
    Post-Processing Function:
    Adds vertices and edges in accordance with supports and loads.
    Returns the cured network.
    """
    if not key_removed_dic:
        load_sup_dic = merge_two_dicts(sup_dic, load_dic)
    else:
        load_dic_2 = load_dic.copy()
        for key in key_removed_dic:
            load_dic_2.pop(key)
            load_dic_2 = merge_two_dicts(load_dic_2, key_removed_dic[key])
        load_sup_dic = merge_two_dicts(sup_dic, load_dic_2)

    # define arbitrary r to be added to get leaf vertex coordinates
    max_len = max(bars_len)
    r = max_len / 3.0

    # make a polygon and polyline from outer vertices of network
    points = network.to_points()
    cycles = network_find_cycles(network)
    mesh = Mesh.from_vertices_and_faces(points, cycles)
    if 0 in mesh.face and len(mesh.face) > 1:
        mesh.delete_face(0)
    if len(mesh.face) == 1:
        ver_lis = [key for key in mesh.vertices()]
    else:
        ver_lis = mesh.vertices_on_boundary(ordered=True)

    ver_lis_plyln = ver_lis[:]
    ver_lis_plyln.append(ver_lis[0])
    pt_lis_plygn = [mesh.vertex_coordinates(key) for key in ver_lis]
    pt_lis_plyln = [mesh.vertex_coordinates(key) for key in ver_lis_plyln]
    plygn = Polygon(pt_lis_plygn)
    plyln = Polyline(pt_lis_plyln)

    # add leaf vertices
    for key in load_sup_dic:
        if load_sup_dic[key][0] != 0.0:
            pt_1 = add_vectors(network.node_coordinates(key), (+r, 0.0, 0.0))
            plyln_bln = is_point_on_polyline(pt_1, plyln.points, tol=0.001)
            plygn_bln = is_point_in_polygon_xy(pt_1, plygn.points)
            if plyln_bln or plygn_bln:
                pt_1 = add_vectors(network.node_coordinates(key), (-r, 0.0, 0.0))
            key_2 = network.add_node(x=np.asscalar(pt_1[0]), y=pt_1[1], z=0.0)
            network.add_edge(key, key_2)
        if load_sup_dic[key][1] != 0.0:
            pt_2 = add_vectors(network.node_coordinates(key), (0.0, +r, 0.0))
            plyln_bln = is_point_on_polyline(pt_2, plyln.points, tol=0.001)
            plygn_bln = is_point_in_polygon_xy(pt_2, plygn.points)
            if plyln_bln or plygn_bln:
                pt_2 = add_vectors(network.node_coordinates(key), (0.0, -r, 0.0))
            key_2 = network.add_node(x=pt_2[0], y=np.asscalar(pt_2[1]), z=0.0)
            network.add_edge(key, key_2)

    return network, plygn, plyln
ce52cfac5e3bb58b31cfc1b2e243c435c5926d0f
3,648,404
def mimicry(span):
    """Enrich the match."""
    data = {'mimicry': span.lower_}
    sexes = set()
    for token in span:
        if token.ent_type_ in {'female', 'male'}:
            if token.lower_ in sexes:
                return {}
            sexes.add(token.lower_)
    return data
724d09156e97961049cb29d9f3c1f02ab5af48b0
3,648,405
def LeftBinarySearch(nums, target):
    """
    :type nums: List[int]
    :type target: int
    :rtype: int
    """
    low = 0
    high = len(nums)
    while low < high:
        mid = (low + high) // 2
        if nums[mid] < target:
            low = mid + 1
        else:
            high = mid
    assert low == high
    if low == len(nums) or nums[low] != target:
        return -1
    return low
d08f72e1563ee91e9ca6c9cf95db4c794312aa59
3,648,406
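A quick sanity check of LeftBinarySearch above; it returns the index of the leftmost occurrence of the target in a sorted list, or -1 when absent:

assert LeftBinarySearch([1, 2, 2, 2, 5], 2) == 1   # leftmost 2 sits at index 1
assert LeftBinarySearch([1, 2, 2, 2, 5], 4) == -1  # absent values yield -1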
def backup_file_content(jwd, filepath, content):
    """Backs up a string in the .jak folder.

    TODO Needs test
    """
    backup_filepath = create_backup_filepath(jwd=jwd, filepath=filepath)
    return create_or_overwrite_file(filepath=backup_filepath, content=content)
8d661ca8fbf30a5d528cb79c01a6c74767084535
3,648,407
async def security_rule_get(
    hub, ctx, security_rule, security_group, resource_group, **kwargs
):
    """
    .. versionadded:: 1.0.0

    Get a security rule within a specified network security group.

    :param security_rule: The name of the security rule to query.

    :param security_group: The network security group containing the security rule.

    :param resource_group: The resource group name assigned to the network security group.

    CLI Example:

    .. code-block:: bash

        azurerm.network.network_security_group.security_rule_get testrule1 testnsg testgroup

    """
    netconn = await hub.exec.azurerm.utils.get_client(ctx, "network", **kwargs)
    try:
        secrule = netconn.security_rules.get(
            network_security_group_name=security_group,
            resource_group_name=resource_group,
            security_rule_name=security_rule,
        )
        result = secrule.as_dict()
    except CloudError as exc:
        await hub.exec.azurerm.utils.log_cloud_error("network", str(exc), **kwargs)
        result = {"error": str(exc)}

    return result
34fb0cc8c2399f3749970b1061e2d5d209b11750
3,648,408
def create_centroid_pos(Direction, Spacing, Size, position):
    """
    :param Direction, Spacing, Size: from sitk raw.GetDirection(), GetSpacing(), GetSize()
    :param position: [24, 3]
    :return: list of json-ready dicts, one per labelled centroid
    """
    direction = np.round(list(Direction))
    direc0 = direction[0:7:3]
    direc1 = direction[1:8:3]
    direc2 = direction[2:9:3]
    dim0char = Dic[(np.argwhere((np.abs(direc0)) == 1))[0][0]]
    dim1char = Dic[(np.argwhere((np.abs(direc1)) == 1))[0][0]]
    dim2char = Dic[(np.argwhere((np.abs(direc2)) == 1))[0][0]]
    resolution = Spacing
    w, h, c = Size[0], Size[1], Size[2]
    jsonlist = []
    for i in range(24):
        dim0, dim1, dim2 = position[i:i + 1, 0], position[i:i + 1, 1], position[i:i + 1, 2]
        if dim0 >= 0:
            label = i + 1
            if np.sum(direc0) == -1:
                if dim0char == 'X':
                    Jsondim0 = dim0 * resolution[0]
                else:
                    Jsondim0 = (w - dim0) * resolution[0]
            else:
                if dim0char == 'X':
                    Jsondim0 = (w - dim0) * resolution[0]
                else:
                    Jsondim0 = dim0 * resolution[0]
            if np.sum(direc1) == -1:
                if dim1char == 'X':
                    Jsondim1 = dim1 * resolution[1]
                else:
                    Jsondim1 = (h - dim1) * resolution[1]
            else:
                if dim1char == 'X':
                    Jsondim1 = (h - dim1) * resolution[1]
                else:
                    Jsondim1 = dim1 * resolution[1]
            if np.sum(direc2) == -1:
                if dim2char == 'X':
                    Jsondim2 = dim2 * resolution[2]
                else:
                    Jsondim2 = (c - dim2) * resolution[2]
            else:
                if dim2char == 'X':
                    Jsondim2 = (c - dim2) * resolution[2]
                else:
                    Jsondim2 = dim2 * resolution[2]
            jsonlist.append({dim0char: Jsondim0, dim1char: Jsondim1,
                             dim2char: Jsondim2, 'label': label})
    return jsonlist
67f252a237f294bdf738bf0b5e9a89aad51201d7
3,648,409
from sklearn.model_selection import GroupKFold


def group_split_data_cv(df, cv=5, split=0):
    """
    Args:
        cv: number of cv folds
        split: index of the cv fold to return

    Note that GroupKFold is not random.
    """
    splitter = GroupKFold(n_splits=cv)
    split_generator = splitter.split(df, groups=df['arpnum'])
    for k, (train_idx, test_idx) in enumerate(split_generator):
        if k == split:
            return df.iloc[train_idx], df.iloc[test_idx]
4d2fb6c62bdd313aa9b16d52b637adbfd1adc654
3,648,410
def encode(valeur, base):
    """
    int * int --> String
    Assumes valeur is between 0 and 255; maximum supported base is 16.
    Returns valeur written in the given base, padded to 8 digits.
    """
    chaine = ""
    if valeur > 255 or valeur < 0:
        return ""
    for n in range(1, 9):
        calcul = valeur % base
        if calcul > 9:
            # Map 10..15 to their hexadecimal letters. The original chain of
            # plain "if" statements let the final "else" overwrite A-E.
            bit = "ABCDEF"[calcul - 10]
        else:
            bit = calcul
        chaine = str(bit) + chaine
        valeur = valeur // base
    return chaine
c5fe7d129ab19d1f77ac9d5160f5d714a796c0a0
3,648,411
def main(request):
    """
    Main admin page. Displays a paginated list of files in the configured
    source directory (sorted by most recently modified) to be previewed,
    published, or prepared for preview/publish.
    """
    # get sorted archive list for this user
    try:
        archives = request.user.archivist.sorted_archives()
    except ObjectDoesNotExist:
        # i.e. no user -> archivist association
        if request.user.is_superuser:
            archives = Archive.objects.all()
        else:
            archives = []

    # get current tab if set in session; default to first tab
    current_tab = request.session.get('active_admin_tab', 0)

    # files for publication now loaded in jquery ui tab via ajax

    # get the 10 most recent task results to display status
    recent_tasks = TaskResult.objects.order_by('-created')[:10]

    # absolute path to login, for use in javascript if timeout occurs
    login_url = request.build_absolute_uri(settings.LOGIN_URL)

    return render(request, 'fa_admin/index.html', {
        'archives': archives,
        'current_tab': current_tab,
        'login_url': login_url,
        'task_results': recent_tasks})
9f10a3546dbbd209b8d91e812c4190c3498b1c03
3,648,412
def parse_char(char, invert=False):
    """Return symbols depending on the binary input

    Keyword arguments:
    char -- binary integer streamed into the function
    invert -- boolean to invert returned symbols
    """
    if not invert:
        if char == 0:
            return '.'
        elif char == 1:
            return '@'
    if char == 0:
        return '@'
    elif char == 1:
        return '.'
38c0d1c150a1c8e8f7d2f3d1bde08ec3e5ceb65b
3,648,413
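A short usage sketch for parse_char above, rendering one row of bits both ways:

row = [0, 1, 1, 0]
print("".join(parse_char(c) for c in row))               # ".@@."
print("".join(parse_char(c, invert=True) for c in row))  # "@..@"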
import subprocess


def run_blast(database, program, filestore, file_uuid, sequence, options):
    """
    Perform a BLAST search on the given database using the given query

    Args:
        database: The database to search (full path).
        program: The program to use (e.g. BLASTN, TBLASTN, BLASTX).
        filestore: The directory to store the XML output.
        file_uuid: A unique identifier for the filename.
        sequence: The sequence to BLAST.
        options: Any extra options to pass to the BLAST executable.

    Returns:
        A tuple containing the stdout and stderr of the program.

    # Test:
    >>> seq = ">test\\nTTCATAATTAATTTTTTATATATATATTATATTATAATATTAATTTATATTATAAAAATAATATTTATTATTAAAATATT\\nTATTCTCCTTTCGGGGTTCCGGCTCCCGTGGCCGGGCCCCGGAATTATTAATTAATAATAAATTATTATTAATAATTATT\\n>test 2\\nAATGGTATTAGATTCAGTGAATTTGGTACAAGACGTCGTAGATCTCTGAAGGCTCAAGATCTAATTATGCAAGGAATCATGAAAGCTGTGAACGGTAACCCAGACAGAAACAAATCGCTATTATTAGGCACATCAAATATTTTATTTGCCAAGAAATATGGAGTCAAGCCAATCGGTACTGTGGCTCACGAGTGGGTTATGGGAGTCGCTTCTATTAGTGAAGATTATTTGCATGCCAATAAAAATGCAATGGATTGTTGGATCAATACTTTTGGTGCAAAAAATGCTGGTTTAGCATTAACGGATACTTTTGGAACTGATGACTTTTTAAAATCATTCCGTCCACCATATTCTGATGCTTACGTCGGTGTTAGACAAGATTCTGGAGACCCAGTTGAGTATACCAAAAAGATTTCCCACCATTACCATGACGTGTTGAAATTGCCTAAATTCTCGAAGATTATCTGTTATTCCGATTCTTTGAACGTCGAAAAGGCAATAACTTACTCCCATGCAGCTAAAGAGAATG"
    >>> blast('/Users/work/Projects/pyBlast/db/yeast.nt', '/Users/work/Projects/pyBlast/bin/blastn', '/Users/work/Projects/pyBlast/store/', seq, {u'-evalue': 10.0, u'-strand': u'both'})
    >>> seq = ">test\\nTTC"
    >>> blast('/Users/work/Projects/pyBlast/db/yeast.nt', '/Users/work/Projects/pyBlast/bin/blastn', '/Users/work/Projects/pyBlast/store/', seq, {u'-evalue': 10.0, u'-strand': u'both'})
    """
    query = [program, '-db', database, '-outfmt', '5',
             '-query', '-',
             '-out', "{0}{1}.xml".format(filestore, file_uuid),
             '-max_target_seqs', '50']
    exclude = [
        '-db',
        '-query',
        '-out',
        '-subject',
        '-html',
        '-gilist',
        '-negative_gilist',
        '-entrez_query',
        '-remote',
        '-outfmt',
        '-num_threads',
        '-import_search_strategy',
        '-export_search_strategy',
        '-window_masker_db',
        '-index_name',
        '-use_index',
    ]
    extra = parse_extra_options(options, exclude)
    query.extend(extra)
    p = subprocess.Popen(query, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, bufsize=-1)
    stdout, stderr = p.communicate(sequence)
    return (stdout, stderr)
f3358f90b6a8e3bed2138b88f6a28634142fe3ac
3,648,414
def get_transformer_dim(transformer_name='affine'):
    """ Returns the size of parametrization for a given transformer """
    lookup = {'affine': 6,
              'affinediffeo': 6,
              'homografy': 9,
              'CPAB': load_basis()['d'],
              'TPS': 32
              }
    assert (transformer_name in lookup), 'Transformer not found, choose between: ' \
        + ', '.join([k for k in lookup.keys()])
    return lookup[transformer_name]
8e61b2e135c2f5933955082b4d951ff2f88283b7
3,648,415
def ListVfses(client_urns):
    """Lists all known paths for a list of clients.

    Args:
        client_urns: A list of `ClientURN` instances.

    Returns:
        A list of `RDFURN` instances corresponding to VFS paths of given clients.
    """
    vfs = set()

    cur = set()
    for client_urn in client_urns:
        cur.update([
            client_urn.Add("fs/os"),
            client_urn.Add("fs/tsk"),
            client_urn.Add("temp"),
            client_urn.Add("registry"),
        ])

    while cur:
        nxt = []
        for _, children in aff4.FACTORY.MultiListChildren(cur):
            nxt.extend(children)
        vfs.update(nxt)
        cur = nxt

    return vfs
20bf77875d099106e5190d02c0c62d38eb1a6590
3,648,416
def delete_product(productId):
    """Deletes product"""
    response = product2.delete_product(productId)
    return response
394848c8b9c8803140744b8a1a1eb6995cd04bf7
3,648,417
def compute_face_normals(points, trilist):
    """
    Compute per-face normals of the vertices given a list of faces.

    Parameters
    ----------
    points : (N, 3) float32/float64 ndarray
        The list of points to compute normals for.
    trilist : (M, 3) int16/int32/int64 ndarray
        The list of faces (triangle list).

    Returns
    -------
    face_normal : (M, 3) float32/float64 ndarray
        The normal per face.
    """
    pt = points[trilist]
    a, b, c = pt[:, 0], pt[:, 1], pt[:, 2]
    norm = np.cross(b - a, c - a)
    return _normalize(norm)
4bbe9f7311f6125fd73b028c984e09ee4f124791
3,648,418
def get_deletion_confirmation(poll):
    """Get the confirmation keyboard for poll deletion."""
    delete_payload = f"{CallbackType.delete.value}:{poll.id}:0"
    delete_all_payload = f"{CallbackType.delete_poll_with_messages.value}:{poll.id}:0"
    locale = poll.user.locale
    buttons = [
        [
            InlineKeyboardButton(
                i18n.t("keyboard.permanently_delete", locale=locale),
                callback_data=delete_payload,
            )
        ],
        [
            InlineKeyboardButton(
                i18n.t("keyboard.permanently_delete_with_messages", locale=locale),
                callback_data=delete_all_payload,
            )
        ],
        [get_back_to_management_button(poll)],
    ]

    return InlineKeyboardMarkup(buttons)
6d741aa13d3d5234c53115b8b74c353fdce9e87e
3,648,419
def ngram_tokenizer(lines, ngram_len=DEFAULT_NGRAM_LEN, template=False):
    """
    Return an iterable of ngram Tokens of ngram length `ngram_len` computed
    from the `lines` iterable of UNICODE strings. Treat the `lines` strings
    as templated if `template` is True.
    """
    if not lines:
        return

    ngrams = unigram_tokenizer(lines, template)
    ngrams = tokens_ngram_processor(ngrams, ngram_len)
    ngrams = ngram_to_token(ngrams)
    return ngrams
fb7f079ddee8bac10b2ae9efd306a482042b8a0f
3,648,420
def list_datasets(service, project_id):
    """Lists BigQuery datasets.

    Args:
        service: BigQuery service object that is authenticated.
            Example: service = build('bigquery', 'v2', http=http)
        project_id: string, Name of Google project

    Returns:
        List containing dataset names
    """
    datasets = service.datasets()
    response = datasets.list(projectId=project_id).execute()

    dataset_list = []
    for field in response['datasets']:
        dataset_list.append(field['datasetReference']['datasetId'])
    return dataset_list
2712e6a99427ce3b141e7948bba36e8e724f82bc
3,648,421
def tors(universe, seg, i):
    """Calculation of nucleic backbone dihedral angles.

    The dihedral angles are alpha, beta, gamma, delta, epsilon, zeta, chi.
    The dihedral is computed based on position of atoms for resid `i`.

    Parameters
    ----------
    universe : Universe
        :class:`~MDAnalysis.core.universe.Universe` containing the trajectory
    seg : str
        segment id for base
    i : int
        resid of the first base

    Returns
    -------
    [alpha, beta, gamma, delta, epsilon, zeta, chi] : list of floats
        torsion angles in degrees

    Notes
    -----
    If failure occurs be sure to check the segment identification.

    .. versionadded:: 0.7.6
    """
    a = universe.select_atoms(" atom {0!s} {1!s} O3\' ".format(seg, i - 1),
                              " atom {0!s} {1!s} P ".format(seg, i),
                              " atom {0!s} {1!s} O5\' ".format(seg, i),
                              " atom {0!s} {1!s} C5\' ".format(seg, i))
    b = universe.select_atoms(" atom {0!s} {1!s} P ".format(seg, i),
                              " atom {0!s} {1!s} O5\' ".format(seg, i),
                              " atom {0!s} {1!s} C5\' ".format(seg, i),
                              " atom {0!s} {1!s} C4\' ".format(seg, i))
    g = universe.select_atoms(" atom {0!s} {1!s} O5\' ".format(seg, i),
                              " atom {0!s} {1!s} C5\' ".format(seg, i),
                              " atom {0!s} {1!s} C4\' ".format(seg, i),
                              " atom {0!s} {1!s} C3\' ".format(seg, i))
    d = universe.select_atoms(" atom {0!s} {1!s} C5\' ".format(seg, i),
                              " atom {0!s} {1!s} C4\' ".format(seg, i),
                              " atom {0!s} {1!s} C3\' ".format(seg, i),
                              " atom {0!s} {1!s} O3\' ".format(seg, i))
    e = universe.select_atoms(" atom {0!s} {1!s} C4\' ".format(seg, i),
                              " atom {0!s} {1!s} C3\' ".format(seg, i),
                              " atom {0!s} {1!s} O3\' ".format(seg, i),
                              " atom {0!s} {1!s} P ".format(seg, i + 1))
    z = universe.select_atoms(" atom {0!s} {1!s} C3\' ".format(seg, i),
                              " atom {0!s} {1!s} O3\' ".format(seg, i),
                              " atom {0!s} {1!s} P ".format(seg, i + 1),
                              " atom {0!s} {1!s} O5\' ".format(seg, i + 1))
    c = universe.select_atoms(" atom {0!s} {1!s} O4\' ".format(seg, i),
                              " atom {0!s} {1!s} C1\' ".format(seg, i),
                              " atom {0!s} {1!s} N9 ".format(seg, i),
                              " atom {0!s} {1!s} C4 ".format(seg, i))
    if len(c) < 4:
        c = universe.select_atoms(" atom {0!s} {1!s} O4\' ".format(seg, i),
                                  " atom {0!s} {1!s} C1\' ".format(seg, i),
                                  " atom {0!s} {1!s} N1 ".format(seg, i),
                                  " atom {0!s} {1!s} C2 ".format(seg, i))

    alpha = a.dihedral.value() % 360
    beta = b.dihedral.value() % 360
    gamma = g.dihedral.value() % 360
    delta = d.dihedral.value() % 360
    epsilon = e.dihedral.value() % 360
    zeta = z.dihedral.value() % 360
    chi = c.dihedral.value() % 360
    return [alpha, beta, gamma, delta, epsilon, zeta, chi]
1efcac83c7ec6689e33830daf011bead5199e5dd
3,648,422
import numpy as np


def get_metric(metric, midi_notes, Fe, nfft, nz=1e4, eps=10, **kwargs):
    """
    Returns the optimal transport loss matrix from a list of midi notes
    (integer indexes).
    """
    nbnotes = len(midi_notes)
    # integer division so the array shape stays an int under Python 3
    res = np.zeros((nfft // 2, nbnotes))
    f = np.fft.fftfreq(nfft, 1.0 / Fe)[:nfft // 2]
    f_note = [2.0 ** ((n - 60) * 1. / 12) * 440 for n in midi_notes]
    for i in range(nbnotes):
        m = np.zeros((nfft // 2,))
        if metric == 'square':
            m = (f_note[i] - f) ** 2
        elif metric == 'psquare':
            if midi_notes[i] == 0:
                m[:] = nz
            else:
                nmax = int(f.max() / f_note[i])
                m[:] = np.inf
                for j in range(1, nmax + 1):
                    m = np.minimum(m, (j * f_note[i] - f) ** 2 + j * eps)
        res[:, i] = m
    return res, f
f21717f239431fac2e37e6b59abfdcb6b964aa0c
3,648,423
def octave(track, note, dur):
    """Generate the pair of half notes ("blanches"), one octave apart."""
    track.append(Message('note_on', note=note, velocity=100, time=0))
    track.append(Message('note_on', note=note + 12, velocity=100, time=0))
    track.append(Message('note_off', note=note, velocity=64, time=dur))
    track.append(Message('note_off', note=note + 12, velocity=64, time=0))
    return track
c94391677849b1aef58df1a08ade0bae3fe691f5
3,648,424
def solveTrajectoryPickle(dir_path, file_name, only_plot=False, solver='original', **kwargs):
    """ Rerun the trajectory solver on the given trajectory pickle file. """

    # Load the pickled trajectory
    traj_p = loadPickle(dir_path, file_name)

    # Run the PyLIG trajectory solver
    if solver == 'original':

        # Given the max time offset from the pickle file and input, use the larger one of the two
        max_toffset = traj_p.max_toffset
        if "max_toffset" in kwargs:
            if (kwargs["max_toffset"] is not None) and (traj_p.max_toffset is not None):
                max_toffset = max(traj_p.max_toffset, kwargs["max_toffset"])
            # Remove the max time offset from the list of keyword arguments
            kwargs.pop("max_toffset", None)

        # Preserve the trajectory ID
        if hasattr(traj_p, "traj_id"):
            traj_id = traj_p.traj_id
        else:
            traj_id = None

        # Reinitialize the trajectory solver
        meastype = 2
        traj = Trajectory(traj_p.jdt_ref, output_dir=dir_path, max_toffset=max_toffset,
                          meastype=meastype, traj_id=traj_id, **kwargs)

        # Fill the observations
        for obs in traj_p.observations:
            traj.infillWithObs(obs, meastype=meastype)

    elif solver == 'gural':

        # Init the Gural solver
        traj = GuralTrajectory(len(traj_p.observations), traj_p.jdt_ref, velmodel=3,
                               max_toffset=traj_p.max_toffset, meastype=2,
                               output_dir=dir_path, verbose=True)

        # Fill the observations
        for obs in traj_p.observations:
            traj.infillTrajectory(obs.azim_data, obs.elev_data, obs.time_data,
                                  obs.lat, obs.lon, obs.ele)

    else:
        print('Unrecognized solver:', solver)

    if only_plot:

        # Set saving results
        traj_p.save_results = True

        # Override plotting options with given options
        traj_p.plot_all_spatial_residuals = kwargs["plot_all_spatial_residuals"]
        traj_p.plot_file_type = kwargs["plot_file_type"]

        # Show the plots
        traj_p.savePlots(dir_path, traj_p.file_name, show_plots=kwargs["show_plots"])

    # Recompute the trajectory
    else:
        # Run the trajectory solver
        traj = traj.run()

    return traj
a5b5dca042906e86eb153c8889466bff983af243
3,648,425
import scipy.io as scio  # assumption: scio is the scipy.io alias used below


def load_data(path):
    """
    Read the raw EEG data from a .mat file.

    :param path: path to the .mat file
    :return: X (trials), labels
    """
    data = scio.loadmat(path)
    labels = data['categoryLabels'].transpose(1, 0)
    X = data['X_3D'].transpose(2, 1, 0)
    return X, labels
69d540529b93705b3fb3a34a607da469825185f5
3,648,426
import numpy as np


def distance_along_glacier(nx, map_dx):
    """Calculates the distance along the glacier in km.

    Parameters
    ----------
    nx : int
        number of grid points
    map_dx : int
        grid point spacing

    Returns
    -------
    ndarray
        distance along the glacier in km.
    """
    return np.linspace(0, nx, nx) * map_dx * 1e-3
58acc7f48b0f901b1c3e800ea6e98046805f855a
3,648,427
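A short sketch of calling distance_along_glacier above; note the grid is returned in kilometres:

x = distance_along_glacier(nx=200, map_dx=100)  # 200 grid points, 100 m spacing
print(x[0], x[-1])  # 0.0 and 20.0 (km), since 200 * 100 m = 20 km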
def make_postdict_to_fetch_token(token_endpoint: str,
                                 grant_type: str,
                                 code: str,
                                 client_id: str,
                                 client_secret: str,
                                 redirect_uri: str) -> dict:
    """POST dictionary is the API of the requests library"""
    return {'url': token_endpoint,
            'data': {
                'grant_type': grant_type,
                'code': code,
                'client_id': client_id,
                'client_secret': client_secret,
                'redirect_uri': redirect_uri,
            },
            'headers': {
                'Content-Type': 'application/x-www-form-urlencoded',
            }}
f366fc140c70d094ff99b28a369ac96b4c2a8b49
3,648,428
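Because the returned dict mirrors the keyword API of requests.post, it can be splatted straight into the call. A minimal sketch; every endpoint and credential below is a placeholder:

import requests

post_kwargs = make_postdict_to_fetch_token(
    token_endpoint='https://auth.example.com/token',
    grant_type='authorization_code',
    code='AUTH_CODE',
    client_id='CLIENT_ID',
    client_secret='CLIENT_SECRET',
    redirect_uri='https://app.example.com/callback',
)
response = requests.post(**post_kwargs)  # keys map to requests.post(url=..., data=..., headers=...)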
def _haxe_std_lib(ctx):
    """
    _haxe_std_lib implementation.

    Args:
        ctx: Bazel context.
    """
    toolchain = ctx.toolchains["@rules_haxe//:toolchain_type"]

    build_source_file = ctx.actions.declare_file("StdBuild.hx")
    toolchain.create_std_build(
        ctx,
        ctx.attr.target,
        build_source_file,
    )

    hxml = create_hxml_map(ctx, toolchain, for_std_build = True)
    hxml["classpaths"].append(build_source_file.dirname)
    hxml["args"].append("--dce no")

    # Handle the case where we're building in an external directory.
    if hxml["external_dir"] != "":
        ext_idx = build_source_file.path.find("external/")
        hxml["external_dir"] = build_source_file.path[ext_idx:-11]

    build_file = ctx.actions.declare_file("{}-std-build.hxml".format(ctx.attr.name))
    create_build_hxml(ctx, toolchain, hxml, build_file, suffix = "-intermediate")
    intermediate = ctx.actions.declare_directory(hxml["output_dir"])

    # Do the compilation.
    runfiles = [build_source_file] + find_direct_sources(ctx) + find_direct_resources(ctx)
    toolchain.compile(
        ctx,
        hxml = build_file,
        runfiles = runfiles,
        out = intermediate,
    )

    # Post process the output file.
    output = ctx.actions.declare_file(hxml["output_dir"].replace("-intermediate", ""))
    output_file = ctx.actions.declare_file("{}/{}".format(ctx.attr.name, hxml["output_file"])) if "output_file" in hxml else None
    if hxml["target"] == "java":
        toolchain.create_final_jar(
            ctx,
            find_direct_sources(ctx),
            intermediate,
            output,
            hxml["output_file"],
            False,
            output_file = output_file,
        )
    else:
        inputs = [intermediate]
        hxcpp_include_dir = None
        if hxml["target"] == "cpp":
            hxcpp_include_dir = ctx.actions.declare_directory("hxcpp_includes")
            toolchain.copy_cpp_includes(ctx, hxcpp_include_dir)
            inputs.append(hxcpp_include_dir)

        cmd = "mkdir -p {} && cp -r {}/* {}".format(output.path, intermediate.path, output.path)
        if hxcpp_include_dir != None:
            cmd += " && cp -r {}/* {}/{}/include".format(hxcpp_include_dir.path, output.path, hxml["name"])

        ctx.actions.run_shell(
            outputs = [output, output_file],
            inputs = inputs,
            command = cmd,
            use_default_shell_env = True,
        )

    return calc_provider_response(ctx, toolchain, hxml, output, output_file = output_file, library_name = "StdBuild")
7a29757f7fa9fdcd1942b73221633a0eb7afc2f8
3,648,429
import os


def detect_vswhere_path():
    """
    Attempt to detect the location of vswhere, which is used to query the
    installed visual studio tools (version 2017+)

    :return: The validated path to vswhere
    """
    # Find VS Where
    path_program_files_x86 = os.environ['ProgramFiles(x86)']
    if not path_program_files_x86 or not os.path.isdir(path_program_files_x86):
        raise Errors.WafError("Unable to determine folder 'Program Files (x86)'")

    path_visual_studio_installer = os.path.normpath(
        os.path.join(path_program_files_x86, 'Microsoft Visual Studio\\Installer\\'))
    if not os.path.isdir(path_visual_studio_installer):
        raise Errors.WafError("Unable to locate Visual Studio Installer.")

    path_vswhere = os.path.normpath(os.path.join(path_visual_studio_installer, 'vswhere.exe'))
    if not os.path.isfile(path_vswhere):
        raise Errors.WafError("Unable to locate 'vswhere' in path '{}'.".format(path_visual_studio_installer))

    return path_vswhere
f533e38bb1bddfaeaaf764cfb986aeb70b9e568d
3,648,430
def spread_match_network(expr_df_in, node_names_in):
    """
    Matches S (spreadsheet of gene expressions) and N (network).

    The function returns expr_df_out which is formed by reshuffling columns
    of expr_df_in. Also, node_names_out is formed by reshuffling
    node_names_in. The intersection of node_names_out and column names of
    expr_df_out are placed at the beginning of both lists.

    Input:
        expr_df_in: A pandas dataframe corresponding to gene expression
        node_names_in: Name of the nodes in the network

    Output:
        expr_df_out: Reorganized dataframe of gene expressions
        nodes_names_out: Reordered node names
        nodes_genes_intersect: Sorted list of shared genes
    """
    node_names_in_set = set(node_names_in)
    gene_names_in_set = set(expr_df_in.columns.values)

    nodes_genes_intersect = sorted(list(gene_names_in_set & node_names_in_set))
    nodes_minus_genes = sorted(list(node_names_in_set - gene_names_in_set))
    genes_minus_nodes = sorted(list(gene_names_in_set - node_names_in_set))

    genes_names_out = nodes_genes_intersect + genes_minus_nodes
    nodes_names_out = nodes_genes_intersect + nodes_minus_genes

    expr_df_out = expr_df_in[genes_names_out]
    return (expr_df_out, nodes_names_out, nodes_genes_intersect)
c0b78263a341d3b7682922eb9a948c21ab2e7e45
3,648,431
import io


def top_level(url, data):
    """Read top level names from compressed file."""
    sb = io.BytesIO(data)
    txt = None
    with Archive(url, sb) as archive:
        file = None
        for name in archive.names:
            if name.lower().endswith('top_level.txt'):
                file = name
                break
        if file:
            txt = archive.read(file).decode('utf-8')
    sb.close()
    return [name.replace('/', '.') for name in txt.splitlines()] if txt else []
0fe92b1d038248f5f759d19b1e27ad013b3592c2
3,648,432
import requests


def get_timeseries_data(request):
    """
    AJAX Controller for getting time series data.
    """
    return_obj = {}

    # -------------------- #
    #   VERIFIES REQUEST   #
    # -------------------- #
    if not (request.is_ajax() and request.method == "POST"):
        return_obj["error"] = "Unable to establish a secure connection."
        return JsonResponse(return_obj)

    # -------------------------- #
    #   GETS DATA FROM REQUEST   #
    # -------------------------- #
    layer_code = request.POST.get("layer_code")
    site_code = request.POST.get("site_code")
    variable_code = request.POST.get("var_code")
    site_name = request.POST.get("site_name")
    variable_name = request.POST.get("var_name")

    # ------------------------- #
    #   GETS TIME SERIES DATA   #
    # ------------------------- #
    network_id = layer_code.split(":")[0].split("-")[1]
    database_id = ":".join(layer_code.split(":")[1:])

    request_url = f"{hydroserver_url}/wof/{network_id}/{database_id}/values/"
    params = {
        "site_code": site_code,
        "variable_code": variable_code
    }

    response = requests.get(request_url, params=params)
    waterml = etree.fromstring(response.content)

    # WaterML 1.1 namespace, factored out to avoid repeating the URI
    ns = "{http://www.cuahsi.org/waterML/1.1/}"
    timeseries = waterml.find(f"{ns}timeSeries")
    no_data_value = timeseries.find(f"{ns}variable").find(f"{ns}noDataValue").text
    try:
        unit_name = timeseries.find(f"{ns}variable").find(f"{ns}unit").find(f"{ns}unitAbbreviation").text
    except:
        unit_name = None
    timeseries_data = [[
        x.get('dateTime'),
        x.text if x.text != no_data_value else None
    ] for x in timeseries.find(f"{ns}values").iter(f"{ns}value")]

    # -------------------------- #
    #   RETURNS DATA TO CLIENT   #
    # -------------------------- #
    return_obj["timeseries_data"] = timeseries_data
    return_obj["no_data_value"] = no_data_value
    return_obj["site_name"] = site_name
    return_obj["variable_name"] = variable_name
    return_obj["unit_name"] = unit_name
    return_obj["variable_code"] = variable_code
    return_obj["site_code"] = site_code
    return_obj["layer_code"] = layer_code

    return JsonResponse(return_obj)
d8bb691f99d4a993d2b8e7c7e52f079566f45a63
3,648,433
from typing import List
from typing import Tuple
import bisect


def line_col(lbreaks: List[int], pos: int) -> Tuple[int, int]:
    """
    Returns the position within a text as (line, column)-tuple based on
    a list of all line breaks, including -1 and EOF.
    """
    if not lbreaks and pos >= 0:
        return 0, pos
    if pos < 0 or pos > lbreaks[-1]:
        # one character behind EOF is still an allowed position!
        raise ValueError('Position %i outside text of length %s !'
                         % (pos, lbreaks[-1]))
    line = bisect.bisect_left(lbreaks, pos)
    column = pos - lbreaks[line - 1]
    return line, column
6b99e3b19ed1a490e4a9cc284f99e875085f819a
3,648,434
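A short sketch of line_col above, with line breaks precomputed for a small text; the -1 sentinel and trailing EOF entry follow the convention stated in the docstring:

text = "ab\ncd"
lbreaks = [-1] + [i for i, ch in enumerate(text) if ch == "\n"] + [len(text)]
print(line_col(lbreaks, 4))  # (2, 2): the 'd' sits on line 2, column 2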
from sys import stderr


def add_scrollbars_with_tags(outer, InnerType, *inner_args, **inner_kw):
    """ Wrapper around `add_scrollbars`.

    Returns tuple of InnerType instance and scroll tag. Scroll tag should be
    added to all `inner` child widgets that affect scrolling.
    """
    scrolltag = "tag_" + str(next(tags_count))
    inner = add_scrollbars(outer, InnerType, *inner_args, **inner_kw)
    inner.bindtags((scrolltag, ) + inner.bindtags())

    canvas = inner.master

    if OS == "Linux":
        def _on_mousewheel(event):
            if event.num == 4:
                canvas.yview("scroll", -1, "units")
            elif event.num == 5:
                canvas.yview("scroll", 1, "units")

        inner.bind_class(scrolltag, "<ButtonPress-4>", _on_mousewheel, '+')
        inner.bind_class(scrolltag, "<ButtonPress-5>", _on_mousewheel, '+')
    elif OS == "Windows":
        def _on_mousewheel(event):
            canvas.yview("scroll", -event.delta // 120, "units")

        inner.bind_class(scrolltag, "<MouseWheel>", _on_mousewheel, '+')
    else:
        stderr.write("add_scrollbars_with_tags: OS %s not supported" % (OS))

    return inner, scrolltag
64327326528e32cf1d40a8b3873be8ef034421aa
3,648,435
def sample_from_script(script_path, num_lines, chars_per_line):
    """Sample num_lines from a script.

    Parameters
    ----------
    script_path : str
        Path to the script
    num_lines : int
        Number of lines to sample.
    chars_per_line : int
        Number of consecutive characters considered a line.

    Returns
    -------
    lines : List
        All the sampled lines.
    """
    script = read_script(script_path)
    script = split_n_lines(script, num_chars=chars_per_line)
    # sample with replacement since some scripts are sparse.
    lines = np.random.choice(script, num_lines, replace=True)
    return lines
52e04582ec297ac512b2d2586524c7c4cb46b1d0
3,648,436
def is_valid_uuid(x):
    """Determine whether this is a valid hex-encoded uuid."""
    if not x or len(x) != 36:
        return False
    return parse_uuid(x) is not None
707618844ddb4375c855e12ca2f75966a91d7c5b
3,648,437
def wait_for_needle_list(
    loops: int,
    needle_list: list[tuple[str, tuple[int, int, int, int]]],
    sleep_range: tuple[int, int],
):
    """
    Works like vision.wait_for_needle(), except multiple needles can be
    searched for simultaneously.

    Args:
        loops: The number of tries to look for each needle in needle_list.
        needle_list: A list of needles to look for. Each item in the list
            is a 2-tuple containing:
            - The filepath to the needle.
            - The region in which to search for that needle.
        sleep_range: A 2-tuple containing the minimum and maximum number
            of miliseconds to wait after each loop.

    Returns:
        If a needle in needle_list is found, returns a 2-tuple containing
        the ltwh dimensions of the needle and the index of the needle in
        needle_list (this is so the function knows which needle was found).

        Returns False if no needles in needle_list could be found.
    """
    for _ in range(1, loops):
        for item in needle_list:
            needle, region = item
            needle_found = Vision(
                region=region, needle=needle, loop_num=1
            ).wait_for_needle(get_tuple=True)
            if needle_found is True:
                # Index by the 2-tuple itself; the needle path alone is not
                # an element of needle_list.
                return needle_found, needle_list.index(item)
        misc.sleep_rand(sleep_range[0], sleep_range[1])
    return False
4f09801f54d2f29aea18eb868c7ef44ab0532627
3,648,438
import random


def get_word():
    """Returns random word."""
    words = ['Charlie', 'Woodstock', 'Snoopy', 'Lucy', 'Linus',
             'Schroeder', 'Patty', 'Sally', 'Marcie']
    return random.choice(words).upper()
c4437edc3a1e91cd90c342eda40cfd779364d9c1
3,648,439
def is_admin(user):
    """Check if the user is administrator"""
    admin_user = current_app.config['ADMIN_USER']
    if user.email == admin_user or user.email.replace('@cern.ch', '') == admin_user:
        current_app.logger.debug('User {user} is admin'.format(user=user.email))
        return True
    return False
a4a6f796f6b8a18076f8ceda9f7ac30d809973ce
3,648,440
from datetime import datetime


def parsed_json_to_dict(parsed):
    """
    Convert parsed dict into dict with python built-in types.

    param: parsed
        parsed dict by json decoder
    """
    new_bangumi = {}
    new_bangumi['name'] = parsed['name']
    new_bangumi['start_date'] = datetime.strptime(
        parsed['start_date'], '%Y-%m-%d').date()
    if 'translation_team' in parsed:
        new_bangumi['translation_team'] = parsed['translation_team']
    else:
        new_bangumi['translation_team'] = []
    if 'total_ep' in parsed:
        new_bangumi['total_ep'] = int(parsed['total_ep'])
    else:
        new_bangumi['total_ep'] = 99
    if 'dled_ep' in parsed:
        new_bangumi['dled_ep'] = int(parsed['dled_ep'])
    else:
        new_bangumi['dled_ep'] = 0
    if 'keyword' in parsed:
        new_bangumi['keyword'] = parsed['keyword']
    else:
        new_bangumi['keyword'] = new_bangumi['name']
    # compare strings with "!=", not identity via "is not"
    new_bangumi['folder'] = parsed['folder'] if 'folder' in parsed and parsed[
        'folder'] != '' else new_bangumi['name']
    new_bangumi['offset'] = int(parsed['offset']) if 'offset' in parsed else 0
    return new_bangumi
e3bb8306e19a16c9e82d5f6e96c9b4a3707c0446
3,648,441
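A minimal call sketch for parsed_json_to_dict above; only 'name' and 'start_date' are required, everything else falls back to a default:

parsed = {'name': 'Example Show', 'start_date': '2020-04-01'}
bangumi = parsed_json_to_dict(parsed)
print(bangumi['total_ep'], bangumi['folder'])  # 99 Example Show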
import pickle


def download_osmnx_graph():  # pragma: no cover
    """Load a simple street map from Open Street Map.

    Generated from:

    .. code:: python

        >>> import osmnx as ox  # doctest:+SKIP
        >>> address = 'Holzgerlingen DE'  # doctest:+SKIP
        >>> graph = ox.graph_from_address(address, dist=500, network_type='drive')  # doctest:+SKIP
        >>> pickle.dump(graph, open('osmnx_graph.p', 'wb'))  # doctest:+SKIP

    Returns
    -------
    networkx.classes.multidigraph.MultiDiGraph
        An osmnx graph of the streets of Holzgerlingen, Germany.

    Examples
    --------
    >>> from pyvista import examples
    >>> graph = examples.download_osmnx_graph()  # doctest:+SKIP

    See :ref:`open_street_map_example` for a full example using this dataset.
    """
    try:
        import osmnx  # noqa
    except ImportError:
        raise ImportError('Install `osmnx` to use this example')
    filename, _ = _download_file('osmnx_graph.p')
    return pickle.load(open(filename, 'rb'))
51aa0fec3bdbe5197edb3fb3dd0f405be6f0f7df
3,648,442
import pandas
import matplotlib.pyplot  # needed for subplots() below
import medinetparsepy.get_min_max_dates  # needed for the date-range lookup below


def plot_shift_type_by_frequency(tidy_schedule: pandas.DataFrame) -> tuple:
    """
    Plots a bar graph of shift type frequencies.

    :param tidy_schedule: A pandas data frame containing a schedule, as
        loaded by load_tidy_schedule().
    :type tidy_schedule: pandas.DataFrame
    :return: A tuple with a figure and an axis containing a matplotlib bar graph.
    :rtype: tuple
    """
    return_data = (
        tidy_schedule
        .groupby('shift_type')
        .agg({'shift_type': 'count'})
        .query('shift_type > 0')
        .rename_axis(None)
        .sort_values(by='shift_type', ascending=False)
    )

    dates = medinetparsepy.get_min_max_dates.get_min_max_dates(tidy_schedule)

    fig, ax = matplotlib.pyplot.subplots()
    ax.bar(return_data.index, return_data['shift_type'])
    ax.set_xlabel('Shift Type')
    ax.set_ylabel('Frequency')
    ax.set_title(f'Shift Type by Frequency\nBetween {dates[0]} and {dates[1]}')

    return (fig, ax)
81fb649cd8439932bbbbf27d9690c5ab9f96e410
3,648,443
import numpy as np
from PIL import Image


def load_image(path, size=None):
    """
    Load the image from the given file-path and resize it
    to the given size if not None.

    Eg: size = (width, height)
    """
    img = Image.open(path)

    if (size is not None) and (size != ''):
        img = img.resize(size=size, resample=Image.LANCZOS)

    img = np.array(img)

    # Scale image-pixels so they fall between 0.0 and 1.0
    # img = img / 255.0

    # Convert 2-dim gray-scale array to 3-dim RGB array.
    if len(img.shape) == 2:
        img = np.repeat(img[:, :, np.newaxis], 3, axis=2)

    return np.array(img)
e770ea3447ce8a7d236c4712859707b8e3cd8248
3,648,444
import ipaddress
import wifi
# Assumption: this runs on CircuitPython, where credentials conventionally
# live in a dict named `secrets` inside a local secrets.py; the original
# `import secrets` would have pulled in the stdlib module instead.
from secrets import secrets


def call_wifi(label):
    """Wifi connect function

    Parameters
    ----------
    label : str
        Output label

    Returns
    -------
    None
    """
    try:
        # Setup wifi and connection
        print(wifi.radio.connect(secrets['ssid'], secrets['password']))
        print('ip', wifi.radio.ipv4_address)
        show_text("ip: {}".format(wifi.radio.ipv4_address), label)
        ipv4 = ipaddress.ip_address('8.8.8.8')
        ping_result = wifi.radio.ping(ipv4)
        print('ping', ping_result)
        show_text("ping: {}".format(ping_result), label)
    except Exception:
        return False
a1514ff756b5217b8f79b4f9af882a234b1ad17d
3,648,445
def load_normalized_face_landmarks():
    """
    Loads the locations of each of the 68 landmarks

    :return: (68, 2) float32 ndarray of normalized landmark coordinates
    """
    normalized_face_landmarks = np.float32([
        (0.0792396913815, 0.339223741112), (0.0829219487236, 0.456955367943),
        (0.0967927109165, 0.575648016728), (0.122141515615, 0.691921601066),
        (0.168687863544, 0.800341263616), (0.239789390707, 0.895732504778),
        (0.325662452515, 0.977068762493), (0.422318282013, 1.04329000149),
        (0.531777802068, 1.06080371126), (0.641296298053, 1.03981924107),
        (0.738105872266, 0.972268833998), (0.824444363295, 0.889624082279),
        (0.894792677532, 0.792494155836), (0.939395486253, 0.681546643421),
        (0.96111933829, 0.562238253072), (0.970579841181, 0.441758925744),
        (0.971193274221, 0.322118743967), (0.163846223133, 0.249151738053),
        (0.21780354657, 0.204255863861), (0.291299351124, 0.192367318323),
        (0.367460241458, 0.203582210627), (0.4392945113, 0.233135599851),
        (0.586445962425, 0.228141644834), (0.660152671635, 0.195923841854),
        (0.737466449096, 0.182360984545), (0.813236546239, 0.192828009114),
        (0.8707571886, 0.235293377042), (0.51534533827, 0.31863546193),
        (0.516221448289, 0.396200446263), (0.517118861835, 0.473797687758),
        (0.51816430343, 0.553157797772), (0.433701156035, 0.604054457668),
        (0.475501237769, 0.62076344024), (0.520712933176, 0.634268222208),
        (0.565874114041, 0.618796581487), (0.607054002672, 0.60157671656),
        (0.252418718401, 0.331052263829), (0.298663015648, 0.302646354002),
        (0.355749724218, 0.303020650651), (0.403718978315, 0.33867711083),
        (0.352507175597, 0.349987615384), (0.296791759886, 0.350478978225),
        (0.631326076346, 0.334136672344), (0.679073381078, 0.29645404267),
        (0.73597236153, 0.294721285802), (0.782865376271, 0.321305281656),
        (0.740312274764, 0.341849376713), (0.68499850091, 0.343734332172),
        (0.353167761422, 0.746189164237), (0.414587777921, 0.719053835073),
        (0.477677654595, 0.706835892494), (0.522732900812, 0.717092275768),
        (0.569832064287, 0.705414478982), (0.635195811927, 0.71565572516),
        (0.69951672331, 0.739419187253), (0.639447159575, 0.805236879972),
        (0.576410514055, 0.835436670169), (0.525398405766, 0.841706377792),
        (0.47641545769, 0.837505914975), (0.41379548902, 0.810045601727),
        (0.380084785646, 0.749979603086), (0.477955996282, 0.74513234612),
        (0.523389793327, 0.748924302636), (0.571057789237, 0.74332894691),
        (0.672409137852, 0.744177032192), (0.572539621444, 0.776609286626),
        (0.5240106503, 0.783370783245), (0.477561227414, 0.778476346951)])
    return normalized_face_landmarks
2dbd191371345c4382efa3573b54e281607da37c
3,648,446
import os

import numpy as np  # needed for np.loadtxt below


def vacuum_vessel(shot):
    """
    Get the coordinates of the Tore Supra / WEST vacuum vessel

    R_wall, Z_wall = vacuum_vessel(shot)

    Arguments:
    - shot: Tore Supra or WEST shot number

    Returns:
    - R_wall: radius of the vacuum chamber walls [m]
    - Z_wall: height of the vacuum chamber walls [m]

    TODO: once WEST will have started, get the final vacuum vessel coordinates
    """
    if (shot <= 0) or (not isinstance(shot, int)):
        raise ValueError('Shot number should be a positive integer')
    elif shot < 50000:
        # Tore Supra vacuum chamber profile
        wall = pw.tsmat(shot, 'APOLO;+Parametres;Paroi')
        R_wall = wall[:, 0]
        Z_wall = wall[:, 1]
    else:
        # WEST vacuum chamber profile
        # get the absolute path of the filename, in order to work even if
        # launched from another dir
        filename = os.path.dirname(__file__) + os.sep + 'WEST_vacuum_vessel.txt'
        R_wall, Z_wall = np.loadtxt(filename, skiprows=1, unpack=True)
    return R_wall, Z_wall
da22ed6ae6d61238f3bda0e1e2f4fd7ede7f7f68
3,648,447
from datetime import datetime
from shutil import copyfile


def backup_file(file):
    """Create timestamp'd backup of a file

    Args:
        file (str): filepath

    Returns:
        backupfile (str)
    """
    current_time = datetime.now()
    time_stamp = current_time.strftime("%b-%d-%y-%H.%M.%S")
    backupfile = file + '.bkp_' + time_stamp
    copyfile(file, backupfile)
    return backupfile
1c1b33028aab01b4e41ed3ef944202ecc53415df
3,648,448
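A usage sketch of backup_file above; the file name is illustrative, and the suffix follows the "%b-%d-%y-%H.%M.%S" timestamp format:

backup = backup_file("settings.conf")
print(backup)  # e.g. "settings.conf.bkp_Jan-05-24-13.45.12"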
def svn_client_mergeinfo_log_eligible(*args):
    """
    svn_client_mergeinfo_log_eligible(char path_or_url, svn_opt_revision_t peg_revision,
        char merge_source_path_or_url, svn_opt_revision_t src_peg_revision,
        svn_log_entry_receiver_t receiver, svn_boolean_t discover_changed_paths,
        apr_array_header_t revprops, svn_client_ctx_t ctx,
        apr_pool_t pool) -> svn_error_t
    """
    return _client.svn_client_mergeinfo_log_eligible(*args)
9f372556d56e0fdc88afc5b3fd35218fb46f3768
3,648,449
def share_nodes_sockets():
    """
    Create a shared node layout where the simulation and analysis ranks
    share compute nodes. Furthermore, they share sockets of the node.
    """
    shared_sockets = SummitNode()
    for i in range(10):
        shared_sockets.cpu[i] = "simulation:{}".format(i)
        shared_sockets.cpu[21 + i] = "simulation:{}".format(10 + i)
    for i in range(10):
        shared_sockets.cpu[10 + i] = "pdf_calc:{}".format(i)
        shared_sockets.cpu[21 + 10 + i] = "pdf_calc:{}".format(10 + i)
    return [shared_sockets]
d34bfb1b97e4e3b06dee54a89c084dd404c3c6ca
3,648,450
from glob import glob
import os


def imlist(img_dir, valid_exts=None, if_recursive=False):
    """
    List images under directory

    :param img_dir: directory to scan
    :param valid_exts: extension or sequence of extensions to accept
    :param if_recursive: descend into subdirectories if True
    :return: list of matching image paths
    """
    if is_str(valid_exts):
        valid_exts = [valid_exts.strip(".")]
    valid_exts = list(valid_exts) if is_seq(valid_exts) else \
        ["jpg", "jpeg", "bmp", "tif", "gif", "png"]
    images = []
    for ext in valid_exts:
        images.extend(glob(os.path.join(img_dir, "**", "*.%s" % ext),
                           recursive=if_recursive))
    return images
fe13d2fe91a90c50a767d8ad3013f50ae0559d9c
3,648,451
def _rle_decode(data):
    """
    Decodes run-length-encoded `data`.
    """
    if not data:
        return data
    new = b''
    last = b''
    for cur in data:
        if last == b'\0':
            new += last * cur
            last = b''
        else:
            new += last
            last = bytes([cur])
    return new + last
8463ff6a20b3a39df7b67013d47fe81ed6d53477
3,648,452
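To illustrate the scheme _rle_decode above implements: only NUL bytes are run-length encoded, as a \x00 byte followed by a count, while every other byte passes through untouched:

assert _rle_decode(b'A\x00\x03B') == b'A\x00\x00\x00B'  # \x00 followed by 3 expands to three NULs
assert _rle_decode(b'ABC') == b'ABC'                    # other bytes pass through unchanged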
def find_shift_between_two_models(model_1, model_2, shift_range=5, number_of_evaluations=10,
                                  rotation_angles=[0., 0., 0.], cropping_model=0,
                                  initial_guess=[0., 0., 0.], method='brute_force',
                                  full_output=False):
    """
    Find the correct shift alignment in 3D by using different optimization
    algorithms to minimise the distance between the two models.

    Args:
        :model_1(float ndarray): 3d ndarray of the fixed object
        :model_2(float ndarray): 3d ndarray of the rotatable model

    Kwargs:
        :shift_range(float): absolute value of the range in which the brute force should be applied
        :number_of_evaluations(int): number of grid points on which the brute force optimises
        :rotation_angles(list): set of euler angles for rotating model_2 before applying the shift
        :method(str): the optimisation method used to minimise the difference,
            default = brute_force, other option fmin_l_bfgs_b
        :full_output(bool): returns full output as a dictionary, default = False
    """
    def shifting(x, model_1, model_2):
        x0, x1, x2 = x
        model_2 = ndimage.interpolation.shift(model_2, shift=(x0, x1, x2), order=0, mode='wrap')
        return np.sum(np.abs(model_1 - model_2) ** 2)

    model_2 = nutcracker.utils.rotate.rotation_based_on_euler_angles(model_2, rotation_angles)

    # cropping the model (integer division so the slice indices stay ints)
    if cropping_model:
        model_1 = model_1[cropping_model // 2:-cropping_model // 2,
                          cropping_model // 2:-cropping_model // 2,
                          cropping_model // 2:-cropping_model // 2]
        model_2 = model_2[cropping_model // 2:-cropping_model // 2,
                          cropping_model // 2:-cropping_model // 2,
                          cropping_model // 2:-cropping_model // 2]

    args = (model_1, model_2)

    if method == 'brute_force':
        # set parameters
        r = slice(-float(shift_range), float(shift_range),
                  2. * shift_range / number_of_evaluations)
        ranges = [r, r, r]

        # shift retrieval brute force
        shift = optimize.brute(shifting, ranges=ranges, args=args, full_output=True,
                               finish=optimize.fmin_bfgs)
        shift = np.array(shift)

    elif method == 'fmin_l_bfgs_b':
        # parameter for fmin_l_bfgs_b
        x0 = np.array(initial_guess)

        # fmin_l_bfgs_b optimisation
        shift = optimize.fmin_l_bfgs_b(shifting, x0, args=args, approx_grad=True)
        shift = np.array(shift)

    shift_values = shift[0]

    if full_output:
        if method == 'brute_force':
            out = {'shift_values': shift[0],
                   'shift_fvalues': shift[1],
                   'shift_grid': shift[2],
                   'shift_jout': shift[3]}
        elif method == 'fmin_l_bfgs_b':
            out = {'shift_values': shift[0],
                   'shift_fvalues': shift[1]}
        return out
    else:
        return shift_values
39dea881a5a00174b178d22910b5cee6d7ce48cd
3,648,453
from typing import Optional

import requests


def get_url(
    url: str, stream: bool = False, session: Optional[requests.Session] = None
) -> requests.Response:
    """Call requests.get() on a url and return the requests.Response."""
    if not session:
        session = retry_session()
    resp = session.get(url, stream=stream)
    resp.raise_for_status()
    return resp
c056446cbb1966f79b472de2f140b9962246fd75
3,648,454
from typing import Optional


def uploadFromPath(localFilePath: str,
                   resource,
                   bucketName: str,
                   fileID: str,
                   headerArgs: Optional[dict] = None,
                   partSize: int = 50 << 20):
    """
    Uploads a file to s3, using multipart uploading if applicable

    :param str localFilePath: Path of the file to upload to s3
    :param S3.Resource resource: boto3 resource
    :param str bucketName: name of the bucket to upload to
    :param str fileID: the name of the file to upload to
    :param dict headerArgs: http headers to use when uploading - generally used for encryption purposes
    :param int partSize: max size of each part in the multipart upload, in bytes
    :return: version of the newly uploaded file
    """
    if headerArgs is None:
        headerArgs = {}

    client = resource.meta.client
    file_size, file_time = fileSizeAndTime(localFilePath)

    version = uploadFile(localFilePath, resource, bucketName, fileID, headerArgs, partSize)
    info = client.head_object(Bucket=bucketName, Key=compat_bytes(fileID),
                              VersionId=version, **headerArgs)
    size = info.get('ContentLength')

    assert size == file_size

    # Make reasonably sure that the file wasn't touched during the upload
    assert fileSizeAndTime(localFilePath) == (file_size, file_time)
    return version
ee8ca7e177ab8538fd668a42111f86503b57edc1
3,648,455
def scale_log2lin(value):
    """
    Scale value from log10 to linear scale: 10**(value/10)

    Parameters
    ----------
    value : float or array-like
        Value or array to be scaled

    Returns
    -------
    float or array-like
        Scaled value
    """
    return 10**(value / 10)
04f15a8b5a86a6e94dd6a0f657d7311d38da5dc0
3,648,456
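A quick numeric check of scale_log2lin above, using decibel-style inputs:

print(scale_log2lin(30))  # 1000.0, since 10**(30/10) = 10**3
print(scale_log2lin(0))   # 1.0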
from typing import List, Optional, Union

import numpy as np  # needed for the history arrays returned below
import torch
from torch import nn


def train(
    train_length: Union[int, TrainLength],
    model: nn.Module,
    dls: DataLoaders,
    loss_func: LossFunction,
    opt: torch.optim.Optimizer,
    sched=None,
    metric: Optional[Metric] = None,
    device=None,
    clip_grad: ClipGradOptions = None,
    callbacks: List[TrainingCallback] = None,
) -> TrainingStats:
    """
    Train `model` with the data given by `dls.train` to minimize `loss_func`.

    Args:
        train_length: if it's an int, number of training epochs; if it's a
            TrainLength subclass instance, training won't stop until
            `train_length.must_stop(...)`, which is called at the end of
            each epoch, returns `True`.
        model: module to train.
        dls: dataloaders that iterate over the training and validation data.
            If you don't want to evaluate `model` using a validation set,
            `dls.valid` can be `None`.
        loss_func: loss function to minimize. We assume that this loss
            function applies reduction over the batch, i.e., it only returns
            one value.
        opt: Pytorch optimizer.
        sched: scheduler with a method `step` that will be executed once per step.
        metric: function that receives a model, a DataLoader `dl` and a
            `metric_fn` function, computes the metric `metric_fn` for every
            batch of `dl` and returns the average.
        device: device, in Pytorch format, where the model and data should be
            placed to train and calculate metrics.
        clip_grad: if not None, the gradients of `clip_grad.params` are
            clipped to be at most `clip_grad.max_norm` right before each
            optimizer step.
        callbacks: list of callbacks that must be called every time an event
            (end of step, end of epoch, ...) occurs.

    Returns:
        statistics of the training run, like a history of the losses/metrics
        by epoch.
    """
    if isinstance(train_length, int):
        train_length = TrainLengthNEpochs(train_length)
    assert dls.train is not None
    if device is None:
        device = get_best_available_device()
    if callbacks is None:
        callbacks = []

    n_steps = 0
    n_epochs_completed = 0
    train_loss_history = []
    train_metric_history = []
    valid_metric_history = []

    while True:
        model.train()
        train_losses_epoch = None
        n_examples_epoch = 0

        for x, y, *extra_xs in dls.train:
            x, y = x.to(device), y.to(device)
            opt.zero_grad()
            preds = model(x, *extra_xs)
            loss = loss_func(preds, y)
            loss.backward()
            if clip_grad is not None:
                torch.nn.utils.clip_grad_norm_(clip_grad.params, clip_grad.max_norm)
            opt.step()
            n_steps += 1
            if sched is not None:
                sched.step()
            with torch.no_grad():
                # accumulate the loss weighted by the actual batch size, so
                # the per-epoch average is exact even with a ragged last batch
                actual_bs = x.shape[0]
                n_examples_epoch += actual_bs
                detached_loss = loss.detach()[None] * actual_bs
                train_losses_epoch = (
                    detached_loss
                    if train_losses_epoch is None
                    else torch.cat((train_losses_epoch, detached_loss))
                )
            for cb in callbacks:
                cb.on_step_end(loss, model, opt)

        model.eval()
        train_metric, valid_metric, metric_name = None, None, ''
        if metric is not None:
            metric_name = metric.name
            train_metric = metric(model, dls.train, device=device)
            train_metric_history.append(train_metric)
            if dls.valid is not None:
                valid_metric = metric(model, dls.valid, device=device)
                valid_metric_history.append(valid_metric)

        avg_train_loss = (train_losses_epoch.sum() / n_examples_epoch).item()
        train_loss_history.append(avg_train_loss)
        n_epochs_completed += 1

        epoch_stats = EpochTrainingStats(
            avg_train_loss, train_metric, valid_metric, n_epochs_completed, metric_name
        )
        for cb in callbacks:
            cb.on_epoch_end(epoch_stats, model, opt)
        if train_length.must_stop(epoch_stats):
            break

    return TrainingStats(
        np.array(train_loss_history),
        np.array(train_metric_history),
        np.array(valid_metric_history),
        n_epochs_completed,
        n_steps,
    )
ad6e4796df66a38df2140060a2150f77b8d7c525
3,648,457
def _error_to_level(error):
    """Convert a boolean error field to 'Error' or 'Info'"""
    if error:
        return 'Error'
    else:
        return 'Info'
b43e029a4bb14b10de4056758acecebc85546a95
3,648,458
def add_review(status):
    """
    Adds the flags on the tracker document.

    Input: tracker document.
    Output: sum of the switches.
    """
    cluster = status['cluster_switch']
    classify = status['classify_switch']
    replace = status['replace_switch']
    final = status['final_switch']
    finished = status['finished_switch']
    num = cluster + classify + replace + final + finished
    return num
8f2ba4cd8b6bd4e500e868f13733146579edd7ce
3,648,459
import operator as o  # assumption: `o` is the operator module, as o.floordiv suggests
import numpy as np


def n_floordiv(a, b):
    """safe floordiv: falls back to 1 wherever the divisor is zero"""
    return np.where(b != 0, o.floordiv(a, b), 1)
461752cfceaac911ef3be2335c2eb3893d512cc7
3,648,460
import numpy as np


def load_encoder_inputs(encoder_np_vecs='train_body_vecs.npy'):
    """
    Load variables & data that are inputs to encoder.

    Parameters
    ----------
    encoder_np_vecs : str
        filename of serialized numpy.array of encoder input (issue title)

    Returns
    -------
    encoder_input_data : numpy.array
        The issue body; the shape of this array will be
        (num_examples, doc_length)
    doc_length : int
        The standard document length of the input for the encoder
        after padding
    """
    vectorized_body = np.load(encoder_np_vecs)
    # Encoder input is simply the body of the issue text
    encoder_input_data = vectorized_body
    doc_length = encoder_input_data.shape[1]
    print('Shape of encoder input: {}'.format(encoder_input_data.shape))
    return encoder_input_data, doc_length
571cf13f6ff23fea5bb111ed12ac8afc06cc5f8b
3,648,461
def parse_row(row):
    """Create an Event object from a data row

    Args:
        row: Tuple of input data.

    Returns:
        Event object.
    """
    # Ignore either 1 or 2 columns that precede year
    if len(row) > 6:
        row = row[2:]
    else:
        row = row[1:]

    # Remove occasional 'r' or 'x' character prefix from year,
    # I'm not sure what these specify.
    year = row[0]
    if not year[0].isdigit():
        year = year[1:]

    return Event(year=int(year),
                 latitude=float(row[1]),
                 longitude=float(row[2]),
                 depth=float(row[3]),
                 magnitude=float(row[4]))
22923ee8f8e0b3b29eab3052df0e0b8b74613f66
3,648,462
import math
import operator
from typing import Counter


def vertical_log_binning(p, data):
    """Create vertical log binning. Used for peak sale."""
    index, value = zip(*sorted(data.items(), key=operator.itemgetter(1)))
    bin_result = []
    value = list(value)
    bin_edge = [min(value)]
    i = 1
    while len(value) > 0:
        # bin the lowest ceil(p * remaining) values, then repeat on the rest
        num_to_bin = int(math.ceil(p * len(value)))
        edge_value = value[num_to_bin - 1]
        bin_edge.append(edge_value)
        to_bin = list(filter(lambda x: x <= edge_value, value))
        bin_result += [i] * len(to_bin)
        value = list(filter(lambda x: x > edge_value, value))
        i += 1
    bin_result_dict = dict(zip(index, bin_result))
    bin_distri = Counter(bin_result_dict.values())
    return bin_result_dict, bin_edge, bin_distri
bf536250bc32a9bda54c8359589b10aa5936e902
3,648,463
import __main__
import os.path as op  # assumption: `op` is the os.path alias used in the body


def get_main_name(ext="", prefix=""):
    """Returns the base name of the main script.

    Can optionally add an extension or prefix."""
    return prefix + op.splitext(op.basename(__main__.__file__))[0] + ext
03beb4da53436054bf61a4f68d8b0f3d51ac13be
3,648,464
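A usage sketch for get_main_name above; run from a script named process.py it would produce:

print(get_main_name())               # "process"
print(get_main_name(ext=".log"))     # "process.log"
print(get_main_name(prefix="tmp_"))  # "tmp_process"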
def _grad_block_to_band(op, grad): """ Gradient associated to the ``block_to_band`` operator. """ grad_block = banded_ops.band_to_block( grad, op.get_attr("block_size"), symmetric=op.get_attr("symmetric"), gradient=True ) return grad_block
638c4047b224b80feb7c4f52151f96c4a62179b9
3,648,465
def LSTM(nO, nI):
    """Create an LSTM layer.

    Args:
        nO: number of output units.
        nI: number of input units.
    """
    weights = LSTM_weights(nO, nI)
    gates = LSTM_gates(weights.ops)
    return Recurrent(RNN_step(weights, gates))
296b1a7cb73a0e5dcb50e4aa29b33c944768c688
3,648,466
import requests def get_token(host, port, headers, auth_data): """Return token for a user. """ url = api_url(host, port, '/Users/AuthenticateByName') r = requests.post(url, headers=headers, data=auth_data) return r.json().get('AccessToken')
4d58d50c1421c17e89fa2d8d2205f0e066749e73
3,648,467
from datetime import datetime

def generateDateTime(s):
    """Generate date and time strings from a millisecond timestamp."""
    dt = datetime.fromtimestamp(float(s)/1e3)
    time = dt.strftime("%H:%M:%S.%f")
    date = dt.strftime("%Y%m%d")
    return date, time
8d566412230b5bb779baa395670ba06457c2074f
3,648,468
def get_activation_function(): """ Returns tf.nn activation function """ return ACTIVATION_FUNCTION
9f55f5122f708120ce7a5181b7035681f37cc0c6
3,648,469
import requests
import json
# Assumed alias for the URL-quoting helper used below (Python 3 standard library).
from urllib.parse import quote as urllib_quote

def doi_and_title_from_citation(citation):
    """
    Gets the DOI from a plaintext citation.

    Uses a search on CrossRef.org to retrieve the paper DOI.

    Parameters
    ----------
    citation : str
        Full journal article citation. Example: Senís, Elena, et al.
        "CRISPR/Cas9‐mediated genome engineering: An adeno‐associated viral
        (AAV) vector toolbox." Biotechnology journal 9.11 (2014): 1402-1412.

    Returns
    -------
    (doi, title) : tuple of (str, str), or None if no DOI could be found
    """
    # Encode raw citation
    citation = urllib_quote(citation)

    # Search for citation on CrossRef.org to try to get a DOI link
    api_search_url = 'http://search.labs.crossref.org/dois?q=' + citation
    try:
        response = requests.get(api_search_url).json()
    except json.decoder.JSONDecodeError:
        return None
    resp = response[0]
    doi = resp.get('doi')
    title = resp.get('title')
    if doi is None:
        return None

    # If crossref returns a http://dx.doi.org/ link, retrieve the doi from it
    # and save the URL to pass to doi_to_info
    if 'http://dx.doi.org/' in doi:
        doi = doi.replace('http://dx.doi.org/', '')
    doi = doi.strip()
    return doi, title
bd51d91c414c97a9e061d889a27917c1b487edd1
3,648,470
def prep_ciphertext(ciphertext): """Remove whitespace.""" message = "".join(ciphertext.split()) print("\nciphertext = {}".format(ciphertext)) return message
a5cd130ed3296addf6a21460cc384d8a0582f862
3,648,471
import re
import os

def setup_sample_file(base_filename, args, num_threads=1):
    """
    Return a sample data file, the ancestors file, a corresponding
    recombination rate (a single number or a RateMap), a prefix to use
    for files, and None.
    """
    gmap = args.genetic_map
    sd = tsinfer.load(base_filename + ".samples")

    anc = tsinfer.generate_ancestors(
        sd,
        num_threads=num_threads,
        path=base_filename + ".ancestors",
    )
    logger.info("GA done")

    inference_pos = anc.sites_position[:]

    match = re.search(r'(chr\d+)', base_filename)
    if match or gmap is not None:
        if gmap is not None:
            try:
                rho = float(gmap)
                logger.info(f"Using rate {gmap} for the recombination rate")
            except ValueError:
                rho = intervals.read_hapmap(gmap)
                logger.info(f"Using file from {gmap} for the recombination map")
        else:
            chr = match.group(1)
            logger.info(f"Using {chr} from HapMapII_GRCh37 for the recombination map")
            gmap = stdpopsim.get_species("HomSap").get_genetic_map(id="HapMapII_GRCh37")
            if not gmap.is_cached():
                gmap.download()
            filename = os.path.join(gmap.map_cache_dir, gmap.file_pattern.format(id=chr))
            rho = intervals.read_hapmap(filename)
    else:
        rho = 1e-8  # shouldn't matter what this is - it is relative to mismatch

    return sd.path, anc.path, rho, "", None
a9f7229eeaac3830d3e6fdc92214d11e5f0e3cab
3,648,472
def main():
    """Runs dir() via the process pool and returns its decoded output."""
    call = PROCESS_POOL.submit(call_dir)
    # Future.result() already blocks until completion, so no busy-wait is needed.
    result = call.result().decode()
    print("Results: \n\n{}".format(result))
    return result
6e02aab50023ed9b72c2f858122a2652a2f4607f
3,648,473
import tensorflow as tf

def bacthing_predict_SVGPVAE_rotated_mnist(test_data_batch, vae, svgp, qnet_mu, qnet_var, aux_data_train):
    """
    Get predictions for test data. See chapter 3.3 in Casale's paper.
    This version supports batching in the prediction pipeline
    (contrary to function predict_SVGPVAE_rotated_mnist).

    :param test_data_batch: batch of test data
    :param vae: fitted (!) VAE object
    :param svgp: fitted (!) SVGP object
    :param qnet_mu: precomputed encodings (means) of train dataset (N_train, L)
    :param qnet_var: precomputed encodings (vars) of train dataset (N_train, L)
    :param aux_data_train: train aux data (N_train, 10)

    :return: reconstructed test images and the per-pixel reconstruction (MSE) loss
    """
    images_test_batch, aux_data_test_batch = test_data_batch
    _, w, h, _ = images_test_batch.get_shape()

    # get latent samples for test data from GP posterior
    p_m, p_v = [], []
    for l in range(qnet_mu.get_shape()[1]):  # iterate over latent dimensions
        p_m_l, p_v_l, _, _ = svgp.approximate_posterior_params(index_points_test=aux_data_test_batch,
                                                               index_points_train=aux_data_train,
                                                               y=qnet_mu[:, l], noise=qnet_var[:, l])
        p_m.append(p_m_l)
        p_v.append(p_v_l)

    p_m = tf.stack(p_m, axis=1)
    p_v = tf.stack(p_v, axis=1)

    epsilon = tf.random.normal(shape=tf.shape(p_m), dtype=tf.float64)
    latent_samples = p_m + epsilon * tf.sqrt(p_v)

    # predict (decode) latent images.
    # ===============================================
    # Since this is generation (testing pipeline), \sigma_y could be added to the images.
    recon_images_test_logits = vae.decode(latent_samples)

    # Gaussian observational likelihood, no variance
    recon_images_test = recon_images_test_logits

    # Bernoulli observational likelihood
    # recon_images_test = tf.nn.sigmoid(recon_images_test_logits)

    # Gaussian observational likelihood, fixed variance \sigma_y
    # recon_images_test = recon_images_test_logits + tf.random.normal(shape=tf.shape(recon_images_test_logits),
    #                                                                 mean=0.0, stddev=0.04, dtype=tf.float64)

    # MSE loss for CGEN, computed on the raw decoder output
    recon_loss = tf.reduce_sum((images_test_batch - recon_images_test_logits) ** 2)

    # report per pixel loss
    K = tf.cast(w, dtype=tf.float64) * tf.cast(h, dtype=tf.float64)
    recon_loss = recon_loss / K
    # ===============================================
    return recon_images_test, recon_loss
6603db14abbd7bbb2ba8965ee43d876d4a607b0a
3,648,474
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """
    Set up the Strava Home Assistant config entry initiated through the HASS UI.
    """
    hass.data.setdefault(DOMAIN, {})

    # OAuth Stuff
    try:
        implementation = await config_entry_oauth2_flow.async_get_config_entry_implementation(
            hass=hass, config_entry=entry
        )
    except ValueError:
        implementation = config_entry_oauth2_flow.LocalOAuth2Implementation(
            hass,
            DOMAIN,
            entry.data[CONF_CLIENT_ID],
            entry.data[CONF_CLIENT_SECRET],
            OAUTH2_AUTHORIZE,
            OAUTH2_TOKEN,
        )
        OAuth2FlowHandler.async_register_implementation(hass, implementation)

    oauth_websession = config_entry_oauth2_flow.OAuth2Session(
        hass, entry, implementation
    )

    await oauth_websession.async_ensure_token_valid()

    # webhook view to get notifications for strava activity updates
    def strava_update_event_factory(data, event_type=CONF_STRAVA_DATA_UPDATE_EVENT):
        hass.bus.fire(event_type, data)

    strava_webhook_view = StravaWebhookView(
        oauth_websession=oauth_websession,
        event_factory=strava_update_event_factory,
        host=get_url(hass, allow_internal=False, allow_ip=False),
        hass=hass,
    )

    hass.http.register_view(strava_webhook_view)

    # event listeners
    async def strava_startup_functions():
        await renew_webhook_subscription(
            hass=hass, entry=entry, webhook_view=strava_webhook_view
        )
        await strava_webhook_view.fetch_strava_data()
        return True

    def ha_start_handler(event):
        """
        Called when HA has started, i.e. after all webhook views have been
        registered and are available.
        """
        hass.async_create_task(strava_startup_functions())

    def component_reload_handler(event):
        """Called when the component reloads."""
        hass.async_create_task(strava_startup_functions())

    async def async_strava_config_update_handler():
        """Called when the user changes sensor configs."""
        await strava_webhook_view.fetch_strava_data()
        return

    def strava_config_update_handler(event):
        hass.async_create_task(async_strava_config_update_handler())

    def core_config_update_handler(event):
        """
        Handles relevant changes to the HA core config,
        in particular URL and unit system changes.
        """
        if "external_url" in event.data.keys():
            hass.async_create_task(
                renew_webhook_subscription(
                    hass=hass, entry=entry, webhook_view=strava_webhook_view
                )
            )
        if "unit_system" in event.data.keys():
            hass.async_create_task(strava_webhook_view.fetch_strava_data())

    # register event listeners
    hass.data[DOMAIN]["remove_update_listener"] = []

    hass.data[DOMAIN]["remove_update_listener"].append(
        hass.bus.async_listen(EVENT_HOMEASSISTANT_START, ha_start_handler)
    )

    hass.data[DOMAIN]["remove_update_listener"].append(
        hass.bus.async_listen(EVENT_CORE_CONFIG_UPDATE, core_config_update_handler)
    )

    if hass.bus.async_listeners().get(CONF_STRAVA_RELOAD_EVENT, 0) < 1:
        hass.data[DOMAIN]["remove_update_listener"].append(
            hass.bus.async_listen(CONF_STRAVA_RELOAD_EVENT, component_reload_handler)
        )

    if hass.bus.async_listeners().get(CONF_STRAVA_CONFIG_UPDATE_EVENT, 0) < 1:
        hass.data[DOMAIN]["remove_update_listener"].append(
            hass.bus.async_listen(
                CONF_STRAVA_CONFIG_UPDATE_EVENT, strava_config_update_handler
            )
        )

    # Append rather than reassign, so the listeners registered above are not
    # dropped from the removal list.
    hass.data[DOMAIN]["remove_update_listener"].append(
        entry.add_update_listener(strava_config_update_helper)
    )

    for component in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, component)
        )

    return True
9ba10cf00f447d0e2038b8a542a45166c264b801
3,648,475
from typing import List, Tuple, Union

def normalize_boxes(boxes: List[Tuple], img_shape: Union[Tuple, List]) -> List[List[float]]:
    """
    Convert (x1, y1, x2, y2) pixel boxes back to normalized YOLO
    (x_center, y_center, width, height) format. img_shape is
    (channels, height, width).
    """
    img_height = img_shape[1]
    img_width = img_shape[2]
    boxes_ = []
    for i in range(len(boxes)):
        x1, y1, x2, y2 = boxes[i]
        width = x2 - x1
        height = y2 - y1
        x_mid = x1 + 0.5 * width
        y_mid = y1 + 0.5 * height
        box = [
            x_mid / img_width,
            y_mid / img_height,
            width / img_width,
            height / img_height,
        ]
        boxes_.append(box)
    return boxes_
086e0b069d06a4718e8ffd37189cf3d08c41d19f
3,648,476
import copy

def _make_reference_filters(filters, ref_dimension, offset_func):
    """
    Copies and replaces the reference dimension's definition in all of the filters applied to a dataset query.

    This is used to shift the dimension filters to fit the reference window.

    :param filters: the filters applied to a dataset query.
    :param ref_dimension: the dimension whose filters should be shifted.
    :param offset_func: a function applied to a filter's start and stop values to shift them.
    :return: the list of filters with the reference dimension's filters shifted.
    """
    reference_filters = []
    for ref_filter in filters:
        if ref_filter.field is ref_dimension:
            # NOTE: Important to apply the offset function to the start and stop properties because the date math can
            # become expensive over many rows
            ref_filter = copy.copy(ref_filter)
            ref_filter.start = offset_func(ref_filter.start)
            ref_filter.stop = offset_func(ref_filter.stop)
        reference_filters.append(ref_filter)
    return reference_filters
eeeeb74bb3618c87f3540de5b44970e197885dc6
3,648,477
import os def detect(): """ Detects the shell the user is currently using. The logic is picked from Docker Machine https://github.com/docker/machine/blob/master/libmachine/shell/shell.go#L13 """ shell = os.getenv("SHELL") if not shell: return None if os.getenv("__fish_bin_dir"): return "fish" return os.path.basename(shell)
4c6db387f21b1e4abef17efebbdc45b45c5b7fe7
3,648,478
def load_plane_dataset(name, num_points, flip_axes=False):
    """Loads and returns a plane dataset.

    Args:
        name: string, the name of the dataset.
        num_points: int, the number of points the dataset should have.
        flip_axes: bool, flip x and y axes if True.

    Returns:
        A Dataset object, the requested dataset.

    Raises:
        ValueError: If `name` is an unknown dataset.
    """
    try:
        return {
            'gaussian': GaussianDataset,
            'crescent': CrescentDataset,
            'crescent_cubed': CrescentCubedDataset,
            'sine_wave': SineWaveDataset,
            'abs': AbsDataset,
            'sign': SignDataset,
            'four_circles': FourCircles,
            'diamond': DiamondDataset,
            'two_spirals': TwoSpiralsDataset,
            'checkerboard': CheckerboardDataset,
            'rings': RingsDataset,
            '8-gaussians': EightGaussiansDataset
        }[name](num_points=num_points, flip_axes=flip_axes)
    except KeyError:
        raise ValueError('Unknown dataset: {}'.format(name))
aee32a6aa7f2be6ae515d6f3b1e27cda4d0f705e
3,648,479
def get_toxic(annotated_utterance, probs=True, default_probs=None, default_labels=None):
    """Function to get toxic classifier annotations from annotated utterance.

    Args:
        annotated_utterance: dictionary with annotated utterance, or annotations
        probs: return probabilities or not
        default_probs: default probabilities to return when the annotation is
            missing (empty dict if None)
        default_labels: default labels to return when the annotation is
            missing (empty list if None)

    Returns:
        dictionary with toxic probabilities if probs == True, or toxic labels otherwise
    """
    default_probs = {} if default_probs is None else default_probs
    default_labels = [] if default_labels is None else default_labels
    return _get_etc_model(
        annotated_utterance,
        "toxic_classification",
        probs=probs,
        default_probs=default_probs,
        default_labels=default_labels,
    )
ac69075af2edd9cdc84383054ba9ebe700dddb58
3,648,480
import numpy as np

def compute_energy_lapkmode(X, C, l, W, sigma, bound_lambda):
    """Compute the Laplacian K-modes energy in discrete form."""
    e_dist = ecdist(X, C, squared=True)
    g_dist = np.exp(-e_dist/(2*sigma**2))
    pairwise = 0
    Index_list = np.arange(X.shape[0])
    for k in range(C.shape[0]):
        tmp = np.asarray(np.where(l == k))
        if tmp.size != 1:
            tmp = tmp.squeeze()
        else:
            tmp = tmp[0]
        # With Potts values -1/0:
        # pairwise = pairwise - W[tmp,:][:,tmp].sum()
        # With Potts values 0/1:
        nonmembers = np.in1d(Index_list, tmp, invert=True)
        pairwise = pairwise + W[tmp, :][:, nonmembers].sum()
    E_kmode = compute_km_energy(l, g_dist.T)
    print(E_kmode)
    E = bound_lambda*pairwise + E_kmode
    return E
3fc5c2f9695e33eb3d1ac42a3172c30f1d81d23b
3,648,481
def calc_2d_wave_map(wave_grid, x_dms, y_dms, tilt, oversample=2, padding=10, maxiter=5, dtol=1e-2): """Compute the 2D wavelength map on the detector. :param wave_grid: The wavelength corresponding to the x_dms, y_dms, and tilt values. :param x_dms: the trace x position on the detector in DMS coordinates. :param y_dms: the trace y position on the detector in DMS coordinates. :param tilt: the trace tilt angle in degrees. :param oversample: the oversampling factor of the input coordinates. :param padding: the native pixel padding around the edge of the detector. :param maxiter: the maximum number of iterations used when solving for the wavelength at each pixel. :param dtol: the tolerance of the iterative solution in pixels. :type wave_grid: array[float] :type x_dms: array[float] :type y_dms: array[float] :type tilt: array[float] :type oversample: int :type padding: int :type maxiter: int :type dtol: float :returns: wave_map_2d - an array containing the wavelength at each pixel on the detector. :rtype: array[float] """ os = np.copy(oversample) xpad = np.copy(padding) ypad = np.copy(padding) # No need to compute wavelengths across the entire detector, slightly larger than SUBSTRIP256 will do. dimx, dimy = 2048, 300 y_dms = y_dms + (dimy - 2048) # Adjust y-coordinate to area of interest. # Generate the oversampled grid of pixel coordinates. x_vec = np.arange((dimx + 2*xpad)*os)/os - (os - 1)/(2*os) - xpad y_vec = np.arange((dimy + 2*ypad)*os)/os - (os - 1)/(2*os) - ypad x_grid, y_grid = np.meshgrid(x_vec, y_vec) # Iteratively compute the wavelength at each pixel. delta_x = 0.0 # A shift in x represents a shift in wavelength. for niter in range(maxiter): # Assume all y have same wavelength. wave_iterated = np.interp(x_grid - delta_x, x_dms[::-1], wave_grid[::-1]) # Invert arrays to get increasing x. # Compute the tilt angle at the wavelengths. tilt_tmp = np.interp(wave_iterated, wave_grid, tilt) # Compute the trace position at the wavelengths. x_estimate = np.interp(wave_iterated, wave_grid, x_dms) y_estimate = np.interp(wave_iterated, wave_grid, y_dms) # Project that back to pixel coordinates. x_iterated = x_estimate + (y_grid - y_estimate)*np.tan(np.deg2rad(tilt_tmp)) # Measure error between requested and iterated position. delta_x = delta_x + (x_iterated - x_grid) # If the desired precision has been reached end iterations. if np.all(np.abs(x_iterated - x_grid) < dtol): break # Evaluate the final wavelength map, this time setting out-of-bounds values to NaN. wave_map_2d = np.interp(x_grid - delta_x, x_dms[::-1], wave_grid[::-1], left=np.nan, right=np.nan) # Extend to full detector size. tmp = np.full((os*(dimx + 2*xpad), os*(dimx + 2*xpad)), fill_value=np.nan) tmp[-os*(dimy + 2*ypad):] = wave_map_2d wave_map_2d = tmp return wave_map_2d
727002a0cc61f6219c92d6db3d31eb653f849f03
3,648,482
def is_pipeline_variable(var: object) -> bool: """Check if the variable is a pipeline variable Args: var (object): The variable to be verified. Returns: bool: True if it is, False otherwise. """ # Currently Expression is on top of all kinds of pipeline variables # as well as PipelineExperimentConfigProperty and PropertyFile # TODO: We should deprecate the Expression and replace it with PipelineVariable return isinstance(var, Expression)
dd33657dce848dac819f89a4820c33df1ab4479e
3,648,483
def export_data():
    """Exports an ad group's site list via the export page."""
    data = {}
    data['adgroup_name'] = request.args.get('name')
    if data['adgroup_name']:
        data['sitelist'] = c['adgroups'].find_one({'name': data['adgroup_name']}, {'sites': 1})['sites']
    return render_template("export.html", data=data)
a6b43f90907e174f07773b0ed7603a48a3ff35ca
3,648,484
import cv2

def thresh_bin(img, thresh_limit=60):
    """Threshold the blue channel of a BGR image, returning an inverted
    binary mask (pixels at or below thresh_limit become 255)."""
    b, g, r = cv2.split(img)
    # Use the thresh_limit argument; the original hard-coded 50 and left the
    # parameter unused.
    mask = cv2.threshold(b, thresh_limit, 255, cv2.THRESH_BINARY_INV)[1]
    return mask
3660179d1e1c411feb44e993a8ab94f10c63d6e4
3,648,485
from typing import Any def get_aux(): """Get the entire auxiliary stack. Not commonly used.""" @parser def g(c: Cursor, a: Any): return a, c, a return g
b345901f4987e8849fbe35c0c997f38480d79f04
3,648,486
def _destupidize_dict(mylist):
    """The opposite of _stupidize_dict()."""
    return {item['key']: item['value'] for item in mylist}
f688e25a9d308e39f47390fef493ab80d303ea15
3,648,487
def equipment_add(request, type_, id_=None): """Adds an equipment.""" template = {} if request.method == 'POST': form = EquipmentForm(request.POST) if form.is_valid(): form.save(request.user, id_) return redirect('settings_equipment') template['form'] = form elif id_: template['form'] = EquipmentForm(instance=Equipment.objects.get(pk=id_)) else: template['form'] = EquipmentForm() return render(request, 'settings/equipment_add.html', template)
a8f2fce6c9aa64316edb96df9597fbfb516839a3
3,648,488
def _parse_text(val, **options): """ :return: Parsed value or value itself depends on 'ac_parse_value' """ if val and options.get('ac_parse_value', False): return parse_single(val) return val
cbd0d0b65237e8d3f817aa0bae1861f379a68b26
3,648,489
import os def output_path(model, model_set): """Return path to model output directory Parameters ---------- model : str model_set : str """ path = model_path(model, model_set=model_set) return os.path.join(path, 'output')
2cd89f89417e4164fdd66ca0c544b6c623f21ddb
3,648,490
import numpy as np

def get_rotation_matrix(rotation_angles):
    """Get the rotation matrix from Euler angles

    Parameters
    -----
    rotation_angles: array-like or list
        Three Euler angles in the order [sai, theta, phi] where
            sai   = rotation about the x-axis
            theta = rotation about the y-axis
            phi   = rotation about the z-axis

    Returns
    -----
    A rotation matrix of shape (3, 3)

    References
    -----
    Computing Euler angles from a rotation matrix by Gregory G. Slabaugh
    https://www.gregslabaugh.net/publications/euler.pdf
    """
    sai = rotation_angles[0]    # s
    theta = rotation_angles[1]  # t
    phi = rotation_angles[2]    # p

    # find all the required sines and cosines
    cs = np.cos(sai)
    ct = np.cos(theta)
    cp = np.cos(phi)
    ss = np.sin(sai)
    st = np.sin(theta)
    sp = np.sin(phi)

    # construct the combined rotation matrix R = Rz(phi) @ Ry(theta) @ Rx(sai);
    # per the Slabaugh reference, the last row uses sai (not phi), which fixes
    # a bug in the original (it used sp*ct and cp*ct).
    rotation_matrix = np.array([
        [ct*cp, ss*st*cp - cs*sp, cs*st*cp + ss*sp],
        [ct*sp, ss*st*sp + cs*cp, cs*st*sp - ss*cp],
        [  -st,            ss*ct,            cs*ct]
    ])
    return rotation_matrix
2965d1ce5c688e794f7fce6e51afd2e558c1bab7
3,648,491
def _metric_list_for_check(maas_store, entity, check): """ Computes the metrics list for a given check. Remote checks return a metric for each monitoring zone and each type of metric for the check type. Agent checks return a metric for each metric type on the check type. Check types that Mimic doesn't know about generate an empty list. """ if check.type not in maas_store.check_types: return [] if REMOTE_CHECK_TYPE_REGEX.match(check.type): return [{'name': '{0}.{1}'.format(mz, metric.name), 'type': metric.type, 'unit': metric.unit} for metric in maas_store.check_types[check.type].metrics for mz in check.monitoring_zones_poll] return [{'name': metric.name, 'type': metric.type, 'unit': metric.unit} for metric in maas_store.check_types[check.type].metrics]
c295f976c8c85d60af8f6e734f666381bc0186d2
3,648,492
import matplotlib.pyplot as plt
import numpy as np
def plot_MA_values(t,X,**kwargs):
    """
    Takes the numpy.ndarray time array (t) of size (N,) and the state space numpy.ndarray (X) of size (2,N), (4,N), or (8,N), and plots the moment arm values of the two muscles versus time and along the moment arm function.

    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    **kwargs
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    1) InputString - must be a string. Used to alter the figure Title. Default is None.

    """
    assert (np.shape(X)[0] in [2,4,8]) \
            and (np.shape(X)[1] == len(t)) \
            and (str(type(X)) == "<class 'numpy.ndarray'>"), \
        "X must be a (2,N), (4,N), or (8,N) numpy.ndarray, where N is the length of t."
    assert np.shape(t) == (len(t),) and str(type(t)) == "<class 'numpy.ndarray'>", "t must be a (N,) numpy.ndarray."

    InputString = kwargs.get("InputString",None)
    assert InputString is None or type(InputString)==str, "InputString must either be a string or None."
    if InputString is None:
        DescriptiveTitle = "Moment arm equations"
    else:
        DescriptiveTitle = "Moment arm equations\n(" + InputString + " Driven)"

    fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2,figsize=(8,6))
    plt.subplots_adjust(left=0.15,hspace=0.1,bottom=0.1)
    plt.suptitle(DescriptiveTitle)

    ax1.plot(np.linspace(0,np.pi*(160/180),1001),
             np.array(list(map(lambda x1: R1([x1]),np.linspace(0,np.pi*(160/180),1001)))),
             '0.70')
    ax1.plot(np.linspace(min(X[0,:]),max(X[0,:]),101),
             np.array(list(map(lambda x1: R1([x1]),np.linspace(min(X[0,:]),max(X[0,:]),101)))),
             'g',lw=3)
    ax1.set_xticks([0,np.pi/4,np.pi/2,3*np.pi/4,np.pi])
    ax1.set_xticklabels([""]*len(ax1.get_xticks()))
    ax1.set_ylabel("Moment Arm for\n Muscle 1 (m)")

    # Note: X must be transposed for map() to iterate over time steps.
    ax2.plot(t,np.array(list(map(lambda X: R1(X),X.T))),'g')
    ax2.set_ylim(ax1.get_ylim())
    ax2.set_yticks(ax1.get_yticks())
    ax2.set_yticklabels([""]*len(ax1.get_yticks()))
    ax2.set_xticklabels([""]*len(ax2.get_xticks()))

    ax3.plot(np.linspace(0,np.pi*(160/180),1001),
             np.array(list(map(lambda x1: R2([x1]),np.linspace(0,np.pi*(160/180),1001)))),
             '0.70')
    ax3.plot(np.linspace(min(X[0,:]),max(X[0,:]),101),
             np.array(list(map(lambda x1: R2([x1]),np.linspace(min(X[0,:]),max(X[0,:]),101)))),
             'r',lw=3)
    ax3.set_xticks([0,np.pi/4,np.pi/2,3*np.pi/4,np.pi])
    ax3.set_xticklabels([r"$0$",r"$\frac{\pi}{4}$",r"$\frac{\pi}{2}$",r"$\frac{3\pi}{4}$",r"$\pi$"])
    ax3.set_xlabel("Joint Angle (rads)")
    ax3.set_ylabel("Moment Arm for\n Muscle 2 (m)")

    ax4.plot(t,np.array(list(map(lambda X: R2(X),X.T))),'r')
    ax4.set_ylim(ax3.get_ylim())
    ax4.set_yticks(ax3.get_yticks())
    ax4.set_yticklabels([""]*len(ax3.get_yticks()))
    ax4.set_xlabel("Time (s)")

    return(fig,[ax1,ax2,ax3,ax4])
91e37001b689a66f53e6035b27520527ea9aa922
3,648,493
def filter_pdf_files(filepaths): """ Returns a filtered list with strings that end with '.pdf' Keyword arguments: filepaths -- List of filepath strings """ return [x for x in filepaths if x.endswith('.pdf')]
3f44b3af9859069de866cec3fac33a9e9de5439d
3,648,494
import os

def index_file(path: str) -> dict:
    """
    Indexes the files and directories under a given directory.

    Arguments:
        path {str} - the path of the DIRECTORY to index
    Return:
        {dict} - structure of the indexed directory
    """
    structure = {}  # Represents the directory structure
    for dirpath, directory, files in os.walk(path):
        all_files = {}
        for file in files:
            all_files.update(get_file_info(path, dirpath, file))
        node_info = get_directory_info(path, dirpath, all_files, directory)
        structure.update({dirpath: node_info})
    return structure
9385b8577d296a43e8e2d5b3ea3517ba1e498f65
3,648,495
def hue_quadrature(h: FloatingOrArrayLike) -> FloatingOrNDArray:
    """
    Return the hue quadrature from given hue :math:`h` angle in degrees.

    Parameters
    ----------
    h
        Hue :math:`h` angle in degrees.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Hue quadrature.

    Examples
    --------
    >>> hue_quadrature(196.3185839)  # doctest: +ELLIPSIS
    237.6052911...
    """
    h = as_float_array(h)

    h_i = HUE_DATA_FOR_HUE_QUADRATURE["h_i"]
    e_i = HUE_DATA_FOR_HUE_QUADRATURE["e_i"]
    H_i = HUE_DATA_FOR_HUE_QUADRATURE["H_i"]

    # :math:`h_p` = :math:`h_z` + 360 if :math:`h_z` < :math:`h_1`, i.e. h_i[0]
    h[h <= h_i[0]] += 360

    # *np.searchsorted* returns an erroneous index if a *nan* is used as input.
    h[np.asarray(np.isnan(h))] = 0
    i = as_int_array(np.searchsorted(h_i, h, side="left") - 1)

    h_ii = h_i[i]
    e_ii = e_i[i]
    H_ii = H_i[i]
    h_ii1 = h_i[i + 1]
    e_ii1 = e_i[i + 1]

    H = H_ii + (
        (100 * (h - h_ii) / e_ii) / ((h - h_ii) / e_ii + (h_ii1 - h) / e_ii1)
    )

    return as_float(H)
df120ae34dfc45ecbb818718885cbbb501667bdd
3,648,496
def aa_find_devices_ext(devices, unique_ids):
    """usage: (int return, u16[] devices, u32[] unique_ids) = aa_find_devices_ext(u16[] devices, u32[] unique_ids)

    All arrays can be passed into the API as an ArrayType object or as
    a tuple (array, length), where array is an ArrayType object and
    length is an integer. The user-specified length would then serve as
    the length argument to the API function (please refer to the product
    datasheet). If only the array is provided, the array's intrinsic
    length is used as the argument to the underlying API function.

    Additionally, for arrays that are filled by the API function, an
    integer can be passed in place of the array argument and the API
    will automatically create an array of that length. All output
    arrays, whether passed in or generated, are passed back in the
    returned tuple."""
    if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
    # devices pre-processing
    __devices = isinstance(devices, int)
    if __devices:
        (devices, num_devices) = (array_u16(devices), devices)
    else:
        (devices, num_devices) = isinstance(devices, ArrayType) and (devices, len(devices)) or (devices[0], min(len(devices[0]), int(devices[1])))
        if devices.typecode != 'H':
            raise TypeError("type for 'devices' must be array('H')")
    # unique_ids pre-processing
    __unique_ids = isinstance(unique_ids, int)
    if __unique_ids:
        (unique_ids, num_ids) = (array_u32(unique_ids), unique_ids)
    else:
        (unique_ids, num_ids) = isinstance(unique_ids, ArrayType) and (unique_ids, len(unique_ids)) or (unique_ids[0], min(len(unique_ids[0]), int(unique_ids[1])))
        if unique_ids.typecode != 'I':
            raise TypeError("type for 'unique_ids' must be array('I')")
    # Call API function
    (_ret_) = api.py_aa_find_devices_ext(num_devices, num_ids, devices, unique_ids)
    # devices post-processing
    if __devices: del devices[max(0, min(_ret_, len(devices))):]
    # unique_ids post-processing
    if __unique_ids: del unique_ids[max(0, min(_ret_, len(unique_ids))):]
    return (_ret_, devices, unique_ids)
1b84cfc3d6fd52f786c2191fde4d37a6287e8b87
3,648,497
from binascii import hexlify
from logging import getLogger

def decode_varint_in_reverse(byte_array, offset, max_varint_length=9):
    """
    This function will move backwards through a byte array trying to decode a varint in reverse.
    An InvalidVarIntError will be raised if a varint is not found by the algorithm used in this
    function. The calling logic should check for this case, since it is likely to be encountered
    in the context of carving.

    Note: This cannot determine if the field being parsed was originally a varint or not and may
          give false positives. Please keep this in mind when calling this function.

    Note: If the array runs out of bytes while parsing in reverse, the currently determined varint
          will be returned.

    Note: Since the parsing starts from the left of the offset specified, the resulting byte string
          that represents this varint can be determined by byte_array[varint_relative_offset:offset].
          The length of the varint in bytes can be determined likewise either from the len() of the
          above or offset - varint_relative_offset.

    :param byte_array: bytearray The byte array to parse for the varint in reverse.

    :param offset: int The offset to move backwards from. The offset specified is not included in
                       the parsing and the algorithm starts with the last byte of the varint at
                       offset - 1. If you want to start at the end of the byte array then the offset
                       should be the length of the byte array (where the offset would refer to a
                       non-existing index in the array).

    :param max_varint_length: int The maximum number of varint bytes to go back in reverse. The
                                  default is 9 since this is the maximum number of bytes a varint
                                  can be.

    :return: tuple(int, int) The unsigned integer value of the varint and the relative offset at
                             which the varint begins.

    :raise: InvalidVarIntError: If a varint is not determined while parsing the byte array in
                                reverse using the algorithm in this function. This error is not
                                logged as an error but rather a debug statement since it is very
                                likely to occur during carving and should be handled appropriately.
    """
    if offset > len(byte_array):
        log_message = "The offset: {} is greater than the size of the byte array: {} for the bytes: {}."
        log_message = log_message.format(offset, len(byte_array), hexlify(byte_array))
        getLogger(LOGGER_NAME).error(log_message)
        raise ValueError(log_message)

    unsigned_integer_value = 0
    varint_inverted_relative_offset = 0

    varint_byte = ord(byte_array[offset - 1 - varint_inverted_relative_offset:offset - varint_inverted_relative_offset])
    varint_byte &= 0x7f
    unsigned_integer_value |= varint_byte
    varint_inverted_relative_offset += 1

    while offset - varint_inverted_relative_offset - 1 >= 0:

        if varint_inverted_relative_offset > max_varint_length:
            """
            Since this exception is not considered an important exception to log as an error, it
            will be logged as a debug statement. There is a good chance of this use case occurring
            and it is even expected during carving.
            """
            log_message = "A varint was not determined from byte array: {} starting at offset: {} in reverse."
            log_message = log_message.format(byte_array, offset)
            getLogger(LOGGER_NAME).debug(log_message)
            # Raise the error (the original returned it, contradicting the docstring).
            raise InvalidVarIntError(log_message)

        varint_byte = ord(byte_array[offset - 1 - varint_inverted_relative_offset:
                                     offset - varint_inverted_relative_offset])
        msb_set = varint_byte & 0x80

        if msb_set:
            varint_byte &= 0x7f
            varint_byte <<= (7 * varint_inverted_relative_offset)
            unsigned_integer_value |= varint_byte
            varint_inverted_relative_offset += 1
        else:
            break

    varint_relative_offset = offset - varint_inverted_relative_offset
    return unsigned_integer_value, varint_relative_offset
528d6c40a6e53c747ffca2c88388aa58cb98ea71
3,648,498
import imp
import os

def manager_version(request):
    """
    Context processor that adds the rhgamestation-manager version.
    """
    # Tricky way to get the manager version, because it lives outside the
    # project path.
    root = imp.load_source('__init__', os.path.join(settings.BASE_DIR, '__init__.py'))
    return {'manager_version': root.__version__}
364887d6f8a521f12b03f4ed0dae2ebba1bf2b15
3,648,499