content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def readremotenames(repo):
    """
    read the details about the remotenames stored in .hg/logexchange/ and
    yields a tuple (node, remotepath, name). It does not yield information
    about whether an entry yielded is branch or bookmark. To get that
    information, call the respective functions.
    """
    for bmentry in readremotenamefile(repo, 'bookmarks'):
        yield bmentry
    for branchentry in readremotenamefile(repo, 'branches'):
        yield branchentry
19,200
def only_t1t2(src, names):
    """
    Filter a list of file names: when src is a TissueClassify node, drop the
    T1/T2 average images from the list (so they are kept downstream);
    otherwise drop the "TissueClassify" entry itself.

    :param src: source node/directory name
    :param names: list of file names to filter
    :return: the filtered list of names
    """
    if src.endswith("TissueClassify"):
        # print "Keeping T1/T2!"
        try:
            names.remove("t1_average_BRAINSABC.nii.gz")
        except ValueError:
            pass
        try:
            names.remove("t2_average_BRAINSABC.nii.gz")
        except ValueError:
            pass
    else:
        names.remove("TissueClassify")
        # print "Ignoring these files..."
        # for name in names:
        #     print "\t" + name
    return names
19,201
def distanceEucl(a, b):
    """Compute the Euclidean distance between a and b in any dimension."""
    dist = np.linalg.norm(a - b)
    return dist
19,202
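A quick usage sketch for the function above, using NumPy arrays:

import numpy as np

a = np.array([0.0, 0.0, 0.0])
b = np.array([1.0, 2.0, 2.0])
print(distanceEucl(a, b))  # 3.0, since sqrt(1 + 4 + 4) == 3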
def main():
    """Program main"""
    options = docopt(__doc__)
    cmd = command.Command(options)
    for opt in options:
        if options[opt]:
            cmd(opt)
19,203
def _search(self, *query):
    """Search for a match between the query terms and a tensor's Id, Tag, or Description.
    https://github.com/OpenMined/PySyft/issues/2609

    Note that the query is an AND query meaning that every item in the list of
    strings (*query) must be found somewhere on the tensor in order for it to
    be included in the results.

    Args:
        query: A list of strings to match against.
        me: A reference to the worker calling the search.

    Returns:
        A list of PointerTensors.
    """
    results = list()
    for key, obj in self._objects.items():
        found_something = True
        for query_item in query:
            # If deserialization produced a bytes object instead of a string,
            # make sure it's turned back to a string for a fair comparison.
            if isinstance(query_item, bytes):
                query_item = query_item.decode("ascii")

            match = False
            if query_item == str(key):
                match = True

            if isinstance(obj, FrameworkTensor):
                if obj.tags is not None:
                    if query_item in obj.tags:
                        match = True

                if obj.description is not None:
                    if query_item in obj.description:
                        match = True

            if not match:
                found_something = False

        if found_something:
            # set garbage_collect_data to False because if we're searching
            # for a tensor we don't own, then it's probably someone else's
            # decision to decide when to delete the tensor.
            ptr = obj.create_pointer(garbage_collect_data=False, owner=sy.local_worker)
            results.append(ptr)

    return results
19,204
def cli(ctx):
    """
    Network objects.

    A Network resource can represent an IP Network and an IP Address. Working
    with networks is usually done with CIDR notation.

    Networks can have any number of arbitrary attributes as defined below.
    """
19,205
def _FindLockNames(locks):
    """
    Finds the ids and descriptions of locks that given locks can block.

    @type locks: dict of locking level to list
    @param locks: The locks that gnt-debug delay is holding.

    @rtype: dict of string to string
    @return: The lock name to entity name map.

    For a given set of locks, some internal locks (e.g. ALL_SET locks) can be
    blocked even though they were not listed explicitly. This function has to
    take care and list all locks that can be blocked by the locks given as
    parameters.
    """
    lock_map = {}

    if locking.LEVEL_NODE in locks:
        node_locks = locks[locking.LEVEL_NODE]
        if node_locks == locking.ALL_SET:
            # Empty list retrieves all info
            name_uuid_map = _GetNodeUUIDMap([])
        else:
            name_uuid_map = _GetNodeUUIDMap(node_locks)

        for name in name_uuid_map:
            lock_map["node/%s" % name_uuid_map[name]] = name

        # If ALL_SET was requested explicitly, or there is at least one lock
        # Note that locking.ALL_SET is None and hence the strange form of the if
        if node_locks == locking.ALL_SET or node_locks:
            lock_map["node/[lockset]"] = "joint node lock"

    # TODO: add other lock types here when support for these is added

    return lock_map
19,206
def test_md033_bad_inline_html_present():
    """
    Test to make sure we get the expected behavior after scanning a bad file
    from the test/resources/rules/md033 directory that contains inline HTML.
    """

    # Arrange
    scanner = MarkdownScanner()
    supplied_arguments = [
        "scan",
        "test/resources/rules/md033/bad_inline_html_present.md",
    ]

    expected_return_code = 1
    expected_output = (
        "test/resources/rules/md033/bad_inline_html_present.md:3:9: "
        + "MD033: Inline HTML [Element: a] (no-inline-html)\n"
        + "test/resources/rules/md033/bad_inline_html_present.md:5:17: "
        + "MD033: Inline HTML [Element: ![CDATA[] (no-inline-html)"
    )
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
19,207
def strip_directives(filename, filepath, outpath):
    """
    Read in file, remove all preprocessor directives and output
    """
    # r = re.compile(r"(^#.*$\n)")
    with open(os.path.join(filepath, filename)) as infile:
        txt = infile.read()
    outtxt = re.sub(r"(^#.*$\n)", '', txt, flags=re.M)
    with open(os.path.join(outpath, filename), 'w') as outfile:
        outfile.write(outtxt)
19,208
def runtime(command: list, show=True, env=None):
    """Runs the command and returns the runtime."""
    print('START:', *command)
    t_start = time()
    if show:
        r = subprocess.run(command, env=env)
    else:
        r = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
    t_end = time()
    if r.returncode != 0:
        # stderr is only captured when show=False; fall back to the code.
        msg = r.stderr.decode() if r.stderr else f'command failed with code {r.returncode}'
        raise Exception(msg)
    t = t_end - t_start
    print('END: runtime =', round(t, 3), 'seconds')
    return t
19,209
def val2str(val):
    """Writes values to a string.

    Args:
        val (any): Any object that should be represented by a string.

    Returns:
        valstr (str): String representation of `val`.
    """
    # Return the input if it's a string
    if isinstance(val, str):
        valstr = val
    # Handle types where spaces are added
    elif isinstance(val, tuple):
        valstr = repr(val).replace(', ', ',')
    elif isinstance(val, list):
        valstr = repr(val).replace(', ', ',')
    elif isinstance(val, dict):
        valstr = repr(val).replace(', ', ',').replace(': ', ':')
    # Otherwise use repr()
    else:
        valstr = repr(val)
    # Return output
    return valstr
19,210
def save_device_information(device, **kwargs):
    """Run 'show version' to print the information users are interested in.

    Args:
        Mandatory:
            device (`obj`) : Device object.

    Returns:
        True: Result is PASSED

    Raises:
        None

    Example:
        >>> save_device_information(device=Device())
    """
    # bypass the section
    log.info('Junos device does not have bootvar.')
19,211
def show_grades(grades, format):
    """
    Show the grades received as argument in the format specified.

    :param grades: grades to show
    :param format: format of the output
    """
    print('Assessment\'s grades')
    print(assessment_serializer.serialize_grades(grades, format) + "\n")
19,212
def configure_interface_switchport_mode(device, interface, mode):
    """ Configures switchport mode on interface

        Args:
            device ('obj') : device to use
            interface ('str') : interface to configure
            mode ('str') : interface mode

        Returns:
            None

        Raises:
            SubCommandFailure
    """
    log.info(
        "Configuring switchport mode on {interface} with mode = {mode}".format(
            interface=interface, mode=mode
        )
    )
    try:
        device.configure(
            [
                "interface {interface}".format(interface=interface),
                "switchport mode {mode}".format(mode=mode),
            ]
        )
    except SubCommandFailure:
        log.error('Failed to configure switchport mode on the interface')
        raise
19,213
def test_prepare_input(img, mocker):
    """Test image preparations."""
    img_clone = torch.tensor((1, 3, 4, 4))
    img_clone.clone = mocker.Mock(return_value=img)
    out = _prepare_input(img_clone)
    img_clone.clone.assert_called_once()
    img.detach.assert_called_once()
    img.to.assert_called_once()
    assert_that(out).is_same_as(img)
19,214
def use_low_level_network(query, env):
    """
    Make a call for variables using the lower level network code
    """
    records = get_block_of_records(query, env=env)
    process_records("Network example", records)
19,215
def showItems(category_name):
    """Pulls all the Categories, the specific Category selected by the user
    from the home page, all the items within that specific Category, and then
    counts the number of items. All this information is displayed on the
    items.html page.
    """
    categories = session.query(Category).order_by(asc(Category.name))
    category = session.query(Category).filter_by(name=category_name).one()
    items = session.query(Item).filter_by(category_name=category_name).all()
    itemscount = session.query(Item). \
        filter_by(category_name=category_name). \
        count()
    return render_template('items.html', categories=categories, items=items,
                           category=category, itemscount=itemscount)
19,216
def date_ranges():
    """Build date ranges for current day, month, quarter, and year."""
    today = datetime.date.today()
    quarter = math.floor((today.month - 1) / 3)
    cycle = current_cycle()
    return {
        'month': (
            today.replace(day=1),
            today.replace(day=calendar.monthrange(today.year, today.month)[1]),
        ),
        'quarter': (
            today.replace(day=1, month=quarter * 3 + 1),
            today.replace(
                day=calendar.monthrange(today.year, quarter * 3 + 3)[1],
                month=quarter * 3 + 3,
            ),
        ),
        'year': (
            today.replace(day=1, month=1),
            today.replace(
                day=calendar.monthrange(today.year, 12)[1],
                month=12,
            ),
        ),
        'cycle': (
            datetime.date(year=cycle - 1, month=1, day=1),
            datetime.date(
                year=cycle,
                month=12,
                day=calendar.monthrange(cycle, 12)[1],
            ),
        ),
    }
19,217
def FiskJohnsonDiscreteFuncBCKWD(r, F0, T):
    """Compute reverse Fourier-Bessel transformation via Fisk Johnson procedure.

    Compute reverse Fourier-Bessel transform (i.e. 0th order reverse Hankel
    transform) using a rapidly convergent summation of a Fourier-Bessel
    expansion following the method introduced in Ref. [1] and further
    detailed in Ref. [2].

    Args:
        r (numpy array, ndim=1): equispaced 1D grid of target coordinates.
        F0 (numpy array, ndim=1): Fourier-Bessel transformed function at
            discrete coordinates given by its scaled bessel zeros.
        T (float): truncation threshold for objective function.

    Returns:
        f (numpy array, ndim=1): reverse Fourier-Bessel transform of input
            function.

    Notes:
        - Fisk Johnson procedure for reverse Fourier-Bessel transformation.
        - Implements Eq. (10) of Ref. [1].
        - above truncation threshold it holds that f(r>T) = 0.
        - on input F0 = F0[jm/T] for m = 0...N-1 where jm are the first N
          zeros of the 0th order Bessel function in ascending order.

    Refs:
        [1] An Improved Method for Computing a Discrete Hankel Transform
            H. Fisk Johnson
            Comp. Phys. Commun. 43 (1987) 181-202

        [2] Theory and operational rules for the discrete Hankel transform
            N. Baddour, U. Chouinard
            J. Opt. Soc. Am. A 32 (2015) 611
    """
    # INITIALIZE EMPTY ARRAY FOR REVERSE TRANSFORM
    f = np.zeros(r.size)
    # COMPUTE FIRST N ZEROS OF 0TH ORDER BESSEL FUNCTION IN ASCENDING ORDER
    jm = scs.jn_zeros(0, F0.size)
    # REVERSE TRANSFORM YIELDING ARBITRARY FUNCTION VALUES f(xT) FROM ITS
    # FOURIER BESSEL TRANSFORM F(j[m]/T) m=0...N-1 AT SCALED BESSEL ZEROS
    # j[m]/T. SEE EQ. (10) OF REF. [1].
    x = r / T
    f[x < 1] = 2.0 / T**2 * np.sum(
        F0 * scs.j0(jm * x[x < 1, np.newaxis]) / scs.j1(jm)**2, axis=1)
    return f
19,218
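A minimal usage sketch for the reverse transform above; the forward coefficients F0 here are filled with a toy exponential decay purely for illustration, not real data:

import numpy as np
import scipy.special as scs

T = 10.0                       # truncation radius: f(r > T) == 0
jm = scs.jn_zeros(0, 64)       # first 64 zeros of J0
F0 = np.exp(-jm / T)           # hypothetical transform samples F(jm / T)
r = np.linspace(0.0, 12.0, 200)
f = FiskJohnsonDiscreteFuncBCKWD(r, F0, T)  # f stays zero for r >= T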
def make_waterfall_horizontal(data, layout):
    """Function used to flip the figure from vertical to horizontal."""
    h_data = []
    for i_trace, trace in enumerate(list(data)):
        h_data.append(trace)
        prov_x = h_data[i_trace]['x']
        h_data[i_trace]['x'] = list(h_data[i_trace]['y'])[::-1]
        h_data[i_trace]['y'] = list(prov_x)[::-1]
        h_data[i_trace]['orientation'] = 'h'
        h_data[i_trace]['hoverinfo'] = hoverinfo_horizontal_(
            h_data[i_trace]['hoverinfo'])

    h_annotations = []
    for i_ann, annotation in enumerate(list(layout['annotations'])):
        h_annotations.append(annotation)
        prov_x = h_annotations[i_ann]['x']
        h_annotations[i_ann]['x'] = h_annotations[i_ann]['y']
        h_annotations[i_ann]['y'] = prov_x
    h_annotations.reverse()

    h_layout = layout
    h_layout['annotations'] = h_annotations
    h_layout['xaxis'] = go.layout.XAxis({'title': 'Prediction score'})
    h_layout['yaxis'] = go.layout.YAxis({'title': ''})
    return h_data, h_layout
19,219
def parse_file(producer):
    """
    Given a producer name, return appropriate parse function.

    :param producer: NMR machine producer.
    :return: lambda function that reads file according to producer.
    """
    global path_to_directory
    return {
        "Agilent": (lambda: ng.agilent.read(dir=path_to_directory)),
        "Bruker": (lambda: ng.bruker.read(dir=path_to_directory)),
        "Varian": (lambda: ng.varian.read(dir=path_to_directory)),
    }.get(producer)
19,220
def print_glossary():
    """
    Added by Steven Combs.
    """
    message = "*all-atom = in the case of sampling, synonymous with fine movements and often including side chain information; also referred to as high-resolution \n \
*benchmark = another word for a test of a method, scoring function, algorithm, etc. by comparing results from the method to accepted methods/models \n \
*binary file = a file in machine-readable language that can be executed to do something in silico \n \
*BioPython = a set of tools for biological computing written and compatible with Python http://biopython.org/wiki/Biopython \n \
*build = to compile the source code so it can be used as a program \n \
*centroid = in Rosetta centroid mode, side chains are represented as unified spheres centered at the residue's center of mass \n \
*cluster center = the geometric center of a cluster, or group, of models \n \
*clustering = in this case, grouping models with similar structure together \n \
*comparative model = a protein model where the primary sequence from one protein (target) is placed, or threaded, onto the three dimensional coordinates of a protein of known structure (template)"
    tkinter.messagebox.showinfo(title="Rosetta Glossary from Nature Protocols Paper. Written by Stephanie Hirst DeLuca", message=message)
19,221
def test_similar_pairs_gene_set(similarity_em):
    """Test defining a gene set for similar pairs."""
    # Subsetting genes shouldn't change the results in this case.
    gene_set_1 = reversed([g for g in similarity_em.genes if int(g.name[-1]) in (0, 2, 4)])
    sps = similarity_em.create_similar_pairs(0, 2, 512, 42, gene_set=gene_set_1)
    for i in range(len(similarity_em.cells)):
        similar_cells = sps.get_similar_cells(i)
        expected_pairs = [k for k in range(len(similarity_em.cells)) if k % 2 == i % 2 and k != i]
        assert len(similar_cells) == 2
        assert expected_pairs[0] in similar_cells
        assert expected_pairs[1] in similar_cells

    gene_set_2 = (g for g in similarity_em.genes if int(g.name[-1]) in (1, 3, 5))
    sps = similarity_em.create_similar_pairs(0, 2, 512, 42, gene_set=gene_set_2)
    for i in range(len(similarity_em.cells)):
        similar_cells = sps.get_similar_cells(i)
        expected_pairs = [k for k in range(len(similarity_em.cells)) if k % 2 == i % 2 and k != i]
        assert len(similar_cells) == 2
        assert expected_pairs[0] in similar_cells
        assert expected_pairs[1] in similar_cells
19,222
def get_lon_dim_name_impl(ds: Union[xr.Dataset, xr.DataArray]) -> Optional[str]:
    """
    Get the name of the longitude dimension.
    :param ds: An xarray Dataset
    :return: the name or None
    """
    return _get_dim_name(ds, ['lon', 'longitude', 'long'])
19,223
def logout():
    """Log out user."""
    session.pop('eventbrite_token', None)
    return redirect(url_for('index'))
19,224
def search(query="", casesense=False, filterout=[], subscribers=0, nsfwmode=2, doreturn=False, sort=None): """ Search for a subreddit by name *str query = The search query "query" = results where "query" is in the name "*query" = results where "query" is at the end of the name "query*" = results where "query" is at the beginning of the name "*query*" = results where "query" is in the middle of the name bool casesense = is the search case sensitive list filterout = [list, of, words] to omit from search. Follows casesense int subscribers = minimum number of subscribers int nsfwmode = 0 - Clean only 1 - Dirty only 2 - All int sort = The integer representing the sql column to sort by. Defaults to no sort. """ querys = ''.join([c for c in query if c in GOODCHARS]) queryx = '%%%s%%' % querys if '!' in query: cur.execute('SELECT * FROM subreddits WHERE name LIKE ?', [querys]) return cur.fetchone() if nsfwmode in [0,1]: cur.execute('SELECT * FROM subreddits WHERE name LIKE ? AND subscribers > ? AND nsfw=?', [queryx, subscribers, nsfwmode]) else: cur.execute('SELECT * FROM subreddits WHERE name LIKE ? AND subscribers > ?', [queryx, subscribers]) results = [] if casesense is False: querys = querys.lower() filterout = [x.lower() for x in filterout] if '*' in query: positional = True front = query[-1] == '*' back = query[0] == '*' if front and back: mid = True front = False back = False else: mid = False else: positional = False lenq = len(querys) for item in fetchgenerator(cur): name = item[SQL_NAME] if casesense is False: name = name.lower() if querys not in name: #print('%s not in %s' % (querys, name)) continue if (positional and front) and (name[:lenq] != querys): #print('%s not front %s (%s)' % (querys, name, name[:lenq])) continue if (positional and back) and (name[-lenq:] != querys): #print('%s not back %s (%s)' % (querys, name, name[-lenq:])) continue if (positional and mid) and (querys not in name[1:-1]): #print('%s not mid %s (%s)' % (querys, name, name[1:-1])) continue if any(filters in name for filters in filterout): #print('%s not filter %s' % (querys, name)) continue results.append(item) if sort is not None: results.sort(key=lambda x: x[sort], reverse=True) if doreturn is True: return results else: for item in results: print(item)
19,225
def is_xh(filename):
    """
    Detects if the given file is an XH file.

    :param filename: The file to check.
    :type filename: str
    """
    info = detect_format_version_and_endianness(filename)
    if info is False:
        return False
    return True
19,226
def is_parent_process_alive():
    """Return if the parent process is alive.
    This relies on psutil, but is optional."""
    parent_pid = os.getppid()
    if psutil is None:
        try:
            os.kill(parent_pid, 0)
        except OSError:
            return False
        else:
            return True
    else:
        try:
            return psutil.pid_exists(parent_pid)
        except (AttributeError, KeyboardInterrupt, Exception):
            return False
19,227
def validate_func_kwargs(
    kwargs: dict,
) -> Tuple[List[str], List[Union[str, Callable[..., Any]]]]:
    """
    Validates types of user-provided "named aggregation" kwargs.
    `TypeError` is raised if aggfunc is not `str` or callable.

    Parameters
    ----------
    kwargs : dict

    Returns
    -------
    columns : List[str]
        List of user-provided keys.
    func : List[Union[str, callable[..., Any]]]
        List of user-provided aggfuncs

    Examples
    --------
    >>> validate_func_kwargs({'one': 'min', 'two': 'max'})
    (['one', 'two'], ['min', 'max'])
    """
    tuple_given_message = "func is expected but received {} in **kwargs."
    columns = list(kwargs)
    func = []
    for col_func in kwargs.values():
        if not (isinstance(col_func, str) or callable(col_func)):
            raise TypeError(tuple_given_message.format(type(col_func).__name__))
        func.append(col_func)
    if not columns:
        no_arg_message = "Must provide 'func' or named aggregation **kwargs."
        raise TypeError(no_arg_message)
    return columns, func
19,228
def gen_test():
    """Test function used for debugging"""
    train_files, train_steering, train_flip_flags, valid_files, valid_steering, valid_flip_flags = organize_data()
    for i in range(2):
        # x, y, flip_flags = (next(generator_v2(valid_files, valid_steering, valid_flip_flags)))
        start_time = time.time()
        x, y = (next(generator_v2(train_files, train_steering, train_flip_flags)))
        end_time = time.time()
        print("Generator time:", end_time - start_time)
        print("Gen len", len(x), len(y))
        x, y = shuffle(x, y, n_samples=4)
        for j, img in enumerate(x):
            # cv2.imshow("{0}, {1}: {2}: {3:.3f}".format(i, j, flip_flags[j], y[j]), img)
            print(img)
            print(img.shape)
            cv2.imshow("{0}, {1}: {2}: {3:.3f}".format(i, j, '', y[j]), img)
        cv2.waitKey(8000)
        cv2.destroyAllWindows()
19,229
def mni152_to_fslr(img, fslr_density='32k', method='linear'):
    """
    Projects `img` in MNI152 space to fsLR surface

    Parameters
    ----------
    img : str or os.PathLike or niimg_like
        Image in MNI152 space to be projected
    fslr_density : {'32k', '164k'}, optional
        Desired output density of fsLR surface. Default: '32k'
    method : {'nearest', 'linear'}, optional
        Method for projection. Specify 'nearest' if `img` is a label image.
        Default: 'linear'

    Returns
    -------
    fsLR : (2,) tuple-of-nib.GiftiImage
        Projected `img` on fsLR surface
    """
    if fslr_density in ('4k', '8k'):
        raise NotImplementedError('Cannot perform registration fusion to '
                                  f'fsLR {fslr_density} space yet.')
    return _vol_to_surf(img, 'fsLR', fslr_density, method)
19,230
def parse_record(raw_record, _mode, dtype):
    """Parse CIFAR-10 image and label from a raw record."""
    # Convert bytes to a vector of uint8 that is record_bytes long.
    record_vector = tf.io.decode_raw(raw_record, tf.uint8)

    # The first byte represents the label, which we convert from uint8 to int32
    # and then to one-hot.
    label = tf.cast(record_vector[0], tf.int32)

    # The remaining bytes after the label represent the image, which we reshape
    # from [depth * height * width] to [depth, height, width].
    depth_major = tf.reshape(record_vector[1:_RECORD_BYTES],
                             [_NUM_CHANNELS, _HEIGHT, _WIDTH])

    # Convert from [depth, height, width] to [height, width, depth], and cast
    # as float32.
    image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)

    # normalise images to range 0-1
    image = image / 255.0
    image = tf.cast(image, dtype)

    return image, image
19,231
def create_sintel_submission(model, iters=32, warm_start=False, output_path='sintel_submission'):
    """ Create submission for the Sintel leaderboard """
    model.eval()
    for dstype in ['clean', 'final']:
        test_dataset = datasets.MpiSintel(split='test', aug_params=None, dstype=dstype)

        flow_prev, sequence_prev = None, None
        for test_id in range(len(test_dataset)):
            image1, image2, (sequence, frame) = test_dataset[test_id]
            if sequence != sequence_prev:
                flow_prev = None

            padder = InputPadder(image1.shape)
            image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda())

            flow_low, flow_pr = model(image1, image2, iters=iters, flow_init=flow_prev, test_mode=True)
            flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()

            if warm_start:
                flow_prev = forward_interpolate(flow_low[0])[None].cuda()

            output_dir = os.path.join(output_path, dstype, sequence)
            output_file = os.path.join(output_dir, 'frame%04d.flo' % (frame + 1))

            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            frame_utils.writeFlow(output_file, flow)
            sequence_prev = sequence
19,232
def makeSSHTTPClient(paramdict):
    """Creates a SingleShotHTTPClient for the given URL. Needed for Carousel."""
    # get the "url" and "postbody" keys from paramdict to use as the arguments of SingleShotHTTPClient
    return SingleShotHTTPClient(paramdict.get("url", ""),
                                paramdict.get("postbody", ""),
                                extraheaders=paramdict.get("extraheaders", None),
                                method=paramdict.get('method', None))
19,233
def getFiles(regex, camera, mjdToIngest=None, mjdthreshold=None, days=None,
             atlasroot='/atlas/', options=None):
    """getFiles.

    Args:
        regex:
        camera:
        mjdToIngest:
        mjdthreshold:
        days:
        atlasroot:
        options:
    """
    # If mjdToIngest is defined, ignore mjdThreshold. If neither
    # are defined, grab all the files.
    # Don't use find, use glob. It treats the whole argument as a regex.
    # e.g. directory = "/atlas/diff/" + camera + "/5[0-9][0-9][0-9][0-9]", regex = *.ddc
    if mjdToIngest:
        if options is not None and options.difflocation is not None:
            directory = options.difflocation.replace('CAMERA', camera).replace('MJD', str(mjdToIngest))
        else:
            directory = atlasroot + "diff/" + camera + "/" + str(mjdToIngest)
        fileList = glob.glob(directory + '/' + regex)
    else:
        if mjdthreshold and days:
            fileList = []
            for day in range(days):
                if options is not None and options.difflocation is not None:
                    directory = options.difflocation.replace('CAMERA', camera).replace('MJD', str(mjdthreshold + day))
                else:
                    directory = atlasroot + "diff/" + camera + "/%d" % (mjdthreshold + day)
                files = glob.glob(directory + '/' + regex)
                if files:
                    fileList += files
        else:
            if options is not None and options.difflocation is not None:
                directory = options.difflocation.replace('CAMERA', camera).replace('MJD', '/[56][0-9][0-9][0-9][0-9]')
            else:
                directory = atlasroot + "diff/" + camera + "/[56][0-9][0-9][0-9][0-9]"
            fileList = glob.glob(directory + '/' + regex)

    fileList.sort()
    return fileList
19,234
def chain(*args: GradientTransformation) -> GradientTransformation:
    """Applies a list of chainable update transformations.

    Given a sequence of chainable transforms, `chain` returns an `init_fn`
    that constructs a `state` by concatenating the states of the individual
    transforms, and returns an `update_fn` which chains the update
    transformations feeding the appropriate state to each.

    Args:
        *args: a sequence of chainable (init_fn, update_fn) tuples.

    Returns:
        A single (init_fn, update_fn) tuple.
    """
    init_fns, update_fns = zip(*args)

    def init_fn(params: Params) -> Sequence[OptState]:
        return [fn(params) for fn in init_fns]

    def update_fn(updates: Updates, state: OptState, params: Params = None
                  ) -> Tuple[Updates, Sequence[OptState]]:
        new_state = []
        for s, fn in zip(state, update_fns):  # pytype: disable=wrong-arg-types
            updates, new_s = fn(updates, s, params)
            new_state.append(new_s)
        return updates, new_state

    return GradientTransformation(init_fn, update_fn)
19,235
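A hedged usage sketch, assuming this is the `chain` combinator from an optax-style JAX optimizer library; the particular transforms and learning rate are illustrative:

import jax.numpy as jnp
import optax  # assumed: library exposing compatible GradientTransformation factories

params = {'w': jnp.zeros(3)}
grads = {'w': jnp.ones(3)}

tx = optax.chain(
    optax.clip_by_global_norm(1.0),  # clip the raw gradients first
    optax.scale_by_adam(),           # then apply Adam rescaling
    optax.scale(-1e-3),              # finally the (negative) learning rate
)
state = tx.init(params)              # concatenated list of per-transform states
updates, state = tx.update(grads, state, params)
params = optax.apply_updates(params, updates)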
def test_usage_in_dict():
    """Test usage_in_dict calculator"""
    # Tests with absolute moment data
    assert calculators.usage_in_dict(TEST_USAGE_IN_DICT) == TEST_USAGE_IN_DICT_RESULT
    assert calculators.usage_in_dict(TEST_USAGE_IN_DICT_2) == TEST_USAGE_IN_DICT_RESULT_2
    # Tests with relative data
    assert calculators.usage_in_dict(TEST_USAGE_IN_DICT_2, TEST_USAGE_IN_DICT) == TEST_USAGE_IN_DICT_RESULT_3
19,236
def fibonacci_mult_tuple(fib0=2, fib1=3, count=10):
    """Returns a tuple with a fibonacci sequence using * instead of +."""
    return tuple(fibonacci_mult_list(fib0, fib1, count))
19,237
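The helper fibonacci_mult_list is not shown in this snippet; a minimal sketch consistent with the docstring (a multiplicative Fibonacci recurrence) might look like:

def fibonacci_mult_list(fib0=2, fib1=3, count=10):
    # Hypothetical helper: each term is the product of the previous two.
    seq = [fib0, fib1]
    while len(seq) < count:
        seq.append(seq[-2] * seq[-1])
    return seq[:count]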
def execute_in_process(f):
    """Decorator. Execute the function in a separate process."""
    def wrapper(*args, **kwargs):
        logging.info("A new process has been launched")
        process_f = Process(target=f, args=args, kwargs=kwargs)
        process_f.start()
        return process_f
    return wrapper
19,238
def castep_spectral_dispersion(computer, calc_doc, seed):
    """Runs a dispersion interpolation on top of a completed SCF calculation,
    optionally running orbitals2bands and OptaDOS projected dispersion.

    Parameters:
        computer (:obj:`matador.compute.ComputeTask`): the object that will be calling CASTEP.
        calc_doc (dict): the structure to run on.
        seed (str): root filename of structure.
    """
    LOG.info('Performing CASTEP spectral dispersion calculation...')
    disp_doc = copy.deepcopy(calc_doc)
    disp_doc['task'] = 'spectral'
    disp_doc['spectral_task'] = 'bandstructure'
    # disable checkpointing for BS/DOS by default, leaving just SCF
    disp_doc['write_checkpoint'] = 'none'
    disp_doc['pdos_calculate_weights'] = True
    disp_doc['write_cell_structure'] = True
    disp_doc['continuation'] = 'default'

    required = []
    forbidden = ['spectral_kpoints_mp_spacing']

    computer.validate_calc_doc(disp_doc, required, forbidden)
    success = computer.run_castep_singleshot(disp_doc, seed, keep=True, intermediate=True)

    if disp_doc.get('write_orbitals'):
        LOG.info('Planning to call orbitals2bands...')
        _cache_executable = copy.deepcopy(computer.executable)
        _cache_core = copy.deepcopy(computer.ncores)
        computer.ncores = 1
        computer.executable = 'orbitals2bands'
        try:
            success = computer.run_generic(intermediate=True, mv_bad_on_failure=False)
        except Exception as exc:
            computer.executable = _cache_executable
            computer.ncores = _cache_core
            LOG.warning('Failed to call orbitals2bands, with error: {}'.format(exc))
        computer.ncores = _cache_core
        computer.executable = _cache_executable

    return success
19,239
def return_limit(x):
    """Returns the standardized limit value for the given pollutant."""
    dizionario_limite = {'BENZENE': 5, 'NO2': 200, 'O3': 180,
                         'PM10': 50, 'PM2.5': 25}
    return dizionario_limite[x]
19,240
def npaths(x, y):
    """
    Count paths recursively. Memoizing makes this efficient.
    """
    if x > 0 and y > 0:
        return npaths(x - 1, y) + npaths(x, y - 1)
    if x > 0:
        return npaths(x - 1, y)
    if y > 0:
        return npaths(x, y - 1)
    return 1
19,241
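The docstring points at memoization; a minimal sketch of the memoized variant using only the standard library:

from functools import lru_cache

@lru_cache(maxsize=None)
def npaths_memo(x, y):
    # Same recurrence; lru_cache collapses the exponential call tree
    # to O(x * y) distinct evaluations.
    if x > 0 and y > 0:
        return npaths_memo(x - 1, y) + npaths_memo(x, y - 1)
    if x > 0:
        return npaths_memo(x - 1, y)
    if y > 0:
        return npaths_memo(x, y - 1)
    return 1

assert npaths_memo(20, 20) == 137846528820  # C(40, 20) lattice paths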
def sqlify(obj):
    """
    converts `obj` to its proper SQL version

        >>> sqlify(None)
        'NULL'
        >>> sqlify(True)
        "'t'"
        >>> sqlify(3)
        '3'
    """
    # because `1 == True and hash(1) == hash(True)`
    # we have to do this the hard way...
    if obj is None:
        return 'NULL'
    elif obj is True:
        return "'t'"
    elif obj is False:
        return "'f'"
    elif datetime and isinstance(obj, datetime.datetime):
        return repr(obj.isoformat())
    else:
        return repr(obj)
19,242
def test_estimator_to_pfa_mixednb(dtypes):
    """Check that converted PFA is giving the same results as MixedNB"""
    X, y, types = _classification_task(dtypes=dtypes)
    is_nominal = [t == 'n' for t in dtypes]
    estimator = _mixednb(X, y, is_nominal=is_nominal, classes=['a', 'b', 'c'])
    pfa = sklearn_to_pfa(estimator, types)
    estimator_pred = estimator.predict(X)
    pfa_pred = _predict_pfa(X, types, pfa)
    assert all(estimator_pred == pfa_pred)
19,243
def create_results_dataframe(
    list_results,
    settings,
    result_classes=None,
    abbreviate_name=False,
    format_number=False,
):
    """
    Returns a :class:`pandas.DataFrame`.

    If *result_classes* is a list of :class:`Result`, only the columns from
    these result classes will be returned. If ``None``, the columns from all
    results will be returned.
    """
    list_series = []
    for results in list_results:
        builder = SeriesBuilder(settings, abbreviate_name, format_number)
        for result in results:
            prefix = result.getname().lower() + " "
            if result_classes is None:
                # Include all results
                builder.add_entity(result, prefix)
            elif type(result) in result_classes:
                if len(result_classes) == 1:
                    builder.add_entity(result)
                else:
                    builder.add_entity(result, prefix)
        list_series.append(builder.build())
    return pd.DataFrame(list_series)
19,244
def set_config_values():
    """Used to set the config values"""
    sed('/home/ubuntu/learning/learning/settings-production.py', "\[SERVER_NAME\]", host_name, backup='')
    sed('/home/ubuntu/learning/learning/settings.py', "\[SERVER_NAME\]", host_name, backup='')
    sed('/home/ubuntu/learning/config.py', "\[SERVER_NAME\]", SERVER_NAME, backup='')
    sed('/home/ubuntu/learning/config.py', "\[DB_USER_NAME\]", DB_USER_NAME, backup='')
    sed('/home/ubuntu/learning/config.py', "\[DB_USER_PASS\]", DB_USER_PASS, backup='')
    sed('/home/ubuntu/learning/config.py', "\[DB_NAME\]", DB_NAME, backup='')
    sed('/home/ubuntu/learning/config.py', "\[DB_HOST\]", DB_HOST, backup='')
    sed('/home/ubuntu/learning/config.py', "\[DB_PORT\]", DB_PORT, backup='')
    sed('/home/ubuntu/learning/config.py', "\[PRODUCT_NAME\]", PRODUCT_NAME, backup='')
    sed('/home/ubuntu/learning/config.py', "\[DEFAULT_HOST\]", DEFAULT_HOST, backup='')
    sed('/home/ubuntu/learning/config.py', "\[DEFAULT_PORT\]", DEFAULT_PORT, backup='')
    sed('/home/ubuntu/learning/config.py', "\[MANDRILL_API_KEY\]", MANDRILL_API_KEY, backup='')
    sed('/home/ubuntu/learning/config.py', "\[EMAIL_HOST\]", EMAIL_HOST, backup='')
    sed('/home/ubuntu/learning/config.py', "\[EMAIL_HOST_USER\]", EMAIL_HOST_USER, backup='')
    sed('/home/ubuntu/learning/config.py', "\[EMAIL_HOST_PASSWORD\]", EMAIL_HOST_PASSWORD, backup='')
    sed('/home/ubuntu/learning/config.py', "\[EMAIL_PORT\]", EMAIL_PORT, backup='')
19,245
def get_first_model_each_manufacturer(cars=cars):
    """Return a list with the first model of each manufacturer (original ordering)."""
    first = []
    for key, item in cars.items():
        first.append(item[0])
    return first
19,246
def white(*N, mean=0, std=1):
    """
    White noise.

    :param N: Amount of samples.

    White noise has a constant power density. Its narrowband spectrum is
    therefore flat. The power in white noise will increase by a factor of two
    for each octave band, and therefore increases with 3 dB per octave.
    """
    return std * np.random.randn(*N) + mean
19,247
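A quick usage sketch:

import numpy as np

noise = white(10000)               # 1-D white noise, zero mean, unit std
print(noise.mean(), noise.std())   # approximately 0 and 1
block = white(4, 256, std=0.5)     # shape (4, 256) with std 0.5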
def random_size_crop(src, size, min_area=0.25, ratio=(3.0/4.0, 4.0/3.0)):
    """Randomly crop src with size. Randomize area and aspect ratio"""
    h, w, _ = src.shape
    area = w * h
    for _ in range(10):
        new_area = random.uniform(min_area, 1.0) * area
        new_ratio = random.uniform(*ratio)
        # sqrt is required so that new_w * new_h ~= new_area; the unrooted
        # form inflates the crop and almost always falls through to
        # random_crop.
        new_w = int(round((new_area * new_ratio) ** 0.5))
        new_h = int(round((new_area / new_ratio) ** 0.5))

        if random.uniform(0., 1.) < 0.5:
            new_w, new_h = new_h, new_w

        if new_w > w or new_h > h:
            continue

        x0 = random.randint(0, w - new_w)
        y0 = random.randint(0, h - new_h)

        out = fixed_crop(src, x0, y0, new_w, new_h, size)
        return out, (x0, y0, new_w, new_h)

    return random_crop(src, size)
19,248
def analyze_subject(subject_id, A, B, spheres, interpolate, mask, data_dir=None):
    """
    Parameters
    ----------
    subject_id : int
        unique ID of the subject (index of the fMRI data in the input dataset)
    A : tuple
        tuple of (even_trials, odd_trials) for the first condition (A);
        even/odd_trials is the subject's mean fMRI data for that trial, and
        should be either a 3D niimg or path (string) to a NIfTI file
    B : tuple
        tuple of (even_trials, odd_trials) for the second condition (B);
        formatted the same as A
    spheres : list
        TODO
    interpolate : bool
        whether or not to skip every other sphere and interpolate the results;
        used to speed up the analysis
    mask : Niimg-like object
        boolean image giving location of voxels containing usable signals
    data_dir : string
        path to directory where MVPA results should be stored

    Returns
    -------
    score_map_fpath : str
        path to NIfTI file with values indicating the significance of each
        voxel for condition A; same shape as the mask
    """
    A_even, A_odd = A
    B_even, B_odd = B
    if all(isinstance(img, str) for img in [A_even, A_odd, B_even, B_odd]):
        A_even, A_odd = load_img(A_even), load_img(A_odd)
        B_even, B_odd = load_img(B_even), load_img(B_odd)
    A_even, A_odd = get_data(A_even), get_data(A_odd)
    B_even, B_odd = get_data(B_even), get_data(B_odd)

    _mask = get_data(mask)
    scores = np.zeros_like(_mask, dtype=np.float64)
    X, y = [], []

    for (x0, y0, z0), sphere in tqdm(spheres):
        _A_even, _A_odd = A_even[sphere].flatten(), A_odd[sphere].flatten()
        _B_even, _B_odd = B_even[sphere].flatten(), B_odd[sphere].flatten()

        AA_sim = atanh(np.corrcoef(np.vstack((_A_even, _A_odd)))[0, 1])
        BB_sim = atanh(np.corrcoef(np.vstack((_B_even, _B_odd)))[0, 1])
        AB_sim = atanh(np.corrcoef(np.vstack((_A_even, _B_odd)))[0, 1])
        BA_sim = atanh(np.corrcoef(np.vstack((_B_even, _A_odd)))[0, 1])

        scores[x0][y0][z0] = AA_sim + BB_sim - AB_sim - BA_sim
        X.append(np.array([x0, y0, z0]))
        y.append(scores[x0][y0][z0])

    if interpolate:
        interp = NearestNDInterpolator(np.vstack(X), y)
        for indices in np.transpose(np.nonzero(_mask)):
            x, y, z = indices
            if not scores[x][y][z]:
                scores[x][y][z] = interp(indices)

    score_map_fpath = score_map_filename(data_dir, subject_id)
    scores = new_img_like(mask, scores)
    scores.to_filename(score_map_fpath)
    return score_map_fpath
19,249
def joinAges(dataDict):
    """Merges columns by county, dropping ages"""
    popColumns = list(dataDict.values())[0].columns.tolist()
    popColumns = [re.sub("[^0-9]", "", column) for column in popColumns]
    dictOut = dict()
    for compartmentName, table in dataDict.items():
        table.columns = popColumns
        dictOut[compartmentName] = table.sum(axis=1, level=0)
    return dictOut
19,250
def plot_graph_route(G, route, bbox=None, fig_height=6, fig_width=None,
                     margin=0.02, bgcolor='w', axis_off=True, show=True,
                     save=False, close=True, file_format='png', filename='temp',
                     dpi=300, annotate=False, node_color='#999999',
                     node_size=15, node_alpha=1, node_edgecolor='none',
                     node_zorder=1, edge_color='#999999', edge_linewidth=1,
                     edge_alpha=1, use_geom=True, origin_point=None,
                     destination_point=None, route_color='r', route_linewidth=4,
                     route_alpha=0.5, orig_dest_node_alpha=0.5,
                     orig_dest_node_size=100, orig_dest_node_color='r',
                     orig_dest_point_color='b'):
    """
    Plot a route along a networkx spatial graph.

    Parameters
    ----------
    G : networkx multidigraph
    route : list
        the route as a list of nodes
    bbox : tuple
        bounding box as north,south,east,west - if None will calculate from
        spatial extents of data. if passing a bbox, you probably also want to
        pass margin=0 to constrain it.
    fig_height : int
        matplotlib figure height in inches
    fig_width : int
        matplotlib figure width in inches
    margin : float
        relative margin around the figure
    axis_off : bool
        if True turn off the matplotlib axis
    bgcolor : string
        the background color of the figure and axis
    show : bool
        if True, show the figure
    save : bool
        if True, save the figure as an image file to disk
    close : bool
        close the figure (only if show equals False) to prevent display
    file_format : string
        the format of the file to save (e.g., 'jpg', 'png', 'svg')
    filename : string
        the name of the file if saving
    dpi : int
        the resolution of the image file if saving
    annotate : bool
        if True, annotate the nodes in the figure
    node_color : string
        the color of the nodes
    node_size : int
        the size of the nodes
    node_alpha : float
        the opacity of the nodes
    node_edgecolor : string
        the color of the node's marker's border
    node_zorder : int
        zorder to plot nodes, edges are always 2, so make node_zorder 1 to
        plot nodes beneath them or 3 to plot nodes atop them
    edge_color : string
        the color of the edges' lines
    edge_linewidth : float
        the width of the edges' lines
    edge_alpha : float
        the opacity of the edges' lines
    use_geom : bool
        if True, use the spatial geometry attribute of the edges to draw
        geographically accurate edges, rather than just lines straight from
        node to node
    origin_point : tuple
        optional, an origin (lat, lon) point to plot instead of the origin
        node
    destination_point : tuple
        optional, a destination (lat, lon) point to plot instead of the
        destination node
    route_color : string
        the color of the route
    route_linewidth : int
        the width of the route line
    route_alpha : float
        the opacity of the route line
    orig_dest_node_alpha : float
        the opacity of the origin and destination nodes
    orig_dest_node_size : int
        the size of the origin and destination nodes
    orig_dest_node_color : string
        the color of the origin and destination nodes
    orig_dest_point_color : string
        the color of the origin and destination points if being plotted
        instead of nodes

    Returns
    -------
    fig, ax : tuple
    """
    # plot the graph but not the route
    fig, ax = plot_graph(G, bbox=bbox, fig_height=fig_height, fig_width=fig_width,
                         margin=margin, axis_off=axis_off, bgcolor=bgcolor,
                         show=False, save=False, close=False, filename=filename,
                         dpi=dpi, annotate=annotate, node_color=node_color,
                         node_size=node_size, node_alpha=node_alpha,
                         node_edgecolor=node_edgecolor, node_zorder=node_zorder,
                         edge_color=edge_color, edge_linewidth=edge_linewidth,
                         edge_alpha=edge_alpha, use_geom=use_geom)

    # the origin and destination nodes are the first and last nodes in the route
    origin_node = route[0]
    destination_node = route[-1]

    if origin_point is None or destination_point is None:
        # if caller didn't pass points, use the first and last node in route as
        # origin/destination
        origin_destination_lats = (G.nodes[origin_node]['y'], G.nodes[destination_node]['y'])
        origin_destination_lons = (G.nodes[origin_node]['x'], G.nodes[destination_node]['x'])
    else:
        # otherwise, use the passed points as origin/destination
        origin_destination_lats = (origin_point[0], destination_point[0])
        origin_destination_lons = (origin_point[1], destination_point[1])
        orig_dest_node_color = orig_dest_point_color

    # scatter the origin and destination points
    ax.scatter(origin_destination_lons, origin_destination_lats,
               s=orig_dest_node_size, c=orig_dest_node_color,
               alpha=orig_dest_node_alpha, edgecolor=node_edgecolor, zorder=4)

    # plot the route lines
    edge_nodes = list(zip(route[:-1], route[1:]))
    lines = []
    for u, v in edge_nodes:
        # if there are parallel edges, select the shortest in length
        data = min(G.get_edge_data(u, v).values(), key=lambda x: x['length'])

        # if it has a geometry attribute (ie, a list of line segments)
        if 'geometry' in data and use_geom:
            # add them to the list of lines to plot
            xs, ys = data['geometry'].xy
            lines.append(list(zip(xs, ys)))
        else:
            # if it doesn't have a geometry attribute, the edge is a straight
            # line from node to node
            x1 = G.nodes[u]['x']
            y1 = G.nodes[u]['y']
            x2 = G.nodes[v]['x']
            y2 = G.nodes[v]['y']
            line = [(x1, y1), (x2, y2)]
            lines.append(line)

    # add the lines to the axis as a linecollection
    lc = LineCollection(lines, colors=route_color, linewidths=route_linewidth,
                        alpha=route_alpha, zorder=3)
    ax.add_collection(lc)

    # save and show the figure as specified
    fig, ax = save_and_show(fig, ax, save, show, close, filename, file_format,
                            dpi, axis_off)
    return fig, ax
19,251
def test_dwindle(
    size, row_limits, col_limits, distributions, weights, max_iter, best_prop,
    lucky_prop, crossover_prob, mutation_prob, shrinkage, maximise,
):
    """Test that the default dwindling method does nothing."""
    families = [edo.Family(dist) for dist in distributions]
    do = DataOptimiser(
        trivial_fitness, size, row_limits, col_limits, families, weights,
        max_iter, best_prop, lucky_prop, crossover_prob, mutation_prob,
        shrinkage, maximise,
    )

    do.dwindle()
    assert do.mutation_prob == mutation_prob

    do.mutation_prob = "foo"
    do.dwindle()
    assert do.mutation_prob == "foo"
19,252
def random_polynomialvector(
    secpar: int, lp: LatticeParameters, distribution: str,
    dist_pars: Dict[str, int], num_coefs: int, bti: int, btd: int,
    const_time_flag: bool = True
) -> PolynomialVector:
    """
    Generate a random PolynomialVector with bounded Polynomial entries.

    Essentially just instantiates a PolynomialVector object with a list of
    random Polynomial objects as entries, which are in turn generated by
    random_polynomial.

    :param secpar: Input security parameter
    :type secpar: int
    :param lp: Lattice parameters
    :type lp: LatticeParameters
    :param distribution: String code describing which distribution to use
    :type distribution: str
    :param dist_pars: Distribution parameters
    :type dist_pars: dict
    :param num_coefs: Number of coefficients to generate
    :type num_coefs: int
    :param bti: Number of bits required to unbiasedly sample indices without
        replacement.
    :type bti: int
    :param btd: Number of bits required to unbiasedly sample an integer
        modulo the modulus in lp.
    :type btd: int
    :param const_time_flag: Indicates whether arithmetic should be constant
        time.
    :type const_time_flag: bool

    :return:
    :rtype: PolynomialVector
    """
    if secpar < 1:
        raise ValueError('Cannot random_polynomialvector without an integer security parameter.')
    elif distribution == UNIFORM_INFINITY_WEIGHT:
        return random_polynomial_vector_inf_wt_unif(
            secpar=secpar, lp=lp, dist_pars=dist_pars, num_coefs=num_coefs,
            bti=bti, btd=btd, const_time_flag=const_time_flag
        )
    raise ValueError('Tried to random_polynomialvector with a distribution that is not supported.')
19,253
def parse_module(file_name, file_reader):
    """Parses a module, returning a module-level IR.

    Arguments:
        file_name: The name of the module's source file.
        file_reader: A callable that returns either:
            (file_contents, None) or
            (None, list_of_error_detail_strings)

    Returns:
        (ir, debug_info, errors), where ir is a module-level intermediate
        representation (IR), debug_info is a ModuleDebugInfo containing the
        tokenization, parse tree, and original source text of all modules,
        and errors is a list of tokenization or parse errors. If errors is
        not an empty list, ir will be None.

    Raises:
        FrontEndFailure: An error occurred while reading or parsing the
        module. str(error) will give a human-readable error message.
    """
    source_code, errors = file_reader(file_name)
    if errors:
        location = parser_types.make_location((1, 1), (1, 1))
        return None, None, [
            [error.error(file_name, location, "Unable to read file.")] +
            [error.note(file_name, location, e) for e in errors]
        ]
    return parse_module_text(source_code, file_name)
19,254
def process_tocdelay(app, doctree):
    """
    Collect all *tocdelay* in the environment.
    Look for the section or document which contains them.
    Put them into the variable *tocdelay_all_tocdelay* in the config.
    """
    for node in doctree.traverse(tocdelay_node):
        node["tdprocessed"] += 1
19,255
def get_glare_value(gray):
    """
    :param gray: grayscale image, e.g.
        gray = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2GRAY)
    :return: numerical value between 0-255 which tells the glare value
    """
    blur = cv2.blur(gray, (3, 3))  # kernel size could depend on image size
    mean_blur = cv2.mean(blur)
    return mean_blur[0]
19,256
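A hedged usage sketch, assuming OpenCV is installed and a file named photo.jpg exists (both the file name and the threshold are illustrative):

import cv2

image = cv2.imread("photo.jpg")                 # hypothetical input file
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
glare = get_glare_value(gray)
if glare > 200:                                 # illustrative threshold
    print("image likely over-exposed:", glare)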
def pname(name):
    """Prints a name out with apple at the end"""
    try:
        res = print_name(name)
        click.echo(click.style(res, bg='blue', fg='white'))
    except TypeError:
        click.echo("Must pass in Name")
19,257
def print_result(result_list):
    """
    Print the result list (ip/mac pairs) in the console.
    """
    print("IP\t\t\tMAC ADDRESS\n..........................................................................")
    for client in result_list:
        print(client["ip"] + "\t\t" + client["mac"])
19,258
def makeCrops(image, stepSize, windowSize, true_center):
    """Slide a window over the image and return the stacked crops plus, for
    each crop, whether the true center falls inside it (with a margin).
    """
    image = image.type(torch.FloatTensor)
    crops = []
    truths = []
    c_x, c_y, orient = true_center
    # TODO: look into ordering, why it's y, x!
    margin = 15
    # --> is x, but is the column
    # to slide horizontally, y must come first
    for y in range(0, image.shape[0] - windowSize[0] + 1, stepSize):
        for x in range(0, image.shape[1] - windowSize[1] + 1, stepSize):
            end_x, end_y = x + windowSize[1], y + windowSize[0]
            hasRect = (x + margin < c_x < end_x - margin) and (
                y + margin < c_y < end_y - margin
            )
            truths.append(hasRect)
            crops.append(image[y:end_y, x:end_x])
    crops = torch.stack(crops)
    print("shape of crops", crops.shape)
    return crops, truths
19,259
def generate_n_clusters(object_generator, n_clusters, n_objects_per_cluster, *, rng=None):
    """Creates n_clusters of random objects"""
    rng = np.random.default_rng(rng)
    object_clusters = []
    for i in range(n_clusters):
        cluster_objects = generate_random_object_cluster(
            n_objects_per_cluster, object_generator,
            max_cluster_trans=0.5, max_cluster_rot=np.pi / 16, rng=rng)
        object_clusters.append(cluster_objects)
    all_objects = [item for sublist in object_clusters for item in sublist]
    return all_objects, object_clusters
19,260
def move_process_data_to_store(repo_path: str, *, remote_operation: bool = False):
    """Move symlinks to hdf5 files from process directory to store directory

    In process writes never directly access files in the data directory.
    Instead, when the file is created it is symlinked to either the remote
    data or stage data directory. All access is handled through this
    intermediate symlink in order to prevent any ability to overwrite data
    (even if there are major errors in the hash records). Once the write
    operation completes (commit for staging, or completion of fetch for
    remote), this method is called to move the symlinks from the write
    enabled directory to the (read only, fully-committed) storage dir.

    Parameters
    ----------
    repo_path : str
        path to the repository on disk
    remote_operation : bool, optional
        If this operation is occurring from a remote fetch operation.
        (the default is False, which means that all changes will occur in
        the staging area)
    """
    store_dir = pjoin(repo_path, c.DIR_DATA_STORE)
    if not remote_operation:
        process_dir = pjoin(repo_path, c.DIR_DATA_STAGE)
    else:
        process_dir = pjoin(repo_path, c.DIR_DATA_REMOTE)

    dirs_to_make, symlinks_to_make = [], []
    for root, dirs, files in os.walk(process_dir):
        for d in dirs:
            dirs_to_make.append(os.path.relpath(pjoin(root, d), process_dir))
        for f in files:
            store_file_pth = pjoin(store_dir, os.path.relpath(pjoin(root, f), process_dir))
            link_file_pth = os.path.normpath(pjoin(root, os.readlink(pjoin(root, f))))
            symlinks_to_make.append((link_file_pth, store_file_pth))

    for d in dirs_to_make:
        dpth = pjoin(store_dir, d)
        if not os.path.isdir(dpth):
            os.makedirs(dpth)
    for src, dest in symlinks_to_make:
        symlink_rel(src, dest)

    # reset before releasing control.
    shutil.rmtree(process_dir)
    os.makedirs(process_dir)
19,261
async def run_server(port):
    """The main entry point for the server."""
    base_context = ConnectionContext()

    async def responder(conn, recv_channel):
        """
        This task reads results from finished method handlers and sends them
        back to the client.
        """
        async for request, result in recv_channel:
            if isinstance(result, JsonRpcException):
                await conn.respond_with_error(request, result.get_error())
            else:
                await conn.respond_with_result(request, result)

    async def connection_handler(ws_request):
        """
        Handle a new connection by completing the WebSocket handshake and
        then iterating over incoming messages.
        """
        ws = await ws_request.accept()
        transport = WebSocketTransport(ws)
        rpc_conn = JsonRpcConnection(transport, JsonRpcConnectionType.SERVER)
        conn_context = copy(base_context)
        result_send, result_recv = trio.open_memory_channel(10)
        async with trio.open_nursery() as nursery:
            nursery.start_soon(responder, rpc_conn, result_recv)
            nursery.start_soon(rpc_conn._background_task)
            async with dispatch.connection_context(conn_context):
                async for request in rpc_conn.iter_requests():
                    nursery.start_soon(dispatch.handle_request, request, result_send)
            nursery.cancel_scope.cancel()

    logger.info("Listening on port %d (Type ctrl+c to exit)", port)
    await trio_websocket.serve_websocket(connection_handler, "localhost", port, None)
19,262
def collect(val, collections, default_collections):
    """Adds keys to a collection.

    Args:
        val: The value to add per each key.
        collections: A collection of keys to add.
        default_collections: Used if collections is None.
    """
    if collections is None:
        collections = default_collections
    for key in collections:
        ops.add_to_collection(key, val)
19,263
def disp2vel(wrange, velscale):
    """Returns a log-rebinned wavelength dispersion with constant velocity.

    This code is an adaptation of pPXF's log_rebin routine, simplified to
    deal with the wavelength dispersion only.

    Parameters
    ----------
    wrange: list, np.array or astropy.Quantity
        Input wavelength dispersion range with two elements.

    velscale: float or astropy.Quantity
        Desired output velocity scale. Units are assumed to be km/s unless
        specified as an astropy.Quantity.
    """
    c = 299792.458  # Speed of light in km/s
    if isinstance(wrange, list):
        wrange = np.array(wrange)
    wunits = wrange.unit if hasattr(wrange, "unit") else 1
    if hasattr(velscale, "unit"):
        velscale = velscale.to(u.km / u.s).value
    veldiff = np.log(np.max(wrange) / np.min(wrange)) * c
    n = veldiff / velscale
    m = int(n)
    dv = 0.5 * (n - m) * velscale
    v = np.arange(0, m * velscale, velscale) + dv
    w = wrange[0] * np.exp(v / c)
    return w * wunits
19,264
def relabel(labels):
    """
    Remaps integer labels based on who is most frequent
    """
    uni_labels, uni_inv, uni_counts = np.unique(
        labels, return_inverse=True, return_counts=True
    )
    sort_inds = np.argsort(uni_counts)[::-1]
    new_labels = range(len(uni_labels))
    uni_labels_sorted = uni_labels[sort_inds]
    relabel_map = dict(zip(uni_labels_sorted, new_labels))
    new_labels = np.array(itemgetter(*labels)(relabel_map))
    return new_labels
19,265
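A small worked example, assuming numpy and operator.itemgetter are imported as the function body requires:

import numpy as np
from operator import itemgetter

labels = np.array([7, 7, 7, 3, 3, 9])
print(relabel(labels))  # [0 0 0 1 1 2]: 7 is most frequent -> 0, then 3 -> 1, 9 -> 2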
def before_call_example(f, *args, **kwargs):
    """before_call decorators must always accept f, *args, **kwargs"""
    print("This is function before_call_example")
19,266
def test_plot():
    """Ensure that the plot_microstructures function is run during the tests."""
    plot_microstructures(np.arange(4).reshape(2, 2), titles=["test"])
    plot_microstructures(np.arange(4).reshape(2, 2), titles="test")
    plot_microstructures(np.arange(4).reshape(2, 2))
19,267
def precision_at_threshold(
    weighted_actual_names: List[Tuple[str, float, int]],
    candidates: np.ndarray,
    threshold: float,
    distances: bool = False,
) -> float:
    """
    Return the precision at a threshold for the given weighted-actuals and candidates.

    :param weighted_actual_names: list of [name, weight, ?] - weight and ? are ignored
    :param candidates: array of [name, score]
    :param threshold: threshold
    :param distances: if True, score must be <= threshold; if False, score
        must be >= threshold; defaults to False
    """
    matches = _get_matches(candidates, threshold, distances)
    num_matches = len(matches)
    if num_matches == 0:
        return 1.0
    return len(set(name for name, weight, _ in weighted_actual_names).intersection(matches)) / num_matches
19,268
def test_unpack_two_distinct_sets_zip(zip_file_contents: List[Path],
                                      test_output_dirs: OutputFolderForTests) -> None:
    """
    Test that a zip file containing two distinct sets of files in two folders,
    but possibly in a series of nesting folders, can be extracted into a
    folder containing only the files.

    :param zip_file_contents: List of relative file paths to create and test.
    :param test_output_dirs: Test output directories.
    """
    all_zip_filenames = TEST_ZIP_FILENAMES_1 + TEST_ZIP_FILENAMES_2
    _common_test_unpack_dicom_zip(zip_file_contents, all_zip_filenames, test_output_dirs)
19,269
def task_set(repo: Repo, task: Task, args: list, quiet, force):
    """ set parameters of task, multiple key=value pairs allowed"""
    try:
        d = dict(arg.split('=') for arg in args)
    except ValueError:
        raise click.BadArgumentUsage('Wrong format for key=value argument')
    task.upgrade_task()  # bump version, fix issues
    task_config = task.config
    # prevent changing template
    reserved_keys = ['kind', 'template', 'template_version']
    if not force and any(k in reserved_keys for k in d.keys()):
        raise OCLIException(f"keys {reserved_keys} could not be changed!")
    _task_update_config(task, d, force)
    # template = task_config.get('template', None)
    # if template:
    #     """check task created from template set by template """
    #     try:
    #         tpl = import_module(template)  # type: TaskTemplate
    #         if not hasattr(tpl, 'task_set'):
    #             raise ModuleNotFoundError(f"Template is invalid: task_set is not defined")
    #         tpl.task_set(task, d)
    #     except ModuleNotFoundError as e:
    #         raise OCLIException(f'Could not import "{template}": {e} ')
    # for k in d.keys():
    #     v = d[k]
    #     if k == 'tag':
    #         if v.startswith('+'):
    #             v = v[1:]
    #             t = task.config.get('tag')
    #             vs = v.split(',')
    #             v = [x for x in vs if x not in t]
    #             task.set_config('tag', t + v)
    #             # print(f"Add tag {v}")
    #         elif v.startswith('-'):
    #             v = v[1:]
    #             t = task.config.get('tag')
    #             vs = v.split(',')
    #             v = [x for x in t if x not in vs]
    #             task.set_config('tag', v)
    #         else:
    #             vs = v.split(',')
    #             task.set_config('tag', vs)
    #     else:
    #         task.set_config(k, v, only_existed=not force)
    # task.set_config('_rev', task.config.get('_rev', 0) + 1)
    try:
        task.save()
        output.success("task saved")
    except RuntimeError as e:
        raise click.UsageError(f'Could not save task RC-file, reason: {e}')
    if not quiet:
        click.get_current_context().invoke(task_info)
19,270
def alias(*alias):
    """Select a (list of) alias(es)."""
    valias = [t for t in alias]
    return {"alias": valias}
19,271
def eval_agent(sess, env, agent):
    """Evaluate the RL agent through multiple roll-outs.

    Args:
    * sess: TensorFlow session
    * env: environment
    * agent: RL agent
    """
    reward_ave_list = []
    for idx_rlout in range(FLAGS.nb_rlouts_eval):
        state = env.reset()
        rewards = np.zeros(FLAGS.rlout_len)
        tf.logging.info('initial state: {}'.format(state))

        for idx_iter in range(FLAGS.rlout_len):
            action = sess.run(agent.actions_clean, feed_dict={agent.states: state})
            state, reward = env.step(action)
            rewards[idx_iter] = reward

        tf.logging.info('terminal state: {}'.format(state))
        tf.logging.info('roll-out #%d: reward (ave.): %.2e' % (idx_rlout, np.mean(rewards)))
        reward_ave_list += [np.mean(rewards)]

    tf.logging.info('[EVAL] reward (ave.): %.4e' % np.mean(np.array(reward_ave_list)))
19,272
def create_blueprint(request_manager):
    """Creates an instance of the blueprint."""
    blueprint = Blueprint('requests', __name__, url_prefix='/requests')

    # pylint: disable=unused-variable
    @blueprint.route('<request_id>/state')
    def get_state(request_id):
        """
        Retrieves the state of the specified request.
        ---
        parameters:
          - name: request_id
            description: id of the request
            in: path
            type: string
            required: true
        definitions:
          RequestResponse:
            description: Object containing the URL of a requests state
            type: object
            properties:
              stateUrl:
                description: URL the requests state can be retrieved from
                type: string
          StateResponse:
            description: Object describing request state and result url
            type: object
            properties:
              done:
                description: whether the processing of the request is done
                type: boolean
              resultUrl:
                description: URL the requests result can be retrieved from
                type: string
        responses:
          200:
            application/json:
              schema:
                $ref: '#/definitions/StateResponse'
        """
        # TODO 404 on invalid request_id or no futures
        return jsonify({
            'done': request_manager.request_processed(request_id),
            'resultUrl': url_for('requests.get_result',
                                 request_id=request_id,
                                 _external=USE_EXTERNAL_URLS)
        })

    @blueprint.route('<request_id>/result')
    def get_result(request_id):
        """
        Retrieves the result of the specified request.
        ---
        parameters:
          - name: request_id
            description: id of the request
            in: path
            type: string
            required: true
        responses:
          200:
            application/json:
              schema:
                description: object defined by the type of request
                type: object
        """
        if not request_manager.request_processed(request_id):
            log.info('request "%s" not done or result already retrieved', request_id)
            abort(404)

        result = request_manager.get_result(request_id)
        log.debug(result)
        if not result:
            return jsonify({})
        return jsonify(result)

    return blueprint
19,273
def parse_smyle(file):
    """Parser for CESM2 Seasonal-to-Multiyear Large Ensemble (SMYLE)"""
    try:
        with xr.open_dataset(file, chunks={}, decode_times=False) as ds:
            file = pathlib.Path(file)
            parts = file.parts
            # Case
            case = parts[-6]
            # Extract the component from the file string
            component = parts[-5]
            # Extract the frequency
            frequency = parts[-2]

            date_regex = r'\d{10}-\d{10}|\d{8}-\d{8}|\d{6}-\d{6}|\d{4}-\d{4}'
            date_range = extract_attr_with_regex(parts[-1], date_regex)
            # Pull out the start and end time
            start_time, end_time = date_range.split('-')

            # Extract variable and stream
            y = parts[-1].split(date_range)[0].strip('.').split('.')
            variable = y[-1]
            stream = '.'.join(y[-3:-1])

            # Extract init_year, init_month, member_id
            z = extract_attr_with_regex(case, r'\d{4}-\d{2}.\d{3}').split('.')
            inits = z[0].split('-')
            init_year = int(inits[0])
            init_month = int(inits[1])
            member_id = z[-1]

            x = case.split(z[0])[0].strip('.').split('.')
            experiment = x[-2]
            grid = x[-1]

            # Get the long name from dataset
            long_name = ds[variable].attrs.get('long_name')
            # Grab the units of the variable
            units = ds[variable].attrs.get('units')

            # Set the default of # of vertical levels to 1
            vertical_levels = 1
            try:
                vertical_levels = ds[ds.cf['vertical'].name].size
            except (KeyError, AttributeError, ValueError):
                pass

            # Use standard region names
            regions = {
                'atm': 'global',
                'ocn': 'global_ocean',
                'lnd': 'global_land',
                'ice': 'global',
            }
            spatial_domain = regions.get(component, 'global')

            return {
                'component': component,
                'case': case,
                'experiment': experiment,
                'variable': variable,
                'long_name': long_name.lower(),
                'frequency': frequency,
                'stream': stream,
                'member_id': member_id,
                'init_year': init_year,
                'init_month': init_month,
                'vertical_levels': vertical_levels,
                'units': units,
                'spatial_domain': spatial_domain,
                'grid': grid,
                'start_time': parse_date(start_time),
                'end_time': parse_date(end_time),
                'path': str(file),
            }
    except Exception:
        return {INVALID_ASSET: file, TRACEBACK: traceback.format_exc()}
19,274
def get_shape(grid, major_ticks=False):
    """
    Infer shape from grid

    Parameters
    ----------
    grid : ndarray
        Minor grid nodes array
    major_ticks : bool, default False
        If true, infer shape of major grid nodes

    Returns
    -------
    shape : tuple
        Shape of grid ndarray
    """
    shape = tuple(len(np.unique(g)) for g in grid.T)
    if major_ticks:
        shape = tuple(np.max(grid + 1, axis=0).astype(int))
    return shape
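# A quick sketch of get_shape on flattened meshgrid nodes (assumed layout:
# one row per node, one column per axis).
import numpy as np

x, y = np.meshgrid(np.linspace(0, 1, 4), np.linspace(0, 1, 3), indexing='ij')
grid = np.column_stack([x.ravel(), y.ravel()])  # (12, 2) array of nodes
print(get_shape(grid))  # (4, 3)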
19,275
def lorentz_force_derivative(t, X, qm, Efield, Bfield): """ Useful when using generic integration schemes, such as RK4, which can be compared to Boris-Bunemann """ v = X[3:] E = Efield(X) B = Bfield(X) # Newton-Lorentz acceleration a = qm*E + qm*np.cross(v,B) ydot = np.concatenate((v,a)) return ydot
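# A minimal integration sketch, assuming a charged particle in a uniform
# B field along z with no E field; the orbit is circular gyration in x-y.
import numpy as np
from scipy.integrate import solve_ivp

qm = 1.0                                      # charge-to-mass ratio
Efield = lambda X: np.zeros(3)                # no electric field
Bfield = lambda X: np.array([0.0, 0.0, 1.0])  # uniform B along z

X0 = np.array([0.0, 0.0, 0.0,                 # initial position
               1.0, 0.0, 0.0])                # initial velocity
sol = solve_ivp(lorentz_force_derivative, (0.0, 10.0), X0,
                args=(qm, Efield, Bfield), max_step=0.01)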
19,276
def test_pytest_sessionfinish(mocked_session): """Test sessionfinish with the configured RP plugin. :param mocked_session: pytest fixture """ mocked_session.config.py_test_service = mock.Mock() mocked_session.config.option.rp_launch_id = None pytest_sessionfinish(mocked_session) assert mocked_session.config.py_test_service.finish_launch.called
19,277
def copy_rate(source, target, tokenize=False):
    """
    Compute copy rate

    :param source: source tokens, or raw text if tokenize=True
    :param target: target tokens, or raw text if tokenize=True
    :param tokenize: tokenize source and target before comparing
    :return: overlap between the source and target token sets
    """
    if tokenize:
        source = toktok(source)
        target = toktok(target)

    source_set = set(source)
    target_set = set(target)

    if len(source_set) == 0 or len(target_set) == 0:
        return 0.

    return set_overlap(source_set, target_set)
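# A small worked example, assuming set_overlap is a containment-style
# ratio (|A & B| / |A|); the real helper may be defined differently.
source = ['the', 'cat', 'sat', 'on', 'the', 'mat']
target = ['a', 'cat', 'on', 'a', 'mat']
# set(source) has 5 members; 3 of them ({cat, on, mat}) also occur in
# set(target), so the containment assumption gives 3/5.
print(copy_rate(source, target))  # 0.6 under that assumption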
19,278
def set_maya_transform_attrs(dson_node, mesh_set): """ Record where this transform came from. This makes it easier to figure out what nodes are in later auto-rigging. """ if not dson_node.maya_node: return assert dson_node.maya_node.exists(), dson_node maya_node = dson_node.maya_node if dson_node.is_top_node: pm.addAttr(maya_node, longName='dson_top_node', at=bool) pm.setAttr(maya_node.attr('dson_top_node'), True) conform_target = dson_node.get('conform_target') if conform_target is not None: conform_target = conform_target.load_url() # Save this conform in an attribute for reference. pm.addAttr(dson_node.maya_node, longName='following_figure', at='message', niceName='Following figure') conform_target.maya_node.attr('message').connect(dson_node.maya_node.attr('following_figure')) # Connect the main DSON transform to each of its meshes, to make them easier to find # in scripts later. if mesh_set is not None: pm.addAttr(dson_node.maya_node, longName='dson_meshes', at='message', niceName='DSON meshes') for loaded_mesh in mesh_set.meshes.values(): if loaded_mesh.maya_mesh is None: continue pm.addAttr(loaded_mesh.maya_mesh, longName='dson_transform', at='message', niceName='DSON transform') dson_node.maya_node.attr('dson_meshes').connect(loaded_mesh.maya_mesh.attr('dson_transform')) # Store the node type, to allow distinguishing nodes that represent eg. modifiers. pm.addAttr(maya_node, longName='dson_type', dt='string', niceName='DSON type') if dson_node.node_source == 'node': pm.setAttr(maya_node.attr('dson_type'), dson_node.get_value('type')) else: pm.setAttr(maya_node.attr('dson_type'), dson_node.node_source) # Store the node's parent's name. For modifiers, this won't be the same as the Maya # parent. pm.addAttr(maya_node, longName='dson_parent_name', dt='string', niceName='DSON parent name') if dson_node.parent and 'name' in dson_node.parent: maya_node.attr('dson_parent_name').set(dson_node.parent.get_value('name')) if dson_node.node_source == 'modifier': # For modifiers, store a direct connection to the parent. We don't do this for all # nodes since it doesn't seem useful and adds a ton of DG connections. pm.addAttr(maya_node, longName='dson_parent', at='message', niceName='DSON parent') if dson_node.parent and dson_node.parent.maya_node: dson_node.parent.maya_node.attr('message').connect(dson_node.maya_node.attr('dson_parent')) pm.addAttr(maya_node, longName='dson_url', dt='string', niceName='DSON URL') pm.setAttr(maya_node.attr('dson_url'), dson_node.url) if dson_node.asset: pm.addAttr(maya_node, longName='dson_asset_name', dt='string', niceName='DSON asset name') pm.setAttr(maya_node.attr('dson_asset_name'), dson_node.asset.get_value('name')) pm.addAttr(maya_node, longName='dson_asset_url', dt='string', niceName='DSON asset URL') pm.setAttr(maya_node.attr('dson_asset_url'), dson_node.asset_url)
19,279
def read_json_info(fname): """ Parse info from the video information file. Returns: Dictionary containing information on podcast episode. """ with open(fname) as fin: return json.load(fin)
19,280
def user_set(): """Sets user.""" global user_name global user_color user_name = name_input.get() user_color = color_input.get() window.destroy()
19,281
def check_subman_version(required_version): """ Verify that the command 'subscription-manager' isn't too old. """ status, _ = check_package_version('subscription-manager', required_version) return status
19,282
def compile_index_template(version_numbers): """Compiles the index""" template = JINJA_ENV.get_template('index.html.j2') rendered_template = template.render( version_numbers=version_numbers ) with open( os.path.join(VERSION_FILES_DIR, '..', 'index.html'), 'w+' ) as final_page: final_page.write(rendered_template)
19,283
def create_output_directory(input_directory):
    """Creates new directory and returns its path"""
    output_directory = ''
    increment = 0
    done_creating_directory = False
    while not done_creating_directory:
        try:
            if input_directory.endswith('/'):
                output_directory = input_directory + 'converted'
            else:
                output_directory = input_directory + '/converted'
            if increment != 0:
                output_directory += str(increment)
            os.makedirs(output_directory, exist_ok=False)
            done_creating_directory = True
        except FileExistsError:
            increment += 1
    return output_directory
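# Usage sketch: repeated calls never clobber an earlier run; the suffix
# increments until an unused name is found.
out1 = create_output_directory('/tmp/photos')  # -> /tmp/photos/converted
out2 = create_output_directory('/tmp/photos')  # -> /tmp/photos/converted1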
19,284
def test_list_unsigned_long_max_length_3_nistxml_sv_iv_list_unsigned_long_max_length_4_4(mode, save_output, output_format): """ Type list/unsignedLong is restricted by facet maxLength with value 8. """ assert_bindings( schema="nistData/list/unsignedLong/Schema+Instance/NISTSchema-SV-IV-list-unsignedLong-maxLength-4.xsd", instance="nistData/list/unsignedLong/Schema+Instance/NISTXML-SV-IV-list-unsignedLong-maxLength-4-4.xml", class_name="NistschemaSvIvListUnsignedLongMaxLength4", version="1.1", mode=mode, save_output=save_output, output_format=output_format, structure_style="filenames", )
19,285
def bigsegment_twocolor(rows, cols, seed=None): """ Form a map from intersecting line segments. """ if seed is not None: random.seed(seed) possible_nhseg = [3,5] possible_nvseg = [1,3,5] gap_probability = random.random() * 0.10 maxdim = max(rows, cols) nhseg = 0 nvseg = 0 while (nhseg == 0 and nvseg == 0) or (nhseg % 2 != 0 and nvseg == 0): nhseg = random.choice(possible_nhseg) nvseg = random.choice(possible_nvseg) jitterx = 15 jittery = 15 team1_pattern, team2_pattern = segment_pattern( rows, cols, seed, colormode="classic", nhseg=nhseg, nvseg=nvseg, jitterx=jitterx, jittery=jittery, gap_probability=gap_probability, ) pattern1_url = pattern2url(team1_pattern) pattern2_url = pattern2url(team2_pattern) return pattern1_url, pattern2_url
19,286
def first_order_smoothness_loss(
    image, flow,
    edge_weighting_fn):
  """Computes a first-order smoothness loss.

  Args:
    image: Image used for the edge-aware weighting [batch, height, width, 2].
    flow: Flow field for which to compute the smoothness loss [batch, height,
      width, 2].
    edge_weighting_fn: Function used for the edge-aware weighting.

  Returns:
    Average first-order smoothness loss.
  """
  img_gx, img_gy = image_grads(image)
  weights_x = edge_weighting_fn(img_gx)
  weights_y = edge_weighting_fn(img_gy)

  # Compute first derivatives of the predicted flow.
  flow_gx, flow_gy = image_grads(flow)

  # Compute weighted smoothness
  return ((tf.reduce_mean(input_tensor=weights_x * robust_l1(flow_gx)) +
           tf.reduce_mean(input_tensor=weights_y * robust_l1(flow_gy))) / 2.)
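# A runnable sketch of the loss. image_grads and robust_l1 are assumptions
# reconstructed from common unsupervised-flow code; the real helpers may
# differ in detail.
import tensorflow as tf

def image_grads(x):
  # Forward differences along width (gx) and height (gy).
  return x[:, :, :-1, :] - x[:, :, 1:, :], x[:, :-1, :, :] - x[:, 1:, :, :]

def robust_l1(x):
  # Charbonnier-style robust penalty.
  return (x**2 + 0.001**2) ** 0.5

# Down-weight smoothness across strong image edges.
edge_fn = lambda g: tf.exp(
    -tf.reduce_mean(tf.abs(150.0 * g), axis=-1, keepdims=True))

image = tf.random.uniform([1, 64, 64, 3])
flow = tf.random.uniform([1, 64, 64, 2])
loss = first_order_smoothness_loss(image, flow, edge_fn)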
19,287
def where_is_my_birthdate_in_powers_of_two(date: int) -> int:
    """
    >>> where_is_my_birthdate_in_powers_of_two(160703)
    <BLANKLINE>
    Dans la suite des
    <BLANKLINE>
     0    1    2         765
    2  , 2  , 2  , …, 2
    <BLANKLINE>
    Ta date de naissance apparaît ici!:
    <BLANKLINE>
    …5687200260623819378316070394980560315787
                         ^~~~~~
    <BLANKLINE>
    À la position #88532
    <BLANKLINE>
    765
    """
    date = str(date)
    sequence = ""
    sequence_index = 0

    while True:
        sequence_index += 1
        sequence += str(2 ** sequence_index)

        found_at = sequence.find(date)
        if found_at != -1:
            print(f"""
Dans la suite des

 0    1    2         {sequence_index}
2  , 2  , 2  , …, 2

Ta date de naissance apparaît ici!:

…{numbers_around(sequence, at=found_at, lookaround=20)}
{20 * ' '}^{(len(date)-1) * '~'}

À la position #{found_at}
""")
            return sequence_index
19,288
def make_df_health_all(datadir): """ Returns full dataframe from health data at specified location """ df_health_all = pd.read_csv(str(datadir) + '/health_data_all.csv') return df_health_all
19,289
def _test_op1(ufunc, almost=False, cmp_op=False, ktol=1.0):
    """
    General framework for testing unary operators on Xrange arrays
    """
    rg = np.random.default_rng(100)
    n_vec = 500
    max_bin_exp = 20

    # testing unary operation on real extended arrays
    for dtype in [np.float64, np.float32]:
        op1 = rg.random([n_vec], dtype=dtype)
        op1 *= 2.**rg.integers(low=-max_bin_exp, high=max_bin_exp,
                               size=[n_vec])
        expected = ufunc(op1)
        res = ufunc(Xrange_array(op1))
        _matching(res, expected, almost, dtype, cmp_op, ktol)
        # Checking datatype
        assert res._mantissa.dtype == dtype

        # with non null shift array
        exp_shift_array = rg.integers(low=-max_bin_exp, high=max_bin_exp,
                                      size=[n_vec])
        expected = ufunc(op1 * (2.**exp_shift_array).astype(dtype))
        _matching(ufunc(Xrange_array(op1, exp_shift_array)),
                  expected, almost, dtype, cmp_op, ktol)

        # test "scalar"
        _matching(ufunc(Xrange_array(op1, exp_shift_array)[0]),
                  expected[0], almost, dtype, cmp_op, ktol)

    # testing unary operation on complex extended arrays
    for dtype in [np.float32, np.float64]:
        op1 = (rg.random([n_vec], dtype=dtype)
               + 1j*rg.random([n_vec], dtype=dtype))
        op1 *= 2.**rg.integers(low=-max_bin_exp, high=max_bin_exp,
                               size=[n_vec])
        expected = ufunc(op1)
        res = ufunc(Xrange_array(op1))
        _matching(res, expected, almost, dtype, cmp_op, ktol)
        # Checking datatype
        to_complex = {np.float32: np.complex64,
                      np.float64: np.complex128}
        if ufunc in [np.abs]:
            assert res._mantissa.dtype == dtype
        else:
            assert res._mantissa.dtype == to_complex[dtype]

        # with non null shift array
        exp_shift_array = rg.integers(low=-max_bin_exp, high=max_bin_exp,
                                      size=[n_vec])
        expected = ufunc(op1 * (2.**exp_shift_array))
        _matching(ufunc(Xrange_array(op1, exp_shift_array)),
                  expected, almost, dtype, cmp_op, ktol)
19,290
def run():
    """This client pushes PE Files -> ELS Indexer."""

    # Grab server args
    args = client_helper.grab_server_args()

    # Start up workbench connection
    workbench = zerorpc.Client(timeout=300, heartbeat=60)
    workbench.connect('tcp://'+args['server']+':'+args['port'])

    # Test out PEFile -> strings -> indexer -> search
    data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/pe/bad')
    file_list = [os.path.join(data_path, child) for child in os.listdir(data_path)][:20]
    for filename in file_list:

        # Skip OS generated files
        if '.DS_Store' in filename:
            continue

        with open(filename, 'rb') as f:
            base_name = os.path.basename(filename)
            md5 = workbench.store_sample(f.read(), base_name, 'exe')

            # Index the strings and features output (notice we can ask for any worker output)
            # Also (super important) it all happens on the server side.
            workbench.index_worker_output('strings', md5, 'strings', None)
            print('\n<<< Strings for PE: %s Indexed>>>' % (base_name))
            workbench.index_worker_output('pe_features', md5, 'pe_features', None)
            print('<<< Features for PE: %s Indexed>>>' % (base_name))

    # We should execute some queries against ElasticSearch at this point, but as of
    # version 1.2+ dynamic scripting is disabled by default, see
    # http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-scripting.html#_enabling_dynamic_scripting

    # Now actually do something interesting with our ELS index

    # ES Facets are kewl (http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets.html)
    facet_query = '{"facets" : {"tag" : {"terms" : {"field" : "string_list"}}}}'
    results = workbench.search_index('strings', facet_query)
    try:
        print('\nQuery: %s' % facet_query)
        print('Number of hits: %d' % results['hits']['total'])
        print('Max Score: %f' % results['hits']['max_score'])
        pprint.pprint(results['facets'])
    except TypeError:
        print('Probably using a Stub Indexer, if you want an ELS Indexer see the readme')

    # Fuzzy is kewl (http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-fuzzy-query.html)
    fuzzy_query = '{"fields":["md5","sparse_features.imported_symbols"],' \
                  '"query": {"fuzzy" : {"sparse_features.imported_symbols" : "loadlibrary"}}}'
    results = workbench.search_index('pe_features', fuzzy_query)
    try:
        print('\nQuery: %s' % fuzzy_query)
        print('Number of hits: %d' % results['hits']['total'])
        print('Max Score: %f' % results['hits']['max_score'])
        pprint.pprint([(hit['fields']['md5'], hit['fields']['sparse_features.imported_symbols'])
                       for hit in results['hits']['hits']])
    except TypeError:
        print('Probably using a Stub Indexer, if you want an ELS Indexer see the readme')
19,291
def lothars_in_cv2image(image, lothars_encoders, fc):
    """
    Given an image opened with opencv, finds lothars in the photo
    and the corresponding names and encodings
    """
    # init empty lists for selfies and corresponding names
    lothar_selfies = []
    names = []
    encodings = []

    # rgb image (computed but unused in the current pipeline)
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # convert image to greyscale for HaarCascade
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # cycle angles until cv finds faces
    found = False
    angles = rotation_angles(5)
    for angle in angles:
        r_gray = rotate_image(gray, angle)
        faces = fc.detectMultiScale(r_gray,
                                    scaleFactor=1.3,
                                    minNeighbors=6,
                                    minSize=(30, 40),
                                    flags=cv2.CASCADE_SCALE_IMAGE)

        # cycle all faces found
        for i, face in enumerate(faces):
            # define the face rectangle, padded by the module-level
            # extra/ratio margins and clamped to the image bounds
            (x, y, w, h) = face
            height, width = image.shape[:2]
            extra_h = ((1 + 2 * extra) / ratio - 1) / 2
            x = int(max(0, x - w * extra))
            y = int(max(0, y - h * extra_h))
            w = int(min(w + 2 * w * extra, width))
            h = int(min(h + 2 * h * extra_h, height))

            # rotate colored image
            rotated_image = rotate_image(image, angle)

            # crop just the rectangle containing the face
            #cv2.rectangle(rotated_image, (x,y), (x+w,y+h), (255,255,255))
            sub_face = rotated_image[y:y+h, x:x+w]

            index, name, encoding = lothars_in_selfies([sub_face], lothars_encoders,
                                                       [x, y, w, h],
                                                       num_jitters=2, keep_searching=False)
            if len(name) > 0:
                lothar_selfies.append(sub_face)
                names.append(name)
                encodings.append(encoding)
                found = True

        # stop changing angle if a lothar was found
        if found:
            break

    return lothar_selfies, names, encodings
19,292
def approx_nth_prime_upper(n): """ approximate upper limit for the nth prime number. """ return ceil(1.2 * approx_nth_prime(n))
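# A worked check, assuming approx_nth_prime is the standard n(ln n + ln ln n)
# estimate from the prime number theorem; the 1.2 factor pads it upward.
from math import ceil, log

def approx_nth_prime(n):
    # Hypothetical reconstruction of the helper this function relies on.
    return n * (log(n) + log(log(n)))

# The 1000th prime is 7919; the padded estimate stays safely above it.
print(round(approx_nth_prime(1000)))  # 8840
print(approx_nth_prime_upper(1000))   # 10609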
19,293
def wavelength_to_velocity(wavelengths, input_units, center_wavelength=None,
                           center_wavelength_units=None, velocity_units='m/s',
                           convention='optical'):
    """
    Conventions defined here:
    http://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html

    * Radio         V = c ((c/l0) - f)/(c/l0)       f(V) = (c/l0) (1 - V/c)
    * Optical       V = c ((c/l0) - f)/f            f(V) = (c/l0) (1 + V/c)^-1
    * Redshift      z = ((c/l0) - f)/f              f(V) = (c/l0) (1 + z)^-1
    * Relativistic  V = c ((c/l0)^2 - f^2)/((c/l0)^2 + f^2)
                    f(V) = (c/l0) (1 - (V/c)^2)^(1/2) / (1 + V/c)
    """
    if input_units in velocity_dict:
        print("Already in velocity units (%s)" % input_units)
        return wavelengths
    if center_wavelength is None:
        raise ValueError("Cannot convert wavelength to velocity without specifying a central wavelength.")
    if center_wavelength_units not in wavelength_dict:
        raise ValueError("Bad wavelength units: %s" % (center_wavelength_units))
    if velocity_units not in velocity_dict:
        raise ValueError("Bad velocity units: %s" % (velocity_units))

    wavelength_m = wavelengths / wavelength_dict['meters'] * wavelength_dict[input_units]
    center_wavelength_m = center_wavelength / wavelength_dict['meters'] * wavelength_dict[center_wavelength_units]
    frequency_hz = speedoflight_ms / wavelength_m
    center_frequency_hz = speedoflight_ms / center_wavelength_m

    # the order is very ugly because otherwise, if scalar, the spectroscopic axis attributes won't be inherited
    if convention == 'radio':
        velocity = ( frequency_hz - center_frequency_hz ) / center_frequency_hz * speedoflight_ms * -1
    elif convention == 'optical':
        velocity = ( frequency_hz - center_frequency_hz ) / frequency_hz * speedoflight_ms * -1
    elif convention == 'relativistic':
        velocity = ( frequency_hz**2 - center_frequency_hz**2 ) / ( center_frequency_hz**2 + frequency_hz**2 ) * speedoflight_ms * -1
    else:
        raise ValueError('Convention "%s" is not allowed.' % (convention))
    velocities = velocity * velocity_dict['m/s'] / velocity_dict[velocity_units]

    return velocities
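# A worked example of the conventions above, written as plain arithmetic
# with no dependence on the module's unit tables: the HI 21 cm line
# observed 0.1% redward of rest.
c = 299792458.0        # m/s
lam0 = 0.2110611405    # rest wavelength in meters (21 cm line)
lam = lam0 * 1.001     # observed wavelength

f0, f = c / lam0, c / lam
v_optical = (f0 - f) / f * c   # c * 0.001          ~ 299792.5 m/s
v_radio = (f0 - f) / f0 * c    # c * 0.001 / 1.001  ~ 299493.0 m/s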
19,294
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the template sensors.""" await async_setup_reload_service(hass, DOMAIN, PLATFORMS) async_add_entities(await _async_create_entities(hass, config))
19,295
def gen_api_json(api): """Apply the api literal object to the template.""" api = json.dumps( api, cls=Encoder, sort_keys=True, indent=1, separators=(',', ': ') ) return TEMPLATE_API_DEFINITION % (api)
19,296
def get_massage(): """ Provide extra data massage to solve HTML problems in BeautifulSoup """ # Javascript code in ths page generates HTML markup # that isn't parsed correctly by BeautifulSoup. # To avoid this problem, all document.write fragments are removed my_massage = copy(BeautifulSoup.MARKUP_MASSAGE) my_massage.append((re.compile(u"document.write(.+);"), lambda match: "")) my_massage.append((re.compile(u'alt=".+">'), lambda match: ">")) return my_massage
19,297
def generate_test_images(): """Generate all test images. Returns ------- results: dict A dictionary mapping test case name to xarray images. """ results = {} for antialias, aa_descriptor in antialias_options: for canvas, canvas_descriptor in canvas_options: for func in (generate_test_001, generate_test_002, generate_test_003, generate_test_004, generate_test_005, generate_test_007, ): points, name = func() aggregators = draw_lines(canvas, points, antialias) img = shade(aggregators, cmap=cmap01) description = "{}_{}_{}".format( name, aa_descriptor, canvas_descriptor) results[description] = img for func in (generate_test_006, ): points, name = func() aggregator = draw_multi_segment_line(canvas, points, antialias) img = shade(aggregator, cmap=cmap01) description = "{}_{}_{}".format( name, aa_descriptor, canvas_descriptor) results[description] = img return results
19,298
def registered_metrics() -> Dict[Text, Type[Metric]]: """Returns standard TFMA metrics.""" return copy.copy(_METRIC_OBJECTS)
19,299