content
stringlengths
22
815k
id
int64
0
4.91M
def get_lif_list(path):
    """Return the paths of all ``*.lif`` files in the given folder.

    :param path: folder to search
    :return: list -- matching filenames
    """
    # glob accepts the forward-slash pattern on all platforms.
    return glob.glob(f"{path}/*.lif")
25,100
def parse_args():
    """Parse command-line arguments for single-image depth-estimation training.

    Returns:
        argparse.Namespace: the parsed arguments.
    """

    def _str2bool(value):
        # argparse's `type=bool` is a known trap: bool("False") is True because
        # any non-empty string is truthy.  Parse common spellings explicitly;
        # this stays backward compatible with "--r True" / "--r False".
        if isinstance(value, bool):
            return value
        if value.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        if value.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)

    parser = argparse.ArgumentParser(description='Single image depth estimation')
    parser.add_argument('--dataset', dest='dataset', help='training dataset', default='custom', type=str)
    parser.add_argument('--epochs', dest='max_epochs', help='number of epochs to train', default=NUM_EPOCHS, type=int)
    parser.add_argument('--cuda', dest='cuda', help='whether use CUDA', action='store_true')
    parser.add_argument('--bs', dest='bs', help='batch_size', default=16, type=int)
    parser.add_argument('--num_workers', dest='num_workers', help='num_workers', default=1, type=int)
    parser.add_argument('--disp_interval', dest='disp_interval', help='display interval', default=10, type=int)
    parser.add_argument('--output_dir', dest='output_dir', help='output directory', default='saved_models', type=str)
    # config optimization
    parser.add_argument('--o', dest='optimizer', help='training optimizer', default="sgd", type=str)
    parser.add_argument('--lr', dest='lr', help='starting learning rate', default=1e-3, type=float)
    parser.add_argument('--lr_decay_step', dest='lr_decay_step',
                        help='step to do learning rate decay, unit is epoch', default=5, type=int)
    parser.add_argument('--lr_decay_gamma', dest='lr_decay_gamma',
                        help='learning rate decay ratio', default=0.1, type=float)
    # set training session
    parser.add_argument('--s', dest='session', help='training session', default=1, type=int)
    parser.add_argument('--eval_epoch', dest='eval_epoch', help='number of epoch to evaluate', default=2, type=int)
    # resume trained model
    # NOTE: was `type=bool`, which silently treated "--r False" as True.
    parser.add_argument('--r', dest='resume', help='resume checkpoint or not', default=False, type=_str2bool)
    parser.add_argument('--start_at', dest='start_epoch', help='epoch to start with', default=0, type=int)
    parser.add_argument('--checksession', dest='checksession', help='checksession to load model', default=1, type=int)
    parser.add_argument('--checkepoch', dest='checkepoch', help='checkepoch to load model', default=1, type=int)
    parser.add_argument('--checkpoint', dest='checkpoint', help='checkpoint to load model', default=0, type=int)
    # training parameters
    parser.add_argument('--gamma_sup', dest='gamma_sup', help='factor of supervised loss', default=1., type=float)
    parser.add_argument('--gamma_unsup', dest='gamma_unsup', help='factor of unsupervised loss', default=1., type=float)
    parser.add_argument('--gamma_reg', dest='gamma_reg', help='factor of regularization loss', default=10., type=float)
    return parser.parse_args()
25,101
def plot_rhodelta_rho(rho, delta):
    """Scatter-plot rho*delta values against their descending rank.

    Args:
        rho : rho list
        delta : delta list
    """
    logger.info("PLOT: rho*delta_rho plot")
    y = rho * delta
    # Rank each point by descending y (vectorized; replaces the Python loop):
    # x[i] is the rank of point i.
    r_index = np.argsort(-y)
    x = np.empty(y.shape[0])
    x[r_index] = np.arange(y.shape[0])
    plt.figure(2)
    plt.clf()
    plt.scatter(x, y)
    plt.xlabel('sorted rho')
    plt.ylabel('rho*delta')
    plt.title("Decision Graph RhoDelta-Rho")
    # Save BEFORE show(): show() blocks and many backends clear the active
    # figure on close, so the original save-after-show wrote an empty image.
    plt.savefig('Decision Graph RhoDelta-Rho.jpg')
    plt.show()
25,102
def test_arguments_in_init(mocker):
    """Test whether arguments are set up correctly while initializing a HermesApp object."""
    app = HermesApp(
        "Test arguments in init",
        mqtt_client=mocker.MagicMock(),
        host="rhasspy.home",
        port=8883,
        tls=True,
        username="rhasspy-hermes-app",
        password="test",
    )
    assert app.args.host == "rhasspy.home"
    assert app.args.port == 8883
    # `is True` rather than `== True`: asserts the actual bool, not any truthy value.
    assert app.args.tls is True
    assert app.args.username == "rhasspy-hermes-app"
    assert app.args.password == "test"
25,103
def speaker_data_to_csvs(corpus_context, data):
    """Write per-speaker properties to ``speaker_import.csv``.

    Parameters
    ----------
    corpus_context : :class:`~polyglotdb.corpus.CorpusContext`
        the corpus object; its config provides the temporary CSV directory
    data : dict
        mapping of speaker name -> dict of speaker properties; all speakers
        are assumed to share the same property keys
    """
    directory = corpus_context.config.temporary_directory('csv')
    # Derive the columns from the first speaker; header-only file if no data
    # (the original raised StopIteration on an empty dict).
    if data:
        header = ['name'] + sorted(next(iter(data.values())).keys())
    else:
        header = ['name']
    # newline='' is required by the csv module to avoid blank lines on Windows.
    with open(os.path.join(directory, 'speaker_import.csv'), 'w', newline='') as f:
        writer = csv.DictWriter(f, header, delimiter=',')
        writer.writeheader()
        for name, properties in sorted(data.items()):
            # Merge into a new dict instead of mutating the caller's data.
            writer.writerow({'name': name, **properties})
25,104
def compute_distance_matrix(users, basestations):
    """Compute Euclidean distances between every user and every basestation.

    Args:
        users: list of objects exposing a ``coordinates`` attribute.
        basestations: list of objects exposing a ``coordinates`` attribute.

    Returns:
        numpy array of shape (len(users), len(basestations)) with the distance
        from each user to each basestation, in the coordinates' units (km).
    """
    coords_ue = [user.coordinates for user in users]
    coords_bs = [bs.coordinates for bs in basestations]
    # One vectorized cdist call replaces the original per-user loop, which
    # rebuilt and evaluated a full pairwise matrix for every single user.
    return distance.cdist(coords_ue, coords_bs, 'euclidean')
25,105
def __getattr__(name):
    """Lazy load the global resolver to avoid circular dependencies with plugins."""
    # Module-level __getattr__ (PEP 562): invoked only for attributes that are
    # not already present in this module's globals.
    if name in _SPECIAL_ATTRS:
        # Import here, not at module top, to break the import cycle with plugins.
        from .core import resolver
        res = resolver.Resolver()
        res.load_plugins_from_environment()
        # Presumably populates module globals with the default-resolver
        # attributes, so the lookup below (and future lookups) succeed.
        _set_default_resolver(res)
        return globals()[name]
    else:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
25,106
def img_newt(N, xran=(-3, 3), yran=(-3, 3), tol=1e-5, niter=100):
    """
    Color an N x N grid according to which fixed point Newton's method
    converges to from each starting point in the complex plane.
    """
    roots = [-(np.sqrt(3.0)*1j - 1.0)/2.0, (np.sqrt(3.0)*1j + 1.0)/2.0, -1.0]
    col_newt = np.zeros((N, N, 3))
    Y, X = np.mgrid[yran[0]:yran[1]:N*1j, xran[0]:xran[1]:N*1j]
    for i in range(N):
        for j in range(N):
            z0 = X[i, j] + Y[i, j]*1j
            zf = newt(z0, fun, der, tol=tol, niter=niter)
            # Color by the root the iteration converged to; the value 1000
            # marks non-convergence and gets the fourth color.
            for k, root in enumerate(roots):
                if abs(zf - root) < 1e-6:
                    col_newt[i, j, :] = colors[k]
            if abs(zf - 1000) < 1e-6:
                col_newt[i, j, :] = colors[3]
    return col_newt
25,107
def _ensure_no_unsupported_suppressions(
    schema_ast: DocumentNode, renamings: RenamingMapping
) -> None:
    """Confirm renamings contains no enums, interfaces, or interface implementation suppressions."""
    # Walk the schema AST, collecting every suppression (type mapped to None)
    # that the renaming implementation does not support yet.
    visitor = SuppressionNotImplementedVisitor(renamings)
    visit(schema_ast, visitor)
    if (
        not visitor.unsupported_enum_suppressions
        and not visitor.unsupported_interface_suppressions
        and not visitor.unsupported_interface_implementation_suppressions
    ):
        # Nothing unsupported was suppressed.
        return
    # Otherwise, attempted to suppress something we shouldn't suppress.
    # Build one combined error message listing every offending category.
    error_message_components = [
        f"Type renamings {renamings} attempted to suppress parts of the schema for which "
        f"suppression is not implemented yet."
    ]
    if visitor.unsupported_enum_suppressions:
        error_message_components.append(
            f"Type renamings mapped these schema enums to None: "
            f"{visitor.unsupported_enum_suppressions}, attempting to suppress them. However, "
            f"schema renaming has not implemented enum suppression yet."
        )
    if visitor.unsupported_interface_suppressions:
        error_message_components.append(
            f"Type renamings mapped these schema interfaces to None: "
            f"{visitor.unsupported_interface_suppressions}, attempting to suppress them. However, "
            f"schema renaming has not implemented interface suppression yet."
        )
    if visitor.unsupported_interface_implementation_suppressions:
        error_message_components.append(
            f"Type renamings mapped these object types to None: "
            f"{visitor.unsupported_interface_implementation_suppressions}, attempting to suppress "
            f"them. Normally, this would be fine. However, these types each implement at least one "
            f"interface and schema renaming has not implemented this particular suppression yet."
        )
    error_message_components.append(
        "To avoid these suppressions, remove the mappings from the renamings argument."
    )
    raise NotImplementedError("\n".join(error_message_components))
25,108
def pack_block_header(hdr: block.BlockHeader,
                      abbrev: bool = False,
                      pretty: bool = False,
                      ) -> str:
    """Serialize a block header to a JSON string, encoding bytes via base64."""
    to_str = get_b2s(abbrev)
    # Field order matters for the serialized output; keep it fixed.
    field_names = ('timestamp', 'previous_hash', 'nonce', 'merkle_root', 'this_hash')
    encoded = {name: to_str(hdr[name]) for name in field_names}
    return json_dumps(encoded, pretty)
25,109
def get_datasets(recipe):
    """Instantiate the datasets described in the recipe.

    Parameters
    ----------
    recipe : dict of dict
        The specifications of the core datasets.

    Returns
    -------
    datasets : dict of datasets
        A dictionary of dataset instances, compatible with torch's
        DataLoader objects.
    """
    datasets = {}
    for name, spec in recipe.items():
        datasets[name] = get_instance(**spec)
    return datasets
25,110
def is_sedol(value):
    """Check whether a string is a valid SEDOL identifier.

    Regex from here: https://en.wikipedia.org/wiki/SEDOL

    :param value: A string to evaluate.
    :returns: bool -- True if string is in the form of a valid SEDOL identifier.
    """
    # bool() so the function returns True/False as documented, rather than
    # leaking the re.Match object / None from re.match.
    return bool(re.match(r'^[0-9BCDFGHJKLMNPQRSTVWXYZ]{6}\d$', value))
25,111
def p_listof_literal_terms(t):
    """
    listof-terms : LPAREN literal-term-list RPAREN
                 | LPAREN RPAREN
    """
    # NOTE: the docstring above is the PLY/yacc grammar rule -- do not edit it.
    # len(t) == 4 means the first alternative matched: '(' literal-term-list ')'.
    if len(t) == 4:
        t[0] = t[2]
    else:
        # Empty parentheses: produce an empty term list.
        t[0] = []
25,112
def create_substrate(dim): """ The function to create two-sheets substrate configuration with specified dimensions of each sheet. Arguments: dim: The dimensions accross X, Y axis of the sheet """ # Building sheet configurations of inputs and outputs inputs = create_sheet_space(-1, 1, dim, -1) outputs = create_sheet_space(-1, 1, dim, 0) substrate = NEAT.Substrate( inputs, [], # hidden outputs) substrate.m_allow_input_output_links = True substrate.m_allow_input_hidden_links = False substrate.m_allow_hidden_hidden_links = False substrate.m_allow_hidden_output_links = False substrate.m_allow_output_hidden_links = False substrate.m_allow_output_output_links = False substrate.m_allow_looped_hidden_links = False substrate.m_allow_looped_output_links = False substrate.m_hidden_nodes_activation = NEAT.ActivationFunction.SIGNED_SIGMOID substrate.m_output_nodes_activation = NEAT.ActivationFunction.UNSIGNED_SIGMOID substrate.m_with_distance = True substrate.m_max_weight_and_bias = 3.0 return substrate
25,113
def EventObjectGenerator(plaso_storage, quiet=False):
    """Yields EventObject objects out of a StorageFile object.

    Unless 'quiet' is set, progress dots are written to stdout -- roughly 80
    over the full run (one per 1/80th of the total events).

    Args:
        plaso_storage: a storage.StorageFile object.
        quiet: boolean value indicating whether to suppress progress output.

    Yields:
        EventObject objects.
    """
    total_events = plaso_storage.GetNumberOfEvents()
    if total_events > 0:
        # max(..., 1) prevents the modulo-by-zero the original hit whenever
        # there were fewer than 80 events (floordiv gave 0).
        events_per_dot = max(1, total_events // 80)
    else:
        # Unknown/zero total: progress cannot be computed, so suppress it.
        quiet = True
        events_per_dot = 1
    counter = 0
    event_object = plaso_storage.GetSortedEntry()
    while event_object:
        if not quiet:
            counter += 1
            if counter % events_per_dot == 0:
                sys.stdout.write(u'.')
                sys.stdout.flush()
        yield event_object
        event_object = plaso_storage.GetSortedEntry()
25,114
def main():
    """Placeholder entry point for running this module as a command line program."""
    print("null tpn processing.")
25,115
def remove_artefacts(signal: np.array, low_limit: int = 40, high_limit: int = 210) -> np.array:
    """Replace artefact samples (ultra-low and ultra-high values) with zero.

    Args:
        signal: (np.array) 1D signal
        low_limit: (int) values strictly below this are zeroed
        high_limit: (int) values strictly above this are zeroed
    Output:
        (np.array) filtered copy of the signal, as floats
    """
    # Build a single artefact mask from the raw signal, then zero in one pass.
    artefact_mask = (signal < low_limit) | (signal > high_limit)
    cleaned = signal.astype('float')
    cleaned[artefact_mask] = 0
    return cleaned
25,116
def normalizedBGR(im, display=True):
    """
    Generate an opponent-style color space: each BGR channel is z-scored,
    then rescaled to [0, 255] for merging/display.

    Returns the merged uint8 image plus the raw standardized b, g, r channels.
    """
    im = img.norm(im)
    B, G, R = np.dsplit(im, 3)
    b = (B - np.mean(B)) / np.std(B)
    g = (G - np.mean(G)) / np.std(G)
    r = (R - np.mean(R)) / np.std(R)
    # Rescale each standardized channel to uint8 once, reuse for merge/display.
    channels_u8 = tuple(np.uint8(img.normUnity(c) * 255) for c in (b, g, r))
    out = cv2.merge(channels_u8)
    if display:
        cv2.imshow('norm bgr', np.hstack(channels_u8))
        cv2.waitKey(0)
    return out, b, g, r
25,117
def generate_training_pages():
    """
    Responsible for generating the markdown pages of the training pages
    """
    # Side navigation for training
    data = {'menu': resources_config.training_navigation}
    serialized = json.dumps(data)
    pages = [
        (resources_config.training_md, "training.md"),          # training overview
        (resources_config.training_cti_md, "training_cti.md"),  # CTI training
    ]
    # Render each template with the nav data appended and write it out.
    for template, filename in pages:
        target = os.path.join(site_config.resources_markdown_path, filename)
        with open(target, "w", encoding='utf8') as md_file:
            md_file.write(template + serialized)
25,118
def feeds(url):
    """
    Tries to find feeds for a given URL.

    Strategies, in order: the URL itself, feed <link> tags in the markup,
    local anchor links, common feed paths (brute force), and finally any
    anchor link.  Returns a de-duplicated list of feed URLs (possibly empty).
    """
    url = _full_url(url)
    data = _get(url)
    # Check if the url is a feed.
    if _is_feed(url):
        return [url]
    # Try to get feed links from markup.
    try:
        feed_links = [link for link in _get_feed_links(data, url) if _is_feed(link)]
    except Exception:
        # Was a bare `except:` -- narrowed so KeyboardInterrupt/SystemExit escape.
        feed_links = []
    if feed_links:
        return feed_links
    # Try 'a' links.
    try:
        links = _get_a_links(data)
    except Exception:
        links = []
    if links:
        # Filter to only local links.
        local_links = [link for link in links if link.startswith(url)]
        # Try to find feed links.
        feed_links.extend(_filter_feed_links(local_links))
        # If still nothing has been found...
        if not feed_links:
            # Try to find feed-looking links.
            feed_links.extend(_filter_feedish_links(local_links))
        # If still nothing has been found...
        if not feed_links:
            # BRUTE FORCE IT!
            guesses = [
                'atom.xml',    # Blogger, TypePad
                'index.atom',  # MoveableType
                'index.rdf',   # MoveableType
                'rss.xml',     # Dave Winer/Manila
                'index.xml',   # MoveableType
                'index.rss',   # Slash
                'feed',        # WordPress
            ]
            tries = [parse.urljoin(url, g) for g in guesses]
            feed_links.extend(link for link in tries if _is_feed(link))
    # If *still* nothing has been found, just try all the links.
    if links and not feed_links:
        feed_links.extend(_filter_feed_links(links))
        feed_links.extend(_filter_feedish_links(links))
    # Filter out duplicates.
    return list(set(feed_links))
25,119
def getlog(name):
    """Create logger object with predefined stream handler & formatting

    Parameters
    ----------
    name : str
        module __name__

    Returns
    -------
    logging.logger

    Examples
    --------
    >>> from smseventlog import getlog
    >>> log = getlog(__name__)
    """
    # Strip the top-level package from the dotted name; an empty name would
    # resolve to the ROOT logger, so fall back to 'base'.
    stripped = '.'.join(str(name).split('.')[1:])
    return Logger(stripped or 'base')
25,120
def hard_prune(args, ADMM, model):
    """Apply hard pruning (direct masking) to the model's weight tensors.

    Args:
        args: pruning configuration forwarded to weight_pruning.
        ADMM: ADMM state holding per-layer prune ratios (rho).
        model: contains weight tensors in cuda; masked in place.
    """
    print("hard pruning")
    for name, W in model.named_parameters():
        if name not in ADMM.prune_ratios:
            continue  # layer has no rho configured -> leave untouched
        _, pruned_weights = weight_pruning(args, W, ADMM.prune_ratios[name])
        # Replace the data field of the parameter with the sparse tensor.
        W.data = pruned_weights
25,121
def read_new_probe_design(path: str, reference_type: str = 'genome') -> pd.DataFrame:
    """
    Read amplimap probes.csv file and return pandas dataframe.

    Detects the legacy MIPGEN column layout and converts it, then
    post-processes the design for the given reference type.  Any failure is
    re-raised as AmplimapReaderException with the filename and the original
    traceback attached.
    """
    try:
        design = pd.read_csv(path)
        log.info('Read probe design table from %s -- found %d probes', path, len(design))
        # An exact match on the MIPGEN column set signals the old file format.
        if list(design.columns) == mipgen_columns:
            # NB: smmip data seems to be in F2R1 orientation (second read = fwd
            # in genomic coordinates) for fwd probes but F1R2 orientation
            # (second read = rev) for rev probes.
            # cs-tag data seems to be in F1R2 orientation for fwd targets.
            # unclear for rev targets, but presumably F2R1?
            # in other words, CS-tag is in gene orientation, while smMIP is in
            # opposite, so both are swapped for MIPs.
            # is this why sequences in probes.csv are currently so confusing?
            log.info('Detected old MIPGEN format, converting...')
            # read the probes file again in old mipgen format and convert
            design = read_and_convert_mipgen_probes(path)
        design = process_probe_design(design, reference_type)
    except Exception as e:
        # Wrap in a reader-level error while preserving the original traceback.
        raise AmplimapReaderException(e, filename = path, should_have_header = True).with_traceback(sys.exc_info()[2])
    return design
25,122
def replace_nan(x):
    """
    Replaces NaNs in 1D array with nearest finite value.

    Usage:  y = replace_nan(x)
    Returns filled array y without altering input array x.
    Assumes input is a numpy array.  3/2015 BWB
    """
    import numpy as np
    # Work on a float copy so the input is never modified.
    x2 = x.astype(float)
    # np.flatnonzero replaces the old matplotlib.mlab `find`, which was
    # removed from matplotlib and was undefined here.
    bads = np.flatnonzero(np.isnan(x))   # indices of NaNs
    if bads.size == 0:
        return x2
    fins = np.flatnonzero(np.isfinite(x))  # indices of all finite values
    for bad in bads:
        # Index of the nearest finite neighbour (ties resolve to the earlier one).
        nearest = fins[np.abs(fins - bad).argmin()]
        x2[bad] = x[nearest]
    return x2
25,123
def import_worksheet_data(model_type):
    """A function called from Excel to import data by ID from Firestore into the Excel workbook.

    Reads IDs from the active sheet, queries the Cannlytics API for the given
    data model, and writes the returned records back into the worksheet.
    Progress and errors are reported in the status cell configured on the
    'cannlytics.conf' sheet.

    Args:
        model_type (str): The data model at hand.
    """
    # Initialize the workbook.
    book = xlwings.Book.caller()
    worksheet = book.sheets.active
    config_sheet = book.sheets['cannlytics.conf']
    config = get_data_block(config_sheet, 'A1', expand='table')
    show_status_message(
        worksheet,
        coords=config['status_cell'],
        message='Importing %s data...' % model_type,
        background=config['success_color'],
    )
    # Read the IDs: the column starting one row below the table header cell.
    id_cell = increment_row(config['table_cell'])
    ids = worksheet.range(id_cell).options(expand='down', ndim=1).value
    # Get your Cannlytics API key from your .env file, location specified
    # by env_path on the cannlytics.config sheet.
    load_dotenv(config['env_path'])
    api_key = os.getenv('CANNLYTICS_API_KEY')
    # Get the worksheet columns (header row), normalized to snake_case.
    columns = worksheet.range(config['table_cell']).options(expand='right', ndim=1).value
    columns = [snake_case(x) for x in columns]
    # Get data using model type and ID through the API.
    base = config['api_url']
    org_id = worksheet.range(config['org_id_cell']).value
    headers = {
        'Authorization': 'Bearer %s' % api_key,
        'Content-type': 'application/json',
    }
    # One ID -> detail endpoint; several IDs -> list endpoint with items filter.
    if len(ids) == 1:
        url = f'{base}/{model_type}/{ids[0]}?organization_id={org_id}'
    else:
        url = f'{base}/{model_type}?organization_id={org_id}&items={str(ids)}'
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        show_status_message(
            worksheet,
            coords=config['status_cell'],
            message='Error importing data.',
            background=config['error_color']
        )
        return
    # Format the values.
    items = []
    data = response.json()['data']
    if not data:
        show_status_message(
            worksheet,
            coords=config['status_cell'],
            message='No data found.',
            background=config['error_color']
        )
        return
    try:
        # List of records: one worksheet row per item, in column order.
        for item in data:
            values = []
            for column in columns:
                values.append(item.get(column))
            items.append(values)
    except AttributeError:
        # A single record came back as a dict (no .get on its items loop):
        # write it as one row.
        values = []
        for column in columns:
            values.append(data.get(column))
        items = [values]
    # Insert all rows at the same time.
    worksheet.range(id_cell).value = items
    # Show success status message.
    show_status_message(
        worksheet,
        coords=config['status_cell'],
        message='Imported %i %s.' % (len(ids), model_type),
    )
25,124
def handson_table(request, query_sets, fields):
    """Render the scoresheets as an embedded handsontable as part of the template.

    Args:
        request: the incoming HTTP request (unused, kept for the view signature).
        query_sets: query sets holding the score data.
        fields: fields to include in the rendered table.
    """
    # The commented-out alternative implementations were dead code and have
    # been removed; see version control history if needed.
    return excel.make_response_from_query_sets(query_sets, fields, 'handsontable.html')
25,125
def is_periodic(G):
    """Return True if the strongly connected directed graph G is periodic.

    A strongly connected digraph is periodic iff the gcd of all its simple
    cycle lengths is greater than 1.
    https://stackoverflow.com/questions/54030163/periodic-and-aperiodic-directed-graphs

    Prints a message and returns False when G is not strongly connected,
    where periodicity is not defined.
    """
    if not nx.is_strongly_connected(G):
        print("G is not strongly connected, periodicity not defined.")
        return False
    # Find all cycle sizes, then their greatest common divisor.
    cycle_sizes = [len(c) for c in nx.algorithms.cycles.simple_cycles(G)]
    # Return directly; the original bound the result to a local named
    # `is_periodic`, shadowing this function's own name.
    return reduce(gcd, cycle_sizes) > 1
25,126
def Sphere(individual):
    """Sphere test objective function.

    F(x) = sum_{i=1}^d xi^2, d = 1, 2, 3, ...
    Range: [-100, 100]
    Minima: 0
    """
    total = 0
    for coordinate in individual:
        total += coordinate ** 2
    return total
25,127
def find_names_for_google(df_birth_names):
    """Resolve raw birth-place strings to countries where possible.

    Each birth string is checked, in order, against: the known-country list
    (some entries are imperfect -- multi-named countries, special symbols,
    disputed territories, Hong Kong/Taiwan are provisionally treated as
    countries), US states (mapped to "United States"), nationalities (mapped
    to their country, e.g. Chinese -> China), and known cities per the
    worldcities database (duplicated city names resolve to the most populous
    one, e.g. Valencia).  Failing exact matches, the same lists are retried
    as substring matches.  Anything still unresolved is collected for a later
    Google search.

    :param df_birth_names: all birth data (from the data given by Lu),
        with a 'birth' column of raw strings.
    :return: (df_country_found, df_need_google_search) -- resolved countries
        (NaN for empty entries) and the de-duplicated unresolved strings.
    """
    whitelist = set('abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    dirty_list = []
    need_searching_list = []
    # NOTE(review): .head(30) caps processing at the first 30 rows -- looks
    # like a leftover debugging limit; confirm before removing.
    for index, row in df_birth_names.head(30).iterrows():
        item = ''.join(filter(whitelist.__contains__, row['birth'])).strip()
        # Was `item is ""`: identity comparison on strings is a bug (and a
        # SyntaxWarning on modern Python); truthiness is the correct check.
        if not item:  # null
            dirty_list.append(np.nan)
            print(item, " is null")
            continue
        if item in COUNTRY_LIST:  # known countries
            dirty_list.append(item)
            print(item, " is a country")
            continue
        if item in US_STATES_LIST:  # add us states as United States
            dirty_list.append("United States")
            print(item, " is a state in the US")
            continue
        if item in NATIONALITY_LIST:  # nationality -> country, e.g. Chinese -> China
            nation_from_nationality = NATIONALITY_TO_COUNTRY.loc[item]["Country/entity name"]
            dirty_list.append(nation_from_nationality)
            print(item, " is a national of a certain country")
            continue
        if item in CITY_LIST:  # known city to country, e.g. London -> UK
            country_from_city = CITY_TO_COUNTRY.loc[item]["country"]
            dirty_list.append(country_from_city)
            print(item, " is a city and it has been transformed")
            continue
        # Exact matches failed: retry the same lists as substring matches.
        matched = False
        for country in COUNTRY_LIST:
            if country in item:
                dirty_list.append(country)
                print(country, " maybe a country")
                matched = True
                break
        if matched:
            continue
        for state in US_STATES_LIST:
            if state in item:
                dirty_list.append("United States")
                print(state, "maybe a state in the US")
                matched = True
                break
        if matched:
            continue
        for city in CITY_LIST:
            if city in item:
                country_from_city = CITY_TO_COUNTRY.loc[city]["country"]
                dirty_list.append(country_from_city)
                print(city, " maybe a city, and we are attempting to transform it")
                matched = True
                break
        if matched:
            continue
        need_searching_list.append(item)
        print("this item: ", item, " is not added")
    # Remove duplicates while preserving order.
    need_searching_list = list(dict.fromkeys(need_searching_list))
    df_country_found = pd.DataFrame(dirty_list)
    df_need_google_search = pd.DataFrame(need_searching_list)
    return df_country_found, df_need_google_search
25,128
def check_type(
    value: Any,
    expected_type: Any,
    *,
    argname: str = "value",
    memo: Optional[TypeCheckMemo] = None,
) -> None:
    """
    Ensure that ``value`` matches ``expected_type``.

    The types from the :mod:`typing` module do not support :func:`isinstance`
    or :func:`issubclass` so a number of type specific checks are required.
    This function knows which checker to call for which type.

    :param value: value to be checked against ``expected_type``
    :param expected_type: a class or generic type instance
    :param argname: name of the argument to check; used for error messages
    :raises TypeCheckError: if there is a type mismatch
    """
    # Any matches everything; Mock objects are exempted so mocked values pass.
    if expected_type is Any or isinstance(value, Mock):
        return
    if memo is None:
        # _getframe(1) captures the *caller's* globals/locals so forward
        # references resolve in the caller's namespace.  Do not wrap this
        # call in a helper -- that would change the frame depth.
        frame = sys._getframe(1)
        memo = TypeCheckMemo(frame.f_globals, frame.f_locals)
    try:
        check_type_internal(value, expected_type, memo)
    except TypeCheckError as exc:
        # Record which argument failed before dispatching/re-raising.
        exc.append_path_element(argname)
        if memo.config.typecheck_fail_callback:
            memo.config.typecheck_fail_callback(exc, memo)
        else:
            raise
25,129
def make_address_mask(universe, sub=0, net=0, is_simplified=True):
    """Returns the address bytes for a given universe, subnet and net.

    Args:
        universe - Universe to listen
        sub - Subnet to listen
        net - Net to listen
        is_simplified - Whether to use nets and subnet or universe only,
            see User Guide page 5 (Universe Addressing)

    Returns:
        bytes - byte mask for given address
    """
    mask = bytearray(2)
    if is_simplified:
        # Clamp to the 15-bit universe range, then split into LSB/MSB
        # (little-endian byte order).
        msb, lsb = shift_this(clamp(universe, 0, 32767))
        mask[0] = lsb
        mask[1] = msb
    else:
        # Full addressing: low byte packs subnet (high nibble) and universe
        # (low nibble); high byte carries the net.
        mask[0] = clamp(sub, 0, 15) << 4 | clamp(universe, 0, 15)
        mask[1] = clamp(net, 0, 127) & 0xFF
    return mask
25,130
def get_offline_target(featureset, start_time=None, name=None):
    """Return an optimal offline feature set target, or None if there is none."""
    # todo: take status, start_time and lookup order into account
    for candidate in featureset.status.targets:
        if not kind_to_driver[candidate.kind].is_offline:
            continue
        if name and name != candidate.name:
            continue
        return get_target_driver(candidate, featureset)
    return None
25,131
def yaml2json(input, output, mini, binary, sort):
    """
    -i YAML -o JSON [-m] (minify) [-b] (keep binary)
    """
    # Parse the YAML source first, then emit it as JSON.
    parsed = reader.yaml(input, sort=sort)
    writer.json(parsed, output, mini=mini, binary=binary)
25,132
def create_dir_struct(course_name="abc_course", force=False, working_dir=None):
    """
    Create a directory structure that can be used to start an abc-classroom
    course. This includes a main directory, two sub directories for templates
    and cloned files, and a start to a configuration file.
    This is the implementation of the abc-quickstart script; it is called
    directly from main.

    Parameters
    ----------
    course_name : str
        Name of the course directory to create (no spaces allowed).
    force : bool
        If True, delete and recreate an existing course directory.
    working_dir : str or Path, optional
        Parent directory; defaults to the current working directory.

    Raises
    ------
    FileNotFoundError
        If the packaged sample config.yml cannot be located.
    ValueError
        If course_name contains spaces.
    FileExistsError
        If the course directory already exists and force is not set.
    """
    # Make sure the sample configuration file is where it's supposed to be.
    try:
        config_path = path_to_example("config.yml")
    except FileNotFoundError as err:
        print(
            """Sample config.yml configuration file cannot be located at {},
            please ensure abc-classroom has been installed correctly""".format(err)
        )
        # Nothing below can work without the sample config; the original
        # swallowed this error and crashed later with a NameError on
        # config_path instead.
        raise
    # Assign the custom folder name if applicable
    if " " in course_name:
        raise ValueError(
            """Spaces not allowed in custom course name: {}.
            Please use hyphens instead.""".format(course_name)
        )
    if working_dir is None:
        working_dir = os.getcwd()
    main_dir = Path(working_dir, course_name)
    # Check if the main_dir exists
    if main_dir.is_dir():
        if force:
            rmtree(main_dir)
        else:
            raise FileExistsError(
                """
                Ooops! It looks like the directory {} already exists in this
                directory. You might have already run quickstart here.
                Consider using a different course name, deleting the existing
                directory, or running quickstart with the -f flag to force
                overwrite the existing directory.""".format(main_dir)
            )
    # Make the main course directory and copy the config file there
    main_dir.mkdir()
    copy(config_path, main_dir)
    # Use config functions to read / write config.  We already have the file
    # in the right place, but we need to re-write the config so that it has
    # the right course directory.
    config = cf.get_config(main_dir)
    config["course_directory"] = str(main_dir)
    cf.write_config(config, main_dir)
    clone_dir = cf.get_config_option(config, "clone_dir")
    template_dir = cf.get_config_option(config, "template_dir")
    # Make the required subdirectories
    Path(main_dir, clone_dir).mkdir()
    Path(main_dir, template_dir).mkdir()
    # Create the extra_files directory in the main_dir & copy files
    extra_path = path_to_example("extra_files")
    copytree(extra_path, Path(main_dir, "extra_files"))
    # Copy the sample roster over to the new quickstart dir
    # TODO make sure the name of this file matches the default config name
    try:
        classroom_roster = path_to_example("classroom_roster.csv")
        copy(classroom_roster, Path(main_dir))
    except FileNotFoundError as err:
        # Best effort: a missing sample roster should not abort quickstart.
        # (Message fixed: the original wrongly said "config.yml" here.)
        print(
            """Sample classroom roster file cannot be located at {},
            please ensure abc-classroom has been installed correctly""".format(err)
        )
    print(
        """
        Created new abc-classroom directory structure at: '{}', including a
        configuration file, directories for template repos and cloned
        repositories, and a directory for extra files needed for all
        assignments.

        To proceed, please create / move your course roster and course
        materials directory into '{}' and check the settings in the config
        file, 'config.yml'.""".format(main_dir, course_name)
    )
25,133
def get_img_size(src_size, dest_size):
    """
    Return image dimensions kept in proportion to the original, depending on
    whether the image is oriented vertically or horizontally.

    :param src_size: size of the original (width, height)
    :type src_size: list / tuple
    :param dest_size: target size (width, height)
    :type dest_size: list / tuple
    :rtype: tuple
    """
    dest_width, dest_height = dest_size
    src_width, src_height = src_size
    if dest_height >= dest_width:
        # Portrait (or square) target: keep the source height, scale the width.
        scaled_width = int(float(dest_width) / dest_height * src_height)
        return (scaled_width, src_height)
    # Landscape target: keep the source width, scale the height.
    scaled_height = int(float(dest_height) / dest_width * src_width)
    return (src_width, scaled_height)
25,134
def test_get_query_result():
    """Test fetching a query's result set."""
    hp.enable()
    hp.reset()
    mock_query_result()
    response = client.get("/v3/datalab/queries/1/result/")
    assert response.is_success()
    assert response.data["totalRecords"] == 1
25,135
def laxnodeset(v):
    """Return a nodeset with elements from the argument.

    If the argument already is a nodeset, it itself will be returned.
    Otherwise it will be converted to a nodeset, that can be mutable or
    immutable depending on what happens to be most effectively implemented.
    """
    return v if isinstance(v, NodeSet) else immnodeset(v)
25,136
def load_config():
    """Load and return the JSON configuration file ('config.json').

    Returns:
        (json) : The configuration file contents.
    """
    config_filename = 'config.json'
    return load_json_file(config_filename)
25,137
def test_singleton_get_instance_without_configure_fails():
    """
    Calling get_instance() without first calling configure_instance()
    should fail.
    """
    with pytest.raises(Exception):
        # Unused assignment removed; only the raising behavior matters here.
        Controller.get_instance()
25,138
def dtm_generate_footprint(tile_id):
    """Generate a footprint shapefile for a DTM tile using gdaltindex.

    :param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row
        number and ccc is the column number
    :return: 'success' on success, 'gdalError' on failure
    """
    out_file = settings.dtm_footprint_folder + '/DTM_1km_' + tile_id + '_footprint.shp'
    in_file = settings.dtm_folder + '/DTM_1km_' + tile_id + '.tif'
    # Build an argument list: the original passed one command *string* with
    # shell=False, which fails on POSIX.  shlex.split keeps any flags that may
    # be embedded in the configured binary string.
    cmd = shlex.split(settings.gdaltlindex_bin) + [out_file, in_file]
    # Context manager guarantees the log file is closed on every path
    # (the original leaked it if an unexpected exception escaped).
    with open('log.txt', 'a') as log_file:
        try:
            cmd_return = subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT)
            # check_output returns bytes; decode before concatenating with str
            # (the original raised TypeError here on Python 3).
            log_file.write(
                '\n' + tile_id + ' footprint generation... \n' +
                cmd_return.decode(errors='replace') +
                '\n' + tile_id + ' successful.\n\n')
            return 'success'
        except (subprocess.CalledProcessError, OSError):
            # Narrowed from a bare except: tool failure or missing binary.
            log_file.write('\n' + tile_id + ' footprint generation failed. \n')
            return 'gdalError'
25,139
def save_fitresult(output_dir, fit_result, log_lines=None):
    """
    Save a fit result object to the specified directory with associated metadata

    Output directory contents:
    fit_result.csv/.txt: Fit output result (deltaG, covariance, k_obs, pfact)
    losses.csv/.txt: Losses per epoch
    HDXMeasurement.csv or HDXMeasurements.csv: the fit input (name depends on
        whether a single measurement or a measurement set was fitted)
    log.txt: Log file with additional metadata (number of epochs, final losses,
        pyhdx version, time/date)

    Parameters
    ----------
    output_dir: pathlib.Path or :obj:`str`
        Output directory to save fitresult to
    fit_result: pydhx.fittin_torch.TorchFitResult
        fit result object to save
    log_lines: :obj:`list`
        Optional additional lines to write to log file.

    Returns
    -------
    None
    """
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    # machine-readable and pretty-printed copies of the fit result
    fit_result.to_file(output_dir / 'fit_result.csv')
    fit_result.to_file(output_dir / 'fit_result.txt', fmt='pprint')

    dataframe_to_file(output_dir / 'losses.csv', fit_result.losses)
    dataframe_to_file(output_dir / 'losses.txt', fit_result.losses, fmt='pprint')

    # single measurement vs. measurement set write different file names
    if isinstance(fit_result.hdxm_set, pyhdx.HDXMeasurement):
        fit_result.hdxm_set.to_file(output_dir / 'HDXMeasurement.csv')
    if isinstance(fit_result.hdxm_set, pyhdx.HDXMeasurementSet):
        fit_result.hdxm_set.to_file(output_dir / 'HDXMeasurements.csv')

    loss = f'Total_loss {fit_result.total_loss:.2f}, mse_loss {fit_result.mse_loss:.2f}, reg_loss {fit_result.reg_loss:.2f}' \
           f'({fit_result.regularization_percentage:.2f}%)'
    epochs = f"Number of epochs: {len(fit_result.losses)}"
    version = pyhdx.VERSION_STRING
    now = datetime.now()
    date = f'# {now.strftime("%Y/%m/%d %H:%M:%S")} ({int(now.timestamp())})'

    lines = [date, version, loss, epochs]
    if log_lines is not None:
        # blank separator line between metadata and user-supplied lines
        lines.append('')
        lines += log_lines
    log_file_out = output_dir / 'log.txt'
    log_file_out.write_text('\n'.join(lines))
25,140
def favicon(request):
    """
    Serve the site favicon (icon "best by tezar tantular from the Noun
    Project"); a distinct icon is used in DEBUG mode.
    """
    path = "static/favicon-dev.ico" if settings.DEBUG else "static/favicon.ico"
    # context manager fixes the original file-handle leak
    with open(path, "rb") as f:
        image_data = f.read()
    # TODO add cache headers
    return HttpResponse(image_data, content_type="image/x-icon")
25,141
def text_from_html(body):
    """
    Extract all visible raw text from an HTML document, with tags removed.

    :param body: html markup
    :return: str
    """
    soup = BeautifulSoup(body, "html.parser")
    all_text = soup.findAll(text=True)
    # keep only text nodes that tag_visible accepts, stripped and
    # space-joined into a single string
    visible = (node.strip() for node in all_text if tag_visible(node))
    return " ".join(visible)
25,142
def alert_history():
    """
    Alert History: RESTful CRUD controller

    Delegates to the generic S3 REST controller, attaching the CAP
    history rheader for record views.
    """
    return s3_rest_controller(rheader = s3db.cap_history_rheader)
25,143
def chat():
    """Chat room view.

    The user's name and room key must already be in the session;
    otherwise the user is bounced back to the index page. An avatar is
    assigned lazily on first visit.
    """
    if 'avatar' not in session:
        # only generate an avatar once per session
        session['avatar'] = avatars.get_avatar()

    user_name = session.get('user_name', '')
    room_key = session.get('room_key', '')
    if user_name == '' or room_key == '':
        return redirect(url_for('.index'))

    return render_template(
        'chat.html',
        user_name=user_name,
        avatar=session.get('avatar'),
        room_key=room_key,
        password=session.get('password', ''),
    )
25,144
def _get_split_idx(N, blocksize, pad=0): """ Returns a list of indexes dividing an array into blocks of size blocksize with optional padding. Padding takes into account that the resultant block must fit within the original array. Parameters ---------- N : Nonnegative integer Total array length blocksize : Nonnegative integer Size of each block pad : Nonnegative integer Pad to add on either side of each index Returns ------- split_idx : List of 2-tuples Indices to create splits pads_used : List of 2-tuples Pads that were actually used on either side Examples -------- >>> split_idx, pads_used = _get_split_idx(5, 2) >>> print split_idx [(0, 2), (2, 4), (4, 5)] >>> print pads_used [(0, 0), (0, 0), (0, 0)] >>> _get_split_idx(5, 2, pad=1) >>> print split_idx [(0, 3), (1, 5), (3, 5)] >>> print pads_used [(0, 1), (1, 1), (1, 0)] """ num_fullsplits = N // blocksize remainder = N % blocksize split_idx = [] pads_used = [] for i in range(num_fullsplits): start = max(0, i * blocksize - pad) end = min(N, (i + 1) * blocksize + pad) split_idx.append((start, end)) leftpad = i * blocksize - start rightpad = end - (i + 1) * blocksize pads_used.append((leftpad, rightpad)) # Append the last split if there is a remainder if remainder: start = max(0, num_fullsplits * blocksize - pad) split_idx.append((start, N)) leftpad = num_fullsplits * blocksize - start pads_used.append((leftpad, 0)) return split_idx, pads_used
25,145
def buydown_loan(amount, nrate, grace=0, dispoints=0, orgpoints=0, prepmt=None):
    """ In this loan, the periodic payments are recalculated when there are
    changes in the value of the interest rate.

    Args:
        amount (float): Loan amount.
        nrate (float, pandas.Series): nominal interest rate per year.
        grace (int): number of grace periods without paying the principal.
        dispoints (float): Discount points of the loan.
        orgpoints (float): Origination points of the loan.
        prepmt (pandas.Series): generic cashflow representing prepayments.

    Returns:
        An object of the class ``Loan``.

    Raises:
        TypeError: if ``nrate`` is not a ``pandas.Series``.

    >>> nrate = interest_rate(const_value=10, start='2016Q1', periods=11, freq='Q', chgpts={'2017Q2':20})

    >>> buydown_loan(amount=1000, nrate=nrate, dispoints=0, orgpoints=0, prepmt=None) # doctest: +NORMALIZE_WHITESPACE
    Amount:             1000.00
    Total interest:      200.99
    Total payment:      1200.99
    Discount points:       0.00
    Origination points:    0.00
    <BLANKLINE>
            Beg_Ppal_Amount  Nom_Rate  Tot_Payment  Int_Payment  Ppal_Payment  \\
    2016Q1      1000.000000      10.0     0.000000     0.000000      0.000000
    2016Q2      1000.000000      10.0   114.258763    25.000000     89.258763
    2016Q3       910.741237      10.0   114.258763    22.768531     91.490232
    2016Q4       819.251005      10.0   114.258763    20.481275     93.777488
    2017Q1       725.473517      10.0   114.258763    18.136838     96.121925
    2017Q2       629.351591      20.0   123.993257    31.467580     92.525677
    2017Q3       536.825914      20.0   123.993257    26.841296     97.151961
    2017Q4       439.673952      20.0   123.993257    21.983698    102.009559
    2018Q1       337.664393      20.0   123.993257    16.883220    107.110037
    2018Q2       230.554356      20.0   123.993257    11.527718    112.465539
    2018Q3       118.088816      20.0   123.993257     5.904441    118.088816
    <BLANKLINE>
            End_Ppal_Amount
    2016Q1     1.000000e+03
    2016Q2     9.107412e+02
    2016Q3     8.192510e+02
    2016Q4     7.254735e+02
    2017Q1     6.293516e+02
    2017Q2     5.368259e+02
    2017Q3     4.396740e+02
    2017Q4     3.376644e+02
    2018Q1     2.305544e+02
    2018Q2     1.180888e+02
    2018Q3     1.136868e-13

    >>> pmt = cashflow(const_value=0, start='2016Q1', periods=11, freq='Q')
    >>> pmt['2017Q4'] = 200

    >>> buydown_loan(amount=1000, nrate=nrate, dispoints=0, orgpoints=0, prepmt=pmt) # doctest: +NORMALIZE_WHITESPACE
    Amount:             1000.00
    Total interest:      180.67
    Total payment:      1180.67
    Discount points:       0.00
    Origination points:    0.00
    <BLANKLINE>
            Beg_Ppal_Amount  Nom_Rate  Tot_Payment  Int_Payment  Ppal_Payment  \\
    2016Q1      1000.000000      10.0     0.000000     0.000000      0.000000
    2016Q2      1000.000000      10.0   114.258763    25.000000     89.258763
    2016Q3       910.741237      10.0   114.258763    22.768531     91.490232
    2016Q4       819.251005      10.0   114.258763    20.481275     93.777488
    2017Q1       725.473517      10.0   114.258763    18.136838     96.121925
    2017Q2       629.351591      20.0   123.993257    31.467580     92.525677
    2017Q3       536.825914      20.0   123.993257    26.841296     97.151961
    2017Q4       439.673952      20.0   323.993257    21.983698    302.009559
    2018Q1       137.664393      20.0    50.551544     6.883220     43.668324
    2018Q2        93.996068      20.0    50.551544     4.699803     45.851741
    2018Q3        48.144328      20.0    50.551544     2.407216     48.144328
    <BLANKLINE>
            End_Ppal_Amount
    2016Q1     1.000000e+03
    2016Q2     9.107412e+02
    2016Q3     8.192510e+02
    2016Q4     7.254735e+02
    2017Q1     6.293516e+02
    2017Q2     5.368259e+02
    2017Q3     4.396740e+02
    2017Q4     1.376644e+02
    2018Q1     9.399607e+01
    2018Q2     4.814433e+01
    2018Q3     4.263256e-14

    """
    if not isinstance(nrate, pd.Series):
        # BUGFIX: the exception was previously constructed but never raised
        raise TypeError('nrate must be a pandas.Series object.')

    if prepmt is None:
        # no prepayments: use a zero-valued series aligned with nrate
        prepmt = nrate.copy()
        prepmt[:] = 0
    else:
        verify_period_range([nrate, prepmt])

    life = len(nrate) - grace - 1

    begppalbal = nrate.copy()
    intpmt = nrate.copy()
    ppalpmt = nrate.copy()
    totpmt = nrate.copy()
    endppalbal = nrate.copy()

    begppalbal[:] = 0
    intpmt[:] = 0
    ppalpmt[:] = 0
    totpmt[:] = 0
    endppalbal[:] = 0

    ##
    ## balance calculation
    ##
    pyr = getpyr(nrate)

    for time in range(grace + life + 1):

        if time == 0:
            #
            begppalbal[time] = amount
            endppalbal[time] = amount
            totpmt[time] = amount * (dispoints + orgpoints) / 100
            ### intpmt[time] = amount * dispoints / 100
            #
        else:
            #
            # periodic payment per period
            #
            if time <= grace:
                # during grace: interest-only payments, principal unchanged
                begppalbal[time] = endppalbal[time - 1]
                intpmt[time] = begppalbal[time] * nrate[time] / pyr / 100
                totpmt[time] = intpmt[time]
                endppalbal[time] = begppalbal[time]
            else:
                # recompute the level payment over the remaining term at the
                # current rate (this is what makes it a buydown loan)
                pmt = -pvpmt(nrate=nrate[time], nper=grace+life-time+1,
                             pval=endppalbal[time-1], pmt=None, pyr=pyr)
                totpmt[time] = pmt + prepmt[time]

                # balance
                begppalbal[time] = endppalbal[time - 1]
                intpmt[time] = begppalbal[time] * nrate[time] / pyr / 100
                ppalpmt[time] = totpmt[time] - intpmt[time]
                endppalbal[time] = begppalbal[time] - ppalpmt[time]

    data = {'Beg_Ppal_Amount':begppalbal}
    result = Loan(life=life, amount=amount, grace=grace, nrate=nrate,
                  dispoints=dispoints, orgpoints=orgpoints,
                  data=data)
    result['Nom_Rate'] = nrate
    result['Tot_Payment'] = totpmt
    result['Int_Payment'] = intpmt
    result['Ppal_Payment'] = ppalpmt
    result['End_Ppal_Amount'] = endppalbal

    return result
25,146
def ffill(array: np.ndarray, value: Optional[int] = 0) -> np.ndarray:
    """Forward fill occurrences of *value* along the last axis.

    Args:
        array: 1-D or 2-D array.
        value: Value to be filled. Default is 0.

    Returns:
        ndarray: Forward-filled array.

    Examples:
        >>> x = np.array([0, 5, 0, 0, 2, 0])
        >>> ffill(x)
        [0, 5, 5, 5, 2, 2]

    Notes:
        Works only in axis=1 direction.
    """
    ndims = len(array.shape)
    positions = np.arange(array.shape[ndims - 1])
    # index of the most recent non-`value` element, carried forward
    keep = np.where(array != value, positions, 0)
    keep = np.maximum.accumulate(keep, axis=ndims - 1)  # pylint: disable=E1101
    if ndims == 2:
        rows = np.arange(keep.shape[0])[:, None]
        return array[rows, keep]
    return array[keep]
25,147
def test_dpp_proto_pkex_cr_req_no_i_auth_tag(dev, apdev):
    """DPP protocol testing - no I-Auth Tag in PKEX Commit-Reveal Request"""
    # 39 presumably selects which attribute/frame to omit — see
    # run_dpp_proto_pkex_req_missing for the mapping (TODO confirm)
    run_dpp_proto_pkex_req_missing(dev, 39, "No valid u (I-Auth tag) found")
25,148
def svn_stringbuf_from_file(*args):
    """svn_stringbuf_from_file(char const * filename, apr_pool_t pool) -> svn_error_t"""
    # Auto-generated SWIG wrapper: delegates directly to the C binding.
    return _core.svn_stringbuf_from_file(*args)
25,149
def test_multiple_substitutions(tmp_path):
    """Render a template with multiple context variables.

    Verifies sender/recipient extraction from the headers and that every
    {{placeholder}} in the body is substituted.
    """
    template_path = tmp_path / "template.txt"
    template_path.write_text(textwrap.dedent(u"""\
        TO: {{email}}
        FROM: from@test.com

        Hi, {{name}},

        Your number is {{number}}.
    """))
    template_message = TemplateMessage(template_path)
    sender, recipients, message = template_message.render({
        "email": "myself@mydomain.com",
        "name": "Myself",
        "number": 17,
    })
    assert sender == "from@test.com"
    assert recipients == ["myself@mydomain.com"]
    plaintext = message.get_payload()
    assert "Hi, Myself," in plaintext
    assert "Your number is 17" in plaintext
25,150
def poly4(x, b, b0):
    """
    Degree-4 polynomial used for curve fitting: f(x) = b * x**4 + b0.

    Parameters
    ----------
    x: numpy.ndarray:
        x of f(x)
    b: float
        Parameter to fit
    b0 : int
        y-intercept of the curve

    Returns
    -------
    f : numpy.ndarray
        Result of f(x)
    """
    quartic = np.power(np.array(x), 4)
    return b * quartic + b0
25,151
def chkiapws06table6(printresult=True,chktol=_CHKTOL):
    """Check accuracy against IAPWS 2006 table 6.
    
    Evaluate the functions in this module and compare to reference
    values of thermodynamic properties (e.g. heat capacity, lapse rate)
    in IAPWS 2006, table 6.
    
    :arg bool printresult: If True (default) and any results are outside
        of the given tolerance, then the function name, reference value,
        result value, and relative error are printed.
    :arg float chktol: Tolerance to use when choosing to print results
        (default _CHKTOL).
    :returns: :class:`~teospy.tests.tester.Tester` instances containing
        the functions, arguments, reference values, results, and
        relative errors from the tests. The first instance involves
        derivatives of ice_g whereas the second tests the other
        thermodynamic functions.
    """
    from teospy.tests.tester import Tester
    # Three reference state points: triple point, melting point at one
    # atmosphere, and a high-pressure/low-temperature point
    fargs0 = (273.16,611.657)
    fargs1 = (273.152519,101325.)
    fargs2 = (100.,1e8)
    propfargs = [fargs0,fargs1,fargs2]
    # (d/dT, d/dp) derivative orders checked for the Gibbs function
    ders = [(0,0), (1,0), (0,1), (2,0), (1,1), (0,2)]
    
    # Tester instance for derivatives of ice_g
    derfuns = _ice_g
    derfnames = 'ice_g'
    # Derivatives change before arguments do here
    derfargs = [(der+fargs) for fargs in propfargs for der in ders]
    derargfmt = '({0:1g},{1:1g},{2:7.3f},{3:7g})'
    # Reference values transcribed from IAPWS 2006 table 6
    derrefs = [0.611784135,0.122069433940e+4,0.109085812737e-2,
        -0.767602985875e+1,0.174387964700e-6,-0.128495941571e-12,
        0.10134274069e+3,0.122076932550e+4,0.109084388214e-2,-0.767598233365e+1,
        0.174362219972e-6,-0.128485364928e-12,-0.222296513088e+6,
        0.261195122589e+4,0.106193389260e-2,-0.866333195517e+1,
        0.274505162488e-7,-0.941807981761e-13]
    header = 'Ice Gibbs energy derivatives'
    dertest = Tester(derfuns,derfargs,derrefs,derfnames,derargfmt,header=header)
    
    # Tester instance for other ice properties
    propfuns = [enthalpy,helmholtzenergy,internalenergy,entropy,cp,density,
        expansion,pcoefficient,kappa_t,kappa_s]
    propfnames = ['enthalpy','helmholtzenergy','internalenergy','entropy','cp',
        'density','expansion','pcoefficient','kappa_t','kappa_s']
    propargfmt = '({0:7.3f},{1:7g})'
    # One row of reference values per property, one column per state point
    proprefs = [
        [-0.333444253966e+6,-0.333354873637e+6,-0.483491635676e+6],
        [-0.55446875e-1,-0.918701567e+1,-0.328489902347e+6],
        [-0.333444921197e+6,-0.333465403393e+6,-0.589685024936e+6],
        [-0.122069433940e+4,-0.122076932550e+4,-0.261195122589e+4],
        [0.209678431622e+4,0.209671391024e+4,0.866333195517e+3],
        [0.916709492200e+3,0.916721463419e+3,0.941678203297e+3],
        [0.159863102566e-3,0.159841589458e-3,0.258495528207e-4],
        [0.135714764659e+7,0.135705899321e+7,0.291466166994e+6],
        [0.117793449348e-9,0.117785291765e-9,0.886880048115e-10],
        [0.114161597779e-9,0.114154442556e-9,0.886060982687e-10]
    ]
    header = 'Ice thermodynamic properties'
    proptest = Tester(propfuns,propfargs,proprefs,propfnames,propargfmt,
        header=header)
    
    # Run Tester instances and print results
    dertest.run()
    proptest.run()
    if printresult:
        dertest.printresults(chktol=chktol)
        proptest.printresults(chktol=chktol)
    return dertest, proptest
25,152
def flat(arr):
    """
    Detect flat signals (could be all zeros): True when fewer than 1%
    of the samples deviate from the mean. Empty input returns False.
    ___________________________
    """
    arr = np.array(arr)
    if arr.size == 0:
        return False
    residuals = arr - arr.mean()
    # flat == almost every residual is exactly zero
    return np.count_nonzero(residuals) < arr.size / 100
25,153
def sensitive_fields(*paths, **typed_paths):
    """
    Decorator factory marking object fields as sensitive.

    Each path must be a dotted path like "password" or "vmInfo.password";
    paths are compiled into eval-style accessor strings and stored on the
    instance under SENSITIVE_FIELD_NAME.

    NOTE: ``typed_paths`` is accepted but currently unused.
    """
    def decorate(old_init):
        def __init__(self, *args, **kwargs):
            if paths:
                accessors = [
                    "obj['" + path.replace(".", "']['") + "']"
                    for path in paths
                ]
                setattr(self, SENSITIVE_FIELD_NAME, accessors)
            old_init(self)
        return __init__
    return decorate
25,154
def test_base_url(host, scheme):
    """Test the base URL setting on init.

    NOTE(review): ``host`` appears unused in the body — presumably a
    parametrized fixture kept for collection; confirm it is still needed.
    """
    # random host keeps the test independent of any fixed endpoint
    url = f"{scheme}://{utils.random_lower_string()}"
    dri_postal = DriPostal(url)
    assert isinstance(dri_postal.service_url, AnyHttpUrl)
    assert str(dri_postal.service_url) == url
25,155
def get_timestamp_diff(diff):
    """Return the epoch timestamp in milliseconds for now +/- ``diff`` days.

    The time is truncated to whole seconds (formatted and re-parsed), so
    the result is always a multiple of 1000.
    """
    target = datetime.today() + timedelta(diff)
    stamp = target.strftime("%Y-%m-%d %H:%M:%S")
    parsed = time.strptime(stamp, "%Y-%m-%d %H:%M:%S")
    return int(time.mktime(parsed)) * 1000
25,156
def can_delete(account, bike):
    """
    Check if an account can delete a bike.
    Account must be a team member and bike not borrowed in the future.
    """
    # both conditions must hold: team membership on the bike's team,
    # and no borrow scheduled in the future
    return (team_control.is_member(account, bike.team) and
            not has_future_borrows(bike))
25,157
def run_dev( root: Optional[Path] = typer.Option(None), host: str = typer.Option("127.0.0.1"), rpc_port: int = typer.Option(3477), http_port: int = typer.Option(8000), ): """ Hot reloading local HTTP and RPC servers """ # set environment variables # commit to os.environ for HTTP/RPC processes environ.root = root environ.rpc_host = host environ.rpc_port = rpc_port environ.commit() # check working directory and the entitykb directory reload_dirs = [os.getcwd(), os.path.dirname(os.path.dirname(__file__))] http_app = "entitykb.http.dev:app" uvicorn.run( http_app, host=host, port=http_port, reload=True, reload_dirs=reload_dirs, )
25,158
def config_file():
    """
    Returns the config file
    ($HOME/.config/python-pulseaudio-profiles-trayicon/config.json).

    :return: the full path of the configuration file
    :rtype: str
    """
    base = config_dir()
    return os.path.join(base, "config.json")
25,159
def setup_directories(base_dir, fqdn, mode, uid, gid):
    """Setup directory structure needed for cert updating operations.

    Raises SetupError listing every directory that could not be prepared.
    """
    settings = {'base': base_dir, 'fqdn': fqdn}
    # base_dir is prepended and formatted along with the template tree
    tree = [base_dir] + DIR_TREE[:]
    expected_dirs = [
        (directory.format(**settings), mode, uid, gid) for directory in tree
    ]

    errors = []
    for dir_settings in expected_dirs:
        if not _setup_directory(*dir_settings):
            errors.append('Error setting up: {}'.format(dir_settings))

    if errors:
        raise SetupError('\n'.join(errors))
25,160
def get_node_types(nodes, return_shape_type = True):
    """
    Group the supplied maya nodes by node type.

    Args:
        nodes: nodes to classify
        return_shape_type: when True, transforms are classified by the
            type of their first shape instead of as 'transform'

    Returns:
        dict: dict[node_type_name] -> list of matching nodes
    """
    found_type = {}

    for node in nodes:
        node_type = cmds.nodeType(node)
        # resolve transforms to their first shape's type when requested
        if node_type == 'transform' and return_shape_type:
            shapes = get_shapes(node)
            if shapes:
                node_type = cmds.nodeType(shapes[0])
        found_type.setdefault(node_type, []).append(node)

    return found_type
25,161
def updateDistances(fileName):
    """
    Calculate and update the distance on the given CSV file.

    Parameters
    ----------
    fileName: str
        Path and name of the CSV file to process.

    Returns
    -------
    ret: bool
        Response indicating if the update was successful or not.
    """
    # Open for read+write so the file can be rewritten in place.
    # Narrow except: only I/O failures mean "could not open".
    try:
        csv_file = open(fileName, 'r+', newline='')
    except (IOError, OSError):
        return False

    # context manager guarantees the handle is closed even on error
    with csv_file:
        reader = csv.reader(csv_file, delimiter=',', quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)
        writer = csv.writer(csv_file, delimiter=',', quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)

        # Read the face data from the CSV file and recalculate the distances,
        # also building a list to later recalculate the distance gradients
        frames = []
        distances = []
        faces = OrderedDict()
        prevDist = 0
        for row in reader:
            if row[0] != 'frame':  # skip the header row
                frameNum = int(row[0])
                face = FaceData()
                face.fromList(row[1:])
                face.gradient = 0.0
                face.calculateDistance()
                faces[frameNum] = face

                # In case the face has been detected but the distance
                # calculation failed, assume the same distance as the
                # previous detected face
                if not face.isEmpty():
                    if face.distance == 0:
                        face.distance = prevDist
                    prevDist = face.distance

                # Consider for the calculation of the gradients only the
                # non-empty faces (i.e. the frames where a face was detected)
                if not face.isEmpty():
                    frames.append(frameNum)
                    distances.append(face.distance)

        # Calculate the gradients from the helper list of distances
        gradients = np.gradient(distances)
        for i, frameNum in enumerate(frames):
            faces[frameNum].gradient = gradients[i]

        # Save the face data back to the CSV file (rewrite in place)
        csv_file.truncate(0)
        csv_file.seek(0)
        writer.writerow(['frame'] + FaceData.header())
        for frameNum, face in faces.items():
            writer.writerow([frameNum] + face.toList())

    return True
25,162
def hr(*args, **kwargs):
    """
    The HTML <hr> element represents a thematic break between
    paragraph-level elements (for example, a change of scene in a story,
    or a shift of topic with a section). In previous versions of HTML,
    it represented a horizontal rule. It may still be displayed as a
    horizontal rule in visual browsers, but is now defined in semantic
    terms, rather than presentational terms.
    """
    # delegate to the generic void-element builder ("<hr>" has no children)
    return void_el('hr', *args, **kwargs)
25,163
def as_linker_option(p):
    """Return *p* formatted as an ld library-path argument.

    Falsy input (empty string, None) yields the empty string.
    """
    return '-Wl,' + p if p else ''
25,164
def split_backbone(options):
    """
    Split backbone fasta file into chunks.

    Each record is written to '<output_dir>/<prefix>-<N>.fasta'.
    Returns dictionary of backbone name -> chunk id ('<prefix>-<N>').
    """
    backbone_to_id = {}
    id_counter = 0

    # Write all backbone records to their own fasta file.
    pf = ParseFasta(options.backbone_filename)
    record = pf.getRecord()  # renamed: no longer shadows the builtin `tuple`
    while record is not None:
        print(record[0])
        chunk_id = options.prefix + '-' + str(id_counter)
        out = open(options.output_dir + '/' + chunk_id + '.fasta', 'w')
        try:
            out.write('>' + record[0] + '\n' + record[1])
        finally:
            # guarantee the handle closes even if the write fails
            out.close()
        backbone_to_id[record[0]] = chunk_id
        id_counter += 1
        record = pf.getRecord()

    return backbone_to_id
25,165
def test_hash(CmpC):
    """
    __hash__ returns different hashes for different values.

    NOTE(review): hash inequality is not strictly guaranteed for unequal
    objects in general; this asserts the expected case for CmpC.
    """
    assert hash(CmpC(1, 2)) != hash(CmpC(1, 1))
25,166
def _get_highest_tag(tags):
    """Find the highest tag from a list.

    Pass in a list of tag strings and this will return the highest
    (latest) as sorted by the pkg_resources version parser.
    """
    # key parses each tag into a comparable version object
    return max(tags, key=pkg_resources.parse_version)
25,167
def test_ipm3():
    """
    Test ipm on unique distribution.

    For the 'cat' bivariate, redundancy between the two sources about
    the target is expected to be exactly 1 bit.
    """
    d = bivariates['cat']
    red = i_pm(d, ((0,), (1,)), (2,))
    assert red == pytest.approx(1)
25,168
def papply( f, seq, pool_size=cores, callback=None ):
    """
    Apply the given function to each element of the given sequence, optionally invoking the given
    callback with the result of each application. Do so in parallel, using a thread pool no larger
    than the given size.

    :param callable f: the function to be applied

    :param Sequence seq: the input sequence

    :param int pool_size: the desired pool size, if absent the number of CPU cores will be used.
    The actual pool size may be smaller if the input sequence is small. A pool size of 0 will make
    this function emulate the apply() builtin, i.e. f (and the callback, if provided) will be
    invoked serially in the current thread.

    :param callable callback: an optional function to be invoked with the return value of f

    >>> l=[]; papply( lambda a, b: a + b, [], pool_size=0, callback=l.append ); l
    []
    >>> l=[]; papply( lambda a, b: a + b, [ (1, 2) ], pool_size=0, callback=l.append); l
    [3]
    >>> l=[]; papply( lambda a, b: a + b, [ (1, 2), (3, 4) ], pool_size=0, callback=l.append ); l
    [3, 7]

    >>> l=[]; papply( lambda a, b: a + b, [], pool_size=1, callback=l.append ); l
    []
    >>> l=[]; papply( lambda a, b: a + b, [ (1, 2) ], pool_size=1, callback=l.append); l
    [3]
    >>> l=[]; papply( lambda a, b: a + b, [ (1, 2), (3, 4) ], pool_size=1, callback=l.append ); l
    [3, 7]

    >>> l=[]; papply( lambda a, b: a + b, [], pool_size=2, callback=l.append ); l
    []
    >>> l=[]; papply( lambda a, b: a + b, [ (1, 2) ], pool_size=2, callback=l.append); l
    [3]
    >>> l=[]; papply( lambda a, b: a + b, [ (1, 2), (3, 4) ], pool_size=2, callback=l.append ); l
    [3, 7]
    """
    __check_pool_size( pool_size )
    n = len( seq )
    if n:
        if pool_size == 0:
            for args in seq:
                # f(*args) replaces the Python-2-only apply() builtin;
                # semantics are identical and it also works on Python 3
                result = f( *args )
                if callback is not None:
                    callback( result )
        else:
            with thread_pool( min( pool_size, n ) ) as pool:
                for args in seq:
                    pool.apply_async( f, args, callback=callback )
25,169
def rename_kwargs(func_name: str, kwargs: Dict, aliases: Dict):
    """
    Used to update deprecated argument names with new names.

    Throws a `TypeError` if both arguments are provided, and warns if old
    alias is used. Nothing is returned as the passed `kwargs` are modified
    directly.

    Implementation is inspired from [`StackOverflow`][stack_link].

    [stack_link]: https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias

    :param func_name: name of decorated function.
    :param kwargs: Arguments supplied to the method.
    :param aliases: Dictionary of aliases for a function's arguments.

    :raises TypeError: if both arguments are provided.
    """  # noqa: E501
    for old_alias, new_alias in aliases.items():
        if old_alias not in kwargs:
            continue
        # both the deprecated and the new name present -> ambiguous call
        if new_alias in kwargs:
            raise TypeError(
                f"{func_name} received both {old_alias} and {new_alias}"
            )
        warnings.warn(
            f"{old_alias} is deprecated; use {new_alias}",
            DeprecationWarning,
        )
        kwargs[new_alias] = kwargs.pop(old_alias)
25,170
def add_comment(request, pk):
    """
    Adds comment to the image - POST.
    Checks the user and assigns it to the comment.posted_by
    """
    form = PhotoCommentForm(request.POST)
    if form.is_valid():
        comment = form.save(commit=False)
        # attach the authenticated user before persisting
        comment.user = request.user
        comment.save()
    else:
        # NOTE(review): every invalid form is reported as a forbidden-word
        # issue — confirm that is the only validation on PhotoCommentForm
        text = 'You have used forbidden word!'
        messages.warning(request, text)
    return redirect('photo comments', pk)
25,171
def predict():
    """
    runs the three models and displays results view

    Pipeline: YOLOv5 detection -> detections-to-LaTeX -> LaTeX rendering.

    NOTE(review): os.system with interpolated config paths is
    shell-injection-prone if any path is user-controlled — consider
    subprocess.run with an argument list.
    """
    filename = request.form['filename']
    file_root = os.path.splitext(filename)[0]
    full_filename = os.path.join(app.config['STATIC_MATRIX_PATH'], filename)

    # run YOLOv5 model
    os.system(f'python models/yolov5/detect.py '
              f'--weights models/yolov5/best-2.pt '
              f'--source {app.config["STATIC_MATRIX_FOLDER"]} '
              f'--out {app.config["TEMP_FOLDER"]} '
              f'--img 416 --conf 0.4 --save-txt')

    # run toLatex model
    latex = results_to_latex(
        os.path.join(app.config['TEMP_PATH'], file_root + '.txt'),
        CLASSES)
    latex_filename = os.path.join(app.config['STATIC_MATRIX_PATH'], file_root)

    # run renderLatex model
    displaylatex(latex.replace('\n', ''), latex_filename)

    # delete temporary folder
    os.system('rm -r temp')

    return render_template('results.html',
                           latex=latex,
                           matrix_image=full_filename,
                           image_filename=filename,
                           latex_pdf=latex_filename+'.pdf')
25,172
def disable_plugin(
    ctx: typer.Context,
    plugin: str = typer.Argument(None, help="the plugin key"),
    web: bool = typer.Option(False, help="open upm in web browser after disabling plugin"),
):
    """
    Disables the specified plugin

    When no plugin key is given, it is resolved from the pom.xml of the
    current maven directory. Exits with status 1 on any failure.
    """
    if plugin is None:
        # fall back to the plugin key declared in the local pom.xml
        try:
            plugin = pathutil.get_plugin_key_from_pom()
        except FileNotFoundError:
            logging.error("Could not find the plugin you want to get the info of. Are you in a maven directory?")
            sys.exit(1)
        except pathutil.PluginKeyNotFoundError:
            logging.error("Could not find the plugin you want to get the info of. Is the plugin key set in the pom.xml?")
            sys.exit(1)
    try:
        upm = UpmApi(ctx.obj.get("base_url"))
        # False -> disable (the same endpoint enables with True)
        response = upm.enable_disable_plugin(plugin, False)
    except requests.exceptions.ConnectionError:
        logging.error("Could not connect to host - check your base-url")
        sys.exit(1)
    except Exception as exc:
        logging.error("An error occured - check your credentials")
        logging.error("%s", exc)
        sys.exit(1)
    response.print_table(False)
    if web:
        browser.open_web_upm(ctx.obj.get("base_url"))
25,173
def disable_warnings_temporarily(func):
    """Decorator that silences all warnings while *func* runs (used mainly
    during testing of old functions).

    Fixes over the previous version: the wrapped function's return value
    is now propagated, and the prior warning-filter state is restored
    (instead of being reset to "default"), so surrounding filter
    configuration is preserved. ``functools.wraps`` keeps the metadata.
    """
    import functools
    import warnings

    @functools.wraps(func)
    def inner(*args, **kwargs):
        # catch_warnings snapshots and restores the filter state
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return func(*args, **kwargs)
    return inner
25,174
def progress_bar(progress):
    """
    Generates a light bar matrix (81-char string) to display a volume /
    brightness level.

    :param progress: value between 0..1
    """
    cells = [" "] * 81
    lit = ceil(round(progress, 3) * 9)
    # light one dot per level, at the same fixed positions as before
    for level in range(lit, 0, -1):
        cells[81 - ((level - 1) * 9 + 5)] = "*"
    return "".join(cells)
25,175
def read(ctx, folder):
    """
    Get folder details EVE-NG host

    \b
    Examples:
        eve-ng folder read /path/to/folder
    """
    # \b above prevents click from rewrapping the example block
    client = get_client(ctx)
    resp = client.api.get_folder(folder)
    cli_print_output("json", resp)
25,176
def get_context_command_parameter_converters(func):
    """
    Parses the given `func`'s parameters.

    Parameters
    ----------
    func : `async-callable`
        The function used by a ``SlasherApplicationCommand``.

    Returns
    -------
    func : `async-callable`
        The converted function.
    parameter_converters : `tuple` of ``ParameterConverter``
        Parameter converters for the given `func` in order.

    Raises
    ------
    TypeError
        - If `func` is not async callable, neither cannot be instanced to async.
        - If `func` accepts keyword only parameters.
        - If `func` accepts `*args`.
        - If `func` accepts `**kwargs`.
    ValueError
        - If any parameter is not internal.
    """
    analyzer, real_analyzer, should_instance = check_command_coroutine(func)

    parameters = real_analyzer.get_non_reserved_positional_parameters()

    parameter_converters = []

    # Exactly one non-internal parameter (the context target) is allowed;
    # everything else must resolve to an internal converter.
    target_converter_detected = False

    for parameter in parameters:
        parameter_converter = create_internal_parameter_converter(parameter)

        if (parameter_converter is None):
            if target_converter_detected:
                # second non-internal parameter -> reject
                raise TypeError(f'`{real_analyzer.real_function!r}`\'s `{parameter.name}` do not refers to any of the '
                    f'expected internal parameters. Context commands do not accept any additional parameters.')
            else:
                parameter_converter = create_target_parameter_converter(parameter)
                target_converter_detected = True

        parameter_converters.append(parameter_converter)

    parameter_converters = tuple(parameter_converters)

    if should_instance:
        # the callable had to be instantiated to become async
        func = analyzer.instance()

    return func, parameter_converters
25,177
def norm(x):
    """Standardize a 1D tensor: subtract the mean, divide by the std."""
    centered = x - x.mean()
    return centered / x.std()
25,178
def honest_propose(validator, known_items): """ Returns an honest `SignedBeaconBlock` as soon as the slot where the validator is supposed to propose starts. Checks whether a block was proposed for the same slot to avoid slashing. Args: validator: Validator known_items (Dict): Known blocks and attestations received over-the-wire (but perhaps not included yet in `validator.store`) Returns: Optional[SignedBeaconBlock]: Either `None` if the validator decides not to propose, otherwise a `SignedBeaconBlock` containing attestations """ # Not supposed to propose for current slot if not validator.data.current_proposer_duties[validator.data.slot % SLOTS_PER_EPOCH]: return None # Already proposed for this slot if validator.data.last_slot_proposed == validator.data.slot: return None # honest propose return honest_propose_base(validator, known_items)
25,179
def concat_pic_pdf(src, dest=None, line_max=2, recursive=False, alpha=1, vertical=False, row_max=None):
    """Concatenate the images under *src* into *dest* (a PDF file):
    *line_max* images per row, scale factor *alpha*; *vertical* means the
    images are treated as portrait-oriented. When *row_max* is set, a
    separate PDF is emitted every *row_max* rows."""
    num = 0
    dest = create_dest(dest, '.pdf')
    concat_tmp = os.path.join(src, 'concat')
    import shutil
    shutil.rmtree(concat_tmp, ignore_errors=True)
    os.makedirs(concat_tmp)
    ech(f'concat_pic: {src} -> {concat_tmp} -> {dest}', 'green')
    ech("src paths are:", 'yellow')
    all_path = get_file_list(src, recursive)
    print(all_path)
    N = len(all_path)
    assert N > 0
    # Fix one target width: the mean of the source widths, scaled by alpha
    width = 0
    for i in range(N):
        w, _ = get_size(all_path[i], vertical)
        width += w
    width = int(width * alpha/N)
    # The height of each row follows the first image of that row
    for i in tqdm(range(0, N, line_max)):
        w, h = get_size(all_path[i], vertical)
        h = int(width * h/w)
        # print(f"the {i}th height is {h}")
        toImage = Image.new('RGBA', (width * line_max, h))
        for j in range(line_max):
            # Open the next image from the absolute-path list
            pic_fole_head = Image.open(all_path[num])
            w, h = get_size(all_path[num], vertical)
            # Resize the image to the shared row width
            tmppic = pic_fole_head.resize((width, h))
            # Top-left paste coordinate for slot j in this row:
            # (0, 0), (width, 0), (2*width, 0), ...
            loc = (int(j * width), 0)
            toImage.paste(tmppic, loc)
            num = num + 1
            if num >= N:
                break
        toImage.save(f'{concat_tmp}/{i}.png')
        row = int(i/line_max)
        if row_max and (row + 1) % row_max == 0:
            # flush the accumulated rows into a numbered partial PDF
            jpg2pdf(concat_tmp, dest.replace('.pdf', f'-{row}.pdf'))
            shutil.rmtree(concat_tmp, ignore_errors=True)
            os.makedirs(concat_tmp)
    if os.listdir(concat_tmp):
        jpg2pdf(concat_tmp, dest)
25,180
def formatKwargsKey(key):
    """
    Replace underscores with hyphens, e.g. 'foo_bar' -> 'foo-bar'.

    NOTE(review): the previous docstring claimed 'fooBar_baz' ->
    'foo-bar-baz', but camelCase is NOT split by this code
    ('fooBar_baz' -> 'fooBar-baz'). Confirm the intended contract with
    callers before changing either the docs or the implementation.
    """
    key = re.sub(r'_', '-', key)
    return key
25,181
def mktimestamp(dt):
    """
    Prepares a datetime for sending to HipChat.

    Naive datetimes are assumed to be UTC; returns the ISO timestamp and
    the timezone name.
    """
    aware = dt if dt.tzinfo is not None else dt.replace(tzinfo=dateutil.tz.tzutc())
    return aware.isoformat(), aware.tzinfo.tzname(aware)
25,182
def helicsFederateEnterExecutingModeIterativeAsync(fed: HelicsFederate, iterate: HelicsIterationRequest):
    """
    Request an iterative entry to the execution mode.

    This call allows for finer grain control of the iterative process than
    `helics.helicsFederateRequestTime`. It takes a time and iteration request,
    and returns a time and iteration status.

    **Parameters**

    - **`fed`** - The federate to make the request of.
    - **`iterate`** - `helics.HelicsIterationRequest`, i.e. the requested iteration mode.
    """
    f = loadSym("helicsFederateEnterExecutingModeIterativeAsync")
    err = helicsErrorInitialize()
    f(fed.handle, HelicsIterationRequest(iterate), err)
    # non-zero error code from the C layer is surfaced as a HelicsException
    if err.error_code != 0:
        raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
25,183
def test_actor_nisscan(monkeypatch, pkgs_installed, fill_conf_file, fill_ypserv_dir): """ Parametrized helper function for test_actor_* functions. Run the actor feeded with our mocked functions and assert produced messages according to set arguments. Parameters: pkgs_installed (touple): installed pkgs fill_conf_file (bool): not default ypbind config file fill_ypserv_dir (bool): not default ypserv dir content """ # Store final list of configured NIS packages configured_pkgs = [] # Fill ypbind config yp_conf_content = YPBIND_CONFIGURED_CONF if fill_conf_file else YPBIND_DEFAULT_CONF # Fill ypserv dir files yp_dir_content = (nisscan.YPSERV_DEFAULT_FILES + ('example.com',) if fill_ypserv_dir else nisscan.YPSERV_DEFAULT_FILES) # Mock 'isfile' & 'isdir' based on installed pkgs mocked_isfile = 'ypbind' in pkgs_installed mocked_isdir = 'ypserv' in pkgs_installed mock_config = mock.mock_open(read_data=yp_conf_content) with mock.patch("builtins.open" if six.PY3 else "__builtin__.open", mock_config): curr_actor_mocked = CurrentActorMocked() monkeypatch.setattr(api, 'current_actor', curr_actor_mocked) monkeypatch.setattr(api, "produce", produce_mocked()) monkeypatch.setattr(nisscan.os, 'listdir', lambda dummy: yp_dir_content) monkeypatch.setattr(nisscan.os.path, 'isfile', lambda dummy: mocked_isfile) monkeypatch.setattr(nisscan.os.path, 'isdir', lambda dummy: mocked_isdir) # Executed actor feeded with mocked functions nisscan.NISScanLibrary().process() # Filter NIS pkgs filtered_installed_pkgs = [x for x in pkgs_installed if x in nisscan.PACKAGES_NAMES] # Create correct list of pkgs for assert check for pkg in filtered_installed_pkgs: if (pkg == 'ypserv' and fill_ypserv_dir) or (pkg == 'ypbind' and fill_conf_file): configured_pkgs.append(pkg) # Sort NISConfig objects nisconf_template = set(NISConfig(nis_not_default_conf=configured_pkgs).nis_not_default_conf) nisconf_result = set(api.produce.model_instances[0].nis_not_default_conf) assert nisconf_template == nisconf_result
25,184
def test_hashing():
    """Check that camera-geometry hashes are computed consistently."""
    geometries = [
        CameraGeometry.from_name(camera)
        for camera in ("LSTCam", "LSTCam", "ASTRICam")
    ]
    # The duplicate LSTCam must collapse, leaving two distinct geometries.
    assert len(set(geometries)) == 2
25,185
def load_and_assign_npz_dict(name='model.npz', sess=None):
    """Restore the parameters saved by ``tl.files.save_npz_dict()``.

    Each array in the .npz is keyed by a variable scope name; the matching
    TensorFlow global variable is looked up and assigned in *sess*.

    Parameters
    ----------
    name : a string
        The name of the .npz file.
    sess : Session
        Live TensorFlow session to run the assign ops in (required).

    Returns
    -------
    False when the file does not exist; otherwise None after restoring.
    """
    assert sess is not None
    if not os.path.exists(name):
        print("[!] Load {} failed!".format(name))
        return False

    params = np.load(name)
    # Duplicate keys would make the restore ambiguous.
    if len(params.keys()) != len(set(params.keys())):
        raise Exception("Duplication in model npz_dict %s" % name)
    ops = list()
    for key in params.keys():
        try:
            # tensor = tf.get_default_graph().get_tensor_by_name(key)
            # varlist = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=key)
            varlist = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=key)
            if len(varlist) > 1:
                raise Exception("[!] Multiple candidate variables to be assigned for name %s" % key)
            elif len(varlist) == 0:
                # Treated the same as a genuine missing-tensor lookup below.
                raise KeyError
            else:
                ops.append(varlist[0].assign(params[key]))
                print("[*] params restored: %s" % key)
        except KeyError:
            print("[!] Warning: Tensor named %s not found in network." % key)

    # All assigns are collected first and run in a single session call.
    sess.run(ops)
    print("[*] Model restored from npz_dict %s" % name)
25,186
def MakeProto(python_out):
    """Compile any stale .proto files under this script's tree into *python_out*.

    A .proto is recompiled when its generated _pb2.py is missing or older
    than the source.  Requires a protoc 3.3.0 binary, located via the
    PROTOC environment variable (default: "protoc" on PATH).

    Args:
      python_out: Directory that receives the generated _pb2.py files.

    Raises:
      RuntimeError: If protoc cannot be launched or reports the wrong version.
    """
    # Start running from one directory above the grr directory which is found
    # by this script's location as __file__.
    cwd = os.path.dirname(os.path.abspath(__file__))

    # Find all the .proto files that need (re)compiling.
    protos_to_compile = []
    for (root, _, files) in os.walk(cwd):
        for filename in files:
            full_filename = os.path.join(root, filename)
            if not full_filename.endswith(".proto"):
                continue
            proto_stat = os.stat(full_filename)

            compiled_name = full_filename.rsplit(".", 1)[0] + "_pb2.py"
            # BUG FIX: previously consulted the module-global ``args.python_out``,
            # ignoring this function's parameter; honor ``python_out`` instead.
            pb2_path = os.path.join(python_out,
                                    os.path.relpath(compiled_name, cwd))
            try:
                pb2_stat = os.stat(pb2_path)
                if pb2_stat.st_mtime >= proto_stat.st_mtime:
                    continue
            except (OSError, IOError):
                # Missing _pb2.py -> needs compiling.
                pass

            protos_to_compile.append(full_filename)

    if not protos_to_compile:
        return

    # Find the protoc compiler.
    protoc = os.environ.get("PROTOC", "protoc")
    try:
        # BUG FIX: check_output returns bytes on Python 3; decode before the
        # substring test below, which otherwise raises TypeError.
        output = subprocess.check_output([protoc, "--version"]).decode(
            "utf-8", "replace")
    except (IOError, OSError):
        # BUG FIX: the message was passed as two exception args instead of
        # being %-formatted.
        raise RuntimeError("Unable to launch %s protoc compiler. Please "
                           "set the PROTOC environment variable." % protoc)

    if "3.3.0" not in output:
        raise RuntimeError("Incompatible protoc compiler detected. "
                           "We need 3.3.0 not %s" % output)

    for proto in protos_to_compile:
        logging.info("Compiling %s", proto)
        # The protoc compiler is too dumb to deal with full paths - it expects
        # a relative path from the current working directory.
        subprocess.check_call(
            [
                protoc,
                # Write the python files next to the .proto files.
                "--python_out",
                python_out,
                # Standard include paths.
                # We just bring google/proto/descriptor.proto with us to make
                # it easier to install.
                "--proto_path=.",
                "--proto_path=grr",
                "--proto_path=grr/proto",
                os.path.relpath(proto, cwd)
            ],
            cwd=cwd)
25,187
def check_dataset(dataset):
    """Validate a dataset name.

    Raises TypeError when *dataset* is not a string, and ValueError when it
    is not one of the names reported by get_available_datasets().
    """
    if not isinstance(dataset, str):
        raise TypeError("Provide 'dataset' name as a string")
    if dataset not in get_available_datasets():
        raise ValueError("Provide valid dataset. get_available_datasets()")
25,188
def build_accuracy(logits, labels, name_scope='accuracy'):
    """Add an accuracy node to the graph.

    Given 'logits', a probability distribution over the output classes, and
    'labels', a one-hot vector, the node computes the fraction of rows
    where the argmax of both agrees.
    """
    with tf.name_scope(name_scope):
        hits = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
        return tf.reduce_mean(tf.cast(hits, tf.float32))
25,189
def generate_markdown_files(domain, mitigations, side_nav_data, side_nav_mobile_data, notes):
    """Responsible for generating shared data between all mitigation pages and
    begins mitigation markdown generation.

    Writes the per-domain mitigation overview markdown, then delegates one
    markdown file per mitigation to generate_mitigation_md().  Returns True
    when any mitigations were processed, False otherwise.
    """
    data = {}
    if mitigations:
        # assumes domain looks like 'enterprise-attack' -> 'enterprise'; TODO confirm
        data['domain'] = domain.split("-")[0]
        data['mitigation_list_len'] = str(len(mitigations))
        data['side_menu_data'] = side_nav_data
        data['side_menu_mobile_view_data'] = side_nav_mobile_data
        data['mitigation_table'] = get_mitigation_table_data(mitigations)
        # Render the domain template, then append the raw JSON payload the
        # page generator consumes.
        subs = mitigations_config.mitigation_domain_md.substitute(data)
        subs = subs + json.dumps(data)
        with open(os.path.join(mitigations_config.mitigation_markdown_path, data['domain'] + "-mitigations.md"), "w", encoding='utf8') as md_file:
            md_file.write(subs)
        # Generates the markdown files to be used for page generation;
        # *notes* is only forwarded, never read here.
        for mitigation in mitigations:
            generate_mitigation_md(mitigation, domain, side_nav_data, side_nav_mobile_data, notes)
        return True
    else:
        return False
25,190
def compute_mp_av(mp, index, m, df, k):
    """
    Given a matrix profile, a matrix profile index, the window size and the
    DataFrame that contains the timeseries, create a matrix profile object and
    add the corrected matrix profile after applying the complexity annotation
    vector.

    Uses an extended version of the apply_av function from matrixprofile
    foundation that is compatible with multi-dimensional timeseries.
    The implementation can be found here
    (https://github.com/MORE-EU/matrixprofile/blob/master/matrixprofile/transform.py)

    Args:
        mp: A matrix profile.
        index: The matrix profile index that accompanies the matrix profile.
        m: The subsequence window size.
        df: The timeseries that was used to calculate the matrix profile.
        k: Unused; retained for interface compatibility.

    Return:
        Updated profile with an annotation vector.
    """
    # BUG FIX: np.nan_to_num's second positional argument is ``copy``; passing
    # np.nanmax(mp) there was silently treated as copy=True and NaNs became
    # 0.0.  Use the ``nan=`` keyword so NaNs really become the profile max.
    mp = np.nan_to_num(mp, nan=np.nanmax(mp))

    profile = to_mpf(mp, index, m, df)
    # Apply the complexity annotation vector to correct the profile.
    av_type = 'complexity'
    profile = mpf.transform.apply_av(profile, av_type)
    return profile
25,191
def read_burris(fh):
    """
    Read Burris formatted file, from given open file handle.

    Accepts comma or tab-separated files.  Lines may carry 15 fields (no
    meter operator) or 16 fields (with operator); header rows starting with
    "Station ID" or "Station" are skipped.

    Parameters
    ----------
    fh : TextIOWrapper
        open file handle

    Returns
    -------
    ChannelList

    Raises
    ------
    IndexError, ValueError
        Re-raised from a bad data line, with ``.i`` (1-based line number)
        and ``.line`` (original text) attached for the caller.
    """
    all_survey_data = ChannelList()

    for i, orig_line in enumerate(fh, 1):
        try:
            line = orig_line.strip()
            # Prefer comma, then tab, then whitespace splitting; header rows
            # are skipped in either format.
            if line.find(",") != -1:
                vals_temp = line.split(",")
                if vals_temp[0] == "Station ID" or vals_temp[0] == "Station":
                    continue
            elif line.find("\t") != -1:
                vals_temp = line.split("\t")
            else:
                vals_temp = line.split()
            if vals_temp[0] == "Station ID" or vals_temp[0] == "Station":
                continue

            # Map field names to column numbers for the two line layouts.
            if len(vals_temp) == 15:  # no meter operator specified
                (
                    c_station,
                    c_meter,
                    c_date,
                    c_time,
                    c_grav,
                    c_dial,
                    c_feedback,
                    c_tide,
                    c_tilt,
                    _,
                    _,
                    c_height,
                    c_elev,
                    c_lat,
                    c_long,
                ) = range(
                    15
                )  # columns 0-14
                all_survey_data.oper.append("None")
            else:  # 16 values, includes meter operator.
                # Numbers are columns in the imported file
                (
                    c_station,
                    c_oper,
                    c_meter,
                    c_date,
                    c_time,
                    c_grav,
                    c_dial,
                    c_feedback,
                    c_tide,
                    c_tilt,
                    _,
                    _,
                    c_height,
                    c_elev,
                    c_lat,
                    c_long,
                ) = range(
                    16
                )  # columns 0-15
                all_survey_data.oper.append(vals_temp[c_oper])

            # Dates may be '/'- or '-'-separated; reorder so the 4-digit
            # year ends up first.
            if line.find("/") != -1:
                date_temp = vals_temp[c_date].split("/")
            elif line.find("-") != -1:
                date_temp = vals_temp[c_date].split("-")
            else:
                date_temp = []
            if int(date_temp[2]) > 999:
                date_temp = [date_temp[2], date_temp[0], date_temp[1]]
            elif int(date_temp[0]) > 999:
                date_temp = [date_temp[0], date_temp[1], date_temp[2]]
            # Else raise date error

            time_temp = vals_temp[c_time].split(":")

            # fill object properties:
            all_survey_data.station.append(vals_temp[c_station].strip())
            all_survey_data.elev.append(float(vals_temp[c_elev]))
            all_survey_data.height.append(float(vals_temp[c_height]))
            all_survey_data.lat.append(float(vals_temp[c_lat]))
            all_survey_data.long.append(float(vals_temp[c_long]))
            # remove Earth tide correction; it's added in using the @grav property
            all_survey_data.raw_grav.append(
                float(vals_temp[c_grav]) * 1000.0 - float(vals_temp[c_tide]) * 1000.0
            )
            all_survey_data.tare.append(0)
            all_survey_data.etc.append(float(vals_temp[c_tide]) * 1000.0)
            all_survey_data.meter_etc.append(float(vals_temp[c_tide]) * 1000.0)
            all_survey_data.dial.append(float(vals_temp[c_dial]))
            all_survey_data.feedback.append(float(vals_temp[c_feedback]))
            all_survey_data.sd.append(-999)  # Burris doesn't output SD, tiltx, tilty
            all_survey_data.meter.append(vals_temp[c_meter])
            all_survey_data.tiltx.append(float(vals_temp[c_tilt]) * 1000.0)
            all_survey_data.tilty.append(0.0)
            all_survey_data.temp.append(0.0)
            all_survey_data.dur.append(5)
            all_survey_data.rej.append(5)
            all_survey_data.t.append(
                date2num(
                    dt.datetime(
                        int(date_temp[0]),
                        int(date_temp[1]),
                        int(date_temp[2]),
                        int(time_temp[0]),
                        int(time_temp[1]),
                        int(time_temp[2]),
                    )
                )
            )
            all_survey_data.keepdata.append(1)
        except (IndexError, ValueError) as e:
            # Annotate the exception with the offending line for the caller.
            logging.exception("Error loading Burris file at line %d", i)
            logging.info("LINE: %s", line)
            e.i = i
            e.line = orig_line
            raise e
    all_survey_data.meter_type = "Burris"
    return all_survey_data
25,192
def _remove_invalid_characters(file_name): """Removes invalid characters from the given file name.""" return re.sub(r'[/\x00-\x1f]', '', file_name)
25,193
def get_ops():
    """Build the opcode name -> value dictionary.

    Opcode values are the positions of the names in the canonical list
    below, so the mapping is stable as long as the list order is.

    Returns
    -------
    dict
        Mapping of opcode name (str) to opcode number (int).
    """
    names = ["EOF", "ADD", "SUB", "MUL", "DIV", "POW", "BITAND", "BITOR", "CMP", "GET",
             "SET", "NUMBER", "STRING", "GGET", "GSET", "MOVE", "DEF", "PASS",
             "JUMP", "CALL", "RETURN", "IF", "DEBUG", "EQ", "LE", "LT", "DICT",
             "LIST", "NONE", "LEN", "LINE", "PARAMS", "IGET", "FILE", "NAME",
             "NE", "HAS", "RAISE", "SETJMP", "MOD", "LSH", "RSH", "ITER", "DEL",
             "REGS", "BITXOR", "IFN", "NOT", "BITNOT"]
    # enumerate() is O(n); the old loop called list.index() per item, O(n^2).
    return {name: value for value, name in enumerate(names)}
25,194
def load_actions(
    file_pointer, file_metadata, target_adim, action_mismatch, impute_autograsp_action
):
    """Load actions from a file given metadata and hyperparameters

    Inputs:
        file_pointer : file object
        file_metadata : file metadata row (Pandas)
        target_adim : dimensionality of action vector
        action_mismatch : indicator variable (ACTION_MISMATCH) to handle action length mismatches
        impute_autograsp_action : boolean flag indicating to impute action dim if missing in primative:"autograsp"
    Outputs:
        actions : np.array((T-1, action_dims))
    """
    a_T, adim = file_metadata["action_T"], file_metadata["adim"]
    if target_adim == adim:
        # Stored actions already have the requested dimensionality.
        return file_pointer["policy"]["actions"][:]
    elif (
        target_adim == adim + 1
        and impute_autograsp_action
        and file_metadata["primitives"] == "autograsp"
    ):
        # Synthesize the missing gripper column: for each step, emit the
        # high bound when the next state's last dim is above the midpoint
        # of the recorded bounds, otherwise the low bound.
        action_append, old_actions = (
            np.zeros((a_T, 1)),
            file_pointer["policy"]["actions"][:],
        )
        next_state = file_pointer["env"]["state"][:][1:, -1]
        high_val, low_val = (
            file_metadata["high_bound"][-1],
            file_metadata["low_bound"][-1],
        )
        midpoint = (high_val + low_val) / 2.0

        for t, s in enumerate(next_state):
            if s > midpoint:
                action_append[t, 0] = high_val
            else:
                action_append[t, 0] = low_val
        return np.concatenate((old_actions, action_append), axis=-1)
    elif adim < target_adim and (action_mismatch & ACTION_MISMATCH.PAD_ZERO):
        # Too few dims: right-pad with zeros up to target_adim.
        pad = np.zeros((a_T, target_adim - adim), dtype=np.float32)
        return np.concatenate((file_pointer["policy"]["actions"][:], pad), axis=-1)
    elif adim > target_adim and (action_mismatch & ACTION_MISMATCH.CLEAVE):
        # Too many dims: truncate to the first target_adim columns.
        return file_pointer["policy"]["actions"][:][:, :target_adim]
    else:
        raise ValueError(
            "file adim - {}, target adim - {}, pad behavior - {}".format(
                adim, target_adim, action_mismatch
            )
        )
25,195
def send_message(hookurl: str, text: str) -> int:
    """
    Post *text* to a Teams channel webhook and report the HTTP status.

    parameters
    ----------
    hookurl : str
        URL for the hook to the Teams' channel.
    text : str
        text to send.

    returns
    -------
    int
        HTTP status from the sent message.
    """
    card = pymsteams.connectorcard(hookurl)
    card.text(text)
    card.send()
    return card.last_http_status.status_code
25,196
def np_fft_irfftn(a, *args, **kwargs):
    """Numpy fft.irfftn wrapper for Quantity objects.

    The dimension is stripped before the transform and reattached to the
    result afterwards.
    """
    raw = np.fft.irfftn(a.value, *args, **kwargs)
    return Quantity(raw, a.dimension)
25,197
def get_codec_options() -> CodecOptions:
    """
    Build the :class:`CodecOptions` carrying the full flag type registry
    for use with ``pymongo``.

    :return: `CodecOptions` to be used from `pymongo`
    """
    registry = TypeRegistry(type_registry)
    return CodecOptions(type_registry=registry)
25,198
def get_frame_lims(x_eye, y_eye, x_nose, y_nose, view, vertical_align='eye'): """Automatically compute the crop parameters of a view using the eye and nose and reference. Note that horizontal/vertical proportions are currently hard-coded. Parameters ---------- x_eye : float x position of the eye y_eye : float y position of the eye x_nose : float x position of the nose y_nose : float y position of the nose view : str 'left' | 'right' vertical_align : str defines which feature controls the vertical alignment 'eye' | 'nose' Returns ------- tuple - xmin (float) - xmax (float) - ymin (float) - ymax (float) """ # horizontal proportions edge2nose = 0.02 nose2eye = 0.33 eye2edge = 0.65 # vertical proportions eye2top = 0.10 eye2bot = 0.90 nose2top = 0.25 nose2bot = 0.75 # horizontal calc nose2eye_pix = np.abs(x_eye - x_nose) edge2nose_pix = edge2nose / nose2eye * nose2eye_pix eye2edge_pix = eye2edge / nose2eye * nose2eye_pix total_x_pix = np.round(nose2eye_pix + edge2nose_pix + eye2edge_pix) if view == 'left': xmin = int(x_nose - edge2nose_pix) xmax = int(x_eye + eye2edge_pix) elif view == 'right': xmin = int(x_eye - eye2edge_pix) xmax = int(x_nose + edge2nose_pix) else: raise Exception # vertical calc (assume we want a square image out) if vertical_align == 'eye': # based on eye eye2top_pix = eye2top * total_x_pix eye2bot_pix = eye2bot * total_x_pix ymin = int(y_eye - eye2top_pix) ymax = int(y_eye + eye2bot_pix) else: # based on nose nose2top_pix = nose2top * total_x_pix nose2bot_pix = nose2bot * total_x_pix ymin = int(y_nose - nose2top_pix) ymax = int(y_nose + nose2bot_pix) return xmin, xmax, ymin, ymax
25,199