content
stringlengths
22
815k
id
int64
0
4.91M
def getHRLanguages(fname, hrthreshold=0):
    """Return the set of high-resource language codes listed in *fname*.

    Each line of the file holds four whitespace-separated fields:
    long name, ISO 639-3 code, ISO 639-1 code, and the number of
    transliteration pairs (file created using wc -l in the wikidata folder).

    :param fname: the name of the file containing filesizes
    :param hrthreshold: how big a set of transliteration pairs needs to be
        to be considered high resource (exclusive lower bound)
    :return: a set of language names (in ISO 639-3 format?)
    """
    hrlangs = set()
    with open(fname) as fs:
        for line in fs:
            # Fields: long name, ISO 639-3, ISO 639-1, pair count.
            # Only the ISO 639-3 code and the count are used; `_` avoids
            # shadowing the (Python 2) builtin `long` as the original did.
            _, iso639_3, _, size = line.strip().split()
            if int(size) > hrthreshold:
                hrlangs.add(iso639_3)
    return hrlangs
33,000
def test_model_with_single_repo_is_valid():
    """ Model may have only one OS repository and be valid """
    os_repo = AutoinstallMachineModel.OsRepository(
        'os-repo', 'http://example.com/os', '/kernel', '/initrd',
        None, 'os', 'Default OS repo')
    model = AutoinstallMachineModel(
        DEFAULT_OS, [os_repo], OS_TEMPLATE, INST_TEMPLATE,
        [], [], SYSTEM_LP_DASD, CREDS)

    # validation must not raise
    model.validate()

    # mandatory pieces are present...
    assert model.operating_system
    assert model.os_repos
    assert model.template
    # ...and no extra packages sneaked in
    assert not model.package_repos
33,001
def test_preorder():
    """PreOrderIter."""
    # Build the fixture tree from (name, parent-name) pairs.
    nodes = {}
    for name, parent in [("f", None), ("b", "f"), ("a", "b"), ("d", "b"),
                         ("c", "d"), ("e", "d"), ("g", "f"), ("i", "g"),
                         ("h", "i")]:
        nodes[name] = Node(name, parent=nodes.get(parent))
    f, b, a, d, c, e, g, i, h = (nodes[n] for n in "fbadcegih")

    eq_(list(PreOrderIter(f)), [f, b, a, d, c, e, g, i, h])
    eq_(list(PreOrderIter(f, maxlevel=0)), [])
    eq_(list(PreOrderIter(f, maxlevel=3)), [f, b, a, d, g, i])
    eq_(list(PreOrderIter(f, filter_=lambda n: n.name not in ('e', 'g'))),
        [f, b, a, d, c, i, h])
    eq_(list(PreOrderIter(f, stop=lambda n: n.name == 'd')), [f, b, a, g, i, h])

    # Manual iteration: consume two items, then the rest.
    it = PreOrderIter(f)
    eq_(next(it), f)
    eq_(next(it), b)
    eq_(list(it), [a, d, c, e, g, i, h])
33,002
def _wrap(func, *args, **kwargs):
    """Call *func* with decoded arguments and return an encoded result.

    Arguments are decoded before the call; the return value (or each of
    its elements for list/tuple/set results) is encoded afterwards. An
    MSelectionList result is first expanded to its selection strings.
    """
    def _try(converter, obj):
        # Best effort: fall back to the untouched object on any failure.
        try:
            return converter(obj)
        except BaseException:
            return obj

    # Decode positional and keyword arguments.
    decoded_args = [_try(decode, value) for value in args]
    decoded_kwargs = {name: _try(decode, value) for name, value in kwargs.items()}

    result = func(*decoded_args, **decoded_kwargs)

    # Expand Maya selection lists into plain string tuples.
    if isinstance(result, OpenMaya.MSelectionList):
        result = result.getSelectionStrings()

    # Encode the result, preserving the container type where applicable.
    if isinstance(result, _STRING_TYPES):
        return _try(encode, result)
    if isinstance(result, (list, tuple, set)):
        return type(result)(_try(encode, item) for item in result)
    return result
33,003
def make_id_graph(xml):
    """
    Make an undirected graph with CPHD identifiers as nodes and edges from correspondence and hierarchy.

    Nodes are named as {xml_path}<{id}, e.g. /Data/Channel/Identifier<Ch1
    There is a single "Data" node formed from the Data branch root that signifies
    data that can be read from the file

    Args
    ----
    xml: `lxml.etree.ElementTree.Element`
        Root CPHD XML node

    Returns
    -------
    id_graph: `networkx.Graph`
        Undirected graph
        * nodes: Data node, CPHD identifiers
        * edges: Parent identifiers to child identifiers; corresponding identifiers across XML branches
    """
    id_graph = nx.Graph()

    # Add one "{path}<{id}" node per identifier element found at xml_path.
    def add_id_nodes_from_path(xml_path):
        id_graph.add_nodes_from(["{}<{}".format(xml_path, n.text)
                                 for n in xml.findall('.' + xml_path)])

    # Same, but also connect each identifier node to its branch root
    # (first path component, e.g. "Data").
    def add_id_nodes_from_path_with_connected_root(xml_path):
        root_node = xml_path.split('/')[1]
        id_graph.add_edges_from(zip(itertools.repeat(root_node),
                                    ["{}<{}".format(xml_path, n.text)
                                     for n in xml.findall('.' + xml_path)]))

    # A node name is "{path}<{id}"; the id is everything after the last '<'.
    def get_id_from_node_name(node_name):
        return node_name.split('<')[-1]

    # Add an edge between any node under path_a and node under path_b
    # that carry the same identifier value.
    def connect_matching_id_nodes(path_a, path_b):
        all_nodes = list(id_graph.nodes)
        all_a = {get_id_from_node_name(x): x for x in all_nodes if x.split('<')[0] == path_a}
        all_b = {get_id_from_node_name(x): x for x in all_nodes if x.split('<')[0] == path_b}
        for k in set(all_a).intersection(all_b):
            id_graph.add_edge(all_a[k], all_b[k])

    # Convenience: add nodes for both paths, then link matching ids.
    def add_and_connect_id_nodes(path_a, path_b):
        add_id_nodes_from_path(path_a)
        add_id_nodes_from_path(path_b)
        connect_matching_id_nodes(path_a, path_b)

    # Link each parent identifier to the identifier values of its child
    # elements (hierarchy edges).
    def add_and_connect_children(parent_path, parent_id_name, children_paths):
        for parent in xml.findall('.' + parent_path):
            parent_id = parent.findtext(parent_id_name)
            for child_path in children_paths:
                for child in parent.findall('.' + child_path):
                    id_graph.add_edge('{}/{}<{}'.format(parent_path, parent_id_name, parent_id),
                                      '{}/{}<{}'.format(parent_path, child_path, child.text))

    # Data branch identifiers hang off their branch-root node.
    add_id_nodes_from_path_with_connected_root('/Data/Channel/Identifier')
    add_id_nodes_from_path_with_connected_root('/Data/SupportArray/Identifier')

    # Channel parameters reference dwell, antenna and TxRcv identifiers.
    channel_children = ['DwellTimes/CODId', 'DwellTimes/DwellId']
    channel_children += ['Antenna/'+ident for ident in ('TxAPCId', 'TxAPATId', 'RcvAPCId', 'RcvAPATId')]
    channel_children += ['TxRcv/TxWFId', 'TxRcv/RcvId']
    add_and_connect_children('/Channel/Parameters', 'Identifier', channel_children)
    connect_matching_id_nodes('/Data/Channel/Identifier', '/Channel/Parameters/Identifier')

    # Support arrays referenced from the Data branch.
    add_and_connect_id_nodes('/Data/SupportArray/Identifier', '/SupportArray/IAZArray/Identifier')
    add_and_connect_id_nodes('/Data/SupportArray/Identifier', '/SupportArray/AntGainPhase/Identifier')
    add_and_connect_id_nodes('/Data/SupportArray/Identifier', '/SupportArray/AddedSupportArray/Identifier')

    # Dwell-time cross references.
    add_and_connect_id_nodes('/Channel/Parameters/DwellTimes/CODId', '/Dwell/CODTime/Identifier')
    add_and_connect_id_nodes('/Channel/Parameters/DwellTimes/DwellId', '/Dwell/DwellTime/Identifier')

    # Antenna hierarchy and cross references.
    add_and_connect_id_nodes('/Antenna/AntCoordFrame/Identifier', '/Antenna/AntPhaseCenter/ACFId')
    add_and_connect_children('/Antenna/AntPattern', 'Identifier', ('GainPhaseArray/ArrayId', 'GainPhaseArray/ElementId'))
    add_and_connect_children('/Antenna/AntPhaseCenter', 'Identifier', ('ACFId',))
    add_and_connect_id_nodes('/Channel/Parameters/Antenna/TxAPCId', '/Antenna/AntPhaseCenter/Identifier')
    add_and_connect_id_nodes('/Channel/Parameters/Antenna/TxAPATId', '/Antenna/AntPattern/Identifier')
    add_and_connect_id_nodes('/Channel/Parameters/Antenna/RcvAPCId', '/Antenna/AntPhaseCenter/Identifier')
    add_and_connect_id_nodes('/Channel/Parameters/Antenna/RcvAPATId', '/Antenna/AntPattern/Identifier')
    connect_matching_id_nodes('/SupportArray/AntGainPhase/Identifier', '/Antenna/AntPattern/GainPhaseArray/ArrayId')
    connect_matching_id_nodes('/SupportArray/AntGainPhase/Identifier', '/Antenna/AntPattern/GainPhaseArray/ElementId')

    # TxRcv cross references.
    add_and_connect_id_nodes('/Channel/Parameters/TxRcv/TxWFId', '/TxRcv/TxWFParameters/Identifier')
    add_and_connect_id_nodes('/Channel/Parameters/TxRcv/RcvId', '/TxRcv/RcvParameters/Identifier')
    return id_graph
33,004
def LOG_INFO(msg):
    """ print information with green color """
    green, reset = '\033[32m', '\033[0m'
    print(green + msg + reset)
33,005
def aes_base64_encrypt(data, key):
    """
    @summary:
        1. pkcs7padding
        2. aes encrypt
        3. base64 encrypt
    @return: string
    """
    # NOTE(review): AES.new(key) with no explicit mode/IV defaults to ECB
    # mode in PyCrypto, which leaks plaintext patterns -- confirm the
    # decrypting side expects ECB before changing this.
    cipher = AES.new(key)
    # _pkcs7padding pads `data` out to the AES block size before encrypting.
    return base64.b64encode(cipher.encrypt(_pkcs7padding(data)))
33,006
def write_results(prediction, confidence, num_classes, nms_thresh = 0.4):
    """
    @prediction neural network output
    @confidence objectness threshold
    @nms_conf non maximum suppression confidence
    @description Based on the prediction confidence and the classes, the
        final prediction of the network is returned, after post-processing
        with non-maximum suppression to obtain the most precise prediction.
    """
    # Keep only bounding boxes whose objectness exceeds the threshold;
    # boxes below it are zeroed out (objectness becomes 0).
    conf_mask = (prediction[:,:,4] > confidence).float().unsqueeze(2)
    prediction = prediction*conf_mask

    # Convert (center x, center y, width, height) into corner coordinates:
    # e.g. top-left y = center y - height/2, top-left x = center x - width/2.
    box_corner = prediction.new(prediction.shape)
    box_corner[:,:,0] = (prediction[:,:,0] - (prediction[:,:,2]/2)) # top-left x
    box_corner[:,:,1] = (prediction[:,:,1] - (prediction[:,:,3]/2)) # top-left y
    box_corner[:,:,2] = (prediction[:,:,0] + (prediction[:,:,2]/2)) # bottom-right x
    box_corner[:,:,3] = (prediction[:,:,1] + (prediction[:,:,3]/2)) # bottom-right y
    # Replace center/size with the corner representation in place.
    prediction[:,:,:4] = box_corner[:,:,:4]

    batch_size = prediction.size(0)  # number of images in the batch
    write = False  # becomes True once `output` has been initialized

    # Run non-maximum suppression image by image (not per batch).
    for ind in range(batch_size):
        image_pred = prediction[ind]  # predictions for image `ind`
        # Keep only the class with the highest confidence per box.
        max_conf, max_conf_index = torch.max(image_pred[:,5:5+num_classes], 1)
        max_conf = max_conf.float().unsqueeze(1)
        max_conf_index = max_conf_index.float().unsqueeze(1)
        # Box coords + objectness (5), best class score, best class index.
        seq = (image_pred[:,:5], max_conf, max_conf_index)
        image_pred = torch.cat(seq, 1)  # concatenate into a single tensor

        # Drop boxes whose objectness was zeroed by the mask above.
        non_zero_ind =  (torch.nonzero(image_pred[:,4]))
        try:
            image_pred_ = image_pred[non_zero_ind.squeeze(),:].view(-1,7)
        except:
            # No surviving detections for this image.
            continue
        # If there are no detections with objectness > confidence, move on
        # to the next image.
        if image_pred_.shape[0]==0:
            continue

        img_classes = unique(image_pred_[:,-1])  # classes detected in image

        # NON-MAXIMUM SUPPRESSION, performed independently per class.
        # See for reference: https://www.youtube.com/watch?v=VAo84c1hQX8
        for cls in img_classes:
            # Select the detections belonging to the current class `cls`.
            cls_mask = image_pred_*(image_pred_[:,-1] == cls).float().unsqueeze(1)
            class_mask_ind = torch.nonzero(cls_mask[:, -2]).squeeze()
            image_pred_class = image_pred_[class_mask_ind].view(-1, 7)

            # Sort by objectness in descending order.
            conf_sort_index = torch.sort(image_pred_class[:,4], descending =True)[1]
            image_pred_class = image_pred_class[conf_sort_index]
            idx = image_pred_class.size(0)

            # Suppress lower-confidence boxes overlapping a kept box.
            for i in range(idx):
                # IOU between the current (highest-confidence) box and all
                # boxes ranked after it.
                try:
                    ious = bbox_iou(image_pred_class[i].unsqueeze(0), image_pred_class[i+1:])
                except ValueError:
                    break
                except IndexError:
                    break
                # Zero out boxes whose IOU exceeds the NMS threshold...
                iou_mask = (ious < nms_thresh).float().unsqueeze(1)
                image_pred_class[i+1:] *= iou_mask
                # ...then physically remove them from the candidate list.
                non_zero_ind = torch.nonzero(image_pred_class[:,4]).squeeze()
                image_pred_class = image_pred_class[non_zero_ind].view(-1, 7)

            # Prefix each surviving detection with its batch (image) index.
            batch_ind = image_pred_class.new(image_pred_class.size(0),1).fill_(ind)
            seq = batch_ind, image_pred_class
            if not write:
                output = torch.cat(seq, 1)
                write = True
            else:
                out = torch.cat(seq, 1)
                output = torch.cat((output,out))

    # `output` only exists when at least one detection survived.
    try:
        return output
    except:
        # No detections at all.
        return 0
33,007
def es_indexing(builder) -> int:
    """Index all examples in the lsc4 dict into Elasticsearch.

    TODO: performance is poor; the indexing step should happen while the
    mdx file is being parsed instead of afterwards.

    :param builder: dict builder exposing ``get_mdx_db`` and ``mdx_lookup``
    :return: the number of dictionary keys processed, or 0 when the index
        could not be created (the original fell off the end and returned
        ``None`` despite the ``-> int`` annotation)
    """
    # create index; bail out early on failure
    if not create_index():
        return 0
    print("es is connected and index created succeed, starting indexing the examples...")

    # Collect every headword stored in the mdx sqlite index.
    conn = sqlite3.connect(builder.get_mdx_db())
    try:
        cursor = conn.execute('SELECT key_text FROM MDX_INDEX')
        keys = [item[0] for item in cursor]
    finally:
        # Close the connection even if the query fails.
        conn.close()

    examples = []
    for key in keys:
        content = builder.mdx_lookup(key)
        # Join entry fragments, stripping line breaks and entry links
        # (str.join avoids the quadratic += loop).
        str_content = "".join(
            c.replace("\r\n", "").replace("entry:/", "") for c in content)
        exs = example_parse_lsc4(key, str_content)
        if exs:
            examples.extend(exs)
        # Flush in batches so each bulk request stays bounded.
        if len(examples) > 2000:
            ingest("lsc4", examples)
            examples = []
    ingest("lsc4", examples)  # flush the final partial batch
    print("indexing done", len(keys))
    return len(keys)
33,008
def test_quote_arg(unquote_home_dir):
    """should correctly quote arguments passed to the shell"""
    expected = '\'a/b c/d\''
    actual = swb.quote_arg('a/b c/d')
    nose.assert_equal(actual, expected)
    # the quoted value must also be handed to the home-dir unquoter
    unquote_home_dir.assert_called_once_with(expected)
33,009
def import_data(users, agencies, filename):
    """Import data from CSV file."""
    # Users take precedence; if neither flag is set, nothing is imported.
    target = Users if users else (Agencies if agencies else None)
    if target is not None:
        target.populate(csv_name=filename)
33,010
def mast_query_darks(instrument, aperture, start_date, end_date):
    """Use ``astroquery`` to search MAST for dark current data

    Parameters
    ----------
    instrument : str
        Instrument name (e.g. ``nircam``)

    aperture : str
        Detector aperture to search for (e.g. ``NRCA1_FULL``)

    start_date : float
        Starting date for the search in MJD

    end_date : float
        Ending date for the search in MJD

    Returns
    -------
    query_results : list
        List of dictionaries containing the query results

    Raises
    ------
    ValueError
        If ``instrument`` is not a supported JWST instrument
    """
    # Map the instrument to its proper-case name and dark exposure templates.
    instrument_info = {
        'nircam': ('NIRCam', ['NRC_DARK']),
        'niriss': ('NIRISS', ['NIS_DARK']),
        'nirspec': ('NIRSpec', ['NRS_DARK']),
        'fgs': ('FGS', ['FGS_DARK']),
        'miri': ('MIRI', ['MIR_DARKALL', 'MIR_DARKIMG', 'MIR_DARKMRS']),
    }
    try:
        instrument, dark_template = instrument_info[instrument.lower()]
    except KeyError:
        # Previously an unknown instrument crashed later with a NameError
        # on `dark_template`; fail fast with a clear message instead.
        raise ValueError('Unsupported instrument: {}'.format(instrument))

    # monitor_mast.instrument_inventory does not allow list inputs to
    # the added_filters input (or at least if you do provide a list, then
    # it becomes a nested list when it sends the query to MAST. The
    # nested list is subsequently ignored by MAST.)
    # So query once for each dark template, and combine outputs into a
    # single list.
    query_results = []
    for template_name in dark_template:
        # Create dictionary of parameters to add
        parameters = {"date_obs_mjd": {"min": start_date, "max": end_date},
                      "apername": aperture, "exp_type": template_name}
        query = monitor_mast.instrument_inventory(instrument, dataproduct=JWST_DATAPRODUCTS,
                                                  add_filters=parameters, return_data=True, caom=False)
        if 'data' in query.keys():
            if len(query['data']) > 0:
                query_results.extend(query['data'])
    return query_results
33,011
def randomNumGen(choice):
    """Get a random number to simulate a d6, d10, d100, d4, d8, d12, or d20 roll.

    :param choice: 1=d6, 2=d10, 3=d100, 4=d4, 5=d8, 6=d12, 7=d20
    :return: the rolled value, or an error string for an unknown choice
    """
    # Menu choice -> number of die faces; clearer than a long if/elif chain.
    faces = {1: 6, 2: 10, 3: 100, 4: 4, 5: 8, 6: 12, 7: 20}
    if choice not in faces:
        # Preserve the original error contract (a string, not an exception).
        return "Shouldn't be here. Invalid choice"
    return random.randint(1, faces[choice])
33,012
def schedule_job_with_distance_matrix(request):
    """Enqueue a TSP solve for a distance matrix posted by the client.

    :param request: HTTP request with following fields:
        - distance_matrix: dictionary where keys correspond to node ids and
          values to coordinates.
        - first_node: integer - id of the first node
        - tol (optional): solver tolerance, default 1e-2
        - steps (optional): solver step count, default 1
    :return: JsonResponse with the status code and the id of the created log
    """
    request_dict = json.loads(request.read())
    print(request_dict)
    sys.stdout.flush()

    distance_matrix = request_dict["distance_matrix"]
    first_node = request_dict["first_node"]
    # Optional solver parameters (dict.get replaces the `in ... keys()` checks).
    tol = request_dict.get("tol", 1e-2)
    steps = request_dict.get("steps", 1)

    current_log = TSPLog.objects.create(nodes=None, distance_matrix=distance_matrix,
                                        first_node=first_node, tol=tol, steps=steps)
    current_log.save()

    # Hand the actual solve off to a background worker; the job handle
    # itself is not needed here (the unused `result` local was dropped).
    q = Queue(connection=conn)
    q.enqueue(solve_tsp, distance_matrix, first_node, steps, tol, current_log,
              timeout=3600)
    return JsonResponse({"status_code": 200, "id": current_log.id})
33,013
def download_all():
    """Download all files in the DATA_HUB."""
    for dataset_name in DATA_HUB:
        download(dataset_name)
33,014
def is_distinct(coll, key=EMPTY):
    """Checks if all elements in the collection are different."""
    # Compare the de-duplicated size against the original size; when a key
    # is given, uniqueness is judged on the mapped values instead.
    values = coll if key is EMPTY else xmap(key, coll)
    return len(set(values)) == len(coll)
33,015
def split_data(df_data, config, test_frac=0.2):
    """ split df_data to train and test. """
    df_train, df_test = train_test_split(df_data, test_size=test_frac)
    # Re-index both frames after the shuffle and persist them to the
    # paths configured for train/test data.
    for frame, path in ((df_train, config.path_train_data),
                        (df_test, config.path_test_data)):
        frame.reset_index(inplace=True, drop=True)
        frame.to_csv(path, index=False)
    return df_train
33,016
def query(params, lang='en'):
    """ Simple Mediawiki API wrapper """
    url = 'https://%s.wikipedia.org/w/api.php' % lang
    # Caller-supplied params override the defaults.
    finalparams = {'action': 'query', 'format': 'json', **params}
    resp = requests.get(url, params=finalparams)
    if not resp.ok:
        return None
    # Returns None implicitly when the response has no 'query' section.
    return resp.json().get('query')
33,017
def reporting_window(year, month):
    """
    Returns the range of time when people are supposed to report
    """
    # Last calendar day of the previous month.
    last_of_last_month = datetime(year, month, 1) - timedelta(days=1)
    # Last business day of the previous month, promoted to a datetime at
    # midnight via datetime.combine.
    last_bd_of_last_month = datetime.combine(
        get_business_day_of_month(last_of_last_month.year, last_of_last_month.month, -1),
        time()
    )
    # Last business day of the requested month.
    # NOTE(review): unlike the start bound, this is NOT combined with
    # time(), so the pair mixes datetime and date -- confirm callers
    # expect that.
    last_bd_of_the_month = get_business_day_of_month(year, month, -1)
    return last_bd_of_last_month, last_bd_of_the_month
33,018
def df_destroyer(df):
    """destroys a df"""
    # TODO - implement a destroyer
    # Placeholder: intentionally does nothing yet.
    pass
33,019
def load_json(path: str) -> Dict[str, Any]:
    """Load and return the JSON document stored at *path*.

    Args:
        path (str): Path to file.

    Raises:
        FileNotFoundError: If *path* does not point to an existing file.

    Returns:
        Dict[str, Any]: Returns the loaded json.

    Example:
        >>> # Load a json file
        >>> load_json('mlnext.json')
        {'name': 'mlnext'}
    """
    # Reject non-existent paths up front with a clear error.
    if not os.path.isfile(path):
        raise FileNotFoundError(f'Path {path} invalid.')

    with open(path, 'r') as fp:
        return json.load(fp)
33,020
def test_newcollection(runner, input_dir):
    """Test newcoll command."""
    cli_args = [
        "--url", "mock://example.com/",
        "--email", "test@test.mock",
        "--password", "1234",
        "newcollection",
        "--community-handle", "111.1111",
        "--collection-name", "Test Collection",
    ]
    result = runner.invoke(main, cli_args)
    assert result.exit_code == 0
33,021
def _ensure_accepted_tags(builds: List[Dict], brew_session: koji.ClientSession, tag_pv_map: Dict[str, str], raise_exception: bool = True):
    """ Build dicts returned by koji.listTagged API have their tag names, however other APIs don't set that field.
    Tag names are required because they are associated with Errata product versions.
    For those build dicts whose tags are unknown, we need to query from Brew.

    Mutates the given build dicts in place, setting "_tags" (set of Brew tag
    names) and "tag_name" (the accepted tag) on each.

    :param builds: Brew build dicts to annotate
    :param brew_session: koji client session used to look up tags
    :param tag_pv_map: maps an accepted Brew tag name to an Errata product version
    :param raise_exception: when True, raise IOError for a build with no
        accepted tag; otherwise log a warning and skip that build
    """
    builds = [b for b in builds if "tag_name" not in b]  # filters out builds whose accepted tag is already set
    unknown_tags_builds = [b for b in builds if "_tags" not in b]  # finds builds whose tags are not cached
    build_tag_lists = brew.get_builds_tags(unknown_tags_builds, brew_session)
    # Cache each build's Brew tag names under "_tags".
    for build, tags in zip(unknown_tags_builds, build_tag_lists):
        build["_tags"] = {tag['name'] for tag in tags}
    # Finds and sets the accepted tag (rhaos-x.y-rhel-z-[candidate|hotfix]) for each build
    for build in builds:
        accepted_tag = next(filter(lambda tag: tag in tag_pv_map, build["_tags"]), None)
        if not accepted_tag:
            msg = f"Build {build['nvr']} has Brew tags {build['_tags']}, but none of them has an associated Errata product version."
            if raise_exception:
                raise IOError(msg)
            else:
                LOGGER.warning(msg)
                continue
        build["tag_name"] = accepted_tag
33,022
def fit_cluster_13():
    """Fit a GMM to resolve objects in cluster 13 into C, Q, O.

    Returns
    -------
    sklearn.mixture.GaussianMixture
        The mixture model trained on the latent scores.
    list
        The classes represented in order by the model components.
    """
    data = classy.data.load()
    latent_scores = data.loc[data.cluster == 13, ["z1", "z3"]]
    gmm = GaussianMixture(n_components=3, random_state=17).fit(latent_scores)

    # Components ordered by their first mean coordinate map to C, Q, O.
    classes = [""] * 3
    for component, label in zip(np.argsort(gmm.means_[:, 0]), ["C", "Q", "O"]):
        classes[component] = label
    return gmm, classes
33,023
def sidebar_left(request):
    """ Return the left sidebar values in context """
    # Anonymous users get no sidebar context at all.
    if not request.user.is_authenticated():
        return {}

    moderation = {
        'is_visible': False,
        'count_notifs': 0,
    }
    # Staff members see the moderation entry with its pending count.
    if request.user.is_staff:
        moderation['is_visible'] = True
        moderation['count_notifs'] = ModerationHelper.count_unmoderated(request.user)

    return {
        'sidebar_left': {
            'moderation': moderation,
        },
    }
33,024
async def test_init_entry(hass, generic_data):
    """Test setting up config entry."""
    await setup_ozw(hass, fixture=generic_data)

    loaded = hass.config.components
    # The integration and every platform must be loaded.
    assert "ozw" in loaded
    for platform in PLATFORMS:
        assert platform in loaded, platform
        assert f"{platform}.{DOMAIN}" in loaded, f"{platform}.{DOMAIN}"

    # Node management services must be registered.
    assert hass.services.has_service(DOMAIN, const.SERVICE_ADD_NODE)
    assert hass.services.has_service(DOMAIN, const.SERVICE_REMOVE_NODE)
33,025
def addAttribute(p, value, run, data):
    """ add a particular attribute to the run object.

    p is a string with the type, value is a string that contains the value
    to add, run is the run object, data is the data object used for error
    reporting.

    Raises UnexpectedValue when a value cannot be parsed (or is 'nan' where
    a number is required); unknown pattern types are reported on stderr.
    """
    if p == 'indivs':
        try:
            run.indivs = int(value)
        except ValueError:
            raise UnexpectedValue(run.name, 'individuals', value, data)
    elif p == 'loci':
        try:
            run.loci = int(value)
        except ValueError:
            raise UnexpectedValue(run.name, 'loci', value, data)
    elif p == 'k':
        try:
            run.k = int(value)
        except ValueError:
            raise UnexpectedValue(run.name, 'populations assumed', value, data)
    elif p == 'burnin':
        try:
            run.burnin = int(value)
        except ValueError:
            raise UnexpectedValue(run.name, 'Burn-in period', value, data)
    elif p == 'reps':
        try:
            run.reps = int(value)
        except ValueError:
            raise UnexpectedValue(run.name, 'Reps', value, data)
    elif p == 'lnprob':
        # 'nan' is rejected explicitly because float('nan') would succeed.
        if value == 'nan':
            raise UnexpectedValue(run.name, 'Estimated Ln Prob of Data', value, data)
        try:
            run.estLnProb = float(value)
        except ValueError:
            raise UnexpectedValue(run.name, 'Estimated Ln Prob of Data', value, data)
    elif p == 'meanln':
        # NOTE(review): this compares against the literal 'meanln' while the
        # sibling branches compare against 'nan' -- looks like a copy-paste
        # slip; confirm the intended sentinel before changing it.
        if value == 'meanln':
            raise UnexpectedValue(run.name, 'Estimated Ln Prob of Data', value, data)
        try:
            run.meanLlh = float(value)
        except ValueError:
            raise UnexpectedValue(run.name, 'Mean value of ln likelihood', value, data)
    elif p == 'varln':
        # NOTE(review): the 'nan' error below is labelled 'Estimated Ln Prob
        # of Data' even though this branch parses the variance -- confirm.
        if value == 'nan':
            raise UnexpectedValue(run.name, 'Estimated Ln Prob of Data', value, data)
        try:
            run.varLlh = float(value)
        except ValueError:
            raise UnexpectedValue(run.name, 'Variance of ln likelihood', value, data)
    else:
        sys.stderr.write('Error, %s unknown pattern type %s\n' % (data.uniqueName, p))
33,026
def get_edge_lengths(vertices, edge_points):
    """Return the squared length of every edge.

    :param vertices: (N, D) tensor of vertex positions (D is typically 3)
    :param edge_points: (E, 4) integer tensor whose first two columns hold
        the indices of each edge's endpoints, from get_edge_points(mesh)
        or edge_vertex_indices(faces)
    :return: (E,) tensor of squared edge lengths
    """
    # (E, 2, D): gather both endpoints of every edge at once.
    # (The unused N/D/E locals of the original were removed.)
    edge_vertices = vertices[edge_points[:, :2]]
    edges = edge_vertices[:, 0, :] - edge_vertices[:, 1, :]
    return torch.sum(edges * edges, dim=-1)
33,027
def compute_pca(nparray):
    """Principal component analysis of a point set.

    :param nparray: nxd array, d is the dimension
    :return: eigenvalues, and a dxd array whose columns are eigenvectors

    author: weiwei
    date: 20200701osaka
    """
    # Biased covariance; rowvar=False means rows are points, columns dims.
    covariance = np.cov(nparray, y=None, rowvar=False, bias=True)
    eigvals, eigvecs = np.linalg.eig(covariance)
    return eigvals, eigvecs
33,028
def fac(num):
    """Return num! (the factorial of a non-negative integer).

    :param num: non-negative integer
    :return: num!
    :raises ValueError: if num is negative (the original used ``assert``,
        which is silently stripped under ``python -O``)
    """
    import math

    if num < 0:
        raise ValueError('num must be non-negative')
    # math.factorial is C-implemented and avoids the recursion-depth limit
    # the original recursive version could hit for large inputs.
    return math.factorial(num)
33,029
def test_doc_example():
    """Text examples given in documentation"""
    expected_simple = '\x1b[34mmy string\x1b[0m'
    assert color('my string', fg='blue') == expected_simple

    expected_styled = '\x1b[31;43;4msome text\x1b[0m'
    assert color('some text', fg='red', bg='yellow', style='underline') == expected_styled
33,030
def create_rep_avg_plot(plot_data, title, xlab, ylab, xlims, figname, add_line=False):
    """Create plot with replicates on same y-value with line showing average.

    Inputs:
        plot_data - list of tuples in form (entry name, entry value)
        title - title of plot
        xlab - x-axis label
        ylab - y-axis label
        xlims - tuple of (xmin, xmax, step) for plt.xlim
        figname - name of file to save figure as
        add_line - include line at 1.0 to show ratio of 1
    """
    width = 0.6  # vertical extent of each replicate-average tick mark
    fig, ax = plt.subplots(figsize=(10, 5))
    plt.tight_layout()

    # Make some place holder entries for legend (plotted off-screen at -5,-5).
    for key, value in constants.REP_FORMAT.items():
        plt.plot(-5, -5, value, color='black', fillstyle='none',
                 markersize=8, label='Rep. {}'.format(key))
    ax.vlines(-1, -5, 2, color='black', linestyles='solid', label='Rep. Average')

    nams = []  # y-axis tick names
    for idx, tup in enumerate(plot_data):
        x = tup[1]
        # Look up the plotting attributes for this sample from the
        # project-wide constant tables.
        y = constants.SAMPLE_PLOT_VALUE[tup[0]]
        f = constants.REP_FORMAT[constants.SAMPLE_REP[tup[0]]]
        l = 'Rep. {}'.format(constants.SAMPLE_REP[tup[0]])
        c = constants.SAMPLE_COLOR[tup[0]]
        #c = 'black'
        plt.plot(x, y, f, fillstyle='none', markersize=8, color=c)
        # Only label each sample once (rep 1 entry names the row).
        if ('rep2' not in tup[0]) and ('Rep2' not in tup[0]):
            nams.append((y, tup[0]))
        # Draw the replicate-average tick; assumes the matching replicate
        # is the immediately preceding entry -- TODO confirm input ordering.
        if ('pbat' not in tup[0]):
            avg = (x + plot_data[idx-1][1])/2
            ax.vlines(avg, y-width/2, y+width/2, color='black', linestyles='solid')

    # Optional reference line at a ratio of 1.
    if add_line:
        ax.axvline(1.0, alpha=0.6, color='grey', linestyle='--')

    nams = sorted(nams, key=lambda d: d[0], reverse=False)
    ax.legend(ncol=3, bbox_to_anchor=(0.5, 0.96), frameon=False,
              loc='lower center', fontsize=20)
    plt.title(title, pad=40, fontsize=24)
    plt.xlabel(xlab, fontsize=20)
    plt.ylabel(ylab, fontsize=20)
    plt.xlim(xlims[0], xlims[1]+(0.5*xlims[2]))
    plt.ylim(-1, len(nams))

    # Float steps get one-decimal tick labels; integer steps keep str().
    if not (isinstance(xlims[2], int)):
        plt.xticks(
            [i for i in np.arange(xlims[0], xlims[1]+xlims[2], xlims[2])],
            ['{:.1f}'.format(i) for i in np.arange(xlims[0], xlims[1]+xlims[2], xlims[2])],
            fontsize=18
        )
    else:
        plt.xticks(
            [i for i in np.arange(xlims[0], xlims[1]+xlims[2], xlims[2])],
            [str(i) for i in np.arange(xlims[0], xlims[1]+xlims[2], xlims[2])],
            fontsize=18
        )
    # Row labels assembled from the per-sample kit and group tables.
    plt.yticks(
        [tup[0] for tup in nams],
        [' '.join([constants.SAMPLE_KIT[tup[1]], 'Sample', constants.SAMPLE_GROUP[tup[1]]]) for tup in nams],
        fontsize=18
    )
    plt.savefig(figname, bbox_inches='tight')
    plt.close('all')
33,031
def entropy(logp, p):
    """Compute the entropy of `p` - probability density function approximation.

    We need this in order to compute the entropy-bonus: sums -p*log(p)
    over the distribution dimension, then averages over the batch.
    """
    per_sample = -(logp * p).sum(dim=1)
    return per_sample.mean()
33,032
def main():
    """
    Method main, set output dir and call a specific function, as given in the options
    :param argv:
    :return: None
    """
    config2 = ConfigParser()
    stream = resource_stream('drf_gen','config.ini')
    cg = stream.read().decode()
    #config2.read(resource_stream('drf_gen', 'config.ini'),encoding="utf-8-sig")
    #config2.readfp(cg)
    #print(config2.sections())
    # Output directory is currently hard-coded; the config lookup is disabled.
    outputdir = 'drf_gen_build' #config2.get('outputdir', 'dir')
    os.mkdir(outputdir) if not os.path.exists(outputdir) else outputdir

    # Command-line options: which generated files to produce.
    ap = ArgumentParser()
    ap.add_argument('-vv', '--verbose', default=False, help='Increase verbosity.')
    ap.add_argument('-m', '--model', required=True, action='store', dest='models_path', help='Path to your models.py file.')
    ap.add_argument('-a', '--admin', action='store_true', help='Will create a admin.py file from your models.py.')
    ap.add_argument('-v', '--views', action='store_true', help='Will create a views.py file from your models.py.')
    ap.add_argument('-s', '--serializers', action='store_true', help='Will create a serializers.py file from your models.py.')
    ap.add_argument('-u', '--urls', action='store_true', help='Will create a urls.py file from your models.py.')
    ap.add_argument('-A', '--All', action='store_true', help='Will create four files: urls.py, admin.py, views.py, serializers.py, from your models.py.')
    ap.add_argument('-D', '--Delete', action='store_true', help='\033[91m'+outputdir+' directory will be destroyed!!!''\033[0m')
    args = ap.parse_args()

    models = extractor_obj(args.models_path)
    if models:
        # Generate each requested file; -A generates all four at once.
        if args.admin:
            make_admin(outputdir)
            if args.verbose:
                print("\033[91madmin.py genereted at!---> \033[93m" + outputdir + "/admin.py")
        if args.views:
            make_views(outputdir)
            if args.verbose:
                print("\033[91mviews.py genereted at!---> \033[93m" + outputdir + "/views.py")
        if args.urls:
            make_urls(outputdir)
            if args.verbose:
                print("\033[91murls.py genereted at!---> \033[93m" + outputdir + "/urls.py")
        if args.serializers:
            make_serializers(outputdir)
            if args.verbose:
                # NOTE(review): the escape sequence below is missing the
                # trailing 'm' ('\033[91' instead of '\033[91m'); left
                # unchanged because runtime strings must not be altered here.
                print("\033[91serializers.py genereted at!---> \033[93m" + outputdir + "/serializers.py")
        if args.All:
            make_admin(outputdir)
            make_views(outputdir)
            make_urls(outputdir)
            make_serializers(outputdir)
            if args.verbose:
                print("\033[91madmin.py genereted at!---> \033[93m" + outputdir + "/admin.py")
                print("\033[91mviews.py genereted at!---> \033[93m" + outputdir + "/views.py")
                print("\033[91murls.py genereted at!---> \033[93m" + outputdir + "/urls.py")
                # NOTE(review): same malformed '\033[91' escape as above.
                print("\033[91serializers.py genereted at!---> \033[93m" + outputdir + "/serializers.py")
        if args.Delete:
            # NOTE(review): raw_input is Python 2 only -- under Python 3 this
            # raises NameError; confirm the intended interpreter version.
            op = raw_input('\033[91m Warning!!! '+outputdir+'directory will be destroyed!!! do you have sure? yes|not ''\033[0m')
            if op == 'yes':
                shutil.rmtree(outputdir)
                if args.verbose:
                    print('\033[91m'+outputdir+' directory was destroyed!!!''\033[0m')
                sys.exit(0)
            else:
                print("OK nothing was destroyed.")
                sys.exit(0)
        make_models_improve()
        sys.exit(0)
    else:
        print("can't read models.py, make sure that you was used a valid path/file.")
        sys.exit(1)
33,033
def find(query):
    """Retrieve *exactly* matching tracks."""
    return mpctracks('find', _parse_query(query))
33,034
def permuteregulations(graph):
    """Randomly change which regulations are repressions, maintaining activation and repression counts and directions."""
    edges = list(graph.edges)
    copy = graph.copy()

    # Clear every repression flag, counting how many there were.
    repressions = 0
    for edge in edges:
        attrs = copy.edges[edge]
        if attrs['repress']:
            repressions += 1
        attrs['repress'] = False

    # Reassign the same number of repressions to randomly chosen edges.
    for chosen in random.sample(edges, repressions):
        copy.edges[chosen]['repress'] = True
    return copy
33,035
def handle_tokennetwork_new2(raiden, event, current_block_number):
    """ Handles a `TokenNetworkCreated` event. """
    data = event.event_data
    token_network_address = data['token_network_address']

    # The registry contract that emitted the event owns the new network.
    token_network_registry_address = event.originating_contract
    token_network_registry_proxy = raiden.chain.token_network_registry(
        token_network_registry_address,
    )
    token_network_proxy = token_network_registry_proxy.token_network(token_network_address)

    # Start listening for events emitted by the newly created token network.
    raiden.blockchain_events.add_token_network_listener(token_network_proxy)

    token_address = data_decoder(event.event_data['args']['token_address'])
    token_network_state = TokenNetworkState(
        token_network_address,
        token_address,
    )
    new_token_network = ContractReceiveNewTokenNetwork(
        event.originating_contract,
        token_network_state,
    )
    # Feed the state change into the raiden state machine.
    raiden.handle_state_change(new_token_network, current_block_number)
33,036
def editor_command(command):
    """
    Is this an external editor command?
    :param command: string
    """
    # It is possible to have `\e filename` or `SELECT * FROM \e`. So we
    # check for both the suffix and the prefix form.
    stripped = command.strip()
    return stripped.endswith('\\e') or stripped.startswith('\\e ')
33,037
def blrObjFunction(initialWeights, *args):
    """
    blrObjFunction computes 2-class Logistic Regression error function and
    its gradient.

    Input:
        initialWeights: the weight vector (w_k) of size (D + 1) x 1
        train_data: the data matrix of size N x D
        labeli: the label vector (y_k) of size N x 1 where each entry can be
            either 0 or 1 representing the label of corresponding feature
            vector

    Output:
        error: the scalar value of error function of 2-class logistic
            regression
        error_grad: the vector of size (D+1) x 1 representing the gradient of
            error function
    """
    train_data, labeli = args

    n_data = train_data.shape[0]
    n_features = train_data.shape[1]

    # BUG FIX: the original referenced an undefined name ``n_feature``
    # (NameError); the variable defined above is ``n_features``.
    initw = initialWeights.reshape(n_features + 1, 1)
    # Prepend the bias column of ones to the input data.
    inputWithBias = np.hstack((np.ones((n_data, 1)), train_data))
    out = sigmoid(np.dot(inputWithBias, initw))

    # Cross-entropy error, averaged over the data set.
    a = np.sum((labeli * np.log(out)) + (1.0 - labeli) * np.log(1.0 - out))
    error = a * (-1 / n_data)

    # Gradient of the averaged error w.r.t. the weights.
    b = np.sum(((out - labeli) * inputWithBias), axis=0)
    error_grad = b / n_data

    return error, error_grad
33,038
def edit_battle(battle_id):
    """
    Edit battle form.

    Renders the edit form on GET and applies the submitted changes on POST.

    :param battle_id: primary key of the battle to edit
    :return: rendered edit form, or redirect to the battles list on success
    """
    battle = Battle.query.get(battle_id) or abort(404)
    # Only members of the battle's clan (or admins) may edit it.
    if battle.clan != g.player.clan and g.player.name not in config.ADMINS:
        abort(403)

    all_players = Player.query.filter_by(clan=g.player.clan, locked=False).order_by('lower(name)').all()
    sorted_players = sorted(all_players, reverse=True, key=lambda p: p.player_role_value())

    # Pre-populate the form with the battle's current values.
    date = battle.date
    map_name = battle.map_name
    province = battle.map_province
    battle_commander = battle.battle_commander
    enemy_clan = battle.enemy_clan
    battle_groups = BattleGroup.query.filter_by(clan=g.player.clan).order_by('date').all()
    battle_result = battle.outcome_repr()
    battle_group_final = battle.battle_group_final
    players = battle.get_players()
    description = battle.description
    replay = battle.replay.unpickle()
    duration = battle.duration
    if battle.battle_group:
        battle_group_description = battle.battle_group.description
    else:
        battle_group_description = ''

    if request.method == 'POST':
        # BUG FIX: wrap map() in list() -- on Python 3 a map object is always
        # truthy, so the "No players selected" check below could never fire.
        players = list(map(int, request.form.getlist('players')))
        map_name = request.form.get('map_name', '')
        province = request.form.get('province', '')
        enemy_clan = request.form.get('enemy_clan', '')
        battle_result = request.form.get('battle_result', '')
        battle_commander = Player.query.get(int(request.form['battle_commander']))
        description = request.form.get('description', '')
        battle_group = int(request.form['battle_group'])
        battle_group_title = request.form.get('battle_group_title', '')
        battle_group_description = request.form.get('battle_group_description', '')
        battle_group_final = request.form.get('battle_group_final', '') == 'on'
        duration = request.form.get('duration', 15 * 60)

        errors = False
        date = None
        try:
            date = datetime.datetime.strptime(request.form.get('date', ''), '%d.%m.%Y %H:%M:%S')
        except ValueError:
            flash(u'Invalid date format', 'error')
            errors = True
        if not map_name:
            flash(u'Please enter the name of the map', 'error')
            errors = True
        if not battle_commander:
            flash(u'No battle commander selected', 'error')
            errors = True
        if not players:
            flash(u'No players selected', 'error')
            errors = True
        if not enemy_clan:
            flash(u'Please enter the enemy clan\'s tag', 'errors')
            errors = True
        if not battle_result:
            flash(u'Please select the correct outcome of the battle', 'errors')
            errors = True

        bg = None
        if battle_group == -1:
            # new group
            bg = BattleGroup(battle_group_title, battle_group_description, g.player.clan, date)
        elif battle_group >= 0:
            # existing group
            bg = BattleGroup.query.get(battle_group) or abort(500)
            if bg.get_final_battle() is not None and bg.get_final_battle() is not battle and battle_group_final:
                flash(u'Selected battle group already contains a battle marked as final')
                errors = True

        if not errors:
            battle.date = date
            battle.clan = g.player.clan
            battle.enemy_clan = enemy_clan
            battle.victory = battle_result == 'victory'
            battle.draw = battle_result == 'draw'
            battle.map_name = map_name
            battle.map_province = province
            battle.battle_commander_id = battle_commander.id
            battle.description = description
            battle.duration = duration
            if bg:
                battle.battle_group_final = battle_group_final
                battle.battle_group = bg
                db_session.add(bg)
            else:
                battle.battle_group = None

            # Replace the non-reserve attendances with the submitted roster.
            for ba in battle.attendances:
                if not ba.reserve:
                    db_session.delete(ba)
            for player_id in players:
                player = Player.query.get(player_id)
                if not player:
                    abort(404)
                ba = BattleAttendance(player, battle, reserve=False)
                db_session.add(ba)

            db_session.add(battle)
            db_session.commit()
            logger.info(g.player.name + " updated the battle " + str(battle.id))
            return redirect(url_for('battles_list', clan=g.player.clan))

    # BUG FIX: the original also passed ``replays=replays`` although no
    # ``replays`` name was ever defined, raising a NameError when rendering.
    return render_template('battles/edit.html', date=date, map_name=map_name,
                           province=province, battle=battle,
                           battle_groups=battle_groups, duration=duration,
                           battle_group_description=battle_group_description,
                           battle_commander=battle_commander,
                           enemy_clan=enemy_clan, battle_result=battle_result,
                           battle_group_final=battle_group_final,
                           players=players, description=description,
                           replay=replay, all_players=all_players,
                           sorted_players=sorted_players)
33,039
def write_config(yamlpath: PathType) -> None:
    """Write the global CONFIG to *yamlpath* in YAML format.

    BUG FIX (docs): the original docstring said "Read CONFIG"; this function
    serializes CONFIG and writes it out.

    :param yamlpath: destination file path.
    """
    yaml_text = omegaconf.OmegaConf.to_yaml(CONFIG)
    pathlib.Path(yamlpath).write_text(yaml_text)
33,040
def construct_epsilon_heli(epsilon_diag, pitch, divisions, thickness, handness="left"):
    """
    Construct the dielectric tensors of all layers of a helical stack.

    Returns an N*3*3 array where N is the number of layers.  ``pitch`` is the
    distance over which the rotation completes 180 degrees, i.e. the apparent
    period in the z direction.
    """
    if pitch < thickness:
        # NOTE(review): NameError is an odd exception choice here
        # (ValueError would be conventional) but is kept for compatibility.
        raise NameError('Need thickness to be smaller than pitch')
    # Total rotation covered by the slab, spread uniformly over the layers.
    # When pitch == thickness this is a full half turn (-pi).
    if pitch == thickness:
        total_angle = -np.pi
    else:
        total_angle = -np.pi * thickness / pitch
    angles = np.linspace(0, total_angle, divisions, endpoint=False)
    # Rotate the diagonal tensor about z for each layer: R(a) @ eps @ R(-a).
    return np.array(
        [rotZ(a).dot(epsilon_diag.dot(rotZ(-a))) for a in angles])
33,041
def list_fm_tsv(f_tsv: str, col: int = 0) -> List[int]:
    """
    Read one whitespace-separated column of a TSV file as integers.

    2cols (pred, out_label_id) -> List[pred:int]

    :param f_tsv: path to the TSV file
    :param col: zero-based index of the column to extract
    :return: the chosen column converted to int, one entry per line
    """
    # BUG FIX: the original left the file handle open; a context manager
    # closes it deterministically.  (The old ``os.path.abspath`` annotation
    # was a function, not a type.)
    with open(f_tsv, 'r') as handle:
        return [int(line.split()[col]) for line in handle]
33,042
def image_overlay(im_1, im_2, color=True, normalize=True):
    """Overlay two images with the same size.

    Args:
        im_1 (np.ndarray): image array
        im_2 (np.ndarray): image array
        color (bool): Whether convert intensity image to color image.
        normalize (bool): If both color and normalize are True, will normalize
            the intensity so that it has minimum 0 and maximum 1.

    Returns:
        np.ndarray: an overlay image of im_1*0.5 + im_2*0.5
    """
    if color:
        # Drop singleton dimensions before colorizing each intensity image.
        im_1 = intensity_to_rgb(np.squeeze(im_1), normalize=normalize)
        im_2 = intensity_to_rgb(np.squeeze(im_2), normalize=normalize)
    return 0.5 * im_1 + 0.5 * im_2
33,043
def ansible_hostsfile_filepath(opts):
    """Return the filepath where the ansible hostsfile will be created.

    A non-empty ``hosts_output_file`` given on the command line wins;
    otherwise the file goes into the temporary execution directory, named
    after the target system.
    """
    if opts.get("hosts_output_file"):
        return opts["hosts_output_file"]
    return os.path.join(temp_exec_dirpath(),
                        "provision_{}.hosts".format(opts['system']))
33,044
def get_next_seg(ea):
    """
    Get next segment

    @param ea: linear address

    @return: start of the next segment
             BADADDR - no next segment
    """
    segment = ida_segment.get_next_seg(ea)
    return segment.start_ea if segment else BADADDR
33,045
def validate_item_pid(item_pid):
    """Validate item or raise and return an obj to easily distinguish them."""
    from invenio_app_ils.items.api import ITEM_PID_TYPE

    pid_type = item_pid["type"]
    if pid_type not in (BORROWING_REQUEST_PID_TYPE, ITEM_PID_TYPE):
        raise UnknownItemPidTypeError(pid_type=pid_type)
    # Anonymous object exposing two boolean flags for the two known types.
    return type(
        "obj",
        (object,),
        {
            "is_item": pid_type == ITEM_PID_TYPE,
            "is_brw_req": pid_type == BORROWING_REQUEST_PID_TYPE,
        },
    )
33,046
async def async_setup(hass, config):
    """Set up the pool pump services."""
    hass.data[DOMAIN] = {}
    # Copy the configured entity ids into hass.data for later use.
    for key in (
        ATTR_SWITCH_ENTITY_ID,
        ATTR_POOL_PUMP_MODE_ENTITY_ID,
        ATTR_VAC_SWITCH_ENTITY_ID,
        ATTR_POOL_VAC_MODE_ENTITY_ID,
        ATTR_POOL_VAC_CONNECTED_ENTITY_ID,
        ATTR_SWIMMING_SEASON_ENTITY_ID,
        ATTR_RUN_PUMP_IN_SWIMMING_SEASON_ENTITY_ID,
        ATTR_RUN_PUMP_IN_OFF_SEASON_ENTITY_ID,
        ATTR_WATER_LEVEL_CRITICAL_ENTITY_ID,
    ):
        hass.data[DOMAIN][key] = config[DOMAIN][key]

    async def check(call):
        """Check if the pool pump should be running now."""
        # Use a single fixed time reference for the whole evaluation.
        now = dt_util.now()
        mode = hass.states.get(hass.data[DOMAIN][ATTR_POOL_PUMP_MODE_ENTITY_ID])
        _LOGGER.debug("Pool pump mode: %s", mode.state)
        if mode.state != POOL_PUMP_MODE_AUTO:
            # Only act when the pool pump is set to 'Auto'.
            hass.states.async_set("{}.schedule".format(DOMAIN), "Manual Mode")
            return
        manager = PoolPumpManager(hass, now)
        _LOGGER.debug("Manager initialised: %s", manager)
        if await manager.is_water_level_critical():
            schedule = "Water Level Critical"
        else:
            run = manager.next_run()
            _LOGGER.debug("Next run: %s", run)
            if not run:
                # No run left today -- consult tomorrow's schedule instead.
                tomorrow = now + timedelta(days=1)
                next_midnight = tomorrow.replace(hour=0, minute=0, second=0)
                _LOGGER.debug("Next midnight: %s", next_midnight)
                manager_tomorrow = PoolPumpManager(hass, next_midnight)
                _LOGGER.debug("Manager initialised: %s", manager_tomorrow)
                run = manager_tomorrow.next_run()
                _LOGGER.debug("Next run: %s", run)
            schedule = run.pretty_print()
        # Expose the time range so that it can be displayed in the UI.
        hass.states.async_set("{}.schedule".format(DOMAIN), schedule)
        # And now check whether the pool pump should be running.
        await manager.check()

    hass.services.async_register(DOMAIN, 'check', check)
    # Indicate that initialisation was successful.
    return True
33,047
def save_as_png(prs: pptx.presentation.Presentation, save_folder: str,
                overwrite: bool = False) -> bool:
    """
    Export a presentation to PNG files via PowerPoint.

    BUG FIX (docs): the original docstring said "Save presentation as PDF";
    this function renders PNGs.  It saves a temporary *.pptx first, then has
    PowerPoint render it.  Needs module comtypes (windows only) and an
    installed PowerPoint.

    Note: you have to give full path for save_folder, or PowerPoint might
    cause random exceptions.

    :return: True on success, False otherwise.
    """
    result = False
    with TemporaryPPTXFile() as f:
        prs.save(f.name)
        try:
            result = save_pptx_as_png(save_folder, f.name, overwrite)
        except _ctypes.COMError as e:
            # COM round-trip to PowerPoint failed; report and signal failure.
            print(e)
            print("Couldn't save PNG file due to communication error with PowerPoint.")
            result = False
    return result
33,048
def http_post(request):
    """HTTP Cloud Function.

    Args:
        request (flask.Request): The request object.
        <https://flask.palletsprojects.com/en/1.1.x/api/#incoming-request-data>
    Returns:
        The response text, or any set of values that can be turned into a
        Response object using `make_response`
        <https://flask.palletsprojects.com/en/1.1.x/api/#flask.make_response>.
    """
    response_data = {}

    body = request.get_json(silent=True)
    query = request.args

    # Accept the signed message either in the JSON body or as a query arg.
    if body and 'signed_message' in body:
        signed_message = body['signed_message']
    elif query and 'signed_message' in query:
        signed_message = query['signed_message']
    else:
        response_data['status'] = 'Invalid request parameters'
        return json.dumps(response_data)

    # Render the message as a QR code and return it base64-encoded.
    response_data['qr_code'] = pyqrcode.create(signed_message).png_as_base64_str(scale=2)
    response_data['status'] = 'Message Created'
    return json.dumps(response_data)
33,049
def _get_add_noise(stddev, seed: Optional[int] = None):
  """Utility function to decide which `add_noise` to use according to tf version.

  Args:
    stddev: standard deviation of the Gaussian noise to add.
    seed: optional RNG seed; should only be used for testing.

  Returns:
    A callable `add_noise(v)` returning `v` plus Gaussian noise with the
    given stddev, matching `v`'s shape and dtype.
  """
  if distutils.version.LooseVersion(
      tf.__version__) < distutils.version.LooseVersion('2.0.0'):
    # TF 1.x path: relies on the graph-level random seed.
    # The seed should be only used for testing purpose.
    if seed is not None:
      tf.random.set_seed(seed)

    def add_noise(v):
      return v + tf.random.normal(
          tf.shape(input=v), stddev=stddev, dtype=v.dtype)
  else:
    # TF 2.x path: an initializer carries its own seed; cast back to the
    # input dtype since the initializer emits its default dtype.
    random_normal = tf.random_normal_initializer(stddev=stddev, seed=seed)

    def add_noise(v):
      return v + tf.cast(random_normal(tf.shape(input=v)), dtype=v.dtype)

  return add_noise
33,050
def create_incident_field_context(incident):
    """Parse the 'incident_field_values' entry of an incident.

    BUG FIX (docs): the original docstring claimed a list was returned; the
    function returns a dict.

    Args:
        incident (dict): The incident to parse

    Returns:
        dict. Mapping from field name (spaces replaced by underscores) to the
        field value.
    """
    return {
        field['name'].replace(" ", "_"): field['value']
        for field in incident.get('incident_field_values', [])
    }
33,051
def create_profile(body, user_id):  # noqa: E501
    """Create a user profile  # noqa: E501

    :param body:
    :type body: dict | bytes
    :param user_id: The id of the user to update
    :type user_id: int

    :rtype: None
    """
    if not connexion.request.is_json:
        return "Whoops..."
    payload = connexion.request.get_json()
    # Bind the profile to the user it belongs to before persisting it.
    payload["user_id"] = user_id
    return ProfileService().insert_profile(payload)
33,052
def get_server_info(context):
    """Look up the "server" entry, cache it on the context and print it."""
    info = context.get("server")
    context.server_info = info
    print(info)
33,053
def load_global_recovered() -> pd.DataFrame:
    """Loads time series data for global COVID-19 recovered cases

    Returns:
        pd.DataFrame: A pandas dataframe with time series data for global
            COVID-19 recovered cases
    """
    # Delegates to the shared CSV loader with the module-level source location.
    return load_csv(global_recovered_cases_location)
33,054
def build_url(self, endpoint):
    """
    Builds a URL given an endpoint

    Args:
        endpoint (Endpoint: str): The endpoint to build the URL for

    Returns:
        str: The URL to access the given API endpoint
    """
    base = self.base_url
    return urllib.parse.urljoin(base, endpoint)
33,055
def neighbors(i, diag=True, inc_self=False):
    """
    Determine the neighbors of cell ``i``; returns a set of tuples, e.g. {(0, 1)}.

    diag: include diagonal moves as well
    inc_self: include ``i`` itself in the result
    """
    if diag:
        offsets = {(dr, dc) for dr in (-1, 0, 1) for dc in (-1, 0, 1)}
        if not inc_self:
            offsets.discard((0, 0))
        return {(i[0] + dr, i[1] + dc) for dr, dc in offsets}
    result = {(i[0], i[1] + 1), (i[0], i[1] - 1), (i[0] + 1, i[1]), (i[0] - 1, i[1])}
    if inc_self:
        result.add(i)
    return result
33,056
def test_create_product_price_min_10(): """capsys -- object created by pytest to capture stdout and stderr""" # pip the input os.chdir(working_dir) output = subprocess.run( ['python', '-m', 'qbay'], stdin=expected_in14, capture_output=True, text=True ).stdout product = Product.query.first() modified_expected_out14 = Template(expected_out14) modified_expected_out14 = modified_expected_out14.safe_substitute( last_modified_date=product.last_modified_date) assert output.strip() == modified_expected_out14.strip() db.session.query(User).delete() db.session.query(Product).delete() db.session.commit() db.session.close()
33,057
def readPLY(name):
    """Read a PLY mesh file; return the mesh, or None if reading fails."""
    try:
        ply_reader = vtk.vtkPLYReader()
        ply_reader.SetFileName(name)
        ply_reader.Update()
        print("Input mesh:", name)
        mesh = ply_reader.GetOutput()
        # Drop the reader explicitly so VTK can release its resources.
        del ply_reader
        return mesh
    except BaseException:
        # Report the failure with a short traceback and signal it via None.
        print("PLY Mesh reader failed")
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(
            exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout)
        return None
33,058
def gumbel_softmax(logits, tau=1, hard=False, eps=1e-10):
    """
    NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3

    Sample from the Gumbel-Softmax distribution and optionally discretize.
    Args:
      logits: [batch_size, n_class] unnormalized log-probs
      tau: non-negative scalar temperature
      hard: if True, take argmax, but differentiate w.r.t. soft sample y
    Returns:
      [batch_size, n_class] sample from the Gumbel-Softmax distribution.
      If hard=True, then the returned sample will be one-hot, otherwise it will
      be a probability distribution that sums to 1 across classes

    Constraints:
    - this implementation only works on batch_size x num_features tensor for now

    based on
    https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,
    (MIT license)
    """
    y_soft = gumbel_softmax_sample(logits, tau=tau, eps=eps)
    if hard:
        shape = logits.size()
        # Index of the max class per row, used to build the one-hot tensor.
        _, k = y_soft.data.max(-1)
        # this bit is based on
        # https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530/5
        y_hard = torch.zeros(*shape)
        if y_soft.is_cuda:
            y_hard = y_hard.cuda()
        y_hard = y_hard.zero_().scatter_(-1, k.view(shape[:-1] + (1,)), 1.0)
        # this cool bit of code achieves two things:
        # - makes the output value exactly one-hot (since we add then
        # subtract y_soft value)
        # - makes the gradient equal to y_soft gradient (since we strip
        # all other gradients)
        # NOTE(review): ``.data`` is the legacy detach API; presumably
        # equivalent to .detach() here -- confirm before modernizing.
        y = y_hard - y_soft.data + y_soft
    else:
        y = y_soft
    return y
33,059
def decompose_f_string(f_string: str) -> (List[str], List[str]):
    """
    Decompose an f-string into the list of variable names and the separators between them.

    An f-string is any string that contains enclosed curly brackets around text.
    A variable is the text expression within the enclosed curly brackets, and
    the separators are the string remnants that surround the variables.

    An example f-string and components would be: 'This is {an} f-string!',
    with variable 'an' and separators 'This is ' and ' f-string!'.

    Example
    -------
        variable_names, separators = decompose_f_string(f_string="a/{x}b{y}/c{z}")
        # variable_names = ["x", "y", "z"]
        # separators = ["a/", "b", "/c", ""]
    """
    # {.*?} lazily matches any characters enclosed by curly brackets.
    enclosed = re.findall("{.*?}", f_string)
    variable_names = [match.lstrip("{").rstrip("}") for match in enclosed]
    assert all(name != "" for name in variable_names), (
        "Empty variable name detected in f-string! Please ensure there is text between all enclosing '{' and '}'."
    )

    # Pattern matches all expressions outside the curly-bracket enclosures:
    #   ^.*?{  text before the first opening bracket
    #   }.*?{  text between a closing and the next opening bracket
    #   }.*?$  text after the last closing bracket
    outside = re.findall(pattern="^.*?{|}.*?{|}.*?$", string=f_string)
    separators = [piece.rstrip("{").lstrip("}") for piece in outside]
    if any(sep == "" for sep in separators[1:-1]):
        warn(
            "There is an empty separator between two variables in the f-string! "
            "The f-string will not be uniquely invertible."
        )
    return variable_names, separators
33,060
def process(register, instructions):
    """Run *instructions* against a copy of *register* and return the copy."""
    state = register.copy()
    pc = 0
    # process_instruction mutates the state and returns the jump offset
    # to apply to the program counter.
    while pc < len(instructions):
        pc += process_instruction(state, instructions[pc])
    return state
33,061
def bearing_radians(lat1, lon1, lat2, lon2):
    """Initial bearing (forward azimuth) in radians from point 1 to point 2."""
    delta_lon = lon2 - lon1
    east = sin(delta_lon) * cos(lat2)
    north = cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(delta_lon)
    return atan2(east, north)
33,062
def RunSimulatedStreaming(vm):
    """Spawn fio to simulate streaming and gather the results.

    Args:
        vm: The vm that synthetic_storage_workloads_benchmark will be run upon.

    Returns:
        A list of sample.Sample objects
    """
    # Cap the working-set size at 10% of memory or ~1 GB, whichever is smaller.
    test_size = min(vm.total_memory_kb / 10, 1000000)
    iodepth_list = FLAGS.iodepth_list or DEFAULT_STREAMING_SIMULATION_IODEPTH_LIST
    results = []
    for depth in iodepth_list:
        # Base fio parameters; sequential 1m blocks, direct I/O (no page cache).
        cmd = (
            '--filesize=10g '
            '--directory=%s '
            '--ioengine=libaio '
            '--overwrite=0 '
            '--invalidate=1 '
            '--direct=1 '
            '--randrepeat=0 '
            '--iodepth=%s '
            '--blocksize=1m '
            '--size=%dk '
            '--filename=fio_test_file ') % (vm.GetScratchDir(), depth, test_size)
        if FLAGS.maxjobs:
            cmd += '--max-jobs=%s ' % FLAGS.maxjobs
        # Two jobs: a fsync'd sequential write, then (stonewalled) a read.
        cmd += (
            '--name=sequential_write '
            '--rw=write '
            '--end_fsync=1 '
            '--name=sequential_read '
            '--stonewall '
            '--rw=read ')
        logging.info('FIO Results for simulated %s', STREAMING)
        res, _ = vm.RemoteCommand('%s %s' % (fio.FIO_CMD_PREFIX, cmd),
                                  should_log=True)
        results.extend(
            fio.ParseResults(fio.FioParametersToJob(cmd), json.loads(res)))
    UpdateWorkloadMetadata(results)
    return results
33,063
def create_warning_path(paths_=None):
    """Create the file names for both warning files (strangers and spoofing).

    :param paths_: optional list of directories to place the warning files
        in; defaults to ``/opt/arp_guard/arp_warnings/`` (created if missing).
    :return: two lists of file paths -- (spoof warnings, stranger warnings),
        one entry per directory, each suffixed with today's date.
    """
    if not paths_:
        default_dir = '/opt/arp_guard/arp_warnings/'
        # BUG FIX: the original tested os.path.isdir('/opt/arp_warnings/')
        # but created and used '/opt/arp_guard/arp_warnings/'.  Use
        # os.makedirs so parent directories are created as well, instead of
        # shelling out to mkdir.
        os.makedirs(default_dir, exist_ok=True)
        paths_ = [default_dir]  # default warning dir

    # Single now() call so year/month/day are consistent around midnight.
    today = datetime.now()
    date_path = "{}_{}_{}".format(today.year, today.month, today.day)
    spoofs_path = [d + "MacSpoof_warning_" + date_path for d in paths_]
    strangers_paths = [d + "strangers_warning_" + date_path for d in paths_]
    return spoofs_path, strangers_paths
33,064
def write_conll(fstream, data):
    """
    Writes to an output stream @fstream (e.g. output of `open(fname, 'w')`)
    in CoNLL file format.

    @data a list of examples [(tokens), (labels), (predictions)].
        @tokens, @labels, @predictions are lists of string.
    """
    for columns in data:
        # One tab-separated line per token; a blank line ends each example.
        lines = ["\t".join(row) + "\n" for row in zip(*columns)]
        fstream.write("".join(lines))
        fstream.write("\n")
33,065
def get_all_tutorial_info():
    """
    Tutorial route to get tutorials with steps

    Parameters
    ----------
    None

    Returns
    -------
    Tutorials with steps (JSON response, HTTP 200)
    """
    sql_query = "SELECT * FROM diyup.tutorials"
    cur = mysql.connection.cursor()
    cur.execute(sql_query)
    tutorials = cur.fetchall()
    output = []
    for tutorial in tutorials:
        # Column order: uuid, author_username, title, image, category,
        # description, author_difficulty.
        tutorial_data = {}
        tutorial_data['uuid'] = tutorial[0]
        tutorial_data['author_username'] = tutorial[1]
        tutorial_data['title'] = tutorial[2]
        tutorial_data['image'] = tutorial[3]
        tutorial_data['category'] = tutorial[4]
        tutorial_data['description'] = tutorial[5]
        tutorial_data['author_difficulty'] = str(tutorial[6])
        # Aggregated viewer ratings are computed per tutorial.
        tutorial_data['viewer_difficulty'] = \
            str(average_rating_type_for_tutorial('difficulty', tutorial[0]))
        tutorial_data['rating'] = \
            str(average_rating_type_for_tutorial('score', tutorial[0]))
        # Attach the tutorial's steps (index, content, image).
        sql_query = "SELECT * FROM diyup.steps WHERE tutorial_uuid=%s"
        cur.execute(sql_query, (tutorial[0],))
        steps = cur.fetchall()
        output_steps = []
        for step in steps:
            step_data = {}
            step_data['index'] = step[1]
            step_data['content'] = step[2]
            step_data['image'] = step[3]
            output_steps.append(step_data)
        tutorial_data['steps'] = output_steps
        output.append(tutorial_data)
    cur.close()
    return jsonify({'tutorials' : output}), 200
33,066
def parse_date(datestring, default_timezone=UTC):
    """Parses ISO 8601 dates into datetime objects

    The timezone is parsed from the date string. However it is quite common to
    have dates without a timezone (not strictly correct). In this case the
    default timezone specified in default_timezone is used. This is UTC by
    default.
    """
    # NOTE(review): ``basestring`` exists only on Python 2; on Python 3 this
    # raises NameError -- confirm which interpreter this module targets.
    if not isinstance(datestring, basestring):
        raise ParseError("Expecting a string %r" % datestring)
    m = ISO8601_REGEX.match(datestring)
    if not m:
        raise ParseError("Unable to parse date string %r" % datestring)
    groups = m.groupdict()
    tz = parse_timezone(groups["timezone"], default_timezone=default_timezone)
    if groups["fraction"] is None:
        groups["fraction"] = 0
    else:
        # Convert the fractional-second digits to microseconds.
        groups["fraction"] = int(float("0.%s" % groups["fraction"]) * 1e6)
    return datetime(int(groups["year"]), int(groups["month"]), int(groups["day"]),
        int(groups["hour"]), int(groups["minute"]), int(groups["second"]),
        int(groups["fraction"]), tz)
33,067
def fworker(fworker_file, name):
    """
    Configure the basic settings of a fireworker.

    Although the information can be put in manually when using the command
    without options, it's probably easiest to first set up the fireworker
    file and then use the '-f' option to configure the fireworker based on
    this file.

    Note that specifying a name for the fworker allows you to configure
    multiple computational resources or settings.
    """
    # Local import avoids loading the config machinery at module import time.
    from vscworkflows.config import fworker as _configure_fworker
    _configure_fworker(fireworker_file=fworker_file, fworker_name=name)
33,068
def BigSpectrum_to_H2COdict(sp, vrange=None):
    """
    A rather complicated way to make the spdicts above given a spectrum...

    Splits ``sp`` into one sub-spectrum per H2CO line whose (optionally
    velocity-shifted) rest frequency falls inside the spectrum's frequency
    axis.  Each sub-spectrum is converted to km/s relative to its line and,
    if ``vrange`` is given, cropped to that velocity range.
    """
    spdict = {}
    # NOTE(review): ``iteritems`` is Python 2 only -- this module presumably
    # targets py2; confirm before porting.
    for linename,freq in pyspeckit.spectrum.models.formaldehyde.central_freq_dict.iteritems():
        if vrange is not None:
            # Doppler-shift the rest frequency to both ends of the velocity
            # range and test whether either falls on the spectral axis.
            freq_test_low  = freq - freq * vrange[0]/pyspeckit.units.speedoflight_kms
            freq_test_high = freq - freq * vrange[1]/pyspeckit.units.speedoflight_kms
        else:
            freq_test_low = freq_test_high = freq

        if (sp.xarr.as_unit('Hz').in_range(freq_test_low) or
                sp.xarr.as_unit('Hz').in_range(freq_test_high)):
            # Copy the spectrum and re-reference its axis to this line.
            spdict[linename] = sp.copy()
            spdict[linename].xarr.convert_to_unit('GHz')
            spdict[linename].xarr.refX = freq
            spdict[linename].xarr.refX_units = 'Hz'
            #spdict[linename].baseline = copy.copy(sp.baseline)
            #spdict[linename].baseline.Spectrum = spdict[linename]
            spdict[linename].specfit = sp.specfit.copy(parent=spdict[linename])
            spdict[linename].xarr.convert_to_unit('km/s')
            if vrange is not None:
                try:
                    spdict[linename].crop(*vrange, units='km/s')
                except IndexError:
                    # if the freq in range, but there's no data in range, remove
                    spdict.pop(linename)

    return spdict
33,069
def get_thickness_model(model):
    """
    Return a function calculating an adsorbate thickness.

    The ``model`` parameter is a string which names the thickness equation
    which should be used. Alternatively, a user can implement their own
    thickness model, either as an experimental isotherm or a function which
    describes the adsorbed layer. In that case, instead of a string, pass the
    Isotherm object or the callable function as the ``model`` parameter.

    Parameters
    ----------
    model : str or callable
        Name of the thickness model to use.

    Returns
    -------
    callable
        A callable that takes a pressure in and returns a thickness
        at that point.

    Raises
    ------
    ParameterError
        When string is not in the dictionary of models.
    """
    # If the model is a string, get a model from the _THICKNESS_MODELS
    if isinstance(model, str):
        if model not in _THICKNESS_MODELS:
            # BUG FIX: the original passed two separate f-strings to the
            # exception (a stray comma), producing a two-element args tuple
            # instead of one message.
            raise ParameterError(
                f"Model {model} not an implemented thickness function. "
                f"Available models are {_THICKNESS_MODELS.keys()}"
            )
        return _THICKNESS_MODELS[model]

    # If the model is a callable, return it instead
    else:
        return model
33,070
def http(session: aiohttp.ClientSession) -> Handler:
    """`aiohttp` based request handler.

    :param session: client session used to perform each request.
    """
    async def handler(request: Request) -> Response:
        # Empty collections are normalized to None for aiohttp.
        request_kwargs = dict(
            params=request.params or None,
            data=request.form_data or None,
            json=request.data or None,
            headers=request.headers or None,
        )
        async with session.request(
            request.method, request.url, **request_kwargs
        ) as resp:
            payload = await resp.json(encoding='utf-8')
            return Response(
                status=resp.status,
                reason=resp.reason,
                headers=resp.headers,
                data=payload,
            )

    return handler
33,071
def remove_package_repo_and_wait(repo_name, wait_for_package):
    """
    Remove a repository from the list of package sources, then wait for the
    removal to complete

    :param repo_name: name of the repository to remove
    :type repo_name: str
    :param wait_for_package: the package whose version should change after
                             the repo is removed
    :type wait_for_package: str
    :returns: True if successful, False otherwise
    :rtype: bool
    """
    # Thin wrapper: remove_package_repo performs both the removal and the wait.
    return remove_package_repo(repo_name, wait_for_package)
33,072
def lyndon_of_word(word: str, comp: Callable[[List[str]], str] = min) -> str:
    """
    Returns the Lyndon representative among the set of circular shifts, that
    is the minimum for the lexicographic order 'L'<'R'.

    Args:
        `word` (str): a word (supposedly binary L&R)
        `comp` (Callable[[List[str]], str]): comparison function, min or max

    Returns:
        str: the chosen representative among the circular shifts

    :Example:

    >>> lyndon_of_word('LRRLRLL')
    'LLLRRLR'
    """
    if not word:
        return ''
    return comp(list_of_circular_shifts(word))
33,073
def setColor(poiID, color):
    """setColor(string, (integer, integer, integer, integer)) -> None

    Sets the rgba color of the poi.
    """
    # Payload length: 1 type byte + 4 color-channel bytes.
    traci._beginMessage(tc.CMD_SET_POI_VARIABLE, tc.VAR_COLOR, poiID, 1+1+1+1+1)
    # Network byte order: type marker followed by the r, g, b, a channels.
    traci._message.string += struct.pack("!BBBBB", tc.TYPE_COLOR, int(color[0]),
                                         int(color[1]), int(color[2]),
                                         int(color[3]))
    traci._sendExact()
33,074
def num_of_visited_nodes(driver_matrix):
    """Calculate the total number of visited nodes for multiple paths.

    Args:
        driver_matrix (list of lists): A list whose members are lists that
            contain paths that are represented by consecutively visited nodes.

    Returns:
        int: Number of visited nodes
    """
    return sum(map(len, driver_matrix))
33,075
def gen_custom_item_windows_file(description, info, value_type, value_data,
                                 regex, expect):
    """Generates a custom item stanza for windows file contents audit

    Args:
        description: string, a description of the audit
        info: string, info about the audit
        value_type: string, "POLICY_TEXT" -- included for parity with other
            gen_* modules.
        value_data: string, location of remote file to check
        regex: string, regular expression to check file for
        expect: string, regular expression to match for a pass

    Returns:
        A list of strings to put in the main body of a Windows file audit file.
    """
    # Newlines inside the free-text fields would break the stanza format.
    flat_description = description.replace("\n", " ")
    flat_info = info.replace("\n", " ")
    return [
        '',
        '<custom_item>',
        ' type: FILE_CONTENT_CHECK',
        ' description: "%s"' % flat_description,
        ' info: "%s"' % flat_info,
        ' value_type: %s' % value_type,
        ' value_data: "%s"' % value_data,
        ' regex: "%s"' % regex,
        ' expect: "%s"' % expect,
        '</custom_item>',
        ' ',
    ]
33,076
def create_signature(args=None, kwargs=None):
    """Create a inspect.Signature object based on args and kwargs.

    Args:
        args (list or None): The names of positional or keyword arguments.
        kwargs (list or None): The keyword only arguments.

    Returns:
        inspect.Signature
    """
    positional = [] if args is None else args
    keyword_only = {} if kwargs is None else kwargs

    # Positional-or-keyword parameters first, keyword-only ones after.
    params = [
        inspect.Parameter(name=n, kind=inspect.Parameter.POSITIONAL_OR_KEYWORD)
        for n in positional
    ]
    params += [
        inspect.Parameter(name=n, kind=inspect.Parameter.KEYWORD_ONLY)
        for n in keyword_only
    ]
    return inspect.Signature(parameters=params)
33,077
def execute_message_call( laser_evm, callee_address: BitVec, func_hashes: List[List[int]] = None ) -> None: """Executes a message call transaction from all open states. :param laser_evm: :param callee_address: """ # TODO: Resolve circular import between .transaction and ..svm to import LaserEVM here open_states = laser_evm.open_states[:] del laser_evm.open_states[:] for open_world_state in open_states: if open_world_state[callee_address].deleted: log.debug("Can not execute dead contract, skipping.") continue next_transaction_id = tx_id_manager.get_next_tx_id() external_sender = symbol_factory.BitVecSym( "sender_{}".format(next_transaction_id), 256 ) calldata = SymbolicCalldata(next_transaction_id) transaction = MessageCallTransaction( world_state=open_world_state, identifier=next_transaction_id, gas_price=symbol_factory.BitVecSym( "gas_price{}".format(next_transaction_id), 256 ), gas_limit=8000000, # block gas limit origin=external_sender, caller=external_sender, callee_account=open_world_state[callee_address], call_data=calldata, call_value=symbol_factory.BitVecSym( "call_value{}".format(next_transaction_id), 256 ), ) constraints = ( generate_function_constraints(calldata, func_hashes) if func_hashes else None ) _setup_global_state_for_execution(laser_evm, transaction, constraints) laser_evm.exec()
33,078
def select(type, name, optional):
    """Select data from data.json file.

    Loads data.json and, if any element of ``data[type]`` equals
    ``data[name]``, returns ``data[optional]``; otherwise returns None.
    """
    with open('data.json', 'r') as source:
        data = json.load(source)
    target = data[name]
    for candidate in data[type]:
        if candidate == target:
            return data[optional]
33,079
def test_xlseventform_month_out_of_range(
    stocking_event_dict, xls_choices, cache, year, month, day
):
    """An out-of-range month (>12 or <1) must invalidate the form.

    Parameterized over (year, month, day) combinations. The invalid form
    must report a meaningful "not a valid choice" error for the month.
    """
    data = stocking_event_dict
    data.update(year=year, month=month, day=day)

    form = XlsEventForm(data=data, choices=xls_choices, cache=cache)
    assert form.is_valid() is False

    observed = [messages[0] for messages in form.errors.values()]
    expected = "Select a valid choice. {} is not one of the available choices."
    assert expected.format(month) in observed
33,080
def perform_step(polymer: str, rules: dict) -> str:
    """Perform a single polymerization step.

    For every adjacent pair in ``polymer``, insert the element given by
    ``rules`` between the two characters; return the new template string.
    """
    pieces = []
    for left, right in zip(polymer, polymer[1:]):
        pieces.append(left + rules[left + right])
    pieces.append(polymer[-1])
    return "".join(pieces)
33,081
def load_datasets(json_file):
    """Load the datasets described in a JSON config file.

    The config has a "Path" section mapping names to glob patterns and a
    "Dataset" section whose entries define 'train'/'val'/'test' splits
    (resolved through "Path" when possible, otherwise treated as direct
    patterns) plus optional extra parameters under 'param'.
    """
    with open(json_file, 'r') as fd:
        config = json.load(fd)
        path_map = config["Path"]
        datasets = {}
        for name, spec in config["Dataset"].items():
            assert isinstance(spec, dict)
            ds = Dataset()
            for split in spec:
                if split not in ('train', 'val', 'test'):
                    continue
                files = []
                for pattern in to_list(spec[split]):
                    try:
                        # Prefer a named pattern from the "Path" section.
                        files.extend(_glob_absolute_pattern(path_map[pattern]))
                    except KeyError:
                        # Fall back to treating the entry as a raw pattern.
                        files.extend(_glob_absolute_pattern(pattern))
                ds[split] = files
            if 'param' in spec:
                for k, v in spec['param'].items():
                    ds[k] = v
            datasets[name] = ds
    return datasets
33,082
def group(batch, batch_size=64, seed=1234):
    """Yield shuffled mini-batches assembled from several named inputs.

    Parameters
    ----------
    batch : list
        Each element is ``(name, [list of data], *others)``; all "data"
        and "others" arrays of one element share the same shape[0].
    batch_size : int
        Rows per yielded mini-batch (default 64, the original behavior).
    seed : int
        Seed for the RandomState used for shuffling (default 1234).

    Yields
    ------
    list of np.ndarray
        Concatenated, shuffled slices of the data and others arrays.
    """
    rng = np.random.RandomState(seed)
    # One list of chunk indices per input. The ranges are materialized
    # into lists because RandomState.shuffle requires a mutable sequence:
    # shuffling a bare `range` object raises on Python 3 (bug fix).
    indices = [list(range((b[1][0].shape[0] - 1) // batch_size + 1))
               for b in batch]
    for chunk_ids in indices:
        rng.shuffle(chunk_ids)
    # ====== create batch of data ====== #
    for idx in zip_longest(*indices):
        ret = []
        for i, b in zip(idx, batch):
            # Skip inputs that have run out of chunks.
            if i is None:
                continue
            data, others = b[1], b[2:]
            start = i * batch_size
            end = start + batch_size
            ret.append([d[start:end] for d in data] +
                       [o[start:end] for o in others])
        ret = [np.concatenate(x, axis=0) for x in zip(*ret)]
        # Shuffle rows once more so chunks from different inputs mix.
        permutation = rng.permutation(ret[0].shape[0])
        ret = [r[permutation] for r in ret]
        # Re-slice the mixed rows into mini-batches.
        for i in range((ret[0].shape[0] - 1) // batch_size + 1):
            start = i * batch_size
            end = start + batch_size
            yield [x[start:end] for x in ret]
33,083
def instantiate_descriptor(**field_data):
    """Instantiate an HtmlBlock descriptor with the given field data."""
    system = get_test_descriptor_system()
    usage_key = CourseLocator('org', 'course', 'run').make_usage_key(
        'html', 'SampleHtml'
    )
    scope_ids = ScopeIds(None, None, usage_key, usage_key)
    return system.construct_xblock_from_class(
        HtmlBlock,
        scope_ids=scope_ids,
        field_data=DictFieldData(field_data),
    )
33,084
def simple_switch(M_in, P_in, slack=1, animate=True, cont=False, gen_pos=None, verbose=True):
    """
    A simple switch algorithm.

    When encountering a change in sequence, compare the value of the
    switch to the value of the current state; switch if it's more. The
    default value function is sum(exp(length(adjoint sequences))) where
    length is measured in the input arrays.

    Parameters
    ----------
    M_in, P_in : the two input sequences (copied, never mutated).
    slack : int, largest blur window applied via blurr_slack.
    animate : bool, record every intermediate state and build an animation.
    cont : bool, use continuity_value instead of exp_len_value.
    gen_pos : passed through to the value function -- presumably genomic
        positions; TODO(review) confirm against the value-function code.
    verbose : bool, print the solving time.

    Returns
    -------
    (M, P, M_track, P_track, ani) : final sequences, switch-tracking
    arrays, and the animation object (None when animate is False).
    """
    start_time = time.time()
    # Work on copies so the callers' arrays are untouched.
    M, P = np.copy(M_in), np.copy(P_in)
    # Track provenance of each position (0 = from M, 1 = from P initially).
    M_track, P_track = np.zeros_like(M), np.ones_like(P)
    value_function = exp_len_value if not cont else continuity_value
    if animate:
        history = np.array([M, P])
    for w in range(slack + 1):
        # if slack w, then sequences of length w don't make any sense
        M, P = blurr_slack(M, w), blurr_slack(P, w)
        if animate:
            history = np.dstack([history, [M, P]])
        for i in range(1, len(M) - w):
            # A boundary: value at i differs from i-1 in either sequence.
            if M[i] != M[i-1] or P[i] != P[i-1]:
                val = value_function(M, P, i-1, i, gen_pos)
                # Candidate with tails swapped at i; the w blurred
                # positions are filled from the opposite sequence.
                M_temp = np.concatenate([M[:i], [P[i+w]]*w, P[i+w:]])
                P_temp = np.concatenate([P[:i], [M[i+w]]*w, M[i+w:]])
                switch_val = value_function(M_temp, P_temp, i-1, i, gen_pos)
                # Accept the switch only if it improves the value and is
                # allowed by is_steeling.
                if switch_val > val and not is_steeling(M, P, i, w):
                    M, P = np.copy(M_temp), np.copy(P_temp)
                    M_track, P_track = track_switch(M_track, P_track, i)
                    if animate:
                        history = np.dstack([history, [M, P]])
    ani = None
    if animate:
        # make it stop on the end for a while
        for _ in range(20):
            history = np.dstack([history, [M, P]])
        ani = animate_history(history)
    if verbose:
        print("Solving time:", time.time()-start_time, "seconds")
    return M, P, M_track, P_track, ani
33,085
def drop_duplicates_by_type_or_node(n_df, n1, n2, typ):
    """
    Drop the duplicates in the network, by type or by node.

    For each set of "duplicate" edges, only the edge with the maximum
    weight will be kept (this relies on n_df being pre-sorted so that
    duplicates are adjacent with the highest weight first).
    By type, duplicates are rows where nd1, nd2, and typ are identical;
    by node, duplicates are rows where nd1 and nd2 are identical.
    The method is chosen by the module-level DROP_DUPLICATES_METHOD.

    Parameters:
        n_df (list): the data, sorted so duplicate rows are adjacent
        n1 (int): the column for the first node
        n2 (int): the column for the second node
        typ (int): the column for the type

    Returns:
        list: the modified data
    """
    # Iterate the sorted rows, keeping only the first row of each group
    # of duplicates.
    prev_nd1_val = None
    prev_nd2_val = None
    prev_type_val = None
    new_n_df = []
    for row in n_df:
        nd1_val = row[n1]
        nd2_val = row[n2]
        type_val = row[typ]

        nodes_differ = nd1_val != prev_nd1_val or nd2_val != prev_nd2_val
        type_differs = type_val != prev_type_val

        # Bug fix: the old condition OR-ed in (nodes_differ or
        # type_differs) unconditionally, so the 'node' method behaved
        # exactly like the 'type' method. Each method must apply only
        # its own notion of "differs".
        if DROP_DUPLICATES_METHOD == 'node':
            keep = nodes_differ
        else:
            keep = nodes_differ or type_differs
        if keep:
            new_n_df.append(row)

        prev_nd1_val = nd1_val
        prev_nd2_val = nd2_val
        prev_type_val = type_val
    return new_n_df
33,086
def voting(labels):
    """Fuse the label images by majority voting.

    The second argument (0) is forwarded to SimpleITK's LabelVoting as
    the label assigned where no majority exists -- see the SimpleITK
    LabelVoting documentation.
    """
    fused = sitk.LabelVoting(labels, 0)
    return fused
33,087
def user_city_country(obj):
    """Get the location (city, country) of the user.

    Args:
        obj (object): The user profile.

    Returns:
        str: "city, country" built from whichever parts are set, or
        'Not available' when neither is.
    """
    parts = [part for part in (obj.city, obj.country) if part]
    if not parts:
        return 'Not available'
    return ", ".join(str(part) for part in parts)
33,088
def test_get_midi_download_name():
    """Check the midi download name maker.

    We expect:
    - Any input comes back suffixed with '-melodie.mid'.
    - The result is always a string.
    """
    for candidate in range(20):
        assert get_midi_download_name(candidate).endswith("-melodie.mid")

    assert isinstance(get_midi_download_name("I'm a string"), str)
    assert isinstance(get_midi_download_name(12345), str)
33,089
def test_encrypted_parquet_write_kms_error(tempdir, data_table, basic_encryption_config):
    """Write an encrypted parquet, but raise KeyError in KmsClient."""
    path = tempdir / 'encrypted_table_kms_error.in_mem.parquet'
    encryption_config = basic_encryption_config

    # The KMS connection carries an empty master_keys_map, so every
    # wrap/unwrap call inside the client raises KeyError.
    kms_connection_config = pe.KmsConnectionConfig()

    def kms_factory(kms_connection_configuration):
        return InMemoryKmsClient(kms_connection_configuration)

    crypto_factory = pe.CryptoFactory(kms_factory)

    # Writing with these encryption properties must surface the KeyError.
    with pytest.raises(KeyError, match="footer_key"):
        write_encrypted_parquet(path, data_table, encryption_config,
                                kms_connection_config, crypto_factory)
33,090
async def putStorBytes(app, key, data, filter_ops=None, bucket=None):
    """ Store byte string as S3 object with given key """
    client = _getStorageClient(app)
    if not bucket:
        bucket = app['bucket_name']
    if key[0] == '/':
        key = key[1:]  # no leading slash

    # Compression settings; overridden by filter_ops when provided.
    shuffle = -1  # auto-shuffle
    clevel = 5
    cname = None  # compressor name
    if filter_ops:
        cname = filter_ops.get("compressor", cname)
        if not filter_ops.get("use_shuffle", True):
            shuffle = 0  # client indicates to turn off shuffling
        clevel = filter_ops.get("level", clevel)

    msg = f"putStorBytes({bucket}/{key}), {len(data)} bytes shuffle: {shuffle}"
    msg += f" compressor: {cname} level: {clevel}"
    log.info(msg)

    if cname:
        try:
            blosc = codecs.Blosc(cname=cname, clevel=clevel, shuffle=shuffle)
            cdata = blosc.encode(data)
            msg = f"compressed from {len(data)} bytes to {len(cdata)} bytes "
            msg += f"using filter: {blosc.cname} with level: {blosc.clevel}"
            log.info(msg)
            data = cdata
        except Exception as e:
            log.error(f"got exception using blosc encoding: {e}")
            raise HTTPInternalServerError()

    return await client.put_object(key, data, bucket=bucket)
33,091
def fetch_block(folder, ind, full_output=False):
    """
    Fetch block number ``ind`` from a trajectory folder.

    Handles both "old style" trajectories (block1.dat) and "new style"
    ones (blocks_1-50.h5); used in the "show" files.

    Parameters
    ----------
    folder: str, folder with a trajectory
    ind: str or int, number of a block to fetch
    full_output: bool (default=False)
        If True, return a dict with positions, eP, eK, time, etc.;
        if False, return just the conformation (relevant only for
        new-style URIs).

    Returns
    -------
    data, Nx3 numpy array; if full_output is True, a dict with data and
    metadata (XYZ under key "pos")
    """
    h5_files = glob.glob(os.path.join(folder, "blocks*.h5"))
    dat_files = glob.glob(os.path.join(folder, "block*.dat"))
    ind = int(ind)

    if h5_files and dat_files:
        raise ValueError("both .h5 and .dat files found in folder - exiting")
    if not h5_files and not dat_files:
        raise ValueError("no blocks found")

    if h5_files:
        # File names look like blocks_<lo>-<hi>.h5; parse each range.
        ranges = [
            os.path.split(path)[-1].split("_")[-1].split(".")[0].split("-")
            for path in h5_files
        ]
        matches = [int(r[0]) <= ind <= int(r[1]) for r in ranges]
        if matches.count(True) == 0:
            raise ValueError(f"block {ind} not found in files")
        if matches.count(True) > 1:
            raise ValueError("Cannot find the file uniquely: names are wrong")
        block = load_URI(h5_files[matches.index(True)] + f"::{ind}")
        if not full_output:
            block = block["pos"]

    if dat_files:
        block = load(os.path.join(folder, f"block{ind}.dat"))

    return block
33,092
def unique_boxes(boxes, scale=1.0):
    """Return sorted indices of the unique boxes.

    Each box is reduced to a single hash via a dot product with powers of
    1000, so two boxes collide exactly when all four (scaled, rounded)
    coordinates match.
    """
    assert boxes.shape[1] == 4, 'Func doesnot support tubes yet'
    weights = np.array([1, 1e3, 1e6, 1e9])
    hashed = np.round(boxes * scale).dot(weights)
    _, first_index = np.unique(hashed, return_index=True)
    return np.sort(first_index)
33,093
def export_all_courses(exported_courses_folder):
    """
    Export all courses into specified folder.

    Runs the CMS `dump_course_ids` management command to list courses,
    then `export_olx` once per course, writing <course_id>.tar.gz files.

    Args:
        exported_courses_folder (str): The path of folder to export courses to.
    """
    try:
        course_list = subprocess.Popen(
            ['/edx/bin/python.edxapp',
             '/edx/app/edxapp/edx-platform/manage.py', 'cms',
             '--settings', 'production', 'dump_course_ids'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = course_list.communicate()
        for course_id in out.splitlines():
            course_id = course_id.decode('utf-8')
            logger.info("exporting course %s", course_id)
            export_course = subprocess.Popen(
                ['/edx/bin/python.edxapp',
                 '/edx/app/edxapp/edx-platform/manage.py', 'cms',
                 '--settings', 'production', 'export_olx', course_id,
                 '--output',
                 '{0}/{1}.tar.gz'.format(exported_courses_folder, course_id)])
            out, err = export_course.communicate()
    except ValueError as err:
        # Bug fix: `err` was previously passed as a positional logging
        # argument with no matching %-placeholder, so the actual error
        # was silently dropped from the log message.
        logger.error(
            "The following error was encountered when exporting courses: %s",
            err)
33,094
def dataloader(loader, mode):
    """Sets batchsize and repeat for the train, valid, and test iterators.

    Args:
        loader: tfds.load instance, a train, valid, or test iterator.
        mode: string, 'train' enables shuffling and endless repeat; any
            other value leaves the data in order with a single pass.

    Returns:
        An iterator for features and labels tensors.
    """
    loader = loader.map(process_images)
    if mode == 'train':
        repeat = None  # repeat indefinitely while training
        loader = loader.shuffle(1000 * FLAGS.batch_size)
    else:
        repeat = 1
    batched = loader.batch(FLAGS.batch_size)
    return batched.repeat(repeat).prefetch(tf.data.experimental.AUTOTUNE)
33,095
def sub_inplace(X, varX, Y, varY): """In-place subtraction with error propagation""" # Z = X - Y # varZ = varX + varY X -= Y varX += varY return X, varX
33,096
def gitlab_mngr_fixture(mock_config_file): """A pytest fixture that returns a GitLabRepositoryManager instance""" yield GitLabManager("https://test.repo.gigantum.com/", "https://test.gigantum.com/api/v1/", "fakeaccesstoken", "fakeidtoken")
33,097
def inv_logtransform(plog):
    """Transform the power spectrum of the log field into the power
    spectrum of the delta field.

    Inputs
    ------
    plog - power spectrum of log field computed at points on a Fourier grid

    Outputs
    -------
    p - power spectrum of the delta field
    """
    # Correlation function of the log field, then the lognormal mapping
    # xi = exp(xi_log) - 1, transformed back to a power spectrum.
    xi_log = np.fft.ifftn(plog)
    return np.fft.fftn(np.exp(xi_log) - 1).real.astype('float')
33,098
def ipv6_b85decode(encoded, _base85_ords=RFC1924_ORDS):
    """Decodes an RFC1924 Base-85 encoded string to its 128-bit unsigned
    integral representation. Used to base85-decode IPv6 addresses or
    128-bit chunks.

    Whitespace is ignored. Raises an ``OverflowError`` if stray
    characters are found.

    :param encoded: RFC1924 Base85-encoded string.
    :param _base85_ords: (Internal) Look up table.
    :returns: A 128-bit unsigned integer.
    """
    if not builtins.is_bytes(encoded):
        raise TypeError("Encoded sequence must be bytes: got %r" %
                        type(encoded).__name__)
    # Ignore whitespace.
    encoded = EMPTY_BYTE.join(encoded.split())
    if len(encoded) != 20:
        raise ValueError("Not 20 encoded bytes: %r" % encoded)
    # The straightforward implementation is:
    #   uint128 = 0
    #   for char in encoded:
    #       uint128 = uint128 * 85 + _base85_ords[byte_ord(char)]
    # but that costs a byte_ord call per character in a tight loop, so
    # the loop is unrolled below into four 5-tuple chunks.
    try:
        # chunk encoded[0:5]
        uint128 = ((((_base85_ords[encoded[0]] *
                    85 + _base85_ords[encoded[1]]) *
                    85 + _base85_ords[encoded[2]]) *
                    85 + _base85_ords[encoded[3]]) *
                    85 + _base85_ords[encoded[4]])
        # chunk encoded[5:10]
        uint128 = (((((uint128 *
                    85 + _base85_ords[encoded[5]]) *
                    85 + _base85_ords[encoded[6]]) *
                    85 + _base85_ords[encoded[7]]) *
                    85 + _base85_ords[encoded[8]]) *
                    85 + _base85_ords[encoded[9]])
        # chunk encoded[10:15]
        uint128 = (((((uint128 *
                    85 + _base85_ords[encoded[10]]) *
                    85 + _base85_ords[encoded[11]]) *
                    85 + _base85_ords[encoded[12]]) *
                    85 + _base85_ords[encoded[13]]) *
                    85 + _base85_ords[encoded[14]])
        # chunk encoded[15:20]
        uint128 = (((((uint128 *
                    85 + _base85_ords[encoded[15]]) *
                    85 + _base85_ords[encoded[16]]) *
                    85 + _base85_ords[encoded[17]]) *
                    85 + _base85_ords[encoded[18]]) *
                    85 + _base85_ords[encoded[19]])
    except KeyError:
        # Bug fix: the message was missing the closing backtick around
        # %r, inconsistent with the overflow message below.
        raise OverflowError("Cannot decode `%r` -- may contain stray "
                            "ASCII bytes" % encoded)
    if uint128 > UINT128_MAX:
        raise OverflowError("Cannot decode `%r` -- may contain stray "
                            "ASCII bytes" % encoded)
    return uint128
33,099