content (string, lengths 22 to 815k) · id (int64, 0 to 4.91M)
def draw_boxes_on_image(img, boxes, labels_index, labelmap_dict, **kwargs):
    """Draw bounding boxes (and optionally labels) on an image.

    Parameters
    ----------
    img : ndarray
        Input image.
    boxes : ndarray-like
        Must have shape (n, 4) where n is the number of bounding boxes.
    labels_index : ndarray-like
        An array containing the label index of each bounding box. If None,
        only bounding boxes will be drawn.
    labelmap_dict : dict
        A dictionary mapping labels to their indices.

    Returns
    -------
    img
        The annotated image.
    """
    # When no box is detected
    if boxes is None:
        return img
    try:
        boxes = convert(boxes, lambda x: np.asarray(x, dtype=np.int32), np.ndarray)
    except TypeError:
        raise_type_error(type(boxes), [np.ndarray])
    # When no box is detected
    if boxes.shape[0] == 0:
        return img
    # Check ndim before shape[1] so a 1-d input raises ValueError, not IndexError.
    if boxes.ndim != 2 or boxes.shape[1] != 4:
        raise ValueError("Input bounding box must be of shape (n, 4), "
                         "got shape {} instead".format(boxes.shape))
    else:
        return _draw_boxes_on_image(img, boxes, labels_index, labelmap_dict, **kwargs)
5,334,800
def guess_locations(location):
    """Convenience function to guess where other Strongholds are located."""
    location = Point(*location)
    return (location,
            rotate(location, CLOCKWISE),
            rotate(location, COUNTERCLOCKWISE))
5,334,801
def get_centroid(mol, conformer=-1):
    """Return the centroid of the molecule.

    Parameters
    ----------
    conformer : :class:`int`, optional
        The id of the conformer to use.

    Returns
    -------
    :class:`numpy.array`
        A numpy array holding the position of the centroid.
    """
    centroid = sum(x for _, x in all_atom_coords(mol, conformer))
    return np.divide(centroid, mol.GetNumAtoms())
5,334,802
def git_clone(target_dir, url, branch="master"):
    """Clone a project into the build/conanfile.name dir.

    This is important because the following file structure is based on it.

    :param target_dir: directory to clone into
    :param url: repository URL
    :param branch: branch to clone
    :return: None
    """
    os.system(f"git clone --depth 1 --single-branch --branch {branch} {url} {target_dir}")
5,334,803
def idct(X, norm=None):
    """
    The inverse of DCT-II, which is a scaled Discrete Cosine Transform, Type III.

    Our definition of idct is that idct(dct(x)) == x

    For the meaning of the parameter `norm`, see:
    https://docs.scipy.org/doc/scipy.fftpack.dct.html

    :param X: the input signal
    :param norm: the normalization, None or 'ortho'
    :return: the inverse DCT-II of the signal over the last dimension
    """
    x_shape = X.shape
    N = x_shape[-1]

    X_v = X.contiguous().view(-1, x_shape[-1]) / 2

    if norm == 'ortho':
        X_v[:, 0] *= np.sqrt(N) * 2
        X_v[:, 1:] *= np.sqrt(N / 2) * 2

    k = torch.arange(x_shape[-1], dtype=X.dtype, device=X.device)[None, :] * np.pi / (2 * N)
    W_r = torch.cos(k)
    W_i = torch.sin(k)

    V_t_r = X_v
    V_t_i = torch.cat([X_v[:, :1] * 0, -X_v.flip([1])[:, :-1]], dim=1)

    V_r = V_t_r * W_r - V_t_i * W_i
    V_i = V_t_r * W_i + V_t_i * W_r

    V = torch.cat([V_r.unsqueeze(2), V_i.unsqueeze(2)], dim=2)

    v = torch.irfft(V, 1, onesided=False)
    x = v.new_zeros(v.shape)
    x[:, ::2] += v[:, :N - (N // 2)]
    x[:, 1::2] += v.flip([1])[:, :N // 2]

    return x.view(*x_shape)
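A quick, hedged sanity check of the inverse property the docstring claims (idct(dct(x)) == x). This sketch uses scipy.fftpack as a reference implementation of the DCT-II/DCT-III pair rather than the torch code above (which depends on the legacy torch.irfft API), so it only illustrates the underlying transform relationship:

import numpy as np
from scipy.fftpack import dct, idct as scipy_idct

x = np.random.rand(8)
# DCT-III with 'ortho' normalization is the exact inverse of DCT-II.
assert np.allclose(scipy_idct(dct(x, norm='ortho'), norm='ortho'), x)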
5,334,804
def version(only_number: bool = False) -> None:
    """Show the version info of MocaSystem."""
    if only_number:
        mzk.tsecho(core.VERSION)
    else:
        mzk.tsecho(f'MocaVirtualDM ({core.VERSION})')
5,334,805
def div25():
    """
    Return divider25.

    :return: divider25
    """
    return divider25
5,334,806
def comp_periodicity(self, wind_mat=None):
    """Compute the winding matrix (anti-)periodicity.

    Parameters
    ----------
    self : Winding
        A Winding object
    wind_mat : ndarray
        Winding connection matrix

    Returns
    -------
    per_a: int
        Number of spatial periods of the winding
    is_aper_a: bool
        True if the winding is anti-periodic over space
    """
    if wind_mat is None:
        wind_mat = self.get_connection_mat()

    assert len(wind_mat.shape) == 4, "dim 4 expected for wind_mat"

    # Summing on all the layers (Nlay_r and Nlay_theta)
    wind_mat2 = squeeze(np_sum(np_sum(wind_mat, axis=1), axis=0))

    qs = wind_mat.shape[3]  # Number of phases
    Zs = wind_mat.shape[2]  # Number of slots

    Nperw = 1  # Number of electrical periods of the winding
    Nperslot = 1  # Periodicity of the winding in number of slots

    # Looking for the periodicity of each phase
    for q in range(0, qs):
        k = 1
        is_per = False
        while k <= Zs and not is_per:
            # We shift the array around the slots and check if it's the same
            if array_equal(wind_mat2[:, q], roll(wind_mat2[:, q], shift=k)):
                is_per = True
            else:
                k += 1
        # Least common multiple to find the common periodicity between phases
        Nperslot = lcm(Nperslot, k)

    # If Nperslot > Zs, no symmetry
    if Nperslot > 0 and Nperslot < Zs:
        # Number of periods of the winding (2 means 180°)
        Nperw = Zs / float(Nperslot)
        # If Zs cannot be divided by Nperslot (non integer)
        if Nperw % 1 != 0:
            Nperw = 1

    # Check for anti-symmetries in the elementary winding pattern
    if (Nperslot % 2 == 0
            and norm(wind_mat2[0:Nperslot // 2, :]
                     + wind_mat2[Nperslot // 2:Nperslot, :]) == 0):
        is_aper_a = True
        Nperw = Nperw * 2
    else:
        is_aper_a = False

    return int(Nperw), is_aper_a
5,334,807
def _is_valid_target(target, target_name, target_ports, is_pair):
    """Return True if the specified target is valid, False otherwise."""
    if is_pair:
        return (target[:utils.PORT_ID_LENGTH] in target_ports
                and target_name == _PAIR_TARGET_NAME)
    if (target[:utils.PORT_ID_LENGTH] not in target_ports
            or not target_name.startswith(utils.TARGET_PREFIX)
            or target_name == _PAIR_TARGET_NAME):
        return False
    return True
5,334,808
def _get_span_name(servicer_context):
    """Generate a span name based off of the gRPC server rpc_request_info."""
    method_name = servicer_context._rpc_event.call_details.method[1:]
    if isinstance(method_name, bytes):
        method_name = method_name.decode('utf-8')
    method_name = method_name.replace('/', '.')
    return '{}.{}'.format(RECV_PREFIX, method_name)
5,334,809
def save_flags(msfile, name, logger):
    """Save the current flag version as "name".

    Input:
    msfile = Path to the MS. (String)
    name = Root of filename for the flag version. (String)
    """
    logger.info('Saving flag version as: {}.'.format(name))
    command = "flagmanager(vis='{0}', mode='save', versionname='{1}')".format(msfile, name)
    logger.info('Executing command: ' + command)
    exec(command)
    logger.info('Completed saving flag version.')
5,334,810
def calculateNDFairnessPara(_ranking, _protected_group, _cut_point, _gf_measure,
                            _normalizer, items_n, proItems_n):
    """
    Calculate the group fairness value of the whole ranking.
    Calls function 'calculateFairness' in the calculation.

    :param _ranking: A permutation of N numbers (0..N-1) that represents a ranking
                     of N individuals, e.g., [0, 3, 5, 2, 1, 4]. Each number is an
                     identifier of an individual. Stored as a python array.
    :param _protected_group: A set of identifiers from _ranking that represent
                             members of the protected group, e.g., [0, 2, 3].
                             Stored as a python array for convenience; order
                             does not matter.
    :param _cut_point: Cut range for the calculation of group fairness,
                       e.g., 10, 20, 30, ...
    :param _gf_measure: Group fairness measure to be used in the calculation,
                        one of 'rKL', 'rND', 'rRD'.
    :param _normalizer: The normalizer of the input _gf_measure, computed
                        externally for efficiency.
    :param items_n: Total number of items, passed through to calculateFairness.
    :param proItems_n: Number of protected items, passed through to calculateFairness.
    :return: the fairness value of _ranking, a float, normalized to [0, 1].
    """
    # Make sure the externally computed normalizer is not 0.
    if _normalizer == 0:
        raise ValueError("Normalizer equals zero")

    # Error handling for input types
    if not isinstance(_ranking, (list, tuple, np.ndarray)) and not isinstance(_ranking, str):
        raise TypeError("Input ranking must be a list-wise structure defined by '[]' symbol")
    if not isinstance(_protected_group, (list, tuple, np.ndarray)) and not isinstance(_protected_group, str):
        raise TypeError("Input protected group must be a list-wise structure defined by '[]' symbol")
    if not isinstance(_cut_point, int):
        raise TypeError("Input batch size must be an integer larger than 0")
    if not isinstance(_normalizer, (int, float, complex)):
        raise TypeError("Input normalizer must be a number larger than 0")
    if not isinstance(_gf_measure, str):
        raise TypeError("Input group fairness measure must be a string chosen from ['rKL', 'rND', 'rRD']")

    discounted_gf = 0  # initialize the returned gf value
    for countni in range(len(_ranking)):
        countni = countni + 1
        if countni % _cut_point == 0:
            ranking_cutpoint = _ranking[0:countni]
            pro_cutpoint = set(ranking_cutpoint).intersection(_protected_group)
            gf = calculateFairness(ranking_cutpoint, pro_cutpoint,
                                   items_n, proItems_n, _gf_measure)
            discounted_gf += gf / (1.1 ** (countni - 10 / 1000))

    return discounted_gf / _normalizer
5,334,811
def getCategories(blog_id, username, password):
    """
    Parameters
        int blog_id
        string username
        string password

    Return Values
        array
            struct
                int categoryId
                int parentId
                string description
                string categoryName
                string htmlUrl
                string rssUrl

    example from wordpress.com:
    [{'categoryDescription': '',
      'categoryId': 1356,
      'categoryName': 'Blogroll',
      'description': 'Blogroll',
      'htmlUrl': 'https://rubelongfellow.wordpress.com/category/blogroll/',
      'parentId': 0,
      'rssUrl': 'https://rubelongfellow.wordpress.com/category/blogroll/feed/'},
     {'categoryDescription': '',
      'categoryId': 42431,
      'categoryName': 'Gearhead',
      'description': 'Gearhead',
      'htmlUrl': 'https://rubelongfellow.wordpress.com/category/gearhead/',
      'parentId': 0,
      'rssUrl': 'https://rubelongfellow.wordpress.com/category/gearhead/feed/'},
     {'categoryDescription': '',
      'categoryId': 1,
      'categoryName': 'Uncategorized',
      'description': 'Uncategorized',
      'htmlUrl': 'https://rubelongfellow.wordpress.com/category/uncategorized/',
      'parentId': 0,
      'rssUrl': 'https://rubelongfellow.wordpress.com/category/uncategorized/feed/'}]
    """
    logger.debug("%s.getCategories entered" % __name__)
    res = []
    user = get_user(username, password)
    blog = Blog.objects.get(pk=blog_id)
    check_perms(user, blog)
    logger.debug("getting categories for %s" % blog)
    for cat in Category.objects.filter(blog=blog):
        res.append({
            'categoryDescription': cat.description,
            'categoryId': cat.id,
            'categoryName': cat.title,
            'description': cat.description,
            'htmlUrl': cat.blog.get_absolute_url(),
            'parentId': 0,
            'rssUrl': os.path.join(cat.blog.get_absolute_url(), "feed"),
        })
    return res
5,334,812
def change_server(name: str = None, description: str = None, repo_url: str = None,
                  main_status: int = None, components: dict = None, password: str = None):
    """Change server according to arguments (using package config).

    This will automatically change the config so it has the right credentials.
    """
    check_config()
    global server_name
    global server_password
    payload = {"name": server_name, "password": server_password}
    if name is not None:
        if not isinstance(name, str):
            raise TypeError("name expected to be of type str.")
        payload["newName"] = name
    if description is not None:
        if not isinstance(description, str):
            raise TypeError("description expected to be of type str.")
        payload["description"] = description
    if repo_url is not None:
        if not isinstance(repo_url, str):
            raise TypeError("repo_url expected to be of type str.")
        payload["repoURL"] = repo_url
    if main_status is not None:
        if not isinstance(main_status, int):
            raise TypeError("main_status expected to be of type int.")
        payload["mainStatus"] = main_status
    if components is not None:
        if not isinstance(components, dict):
            raise TypeError("components expected to be of type dict.")
        payload["components"] = json.dumps(components)
    if password is not None:
        if not isinstance(password, str):
            raise TypeError("password expected to be of type str.")
        payload["newPassword"] = password
    try:
        r = requests.post(_url + "api/changeserver", json.dumps(payload), timeout=3.05)
        if r.status_code == 200:
            if name is not None:
                server_name = name
            if password is not None:
                server_password = password
            return True
        else:
            return (False, r.status_code, r.text)
    except requests.exceptions.ConnectTimeout:
        raise ConnectionTimeout
5,334,813
def tokenize(lines, token='word'):
    """Split text lines into word or character tokens."""
    if token == 'word':
        return [line.split() for line in lines]
    elif token == 'char':
        return [list(line) for line in lines]
    else:
        print('ERROR: unknown token type: ' + token)
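A small usage sketch for tokenize, assuming nothing beyond the function above:

lines = ['the quick brown fox', 'jumps over']
print(tokenize(lines))
# [['the', 'quick', 'brown', 'fox'], ['jumps', 'over']]
print(tokenize(lines, token='char')[1])
# ['j', 'u', 'm', 'p', 's', ' ', 'o', 'v', 'e', 'r']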
5,334,814
def get_versioned_persist(service):
    """Get a L{Persist} database with upgrade rules applied.

    Load a L{Persist} database for the given C{service} and upgrade or
    mark as current, as necessary.
    """
    persist = Persist(filename=service.persist_filename)
    upgrade_manager = UPGRADE_MANAGERS[service.service_name]
    if os.path.exists(service.persist_filename):
        upgrade_manager.apply(persist)
    else:
        upgrade_manager.initialize(persist)
    persist.save(service.persist_filename)
    return persist
5,334,815
def client_commands():
    """Client commands group."""
5,334,816
def gravatar(environ):
    """Generate a gravatar link."""
    email = environ.get('tank.user_info', {}).get('email', '')
    # hashlib.md5 requires bytes in Python 3, so encode the address first.
    return GRAVATAR % md5(email.lower().encode('utf-8')).hexdigest()
5,334,817
def _prepare_topomap(pos, ax, check_nonzero=True):
    """Prepare the topomap axis and check positions.

    Hides the axis frame and checks that position information is present.
    """
    _hide_frame(ax)
    if check_nonzero and not pos.any():
        raise RuntimeError('No position information found, cannot compute '
                           'geometries for topomap.')
5,334,818
def keyrep(kspec, enc="utf-8"):
    """Instantiate a Key given a set of keyword arguments.

    :param kspec: Key specification, arguments to the Key initialization
    :param enc: The encoding of the strings. If the specification comes from
        JSON (the default), the encoding is utf-8.
    :return: Key instance
    """
    if enc:
        _kwargs = {}
        for key, val in kspec.items():
            if isinstance(val, str):
                _kwargs[key] = val.encode(enc)
            else:
                _kwargs[key] = val
    else:
        _kwargs = kspec

    if kspec["kty"] == "RSA":
        item = RSAKey(**_kwargs)
    elif kspec["kty"] == "oct":
        item = SYMKey(**_kwargs)
    elif kspec["kty"] == "EC":
        item = ECKey(**_kwargs)
    else:
        item = Key(**_kwargs)
    return item
5,334,819
def aa2matrix(axis, angle, radians=True, random=False):
    """
    Given an axis and an angle, return a 3x3 rotation matrix.
    Based on: https://en.wikipedia.org/wiki/Rotation_matrix#Axis_and_angle

    Args:
        axis: a vector about which to perform a rotation
        angle: the angle of rotation
        radians: whether the supplied angle is in radians (True) or in degrees (False)
        random: whether or not to choose a random rotation matrix. If True, the
            axis and angle are ignored, and a random orientation is generated

    Returns:
        a 3x3 numpy array representing a rotation matrix
    """
    # Convert to radians if necessary
    if radians is not True:
        angle *= rad
    # Allow for generation of random rotations
    if random is True:
        axis = [rand(), rand(), rand()]
        angle = rand() * pi * 2
    # Ensure axis is a unit vector
    axis = axis / np.linalg.norm(axis)
    # Define quantities which are reused
    x = np.real(axis[0])
    y = np.real(axis[1])
    z = np.real(axis[2])
    c = math.cos(angle)
    s = math.sin(angle)
    C = 1 - c
    # Define the rotation matrix
    Q = np.zeros([3, 3])
    Q[0][0] = x * x * C + c
    Q[0][1] = x * y * C - z * s
    Q[0][2] = x * z * C + y * s
    Q[1][0] = y * x * C + z * s
    Q[1][1] = y * y * C + c
    Q[1][2] = y * z * C - x * s
    Q[2][0] = z * x * C - y * s
    Q[2][1] = z * y * C + x * s
    Q[2][2] = z * z * C + c
    return Q
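A minimal usage sketch for aa2matrix, assuming the module imports numpy as np (as the body does). Rotating the x-axis by 90 degrees about z should land on the y-axis:

import numpy as np

Q = aa2matrix([0, 0, 1], np.pi / 2)
print(np.round(Q @ np.array([1.0, 0.0, 0.0]), 6))  # -> [0. 1. 0.]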
5,334,820
def expand_value_range(value_range_expression):
    """Expand the value range expression.

    Args:
        value_range_expression: Value range or expression to expand.

    Return:
        iterable.
    """
    if type(value_range_expression) is str:
        # Grid search
        if value_range_expression.startswith('np.arange'):
            value_range_expression = arange(value_range_expression)
        # Random search
        elif value_range_expression.startswith('np.random'):
            raise NotImplementedError('Random search space '
                                      'not implemented yet')
    # If not an iterable, make it an iterable
    try:
        iter(value_range_expression)
    except TypeError:
        value_range_expression = [value_range_expression]
    return value_range_expression
5,334,821
def date_range(df):
    """Take the dataframe and return its date range.

    Example here:
    http://pandas.pydata.org/pandas-docs/stable/timeseries.html

    Returns as Days.
    """
    start_date = df.tail(1)['date']
    start = pd.Timestamp.date(list(start_date.to_dict().values())[0])
    end_date = df.head(1)['date']
    end = pd.Timestamp.date(list(end_date.to_dict().values())[0])
    rng = pd.date_range(start, end)
    return rng
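A hedged usage sketch for date_range. It builds a toy frame with the newest row first, matching the head()/tail() convention the function assumes:

import pandas as pd

df = pd.DataFrame({'date': pd.to_datetime(['2020-01-03', '2020-01-02', '2020-01-01'])})
print(date_range(df))
# DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03'], dtype='datetime64[ns]', freq='D')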
5,334,822
def last_hit_timestamp(hit_count_rules, month):
    """Get the list of rules whose last hit is older than the given number of months.

    :param hit_count_rules: dictionary containing the json response with all hit count rules
    :param month: number of months elapsed since the rule was last triggered
    :return: list of rules older than the value in param month
             (contains rule name, id, type and access policy name)
    """
    rule = []
    for i in hit_count_rules:
        last_refresh = datetime.datetime.strptime(i["lastFetchTimeStamp"], '%Y-%m-%dT%H:%M:%SZ')
        # Approximate a month as 365/12 days.
        limit = last_refresh - datetime.timedelta(month * 365 / 12)
        if i["lastHitTimeStamp"] != " ":
            last_hit = datetime.datetime.strptime(i["lastHitTimeStamp"], '%Y-%m-%dT%H:%M:%SZ')
            if last_hit < limit:
                rule.append(i["rule"])
    return rule
5,334,823
def compare_gatenum_distribution(path, constraint):
    """Compare the gate number distribution in each run."""
    bm_path = path + 'runs/benchmark/electronic-circuits/md5Core/'

    # Load graph and metis partition
    edgelist = bm_path + '/DAG.edgelist'
    G = load_graph(edgelist)
    in_nodes, out_nodes, nonprimitives = gp.get_nonprimitive_nodes(G)
    G_primitive = gp.get_G_primitive(G, nonprimitives)

    nparts = [47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60]
    GateNumDict = {}

    f_out = open(path + 'runs/results/electronic-circuits/md5Core/nparts-run2/'
                 'Compare Runs Gate Number Distribution.txt', 'w')
    f_out.write('Start cell number\tIteration\tEnd cell number\t'
                'Gate Number Per Cell\tGate Number Distribution\n')

    for idx, npart in enumerate(nparts):
        print(npart)
        sol_path = path + 'runs/results/electronic-circuits/md5Core/nparts-run2/' + str(npart)
        part_sol = sol_path + '/part_solns.txt'
        cut, partDict = gp.load_metis_part_sol(part_sol)

        opt_file = sol_path + '/optimized_lc/part_solns.txt'
        if os.path.exists(opt_file):
            timeList = load_timestep(opt_file)
            if timeList != []:
                solDict = gp.load_opt_part_sol(opt_file)
                for i_idx, iteration in enumerate(solDict.keys()):
                    part = solDict[iteration]['part']
                    endN = len(part.keys())
                    gatecounts = get_gatenum_distribution(part)
                    x = list(gatecounts.keys())
                    y = list(gatecounts.values())
                    # Join the distribution values as comma-separated strings
                    # (the original wrote a generator repr here by mistake).
                    f_out.write('\t'.join([str(npart), str(iteration), str(endN),
                                           ','.join(str(v) for v in x),
                                           ','.join(str(v) for v in y)]) + '\n')
                    if endN not in GateNumDict:
                        GateNumDict[endN] = {}
                        GateNumDict[endN][1] = gatecounts
                    else:
                        GateNumDict[endN][max(GateNumDict[endN].keys()) + 1] = gatecounts

    maxIter = max([max(GateNumDict[N].keys()) for N in GateNumDict])
    colors = sns.color_palette("husl", maxIter)

    fig = plt.figure(figsize=(10, 3))
    for idx, N in enumerate(sorted(GateNumDict), 1):
        print(idx, N)
        ax = fig.add_subplot(2, 6, idx)
        for i, itr in enumerate(sorted(GateNumDict[N])):
            x = GateNumDict[N][itr].keys()
            y = normalize_data(GateNumDict[N][itr].values())
            c = colors[i]
            ax.plot(x, y, marker='o', markersize=3, c=c, label=itr)
        ax.set_title(str(N))
        ax.set_xlim([0, 6])
        ax.set_ylim([-0.1, 1.1])
        if idx != 7:
            ax.set_xticks([])
            ax.set_yticks([])
        else:
            ax.set_xticks([1, 2, 3, 4, 5])

    fig.subplots_adjust(hspace=0.3)
    plt.savefig(path + 'runs/results/electronic-circuits/md5Core/nparts-run2/'
                'Compare Runs Gate Number Distribution.pdf', dpi=200)
    plt.show()
5,334,824
def url(should_be=None):
    """Like the default ``url()``, but can be called without arguments,
    in which case it returns the current url.
    """
    if should_be is None:
        return get_browser().get_url()
    else:
        return twill.commands.url(should_be)
5,334,825
def propmap(numframe, denomframe, numdim, denomdim, numcause, denomcause,
            startage, endage, sex, shapefname, percfunc=threep, mean=False):
    """Draw a map with percentiles of deaths of one cause relative to another."""
    plt.close()
    ages = ageslice(startage, endage, mean)
    agealias = ages['alias']
    agelist = ages['agelist']
    sexalias = numdim['Kon']['category']['label'][sex]
    numcausealias = causealias(numcause, numdim)
    denomcausealias = causealias(denomcause, denomdim)
    startyear = min(numframe.Tid)
    endyear = max(numframe.Tid)
    region_shp = shpreader.Reader(shapefname)
    propdict = prop_reggrp(numframe, numcause, denomframe, denomcause,
                           sex, agelist, mean)
    prop = propdict['prop']
    regvalues = propdict['regvalues']
    units = list(map(scb_to_unit, regvalues))
    regdict = dict(zip(units, regvalues))
    percentiles = percfunc(prop)
    ax = plt.axes(projection=ccrs.TransverseMercator())
    boundlist = []
    for region_rec in region_shp.records():
        regcode = region_rec.attributes['G_UNIT']
        regend = region_rec.attributes['GET_END_YE']
        if regcode in regdict.keys() and regend > 1995:
            i = regvalues.index(regdict[regcode])
            boundlist.append(region_rec.bounds)
            for percentile in percentiles:
                if prop[i] <= percentile['value']:
                    facecolor = percentile['col']
                    break
            ax.add_geometries([region_rec.geometry], ccrs.TransverseMercator(),
                              edgecolor='black', facecolor=facecolor)
    xmin = min([bound[0] for bound in boundlist])
    xmax = max([bound[2] for bound in boundlist])
    ymin = min([bound[1] for bound in boundlist])
    ymax = max([bound[3] for bound in boundlist])
    ax.set_xlim(xmin, xmax)
    ax.set_ylim(ymin, ymax)
    percpatches = []
    perclabels = []
    for i, percentile in enumerate(percentiles):
        percpatch = mpatches.Rectangle((0, 0), 1, 1, facecolor=percentile['col'])
        percpatches.append(percpatch)
        if i == 0:
            perclabel = str('\u2265' + perc_round(min(prop))
                            + '\n\u2264' + perc_round(percentile['value']))
        else:
            perclabel = '\u2264' + perc_round(percentile['value'])
        perclabels.append(perclabel)
    plt.legend(percpatches, perclabels, loc='upper left',
               framealpha=0.75, bbox_to_anchor=(1, 1))
    plt.title('Döda {numcausealias}/{denomcausealias}\n'
              '{sexalias} {agealias} {startyear}\u2013{endyear}'.format(**locals()))
    plt.show()
5,334,826
def test_order_book_can_cancel_sell():
    """Here there are more asks than bids, so the bids will fill."""
    instrument_id = "AAPL"
    quantity = 100
    price = 10
    limit_orders = [LimitOrder(instrument_id=instrument_id,
                               order_direction=OrderDirection.buy if i % 2 else OrderDirection.sell,
                               quantity=quantity - 10 * i,
                               price=price + (i if i % 2 else -i))
                    for i in range(10)]
    for i, l in enumerate(limit_orders):
        l.order_id = i
        limit_orders[i] = l

    order_book = OrderBook()
    for order in limit_orders:
        order_book.add_order(order)

    cancel_order = CancelOrder(instrument_id=instrument_id, order_id=0,
                               order_direction=OrderDirection.sell)
    order_book.match()
    order_book.add_order(cancel_order)

    assert not order_book.asks, "Test Failed: There should be no asks after this matching"
    assert order_book.best_ask is None, "Test Failed: best_ask should be empty"
    assert not order_book.bids, "Test Failed: There should be no bids after this matching"
    assert order_book.best_bid is None, "Test Failed: best_bid should be empty"
    assert len(order_book.trades) > 5, "Test Failed: there should be more than 5 trades"
    assert len(order_book.complete_orders) == 10, "Test Failed: complete_orders should have 10 orders"
    assert cancel_order.cancel_success, "Test Failed: cancel should succeed"
5,334,827
def remove_outliers(X_train, y_train):
    """Delete outliers from the given numpy arrays and return clean versions of them.

    Parameters
    ----------
    X_train: dataset with k features to remove outliers from
    y_train: dataset with k features to remove outliers from
    """
    clf = LocalOutlierFactor(n_neighbors=2)
    out1 = clf.fit_predict(X_train)
    out2 = clf.fit_predict(y_train)
    # fit_predict returns 1 for inliers and -1 for outliers; a row is kept
    # only if it is an inlier in both arrays (sum == 2).
    indexes = np.argwhere(out1 + out2 != 2)
    X_train = np.delete(X_train, indexes, axis=0)
    y_train = np.delete(y_train, indexes, axis=0)
    return X_train, y_train
5,334,828
def query_data(regions, filepath_nl, filepath_lc, filepath_pop):
    """Query raster layer for each shape in regions."""
    shapes = []
    csv_data = []

    for region in tqdm(regions):
        geom = shape(region['geometry'])
        population = get_population(geom, filepath_pop)
        pop_density_km2, area_km2 = get_density(geom, population, 'epsg:4326', 'epsg:3857')

        shapes.append({
            'type': region['type'],
            'geometry': mapping(geom),
            # 'id': region['id'],
            'properties': {
                'population': population,
                'pop_density_km2': pop_density_km2,
                'area_km2': area_km2,
                'geotype': define_geotype(pop_density_km2),
                'GID_2': region['properties']['GID_2'],
                'GID_3': region['properties']['GID_3'],
            }
        })

        csv_data.append({
            'population': population,
            'pop_density_km2': pop_density_km2,
            'area_km2': area_km2,
            'geotype': define_geotype(pop_density_km2),
            'GID_2': region['properties']['GID_2'],
            'GID_3': region['properties']['GID_3'],
        })

    return shapes, csv_data
5,334,829
def scrape_urls(html_text, pattern):
    """Extract URLs from raw html based on a regex pattern."""
    soup = BeautifulSoup(html_text, "html.parser")
    anchors = soup.find_all("a")
    urls = [a.get("href") for a in anchors]
    return [url for url in urls if re.match(pattern, url) is not None]
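A minimal usage sketch for scrape_urls, assuming the module's own imports (BeautifulSoup from bs4, and re); the URLs are made up for illustration:

html = '<a href="https://example.com/post/1">one</a><a href="https://other.org/x">two</a>'
print(scrape_urls(html, r"https://example\.com/post/"))
# ['https://example.com/post/1']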
5,334,830
def get_num_weight_from_name(model: nn.Module, names: List[str]) -> List[int]:
    """Get the list of numbers of weights from a list of module names."""
    numels = []
    for n in names:
        module = multi_getattr(model, n)
        num_weights = module.weight.numel()
        numels.append(num_weights)
    return numels
5,334,831
def tokenizeGenePredStream(genePredStream):
    """Iterate through a gene pred file, yielding lines as lists of tokens."""
    for line in genePredStream:
        if line != '':
            tokens = line.split("\t")
            # str.rstrip() returns a new string, so assign it back.
            tokens[-1] = tokens[-1].rstrip()
            yield tokens
5,334,832
def _categories_level(keys):
    """Use the OrderedDict to implement a simple ordered set.

    Return each level of each category:
    [[key_1_level_1, key_2_level_1], [key_1_level_2, key_2_level_2]]
    """
    res = []
    for i in zip(*keys):
        tuplefied = _tuplify(i)
        res.append(list(OrderedDict([(j, None) for j in tuplefied])))
    return res
5,334,833
def to_usd(my_price):
    """Convert a numeric value to a usd-formatted string, for printing and display purposes.

    Param: my_price (int or float) like 4000.444444

    Example: to_usd(4000.444444)

    Returns: $4,000.44
    """
    return f"${my_price:,.2f}"
5,334,834
def test_init_dictionary_initial_D_init(X, D_init, solver_d, window, uv_constraint, shape):
    """Test that init_dictionary does what is expected when rank1 is False
    and an initial D_init is provided."""
    d_solver = get_solver_d(N_CHANNELS, N_ATOMS, N_TIMES_ATOM,
                            solver_d=solver_d,
                            uv_constraint=uv_constraint,
                            rank1=False,
                            window=window,
                            D_init=D_init,
                            random_state=42)
    assert d_solver is not None

    D_hat = d_solver.init_dictionary(X)
    assert D_hat is not None
    assert D_hat.shape == shape

    D_init = prox_d(D_init)
    assert_allclose(D_hat, D_init)
    assert id(D_hat) != id(D_init)
5,334,835
def computeNodeDerivativeHermiteLagrange(cache, coordinates, node1, derivative1, scale1, node2, scale2):
    """Compute the derivative at node2 from quadratic Hermite-Lagrange interpolation
    of node1 value and derivative1 to node2 value.

    :param cache: Field cache to evaluate in.
    :param coordinates: Coordinates field.
    :param node1, node2: Start and end nodes.
    :param derivative1: Node value label for derivative at node1.
    :param scale1, scale2: Scaling to apply to derivatives at nodes, e.g. -1.0 to reverse.
    :return: dx_dxi at node2
    """
    cache.setNode(node1)
    result, v1 = coordinates.getNodeParameters(cache, -1, Node.VALUE_LABEL_VALUE, 1, 3)
    result, d1 = coordinates.getNodeParameters(cache, -1, derivative1, 1, 3)
    d1 = [d * scale1 for d in d1]
    cache.setNode(node2)
    result, v2 = coordinates.getNodeParameters(cache, -1, Node.VALUE_LABEL_VALUE, 1, 3)
    d2 = interpolateHermiteLagrangeDerivative(v1, d1, v2, 1.0)
    d2 = [d * scale2 for d in d2]
    return d2
5,334,836
def _step5(state):
    """
    Construct a series of alternating primed and starred zeros as follows.

    Let Z0 represent the uncovered primed zero found in Step 4. Let Z1 denote
    the starred zero in the column of Z0 (if any). Let Z2 denote the primed
    zero in the row of Z1 (there will always be one). Continue until the
    series terminates at a primed zero that has no starred zero in its column.
    Unstar each starred zero of the series, star each primed zero of the
    series, erase all primes and uncover every line in the matrix. Return to
    Step 3.
    """
    count = 0
    path = state.path
    path[count, 0] = state.Z0_r
    path[count, 1] = state.Z0_c

    while True:
        # Find the first starred element in the col defined by the path.
        row = np.argmax(state.marked[:, path[count, 1]] == 1)
        if not state.marked[row, path[count, 1]] == 1:
            # Could not find one
            break
        else:
            count += 1
            path[count, 0] = row
            path[count, 1] = path[count - 1, 1]

        # Find the first prime element in the row defined by the first path step
        col = np.argmax(state.marked[path[count, 0]] == 2)
        if state.marked[row, col] != 2:
            col = -1
        count += 1
        path[count, 0] = path[count - 1, 0]
        path[count, 1] = col

    # Convert paths
    for i in range(count + 1):
        if state.marked[path[i, 0], path[i, 1]] == 1:
            state.marked[path[i, 0], path[i, 1]] = 0
        else:
            state.marked[path[i, 0], path[i, 1]] = 1

    state._clear_covers()
    # Erase all prime markings
    state.marked[state.marked == 2] = 0
    return _step3
5,334,837
def download():
    """Download data from zenodo.

    The NeonTreeEvaluation benchmark consists of two parts:
    1) package code to run evaluation workflows
    2) evaluation data.

    Evaluation data is ~ 2GB in size and will be downloaded to package contents.
    """
    basedir = os.path.dirname(os.path.dirname(__file__))
    datadir = "{}/{}".format(basedir, "data")
    print("Downloading data files to {}".format(datadir))
    eval_url = zenodo_url(concept_rec_id="3723356", datadir=datadir)
5,334,838
def test_support_created_case_can_be_described_without_params():
    """On creating a support request it can be described."""
    client = boto3.client("support", "us-east-1")

    describe_cases_response = client.describe_cases()
    describe_cases_response["cases"].should.equal([])

    client.create_case(
        subject="test_subject",
        serviceCode="test_service_code",
        severityCode="low",
        categoryCode="test_category_code",
        communicationBody="test_communication_body",
        ccEmailAddresses=["test_email_cc"],
        language="test_language",
        issueType="test_issue_type",
        attachmentSetId="test_attachment_set_id",
    )

    describe_cases_response = client.describe_cases()
    describe_cases_response["cases"].should.have.length_of(1)
5,334,839
def encode_ascii_xml_array(data):
    """Encode an array-like container of strings as fixed-length 7-bit ASCII,
    with XML-encoding for characters outside of 7-bit ASCII.
    """
    if isinstance(data, np.ndarray) and \
            data.dtype.char == STR_DTYPE_CHAR and \
            data.dtype.itemsize > 0:
        return data
    convert = lambda s: encode_ascii_xml(s) if s is not None else ''
    # Materialize as a list: in Python 3, map() returns a one-shot iterator,
    # which the max() below would exhaust before np.array sees the data.
    ascii_data = [convert(s) for s in data]
    fixed_len = max(len(s) for s in ascii_data)
    fixed_len = max(1, fixed_len)
    dtype = '%s%d' % (STR_DTYPE_CHAR, fixed_len)
    return np.array(ascii_data, dtype=dtype)
5,334,840
def test_ap_wps_auto_setup_with_config_file(dev, apdev):
    """WPS auto-setup with configuration file"""
    conffile = "/tmp/ap_wps_auto_setup_with_config_file.conf"
    ifname = apdev[0]['ifname']
    try:
        with open(conffile, "w") as f:
            f.write("driver=nl80211\n")
            f.write("hw_mode=g\n")
            f.write("channel=1\n")
            f.write("ieee80211n=1\n")
            f.write("interface=%s\n" % ifname)
            f.write("ctrl_interface=/var/run/hostapd\n")
            f.write("ssid=wps\n")
            f.write("eap_server=1\n")
            f.write("wps_state=1\n")
        hostapd.add_bss('phy3', ifname, conffile)
        hapd = hostapd.Hostapd(ifname)
        hapd.request("WPS_PBC")
        dev[0].scan_for_bss(apdev[0]['bssid'], freq="2412")
        dev[0].request("WPS_PBC " + apdev[0]['bssid'])
        dev[0].wait_connected(timeout=30)
        with open(conffile, "r") as f:
            lines = f.read().splitlines()
        vals = dict()
        for l in lines:
            try:
                [name, value] = l.split('=', 1)
                vals[name] = value
            except ValueError:
                # The Python 2 `except ValueError, e:` syntax was modernized here.
                if "# WPS configuration" in l:
                    pass
                else:
                    raise Exception("Unexpected configuration line: " + l)
        if vals['ieee80211n'] != '1' or vals['wps_state'] != '2' or \
           "WPA-PSK" not in vals['wpa_key_mgmt']:
            raise Exception("Incorrect configuration: " + str(vals))
    finally:
        try:
            os.remove(conffile)
        except OSError:
            pass
5,334,841
def download_dataset(
    period: str,
    output_dir: Union[Path, str],
    fewer_threads: bool,
    datasets_path: Optional[Union[Path, str]] = None,
) -> List[Path]:
    """Download files from the given dataset with the provided selections.

    Args:
        period: Name of the period to be downloaded.
        output_dir: Path to where the data should be stored.
        fewer_threads: If True, reduce the number of threads by half.
        datasets_path: Path to the dataset configuration file. Default: None, in
            which case the files will be taken from those defined in the package.

    Returns:
        The files stored corresponding to this period.
    """
    # Validation
    output_dir = Path(output_dir)
    if datasets_path:
        datasets_path = Path(datasets_path)

    # Setup the dataset
    dataset = _extract_dataset_from_yaml(period=period, datasets_path=datasets_path)

    # Setup
    q: FilePairQueue = queue.Queue()
    queue_filler = DatasetDownloadFiller(
        dataset=dataset, output_dir=output_dir, q=q,
    )
    download(queue_filler=queue_filler, q=q, fewer_threads=fewer_threads)

    # Return the files that are stored corresponding to this period.
    period_specific_dir = output_dir / dataset.data_type / str(dataset.year) / dataset.period
    period_files = sorted(Path(period_specific_dir).glob(f"**/{dataset.filename}"))
    logger.info(f"period_specific_dir: {period_specific_dir}, number of files: {len(period_files)}")

    # Write out the file list
    filelist = Path(output_dir) / "filelists" / f"{dataset.period}{dataset.file_type}.txt"
    filelist.parent.mkdir(exist_ok=True, parents=True)
    # Add the suffix to access the ROOT file if it's contained in a zip archive.
    suffix = ""
    if ".zip" in dataset.filename:
        suffix = "#AliAOD.root" if dataset.file_type == "AOD" else "#AliESDs.root"
    with open(filelist, "w") as f:
        # One file per line.
        f.write("\n".join([f"{p}{suffix}" for p in period_files]))

    return period_files
5,334,842
def test_say_trailing(bot):
    """Test optional trailing string."""
    text = '"This is a test quote.'
    bot.say(text, '#sopel', trailing='"')
    assert bot.backend.message_sent == rawlist(
        # combined
        'PRIVMSG #sopel :%s' % text + '"'
    )
5,334,843
def monospaced(fields, context):
    """Make text monospaced.

    In HTML: use <code> or <pre> tags
    In Markdown: use backticks
    In Text: use Unicode characters
    """
    content = fields[0]
    target = context['target']
    if target == 'md':
        return wrapper('`')([content], context)
    if target == 'html':
        multiline = False
        for chunk in content:
            if type(chunk) is str and '\n' in chunk:
                multiline = True
                break
        if multiline:
            tag = 'pre'
        else:
            tag = 'code'
        return taggifier(tag)([content], context)
    if target == 'txt':
        return keymapper('monospaced')([content], context)
5,334,844
def indexview(request):
    """Initial page: shows all the domains in columns."""
    domdb = Domain.objects
    if not request.user.has_perm('editapp.see_all'):
        # only see mine
        domdb = domdb.filter(owner__username=request.user.username)
    domains = [d.domain for d in domdb.order_by('domain')]
    # show in four columns, so slice into four arrays
    dslice = int((len(domains) + 3) / 4)
    c1, c2, c3, c4 = [[d for d in domains[n * dslice:(n + 1) * dslice]] for n in range(4)]
    return render(request, 'editapp/index.html', {
        'c1': c1, 'c2': c2, 'c3': c3, 'c4': c4,
        'bpnav': bpnav(request, 'index')
    })
5,334,845
def print_helper(base_str, dbg):
    """Print helper: tests the dbg type and takes the correct print action."""
    print_dbg = test_dbg(dbg)
    if print_dbg:
        if isinstance(dbg, bool):
            print(" ".join([dt.datetime.now().strftime("%Y/%m/%d %H:%M:%S"), base_str]))
        else:
            dbg.write(base_str)
5,334,846
def _search_solutions(snake: Snake, board: Board, depth: int) -> None:
    """Recursive function used to perform the backtracking algorithm.

    Args:
        snake: representation of the positions of a snake in the board.
        board: two-dimensional space where the snake will be moving.
        depth: number of movements of the snake.
    """
    global solutions
    match depth:
        case 0:
            # Base case: all movements have been completed
            solutions += 1
            return
        case _:
            # There are still movements to check
            # UP
            if _is_valid_snake((snake_moved := _move_snake(snake, SnakeMovementDirection.UP)), board):
                _search_solutions(snake_moved, board, depth - 1)  # Recursive call
            # RIGHT
            if _is_valid_snake((snake_moved := _move_snake(snake, SnakeMovementDirection.RIGHT)), board):
                _search_solutions(snake_moved, board, depth - 1)  # Recursive call
            # DOWN
            if _is_valid_snake((snake_moved := _move_snake(snake, SnakeMovementDirection.DOWN)), board):
                _search_solutions(snake_moved, board, depth - 1)  # Recursive call
            # LEFT
            if _is_valid_snake((snake_moved := _move_snake(snake, SnakeMovementDirection.LEFT)), board):
                _search_solutions(snake_moved, board, depth - 1)  # Recursive call
            return
5,334,847
def MC_swap(alloy, N, E, T):
    """
    Randomly select an atom and one of its neighbours in a matrix and calculate
    the change in energy if the two atoms were swapped.

    The following assignment is used to represent the neighbouring directions:
    1 = up
    2 = right
    3 = down
    4 = left
    """
    kT = 8.617332e-5 * T
    random_atom = np.random.randint(0, N, 2)
    atom1 = alloy[random_atom[0], random_atom[1]]
    random_neighbour = np.random.randint(1, 5, 1)
    # Select the appropriate neighbour
    if random_neighbour == 1:
        row2 = (random_atom[0] - 2) % N
        column2 = random_atom[1]
    elif random_neighbour == 2:
        row2 = random_atom[0]
        column2 = (random_atom[1]) % N
    elif random_neighbour == 3:
        row2 = (random_atom[0]) % N
        column2 = random_atom[1]
    else:
        row2 = random_atom[0]
        # Note: the original indexed random_atom[0] here, which looks like a
        # typo; the left neighbour should shift the column index.
        column2 = (random_atom[1] - 2) % N
    atom2 = alloy[row2, column2]
    if atom1 == atom2:
        e = 0
    else:
        # Need to calculate the energy before and after atoms one and two swap
        # Atom 1
        up1 = (random_atom[0] - 2) % N
        down1 = (random_atom[0] % N)
        left1 = (random_atom[1] - 2) % N
        right1 = (random_atom[1] % N)
        # Atom 2
        up2 = (row2 - 2) % N
        down2 = (row2 % N)
        left2 = (column2 - 2) % N
        right2 = (column2 % N)
        # Change in energy
        Bonds1 = (alloy[down1, random_atom[1]] + alloy[up1, random_atom[1]]
                  + alloy[random_atom[0], right1] + alloy[random_atom[0], left1])
        Bonds2 = (alloy[down2, column2] + alloy[up2, column2]
                  + alloy[row2, right2] + alloy[row2, left2])
        # Count number of A-B bonds for atoms 1 and 2
        if atom1 == 0:
            Initial1 = Bonds1
            End1 = 4 - Bonds1
            Initial2 = 4 - Bonds2
            End2 = Bonds2
        else:
            Initial1 = 4 - Bonds1
            End1 = Bonds1
            Initial2 = Bonds2
            End2 = 4 - Bonds2
        e = E * (End1 + End2 - Initial1 - Initial2)  # Energy difference for swapping atoms
    # Swap the atoms if there is enough energy to do so
    if e < 0:
        alloy[random_atom[0], random_atom[1]] = atom2
        alloy[row2, column2] = atom1
    elif np.exp(-e / kT) > np.random.uniform(0, 1):
        alloy[random_atom[0], random_atom[1]] = atom2
        alloy[row2, column2] = atom1
    return alloy
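A hedged driver sketch for MC_swap: a random binary alloy with many attempted swaps. The lattice size, interaction energy, and temperature below are made-up illustration values, not ones from the source:

import numpy as np

N, E, T = 20, 0.01, 300.0                      # hypothetical parameters
alloy = np.random.randint(0, 2, size=(N, N))   # random A/B occupancy
for _ in range(10000):                         # attempted Monte Carlo swaps
    alloy = MC_swap(alloy, N, E, T)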
5,334,848
def _try_type(value, dtype):
    """
    Examples
    --------
    >>> _try_type("1", int)
    1
    >>> _try_type(1.0, int)
    1
    >>> _try_type("ab", float)
    'ab'
    """
    try:
        return dtype(value)
    except ValueError:
        return value
5,334,849
def get_all_codes(date=None):
    """Get the list of all stock codes for a trading day.

    If no date is given, search backwards from the current date until a day
    with data is found, and return the stock code list of that trading day.

    :param date: date
    :return: list of stock codes
    """
    datetime_obj = datetime.now()
    if date is None:
        date = datetime_obj.strftime('%Y-%m-%d')
    codes = []
    while len(codes) == 0:
        code_cursor = DB_CONN.basic.find(
            {'date': date},
            projection={'code': True, '_id': False})
        codes = [x['code'] for x in code_cursor]
        datetime_obj = datetime_obj - timedelta(days=1)
        date = datetime_obj.strftime('%Y-%m-%d')
    return codes
5,334,850
def cluster_config(request_data, op_ctx: ctx.OperationContext):
    """Request handler for cluster config operation.

    Required data: cluster_name
    Optional data and default values: org_name=None, ovdc_name=None
    (data validation handled in broker)

    :return: Dict
    """
    _raise_error_if_pks_not_enabled()
    cluster, broker = _get_cluster_info(request_data, op_ctx, telemetry=False)  # noqa: E501
    telemetry_handler.record_user_action_details(
        cse_operation=CseOperation.PKS_CLUSTER_CONFIG,
        cse_params=_get_telemetry_data(request_data, cluster))
    return broker.get_cluster_config(data=request_data)
5,334,851
def compute_pad_value(input_dir, list_IDs):
    """Compute the minimum pixel intensity of the entire dataset for the pad value
    (if it's not 0).

    Args:
        input_dir: directory of input images
        list_IDs: list of filenames
    """
    print("Computing min/pad value...")
    # Iterate through the entire dataset (renamed `id` to avoid shadowing the builtin)
    min_list = []
    for file_id in list_IDs:
        x_train = load_data(os.path.join(input_dir, file_id))
        min_list.append(x_train.min())
    return np.asarray(min_list).min()
5,334,852
def image_marker_layout(box):
    """Layout the :class:`boxes.ImageMarkerBox` ``box``.

    :class:`boxes.ImageMarkerBox` objects are :class:`boxes.ReplacedBox` objects,
    but their used size is computed differently.
    """
    _, width, height = box.replacement
    # Guard the ratio: either intrinsic dimension may be missing. (Assumed fix;
    # the flattened original divided unconditionally, which would raise a
    # TypeError whenever width or height is None.)
    if width is not None and height is not None:
        ratio = width / height
    else:
        ratio = None
    one_em = box.style.font_size
    if width is not None and height is not None:
        box.width = width
        box.height = height
    elif width is not None and ratio is not None:
        box.width = width
        box.height = width / ratio
    elif height is not None and ratio is not None:
        box.width = height * ratio
        box.height = height
    elif ratio is not None:
        # ratio >= 1 : width >= height
        if ratio >= 1:
            box.width = one_em
            box.height = one_em / ratio
        else:
            box.width = one_em * ratio
            box.height = one_em
    else:
        box.width = width if width is not None else one_em
        box.height = height if height is not None else one_em
5,334,853
def iotcentral_cli_handler(args):
    """CLI entry point for command: iotcentral"""
    logger = getLogger(__name__)
    from .pyazureutils_errors import PyazureutilsError
    status = STATUS_FAILURE
    try:
        if args.action == "register-device":
            status = _action_register_device(args)
    except PyazureutilsError as exc:
        logger.error("Operation failed with %s: %s", type(exc).__name__, exc)
        return STATUS_FAILURE
    # Return the action's status (assumed; the flattened original dropped the
    # success-path return, leaving `status` unused).
    return status
5,334,854
def cross_validation_wrapper(learner, dataset, k=10, trials=1):
    """[Fig 18.8]

    Return the optimal value of size having minimum error on validation set.

    err_train: A training error array, indexed by size
    err_val: A validation error array, indexed by size
    """
    err_val = []
    err_train = []
    size = 1
    while True:
        errT, errV = cross_validation(learner, size, dataset, k)
        # Check for convergence provided err_val is not empty
        if err_train and isclose(err_train[-1], errT, rel_tol=1e-6):
            best_size = 0
            min_val = math.inf
            i = 0
            while i < size:
                if err_val[i] < min_val:
                    min_val = err_val[i]
                    best_size = i
                i += 1
            # Return the size with minimum validation error (assumed; the
            # flattened original computed best_size but never returned it,
            # leaving the loop infinite).
            return best_size
        err_val.append(errV)
        err_train.append(errT)
        print(err_val)
        size += 1
5,334,855
def kelly_kapowski(s, g, w, its=45, r=0.025, m=1.5, **kwargs):
    """
    Compute cortical thickness using the DiReCT algorithm.

    Diffeomorphic registration-based cortical thickness based on probabilistic
    segmentation of an image. This is an optimization algorithm.

    Arguments
    ---------
    s : ANTsImage
        segmentation image
    g : ANTsImage
        gray matter probability image
    w : ANTsImage
        white matter probability image
    its : integer
        convergence params - controls iterations
    r : scalar
        gradient descent update parameter
    m : scalar
        gradient field smoothing parameter
    kwargs : keyword arguments
        anything else, see KellyKapowski help in ANTs

    Returns
    -------
    ANTsImage

    Example
    -------
    >>> import ants
    >>> img = ants.image_read( ants.get_ants_data('r16') ,2)
    >>> img = ants.resample_image(img, (64,64),1,0)
    >>> mask = ants.get_mask( img )
    >>> segs = ants.kmeans_segmentation( img, k=3, kmask = mask)
    >>> thick = ants.kelly_kapowski(s=segs['segmentation'],
    ...                             g=segs['probabilityimages'][1],
    ...                             w=segs['probabilityimages'][2],
    ...                             its=45, r=0.5, m=1)
    """
    if isinstance(s, iio.ANTsImage):
        s = s.clone('unsigned int')

    d = s.dimension
    outimg = g.clone()
    kellargs = {'d': d, 's': s, 'g': g, 'w': w,
                'c': its, 'r': r, 'm': m, 'o': outimg}
    for k, v in kwargs.items():
        kellargs[k] = v

    processed_kellargs = utils._int_antsProcessArguments(kellargs)
    libfn = utils.get_lib_fn('KellyKapowski')
    libfn(processed_kellargs)
    return outimg
5,334,856
def test_tree_node_section() -> None:
    """Given a path to a ``stories.py``, extract needed info."""
    from examples.minimal.components import stories

    stories_path = Path(stories.__file__)
    tree_node = TreeNode(
        root_path="examples.minimal",
        stories_path=stories_path,
    )
    assert isinstance(tree_node.called_instance, Section)
    assert tree_node.name == "components"
    assert tree_node.package_path == ".components"
    assert tree_node.parent_path == "."
5,334,857
def loadPlugin(ac="script", a=1, n="string", qt=1, rc="script"):
    """
    http://help.autodesk.com/cloudhelp/2019/ENU/Maya-Tech-Docs/CommandsPython/loadPlugin.html

    -----------------------------------------

    loadPlugin is undoable, NOT queryable, and NOT editable.

    Load plug-ins into Maya. The parameter(s) to this command are either the
    names or pathnames of plug-in files. The convention for naming plug-ins is
    to use a .so extension on Linux, a .mll extension on Windows and .bundle
    extension on Mac OS X. If no extension is provided then the default
    extension for the platform will be used.

    To load a Python plugin you must explicitly supply the '.py' extension.

    If the plugin was specified with a pathname then that is where the plugin
    will be searched for. If no pathname was provided then the current working
    directory (i.e. the one returned by Maya's 'pwd' command) will be searched,
    followed by the directories in the MAYA_PLUG_IN_PATH environment variable.

    When the plug-in is loaded, the name used in Maya's internal plug-in
    registry for the plug-in information will be the file name with the
    extension removed. For example, if you load the plug-in "newNode.mll" the
    name used in the Maya's registry will be "newNode". This value as well as
    that value with either a ".so", ".mll" or ".bundle" extension can be used
    as valid arguments to either the unloadPlugin or pluginInfo commands.

    -----------------------------------------
    Return Value:
        string[]  the internal names of the successfully loaded plug-ins

    -----------------------------------------
    Flags:
    -----------------------------------------
    ac : addCallback [script] []
        Add a MEL or Python callback script to be called after a plug-in is
        loaded. For MEL, the procedure should have the following signature:
        global proc procedureName(string $pluginName). For Python, you may
        specify either a script as a string, or a Python callable object such
        as a function. If you specify a string, then put the formatting
        specifier "%s" where you want the name of the plug-in to be inserted.
        If you specify a callable such as a function, then the name of the
        plug-in will be passed as an argument.
    -----------------------------------------
    a : allPlugins [boolean] []
        Cause all plug-ins in the search path specified in MAYA_PLUG_IN_PATH
        to be loaded.
    -----------------------------------------
    n : name [string] []
        Set a user defined name for the plug-ins that are loaded. If the name
        is already taken, then a number will be added to the end of the name
        to make it unique.
    -----------------------------------------
    qt : quiet [boolean] []
        Don't print a warning if you attempt to load a plug-in that is already
        loaded.
    -----------------------------------------
    rc : removeCallback [script]
        Removes a procedure which was previously added with -addCallback.
    """
5,334,858
def test_file_with_connection():
    """Test File class with connection to 2ch."""
    catalog = Catalog('test')
    thread = catalog.threads[0]
    post = thread.posts[0]
    # Download file
    files_list = post.files
    for file_descr in files_list:
        assert isinstance(file_descr, File)
        file_descr.download(file_descr.name)
        assert path.exists(file_descr.name)
        remove(file_descr.name)
5,334,859
def get_word_count(frame, pattern_list, group_by_name):
    """Compute word counts and return a dataframe.

    :param frame: input dataframe with a `words` column
    :param pattern_list: words to count
    :param group_by_name: column to group by
    :return: frame with counts, or None if pattern_list is empty
    """
    if not pattern_list or len(pattern_list) == 0:
        return None
    else:
        return pd.DataFrame(frame[frame.words.isin(pattern_list)]
                            .groupby(group_by_name).words.value_counts()
                            .to_frame())
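A small usage sketch for get_word_count, assuming pandas is imported as pd (as the body does); the column names here are illustrative, not from the source:

import pandas as pd

df = pd.DataFrame({'words': ['a', 'the', 'a', 'cat'], 'doc': [1, 1, 2, 2]})
print(get_word_count(df, ['a', 'the'], 'doc'))
# counts of 'a' and 'the' within each value of the hypothetical 'doc' column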
5,334,860
def test_aggregate_across_levels_perslice(dummy_metrics, dummy_vert_level):
    """Test extraction of metrics aggregated within selected vertebral levels and per slice."""
    agg_metric = aggregate_slicewise.aggregate_per_slice_or_level(
        dummy_metrics['with float'], levels=[2, 3], perslice=True, perlevel=False,
        vert_level=dummy_vert_level,
        group_funcs=(('WA', aggregate_slicewise.func_wa),))
    assert agg_metric[(0,)] == {'VertLevel': (2,), 'WA()': 29.0}
    assert agg_metric[(2,)] == {'VertLevel': (3,), 'WA()': 39.0}
5,334,861
def parse(text, from_timezone=None):
    """
    :rtype: TimeeDT
    """
    timee_dt = None
    if from_timezone:
        timee_dt = parse_with_maya(text, timezone=from_timezone)
        return timee_dt
    else:
        for parse_method in parsing_methods():
            result = parse_method(text)
            if result is not None:
                timee_dt = result
                break
        return timee_dt
5,334,862
def _command_line_objc_copts(objc_fragment):
    """Returns copts that should be passed to `clang` from the `objc` fragment.

    Args:
        objc_fragment: The `objc` configuration fragment.

    Returns:
        A list of `clang` copts, each of which is preceded by `-Xcc` so that
        they can be passed through `swiftc` to its underlying ClangImporter
        instance.
    """
    # In general, every compilation mode flag from native `objc_*` rules should
    # be passed, but `-g` seems to break Clang module compilation. Since this
    # flag does not make much sense for module compilation and only touches
    # headers, it's ok to omit.
    clang_copts = objc_fragment.copts + objc_fragment.copts_for_current_compilation_mode
    return collections.before_each("-Xcc", [copt for copt in clang_copts if copt != "-g"])
5,334,863
def test_set_slice_reorder():
    """Set a slice across one of the axes.

    The source data is not in the same order as the axes in ImageStack, but
    set_slice should reorder the axes and write it correctly.
    """
    stack = ImageStack.synthetic_stack()
    round_ = 1
    y, x = stack.tile_shape
    index = {Axes.ROUND: round_}

    written = np.full(
        (stack.shape[Axes.ZPLANE], stack.shape[Axes.CH], y, x),
        fill_value=0.5, dtype=np.float32)
    stack.set_slice(index, written, [Axes.ZPLANE, Axes.CH])

    expected = np.full(
        (stack.shape[Axes.CH], stack.shape[Axes.ZPLANE], y, x),
        fill_value=0.5, dtype=np.float32)
    assert np.array_equal(stack.get_slice(index)[0], expected)
5,334,864
def reversal_test_bootstrap(dec=None, inc=None, di_block=None, plot_stereo=False,
                            save=False, save_folder='.', fmt='svg'):
    """
    Conduct a reversal test using bootstrap statistics (Tauxe, 2010) to
    determine whether two populations of directions could be from an antipodal
    common mean.

    Parameters
    ----------
    dec: list of declinations
    inc: list of inclinations

    or

    di_block: a nested list of [dec,inc]
        A di_block can be provided in which case it will be used instead of
        dec, inc lists.

    plot_stereo : before plotting the CDFs, plot stereonet with the
        bidirectionally separated data (default is False)
    save : boolean argument to save plots (default is False)
    save_folder : directory where plots will be saved
        (default is current directory, '.')
    fmt : format of saved figures (default is 'svg')

    Returns
    -------
    plots : Plots of the cumulative distribution of Cartesian components are
        shown, as is an equal area plot if plot_stereo = True.

    Examples
    --------
    Populations of roughly antipodal directions are developed here using
    ``ipmag.fishrot``. These directions are combined into a single di_block
    given that the function determines the principal component and splits the
    data accordingly by polarity.

    >>> directions_n = ipmag.fishrot(k=20, n=30, dec=5, inc=-60)
    >>> directions_r = ipmag.fishrot(k=35, n=25, dec=182, inc=57)
    >>> directions = directions_n + directions_r
    >>> ipmag.reversal_test_bootstrap(di_block=directions, plot_stereo=True)

    Data can also be input to the function as separate lists of dec and inc.
    In this example, the di_block from above is split into lists of dec and
    inc which are then used in the function:

    >>> direction_dec, direction_inc, direction_moment = ipmag.unpack_di_block(directions)
    >>> ipmag.reversal_test_bootstrap(dec=direction_dec, inc=direction_inc, plot_stereo=True)
    """
    if di_block is None:
        all_dirs = make_di_block(dec, inc)
    else:
        all_dirs = di_block
    directions1, directions2 = pmag.flip(all_dirs)
    if plot_stereo:
        # plot equal area with two modes
        plt.figure(num=0, figsize=(4, 4))
        plot_net(0)
        plot_di(di_block=directions1, color='b')
        plot_di(di_block=do_flip(di_block=directions2), color='r')
    common_mean_bootstrap(directions1, directions2, save=save,
                          save_folder=save_folder, fmt=fmt)
5,334,865
def handle_incoming_mail(addr=None):
    """Handle an incoming email by making a task to examine it.

    This code checks some basic properties of the incoming message to make
    sure that it is worth examining. Then it puts all the relevant fields
    into a dict and makes a new Cloud Task which is further processed in
    python 3 code.
    """
    logging.info('Request Headers: %r', flask.request.headers)
    logging.info('\n\n\nPOST for InboundEmail and addr is %r', addr)
    if addr != settings.INBOUND_EMAIL_ADDR:
        logging.info('Message not sent directly to our address')
        return {'message': 'Wrong address'}

    if flask.request.content_length > MAX_BODY_SIZE:
        logging.info('Message too big, ignoring')
        return {'message': 'Too big'}

    msg = get_incoming_message()

    precedence = msg.get('precedence', '')
    if precedence.lower() in ['bulk', 'junk']:
        logging.info('Precedence: %r indicates an autoresponder', precedence)
        return {'message': 'Wrong precedence'}

    from_addrs = (_extract_addrs(msg.get('x-original-from', '')) or
                  _extract_addrs(msg.get('from', '')))
    if from_addrs:
        from_addr = from_addrs[0]
    else:
        logging.info('could not parse from addr')
        return {'message': 'Missing From'}

    in_reply_to = msg.get('in-reply-to', '')

    body = u''
    for part in msg.walk():
        # We only process plain text emails.
        if part.get_content_type() == 'text/plain':
            body = part.get_payload(decode=True)
            if not isinstance(body, unicode):
                body = body.decode('utf-8')
            break  # Only consider the first text part.

    to_addr = urllib.unquote(addr)
    subject = msg.get('subject', '')
    task_dict = {
        'to_addr': to_addr,
        'from_addr': from_addr,
        'subject': subject,
        'in_reply_to': in_reply_to,
        'body': body,
    }
    logging.info('task_dict is %r', task_dict)
    response = call_py3_task_handler('/tasks/detect-intent', task_dict)

    if response.status_code and response.status_code != 200:
        logging.warning('Handoff to py3 failed.')
        flask.abort(400)

    return {'message': 'Done'}
5,334,866
def test_we_have_some_config_values():
    """Check to see we have some config values."""
    assert len(Config) > 0
5,334,867
def test_cascade_delete(settings):
    """
    Verify that if we delete a model with the ArchiveMixin, the delete
    cascades to its "parents", i.e. the models with foreign keys to it.
    """
    settings.SOFT_DELETE_SAFE_MODE = False
    base = models.BaseArchiveModel.objects.create(name='test')
    related = models.RelatedModel.objects.create(base=base)
    models.RelatedCousinModel.objects.create(related=related)
    related_archivable = models.RelatedArchiveModel.objects.create(base=base)
    cousin_archivable = models.RelatedCousinArchiveModel.objects.create(related=related_archivable)

    base.delete()

    assert not models.RelatedModel.objects.exists()
    assert not models.RelatedCousinModel.objects.exists()

    assert not models.RelatedArchiveModel.objects.exists()
    assert models.RelatedArchiveModel.all_objects.exists()
    related_archivable = models.RelatedArchiveModel.all_objects.get(pk=related_archivable.pk)
    assert related_archivable.deleted is not None

    assert not models.RelatedCousinArchiveModel.objects.exists()
    assert models.RelatedCousinArchiveModel.all_objects.exists()
    cousin_archivable = models.RelatedCousinArchiveModel.all_objects.get(pk=cousin_archivable.pk)
    assert cousin_archivable.deleted is not None
5,334,868
def split(df, partition, column):
    """
    :param df: The dataframe to split
    :param partition: The partition to split
    :param column: The column along which to split
    :returns: A tuple containing a split of the original partition
    """
    dfp = df[column][partition]
    if column in categorical:
        values = dfp.unique()
        lv = set(values[:len(values) // 2])
        rv = set(values[len(values) // 2:])
        return dfp.index[dfp.isin(lv)], dfp.index[dfp.isin(rv)]
    else:
        median = dfp.median()
        dfl = dfp.index[dfp < median]
        dfr = dfp.index[dfp >= median]
        return (dfl, dfr)
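A hedged usage sketch for split. The function reads a module-level `categorical` set, so the sketch defines an empty one to force the numeric (median) branch; the data is made up:

import pandas as pd

categorical = set()  # assumed module-level set of categorical column names
df = pd.DataFrame({'age': [23, 35, 31, 52, 41, 28]})
left, right = split(df, df.index, 'age')
# left: indices with age below the median; right: indices at or above it
print(list(left), list(right))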
5,334,869
def get_gaussian_kernel(l=5, sig=1.):
    """Create a gaussian kernel with side length l and a sigma of sig."""
    ax = np.linspace(-(l - 1) / 2., (l - 1) / 2., l)
    xx, yy = np.meshgrid(ax, ax)
    kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))
    return kernel / np.sum(kernel)
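A quick check of the two properties the function guarantees by construction, normalization and a centered peak:

import numpy as np

k = get_gaussian_kernel(l=5, sig=1.0)
print(k.shape)                   # (5, 5)
print(np.isclose(k.sum(), 1.0))  # True -- weights are normalized
print(k.argmax() == 12)          # True -- the peak sits at the grid center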
5,334,870
def save_context_test() -> None:
    """Test printing out context data.

    :return: None
    :rtype: None
    """
    a = save_context('a')
    # `raise NotImplemented` raises a TypeError in Python 3; the exception
    # class is NotImplementedError.
    raise NotImplementedError
5,334,871
def get_data_tbl(path, tblname):
    """Wrapper function around @merge_json."""
    files = get_annon_db_file(path, tblname)
    log.info("files: {}".format(files))
    K, V = common.merge_json(files)
    return K, V
5,334,872
def costes_coloc(im_1, im_2, psf_width=3, n_scramble=1000, thresh_r=0.0,
                 roi=None, roi_method='all', do_manders=True):
    """
    Perform Costes colocalization analysis on a pair of images.

    Parameters
    ----------
    im_1: array_like
        Intensity image for colocalization. Must be the same shape as `im_2`.
    im_2: array_like
        Intensity image for colocalization. Must be the same shape as `im_1`.
    psf_width: int, default 3
        Width, in pixels, of the point spread function.
    n_scramble: int, default 1000
        Number of scrambled image comparisons to do to get statistics.
    thresh_r: float, default 0.0
        Threshold Pearson r value to be considered colocalized.
    roi: array_like, dtype bool, default None
        Boolean image the same shape as `im_1` and `im_2` that is True for
        pixels within the ROI.
    roi_method: str, default 'all'
        If 'all', all pixels of a given subimage must be within the ROI for
        the subimage itself to be considered part of the ROI. If 'any', if
        any one pixel is within the ROI, the subimage is considered part of
        the ROI.
    do_manders: bool, default True
        If True, compute the Manders coefficients.

    Returns
    -------
    output: A CostesColocalization instance, with the following attributes.

        im_1, im_2, psf_width, n_scramble, thresh_r, roi, roi_method:
            As in the input parameters.
        a: slope of the regression line I_2 = a * I_1 + b
        b: intercept of regression line I_2 = a * I_1 + b
        M_1: Manders coefficient for image 1
        M_2: Manders coefficient for image 2
        pearson_r: Pearson correlation coefficient of the pixels in the two images.
        p_coloc: The probability of colocalization being present in the two images.
    """
    # Make float mirrored boundaries in preparation for scrambling
    im_1_mirror = mirror_edges(im_1, psf_width).astype(float)
    im_2_mirror = mirror_edges(im_2, psf_width).astype(float)

    # Set up ROI
    if roi is None:
        roi = np.ones_like(im_1, dtype='bool')

    # Rename images to be sliced ROI and convert to float
    im_1 = im_1[roi].astype(float)
    im_2 = im_2[roi].astype(float)

    # Mirror ROI at edges
    roi_mirror = mirror_edges(roi, psf_width)

    # Compute the blocks that we'll scramble
    blocks_1 = im_to_blocks(im_1_mirror, psf_width, roi_mirror, roi_method)
    blocks_2 = im_to_blocks(im_2_mirror, psf_width, roi_mirror, roi_method)

    # Compute the Pearson coefficient
    pearson_r = _pearson_r(blocks_1.ravel(), blocks_2.ravel())

    # Do image scrambling and r calculations
    r_scr = scrambled_r(blocks_1, blocks_2, n=n_scramble)

    # Compute percent chance of coloc
    p_coloc = (r_scr < pearson_r).sum() / n_scramble

    # Now do work to compute adjusted Manders's coefficients
    if do_manders:
        # Get the linear relationship between im_2 and im_1
        a, b = _odr_linear(im_1.ravel(), im_2.ravel())

        # Perform threshold calculation
        thresh_1 = _find_thresh(im_1, im_2, a, b, thresh_r=thresh_r)
        thresh_2 = a * thresh_1 + b

        # Compute Costes's update to the Manders's coefficients
        inds = (im_1 > thresh_1) & (im_2 > thresh_2)
        M_1 = im_1[inds].sum() / im_1.sum()
        M_2 = im_2[inds].sum() / im_2.sum()

        # Toss results into class for returning
        return _CostesColocalization(
            im_1=im_1, im_2=im_2, roi=roi, roi_method=roi_method,
            psf_width=psf_width, n_scramble=n_scramble, thresh_r=thresh_r,
            thresh_1=thresh_1, thresh_2=thresh_2, a=a, b=b, M_1=M_1,
            M_2=M_2, r_scr=r_scr, pearson_r=pearson_r, p_coloc=p_coloc)
    else:
        return _CostesColocalization(
            im_1=im_1, im_2=im_2, roi=roi, roi_method=roi_method,
            psf_width=psf_width, n_scramble=n_scramble, thresh_r=None,
            thresh_1=None, thresh_2=None, a=None, b=None, M_1=None,
            M_2=None, r_scr=r_scr, pearson_r=pearson_r, p_coloc=p_coloc)
5,334,873
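# A minimal usage sketch for costes_coloc. The images below are synthetic and
# illustrative; the helpers the function relies on (mirror_edges, im_to_blocks,
# _pearson_r, scrambled_r, _odr_linear, _find_thresh, _CostesColocalization)
# are assumed to be defined elsewhere in this module.
import numpy as np

rng = np.random.RandomState(42)
base = rng.rand(64, 64)
im_a = base + 0.1 * rng.rand(64, 64)        # two partially colocalized
im_b = 0.8 * base + 0.2 * rng.rand(64, 64)  # channels built from one signal

coloc = costes_coloc(im_a, im_b, psf_width=3, n_scramble=200)
print(coloc.pearson_r, coloc.p_coloc, coloc.M_1, coloc.M_2)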
def test_creating_extra_byte_with_invalid_type(simple_las_path):
    """ Test that creating extra bytes with an invalid type raises TypeError """
    las = laspy.read(simple_las_path)
    with pytest.raises(TypeError):
        las.add_extra_dim(laspy.ExtraBytesParams("just_a_test", "i16"))
5,334,874
def get_induced_dipole_count(efpobj): """Gets the number of polarization induced dipoles in `efpobj` computation. Returns ------- int Total number of polarization induced dipoles. """ (res, ndip) = efpobj._efp_get_induced_dipole_count() _result_to_error(res) return ndip
5,334,875
def _gen_parabola(phase: float, start: float, mid: float, end: float) -> float:
    """Gets a point on a parabola y = a x^2 + b x + c.

    The parabola is determined by three points (0, start), (0.5, mid), (1, end)
    in the plane.

    Args:
        phase: Normalized to [0, 1]. A point on the x-axis of the parabola.
        start: The y value at x == 0.
        mid: The y value at x == 0.5.
        end: The y value at x == 1.

    Returns:
        The y value at x == phase.
    """
    mid_phase = 0.5
    delta_1 = mid - start
    delta_2 = end - start
    delta_3 = mid_phase ** 2 - mid_phase
    coef_a = (delta_1 - delta_2 * mid_phase) / delta_3
    coef_b = (delta_2 * mid_phase ** 2 - delta_1) / delta_3
    coef_c = start

    return coef_a * phase ** 2 + coef_b * phase + coef_c
5,334,876
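# Quick self-contained check of _gen_parabola: the interpolant should pass
# exactly through the three defining points (0, start), (0.5, mid), (1, end).
assert abs(_gen_parabola(0.0, start=1.0, mid=4.0, end=2.0) - 1.0) < 1e-12
assert abs(_gen_parabola(0.5, start=1.0, mid=4.0, end=2.0) - 4.0) < 1e-12
assert abs(_gen_parabola(1.0, start=1.0, mid=4.0, end=2.0) - 2.0) < 1e-12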
def grant_perms(obj: element, mast: element, read_only: bool, meta):
    """
    Grants another user permissions to access a Jaseci object
    Param 1 - target element
    Param 2 - master to be granted permission
    Param 3 - Boolean read_only flag

    Return - True if the permission grant succeeded
    """
    # Look up the master object from the call metadata
    mast = meta['h'].get_obj(meta['m_id'], uuid.UUID(meta['m_id']))
    return mast.object_perms_grant(obj=obj, mast=mast,
                                   read_only=read_only)['success']
5,334,877
def get_instance_ids_compute_hostnames_conversion_dict(instance_ids, id_to_hostname, region=None): """Return instanceIDs to hostnames dict if id_to_hostname=True, else return hostname to instanceID dict.""" try: if not region: region = os.environ.get("AWS_DEFAULT_REGION") conversion_dict = {} ec2_client = boto3.client("ec2", region_name=region) response = ec2_client.describe_instances(InstanceIds=instance_ids).get("Reservations") for reservation in response: for instance in reservation.get("Instances"): instance_hostname = instance.get("PrivateDnsName").split(".")[0] instance_id = instance.get("InstanceId") if id_to_hostname: conversion_dict[instance_id] = instance_hostname else: conversion_dict[instance_hostname] = instance_id return conversion_dict except Exception as e: logging.error("Failed retrieving hostnames for instances {} with exception: {}".format(instance_ids, e))
5,334,878
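# Hypothetical usage of the conversion helper; the instance IDs below are made
# up and valid AWS credentials plus a reachable EC2 endpoint are assumed.
id_to_host = get_instance_ids_compute_hostnames_conversion_dict(
    ["i-0123456789abcdef0", "i-0fedcba9876543210"],
    id_to_hostname=True,
    region="us-east-1",
)
# e.g. {"i-0123456789abcdef0": "ip-10-0-0-12", ...}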
def pkgdir(tmpdir, monkeypatch): """ temp directory fixture containing a readable/writable ./debian/changelog. """ cfile = tmpdir.mkdir('debian').join('changelog') text = """ testpkg (1.1.0-1) stable; urgency=medium * update to 1.1.0 * other rad packaging updates * even more cool packaging updates that take a lot of text to describe so the change wraps on multiple lines -- Ken Dreyer <kdreyer@redhat.com> Tue, 06 Jun 2017 14:46:37 -0600 testpkg (1.0.0-2redhat1) stable; urgency=medium * update to 1.0.0 (rhbz#123) -- Ken Dreyer <kdreyer@redhat.com> Mon, 05 Jun 2017 13:45:36 -0600 """.lstrip("\n") cfile.write(text) monkeypatch.chdir(tmpdir) return tmpdir
5,334,879
def read_plot_config(filename): """Read in plotting config file. Args: filename (str): Full path and name of config file. Returns: dict: Contents of config file. """ config = configparser.ConfigParser() config.read(filename) out = {} for section in config.sections(): out[section] = _get_section(config, section) return out
5,334,880
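# Usage sketch for read_plot_config: write a small INI file and read it back.
# The exact value conversion depends on the _get_section helper defined
# elsewhere in this module, so the parsed types shown here are illustrative.
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".ini", delete=False) as f:
    f.write("[axes]\nxlabel = time\nylabel = amplitude\n")
    cfg_path = f.name

cfg = read_plot_config(cfg_path)
print(cfg["axes"])  # e.g. {'xlabel': 'time', 'ylabel': 'amplitude'}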
def parse(file_path, prec=15):
    """
    Parse an OpenQASM file and return its QASM string representation.
      - file_path: Path to the OpenQASM file
      - prec: Precision for the returned string
    """
    qasm = Qasm(file_path)
    return qasm.parse().qasm(prec)
5,334,881
def sinusoid(amplitude=1.0, frequency=1.0, phase=0.0, duration=60.0,
             samplerate=100.0):
    """Generate a sinusoid"""
    t = np.arange(0, duration, 1.0/samplerate)
    d = amplitude * np.sin(2.0 * np.pi * frequency * t + phase)
    return t, d
5,334,882
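# Example: one second of a 5 Hz sinusoid with amplitude 2, sampled at 100 Hz.
t, d = sinusoid(amplitude=2.0, frequency=5.0, duration=1.0, samplerate=100.0)
assert len(t) == 100
assert abs(d.max() - 2.0) < 0.05  # peak close to the requested amplitude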
def get_model():
    """
    Returns a compiled convolutional neural network model. Assume that the
    `input_shape` of the first layer is `(IMG_WIDTH, IMG_HEIGHT, 3)`.

    The output layer should have `NUM_CATEGORIES` units, one for each category.
    """
    model = tf.keras.models.Sequential()
    model.add(
        tf.keras.layers.Conv2D(
            32, (3, 3), input_shape=(
                IMG_WIDTH, IMG_HEIGHT, 3)))
    model.add(tf.keras.layers.Activation('relu'))
    model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))

    model.add(tf.keras.layers.Conv2D(64, (3, 3)))
    model.add(tf.keras.layers.Activation('relu'))

    model.add(tf.keras.layers.Conv2D(64, (4, 4)))
    model.add(tf.keras.layers.Activation('relu'))

    model.add(tf.keras.layers.Conv2D(128, (4, 4)))
    model.add(tf.keras.layers.Activation('relu'))
    model.add(tf.keras.layers.MaxPooling2D(pool_size=(3, 3)))

    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(128))
    model.add(tf.keras.layers.Activation('relu'))
    model.add(tf.keras.layers.Dropout(0.2))
    model.add(tf.keras.layers.Dense(NUM_CATEGORIES))
    # Softmax + categorical cross-entropy suit single-label, multi-category
    # classification; sigmoid + binary cross-entropy would misreport
    # accuracy when exactly one of NUM_CATEGORIES labels applies.
    model.add(tf.keras.layers.Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    return model
5,334,883
def check_datetime_str(datetime_str): """ Tries to parse the datetime string to a datetime object. If it fails, it will return False :param str datetime_str: :return: returns True or False depending on the validity of the datetime string :rtype: bool """ try: parse_datetime_str(datetime_str) return True except ValueError: return False
5,334,884
def train(args, train_exe, build_res, place):
    """[train the net]

    Arguments:
        args {argparse.Namespace} -- [run configuration: epoch count, step
            intervals, checkpoint directory, eval/test flags]
        train_exe {fluid executor} -- [executor used to run the compiled
            training program]
        build_res {dict} -- [built resources: compiled program, cost,
            prediction, labels, data loader and training program]
        place {fluid place} -- [device (CPU/GPU) the program runs on]
    """
    global DEV_COUNT
    compiled_prog = build_res["compiled_prog"]
    cost = build_res["cost"]
    prediction = build_res["prediction"]
    pred_label = build_res["pred_label"]
    label = build_res["label"]
    fetch_list = [cost.name, prediction.name, pred_label.name, label.name]
    train_data_loader = build_res["train_data_loader"]
    train_prog = build_res["train_prog"]
    steps = 0
    time_begin = time.time()
    test_exe = train_exe
    logger.info("Begin training")
    for i in range(args.epoch):
        try:
            for data in train_data_loader():
                avg_cost_np, avg_pred_np, pred_label_np, label_np = \
                    train_exe.run(feed=data, program=compiled_prog,
                                  fetch_list=fetch_list)
                steps += 1
                if steps % int(args.skip_steps) == 0:
                    time_end = time.time()
                    used_time = time_end - time_begin
                    get_score(pred_label_np, label_np, eval_phase="Train")
                    logger.info('loss is {}'.format(avg_cost_np))
                    logger.info("epoch: %d, step: %d, speed: %f steps/s" %
                                (i, steps, args.skip_steps / used_time))
                    time_begin = time.time()
                if steps % args.save_steps == 0:
                    save_path = os.path.join(args.checkpoints,
                                             "step_" + str(steps))
                    fluid.io.save(train_prog, save_path)
                    logger.info("[save]step %d : save at %s" %
                                (steps, save_path))
                if steps % args.validation_steps == 0:
                    if args.do_eval:
                        evaluate(args, test_exe, build_res, "eval")
                    if args.do_test:
                        evaluate(args, test_exe, build_res, "test")
        except Exception as e:
            logger.exception(str(e))
            logger.error("Train error : %s" % str(e))
            exit(1)
    save_path = os.path.join(args.checkpoints, "step_" + str(steps))
    fluid.io.save(train_prog, save_path)
    logger.info("[save]step %d : save at %s" % (steps, save_path))
5,334,885
def _df_pitch(df: pd.DataFrame, xcol: str = 'x', ycol: str = 'y', zcol: str = 'z'): """Find angular pitch for each row in an accelerometer dataframe. Args: df (pd.DataFrame): accelerometer dataframe xcol, ycol, zcol (str): column names for x, y, and z acceleration Returns: pd.Series: pitch """ out = pd.Series(pitch(df[xcol].values, df[ycol].values, df[zcol].values), name='pitch') return out
5,334,886
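# Hypothetical usage of _df_pitch; the underlying pitch() helper is assumed to
# implement the usual accelerometer pitch, e.g. atan2(x, sqrt(y^2 + z^2)), and
# the column names below match the function's defaults.
import pandas as pd

acc = pd.DataFrame({"x": [0.0, 0.5], "y": [0.0, 0.0], "z": [1.0, 0.866]})
print(_df_pitch(acc))  # a pd.Series named 'pitch', one value per row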
def gan_masked_generate_face(generator_fun, face_img: np.array):
    """
    Generate a face from the seed one using a generator_fun that outputs an
    alpha mask and a BGR result
    :param generator_fun: takes an image and returns an alpha mask
        concatenated with the BGR result
    :param face_img: img to feed to the generator
    :return: the generated BGR image and its three-channel uint8 mask
    """
    gen_res = generator_fun(face_img)
    gen_mask = gen_res[:, :, 0]
    gen_bgr = gen_res[:, :, 1:]

    gen_mask = np.clip(gen_mask * 255, 0, 255).astype(np.uint8)
    # stack mask such that we have three channels
    gen_mask = np.stack([gen_mask, gen_mask, gen_mask], axis=2)

    return gen_bgr, gen_mask
5,334,887
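# Self-contained sketch with a stub generator: a real generator_fun would be a
# trained GAN, but any callable returning an (H, W, 4) array (alpha mask in
# channel 0, BGR in channels 1:4) satisfies the interface.
import numpy as np

def stub_generator(img):
    h, w = img.shape[:2]
    out = np.zeros((h, w, 4), dtype=np.float32)
    out[:, :, 0] = 0.5                      # flat alpha mask
    out[:, :, 1:] = img.astype(np.float32)  # pass the input through as BGR
    return out

face = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
bgr, mask = gan_masked_generate_face(stub_generator, face)
print(bgr.shape, mask.shape, mask.dtype)  # (64, 64, 3) (64, 64, 3) uint8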
def manual_auth(transport, username, hostname):
    """ Attempt to authenticate to the given transport using manual
        login/password method """
    default_auth = 'p'
    # auth = input(
    #     'Authenticate by (p)assword, (r)sa key, or (d)sa key? [%s] ' % default_auth)
    # if len(auth) == 0:
    #     auth = default_auth
    auth = default_auth

    if auth == 'r':
        default_path = os.path.join(os.environ['HOME'], '.ssh', 'id_rsa')
        path = input('RSA key [%s]: ' % default_path)
        if len(path) == 0:
            path = default_path
        try:
            key = paramiko.RSAKey.from_private_key_file(path)
        except paramiko.PasswordRequiredException:
            password = getpass.getpass('RSA key password: ')
            key = paramiko.RSAKey.from_private_key_file(path, password)
        transport.auth_publickey(username, key)
    elif auth == 'd':
        default_path = os.path.join(os.environ['HOME'], '.ssh', 'id_dsa')
        path = input('DSS key [%s]: ' % default_path)
        if len(path) == 0:
            path = default_path
        try:
            key = paramiko.DSSKey.from_private_key_file(path)
        except paramiko.PasswordRequiredException:
            password = getpass.getpass('DSS key password: ')
            key = paramiko.DSSKey.from_private_key_file(path, password)
        transport.auth_publickey(username, key)
    else:
        passwd = getpass.getpass('Password for %s@%s: ' % (username, hostname))
        transport.auth_password(username, passwd)
5,334,888
def get_namespace_from_path(path): """get namespace from file path Args: path (unicode): file path Returns: unicode: namespace """ return os.path.splitext(os.path.basename(path))[0]
5,334,889
def feat_row_sum_inv_normalize(x): """ :param x: np.ndarray, raw features. :return: np.ndarray, normalized features """ x_feat = x.astype(dtype=np.float64) inv_x_rowsum = np.power(x_feat.sum(axis=1), -1).flatten() inv_x_rowsum[np.isinf(inv_x_rowsum)] = 0. x_diag_mat = np.diag(inv_x_rowsum) normalized_x = x_diag_mat.dot(x_feat) return normalized_x
5,334,890
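# Worked example: after normalization every row of a non-negative matrix sums
# to 1 (rows that sum to zero are left at zero).
import numpy as np

x = np.array([[1, 3], [2, 2], [0, 0]])
print(feat_row_sum_inv_normalize(x))
# [[0.25 0.75]
#  [0.5  0.5 ]
#  [0.   0.  ]]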
def cross3(v1, v2):
    """ Return the cross product of two 3-vectors. """
    return (v1[1] * v2[2] - v1[2] * v2[1],
            v1[2] * v2[0] - v1[0] * v2[2],
            v1[0] * v2[1] - v1[1] * v2[0])
5,334,891
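# Sanity check of cross3 against NumPy's implementation.
import numpy as np

a, b = (1.0, 2.0, 3.0), (4.0, 5.0, 6.0)
assert np.allclose(cross3(a, b), np.cross(a, b))  # (-3.0, 6.0, -3.0)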
def api_request(request, viewset, method, url_kwargs={}, get_params={}):
    """ Call an API route on behalf of the user request.

        Examples:
            data = api_request(request, CaseDocumentViewSet, 'list', get_params={'q': 'foo'}).data
            data = api_request(request, CaseDocumentViewSet, 'retrieve', url_kwargs={'id': '123'}).data
    """
    # copy only selected fields, since deep-copying some request
    # objects leads to infinite recursion
    if isinstance(request, rest_framework.request.Request):
        request = request._request
    api_request = copy(request)
    api_request.method = 'GET'
    api_request.GET = QueryDict(mutable=True)
    api_request.GET.update(get_params)
    return viewset.as_view({'get': method})(api_request, **url_kwargs)
5,334,892
def ca_restart(slot):
    """
    :param slot: ID of the slot to restart
    """
    LOG.info("CA_Restart: attempting to restart")
    ret = CA_Restart(CK_ULONG(slot))
    LOG.info("CA_Restart: Ret Value: %s", ret)
    return ret
5,334,893
def test_provider_system_hook_command_exit(change_dir): """Verify the hook call works properly.""" with pytest.raises(HookCallException): tackle(context_file='command-exit.yaml', no_input=True)
5,334,894
def naturally_select_features(train, valid, X, Y, frame_type='h2o'):
    """Iteratively prune features by "natural selection".

    Each round splits the surviving candidate features into folds, trains
    an H2O XGBoost model on each fold's feature subset, and keeps only the
    top fraction of each subset by variable importance. Rounds repeat
    until the surviving feature set stops shrinking.

    :param train: training frame (H2OFrame for frame_type='h2o')
    :param valid: validation frame
    :param X: list of candidate feature column names
    :param Y: name of the response column
    :param frame_type: 'h2o' (default); 'spark' is not implemented
    :return: list of feature names that survived selection
    """
    if frame_type == 'spark':
        raise NotImplementedError("Spark frames are not supported yet.")

    import math

    from sklearn.model_selection import KFold
    from h2o.estimators.xgboost import H2OXGBoostEstimator

    fraction_to_keep = 0.2

    survivors = list(X)
    while True:
        n_folds = min(20, len(survivors))
        if n_folds < 2:
            break
        next_round = []
        kf = KFold(n_splits=n_folds)
        for _, test_index in kf.split(survivors):
            columns = [survivors[i] for i in test_index]

            # Train an XGBoost model to rank this subset's features
            model = H2OXGBoostEstimator(
                ntrees=10000,
                learn_rate=0.005,
                sample_rate=0.1,
                col_sample_rate=0.8,
                max_depth=5,
                nfolds=3,
                keep_cross_validation_predictions=True,
                stopping_rounds=10,
                seed=12345)
            model.train(x=columns, y=Y, training_frame=train,
                        validation_frame=valid)

            # varimp rows are (variable, relative, scaled, percentage)
            importances = model.varimp(use_pandas=False)
            n_keep = max(1, math.floor(len(columns) * fraction_to_keep))
            next_round.extend(row[0] for row in importances[:n_keep])

        # De-duplicate while preserving order
        next_round = list(dict.fromkeys(next_round))
        if len(next_round) >= len(survivors):
            break
        survivors = next_round
    return survivors
5,334,895
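# Hedged usage sketch for naturally_select_features: assumes a running H2O
# cluster and that the CSV paths and "label" response column below exist;
# all of them are illustrative.
import h2o

h2o.init()
train = h2o.import_file("train.csv")
valid = h2o.import_file("valid.csv")
candidates = [c for c in train.columns if c != "label"]
survivors = naturally_select_features(train, valid, candidates, "label")
print(survivors)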
def test_update_team_password(mongo_proc, redis_proc, client): # noqa (fixture) """Test the /team/update_password endpoint.""" clear_db() register_test_accounts() res = client.post( "/api/v1/user/login", json={ "username": STUDENT_DEMOGRAPHICS["username"], "password": STUDENT_DEMOGRAPHICS["password"], }, ) csrf_t = get_csrf_token(res) # Attempt to change password while still in initial team res = client.post( "/api/v1/team/update_password", json={ "new_password": "newpassword", "new_password_confirmation": "newpassword", }, headers=[("X-CSRF-Token", csrf_t)], ) assert res.status_code == 422 assert res.json["message"] == "You have not created a team yet." # Attempt to set with non-matching passwords res = client.post( "/api/v1/teams", json={"team_name": "newteam", "team_password": "newteam"} ) tid = res.json["tid"] res = client.post( "/api/v1/team/update_password", json={"new_password": "newpassword", "new_password_confirmation": "invalid",}, headers=[("X-CSRF-Token", csrf_t)], ) assert res.status_code == 422 assert res.json["message"] == "Your team passwords do not match." # Successfully change password db = get_conn() old_password = str(db.teams.find_one({"tid": tid})["password"]) res = client.post( "/api/v1/team/update_password", json={ "new_password": "newpassword", "new_password_confirmation": "newpassword", }, headers=[("X-CSRF-Token", csrf_t)], ) assert res.status_code == 200 assert res.json["success"] is True new_password = str(db.teams.find_one({"tid": tid})["password"]) assert new_password != old_password
5,334,896
def pickup_target( path: ShortestPath, target_id: str ) -> None: """Update the given path to pickup the target with the given ID.""" path.action_list.append({ 'action': 'PickupObject', 'params': { 'objectId': target_id } })
5,334,897
def ogr_wkts(src_ds):
    """Return the region(s) parsed from the WKT geometries of the OGR dataset."""
    these_regions = []
    src_s = src_ds.split(':')
    if os.path.exists(src_s[0]):
        poly = ogr.Open(src_s[0])
        if poly is not None:
            p_layer = poly.GetLayer(0)
            for pf in p_layer:
                pgeom = pf.GetGeometryRef()
                pwkt = pgeom.ExportToWkt()
                r = Region().from_string(pwkt)
                if len(src_s) > 1:
                    src_r = src_s[1].split('/')
                    if len(src_r) > 0:
                        r.zmin = utils.float_or(src_r[0])
                    if len(src_r) > 1:
                        r.zmax = utils.float_or(src_r[1])
                    if len(src_r) > 2:
                        r.wmin = utils.float_or(src_r[2])
                    if len(src_r) > 3:
                        r.wmax = utils.float_or(src_r[3])
                these_regions.append(r)
        poly = None
    return(these_regions)
5,334,898
def load_word2vec_matrix(embedding_size):
    """
    Return the vocabulary size and the word2vec embedding matrix.

    Args:
        embedding_size: The embedding size
    Returns:
        The vocabulary size and the word2vec embedding matrix
    Raises:
        IOError: If word2vec model file doesn't exist
    """
    word2vec_file = '../data/word2vec_' + str(embedding_size) + '.txt'

    if not os.path.isfile(word2vec_file):
        raise IOError("✘ The word2vec file doesn't exist. ")

    # load_word2vec_format expects a file path, not an open file handle
    model = KeyedVectors.load_word2vec_format(word2vec_file, binary=False,
                                              unicode_errors='replace')
    vocab_size = len(model.wv.vocab.items())
    vocab = dict([(k, v.index) for k, v in model.wv.vocab.items()])
    vector = np.zeros([vocab_size, embedding_size])
    for key, value in vocab.items():
        if key is not None:
            vector[value] = model[key]
    return vocab_size, vector
5,334,899
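# Hypothetical usage of load_word2vec_matrix: assumes a text-format word2vec
# file named ../data/word2vec_128.txt exists, as the function requires.
vocab_size, embedding_matrix = load_word2vec_matrix(128)
print(vocab_size, embedding_matrix.shape)  # e.g. 50000 (50000, 128)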