content : string, lengths 22 to 815k
id : int64, values 0 to 4.91M
def compute_quasisymmetry_error( R_lmn, Z_lmn, L_lmn, i_l, Psi, R_transform, Z_transform, L_transform, iota, helicity=(1, 0), data=None, ): """Compute quasi-symmetry triple product and two-term errors. f_C computation assumes transform grids are a single flux surface. Parameters ---------- R_lmn : ndarray Spectral coefficients of R(rho,theta,zeta) -- flux surface R coordinate. Z_lmn : ndarray Spectral coefficients of Z(rho,theta,zeta) -- flux surface Z coordiante. L_lmn : ndarray Spectral coefficients of lambda(rho,theta,zeta) -- poloidal stream function. i_l : ndarray Spectral coefficients of iota(rho) -- rotational transform profile. Psi : float Total toroidal magnetic flux within the last closed flux surface, in Webers. R_transform : Transform Transforms R_lmn coefficients to real space. Z_transform : Transform Transforms Z_lmn coefficients to real space. L_transform : Transform Transforms L_lmn coefficients to real space. iota : Profile Transforms i_l coefficients to real space. helicity : tuple, int Type of quasi-symmetry (M, N). Returns ------- data : dict Dictionary of ndarray, shape(num_nodes,) of quasi-symmetry errors. Key "QS_FF" is the flux function metric, key "QS_TP" is the triple product. """ data = compute_B_dot_gradB( R_lmn, Z_lmn, L_lmn, i_l, Psi, R_transform, Z_transform, L_transform, iota, data=data, ) # TODO: can remove this call if compute_|B| changed to use B_covariant data = compute_covariant_magnetic_field( R_lmn, Z_lmn, L_lmn, i_l, Psi, R_transform, Z_transform, L_transform, iota, data=data, ) M = helicity[0] N = helicity[1] # covariant Boozer components: I = B_theta, G = B_zeta (in Boozer coordinates) if check_derivs("I", R_transform, Z_transform, L_transform): data["I"] = jnp.mean(data["B_theta"] * data["sqrt(g)"]) / jnp.mean( data["sqrt(g)"] ) data["G"] = jnp.mean(data["B_zeta"] * data["sqrt(g)"]) / jnp.mean( data["sqrt(g)"] ) # QS two-term (T^3) if check_derivs("f_C", R_transform, Z_transform, L_transform): data["f_C"] = (M * data["iota"] - N) * (data["psi_r"] / data["sqrt(g)"]) * ( data["B_zeta"] * data["|B|_t"] - data["B_theta"] * data["|B|_z"] ) - (M * data["G"] + N * data["I"]) * data["B*grad(|B|)"] # QS triple product (T^4/m^2) if check_derivs("f_T", R_transform, Z_transform, L_transform): data["f_T"] = (data["psi_r"] / data["sqrt(g)"]) * ( data["|B|_t"] * data["(B*grad(|B|))_z"] - data["|B|_z"] * data["(B*grad(|B|))_t"] ) return data
5,336,900
def int_to_uuid(number):
    """Convert a positive integer to a UUID: a string of characters from
    `symbols` that is at least 3 letters long."""
    assert isinstance(number, int) and number >= 0
    if number == 0:
        return '000'
    symbol_string = ''
    while number > 0:
        remainder = number % base
        number //= base
        symbol_string = encode_symbols[remainder] + symbol_string
    return symbol_string.rjust(3, '0')
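# The function above relies on module-level names `base` and `encode_symbols`
# that are not shown in this sample. A minimal sketch of plausible definitions
# and a usage example, assuming a base-36 alphabet (these exact values are an
# assumption, not part of the original code):
import string

encode_symbols = string.digits + string.ascii_lowercase  # '0'..'9', 'a'..'z'
base = len(encode_symbols)                               # 36

print(int_to_uuid(0))      # '000'
print(int_to_uuid(12345))  # '9ix' in base 36, left-padded to 3 characters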
5,336,901
def VPLoadCSV(self):
    """ Load Velocity Profile .csv """
    VP_CSVName = askopenfilename()
    self.VP_CSV = pd.read_csv(VP_CSVName, delimiter=',')
    self.VP_Rad, self.VP_Prof = self.VP_CSV['Radius'].values, self.VP_CSV['Velocity Profile'].values

    # Update Equation Box
    self.VPEqu.delete(0, TK.END)
    self.VPEqu.insert(0, 'File Loaded - %s' % VP_CSVName)

    # Update StatusBox
    self.StatusBox.delete(0, TK.END)
    self.StatusBox.insert(0, 'Velocity Profile .CSV loaded')
5,336,902
def key_value_data(string):
    """Validate the string to be in the form key=value."""
    if string:
        key, value = string.split("=")
        if not (key and value):
            msg = "{} not in 'key=value' format.".format(string)
            raise argparse.ArgumentTypeError(msg)
        return {key: value}
    return {}
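# A minimal usage sketch for the validator above: it is intended as the
# `type=` callable of an argparse argument (the argument name here is
# illustrative, not from the original code).
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--meta", type=key_value_data, default={},
                    help="extra metadata in key=value form")
args = parser.parse_args(["--meta", "env=prod"])
print(args.meta)  # {'env': 'prod'}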
5,336,903
def count_digit(n, digit):
    """Return how many times digit appears in n.

    >>> count_digit(55055, 5)
    4
    """
    if n == 0:
        return 0
    else:
        if n % 10 == digit:
            return count_digit(n // 10, digit) + 1
        else:
            return count_digit(n // 10, digit)
5,336,904
def _get_courses_in_page(url) -> [Course]:
    """
    Given a WebSoc search URL, creates a generator over each Course in the results page
    """
    # Get the page that lists the courses in a table
    with urllib.request.urlopen(url) as source:
        soup = bs.BeautifulSoup(source, "html.parser")
    # Iterate over each course, which is each row in the results
    for row in soup.find_all("tr"):
        # Get the values of each column
        cells = [td.string for td in row.find_all("td")]
        # Convert this row to a Course object
        if len(cells) in {15, 16, 17}:
            yield Course(cells)
5,336,905
def cal_chisquare(data, f, pepoch, bin_profile, F1, F2, F3, F4, parallel=False):
    """
    calculate the chisquare distribution for frequency search on the pepoch time.
    """
    chi_square = np.zeros(len(f), dtype=np.float64)

    t0 = pepoch

    if parallel:
        for i in prange(len(f)):
            phi = (data-t0)*f[i] + (1.0/2.0)*((data-t0)**2)*F1 + (1.0/6.0)*((data-t0)**3)*F2 +\
                  (1.0/24.0)*((data-t0)**4)*F3 + (1.0/120.0)*((data-t0)**5)*F4
            phi = phi - np.floor(phi)
            #counts = numba_histogram(phi, bin_profile)[0]
            #NOTE: The histogram bin should give the edge of bin, instead of the bin number.
            #NOTE: For those pulse with narrow peak, it will be incorrect while calculate the chisquare
            counts = np.histogram(phi, np.linspace(0, 1, bin_profile+1)[:-1])[0]
            expectation = np.mean(counts)
            chi_square[i] = np.sum((counts - expectation)**2 / expectation)
    else:
        for i in range(len(f)):
            phi = (data-t0)*f[i] + (1.0/2.0)*((data-t0)**2)*F1 + (1.0/6.0)*((data-t0)**3)*F2 +\
                  (1.0/24.0)*((data-t0)**4)*F3 + (1.0/120.0)*((data-t0)**5)*F4
            phi = phi - np.floor(phi)
            #counts = numba_histogram(phi, bin_profile)[0]
            #NOTE: The histogram bin should give the edge of bin, instead of the bin number.
            #NOTE: For those pulse with narrow peak, it will be incorrect while calculate the chisquare
            counts = np.histogram(phi, np.linspace(0, 1, bin_profile+1)[:-1])[0]
            expectation = np.mean(counts)
            chi_square[i] = np.sum((counts - expectation)**2 / expectation)

    return chi_square
5,336,906
def test_spacy_german():
    """Test the parser with the md document."""
    docs_path = "tests/data/pure_html/brot.html"

    # Preprocessor for the Docs
    preprocessor = HTMLDocPreprocessor(docs_path)
    doc = next(preprocessor._parse_file(docs_path, "md"))

    # Create a Parser and parse the md document
    parser_udf = get_parser_udf(
        structural=True, tabular=True, lingual=True, visual=False, language="de"
    )
    doc = parser_udf.apply(doc)

    # Check that doc has sentences
    assert len(doc.sentences) == 824
    sent = sorted(doc.sentences, key=lambda x: x.position)[143]
    assert sent.ner_tags == [
        "O", "O", "LOC", "O", "O", "LOC", "O", "O", "O", "O", "O", "O", "O", "O", "O",
    ]  # inaccurate
    assert sent.dep_labels == [
        "mo", "ROOT", "sb", "mo", "nk", "nk", "punct", "mo",
        "nk", "nk", "nk", "sb", "oc", "rc", "punct",
    ]
5,336,907
def main():
    """ Play around the pie chart with some meaningless data. """
    # slices: list[int] = [120, 80, 30, 20]
    # labels: list[str] = ['Sixty', 'Forty', 'Fifteen', 'Ten']
    # colours: list[str] = ['#008fd5', '#fc4f30', '#e5ae37', '#6d904f']
    # plt.pie(slices, labels=labels, colors=colours, wedgeprops={'edgecolor': 'black'})
    #
    # plt.title('My Customized Pie Chart')
    # plt.style.use('seaborn-dark')
    # plt.tight_layout()
    # plt.show()

    """ Using survey data presented by the Stack OverFlow. """
    """ Data has been filtered down to top 5 most popular languages. """
    pie_chart_slices: list[int] = [35917, 36443, 47544, 55466, 59219]
    pie_chart_labels: list[str] = ['Java', 'Python', 'SQL', 'HTML/CSS', 'JavaScript']
    explode: list[float] = [0, 0.1, 0, 0, 0]

    plt.pie(pie_chart_slices, labels=pie_chart_labels,
            explode=explode,  # apply explode index to make element pop out
            startangle=90, autopct='%1.1f%%',
            shadow=True, wedgeprops={'edgecolor': 'black'})

    plt.title('Top 5 Most Popular Programming Languages in 2019')
    plt.style.use('seaborn-dark')
    plt.tight_layout()
    plt.show()
5,336,908
def get_css_urls(bundle, debug=None):
    """
    Fetch URLs for the CSS files in the requested bundle.

    :param bundle: Name of the bundle to fetch.
    :param debug: If True, return URLs for individual files instead of the
        minified bundle.
    """
    if debug is None:
        debug = settings.DEBUG

    if debug:
        items = []
        for item in settings.MINIFY_BUNDLES['css'][bundle]:
            should_compile = item.endswith('.less') and getattr(
                settings, 'LESS_PREPROCESS', False
            )
            if should_compile:
                compile_css(item)
                items.append('%s.css' % item)
            else:
                items.append(item)
        return [static(item) for item in items]
    else:
        return [static(f'css/{bundle}-min.css')]
5,336,909
def myt():
    """myt - my task manager

    An application to manage your tasks through the command line
    using simple options.
    """
    pass
5,336,910
def rng() -> np.random.Generator:
    """Random number generator."""
    return np.random.default_rng(42)
5,336,911
def add_values_in_dict(sample_dict, key, list_of_values):
    """Append multiple values to a key in the given dictionary"""
    if key not in sample_dict:
        sample_dict[key] = list()
    sample_dict[key].extend(list_of_values)
    temp_list = sample_dict[key]
    temp_list = list(set(temp_list))  # remove duplicates
    sample_dict[key] = temp_list
    return sample_dict
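# A short usage example for the helper above (the sample data is illustrative).
# Note that deduplicating through set() does not preserve the original
# insertion order of the list.
inventory = {"fruit": ["apple"]}
inventory = add_values_in_dict(inventory, "fruit", ["banana", "apple"])
inventory = add_values_in_dict(inventory, "veg", ["carrot"])
print(inventory)  # e.g. {'fruit': ['banana', 'apple'], 'veg': ['carrot']}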
5,336,912
def parse_length(line, p) -> int:
    """ parse length specifier for note or rest """
    n_len = voices[ivc].meter.dlen   # start with default length
    try:
        if n_len <= 0:
            raise SyntaxError(f"got len<=0 from current voice {line[p]}")
        if line[p].isdigit():   # multiply note length
            fac = parse_uint()
            if not fac:
                fac = 1
            n_len *= fac
        if line[p] == '/':   # divide note length
            while line[p] == '/':
                p += 1
                if line[p].isdigit():
                    fac = parse_uint()
                else:
                    fac = 2
                if n_len % fac:
                    raise SyntaxError(f"Bad length divisor {line[p-1]}")
                n_len = n_len // fac
    except SyntaxError as se:
        print(f"{se} Cannot proceed without default length. Emergency stop.")
        exit(1)
    return n_len
5,336,913
def get_dual_shapes_and_types(bounds_elided): """Get shapes and types of dual vars.""" dual_shapes = [] dual_types = [] layer_sizes = utils.layer_sizes_from_bounds(bounds_elided) for it in range(len(layer_sizes)): m = layer_sizes[it] m = [m] if isinstance(m, int) else list(m) if it < len(layer_sizes)-1: n = layer_sizes[it + 1] n = [n] if isinstance(n, int) else list(n) shapes = { 'lam': [1] + n, 'nu': [1] + m, 'muminus': [1] + n, 'muplus': [1] + n, 'nu_quad': [], 'muminus2': [], } types = { 'lam': utils.DualVarTypes.EQUALITY, 'nu': utils.DualVarTypes.INEQUALITY, 'muminus': utils.DualVarTypes.INEQUALITY, 'muplus': utils.DualVarTypes.INEQUALITY, 'nu_quad': utils.DualVarTypes.INEQUALITY, 'muminus2': utils.DualVarTypes.INEQUALITY, } dual_shapes.append(DualVar(**{ k: np.array(s) for k, s in shapes.items()})) dual_types.append(DualVar(**types)) else: shapes = {'nu': [1] + m, 'nu_quad': []} types = {'nu': utils.DualVarTypes.INEQUALITY, 'nu_quad': utils.DualVarTypes.INEQUALITY} dual_shapes.append(DualVarFin(**{ k: np.array(s) for k, s in shapes.items()})) dual_types.append(DualVarFin(**types)) # Add kappa N = sum([np.prod(np.array(i)) for i in layer_sizes]) dual_shapes.append(np.array([1, N+1])) dual_types.append(utils.DualVarTypes.INEQUALITY) return dual_shapes, dual_types
5,336,914
def linear_search(alist, key):
    """ Return index of key in alist. Return -1 if key not present."""
    for i in range(len(alist)):
        if alist[i] == key:
            return i
    return -1
5,336,915
def create_pipeline(ctx: Context, pipeline_path: Text, build_target_image: Text,
                    skaffold_cmd: Text, build_base_image: Text) -> None:
    """Command definition to create a pipeline."""
    click.echo('Creating pipeline')
    ctx.flags_dict[labels.ENGINE_FLAG] = kubeflow_labels.KUBEFLOW_V2_ENGINE
    ctx.flags_dict[labels.PIPELINE_DSL_PATH] = pipeline_path
    ctx.flags_dict[kubeflow_labels.TFX_IMAGE_ENV] = build_target_image
    ctx.flags_dict[labels.BASE_IMAGE] = build_base_image
    ctx.flags_dict[labels.SKAFFOLD_CMD] = skaffold_cmd
    kubeflow_v2_handler.KubeflowV2Handler(ctx.flags_dict).create_pipeline()
5,336,916
def register_object_name(object, namepath): """ Registers a object in the NamingService. The name path is a list of 2-uples (id,kind) giving the path. For instance if the path of an object is [('foo',''),('bar','')], it is possible to get a reference to the object using the URL 'corbaname::hostname#foo/bar'. [('logilab','rootmodule'),('chatbot','application'),('chatter','server')] is mapped to 'corbaname::hostname#logilab.rootmodule/chatbot.application/chatter.server' The get_object_reference() function can be used to resolve such a URL. """ context = get_root_context() for id, kind in namepath[:-1]: name = [CosNaming.NameComponent(id, kind)] try: context = context.bind_new_context(name) except CosNaming.NamingContext.AlreadyBound as ex: context = context.resolve(name)._narrow(CosNaming.NamingContext) assert context is not None, \ 'test context exists but is not a NamingContext' id, kind = namepath[-1] name = [CosNaming.NameComponent(id, kind)] try: context.bind(name, object._this()) except CosNaming.NamingContext.AlreadyBound as ex: context.rebind(name, object._this())
5,336,917
def overlap_integral(xi, yi, zi, nxi, nyi, nzi, beta_i, xj, yj, zj, nxj, nyj, nzj, beta_j): """ overlap <i|j> between unnormalized Cartesian GTOs by numerical integration on a multicenter Becke grid Parameters ---------- xi,yi,zi : floats Cartesian positions of center i nxi,nyi,nzi : int >= 0 powers of Cartesian primitive GTO i beta_i : float > 0 exponent of radial part of orbital i xj,yj,zj : floats Cartesian positions of center j nxj,nyj,nzj : int >= 0 powers of Cartesian primitive GTO j beta_j : float > 0 exponent of radial part of orbital j """ # unnormalized bra and ket Gaussian type orbitals def CGTOi(x,y,z): dx, dy, dz = x-xi, y-yi, z-zi dr2 = dx*dx+dy*dy+dz*dz return pow(dx, nxi)*pow(dy,nyi)*pow(dz,nzi) * np.exp(-beta_i * dr2) def CGTOj(x,y,z): dx, dy, dz = x-xj, y-yj, z-zj dr2 = dx*dx+dy*dy+dz*dz return pow(dx, nxj)*pow(dy,nyj)*pow(dz,nzj) * np.exp(-beta_j * dr2) def integrand(x,y,z): return CGTOi(x,y,z) * CGTOj(x,y,z) # place a spherical grid on each center: ri, rj atoms = [(1, (xi, yi, zi)), (1, (xj, yj, zj))] # do the integral numerically olap = becke.integral(atoms, integrand) return olap
5,336,918
def save_project_id(config: Config, project_id: int):
    """Save the project ID in the project data"""
    data_dir = config.project.data_dir
    filename = data_dir / DEFAULT_PROJECTID_FILENAME
    with open(filename, "w") as f:
        return f.write(str(project_id))
5,336,919
async def session_start():
    """
    session_start: Creates a new database session for external functions and returns it
    - Keep in mind that this is only for external functions that require multiple transactions
    - Such as adding songs
    :return: A new database session
    """
    return session_maker()
5,336,920
def apply(bpmn_graph: BPMN, parameters: Optional[Dict[Any, Any]] = None) -> graphviz.Digraph: """ Visualize a BPMN graph Parameters ------------- bpmn_graph BPMN graph parameters Parameters of the visualization, including: - Parameters.FORMAT: the format of the visualization - Parameters.RANKDIR: the direction of the representation (default: LR) Returns ------------ gviz Graphviz representation """ if parameters is None: parameters = {} from pm4py.objects.bpmn.obj import BPMN from pm4py.objects.bpmn.util.sorting import get_sorted_nodes_edges image_format = exec_utils.get_param_value(Parameters.FORMAT, parameters, "png") rankdir = exec_utils.get_param_value(Parameters.RANKDIR, parameters, "LR") font_size = exec_utils.get_param_value(Parameters.FONT_SIZE, parameters, 12) font_size = str(font_size) bgcolor = exec_utils.get_param_value(Parameters.BGCOLOR, parameters, "transparent") filename = tempfile.NamedTemporaryFile(suffix='.gv') viz = Digraph("", filename=filename.name, engine='dot', graph_attr={'bgcolor': bgcolor}) viz.graph_attr['rankdir'] = rankdir nodes, edges = get_sorted_nodes_edges(bpmn_graph) for n in nodes: n_id = str(id(n)) if isinstance(n, BPMN.Task): viz.node(n_id, shape="box", label=n.get_name(), fontsize=font_size) elif isinstance(n, BPMN.StartEvent): viz.node(n_id, label="", shape="circle", style="filled", fillcolor="green", fontsize=font_size) elif isinstance(n, BPMN.EndEvent): viz.node(n_id, label="", shape="circle", style="filled", fillcolor="orange", fontsize=font_size) elif isinstance(n, BPMN.ParallelGateway): viz.node(n_id, label="+", shape="diamond", fontsize=font_size) elif isinstance(n, BPMN.ExclusiveGateway): viz.node(n_id, label="X", shape="diamond", fontsize=font_size) elif isinstance(n, BPMN.InclusiveGateway): viz.node(n_id, label="O", shape="diamond", fontsize=font_size) elif isinstance(n, BPMN.OtherEvent): viz.node(n_id, label="", shape="circle", fontsize=font_size) for e in edges: n_id_1 = str(id(e[0])) n_id_2 = str(id(e[1])) viz.edge(n_id_1, n_id_2) viz.attr(overlap='false') viz.format = image_format return viz
5,336,921
def _predict_exp(data, paulistring):
    """Compute expectation values of paulistring given bitstring data."""
    expectation_value = 0
    for a in data:
        val = 1
        for i, pauli in enumerate(paulistring):
            idx = a[i]
            if pauli == "I":
                continue
            elif pauli == "X":
                ls = [1, 1, -1, -1]
            elif pauli == "Y":
                ls = [-1, 1, 1, -1]
            elif pauli == "Z":
                ls = [1, -1, 1, -1]
            val *= ls[idx]
        expectation_value += val / len(data)
    return expectation_value
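# A minimal usage sketch for the estimator above. Each sample in `data` is
# assumed to be a sequence with one entry per qubit, where the entry is an
# index 0-3 selecting an eigenvalue from the per-Pauli lookup tables used in
# the loop (this data layout is inferred from the code, not documented in the
# original sample).
samples = [
    [0, 3],  # qubit 0 -> outcome index 0, qubit 1 -> outcome index 3
    [1, 2],
]
print(_predict_exp(samples, "XZ"))  # 0.0: average of (+1*-1) and (+1*+1)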
5,336,922
def allocate_single(size=1024 ** 3):
    """Method allocates memory for single variable

    Args:
        size (int): size

    Returns:
        void
    """
    data = '0' * size
    del data
5,336,923
def AGI(ymod1, c02500, c02900, XTOT, MARS, sep, DSI, exact, nu18, taxable_ubi, II_em, II_em_ps, II_prt, II_no_em_nu18, c00100, pre_c04600, c04600): """ Computes Adjusted Gross Income (AGI), c00100, and compute personal exemption amount, c04600. """ # calculate AGI assuming no foreign earned income exclusion c00100 = ymod1 + c02500 - c02900 + taxable_ubi # calculate personal exemption amount if II_no_em_nu18: # repeal of personal exemptions for deps. under 18 pre_c04600 = max(0, XTOT - nu18) * II_em else: pre_c04600 = XTOT * II_em if DSI: pre_c04600 = 0. # phase-out personal exemption amount if exact == 1: # exact calculation as on tax forms line5 = max(0., c00100 - II_em_ps[MARS - 1]) line6 = math.ceil(line5 / (2500. / sep)) line7 = II_prt * line6 c04600 = max(0., pre_c04600 * (1. - line7)) else: # smoothed calculation needed for sensible mtr calculation dispc_numer = II_prt * (c00100 - II_em_ps[MARS - 1]) dispc_denom = 2500. / sep dispc = min(1., max(0., dispc_numer / dispc_denom)) c04600 = pre_c04600 * (1. - dispc) return (c00100, pre_c04600, c04600)
5,336,924
def tokenize(headline_list):
    """
    Takes list of headlines as input and returns a list of lists of tokens.
    """
    tokenized = []
    for headline in headline_list:
        tokens = word_tokenize(headline)
        tokenized.append(tokens)
    return tokenized
5,336,925
def create_en_sentiment_component(nlp: Language, name: str, force: bool) -> Language:
    """
    Allows the English sentiment to be added to a spaCy pipe using
    nlp.add_pipe("asent_en_v1").
    """
    LEXICON.update(E_LEXICON)

    return Asent(
        nlp,
        name=name,
        lexicon=LEXICON,
        intensifiers=INTENSIFIERS,
        negations=NEGATIONS,
        contrastive_conjugations=CONTRASTIVE_CONJ,
        lowercase=True,
        lemmatize=False,
        force=force,
    )
5,336,926
def p_term_comparison(p):
    """term : factor GREATER_THAN factor
            | factor GREATER_THAN_EQUALS factor
            | factor LESS_THAN factor
            | factor LESS_THAN_EQUALS factor
            | factor EQUALS factor
            | factor NOT_EQUALS factor
            | factor IS_NOT_EQUALS factor
            | factor IS_EQUALS factor"""
    p[0] = ast.CompareOperator(p[2], p[1], p[3])
    p[0].set_position(p.lineno(2), compute_column(p.lexer, p.lexpos(2)))
5,336,927
def set_current(path):
    """Write the current file."""
    fpath = pth.expandvars("$HOME/.excentury/current")
    with open(fpath, 'w') as _fp:
        _fp.write(path)
5,336,928
def lambda_handler(event, context):
    """
    Return the shop list information.

    Parameters
    ----------
    event : dict
        Parameters passed from the front end.
    context : dict
        Context contents.

    Returns
    -------
    shop_list : dict
        Shop list information.
    """
    # Log the parameters
    logger.info(event)

    try:
        shop_list = get_shop_list()
    except Exception as e:
        logger.exception('Occur Exception: %s', e)
        return utils.create_error_response('Error')

    body = json.dumps(
        shop_list, default=utils.decimal_to_int, ensure_ascii=False)

    return utils.create_success_response(body)
5,336,929
def mask_land_ocean(data, land_mask, ocean=False): """Mask land or ocean values using a land binary mask. Parameters ---------- data: xarray.DataArray This input array can only have one of 2, 3 or 4 dimensions. All spatial dimensions should coincide with those of the land binary mask. land_mask: xarray.DataArray This array must have the same spatial extent as the input data. Though it can have different times or levels. It can be binary or not, because internally it will make sure of it. Sometimes these masks actually contain a range of values from 0 to 1. ocean: bool, optional Whether the user wants to mask land or ocean values. Default is to mask ocean values (False). Returns ------- xarray.Datarray same as input data but with masked values in either land or ocean. """ # noqa # remove numpy warning regarding nan_policy msg = 'Mean of empty slice' warnings.filterwarnings('ignore', message=msg) # get number of dimensions of both data arrays ndim_ds = len(data.dims) ndim_lm = len(land_mask.dims) # get dimensions of dataset if ndim_ds == 2: ntim = None nlat, mlon = data.shape elif ndim_ds == 3: ntim, nlat, mlon = data.shape elif ndim_ds == 4: ntim, nlev, nlat, mlon = data.shape else: msg = 'only 2, 3 or 4 dimensions allowed for data set' raise TypeError(msg) # get dimensions of land mask if ndim_lm == 2: lntim = None lnlat, lmlon = land_mask.shape elif ndim_lm == 3: lntim, lnlat, lmlon = land_mask.shape else: msg = 'only 2 or 3 dimensions allowed for land mask' raise TypeError(msg) # make sure dims agree if nlat != lnlat or mlon != lmlon: msg = 'spatial coordinates do not agree' raise ValueError(msg) # get a single land mask if many if lntim is not None or lntim == 1: land_mask = land_mask[0] # convert mask to binary if not already land_mask = binary_mask(land_mask) # create mask 1 (land) = True, 0 (ocean) = False mask = land_mask.values == 1 # tile mask to number of times if ndim_ds == 2: tmask = mask elif ndim_ds == 3: tmask = np.tile(mask, (ntim, 1, 1)) else: tmask = np.tile(mask, (ntim, 1, 1, 1)) # create masked array values = np.array(data.values) if ocean is True: maskval = np.ma.masked_array(values, tmask) else: maskval = np.ma.masked_array(values, tmask == False) # noqa E712 # replace values newdata = data.copy() newdata.values = maskval return newdata
5,336,930
def create_project_type(project_type_params):
    """
    :param project_type_params: The parameters for creating a ProjectType instance -- the dict should include
        the 'type' key, which specifies the ProjectType subclass name, and key/value pairs matching constructor
        arguments for that ProjectType subclass.
    :type project_type_params: dict
    :return: The project_type instance
    :rtype: project_type.project_type.ProjectType
    """
    project_type_params = project_type_params.copy()
    project_type_name = project_type_params.pop('type')
    project_type_class = get_project_type_subclass(project_type_name)
    if project_type_class:
        # create object using project_type_params as constructor args
        return project_type_class(**project_type_params)

    # Not yet implemented other project types
    return None
5,336,931
def env_str(env_name: str, default: str) -> str:
    """ Get the environment variable's value converted into a string """
    return getenv(env_name, default)
5,336,932
def test_list_g_year_month_min_length_1_nistxml_sv_iv_list_g_year_month_min_length_2_1(mode, save_output, output_format):
    """
    Type list/gYearMonth is restricted by facet minLength with value 6.
    """
    assert_bindings(
        schema="nistData/list/gYearMonth/Schema+Instance/NISTSchema-SV-IV-list-gYearMonth-minLength-2.xsd",
        instance="nistData/list/gYearMonth/Schema+Instance/NISTXML-SV-IV-list-gYearMonth-minLength-2-1.xml",
        class_name="NistschemaSvIvListGYearMonthMinLength2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
5,336,933
def server_bam_statistic(ip, snmp_config_data):
    """
    :param ip:
    :param snmp_config_data:
    :return:
    """
    try:
        var_binds = get_snmp_multiple_oid(oids=oids_bam, ip=ip, snmp_config_data=snmp_config_data)
        server_memory_usage = get_memory_usage(var_binds)
        server_cpu_usage = get_cpu_process(ip=ip, snmp_config_data=snmp_config_data)
    except Exception as ex:
        raise ex
    logger.debug("Server_bam_statistic: mem_usage: {} - cpu_usage: {}".format(server_memory_usage, server_cpu_usage))
    return server_memory_usage, server_cpu_usage
5,336,934
def extract_sigma_var_names(filename_nam):
    """
    Parses a 'sigma.nam' file containing the variable names, and outputs a
    list of these names. Some vector components contain a semicolon in their
    name; if so, break the name at the semicolon and keep just the 1st part.
    """
    var_names = []
    with open(filename_nam, 'r') as file:
        for line in file:
            var_name = line.strip()
            # check for semicolon
            if ';' in var_name:
                var_name = var_name.split(';')[0]
            var_names.append(var_name)
    return var_names
5,336,935
def vectors_to_arrays(vectors): """ Convert 1d vectors (lists, arrays or pandas.Series) to C contiguous 1d arrays. Arrays must be in C contiguous order for us to pass their memory pointers to GMT. If any are not, convert them to C order (which requires copying the memory). This usually happens when vectors are columns of a 2d array or have been sliced. If a vector is a list or pandas.Series, get the underlying numpy array. Parameters ---------- vectors : list of lists, 1d arrays or pandas.Series The vectors that must be converted. Returns ------- arrays : list of 1d arrays The converted numpy arrays Examples -------- >>> import numpy as np >>> import pandas as pd >>> data = np.array([[1, 2], [3, 4], [5, 6]]) >>> vectors = [data[:, 0], data[:, 1], pd.Series(data=[-1, -2, -3])] >>> all(i.flags.c_contiguous for i in vectors) False >>> all(isinstance(i, np.ndarray) for i in vectors) False >>> arrays = vectors_to_arrays(vectors) >>> all(i.flags.c_contiguous for i in arrays) True >>> all(isinstance(i, np.ndarray) for i in arrays) True >>> data = [[1, 2], (3, 4), range(5, 7)] >>> all(isinstance(i, np.ndarray) for i in vectors_to_arrays(data)) True """ arrays = [as_c_contiguous(np.asarray(i)) for i in vectors] return arrays
5,336,936
def random_name_gen(size=6):
    """Generate a random python attribute name."""
    return ''.join(
        [random.choice(string.ascii_uppercase)] +
        [random.choice(string.ascii_uppercase + string.digits) for i in range(size - 1)]
    ) if size > 0 else ''
5,336,937
def load_player_history_table(div_soup): """Parse the HTML/Soup table for the numberfire predictions. Returns a pandas DataFrame """ if not div_soup: return None rows = div_soup.findAll('tr') table_header = [x.getText() for x in rows[0].findAll('th')] table_data = [[x.getText() for x in row.findAll('td')] for row in rows[1:]] if not table_data: logging.debug("No predictions found!") return None table = pandas.io.parsers.TextParser(table_data, names=table_header, index_col=table_header.index('Date'), parse_dates=True).read() # Next we want to separate combined projection stats like FGM-A into separate columns for FGM and FGA dash_cols = [col for col in table.columns if '-' in col] for col in dash_cols: name_parts = col.split('-') series1name = name_parts[0] series2name = name_parts[0][:-1] + name_parts[1] series1data = table[col].apply(lambda data: float(data.split('-')[0])) series2data = table[col].apply(lambda data: float(data.split('-')[1])) table[series1name] = pandas.Series(data=series1data, name=series1name, index=table.index, dtype=numpy.dtype('float')) table[series2name] = pandas.Series(data=series2data, name=series2name, index=table.index, dtype=numpy.dtype('float')) table.drop(dash_cols, axis=1, inplace=True) return table
5,336,938
def is_identity(u, tol=1e-15):
    """Test if a matrix is identity.

    Args:
        u: np.ndarray
            Matrix to be checked.
        tol: float
            Threshold below which two matrix elements are considered equal.
    """
    dims = np.array(u).shape
    if dims[0] != dims[1]:
        raise Exception("Input matrix is not square.")

    return np.allclose(u, np.eye(u.shape[0]), atol=tol)
5,336,939
def main(): """main entry point""" _me = os.path.splitext(os.path.basename(__file__))[0] _output = _me + ".lld" _parser = ArgumentParser() _parser.add_argument("-o", "--oratab", action="store", default="/etc/oratab", help="oratab file to use on *nix") _parser.add_argument("-s", "--servername", dest="servername", default="localhost", help="zabbix server or proxy name") _parser.add_argument("-p", "--port", dest="port", default=10051, required=False, help="zabbix server or proxy name") _parser.add_argument("-H", "--hostname", dest="hostname", required=True, help="hostname to receive the discovery array") _parser.add_argument("-k", "--key", dest="key", required=True, help="key for the discovery array") _args = _parser.parse_args() print(_args) print(platform.system()) if platform.system() == "Windows": _sids = get_sids_from_windows() else: _sids = get_sids_from_linux(_args.oratab) # for _p in _sids: # print("sid {0} oh {1}\n".format(_p[0], _p[1])) _a_log_files = get_diag_info(_sids) # for sid, a_dir in _a_log_files: # print("{0:8s} {1}".format(sid, a_dir)) check_log_files(_a_log_files) dump = to_json(_a_log_files) print(dump) _f = open(_output, "w") _f.write(_args.hostname + " " + _args.key + " " + dump) _f.close() _cmd = "zabbix_sender -z {} -p {} -i {} -r -vv".format( _args.servername, _args.port, _output) os.system(_cmd)
5,336,940
def get_metrics():
    """
    Collects various system metrics and returns them as a dict.
    """
    metrics = {}
    metrics.update(get_memory_metrics())
    metrics.update(get_cpu_metrics())
    metrics.update(get_disk_metrics())
    return metrics
5,336,941
def getPost(blog_id, username, password, post_id, fields=[]):
    """
    Parameters
        int blog_id
        string username
        string password
        int post_id
        array fields: Optional. List of field or meta-field names to include in response.
    """
    logger.debug("%s.getPost entered" % __name__)
    user = get_user(username, password)
    post = Post.objects.get(id=post_id)
    check_perms(user, post)
    return _post_struct(post)
5,336,942
def create_fun(name: str, obj, options: dict):
    """
    Generate a dictionary that contains the information about a function

    **Parameters**
    > **name:** `str` -- name of the function as returned by `inspect.getmembers`
    > **obj:** `object` -- object of the function as returned by `inspect.getmembers`
    > **options:** `dict` -- extended options

    **Returns**
    > `dict` -- with keys:
    > - *name*, *obj* -- the function name and object as returned by `inspect.getmembers`
    > - *module* -- name of the module
    > - *path* -- path of the module file
    > - *doc* -- docstring of the function
    > - *source* -- source code of the function
    > - *args* -- arguments of the function as a `inspect.signature` object
    """
    ignore_prefix = options.get("ignore_prefix")
    if ignore_prefix is not None and name[:len(ignore_prefix)] == ignore_prefix:
        return None
    fun = {}
    fun["name"] = name if name else 'undefined'
    fun["obj"] = obj
    fun["module"] = inspect.getmodule(obj).__name__
    fun["path"] = inspect.getmodule(obj).__file__
    fun["doc"] = inspect.getdoc(obj) or ""
    fun["source"] = rm_docstring_from_source(inspect.getsource(obj))
    fun["args"] = inspect.signature(obj)
    return fun
5,336,943
def serial_ss(file_read, forward_rate, file_rateconstant, file_energy, matrix, species_list, factor, initial_y, t_final, third_body=None, chemkin_data=None, smiles=None, chemkin=True): """ Iteratively solves the system of ODEs for different rate constants generated from the data file in serial Parameters ---------- file_read : str path of the 'param_set' file where all the parameter combinations are listed forward_rate : list A list of forward reaction rates for all the reactions in the mechanism file_rateconstant : str path to the file `complete_rateconstantlist.dat` file_energy : str path to the file 'free_energy_library.dat' matrix : ndarray stoichiometric matrix species_list : list A list of unique species in the mechanism initial_y : list A list of initial concentrations t_final : float final time in seconds third_body : ndarray matrix with third body efficiencies chemkin_data :ndarray the data from parsed chemkin reaction file smiles : dict the smiles dictionary generated from species_smiles.dat file factor : float conversion factor from given unit of energy to kJ chemkin : bool indicates if chemkin files are read as input files default = True Returns ---------- : list A list of final concentrations of all the species at t_final for all the given combinations of parameters listed in 'param_set.txt' file """ read_file = open(file_read, "r") results = [] for pos, data in enumerate(read_file): result = func_solv(data, forward_rate, file_rateconstant, file_energy, matrix, species_list, initial_y, t_final, factor, third_body, pos, chemkin_data, smiles) results.append(result) return results
5,336,944
def contains_left_button(buttons) -> bool:
    """
    Test if the buttons contains the left mouse button.

    The "buttons" should be values returned by get_click() or get_mouse()

    :param buttons: the buttons to be tested
    :return: if the buttons contains the left mouse button
    """
    return (buttons & QtCore.Qt.LeftButton) > 0
5,336,945
def init():
    """Initialize default fixtures."""
    load_fixtures()
5,336,946
def extract_interest_from_import_batch( import_batch: ImportBatch, interest_rt: ReportType) -> List[Dict]: """ The return list contains dictionaries that contain data for accesslog creation, but without the report_type and import_batch fields """ # now we compute the interest data from it # go through the interest metrics and extract info about how to remap the values interest_metrics = [] metric_remap = {} metric_to_ig = {} # TODO: if we preselected the import_batches before submitting them here # we could remove the whole test here, which create a query for each import batch if import_batch.report_type not in import_batch.platform.interest_reports.all(): # the report_type does not represent interest for this platform, we can skip it logger.debug('Import batch report type not in platform interest: %s - %s', import_batch.report_type.short_name, import_batch.platform) return [] for rim in import_batch.report_type.reportinterestmetric_set.all().\ select_related('interest_group'): if rim.target_metric_id: metric_remap[rim.metric_id] = rim.target_metric_id interest_metrics.append(rim.metric_id) metric_to_ig[rim.metric_id] = rim.interest_group # remap interest groups into DimensionText metric_to_dim1 = {} dim1 = interest_rt.dimensions_sorted[0] for metric_id, ig in metric_to_ig.items(): # we do not use update_or_create here, because it creates one select and one update # even if nothing has changed dim_text, _created = DimensionText.objects.get_or_create( dimension=dim1, text=ig.short_name, defaults={'text_local_en': ig.name_en, 'text_local_cs': ig.name_cs}) if dim_text.text_local_en != ig.name_en or dim_text.text_local_cs != ig.name_cs: dim_text.text_local_en = ig.name_en dim_text.text_local_cs = ig.name_cs dim_text.save() metric_to_dim1[metric_id] = dim_text.pk # get source data for the new logs new_logs = [] # for the following dates, there are data for a superseeding report type, so we do not # want to created interest records for them clashing_dates = {} if import_batch.report_type.superseeded_by: if hasattr(import_batch, 'min_date') and hasattr(import_batch, 'max_date'): # check if we have an annotated queryset and do not need to compute the min-max dates min_date = import_batch.min_date max_date = import_batch.max_date else: date_range = import_batch.accesslog_set.aggregate(min_date=Min('date'), max_date=Max('date')) min_date = date_range['min_date'] max_date = date_range['max_date'] if min_date and max_date: # the accesslog_set might be empty and then there is nothing that could be clashing clashing_dates = { x['date'] for x in import_batch.report_type.superseeded_by.accesslog_set. filter(platform_id=import_batch.platform_id, organization_id=import_batch.organization_id, date__lte=max_date, date__gte=min_date). values('date') } for new_log_dict in import_batch.accesslog_set.filter(metric_id__in=interest_metrics).\ exclude(date__in=clashing_dates).\ values('organization_id', 'metric_id', 'platform_id', 'target_id', 'date').\ annotate(value=Sum('value')).iterator(): # deal with stuff related to the metric metric_id = new_log_dict['metric_id'] # fill in dim1 based on the interest group of the metric new_log_dict['dim1'] = metric_to_dim1[metric_id] # remap metric to target metric if desired new_log_dict['metric_id'] = metric_remap.get(metric_id, metric_id) new_logs.append(new_log_dict) return new_logs
5,336,947
def run(input_file, log_file, mode): """Entry point for the INOIS application.""" log_level = logging.DEBUG if log_file else logging.CRITICAL log_location = log_file if log_file else "inois.log" logging.basicConfig(format='%(asctime)s (%(levelname)s): %(message)s', filename=log_location, level=log_level) application_mode = mode if mode == ApplicationModeKeys.SEARCH else ApplicationModeKeys.UPLOAD logging.info(Notifications.APPLICATION_STARTED.format(application_mode, datetime.now())) print("\n" + Notifications.APPLICATION_STARTED.format(application_mode, datetime.now())) print(Banner.TEXT) config = ConfigService.initialize_config(input_file=input_file) session = AuthenticationService(config).get_authorization() FileService.validate_files(config) keys = KeyService.get_keys(config, session) if application_mode == ApplicationModeKeys.UPLOAD: HashService.hash_files(config, keys) FileService.delete_chunked_files(config) EncryptionService.encrypt_files(config, keys) FileService.delete_hashed_files(config) UploadService.upload_files(config, session) FileService.delete_encrypted_files(config) elif application_mode == ApplicationModeKeys.SEARCH: search_queries = HashService.hash_records_for_search(config, keys) SearchService.search_on_all_queries(search_queries, session) os.chdir(config.LAUNCH_DIRECTORY) logging.info(Notifications.APPLICATION_TERMINATED.format(datetime.now())) print("\n" + Notifications.APPLICATION_TERMINATED.format(datetime.now()))
5,336,948
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Unload a config entry.""" _LOGGER.debug("Disconnecting from spa") spa: BalboaSpaWifi = hass.data[DOMAIN][entry.entry_id] if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS): hass.data[DOMAIN].pop(entry.entry_id) await spa.disconnect() return unload_ok
5,336,949
def is_bst(t: BST) -> bool:
    """Returns true if t is a valid BST object, false otherwise.

    Invariant: for each node n in t, if n.left exists, then n.left <= n,
    and if n.right exists, then n.right >= n."""
    if not isinstance(t, BST):
        return False
    if t._root and t._root.parent is not None:
        return False
    return all_bst_nodes(t._root) and has_bst_property(t._root)
5,336,950
def koch(t, n):
    """Draws a koch curve with length n."""
    if n < 3:
        fd(t, n)
        return
    m = n / 3.0
    koch(t, m)
    lt(t, 60)
    koch(t, m)
    rt(t, 120)
    koch(t, m)
    lt(t, 60)
    koch(t, m)
5,336,951
def are_embedding_layer_positions_ok_for_testing(model): """ Test data can only be generated if all embeddings layers are positioned directly behind the input nodes """ def count_embedding_layers(model): layers = model.layers result = 0 for layer in layers: if isinstance(layer, keras.layers.Embedding): result += 1 layer_type = type(layer).__name__ if layer_type in ['Model', 'Sequential']: result += count_embedding_layers(layer) return result def count_embedding_layers_at_input_nodes(model): result = 0 for input_layer in get_model_input_layers(model): if input_layer._outbound_nodes and isinstance( input_layer._outbound_nodes[0].outbound_layer, keras.layers.Embedding): result += 1 return result return count_embedding_layers(model) == count_embedding_layers_at_input_nodes(model)
5,336,952
def all_tasks_stopped(tasks_state: Any) -> bool:
    """
    Checks if all tasks are stopped or if any are still running.

    Parameters
    ----------
    tasks_state: Any
        Task state dictionary object

    Returns
    -------
    response: bool
        True if all tasks are stopped.
    """
    for t in tasks_state["tasks"]:
        if t["lastStatus"] in ("PENDING", "RUNNING"):
            return False
    return True
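# A small usage sketch with a payload shaped like an ECS `describe_tasks`
# response (the sample below is trimmed and illustrative, not taken from the
# original project).
tasks_state = {
    "tasks": [
        {"taskArn": "arn:aws:ecs:...:task/a", "lastStatus": "STOPPED"},
        {"taskArn": "arn:aws:ecs:...:task/b", "lastStatus": "RUNNING"},
    ]
}
print(all_tasks_stopped(tasks_state))  # False, one task is still RUNNING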
5,336,953
def enhance_color(image, factor): """Change the strength of colors in an image. This function has identical outputs to ``PIL.ImageEnhance.Color``. Added in 0.4.0. **Supported dtypes**: * ``uint8``: yes; fully tested * ``uint16``: no * ``uint32``: no * ``uint64``: no * ``int8``: no * ``int16``: no * ``int32``: no * ``int64``: no * ``float16``: no * ``float32``: no * ``float64``: no * ``float128``: no * ``bool``: no Parameters ---------- image : ndarray The image to modify. factor : number Colorfulness of the output image. Values close to ``0.0`` lead to grayscale images, values above ``1.0`` increase the strength of colors. Sane values are roughly in ``[0.0, 3.0]``. Returns ------- ndarray Color-modified image. """ return _apply_enhance_func(image, PIL.ImageEnhance.Color, factor)
5,336,954
def set_amc_animation(amc_file_path: str, frame_distance=1): """set animation with data of amc form :param data: file path to amc data :param frame_distance: set keyframe across every frame_distance, default is 1 """ with open(amc_file_path, "rb") as f: cur_frame = 0 character = bpy.data.objects["Armature"] character.select_set(True) bpy.ops.object.mode_set(mode="POSE") for line in tqdm(f.readlines()): if line.strip().isdigit(): cur_frame = int(line) elif cur_frame > 0 and cur_frame % frame_distance == 0: data = line.decode("utf-8").strip().split() joint_name = data[0] joint = character.pose.bones[joint_name] character.pose.bones[joint_name].rotation_mode = "XYZ" if joint_name == "root": set_joint_location_keyframe(joint, np.float_(data[1:4]), cur_frame) set_joint_rotation_keyframe(joint, np.float_(data[4:]) * np.pi / 180, cur_frame) else: set_joint_rotation_keyframe(joint, np.float_(data[1:]) * np.pi / 180, cur_frame, axis=ASF_JOINT2DOF[joint_name])
5,336,955
def locate_blocks(codestr): """ For processing CSS like strings. Either returns all selectors (that can be "smart" multi-lined, as long as it's joined by `,`, or enclosed in `(` and `)`) with its code block (the one between `{` and `}`, which can be nested), or the "lose" code (properties) that doesn't have any blocks. """ lineno = 1 par = 0 instr = None depth = 0 skip = False i = init = lose = 0 start = end = None lineno_stack = deque() for m in _blocks_re.finditer(codestr): i = m.start(0) c = codestr[i] if c == '\n': lineno += 1 if instr is not None: if c == instr: instr = None # A string ends (FIXME: needs to accept escaped characters) elif c in ('"', "'"): instr = c # A string starts elif c == '(': # parenthesis begins: par += 1 elif c == ')': # parenthesis ends: par -= 1 elif not par and not instr: if c == '{': # block begins: if depth == 0: if i > 0 and codestr[i - 1] == '#': # Do not process #{...} as blocks! skip = True else: lineno_stack.append(lineno) start = i if lose < init: _property = codestr[lose:init].strip() if _property: yield lineno, _property, None lose = init depth += 1 elif c == '}': # block ends: if depth <= 0: raise SyntaxError("Unexpected closing brace on line {0}".format(lineno)) else: depth -= 1 if depth == 0: if not skip: end = i _selectors = codestr[init:start].strip() _codestr = codestr[start + 1:end].strip() if _selectors: yield lineno_stack.pop(), _selectors, _codestr init = lose = end + 1 skip = False elif depth == 0: if c == ';': # End of property (or block): init = i if lose < init: _property = codestr[lose:init].strip() if _property: yield lineno, _property, None init = lose = i + 1 if depth > 0: if not skip: _selectors = codestr[init:start].strip() _codestr = codestr[start + 1:].strip() if _selectors: yield lineno, _selectors, _codestr if par: raise Exception("Missing closing parenthesis somewhere in block: '%s'" % _selectors) elif instr: raise Exception("Missing closing string somewhere in block: '%s'" % _selectors) else: raise Exception("Block never closed: '%s'" % _selectors) losestr = codestr[lose:] for _property in losestr.split(';'): _property = _property.strip() lineno += _property.count('\n') if _property: yield lineno, _property, None
5,336,956
def random_deceleration(most_comfortable_deceleration, lane_pos):
    """
    Return a deceleration based on given attribute of the vehicle
    :param most_comfortable_deceleration: the given attribute of the vehicle
    :param lane_pos: y
    :return: the deceleration adopted by human driver
    """
    if lane_pos:
        sigma = 0.3
    else:
        sigma = 0.5
    return np.random.normal(most_comfortable_deceleration, sigma)
5,336,957
def dataclass_fields(dc):
    """Returns a dataclass's fields dictionary."""
    return {name: getattr(dc, name) for name in dc.__dataclass_fields__}
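# A short usage example for the helper above (the dataclass is illustrative).
# It behaves like dataclasses.asdict() for flat instances, but keeps attribute
# values as-is instead of recursing into nested dataclasses.
from dataclasses import dataclass

@dataclass
class Point:
    x: int
    y: int

print(dataclass_fields(Point(1, 2)))  # {'x': 1, 'y': 2}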
5,336,958
def gets_ontology_statistics(file_location: str, owltools_location: str = './pkt_kg/libs/owltools') -> str: """Uses the OWL Tools API to generate summary statistics (i.e. counts of axioms, classes, object properties, and individuals). Args: file_location: A string that contains the file path and name of an ontology. owltools_location: A string pointing to the location of the owl tools library. Returns: stats: A formatted string containing descriptive statistics. Raises: TypeError: If the file_location is not type str. OSError: If file_location points to a non-existent file. ValueError: If file_location points to an empty file. """ if not isinstance(file_location, str): raise TypeError('file_location must be a string') elif not os.path.exists(file_location): raise OSError('{} does not exist!'.format(file_location)) elif os.stat(file_location).st_size == 0: raise ValueError('{} is empty'.format(file_location)) else: output = subprocess.check_output([os.path.abspath(owltools_location), file_location, '--info']) res = output.decode('utf-8').split('\n')[-5:] cls, axs, op, ind = res[0].split(':')[-1], res[3].split(':')[-1], res[2].split(':')[-1], res[1].split(':')[-1] sent = 'The knowledge graph contains {0} classes, {1} axioms, {2} object properties, and {3} individuals' stats = sent.format(cls, axs, op, ind) return stats
5,336,959
def listener():
    """
    Initialize the ROS node and the topic to which it subscribes.
    """
    rospy.init_node('subscriber_example', anonymous=True)

    # Subscribes to topic 'joint_states'
    rospy.Subscriber("joint_states", JointState, callback)

    rospy.spin()
5,336,960
def get_directives(app: Sphinx): """Return all directives available within the current application.""" from docutils.parsers.rst.directives import _directives, _directive_registry all_directives = {} all_directives.update(_directive_registry) all_directives.update(_directives) for key, (modulename, classname) in _directive_registry.items(): if key not in all_directives: try: module = import_module(f"docutils.parsers.rst.directives.{modulename}") all_directives[key] = getattr(module, classname) except (AttributeError, ModuleNotFoundError): pass for domain_name in app.env.domains: domain = app.env.get_domain(domain_name) prefix = "" if domain.name == "std" else f"{domain.name}:" # TODO 'default_domain' is also looked up by # sphinx.util.docutils.sphinx_domains.lookup_domain_element for direct_name, direct in domain.directives.items(): all_directives[f"{prefix}{direct_name}"] = direct return all_directives
5,336,961
def _get_wavs_from_dir(dir):
    """Return a sorted list of wave files from a directory."""
    return [os.path.join(dir, f) for f in sorted(os.listdir(dir))
            if _is_wav_file(f)]
5,336,962
def SqlReader(sql_statement: str, **kwargs): """ Use basic SQL queries to filter Reader. Parameters: sql_statement: string kwargs: parameters to pass to the Reader Note: `select` is taken from SQL SELECT `dataset` is taken from SQL FROM `filters` is taken from SQL WHERE """ # some imports here to remove cyclic imports from mabel import DictSet, Reader sql = SqlParser(sql_statement) get_logger().info(repr(sql)) actual_select = sql.select_expression if sql.select_expression is None: actual_select = "*" elif sql.select_expression != "*": actual_select = sql.select_expression + ", *" reducer = None if sql.select_expression == "COUNT(*)": reducer = lambda x: {"*": "*"} # FROM clause # WHERE clause if isinstance(sql.dataset, list): # it's a list if it's been parsed into a SQL statement, # this is how subqueries are interpretted - the parser # doesn't extract a dataset name - it collects parts of # a SQL statement which it can then pass to a SqlReader # to get back a dataset - which we then use as the # dataset for the outer query. reader = SqlReader("".join(sql.dataset), **kwargs) else: reader = Reader( select=actual_select, dataset=sql.dataset, filters=sql.where_expression, **kwargs, ) # GROUP BY clause if sql.group_by or any( [ t["type"] == TOKENS.AGGREGATOR for t in sql.select_evaluator.tokens ] # type:ignore ): from ...internals.group_by import GroupBy # convert the clause into something we can pass to GroupBy if sql.group_by: groups = [ group.strip() for group in sql.group_by.split(",") if group.strip() != "" ] else: groups = ["*"] # we're not really grouping aggregations = [] renames = [] for t in sql.select_evaluator.tokens: # type:ignore if t["type"] == TOKENS.AGGREGATOR: aggregations.append((t["value"], t["parameters"][0]["value"])) if t["as"]: t["raw"] = get_function_name(t) renames.append(t) elif t["type"] == TOKENS.VARIABLE and t["value"] not in groups: raise InvalidSqlError( "Invalid SQL - SELECT clause in a statement with a GROUP BY clause must be made of aggregations or items from the GROUP BY clause." ) if aggregations: grouped = GroupBy(reader, groups).aggregate(aggregations) else: grouped = GroupBy(reader, groups).groups() # there could be 250000 groups, so we're not going to load them into memory reader = DictSet(grouped) # HAVING clause # if we have a HAVING clause, filter the grouped data by it if sql.having: reader = reader.filter(sql.having) # SELECT clause renames = {} # type:ignore for t in sql.select_evaluator.tokens: # type:ignore if t["as"]: renames[get_function_name(t)] = t["as"] def _perform_renames(row): for k, v in [(k, v) for k, v in row.items()]: if k in renames: row[renames[k]] = row.pop(k, row.get(renames[k])) return row if renames: reader = DictSet(map(_perform_renames, reader)) reader = reader.select(sql.select_evaluator.fields()) # type:ignore # disctinct now we have only the columns we're interested in if sql.distinct: reader = reader.distinct() # ORDER BY clause if sql.order_by: take = 10000 # the Query UI is currently set to 2000 if sql.limit: take = int(sql.limit) reader = DictSet( reader.sort_and_take( column=sql.order_by, take=take, descending=sql.order_descending ) ) # LIMIT clause if sql.limit: reader = reader.take(sql.limit) return reader
5,336,963
def _handle_consent_confirmation(user, is_confirmed):
    """
    Return server response given user consent.

    Args:
        user (fence.models.User): authN'd user
        is_confirmed (str): confirmation param
    """
    if is_confirmed == "yes":
        # user has already given consent, continue flow
        response = server.create_authorization_response(grant_user=user)
    else:
        # user did not give consent
        response = server.create_authorization_response(grant_user=None)
    return response
5,336,964
def local_coherence(img, window_s=WSIZ):
    """
    Calculate the coherence according to the methodology described in:
    Bazen, Asker M., and Sabih H. Gerez. "Segmentation of fingerprint images."
    ProRISC 2001 Workshop on Circuits, Systems and Signal Processing.
    Veldhoven, The Netherlands, 2001.
    """
    coherence = []
    rs = window_s
    cs = window_s
    for r in range(4, img.shape[0] - rs, rs):
        for c in range(4, img.shape[1] - cs, cs):
            window = img[r:r + rs, c:c + cs]
            if window.var() != 0:  # Need variance because of the constraint (gxx + gyy) < 0
                gx = np.uint8(np.absolute(cv2.Sobel(window, cv2.CV_64F, 1, 0, ksize=5))).flatten()
                gy = np.uint8(np.absolute(cv2.Sobel(window, cv2.CV_64F, 0, 1, ksize=5))).flatten()
                gxx = sum([int(x) ** 2 for x in gx])
                gyy = sum([int(y) ** 2 for y in gy])
                gxy = sum([int(x) * int(y) for x, y in zip(gx, gy)])
                assert gxx + gyy != 0
                coherence.append(math.sqrt(math.pow((gxx - gyy), 2) + 4 * math.pow(gxy, 2)) / (gxx + gyy))
    return coherence
5,336,965
def storeIDToWebID(key, storeid):
    """
    Takes a key (int) and storeid (int) and produces a webid (a 16-character
    str suitable for including in URLs)
    """
    i = key ^ storeid
    l = list('%0.16x' % (i,))
    for nybbleid in range(0, 8):
        a, b = _swapat(key, nybbleid)
        _swap(l, a, b)
    return ''.join(l)
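# The function above depends on module-level helpers `_swapat` and `_swap`
# that are not included in this sample. A minimal sketch of what such helpers
# could look like, purely as an assumption to make the snippet runnable (the
# real implementations may derive the swap positions differently):
def _swapat(key, nybbleid):
    """Derive a pair of hex-digit positions (0-15) to swap from the key."""
    a = (key >> (4 * nybbleid)) & 0xF
    b = (key >> (4 * (nybbleid + 8))) & 0xF
    return a, b

def _swap(chars, a, b):
    """Swap two positions of a character list in place."""
    chars[a], chars[b] = chars[b], chars[a]

print(storeIDToWebID(0x1234, 42))  # deterministic 16-character hex string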
5,336,966
def _jupyter_server_extension_paths():
    """
    Set up the server extension for collecting metrics
    """
    return [{"module": "jupyter_resource_usage"}]
5,336,967
def plot_alpha(ax=None):
    """Plot angle of attack versus vertical coordinate."""
    df = pr.load_sampled_velocity(name="inflow")
    pitch = pr.read_alpha_deg()
    df["alpha_deg"] = pitch - np.rad2deg(np.arctan(df.U_1/df.U_0))
    if ax is None:
        fig, ax = plt.subplots()
    ax.plot(df.z, -df.alpha_deg)
    ax.set_xlabel("$z/H$")
    ax.set_ylabel(r"$\alpha$ (degrees)")
5,336,968
def _make_ordered_node_map( pipeline: p_pb2.Pipeline ) -> 'collections.OrderedDict[str, p_pb2.PipelineNode]': """Helper function to prepare the Pipeline proto for DAG traversal. Args: pipeline: The input Pipeline proto. Since we expect this to come from the compiler, we assume that it is already topologically sorted. Returns: An OrderedDict that map node_ids to PipelineNodes. """ node_map = collections.OrderedDict() for pipeline_or_node in pipeline.nodes: node_id = pipeline_or_node.pipeline_node.node_info.id node_map[node_id] = pipeline_or_node.pipeline_node return node_map
5,336,969
def read_omex_meta_files_for_archive(archive, archive_dirname, config=None): """ Read all of the OMEX Metadata files in an archive Args: archive (:obj:`CombineArchive`): COMBINE/OMEX archive archive_dirname (:obj:`str`): directory with the content of the archive config (:obj:`Config`, optional): configuration Returns: :obj:`tuple`: * :obj:`object`: representation of the OMEX Metadata file in :obj:`schema` * nested :obj:`list` of :obj:`str`: nested list of errors with the OMEX Metadata file * nested :obj:`list` of :obj:`str`: nested list of warnings with the OMEX Metadata file """ content = [] errors = [] warnings = [] if config is None: config = get_config() filenames = [] for item in archive.contents: if item.format and re.match(CombineArchiveContentFormatPattern.OMEX_METADATA.value, item.format): filenames.append(os.path.join(archive_dirname, item.location)) if filenames: return read_omex_meta_file(filenames, archive=archive, working_dir=archive_dirname, config=config) else: content = [] errors = [[( 'The COMBINE/OMEX does not contain an OMEX Metadata file. ' 'Archives must contain metadata for publication to BioSimulations.' )]] warnings = [] return (content, errors, warnings)
5,336,970
def fmin_b_bfgs(func, x0, args=(), options=None):
    """
    The BFGS algorithm from Algorithm 6.1 from Wright and Nocedal, 'Numerical
    Optimization', 1999, pg. 136-143, with bounded parameters, using the
    active set approach from,

    Byrd, R. H., Lu, P., Nocedal, J., & Zhu, C. (1995).
    'A Limited Memory Algorithm for Bound Constrained Optimization.'
    SIAM Journal on Scientific Computing, 16(5), 1190–1208. doi:10.1137/0916069

    Notes:
        We utilise boolean arithmetic to avoid jax.cond calls which don't work
        on accelerators. A side effect is that we perform more gradient
        evaluations than scipy's BFGS.

    func: callable
        Function of the form f(x) where x is a flat ndarray and returns a real
        scalar. The function should be composed of operations with vjp defined.
        If func is jittable then fmin_bfgs is jittable. If func is not
        jittable, then _nojit should be set to True.
    x0: ndarray
        initial variable
    args: tuple, optional
        Extra arguments to pass to func as func(x, *args)
    options: Optional dict of parameters
        maxiter: int
            Maximum number of evaluations
        norm: float
            Order of norm for convergence check. Default inf.
        gtol: float
            Terminates minimization when |grad|_norm < g_tol
        ls_maxiter: int
            Maximum number of linesearch iterations
        bounds: 2-tuple of two vectors specifying the lower and upper bounds,
            e.g. (l, u) where l and u have the same size as x0. For parameters
            x_i without constraints the corresponding l_i=-jnp.inf and
            u_i=jnp.inf. Specifying l=None or u=None means no constraints on
            that side.

    Returns: BFGSResults
    """
    if options is None:
        options = dict()
    maxiter: Optional[int] = options.get('maxiter', None)
    norm: float = options.get('norm', jnp.inf)
    gtol: float = options.get('gtol', 1e-5)
    ls_maxiter: int = options.get('ls_maxiter', 10)
    bounds: Tuple[jnp.ndarray, jnp.ndarray] = tuple(options.get('bounds', (None, None)))

    state = BFGSResults(converged=False,
                        failed=False,
                        k=0,
                        nfev=0,
                        ngev=0,
                        nhev=0,
                        x_k=x0,
                        f_k=None,
                        g_k=None,
                        H_k=None,
                        status=None,
                        ls_status=jnp.array(0))

    if maxiter is None:
        maxiter = jnp.size(x0) * 200

    d = x0.shape[0]

    l = bounds[0]
    u = bounds[1]
    if l is None:
        l = -jnp.inf * jnp.ones_like(x0)
    if u is None:
        u = jnp.inf * jnp.ones_like(x0)
    l, u = jnp.where(l < u, l, u), jnp.where(l < u, u, l)

    def project(x, l, u):
        return jnp.clip(x, l, u)

    def get_active_set(x, l, u):
        return jnp.where((x == l) | (x == u))

    def func_with_args(x):
        return func(x, *args)

    def get_generalised_Cauchy_point(xk, gk, l, u):
        def func(t):
            return func_with_args(project(xk - t * gk, l, u))

    initial_H = jnp.eye(d)
    initial_H = options.get('hess_inv', initial_H)

    value_and_grad = jax.value_and_grad(func_with_args)

    f_0, g_0 = value_and_grad(x0)
    state = state._replace(f_k=f_0,
                           g_k=g_0,
                           H_k=initial_H,
                           nfev=state.nfev + 1,
                           ngev=state.ngev + 1,
                           converged=jnp.linalg.norm(g_0, ord=norm) < gtol)

    def body(state):
        p_k = -(state.H_k @ state.g_k)
        line_search_results = line_search(value_and_grad,
                                          state.x_k,
                                          p_k,
                                          old_fval=state.f_k,
                                          gfk=state.g_k,
                                          maxiter=ls_maxiter)
        state = state._replace(nfev=state.nfev + line_search_results.nfev,
                               ngev=state.ngev + line_search_results.ngev,
                               failed=line_search_results.failed,
                               ls_status=line_search_results.status)
        s_k = line_search_results.a_k * p_k
        x_kp1 = state.x_k + s_k
        f_kp1 = line_search_results.f_k
        g_kp1 = line_search_results.g_k
        # print(g_kp1)
        y_k = g_kp1 - state.g_k
        rho_k = jnp.reciprocal(y_k @ s_k)

        sy_k = s_k[:, None] * y_k[None, :]
        w = jnp.eye(d) - rho_k * sy_k
        H_kp1 = jnp.where(jnp.isfinite(rho_k),
                          jnp.linalg.multi_dot([w, state.H_k, w.T])
                          + rho_k * s_k[:, None] * s_k[None, :],
                          state.H_k)

        converged = jnp.linalg.norm(g_kp1, ord=norm) < gtol

        state = state._replace(converged=converged,
                               k=state.k + 1,
                               x_k=x_kp1,
                               f_k=f_kp1,
                               g_k=g_kp1,
                               H_k=H_kp1)
        return state

    state = while_loop(
        lambda state: (~state.converged) & (~state.failed) & (state.k < maxiter),
        body,
        state)

    state = state._replace(
        status=jnp.where(
            state.converged, jnp.array(0),  # converged
            jnp.where(state.k == maxiter, jnp.array(1),  # max iters reached
                      jnp.where(state.failed,
                                jnp.array(2) + state.ls_status,  # ls failed (+ reason)
                                jnp.array(-1)))))  # undefined

    return state
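
# A minimal usage sketch for fmin_b_bfgs above, assuming jax/jax.numpy are
# installed and that the helpers the function relies on (BFGSResults,
# line_search, while_loop) are importable from the same module. The quadratic
# objective and the bounds below are made up for illustration only.
import jax.numpy as jnp

def quadratic(x):
    # simple convex objective with minimum at (1, -2)
    return jnp.sum((x - jnp.array([1.0, -2.0])) ** 2)

x0 = jnp.zeros(2)
options = dict(gtol=1e-6,
               bounds=(jnp.array([0.0, -1.0]), jnp.array([2.0, 1.0])))
results = fmin_b_bfgs(quadratic, x0, options=options)
# results.x_k holds the final iterate and results.converged the convergence flag.
print(results.x_k, results.converged)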
5,336,971
def find_broken_in_text(text, ignore_substrings=None):
    """Find broken links
    """
    links = _find(text, ignore_substrings=ignore_substrings)
    responses = [_check_if_broken(link) for link in links]
    return [res.url for res in responses if res.broken]
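
# A short usage sketch for find_broken_in_text, assuming the private helpers it
# relies on (_find, _check_if_broken) are defined in the same module; the text
# and the ignore list below are illustrative only.
text = "See https://example.com/docs and https://example.com/missing-page."
broken = find_broken_in_text(text, ignore_substrings=["localhost"])
print(broken)  # list of URLs whose responses were flagged as broken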
5,336,972
def revision_info():
    """
    Get the git hash and mtime of the repository, or the installed files.
    """
    # TODO: test with "pip install -e ." for developer mode
    global _REVISION_INFO

    if _REVISION_INFO is None:
        _REVISION_INFO = git_rev(repo_path())

    if _REVISION_INFO is None:
        try:
            from importlib import resources
        except ImportError:
            # CRUFT: pre-3.7 requires importlib_resources
            import importlib_resources as resources
        try:
            revdata = resources.read_text(PACKAGE_NAME, RESOURCE_NAME)
            commit = revdata.strip()
            _REVISION_INFO = commit
        except Exception:
            _REVISION_INFO = "unknown"

    return _REVISION_INFO
5,336,973
def home():
    """ route for the index page"""
    return jsonify({"message": "welcome to fast_Food_Fast online restaurant"})
5,336,974
def test_get_default_output_for_example():
    """Tests for get_default_output_for_example()."""
    from reana.cli import get_default_output_for_example
    for (example, output) in (
            ('', ('plot.png',)),
            ('reana-demo-helloworld', ('greetings.txt',)),
            ('reana-demo-root6-roofit', ('plot.png',)),
            ('reana-demo-alice-lego-train-test-run', ('plot.pdf',)),
    ):
        assert output == get_default_output_for_example(example)
5,336,975
def singlediode_voc(effective_irradiance, temp_cell, module_parameters):
    """
    Calculate voc using the singlediode model.

    Parameters
    ----------
    effective_irradiance
    temp_cell
    module_parameters

    Returns
    -------
    """
    photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth = \
        calcparams_singlediode(effective_irradiance, temp_cell,
                               module_parameters)

    # out = pvlib.pvsystem.singlediode(photocurrent, saturation_current,
    #                                  resistance_series, resistance_shunt,
    #                                  nNsVth, method='newton')

    v_oc = pvlib.singlediode.bishop88_v_from_i(0, photocurrent,
                                               saturation_current,
                                               resistance_series,
                                               resistance_shunt, nNsVth,
                                               method='newton')
    return v_oc
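
# Illustration of the open-circuit calculation that singlediode_voc delegates
# to: calling pvlib's bishop88_v_from_i with current=0 and some plausible
# single-diode parameters. The numbers below are made up for demonstration and
# are not tied to any particular module; requires pvlib to be installed.
import pvlib

v_oc_example = pvlib.singlediode.bishop88_v_from_i(
    0,                        # current = 0 A, i.e. open circuit
    photocurrent=6.0,         # A
    saturation_current=1e-9,  # A
    resistance_series=0.5,    # ohm
    resistance_shunt=500.0,   # ohm
    nNsVth=1.5,               # n * Ns * Vth, in volts
    method='newton')
print(v_oc_example)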
5,336,976
async def test_generated_text_with_phrase() -> None:
    """Tests the function with a non-empty phrase"""
    phrase = 'Тестовая фраза'
    is_empty_phrase, generated_text = await get_generated_text(phrase)
    assert not is_empty_phrase
    assert generated_text
5,336,977
def YumUninstall(vm):
    """Stops collectd on 'vm'."""
    _Uninstall(vm)
5,336,978
def test_cogeo_invalidresampling(runner):
    """Should exit with invalid resampling."""
    with runner.isolated_filesystem():
        result = runner.invoke(
            cogeo, ["create", raster_path_rgb, "output.tif", "-r", "gauss", "-w"]
        )
        assert result.exception
        assert result.exit_code == 2

        result = runner.invoke(
            cogeo,
            [
                "create",
                raster_path_rgb,
                "output.tif",
                "--overview-resampling",
                "max",
                "-w",
            ],
        )
        assert result.exception
        assert result.exit_code == 2
5,336,979
def _exec_document_lint_and_script(
        limit_count: Optional[int] = None) -> List[str]:
    """
    Execute each runnable script in the documents and check it with each linter.

    Parameters
    ----------
    limit_count : int or None, optional
        Limitation of the script execution count.

    Returns
    -------
    executed_scripts : list of str
        List of executed Python scripts.
    """
    from apysc._file import file_util
    md_file_paths: List[str] = \
        file_util.get_specified_ext_file_paths_recursively(
            extension='.md', dir_path='./docs_src/')
    hashed_vals: List[str]
    md_file_paths, hashed_vals = _slice_md_file_by_hashed_val(
        md_file_paths=md_file_paths)
    script_data_list: List[_ScriptData] = _make_script_data_list(
        md_file_paths=md_file_paths, hashed_vals=hashed_vals,
        limit_count=limit_count)
    workers: int = max(mp.cpu_count() // 2, 1)

    logger.info(msg="Document's code block flake8 checking started...")
    with mp.Pool(workers) as p:
        p.map(func=_check_code_block_with_flake8, iterable=script_data_list)

    logger.info(msg="Document's code block numdoclint checking started...")
    with mp.Pool(workers) as p:
        p.map(
            func=_check_code_block_with_numdoclint,
            iterable=script_data_list)

    logger.info(msg="Document's code block mypy checking started...")
    with mp.Pool(workers) as p:
        p.map(func=_check_code_block_with_mypy, iterable=script_data_list)

    logger.info(msg="Document's scripts execution started...")
    with mp.Pool(workers) as p:
        run_return_data_list: List[_RunReturnData] = p.map(
            func=_run_code_block_script, iterable=script_data_list)
    _move_code_block_outputs()
    _validate_script_return_data(return_data_list=run_return_data_list)
    _save_hashed_val(script_data_list=script_data_list)

    executed_scripts: List[str] = [
        script_data['runnable_script']
        for script_data in script_data_list]
    return executed_scripts
5,336,980
def balance_boxplot(balance_name, data, num_color='#FFFFFF',
                    denom_color='#FFFFFF',
                    xlabel="", ylabel="", linewidth=1,
                    ax=None, **kwargs):
    """ Plots a boxplot for a given balance on a discrete metadata category.

    Parameters
    ----------
    x, y, hue: str
        Variable names to be passed into the seaborn plots for plotting.
    balance_name : str
        Name of balance to plot.
    data : pd.DataFrame
        Merged dataframe of balances and metadata.
    num_color : str
        Hex for background colors of values above zero.
    denom_color : str
        Hex for background colors of values below zero.
    xlabel : str
        x-axis label.
    ylabel : str
        y-axis label.
    linewidth : int
        Width of the grid lines.
    ax : matplotlib axes object
        Axes object to render boxplots in.
    **kwargs : dict
        Values to pass in to customize seaborn boxplot.

    Returns
    -------
    a : matplotlib axes object
        Matplotlib axes object with rendered boxplots.

    See Also
    --------
    seaborn.boxplot
    """
    warnings.warn("This visualization is deprecated.", DeprecationWarning)
    import seaborn as sns
    if ax is None:
        f, ax = plt.subplots()

    # the number 20 is pretty arbitrary - we are just
    # resizing to make sure that there is separation between the
    # edges of the plot, and the boxplot
    pad = (data[balance_name].max() - data[balance_name].min()) / 20
    ax.axvspan(data[balance_name].min() - pad, 0,
               facecolor=num_color, zorder=0)
    ax.axvspan(0, data[balance_name].max() + pad,
               facecolor=denom_color, zorder=0)

    if 'hue' in kwargs.keys():
        hue = kwargs['hue']
        num_groups = len(data[hue].value_counts())
    else:
        num_groups = 1
    a = sns.boxplot(ax=ax, x=balance_name, data=data, **kwargs)
    a.minorticks_on()
    minorLocator = matplotlib.ticker.AutoMinorLocator(num_groups)
    a.get_yaxis().set_minor_locator(minorLocator)
    a.grid(axis='y', which='minor', color='k', linestyle=':', linewidth=1)
    a.set_xlim([data[balance_name].min() - pad,
                data[balance_name].max() + pad])
    a.set_xlabel(xlabel)
    a.set_ylabel(ylabel)
    return a
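
# A small usage sketch for balance_boxplot with a made-up dataframe of balance
# values and a categorical 'group' column. It assumes pandas, matplotlib and
# seaborn are installed and that the function above is importable together with
# its module-level imports (warnings, matplotlib, plt).
import pandas as pd
import matplotlib.pyplot as plt

df = pd.DataFrame({
    'b1': [-0.8, -0.2, 0.1, 0.5, 1.2, -1.0],
    'group': ['control', 'control', 'treat', 'treat', 'treat', 'control'],
})
ax = balance_boxplot('b1', df, y='group',
                     num_color='#FFDDDD', denom_color='#DDDDFF',
                     xlabel='b1 balance', ylabel='group')
plt.show()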
5,336,981
def walk(dirname, file_list):
    """
    This function is from a book called Think Python written by Allen B. Downey.
    It walks through a directory, gets names of all files and calls itself
    recursively on all the directories
    """
    for name in os.listdir(dirname):
        path = os.path.join(dirname, name)
        if os.path.isfile(path):
            file_list.append(path)
        else:
            walk(path, file_list)
    return file_list
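
# Usage sketch for walk: collect every file path under a directory into a flat
# list. The current working directory is used only because it always exists;
# any readable directory path works.
import os

all_files = walk(os.getcwd(), [])
print(len(all_files), "files found")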
5,336,982
def new_auto_connection(dsn: dict, name: str = "DSN"):
    """
---------------------------------------------------------------------------
Saves a connection to automatically create database cursors, creating a
used-as-needed file to automatically set up a connection. Useful for
preventing redundant cursors. The function 'get_connection_file' returns the
connection file path.

Parameters
----------
dsn: dict
    Dictionary containing the information to set up the connection.
        database : Database Name
        host     : Server ID
        password : User Password
        port     : Database Port (optional, default: 5433)
        user     : User ID (optional, default: dbadmin)
        ...
name: str, optional
    Name of the auto connection.

See Also
--------
change_auto_connection : Changes the current auto creation.
read_auto_connect      : Automatically creates a connection.
vertica_conn           : Creates a Vertica Database connection.
    """
    check_types([("dsn", dsn, [dict],)])
    path = get_connection_file()
    confparser = ConfigParser()
    confparser.optionxform = str
    try:
        confparser.read(path)
    except:
        pass
    if confparser.has_section(name):
        confparser.remove_section(name)
    confparser.add_section(name)
    for elem in dsn:
        confparser.set(name, elem, str(dsn[elem]))
    f = open(path, "w+")
    confparser.write(f)
    f.close()
    change_auto_connection(name)
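
# Usage sketch for new_auto_connection. The credentials below are placeholders,
# and the call assumes the helpers used above (check_types, get_connection_file,
# change_auto_connection) are available in the same module.
dsn = {
    "host": "10.0.0.5",
    "port": 5433,
    "database": "testdb",
    "user": "dbadmin",
    "password": "XXXXX",
}
new_auto_connection(dsn, name="my_vertica")
# Subsequent cursor creation can then reuse the "my_vertica" entry stored in
# the file returned by get_connection_file().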
5,336,983
def delete_nodes_not_in_list(uuids):
    """Delete nodes which don't exist in Ironic node UUIDs.

    :param uuids: Ironic node UUIDs
    """
    inspector_uuids = _list_node_uuids()
    for uuid in inspector_uuids - uuids:
        LOG.warning(
            _LW('Node %s was deleted from Ironic, dropping from Ironic '
                'Inspector database'), uuid)
        with _get_lock_ctx(uuid):
            _delete_node(uuid)
5,336,984
def getCustomEvaluatorClusters(evaluator):
    """ Get the clusters for a given custom evaluator, if any. """
    pass
5,336,985
def _apply_result_filters(key_gender_token_counters: Dict[Union[str, int], GenderTokenCounters],
                          diff: bool,
                          sort: bool,
                          limit: int,
                          remove_swords: bool) -> KeyGenderTokenResponse:
    """
    A private helper function for applying optional keyword arguments to the output
    of GenderProximityAnalysis methods, allowing the user to sort, diff, limit, and
    remove stopwords from the output. These transformations do not mutate the input.

    :param key_gender_token_counters: a dictionary shaped Dict[Union[str, int], GenderTokenCounters]
    :param diff: return the difference in token occurrences across Genders.
    :param sort: return an array of the shape Sequence[Tuple[str, int]]
    :param limit: if sort==True, return only n=limit token occurrences.
    :param remove_swords: remove stop words from output.
    :return: a dictionary of the shape Dict[Union[str, int], GenderTokenResponse]

    >>> test_counter_1 = Counter({'foo': 1, 'bar': 2, 'own': 2})
    >>> test_counter_2 = Counter({'foo': 5, 'baz': 2})
    >>> test = {'doc': {'Male': test_counter_1, 'Female': test_counter_2}}
    >>> _apply_result_filters(test, diff=True, sort=False, limit=10, remove_swords=False).get('doc')
    {'Male': Counter({'bar': 2, 'own': 2, 'foo': -4}), 'Female': Counter({'foo': 4, 'baz': 2})}
    >>> _apply_result_filters(test, diff=False, sort=True, limit=10, remove_swords=False).get('doc')
    {'Male': [('bar', 2), ('own', 2), ('foo', 1)], 'Female': [('foo', 5), ('baz', 2)]}
    >>> _apply_result_filters(test, diff=False, sort=False, limit=10, remove_swords=True).get('doc')
    {'Male': Counter({'bar': 2, 'foo': 1}), 'Female': Counter({'foo': 5, 'baz': 2})}
    >>> _apply_result_filters(test, diff=True, sort=True, limit=10, remove_swords=False).get('doc')
    {'Male': [('bar', 2), ('own', 2), ('foo', -4)], 'Female': [('foo', 4), ('baz', 2)]}
    """
    output = {}

    for key, gender_token_counters in key_gender_token_counters.items():
        if remove_swords:
            output[key] = _remove_swords(gender_token_counters)
        else:
            output[key] = gender_token_counters

        if diff:
            output[key] = _diff_gender_token_counters(output[key])

        if sort:
            output[key] = _sort_gender_token_counters(output[key], limit=limit)

    return output
5,336,986
def blog_delete(request):
    """Delete blog entry by id."""
    blog_id = int(request.params.get('id'))
    entry = BlogRecordService.by_id(blog_id, request)
    if not entry:
        return HTTPNotFound()
    request.dbsession.delete(entry)
    return HTTPFound(location=request.route_url('home'))
5,336,987
async def get_ios_cfw():
    """Gets all apps on ios.cfw.guide

    Returns
    -------
    dict
        "ios, jailbreaks, devices"
    """
    async with aiohttp.ClientSession() as session:
        async with session.get("https://api.appledb.dev/main.json") as resp:
            if resp.status == 200:
                data = await resp.json()
                return data
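
# Usage sketch for the coroutine above: it needs aiohttp installed and network
# access to api.appledb.dev; the keys printed below depend on whatever that
# endpoint currently returns, and the result is None on a non-200 response.
import asyncio

data = asyncio.run(get_ios_cfw())
if data is not None:
    print(list(data.keys()))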
5,336,988
def log_exporter(message):
    """Log a message to the exporter log"""
    logger = logging.getLogger(EXPORTER_LOGGER_NAME)
    logger.info(message)
5,336,989
def server_error(errorMsg):
    """ Shorthand for returning error message. """
    resp = HttpResponse(status=502)
    resp.write("<h3>502 BAD GATEWAY: </h3>")
    resp.write("<p>ERROR: {}</p>".format(errorMsg))
    return resp
5,336,990
def protein_variant(variant):
    """
    Return an HGVS_ variant string containing only the protein changes in a
    coding HGVS_ variant string. If all variants are synonymous, returns the
    synonymous variant code. If the variant is wild type, returns the wild
    type variant.

    :param str variant: coding variant string
    :return: protein variant string (or synonymous or wild type)
    :rtype: str
    """
    if len(variant) == 0:
        raise ValueError("Empty variant string.")
    elif variant == WILD_TYPE_VARIANT:
        return WILD_TYPE_VARIANT
    elif variant == SYNONYMOUS_VARIANT:
        return SYNONYMOUS_VARIANT
    else:
        matches = re.findall(r"\((p\.\S*)\)", variant)
        if len(matches) == 0:
            raise ValueError("Invalid coding variant string.")
        # uniqify and remove synonymous
        seen = {"p.=": True}
        unique_matches = list()
        for v in matches:
            if v in seen:
                continue
            else:
                seen[v] = True
                unique_matches.append(v)
        if len(unique_matches) == 0:
            return SYNONYMOUS_VARIANT
        else:
            return ", ".join(unique_matches)
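
# Worked example for protein_variant, assuming the module-level constants it
# checks (WILD_TYPE_VARIANT, SYNONYMOUS_VARIANT) are defined as in the
# surrounding module. The HGVS strings below are illustrative.
print(protein_variant("c.76A>C (p.Ile26Leu)"))
# -> 'p.Ile26Leu'
print(protein_variant("c.76A>C (p.Ile26Leu), c.80C>T (p.=)"))
# -> 'p.Ile26Leu'  (the synonymous 'p.=' entry is dropped)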
5,336,991
def parser():
    """Parses arguments from command line using argparse.

    Parameters
    """
    # default directory for reddit files
    default_directory = os.path.join(os.getcwd(), "data")

    parser = argparse.ArgumentParser()

    # obligatory
    parser.add_argument("mode", type=int,
                        help="execution mode: 1 build index, 2: query using existing index, 3 build and query")

    # conditionally obligatory
    parser.add_argument("--start", "-s", type=str, help="first year/month")
    parser.add_argument("--end", "-e", type=str, help="last year/month")

    # optional with defaults
    parser.add_argument("--dir", "-d", type=str, nargs="?", default=default_directory,
                        help="directory for data storage")
    parser.add_argument("--num", "-n", type=int, nargs="?", default=10,
                        help="number of results per query")
    parser.add_argument("--fulltext", "-f", action="store_true",
                        help="store fulltext and/or return in queries")
    parser.add_argument("--all", "-a", action="store_true",
                        help="Return documents containing all rather than any of the query terms")
    parser.add_argument("--minfreq", "-m", type=int, nargs="?", default=5,
                        help="minimum term frequency")
    parser.add_argument("--lemma", "-l", action="store_true",
                        help="lemmatize comments/queries")
    parser.add_argument("--cores", "-c", type=int, nargs="?", default=1,
                        help="number of cores to use")
    parser.add_argument("--progress", "-p", action="store_true",
                        help="report progress")

    return parser
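
# Usage sketch for parser(): build the parser and parse an illustrative command
# line (mode 3 = build and query, restricted to 2010-01 .. 2010-03).
args = parser().parse_args(["3", "--start", "2010-01", "--end", "2010-03",
                            "--num", "20", "--lemma"])
print(args.mode, args.start, args.end, args.num, args.lemma)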
5,336,992
def dump_func_name(func):
    """This decorator logs the wrapped function's name (at debug level) when it is called

    Args:
        func:

    Returns:

    """
    def echo_func(*func_args, **func_kwargs):
        logging.debug('### Start func: {}'.format(func.__name__))
        return func(*func_args, **func_kwargs)
    return echo_func
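
# Usage sketch for the dump_func_name decorator: with debug logging enabled,
# each call to the wrapped function emits a '### Start func: add' record.
import logging

logging.basicConfig(level=logging.DEBUG)

@dump_func_name
def add(a, b):
    return a + b

print(add(2, 3))  # logs the function name, then prints 5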
5,336,993
def calc_predictability_trace_of_avg_cov(x, k, p, ndim=False):
    """
    The main evaluation criterion of GPFA, i.e., equation (2) from the paper.

    :param x: data array
    :param k: number of neighbors for estimate
    :param p: number of past time steps to consider
    :param ndim: n-dimensional evaluation if True
    :return: estimated variance in the next time step
    """

    def _cov(t):
        successors = neighbors[t] + 1
        successors = successors[successors < N]
        suc_dat = x[successors]
        return np.array(np.cov(suc_dat.T), ndmin=2)

    # pairwise distances of data points
    if x.ndim == 1:
        x = np.array(x, ndmin=2).T
    N, _ = x.shape
    y = concatenate_past(x, p=p)
    tree = scipy.spatial.cKDTree(y)
    neighbors = [tree.query(y[i], k=k+1)[1] for i in xrange(y.shape[0])]
    assert len(neighbors) == N

    covariances = map(_cov, range(p-1, N-1))
    covariance = reduce(lambda a, b: a + b, covariances) / (N-p)

    if ndim:
        E, _ = np.linalg.eigh(covariance)
        return E

    result = np.trace(covariance)
    assert np.isfinite(result)
    return result
5,336,994
def nx_to_loreleai(graph: nx.Graph, relation_map: Dict[str, Predicate] = None) -> Sequence[Atom]:
    """
    Converts a NetworkX graph into Loreleai representation

    To indicate the type of relations and nodes, the function looks for a 'type' attribute

    Arguments:
        graph: NetworkX graph
        relation_map: maps from edge types to predicates
    """
    literals = []

    if relation_map is None:
        relation_map = {}

    for (u, v, t) in graph.edges.data('type', default=None):
        literals.append(relation_map[t](u, v))

    return literals
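
# Usage sketch for nx_to_loreleai. It requires networkx and assumes the module
# defining the function resolves its annotations (Predicate, Atom). To stay
# self-contained it uses tiny stand-in predicates (callables returning tuples)
# instead of real loreleai Predicate objects, since only the mapping's __call__
# is exercised here; with loreleai installed you would put Predicate objects in
# relation_map.
import networkx as nx

g = nx.Graph()
g.add_edge("alice", "bob", type="friend")
g.add_edge("bob", "carol", type="colleague")

fake_predicates = {
    "friend": lambda u, v: ("friend", u, v),
    "colleague": lambda u, v: ("colleague", u, v),
}
print(nx_to_loreleai(g, relation_map=fake_predicates))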
5,336,995
def _kohn_sham_iteration(
        density,
        external_potential,
        grids,
        num_electrons,
        xc_energy_density_fn,
        interaction_fn,
        enforce_reflection_symmetry):
    """One iteration of Kohn-Sham calculation."""
    # NOTE(leeley): Since num_electrons in KohnShamState needs to be specified
    # as a static argument in the jit function, this function cannot directly
    # take KohnShamState as an input argument. The related attributes in
    # KohnShamState are used as input arguments for this helper function.
    if enforce_reflection_symmetry:
        xc_energy_density_fn = _flip_and_average_on_center_fn(xc_energy_density_fn)

    hartree_potential = scf.get_hartree_potential(
        density=density,
        grids=grids,
        interaction_fn=interaction_fn)
    xc_potential = scf.get_xc_potential(
        density=density,
        xc_energy_density_fn=xc_energy_density_fn,
        grids=grids)
    ks_potential = hartree_potential + xc_potential + external_potential
    xc_energy_density = xc_energy_density_fn(density)

    # Solve Kohn-Sham equation.
    density, total_eigen_energies, gap = scf.solve_noninteracting_system(
        external_potential=ks_potential,
        num_electrons=num_electrons,
        grids=grids)

    total_energy = (
        # kinetic energy = total_eigen_energies - external_potential_energy
        total_eigen_energies
        - scf.get_external_potential_energy(
            external_potential=ks_potential,
            density=density,
            grids=grids)
        # Hartree energy
        + scf.get_hartree_energy(
            density=density,
            grids=grids,
            interaction_fn=interaction_fn)
        # xc energy
        + scf.get_xc_energy(
            density=density,
            xc_energy_density_fn=xc_energy_density_fn,
            grids=grids)
        # external energy
        + scf.get_external_potential_energy(
            external_potential=external_potential,
            density=density,
            grids=grids)
    )

    if enforce_reflection_symmetry:
        density = _flip_and_average_on_center(density)

    return (
        density, total_energy, hartree_potential, xc_potential,
        xc_energy_density, gap)
5,336,996
def get_sync_func_driver(physical_mesh):
    """Get the sync function on the driver."""

    def sync_func_driver():
        assert isinstance(physical_mesh, LocalPhysicalDeviceMesh)
        physical_mesh.devices[0].synchronize_all_activity()

    return sync_func_driver
5,336,997
def get_nc_BGrid_GFDL(grdfile):
    """
    Bgrd = get_nc_BGrid_GFDL(grdfile)

    Load B-Grid grid object for GFDL CM2.1 from netCDF grid file
    """
    nc = pyroms.io.Dataset(grdfile)

    lon_t = nc.variables['geolon_t'][:]
    lat_t = nc.variables['geolat_t'][:]
    lon_uv = nc.variables['geolon_c'][:]
    lat_uv = nc.variables['geolat_c'][:]

    h = nc.variables['ht'][:]

    f = nc.variables['coriolis_param'][:]

    kmt = nc.variables['kmt'][:]
    z_t = nc.variables['st_ocean'][:]
    z_t_edges = nc.variables['st_edges_ocean'][:]

    kmu = nc.variables['kmu'][:]
    z_uv = nc.variables['sw_ocean'][:]
    z_uv_edges = nc.variables['sw_edges_ocean'][:]

    # compute mask at t-point
    M_t, L_t = kmt.shape
    N_t = z_t.shape[0]
    mask_t = np.zeros((N_t, M_t, L_t))
    for j in range(M_t):
        for i in range(L_t):
            try:
                mask_t[0:kmt[j, i], j, i] = 1
            except:
                mask_t[:, j, i] = 0

    # compute mask at uv-point
    M_uv, L_uv = kmu.shape
    N_uv = z_uv.shape[0]
    mask_uv = np.zeros((N_uv, M_uv, L_uv))
    for j in range(M_uv):
        for i in range(L_uv):
            try:
                mask_uv[0:kmt[j, i], j, i] = 1
            except:
                mask_uv[:, j, i] = 0

    return BGrid_GFDL(lon_t, lat_t, lon_uv, lat_uv,
                      mask_t, mask_uv, h, z_t, z_t_edges,
                      z_uv, z_uv_edges, f)
5,336,998
def _getTimeDistORSlocal(fromLocs, toLocs, travelMode, port, speedMPS):
    """
    Generate two dictionaries, one for time, another for distance, using ORS-local

    Parameters
    ----------
    fromLocs: list, Required
        The start node coordinates in format of [[lat, lon], [lat, lon], ... ]
    toLocs: list, Required
        The end node coordinates in format of [[lat, lon], [lat, lon], ... ]
    travelMode: string, Required
        The travel mode for ORS, options are 'fastest', 'pedestrian', 'cycling', 'truck'
    port: string, Required
        localhost connection port
    speedMPS: float, Required
        A constant speed for calculation

    Returns
    -------
    timeSecs: dictionary
        A dictionary for time from nodes to nodes, unit is in [seconds]
    distMeters: dictionary
        A dictionary for distance from nodes to nodes, unit is in [meters]
    """
    if (fromLocs == toLocs):
        locs = fromLocs.copy()
        [timeSecs, distMeters] = orsLocalGetTimeDistAll2All(locs, travelMode, port)
    elif (len(fromLocs) == 1):
        fromLoc = fromLocs[0]
        [timeSecs, distMeters] = orsLocalGetTimeDistOne2Many(fromLoc, toLocs, travelMode, port)
    elif (len(toLocs) == 1):
        toLoc = toLocs[0]
        [timeSecs, distMeters] = orsLocalGetTimeDistMany2One(fromLocs, toLoc, travelMode, port)
    else:
        # build the time/distance matrices one origin at a time
        timeSecs = {}
        distMeters = {}
        for i in range(len(fromLocs)):
            [timeRow, distRow] = orsLocalGetTimeDistOne2Many(fromLocs[i], toLocs, travelMode, port)
            for j in range(len(toLocs)):
                distMeters[i, j] = distRow[0, j]
                timeSecs[i, j] = timeRow[0, j]

    if (speedMPS is not None):
        for i in range(len(fromLocs)):
            for j in range(len(toLocs)):
                timeSecs[i, j] = distMeters[i, j] / speedMPS

    return [timeSecs, distMeters]
5,336,999