content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def create_app():
    """Build and return a configured Flask application instance.

    Wires a SQLite database into the shared SQLAlchemy object and
    registers the root route.

    :return: the configured Flask app
    """
    app = Flask(__name__)
    # Point SQLAlchemy at a local SQLite file.
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
    # Bind the shared DB object to this application instance.
    DB.init_app(app)

    @app.route('/')
    def root():
        return 'Welcome to TwitOff!'

    return app
92663b60bd9faf1d850f69a550b17102b92ffcb9
3,636,300
from mpi4py import MPI

def MPITest(commsize):
    """
    A decorator that repeatedly calls the wrapped function,
    with communicators of varying sizes.

    This converts the test to a generator test; therefore the
    underlying test shall not be a generator test.

    Parameters
    ----------
    commsize: scalar or tuple
        Sizes of communicator to use

    Usage
    -----
    @MPITest(commsize=[1, 2, 3])
    def test_stuff(comm):
        pass
    """
    if not isinstance(commsize, (tuple, list)):
        commsize = (commsize,)

    sizes = sorted(list(commsize))

    def dec(func):
        # Parametrize over communicator sizes so pytest runs the wrapped
        # test once per requested size.
        @pytest.mark.parametrize("size", sizes)
        def wrapped(size, *args):
            # Sanity check: every rank must be entering the same test;
            # mismatched entry would deadlock the collective calls below.
            func_names = MPI.COMM_WORLD.allgather(func.__name__)
            if not all(func_names[0] == i for i in func_names):
                raise RuntimeError("function calls mismatched", func_names)
            try:
                # NOTE(review): create_comm and WorldTooSmall are defined
                # elsewhere in this module; color 0 ranks run the test,
                # color 1 ranks sit it out.
                comm, color = create_comm(size)
            except WorldTooSmall:
                return pytest.skip("Test skipped because world is too small. Include the test with mpirun -n %d" % (size))
            try:
                if color == 0:
                    rt = func(*args, comm=comm)
                if color == 1:
                    rt = None
                    #pytest.skip("rank %d not needed for comm of size %d" %(MPI.COMM_WORLD.rank, size))
            finally:
                # Keep all ranks in lock-step before returning.
                MPI.COMM_WORLD.barrier()
            return rt

        wrapped.__name__ = func.__name__
        return wrapped

    return dec
3bb33e5d3d6919916ba865d79a5e0a16cdbb1b99
3,636,301
def getClassicalPitchNames(pitches):
    """Map a list of pitches to their classical pitch-class names.

    Each pitch is first normalized into a 12-tone pitch class, then named
    via the classical candidate table.
    """
    normalized = [normalizePitch(p, 12) for p in pitches]
    return getPitchNames(normalized, getClassicalPitchNameCandidates)
737dccb3abe83c2bbb2c983cfa3fb80a4fbc5052
3,636,302
import pymel.core.uitypes

def toPyUIList(res):
    # type: (str) -> List[pymel.core.uitypes.PyUI]
    """Wrap each UI name in *res* in a PyUI object.

    Parameters
    ----------
    res : str

    Returns
    -------
    List[pymel.core.uitypes.PyUI]
        Empty list when *res* is None.
    """
    if res is None:
        return []
    wrap = pymel.core.uitypes.PyUI
    return [wrap(item) for item in res]
6f3d35a1a7c10bc461561edcdb4b52404de3e5a7
3,636,303
def best_B(Ag):
    """Return the binding value of the best possible binder for antigen *Ag*.

    For each residue of the antigenic determinant the strongest (minimum)
    binding energy is looked up in the contact table ``cf.TD20`` and the
    per-residue minima are accumulated.

    :param Ag: sequence of residue codes convertible to int
        (assumed 1-based indices into ``cf.TD20`` — TODO confirm)
    :return: sum of the per-residue minimum binding energies
    """
    # Idiomatic replacement of the manual accumulator loop.
    return sum(np.min(cf.TD20[int(residue) - 1]) for residue in Ag)
b02afd943de6a4c8b2f9f1b4d897e5f03074c000
3,636,304
def forward_gradients_v2(ys, xs, grad_xs=None, gate_gradients=False):
    """Forward-mode pushforward analogous to the pullback defined by
    tf.gradients.

    With tf.gradients, grad_ys is the vector being pulled back, and here
    grad_xs is the vector being pushed forward.  Uses the double-backprop
    trick: differentiate the pullback w.r.t. the dummy vector ``v`` to
    obtain the Jacobian-vector product.

    :param ys: output tensor or list of tensors
    :param xs: input tensor or list of tensors
    :param grad_xs: the tangent vector(s) being pushed forward
    :param gate_gradients: unused; kept for interface compatibility
    :return: list of tensors, the JVPs d(ys)/d(xs) . grad_xs
    """
    # BUG-FIX (idiom): type(ys) == list rejected list subclasses; use
    # isinstance for the standard, subclass-friendly check.
    if isinstance(ys, list):
        v = [tf.ones_like(yy) for yy in ys]
    else:
        v = tf.ones_like(ys)  # dummy variable
    g = tf.gradients(ys, xs, grad_ys=v)
    return tf.gradients(g, v, grad_ys=grad_xs)
02fea7fc2367e69d2c6b085ea1a1379b7a442674
3,636,305
import resource

def __limit_less(lim1, lim2):
    """Helper function for comparing two rlimit values, handling
    "unlimited" correctly.

    Params:
        lim1 (integer): first rlimit
        lim2 (integer): second rlimit

    Returns:
        true if lim1 <= lim2, with RLIM_INFINITY ordered above every
        finite value
    """
    unlimited = resource.RLIM_INFINITY
    # Anything fits under an unlimited bound.
    if lim2 == unlimited:
        return True
    # An unlimited request can never fit under a finite bound.
    if lim1 == unlimited:
        return False
    return lim1 <= lim2
8c8faebd4cc1eecfbd8e0a73b16b2bee0a433572
3,636,306
from typing import List

def getswarmlocations() -> List[str]:
    """Load the list of locations where a swarm can happen.

    Reads ``commands/data/swarmlocations.csv`` (a single comma-separated
    line), lower-cases every entry, removes duplicates and sorts the result.

    :return: sorted list of unique, lower-cased swarm location names
    """
    # BUG-FIX: the file handle was never closed; use a context manager.
    with open("commands/data/swarmlocations.csv") as location_file:
        raw_locations = location_file.read().split(",")
    # Normalise case, de-duplicate, and return in deterministic order.
    return sorted({location.lower() for location in raw_locations})
3a7db13c8a0176a4cc9a9dda38b23041639917a9
3,636,307
from typing import Any
from typing import Optional

def Request(
    default: Any = Undefined,
    *,
    default_factory: Optional[NoArgAnyCallable] = None,
    alias: Optional[str] = None,
) -> Any:
    """
    Used to provide extra information about a field.

    :param default: since this is replacing the field’s default, its first
        argument is used to set the default, use ellipsis (``...``) to
        indicate the field is required
    :param default_factory: callable that will be called when a default
        value is needed for this field.  Mutually exclusive with `default`.
    :param alias: the public name of the field
    :raises ValueError: if both `default` and `default_factory` are given
    """
    has_explicit_default = default is not Undefined
    if has_explicit_default and default_factory is not None:
        raise ValueError("cannot specify both default and default_factory")
    return RequestInfo(default, default_factory=default_factory, alias=alias)
83ad97bc7a276e0d9d715031f263cf8d66fdaf78
3,636,308
import sys
from django.conf import settings
import os

def get_jobs(when=None, only_scheduled=False):
    """
    Returns a dictionary mapping of job names together with their respective
    application class.

    :param when: if given, scan only the schedule directory with this name
        (e.g. 'daily'); otherwise all schedule directories are scanned.
    :param only_scheduled: if True, skip jobs that have no schedule.
    :return: dict mapping (app_name, job_name) -> job class
    :raises JobError: if an app defines two jobs with the same name
    """
    # FIXME: HACK: make sure the project dir is on the path when executed
    # as ./manage.py
    try:
        cpath = os.path.dirname(os.path.realpath(sys.argv[0]))
        ppath = os.path.dirname(cpath)
        if ppath not in sys.path:
            sys.path.append(ppath)
    except (OSError, IndexError):
        # BUG-FIX: was a bare `except:` that also swallowed SystemExit and
        # KeyboardInterrupt; catch only the plausible failures.
        pass

    _jobs = {}
    # BUG-FIX: removed a pointless `if True:` wrapper around this loop.
    for app_name in settings.INSTALLED_APPS:
        scandirs = (None, 'minutely', 'quarter_hourly', 'hourly', 'daily',
                    'weekly', 'monthly', 'yearly')
        if when:
            scandirs = None, when
        for subdir in scandirs:
            try:
                path = find_job_module(app_name, subdir)
                for name in find_jobs(path):
                    if (app_name, name) in _jobs:
                        raise JobError("Duplicate job %s" % name)
                    job = import_job(app_name, name, subdir)
                    if only_scheduled and job.when is None:
                        # only include jobs which are scheduled
                        continue
                    if when and job.when != when:
                        # generic job not in same schedule
                        continue
                    _jobs[(app_name, name)] = job
            except ImportError:
                # No job module -- continue scanning
                pass
    return _jobs
9cb4ac165b446596f5728659c1cdf291fc7ef043
3,636,309
import os
import json
import logging

def get_project_id():
    """Get the Google Cloud project ID.

    Prefers the ``PROJECT`` environment variable; falls back to the
    ``project_id`` stored in the service-account file pointed to by
    ``GOOGLE_APPLICATION_CREDENTIALS``.  Logs a critical warning when both
    sources are present but disagree.

    :return: the project ID string, or None if neither source is available
    """
    service_acc_address = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', None)
    if service_acc_address:
        # BUG-FIX: the credentials file handle was never closed; use a
        # context manager and json.load directly on the handle.
        with open(service_acc_address, 'r') as service_acc_file:
            service_acc_project_id = json.load(service_acc_file)['project_id']
    else:
        service_acc_project_id = None
    project_id = os.environ.get('PROJECT', service_acc_project_id)
    if service_acc_project_id is not None and project_id != service_acc_project_id:
        # BUG-FIX: the original message embedded a line-continuation's
        # indentation whitespace inside the logged string.
        logging.critical("Warning the project in ENV VAR PROJECT is "
                         "not the same as your service account project")
    return project_id
ce06f65be2fa70898a71f10d4f848afdf944da48
3,636,310
def val_err_str(val: float, err: float) -> str:
    """Format a value/error pair as ``value(error)``.

    12.345 +/- 1.23  --> 12.3(12)
    12.345 +/- 0.012 --> 12.345(12)
    12345  +/- 654   --> 12340(650)

    :param val: float representing the value
    :param err: float representing the error in the value
    :return: a string representation of the value/error pair
    """
    err_sig_figs = 2  # future upgrade path is to allow user to set this
    decimal_places = 2 - err_sig_figs
    if err < 10:
        # Scale the error up until it shows two digits, tracking how many
        # decimal places that scaling corresponds to.
        while err < 10.:
            err *= 10
            decimal_places += 1
        err = round(err, 0)
    else:
        # Error is large: scale it down to two significant digits, then
        # restore its magnitude after rounding.
        while err > 100.:
            err /= 10
            decimal_places -= 1
        err = round(err, 0) * 10 ** (-decimal_places)
    val = round(val, decimal_places)
    return f"{val:.{max(0, decimal_places)}f}({err:.0f})"
5b759ff8e6996704edb7f6b68f6cb7e307593c9e
3,636,311
import os

def discrete_distribution(probabilities, path='', fig_name='distribution_events_states.pdf', v_labels=None,
                          h_labels=None, title=None, color_map=None, figsize=(12, 6), size_labels=16,
                          size_values=14, bottom=None, top=None, left=None, right=None, savefig=False,
                          usetex=False):
    """
    Annotated heatmap of a given discrete distribution with 2 dimensions.

    :type probabilities: 2D array
    :param probabilities: the 2D discrete distribution.
    :type path: string
    :param path: where the figure is saved.
    :type fig_name: string
    :param fig_name: name of the file.
    :type v_labels: list of strings
    :param v_labels: labels for the first dimension (vertical).
    :type h_labels: list of strings
    :param h_labels: labels for the second dimension (horizontal).
    :type title: string
    :param title: suptitle.
    :param color_map: color map for the heatmap, see seaborn documentation.
    :type figsize: (int, int)
    :param figsize: tuple (width, height).
    :type size_labels: int
    :param size_labels: fontsize of labels.
    :type size_values: int
    :param size_values: fontsize of the annotations on top of the heatmap.
    :type bottom: float
    :param bottom: between 0 and 1, adjusts the bottom margin, see matplotlib subplots_adjust.
    :type top: float
    :param top: between 0 and 1, adjusts the top margin, see matplotlib subplots_adjust.
    :type left: float
    :param left: between 0 and 1, adjusts the left margin, see matplotlib subplots_adjust.
    :type right: float
    :param right: between 0 and 1, adjusts the right margin, see matplotlib subplots_adjust.
    :type savefig: boolean
    :param savefig: set to True to save the figure.
    :type usetex: boolean
    :param usetex: set to True if matplotlib figure is rendered with TeX.
    :rtype: Figure
    :return: the figure (see matplotlib).
    """
    if color_map is None:
        # Default to a perceptually-uniform cubehelix palette.
        color_map = seaborn.cubehelix_palette(as_cmap=True, reverse=False, start=0.5, rot=-.75)
    v_size = np.shape(probabilities)[0]
    h_size = np.shape(probabilities)[1]
    # Create annotation matrix: one percentage string per heatmap cell.
    annot = np.ndarray((v_size, h_size), dtype=object)
    for x1 in range(v_size):
        for x2 in range(h_size):
            p = probabilities[x1, x2]
            if p == 0:
                if usetex:
                    annot[x1, x2] = r'$0$\%'
                else:
                    annot[x1, x2] = r'0%'
            elif p < 0.01:
                # Small but non-zero probabilities are shown as "<1%".
                if usetex:
                    annot[x1, x2] = r'$<1$\%'
                else:
                    annot[x1, x2] = r'<1%'
            else:
                # Truncate (floor) to an integer percentage.
                a = str(int(np.floor(100 * p)))
                if usetex:
                    annot[x1, x2] = r'$' + a + r'$\%'
                else:
                    annot[x1, x2] = a + r'%'
    f = plt.figure(figsize=figsize)
    ax = seaborn.heatmap(probabilities, xticklabels=h_labels, yticklabels=v_labels, annot=annot,
                         cbar=False, cmap=color_map, fmt='s', square=True,
                         annot_kws={'size': size_values})
    ax.tick_params(axis='both', which='major', labelsize=size_labels)  # font size for tick labels
    ax.set_yticklabels(v_labels, va='center')
    if title is not None:
        plt.title(title)
    plt.tight_layout()
    if bottom is not None:
        # NOTE(review): top/left/right are only applied when bottom is set.
        plt.subplots_adjust(bottom=bottom, top=top, left=left, right=right)
    if savefig:
        entire_path = os.path.join(path, fig_name)
        plt.savefig(entire_path)
    return f
e9da72db62779a760b71d7e498d733cf3f052ec4
3,636,312
import re

def find_map(address):
    """
    Look up a specified address in the /proc/PID/maps for a process.

    Returns:
        A string representing the map in question, or None if no match.
    """
    maps = fetch_maps()
    for entry in re.finditer(begin_pattern, maps):
        low = int(entry.group("begin"), 16)
        high = int(entry.group("end"), 16)
        # Half-open interval: begin is inclusive, end is exclusive.
        if low <= address < high:
            return entry.group(0)
    return None
c9a07d982b92ef1e165fa8e1bcc9ea3873f2f912
3,636,313
import regex

def _format_css_declarations(content: list, indent_level: int) -> str:
    """
    Helper function for CSS formatting that formats a list of CSS properties, like `margin: 1em;`.

    INPUTS
    content: A list of component values generated by the tinycss2 library
    indent_level: number of tabs to indent each emitted declaration by

    OUTPUTS
    A string of formatted CSS
    """
    output = ""
    tokens = tinycss2.parse_declaration_list(content)

    # Hold on to your butts...
    # When we alpha-sort declarations, we want to keep comments that are on the same
    # line attached to that declaration after it's reordered.
    # To do this, first create a list of sorted_declarations that is a list of tuples.
    # The first tuple value is the declaration itself, and the second is a comment on the same line, if it exists.
    # While we do this, remove those same-line comments from our master list of tokens,
    # so that we don't process them twice later when we iterate over the master list again.
    sorted_declarations = []
    i = 0
    while i < len(tokens):
        if tokens[i].type == "declaration":
            if i + 1 < len(tokens) and tokens[i + 1].type == "comment":
                sorted_declarations.append((tokens[i], tokens[i + 1]))
                tokens.pop(i + 1)  # Remove from the master list
            # Use regex to test if the token is on the same line, i.e. if the
            # intervening white space doesn't include a newline
            elif i + 2 < len(tokens) and tokens[i + 1].type == "whitespace" and regex.match(r"[^\n]+", tokens[i + 1].value) and tokens[i + 2].type == "comment":
                sorted_declarations.append((tokens[i], tokens[i + 2]))
                tokens.pop(i + 1)  # Remove from the master list
                tokens.pop(i + 1)
            else:
                # Special case in alpha-sorting: Sort -epub-* properties as if -epub- didn't exist
                # Note that we modify token.name, which DOESN'T change token.lower_name; and we use token.name
                # for sorting, but token.lower_name for output, so we don't have to undo this before outputting
                tokens[i].name = regex.sub(r"^-([a-z]+?)-(.+)", r"\2-\1-\2", tokens[i].name)
                sorted_declarations.append((tokens[i], None))
        i = i + 1

    # Actually sort declaration tokens and their associated comments, if any
    sorted_declarations.sort(key = lambda x : x[0].name)

    # Now, sort the master token list using an intermediary list, output_tokens
    # This will iterate over all tokens, including non-declaration tokens. If we encounter a declaration,
    # pull the nth declaration out of our sorted list instead.
    output_tokens = []
    current_declaration_number = 0
    for token in tokens:
        if token.type == "error":
            # BUG-FIX: this message was missing its `f` prefix, so the literal
            # text "{token.message}" was emitted instead of the parse error.
            raise se.InvalidCssException(f"Couldn’t parse CSS. Exception: {token.message}")

        # Append the declaration to the output based on its sorted index.
        # This will sort declarations but keep things like comments before and after
        # declarations in the expected order.
        if token.type == "declaration":
            output_tokens.append(sorted_declarations[current_declaration_number])
            current_declaration_number = current_declaration_number + 1
        else:
            output_tokens.append((token, None))

    # tokens is now a alpha-sorted list of tuples of (token, comment)
    tokens = output_tokens

    for token in tokens:
        comment = None
        if isinstance(token, tuple):
            comment = token[1]
            token = token[0]

        if token.type == "error":
            # BUG-FIX: same missing `f` prefix as above.
            raise se.InvalidCssException(f"Couldn’t parse CSS. Exception: {token.message}")

        if token.type == "declaration":
            output += ("\t" * indent_level) + token.lower_name + ": "
            output += _format_css_component_list(token.value)

            if token.important:
                output += " !important"

            output += ";"

            if comment:
                output += " /* " + comment.value.strip() + " */"

            output += "\n"

        if token.type == "comment":
            output = output.rstrip()
            if output == "":
                output += ("\t" * indent_level) + "/* " + token.value.strip() + " */\n"
            else:
                output += " /* " + token.value.strip() + " */\n"

    return output.rstrip()
56205e557858349f17a8d7a549d472c3f549c2cc
3,636,314
def rosen_hess(x):
    """
    The Hessian matrix of the Rosenbrock function.

    Parameters
    ----------
    x : array_like
        1-D array of points at which the Hessian matrix is to be computed.

    Returns
    -------
    rosen_hess : ndarray
        The Hessian matrix of the Rosenbrock function at `x`.

    See Also
    --------
    rosen, rosen_der, rosen_hess_prod

    Examples
    --------
    >>> from scipy.optimize import rosen_hess
    >>> X = 0.1 * np.arange(4)
    >>> rosen_hess(X)
    array([[-38.,   0.,   0.,   0.],
           [  0., 134., -40.,   0.],
           [  0., -40., 130., -80.],
           [  0.,   0., -80., 200.]])
    """
    x = atleast_1d(x)
    # Off-diagonal coupling terms: d2f/dx_i dx_{i+1} = -400 * x_i.
    hess = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1)
    # Main diagonal: first/last entries have boundary forms, the interior
    # entries pick up contributions from both neighbouring terms.
    main_diag = np.zeros(len(x), dtype=x.dtype)
    main_diag[0] = 1200 * x[0] ** 2 - 400 * x[1] + 2
    main_diag[-1] = 200
    main_diag[1:-1] = 202 + 1200 * x[1:-1] ** 2 - 400 * x[2:]
    return hess + np.diag(main_diag)
449c869e821c0e97e4126bd5955df5bd39d93f95
3,636,315
def flow_corr():
    """
    Symmetric cumulants SC(m, n) at the MAP point compared to experiment.

    Top panel: raw SC(4,2) and SC(3,2); bottom panel: the normalized
    variants.  Solid lines are the current model's MAP predictions, dashed
    lines are hard-coded MAP results from a calibration without nucleon
    substructure (arrays below).
    """
    fig, axes = plt.subplots(
        figsize=figsize(0.5, 1.2), sharex=True, nrows=2,
        gridspec_kw=dict(height_ratios=[4, 5])
    )

    observables = ['sc', 'sc_normed']
    ylims = [(-2.5e-6, 2.5e-6), (-.9, .8)]
    labels = ['(4,2)', '(3,2)']
    system = 'PbPb5020'

    def label(*mn, normed=False):
        # Build the TeX axis label $\mathrm{SC}(m, n)$, optionally divided
        # by <v_m^2><v_n^2> for the normalized observable.
        fmt = r'\mathrm{{SC}}({0}, {1})'
        if normed:
            fmt += r'/\langle v_{0}^2 \rangle\langle v_{1}^2 \rangle'
        return fmt.format(*mn).join('$$')

    for obs, ylim, ax in zip(observables, ylims, axes.flat):
        for (mn, cmap), lbl in zip([((4, 2), 'Blues'), ((3, 2), 'Oranges')], labels):
            x = model.map_data[system][obs][mn]['x']
            y = model.map_data[system][obs][mn]['Y']
            ax.plot(x, y, color=getattr(plt.cm, cmap)(.7))
            # Annotate each curve just past its last point.
            ax.text(1.02*x[-1], y[-1], lbl, va='center', ha='left')
        ax.axhline(
            0, color='.5', lw=plt.rcParams['xtick.major.width'],
            zorder=-100
        )
        ax.set_xlim(0, 80)
        ax.set_ylim(*ylim)
        auto_ticks(ax, nbins=7, minor=2)
        if ax.is_first_col():
            ax.set_ylabel(label('m', 'n', normed='normed' in obs))
        if ax.is_first_row():
            ax.set_title('Pb-Pb 5.02 TeV')
        else:
            ax.set_xlabel('Centrality %')

    # MAP estimate for Pb-Pb collisions at 5.02 TeV, calibrated to Pb-Pb
    # data at 2.76 and 5.02 TeV using a model without nucleon substructure.
    # Columns: centrality, SC(4,2), SC(3,2).
    # symmetric cumulants
    SC = np.array([
        [2.5e+00, 5.8591e-09, 5.9204e-09],
        [7.5e+00, 2.1582e-08, -2.1367e-08],
        [1.5e+01, 1.2228e-07, -1.3942e-07],
        [2.5e+01, 4.3989e-07, -5.4267e-07],
        [3.5e+01, 9.4414e-07, -1.0677e-06],
        [4.5e+01, 1.4138e-06, -1.4616e-06],
        [5.5e+01, 1.4456e-06, -1.2317e-06],
        [6.5e+01, 7.3726e-07, -3.3222e-07],
    ])

    # normalized symmetric cumulants
    NSC = np.array([
        [2.5e+00, 7.3202e-02, 2.1091e-02],
        [7.5e+00, 7.6282e-02, -2.0918e-02],
        [1.5e+01, 1.5216e-01, -4.7261e-02],
        [2.5e+01, 2.4814e-01, -8.6423e-02],
        [3.5e+01, 3.4423e-01, -1.1640e-01],
        [4.5e+01, 4.5614e-01, -1.4251e-01],
        [5.5e+01, 6.1072e-01, -1.5021e-01],
    ])

    # Overlay the reference (no-substructure) results as dashed lines.
    for ax, obs in zip(axes, [SC, NSC]):
        x, y42, y32 = obs.T
        ax.plot(x, y42, color=plt.cm.Blues(.7), linestyle='dashed')
        ax.plot(x, y32, color=plt.cm.Oranges(.7), linestyle='dashed')

    # Proxy artists for the line-style legend (solid vs dashed).
    solid_line = lines.Line2D([], [], color=offblack)
    dashed_line = lines.Line2D([], [], linestyle='dashed', color=offblack)
    handles = [solid_line, dashed_line]
    labels = ["p-Pb, Pb-Pb 5.02 TeV", "Pb-Pb 2.76, 5.02 TeV"]
    plt.legend(handles, labels, loc=8, title='Bayesian calibration on:')

    set_tight(fig)
c2d887e3a646ad5e3f1f7570f8a7ee19bf25d1b1
3,636,316
def dodecagon(samples=128, radius=1):
    """Create a dodecagon mask.

    Parameters
    ----------
    samples : `int`, optional
        number of samples in the square output array
    radius : `float`, optional
        radius of the shape in the square output array.  radius=1 will
        fill the x

    Returns
    -------
    `numpy.ndarray`
        binary ndarray representation of the mask
    """
    # A dodecagon is the 12-sided regular polygon.
    num_sides = 12
    return regular_polygon(num_sides, samples=samples, radius=radius)
a45a8eaa723c9c0da93fa6477a07d1a13d6524e0
3,636,317
def port_name(name, nr=0):
    """Map node output number to name, e.g. ("conv", 1) -> "conv:1"."""
    separator = ":"
    return name + separator + str(nr)
a82e0b9940fa6b7f11f1a11fbd8a1b9b1a57c07b
3,636,318
import os
import urllib

def url(ticker, start_date, end_date):
    """Format the correct URL from the params"""
    endpoint = ''.join([API_BASE_PATH, ticker, '.csv'])
    params = {'start_date': start_date, 'end_date': end_date}
    # Attach the API key only when it is configured in the environment.
    if API_KEY_ENV in os.environ:
        params['api_key'] = os.environ[API_KEY_ENV]
    query_string = urllib.parse.urlencode(params)
    return ''.join([endpoint, '?', query_string])
0a19ee5bdc88f1a071b3b7bb3508f60dcda6ee7a
3,636,319
def AC3(csp, queue=None, removals=None, arc_heuristic=csp.dom_j_up):
    """[Figure 6.3] Arc-consistency algorithm AC-3.

    Prunes values from variable domains that have no supporting value in a
    neighbouring variable's domain.

    NOTE(review): the default ``csp.dom_j_up`` is evaluated at function
    definition time against a module-level name ``csp`` that the first
    parameter shadows at call time — confirm the enclosing module imports
    a ``csp`` module.

    :param csp: the constraint-satisfaction problem instance
    :param queue: optional initial set of directed arcs (Xi, Xk) to process;
        defaults to every arc in the constraint graph
    :param removals: passed through to revise() to record pruned values
    :param arc_heuristic: callable that orders the queue of arcs
    :return: (True, checks) if arc consistency was achieved, or
        (False, checks) if some domain was emptied; ``checks`` is the number
        of constraint checks performed
    """
    if queue is None:
        # Start from every directed arc in the constraint graph.
        queue = {(Xi, Xk) for Xi in csp.variables for Xk in csp.neighbors[Xi]}
    csp.support_pruning()
    queue = arc_heuristic(csp, queue)
    checks = 0
    while queue:
        (Xi, Xj) = queue.pop()
        revised, checks = revise(csp, Xi, Xj, removals, checks)
        if revised:
            if not csp.curr_domains[Xi]:
                return False, checks  # CSP is inconsistent
            # Xi's domain shrank, so every arc pointing at Xi (except the
            # one just processed) must be re-examined.
            for Xk in csp.neighbors[Xi]:
                if Xk != Xj:
                    queue.add((Xk, Xi))
    return True, checks
507e942633da0ac1487db0c75375ac0e6d37a069
3,636,320
from typing import Optional
from typing import List
from typing import Union
import pandas

def load(
    name: str,
    ids: Optional[List[Union[str, int]]] = None,
    limit: Optional[int] = None,
) -> pandas.DataFrame:
    """Load dataset data to a pandas DataFrame.

    Args:
        name: The dataset name.
        ids: If provided, load dataset records with given ids.
        limit: The number of records to retrieve.

    Returns:
        The dataset as a pandas Dataframe.

    Examples:
        >>> import rubrix as rb
        >>> dataframe = rb.load(name="example-dataset")
    """
    client = _client_instance()
    return client.load(name=name, limit=limit, ids=ids)
40499ccc3942d4c59c8588257c9f40a2d622109d
3,636,321
def _stringify(values):
    """Convert *values* to a string suitable for an XML attribute.

    Lists/tuples become space-separated strings, booleans become "1"/"0",
    and everything else is passed through ``str``.
    """
    # Idiom fix: isinstance instead of `type(x) == ...` comparisons; this
    # also generalizes the behaviour to list/tuple subclasses.
    if isinstance(values, (list, tuple)):
        return " ".join(str(x) for x in values)
    if isinstance(values, bool):
        return "1" if values else "0"
    return str(values)
a8f3c290ef949a254ca5dca9744ff3f4c602c4d2
3,636,322
def xml_safe(s):
    """Returns the XML-safe version of a given string.

    Ampersands and '<' are entity-escaped; carriage returns are dropped
    and newlines become ``<br/>`` tags.
    """
    escaped = s.replace("&", "&amp;")
    escaped = escaped.replace("<", "&lt;")
    escaped = escaped.replace("\r", "")
    return escaped.replace("\n", "<br/>")
166bf2b78441b4f22bf3a89f8be56efb756fe72f
3,636,323
import re

def range_values(ent):
    """Extract values from the range and cached label.

    Finds the first child entity labelled 'range.*', parses its numeric
    values, stores them on the entity and relabels it 'count'.
    """
    range_ = [e for e in ent.ents if e._.cached_label.split('.')[0] == 'range'][0]
    values = re.findall(FLOAT_RE, range_.text)
    # Every captured value must look like an integer token, else bail out.
    if not all(re.search(INT_TOKEN_RE, v) for v in values):
        raise RejectMatch
    keys = range_.label_.split('.')[1:]
    data = {key: to_positive_int(value) for key, value in zip(keys, values)}
    range_._.data = data
    range_._.new_label = 'count'
    return range_
4fe1388727ef432a6b9587a2c179dafc6f60d42a
3,636,324
import os

def plot_mw_nii_bars(ax, snr_min = None, shaded_kwargs = {}, **kwargs):
    """
    Plots vertical lines and bars on bpt Diagram for Tilted Disk where only
    NII/HA line is detected.

    Parameters
    ----------
    ax: 'matplotlib.pyplot.figure.axes'
        axes to plot lines on
    snr_min: 'number', optional, must be keyword
        Minimum sigma detection level to plot
        Default of 2 sigma
    shaded_kwargs: 'dict', optional, must be keyword
        kwargs passed to ax.fill_betweenx for shaded error boxes.
        NOTE(review): this is a mutable default argument that the function
        mutates below, so defaults persist across calls — confirm intended.
    **kwargs: 'dict', optional, must be keywords
        passed to ax.plot for lines
    """
    # Default line color
    if "color" not in kwargs:
        kwargs["color"] = pal[4]
    if "facecolor" not in shaded_kwargs:
        shaded_kwargs["facecolor"] = kwargs["color"]

    # Default zorder: keep lines drawn just above their shaded error boxes.
    if "zorder" not in kwargs:
        if "zorder" in shaded_kwargs:
            kwargs["zorder"] = shaded_kwargs["zorder"] + 1
        else:
            kwargs["zorder"] = 2
            shaded_kwargs["zorder"] = 1
    else:
        if "zorder" not in shaded_kwargs:
            shaded_kwargs["zorder"] = kwargs["zorder"] - 1

    # Default alpha
    if "alpha" not in kwargs:
        kwargs["alpha"] = 0.5
    if "alpha" not in shaded_kwargs:
        shaded_kwargs["alpha"] = 0.1

    # Default line style
    if "ls" not in kwargs:
        kwargs["ls"] = ':'

    # Default line width
    if "lw" not in kwargs:
        kwargs["lw"] = 2

    # Default SNR
    if snr_min is None:
        snr_min = 2.

    # Default Label
    if "label" not in kwargs:
        kwargs["label"] = r'$>{0:2.1f}\sigma$ Tilted Disk'.format(snr_min)

    # Load Data
    nii_ha_data_filepath = os.path.join(directory, "mw_data/WHAM_NII_HA_DATA_021219.fits")
    nii_ha_data = Table.read(nii_ha_data_filepath)

    # SNR Cut Data
    snr_cut = (nii_ha_data["NII_SIGMA_LEVEL"] > snr_min) & (nii_ha_data["HA_SIGMA_LEVEL"] > snr_min)

    # in Tilted Disk OIII/HB points cut
    # NOTE(review): rows 0, 6 and 7 are excluded by hand — presumably
    # sightlines with OIII/HB detections handled elsewhere; confirm against
    # the catalog.
    oiii_hb_cut = np.ones(len(nii_ha_data), dtype = bool)
    oiii_hb_cut[0] = False
    oiii_hb_cut[6] = False
    oiii_hb_cut[7] = False

    nii_ha_data = nii_ha_data[snr_cut & oiii_hb_cut]

    for ell, entry in enumerate(nii_ha_data):
        if ell == 0:
            # Only the first line carries the legend label; remove it so the
            # remaining lines don't create duplicate legend entries.
            ax.plot([entry["log_nii_ha"], entry["log_nii_ha"]], [-2-ell/3.,2], **kwargs)
            del kwargs["label"]
        else:
            ax.plot([entry["log_nii_ha"], entry["log_nii_ha"]], [-2-ell/3.,2], **kwargs)
        # Shaded box spanning the lower/upper uncertainty of log(NII/HA).
        ax.fill_betweenx([-2,2],
                         [entry["log_nii_ha_lower"], entry["log_nii_ha_lower"]],
                         x2 = [entry["log_nii_ha_upper"], entry["log_nii_ha_upper"]],
                         **shaded_kwargs)

    return ax
b274b213a183404d21aa3429fa6f0edefb3be25e
3,636,325
def get_weights(connections):
    """Returns the weights of the connections.

    :param connections: NEST connection collection to query
    :return: Numpy array of weights
    """
    statuses = nest.GetStatus(connections, keys="weight")
    return np.array(statuses)
50371989cf32b37bc7a25837db2c866e579ac0b6
3,636,326
def w2v_matrix_vocab_generator(w2v_pickle):
    """
    Creates the w2v dict mapping word to index and a numpy matrix of
    (num words, size of embedding): row i is the embedding of the word
    mapped to index i.  Two extra (zero) rows are appended for the
    "<UNK>" and "<padding>" tokens.

    :param w2v_pickle: Dataframe containing token and vector columns, where
        token is a string and vector is the embedding vector; each vector
        must have the same length.
    :return: A dict, np matrix pair; the dict maps words to indexes, the
        matrix ith row contains the embedding of the word mapped to index i.
    """
    w2v_df = pd.read_pickle(w2v_pickle)
    vocab = dict()
    # Embedding size is taken from the first stored vector.
    embedding_dim = len(w2v_df.iloc[0, 1])
    # shape +2 for unknown and padding tokens
    weights = np.zeros(shape=(w2v_df.shape[0] + 2, embedding_dim))
    for _, data_point in w2v_df.iterrows():
        row_index = len(vocab)
        vocab[data_point["token"]] = row_index
        weights[row_index, :] = np.array(data_point["vector"])
    # Reserve the last two (zero) rows for the special tokens.
    vocab["<UNK>"] = len(weights) - 2
    vocab["<padding>"] = len(weights) - 1
    return vocab, weights
3228d73facc7756cfcf32fb10d9d54f0b40c84d7
3,636,327
def concat(
    dfs, axis=0, join="outer", uniform=False, filter_warning=True, ignore_index=False
):
    """Concatenate, handling some edge cases:

    - Unions categoricals between partitions
    - Ignores empty partitions

    Parameters
    ----------
    dfs : list of DataFrame, Series, or Index
    axis : int or str, optional
    join : str, optional
    uniform : bool, optional
        Whether to treat ``dfs[0]`` as representative of ``dfs[1:]``.  Set
        to True if all arguments have the same columns and dtypes (but not
        necessarily categories).  Default is False.
    ignore_index : bool, optional
        Whether to allow index values to be ignored/droped during
        concatenation.  Default is False.
    """
    # Single input: nothing to concatenate.
    if len(dfs) == 1:
        return dfs[0]
    # Dispatch on the concrete type of the first frame.
    handler = concat_dispatch.dispatch(type(dfs[0]))
    return handler(
        dfs,
        axis=axis,
        join=join,
        uniform=uniform,
        filter_warning=filter_warning,
        ignore_index=ignore_index,
    )
7f89a93410c3171e967682df954d259651cc5b91
3,636,328
def resize(dataset: xr.Dataset, invalid_value: float = 0) -> xr.Dataset:
    """
    Pixels whose aggregation window exceeds the reference image are truncated
    in the output products.  This function returns the output products with
    the size of the input images: it adds back the rows and columns that were
    truncated.  These added pixels will have bit 0 = 1 (invalid pixel: border
    of the reference image) in the validity_mask and will have the
    disparity = invalid_value in the disparity map.

    :param dataset: Dataset which contains the output products
    :type dataset: xarray.Dataset with the variables :
        - disparity_map 2D xarray.DataArray (row, col)
        - confidence_measure 3D xarray.DataArray(row, col, indicator)
        - validity_mask 2D xarray.DataArray (row, col)
    :param invalid_value: disparity to assign to invalid pixels (pixels whose
        aggregation window exceeds the image)
    :type invalid_value: float
    :return: the dataset with the size of the input images
    :rtype: xarray.Dataset with the variables :
        - disparity_map 2D xarray.DataArray (row, col)
        - confidence_measure 3D xarray.DataArray(row, col, indicator)
        - validity_mask 2D xarray.DataArray (row, col)
    """
    offset = dataset.attrs['offset_row_col']
    if offset == 0:
        # Nothing was truncated; return the dataset unchanged.
        return dataset

    c_row = dataset.coords['row']
    c_col = dataset.coords['col']

    # Extend the coordinate ranges by `offset` on every side.
    row = np.arange(c_row[0] - offset, c_row[-1] + 1 + offset)
    col = np.arange(c_col[0] - offset, c_col[-1] + 1 + offset)

    resize_disparity = xr.Dataset()

    # For each output product, build a full-size "background" array of the
    # appropriate fill value and lay the (smaller) computed product on top
    # with combine_first.
    for array in dataset:
        if array == 'disparity_map':
            data = xr.DataArray(np.full((len(row), len(col)), invalid_value, dtype=np.float32), coords=[row, col], dims=['row', 'col'])
            resize_disparity[array] = dataset[array].combine_first(data)

        if array == 'confidence_measure':
            depth = len(dataset.coords['indicator'])
            data = xr.DataArray(data=np.full((len(row), len(col), depth), np.nan, dtype=np.float32), coords={'row': row, 'col': col}, dims=['row', 'col', 'indicator'])
            resize_disparity[array] = dataset[array].combine_first(data)

        if array == 'validity_mask':
            data = xr.DataArray(np.zeros((len(row), len(col)), dtype=np.uint16), coords=[row, col], dims=['row', 'col'])
            # Invalid pixel : border of the reference image
            data += PANDORA_MSK_PIXEL_REF_NODATA_OR_BORDER
            resize_disparity[array] = dataset[array].combine_first(data).astype(np.uint16)

        if array == 'interpolated_coeff':
            data = xr.DataArray(np.full((len(row), len(col)), np.nan, dtype=np.float32), coords=[row, col], dims=['row', 'col'])
            resize_disparity[array] = dataset[array].combine_first(data)

    # Preserve metadata, but the border offset has now been absorbed.
    resize_disparity.attrs = dataset.attrs
    resize_disparity.attrs['offset_row_col'] = 0

    return resize_disparity
75729c99cf77ffeb79153bb4ff17ea69dac12f7b
3,636,329
import requests

def score(graphs, schema, url, port):
    """
    Serialise a batch of graphs to Avro and POST them to a prediction server.

    graphs is expected to be a list of dictionaries, where each entry in the
    list represents a graph with
    * key idx -> index value
    * key vertices -> list of ints representing vertices of the graph
    * key edges -> list of list of ints representing edges of graph
    * key label (optional) -> label of the graph

    :param schema: Avro schema used to serialise the graph records
    :param url: base URL of the prediction server (trailing '/' is stripped)
    :param port: port of the prediction server
    :return: the `requests` Response object from the POST
    """
    # Serialise all graphs into an in-memory Avro container file.
    stream = BufferedWriter(BytesIO())
    writer = DataFileWriter(stream, avro.io.DatumWriter(), schema)
    # writer = DataFileWriter(open("imdb-graph.avro", "wb"), DatumWriter(), schema)
    for graph in graphs:
        writer.append({"edges": graph["edges"], "vertices": graph["vertices"], "index": graph["idx"], "label": graph.get("label")})
    # Flush before grabbing the bytes; close() would also close the stream.
    writer.flush()
    raw_bytes = stream.raw.getvalue()
    writer.close()

    # POST the Avro payload as a single binary blob.
    url = "{}:{}/predictUnstructured/?ret_mode=binary".format(url.strip("/"), port)
    payload = raw_bytes
    headers = {
        'Content-Type': 'application/octet-stream'
    }
    response = requests.request("POST", url, headers=headers, data = payload)
    return response
090846132114dfadfc950f3fff384e26c439acce
3,636,330
from typing import List
from typing import Dict

def group_by_author(commits: List[dict]) -> Dict[str, List[dict]]:
    """Group GitHub commit objects by their author.

    :param commits: commit dicts, each with an ``author.login`` entry
    :return: mapping of author login -> that author's commits, preserving
        the input order within each group
    """
    grouped: Dict[str, List[dict]] = {}
    for commit in commits:
        # setdefault collapses the membership test + empty-list creation.
        grouped.setdefault(commit["author"]["login"], []).append(commit)
    return grouped
239c523317dc8876017d4b61bc2ad8887444085e
3,636,331
import re

def convert(name):
    """
    CamelCase to under_score

    :param name: identifier written in CamelCase
    :return: the same identifier in snake_case
    """
    # First pass: split before an uppercase letter followed by lowercase.
    partially_split = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    # Second pass: split between a lowercase/digit and an uppercase letter,
    # then lower-case everything.
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', partially_split).lower()
c92db5a27d4086f8c46cbcb17005e3fc0534b2cb
3,636,332
def plot(graph, particles=None, polyline=None, particles_alpha=None, label_start_end=True,
         bgcolor='white', node_color='grey', node_size=0, edge_color='lightgrey',
         edge_linewidth=3, **kwargs):
    """
    Plots particle approximation of trajectory
    :param graph: NetworkX MultiDiGraph
        UTM projection
        encodes road network
        generating using OSMnx, see tools.cam_graph.py
    :param particles: MMParticles object (from inference.particles)
        particle approximation
    :param polyline: list-like, each element length 2
        UTM - metres
        series of GPS coordinate observations
    :param particles_alpha: float in [0, 1]
        plotting parameter
        opacity of routes
    :param label_start_end: bool
        whether to label the start and end points of the route
    :param bgcolor: str
        background colour
    :param node_color: str
        node (intersections) colour
    :param node_size: float
        size of nodes (intersections)
    :param edge_color: str
        colour of edges (roads)
    :param edge_linewidth: float
        width of edges (roads)
    :param kwargs:
        additional parameters to ox.plot_graph
    :return: fig, ax
    """
    fig, ax = ox.plot_graph(graph, show=False, close=False, bgcolor=bgcolor,
                            node_color=node_color, node_size=node_size,
                            edge_color=edge_color, edge_linewidth=edge_linewidth,
                            **kwargs)
    ax.set_aspect("equal")
    start_end_points = None
    if particles is not None:
        # Accept a single particle array as well as a list of particles.
        if isinstance(particles, np.ndarray):
            particles = [particles]
        start_end_points = np.zeros((2, 2))
        alpha_min = 0.1
        if particles_alpha is None:
            # Fade routes more as the particle count grows, but never below alpha_min.
            particles_alpha = 1 / len(particles) * (1 - alpha_min) + alpha_min
        xlim = [None, None]
        ylim = [None, None]
        for i, particle in enumerate(particles):
            if particle is None:
                continue
            if len(particle) > 1:
                # Draw the full interpolated route, then keep only the
                # observation-time positions for the scatter markers.
                int_path = interpolate_path(graph, particle, t_column=True)
                cart_int_path = cartesianise_path(graph, int_path, t_column=True)
                ax.plot(cart_int_path[:, 0], cart_int_path[:, 1], color='orange',
                        linewidth=1.5, alpha=particles_alpha)
                cart_path = cartesianise_path(graph, observation_time_rows(particle),
                                              t_column=True)
            else:
                cart_path = cartesianise_path(graph, particle, t_column=True)
            ax.scatter(cart_path[:, 0], cart_path[:, 1], color='orange',
                       alpha=particles_alpha, zorder=2)
            # Average start/end position over all particles for the labels.
            start_end_points[0] += cart_path[0] / len(particles)
            start_end_points[1] += cart_path[-1] / len(particles)
            # Grow the axis limits to cover every particle's path.
            xlim[0] = np.min(cart_path[:, 0]) if xlim[0] is None else min(np.min(cart_path[:, 0]), xlim[0])
            xlim[1] = np.max(cart_path[:, 0]) if xlim[1] is None else max(np.max(cart_path[:, 0]), xlim[1])
            ylim[0] = np.min(cart_path[:, 1]) if ylim[0] is None else min(np.min(cart_path[:, 1]), ylim[0])
            ylim[1] = np.max(cart_path[:, 1]) if ylim[1] is None else max(np.max(cart_path[:, 1]), ylim[1])
        # Pad the limits by 10% so routes are not flush with the frame.
        xlim, ylim = expand_lims(xlim, ylim, 0.1)
        ax.set_xlim(xlim[0], xlim[1])
        ax.set_ylim(ylim[0], ylim[1])
    if polyline is not None:
        poly_arr = np.array(polyline)
        ax.scatter(poly_arr[:, 0], poly_arr[:, 1], marker='x', c='red', s=100,
                   linewidth=3, zorder=10)
        if particles is None:
            # No particles: derive labels and limits from the observations alone.
            start_end_points = poly_arr[np.array([0, -1])]
            xlim = [np.min(poly_arr[:, 0]), np.max(poly_arr[:, 0])]
            ylim = [np.min(poly_arr[:, 1]), np.max(poly_arr[:, 1])]
            xlim, ylim = expand_lims(xlim, ylim, 0.1)
            ax.set_xlim(xlim[0], xlim[1])
            ax.set_ylim(ylim[0], ylim[1])
    if start_end_points is not None and label_start_end:
        # Offset the labels by 25 metres so they don't sit on the markers.
        plt.annotate('Start', start_end_points[0] + 25, zorder=12)
        plt.annotate('End', start_end_points[1] + 25, zorder=12)
    plt.tight_layout()
    return fig, ax
3f57d69bd69c9164715db9fde5020c7da8aa42df
3,636,333
def non_empty_string(value):
    """Must be a non-empty non-blank string.

    Returns False for None/empty values and for strings that contain
    only whitespace.
    """
    return bool(value and value.strip())
707d6c39a52b1ec0e317d156e74fef78170739d9
3,636,334
def metadataAbstractElementRequiredChildElementTest6(): """ Optional child elements, child elements required. >>> doctestMetadataAbstractElementFunction( ... testMetadataAbstractElementKnownChildElements, ... metadataAbstractElementRequiredChildElementTest6(), ... requiredChildElements=["foo"], ... optionalChildElements=["bar"]) [] """ metadata = """<?xml version="1.0" encoding="UTF-8"?> <test> <foo /> <bar /> </test> """ return ElementTree.fromstring(metadata)
dd114d393269b9731aba8bbc6681f1a1b29643e0
3,636,335
def index():
    """ List containers.

    Renders the container overview page. Only containers in the STOPPED
    state are offered as clone sources.
    """
    # g.api is the request-scoped container-host API client set up elsewhere.
    containers = g.api.get_containers()
    clonable_containers = []
    for container in containers:
        # Only stopped containers are safe to clone.
        if container['state'] == 'STOPPED':
            clonable_containers.append(container['name'])
    context = {
        'containers': containers,
        'clonable_containers': clonable_containers,
        'host': g.api.get_host(),
    }
    return render_template('containers.html', **context)
5e5178eba9824a9a4c83b5e179cf1fd6b3b8ed30
3,636,336
import torch


def spatial_discounting_mask():
    """
    Input:
        config: Config should have configuration including HEIGHT, WIDTH,
            DISCOUNTED_MASK (read here from the module-level ``cfg``).
    Output:
        torch.Tensor: spatial discounting mask of shape [1, 1, H, W]
            (float32; moved to CUDA when ``cfg.use_cuda`` is set)
    Description:
        Generate spatial discounting mask constant.
        Spatial discounting mask is first introduced in publication:
            Generative Image Inpainting with Contextual Attention, Yu et al.
    """
    gamma = cfg.spatial_discounting_mask
    height, width = cfg.context_mask_shape
    shape = [1, 1, height, width]
    if cfg.discounted_mask:
        mask_values = np.ones((height, width))
        for i in range(height):
            for j in range(width):
                # gamma**d with d the distance to the nearer border along each
                # axis; assuming gamma < 1 this weights border pixels highest.
                mask_values[i, j] = max(
                    gamma ** min(i, height - i),
                    gamma ** min(j, width - j))
        # Add leading batch and channel axes: (H, W) -> (1, 1, H, W).
        mask_values = np.expand_dims(mask_values, 0)
        mask_values = np.expand_dims(mask_values, 0)
    else:
        # Discounting disabled: uniform mask of ones.
        mask_values = np.ones(shape)
    spatial_discounting_mask_tensor = torch.tensor(mask_values, dtype=torch.float32)
    if cfg.use_cuda:
        spatial_discounting_mask_tensor = spatial_discounting_mask_tensor.cuda()
    return spatial_discounting_mask_tensor
1ac2ffaf0b3ef70b9efe0965ef9ecd8bb16fe7ce
3,636,337
from pathlib import Path
import os


def does_file_exist(filepath: Path) -> bool:
    """Return True if *filepath* exists on disk, logging the outcome either way."""
    exists = os.path.exists(filepath)
    if exists:
        LOG.info("Data path detected:\n{}\.".format(filepath))
    else:
        LOG.info("Data path\n{}\nnot detected. Downloading now...".format(filepath))
    return exists
615d1eea9d43fd7af966a22981d178366b161e05
3,636,338
def pca_results(scaled, pca): """ Plot the explained variance of the DataSet as a barchart, and return a DataFrame with the explained variance for each feature, for each dimension of the PCA. ----------------------------------------------------------- # Parameters: # scaled (pd.DataFrame): The DataFrame in which we are performing PCA on, scaled down using sklearn.preprocessing.scale(): from sklearn.preprocessing import scale scaled = pd.DataFrame(scale(data)) Where `data` is the original DataFrame. # pca: The sklearn.decomposition.PCA() object, which has been fitted to the scaled down DataFrame: pca = PCA(**args).fit(scaled) """ # Dimension indexing dimensions = ['Dimension {}'.format(i) for i in range(1,len(pca.components_)+1)] # PCA components components = pd.DataFrame(np.round(pca.components_, 4), columns = scaled.keys()) components.index = dimensions # PCA explained variance ratios = pca.explained_variance_ratio_.reshape(len(pca.components_), 1) variance_ratios = pd.DataFrame(np.round(ratios, 4), columns = ['Explained Variance']) variance_ratios.index = dimensions # Create a bar plot visualization fig, ax = plt.subplots(figsize = (14,8)) # Plot the feature weights as a function of the components components.plot(ax = ax, kind = 'bar') ax.set_ylabel("Feature Weights") ax.set_xticklabels(dimensions, rotation=0) # Display the explained variance ratios# for i, ev in enumerate(pca.explained_variance_ratio_): ax.text(i-0.40, ax.get_ylim()[1] + 0.05, "Explained Variance\n %.4f"%(ev)) # Return a concatenated DataFrame return pd.concat([variance_ratios, components], axis = 1)
48c5e2c740238c0005740de9171ad509e146fbed
3,636,339
import pwd


def get_uid_from_user(user):
    """Return UID from user name

    Looks up UID matching the supplied user name; returns None if no
    matching name can be found.

    NB returned UID will be an integer.
    """
    try:
        entry = pwd.getpwnam(str(user))
    except KeyError:
        # No such user in the password database.
        return None
    return entry.pw_uid
dd4f6f839f985b923199b438216c567e1e84327d
3,636,340
def get_approval_distance(election_1: ApprovalElection, election_2: ApprovalElection,
                          distance_id: str = None) -> float or (float, list):
    """ Return: distance between approval elections,
        (if applicable) optimal matching """
    inner_distance, main_distance = extract_distance_id(distance_id)

    # Metrics that compare the two elections directly.
    metrics_without_params = {
        'flow': mad.compute_flow,
        'hamming': mad.compute_hamming,
    }
    # Metrics parameterised by an inner distance.
    metrics_with_inner_distance = {
        'approvalwise': mad.compute_approvalwise,
        'coapproval_frequency': mad.compute_coapproval_frequency_vectors,
        'pairwise': mad.compute_pairwise,
        'voterlikeness': mad.compute_voterlikeness,
        'candidatelikeness': mad.compute_candidatelikeness,
    }

    metric = metrics_without_params.get(main_distance)
    if metric is not None:
        return metric(election_1, election_2)
    metric = metrics_with_inner_distance.get(main_distance)
    if metric is not None:
        return metric(election_1, election_2, inner_distance)
776640b12ac799248a35b49f6751c5fa27303ab8
3,636,341
from pathlib import Path


def get_base_folder():
    """Return the base folder of ProfileQC."""
    this_file = Path(__file__)
    return this_file.parent
e0a49bbbe018333dd107466a5178c5579327edc1
3,636,342
def u16le_list_to_byte_list(data):
    """! @brief Convert a halfword array into a byte array (little-endian)."""
    # Emit low byte then high byte for each 16-bit halfword.
    return [byte
            for halfword in data
            for byte in (halfword & 0xff, (halfword >> 8) & 0xff)]
6e4dd1fe69a24f135d0dfa38d5d0ba109ad24b9e
3,636,343
def process_IBM_strings(string):
    """
    Format all the IBM string in the same way, creating a single string of
    lowercase characters: tokens are split on whitespace, lower-cased and
    re-joined with single spaces.

    Fix: the original indexed ``parts[0]`` unconditionally and so raised
    IndexError on empty or whitespace-only input; an empty string is now
    returned instead.

    :param string: the raw string to normalize
    :return: lower-cased, single-spaced version of the input
    """
    return " ".join(part.lower() for part in string.split())
72216b014a18c72d4dec9ec54f24f13de0d46583
3,636,344
def get_sample_size(number_of_clones, fold_difference, error_envelope_x_vals,
                    error_envelope_y_vals, number_of_error_bars):
    """
    This returns the number of cells in a sample that produce the an error bar
    of max_error_bar for a given number_of_clones in the parent population.
    This is the inverse function of the calculation performed by
    error_bar_on_fit_qD

    Delta = (fold_difference - 1.0) * number_of_clones
    """
    # Target error expressed as a fractional deviation from the true value.
    target = fold_difference - 1.
    # bisect_eb numerically inverts the error-bar function `eb` over the
    # tabulated error envelope (both are project-level helpers defined
    # elsewhere -- see their definitions for the exact semantics).
    sample_size = bisect_eb(eb, number_of_clones, error_envelope_x_vals,
                            error_envelope_y_vals, number_of_error_bars, target)
    return sample_size
392009961cd3797bdaab460cfee5808c8a0c4969
3,636,345
def get_r2(y, yhat):
    """Calculate the coefficient of determination (R^2).

    R^2 = 1 - SS_res / SS_tot, where SS_res is the residual sum of
    squares and SS_tot the total sum of squares about the mean of y.
    """
    residual_ss = np.sum((y - yhat) ** 2)
    total_ss = np.sum((y - np.mean(y)) ** 2)
    return 1 - residual_ss / total_ss
e632765696b92eb76032681be790b1e25979a6d3
3,636,346
from typing import Dict from typing import Any def get_context() -> Dict[str, Any]: """ Retrieve the current Server Context. Returns: - Dict[str, Any]: the current context """ ctx = _context.get() # type: ignore if ctx is not None: assert isinstance(ctx, dict) return ctx.copy() else: return {}
dad971abb645fa7c194db5cd9ce45e7c38166f31
3,636,347
import pandas as pd


def kiinteisto_alueiksi(kiinteisto):
    """
    kiinteisto: property register (kiinteistorekisteri) DataFrame.

    An artificial property / constituency division is made for the
    regionalization of postal codes. A brute-force distribution is used,
    where the relative number of residential properties in the constituency
    is divided into postcodes. Generally, constituencies are smaller than
    postcode areas. The paid property data also includes the number of
    apartments per property; using it would make the division more accurate.
    In various inspections the division seemed competent.

    Returns:
        kiintosuus: DataFrame with columns Alue (constituency),
        Postinumero (postcode) and Osuus (the estimated share).
    """
    # Residential properties only (Käyttötarkoitus == 1), counted per
    # municipality / constituency / postcode; 'index' carries the counts.
    kiint=kiinteisto[kiinteisto['Käyttötarkoitus']==1].reset_index().groupby(['Kuntanumero','Alue','Postinumero'],as_index=False ).count()
    kiint=kiint[['Alue','Postinumero','index']]
    # Property counts summed per (postcode, constituency) pair ...
    kiintpnrot=kiint.reset_index().groupby(['Postinumero', 'Alue'],as_index=False ).sum()[['Alue','Postinumero','index']]
    # ... and totals per constituency.
    kiintalueet=kiint.reset_index().groupby(['Alue'],as_index=False).sum()[['Alue','index']]
    # Join the per-pair counts with the per-constituency totals by Alue.
    kiintosuus= pd.merge(kiintpnrot, kiintalueet, how='inner', on='Alue', left_index=False, right_index=False, suffixes=('_x', '_y'), copy=True, indicator=False, validate=None)
    # Brute-force share: properties in this (postcode, constituency) cell
    # divided by the constituency's total property count.
    kiintosuus['Osuus']=kiintosuus['index_x']/kiintosuus['index_y']
    kiintosuus=kiintosuus[['Alue','Postinumero','Osuus']]
    return(kiintosuus)
fee095ccc4cb82b735c2d314a96ab20bf0790a9a
3,636,348
def G1DListCrossoverSinglePoint(genome, **args):
    """ The crossover of G1DList, Single Point

    Expects ``args["mom"]`` and ``args["dad"]`` (parent genomes) and
    ``args["count"]`` (number of children to produce, 1 or 2). Returns a
    ``(sister, brother)`` tuple; an unproduced child is ``None``.

    .. warning:: You can't use this crossover method for lists with just one element.
    """
    sister = None
    brother = None
    gMom = args["mom"]
    gDad = args["dad"]

    if len(gMom) == 1:
        # A one-element genome has no interior cut point.
        utils.raiseException("The 1D List have one element, can't use the Single Point Crossover method !", TypeError)

    # Cut point drawn starting at 1 so the head is never empty
    # (prng is the project's RNG; confirm whether its upper bound is inclusive).
    cut = prng.randint(1, len(gMom))

    if args["count"] >= 1:
        sister = gMom.clone()
        sister.resetStats()
        # Sister keeps mom's head and takes dad's tail.
        sister[cut:] = gDad[cut:]

    if args["count"] == 2:
        brother = gDad.clone()
        brother.resetStats()
        # Brother keeps dad's head and takes mom's tail.
        brother[cut:] = gMom[cut:]

    return (sister, brother)
1cf77e96fb648a6d8664d157425f47f789248739
3,636,349
def optional_observation_map(env, inner_obs):
    """
    If the env implements the `observation` function (i.e. if one of the
    wrappers is an ObservationWrapper), apply that `observation`
    transformation to the observation produced by the inner environment;
    otherwise pass the inner observation through unchanged.
    """
    if not hasattr(env, 'observation'):
        return inner_obs
    return env.observation(inner_obs)
b1b57e74e498e520df80a310f95d1c79799a517d
3,636,350
def RunMetadataLabels(run_metadata):
    """Returns all timeline labels in run_metadata, flattened across devices."""
    return [
        node_stats.timeline_label
        for dev_stats in run_metadata.step_stats.dev_stats
        for node_stats in dev_stats.node_stats
    ]
277745263c75c4c6037f8b7a26b9421699bec3a5
3,636,351
import sys
import re


def ShouldPackageFile(filename, target):
    """Returns true if the file should be a part of the resulting archive.

    :param filename: file name to test against the platform's exclusion rules
    :param target: unused here -- presumably the build target; confirm with callers
    """
    # Per-platform intermediate build artefacts are never packaged.
    if chromium_utils.IsMac():
        file_filter = r'^.+\.(a|dSYM)$'
    elif chromium_utils.IsLinux():
        file_filter = r'^.+\.(o|a|d)$'
    else:
        raise NotImplementedError('%s is not supported.' % sys.platform)
    if re.match(file_filter, filename):
        return False

    # Skip files that we don't care about. Mostly directories.
    things_to_skip = chromium_utils.FileExclusions()
    if filename in things_to_skip:
        return False

    return True
f3321d205378e4dad4ea6734bf0399d3e286b241
3,636,352
def is_entity_extractor_present(interpreter: Interpreter) -> bool:
    """Return True when the interpreter pipeline contains at least one
    entity extractor."""
    return get_entity_extractors(interpreter) != []
0227bdd1f6d7a5040bff853de62075f040337f23
3,636,353
def get_workflow(name, namespace):
    """Fetch a single Argo Workflow custom object from the cluster.

    :param name: name of the workflow
    :param namespace: Kubernetes namespace to look in
    :return: the workflow custom object as returned by the k8s API
    """
    client = _get_k8s_custom_objects_client()
    # Argo workflows live under the argoproj.io/v1alpha1 API group.
    return client.get_namespaced_custom_object(
        "argoproj.io", "v1alpha1", namespace, "workflows", name)
cea58e40b9279a3134766374cd8f5e9eb2e1b4f8
3,636,354
import torch


def step(x, b):
    """
    The step function for the ideal quantization function in the test stage.

    Returns a tensor of the same shape and dtype as ``x`` that is 1 where
    ``x - b > 0`` and 0 elsewhere.
    """
    return torch.gt(x - b, 0.0).to(x.dtype)
bac5dd8cbaa4da41219f03a85e086dd3bdd1e554
3,636,355
from typing import Union


def encode_intended_validator(
        validator_address: Union[Address, str],
        primitive: bytes = None,
        *,
        hexstr: str = None,
        text: str = None) -> SignableMessage:
    """
    Encode a message using the "intended validator" approach (ie~ version 0)
    defined in EIP-191_.

    Supply the message as exactly one of these three arguments:
    bytes as a primitive, a hex string, or a unicode string.

    .. WARNING:: Note that this code has not gone through an external audit.
        Also, watch for updates to the format, as the EIP is still in DRAFT.

    :param validator_address: which on-chain contract is capable of validating this
        message, provided as a checksummed address or in native bytes.
    :param primitive: the binary message to be signed
    :type primitive: bytes or int
    :param str hexstr: the message encoded as hex
    :param str text: the message as a series of unicode characters (a normal Py3 str)
    :returns: The EIP-191 encoded message, ready for signing

    .. _EIP-191: https://eips.ethereum.org/EIPS/eip-191
    """
    if not is_valid_address(validator_address):
        raise ValidationError(
            f"Cannot encode message with 'Validator Address': {validator_address}. "
            "It must be a checksum address, or an address converted to bytes."
        )
    # The validator_address is a str or Address (which is a subtype of bytes). Both of
    # these are AnyStr, which includes str and bytes. Not sure why mypy complains here...
    canonical_address = to_canonical_address(validator_address)  # type: ignore
    # Exactly one of primitive/hexstr/text is expected; to_bytes normalizes it.
    message_bytes = to_bytes(primitive, hexstr=hexstr, text=text)
    return SignableMessage(
        HexBytes(b'\x00'),  # version 0, as defined in EIP-191
        canonical_address,
        message_bytes,
    )
bb86535c06204bb0b2bf25a7e595cddb3bc83603
3,636,356
def _ip_desc_from_proto(proto):
    """ Convert protobuf to an IP descriptor.

    Args:
        proto (protos.keyval_pb2.IPDesc): protobuf of an IP descriptor
    Returns:
        desc (magma.mobilityd.IPDesc): IP descriptor from :proto:
    """
    ip = ip_address(proto.ip.address)
    # Rebuild the owning network from its address and prefix length.
    ip_block_addr = ip_address(proto.ip_block.net_address).exploded
    ip_block = ip_network(
        '{}/{}'.format(
            ip_block_addr,
            proto.ip_block.prefix_len,
        ),
    )
    # Enum fields are translated to their string forms by project helpers.
    state = _desc_state_proto_to_str(proto.state)
    sid = proto.sid.id
    ip_type = _desc_type_proto_to_str(proto.type)
    return ip_descriptor.IPDesc(
        ip=ip,
        ip_block=ip_block,
        state=state,
        sid=sid,
        ip_type=ip_type,
        vlan_id=proto.vlan_id,
    )
b24fd6636cc30c707b8f1539cf16515946370b39
3,636,357
import inspect


def inheritdocstrings(cls):
    """A class decorator for inheriting method docstrings.

    Public callables on *cls* that lack a docstring receive the docstring
    of the nearest definition of the same name found up the MRO.

    Fix: on Python 3, methods retrieved from a class are plain functions
    and have no ``__func__`` attribute, so the original assignment raised
    (and silently swallowed) AttributeError and never copied anything.
    We now fall back to the callable itself when ``__func__`` is absent.

    >>> class A(object):
    ...     class_attr = True
    ...     def method(self):
    ...         '''Method docstring.'''
    >>> @inheritdocstrings
    ... class B(A):
    ...     def method(self):
    ...         pass
    >>> B.method.__doc__
    'Method docstring.'
    """
    for name, cls_attr in inspect.getmembers(cls, callable):
        if not name.startswith('_') and not cls_attr.__doc__:
            for c in cls.mro():
                if c is cls:
                    continue
                attr = c.__dict__.get(name)
                if attr and attr.__doc__:
                    # Bound/unbound methods expose the underlying function via
                    # __func__; plain functions (Python 3) are the target itself.
                    target = getattr(cls_attr, '__func__', cls_attr)
                    try:
                        target.__doc__ = attr.__doc__
                    except (AttributeError, TypeError):
                        # Probably a read-only attribute, swallow it.
                        pass
                    break
    return cls
4af61e59dc7b3ba53243107bacd0738c2bc2e2a9
3,636,358
def get_totd_text():
    """
    Get the text for the Top of the Day post.

    Builds two sections -- most-upvoted submissions and most-upvoted
    comments drawn from those submissions -- and renders them into the
    project's post templates.

    :return: The body for the post.
    """
    sections = []

    # Most Upvoted Posts: top 5 of r/all for the day, sorted by score.
    top_submissions = sorted([submission for submission in get_reddit().subreddit("all").top("day", limit=5)],
                             key=lambda x: x.score, reverse=True)
    items = [format_item(i, item) for i, item in enumerate(top_submissions)]
    sections.append(get_templates()["section_template"].format(
        section_title="Most Upvoted Posts of the Day",
        section_note="",
        title_body="Title",
        items="\n".join(items),
    ))

    # Most Upvoted Comments: pooled from the top submissions above
    # (non-Comment entries such as MoreComments placeholders are skipped).
    comments = []
    for submission in top_submissions:
        comments.extend([[comment, comment.score] for comment in submission.comments
                         if isinstance(comment, Comment)])
    top_comments = sorted(comments, key=lambda x: x[1], reverse=True)
    items = [format_item(i, item) for i, item in
             enumerate([comment_info[0] for comment_info in top_comments[:5]])]
    sections.append(get_templates()["section_template"].format(
        section_title="Most Upvoted Comments of the Day",
        section_note="\n\n^(Note: These may not be entirely accurate. Currently these are out of the comments taken from the top 5 submissions.)",
        title_body="Body",
        items="\n".join(items),
    ))

    # Stitch the sections into the main post template.
    submission_text = get_templates()["main"].format(date=current_date(),
                                                     sections="\n\n".join(sections))
    return submission_text
2077da29ea28f2563485eed14dc277b1646e27cd
3,636,359
import os


def check_checkpoints(store_path):
    """
    Inputs
        1) store_path: The path where the checkpoint file will be searched at

    Outputs
        1) checkpoint_file: The latest checkpoint file name, or "" if none exists
        2) flag: True if the directory exists at the path, False otherwise

    Function: Checks whether a prior directory exists for the task. If not,
    returns ["", False]. If the directory exists, the '*pth.tr' file with the
    highest leading epoch number is returned (or "" when there is none).

    Fixes over the original: the epoch comparison used to compare a one-char
    string against an int (TypeError), the running maximum was never updated,
    and multi-digit epoch numbers were truncated to their first digit.
    """
    # If the directory does not exist there is nothing to resume from.
    if not os.path.isdir(store_path):
        return ["", False]

    checkpoint_file = ""
    max_epoch = -1
    for entry in os.listdir(store_path):
        if not os.path.isfile(os.path.join(store_path, entry)):
            continue
        if not entry.endswith('pth.tr'):
            continue
        # The epoch number is encoded as the leading digits of the filename;
        # skip checkpoint files without a parseable epoch prefix.
        digits = ''
        for ch in entry:
            if not ch.isdigit():
                break
            digits += ch
        if not digits:
            continue
        epoch = int(digits)
        if epoch > max_epoch:
            max_epoch = epoch
            checkpoint_file = entry

    # Directory exists; checkpoint_file is "" when no usable checkpoint was found.
    return [checkpoint_file, True]
f4328a23d9c20258b89c9825ed827aff3308e461
3,636,360
import pickle
import time


def dump_ensure_space(file, value, fun_err=None):
    """
    Pickle ``value`` into ``file``, retrying until enough disk space exists.

    If the dump fails with "No space left on device", the write is retried
    indefinitely with a linearly growing delay (0.1s per attempt, capped at
    one hour). Any other IOError propagates.

    Note: this method is less efficient and slower than a simple dump.

    :param file: file object to dump into
    :param value: value to dump
    :param fun_err: callback invoked before each sleep with
        (times_waiting, time_to_retry, err); defaults to a no-op
    :return: None
    """
    if fun_err is None:
        fun_err = lambda attempts, delay, error: None

    attempts = 0
    while True:
        try:
            pickle.dump(value, file, pickle.HIGHEST_PROTOCOL)
            return
        except IOError as err:
            if "No space left on device" not in str(err):
                raise
            attempts += 1
            # Linear back-off, capped at one hour.
            delay = min(0.1 * attempts, 3600)
            fun_err(attempts, delay, err)
            time.sleep(delay)
622ed232a3e747e55004ab28225418fc3c6570ef
3,636,361
import torch


def store_images(input, predicts, target, dataset='promise12'):
    """
    store the test or valid image in tensorboardX images container
    :param input: NxCxHxW input batch (C assumed to be 1 or 3)
    :param predicts: NxCxHxW per-class prediction scores
    :param target: NxHxW ground-truth label maps
    :param dataset: palette name passed to get_mask_pallete
    :return: a single grid image (input | prediction | target per sample)
    """
    N = input.shape[0]
    grid_image_list = []
    for i in range(N):
        channel = input[i].shape[0]
        # Argmax over the class axis gives the predicted label map.
        pred = torch.max(predicts[i], 0)[1].cpu().numpy()
        mask2s = get_mask_pallete(pred, dataset, channel=channel)
        if channel == 3:
            # rgb: HWC palette image -> CHW float tensor
            mask2s = torch.from_numpy(np.array(mask2s).transpose([2,0,1])).float()
        else:
            # gray: add a leading channel axis
            mask2s = torch.from_numpy(np.expand_dims(np.array(mask2s),axis=0)).float()
        gt = target[i].cpu().numpy()
        target2s = get_mask_pallete(gt, dataset, channel=channel)
        if channel == 3:
            target2s = torch.from_numpy(np.array(target2s).transpose([2,0,1])).float()
        else:
            target2s = torch.from_numpy(np.expand_dims(np.array(target2s), axis=0)).float()
        # Triplet per sample: raw input, predicted mask, ground-truth mask.
        grid_image_list += [input[i].cpu(), mask2s, target2s]
    # make_grid (torchvision) normalizes each image independently.
    grid_image = make_grid(grid_image_list, normalize=True, scale_each=True)
    return grid_image
14d853cdf98bea358f9170162d6a5ea27c1f88a8
3,636,362
from typing import Optional
from typing import List
from typing import Dict
import csv


def snmptable(ipaddress: str, oid: str, community: str = 'public',
              port: OneOf[str, int] = 161, timeout: int = 3,
              sortkey: Optional[str] = None
              ) -> OneOf[List[Dict[str, str]], Dict[str, Dict[str, str]]]:
    """
    Runs Net-SNMP's 'snmptable' command on a given OID, converts the results
    into a list of dictionaries, and optionally sorts the list by a given key.

    :param community: the snmpv2 community string
    :param ipaddress: the IP address of the target SNMP server
    :param oid: the Object IDentifier to request from the target SNMP server
    :param port: the port on which SNMP is running on the target server
    :param sortkey: the key within each dict upon which to sort the list of
        results
    :param timeout: the number of seconds to wait for a response from the SNMP
        server
    :return: a list of dicts, one for each row of the table. The keys of the
        dicts correspond to the column names of the table.
    :raises `~snmp_cmds.exceptions.SNMPTimeout`: if the target SNMP server
        fails to respond
    :raises `~snmp_cmds.exceptions.SNMPInvalidAddress`: if the hostname or
        IP address supplied is not valid or cannot be resolved
    :raises `~snmp_cmds.exceptions.SNMPError`: if the underlying Net-SNMP
        command produces an unknown or unhandled error
    :raises `~snmp_cmds.exceptions.SNMPTableError`: if the requested OID is
        not a valid table
    """
    # We want our delimiter to be something that would never show up in the
    # wild, so we'll use the non-printable ascii character RS (Record Separator)
    delimiter = '\x1E'
    ipaddress = validate_ip_address(ipaddress)
    host = '{}:{}'.format(ipaddress, port)
    cmdargs = [
        'snmptable', '-m', 'ALL', '-Pe', '-t', str(timeout), '-r', '0',
        '-v', '2c', '-Cif', delimiter, '-c', community, host, oid
    ]
    cmd = run(cmdargs, stdout=PIPE, stderr=PIPE)

    # Handle any errors that came up.
    # BUGFIX: the original tested ``cmd.returncode is not 0``; identity
    # comparison against an int literal is unreliable and a SyntaxWarning
    # on modern CPython -- use an equality comparison instead.
    if cmd.returncode != 0:
        check_for_timeout(cmd.stderr, host)
        if b'Was that a table?' in cmd.stderr:
            raise SNMPTableError(oid)
        else:
            handle_unknown_error(' '.join(cmdargs), cmd.stderr)
    # Process results
    else:
        # subprocess returns stdout from completed command as a single bytes
        # string. we'll split it into a list of bytes strings, and convert
        # each into a standard python string which the csv reader can handle
        cmdoutput = cmd.stdout.splitlines()
        cmdoutput = [item.decode('utf-8') for item in cmdoutput]
        # Strip the table name and the blank line following it from the output,
        # so all that remains is the table itself
        cmdoutput = cmdoutput[2:]
        table_parser = csv.DictReader(cmdoutput, delimiter=delimiter)
        results = [element for element in table_parser]
        if sortkey:
            results.sort(key=lambda i: i[sortkey])
        return results
bb3e749d17c5038a2ed8857fab2ac226ee175c3f
3,636,363
def GetFile(message=None, title=None, directory=None, fileName=None,
            allowsMultipleSelection=False, fileTypes=None):
    """Ask the user to select a file.

    Some of these arguments are not supported:
    title, directory, fileName, allowsMultipleSelection and fileTypes
    are here for compatibility reasons.

    NOTE: legacy Python 2 / classic Mac Carbon (Navigation Services) code;
    this will not run under Python 3.
    """
    default_flags = 0x56 | kNavSupportPackages
    args, tpwanted = _process_Nav_args(default_flags, message=message)
    _interact()
    try:
        rr = Nav.NavChooseFile(args)
        good = 1
    except Nav.error, arg:
        # userCancelledErr (-128) means the user dismissed the dialog;
        # anything else is re-raised.
        if arg[0] != -128: # userCancelledErr
            raise Nav.error, arg
        return None
    if not rr.validRecord or not rr.selection:
        return None
    # Convert the Navigation Services selection into whichever type the
    # caller requested (FSRef, FSSpec, byte-string path or unicode path).
    if issubclass(tpwanted, Carbon.File.FSRef):
        return tpwanted(rr.selection_fsr[0])
    if issubclass(tpwanted, Carbon.File.FSSpec):
        return tpwanted(rr.selection[0])
    if issubclass(tpwanted, str):
        return tpwanted(rr.selection_fsr[0].as_pathname())
    if issubclass(tpwanted, unicode):
        return tpwanted(rr.selection_fsr[0].as_pathname(), 'utf8')
    raise TypeError, "Unknown value for argument 'wanted': %s" % repr(tpwanted)
88b4d01b66542f4414f24cf123ba3a92e98befe2
3,636,364
def lda_recommend(context_list):
    """ With multiprocessing using Dask.

    Maps the tokenized query in ``context_list`` into the trained LDA-Mallet
    topic space and returns the ids of the ``topn`` most similar documents
    (module-level ``id2word_dictionary``, ``ldamallet``, ``malletindex`` and
    ``docid_to_magid`` must already be loaded).
    """
    print("Recommending")
    topn = 500
    sleep(0.2)
    # Bag-of-words representation of the query tokens.
    vec_bow = id2word_dictionary.doc2bow(context_list)
    # This line takes a LONG time: it has to map to each of the 300 topics
    vec_ldamallet = ldamallet[vec_bow]  # Convert the query to LDA space
    # Cosine-style similarities against the pre-built index, best first.
    sims = malletindex[vec_ldamallet]
    sims = sorted(enumerate(sims), key=lambda item: -item[1])[:topn]
    # sims is a list of tuples of (docid -- line num in original training
    # file, probability)
    return [docid_to_magid.get(docid) for docid, prob in sims]
7435de1aee9e43596b5467036b00b706502aa254
3,636,365
def grids_skf_lr(data_x, data_y, grid_params, weight_classes=None, scv_folds=5):
    """
    Grid-search a logistic-regression classifier with stratified K-fold CV.

    :param data_x: feature matrix (min-max scaled internally before fitting)
    :param data_y: target labels
    :param grid_params: parameter grid passed to GridSearchCV
    :param weight_classes: class-weight mapping; defaults to equal
        weights {0: 1, 1: 1}
    :param scv_folds: number of stratified CV folds
    :return: the refitted best estimator found by the grid search
    """
    if weight_classes is None:
        weight_classes = {0: 1, 1: 1}
    m_log = LogisticRegression(
        class_weight=weight_classes,
        random_state=0,
        multi_class='ovr',
        n_jobs=-1
    )
    # ml_metric is a module-level scorer defined elsewhere in this project.
    grid_s = GridSearchCV(
        m_log,
        grid_params,
        n_jobs=-1,
        cv=StratifiedKFold(n_splits=scv_folds, shuffle=True),
        scoring=ml_metric,
        refit=True,
        verbose=1
    )
    # Scale features to [0, 1] before the search.
    minmax = MinMaxScaler()
    train_feat_norm = minmax.fit_transform(data_x)
    grid_s.fit(train_feat_norm, data_y)
    print(grid_s.best_estimator_)
    return grid_s.best_estimator_
7cf79512b3663e8b01ea96219d746c3a6a2fd4b0
3,636,366
def zeros(rows, cols, fortran=True):
    """Return the zero matrix with the given shape.

    The backing CuPy array is complex128, in Fortran (column-major) order
    by default, C order otherwise.
    """
    if fortran:
        order = "F"
    else:
        order = "C"
    backing = cp.zeros(shape=(rows, cols), dtype=cp.complex128, order=order)
    return CuPyDense._raw_cupy_constructor(backing)
90e2a7bb7bfdaa5b8b242d1aa543b93dab5d1a60
3,636,367
def compute_trapezoidal_approx(bm, t0, y0, dt, sqrt_dt, dt1_div_dt=10, dt1_min=0.01):
    """Estimate int_{t0}^{t0+dt} int_{t0}^{s} dW(u) ds with trapezoidal rule.

    Slower compared to using the Gaussian with analytically derived mean and
    standard deviation, but ensures true determinism, since this rids the
    random number generation in the solver, i.e. all randomness comes from
    `bm`.

    The loop is from using the Trapezoidal rule to estimate int_0^1 v(s) ds
    with step size `dt1`.

    :param bm: callable returning the Brownian path values at a given time
    :param t0: interval start time
    :param y0: reference tensors whose dtype/device the result is cast to
    :param dt: interval length
    :param sqrt_dt: precomputed sqrt(dt)
    :param dt1_div_dt: inner step is dt1_div_dt * dt, clamped to [dt1_min, 1]
    :param dt1_min: lower bound on the inner step size
    """
    dt, sqrt_dt = float(dt), float(sqrt_dt)
    # Inner trapezoidal step on the rescaled unit interval, clamped to [dt1_min, 1].
    dt1 = max(min(1.0, dt1_div_dt * dt), dt1_min)
    # Rescaled Brownian increments v(s) = W(t0 + s*dt) / sqrt(dt).
    v = lambda s: [bmi / sqrt_dt for bmi in bm(s * dt + t0)]  # noqa
    # Estimate int_0^1 v(s) ds by Trapezoidal rule.
    # Based on Section 1.4 of Stochastic Numerics for Mathematical Physics.
    # Endpoints enter with weight 1, interior points with weight 2 (the
    # initial -v0 - v1 cancels the double-counted endpoints from the loop).
    int_v_01 = [- v0 - v1 for v0, v1 in zip(v(0.0), v(1.0))]
    for t in np.arange(0, 1 + 1e-7, dt1, dtype=float):
        int_v_01 = [a + 2. * b for a, b in zip(int_v_01, v(t))]
    int_v_01 = [a * dt1 / 2. for a in int_v_01]
    # Undo the rescaling and subtract dt*W(t0); cast to y0's dtype/device.
    return [(dt ** (3 / 2) * a - dt * b).to(y0[0]) for a, b in zip(int_v_01, bm(t0))]
719090d6427c0f37dd8aab4f8bfb60dfdfb8c362
3,636,368
def vavrycuk_psencik_hti(vp1, vs1, p1, d1, e1, y1, vp2, vs2, p2, d2, e2, y2, phi, theta1):
    """
    Reflectivity for arbitrarily oriented HTI media, using the formulation
    derived by Vavrycuk and Psencik [1998], "PP-wave reflection coefficients
    in weakly anisotropic elastic media".

    :param vp1, vs1, p1: P velocity, S velocity and density of the upper medium
    :param d1, e1, y1: Thomsen parameters delta, epsilon, gamma of the upper medium
    :param vp2, vs2, p2, d2, e2, y2: the same quantities for the lower medium
    :param phi: azimuth in degrees
    :param theta1: incidence angle in degrees
    :return: PP reflection coefficient Rpp

    Fix: the azimuthal-gradient gamma term previously used ``np.sin(phi)*2``
    (i.e. 2*sin(phi)); the formulation requires sin(phi) squared, matching
    the paired cos^2(phi) factor.
    """
    theta1 = np.radians(theta1)
    phi = np.radians(phi)
    theta2, thetas1, thetas2, p = snell(vp1, vp2, vs1, vs2, theta1)
    # NOTE(review): the average angle computed here is immediately overwritten
    # with the incidence angle below -- confirm which is intended.
    theta = (theta1 + theta2)/2
    theta = theta1

    # Shear moduli, P impedances, interface averages and contrasts.
    G1 = p1*(vs1**2)
    G2 = p2*(vs2**2)
    Z1 = p1*vp1
    Z2 = p2*vp2
    a = (vp1 + vp2)/2
    B = (vs1 + vs2)/2
    Z = (Z1 + Z2)/2
    G = (G1 + G2)/2
    dZ = Z2 - Z1
    da = vp2 - vp1
    dG = G2 - G1
    dd = d2 - d1
    de = e2 - e1
    dy = y2 - y1

    # Isotropic-background term.
    A = (1/2*(dZ/Z) + 1/2*(da/a)*np.tan(theta)**2 - 2*((B/a)**2)*(dG/G)*np.sin(theta)**2)
    # Azimuth-dependent gradient term (B, the average S velocity, is rebound
    # to the coefficient here -- order matters).
    B = 1/2*(dd*(np.cos(phi)**2) - 8*((B/a)**2)*dy*(np.sin(phi)**2))
    # Curvature (far-offset) term.
    C = 1/2*(de*(np.cos(phi)**4) + dd*(np.cos(phi)**2)*(np.sin(phi)**2))

    Rpp = A + B*np.sin(theta)**2 + C*np.sin(theta)**2*np.tan(theta)**2
    return(Rpp)
a48ff4cb76341e199c3d386b40d58bd6a2031e01
3,636,369
def concatenate_time_series(time_series_seq): """Concatenates a sequence of time-series objects in time. The input can be any iterable of time-series objects; metadata, sampling rates and other attributes are kept from the last one in the sequence. This one requires that all the time-series in the list have the same sampling rate and that all the data have the same number of items in all dimensions, except the time dimension""" # Extract the data pointer for each and build a common data block data = [] metadata = {} for ts in time_series_seq: data.append(ts.data) metadata.update(ts.metadata) # Sampling interval is read from the last one tseries = TimeSeries(np.concatenate(data,-1), sampling_interval=ts.sampling_interval, metadata=metadata) return tseries
ce2f51e0a14bf2b6de16ce366041522556b0793f
3,636,370
def deeplink_url_patterns(
        url_base_pattern=r'^init/%s/$',
        login_init_func=login_init,
        ):
    """
    Returns new deeplink URLs based on 'links' from settings.SAML2IDP_REMOTES.

    Parameters:
    - url_base_pattern - Specify this if you need non-standard deeplink URLs.
        NOTE: This will probably closely match the 'login_init' URL.
    - login_init_func - view callable wired into each generated pattern.
    """
    resources = get_deeplink_resources()
    new_patterns = []
    for resource in resources:
        # One URL pattern per deeplink resource; the resource name is both
        # interpolated into the pattern and passed to the view as a kwarg.
        new_patterns += [
            url(
                url_base_pattern % resource,
                login_init_func,
                {
                    'resource': resource,
                },
            )
        ]
    return new_patterns
bda7c28e0ce46e4b7f236562a3f8da09a5977c0b
3,636,371
def read_words():
    """
    Returns an array of all words in words.txt, in file order,
    splitting each line on single spaces.
    """
    return [word
            for line in read_file('resources/words.txt')
            for word in line.split(' ')]
96768fc1cd593b29caefaa1489f0478832b10886
3,636,372
def lynotename(midinote):
    """Return the LilyPond/Pently name of a MIDI note number.

    For example, given 60 (which means middle C), return "c'".
    """
    octave, pitch_class = divmod(midinote, 12)
    base = notenames[pitch_class]
    # Octave 4 is unmarked; lower octaves get commas, higher get quotes.
    marks = "," * (4 - octave) if octave < 4 else "'" * (octave - 4)
    return base + marks
7eda2d4b5075759413e25626b70c5cd56c183447
3,636,373
import datetime


def json_serial(obj):
    """
    Fallback serializer for ``json.dumps``.

    Serializes datetime and date objects to ISO-8601 strings and
    stringifies anything else.

    :param obj: an object to serialize.
    :returns: a serialized string.
    """
    # Bug fix: the original did ``from datetime import datetime`` and then
    # referenced ``datetime.datetime`` / ``datetime.date`` on the *class*,
    # which raises AttributeError at runtime. Importing the module makes
    # both attribute lookups valid.
    if isinstance(obj, (datetime.datetime, datetime.date)):
        return obj.isoformat()
    return str(obj)
1b4c23d84e89cb77d111160a5328046c62fb4227
3,636,374
# NOTE(review): these imports were flattened in this copy; the code below
# calls ``glob(...)`` and ``copy(...)`` directly, so the originals were
# presumably ``from glob import glob`` and ``from copy import copy`` —
# confirm against the upstream file.
import logging
import glob
import os
import copy
import warnings


def get_library_file(instrument, detector, filt, pupil, wfe, wfe_group,
                     library_path, wings=False, segment_id=None):
    """Given an instrument and filter name along with the path of
    the PSF library, find the appropriate library file to load.

    Parameters
    -----------
    instrument : str
        Name of instrument the PSFs are from

    detector : str
        Name of the detector within ```instrument```

    filt : str
        Name of filter used for PSF library creation

    pupil : str
        Name of pupil wheel element used for PSF library creation

    wfe : str
        Wavefront error. Can be 'predicted' or 'requirements'

    wfe_group : int
        Wavefront error realization group. Must be an integer from 0 - 9.

    library_path : str
        Path pointing to the location of the PSF library

    wings : bool, optional
        Must the library file contain PSF wings or PSF cores? Default is False.

    segment_id : int or None, optional
        If specified, returns a segment PSF library file and denotes the ID
        of the mirror segment

    Returns
    --------
    matches : str
        Name of the PSF library file for the instrument and filter name

    Raises
    ------
    ValueError
        If zero or more than one library file matches the request.
    """
    logger = logging.getLogger('mirage.psf.psf_selection.get_library_file')

    psf_files = glob(os.path.join(library_path, '*.fits'))

    # Determine if the PSF path is default or not
    mirage_dir = expand_environment_variable('MIRAGE_DATA')
    gridded_dir = os.path.join(mirage_dir, '{}/gridded_psf_library'.format(instrument.lower()))
    if wings:
        gridded_dir = os.path.join(gridded_dir, 'psf_wings')
    default_psf = library_path == gridded_dir

    # Create a dictionary of header information for all PSF library files
    matches = []

    # Normalize the user-supplied parameters to the header conventions.
    instrument = instrument.upper()
    detector = detector.upper()
    filt = filt.upper()
    pupil = pupil.upper()
    wfe = wfe.lower()

    # set default
    file_wfe = ''

    # handle the NIRISS NRM case
    if pupil == 'NRM':
        pupil = 'MASK_NRM'

    # Handle the DHS for Coarse Phasing - this is a workaround for webbpsf not
    # implementing this. We're going to load an ITM image in any case in this mode
    # so the PSF is entirely unused, but we need to load something or else MIRAGE errors.
    if pupil == 'GDHS0' or pupil == 'GDHS60':
        pupil = 'CLEAR'

    # Inspect every candidate FITS header and keep the files whose header
    # values agree with the requested parameters.
    for filename in psf_files:
        try:
            header = fits.getheader(filename)

            # Determine if it is an ITM file
            itm_sim = header.get('ORIGIN', '') == 'ITM'

            # Compare the header entries to the user input
            file_inst = header['INSTRUME'].upper()
            try:
                file_det = header['DETECTOR'].upper()
            except KeyError:
                file_det = header['DET_NAME'].upper()
            file_filt = header['FILTER'].upper()

            try:
                file_pupil = header['PUPIL'].upper()
            except KeyError:
                # If no pupil mask value is present, then assume the CLEAR is
                # being used
                if file_inst.upper() == 'NIRCAM':
                    file_pupil = 'CLEAR'
                elif file_inst.upper() == 'NIRISS':
                    try:
                        file_pupil = header['PUPIL'].upper()  # can be 'MASK_NRM'
                    except KeyError:
                        file_pupil = 'CLEARP'

            # NIRISS has many filters in the pupil wheel. WebbPSF does
            # not make a distinction, but Mirage does. Adjust the info
            # to match Mirage's expectations
            if file_inst.upper() == 'NIRISS' and file_filt in NIRISS_PUPIL_WHEEL_FILTERS:
                save_filt = copy(file_filt)
                if file_pupil == 'CLEARP':
                    file_filt = 'CLEAR'
                else:
                    raise ValueError(('Pupil value is something other than '
                                      'CLEARP, but the filter being used is '
                                      'in the pupil wheel.'))
                file_pupil = save_filt

            # Same for NIRCam
            if file_inst.upper() == 'NIRCAM' and file_filt in NIRCAM_PUPIL_WHEEL_FILTERS:
                save_filt = copy(file_filt)
                if file_pupil == 'CLEAR':
                    if save_filt[0:2] == 'F4':
                        file_filt = 'F444W'
                    elif save_filt[0:2] == 'F3':
                        file_filt = 'F322W2'
                    elif save_filt[0:2] == 'F1':
                        file_filt = 'F150W2'
                else:
                    raise ValueError(('Pupil value is something other than '
                                      'CLEAR, but the filter being used is '
                                      'in the pupil wheel.'))
                file_pupil = save_filt

            # Non-segment, non-ITM libraries record the wavefront-error
            # realization in the OPD keywords.
            if segment_id is None and not itm_sim:
                opd = header['OPD_FILE']
                if 'requirements' in opd:
                    file_wfe = 'requirements'
                elif 'predicted' in opd:
                    file_wfe = 'predicted'

                file_wfe_grp = header['OPDSLICE']

            if segment_id is not None:
                segment_id = int(segment_id)
                file_segment_id = int(header['SEGID'])

            if segment_id is None and itm_sim:
                # If we have an ITM library, then wfe is
                # meaningless, so force it to match
                file_wfe = 'predicted'
                wfe = 'predicted'

            # allow check below to pass for FGS
            if instrument.lower() == 'fgs':
                file_filt = 'N/A'
                filt = 'N/A'
                file_pupil = 'N/A'
                pupil = 'N/A'

            # Evaluate if the file matches the given parameters
            match = (file_inst == instrument
                     and file_det == detector
                     and file_filt == filt
                     and file_pupil == pupil
                     and file_wfe == wfe)

            # Default (non-wings, non-segment, non-ITM) libraries must also
            # match the requested wavefront-error realization group.
            if not wings and segment_id is None and not itm_sim and default_psf:
                match = match and file_wfe_grp == wfe_group

            if segment_id is not None:
                match = match and file_segment_id == segment_id
            elif not itm_sim and default_psf:
                match = match and file_wfe == wfe

            # If so, add to the list of all matches
            if match:
                matches.append(filename)

        except KeyError as e:
            # Skip files that are missing expected header keywords rather
            # than failing the whole search.
            warnings.warn('While searching for PSF file, error raised when examining {}:\n{}\nContinuing.'.format(os.path.basename(filename), e))
            continue

    # Find files matching the requested inputs
    if len(matches) == 1:
        return matches[0]
    elif len(matches) == 0:
        logger.info('Requested parameters:\ninstrument {}\ndetector {}\nfilt {}\npupil {}\nwfe {}\n'
                    'wfe_group {}\nlibrary_path {}\n'.format(instrument, detector, filt, pupil, wfe,
                                                             wfe_group, library_path))
        raise ValueError("No PSF library file found matching requested parameters.")
    elif len(matches) > 1:
        raise ValueError("More than one PSF library file matches requested parameters: {}".format(matches))
f6b18cc2e57544685c6c8aaf95c7da44a4039ef8
3,636,375
def html_mail(sender='me@mail.com', recipients=['them@mail.com'],
              html_content='<p>Hi</p>', subject='Hello!', mailserver='localhost'):
    """
    Compose and send a multipart (plain-text + HTML) email.

    A plain-text alternative is generated from ``html_content`` with
    ``html2text`` so the message renders for recipients whose mail reader
    is not HTML-capable.

    :param sender: envelope/From address.
    :param recipients: list of recipient addresses.
    :param html_content: HTML body of the message.
    :param subject: subject line.
    :param mailserver: SMTP host to relay through.
    :returns: the dict of refused recipients from ``smtplib.sendmail`` if
        non-empty, otherwise ``None`` (also ``None`` on send failure).
    """
    # Create an email message container - uses multipart/alternative.
    message = MIMEMultipart('alternative')
    message['Subject'] = subject
    message['From'] = sender
    # Bug fix: the 'To' header must be a string, not a list of addresses.
    message['To'] = ', '.join(recipients)

    # create a markdown text version of the supplied html content
    text = html2text.html2text(html_content)

    # Attach the plain-text part first so HTML-capable readers prefer the
    # HTML part, while plain readers still get readable text.
    message.attach(MIMEText(text, 'plain'))
    message.attach(MIMEText(html_content, 'html'))

    # Send the message via our SMTP server.
    conn = smtplib.SMTP(mailserver)
    try:
        # sendmail takes sender, recipients and the serialized message; it
        # returns a dict of refused recipients (empty on full success).
        return_value = conn.sendmail(sender, recipients, message.as_string())
        conn.quit()
    except Exception as e:
        # Bug fix: the original referenced ``return_value`` here before it
        # was ever assigned, raising NameError on any send failure.
        print("error sending email: html_mail.html_mail %s" % (e,))
        return None

    if return_value:
        return return_value
    return None
83257bb4718063153587bbc685c650595b911554
3,636,376
def gotu(input_path: str) -> biom.Table:
    """Generate a gOTU table based on sequence alignments."""
    # Run the workflow without a classification scheme and convert the
    # flat ('none' rank) profile into a BIOM table.
    profiles = workflow(input_path, None)
    return profile_to_biom(profiles['none'])
59e119392da0c6179ed84b6de7da517ccad5107d
3,636,377
def read_data_megset(beamf_type):
    """Load per-subject beamformer results and average them for plotting.

    Reads one CSV per subject, harmonizes the focality column, fills in
    missing settings with 'none', converts distances to millimetres and
    finally averages across subjects for every settings combination.
    """
    if beamf_type == 'lcmv':
        settings = config.lcmv_settings
        settings_columns = ['reg', 'sensor_type', 'pick_ori', 'inversion',
                            'weight_norm', 'normalize_fwd', 'use_noise_cov',
                            'reduce_rank']
        per_subject = []
        for subject in [1, 2, 4, 5, 6, 7]:
            frame = pd.read_csv(config.fname.lcmv_megset_results(subject=subject),
                                index_col=0)
            frame['subject'] = subject
            # LCMV results store focality under 'focs'; rename for plotting.
            per_subject.append(frame.rename(columns={'focs': 'focality'}))
        data = pd.concat(per_subject, ignore_index=True)
    elif beamf_type == 'dics':
        settings = config.dics_settings
        settings_columns = ['reg', 'sensor_type', 'pick_ori', 'inversion',
                            'weight_norm', 'normalize_fwd', 'real_filter',
                            'use_noise_cov', 'reduce_rank']
        per_subject = []
        for subject in [1, 4, 5, 6, 7]:
            frame = pd.read_csv(config.fname.dics_megset_results(subject=subject),
                                index_col=0)
            # DICS focality can be negative; only its magnitude matters.
            frame['focality'] = abs(frame['focality'])
            frame['subject'] = subject
            per_subject.append(frame)
        data = pd.concat(per_subject, ignore_index=True)
    else:
        raise ValueError(f'Unknown beamformer type "{beamf_type}".')

    # Missing weight normalization / orientation picks mean "none".
    data['weight_norm'] = data['weight_norm'].fillna('none')
    data['pick_ori'] = data['pick_ori'].fillna('none')
    data['dist'] *= 1000  # Measure distance in mm

    # Average across the subjects; one row per settings combination.
    data = data.groupby(settings_columns).agg('mean').reset_index()
    assert len(data) == len(settings)

    return data
6d2f3cd765779276e6730c56865e374058849662
3,636,378
import getpass
import logging


def collect_user_name():
    """
    Return the username as provided by the OS.

    Falls back to a constant if the OS lookup fails.
    """
    try:
        return getpass.getuser()
    except Exception as e:
        # Log the failure and fall back rather than propagating.
        logger = logging.getLogger(__name__)
        logger.warning(
            "Failed to collect the user name: error was {}.".format(e))
        return UNKNOWN_UNAME
40e18be0ea51659346c7f761bafa8af194937e14
3,636,379
def get_dataframe() -> pd.DataFrame:
    """Return a small dummy DataFrame with quantity/price columns.

    Fixed the return annotation: it previously read ``pd.DataFrame()``,
    which builds an (empty) DataFrame instance at definition time instead
    of referring to the class.
    """
    data = [
        {"quantity": 1, "price": 2},
        {"quantity": 3, "price": 5},
        {"quantity": 4, "price": 8},
    ]
    return pd.DataFrame(data)
3089e1a33f5f9b4df847db51271f7c3f936b351c
3,636,380
def get_node_mirna(mirna_name, taxid, psi_mi_to_sql_object):
    """
    Return a node dict for the given miRNA.

    If the node already exists in the SQLite database the stored record is
    returned, so the same node is never inserted twice; otherwise a fresh
    dict with the HPRD-prefixed name is built and returned.
    """
    # Prefer the record already stored in the database, if any.
    existing = psi_mi_to_sql_object.get_node(mirna_name, node_tax_id=taxid)
    if existing:
        return existing

    return {
        "name": 'HPRD:' + mirna_name,
        "tax_id": taxid,
        "alt_accession": None,
        'pathways': None,
        "aliases": None,
        "topology": None,
    }
daab8ab8f43c1e9395dbbc2640ecb78b39f6867f
3,636,381
import urllib
import sys


def Web(website, system_id, address, page, params = {}):
    """Routine for connecting to website that is hosting the database.

    POSTs the form-encoded ``params`` (plus the system id) to
    ``http://<website>/<page>`` and returns the raw response body, or the
    string 'error' when the request fails for any reason.

    NOTE(review): this is Python 2 code (httplib, urllib.urlencode).
    NOTE(review): ``params`` is a mutable default argument AND is mutated
    below — repeated calls without an explicit dict share state; confirm
    whether that is intentional.
    NOTE(review): the ``address`` parameter is never used in this body.
    """
    data = ''
    # System id is injected into the caller-supplied form parameters.
    params['systemid'] = system_id
    params = urllib.urlencode(params)
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
    try:
        # Plain HTTP on port 80; any failure (DNS, connect, read) falls
        # through to the bare except below.
        conn = httplib.HTTPConnection(website+':80')
        conn.request("POST", '/'+ page, params, headers)
        response = conn.getresponse()
        data = response.read()
        conn.close()
    except:
        Debug("Unable to connect to website " + str(sys.exc_info()[0]))
        return 'error'
    else:
        #print data
        return data
58fc756411a8dc23fa88937157df4c4900486a3a
3,636,382
def generate_output_file_name(input_file_name):
    """
    Derive the generated-notebook file name from an input file name.

    :type input_file_name: str
    """
    assert isinstance(input_file_name, str)
    # Append the generated-notebook suffix to the input name.
    return f"{input_file_name}.gen.ipynb"
e638d676048e062711ca1a09d88a12d76fb9239d
3,636,383
def prefixed_field_map(name: str) -> Mapper:
    """
    Build a field map whose getter adds the signed prefix and whose
    setter removes it.

    Arguments
    ---------
    name : str
        Name of the property.

    Returns
    -------
    Mapper
        Field map.

    See Also
    --------
    field_map
    """
    converters = {
        "api_to_python": add_signed_prefix_as_needed,
        "python_to_api": remove_signed_prefix_as_needed,
    }
    return field_map(name, **converters)
48be07a06f4f5b70d4e7b389a1896c94a181e62c
3,636,384
import sys


def a_star(grid, start, end):
    """A-star pathfinding over a pygame-rendered grid.

    Searches from ``start`` to ``end`` on ``grid`` (which exposes
    ``blocks``, ``r`` and ``c``), coloring visited cells as it goes and
    returning the list of positions from just after ``start`` up to and
    including ``end`` — or ``None`` (implicitly) if the open list empties.

    NOTE(review): relies on module-level names ``Node``, ``visited``,
    ``wall``, ``add_to_open`` and ``pygame`` defined elsewhere; exits the
    whole process on a pygame QUIT event.
    """
    # open and closed nodes
    open_nodes = []
    closed_nodes = []

    # Create a start node and an goal node
    start_node = Node(start, None)
    goal_node = Node(end, None)

    # Add the start node
    open_nodes.append(start_node)

    # Loop until the open list is empty
    while len(open_nodes) > 0:
        for event in pygame.event.get():
            # Checks for quit event
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()

        # Sort the open list to get the node with the lowest cost first
        # (Node ordering is presumably by f — confirm Node.__lt__).
        open_nodes.sort()

        # Get the node with the lowest cost
        current_node = open_nodes.pop(0)

        # Add the current node to the closed list, and paint it as visited
        # on screen.
        (i, j) = current_node.position
        grid.blocks[i][j].set_color(visited)
        pygame.display.update(grid.blocks[i][j].draw())
        closed_nodes.append(current_node)

        # Check if we have reached the goal, return the path
        if current_node == goal_node:
            path = []
            while current_node != start_node:
                path.append(current_node.position)
                current_node = current_node.parent
            # Return reversed path
            return path[::-1]

        # Unzip the current node position
        (x, y) = current_node.position

        # Get neighbors (4-connectivity, clipped to the grid bounds)
        neighbors = []
        if x - 1 >= 0:
            neighbors.append((x - 1, y))
        if x + 1 < grid.r:
            neighbors.append((x + 1, y))
        if y - 1 >= 0:
            neighbors.append((x, y - 1))
        if y + 1 < grid.c:
            neighbors.append((x, y + 1))

        # Loop neighbors
        for next in neighbors:
            # Get value from grid
            grid_value = grid.blocks[next[0]][next[1]]

            # Check if the node is a wall
            if grid_value.color == wall:
                continue

            # Create a neighbor node
            neighbor = Node(next, current_node)

            # Check if the neighbor is in the closed list
            if neighbor in closed_nodes:
                continue

            # Generate heuristics (Manhattan distance): g from the start,
            # h to the goal, f is their sum.
            neighbor.g = abs(neighbor.position[0] - start_node.position[0]) + abs(
                neighbor.position[1] - start_node.position[1]
            )
            neighbor.h = abs(neighbor.position[0] - goal_node.position[0]) + abs(
                neighbor.position[1] - goal_node.position[1]
            )
            neighbor.f = neighbor.g + neighbor.h

            # Check if neighbor is in open list and if it has a lower f value
            if add_to_open(open_nodes, neighbor) == True:
                open_nodes.append(neighbor)
b60fd2193b7b05b042aec29aa65cc3d57453f4d1
3,636,385
def is_water(residue):
    """
    Tell whether a residue is a water molecule.

    Parameters
    ----------
    residue : a residue from a protein structure object made with PDBParser().

    Returns
    -------
    Boolean
        True if residue is water, False otherwise.
    """
    # Water residues carry a hetero-flag beginning with 'W' as the first
    # element of the residue id tuple.
    hetflag = residue.get_id()[0]
    return hetflag[0] == 'W'
2d547da9dc8def26a2e9581a240efa5d513aab64
3,636,386
def gelu(input_tensor):
    """Gaussian Error Linear Unit.

    A smoother alternative to RELU.
    Original paper: https://arxiv.org/abs/1606.08415

    Args:
      input_tensor: float Tensor to perform activation.

    Returns:
      `input_tensor` with the GELU activation applied.
    """
    # GELU(x) = x * Phi(x), with Phi the standard-normal CDF expressed
    # through the error function.
    normal_cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
    return input_tensor * normal_cdf
c4f8fead676ef7e4b036f8c94467402445044330
3,636,387
def index():
    """Route for signing either a policy document or REST headers.

    If the JSON payload contains 'headers', those are signed directly;
    otherwise the AWS credential is extracted from the policy 'conditions'
    and the raw request body is signed as a policy document.
    """
    request_payload = request.get_json()
    if request_payload.get('headers'):
        response_data = sign_headers(request_payload['headers'])
    else:
        # NOTE(review): ``.values()[0]`` is Python-2 only — on Python 3
        # dict views are not subscriptable and this line would raise
        # TypeError; would need ``list(...)[0]`` there. Confirm target
        # Python version.
        credential = [c for c in request_payload['conditions'] if 'x-amz-credential' in c][0].values()[0]
        response_data = sign_policy(request.data, credential)
    return jsonify(response_data)
f914761d9b58a290f20b537c4df9ce8272dd7b6c
3,636,388
from sphinx.jinja2glue import BuiltinTemplateLoader


def create_template_bridge(self):
    """Return the configured template bridge instance.

    Instantiates the class named by the ``template_bridge`` setting when
    it is set, otherwise falls back to Sphinx's built-in template loader.
    """
    bridge_path = self.config.template_bridge
    if not bridge_path:
        return BuiltinTemplateLoader()
    bridge_cls = self.app.import_object(bridge_path, 'template_bridge setting')
    return bridge_cls()
64a7a1dd70035b2507a0b8b9b3af8c91ccadb0ec
3,636,389
def make_answer(rdtype, answers=None, additionals=None, authorities=None):
    """For mocking an answer. We make an answer without any message (what would
    normally come over the network, to be parsed. We instead make a blank object
    for the sake of test complexity, and later attach the appropriate\
    rrsets to the answer. This may cause some tests to fail that test attributes
    that are assigned during the creation of an Answer (flags?).
    The answers, additionals, and authorities should be lists of strings, with
    data fields space-separated. Each string representing one RR. See RFC for
    order of field per type. ex: MX would be '<Preference> <Mail Exchanger>'
    """
    # Build a bare Answer around an empty message; rrsets get attached below.
    answer = dns.resolver.Answer(
        dns.name.from_text(TEST_DOMAIN),
        getattr(dns.rdatatype, rdtype.upper()),
        dns.rdataclass.IN,
        dns.message.from_text(''),
        raise_on_no_answer=False
    )
    if answers:
        rrset = dns.rrset.from_text(TEST_DOMAIN, 60, 'IN', rdtype, *answers)
        answer.response.answer = [rrset]
    if additionals:
        rrset = dns.rrset.from_text(TEST_DOMAIN, 60, 'IN', rdtype, *additionals)
        # NOTE(review): this assigns to ``response.answer`` — overwriting
        # any rrset attached above — rather than ``response.additional``;
        # confirm whether that is intentional.
        answer.response.answer = [rrset]
    if authorities:
        rrset = dns.rrset.from_text(TEST_DOMAIN, 60, 'IN', rdtype, *authorities)
        answer.response.authority = [rrset]
    return answer
877f0acefd3082189c91885ff98ac056799c1e1b
3,636,390
def consolidate_payoff_results(period, reporter_configuration, simulation_output, score_map, priority_based):
    """Gather per-run payoff metrics from a simulation result.

    :param period: Description of the period.
    :param reporter_configuration: List of reporter configuration.
    :param simulation_output: Simulation output whose consolidated
        per-reporter records are annotated in place.
    :param score_map: Mapping from priority to payoff score.
    :param priority_based: Whether the payoff function is priority based.
    :return: Consolidated metrics in a list.
    """
    consolidated = simulation_output.get_consolidated_output(reporter_configuration)

    # Lazy %-style arguments keep formatting out of the hot path.
    logger.info(
        "Payoff function parameters: Priority-based %s Severe Score: %s Non-Severe Score %s",
        priority_based,
        score_map[simdata.SEVERE_PRIORITY],
        score_map[simdata.NON_SEVERE_PRIORITY])

    # Annotate every reporter record with the period and its payoff score.
    for record in consolidated:
        record["period"] = period
        record["payoff_score"] = get_payoff_score(
            reporter_info=record,
            score_map=score_map,
            priority_based=priority_based)

    return consolidated
34fc8079455972455fcd47aa1e582017d0a0d83a
3,636,391
def single_ray_belief_propagation(
    S,
    ray_voxel_indices,
    ray_to_occupancy_accumulated_pon,
    ray_to_occupancy_messages_pon,
    output_size
):
    """Run the sum product belief propagation for a single ray

    Arguments
    ---------
        S: tensor (M,) dtype=float32
            The depth probability distribution for that ray
        ray_voxel_indices: tensor (M, 3), dtype=int32
            The voxel indices in the voxel grid per ray
        ray_to_occupancy_accumulated_pon: tensor (D1, D2, D3), dtype=float32
            Accumulator used to hold the quotient of the positive ray to
            occupancy message with the negative ray to occupancy message
            (in logspace)
        ray_to_occupancy_messages_pon: tensor (M, 1), dtype=float32
            Ray to occupancy messages (in logspace)
        output_size: int
            Pad the output with 0 until its size is output_size

    Returns
    -------
        tensor (output_size,) of positive-over-negative ray-to-occupancy
        messages in logspace, zero-padded past the ray's M voxels.
    """
    # Incoming occupancy-to-ray messages (positive case) for the voxels
    # this ray traverses.
    occupancy_to_ray = extract_occupancy_to_ray_pos(
        ray_voxel_indices,
        ray_to_occupancy_accumulated_pon,
        ray_to_occupancy_messages_pon
    )

    # Compute the cumulative products in linear time (see eq. 13, 14 Ulusoy
    # 3DV)
    # For the computation of the cumulative product we need
    # the occupancy-to-ray messages for the negative case.
    # We append 1 at the top because for the o_1 voxel this term is equal to 1
    occupancy_to_ray_neg_cumprod = K.tf.cumprod(
        1.0 - occupancy_to_ray,
        exclusive=True
    )

    # Compute the part of the messages that is the same for positive and
    # negative messages
    common_part = occupancy_to_ray_neg_cumprod * S
    ray_to_occupancy_new_common = K.tf.cumsum(
        occupancy_to_ray * common_part,
        exclusive=True,
    )

    # Finalize the positive messages
    ray_to_occupancy_new_positive = common_part + ray_to_occupancy_new_common

    # Finalize the negative messages (adding 2nd part of eq. 14 Ulusoy 3DV)
    # The summations we want to calculate are as follows:
    # i=1, \sum_{i=2}^N(\cdot)
    # i=2, \sum_{i=3}^N(\cdot)
    # ...
    # i=N-2, \sum_{i=N-1}^N(\cdot)
    # lets assume that we have [a, b, c, d, e]. We first inverse the array,
    # thus resulting in [e, d, c, b, a] and then we compute the cumulative sum
    # on this array. The output is [e, e+d, e+d+c, e+d+c+b, e+d+c+b+a]. However
    # we want them in the inverse order, thus we inverse the output once again
    # and we have [e+d+c+b+a, e+d+c+b, e+d+c, e+d, e]
    # Finally we also divide with the incoming message for the negative case
    t1 = K.tf.cumsum(
        occupancy_to_ray * common_part,
        reverse=True,
        exclusive=True
    )
    t2 = t1 / (1.0 - occupancy_to_ray)
    ray_to_occupancy_new_negative = ray_to_occupancy_new_common + t2

    # Normalize the positive ray_to_occupancy message
    ray_to_occupancy_new_pos =\
        ray_to_occupancy_new_positive / (ray_to_occupancy_new_positive + ray_to_occupancy_new_negative)

    # Move back to logspace as a positive-over-negative quotient (logit).
    ray_to_occupancy_pon = K.log(ray_to_occupancy_new_pos) - K.log(1.0 - ray_to_occupancy_new_pos)

    # Make the size equal to the output_size by appending 0s
    M = K.shape(ray_to_occupancy_pon)[0]
    ray_to_occupancy_pon = K.concatenate([
        ray_to_occupancy_pon,
        K.tf.zeros((output_size-M,))
    ])

    return ray_to_occupancy_pon
5c9c7acbf13e0f0f8adef3f5bebb5b14b81a2b8e
3,636,392
def cse_postprocess(cse_output):
    """ Perform CSE Postprocessing

        :arg:    output from SymPy CSE with tuple format: (list of ordered pairs that
                 contain substituted symbols and their replaced expressions, reduced SymPy expression)
        :return: output from SymPy CSE where postprocessing, such as back-substitution of
                 addition/product of symbols, has been applied to the replaced/reduced expression(s)

        >>> from sympy.abc import x, y
        >>> from sympy import cse, cos, sin

        >>> cse_out = cse(3 + x + cos(3 + x))
        >>> cse_postprocess(cse_out)
        ([], [x + cos(x + 3) + 3])

        >>> cse_out = cse(3 + x + y + cos(3 + x + y))
        >>> cse_postprocess(cse_out)
        ([(x0, x + y + 3)], [x0 + cos(x0)])

        >>> cse_out = cse(3*x + cos(3*x))
        >>> cse_postprocess(cse_out)
        ([], [3*x + cos(3*x)])

        >>> cse_out = cse(3*x*y + cos(3*x*y))
        >>> cse_postprocess(cse_out)
        ([(x0, 3*x*y)], [x0 + cos(x0)])

        >>> cse_out = cse(x**2 + cos(x**2))
        >>> cse_postprocess(cse_out)
        ([], [x**2 + cos(x**2)])

        >>> cse_out = cse(x**3 + cos(x**3))
        >>> cse_postprocess(cse_out)
        ([(x0, x**3)], [x0 + cos(x0)])

        >>> cse_out = cse(x*y + cos(x*y) + sin(x*y))
        >>> cse_postprocess(cse_out)
        ([(x0, x*y)], [x0 + sin(x0) + cos(x0)])

        >>> from sympy import exp, log
        >>> expr = -x + exp(-x) + log(-x)
        >>> cse_pre = cse_preprocess(expr, declare=True)
        >>> cse_out = cse(cse_pre[0])
        >>> cse_postprocess(cse_out)
        ([], [_NegativeOne_*x + exp(_NegativeOne_*x) + log(_NegativeOne_*x)])
    """
    replaced, reduced = cse_output
    # Work on shallow copies so the caller's CSE output is not mutated.
    replaced, reduced = replaced[:], reduced[:]
    i = 0
    while i < len(replaced):
        sym, expr = replaced[i]; args = expr.args
        # Search through replaced expressions for negative symbols
        # (a product of a symbol with -1 or the _NegativeOne_ marker);
        # such trivial substitutions are back-substituted and dropped.
        if (expr.func == sp.Mul and len(expr.args) == 2 and any(a1.func == sp.Symbol and \
                (a2 == sp.S.NegativeOne or '_NegativeOne_' in str(a2)) for a1, a2 in [args, reversed(args)])):
            for k in range(i + 1, len(replaced)):
                if sym in replaced[k][1].free_symbols:
                    replaced[k] = (replaced[k][0], replaced[k][1].subs(sym, expr))
            for k in range(len(reduced)):
                if sym in reduced[k].free_symbols:
                    reduced[k] = reduced[k].subs(sym, expr)
            # Remove the replaced expression from the list
            replaced.pop(i)
            # Step back so the entry shifted into slot i is re-examined.
            if i != 0: i -= 1
        # Search through replaced expressions for addition/product of 2 or less symbols
        # (or a plain square of a symbol) — candidates for back-substitution.
        if ((expr.func == sp.Add or expr.func == sp.Mul) and 0 < len(expr.args) < 3 and \
                all((arg.func == sp.Symbol or arg.is_integer or arg.is_rational) for arg in expr.args)) or \
                (expr.func == sp.Pow and expr.args[0].func == sp.Symbol and expr.args[1] == 2):
            sym_count = 0 # Count the number of occurrences of the substituted symbol
            for k in range(len(replaced) - i):
                # Check if the substituted symbol appears in the replaced expressions
                if sym in replaced[i + k][1].free_symbols:
                    for arg in sp.preorder_traversal(replaced[i + k][1]):
                        if arg.func == sp.Symbol and str(arg) == str(sym):
                            sym_count += 1
            for k in range(len(reduced)):
                # Check if the substituted symbol appears in the reduced expression
                if sym in reduced[k].free_symbols:
                    for arg in sp.preorder_traversal(reduced[k]):
                        if arg.func == sp.Symbol and str(arg) == str(sym):
                            sym_count += 1
            # If the number of occurrences of the substituted symbol is 2 or less, back-substitute
            if 0 < sym_count < 3:
                for k in range(i + 1, len(replaced)):
                    if sym in replaced[k][1].free_symbols:
                        replaced[k] = (replaced[k][0], replaced[k][1].subs(sym, expr))
                for k in range(len(reduced)):
                    if sym in reduced[k].free_symbols:
                        reduced[k] = reduced[k].subs(sym, expr)
                # Remove the replaced expression from the list
                replaced.pop(i); i -= 1
        i += 1
    return replaced, reduced
3,636,393
def lap(j, s, alpha):
    """Laplace coefficient b_s^j(alpha).

    Evaluates (1/pi) * integral from 0 to 2*pi of
    cos(j*x) / (1 - 2*alpha*cos(x) + alpha**2)**s dx by numerical
    quadrature.
    """
    def integrand(x):
        return np.cos(j * x) / (1.0 - 2.0 * alpha * np.cos(x) + alpha ** 2) ** s

    value, _ = integrate.quad(integrand, 0.0, 2.0 * np.pi)
    return value / np.pi
013cb3611e7f678aac560896b80930e19e3d0579
3,636,394
import google


def calendar_add():
    """Adds a calendar to the database according to the infos in POST data.\
    Also creates the calendar in google calendar service if no google_calendar_id
    is present in POST data.
    """
    calendar_name = request.form["calendar_name"]
    std_email = request.form["std_email"]
    google_calendar_id = request.form["google_calendar_id"]

    if check_google_calendar_id(google_calendar_id):
        # Add the google calendar directly to the local DB (Assume that
        # Calendar has been already created)
        cal_obj = Calendar(summary=calendar_name,
                           std_email=std_email,
                           calendar_id_google=google_calendar_id)
        try:
            db.session.add(cal_obj)
            db.session.commit()
        except Exception:
            # DB insert failed; report and bounce back to the overview.
            flash(('Could not add calendar {} to google calendar'.format(
                calendar_name)), category="error")
            return redirect(url_for("get_calendars"))
        return redirect(url_for("get_calendars"))
    else:
        # Creating a google calendar and receiving the gcal ID from Google.
        # Only do so when no calendar with that summary exists locally.
        cal_record = Calendar.query.filter_by(summary=calendar_name).first()
        if cal_record is None:
            calendar__ = {
                'summary': calendar_name,
                'timeZone': 'Africa/Algiers'
            }
            resp = google.post("/calendar/v3/calendars", json=calendar__)
            if resp.status_code == 200:
                if "id" in resp.json().keys():
                    # Persist the Google-assigned calendar id locally.
                    calendar_id = resp.json()["id"]
                    calendar_obj = Calendar(calendar_id_google=calendar_id,
                                            summary=calendar_name,
                                            std_email=std_email)
                    db.session.add(calendar_obj)
                    db.session.commit()
                    flash(('Added calendar {} to google calendar'.format(
                        calendar_name)), category="success")
                    return redirect(url_for("get_calendars"))
                else:
                    flash(("Invalid response from calendar api"),
                          category="danger")
                    return redirect(url_for('get_calendars')), 302
            else:
                flash(("Calendar API returned a non 200 response"),
                      category="danger")
                return redirect(url_for('get_calendars')), 302
        else:
            flash(("Calendar {} already found in application database".format(
                calendar_name)), category="info")
            return redirect(url_for('get_calendars')), 302
3,636,395
def proportional_allocation_by_location_and_activity(df, sectorcolumn): """ Creates a proportional allocation within each aggregated sector within a location :param df: :param sectorcolumn: :return: """ # tmp replace NoneTypes with empty cells df = replace_NoneType_with_empty_cells(df) # denominator summed from highest level of sector grouped by location short_length = min(df[sectorcolumn].apply(lambda x: len(str(x))).unique()) # want to create denominator based on short_length denom_df = df.loc[df[sectorcolumn].apply(lambda x: len(x) == short_length)].reset_index(drop=True) grouping_cols = [e for e in ['FlowName', 'Location', 'Activity', 'ActivityConsumedBy', 'ActivityProducedBy'] if e in denom_df.columns.values.tolist()] denom_df.loc[:, 'Denominator'] = denom_df.groupby(grouping_cols)['HelperFlow'].transform('sum') # list of column headers, that if exist in df, should be aggregated using the weighted avg fxn possible_column_headers = ('Location', 'LocationSystem', 'Year', 'Activity', 'ActivityConsumedBy', 'ActivityProducedBy') # list of column headers that do exist in the df being aggregated column_headers = [e for e in possible_column_headers if e in denom_df.columns.values.tolist()] merge_headers = column_headers.copy() column_headers.append('Denominator') # create subset of denominator values based on Locations and Activities denom_df_2 = denom_df[column_headers].drop_duplicates().reset_index(drop=True) # merge the denominator column with fba_w_sector df allocation_df = df.merge(denom_df_2, how='left', left_on=merge_headers, right_on=merge_headers) # calculate ratio allocation_df.loc[:, 'FlowAmountRatio'] = allocation_df['HelperFlow'] / allocation_df['Denominator'] allocation_df = allocation_df.drop(columns=['Denominator']).reset_index(drop=True) # fill empty cols with NoneType allocation_df = replace_strings_with_NoneType(allocation_df) # fill na values with 0 allocation_df['HelperFlow'] = allocation_df['HelperFlow'].fillna(0) return allocation_df
fb9376411d0b448a99563f41091f43863b694b8f
3,636,396
import os


def Console():
    """
    Factory that returns the Console object most appropriate for the
    current OS environment.

    Raises NotImplementedError on operating systems other than POSIX
    and Windows ('nt').
    """
    if os.name == "posix":
        return POSIXConsole()
    if os.name == "nt":
        return NTConsole()
    raise NotImplementedError(
        "Console support not implemented for OS '{}'.".format(os.name))
3,636,397
def prepare_target():
    """
    Creates an example target face.

    :return: (vertices, faces) where vertices is a list of RFTargetVertex
        and faces is a list of index triples into it.
    """
    size = 2
    vertices = [
        rfsm.RFTargetVertex(0, 0, 0, -size, 1 - size),
        rfsm.RFTargetVertex(0, 1, 1, -size, size),
        rfsm.RFTargetVertex(1, 0.5, 0.5, size, 0.45),
    ]
    # A single triangular face spanning all three vertices.
    faces = [[0, 1, 2]]
    return vertices, faces
3,636,398
import json


def data_fixture():
    """Return the evil_genius_labs fixture payload keyed by item name."""
    items = json.loads(load_fixture("data.json", "evil_genius_labs"))
    return {entry["name"]: entry for entry in items}
9eebcabb4f1517d66f76be8956f74ce438aab8f1
3,636,399