content
stringlengths
22
815k
id
int64
0
4.91M
def check_type(instance, *classes):
    """Return True if the instance's type, or any of its base classes,
    matches one of the given class names (compared by ``__name__``)."""
    # Collect the names of every class in the MRO once, up front.
    mro_names = {base.__name__ for base in getmro(type(instance))}
    own_name = type(instance).__name__
    for klass in classes:
        if own_name == klass or klass in mro_names:
            return True
    return False
5,328,900
def test_get_earth_imperative_solution(solar_system): """ ## Imperative Solution The first example uses flow control statements to define a [Imperative Solution]( https://en.wikipedia.org/wiki/Imperative_programming). This is a very common approach to solving problems. """ def get_planet_by_name(name, the_solar_system): try: planets = the_solar_system['star']['planets'] for arc in planets.values(): for planet in arc: if name == planet.get('name', None): return planet except KeyError: pass return None actual = get_planet_by_name('Earth', solar_system) expected = {'Number of Moons': '1', 'diameter': 12756, 'has-moons': True, 'name': 'Earth'} assert actual == expected
5,328,901
def test_eeglab_event_from_annot():
    """Test all forms of obtaining annotations."""
    # The test fixture lives in the mne-testing data tree.
    data_dir = op.join(testing.data_path(download=False), 'EEGLAB')
    set_fname = op.join(data_dir, 'test_raw.set')
    raw = read_raw_eeglab(input_fname=set_fname, preload=False)
    assert len(raw.annotations) == 154
    # Re-reading the annotations from disk and re-attaching them must
    # yield the same number of events.
    annot = read_annotations(set_fname)
    raw.set_annotations(annot)
    events, _ = events_from_annotations(raw, event_id={'rt': 1, 'square': 2})
    assert len(events) == 154
5,328,902
def parse(request):
    """
    A form that lets an authorized user import and then parse data files
    in the incoming directory.

    :param request: the Django HTTP request; POST triggers a parse run,
        anything else just renders the form.
    :return: rendered ``electionaudits/parse.html`` response listing the
        files currently in the incoming directory.
    """
    # Renamed from `dir`, which shadowed the `dir` builtin.
    incoming_dir = os.path.join(
        os.path.dirname(os.path.dirname(__file__)), 'incoming')
    if request.method == 'POST':
        parse_form = forms.Form(request.POST)
        if parse_form.is_valid():
            options = electionaudits.parsers.set_options(["-c", "-s"])
            electionaudits.parsers.parse([incoming_dir], options)
    else:
        parse_form = forms.Form()
    return render_to_response('electionaudits/parse.html', {
        'parse_form': parse_form,
        'parse': os.listdir(incoming_dir)
    })
5,328,903
def animate(map, time, phase0=0.0, res=75, interval=75):
    """Animate a starry map of the Earth as seen by TESS.

    Renders the map at each time sample together with three orbit-plane
    views (xz, xy, zy) of the spacecraft position, and returns the
    integrated flux per frame.

    :param map: a starry map object (mutated in place during rendering,
        then restored at the end).
    :param time: array of times; TJD offsets added to the module-level
        ``TJD0`` — TODO confirm units against the SPICE kernels.
    :param phase0: initial rotational phase offset in degrees.
    :param res: rendering resolution (res x res image).
    :param interval: animation frame interval in milliseconds.
    :return: ``np.nansum(Z, axis=(1, 2))`` — total rendered flux per frame.
    """
    # Load the SPICE data
    ephemFiles = glob.glob('../data/TESS_EPH_PRE_LONG_2018*.bsp')
    tlsFile = '../data/tess2018338154046-41240_naif0012.tls'
    solarSysFile = '../data/tess2018338154429-41241_de430.bsp'
    #print(spice.tkvrsn('TOOLKIT'))
    for ephFil in ephemFiles:
        spice.furnsh(ephFil)
    spice.furnsh(tlsFile)
    spice.furnsh(solarSysFile)
    # JD time range
    allTJD = time + TJD0
    nT = len(allTJD)
    # NOTE(review): np.float was removed in NumPy >= 1.24; should become
    # the builtin float when this code is next touched.
    allET = np.zeros((nT,), dtype=np.float)
    for i, t in enumerate(allTJD):
        allET[i] = spice.unitim(t, 'JDTDB', 'ET')
    # Calculate positions of TESS, the Earth, and the Sun
    tess = np.zeros((3, len(allET)))
    sun = np.zeros((3, len(allET)))
    for i, et in enumerate(allET):
        # 'Mgs Simulation' is the ephemeris object name for the spacecraft
        # in these kernels — presumably repurposed; verify against the .bsp.
        outTuple = spice.spkezr('Mgs Simulation', et, 'J2000', 'NONE', 'Earth')
        tess[0, i] = outTuple[0][0] * REARTH
        tess[1, i] = outTuple[0][1] * REARTH
        tess[2, i] = outTuple[0][2] * REARTH
        outTuple = spice.spkezr('Sun', et, 'J2000', 'NONE', 'Earth')
        sun[0, i] = outTuple[0][0] * REARTH
        sun[1, i] = outTuple[0][1] * REARTH
        sun[2, i] = outTuple[0][2] * REARTH
    # Figure setup: three orbit panels plus one rendered-image panel.
    fig = plt.figure(figsize=(8, 8))
    ax = np.zeros((2, 2), dtype=object)
    ax[0, 0] = plt.subplot(221)
    ax[0, 1] = plt.subplot(222)
    ax[1, 0] = plt.subplot(223, sharex=ax[0, 0], sharey=ax[0, 0])
    ax[1, 1] = plt.subplot(224, sharex=ax[0, 0], sharey=ax[0, 0])
    for axis in [ax[0, 0], ax[1, 0], ax[1, 1]]:
        axis.set_aspect(1)
        axis.set_xlim(-65, 65)
        axis.set_ylim(-65, 65)
        for tick in axis.xaxis.get_major_ticks() + axis.yaxis.get_major_ticks():
            tick.label.set_fontsize(10)
    i = 0
    # Orbit xz
    ax[0, 0].plot(tess[0], tess[2], "k.", ms=1, alpha=0.025)
    txz, = ax[0, 0].plot(tess[0, i], tess[2, i], 'o', color="C0", ms=4)
    # Sun direction in the xz plane sets the day/night terminator wedge.
    norm = 1. / np.sqrt(sun[0, i] ** 2 + sun[2, i] ** 2)
    x = sun[0, i] * norm
    y = sun[2, i] * norm
    theta = 180. / np.pi * np.arctan2(y, x)
    dayxz = Wedge((0, 0), 5, theta - 90, theta + 90, color=cmap(0.8))
    nightxz = Wedge((0, 0), 5, theta + 90, theta + 270, color=cmap(0.0))
    ax[0, 0].add_artist(dayxz)
    ax[0, 0].add_artist(nightxz)
    ax[0, 0].set_ylabel("z", fontsize=16)
    # Orbit xy
    ax[1, 0].plot(tess[0], tess[1], "k.", ms=1, alpha=0.025)
    txy, = ax[1, 0].plot(tess[0, i], tess[1, i], 'o', color="C0", ms=4)
    norm = 1. / np.sqrt(sun[0, i] ** 2 + sun[1, i] ** 2)
    x = sun[0, i] * norm
    y = sun[1, i] * norm
    theta = 180. / np.pi * np.arctan2(y, x)
    dayxy = Wedge((0, 0), 5, theta - 90, theta + 90, color=cmap(0.8))
    nightxy = Wedge((0, 0), 5, theta + 90, theta + 270, color=cmap(0.0))
    ax[1, 0].add_artist(dayxy)
    ax[1, 0].add_artist(nightxy)
    ax[1, 0].set_xlabel("x", fontsize=16)
    ax[1, 0].set_ylabel("y", fontsize=16)
    # Orbit zy
    ax[1, 1].plot(tess[2], tess[1], "k.", ms=1, alpha=0.025)
    tzy, = ax[1, 1].plot(tess[2, i], tess[1, i], 'o', color="C0", ms=4)
    norm = 1. / np.sqrt(sun[2, i] ** 2 + sun[1, i] ** 2)
    x = sun[2, i] * norm
    y = sun[1, i] * norm
    theta = 180. / np.pi * np.arctan2(y, x)
    dayzy = Wedge((0, 0), 5, theta - 90, theta + 90, color=cmap(0.8))
    nightzy = Wedge((0, 0), 5, theta + 90, theta + 270, color=cmap(0.0))
    ax[1, 1].add_artist(dayzy)
    ax[1, 1].add_artist(nightzy)
    ax[1, 1].set_xlabel("z", fontsize=16)
    # Render the image
    # Normalize time to [-1, 1] for the map's temporal coefficient.
    t = (time - time[0]) / (time[-1] - time[0])
    t = 2 * (t - 0.5)
    Z = np.empty((len(time), res, res))
    north_pole = np.empty((len(time), 3))
    # Snapshot the map coefficients so each frame starts from scratch.
    y = np.array(map[:, :, :])
    for i in tqdm(range(len(time))):
        # Reset the map and rotate it to the correct phase
        # in the mean equatorial (J2000) frame
        map[:, :, :] = y
        '''
        map.axis = [0, 1, 0]
        phase = (360. * time[i]) % 360. + phase0
        map.rotate(phase)
        '''
        # Rotate so that TESS is along the +z axis
        r = np.sqrt(np.sum(tess[:, i] ** 2))
        costheta = np.dot(tess[:, i], [0, 0, r])
        axis = np.cross(tess[:, i], [0, 0, r])
        sintheta = np.sqrt(np.sum(axis ** 2))
        axis /= sintheta
        theta = 180. / np.pi * np.arctan2(sintheta, costheta)
        R = starry.RAxisAngle(axis, theta)
        north_pole[i] = np.dot(R, [0, 0, 1])
        source = np.dot(R, sun[:, i])
        source /= np.sqrt(np.sum(source ** 2, axis=0))
        '''
        map.axis = axis
        map.rotate(theta)
        '''
        # Align the pole of the Earth with the "north" direction
        costheta = np.dot([0, 1, 0], north_pole[i])
        axis = np.cross([0, 1, 0], north_pole[i])
        sintheta = np.sqrt(np.sum(axis ** 2))
        axis /= sintheta
        theta = 180. / np.pi * np.arctan2(sintheta, costheta)
        map.axis = axis
        map.rotate(theta)
        # Rotate to the correct phase
        map.axis = north_pole[i]
        phase = (360. * time[i]) % 360. + phase0
        map.rotate(phase)
        # Finally, rotate the image so that north always points up
        # This doesn't actually change the integrated flux!
        map.axis = [0, 0, 1]
        theta = 180. / np.pi * np.arctan2(north_pole[i, 0], north_pole[i, 1])
        map.rotate(theta)
        R = starry.RAxisAngle([0, 0, 1], theta)
        north_pole[i] = np.dot(R, north_pole[i])
        source = np.dot(R, source)
        # Render the image
        Z[i] = map.render(t=t[i], source=source, res=res)[0]
    # Reset the map
    map[:, :, :] = y
    map.axis = [0, 1, 0]
    # Image
    vmin = 0.0
    vmax = np.nanmax(Z)
    cmap.set_under(cmap(vmin))
    image = ax[0, 1].imshow(Z[0], extent=(-1, 1, -1, 1), origin="lower",
                            cmap=cmap, vmin=vmin, vmax=vmax)
    # N/S pole markers: only the hemisphere facing the observer is shown.
    npl, = ax[0, 1].plot(north_pole[0, 0], north_pole[0, 1],
                         marker=r"$N$", color="r")
    spl, = ax[0, 1].plot(-north_pole[0, 0], -north_pole[0, 1],
                         marker=r"$S$", color="b")
    if north_pole[0, 2] > 0:
        npl.set_visible(True)
        spl.set_visible(False)
    else:
        npl.set_visible(False)
        spl.set_visible(True)
    ax[0, 1].axis("off")
    ax[0, 1].set_xlim(-1.1, 1.1)
    ax[0, 1].set_ylim(-1.1, 1.1)

    # Function to animate each frame
    def update(i):
        # Update orbit
        txz.set_xdata(tess[0, i])
        txz.set_ydata(tess[2, i])
        norm = 1. / np.sqrt(sun[0, i] ** 2 + sun[2, i] ** 2)
        x = sun[0, i] * norm
        y = sun[2, i] * norm
        theta = 180. / np.pi * np.arctan2(y, x)
        dayxz.set_theta1(theta - 90)
        dayxz.set_theta2(theta + 90)
        nightxz.set_theta1(theta + 90)
        nightxz.set_theta2(theta + 270)
        txy.set_xdata(tess[0, i])
        txy.set_ydata(tess[1, i])
        norm = 1. / np.sqrt(sun[0, i] ** 2 + sun[1, i] ** 2)
        x = sun[0, i] * norm
        y = sun[1, i] * norm
        theta = 180. / np.pi * np.arctan2(y, x)
        dayxy.set_theta1(theta - 90)
        dayxy.set_theta2(theta + 90)
        nightxy.set_theta1(theta + 90)
        nightxy.set_theta2(theta + 270)
        tzy.set_xdata(tess[2, i])
        tzy.set_ydata(tess[1, i])
        norm = 1. / np.sqrt(sun[2, i] ** 2 + sun[1, i] ** 2)
        x = sun[2, i] * norm
        y = sun[1, i] * norm
        theta = 180. / np.pi * np.arctan2(y, x)
        dayzy.set_theta1(theta - 90)
        dayzy.set_theta2(theta + 90)
        nightzy.set_theta1(theta + 90)
        nightzy.set_theta2(theta + 270)
        image.set_data(Z[i])
        npl.set_xdata(north_pole[i, 0])
        npl.set_ydata(north_pole[i, 1])
        spl.set_xdata(-north_pole[i, 0])
        spl.set_ydata(-north_pole[i, 1])
        if north_pole[i, 2] > 0:
            npl.set_visible(True)
            spl.set_visible(False)
        else:
            npl.set_visible(False)
            spl.set_visible(True)
        return txz, dayxz, nightxz, txy, dayxy, nightxy, \
            tzy, dayzy, nightzy, image, npl, spl

    # Generate the animation
    ani = FuncAnimation(fig, update, frames=len(time),
                        interval=interval, blit=False)
    try:
        # Inside a Jupyter (zmq) shell, embed the video; otherwise fall
        # through to a regular plt.show() via the NameError path.
        if 'zmqshell' in str(type(get_ipython())):
            plt.close()
            display(HTML(ani.to_html5_video()))
        else:
            raise NameError("")
    except NameError:
        plt.show()
        plt.close()
    return np.nansum(Z, axis=(1, 2))
5,328,904
def get_params_from_ctx(func=None, path=None, derive_kwargs=None):
    """
    Derive parameters for this function from ctx, if possible.

    :param str path:
        A path in the format ``'ctx.arbitraryname.unpackthistomyparams'``
        to use to find defaults for the function. Default:
        ``'ctx.mymodulename.myfuncname'``. It's good to pass this
        explicitly to make it clear where your arguments are coming from.
    :param Callable derive_kwargs:
        Overkill. Is passed ``ctx`` as first arg, expected to return a
        dict of the format ``{'argname': 'defaultvalueforparam'}``.

    **Examples**::

        @get_params_from_ctx(path='ctx.randompath')  # just 'ctx' works as well
        def myfuncname(ctx, requiredparam1, namedparam2='trulyoptional'):
            print(requiredparam1, namedparam2)

    If your default is a callable we will call it with ``args[0]``. This is
    how :meth:`invoke.config.Lazy` works under the hood::

        @get_params_from_ctx
        def myfuncname(ctx,
                       namedparam0=Lazy('ctx.mynamedparam0'),
                       namedparam1=lambda ctx: ctx.myvalue * 4):
            print(namedparam1)  # 4, if myvalue == 1 :)

    **Why do I need this?** Without it, a task that wants config-backed
    defaults needs boilerplate like
    ``requiredparam1 = requiredparam1 or ctx.myfuncname.requiredparam1``
    for every parameter, hiding the real semantics and repeating every
    parameter name many times. This decorator mirrors that cascade with a
    single line, so the raw python function matches the cmd-line task.

    The cascading order for finding an argument value is as follows:

    1. directly passed (i.e. ``task(ctx, 'arghere')`` or ``--arg arghere``
       on the cmd line)
    2. from config (``ctx`` arg) (defaults to ctx.__module__.func.__name__)
    3. function defaults (``def myfunc(ctx, default=1)``) — default
       parameter values that are callable are called with ``callable(ctx)``
       to get the value that should be used for a default.

    .. versionadded:: 0.1
    """
    if func is None:
        # Dirty hack taken from the wrapt documentation :)
        # Supports both @get_params_from_ctx and @get_params_from_ctx(...).
        return partial(
            get_params_from_ctx, derive_kwargs=derive_kwargs, path=path
        )
    # Only up here so we can use it to generate ParseError when decorated func gets called.
    sig = signature(func)
    func_name = _get_full_name(func)
    func.ctx_path = path or 'ctx.{}'.format(func_name)
    debug("Set {}() param ctx-path to {!r}".format(func_name, func.ctx_path))
    if path:
        # Validate the user-supplied path eagerly, at decoration time.
        if path.endswith("."):
            raise ValueError(
                "Path can't end in .! Try 'ctx' instead of 'ctx.'."
            )
        if path.split(".")[0] not in names_for_ctx:
            raise ValueError(
                "Path {!r} into ctx for {}()'s args must start with 'ctx.' or 'c.'".format(
                    path, func_name
                )
            )
    user_passed_path = (
        path  # Necessary because otherwise doesn't go into closure on py2.
    )

    @functools.wraps(func)
    def customized_default_decorator(*args, **kwargs):
        """
        Creates a decorated function with the same argument list, but with
        almost every parameter optional. When called, looks for actually
        required params in ctx. Finally, calls original function.
        """
        # Will throw here if too many args/kwargs
        directly_passed = get_directly_passed(func, sig, args, kwargs)
        # Task.__call__ will error before us if ctx wasn't passed
        # Might want a non-task to be skippable, so just try to carry on without ctx.
        ctx = args[0] if args else None

        class fell_through:
            # Cheapest sentinel I can come up with
            pass

        cache = {'derived': {}, 'ctx_argdict': {}}  # Don't have nonlocal in py2

        def get_directly_passed_arg(param_name):
            # pop() so leftovers could be detected; falls through when absent.
            return directly_passed.pop(param_name, fell_through)

        def call_derive_kwargs_or_error(param_name):
            if not derive_kwargs:
                return fell_through
            # Call the user's derive_kwargs at most once, then reuse.
            if not cache['derived']:
                cache['derived'] = derive_kwargs(ctx)
            result = cache['derived']
            return result.get(param_name, fell_through)

        def traverse_path_for_argdict():
            # Could just use eval(path) with a similar trick to invoke.Lazy.
            if user_passed_path is None and not ctx:
                return {}  # that's fine
            elif user_passed_path and not ctx:
                # If explicitly ask us to traverse (with a path), but
                # don't give ctx, what can we do?
                # msg = "You gave path {!r} for {!r} args but 'ctx' (arg[0]) was {!r}.".format(path, func_name, ctx)
                msg = "'ctx' (arg[0]) was {!r}. Cannot get dict from {} for args of {!r}.".format(
                    ctx, user_passed_path, func_name
                )
                raise DerivingArgsError(msg)
            path = func.ctx_path
            seq = path.split(".")
            looking_in = ctx.get('config', ctx)  # Gracefully handle Configs (not usual Contexts)
            seq.pop(0)
            while seq:
                key = seq.pop(0)
                try:
                    looking_in = looking_in[key]
                except (KeyError, AttributeError) as e:
                    # NOTE(review): trailing comma makes msg a 1-tuple —
                    # looks unintended, but preserved here; confirm upstream.
                    msg = "while traversing path {!r} for {}() args.".format(path, func_name),
                    if user_passed_path:
                        reraise_with_context(
                            e, msg, DerivingArgsError
                        )
                    else:
                        debug("Ignoring {!r} {}".format(type(e).__name__, msg))
                        return {}
            return looking_in

        def get_from_ctx(param_name):
            # Traverse the ctx path lazily, once, and reuse the resulting dict.
            if not cache['ctx_argdict']:
                cache['ctx_argdict'] = traverse_path_for_argdict()
            return cache['ctx_argdict'].get(param_name, fell_through)

        # Defaults that are callables get invoked with ctx to produce a value.
        param_name_to_callable_default = {
            param_name: param.default
            for param_name, param in signature(func).parameters.items()
            if param.default is not param.empty and callable(param.default)
        }

        def call_callable_default(param_name):
            if param_name in param_name_to_callable_default:
                return param_name_to_callable_default[param_name](ctx)
            return fell_through

        # Decide through cascading what to use as the value for each parameter
        args_passing = {}
        expecting = sig.parameters
        for param_name in expecting:
            possibilities = (
                # First, positionals and kwargs
                get_directly_passed_arg,
                # Then check ctx
                get_from_ctx,
                call_derive_kwargs_or_error,  # Not really used/tested
                call_callable_default,
            )
            passing = fell_through
            for p in possibilities:
                try:
                    passing = p(param_name)
                except Exception as e:
                    if type(e) is DerivingArgsError:
                        raise
                    reraise_with_context(
                        e,
                        "in {!r} step of deriving args for param {!r} of {}()".format(
                            p.__name__, param_name, func_name
                        ),
                        DerivingArgsError
                    )
                if passing is not fell_through:
                    debug("{}(): {} found value {:.25}... for param {!r}".format(
                        func_name, p.__name__, str(passing), param_name)
                    )
                    break
                else:
                    debug("{}(): {} failed to find value for param {!r}".format(func_name, p.__name__, param_name))
            if passing is not fell_through:
                args_passing[param_name] = passing
        # Now, bind and supply defaults to see if any are still missing.
        # Partial bind and then error because funcsigs error msg succ.
        ba = sig.bind_partial(**args_passing)
        # getcallargs isn't there on funcsig version.
        missing = []
        for param in sig.parameters.values():
            if param.name not in ba.arguments and param.default is param.empty:
                missing.append(param.name)
        # TODO contribute these improved error messages back to funcsigs
        if missing:
            msg = ("{!r} did not receive required positional arguments: {!r}. "
                   "Looked in arguments passed directly to function and then {!r}.").format(
                func_name, ", ".join(
                    missing
                ), '{}.{}'.format(func.ctx_path, param_name)
            )
            raise TypeError(msg)
        # Now that we've generated a kwargs dict that is everything we know about how to call
        # this function, call it!
        # debug("Derived params {}".format({a: v for a, v in args_passing.items()
        #                                   if a != 'ctx' and a != 'c'}))
        # TODO We get an 'unexpected kwarg clean' here in Py2 if try to use it.
        # Funcsigs bug of not respecting __signature__? Review both sources
        return func(**args_passing)

    # myparams = (ctx=None, arg1=None, optionalarg1=olddefault)
    # Advertise a signature where every required param looks optional, so
    # invoke's CLI parsing won't demand them when ctx can supply them.
    myparams = [
        p.replace(default=None) if p.default is p.empty else p
        for p in sig.parameters.values()
    ]
    if not myparams or myparams[0].name not in names_for_ctx:
        raise ValueError("Can't have a derive_kwargs_from_ctx function that doesn't have a context arg!")
    # Don't provide default for ctx
    myparams[0] = list(sig.parameters.values())[0]
    mysig = sig.replace(parameters=myparams)
    generated_function = customized_default_decorator
    generated_function.__signature__ = mysig
    # print('sig here ', mysig.parameters)
    return generated_function
5,328,905
def bresenham(points):
    """Apply the Bresenham algorithm to consecutive pairs of points.

    More info: https://en.wikipedia.org/wiki/Bresenham's_line_algorithm

    # Arguments
        points: ndarray. Array of points with shape (N, 2) with N being the
            number of points and the second coordinate representing the
            (x, y) coordinates.

    # Returns
        ndarray: Array of points after having applied the bresenham
            algorithm to each consecutive pair. Shared endpoints of
            adjacent segments appear twice (original behaviour preserved).
    """
    # np.int was a deprecated alias removed in NumPy >= 1.24; the builtin
    # int is the documented drop-in replacement.
    points = np.asarray(points, dtype=int)

    def line(x0, y0, x1, y1):
        """Rasterize the segment (x0, y0)-(x1, y1) with Bresenham's algorithm."""
        d_x = x1 - x0
        d_y = y1 - y0
        x_sign = 1 if d_x > 0 else -1
        y_sign = 1 if d_y > 0 else -1
        d_x = abs(d_x)
        d_y = abs(d_y)
        # Transform coordinates so the driving axis (largest delta) is x.
        if d_x > d_y:
            xx, xy, yx, yy = x_sign, 0, 0, y_sign
        else:
            d_x, d_y = d_y, d_x
            xx, xy, yx, yy = 0, y_sign, x_sign, 0
        error = 2 * d_y - d_x
        y = 0
        # Renamed from `line` to avoid shadowing this function's own name.
        pixels = np.empty((d_x + 1, 2), dtype=points.dtype)
        for x in range(d_x + 1):
            pixels[x] = [x0 + x * xx + y * yx, y0 + x * xy + y * yy]
            if error >= 0:
                y += 1
                error -= 2 * d_x
            error += 2 * d_y
        return pixels

    nb_points = len(points)
    if nb_points < 2:
        # Nothing to rasterize with fewer than two points.
        return points
    segments = []
    for i in range(nb_points - 1):
        p = points[i:i + 2].ravel().tolist()
        segments.append(line(*p))
    return np.concatenate(segments, axis=0)
5,328,906
def radialBeamProfile_flatTop(x, y, a):
    r"""Top hat beam profile.

    Unit irradiance inside the disc of radius ``a`` about the beam axis,
    zero outside.

    \param[in] x x-position for profile computation
    \param[in] y y-position for profile computation
    \param[in] a radial extension of flat-top component
    \param[out] isp radial irradiation source profile (1.0 or 0.0)
    """
    inside_disc = (x ** 2 + y ** 2) <= a * a
    return 1.0 if inside_disc else 0.0
5,328,907
def post_save_count_update(sender, instance, created, **kwargs):
    """
    Receiver that is called after a media file or a comment is created or
    deleted. Updates num_media and num_comments properties of an
    observation.
    """
    was_deleted = getattr(instance, 'status', None) == 'deleted'
    if not (created or was_deleted):
        return
    sender_kind = sender.__name__
    if sender_kind == 'Comment':
        instance.commentto.update_count()
    elif sender_kind in ('ImageFile', 'DocumentFile', 'VideoFile', 'AudioFile'):
        instance.contribution.update_count()
5,328,908
def assert_has_attr(obj, attribute, msg_fmt="{msg}"):
    """Fail if an object does not have an attribute.

    >>> assert_has_attr([], "index")
    >>> assert_has_attr([], "i_do_not_have_this")
    Traceback (most recent call last):
        ...
    AssertionError: [] does not have attribute 'i_do_not_have_this'

    The following msg_fmt arguments are supported:
    * msg - the default error message
    * obj - object to test
    * attribute - name of the attribute to check
    """
    if hasattr(obj, attribute):
        return
    default_msg = "{!r} does not have attribute '{}'".format(obj, attribute)
    fail(msg_fmt.format(msg=default_msg, obj=obj, attribute=attribute))
5,328,909
def open_url(url):
    """Open an URL in the user's default web browser.

    The string attribute ``open_url.url_handler`` can be used to open URLs
    in a custom CLI script or utility. A subprocess is spawned with url as
    the parameter in this case instead of the usual webbrowser.open() call.

    Whether the browser's output (both stdout and stderr) are suppressed
    depends on the boolean attribute ``open_url.suppress_browser_output``.
    If the attribute is not set upon a call, set it to a default value,
    which means False if BROWSER is set to a known text-based browser --
    elinks, links, lynx, w3m or 'www-browser'; or True otherwise.

    The string attribute ``open_url.override_text_browser`` can be used to
    ignore env var BROWSER as well as some known text based browsers and
    attempt to open url in a GUI browser available.
    Note: If a GUI browser is indeed found, this option ignores the program
          option `show-browser-logs`
    """
    logger.debug('Opening %s', url)

    # Custom URL handler gets max priority
    if hasattr(open_url, 'url_handler'):
        p = subprocess.Popen([open_url.url_handler, url],
                             stdin=subprocess.PIPE)
        p.communicate()
        return

    browser = webbrowser.get()
    # NOTE(review): this function reads open_url.override_text_browser /
    # .suppress_browser_output without setting defaults — presumably a
    # caller initializes them first; confirm at the call sites.
    if open_url.override_text_browser:
        browser_output = open_url.suppress_browser_output
        # webbrowser._tryorder is a private attribute; text_browsers is a
        # module-level list of known text-mode browsers.
        for name in [b for b in webbrowser._tryorder if b not in text_browsers]:
            browser = webbrowser.get(name)
            logger.debug(browser)
            # Found a GUI browser, suppress browser output
            open_url.suppress_browser_output = True
            break

    if open_url.suppress_browser_output:
        # Temporarily redirect fds 1 and 2 to /dev/null, keeping dups so
        # they can be restored in the finally block below.
        _stderr = os.dup(2)
        os.close(2)
        _stdout = os.dup(1)
        os.close(1)
        fd = os.open(os.devnull, os.O_RDWR)
        os.dup2(fd, 2)
        os.dup2(fd, 1)
    try:
        browser.open(url, new=2)
    finally:
        if open_url.suppress_browser_output:
            os.close(fd)
            os.dup2(_stderr, 2)
            os.dup2(_stdout, 1)
        if open_url.override_text_browser:
            # Restore the caller's suppression setting.
            open_url.suppress_browser_output = browser_output
5,328,910
def _split_data(x, y, k_idx, k, perm_indices): """Randomly and coordinates splits two indexable items. Splits items in accordiance with k-fold cross-validatoin. Arguments: x: [?] indexable item y: [?] indexable item k_idx: int index of the k-fold partition to use k: int number of partitions for k-fold cross-validation perm_indices: np.ndarray, int array of indices representing a permutation of the samples with shape (num_sample, ) Returns: x_majority: [?] majority partition of indexable item y_majority: [?] majority partition of indexable item x_minority: [?] minority partition of indexable item y_minority: [?] minority partition of indexable item """ assert k > 0 assert k_idx >= 0 assert k_idx < k N = len(x) partition_size = int(ceil(N / k)) # minority group is the single selected partition # majority group is the other partitions minority_start = k_idx * partition_size minority_end = minority_start + partition_size minority_indices = perm_indices[minority_start:minority_end] majority_indices = np.append(perm_indices[0:minority_start], perm_indices[minority_end:]) assert np.array_equal(np.sort(np.append(minority_indices, majority_indices)), np.array(range(N))) x_majority = [x[i] for i in majority_indices] y_majority = [y[i] for i in majority_indices] x_minority = [x[i] for i in minority_indices] y_minority = [y[i] for i in minority_indices] return (x_majority, y_majority), (x_minority, y_minority)
5,328,911
def set_neighborhood(G, nodes):
    """Return a list of all neighbors of every node in nodes.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.
    nodes :
        An iterable container of nodes in G.

    Returns
    -------
    list
        A list containing all nodes that are a neighbor of some node in
        nodes.

    See Also
    --------
    set_closed_neighborhood
    """
    # TODO: write unit test
    union = set()
    for node in nodes:
        union.update(neighborhood(G, node))
    return list(union)
5,328,912
def orientationCallback(mic):
    """Orientation callback function.

    Callback function performing the recording of the voice activity and
    the direction of arrival angle. Detects the end of a speech and
    averages the measures taken to update the direction of arrival angle.

    Runs forever; communicates results through the module-level globals
    ``detectedAngle`` (-1 while no speech has been localized) and pauses
    while ``robotSpeakingMic`` is set.

    :param mic: Instance of the Tuning class.
    """
    counter = 0
    voiceCounter = 1
    angles = np.array([])
    voices = np.array([])
    global detectedAngle
    global robotSpeakingMic
    while True:
        # Only listen while the robot itself is not speaking.
        if not robotSpeakingMic:
            # Reset each pass; recomputed below when a speech segment ends.
            detectedAngle = -1
            # Record data
            voiceActivity = mic.is_voice()
            voices = np.append(voices, voiceActivity)
            angles = np.append(angles, mic.direction)
            # counter counts consecutive silent samples.
            if voiceActivity:
                counter = 0
            else:
                counter += 1
            voiceSamples = np.count_nonzero(voices)
            # If voice activity has been previously detected and there is no
            # voice activity anymore since 1 second then compute the
            # average angle
            if counter == 20 and voiceSamples > 2:
                counter = 0
                # Discard the first half of the voiced samples.
                doNotConsider = voiceSamples // 2
                voiceCounter = 1
                detectedAngle = 0
                numberDetections = 0
                for voice, angle in zip(voices, angles):
                    if voice:
                        # Do not take into account the first samples of
                        # voice activity as the first measures of
                        # angle are often strongly correlated to the last
                        # angle detected
                        if voiceCounter > doNotConsider:
                            detectedAngle += angle
                            numberDetections += 1
                        else:
                            voiceCounter += 1
                detectedAngle /= numberDetections
                voices = np.array([])
                angles = np.array([])
            # if nobody speaks during a long time, reset the arrays
            # to avoid too high array length
            elif counter > 20 and voiceSamples <= 5:
                counter = 0
                voices = np.array([])
                angles = np.array([])
            # Back off once an angle has been published; poll fast otherwise.
            if detectedAngle != -1:
                time.sleep(0.5)
            else:
                time.sleep(0.05)
        else:
            time.sleep(0.1)
5,328,913
def resolve_Log(
    parent: Any,
    info: gr.ResolveInfo,
    id: Optional[int] = None,
    uuid: Optional[str] = None,
) -> ENTITY_DICT_TYPE:
    """Resolve a ``Log`` entity for GraphQL.

    :param parent: parent resolver value (unused here).
    :param info: GraphQL resolve info for the current query.
    :param id: integer id of the Log to fetch, if given.
    :param uuid: uuid string of the Log to fetch, if given.
    :return: whatever :func:`resolve_entity` produces for ``Log``.
    """
    # Thin wrapper: all lookup logic lives in the shared resolve_entity helper.
    return resolve_entity(Log, info, id, uuid)
5,328,914
def _get_path_size(source: Union[Path, ZipInfo]) -> int: """ A helper method that returns the file size for the given source :param source: the source object to get the file size for. :return: the source's size. """ return source.stat().st_size if isinstance(source, Path) else source.file_size
5,328,915
def test_download_ckpt():
    """test download ckpt."""
    md_path = '../mshub_res/assets/mindspore/gpu/1.0/googlenet_v1_cifar10.md'
    cell = CellInfo("googlenet")
    cell.update(md_path)
    # Pull the download link and its expected checksum from the asset entry.
    asset = cell.asset[cell.asset_id]
    asset_link = asset['asset-link']
    asset_sha256 = asset["asset-sha256"]
    set_hub_dir('.cache')
    ckpt_path = _download_file_from_url(asset_link,
                                        hash_sha256=asset_sha256,
                                        save_path=get_hub_dir())
    assert os.path.exists(ckpt_path)
5,328,916
def shift_level_box_plot(ax, plot_data, y_label, methods_to_colors, legend_loc=None, hue_order=None, fontsize=22, in_distribution_line_width=0.8):
    """Make a boxplot across data splits, grouped by method, per shift level.

    Given `plot_data` (a pd.DataFrame), build a boxplot of metric
    performance (as measured by plot_data["value"]), with a different box
    per plot_data["method"], and a different group of boxes per
    plot_data["level"]. To also include solid lines for the performance on
    the in-distribution test set, plot_data["level"] should include values
    equal to "Test". See sns.boxplot for more info. See Fig. 2 in
    https://arxiv.org/abs/1906.02530 for an example.

    The 0.8 default for `in_distribution_line_width` comes from here:
    https://github.com/mwaskom/seaborn/blob/536fa2d8e9e8bb098b75174fbd8e2c91967e3b51/seaborn/categorical.py#L2200.
    0.8 is the total width of all the plots at a level combined.

    Args:
      ax: matplotlib Axes to plot on.
      plot_data: a pd.DataFrame with columns "level", "value", and "method",
        used as the data for the box plots.
      y_label: the vertical label for the plot.
      methods_to_colors: an optional dict mapping method string names
        (values in plot_data["method"]) to colors to be used by matplotlib.
      legend_loc: an optional string location for ax.legend. If None, then
        no legend is made.
      hue_order: an optional list of method string names (values in
        plot_data["method"]), the order of boxes in each group (passed to
        sns.boxplot). If None, then the values in plot_data["method"] are
        organized according to `_get_hue_order` defined above.
      fontsize: an int font size for the plot.
      in_distribution_line_width: the total width of all the lines used for
        the in-distribution Test split plot.
    """
    required_keys = ["level", "value", "method"]
    for key in required_keys:
        if key not in plot_data:
            # NOTE(znado): `list(plot_data)` gets a list of column names.
            raise ValueError(
                "{} missing from plot data DataFrame (existing keys: {}).".format(
                    key, ",".join(list(plot_data))))
    if hue_order is None:
        hue_order = _get_hue_order(plot_data)
    # "Test" rows are excluded from the boxes but kept in `order` so the
    # first x slot stays reserved for the in-distribution lines drawn below.
    sns.boxplot(
        x="level",
        y="value",
        ax=ax,
        hue="method",
        data=plot_data[plot_data["level"] != "Test"],
        whis=100.,
        order=["Test", 1, 2, 3, 4, 5],
        hue_order=hue_order,
        palette=methods_to_colors)
    if legend_loc is not None:
        ax.legend(
            ncol=1,
            title="Method",
            framealpha=0.5,
            loc=legend_loc,
            fontsize=fontsize)
    ax.set_xlabel("Shift intensity", fontsize=fontsize)
    ax.set_ylabel(y_label, fontsize=fontsize)
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    in_dist_plot_data = plot_data[plot_data["level"] == "Test"]
    if in_dist_plot_data.empty:
        return
    # Plot the in distribution test set (shift level 0) as thick lines instead of
    # box plots.
    x_low = -in_distribution_line_width / 2
    width = in_distribution_line_width / len(in_dist_plot_data)
    for method in hue_order:
        color = methods_to_colors[method]
        value = in_dist_plot_data[
            in_dist_plot_data["method"] == method].value.to_numpy()
        if len(value) == 0:  # pylint: disable=g-explicit-length-test
            continue
        value = value[0]
        ax.plot(
            [x_low, x_low + width], [value, value],
            color=color,
            linewidth=4.,
            solid_capstyle="butt")
        x_low += width
5,328,917
def get_graph_subsampling_dataset( prefix, arrays, shuffle_indices, ratio_unlabeled_data_to_labeled_data, max_nodes, max_edges, **subsampler_kwargs): """Returns tf_dataset for online sampling.""" def generator(): labeled_indices = arrays[f"{prefix}_indices"] if ratio_unlabeled_data_to_labeled_data > 0: num_unlabeled_data_to_add = int(ratio_unlabeled_data_to_labeled_data * labeled_indices.shape[0]) unlabeled_indices = np.random.choice( NUM_PAPERS, size=num_unlabeled_data_to_add, replace=False) root_node_indices = np.concatenate([labeled_indices, unlabeled_indices]) else: root_node_indices = labeled_indices if shuffle_indices: root_node_indices = root_node_indices.copy() np.random.shuffle(root_node_indices) for index in root_node_indices: graph = sub_sampler.subsample_graph( index, arrays["author_institution_index"], arrays["institution_author_index"], arrays["author_paper_index"], arrays["paper_author_index"], arrays["paper_paper_index"], arrays["paper_paper_index_t"], paper_years=arrays["paper_year"], max_nodes=max_nodes, max_edges=max_edges, **subsampler_kwargs) graph = add_nodes_label(graph, arrays["paper_label"]) graph = add_nodes_year(graph, arrays["paper_year"]) graph = tf_graphs.GraphsTuple(*graph) yield graph sample_graph = next(generator()) return tf.data.Dataset.from_generator( generator, output_signature=utils_tf.specs_from_graphs_tuple(sample_graph))
5,328,918
def check_comment_exists(comment_id_required=True): """ Decorator to check if a given comment exists. If it does not, it returns an HTTP 400 error. Must be called with (), and may pass the optional argument of whether the id is required. If the id is passed, it will be checked against entities of the Comment kind, and a 400 error will be returned if it is not found. If the id is not passed, an error will be returned, unless the argument is False. """ def decorator(func): @wraps(func) def wrapper(self, comment_id=''): comment_entity = None if comment_id: comment_key = db.Key.from_path('Comment', int(comment_id)) comment_entity = db.get(comment_key) if not comment_entity: # bad request return self.error(400) elif comment_id_required: return self.error(400) func(self, comment_entity) return wrapper return decorator
5,328,919
def test_wrong_endpoint(api_client): """ Attempting to query using the wrong endpoint should return a 404 response. Regression test for #505. """ response = api_client.post("/know-me/subscription/apple/foobar/", {}) assert response.status_code == status.HTTP_404_NOT_FOUND
5,328,920
def save_image(img, path): """ Saves a numpy matrix or PIL image as an image Args: img_as_arr (Numpy array): Matrix of shape DxWxH path (str): Path to the image """ if isinstance(img, (np.ndarray, np.generic)): img = format_np_output(img) img = Image.fromarray(img) img.save(path)
5,328,921
def GeoMoonState(time): """Calculates equatorial geocentric position and velocity of the Moon at a given time. Given a time of observation, calculates the Moon's position and velocity vectors. The position and velocity are of the Moon's center relative to the Earth's center. The position (x, y, z) components are expressed in AU (astronomical units). The velocity (vx, vy, vz) components are expressed in AU/day. The coordinates are oriented with respect to the Earth's equator at the J2000 epoch. In Astronomy Engine, this orientation is called EQJ. If you need the Moon's position only, and not its velocity, it is much more efficient to use #GeoMoon instead. Parameters ---------- time : Time The date and time for which to calculate the Moon's position and velocity. Returns ------- StateVector The Moon's position and velocity vectors in J2000 equatorial coordinates (EQJ). """ # This is a hack, because trying to figure out how to derive a time # derivative for CalcMoon() would be extremely painful! # Calculate just before and just after the given time. # Average to find position, subtract to find velocity. dt = 1.0e-5 # 0.864 seconds t1 = time.AddDays(-dt) t2 = time.AddDays(+dt) r1 = GeoMoon(t1) r2 = GeoMoon(t2) return StateVector( (r1.x + r2.x) / 2, (r1.y + r2.y) / 2, (r1.z + r2.z) / 2, (r2.x - r1.x) / (2 * dt), (r2.y - r1.y) / (2 * dt), (r2.z - r1.z) / (2 * dt), time )
5,328,922
def get_machine_name(): """ Portable way of calling hostname shell-command. Regarding docker containers: NOTE: If we are running from inside the docker-dev environment, then $(hostname) will return the container-id by default. For now we leave that behaviour. We can override it in the future by passing --hostname to the container. Documentation: https://docs.docker.com/config/containers/container-networking/#ip-address-and-hostname :return: Unique name for a node in the cluster """ machine_name = platform.node() return machine_name
5,328,923
def pass_through_formatter(value): """No op update function.""" return value
5,328,924
def instantiate(decoder, model=None, dataset=None): """ Instantiate a full decoder config, e.g. handle list of configs Note that arguments are added in reverse order compared to encoder (model first, then dataset) """ decoder = utils.to_list(decoder) return U.TupleSequential(*[_instantiate(d, model=model, dataset=dataset) for d in decoder])
5,328,925
def un_normalize(stdevs, arrList): """ Return an arrayList with ith column multiplied by scalar stdevs[i] if stdevs[i] is not zero, and unmodified if it is zero. Args: stdevs: A list of numbers (should be the list output by normalize). arrList: A list of list of numbers that is the (normalized) data. Returns: list: A list of list of numbers of the same dimensions as arrlist. >>> un_normalize([0.5, 2],[[1, 2], [3,4]]) [[0.5, 4], [1.5, 8]] >>> un_normalize([0.0, 2],[[1, 2], [3,4]]) [[1, 4], [3, 8]] """ stdevs = list(map(lambda x: x if x != 0.0 else 1, stdevs)) return scalarMultCols(stdevs, arrList)
5,328,926
def activate(context: MapCommandContext): """All subsequent mapping actions will target this folder""" mapping_file_path = Path(context.current_dir) / DEFAULT_MAPPING_NAME if not mapping_file_path.exists(): raise MapperException( f"Could not find mapping file at " f"'{mapping_file_path}'" ) _activate(context.settings, mapping_path=mapping_file_path)
5,328,927
def convert_graph(input_path):
    """
    Converts a CRED-like graph into a graph format supported by the igraph library.
    The input graph must have been generated by cli2 CRED command (look for credResult.json)

    :param input_path: The path to the CRED graph to convert (credResult.json)
    :return: an igraph.Graph (directed) with per-node cred attributes and
        per-edge flow attributes; dangling edges (edges whose endpoints were
        not added as vertices) are skipped.
    """
    with open(input_path, encoding="utf8") as f:
        cred_file = json.load(f)

    # Locating important elements in the graph.
    # The credResult.json layout nests the payload under index [1].
    cred_data = cred_file[1]['credData']
    graph = cred_file[1]['weightedGraph'][1]['graphJSON'][1]
    cred_node_addresses = graph['sortedNodeAddresses']

    # Summary of edges/nodes and also a reminder about dangling edges
    print(f'Found cred summary data for {len(cred_data["nodeSummaries"])} nodes and {len(cred_data["edgeSummaries"])} edges')
    print(f'The graph has {len(graph["nodes"])} nodes, {len(graph["edges"])} edges and {len(graph["sortedNodeAddresses"])} node addresses')
    print(f'Dangling edges expected: {len(graph["edges"]) - len(cred_data["edgeSummaries"])}')

    g = Graph(directed=True)

    # Collecting nodes.
    # Each igraph vertex is named by its CRED index (as a string) so that
    # edges can be added by endpoint name below.
    for i, cred_node in enumerate(graph['nodes']):
        cred_node_address = cred_node_addresses[cred_node['index']]
        igraph_node_atts = {'label': cred_node_address[2]+'-'+cred_node_address[-1][:7],
                            'type': cred_node_address[2],
                            # Nodes without a timestamp get 0 as a sentinel.
                            'timestamp': cred_node['timestampMs'] if cred_node['timestampMs'] is not None else 0,
                            'totalCred': cred_data['nodeSummaries'][i]['cred'],
                            'index': cred_node['index'],
                            }
        g.add_vertex(name=str(cred_node['index']), **igraph_node_atts)

    # Collecting edges
    dangling_edges = []
    # `idx` indexes into edgeSummaries and only advances for non-dangling
    # edges, keeping summaries aligned with the edges actually added.
    idx = 0
    for cred_edge in graph['edges']:
        # Checking if the edges is a dangling one. If so, we skip.
        if len(g.vs.select(name_eq=str(cred_edge['srcIndex']))) + len(g.vs.select(name_eq=str(cred_edge['dstIndex']))) < 2:
            dangling_edges.append({"srcIndex": cred_edge['srcIndex'], "dstIndex": cred_edge['dstIndex']})
            continue
        igraph_edge_atts = {'address': '-'.join(cred_edge['address']),
                            'timestamp': cred_edge['timestampMs'],
                            'backwardFlow': cred_data['edgeSummaries'][idx]['backwardFlow'],
                            'forwardFlow': cred_data['edgeSummaries'][idx]['forwardFlow'],
                            }
        g.add_edge(str(cred_edge['srcIndex']), str(cred_edge['dstIndex']), **igraph_edge_atts)
        idx += 1

    # Reporting the number of dangling edges found
    print(f"Dangling edges found: {len(dangling_edges)}")

    return g
5,328,928
def get_valid_classes_from_class_input( class_graph: class_dependency.JavaClassDependencyGraph, class_names_input: str) -> List[str]: """Parses classes given as input into fully qualified, valid classes. Input is a comma-separated list of classes.""" class_names = class_names_input.split(',') return get_valid_classes_from_class_list(class_graph, class_names)
5,328,929
def us_ppop(ppop): """ Determines if the ppop is in a valid format to be in the US """ # return false if it's null or not 7 digits long if not ppop or len(ppop) != 7: return False ppop = ppop.upper() if ppop[:2] in g_state_by_code or ppop[:2] in g_state_code_by_fips: return True return False
5,328,930
def get_pygments_lexer(location): """ Given an input file location, return a Pygments lexer appropriate for lexing this file content. """ try: T = _registry[location] if T.is_binary: return except KeyError: if binaryornot.check.is_binary(location): return try: # FIXME: Latest Pygments versions should work fine # win32_bug_on_s_files = dejacode.on_windows and location.endswith('.s') # NOTE: we use only the location for its file name here, we could use # lowercase location may be lexer = get_lexer_for_filename(location, stripnl=False, stripall=False) return lexer except LexerClassNotFound: try: # if Pygments does not guess we should not carry forward # read the first 4K of the file with open(location, 'rb') as f: content = f.read(4096) guessed = guess_lexer(content) return guessed except LexerClassNotFound: return
5,328,931
def get_robotstxt_parser(url, session=None): """Get a RobotFileParser for the given robots.txt URL.""" rp = RobotFileParser() try: req = urlopen(url, session, max_content_bytes=MaxContentBytes, allow_errors=range(600)) except Exception: # connect or timeout errors are treated as an absent robots.txt rp.allow_all = True else: if req.status_code >= 400: rp.allow_all = True elif req.status_code == 200: rp.parse(req.text.splitlines()) return rp
5,328,932
def init_model(config, checkpoint=None, device='cuda:0'): """Initialize a model from config file. Args: config (str or :obj:`mmcv.Config`): Config file path or the config object. checkpoint (str, optional): Checkpoint path. If left as None, the model will not load any weights. Returns: nn.Module: The constructed model. (nn.Module, None): The constructed extractor model """ if isinstance(config, str): config = mmcv.Config.fromfile(config) elif not isinstance(config, mmcv.Config): raise TypeError('config must be a filename or Config object, ' f'but got {type(config)}') config.data.test.test_mode = True model = build_architecture(config.model) if checkpoint is not None: # load model checkpoint load_checkpoint(model, checkpoint, map_location=device) # save the config in the model for convenience model.cfg = config model.to(device) model.eval() extractor = None if config.model.type == 'VideoBodyModelEstimator': extractor = build_backbone(config.extractor.backbone) if config.extractor.checkpoint is not None: # load model checkpoint load_checkpoint(extractor, config.extractor.checkpoint) extractor.cfg = config extractor.to(device) extractor.eval() return model, extractor
5,328,933
def estimate_period(time, y, y_err, clip=True, plot=True, **kwargs):
    """
    Run a Lomb-Scargle Periodogram to find periodic signals.
    It's recommended to use the allesfitter.time_series functions sigma_clip
    and slide_clip beforehand.

    Parameters
    ----------
    time : array of float
        e.g. time array (usually in days)
    y : array of float
        e.g. flux or RV array (usually as normalized flux or RV in km/s)
    yerr : array of float
        e.g. flux or RV error array (usually as normalized flux or RV in km/s)
    clip : bool, optional
        Automatically clip the input data with sigma_clip(low=4, high=4)
        and slide_clip(window_length=1, low=4, high=4). The default is True.
    plot : bool, optional
        To plot or not, that is the question. The default is False.
    **kwargs : collection of keyword arguments
        'minperiod' and 'maxperiod' bound the period search (defaults: 10x
        the median cadence and the total baseline); any other keyword
        arguments are currently unused.

    Returns
    -------
    best_period : float
        The best period found.
    FAP : float
        The false alarm probability for the best period.
    fig : matplotlib.figure object, optional
        The summary figure. Only returned if plot is True.
    """

    #==========================================================================
    #::: clean the inputs
    #==========================================================================
    time, y, y_err = clean(time, y, y_err)
    # The inner helper below is also named `plot`, shadowing the boolean
    # argument — remember the flag before it gets shadowed.
    plot_bool = plot
    if clip:
        y = sigma_clip(time, y, low=4, high=4)
        y = slide_clip(time, y, window_length=1, low=4, high=4)
        time, y, y_err = clean(time, y, y_err)

    #==========================================================================
    #::: handle inputs
    #==========================================================================
    cadence = np.nanmedian(np.diff(time))
    # NOTE(review): `kwargs` from ** is always a dict, so this guard is dead
    # code — harmless, kept for safety.
    if kwargs is None: kwargs = {}
    if 'minperiod' not in kwargs: kwargs['minperiod'] = 10. * cadence
    if 'maxperiod' not in kwargs: kwargs['maxperiod'] = time[-1]-time[0]
    minfreq = 1./kwargs['maxperiod']
    maxfreq = 1./kwargs['minperiod']

    #==========================================================================
    #::: now do the periodogram
    #==========================================================================
    ls = LombScargle(time, y) #Analyze our dates and s-index data using the AstroPy Lomb Scargle module
    frequency, power = ls.autopower(minimum_frequency=minfreq, maximum_frequency=maxfreq) #Determine the LS periodogram
    best_power = np.nanmax(power)
    best_frequency = frequency[np.argmax(power)]
    best_period = 1./best_frequency
    FAP=ls.false_alarm_probability(best_power) #Calculate the FAP for the highest peak in the power array

    #==========================================================================
    #::: plot
    #==========================================================================
    def plot():
        # Summary figure: periodogram, phase-folded data (full and zoomed),
        # and the autocorrelation of the time series.
        peak_loc=round(float(1./best_frequency),2)
        FAP_probabilities = [0.5, 0.1, 0.01] #Enter FAP values you want to determine
        FAP_levels=ls.false_alarm_level(FAP_probabilities) #Get corresponding LS Power values
        fig, axes = plt.subplots(4, 1, figsize=[10,15], tight_layout=True)

        #::: plot the periodogram
        ax = axes[0]
        ax.semilogx(1./frequency,power,color='b')
        ax.plot(peak_loc, best_power, marker='d', markersize=12, color='r')
        ax.text(peak_loc*1.2,best_power*0.95,'Peak Period: '+str(peak_loc)+' days')
        ax.text(peak_loc*1.2,best_power*0.85,'FAP: '+str(FAP))
        ax.hlines(FAP_levels, kwargs['minperiod'], kwargs['maxperiod'], color='grey', lw=1)
        ax.text(kwargs['maxperiod'], FAP_levels[0],'0.5% FAP ', ha='right')
        ax.text(kwargs['maxperiod'], FAP_levels[1],'0.1% FAP ', ha='right')
        ax.text(kwargs['maxperiod'], FAP_levels[2],'0.01% FAP ', ha='right')
        ax.set(xlabel='Period (days)', ylabel='L-S power')
        ax.tick_params(axis='both',which='major')

        #::: plot the phase-folded data
        ax = axes[1]
        plot_phase_folded_lightcurve(time, y, period=1./best_frequency, epoch=0, ax=ax)
        ax.set(ylim=[np.nanmin(y), np.nanmax(y)], ylabel='Data (clipped; phased)')

        #::: plot the phase-folded data, zoomed
        ax = axes[2]
        plot_phase_folded_lightcurve(time, y, period=1./best_frequency, epoch=0, ax=ax)
        ax.set(ylabel='Data (clipped; phased; y-zoom)')

        #::: plot the autocorrelation of the data
        ax = axes[3]
        plot_acf(pd.Series(y, index=time), ax=ax, lags=np.linspace(start=1,stop=2*best_period/cadence,num=100,dtype=int))
        ax.set(xlabel='Lag', ylabel='Autocorrelation', title='')

        return fig

    #==========================================================================
    #::: return
    #==========================================================================
    if plot_bool:
        fig = plot()
        return best_period, FAP, fig
    else:
        return best_period, FAP
5,328,934
def get_test_packages(): """Get a list of packages which need tests run. Filters the package list in the following order: * Check command line for packages passed in as positional arguments * Check if the the local remote and local branch environment variables have been set to specify a remote branch to diff against. * Check if in Travis, then limit the subset based on changes in a Pull Request ("push" builds to branches may not have any filtering) * Just use all packages An additional check is done for the cases when a diff is computed (i.e. using local remote and local branch environment variables, and on Travis). Once the filtered list of **changed** packages is found, the package dependency graph is used to add any additional packages which depend on the changed packages. :rtype: list :returns: A list of all package directories where tests need be run. """ all_packages = get_package_directories() local_diff = local_diff_branch() parser = get_parser() args = parser.parse_args() if args.packages is not UNSET_SENTINEL: verify_packages(args.packages, all_packages) return sorted(args.packages) elif local_diff is not None: changed_packages = get_changed_packages( 'HEAD', local_diff, all_packages) return follow_dependencies(changed_packages, all_packages) elif in_travis(): changed_packages = get_travis_directories(all_packages) return follow_dependencies(changed_packages, all_packages) else: return all_packages
5,328,935
def main(): """Main entry point for the script""" start = time.time() # short explanation: # str(number) - converts number to string # map(int, string) - converts each character to int # sum(list) - sums the list of ints (in this case) print(sum(map(int, str(int(math.pow(2, 1000)))))) timeutils.elapsed_time(time.time() - start)
5,328,936
def cli_exec( opt, engine_args: List[str], profile: str, profile_path: str, conan_arg: List[str], conan_option: List[str], conan_setting: List[str], preserve_env: bool, override_env: List[str], cache: bool, debug: bool, ) -> None: """Launch cloe-engine with a profile. ENGINE_ARGS are passed on to cloe-engine. """ options.deny_profile_and_path(profile, profile_path) conf = Configuration(profile) engine = Engine(conf, conanfile=profile_path) engine.conan_args = conan_arg engine.conan_options = conan_option engine.conan_settings = conan_setting engine.preserve_env = preserve_env # Prepare environment overrides: overrides = {} for line in override_env: kv = line.split("=", 1) if len(kv) == 1: kv.append(os.getenv(kv[0], "")) overrides[kv[0]] = kv[1] # Run cloe-engine and pass on returncode: # If cloe-engine is killed/aborted, subprocess will return 250. result = engine.exec( engine_args, use_cache=cache, debug=debug, override_env=overrides ) sys.exit(result.returncode)
5,328,937
def decoding_character(morse_character): """ Input: - morse_character : 문자열값으로 get_morse_code_dict 함수로 알파벳으로 치환이 가능한 값의 입력이 보장됨 Output: - Morse Code를 알파벳으로 치환함 값 Examples: >>> import morsecode as mc >>> mc.decoding_character("-") 'T' >>> mc.decoding_character(".") 'E' >>> mc.decoding_character(".-") 'A' >>> mc.decoding_character("...") 'S' >>> mc.decoding_character("....") 'H' >>> mc.decoding_character("-.-") 'K' """ # ===Modify codes below============= # 조건에 따라 변환되어야 할 결과를 result 변수에 할당 또는 필요에 따라 자유로운 수정 #morse_code_dict = get_morse_code_dict() char_dict = get_char_code_dict() result = char_dict.get(morse_character) return result
5,328,938
def SparsityParametersAddDimMetadata(builder, dimMetadata): """This method is deprecated. Please switch to AddDimMetadata.""" return AddDimMetadata(builder, dimMetadata)
5,328,939
def check_branch(payload, branch): """ Check if a push was on configured branch. :param payload: Payload from web hook. :param branch: Name of branch to trigger action on. :return: True if push was on configured branch, False otherwise. """ if "ref" in payload: if payload["ref"] == branch: return True return False
5,328,940
def convert(q: Quantity, new_unit: Union[str, Unit], equivalencies=None) -> Quantity: """Convert quantity to a new unit. :raises InvalidUnit: When target unit does not exist. :raises InvalidUnitConversion: If the conversion is invalid. Customized to be a bit more universal than the original quantities. """ try: return q.to(new_unit, equivalencies or []) except u.UnitConversionError: if q.unit.physical_type == "temperature": return q.to(new_unit, u.temperature()) else: raise InvalidUnitConversion( f"Cannot convert unit '{q.unit}' to '{new_unit}'." ) from None except ValueError as err: raise InvalidUnit(f"Unit '{new_unit}' does not exist.") from None
5,328,941
def process_files(subdirectory, sd_files, pattern="", verbose=False): """ recursively iterates ofer all files and checks those which meet criteria set by options only """ global total_files for __file in sd_files: current_filename = os.path.join(subdirectory, __file) if current_filename[-3:] == '.js': if not '/node_modules/' in subdirectory or ('/node_modules/' in subdirectory and skip_node_modules is False): if (skip_test_files is False): perform_code_analysis(current_filename, pattern, verbose) total_files = total_files + 1 else: if __file not in TEST_FILES and "/test" not in current_filename and "/tests" not in current_filename: perform_code_analysis( current_filename, pattern, verbose) total_files = total_files + 1
5,328,942
def randomized_pairwise_t_test(arr1, arr2, output=True): """ Perform a randomized pairwise t-test on two arrays of values of equal size. see Cohen, P.R., Empirical Methods for Artificial Intelligence, p. 168 """ # Make sure both arrays are the same length assert len(arr1) == len(arr2) # Cast them to floats arr1 = map(float, arr1) arr2 = map(float, arr2) # Calculate the absolute diffs diffs = [(arr1[i] - arr2[i]) for i in range(len(arr1))] # Calculate the original mean originalMean = sum(diffs) / float(len(diffs)) numLess = 0 # Do 10000 trials to test for i in range(10000): running_sum = 0. for j in range(len(diffs)): if choice([True,False]): running_sum += diffs[j] else: running_sum -= diffs[j] mean = running_sum / float(len(diffs)) if mean <= originalMean: numLess += 1 # Finally output / return the stats ratio = float(numLess + 1) / float(10001) ratio = min(ratio, 1-ratio) if output: print ("mean difference: %f\nsignificant at p <= %f" % (originalMean, ratio)) return originalMean, ratio
5,328,943
def update_metadata(radar, longitude: np.ndarray, latitude: np.ndarray) -> Dict: """ Update metadata of the gridded products. Parameter: ========== radar: pyart.core.Grid Radar data. Returns: ======== metadata: dict Output metadata dictionnary. """ today = datetime.datetime.utcnow() dtime = cftime.num2pydate(radar.time["data"], radar.time["units"]) maxlon = longitude.max() minlon = longitude.min() maxlat = latitude.max() minlat = latitude.min() metadata = { "comment": "Gridded radar volume using Barnes et al. ROI", "field_names": ", ".join([k for k in radar.fields.keys()]), "geospatial_bounds": f"POLYGON(({minlon:0.6} {minlat:0.6},{minlon:0.6} {maxlat:0.6},{maxlon:0.6} {maxlat:0.6},{maxlon:0.6} {minlat:0.6},{minlon:0.6} {minlat:0.6}))", "geospatial_lat_max": f"{maxlat:0.6}", "geospatial_lat_min": f"{minlat:0.6}", "geospatial_lat_units": "degrees_north", "geospatial_lon_max": f"{maxlon:0.6}", "geospatial_lon_min": f"{minlon:0.6}", "geospatial_lon_units": "degrees_east", "geospatial_vertical_min": np.int32(radar.origin_altitude["data"][0]), "geospatial_vertical_max": np.int32(20000), "geospatial_vertical_positive": "up", "history": f"created by Valentin Louf on gadi.nci.org.au at {today.isoformat()} using Py-ART", "processing_level": "b2", "time_coverage_start": dtime[0].isoformat(), "time_coverage_end": dtime[-1].isoformat(), "uuid": str(uuid.uuid4()), } return metadata
5,328,944
def main(): """ The main routine. """ try: default_output = os.environ["EC_DEFAULT_OUTPUT1"] except KeyError: default_output = CONST_OUTPUT_TABLE # set up command line arguments args = set_command_line_args(default_output) # run the crawl _, _, _ = crawl(args)
5,328,945
def idxsel2xsel(file, isel, dimensions, order):
    """ convert a index space selection object to an xSelect object

    Walks every axis in `dimensions`, normalizes the per-axis selection from
    `isel` (slice / scalar / array), and collects them into an xSelect. Axes
    absent from `isel` default to a full slice. Scalar selections are removed
    from `order`, shifting later entries down; `order` is mutated in place.

    NOTE(review): the semantics of `order` and of multidimensional coordinate
    selections are inferred from the code below — confirm against the
    idxSelect/xSelect definitions.
    """
    if not isinstance(isel, idxSelect):
        raise TypeError('wrong argument type')
    xsel = {}         # per-axis normalized selection (slice/scalar/array)
    xsel_size = {}    # per-axis result length (0 for scalar axes)
    xsel_dims = {}    # per-axis dims of a multidimensional coordinate, or None
    isarray = False   # any axis selected with an array
    interp = False    # any axis requires interpolation
    masked = False    # any axis uses a masked array
    multidim = False  # any axis has a multidimensional coordinate
    i = 0             # running index into `order` for non-scalar axes
    for axis in dimensions:
        inc_i = True
        try:
            idx = isel[axis]
            if idx.interp:
                interp = True
            if idx.isarray:
                isarray = True
            if idx.dims is not None:
                multidim = True
            if isinstance(idx.v, N.ma.MaskedArray):
                masked = True
            xsel_dims[axis] = idx.dims
            idx = idx.v
            if isinstance(idx, slice):
                # Fill in defaulted slice fields from the dimension size,
                # handling negative steps (reverse iteration) separately.
                dimsize = file.cf_dimensions[axis]
                res = [idx.start, idx.stop, idx.step]
                if (idx.step is not None and idx.step < 0):
                    if idx.start is None:
                        res[0] = dimsize - 1
                    if idx.stop is None:
                        res[1] = None
                else:
                    if idx.start is None:
                        res[0] = 0
                    if idx.stop is None:
                        res[1] = dimsize
                if idx.step is None:
                    res[2] = 1
                xsel[axis] = slice(res[0], res[1], res[2])
            elif N.isscalar(idx):
                # Scalar selection collapses this axis: drop it from `order`
                # and shift down every later position.
                xsel[axis] = idx
                if len(order) > 0:
                    order.remove(i)
                    for val in order:
                        if val > i:
                            order[order.index(val)] = val - 1
                inc_i = False
            else:
                #xsel[axis] = idx.copy()
                xsel[axis] = copy.copy(idx)
                # 0-d (or degenerate) arrays behave like scalars for `order`.
                if len(idx.shape) == 0 or idx.shape == 1:
                    if len(order) > 0:
                        order.remove(i)
                        for val in order:
                            if val > i:
                                order[order.index(val)] = val - 1
                    inc_i = False
        except KeyError:
            # Axis not selected at all: take the full extent.
            dimsize = file.cf_dimensions[axis]
            xsel[axis] = (slice(0, dimsize, 1))
            xsel_dims[axis] = None
        if inc_i:
            i += 1
    if isarray:
        # convert slices to 1d-arrays and determine result size
        for axis in dimensions:
            idx = xsel[axis]
            if isinstance(idx, slice):
                xsel[axis] = N.arange(idx.start, idx.stop, idx.step)
            if xsel_dims[axis] is None:
                if is_scalar(xsel[axis]):
                    xsel_size[axis] = 0
                else:
                    xsel_size[axis] = len(xsel[axis])
            else:
                xsel_size[axis] = isel[axis].axlen
        # determine shape of xsel
        dim_ret = []
        for axis in dimensions:
            if xsel_size[axis] != 0:
                dim_ret.append(xsel_size[axis])
        ndim_ret = len(dim_ret)
        # all 1d arrays
        if not multidim:
            i = 0
            for axis in dimensions:
                if xsel_size[axis] != 0:
                    # Reshape each 1-d index array so the arrays broadcast
                    # against each other (outer-product style indexing).
                    idx_shape = N.ones(ndim_ret,dtype="int32")
                    idx_shape[i] = dim_ret[i]
                    xsel[axis].shape = idx_shape
                    i += 1
        # at least one multidimensional coordinate
        else:
            i = 0
            for axis in dimensions:
                if xsel_dims[axis] is None:
                    if xsel_size[axis] != 0:
                        # NOTE(review): `idx_shape` is computed but never
                        # assigned to xsel[axis].shape in this branch (unlike
                        # the 1-d branch above) — looks like a possible bug;
                        # confirm intended behaviour.
                        idx_shape = N.ones(ndim_ret,dtype="int32")
                        idx_shape[i] = dim_ret[i]
                        i += 1
                else:
                    # Build the broadcast shape of a multidimensional
                    # coordinate from the sizes of the axes it spans.
                    idx_shape2 = {}
                    for axis2 in dimensions:
                        if xsel_size[axis2] != 0:
                            if axis2 in xsel_dims[axis]:
                                idx_shape2[axis2] = isel[axis].dimsize(axis2)
                            else:
                                idx_shape2[axis2] = 1
                    idx_shape = []
                    for axis2 in dimensions:
                        if axis2 in idx_shape2:
                            idx_shape.append(idx_shape2[axis2])
                    if isel[axis].type != 'scalar':
                        i += 1
    # check if we only need basic slicing
    if not isarray and not interp:
        isbasic = True
    else:
        isbasic = False
    ret = []
    for axis in dimensions:
        ret.append(xsel[axis])
    ret = xSelect(ret)
    ret.isbasic = isbasic
    ret.interp = interp
    ret.masked = masked
    ret.order = order
    return ret
5,328,946
def _retrieve_max_kb_s_sent_state(status: FritzStatus, last_value: str) -> float: """Return upload max transmission rate.""" return round(status.max_bit_rate[0] / 1000, 1)
5,328,947
def test_view(app, base_client): """Test view.""" res = base_client.get("/sip2/monitoring") assert res.status_code == 200 assert 'Welcome to Invenio-SIP2' in str(res.data)
5,328,948
def user_detail(request, id, format=None): """ Retrieve, update or delete a server assets instance. """ try: snippet = User.objects.get(id=id) except User.DoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.method == 'GET': serializer = UserSerializer(snippet) return Response(serializer.data) elif request.method == 'PUT': serializer = UserSerializer(snippet, data=request.data) if serializer.is_valid(): serializer.save() return Response(serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) elif request.method == 'DELETE': if not request.user.has_perm('ops.delete_user'): return Response(status=status.HTTP_403_FORBIDDEN) snippet.delete() return Response(status=status.HTTP_204_NO_CONTENT)
5,328,949
def _gsmooth_img(args): """ HELPER FUNCTION: private! Smooth an image with a gaussian in 2d """ img,kernel,use_fft,kwargs = args if use_fft: return convolve_fft(img, kernel, normalize_kernel=True, **kwargs) else: return convolve(img, kernel, normalize_kernel=True, **kwargs)
5,328,950
def ParseArgs(): """Parses command line arguments. Returns: args from argparse.parse_args(). """ description = ( 'Handle Whale button click event.' ) parser = argparse.ArgumentParser( formatter_class=argparse.RawTextHelpFormatter, description=description) parser.add_argument('-d', '--debug', action='store_true', default=False, help='enable debug messages') parser.add_argument('--rpc_debug', action='store_true', default=False, help='enable debug messages for XMLRPC call') parser.add_argument('--nouse_dolphin', action='store_false', default=True, dest='use_dolphin', help='whether to skip dolphin control' ' (remote server). default: %(default)s') parser.add_argument('--use_polld', action='store_true', default=False, help='whether to use polld (for polling GPIO port on ' 'remote server) or poll local GPIO port, default: ' '%(default)s') parser.add_argument('--host', default='127.0.0.1', type=str, help='hostname of server, default: %(default)s') parser.add_argument('--dolphin_port', default=9997, type=int, help='port that dolphin_server listens, default: ' '%(default)d') parser.add_argument('--polld_port', default=9998, type=int, help='port that polld listens, default: %(default)d') parser.add_argument('--servod_port', default=9999, type=int, help='port that servod listens, default: %(default)d') parser.add_argument('--polling_wait_secs', default=5, type=int, help=('# seconds for polling button clicking event, ' 'default: %(default)d')) return parser.parse_args()
5,328,951
def __logs_by_scan_id(scan_id, language): """ select all events by scan id hash Args: scan_id: scan id hash language: language Returns: an array with JSON events or an empty array """ try: logs = [] for log in send_read_query( "select host,username,password,port,type,date,description from hosts_log where scan_id=\"{0}\"".format( scan_id), language): data = { "SCAN_ID": scan_id, "HOST": log[0], "USERNAME": log[1], "PASSWORD": log[2], "PORT": log[3], "TYPE": log[4], "TIME": log[5], "DESCRIPTION": log[6] } logs.append(data) return logs except: return []
5,328,952
def handle_xds110(args): """Helper function for handling 'xds110' command""" session_args = get_session_args(args) if args.cmd == 'xds110-reset': try: result = tiflash.xds110_reset(**session_args) print(result) except Exception as e: __exit_with_error(e) elif args.cmd == 'xds110-list': try: result = tiflash.xds110_list(**session_args) header = "XDS110 Devices:" print(header) print('-' * len(header)) for dev in result: print("%s (%s)" % (dev[0], dev[1])) except Exception as e: __exit_with_error(e) elif args.cmd == 'xds110-upgrade': try: result = tiflash.xds110_upgrade(**session_args) print(result) except Exception as e: __exit_with_error(e)
5,328,953
def docs(): """Redirect to documentation on Github Route: /docs Methods: GET Return: redirect to webpage """ return redirect("https://kinsaurralde.github.io/ws_281x-lights/#/")
5,328,954
def std(a, weights=None, axis=None, dtype=None, ddof=0, keepdims=False):
    """
    Compute the weighted standard deviation along the specified axis.

    :param a: Array containing numbers whose standard deviation is desired. If
        `a` is not an array, a conversion is attempted.
    :param weights: Array containing weights for the elements of `a`. If
        `weights` is not an array, a conversion is attempted.
    :param axis: Axis or axes along which the means are computed. The default
        is to compute the mean of the flattened array.
        Type is None or int or tuple of ints, optional.
    :param dtype: data type to use in computing the mean.
    :param int ddof: Delta Degrees of Freedom. The divisor used in calculations
        is ``W - ddof``, where ``W`` is the sum of weights (or number of
        elements if `weights` is None). By default `ddof` is zero
    :param bool keepdims: If this is set to True, the axes which are reduced
        are left in the result as dimensions with size one.
    :return: np.ndarray
    """
    if weights is None:
        # Unweighted case: defer entirely to NumPy's std.
        return np.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims)
    else:
        w = np.array(weights)
        # NOTE: `mean` here is this module's weighted-mean helper, not np.mean.
        # keepdims=True so the mean broadcasts against `a` below.
        m = mean(a, weights=w, axis=axis, keepdims=True)
        # Weighted variance: sum(w * (a - m)^2) / (sum(w) - ddof),
        # then sqrt for the standard deviation.
        return np.sqrt(
            np.sum(
                w * (np.array(a) - m) ** 2, axis=axis, dtype=dtype,
                keepdims=keepdims
            )
            / (  # noqa: W504
                np.sum(w, axis=axis, dtype=dtype, keepdims=keepdims) - ddof
            )
        )
5,328,955
def generate_tf_files():
    """Generate all Terraform plans for the clusters in variables.json.

    Renders the 'cluster_template' Jinja template once per cluster defined in
    the module-level CONFIG and writes terraform/<cluster>.tf for each.

    Raises:
        InvalidClusterName: if a cluster is named 'main' (reserved).
    """
    LOGGER_CLI.info('Generating Terraform files')
    env = Environment(loader=PackageLoader('terraform', 'templates'))
    template = env.get_template('cluster_template')

    # Optional mapping of cluster name -> S3 event buckets; may be absent.
    all_buckets = CONFIG.get('s3_event_buckets')
    for cluster in CONFIG['clusters'].keys():
        if cluster == 'main':
            # 'main' is reserved for the base stack; refuse to overwrite it.
            raise InvalidClusterName('Rename cluster main to something else!')

        if all_buckets:
            buckets = all_buckets.get(cluster)
        else:
            buckets = None

        contents = template.render(cluster_name=cluster,
                                   s3_buckets=buckets)
        with open('terraform/{}.tf'.format(cluster), 'w') as tf_file:
            tf_file.write(contents)
5,328,956
def createOneHourCandles(markets, database):
    """Create (or replace) one empty one-hour-candle table per market.

    Each table stores OHLCV rows keyed by (symbol, ttime).

    :param markets: iterable of market names; used verbatim as table names.
    :param database: name of the database to connect to.
    :return: True when all tables have been created.
    """
    # SECURITY: credentials are hard-coded here; they belong in config/env.
    conn = pymysql.connect(host='localhost', user='jan', password='17051982',
                           database=database)
    func_logging = logging.getLogger(
        "bittrex_database." + str(__name__) + ".createOneHourCandles()")
    try:
        for ix in markets:
            with conn.cursor() as cur:
                # NOTE: the table name is interpolated directly; identifiers
                # cannot be bound as SQL parameters, so `markets` must be
                # trusted input.
                # NOTE(review): 'ENGINE=Maria' looks like it may be intended
                # as 'Aria' — confirm against the target server.
                comm = f'CREATE OR REPLACE TABLE `{ix}` (' \
                       f'symbol CHAR(20) NOT NULL, ' \
                       f'ttime FLOAT NOT NULL, ' \
                       f'oopen DOUBLE NOT NULL, ' \
                       f'hhigh DOUBLE NOT NULL, ' \
                       f'llow DOUBLE NOT NULL, ' \
                       f'cclose DOUBLE NOT NULL, ' \
                       f'base_vol DOUBLE NOT NULL, ' \
                       f'quote_vol DOUBLE NOT NULL, ' \
                       f'usd_vol DOUBLE NOT NULL, ' \
                       f'PRIMARY KEY (symbol, ttime)' \
                       f') ENGINE=Maria'
                cur.execute(comm)
            conn.commit()
    finally:
        # Fix: the original leaked the connection; always close it.
        conn.close()
    func_logging.info("One hour candles database have been initiated with empty columns.")
    return True
5,328,957
def test_merge2_sql_semantics_outerjoin_multi_keep_Nonelast():
    """
    Test that merge2 matches the following SQL query:

    select
        f.id as foo_id, f.col1 as foo_col1, f.col2 as foo_col2, f.team_name as foo_teamname,
        b.id as bar_id, b.col1 as bar_col1, b.col2 as bar_col2, b.strcol as bar_strcol
    from sql_semantics.foo as f
    full outer join
    (
        select * from sql_semantics.bar
        inner join
        (
            select distinct
                LAST_VALUE(id) over w as firstlast_row_id
            from sql_semantics.bar
            window w as (
                partition by bar.col1, bar.col2
                order by bar.id
                rows between unbounded preceding and unbounded following
                )
        ) as bar_ids
        on bar.id = bar_ids.firstlast_row_id
    ) as b
    on f.col1 = b.col1 and f.col2 = b.col2
    order by f.id, b.id asc;
    """
    foo, bar = TestDataset.sql_semantics2()

    # keep=(None, 'last'): keep every left row, but only the last right row
    # per (col1, col2) group — mirrors the LAST_VALUE window in the SQL above.
    result = rt.merge2(
        foo,
        bar,
        on=[('col1', 'col1'), ('col2', 'col2')],
        how='outer',
        suffixes=('_foo', '_bar'),
        keep=(None, 'last'),
        indicator=True,
    )
    assert result.get_nrows() == 17

    # Expected joined rows (from running the SQL query above):
    # "foo_id","foo_col1","foo_col2","foo_teamname","bar_id","bar_col1","bar_col2","bar_strcol"
    # 1,5,NULL,"Phillies",NULL,NULL,NULL,NULL
    # 2,5,5,"Eagles",11,5,5,"Arch"
    # 3,8,NULL,"76ers",NULL,NULL,NULL,NULL
    # 4,NULL,1,"Flyers",NULL,NULL,NULL,NULL
    # 5,10,1,"Union",NULL,NULL,NULL,NULL
    # 6,NULL,4,"Wings",NULL,NULL,NULL,NULL
    # 7,-1,22,"Fusion",NULL,NULL,NULL,NULL
    # 8,11,9,"Fight",NULL,NULL,NULL,NULL
    # NULL,NULL,NULL,NULL,1,10,4,"Chestnut"
    # NULL,NULL,NULL,NULL,2,10,NULL,"Pine"
    # NULL,NULL,NULL,NULL,3,8,NULL,"Walnut"
    # NULL,NULL,NULL,NULL,4,NULL,3,"Locust"
    # NULL,NULL,NULL,NULL,6,NULL,NULL,"Spruce"
    # NULL,NULL,NULL,NULL,7,NULL,1,"Cypress"
    # NULL,NULL,NULL,NULL,9,5,NULL,"Sansom"
    # NULL,NULL,NULL,NULL,10,14,9,"Market"
    # NULL,NULL,NULL,NULL,12,-15,13,"Vine"

    # `inv` is the int32 sentinel standing in for SQL NULL.
    inv = rt.int32.inv

    # Intersection cols (the 'on' cols)
    assert_array_equal(result.col1, rt.FA([5, 5, 8, inv, 10, inv, -1, 11, 10, 10, 8, inv, inv, inv, 5, 14, -15], dtype=np.int32))
    assert_array_equal(result.col2, rt.FA([inv, 5, inv, 1, 1, 4, 22, 9, 4, inv, inv, 3, inv, 1, inv, 9, 13], dtype=np.int32))

    # Cols from the left Dataset.
    assert_array_equal(result.id_foo, rt.FA([1, 2, 3, 4, 5, 6, 7, 8, inv, inv, inv, inv, inv, inv, inv, inv, inv], dtype=np.int32))
    assert_array_equal(
        result.team_name,
        rt.FA([b'Phillies', b'Eagles', b'76ers', b'Flyers', b'Union', b'Wings', b'Fusion', b'Fight', b'', b'', b'', b'', b'', b'', b'', b'', b'']))

    # Cols from the right Dataset.
    assert_array_equal(
        result.id_bar,
        rt.FA([inv, 11, inv, inv, inv, inv, inv, inv, 1, 2, 3, 4, 6, 7, 9, 10, 12], dtype=np.int32)
    )
    assert_array_equal(
        result.strcol,
        rt.FA([b'', b'Arch', b'', b'', b'', b'', b'', b'', b'Chestnut', b'Pine', b'Walnut', b'Locust', b'Spruce', b'Cypress', b'Sansom', b'Market', b'Vine']))
5,328,958
def coaddspectra(splist,plotsp=True,outf=None,sn_smooth_npix=10):
    """ Coadd spectra

    Parameters
    ----------
    splist : list of XSpectrum1D objects
        List of spectra to coadd
    plotsp : bool
        If True, plot the coadded spectrum
    outf : str
        Output file
    sn_smooth_npix : float
        Parameter in coadd1d.combspec function that defines number of pixels
        to median filter by when computing S/N used to decide how to scale and weight spectra

    Returns
    -------
    sp : XSpectrum1D
        A spectrum that represents coadded spectra from the splist list

    """
    # Collect per-spectrum arrays; combspec wants wavelength, flux, inverse
    # variance and a good-pixel mask per input spectrum.
    waves = []
    fluxes = []
    ivars = []
    masks = []
    for isp in splist:
        waves.append(isp.wavelength)
        fluxes.append(isp.flux)
        # Inverse variance from the 1-sigma errors. NOTE: sig == 0 pixels
        # divide by zero here; those pixels are masked out just below.
        ivars.append(1. / (isp.sig) ** 2.)
        imask = np.repeat(True, len(isp.flux))
        # Mask pixels with neither flux nor error (no data).
        j = np.where((isp.flux == 0) & (isp.sig == 0))[0]
        imask[j] = False
        masks.append(imask)

    # combspec expects shape (npix, nspec): pixels along axis 0.
    waves = np.ndarray.transpose(np.asarray(waves))
    fluxes = np.ndarray.transpose(np.asarray(fluxes))
    ivars = np.ndarray.transpose(np.asarray(ivars))
    masks = np.ndarray.transpose(np.asarray(masks))

    wave_stack, flux_stack, ivar_stack, mask_stack = coadd1d.combspec(
        waves, fluxes, ivars, masks, sn_smooth_npix,
        show=plotsp)

    # Drop padding pixels (wavelength == 0) from the stacked output.
    ii = np.where(wave_stack > 0)[0]
    coadded_waves = wave_stack[ii]
    coadded_fluxes = flux_stack[ii]
    # Convert stacked inverse variance back to 1-sigma errors.
    coadded_sigs = 1 / (np.sqrt(ivar_stack[ii]))

    # write and return the spectrum
    sp = xspec.XSpectrum1D(coadded_waves, coadded_fluxes, coadded_sigs)
    if outf is not None:
        sp.write_to_fits(outf)
    return sp
5,328,959
def get_rel_sim(relation, question, dataset):
    """Return the best cosine similarity between a relation and the question.

    Embeds every n-gram of the question and the relation with averaged
    word2vec vectors, then returns the maximum cosine similarity if it
    exceeds 0.5, otherwise 0.0.

    :param relation: relation phrase to score
    :param question: question text to extract n-grams from
    :param dataset: dataset providing the word2vec embeddings
    :return: float similarity score in (0.5, 1.0] or 0.0
    """
    ngram_vectors = [get_avg_word2vec(ngram, dataset)
                     for ngram in generate_ngrams(question)]
    relation_vector = get_avg_word2vec(relation, dataset)
    scores = [cosine_similarity(relation_vector, vec)[0][0]
              for vec in ngram_vectors]
    if not scores:
        return 0.0
    best = np.max(scores)
    return best if best > 0.5 else 0.0
5,328,960
def mk_request(bits, cn):
    """
    Create a X509 request with the given number of bits in they key.

    Args:
      bits -- number of RSA key bits
      cn -- common name in the request

    Returns a X509 request and the private key (EVP)
    """
    pk = EVP.PKey()
    x = X509.Request()
    # 65537 is the standard RSA public exponent; the lambda silences the
    # keygen progress callback.
    rsa = RSA.gen_key(bits, 65537, lambda: None)
    pk.assign_rsa(rsa)
    x.set_pubkey(pk)
    # Subject fields come from the [ca] section of the module config,
    # except CN which is caller-supplied.
    name = x.get_subject()
    name.C = config.get('ca', 'cert_country')
    name.CN = cn
    name.ST = config.get('ca', 'cert_state')
    name.L = config.get('ca', 'cert_locality')
    name.O = config.get('ca', 'cert_organization')
    name.OU = config.get('ca', 'cert_org_unit')
    # Self-sign the CSR with the newly generated key.
    x.sign(pk, 'sha256')
    return x, pk
5,328,961
def nice(name):
    """Generate a human-readable title from an identifier-like string.

    Works for snake_case, camelCase, PascalCase and plain sentences.

    Examples:
        >>> nice("simple_command")
        'Simple Command'
        >>> nice("SimpleCommand")
        'Simple Command'

    Arguments:
        name (str): The string from which generate the nice name.

    Returns:
        str: The generated nice name.
    """
    # Insert a space before every uppercase letter except one starting the
    # string, then treat underscores as word separators and title-case.
    spaced = re.sub(r"(?<!^)([A-Z])", r" \1", name)
    return spaced.replace("_", " ").title()
5,328,962
def get_outputs():
    """Get the available outputs, excluding outputs in the EXCLUDED_OUTPUTS variable.

    Returns a list of (output_node, workspaces) pairs for every output node
    in the i3 tree that has at least one workspace.
    """
    results = []
    for node in connection.get_tree():
        if node.type != "output" or node.name in EXCLUDED_OUTPUTS:
            continue
        # nodes[1] is the output's "content" container holding workspaces.
        workspaces = node.nodes[1].nodes
        if workspaces:
            results.append((node, workspaces))
    return results
5,328,963
def set_array_from_itk_image(dataset, itk_image):
    """Set dataset array from an ITK image.

    Converts the ITK image buffer to a NumPy array and installs it as the
    dataset's array via utils.set_array.

    :param dataset: dataset whose array is replaced.
    :param itk_image: ITK image providing the pixel data.
    """
    # TODO: use itk.ImageToVTKImageFilter (VTKGlue) to export without a
    # copy once the array-lifetime issue is sorted out.
    import itk
    from . import utils

    itk_output_image_type = type(itk_image)
    # PyBuffer returns a view over ITK-owned memory; copy so the data
    # survives after the ITK image is garbage collected.
    result = itk.PyBuffer[
        itk_output_image_type].GetArrayFromImage(itk_image)
    result = result.copy()
    # ITK buffers are C-ordered, hence isFortran=False.
    utils.set_array(dataset, result, isFortran=False)
5,328,964
def get_user_owner_mailboxes_tuples(user):
    """ Return owned mailboxes of a user as tuple """
    # NOTE: despite the docstring, this returns a *generator* of
    # (id, email_address) tuples (lazily evaluated), not a materialized
    # tuple — fine for iteration or Django-style choices.
    return ((owned_mailbox.id, owned_mailbox.email_address)
            for owned_mailbox in get_user_owner_mailboxes_query(user))
5,328,965
def modinv(a, m):
    """Return the modular multiplicative inverse of `a` modulo `m`.

    :param a: integer to invert.
    :param m: positive modulus.
    :return: x in [0, m) such that (a * x) % m == 1.
    :raises ValueError: if gcd(a, m) != 1, i.e. no inverse exists.
        (ValueError is a subclass of the generic Exception previously
        raised, so existing callers remain compatible.)
    """
    a %= m
    # Iterative extended Euclidean algorithm (no helper dependency):
    # maintains old_x such that old_r == a * old_x (mod m).
    old_r, r = a, m
    old_x, x = 1, 0
    while r:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_x, x = x, old_x - q * x
    if old_r != 1:
        raise ValueError('modular inverse does not exist')
    return old_x % m
5,328,966
def get_ip():
    """Get the ip of the host computer.

    Returns the IP of the interface used to reach the internet, falling
    back to 127.0.0.1 when no route can be determined.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # A UDP connect sends no packets; it only selects the outbound
        # interface, whose address we then read back.
        probe.connect(('1.1.1.1', 1))
        return probe.getsockname()[0]
    except Exception:
        return '127.0.0.1'
    finally:
        probe.close()
5,328,967
def available_structure_info():
    """
    Lists available attributes for :func:`abagen.mouse.get_structure_info`
    """
    # Return a copy so callers cannot mutate the module-level constant.
    return _STRUCTURE_ATTRIBUTES.copy()
5,328,968
def centos(function):
    """Decorator to set the Linux distribution to CentOS 7.

    Before invoking the wrapped callable, forces hpccm's global config to
    report CentOS at version 7.0.
    """
    from functools import wraps

    # Fix: without functools.wraps the decorated function lost its
    # __name__/__doc__, which breaks test discovery and introspection.
    @wraps(function)
    def wrapper(*args, **kwargs):
        hpccm.config.g_linux_distro = linux_distro.CENTOS
        hpccm.config.g_linux_version = StrictVersion('7.0')
        return function(*args, **kwargs)
    return wrapper
5,328,969
def Normalized2(p):
    """Return vector p normalized by dividing by its length.

    Return (0.0, 1.0) if the result is undefined (zero-length input).

    :param p: pair (x, y) of numbers.
    :return: unit-length tuple, or (0.0, 1.0) for (near-)zero input.
    """
    (x, y) = p
    sqrlen = x * x + y * y
    # Guard against division by (almost) zero. Past this check sqrt of a
    # positive finite-or-inf value cannot raise, so the original bare
    # `except:` (which also swallowed KeyboardInterrupt) was removed.
    if sqrlen < 1e-100:
        return (0.0, 1.0)
    d = sqrt(sqrlen)
    return (x / d, y / d)
5,328,970
def test_make_struct_type(doctest):
    """
    > (define-values (struct:a make-a a? a-ref a-set!) (make-struct-type 'a #f 2 1 'uninitialized))
    > (define an-a (make-a 'x 'y))
    > (a-ref an-a 1)
    'y
    > (a-ref an-a 2)
    'uninitialized
    > (define a-first (make-struct-field-accessor a-ref 0))
    > (a-first an-a)
    'x
    > (define-values (struct:b make-b b? b-ref b-set!) (make-struct-type 'b struct:a 1 2 'b-uninitialized))
    > (define a-b (make-b 'x 'y 'z))
    > (a-ref a-b 1)
    'y
    > (a-ref a-b 2)
    'uninitialized
    > (b-ref a-b 0)
    'z
    > (b-ref a-b 1)
    'b-uninitialized
    > (b-ref a-b 2)
    'b-uninitialized
    ;;;;;;;;;;;;;;;;
    > (define p1 #s(p a b c))
    > (define-values (struct:p make-p p? p-ref p-set!)
        (make-struct-type 'p #f 3 0 #f null 'prefab #f '(0 1 2)))
    > (p? p1)
    #t
    > (p-ref p1 0)
    'a
    > (make-p 'x 'y 'z)
    '#s(p x y z)
    """
    # The `doctest` fixture executes the Racket REPL session embedded in the
    # docstring above (exercising make-struct-type with supertypes, auto
    # fields and prefab structs); the assert only confirms the fixture ran.
    # Do not edit the docstring — it IS the test payload.
    assert doctest
5,328,971
def findwskeyword(keyword, sol):
    """Return the value assigned to `keyword` in the wavelength-solution text.

    Locates the first occurrence of `keyword` in `sol`, takes the rest of
    that line, and returns the stripped text after the '=' sign.

    :param keyword: keyword to look up.
    :param sol: wavelength-solution text containing 'keyword=value' lines.
    :raises ValueError: if the keyword or a trailing newline is absent.
    """
    start = sol.index(keyword)
    end = start + sol[start:].index('\n')
    line = sol[start:end]
    return line.split('=')[1].strip()
5,328,972
def test_dict_format_line() -> None:
    """Test formatting dictionary details."""
    # Build a MatchError whose `details` is a dict rather than a string;
    # the type: ignore acknowledges the intentional type mismatch.
    match = MatchError(
        message="xyz",
        linenumber=1,
        details={"hello": "world"},  # type: ignore
        filename="filename.yml",
        rule=rule,
    )
    # Smoke test: formatting must not raise on dict-valued details.
    formatter.format(match)
5,328,973
def algorithm(array: numpy.array, start: Tuple[int, int], end: Tuple[int, int],
              heuristic: Callable = manhattan) -> Union[List, None]:
    """
    Returns a list of all points, for the path between `start` and `end`
    (A* search over a grid of Node instances).

    :param array: a numpy array of Node instances
    :param start: a tuple (or list) of points corresponding to where to start on array
    :param end: like start, but for the end
    :param heuristic: a function that represents the heuristic (default: manhattan heuristic)

    Example:
        >>> test = numpy.array(
        [[0, 0, 0, 0, 0, 1],
        [0, 1, 1, 1, 0, 1],
        [0, 1, 0, 0, 0, 1],
        [0, 1, 0, 1, 1, 1],
        [0, 0, 0, 0, 1, 0],
        [1, 1, 1, 0, 0, 0]]
        )
        >>> print(algorithm(test, (0, 0), (5, 5)))
    """
    # Convert the raw 0/1 grid into Node instances.
    array = array_to_class(array)
    actual_start = array[start[0], start[1]]
    actual_end = array[end[0], end[1]]

    # `count` breaks ties in the priority queue so insertion order wins
    # when f-scores are equal (and avoids comparing Node objects).
    count = 0
    open_set = PriorityQueue()
    open_set.put((0, count, actual_start))
    came_from = {}

    # g = cost from start; f = g + heuristic estimate to end.
    g_score = {node: inf for row in array for node in row}
    f_score = {node: inf for row in array for node in row}
    g_score[actual_start] = 0
    f_score[actual_start] = heuristic(start, end)

    # Mirror of the queue contents for O(1) membership tests.
    open_set_hash = {actual_start}

    while not open_set.empty():
        current = open_set.get()[2]
        current_pos = current.pos
        open_set_hash.remove(current)

        if current == actual_end:
            return reconstruct_path(came_from, start, end)

        for neighbor in get_neighbors(array, current_pos):
            neighbor_instance = array[neighbor[0], neighbor[1]]
            # Uniform edge cost of 1 per grid step.
            temp_g_score = g_score[current] + 1

            if temp_g_score < g_score[neighbor_instance]:
                # Found a cheaper route to this neighbor: record it.
                came_from[neighbor_instance] = current
                g_score[neighbor_instance] = temp_g_score
                f_score[neighbor_instance] = temp_g_score + heuristic(neighbor, end)
                if neighbor_instance not in open_set_hash:
                    count += 1
                    open_set.put((f_score[neighbor_instance], count, neighbor_instance))
                    open_set_hash.add(neighbor_instance)

    # Open set exhausted without reaching the end: no path exists.
    return None
5,328,974
def execute_psql(temp_sql_file_path, source_path, download_job):
    """Executes a single PSQL command within its own Subprocess.

    Args:
        temp_sql_file_path: path to the file holding the SQL (usually a \\COPY).
        source_path: file path psql writes its output to (-o).
        download_job: job record used for logging and timeout policy; may be None.

    Raises:
        Exception: re-raised after logging when psql fails for any reason
            other than a CalledProcessError.
    """
    download_sql = Path(temp_sql_file_path).read_text()
    if download_sql.startswith("\\COPY"):
        # Trace library parses the SQL, but cannot understand the psql-specific \COPY command. Use standard COPY here.
        download_sql = download_sql[1:]

    # Stack 3 context managers: (1) psql code, (2) Download replica query, (3) (same) Postgres query
    with SubprocessTrace(
        name=f"job.{JOB_TYPE}.download.psql",
        service="bulk-download",
        resource=download_sql,
        span_type=SpanTypes.SQL,
        source_path=source_path,
    ), tracer.trace(
        name="postgres.query", service="db_downloaddb", resource=download_sql, span_type=SpanTypes.SQL
    ), tracer.trace(
        name="postgres.query", service="postgres", resource=download_sql, span_type=SpanTypes.SQL
    ):
        try:
            log_time = time.perf_counter()
            temp_env = os.environ.copy()
            if download_job and not download_job.monthly_download:
                # Since terminating the process isn't guaranteed to end the DB statement, add timeout to client connection
                temp_env["PGOPTIONS"] = f"--statement-timeout={settings.DOWNLOAD_DB_TIMEOUT_IN_HOURS}h"

            # Pipe the SQL file into psql; ON_ERROR_STOP makes psql exit
            # non-zero on the first error so failures are detected.
            cat_command = subprocess.Popen(["cat", temp_sql_file_path], stdout=subprocess.PIPE)
            subprocess.check_output(
                ["psql", "-q", "-o", source_path, retrieve_db_string(), "-v", "ON_ERROR_STOP=1"],
                stdin=cat_command.stdout,
                stderr=subprocess.STDOUT,
                env=temp_env,
            )

            duration = time.perf_counter() - log_time
            write_to_log(
                message=f"Wrote {os.path.basename(source_path)}, took {duration:.4f} seconds", download_job=download_job
            )
        except subprocess.CalledProcessError as e:
            # psql exited non-zero: log its combined output. NOTE: this
            # branch does NOT re-raise — the failure is logged and swallowed.
            logger.error(f"PSQL Error: {e.output.decode()}")
        except Exception as e:
            if not settings.IS_LOCAL:
                # Not logging the command as it can contain the database connection string
                e.cmd = "[redacted psql command]"
            logger.error(e)
            sql = subprocess.check_output(["cat", temp_sql_file_path]).decode()
            logger.error(f"Faulty SQL: {sql}")
            raise e
5,328,975
def MFString(string_list):
    """
    input a list of unicode strings
    output: a unicode string formed by encoding, enclosing each item in double quotes, and concatenating

    27 Nov 2016:
    The complete case is as yet unimplemented, to avoid sending bad X3D into the world
    will instead fail with a Exception if any of the elements of list contain a XML special case in '"&<>
    """
    from . import logger
    # The five XML/X3D-sensitive characters we refuse to escape (for now).
    special_characters = u"\'\"&<>"
    assert( len(special_characters) == 5)   # check
    unicode_type = type(u"")
    for item in string_list:
        if not type(item) is unicode_type:
            # Non-unicode items are tolerated but flagged.
            logger.warn("Non unicode entry for MFString: %s" % (repr(item),))
        for c in special_characters:
            if c in item:
                # Deliberate hard failure: escaping is not implemented, and
                # emitting the raw character would produce invalid X3D.
                raise ValueError("Unimplemented case: special character in MFString item: %s" % (repr(item),))
    # e.g. ["a", "b"] -> '"a" "b"'
    return " ".join([u'"%s"' % item for item in string_list])
5,328,976
def re_allocate_memory(ptr: VoidPtr, size: int)-> VoidPtr:
    """Internal memory reallocation (thin wrapper over raylib MemRealloc).

    ptr: The pointer which is pointing the previously allocated memory block
        by allocate_memory.
    size: The new size of memory block.

    Returns the (possibly moved) pointer to the reallocated block.
    """
    return _rl.MemRealloc(
        ptr,
        _to_int(size)
    )
5,328,977
def get_task_manager(setup_file, **kwargs):
    """
    Create a task manager of a correct type.

    Parameters
    ----------
    setup_file : string
        File name of the JSON setup file; must contain a 'manager' key
        with value 'slurm', 'sge' or 'local' (case-insensitive).
    kwargs : dict
        Additional kwargs forwarded to the task manager constructor.

    Returns
    -------
    manager : TaskManager
        Created task manager.

    Raises
    ------
    ValueError
        If the manager name is not recognized.
    """
    # Fix: use a context manager so the file handle is closed promptly
    # (the original leaked it via json.load(open(...))).
    with open(setup_file) as f:
        setup = json.load(f)
    manager = setup['manager'].lower()
    if manager == 'slurm':
        return SlurmTaskManager(setup_file, **kwargs)
    elif manager == 'sge':
        return SgeTaskManager(setup_file, **kwargs)
    elif manager == 'local':
        return LocalTaskManager(setup_file, **kwargs)
    else:
        # Fix: the original passed the name as a second ValueError argument
        # (logging style), leaving '%s' unformatted in the message.
        raise ValueError('Unknown task manager: %s' % manager)
5,328,978
def format_time(time):
    """Serialize a datetime into the UTC timestamp format used in SAML2 XMLs.

    :param time: datetime instance (assumed to already represent UTC —
        the 'Z' suffix is appended unconditionally; confirm at call sites).
    :return: string like '2020-01-02T03:04:05Z'.
    """
    return "{:%Y-%m-%dT%H:%M:%SZ}".format(time)
5,328,979
def get_delivery_voucher_discount(voucher, total_price, delivery_price):
    """Calculate discount value for a voucher of delivery type.

    Validates the order total against the voucher's minimum-spend rule
    (which may raise), then returns the discount on the delivery price.
    """
    voucher.validate_min_amount_spent(total_price)
    discount = voucher.get_discount_amount_for(delivery_price)
    return discount
5,328,980
def list_docker_repos(ctx):
    """
    List all git repositories that has docker compose file in the root.
    The included repositories exists in direct sub-directories of the current
    directory (or from current directory, if none exists in sub directories).

    :param ctx: (implicit fabric context param).
    """
    current_directory = os.getcwd()
    git_repo_dirs = _get_repo_paths_containing_dockers(current_directory)
    if not git_repo_dirs:
        # Fall back: maybe the current directory itself is the repo.
        if _has_docker_compose_file(current_directory):
            print(_get_git_repo_str(
                current_directory, Repo(current_directory)))
            print('(repo is in current directory)')
        else:
            print('no GIT repos with dockers found from "{}"'.format(
                current_directory))
    else:
        # Print one summary line per discovered repo.
        _run_action_for_each_repo(git_repo_dirs, lambda repo_dir, repo: print(
            _get_git_repo_str(repo_dir, repo)))
5,328,981
def has_admin_access(request):
    # type: (Request) -> bool
    """
    Verifies if the authenticated user doing the request has administrative access.

    .. note::
        Any request view that does not explicitly override ``permission`` by another value than the default
        :envvar:`MAGPIE_ADMIN_PERMISSION` will already automatically guarantee that the request user is an
        administrator since HTTP [403] Forbidden would have been otherwise replied. This method is indented
        for operations that are more permissive and require conditional validation of administrator access.

    .. seealso::
        Definitions in :class:`magpie.models.RootFactory` and :class:`magpie.models.UserFactory` define
        conditional principals and :term:`ACL` based on the request.
    """
    admin_perm = get_constant("MAGPIE_ADMIN_PERMISSION", request)
    # Query pyramid's registered authorization policy and evaluate the admin
    # permission against the root context with the request's principals.
    authz_policy = request.registry.queryUtility(IAuthorizationPolicy)  # noqa
    principals = get_principals(request)
    result = authz_policy.permits(models.RootFactory(request), principals, admin_perm)
    # permits() returns an ACLAllowed/ACLDenied result object, not a bool.
    return isinstance(result, ACLAllowed)
5,328,982
def safe_remove(path: str) -> bool:
    """Removes a file or directory

    This will remove a file if it exists, and will
    remove a directory if the directory is empty.

    Args:
       path: The path to remove

    Returns:
       True if `path` was removed or did not exist, False
       if `path` was a non empty directory.

    Raises:
       UtilError: In the case of unexpected system call failures
    """
    try:
        # lstat so a symlink to a directory is unlinked, not descended into.
        if S_ISDIR(os.lstat(path).st_mode):
            os.rmdir(path)
        else:
            os.unlink(path)
    except OSError as e:
        if e.errno == errno.ENOENT:
            # Already gone: treat as success.
            return True
        if e.errno == errno.ENOTEMPTY:
            # Non-empty directory: caller decides what to do.
            return False
        raise UtilError("Failed to remove '{}': {}".format(path, e))
    return True
5,328,983
def test_can_inspect_last_request_with_ssl():
    """HTTPretty.last_request is recorded even when mocking 'https' (SSL)"""
    # Register a canned response for the mocked HTTPS endpoint.
    HTTPretty.register_uri(HTTPretty.POST, "https://secure.github.com/",
                           body='{"repositories": ["HTTPretty", "lettuce"]}')

    response = requests.post(
        'https://secure.github.com',
        '{"username": "gabrielfalcao"}',
        headers={
            'content-type': 'text/json',
        },
    )

    # The mock must have captured method, body and headers of the request.
    expect(HTTPretty.last_request.method).to.equal('POST')
    expect(HTTPretty.last_request.body).to.equal(
        b'{"username": "gabrielfalcao"}',
    )
    expect(HTTPretty.last_request.headers['content-type']).to.equal(
        'text/json',
    )
    # And the client must have received the registered canned body.
    expect(response.json()).to.equal({"repositories": ["HTTPretty", "lettuce"]})
5,328,984
def model_selection(modelname, num_out_classes=2, pretrain_path=None):
    """Build a TransferModel for the requested backbone architecture.

    :param modelname: backbone choice forwarded to TransferModel as
        ``modelchoice``.
    :param num_out_classes: number of output classes (default 2).
    :param pretrain_path: optional path to pretrained weights, or None.
    :return: the constructed TransferModel instance.
    """
    return TransferModel(modelchoice=modelname, num_out_classes=num_out_classes, pretrain_path=pretrain_path)
5,328,985
def stack1(x, filters, blocks, stride1=2, dilation=1, name=None):
    """A set of stacked residual blocks.

    The first block may downsample via `stride1` and creates the shortcut
    projection; the remaining `blocks - 1` blocks keep identity shortcuts.

    # Arguments
        x: input tensor.
        filters: integer, filters of the bottleneck layer in a block.
        blocks: integer, blocks in the stacked blocks.
        stride1: default 2, stride of the first layer in the first block.
        dilation: dilation rate for blocks after the first.
        name: string, stack label.

    # Returns
        Output tensor for the stacked blocks.
    """
    out = block1(x, filters, stride=stride1, name=name + '_block1')
    block_index = 2
    while block_index <= blocks:
        out = block1(out, filters, conv_shortcut=False, dilation=dilation,
                     name=name + '_block' + str(block_index))
        block_index += 1
    return out
5,328,986
def check_length(df_array, path_array, iteration):
    """Alert when the shift count exceeds the number of data rows.

    Input
    ------
    df_array : list of loaded data frames
    path_array : file paths of the corresponding source data files
    iteration : number of shifts to perform

    Note
    ------
    When any data frame has fewer rows than `iteration`, this prints the
    offending file path plus a Japanese message and terminates the process
    via exit() — it does not raise an exception.
    """
    for df, path in zip(df_array, path_array):
        if len(df) < iteration:
            print(path)
            # (message: "the shift count exceeds the row count of this file")
            print("上記のファイルのデータ数より、ずらす回数が多くなっています。")
            exit()
5,328,987
def ci(config: "Config") -> None:  # pylint: disable=invalid-name
    """Pytest profile applied when running in CI.

    Disables new/failed-first ordering, bumps verbosity and clears the cache
    so CI runs are deterministic.
    """
    opts = config.option
    opts.newfirst = False
    opts.failedfirst = False
    opts.verbose = 1
    opts.cacheclear = True
5,328,988
def _CreateDynamicDisplayAdSettings(media_service, opener):
    """Creates settings for dynamic display ad.

    Args:
        media_service: a SudsServiceProxy instance for AdWords's MediaService.
        opener: an OpenerDirector instance.

    Returns:
        The dynamic display ad settings.
    """
    uploaded_image = _CreateImage(media_service, opener, 'https://goo.gl/dEvQeF')

    landscape_logo = {
        'type': 'IMAGE',
        'mediaId': uploaded_image['mediaId'],
        'xsi_type': 'Image'
    }

    return {
        'landscapeLogoImage': landscape_logo,
        'pricePrefix': 'as low as',
        'promoText': 'Free shipping!',
        'xsi_type': 'DynamicSettings',
    }
5,328,989
def contacts_per_person_normal_00x30():
    """
    Real Name: b'contacts per person normal 00x30'
    Original Eqn: b'10'
    Units: b'contact/Day'
    Limits: (None, None)
    Type: constant

    b''
    """
    # Auto-generated PySD model constant: edit the source model, not this
    # function, to change the value.
    return 10
5,328,990
def init(ctx, project_type, project_name):
    """
    生成接口测试项目

    使用方法:

    $ pithy-cli init # 生成接口测试项目

    """
    # Generates an API-test project scaffold of the requested type.
    # NOTE: the docstring above doubles as the CLI help text shown to users
    # (click renders it), so it is intentionally left in Chinese.
    # Argument order is swapped on purpose: generate_project(name, type).
    generate_project(project_name, project_type)
5,328,991
def create_change_set(StackName=None, TemplateBody=None, TemplateURL=None,
                      UsePreviousTemplate=None, Parameters=None,
                      Capabilities=None, ResourceTypes=None, RoleARN=None,
                      NotificationARNs=None, Tags=None, ChangeSetName=None,
                      ClientToken=None, Description=None, ChangeSetType=None):
    """Creates an AWS CloudFormation change set (documentation stub).

    A change set lists the changes that would be applied to a stack so they
    can be reviewed before execution. Use ``ChangeSetType='CREATE'`` for a
    stack that does not exist yet, or ``'UPDATE'`` for an existing stack;
    check progress with DescribeChangeSet and apply it with ExecuteChangeSet.

    :param StackName: [REQUIRED] Name or unique ID of the target stack.
    :param TemplateBody: Revised template body (specify only one of
        TemplateBody / TemplateURL).
    :param TemplateURL: S3 location of the revised template.
    :param UsePreviousTemplate: Reuse the template already associated with
        the stack.
    :param Parameters: List of ``{'ParameterKey', 'ParameterValue',
        'UsePreviousValue'}`` dicts.
    :param Capabilities: 'CAPABILITY_IAM' / 'CAPABILITY_NAMED_IAM'
        acknowledgements required when the template touches IAM resources.
    :param ResourceTypes: Resource types the caller has permission to use.
    :param RoleARN: IAM role AWS CloudFormation assumes when executing the
        change set.
    :param NotificationARNs: SNS topic ARNs to associate with the stack.
    :param Tags: Up to 10 ``{'Key', 'Value'}`` pairs propagated to resources.
    :param ChangeSetName: [REQUIRED] Unique name for the change set.
    :param ClientToken: Idempotency token for retried requests.
    :param Description: Free-form description of the change set.
    :param ChangeSetType: 'CREATE' or 'UPDATE' (default 'UPDATE').
    :rtype: dict
    :return: ``{'Id': 'string', 'StackId': 'string'}``
    """
    pass
def _get_scripts_shell(script_file):
    # type: (pathlib.Path) -> str
    """
    Detect which supported shell a hook script's shebang refers to.

    Only the first line of the file is read and compared case-insensitively
    against the known shells (Bash, Fish, Zsh).

    :param script_file: path of the script to inspect
    :return: name of the matched shell
    :raises exceptions.UnknownShell: when the shebang matches no supported shell
    """
    with script_file.open('r') as handle:
        first_line = handle.readline().lower()

    matched = next((candidate for candidate in SUPPORTED_SHELLS
                    if candidate in first_line), None)
    if matched is not None:
        return matched

    raise exceptions.UnknownShell('It seems that the currently used post-commit '
                                  'hook uses shebang that is not known to Gitrack: ' + first_line)
5,328,993
def _process_worker(call_queue, result_queue):
    """Evaluates calls from call_queue and places the results in result_queue.

    This worker is run in a separate process.

    Args:
        call_queue: A multiprocessing.Queue of _CallItems that will be read and
            evaluated by the worker.
        result_queue: A multiprocessing.Queue of _ResultItems that will written
            to by the worker.
    """
    # iter() with a sentinel keeps pulling work until the poison pill (None)
    # arrives on the queue.
    for work_item in iter(lambda: call_queue.get(block=True), None):
        try:
            outcome = work_item.fn(*work_item.args, **work_item.kwargs)
        except BaseException as exc:
            wrapped = _ExceptionWithTraceback(exc, exc.__traceback__)
            result_queue.put(_ResultItem(work_item.work_id, exception=wrapped))
            logger.exception(exc)  # mainly to surface the error directly
        else:
            result_queue.put(_ResultItem(work_item.work_id, result=outcome))
    # Sentinel received: report our pid to wake up the queue-management thread.
    result_queue.put(os.getpid())
    return
5,328,994
def is_cmd_tool(name):
    """Report whether `name` is on PATH and marked as executable.

    From: https://stackoverflow.com/a/34177358
    """
    import shutil

    return shutil.which(name) is not None
5,328,995
def load_model(model: Model, language=()):
    """Load a pickled geo model and index it by lowercased place name.

    :param model: model descriptor; ``model.path`` points at the pickle file
    :param language: iterable of language codes whose alternative names should
        be included; an empty value (the default) includes every language
    :return: dict mapping lowercased name -> set of
        (name, latitude, longitude, country, population) tuples
    """
    log.info("Reading geomodel: %s", model)
    with open(model.path, "rb") as infile:
        # NOTE(review): pickle.load is unsafe on untrusted input -- this
        # assumes model files come from a trusted source.
        m = pickle.load(infile)
    result = defaultdict(set)
    # Iterate the values directly: the geonameid keys are unused, and
    # materializing list(m.items()) was an unnecessary copy.
    for location in m.values():
        # Build the shared entry tuple once instead of repeating the literal.
        entry = (location["name"], location["latitude"], location["longitude"],
                 location["country"], location["population"])
        result[location["name"].lower()].add(entry)
        for lang, altnames in location["alternative_names"].items():
            if lang in language or not language:
                for altname in altnames:
                    result[altname.lower()].add(entry)
    log.info("Read %d geographical names", len(result))
    return result
5,328,996
def saml_metadata_generator(sp, validated=True, privacypolicy=False, tree=None, disable_entity_extensions=False):
    """
    Build SAML metadata for a single service provider.

    sp: ServiceProvider object
    validated: if False, unvalidated metadata is used
    privacypolicy: fill empty privacypolicy URLs with default value
    tree: attach the EntityDescriptor to this root if given; otherwise a
          standalone element is created
    disable_entity_extensions: skip the entity Extensions element entirely

    Returns *tree* when one was supplied (unchanged if the entity could not
    be resolved), otherwise the newly built EntityDescriptor element.
    """
    entity, history, validation_date = get_entity(sp, validated)
    if not entity:
        return tree

    nsmap = {"ds": 'http://www.w3.org/2000/09/xmldsig#',
             "mdattr": 'urn:oasis:names:tc:SAML:metadata:attribute',
             "mdui": 'urn:oasis:names:tc:SAML:metadata:ui',
             "saml": 'urn:oasis:names:tc:SAML:2.0:assertion',
             "xmlns": 'urn:oasis:names:tc:SAML:2.0:metadata',
             "xsd": 'http://www.w3.org/2001/XMLSchema',
             "xsi": 'http://www.w3.org/2001/XMLSchema-instance',
             }
    if tree is None:
        descriptor = etree.Element("EntityDescriptor",
                                   entityID=entity.entity_id,
                                   nsmap=nsmap)
    else:
        descriptor = etree.SubElement(tree, "EntityDescriptor",
                                      entityID=entity.entity_id)

    # A validated history record takes precedence over the live SP object
    # for the extensions and organization sections.
    source = history if history else sp
    if not disable_entity_extensions:
        metadata_entity_extensions(descriptor, source)
    metadata_spssodescriptor(descriptor, sp, history, validation_date, privacypolicy)
    metadata_contact(descriptor, sp, validation_date)
    metadata_organization(descriptor, source)

    return descriptor if tree is None else tree
5,328,997
def import_from_afd(import_list, vlb_path, working_path, conn):
    """Imports an Armada Fleets Designer list into a Fleet object

    Parses the AFD text export line by line, adding ships, squadrons,
    upgrades and objectives to a freshly created Fleet.

    :param import_list: raw AFD export text
    :param vlb_path: not referenced in this body -- presumably kept for
        interface parity with other importers; verify before removing
    :param working_path: not referenced in this body -- see vlb_path note
    :param conn: sqlite database path used for piece lookups
    :return: (True, Fleet) on success, or (False, last_line) with the line
        being processed when an exception occurred
    """
    f = Fleet("Food", conn=conn)
    start = False
    # Objectives are assigned in fixed order: assault, then defense, then
    # navigation (every objective after the second goes into navigation).
    obj_category = "assault"
    # shipnext = False
    for line in import_list.strip().split("\n"):
        try:
            last_line = line.strip()
            # Quantity prefixes look like "2 x Name"; keep only the name part.
            card_name = line.strip().split(" x ", 1)[-1]
            logging.info(card_name)
            if card_name.startswith("==="):
                # Everything before the first "===" divider is header text.
                start = True
            elif start and len(card_name) > 0:
                if card_name[0] == "·":
                    # Bullet-prefixed lines are upgrades: "· Name (cost)".
                    upgrade, cost = card_name.split("(")
                    upgrade = scrub_piecename(upgrade)
                    cost = cost.split(")")[0]
                    if upgrade in nomenclature_translation:
                        translated = nomenclature_translation[upgrade]
                        logging.info(
                            "[-] Translated {} to {} - AFD.".format(upgrade, translated)
                        )
                        upgrade = translated
                    # (name, cost) pairs disambiguate cards sharing a name.
                    if (upgrade, cost) in ambiguous_names:
                        upgrade_new = ambiguous_names[(upgrade, cost)][0]
                        logging.info(
                            "Ambiguous name {} ({}) translated to {}.".format(
                                upgrade, cost, upgrade_new
                            )
                        )
                        upgrade = upgrade_new
                    # NOTE(review): 's' is only bound after a ship card has
                    # been seen below; an upgrade line appearing before any
                    # ship raises NameError (caught by the outer handler).
                    _ = s.add_upgrade(upgrade)
                elif "(" not in card_name:
                    # No cost in parentheses: treated as an objective card.
                    logging.info("Hit the conditional for {}.".format(card_name))
                    card_name = scrub_piecename(str(card_name))
                    f.add_objective(obj_category, card_name)
                    # TODO: retool the objs to not care about categories... :/
                    if obj_category == "assault":
                        obj_category = "defense"
                    else:
                        obj_category = "navigation"
                else:
                    # Ship or squadron card: "Name (cost)" or "Name (N x cost)".
                    card_name, cost = card_name.split(" (", 1)
                    cost = cost.split(" x ")[-1].split(")")[0]
                    issquadron = False
                    isship = False
                    card_name = scrub_piecename(card_name)
                    try:
                        if card_name in nomenclature_translation:
                            t = nomenclature_translation[card_name]
                            logging.info(
                                "[-] Translated {} to {} - AFD.".format(card_name, t)
                            )
                            card_name = t
                        if (card_name, cost) in ambiguous_names:
                            card_name_new = ambiguous_names[(card_name, cost)][0]
                            logging.info(
                                "Ambiguous name {} ({}) translated to {}.".format(
                                    card_name, cost, card_name_new
                                )
                            )
                            card_name = card_name_new
                        logging.info(
                            "Searching for AFD piece {} in {}".format(
                                scrub_piecename(card_name), str(conn)
                            )
                        )
                        # NOTE(review): squadron lookup wraps the name in
                        # %...% but the ship lookup below only prefixes % --
                        # confirm the asymmetry is intentional.
                        with sqlite3.connect(conn) as connection:
                            issquadron = connection.execute(
                                """SELECT * FROM pieces WHERE piecetype='squadroncard' AND piecename LIKE ?;""",
                                ("%" + scrub_piecename(card_name) + "%",),
                            ).fetchall()
                    except ValueError as err:
                        logging.exception(err)
                    try:
                        logging.info(
                            "Searching for AFD piece {} in {}".format(
                                card_name, str(conn)
                            )
                        )
                        with sqlite3.connect(conn) as connection:
                            isship = connection.execute(
                                """SELECT * FROM pieces WHERE piecetype='shipcard' AND piecename LIKE ?;""",
                                ("%" + card_name,),
                            ).fetchall()
                    except ValueError as err:
                        logging.exception(err)
                    if bool(issquadron):
                        _ = f.add_squadron(card_name)
                    elif bool(isship):
                        # Remember the current ship so later upgrade lines
                        # attach to it.
                        s = f.add_ship(card_name)
                    else:
                        logging.info(
                            "{}{} IS FUCKED UP, YO{}".format(
                                "=" * 40, card_name, "=" * 40
                            )
                        )
        except Exception as err:
            # NOTE(review): deliberately broad -- any failure aborts the
            # import and reports the offending line to the caller.
            logging.exception(err)
            return (False, last_line)
    return (True, f)
5,328,998
def CWPProfileToVersionTuple(url):
    """Convert a CWP profile url to a version tuple

    Args:
      url: for example, gs://chromeos-prebuilt/afdo-job/cwp/chrome/
           R65-3325.65-1519323840.afdo.xz

    Returns:
      A tuple of (milestone, major, minor, timestamp)
    """
    # Substitute a numeric capture group for each of the four version fields.
    # xrange was Python-2 only; range is equivalent here.
    fn_mat = (CWP_CHROME_PROFILE_NAME_PATTERN %
              tuple(r'([0-9]+)' for _ in range(0, 4)))
    # BUG FIX: str.replace returns a new string; the original discarded the
    # result, so literal dots in the pattern matched any character.  The
    # capture groups contain no dots, so escaping after substitution is safe.
    fn_mat = fn_mat.replace('.', '\\.')
    # Return a real tuple as documented (Python-3 map() would return a lazy
    # iterator, not the documented tuple).
    return tuple(
        int(field)
        for field in re.match(fn_mat, os.path.basename(url)).groups())
5,328,999