content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
import argparse
import logging
import sys
import os


def open_output_file(options: argparse.Namespace):
    """Return a writable stream for the requested output plus a closer.

    When ``options.output`` is None the stream is ``sys.stdout`` and the
    closer is a no-op; otherwise the named file is opened for writing and
    the closer closes that file.
    """
    if options.output is None:
        logging.debug("Piping output to stdout.")
        # stdout must not be closed by the caller, hence the no-op closer.
        return sys.stdout, lambda: None
    filepath = os.path.abspath(options.output)
    logging.debug("Piping output to {}.".format(filepath))
    ofile = open(filepath, "w")
    return ofile, ofile.close
1f4317d5b2628318ca7ee6639782f049d2eef7db
30,700
def filter_labels(a, min_size, max_size=None):
    """
    Remove (set to 0) labeled connected components that are too small or too large.
    Note: Operates in-place.
    """
    # Nothing can possibly be filtered out: skip the bincount for speed.
    if min_size == 0 and (max_size is None or max_size > np.prod(a.shape)):
        return a
    flat = a.ravel()
    try:
        component_counts = np.bincount(flat)
    except TypeError:
        # On 32-bit systems, must explicitly convert from uint32 to int
        # (This fix is just for VM testing.)
        component_counts = np.bincount(np.asarray(flat, dtype=int))
    # Boolean mask over label ids: True where the component is out of range.
    out_of_range = component_counts < min_size
    if max_size is not None:
        np.logical_or(out_of_range, component_counts > max_size, out=out_of_range)
    # Fancy-index the mask by the label image to zero bad pixels in place.
    a[out_of_range[a]] = 0
    return a
5754959bd2f404fa0189aee406c08745f236c294
30,701
def mask(bigtiff, profile, out_mask):
    """Mask an image band by band, to limit peak memory use.

    Pixels where ``out_mask == 1`` are replaced with ``profile['nodata']``.
    Four bands are masked when ``profile['count'] == 4``; otherwise only the
    first band is touched (matching the original behaviour — NOTE(review):
    the original docstring said "1 or 3 band", which contradicts the
    count==4 branch; confirm intent).

    Args:
        bigtiff: array of shape (bands, rows, cols); modified in place.
        profile: rasterio-style profile dict providing 'count' and 'nodata'.
        out_mask: 2-D array where 1 marks pixels to blank out.

    Returns:
        The masked ``bigtiff`` array.
    """
    # The four duplicated per-band blocks collapse into one loop; one band
    # copy is resident at a time, preserving the memory-saving intent.
    n_bands = 4 if profile['count'] == 4 else 1
    for b in range(n_bands):
        band = bigtiff[b, :, :]
        band[out_mask == 1] = profile['nodata']
        bigtiff[b, :, :] = band
        del band
    return bigtiff
0e31612da8d80f5fb4d8f35a0664708294e98312
30,702
def quadratic_sum(n: int) -> int:
    """Return the sum of squares 1**2 + 2**2 + ... + n**2.

    Uses the closed-form formula n(n+1)(2n+1)/6 instead of a loop. The
    original shadowed the builtin ``sum`` and reused the parameter ``n``
    as the loop variable; both are avoided here.

    Args:
        n: inclusive upper bound; any n <= 0 yields 0 (empty range).

    Returns:
        The quadratic sum as an int.
    """
    if n <= 0:
        # range(1, n + 1) is empty in the looped version for n <= 0.
        return 0
    return n * (n + 1) * (2 * n + 1) // 6
e47a3ee49888c85cc06c72c428d983885ed7009f
30,703
def dag_rules(rules, required_keys):
    """Validate and normalize DAG settings loaded from an Airflow Variable.

    Ensures every key in ``required_keys`` is present in ``rules`` and
    converts ``rules['start_date']`` to a datetime via ``parse``.

    :raises SettingFieldMissingError: when any required key is absent.
    :raises SettingFieldTypeError: when the start date cannot be parsed.
    :return: the ``rules`` dict, mutated in place.
    """
    for key in required_keys:
        if key not in rules:
            raise SettingFieldMissingError(
                f"Some of the required fields {','.join(required_keys)} are missing in Variable, "
                f"get: {','.join(rules.keys())}")
    try:
        rules["start_date"] = parse(rules["start_date"])
    except Exception as err:
        raise SettingFieldTypeError(f"Error in start date parser: {err}")
    return rules
319096f113ff82c783266d83d8e194badeb7ec7d
30,704
import socket


def ip_address(value):
    """Encode a dotted-quad IPv4 string as an ASN.1 IpAddress TLV."""
    packed = socket.inet_aton(value)
    return write_tv(ASN1_IPADDRESS, packed)
beb56177436f7a67abbd2627ebf681eef3ed6352
30,705
def _GetUpdatedMilestoneDict(master_bot_pairs, tests):
    """Gets the milestone_dict with the newest rev.

    Checks to see which milestone_dict to use (Clank/Chromium), and updates
    the 'None' to be the newest revision for one of the specified tests.
    """
    masters = {pair.split('/')[0] for pair in master_bot_pairs}
    base = CLANK_MILESTONES if 'ClankInternal' in masters else CHROMIUM_MILESTONES
    milestone_dict = base.copy()
    # If we might access the end of the milestone_dict, update it to
    # be the newest revision instead of 'None'.
    _UpdateNewestRevInMilestoneDict(master_bot_pairs, tests, milestone_dict)
    return milestone_dict
d3f8f78bb6aed29d2a7a932b288a84adf22e323b
30,706
def _hrv_nonlinear_poincare_hra(rri, out):
    """Heart Rate Asymmetry Indices.

    - Asymmetry of Poincaré plot (or termed as heart rate asymmetry, HRA) - Yan (2017)
    - Asymmetric properties of long-term and total heart rate variability - Piskorski (2011)

    Parameters
    ----------
    rri : array-like
        Successive RR intervals; consecutive pairs form the Poincaré plot.
        NOTE(review): units (ms vs s) are assumed consistent with the rest
        of the module — confirm against callers.
    out : dict
        Output container; asymmetry indices are added in place and the
        dict is returned.
    """
    N = len(rri) - 1  # number of Poincaré points (consecutive pairs)
    x = rri[:-1]  # rri_n, x-axis
    y = rri[1:]  # rri_plus, y-axis
    diff = y - x
    decelerate_indices = np.where(diff > 0)[0]  # set of points above IL where y > x
    accelerate_indices = np.where(diff < 0)[0]  # set of points below IL where y < x
    nochange_indices = np.where(diff == 0)[0]  # points exactly on the identity line

    # Distances to centroid line l2 (perpendicular to the identity line
    # through the centroid)
    centroid_x = np.mean(x)
    centroid_y = np.mean(y)
    dist_l2_all = abs((x - centroid_x) + (y - centroid_y)) / np.sqrt(2)

    # Distances to LI (the identity line y = x)
    dist_all = abs(y - x) / np.sqrt(2)

    # Calculate the angles
    theta_all = abs(np.arctan(1) - np.arctan(y / x))  # phase angle LI - phase angle of i-th point
    # Calculate the radius
    r = np.sqrt(x ** 2 + y ** 2)
    # Sector areas
    S_all = 1 / 2 * theta_all * r ** 2

    # Guzik's Index (GI): share of identity-line distance contributed by
    # decelerations
    den_GI = np.sum(dist_all)
    num_GI = np.sum(dist_all[decelerate_indices])
    out["GI"] = (num_GI / den_GI) * 100

    # Slope Index (SI)
    den_SI = np.sum(theta_all)
    num_SI = np.sum(theta_all[decelerate_indices])
    out["SI"] = (num_SI / den_SI) * 100

    # Area Index (AI)
    den_AI = np.sum(S_all)
    num_AI = np.sum(S_all[decelerate_indices])
    out["AI"] = (num_AI / den_AI) * 100

    # Porta's Index (PI)
    m = N - len(nochange_indices)  # all points except those on LI
    b = len(accelerate_indices)  # number of points below LI
    out["PI"] = (b / m) * 100

    # Short-term asymmetry (SD1)
    sd1d = np.sqrt(np.sum(dist_all[decelerate_indices] ** 2) / (N - 1))
    sd1a = np.sqrt(np.sum(dist_all[accelerate_indices] ** 2) / (N - 1))
    sd1I = np.sqrt(sd1d ** 2 + sd1a ** 2)
    out["C1d"] = (sd1d / sd1I) ** 2
    out["C1a"] = (sd1a / sd1I) ** 2
    out["SD1d"] = sd1d  # SD1 deceleration
    out["SD1a"] = sd1a  # SD1 acceleration
    # out["SD1I"] = sd1I  # SD1 based on LI, whereas SD1 is based on centroid line l1

    # Long-term asymmetry (SD2)
    longterm_dec = np.sum(dist_l2_all[decelerate_indices] ** 2) / (N - 1)
    longterm_acc = np.sum(dist_l2_all[accelerate_indices] ** 2) / (N - 1)
    longterm_nodiff = np.sum(dist_l2_all[nochange_indices] ** 2) / (N - 1)
    # on-line points contribute half to each side
    sd2d = np.sqrt(longterm_dec + 0.5 * longterm_nodiff)
    sd2a = np.sqrt(longterm_acc + 0.5 * longterm_nodiff)
    sd2I = np.sqrt(sd2d ** 2 + sd2a ** 2)
    out["C2d"] = (sd2d / sd2I) ** 2
    out["C2a"] = (sd2a / sd2I) ** 2
    out["SD2d"] = sd2d  # SD2 deceleration
    out["SD2a"] = sd2a  # SD2 acceleration
    # out["SD2I"] = sd2I  # identical with SD2

    # Total asymmetry (SDNN)
    sdnnd = np.sqrt(0.5 * (sd1d ** 2 + sd2d ** 2))  # SDNN deceleration
    sdnna = np.sqrt(0.5 * (sd1a ** 2 + sd2a ** 2))  # SDNN acceleration
    sdnn = np.sqrt(sdnnd ** 2 + sdnna ** 2)  # should be similar to sdnn in hrv_time
    out["Cd"] = (sdnnd / sdnn) ** 2
    out["Ca"] = (sdnna / sdnn) ** 2
    out["SDNNd"] = sdnnd
    out["SDNNa"] = sdnna
    return out
fff7f5c071b64fb44f7e8155a5ddf350a4586517
30,707
def headers(sheet):
    """Returns the values of the sheet's header row (i.e., the first row)."""
    first_row = next(sheet.iter_rows(values_only=True))
    return [stringify_value(cell) for cell in truncate_row(first_row)]
26160064a3f0509d343140e3018e96fb1f7b91b4
30,708
def internal_server_error(message='Internal server error'):
    """500 Internal server error response"""
    return error(500, {'_internal': message})
d4ed017f6720ae3e62e5d6e66eb4d2cabd2a0775
30,709
def get_model(args, test=False):
    """
    Create computation graph and variables.
    """
    nn_in_size = 513
    rgb_shape = [args.batch_size, 3, nn_in_size, nn_in_size]
    map_shape = [args.batch_size, 1, nn_in_size, nn_in_size]
    image = nn.Variable(rgb_shape)
    label = nn.Variable(map_shape)
    mask = nn.Variable(map_shape)
    pred = model.deeplabv3plus_model(
        image, args.output_stride, args.num_class, test=test, fix_params=False)
    # Initializing moving variance by 1
    for key, val in nn.get_parameters().items():
        if 'bn/var' in key:
            val.d.fill(1)
    # Masked mean of per-pixel cross entropy.
    xent = F.softmax_cross_entropy(pred, label, axis=1)
    loss = F.sum(xent * mask) / F.sum(mask)
    Model = namedtuple('Model', ['image', 'label', 'mask', 'pred', 'loss'])
    return Model(image, label, mask, pred, loss)
9199034f0776e6d291185d130aadd78d7bf11ea0
30,710
import random


def post_config(opt):
    """Finalize parsed options: snapshot initial values, build the output
    directory name, and seed the RNGs (randomly unless a manual seed was
    given)."""
    # init fixed parameters
    for attr in ('noise_amp', 'nfc', 'min_nfc', 'scale_factor'):
        setattr(opt, attr + '_init', getattr(opt, attr))
    opt.out_ = 'TrainedModels/%s/scale_factor=%f/' % (opt.input_name[:-4], opt.scale_factor)
    if opt.manualSeed is None:
        opt.manualSeed = random.randint(0, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    set_seed(opt.manualSeed)
    return opt
b7b7127e560b24a0d8242dfb9912852e2cd33c7d
30,711
import collections


def select_device_with_aspects(required_aspects, excluded_aspects=None):
    """Selects the root :class:`dpctl.SyclDevice` that has the highest
    default selector score among devices that have all aspects in the
    `required_aspects` list, and do not have any aspects in
    `excluded_aspects` list.

    The list of SYCL device aspects can be found in SYCL 2020 specs:

    https://www.khronos.org/registry/SYCL/specs/sycl-2020/html/sycl-2020.html#sec:device-aspects

    :Example:
        .. code-block:: python

            import dpctl

            # select a GPU that supports double precision
            dpctl.select_device_with_aspects(['fp64', 'gpu'])

            # select non-custom device with USM shared allocations
            dpctl.select_device_with_aspects(
                ['usm_shared_allocations'], excluded_aspects=['custom'])

    :raises TypeError: if the inputs are not sequences of strings.
    :raises AttributeError: if an aspect name is unknown to dpctl.
    :raises SyclDeviceCreationError: if no device matches.
    """
    # None sentinel instead of a mutable `[]` default argument.
    if excluded_aspects is None:
        excluded_aspects = []
    if isinstance(required_aspects, str):
        required_aspects = [required_aspects]
    if isinstance(excluded_aspects, str):
        excluded_aspects = [excluded_aspects]
    seq = collections.abc.Sequence
    input_types_ok = isinstance(required_aspects, seq) and isinstance(
        excluded_aspects, seq
    )
    if not input_types_ok:
        raise TypeError(
            "Aspects are expected to be Python sequences, "
            "e.g. lists, of strings"
        )
    for asp in chain(required_aspects, excluded_aspects):
        # isinstance instead of `type(asp) != str` (accepts str subclasses).
        if not isinstance(asp, str):
            raise TypeError("The list objects must be of a string type")
        if not hasattr(SyclDevice, "has_aspect_" + asp):
            raise AttributeError(f"The {asp} aspect is not supported in dpctl")
    devs = get_devices()
    max_score = 0
    selected_dev = None
    for dev in devs:
        # Device qualifies if it has every required aspect...
        aspect_status = all(
            getattr(dev, "has_aspect_" + asp) is True
            for asp in required_aspects
        )
        # ...and none of the excluded ones.
        aspect_status = aspect_status and not any(
            getattr(dev, "has_aspect_" + asp) is True
            for asp in excluded_aspects
        )
        if aspect_status and dev.default_selector_score > max_score:
            max_score = dev.default_selector_score
            selected_dev = dev
    if selected_dev is None:
        raise SyclDeviceCreationError(
            f"Requested device is unavailable: "
            f"required_aspects={required_aspects}, "
            f"excluded_aspects={excluded_aspects}"
        )
    return selected_dev
3f34baff8aab39ef88d20c610b203723712823af
30,712
import os


def get_block_path(tag, block_number):
    """
    Get the absolute path of a specific block identified by a tag and its
    unique identifier

    Args:
        tag: The tag used to identify the type of data stored in the block
        block_number: The unique identifier of the block

    Returns:
        The absolute path to the block
    """
    block_folder_path = get_block_folder_path(tag)
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(...): os.makedirs(...)` pattern.
    os.makedirs(block_folder_path, exist_ok=True)
    return os.path.join(block_folder_path, str(block_number) + BLOCK_EXT)
f151a7058efcbb04f30160f0028a6ec0b315f5fe
30,713
def plus(a: int, b: int) -> int:
    """Return the sum of two integers.

    :param a: first addend
    :param b: second addend
    :return: a + b
    """
    result = a + b
    return result
f54224b8f8c0b599b7cd799aba0d291f11c4c16f
30,714
def linear_upsample_3d(inputs, strides=(2, 2, 2), use_bias=False,
                       trainable=False, name='linear_upsample_3d'):
    """Linear upsampling layer in 3D using strided transpose convolutions.

    The upsampling kernel size will be automatically computed to avoid
    information loss.

    Args:
        inputs (tf.Tensor): Input tensor to be upsampled
        strides (tuple, optional): The strides determine the upsampling
            factor in each dimension.
        use_bias (bool, optional): Flag to train an additional bias.
            NOTE(review): accepted but never used in this body — confirm
            whether a bias should be added to the transpose convolution.
        trainable (bool, optional): Flag to set the variables to be
            trainable or not.
        name (str, optional): Name of the layer.
            NOTE(review): also unused; the conv op is hard-named 'upsample'.

    Returns:
        tf.Tensor: Upsampled Tensor
    """
    static_inp_shape = tuple(inputs.get_shape().as_list())
    dyn_inp_shape = tf.shape(inputs)
    rank = len(static_inp_shape)
    num_filters = static_inp_shape[-1]
    # NDHWC layout: no striding over the batch or channel dimensions.
    strides_5d = [1, ] + list(strides) + [1, ]
    # Kernel of twice the stride where upsampling happens; stride-1 dims
    # need no interpolation.
    kernel_size = [2 * s if s > 1 else 1 for s in strides]
    kernel = get_linear_upsampling_kernel(
        kernel_spatial_shape=kernel_size,
        out_filters=num_filters,
        in_filters=num_filters,
        trainable=trainable)
    # Dynamic shape drives conv3d_transpose; the static shape is re-attached
    # afterwards so downstream graph code sees known dimensions.
    dyn_out_shape = [dyn_inp_shape[i] * strides_5d[i] for i in range(rank)]
    dyn_out_shape[-1] = num_filters
    static_out_shape = [static_inp_shape[i] * strides_5d[i]
                        if isinstance(static_inp_shape[i], int)
                        else None for i in range(rank)]
    static_out_shape[-1] = num_filters
    tf.logging.info('Upsampling from {} to {}'.format(
        static_inp_shape, static_out_shape))
    upsampled = tf.nn.conv3d_transpose(
        value=inputs,
        filter=kernel,
        output_shape=dyn_out_shape,
        strides=strides_5d,
        padding='SAME',
        name='upsample')
    upsampled.set_shape(static_out_shape)
    return upsampled
42613b0aa245f53cc381a1c0b29ee5de67441c5c
30,715
def sample_ingredient(user: User, name: str = "Cinnamon") -> Ingredient:
    """Create and persist a sample ingredient owned by *user* (for tests)."""
    ingredient = Ingredient.objects.create(user=user, name=name)
    return ingredient
c3ca73ece2c015608f54dd372749364c6d63b595
30,716
def naive_sample_frequency_spectrum(ts, sample_sets, windows=None, mode="site"):
    """
    Naive definition of the generalised site frequency spectrum.
    """
    # NOTE(review): the "site" entry is commented out, so the default
    # mode="site" currently raises KeyError — confirm whether the site
    # implementation should be restored or the default changed.
    method_map = {
        # "site": naive_site_sample_frequency_spectrum,
        "branch": naive_branch_sample_frequency_spectrum}
    return method_map[mode](ts, sample_sets, windows=windows)
0aeb561437a521757fe1c884a063592b8ca2eb2e
30,717
from matplotlib.gridspec import GridSpec


def show_spectral_sources(
    sources,
    observation=None,
    norm=None,
    channel_map=None,
    show_observed=False,
    show_rendered=False,
    show_spectra=True,
    figsize=None,
):
    """Plot each source individually.

    The functions provides an more detailed inspection of every source in
    the list.

    Parameters
    ----------
    sources: list of source models
    observation: `~scarlet.Observation`
    norm: norm to compress image intensity to the range [0,255]
    channel_map: array_like
        Linear mapping with dimensions (3, channels)
    show_observed: bool
        Whether the observation is shown
    show_rendered: bool
        Whether the model, rendered to match the observation, is shown
    show_spectra: bool
        Whether source spectrum is shown.
        For multi-component sources, spectra are shown separately.
    figsize: matplotlib figsize argument

    Returns
    -------
    matplotlib figure
    """
    if show_observed or show_rendered:
        assert (
            observation is not None
        ), "Provide matched observation to show observed frame"

    # One column per requested panel; spectra get a full extra grid row.
    panels = 1 + sum((show_observed, show_rendered))
    nrows, ncols = len(list(sources)), panels
    if show_spectra is True:
        nrows = nrows*2
    if figsize is None:
        figsize = (3 * panels, 3 * nrows)

    fig = plt.figure(figsize=figsize)
    gs = GridSpec(nrows=nrows, ncols=ncols)
    ax = [None]*(nrows*ncols)

    for k, src in enumerate(sources):
        # With spectra shown, each source occupies two grid rows.
        if show_spectra:
            k = k*2

        if hasattr(src, "center"):
            center = np.array(src.center)
            # center in src bbox coordinates
            if src.bbox is not None:
                center_ = center - np.array(src.bbox.origin[1:])
            else:
                center_ = center
            # center in observed coordinates
            center__ = center - np.array(src.frame.origin[1:])
        else:
            center = None

        panel = 0
        # Temporarily evaluate the source in its own bbox frame.
        frame_ = src.frame
        src.set_frame(src.bbox)
        if isinstance(src, ComponentTree):
            model = 0
            spectra = []
            for component in src:
                model_ = component.get_model()
                spectra.append(model_.sum(axis=(1, 2)))
                model += model_
        else:
            model = src.get_model()
            spectra = [model.sum(axis=(1, 2))]
        src.set_frame(frame_)

        # Panel 1: model in the source's own frame.
        ax[k+panel] = fig.add_subplot(gs[k, panel])
        ax[k+panel].imshow(img_to_rgb(model, norm=norm, channel_map=channel_map,
                                      mask=model.sum(axis=0)==0))
        ax[k+panel].set_title("Model Source {}".format(k))
        if center is not None:
            ax[k+panel].plot(*center_[::-1], "wx", mew=1, ms=10)

        if show_rendered:
            # Panel 2: model rendered through the observation.
            panel += 1
            ax[k+panel] = fig.add_subplot(gs[k, panel])
            model = src.get_model()
            model = observation.render(model)
            ax[k+panel].imshow(img_to_rgb(model, norm=norm, channel_map=channel_map))
            ax[k+panel].set_title("Model Source {} Rendered".format(k))
            if src.bbox is not None:
                ax[k+panel].set_ylim(src.bbox.start[-2], src.bbox.stop[-2])
                ax[k+panel].set_xlim(src.bbox.start[-1], src.bbox.stop[-1])
            if center is not None:
                ax[k+panel].plot(*center__[::-1], "wx", mew=1, ms=10)

        if show_observed:
            # Panel 3: the observation itself, zoomed to the source bbox.
            panel += 1
            ax[k+panel] = fig.add_subplot(gs[k, panel])
            ax[k+panel].imshow(
                img_to_rgb(observation.images, norm=norm, channel_map=channel_map)
            )
            ax[k+panel].set_title("Observation".format(k))
            if src.bbox is not None:
                ax[k+panel].set_ylim(src.bbox.start[-2], src.bbox.stop[-2])
                ax[k+panel].set_xlim(src.bbox.start[-1], src.bbox.stop[-1])
            if center is not None:
                ax[k+panel].plot(*center__[::-1], "wx", mew=1, ms=10)

        if show_spectra:
            # Extra full-width row below the image panels for the SED.
            k += 1
            ax[k+panel] = fig.add_subplot(gs[k, :])
            for spectrum in spectra:
                ax[k+panel].plot(spectrum)
            ax[k+panel].set_xticks(range(len(spectrum)))
            if hasattr(src.frame, "channels") and src.frame.channels is not None:
                ax[k+panel].set_xticklabels(src.frame.channels)
            ax[k+panel].set_title("SED")
            ax[k+panel].set_xlabel("Channel")
            ax[k+panel].set_ylabel("Intensity")

    fig.tight_layout()
    return fig
5c0e6af0189848c9264cb3b19ba09b6018ee79aa
30,718
import six
import requests
import json


def make_server_request(request, payload, endpoint, auth=None, method='post'):
    """
    makes a json request to channelstream server endpoint signing the request
    and sending the payload

    :param request: pyramid-style request providing registry settings
    :param payload: JSON-serializable request body
    :param endpoint: server path to call
    :param auth: optional requests auth object
    :return: the requests response object
    """
    settings = request.registry.settings
    signer = TimestampSigner(settings["secret"])
    sig_for_server = signer.sign("channelstream")
    if not six.PY2:
        # itsdangerous returns bytes on Python 3; headers need text.
        sig_for_server = sig_for_server.decode("utf8")
    secret_headers = {
        "x-channelstream-secret": sig_for_server,
        "Content-Type": "application/json",
    }
    url = "http://127.0.0.1:%s%s" % (settings["port"], endpoint)
    requester = getattr(requests, method)
    response = requester(
        url, data=json.dumps(payload), headers=secret_headers, auth=auth
    )
    if response.status_code >= 400:
        log.error(response.text)
        response.raise_for_status()
    return response
99c1bac6c3f010692f6e4b94b93ea77b4b655fde
30,719
import numpy as np


def nan_helpfcn(myarray):
    """Locate NaNs in a 1-D array.

    Returns a boolean mask marking the NaN entries of ``myarray`` together
    with a helper function that converts such a mask into integer indices.

    Example:
        >>> arr = np.array([1, 2, np.nan, 4, np.nan, 6])
        >>> nanlocs, indf = nan_helpfcn(arr)
        >>> list(indf(nanlocs))
        [2, 4]
    """
    nan_mask = np.isnan(myarray)
    # flatnonzero(mask) is equivalent to mask.nonzero()[0] for 1-D input.
    return nan_mask, lambda mask: np.flatnonzero(mask)
b5770e6bdfda85bc71fd954aacc4c31dbbd47f13
30,720
def _get_norm_layer(normalization_type='no_norm', name=None):
    """Get normalization layer.

    Args:
        normalization_type: String. The type of normalization_type, only
            'no_norm' and 'layer_norm' are supported.
        name: Name for the norm layer.

    Returns:
        layer norm class.

    Raises:
        NotImplementedError: for any unsupported normalization type.
    """
    if normalization_type == 'no_norm':
        layer = NoNorm(name=name)
    elif normalization_type == 'layer_norm':
        layer = tf.keras.layers.LayerNormalization(
            name=name, axis=-1, epsilon=1e-12, dtype=tf.float32)
    else:
        # Fixed grammar: original message read '... and "layer_norm" and supported.'
        raise NotImplementedError('Only "no_norm" and "layer_norm" are supported.')
    return layer
8aa307db8c1ea93905cc5adddcec4a04f8718195
30,721
def run_single_camera(cam):
    """
    This function acts as the body of the example; please see NodeMapInfo example
    for more in-depth comments on setting up cameras.

    :param cam: Camera to setup and run on.
    :type cam: CameraPtr
    :return: True if successful, False otherwise.
    :rtype: bool
    """
    try:
        result = True

        # Retrieve TL device nodemap and print device information
        nodemap_tldevice = cam.GetTLDeviceNodeMap()
        result &= print_device_info(nodemap_tldevice)

        # Initialize camera
        cam.Init()

        # Retrieve GenICam nodemap
        nodemap = cam.GetNodeMap()

        # Configure callbacks
        err, callback_height, callback_gain = configure_callbacks(nodemap)
        if not err:
            return err

        # Change height and gain to trigger callbacks
        result &= change_height_and_gain(nodemap)

        # Reset callbacks
        result &= reset_callbacks(nodemap, callback_height, callback_gain)

        # Deinitialize camera
        cam.DeInit()

    except PySpin.SpinnakerException as ex:
        # print(...) works on Python 2 and 3 alike; the original used the
        # Python-2-only statement form `print 'Error: %s' % ex`.
        print('Error: %s' % ex)
        return False

    return result
b7a3df5fb0e44ac4ce293d6ff89d4955d662f482
30,722
import six


def all_strs_text(obj):
    """
    PyYAML refuses to load strings as 'unicode' on Python 2 - recurse all
    over obj and convert every string.
    """
    if isinstance(obj, six.binary_type):
        return obj.decode('utf-8')
    if isinstance(obj, dict):
        return {six.text_type(key): all_strs_text(val)
                for key, val in six.iteritems(obj)}
    if isinstance(obj, tuple):
        return tuple(all_strs_text(item) for item in obj)
    if isinstance(obj, list):
        return [all_strs_text(item) for item in obj]
    # Anything that is not a container or byte string passes through.
    return obj
20b27cf809ed7fbf12b30a357d6aecfeeed88461
30,723
async def login_user(credentials: OAuth2PasswordRequestForm = Depends()):
    """Endpoint for logging user in."""
    user = services.authenticate_user(
        email=credentials.username, password=credentials.password)
    if user:
        return services.create_token(user)
    raise HTTPException(status_code=401, detail="Invalid Credentials")
d8e304b7cf718afce7a61c74ab38769d5695e7f5
30,724
from template import template


def javascript():
    """
    Return javascript library for the Sage Notebook.

    This is done by reading the template ``notebook_lib.js`` where all of
    the javascript code is contained and replacing a few of the values
    specific to the running session.

    Before the code is returned (as a string), it is run through a
    JavascriptCompressor to minimize the amount of data needed to be sent
    to the browser.

    .. note::

       The output of this function is cached so that it only needs to be
       generated once.

    EXAMPLES::

        sage: from sage.server.notebook.js import javascript
        sage: s = javascript()
        sage: s[:30]
        '/* JavaScriptCompressor 0.1 [w'
    """
    global _cache_javascript
    if _cache_javascript is None:
        raw = template('notebook_lib.js', SAGE_URL=SAGE_URL,
                       KEY_CODES=keyhandler.all_tests())
        # TODO: use minify here, which is more standard (and usually safer
        # and with gzip compression, smaller); But first inquire about the
        # propriety of the "This software shall be used for Good, not
        # Evil" clause in the license. Does that prevent us from
        # distributing it (i.e., it adds an extra condition to the
        # software)? See http://www.crockford.com/javascript/jsmin.py.txt
        _cache_javascript = JavaScriptCompressor().getPacked(raw)
    return _cache_javascript
70e28b39f5f4a249c8273dd92f493cc172c1d0a5
30,725
import os


def load_coedit_data(resource_dir):
    """Load preprocessed data about edit overlap between users.

    Populates the module-level ``COEDIT_DATA`` mapping of
    user -> list of (neighbor, overlap_count) tuples from
    ``coedit_counts.tsv`` under ``resource_dir`` and returns it.
    """
    app.logger.info("Loading co-edit data")
    expected_header = ["user_text", "user_neighbor", "num_pages_overlapped"]
    with open(os.path.join(resource_dir, "coedit_counts.tsv"), "r") as fin:
        assert next(fin).strip().split("\t") == expected_header
        for line_str in fin:
            line = line_str.strip().split("\t")
            # setdefault replaces the membership-test-then-assign pattern
            # (one dict lookup instead of up to three).
            COEDIT_DATA.setdefault(line[0], []).append((line[1], int(line[2])))
    return COEDIT_DATA
146a908f42a55ba14e2a2b8cd4bb61b5ef2b17c0
30,726
def fahrenheit2celsius(f: float) -> float:
    """Utility function to convert from Fahrenheit to Celsius."""
    offset = f - 32
    return offset * 5 / 9
5161b29998553ad6ff497e698058f330433d90b3
30,727
def loadFireTurnMap():
    """Load the hard-coded 11x11 fire turn map and transpose it so that
    access is [x][y], matching Board access.

    Returns:
        An 11x11 nested list of ints; cells with no corresponding value in
        the file remain None.
    """
    boardSize = 11
    # `with` guarantees the file is closed even if int() raises on bad data
    # (the original open()/close() pair leaked the handle on error).
    with open("fireTurnMap.txt", "r") as fireMapFile:
        data = [[int(n) for n in line.split()] for line in fireMapFile]
    rotated = [[None for j in range(boardSize)] for i in range(boardSize)]
    for i, row in enumerate(data):
        for j, value in enumerate(row):
            rotated[j][i] = value
    return rotated
a22350cf8ab488d719cdbaa0e3900c446b59b6f3
30,728
import itertools


def get_param_list(params, mode='grid', n_iter=25):
    """Get a list with all the parameter combinations that will be tested
    for optimization.

    Parameters
    ----------
    params: dictionary
        Each key corresponds to a parameter. The values correspond to a
        list of parameters to be explored. In the case of 'grid', all
        possible parameter combinations will be explored. In the case of
        'random', a determined number of random parameter combinations.
    mode: string
        Possible values are: 'grid' (default), 'random'
    n_iter: int (optional)
        Number of parameter settings that will be sampled. Only valid for
        'random' mode. Otherwise, ignored. Default value is 25. Notice
        there is a trade off between runtime and quality of the solution.

    Returns
    -------
    param_list: list
        List of dictionaries. Each dictionary has a parameter combination
        to try.

    Raises
    ------
    ValueError: if mode is neither 'grid' nor 'random'.
    """
    if mode == 'grid':
        # Cartesian product of all value lists.
        # Trick from https://stackoverflow.com/a/61335465/948768
        keys, values = zip(*params.items())
        param_list = [dict(zip(keys, combo))
                      for combo in itertools.product(*values)]
    elif mode == 'random':
        # One independently sampled dictionary per iteration.
        param_list = [
            {key: get_value_from_distribution(key, value)
             for key, value in params.items()}
            for _ in range(n_iter)
        ]
    else:
        raise ValueError("Invalid parameter optimization mode. Possible values are 'grid' and 'random'.")
    return param_list
ed99bdff2a27df05e81f04e0495b60fc845ec5c3
30,729
def _apple_universal_binary_rule_transition_impl(settings, attr):
    """Rule transition for `apple_universal_binary` supporting forced CPUs."""
    forced_cpus = attr.forced_cpus
    platform_type = attr.platform_type

    new_settings = dict(settings)

    # Overwrite the CPU setting for the target's platform type with the
    # forced CPUs before running the base transition, in case the base
    # transition reads that setting.
    if forced_cpus:
        new_settings[_platform_specific_cpu_setting_name(platform_type)] = forced_cpus

    # Apply the base transition on top of the tweaked inputs.
    new_settings = _apple_rule_base_transition_impl(new_settings, attr)

    # The base transition's outputs lack the platform-specific CPU flags,
    # so restore them: forced CPUs for the target's own platform type (when
    # given), the original input value for every other platform type. All
    # of these flags are declared transition outputs, so each one must be
    # assigned or the build fails at analysis time.
    for other_type, flag in _PLATFORM_TYPE_TO_CPU_FLAG.items():
        use_forced = forced_cpus and platform_type == other_type
        new_settings[flag] = forced_cpus if use_forced else settings[flag]

    return new_settings
e672473db5b117102a2147445c0416f3a22b2b2d
30,730
def xml_get_text(_node):
    """Helper function to get character data from an XML tree.

    Concatenates the data of every direct TEXT_NODE child of ``_node``
    and returns the result URL-unquoted.
    """
    # Generator + join instead of manual list accumulation.
    text = ''.join(
        child.data
        for child in _node.childNodes
        if child.nodeType == child.TEXT_NODE
    )
    return unquote(text)
0b611c0a95707b4220a114c7fe76c4fefd9d1615
30,731
def volume_get_all(context, marker, limit, sort_keys=None, sort_dirs=None,
                   filters=None, offset=None):
    """Retrieves all volumes.

    If no sort parameters are specified then the returned volumes are
    sorted first by the 'created_at' key and then by the 'id' key in
    descending order.

    :param context: context to query under
    :param marker: the last item of the previous page, used to determine
                   the next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param filters: dictionary of filters; values that are in lists, tuples,
                    or sets cause an 'IN' operation, while exact matching
                    is used for other values, see _process_volume_filters
                    function for more information
    :returns: list of matching volumes
    """
    session = get_session()
    with session.begin():
        query = _generate_paginate_query(context, session, marker, limit,
                                         sort_keys, sort_dirs, filters,
                                         offset)
        # A None query means nothing could possibly match; return an empty
        # result without touching the database.
        return [] if query is None else query.all()
664807482b6e26c6e96f8ec697d7d70a5c53d087
30,732
def merge_multirnn_lstm_state(states, w, b):
    """Merge several multi-layer RNN LSTM states into one.

    Each layer is merged independently by collecting that layer's
    LSTMStateTuple from every input state and combining them via
    ``merge_lstm_states`` (concatenation followed by projection).

    Args:
        states: sequence of multirnn lstm states to merge; each state is a
            tuple of LSTMStateTuple(c, h), with c, h of shape
            (batch_size, hidden_size)
        w: the projection weight, of shape (hidden_size * 2, hidden_size)
        b: the projection bias, of shape (hidden_size,)

    Returns:
        the merged state, a tuple with the same per-layer structure
    """
    n_layers = len(states[0])
    return tuple(
        merge_lstm_states([state[layer] for state in states], w, b)
        for layer in range(n_layers)
    )
7ccc6666fafd1e1b6e117dc257598d88777c3e40
30,733
def gen_iocs(indicator_list):
    """
    Generates a list of IOCs from a list of Anomali indicators
    :param indicator_list: list of Anomali indicators, types ip_address, url, or domain
    :return: list of IOC objects
    """
    ioc_list = list()
    for indicator in indicator_list:
        parsed = process_pattern(indicator.get('pattern'))
        # popitem yields the single (type, value) pair when parsing succeeds.
        ioc_type, ioc_value = parsed.popitem() if parsed else (None, None)
        if ioc_type and ioc_value is not None:
            labels = indicator.get('labels')
            ioc_list.append(
                indicators.IOC(
                    ioc_value,
                    ioc_type,
                    [labels],
                    None,
                    process_severity(labels),
                    indicator.get('description'),
                    None,
                    None
                )
            )
    return ioc_list
48bb07cf726f9052abbfc7de59e78f49f4b111d3
30,734
from core.controller.jobcontroller import JobController
import logging


def run_job(job_id, run_date, user_id, rerun_flag, schedule_id,
            schedulelog_id, mark_complete=False):
    """Run a single job and report its outcome.

    Returns a 4-element list [job_id, status, job, job_run] where status is
    0 - failure, 1 - success, 2 - not ready.
    """
    logging.debug('run_job for id: %s', str(job_id))

    controller = JobController(
        user_id=user_id,
        job_id=job_id,
        run_date=run_date
    )

    try:
        return_status, job, job_run = controller.execute(
            schedule_id, schedulelog_id, rerun_flag == 'Y', mark_complete)
    except SIDException as exp:
        logging.error('Job failure for %s', job_id)
        logging.error(str(exp))
        return [job_id, 0, None, None]

    # The job status is read back from the DB rather than the controller,
    # since the job might have run before.
    if not return_status:
        return [job_id, 2, job, None]
    if mark_complete:
        return [job_id, 1, job, None]
    return [job_id, 1, job, job_run]
33b21bc491d9759ab5e5bbf156c7182e546ed386
30,735
import csv


def parse_input_specifications() -> dict[str, InputFile]:
    """
    Ingest the input specs file and return a dictionary of the data.

    Rows with a filename start a new InputFile entry; subsequent rows with
    an empty filename describe columns of the most recent file.
    """
    with open(PATH_INPUT_SPECS, 'r') as spec_file:
        reader = csv.reader(spec_file)
        next(reader)  # skip the header row
        input_files = {}
        current_file = None
        for row in reader:
            if not any(row):
                continue
            filename, column, datatype, example, default, notes, source = (
                c.strip() for c in row)
            if filename:
                current_file = InputFile(filename, source, notes)
                input_files[filename] = current_file
            else:
                # The original rebound the open file handle `f` to the
                # current InputFile here, shadowing it; a distinct name
                # avoids that confusion.
                column_spec = InputFileColumn(column, datatype, example,
                                              default, notes)
                current_file.add_column(column_spec)
        return input_files
fddc4b3ff1e9a45f09a62981fc12e7da1fa4e25c
30,736
import base64


def image_base64(img):
    """Return image as base64."""
    if isinstance(img, str):
        # A string is treated as a path/key and resolved to a thumbnail.
        img = get_thumbnail(img)
    with BytesIO() as buffer:
        img.save(buffer, "jpeg")
        encoded = base64.b64encode(buffer.getvalue())
    return encoded.decode()
9a7cbbf9fd973831875ea0643547fd5abff2aa69
30,737
def read_file(file):
    """This function reads the raw data file, gets the scanrate and stepsize
    and then reads the lines according to cycle number. Once it reads the
    data for one cycle, it calls read_cycle function to generate a dataframe.
    It does the same thing for all the cycles and finally returns a
    dictionary, the keys of which are the cycle numbers and the values are
    the corresponding dataframes.

    Parameters
    __________
    file: raw data file

    Returns:
    ________
    df_dict : dict
        dictionary of dataframes with keys as cycle numbers and values as
        dataframes for each cycle
    n_cycle: int
        number of cycles in the raw file
    voltam_parameters: dict
        dictionary containing the parameters of the experimental parametrs
        used for the cyclic voltammetry scan

    dict_of_df: dictionary of dataframes with keys = cycle numbers
    and values = dataframes for each cycle
    n_cycle: number of cycles in the raw file
    """
    voltam_parameters = {}  # experiment settings parsed from the file header
    df_dict = {}            # cycle label -> dataframe
    data = {}               # cycle label -> raw lines belonging to that cycle
    param = 0               # number of header parameters found so far (6 total)
    n_cycle = 0
    with open(file, 'r') as f:
        # print(file + ' Opened')
        for line in f:
            if param != 6:
                # header section: collect the six scan parameters, then stop
                # checking header keywords for the rest of the file
                if line.startswith('SCANRATE'):
                    voltam_parameters['scan_rate(mV/s)'] = \
                        float(line.split()[2])
                    param = param+1
                if line.startswith('STEPSIZE'):
                    # file stores mV; convert to volts
                    voltam_parameters['step_size(V)'] = \
                        float(line.split()[2]) * 0.001
                    param = param+1
                if line.startswith('VINIT'):
                    voltam_parameters['vinit(V)'] = float(line.split()[2])
                    param = param+1
                if line.startswith('VLIMIT1'):
                    voltam_parameters['vlimit_1(V)'] = float(line.split()[2])
                    param = param+1
                if line.startswith('VLIMIT2'):
                    voltam_parameters['vlimit_2(V)'] = float(line.split()[2])
                    param = param+1
                if line.startswith('VFINAL'):
                    voltam_parameters['vfinal(V)'] = float(line.split()[2])
                    param = param+1
            if line.startswith('CURVE'):
                # a new cycle starts; open a fresh line buffer for it
                n_cycle += 1
                data['cycle_'+str(n_cycle)] = []
            if n_cycle:
                # collect every line (including the CURVE header) for the
                # current cycle; lines before the first CURVE are ignored
                data['cycle_'+str(n_cycle)].append(line)
    # convert each cycle's raw lines into a dataframe via read_cycle()
    for i in range(len(data)):
        df_dict['cycle_'+str(i+1)] = read_cycle(data['cycle_'+str(i+1)])
    return df_dict, n_cycle, voltam_parameters
8eb59ad8f8b700a0d0c26386644be97aa2417bb7
30,738
import collections
import os


def data_loader(parent_dir='',
                dataset_list=('HD1', 'HD2', 'HD3', 'HD4', 'HD5', 'HD6'),
                min_gap=1,
                max_gap=4,
                min_stride=1,
                max_stride=2,
                epochs=-1,
                batch_size=1,
                random_lighting=False,
                luminence_threshold=0.1,
                depth_threshold=0.1,
                min_overlap=0.3,
                max_overlap=1.0,
                min_translation=0.05,
                validation_percentage=0,
                test_percentage=10,
                parallelism=20,
                parallel_image_reads=100,
                prefetch_buffer=20,
                filter_envmap=True):
    """Loads data.

    Builds tf.data pipelines over the scene/sequence directory tree under
    `parent_dir`, deterministically splits sequences into train/validation/
    test by hash, and returns a namedtuple of the three prepared datasets.
    """
    datasets = collections.namedtuple('datasets',
                                      ['training', 'validation', 'test'])
    # split points on a 0..100 hash range: [0, val_start) train,
    # [val_start, test_start) validation, [test_start, 100) test
    test_start = 100 - test_percentage
    val_start = test_start - validation_percentage

    # each dataset directory contributes its scene subdirectories
    data_dir = os.path.join(parent_dir, dataset_list[0])
    scenes = tf.data.Dataset.list_files(os.path.join(data_dir, '*'))
    for dataset in dataset_list[1:]:
        data_dir = os.path.join(parent_dir, dataset)
        scenes = scenes.concatenate(
            tf.data.Dataset.list_files(os.path.join(data_dir, '*')))
    # scenes -> sequences, dropping unreadable entries
    sequences = scenes.flat_map(
        lambda scene_dir: tf.data.Dataset.list_files(scene_dir + '/*')).apply(
            tf.data.experimental.ignore_errors())
    if not random_lighting:
        # keep only the fixed-lighting captures
        sequences = sequences.filter(filter_random_lighting)
    sequences = sequences.filter(filter_seq_length).apply(
        tf.data.experimental.ignore_errors())
    sequences = sequences.map(
        lambda sequence_dir: load_sequence(sequence_dir, parent_dir,
                                           parallelism),
        num_parallel_calls=parallelism)
    # deterministic split by per-sequence hash so membership is stable
    # across runs
    training = sequences.filter(
        lambda sequence: sequence.hash_in_range(100, 0, val_start))
    validation = sequences.filter(
        lambda sequence: sequence.hash_in_range(100, val_start, test_start))
    test = sequences.filter(
        lambda sequence: sequence.hash_in_range(100, test_start, 100))
    training = prepare_training_set(training, min_gap, max_gap, min_stride,
                                    max_stride, batch_size, epochs,
                                    min_overlap, max_overlap, min_translation,
                                    luminence_threshold, depth_threshold,
                                    parallel_image_reads, prefetch_buffer,
                                    filter_envmap)
    validation = prepare_eval_set(validation, min_gap, max_gap, min_stride,
                                  max_stride, batch_size, min_overlap,
                                  max_overlap, min_translation,
                                  luminence_threshold, depth_threshold,
                                  parallel_image_reads, prefetch_buffer)
    test = prepare_eval_set(test, min_gap, max_gap, min_stride, max_stride,
                            batch_size, min_overlap, max_overlap,
                            min_translation, luminence_threshold,
                            depth_threshold, parallel_image_reads,
                            prefetch_buffer)
    return datasets(training, validation, test)
a0d96d71c4a37e812f4cd9b81d80283128f0a4d6
30,739
def calculate_performance(data):
    """Calculates swarm performance using a performance function"""
    # Averages three per-column metrics across the data's columns:
    #   V: max speed, G: vertical MSE, C: distance standard deviation
    # and combines them as (V * 1000) / (C * G).
    df = pd.DataFrame(data)
    prev_column = None
    V = 0
    G = 0
    C = 0
    vcount = 0
    gcount = 0
    ccount = 0
    for column in df:
        # speed needs the previous column too (None on the first iteration)
        v = calculate_max_speed(df, column, prev_column)
        g = calculate_vertical_mse(df, column)
        c = calculate_distance_sd(df, column)
        # the helpers may return None (e.g. no previous column); skip those
        if v is not None:
            V += v
            vcount += 1
        if g is not None:
            G += g
            gcount += 1
        if c is not None:
            C += c
            ccount += 1
        prev_column = column
    # NOTE(review): raises ZeroDivisionError when a metric is None for every
    # column (e.g. empty data) — confirm callers guarantee non-empty input.
    V /= vcount
    G /= gcount
    C /= ccount
    print(f'V: {round(V, 2)} | C: {round(C, 2)} | G: {round(G, 2)}')
    return round((V * 1000) / (C * G), 2)
dbf060501991b5408f8d102f35dbe60b34fae0a9
30,740
def ipn(request):
    """
    Webhook handling for Coinbase Commerce.

    Creates a pending Transaction when a charge is first seen and, when the
    charge is confirmed, marks it confirmed and records the token transfer.
    """
    if request.method == 'POST':
        request_sig = request.META.get('HTTP_X_CC_WEBHOOK_SIGNATURE', None)
        # TODO(review): signature verification (Webhook.construct_event) is
        # commented out below — the endpoint currently trusts unauthenticated
        # input; request_sig is collected but unused until that is restored.
        '''
        # this was done in flask = request.data.decode('utf-8')
        try:
            # signature verification and event object construction
            event = Webhook.construct_event(
                json.dumps(request.data), request_sig, settings.ICO_COINBASE_WEBHOOK_SECRET)
        except Exception as e:
            return Response(
                {'message': 'Signature verification failed'},
                status=status.HTTP_401_UNAUTHORIZED
            )
        '''
        event = request.data['event']
        user = User.objects.get(pk=event['data']['metadata']['user_id'])
        code = event['data']['code']
        amount = float(event['data']['local']['amount'])
        purchased = helpers.calculate_bought(amount)
        # BUG FIX: the original bound this to `status`, shadowing the DRF
        # `status` module and making `status.HTTP_200_OK` below raise
        # AttributeError on a string.
        charge_status = event['type'].split(':')[1]

        if charge_status == 'pending':
            Transaction.objects.create(
                user=user,
                code=code,
                amount=amount,
                currency='USD',
                description=f'[{settings.ICO_STAGE}] Purchased {purchased} {settings.ICO_TOKEN_SYMBOL.upper()}',
                status=charge_status
            )
        elif charge_status == 'confirmed':
            # mark the original USD transaction confirmed ...
            tx = Transaction.objects.get(user=user, code=code)
            tx.status = charge_status
            tx.save()
            # ... and record the outgoing token transfer
            Transaction.objects.create(
                user=user,
                code=helpers.transfer_tokens(user, purchased),
                amount=purchased,
                currency=settings.ICO_TOKEN_SYMBOL.upper(),
                description=f'[{settings.ICO_STAGE}] Received {purchased} {settings.ICO_TOKEN_SYMBOL.upper()}',
                status=charge_status
            )
        return Response({'message': 'success'}, status=status.HTTP_200_OK)
774ad9ebe0f1be9b65c73e90627640f66fe16d4c
30,741
def get_decomposed_entries(structure_type, species):
    """
    Get decomposed entries for mix types
    Args:
        structure_type(str): "garnet" or "perovskite"
        species (dict): species in dictionary.
        structure_type(str): garnet or perovskite

    Returns:
        decompose entries(list): list of entries prepared from unmix
                        garnets/perovskite decomposed from input mix
                        garnet/perovskite
    """
    def decomposed(specie_complex):
        """Decompose those have sub-dict to individual dict objects."""
        # NOTE(review): every yield returns the SAME copied dict, mutated in
        # place between yields; the consumer must use each value immediately
        # — confirm this is intended.
        for site, specie in specie_complex.items():
            spe_copy = specie_complex.copy()
            if len(specie) > 1:
                for spe, amt in specie.items():
                    spe_copy[site] = {spe: 1}
                    yield spe_copy

    decompose_entries = []
    model, scaler = load_model_and_scaler(structure_type, "unmix")
    std_formula = STD_FORMULA[structure_type]
    for unmix_species in decomposed(species):
        # keep only charge-neutral candidates: total cation charge must
        # balance the oxygen anions (2- each), within a small tolerance
        charge = sum([spe.oxi_state * amt * SITE_INFO[structure_type][site]["num_atoms"]
                      for site in SITE_INFO[structure_type].keys()
                      for spe, amt in unmix_species[site].items()])
        if not abs(charge - 2 * std_formula['O']) < 0.1:
            continue

        formula = spe2form(structure_type, unmix_species)
        composition = Composition(formula)
        elements = [el.name for el in composition]
        chemsy = '-'.join(sorted(elements))
        calc_entries = []
        # prefer pre-calculated entries whose reduced formula matches
        if CALC_ENTRIES[structure_type].get(chemsy):
            calc_entries = [entry for entry in CALC_ENTRIES[structure_type][chemsy] if \
                            entry.name == Composition(formula).reduced_formula]
        else:
            pass

        if calc_entries:
            decompose_entries.extend(calc_entries)
        else:
            # no calculated entry available: predict the formation energy
            # with the ML model and build a synthetic entry
            cn_specific = True if structure_type == 'garnet' else False
            descriptors = get_descriptor(structure_type, unmix_species,
                                         cn_specific=cn_specific)
            form_e = get_form_e(descriptors, model, scaler)
            # tot_e = get_tote(form_e * std_formula.num_atoms, unmix_species)
            tot_e = get_tote(structure_type, form_e * std_formula.num_atoms,
                             unmix_species)
            entry = prepare_entry(structure_type, tot_e, unmix_species)
            # apply MP-compatible energy corrections
            compat = MaterialsProjectCompatibility()
            entry = compat.process_entry(entry)
            decompose_entries.append(entry)

    return decompose_entries
0db24d7be2cacc2c0aed180cf5d31ccde057e358
30,742
import os


def create_job_script(m):
    """
    This is the first function that runs when a user initializes a new untargeted workflow

    Creates the working directory layout under m['basedir'], builds the task
    command, and writes an sbatch submission script wrapping it.

    :param m: dict with keys 'basedir', 'basename', 'polarity' (plus the
        inputs consumed by make_task_and_job)
    :return: path to the written sbatch script
    """
    #setup directories
    if not os.path.isdir(m['basedir']):
        os.mkdir(m['basedir'])

    dirs_to_make = ['job_scripts','logs','intermediate_results','%s_%s'%(m['basename'],m['polarity'])]
    for d in dirs_to_make:
        if not os.path.isdir(os.path.join(m['basedir'],d)):
            os.mkdir(os.path.join(m['basedir'],d))

    job_cmd = make_task_and_job(m)#['basedir'],m['basename'],m['polarity'],m['files'])

    sbatch_file_name = os.path.join(m['basedir'],'job_scripts','%s_%s.sbatch'%(m['basename'],m['polarity']))
    # NOTE(review): denovo_sbatch_file_name is only used by the commented-out
    # rewrite block below; currently computed but unused.
    denovo_sbatch_file_name = os.path.join(m['basedir'],'job_scripts','%s_%s_denovo.sbatch'%(m['basename'],m['polarity']))
    err_file_name = os.path.join(m['basedir'],'logs','%s_%s.err'%(m['basename'],m['polarity']))
    out_file_name = os.path.join(m['basedir'],'logs','%s_%s.out'%(m['basename'],m['polarity']))

    # job_cmd_filtered = make_targeted_mzmine_job(m['basedir'],m['basename'],m['polarity'],m['files'])

    params_filename = os.path.join(m['basedir'],'logs','%s_%s_params.json'%(m['basename'],m['polarity']))
    new_params_filename = os.path.join(m['basedir'],'logs','%s_%s_params-used.json'%(m['basename'],m['polarity']))
    # snapshot the params file next to the logs so the exact settings used
    # for this run are preserved
    copy_params_command = "cp '%s' '%s'"%(params_filename,new_params_filename)

    with open(sbatch_file_name,'w') as fid:
        # point the SLURM template's stderr/stdout at this job's log files
        fid.write('%s\n'%SLURM_HEADER.replace('slurm.err',err_file_name).replace('slurm.out',out_file_name))
        fid.write('%s\n'%copy_params_command)
        fid.write('%s\n'%job_cmd)

    # bad_words = ['qos', '-p','-C','-L','-t','-N']
    # bad_time = '#SBATCH -t 24:00:00'
    # good_time = '#SBATCH -t 24:00:00\n'
    # bad_node = '-N 1 -c 64'
    # good_node = '#SBATCH -N 1 -c 64\n'
    # with open(sbatch_file_name) as oldfile, open(denovo_sbatch_file_name, 'w') as newfile:
    #     for line in oldfile:
    #         if not any(bad_word in line for bad_word in bad_words):
    #             newfile.write(line)
    #         if bad_time in line:
    #             newfile.write(good_time)
    #         if bad_node in line:
    #             newfile.write(good_node)
    #     newfile.write('#SBATCH --mem=494G\n')

    return sbatch_file_name
0e38443b74e87c243ee1604425ae5f1773165533
30,743
def closest_point(p1, p2, s):
    """Closest point on line segment (p1, p2) to point s.

    Returns (t, point) where t in [0, 1] is the normalized position along
    the segment (0 = p1, 1 = p2) and point is the closest point itself —
    an endpoint or a point mid-span.
    """
    # Degenerate segment: the only candidate is p1 itself.
    if p1 == p2:
        return (0, p1)

    direction = vector_diff(p2, p1)
    length = mag(direction)
    unit = vector_div(direction, length)
    to_s = vector_diff(s, p1)

    # scalar projection of A onto B = (A dot B)/|B| = A dot unit(B)
    projection = dot_product(to_s, unit)

    if projection < 0:
        # projection falls before the start point
        return (0, p1)
    if projection > length:
        # projection falls beyond the end point
        return (1, p2)
    # projection lands mid-span
    return (projection / length, vector_sum(p1, vector_mult(unit, projection)))
5fbce0ac5b2d87f15b6dd5a146e77b23dba3d743
30,744
def new_name():
    """Return a fresh legal C identifier on every call.

    Note: Not thread-safe — relies on a module-level counter.

    >>> name1 = new_name()
    >>> name2 = new_name()
    >>> name1 != name2
    True
    """
    global _num_names
    # bump the shared counter and embed it in the identifier
    _num_names = _num_names + 1
    return '_id_' + str(_num_names)
bd72bfdedc7ccd00e973d9677e116cf5afe07314
30,745
def pollard_brent_f(c, n, x):
    """Return f(x) = (x^2 + c) % n.  Assume c < n."""
    # (x*x) % n is already < n, so adding c < n can exceed n at most once;
    # a single conditional subtraction replaces a second modulo.
    result = (x * x) % n + c
    if result >= n:
        result = result - n
    assert 0 <= result < n  # invariant: result is a canonical residue mod n
    return result
5037b3feac2f131645fbe6ceb00f0d18417a7c04
30,746
def morphological_transformation(input_dir):
    """
    Performs advanced morphological transformations.

    Args:
        input_dir: Input Picture Data Stream (path of the image to read).

    Returns:
        (raw_image, image): the unmodified BGR image as read from disk, and
        the binarized mask after Gaussian blur, thresholding and
        close-then-open morphology.
    """
    raw_image = cv2.imread(input_dir)
    gray_image = cv2.cvtColor(raw_image, cv2.COLOR_BGR2GRAY)
    # Gauss Fuzzy De-noising (Setting the Size of Convolution Kernel Affects
    # the Effect).
    blur_image = cv2.GaussianBlur(gray_image, (9, 9), 0)
    # Setting threshold 165 (Threshold affects open-close operation effect).
    _, threshold = cv2.threshold(blur_image, 165, 255, cv2.THRESH_BINARY)
    # Define rectangular structural elements.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    # Closed operation (link block)
    closed = cv2.morphologyEx(threshold, cv2.MORPH_CLOSE, kernel)
    # # Open Operations (De-noising Points)
    image = cv2.morphologyEx(closed, cv2.MORPH_OPEN, kernel)
    return raw_image, image
b5db242cd39a71aea570d3f61b4c7a217565aa5d
30,747
import sys


def fmt_to_datatype_v3(fmt, shape, array=False):
    """convert numpy dtype format string to mdf versions 2 and 3
    channel data type and size

    Parameters
    ----------
    fmt : numpy.dtype
        numpy data type
    shape : tuple
        numpy array shape
    array : bool
        disambiguate between bytearray and channel array

    Returns
    -------
    data_type, size : int, int
        integer data type as defined by ASAM MDF and bit size

    Raises
    ------
    MdfException
        if the numpy kind has no MDF v3 equivalent
    """
    byteorder = fmt.byteorder
    # '=' (native) and '|' (not applicable) resolve to the host byte order
    if byteorder in "=|":
        byteorder = "<" if sys.byteorder == "little" else ">"
    size = fmt.itemsize * 8
    kind = fmt.kind

    # multi-dimensional u1 channels are stored as BYTEARRAY unless the
    # caller says this is a genuine channel array
    if not array and shape[1:] and fmt.itemsize == 1 and kind == "u":
        data_type = v3c.DATA_TYPE_BYTEARRAY
        # total bit size covers all extra dimensions
        for dim in shape[1:]:
            size *= dim
    else:
        if kind == "u":
            if byteorder in "<":
                data_type = v3c.DATA_TYPE_UNSIGNED_INTEL
            else:
                data_type = v3c.DATA_TYPE_UNSIGNED_MOTOROLA
        elif kind == "i":
            if byteorder in "<":
                data_type = v3c.DATA_TYPE_SIGNED_INTEL
            else:
                data_type = v3c.DATA_TYPE_SIGNED_MOTOROLA
        elif kind == "f":
            if byteorder in "<":
                if size == 32:
                    data_type = v3c.DATA_TYPE_FLOAT
                else:
                    data_type = v3c.DATA_TYPE_DOUBLE
            else:
                if size == 32:
                    data_type = v3c.DATA_TYPE_FLOAT_MOTOROLA
                else:
                    data_type = v3c.DATA_TYPE_DOUBLE_MOTOROLA
        elif kind in "SV":
            data_type = v3c.DATA_TYPE_STRING
        elif kind == "b":
            # numpy bool maps to a 1-bit unsigned integer
            data_type = v3c.DATA_TYPE_UNSIGNED_INTEL
            size = 1
        else:
            message = f"Unknown type: dtype={fmt}, shape={shape}"
            logger.exception(message)
            raise MdfException(message)

    return data_type, size
b63d85663b4f1cf45e176f893980479af9f71383
30,748
import this  # noqa: F401


def import_this(**kwargs):
    """Return the Zen of Python wrapped in a markdown code fence.

    BUG FIX: ``import this`` prints the Zen only the first time the module
    is imported — which happens at file load, long before the redirect is
    active — so the original always captured an empty string.  Re-executing
    the module inside the redirect captures the text reliably.
    """
    # https://stackoverflow.com/a/23794519
    import importlib  # local import: only needed here

    zen = io.StringIO()
    with contextlib.redirect_stdout(zen):
        importlib.reload(this)  # re-runs the module body, printing the Zen
    text = f"```{zen.getvalue()}```"
    return text
1a80b384154f4cfa8b71eeb57807b8e9e3fce322
30,749
def getCurrentPane():
    """Retrieve the current pane index as an int."""
    # tmux reports the active pane number via display-message
    pane_index = tget("display-message -p '#P'")
    return int(pane_index)
f7439d407ef618c7d516ad9eed5c925c49639533
30,750
def get_lcc_size(G, seed_nodes):
    """Return the size of the largest connected component (lcc).

    :param G: networkx graph
    :param seed_nodes: iterable of nodes of G to induce the subgraph on
    :return: number of nodes in the largest connected component of the
        induced subgraph, or 0 if the subgraph is empty
    """
    # getting subgraph that only consists of the black_nodes
    g = nx.subgraph(G, list(seed_nodes))

    if g.number_of_nodes() != 0:
        # BUG FIX: nx.connected_component_subgraphs() was removed in
        # networkx 2.4; connected_components() yields node sets, which is
        # all we need to measure the size.
        largest_cc = max(nx.connected_components(g), key=len)
        return len(largest_cc)  # size of largest connected component
    else:
        return 0
6582ab76a5b7a178d22592305d134529b327a2ec
30,751
def IsParalogLink(link, cds1, cds2):
    """sort out ortholog relationships between transcripts of
    orthologous genes.

    Returns (is_paralog, reason): is_paralog is true when the link looks
    paralogous (different exon boundaries or low coverage); reason is a
    short explanation, or None for ortholog-like links.

    NOTE(review): Python 2 code (print statements, list-returning map);
    also `raise "length discrepancy"` raises a string, which is invalid
    from Python 2.6 on — confirm the intended exception type.
    """
    map_a2b = alignlib_lite.makeAlignmentVector()
    alignlib_lite.AlignmentFormatEmissions(
        link.mQueryFrom, link.mQueryAli,
        link.mSbjctFrom, link.mSbjctAli).copy(map_a2b)

    # sanity check: an alignment cannot span more than either sequence
    if link.mQueryLength < (map_a2b.getRowTo() - map_a2b.getRowFrom() + 1) or \
       link.mSbjctLength < (map_a2b.getColTo() - map_a2b.getColFrom() + 1):
        print "ERRONEOUS LINK: %s" % str(link)
        raise "length discrepancy"

    # aligned fraction of each sequence, in percent
    coverage_a = 100.0 * \
        (map_a2b.getRowTo() - map_a2b.getRowFrom() + 1) / link.mQueryLength
    coverage_b = 100.0 * \
        (map_a2b.getColTo() - map_a2b.getColFrom() + 1) / link.mSbjctLength

    # check exon boundaries, look at starts, skip first exon
    def MyMap(a, x):
        # map query position x to sbjct coordinates, scanning forward over
        # unaligned positions; 0 means unmappable
        if x < a.getRowFrom():
            return 0
        while x <= a.getRowTo():
            c = a.mapRowToCol(x)
            if c:
                return c
            x += 1
        else:
            return 0
    mapped_boundaries = UniquifyList(
        map(lambda x: MyMap(map_a2b, x.mPeptideFrom / 3 + 1), cds1[1:]))
    reference_boundaries = UniquifyList(
        map(lambda x: x.mPeptideFrom / 3 + 1, cds2[1:]))

    nmissed = 0
    nfound = 0
    nmin = min(len(mapped_boundaries), len(reference_boundaries))
    nmax = max(len(mapped_boundaries), len(reference_boundaries))
    both_single_exon = len(cds1) == 1 and len(cds2) == 1
    one_single_exon = len(cds1) == 1 or len(cds2) == 1
    # compare the shorter boundary list against the longer one
    if len(mapped_boundaries) < len(reference_boundaries):
        mless = mapped_boundaries
        mmore = reference_boundaries
    else:
        mmore = mapped_boundaries
        mless = reference_boundaries

    # check if exon boundaries are ok: a boundary matches if some boundary
    # on the other side lies within the allowed slippage
    for x in mless:
        is_ok = 0
        for c in mmore:
            if abs(x - c) < param_boundaries_max_slippage:
                is_ok = 1
                break
        if is_ok:
            nfound += 1
        else:
            nmissed += 1

    # set is_ok for dependent on exon boundaries
    # in single exon cases, require a check of coverage
    is_ok = False
    check_coverage = False
    if both_single_exon or one_single_exon:
        is_ok = True
        check_coverage = True
    else:
        if nmin == 1:
            is_ok = nmissed == 0
        elif nmin == 2:
            is_ok = nmissed <= 1
        elif nmin > 2:
            is_ok = nfound >= 2

    cc = min(coverage_a, coverage_b)

    if param_loglevel >= 3:
        print "# nquery=", len(cds1), "nsbjct=", len(cds2), "nmin=", nmin, "nmissed=", nmissed, "nfound=", nfound, \
              "is_ok=", is_ok, "check_cov=", check_coverage, \
              "min_cov=", cc, coverage_a, coverage_b, \
              "mapped=", mapped_boundaries, "reference=", reference_boundaries

    if not is_ok:
        return True, "different exon boundaries"

    if check_coverage and cc < param_min_coverage:
        return True, "low coverage"

    return False, None
012ad1a195c42127a39cabee9f7380c7cb8f6f9b
30,752
from numpy import array, vstack
from scipy.spatial import Voronoi


def segments(points):
    """
    Return the bounded segments of the Voronoi diagram of the given points.

    INPUT:

    - ``points`` -- a list of complex points

    OUTPUT:

    A list of pairs ``(p1, p2)``, where ``p1`` and ``p2`` are the endpoints
    of the segments in the Voronoi diagram.

    EXAMPLES::

        sage: from sage.schemes.curves.zariski_vankampen import discrim, segments # optional - sirocco
        sage: R.<x,y> = QQ[]
        sage: f = y^3 + x^3 - 1
        sage: disc = discrim(f) # optional - sirocco
        sage: segments(disc) # optional - sirocco # abs tol 1e-15
        [(-2.84740787203333 - 2.84740787203333*I,
          -2.14285714285714 + 1.11022302462516e-16*I),
         (-2.84740787203333 + 2.84740787203333*I,
          -2.14285714285714 + 1.11022302462516e-16*I),
         (2.50000000000000 + 2.50000000000000*I,
          1.26513881334184 + 2.19128470333546*I),
         (2.50000000000000 + 2.50000000000000*I,
          2.50000000000000 - 2.50000000000000*I),
         (1.26513881334184 + 2.19128470333546*I, 0.000000000000000),
         (0.000000000000000, 1.26513881334184 - 2.19128470333546*I),
         (2.50000000000000 - 2.50000000000000*I,
          1.26513881334184 - 2.19128470333546*I),
         (-2.84740787203333 + 2.84740787203333*I,
          1.26513881334184 + 2.19128470333546*I),
         (-2.14285714285714 + 1.11022302462516e-16*I, 0.000000000000000),
         (-2.84740787203333 - 2.84740787203333*I,
          1.26513881334184 - 2.19128470333546*I)]
    """
    # work with the points as real (x, y) pairs
    discpoints = array([(CC(a).real(), CC(a).imag()) for a in points])
    # four far-away sentinel points bound the diagram so the ridges between
    # the input points all become finite segments
    added_points = 3 * abs(discpoints).max() + 1.0
    configuration = vstack([discpoints, array([[added_points, 0],
                                               [-added_points, 0],
                                               [0, added_points],
                                               [0, -added_points]])])
    V = Voronoi(configuration)
    res = []
    for rv in V.ridge_vertices:
        # a vertex index of -1 marks a ridge extending to infinity; keep
        # only fully bounded ridges
        if not -1 in rv:
            p1 = CC(list(V.vertices[rv[0]]))
            p2 = CC(list(V.vertices[rv[1]]))
            res.append((p1, p2))
    return res
5d4c62455a605dfb09c1009b44e12ecd726e4c84
30,753
def tpu_ordinal_fn(shard_index_in_host, replicas_per_worker):
    """Return the TPU ordinal associated with a shard."""
    # shards map round-robin onto the worker's replicas
    ordinal = shard_index_in_host % replicas_per_worker
    return ordinal
773313750ce78cf5d32776752cb75201450416ba
30,754
import string


def replace_example_chapter(path_to_documentation, chapter_lines):
    """func(path_to_doc._tx, [new_chapter]) -> [regenerated_documentation]

    Opens the documentation and searches for the text section separated
    through the global marks START_MARK/END_MARK. Returns the opened file's
    lines with that section replaced by chapter_lines.
    """
    lines = []
    # state 0: before START_MARK (copy lines through)
    # state 1: inside the old chapter (dropped, replaced at END_MARK)
    # state 2: after END_MARK (copy lines through)
    state = 0
    # BUG FIX: `with` guarantees the handle is closed (the original leaked
    # it), and str.find() replaces string.find(), which was removed from
    # the `string` module in Python 3.
    with open(path_to_documentation, "rt") as doc_file:
        for line in doc_file:
            if state == 0:
                lines.append(line)
                if line.find(START_MARK) > 0:
                    state = state + 1
            elif state == 1:
                if line.find(END_MARK) > 0:
                    lines.extend(chapter_lines)
                    lines.append(line)
                    state = state + 1
            else:
                lines.append(line)
    return lines
c4bb2285a55b0235d44a2550a3bad5a9d83583ad
30,755
def terminal(board):
    """
    Returns True if game is over, False otherwise.
    """
    # a win ends the game immediately
    if winner(board):
        return True
    # otherwise the game is over only when no cell is left EMPTY
    return all(
        board[i][j] != EMPTY
        for i in range(3)
        for j in range(3)
    )
0dd194c8281539977596779209d59533022ad16c
30,756
from typing import OrderedDict


def get_network(layers, phase):
    """Get structure of the network.

    Builds an adjacency mapping over layer ids and blob names describing
    the network graph, then collapses it (removing loops and blob nodes) so
    only layer-to-layer edges remain.

    Parameters
    ----------
    layers : list
        list of layers parsed from network parameters
    phase : int
        0 : train
        1 : test

    Returns
    -------
    OrderedDict
        mapping of layer id -> list of successor ids.

    NOTE(review): Python 2 code (xrange, list-returning map) — confirm the
    target interpreter before modernizing.
    """
    num_layers = len(layers)
    network = OrderedDict()
    for i in xrange(num_layers):
        layer = layers[i]
        # only keep layers active in the requested phase
        if check_phase(layer, phase):
            layer_id = "trans_layer_"+str(i)
            if layer_id not in network:
                network[layer_id] = []
            prev_blobs = map(str, layer.bottom)
            next_blobs = map(str, layer.top)
            # blobs act as intermediate graph nodes between layers
            for blob in prev_blobs+next_blobs:
                if blob not in network:
                    network[blob] = []
            # input blobs point at this layer; this layer points at its
            # output blobs
            for blob in prev_blobs:
                network[blob].append(layer_id)
            network[layer_id].extend(next_blobs)
    # collapse: drop self-loops, then splice blob nodes out of the graph
    network = remove_loops(network)
    network = remove_blobs(network)
    return network
cfbbcc99195a4e81503a89ce80a6d2314c2deb30
30,757
def create_category_hiearchy(cats, categoryType):
    """Build a {root-category: [sub-categories]} mapping for one category type.

    :param cats: mapping of category id -> category dict; each dict has
        'name', 'categoryType', and a 'parent' dict with a 'name'.
        Root-level categories have parent name 'Root'.
    :param categoryType: only categories whose 'categoryType' equals this
        value are included.
    :return: dict mapping each root-level category name to the list of its
        sub-category names (empty list if it has none).
    """
    dict_out = {}
    for cat in cats.values():
        # skip categories of a different type
        if cat['categoryType'] != categoryType:
            continue
        name = cat['name']
        parent_name = cat['parent']['name']
        if parent_name == 'Root':
            # root-level category: ensure it has an entry, keeping any
            # sub-categories already collected for it
            dict_out.setdefault(name, [])
        else:
            # sub-category: attach under its parent, creating the parent's
            # entry if the parent row hasn't been seen yet
            # (the original also had an unreachable `continue` guard here)
            dict_out.setdefault(parent_name, []).append(name)
    return dict_out
f0b19f2a6f56e49855a019a18d9357a31cfaeb2a
30,758
import os


def run(event, _context):
    """ save string API Key as SecureString """
    # Lambda handler: reads the (plain String) SSM parameter at
    # GRAPHQL_API_KEY_KEY_PATH and re-saves it at the same path as a
    # SecureString, then returns the triggering event unchanged.
    graphql_api_key_key_path = os.environ.get('GRAPHQL_API_KEY_KEY_PATH')
    print("graphql_api_key_key_path =", graphql_api_key_key_path)

    graphql_api_key = _get_parameter(graphql_api_key_key_path)
    # only rewrite when the parameter exists and is non-empty
    if graphql_api_key:
        _save_secure_parameter(graphql_api_key_key_path, graphql_api_key)
        print("graphql_api_key saved as ssm SecureString")

    return event
4681bb3b5799457bef313bcdf0313575941a025e
30,759
import time


def date():
    """
    Returns the current time formatted as an HTTP (RFC 7231) date.

    BUG FIX: the original formatted *local* time while labelling it GMT;
    HTTP dates must be expressed in GMT, so format time.gmtime() explicitly.

    @return: `str`
    """
    return time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime())
909f1f31c6c7f0ed03fe0b30785ff454f541a5fc
30,760
def _estimate_gaussian_covariances_tied(resp, X, nk, means, reg_covar): """Estimate the tied covariance matrix. Parameters ---------- resp : array-like of shape (n_samples, n_components) X : array-like of shape (n_samples, n_features) nk : array-like of shape (n_components,) means : array-like of shape (n_components, n_features) reg_covar : float Returns ------- covariance : array, shape (n_features, n_features) The tied covariance matrix of the components. """ avg_X2 = np.dot(X.T, X) avg_means2 = np.dot(nk * means.T, means) covariance = avg_X2 - avg_means2 covariance /= nk.sum() covariance.flat[:: len(covariance) + 1] += reg_covar return covariance
3bf510982698643afd9377e64d8fe569d7626452
30,761
from re import A


def rights(value_strategy: SearchStrategy[A]
           ) -> SearchStrategy[either.Right[A]]:
    """
    Create a search strategy that produces `pfun.either.Right` values

    Args:
        value_strategy: search strategy to draw values from
    Example:
        >>> rights(integers()).example()
        Right(0)
    Return:
        search strategy that produces `pfun.either.Right` values
    """
    # NOTE(review): `A` above is imported from `re` (i.e. the re.A/ASCII
    # flag), almost certainly a mis-import of a TypeVar named A used in
    # the annotations — confirm and fix the import.
    return builds(either.Right, value_strategy)
867db62f02955bf226109bf1cb8f04d4fb3f578c
30,762
def lpad(col, len, pad):
    """
    Left-pad the string column to width `len` with `pad`.

    >>> df = spark.createDataFrame([('abcd',)], ['s',])
    >>> df.select(lpad(df.s, 6, '#').alias('s')).collect()
    [Row(s=u'##abcd')]
    """
    # NOTE(review): the parameter name `len` shadows the builtin; renaming
    # would break keyword callers, so it is kept as-is.
    sc = SparkContext._active_spark_context
    # delegate to the JVM-side Spark SQL function of the same name
    return Column(sc._jvm.functions.lpad(_to_java_column(col), len, pad))
b2a8b01b06166b4fd4ec09b76b634c8b2e231d86
30,763
import torch


def get_audio_features(audios_data, audio_tStamp, frameRate, video_length, device='cuda'):
    """audio feature extraction"""
    # For every sampled video frame, cut a 224-column spectrogram patch
    # centred on the closest audio timestamp, feed it (replicated to 3
    # channels) through a ResNet50, and collect mean/std feature vectors.
    # NOTE(review): audios_data is indexed as [:, :, time] — presumably a
    # (1, freq_bins, time_bins) spectrogram tensor; confirm against caller.
    extractor = ResNet50().to(device)
    output1 = torch.Tensor().to(device)  # accumulated per-frame feature means
    output2 = torch.Tensor().to(device)  # accumulated per-frame feature stds
    extractor.eval()
    patchSize = 224  # width (time bins) of the spectrogram patch
    frameSkip = 2    # process every other video frame
    with torch.no_grad():
        for iFrame in range(1, video_length, frameSkip):
            # audio column closest in time to this video frame
            tCenter = np.argmin(abs(audio_tStamp - iFrame / frameRate))
            tStart = tCenter - patchSize / 2 + 1
            tEnd = tCenter + patchSize / 2
            # clamp the patch window to the spectrogram bounds
            if tStart < 1:
                tStart = 1
                tEnd = patchSize
            else:
                if tEnd > audios_data.shape[2]:
                    tStart = audios_data.shape[2] - patchSize + 1
                    tEnd = audios_data.shape[2]
            specRef_patch = audios_data[:, :, int(tStart - 1): int(tEnd)]
            # replicate the single-channel patch to RGB for the ResNet input
            refRGB = torch.cat((specRef_patch, specRef_patch, specRef_patch), 0)
            last_batch = refRGB.view(1, 3, specRef_patch.shape[1], specRef_patch.shape[2]).float().to(device)
            features_mean, features_std = extractor(last_batch)
            output1 = torch.cat((output1, features_mean), 0)
            output2 = torch.cat((output2, features_std), 0)
    # concatenate mean and std features per frame
    output = torch.cat((output1, output2), 1).squeeze()
    return output
149f22fe855f52d63ffc2174800a83afb2568246
30,764
import asyncio


async def node_watch_profile_report_builder(data_id: str):
    """
    Allows the front-end to update the display information once a profile
    report builds successfully. Necessary because the profile report
    entails opening a separate tab.
    """
    # poll every 5 seconds for up to 10 minutes (120 polls total)
    for _ in range(120):
        if fs.profile_report_exists(data_id):
            return UpdateNode(node=get_node_by_data_id(data_id))
        await asyncio.sleep(5)

    raise HTTPException(
        status_code=400,
        detail="The report either failed to generate or took too long"
    )
6cb6552d1a05726e77a0f31e5b3f4625752b2d1b
30,765
def write_report_systemsorted(system, username):
    """ function that prepares return values and paths """

    """
    the return values (prefix 'r') are used for the `mkdocs.yml` file
    they build the key-value-pair for every system
    """
    # return system_id for mkdocs.yml
    rid = str(system.system_id)

    # return fqdn for mkdocs.yml
    if system.dnsname is not None:
        rfqdn = system.system_name + "." + system.dnsname.dnsname_name
    else:
        rfqdn = system.system_name

    """
    build the path for every file
    it is distinguished between the short version for the `mkdocs.yml` file ('value' of key-value-pair)
    and the long version that is used to write to the file system
    """
    # build path
    path = system.system_name
    # check for domain and add to path
    if system.domain is not None:
        path = path + "_" + system.domain.domain_name
    # check for system_install_time and add to path
    if system.system_install_time is not None:
        install_time = system.system_install_time.strftime('%Y%m%d_%H%M%S')
        path = path + "_" + install_time

    # return shortened path for mkdocs.yml ('value')
    rpath = "systems/" + path + ".md"

    # get config model
    model = SystemExporterMarkdownConfigModel.objects.get(system_exporter_markdown_config_name = 'SystemExporterMarkdownConfig')

    # finish path for markdown file
    path = model.markdown_path + "/docs/systems/" + path + ".md"

    # BUG FIX: the original never closed the file — `django_report.closed`
    # was a no-op attribute access, not a close() call; `with` guarantees
    # the handle is released even if report writing fails.
    with open(path, "w") as report:
        django_report = File(report)
        # write systemreport
        write_report.write_report(django_report, system)

    # call logger
    info_logger(username, " SYSTEM_MARKDOWN_CREATED system_id:" + str(system.system_id) + "|system_name:" + str(system.system_name))

    # return strings for mkdocs.yml (only used in systemsorted_async)
    return(rid, rfqdn, rpath)
f080203b1384277a9c6a0c934758d13674978df0
30,766
def compute_entailment_graph_agreement(graph1, graph2):
    """
    Compute the agreement for the entailment graph: entities, arguments
    and predicates.

    :param graph1: the first annotator's graph
    :param graph2: the second annotator's graph
    :return: (entities_f1, arguments_f1, propositions_f1,
              consensual_graph1, consensual_graph2)
    """
    # Entity agreement: F1 with each annotator taken as gold in turn,
    # averaged (entities with a single mention are removed in the helper).
    entities_f1 = (compute_entities_f1(graph1, graph2) +
                   compute_entities_f1(graph2, graph1)) / 2.0

    # Predicate agreement, symmetrized the same way (predicates with a
    # single mention are removed in the helper).
    propositions_f1 = (compute_predicate_f1(graph1, graph2) +
                       compute_predicate_f1(graph2, graph1)) / 2.0

    # TODO: implement
    arguments_f1 = 0.0

    # Compute the consensual graphs: TODO
    consensual_graph1 = graph1
    consensual_graph2 = graph2

    return entities_f1, arguments_f1, propositions_f1, consensual_graph1, consensual_graph2
6479f106df736a6a39149af546593a13ae81d9a2
30,767
import os


def main(argv=None):
    """
    Runs the main program.

    :param argv: The command line arguments.
    :return: The return code for the program's termination.
    """
    args, ret = parse_cmdline(argv)
    # bail out early if argument parsing failed (or help was shown)
    if ret != GOOD_RET or args is None:
        return ret

    kbt = calc_kbt(args.temp)

    if args.src_file is not None:
        # single-file mode: process just the given source file
        proc_data = to_zero_point(calc_rad(args.src_file, kbt))
        write_csv(proc_data, create_out_fname(args.src_file, prefix=OUT_PFX), RAD_KEY_SEQ)
    else:
        # directory mode: process every file matching the pattern under base_dir
        found_files = find_files_by_dir(args.base_dir, args.pattern)
        logger.debug("Found '{}' dirs with files to process".format(len(found_files)))
        # noinspection PyCompatibility
        for f_dir, files in found_files.items():
            if not files:
                logger.warn("No files found for dir '{}'".format(f_dir))
                continue
            for pmf_path in ([os.path.join(f_dir, tgt) for tgt in files]):
                proc_data = to_zero_point(calc_rad(pmf_path, kbt))
                f_name = create_out_fname(pmf_path, prefix=OUT_PFX)
                # honour --overwrite: skip existing outputs unless allowed
                if allow_write(f_name, overwrite=args.overwrite):
                    write_csv(proc_data, f_name, RAD_KEY_SEQ)
    return GOOD_RET
1fd21028373d07eee76aff63b0882c9b139cded8
30,768
def is_outlier(points, threshold=3.5):
    """Flag outliers via the modified z-score (Iglewicz & Hoaglin).

    Returns a boolean array with True for points whose modified z-score
    exceeds *threshold*, False otherwise.
    Reference:
    http://www.itl.nist.gov/div898/handbook/eda/section4/eda43.html#Iglewicz
    """
    # promote a 1-D array of scalars to a column of 1-D "vectors"
    if len(points.shape) == 1:
        points = points[:, None]

    # componentwise median of the points
    center = np.median(points, axis=0)

    # Euclidean distance of each point from the median
    deviations = np.sqrt(np.sum((points - center) ** 2, axis=-1))

    # median absolute deviation (MAD)
    mad = np.median(deviations)

    # 0.6745 scales the MAD to be consistent with the standard deviation
    # for normally distributed data
    modified_z_score = 0.6745 * deviations / mad

    return modified_z_score > threshold
edc28706b37a6c1cfef356f45dd87c076779fe6d
30,769
def hill_climbing_random_restart(problem, restarts=10):
    """Hill climbing with random restarts [Figure 4.2].

    From the initial node, keep choosing the neighbor with highest value.
    When stuck (no neighbors, or no improving neighbor), restart from a
    random state, up to `restarts` times.

    :param problem: search problem exposing initial, value(), goal_test()
        and N (board size used by generate_random_state)
    :param restarts: number of random restarts allowed once stuck
    :return: the best state seen across all restarts (the goal state if
        one was reached)
    """
    current = Node(problem.initial)
    best = current  # best state seen so far across all restarts
    while True:
        if problem.value(current.state) > problem.value(best.state):
            best = current
        if problem.goal_test(current.state):
            # a goal always wins, regardless of value bookkeeping
            best = current
            break
        neighbors = current.expand(problem)
        if neighbors:
            neighbor = argmax_random_tie(neighbors,
                                         key=lambda node: problem.value(node.state))
            if problem.value(neighbor.state) > problem.value(current.state):
                current = neighbor
                continue
        # stuck: no neighbors, or no strictly-improving neighbor —
        # random restart (the original duplicated this branch verbatim)
        if restarts > 0:
            restarts -= 1
            current = Node(generate_random_state(problem.N))
        else:
            break
    # BUG FIX: the original tracked `best` but returned `current.state`,
    # which after a restart can be worse than an earlier optimum.
    return best.state
78846f5d67465c981b712d00da7a0d76bbf152bd
30,770
def ordinal(n):
    """Converts an integer into its ordinal equivalent.

    Args:
        n: number to convert

    Returns:
        nth: ordinal representation of passed integer (e.g. 1 -> '1st')
    """
    last_digit = n % 10
    tens_digit = n // 10 % 10
    # 11th, 12th, 13th are exceptions to the 1st/2nd/3rd pattern, which is
    # why the tens digit must not be 1 for the special suffixes to apply.
    if tens_digit != 1 and 1 <= last_digit <= 3:
        suffix = {1: "st", 2: "nd", 3: "rd"}[last_digit]
    else:
        suffix = "th"
    return "%d%s" % (n, suffix)
7f438c89a6b0f7adbc42f2eb1e619ca4bf862b4a
30,771
def check_date(date):
    """Check whether a date string is a correctly formatted mmddyyyy value.

    Only the format is validated (month 01-12, day 01-31); calendar
    validity such as the number of days per month is not checked.

    Args:
        date: date as a string mmddyyyy

    Returns:
        a boolean indicating if valid (True) or not (False)
    """
    if len(date) != 8:
        return False
    if not date.isdigit():
        return False
    month = int(date[0:2])
    day = int(date[2:4])
    # BUGFIX: the original only inspected the first digit of the day, so
    # out-of-range values such as month '00', day '00' or day '39' passed.
    if not 1 <= month <= 12:
        return False
    if not 1 <= day <= 31:
        return False
    return True
8972498d94d459ba48851049780e46b057855d9f
30,772
async def expected_raceplan_individual_sprint_27_contestants(
    event_individual_sprint: dict,
) -> Raceplan:
    """Create a mock raceplan object - 27 contestants."""
    # Fixed id so tests can compare against a deterministic plan.
    plan = Raceplan(event_id=event_individual_sprint["id"], races=[])
    plan.id = "390e70d5-0933-4af0-bb53-1d705ba7eb95"
    plan.no_of_contestants = 27
    return plan
969cba41b0cdcdd83317cd98a57b437f66981dbe
30,773
def signal_to_m_converter(dataframe, dbm="4(dBm)"):
    """Convert a (beacon) dataframe of tracer signal values to meter values.

    Each unique signal value in ``dataframe`` is linearly interpolated
    against the txpower-vs-distance calibration table
    (``constant.txpower_vs_distance``) for the transmit power column ``dbm``.

    Args:
        dataframe: pandas DataFrame of raw signal readings.
        dbm: calibration-table column of the transmit power that was used
            (default "4(dBm)").

    Returns:
        A copy of ``dataframe`` with every signal value replaced by the
        interpolated distance in meters; NaN where a signal falls outside
        the calibrated range.
    """
    # Extract all distinct signal values so each is interpolated only once.
    unique_signals = np.unique(dataframe)
    df_txpower = pd.DataFrame(constant.txpower_vs_distance)
    power_column = df_txpower[dbm]
    n_levels = power_column.count()

    meter_values = []
    for signal in unique_signals:
        # Find the calibration rows bracketing this signal, then interpolate
        # linearly between their distances.
        # BUGFIX: iterate only to n_levels - 1 because row i + 1 is read;
        # the original range(0, n_levels) raised KeyError for any signal
        # weaker than the last calibration entry instead of yielding NaN.
        for i in range(n_levels - 1):
            if power_column[i] >= signal >= power_column[i + 1]:
                distance = (
                    (df_txpower["Distance(m)"][i + 1] - df_txpower["Distance(m)"][i])
                    / (power_column[i + 1] - power_column[i])
                    * (signal - power_column[i])
                    + df_txpower["Distance(m)"][i]
                )
                meter_values.append(distance)
                break
        else:
            # Signal outside the calibrated range.
            meter_values.append(np.nan)

    return dataframe.replace(list(unique_signals), meter_values)
55e58553a8685287a0d07e3c3d2432408e46ba04
30,774
def return_lines_as_list(file):
    """Read every line of an open file, stripped of surrounding whitespace.

    :rtype: list of str
    """
    # One pass: strip leading/trailing whitespace from each line as we go.
    return [line.strip() for line in file.readlines()]
69e3d45fa3df107a8852d10e104a543c014a6c79
30,775
def cvInitMatNDHeader(*args):
    """cvInitMatNDHeader(CvMatND mat, int dims, int type, void data=None) -> CvMatND"""
    # Thin SWIG-generated wrapper: forwards all arguments verbatim to the
    # native OpenCV binding and returns its result unchanged.
    return _cv.cvInitMatNDHeader(*args)
152f49b20a858e7bbb7229d77cdeffa6fd1ed049
30,776
def getObjectInfo(fluiddb, objectId):
    """
    Get information about an object.
    """
    # Look the object up by id; showAbout=True asks the API to include the
    # object's "about" value in the response.
    return fluiddb.objects[objectId].get(showAbout=True)
baad59e6585e04a8c2a8cca1df305327b80f3768
30,777
def calculate_logAUC(true_y, predicted_score, FPR_range=(0.001, 0.1)):
    """
    Calculate logAUC in a certain FPR range (default range: [0.001, 0.1]).

    Only a small percentage of samples can be selected for experimental
    tests, so the decision threshold is high and the relevant part of the
    ROC curve is the low-FPR (left) side. The area under that segment is
    taken on a log10 FPR axis to bias smaller FPRs. A perfect classifier
    gets logAUC[0.001, 0.1] of 1; a random one about 0.0215.

    References:
    [1] Mysinger, M.M. and B.K. Shoichet, J. Chem. Inf. Model., 2010,
        50(9), 1561-1573.
    [2] Mendenhall, J. and J. Meiler, J. Comput. Aided Mol. Des., 2016,
        30(2), 177-189.

    :param true_y: numpy array of the ground truth
    :param predicted_score: numpy array of the predicted score (the score
        does not have to be between 0 and 1)
    :param FPR_range: (lower, upper) FPR bounds; must not be None
    :raises ValueError: if FPR_range is None or the bounds are not increasing
    :return: the logAUC value
    """
    # BUGFIX: the original only computed range1/range2 when FPR_range was
    # not None, yet used them unconditionally afterwards, so passing None
    # crashed with a NameError. Fail fast with a clear error instead.
    if FPR_range is None:
        raise ValueError('FPR_range cannot be None')
    lower = np.log10(FPR_range[0])
    upper = np.log10(FPR_range[1])
    if lower >= upper:
        raise ValueError('FPR range2 must be greater than range1')

    fpr, tpr, thresholds = roc_curve(true_y, predicted_score, pos_label=1)
    x = np.log10(fpr)
    y = tpr
    # Insert interpolated TPR points at the two log-FPR boundaries so the
    # integration limits lie exactly on the curve. Both interp calls use
    # the original (pre-append) y, evaluated before reassignment.
    y = np.append(np.append(y, np.interp(lower, x, y)), np.interp(upper, x, y))
    x = np.sort(np.append(x, [lower, upper]))
    y = np.sort(y)

    lo_idx = np.where(x == lower)[-1][-1]
    hi_idx = np.where(x == upper)[-1][-1]
    trim_x = x[lo_idx:hi_idx + 1]
    trim_y = y[lo_idx:hi_idx + 1]
    # Divide by 2 = log10(0.1) - log10(0.001), the width of the default
    # integration window, to normalise the area.
    area = auc(trim_x, trim_y) / 2
    return area
def format_inline(str_, reset='normal'):
    """Format a string if there is any markup present."""
    url_pattern = const.regex['url']
    if url_pattern.search(str_):
        # Link markup of the form [[target][label]]: style a slug of the label.
        label = slugify(str_.split('[[')[1].split('][')[1].split(']]')[0])
        str_ = url_pattern.sub(const.styles['url'] + label + const.styles[reset], str_)
    for val in const.inline.values():
        if not val['pattern'].search(str_):
            continue
        # Strip the delimiters from each match and wrap it in its colour
        # code plus the reset style.
        for match in val['pattern'].findall(str_):
            styled = val["cols"] + match.replace(val["delim"], "") + const.styles[reset]
            str_ = str_.replace(match, styled)
    return str_
9cd6819bff098051812f23825bcdb61e7305d650
30,779
import logging


def procces_data(formatted_input):
    """
    Purpose:
        Process data (currently a pass-through stub; see TODO below)
    Args:
        formatted_input - formatted input data
    Returns:
        processed_data - processed input
    """
    # TODO: real processing is not implemented yet; the input is logged and
    # returned unchanged.
    logging.info("Processing Data")
    return formatted_input
d24f6c83cd718d0ea6dd7f2af7228bd7ff1f31e4
30,780
from typing import Dict
from typing import Any
import codecs
import pickle


def serialize_values(
    data_dictionary: Dict[str, Any], data_format: PersistedJobDataFormat
) -> Dict[str, Any]:
    """
    Serializes the `data_dictionary` values to the format specified by `data_format`.

    Args:
        data_dictionary (Dict[str, Any]): Dict whose values are to be serialized.
        data_format (PersistedJobDataFormat): The data format used to serialize the
            values. For `PICKLED` formats the values are base64 encoded after
            serialization so they represent valid UTF-8 text and stay compatible
            with `PersistedJobData.json()`.

    Returns:
        Dict[str, Any]: Dict with the same keys as `data_dictionary` and values
        serialized to the specified `data_format`.
    """
    if data_format != PersistedJobDataFormat.PICKLED_V4:
        # Non-pickled formats are passed through untouched.
        return data_dictionary
    # Pickle each value (protocol 4), then base64-encode so the payload is
    # plain text.
    serialized = {}
    for key, value in data_dictionary.items():
        pickled = pickle.dumps(value, protocol=4)
        serialized[key] = codecs.encode(pickled, "base64").decode()
    return serialized
9dc1116357c2dd50f16bf9b1b1d5eec56ea6b4f7
30,781
def print_level_order(tree):
    """Render each level of a k-ary tree on its own line.

    input <--- Tree
    output <--- string with one space-separated line of node values per level
    """
    if not isinstance(tree, KTree):
        raise TypeError('argument must be of type <KTree>')
    # Iterative breadth-first walk, one whole level at a time.
    lines = []
    level = [tree.root] if tree.root else []
    while level:
        lines.append(' '.join(str(node.val) for node in level))
        level = [child for node in level for child in node.children]
    return '\n'.join(lines)
7bb5d43725dbe351a85792f685ad504ca1e2d263
30,782
def spark_add():
    """ReduceByKey with the addition function.

    :input RDD data: The RDD to convert.
    :output Any result: The result.
    """
    def inner(data: pyspark.rdd.RDD) -> ReturnType[pyspark.rdd.RDD]:
        # Sum all values sharing the same key.
        summed = data.reduceByKey(lambda left, right: left + right)
        return ReturnEntry(result=summed)

    return inner
7cab67ac0f6a55911ca3e46612487bbe329b5d6f
30,783
def edit():
    """ Allows the user to edit or delete a reservation.

    Reads the ride number from the query string, verifies the logged-in
    user actually holds a reservation on that ride, and on form submit
    either cancels the reservation or updates its seat count.
    """
    user = db.session.query(models.Rideshare_user).filter(models.Rideshare_user.netid == session['netid']).first()
    form = forms.EditReservationFactory()
    reservation = None
    rideNumber = request.args.get('rideNo')
    userHasRev=check_user_has_rev(rideNumber)
    #check if user has this reservation before proceeding
    if userHasRev:
        if form.validate_on_submit():
            cancel,newSpots,comments=extract_info(form)
            ride = db.session.query(models.Ride).filter(models.Ride.ride_no == rideNumber).first()
            reservation = db.session.query(models.Reserve).filter(models.Reserve.ride_no == rideNumber)\
                .filter(models.Reserve.rider_netid==session['netid']).first()
            if cancel == "Yes":
                # NOTE(review): seats_available is decremented by newSpots
                # below, so cancel_reservation presumably returns a negative
                # delta to free the seats -- confirm against its definition.
                newSpots = cancel_reservation(reservation)
                email_driver_cancellation(user, ride, reservation)
            else:
                updatedSpots = int(request.form['spots_needed'])
                #only update spots if enough room in the ride
                if valid_new_rev(reservation, ride, updatedSpots):
                    newSpots = update_reservation(reservation, updatedSpots, comments)
                else:
                    # Not enough room: re-render the edit page without saving.
                    return render_template('accountPages/edit-reservation.html', reservation=reservation, ride=ride, form=form)
            ride.seats_available = ride.seats_available - newSpots
            db.session.commit()
            return redirect(url_for('rides.account_main'))
    # GET request, failed validation, or no reservation for this user.
    return render_template('accountPages/edit-reservation.html', user=user, form=form, reservation=reservation, userHasRev=userHasRev)
cfed4d98975d4e7abeb7021eba250c4f1e88c641
30,784
def webapp(): """Create a webapp fixture for accessing the site. Just include 'webapp' as an argument to the test method to use. """ # Create a webtest Test App for use testapp = flask.ext.webtest.TestApp(dnstwister.app) testapp.app.debug = True # Clear the cache dnstwister.cache.clear() return testapp
8a4ee5abd157ac41ce4d82969e47506b36765cf8
30,785
from typing import Type
from pathlib import Path


async def study_export(
    app: web.Application,
    tmp_dir: str,
    project_id: str,
    user_id: int,
    product_name: str,
    archive: bool = False,
    formatter_class: Type[BaseFormatter] = FormatterV2,
) -> Path:
    """
    Generates a folder with all the data necessary for exporting a project.

    If archive is True, an archive will always be produced.

    returns: the uncompressed directory if archive is False, otherwise the
        path to the compressed archive
    """
    # storage area for the project data
    base_temp_dir = Path(tmp_dir)
    destination = base_temp_dir / project_id
    destination.mkdir(parents=True, exist_ok=True)

    # The formatter will always be chosen to be the highest available version
    formatter = formatter_class(root_folder=destination)
    await formatter.format_export_directory(
        app=app, project_id=project_id, user_id=user_id, product_name=product_name
    )

    if archive is False:
        # returns the path to the temporary directory containing the study data
        return destination

    # an archive is always produced when compression is active
    archive_path = await zip_folder(
        folder_to_zip=base_temp_dir, destination_folder=base_temp_dir
    )
    return archive_path
def gen_fileext_type_map():
    """
    Generate previewed file extension and file type relation map.
    """
    # Invert the filetype -> extensions table into extension -> filetype.
    return {
        fileext: filetype
        for filetype, fileexts in PREVIEW_FILEEXT.items()
        for fileext in fileexts
    }
3ef34884b5fff37fbf20e7e11c87e2f16310a77a
30,787
def umm_fields(item):
    """Return only the UMM part of the data"""
    # Thin wrapper: delegates extraction to the shared `scom` helper.
    return scom.umm_fields(item)
65bb71ed27612a3f504b7aae771bff69eff85bbe
30,788
def converts_to_message(*args):
    """Decorator to register a custom NumPy-to-Message handler.

    Args:
        *args: one or more Message subclasses the decorated function
            converts to; each is mapped to the function in the
            module-level ``_to_message`` registry.

    Returns:
        The decorator, which returns the decorated function unchanged.

    Raises:
        TypeError: if any argument is not a Message subclass.
    """
    def decorator(function):
        for message_type in args:
            if not issubclass(message_type, Message):
                # BUGFIX: name the offending type instead of raising a
                # bare, message-less TypeError().
                raise TypeError(
                    "converts_to_message requires Message subclasses, "
                    "got {!r}".format(message_type)
                )
            _to_message[message_type] = function
        return function
    return decorator
5fdd5875aec2962b1ee19766f08c522200e8ea0a
30,789
def oil_rho_sat(
    rho0: NDArrayOrFloat, g: NDArrayOrFloat, rg: NDArrayOrFloat, b0: NDArrayOrFloat
) -> NDArrayOrFloat:
    """Calculate the gas saturated oil density B&W Eq 24

    Args:
        rho0: The oil reference density (g/cc) at 15.6 degC
        g: The gas specific gravity
        rg: The Gas-to-Oil ratio (L/L)
        b0: Oil formation volume factor FVF

    Returns:
        The gas saturated oil density (g/cc) at 15.6 degC
    """
    # Add the dissolved-gas mass term to the reference density, then scale
    # by the formation volume factor (safe_divide guards b0 == 0).
    saturated_mass = rho0 + 0.0012 * rg * g
    return safe_divide(saturated_mass, b0)
def astar(array, start, goal):
    """A* pathfinding over a 2-D grid, 4-connected (no diagonal moves).

    f(n) = g(n) + h(n), with g tracked in ``gscore`` and h supplied by the
    module-level ``heuristic`` function (also used here as the step cost
    between adjacent cells, as in the original).

    Args:
        array: 2-D array-like grid; cells equal to 1 are walls.
        start: (row, col) start cell.
        goal: (row, col) goal cell.

    Returns:
        List of cells from the goal back towards the start (start itself
        excluded), or False when no path exists.
    """
    neighbors = [(0, 1), (0, -1), (1, 0), (-1, 0)]
    close_set = set()
    came_from = {}
    gscore = {start: 0}
    fscore = {start: heuristic(start, goal)}
    oheap = []
    heappush(oheap, (fscore[start], start))

    while oheap:
        current = heappop(oheap)[1]
        if current == goal:
            # Walk the parent chain back to reconstruct the path.
            path = []
            while current in came_from:
                path.append(current)
                current = came_from[current]
            return path

        close_set.add(current)
        for di, dj in neighbors:
            neighbor = current[0] + di, current[1] + dj
            # Skip anything outside the grid and wall cells.
            if not (0 <= neighbor[0] < array.shape[0]
                    and 0 <= neighbor[1] < array.shape[1]):
                continue
            if array[neighbor[0]][neighbor[1]] == 1:
                continue

            tentative_g_score = gscore[current] + heuristic(current, neighbor)
            # BUGFIX: the default for an unseen node must be +inf, not 0.
            # With 0, no tentative score ever looked better and correctness
            # leaned on an O(n) linear scan of the heap ("neighbor not in
            # [i[1] for i in oheap]"), which is now unnecessary and removed.
            if neighbor in close_set and tentative_g_score >= gscore.get(neighbor, float('inf')):
                continue
            if tentative_g_score < gscore.get(neighbor, float('inf')):
                came_from[neighbor] = current
                gscore[neighbor] = tentative_g_score
                fscore[neighbor] = tentative_g_score + heuristic(neighbor, goal)
                heappush(oheap, (fscore[neighbor], neighbor))

    return False
def download_jar(req, domain, app_id):
    """
    See ApplicationBase.create_jadjar

    This is the only view that will actually be called
    in the process of downloading a complete CommCare.jar
    build (i.e. over the air to a phone).
    """
    # NOTE(review): HttpResponse(mimetype=...) is the pre-1.7 Django
    # spelling (newer versions use content_type=) -- confirm the pinned
    # Django version.
    response = HttpResponse(mimetype="application/java-archive")
    app = req.app
    _, jar = app.create_jadjar()
    set_file_download(response, 'CommCare.jar')
    response['Content-Length'] = len(jar)
    try:
        response.write(jar)
    except Exception:
        # Writing the build artifact failed: show a friendly error and
        # bounce the user back to the app's main page.
        messages.error(req, BAD_BUILD_MESSAGE)
        return back_to_main(req, domain, app_id=app_id)
    return response
def cond(addr, condexpr):
    """ set a condition breakpoint at addr. """
    # Delegates to setBreakpoint; the False flag marks this as a
    # conditional (non-hardware/unconditional) breakpoint with condexpr
    # as the trigger expression.
    return setBreakpoint(addr, False, condexpr)
adf2c21ef4dd32b92f546bc70c3009a47e305ee9
30,793
def get_host_credentials(config, hostname):
    """Get login information for a host `hostip` (ipv4) from marvin's `config`

    @return the tuple username, password for the host else raise keyerror"""
    for zone in config.get('zones', []):
        for pod in zone.get('pods', []):
            for cluster in pod.get('clusters', []):
                for host in cluster.get('hosts', []):
                    url = str(host.get('url'))
                    # http(s) urls carry the host name in their netloc part;
                    # otherwise the url field is the bare host name itself.
                    if url.startswith('http'):
                        candidate = urlparse.urlsplit(url).netloc
                    else:
                        candidate = url
                    if candidate == hostname:
                        return host.get('username'), host.get('password')
    raise KeyError("Please provide the marvin configuration file with credentials to your hosts")
def is_flat_dtype(dtype: np.dtype) -> bool:
    """
    Determines whether a numpy dtype object is flat.

    Checks whether the ``dtype`` just encodes one element or a shape. A
    dtype can characterise an array of other base types, which can then
    be embedded as an element of another array.

    Parameters
    ----------
    dtype : numpy.dtype
        The dtype to be checked.

    Raises
    ------
    TypeError
        The input is not a numpy's dtype object.
    ValueError
        The dtype is structured -- this function only accepts plane dtypes.

    Returns
    -------
    is_flat : boolean
        True if the dtype is flat, False otherwise.
    """
    if not isinstance(dtype, np.dtype):
        raise TypeError('The input should be a numpy dtype object.')

    # Structured dtypes (with named fields) are rejected outright.
    if dtype.names is not None:
        raise ValueError('The numpy dtype object is structured. '
                         'Only base dtype are allowed.')

    # dtype.ndim exists on every supported numpy release (>= 1.13), so the
    # old _NUMPY_1_13 shape-based fallback has been dropped; a flat dtype
    # has no subarray dimensions.
    return not bool(dtype.ndim)
def _solarize_impl(pil_img, level):
    """Applies PIL Solarize to `pil_img`.

    Inverts all pixel values above a threshold derived from `level`.
    (The previous docstring wrongly described a vertical translation.)

    Args:
        pil_img: Image in PIL object.
        level: Strength of the operation specified as an Integer from
            [0, `PARAMETER_MAX`].

    Returns:
        A PIL Image that has had Solarize applied to it.
    """
    # Scale level onto [0, 256]; a higher level lowers the solarize
    # threshold (256 - level), inverting more of the image.
    level = int_parameter(level, 256)
    return ImageOps.solarize(pil_img.convert('RGB'), 256 - level).convert('RGBA')
615650710266b91c6f91d8b93ab26ef5c5081551
30,796
def version_flash(cmd="flash"): """Return the version of flash (as a short string). Parses the output with ``-v``:: $ flash -v | head -n 1 FLASH v1.2.11 It would capture the version from the first line as follows: >>> version_flash() 'v1.2.11' If the command is not on the path, returns None. """ text = getoutput(cmd + " -v") ver = text.split("\n", 1)[0] if ver.upper().startswith("FLASH V"): return ver[7:]
87e9fed11f9d3a3206f4e3a983db1dc165fca576
30,797
def initialise_df(*column_names):
    """Create an empty pandas dataframe with the given columns.

    :param str column_names: N column names
    :return: Empty pandas dataframe with specified column names
    """
    empty_frame = pd.DataFrame(columns=list(column_names))
    return empty_frame
def _get_reporting_category(context):
    """Returns the current member reporting category"""
    # Reporting categories accumulate on the member record as the loop is
    # parsed; the current one is the most recently appended entry.
    categories = _get_member(context)[TransactionLoops.MEMBER_REPORTING_CATEGORIES]
    return categories[-1]