content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def get_extensions(dtypes):
    """Return the extensions registered for the specified dtypes."""
    matching = _filter_entities(_tb_extensions, dtypes)
    return matching
7fdfcc5919f56568db75a84e496c278cf9f86da5
3,629,800
def remove_blocks(pattern, source):
    """Replace every match of *pattern* in *source* with a fresh placeholder.

    Returns a tuple of (updated source, dict mapping each placeholder to the
    original block it replaced).
    """
    extracted = {}
    while True:
        found = pattern.search(source)
        if found is None:
            break
        token = placeholder()
        extracted[token] = found.group(0)
        # Substitute only the first occurrence so each match gets its own key.
        source = pattern.sub(token, source, count=1)
    return source, extracted
a79ad21ba3702958c9c63c721dde3b70efa7e98f
3,629,801
def asintegerarrays(*arrays, requirements=None, min_itemsize=None):
    """Cast the given array(s) to the same integer type.

    Not a public function. This is useful when calling cython functions.

    Args:
        *arrays (array-like): At least one array-like.
        requirements (str/list[str], optional): See :func:`numpy.require`.
        min_itemsize (int, optional): The minimum itemsize (in bytes) for
            the output arrays.

    Returns:
        Numpy array(s) satisfying the above requirements. They will all
        have the same dtype.
    """
    # numpy defaults empty arrays to float, so coerce those to a small
    # integer dtype up front before the common cast below.
    coerced = []
    for arr in arrays:
        coerced.append(arr if len(arr) else np.asarray(arr, dtype=np.int8))
    if not coerced:
        raise TypeError('asintegerarrays() takes at least 1 array (0 given)')
    return _astypearrays(coerced, requirements, min_itemsize, [np.integer])
1c61938800ea444bd57f376076c9bd8a3597345c
3,629,802
def tune_hyperparameters(X_train, y_train, group, model, ct, param_dict, n_iter, score):
    """Tunes hyperparameters for a ML model with LOGO randomizedCV.

    Note: This function is not included in my final solution, as the time
    required to run it (even with GPU) takes much too long in Kaggle.

    Parameters
    ----------
    X_train : pandas DataFrame
        The X train dataset.
    y_train : pandas DataFrame
        The y train dataset.
    group : pandas DataFrame
        The groups for leave one group out CV.
    model : sklearn Estimator
        The model to tune.
    ct : sklearn ColumnTransformer
        The column transformer to apply transformations to datasets.
    param_dict : dict
        The range for each hyperparameter for the search.
    n_iter : int
        The number of iterations of randomized search to perform.
    score : str
        The scoring metric.

    Returns
    -------
    sklearn RandomizedSearchCV
        Resulting object from the randomized search.
    """
    cross_validator = LeaveOneGroupOut()
    pipeline = make_pipeline(ct, model)
    randomized_search = RandomizedSearchCV(
        estimator=pipeline,
        param_distributions=param_dict,
        n_iter=n_iter,
        scoring=score,
        cv=cross_validator,
    )
    randomized_search.fit(X_train, y_train, groups=group)
    return randomized_search
0abe011d3bdb922a6735f80caf472348a7838983
3,629,803
def _design_matrix(X, fit_intercept=True): """Make design matrix For ordinary linear square, it would be X.T@X, ie the Gram matrix Override it, if you need. Arguments: X {2D array} -- input data fit_intercept {bool} -- whether to add intercept to the design matrix Returns: design matrix and names/indexes of features """ N, p = X.shape if fit_intercept: if hasattr(X, 'columns'): if 'intercept' in self.columns: raise Exception('It seems that the design matrix has contained `intercept`, please check it.') features = tuple(X.columns) + ('intercept',) else: features = np.arange(p+1) return np.hstack((X, np.ones((N, 1)))), features else: if hasattr(X, 'columns'): features = tuple(X.columns) else: features = np.arange(p) return X, features
6c739a303af0674bbccce6dd832442f673e07016
3,629,804
def simulate_gym_user(num_days=1000, noise_frac=0.0, periodicity=3, success_prob=0.5):
    """Return an artificial binary visit time series of a very regular user,
    for regularity analysis.

    The simulated user visits every ``periodicity`` days; optionally a
    fraction of the days is replaced by binary white noise.

    Args:
        num_days: length of the series in days.
        noise_frac: fraction of days whose value is replaced by noise.
        periodicity: the user visits every ``periodicity``-th day.
        success_prob: success probability p of the Bernoulli noise, which
            therefore has variance p * (1 - p).

    Returns:
        pd.Series of floats (0.0 / 1.0) of length ``num_days``.
    """
    # BUG FIX: the original docstring documented parameters (time_window,
    # visit_dates, visits_per_week) that do not exist on this function; it is
    # rewritten to describe the actual signature. `isclose` is also imported
    # explicitly instead of relying on an unseen module-level import.
    from math import isclose

    data = np.zeros(num_days)
    data[::periodicity] = 1.0
    user = pd.Series(data)
    # Corrupt data by binary white noise with success probability p.
    if not isclose(noise_frac, 0.0):
        irreg_index = user.sample(frac=noise_frac).index
        user.loc[irreg_index] = np.random.binomial(1, success_prob, len(irreg_index))
    return user.astype(float)
b48b810dc527325e152a6de7200643e7699d8a2d
3,629,805
def get_new_session_notification_actions(session_id, link):
    """
    Get the actions associated with a notification of an event getting a new
    session proposal.

    :param session_id: id of the session.
    :param link: link to view the session.
    :return: actions
    """
    view_action = NotificationAction(
        subject='session',
        link=link,
        subject_id=session_id,
        action_type='view',
    )
    # Persist before returning so the caller receives a stored action.
    save_to_db(view_action)
    return [view_action]
fe788e9c64535b29c6c563a71d4ef0a203f740f3
3,629,806
from typing import Tuple
from typing import Any


def pack_data(*args: Tuple[Any, str]) -> bytes:
    """Normalize each (value, abi_type) pair and pack them into one byte string."""
    values, abi_types = zip(*args)
    normalized_values = map_abi_data([abi_address_to_hex], abi_types, values)
    encoded_parts = []
    for abi_type, value in zip(abi_types, normalized_values):
        encoded_parts.append(remove_0x_prefix(hex_encode_abi_type(abi_type, value)))
    return decode_hex("".join(encoded_parts))
b9ce5c2f06edd05ab053f4d8809a845c5e01afa1
3,629,807
import json
import re


def _get_gh_issue_title(issue_id: int, repo_short_name: str) -> str:
    """
    Get the title of a GitHub issue and normalize it into a task name.

    :param issue_id: numeric id of the GitHub issue (must be >= 1)
    :param repo_short_name: `current` refer to the repo_short_name where we are,
        otherwise a repo_short_name short name (e.g., "amp")
    """
    # Resolve the short name into a full "host/org/repo" spec usable by `gh`.
    repo_full_name_with_host, repo_short_name = _get_repo_full_name_from_cmd(
        repo_short_name
    )
    # > (export NO_COLOR=1; gh issue view 1251 --json title )
    # {"title":"Update GH actions for amp"}
    dbg.dassert_lte(1, issue_id)
    cmd = (
        f"gh issue view {issue_id} --repo {repo_full_name_with_host} --json title"
    )
    _, txt = hsinte.system_to_string(cmd)
    _LOG.debug("txt=\n%s", txt)
    # Parse json.
    dict_ = json.loads(txt)
    _LOG.debug("dict_=\n%s", dict_)
    title = dict_["title"]
    _LOG.debug("title=%s", title)
    # Remove some annoying chars.
    for char in ": + ( ) / ` *".split():
        title = title.replace(char, "")
    # Replace multiple spaces with one.
    title = re.sub(r"\s+", " ", title)
    #
    title = title.replace(" ", "_")
    title = title.replace("-", "_")
    # Add the prefix `AmpTaskXYZ_...`
    task_prefix = git.get_task_prefix_from_repo_short_name(repo_short_name)
    _LOG.debug("task_prefix=%s", task_prefix)
    title = "%s%d_%s" % (task_prefix, issue_id, title)
    return title
f707b723db38a1d758ac6e6b4d3e605a9cbc878b
3,629,808
def cls_list_inputs(cls):
    """Return the names of input traits (``iotype == 'in'``) declared on a
    Component subclass, excluding traits inherited from Component itself and
    traits with no variable type name.

    :param cls: a Component subclass whose class traits are inspected.
    :return: list of input trait names.
    """
    # BUG FIX: dict.iteritems() is Python 2 only -> items(); also replaced the
    # awkward `not x == None` with the idiomatic identity check.
    return [k for k, v in cls.__class_traits__.items()
            if v.iotype == 'in'
            and k not in Component.__class_traits__
            and v.vartypename is not None]
2f4b40642822e612daf22c007726fafa407bd6d4
3,629,809
import torchvision
import torch


def load_trained_model(model_name=None, model_path="", class_num=10):
    """
    Load trained model from .pth file.

    Supported models:
    * "resnet": resnet18
    * "vgg": vgg11
    * "inception": inception v3
    * "mobilenet": mobilenet v2

    Args:
        model_name: one of "resnet", "vgg", "inception", "mobilenet".
        model_path: path to the saved ``state_dict`` (.pth file).
        class_num: number of output classes; the final layer is replaced
            to match this size.

    Returns:
        The model in eval() mode with the trained weights loaded.

    Raises:
        NotImplementedError: if ``model_name`` is not supported.
    """
    # BUG FIX: `nn` was referenced throughout but never imported (only
    # torchvision and torch were) -> every branch raised NameError.
    from torch import nn

    if model_name == "resnet":
        model = torchvision.models.resnet18(pretrained=True)
        num_features = model.fc.in_features
        model.fc = nn.Linear(num_features, class_num)
    elif model_name == "vgg":
        model = torchvision.models.vgg11(pretrained=True)
        num_features = model.classifier[-1].in_features
        model.classifier[-1] = nn.Linear(num_features, class_num)
    elif model_name == "inception":
        # aux_logits disabled so only the main classifier head is used.
        model = torchvision.models.inception_v3(pretrained=True, aux_logits=False)
        num_features = model.fc.in_features
        model.fc = nn.Linear(num_features, class_num)
    elif model_name == "mobilenet":
        model = torchvision.models.mobilenet_v2(pretrained=True)
        num_features = model.classifier[-1].in_features
        model.classifier[-1] = nn.Linear(num_features, class_num)
    else:
        raise NotImplementedError("Model not supported")
    model.load_state_dict(torch.load(model_path))
    model.eval()
    return model
e3e47bede7b8607029b20012e360b3dc35086448
3,629,810
def get_note(identifier):
    """
    Return a Document object for a single note instance.
    """
    note = notes[identifier]
    content = {
        'description': note['description'],
        'complete': note['complete'],
    }
    # Hypermedia controls for mutating/removing this note.
    content['edit'] = Link(action='put',
                           fields=[Field(name='description'),
                                   Field(name='complete')])
    content['delete'] = Link(action='delete')
    return Document(url='/' + identifier, title='Note', content=content)
ce193684d24e3d29595db60ba55aea70d42864f4
3,629,811
from .util import imhist
from .classical import histeq
from .exact import histeq_exact


def contrast_restoration(im, method, remove_bits=1, blur_sigma=0, **kwargs):
    """
    Performs contrast enhancement by degrading an image with degrade_image
    then performing histogram equalization to restore the original histogram
    and hopefully the original image.

    The restored image and the number of failures (or None if not available)
    is returned. Other metrics, such as PSNR or SSIM, can be performed with
    the returned restored image. This restoration measurement is done in [1,2].

    REFERENCES
      1. Nikolova M, Wen Y-W, and Chan R, 2013, "Exact histogram specification
         for digital images using a variational approach", J of Mathematical
         Imaging and Vision, 46(3):309-325
      2. Jung S-W, 2014, "Exact Histogram Specification Considering the Just
         Noticeable Difference", IEIE Transactions on Smart Processing and
         Computing, 3(2):52-58.
    """
    original_hist = imhist(im)
    degraded = degrade_image(im, remove_bits, blur_sigma)
    if method == 'classic':
        return (histeq(degraded, original_hist, **kwargs), None)
    return histeq_exact(degraded, original_hist, method=method,
                        return_fails=True, **kwargs)
b11144382e36406df5c4aab899e7fa041f44d5cf
3,629,812
def benchmark_matrix_inverse():
    """
    Benchmark the user's setup by measuring the time taken by matrix inversion.

    Performs a benchmark of the user's setup by inverting a 6400x6400 matrix
    filled with random numbers, then returns the time in ms taken for this
    operation. Good performance on this benchmark indicates both the CPU and
    RAM are fast.

    Returns:
        Integer representing the time in milliseconds taken by the matrix
        creation, inversion and deletion.
    """
    started_at = get_time()
    matrix = np.random.rand(6400, 6400)
    matrix = np.linalg.inv(matrix)
    # Free the large buffer before stopping the clock, as in the original.
    del matrix
    return get_time() - started_at
2821ebba8fc74a5a2c3bd34471a834f58ea14bd7
3,629,813
def psiBlastRun(sequence, cycles=2, filename=None, **kwargs):
    """Returns the results from a full PSI-BLAST run (multiple cycles).

    All arguments are the same as psiBlastCycle and are passed to it
    except for cycles.

    :arg cycles: the number of cycles to run
        default is 2
    :type cycles: int
    """
    # NOTE(review): psithr is read here but never used below; presumably
    # psiBlastCycle picks it up from **kwargs — confirm it is forwarded.
    psithr = kwargs.get('psithr', 1.0e-3)
    # Optional state from a previous run.
    job_id = kwargs.get('previousjobid', '')
    selectedHits = kwargs.get('selectedHits', '')

    cycles_done = 0
    results_list = []
    job_ids = []
    while cycles_done < cycles:
        if cycles_done > 0:
            # After the first cycle, feed the previous job's preselected
            # sequences back in instead of the original query sequence.
            selectedHits = 'http://www.ebi.ac.uk/Tools/services/rest/psiblast/result/' \
                           + job_id + '/preselected_seq'
            sequence = None
        # Each cycle returns the new job id, its results, and the (possibly
        # cleared) sequence used for the next iteration.
        job_id, results, sequence = psiBlastCycle(sequence, filename, \
                                                  previousjobid=job_id, \
                                                  selectedHits=selectedHits, \
                                                  cycle=cycles_done, **kwargs)
        results_list.append(results)
        job_ids.append(job_id)
        cycles_done += 1
        LOGGER.info('Finished cycle {0} with job ID {1}.'.format(cycles_done, job_id))

    return job_ids, results_list, sequence
d2782d283739dcc65e6a5cf23f69e31d45ab6662
3,629,814
from pathlib import Path


def filter_files(names: list[Path]) -> list[Path]:
    """Keep only regular files, dropping directories (and anything else)."""
    files = []
    for candidate in names:
        if candidate.is_file():
            files.append(candidate)
    return files
25c1258891e2df7c35f700a26cadf01013329337
3,629,815
from datetime import datetime


def get_user_input():
    """Returns validated user input for inclusion into a database.

    Prompts for a non-empty title, a valid date/time, a positive duration and
    an optional comment, re-asking until each value is valid.

    Returns
    -------
    User_entry
        Entry of (title, comment, start as str, duration hours, minutes).
    """
    print('--- New appointment entry ---')
    # Title must be non-empty.
    while True:
        title = input("Appointment's title? ")
        if len(title) == 0:
            print('Title can not be empty')
        else:
            break
    # Date/time: a successful datetime() construction validates the values.
    while True:
        print('Date and time of the appointment:')
        day = input('\tDay? ')
        month = input('\tMonth (number)? ')
        year = input('\tYear? ')
        hour = input('\tHour (24h clock)? ')
        minute = input('\tMinute? ')
        try:
            # BUG FIX: with `from datetime import datetime` in scope the
            # constructor is `datetime(...)`; the original's
            # `datetime.datetime(...)` raised AttributeError (not caught by
            # `except ValueError`) and crashed on every date entry.
            start = datetime(int(year), int(month), int(day),
                             int(hour), int(minute))
            break
        except ValueError:
            print('Please correct date and time')
    # Duration: non-negative hours/minutes whose total is positive.
    while True:
        print('Duration of the appointment:')
        try:
            hour = int(input('\tHours? '))
            minute = int(input('\tMinutes? '))
            if hour >= 0 and minute >= 0 and hour + minute > 0:
                break
            else:
                print('Please correct duration time')
        except ValueError:
            print('Please correct duration time')
    comment = input('Any comments? ')
    return User_entry(title, comment, str(start), hour, minute)
f355dd2ab8332f6bcdfb21d4ee16dba47346e253
3,629,816
def send_data_frame(writer, filename, mime_type=None, **kwargs):
    """
    Convert data frame into the format expected by the Download component.

    :param writer: a data frame writer (a bound method such as ``df.to_csv``)
    :param filename: the name of the file
    :param mime_type: mime type of the file (optional, passed to Blob in the
        javascript layer)
    :return: dict of data frame content (base64 encoded) and meta data used by
        the Download component
    :raises ValueError: if the writer is not a known pandas writer

    Examples
    --------

    >>> df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 1, 5, 6], 'c': ['x', 'x', 'y', 'y']})
    ...
    >>> send_data_frame(df.to_csv, "mydf.csv")  # download as csv
    >>> send_data_frame(df.to_json, "mydf.json")  # download as json
    >>> send_data_frame(df.to_excel, "mydf.xls", index=False)  # download as excel
    >>> send_data_frame(df.to_pkl, "mydf.pkl")  # download as pickle
    """
    name = writer.__name__
    # Reject writers we have no binary/text classification for.
    # (idiom fix: membership test directly on the dict, no redundant .keys())
    if name not in known_pandas_writers:
        raise ValueError("The provided writer ({}) is not supported, "
                         "try calling send_string or send_bytes directly.".format(name))
    # Binary formats go through send_bytes, text formats through send_string.
    if known_pandas_writers[name]:
        return send_bytes(writer, filename, mime_type, **kwargs)
    return send_string(writer, filename, mime_type, **kwargs)
77e725c9a2f8178bc573a987011327fdbb3bdc12
3,629,817
def find_path(node1, node2):
    """Finds the length of the path from node1 to node2.

    This is done by looking at the lists of parents and finding the first
    common ancestor; the path length is the sum of both depths to it.
    """
    parents1 = get_parents(node1)
    parents2 = get_parents(node2)
    for depth1, ancestor in enumerate(parents1):
        if ancestor in parents2:
            return depth1 + parents2.index(ancestor)
    # No common ancestor found.
    return 0
dd0469d6659e2f2cdc0a85e9d205b94ee656e68b
3,629,818
import os
import yaml


def _load_test_config():
    """Loads information of the pre-configured gcp project."""
    package_dir, _ = os.path.split(
        os.path.abspath(django_cloud_deploy.tests.__file__))
    config_path = os.path.join(package_dir, 'integration', 'data',
                               'integration_test_config.yaml')
    with open(config_path) as config_file:
        raw_config = config_file.read()
    return yaml.load(raw_config, Loader=yaml.FullLoader)
6b45d2932409ac5373a940460c01ca0884a91987
3,629,819
def load_play_bcc():
    """
    Play : 8 samples, 3 features, 2 classifications.

    Returns the full frame plus the feature and class sub-frames.
    """
    FEATURES = ['Temperature', 'Humidity', 'Pressure']
    CLASSES = ['Rainy', 'Play']
    rows = [
        # T  H  P  R  P
        [0, 0, 0, 1, 0],
        [0, 0, 1, 0, 0],
        [0, 1, 0, 1, 0],
        [0, 1, 1, 0, 0],
        [1, 0, 0, 0, 1],
        [1, 0, 1, 0, 1],
        [1, 1, 0, 1, 0],
        [1, 1, 1, 0, 1],
    ]
    data = pd.DataFrame(rows, columns=FEATURES + CLASSES)
    return data, data[FEATURES], data[CLASSES]
a543a273331609c4c74d53d0cdf4a63644a7d8db
3,629,820
from typing import Dict
from typing import Any


def _pseudodata2dict(data: PseudopotentialData) -> Dict[str, Any]:
    """
    Convert a PseudopotentialData to a compatible dict with:

    * Decimals replaced by strings
    * the required attrs set on the root
    * the key "coefficients" replaced with "coeffs"
    """
    pseudo_dict = data.dict(by_alias=True)
    # Walk the nested dict iteratively, stringifying every Decimal.
    pending = [pseudo_dict]
    while pending:
        node = pending.pop()
        for key, val in node.items():
            if isinstance(val, dict):
                pending.append(val)
            elif isinstance(val, Decimal):
                node[key] = str(val)
            elif isinstance(val, list) and val and isinstance(val[0], dict):
                pending.extend(val)
            elif isinstance(val, list) and val and isinstance(val[0], Decimal):
                node[key] = [str(item) for item in val]
    # Longest identifier becomes the canonical name; the rest are aliases.
    pseudo_dict["aliases"] = sorted(pseudo_dict.pop("identifiers"),
                                    key=lambda i: -len(i))
    pseudo_dict["name"] = pseudo_dict["aliases"][0]
    pseudo_dict["tags"] = pseudo_dict["name"].split("-")
    return pseudo_dict
e9feacd5f4ab51b18be92c7ea17bb9372fc59154
3,629,821
import hashlib


def hashkey(key):
    """Returns the sha1 hash for key"""
    # Cache digests so repeated calls skip the sha1 work.
    cached = HASH_CACHE.get(key)
    if cached is None:
        cached = hashlib.sha1(str.encode(key)).hexdigest()
        HASH_CACHE[key] = cached
    return cached
a301ac524299eea1bbbe684d0a6ad011ca298619
3,629,822
def get_geocoder(email):
    """Build a rate-limited Nominatim geocode function.

    The supplied email address is used as the user_agent, as per Nominatim's
    usage policy. The returned geocoder takes at least 1 second per address
    queried, in accordance with Nominatim's terms of service. Note: this
    process cannot be multi-threaded or run in parallel.

    Arguments
    ---------
    email : str
        A valid email address supplied to Nominatim as user_agent.

    Examples
    --------
    >>> geocode = get_geocoder("valid.email@address.com")
    >>> geocode("1315 10th St, Sacramento, CA 95814")
    Location(California State Capitol, 1315, 10th Street, Land Park,
    Sacramento, Sacramento County, California, 95814, United States of
    America, (38.57656885, -121.4934010890531, 0.0))
    """
    # Must enter a valid email
    assert is_valid_email(email), "Must enter a valid email"
    locator = Nominatim(user_agent=email)
    throttled_geocode = RateLimiter(locator.geocode, min_delay_seconds=1)
    return throttled_geocode
1a2ae6ecd0d3e2607bbdaa2f24ae6615e26ddd92
3,629,823
def move_ship_waypoint(instructions: list) -> list:
    """Move the ship using the waypoint movement rules.

    :param instructions: List of (command, value) movement instructions
    :return: Final [x, y] position of the ship
    """
    wx, wy = 10, 1   # waypoint, relative to the ship
    sx, sy = 0, 0    # ship position
    for cmd, val in instructions:
        if cmd == 'F':
            # Move toward the waypoint `val` times.
            sx += val * wx
            sy += val * wy
        if cmd == 'N':
            wy += val
        elif cmd == 'S':
            wy -= val
        elif cmd == 'E':
            wx += val
        elif cmd == 'W':
            wx -= val
        elif cmd in ('L', 'R'):
            # Normalize to a counter-clockwise angle in {0, 90, 180, 270}.
            angle = (val if cmd == 'L' else -val) % 360
            if angle == 90:
                wx, wy = -wy, wx
            elif angle == 180:
                wx, wy = -wx, -wy
            elif angle == 270:
                wx, wy = wy, -wx
    return [sx, sy]
7202392e4826d522287455d94f7b06c0e2f931ee
3,629,824
def within_image_supervised_pixel_contrastive_loss(
        features, labels, ignore_labels, temperature):
    """Computes within-image supervised pixel contrastive loss.

    Args:
        features: A tensor of shape [batch_size, num_pixels, num_channels]
        labels: A tensor of shape [batch_size, num_pixels, 1]
        ignore_labels: A list of labels to ignore. Pixels with these labels
            will be ignored in the loss computation
        temperature: Temperature to use in contrastive loss

    Returns:
        Contrastive loss tensor
    """
    # Pairwise similarities between every pair of pixels, scaled by the
    # contrastive temperature.
    similarity = tf.matmul(features, features, transpose_b=True) / temperature
    ignore_mask = generate_ignore_mask(labels, ignore_labels)
    positive_mask, negative_mask = generate_positive_and_negative_masks(labels)
    return compute_contrastive_loss(
        similarity, positive_mask, negative_mask, ignore_mask)
8e121d23c0c02b97dc1ad25d0093aff8519d7074
3,629,825
def _transform_indicators(metadata: dict) -> pd.DataFrame: """Transform indicators metadata into a formatted DataFrame.""" df = pd.DataFrame.from_dict(metadata.get("indicators")) df = df[ ["id", "code", "shortName", "name", "numerator", "denominator", "annualized"] ] df.columns = [ "dx_uid", "dx_code", "dx_shortname", "dx_name", "numerator", "denominator", "annualized", ] return df
882b66cf1dbe2e1e5f1189134efec357d5983c5c
3,629,826
def symmetricMatrix(seq):
    """
    Creates a symmetric 3x3 matrix from a sequence (list, tuple, Matrix)
    with 6 elements.
    """
    assert isinstance(seq, (list, tuple, symbolics.Matrix))
    # Unpack the 6 independent entries of the symmetric matrix.
    a, b, c, d, e, f = seq[0], seq[1], seq[2], seq[3], seq[4], seq[5]
    return symbolics.Matrix([[a, b, d],
                             [b, c, e],
                             [d, e, f]])
0b90a12fc0d95b412b142dcbe19dfa7e2910948a
3,629,827
import ray


def get_actor(name: str) -> ray.actor.ActorHandle:
    """Get a named actor which was previously created.

    If the actor doesn't exist, an exception will be raised.

    Args:
        name: The name of the named actor.

    Returns:
        The ActorHandle object corresponding to the name.
    """
    # Deprecated entry point: warn, then delegate to the new location.
    logger.warning("ray.util.get_actor has been moved to ray.get_actor and "
                   "will be removed in the future.")
    handle = _get_actor(name)
    return handle
6856200cf1ee61d3f0e069c048189f37dc0d06ff
3,629,828
import itertools


def get_hyperparams_combinations(hyperparams):
    """Get list of hyperparameter (dict) combinations.

    Expands a mapping of name -> candidate values into one dict per element
    of the Cartesian product of all value lists.
    """
    names = list(hyperparams.keys())
    value_lists = [hyperparams[name] for name in names]
    combos = []
    for chosen in itertools.product(*value_lists):
        combos.append(dict(zip(names, chosen)))
    return combos
e5f52a8eddb8a2a476e0daa47f63161d440263f2
3,629,829
import os


def import_curves_data_csv_file():
    """
    Import user curves data CSV file as a *Nuke* *ColorLookup* node.

    Returns
    -------
    ColorLookup
        ColorLookup node, or None if no existing file was chosen.
    """
    file = nuke.getFilename('Choose ColorLookup Node Curves Data CSV File',
                            '*.csv')
    # Only build the node when the user picked an existing file.
    if file is not None and os.path.exists(file):
        return color_lookup_node(file)
61b9842bf8b26b7024955827d1784ac1e536a326
3,629,830
from typing import Tuple


def get_cropped_axes(image: np.ndarray, boundary_width: int = 5) -> Tuple[slice, ...]:
    """
    Return slices on both x and y axes covering where the image is non-empty.

    Method: take the bounding box of all non-zero pixels in the image and pad
    it by a fixed-width border, clamped to the image bounds.

    :param image: the image to be cropped
    :param boundary_width: number of pixels boundary to add around bounding box
    :return: (x slice, y slice) of the padded bounding box
    """
    height, width = image.shape[0], image.shape[1]
    # noinspection PyUnresolvedReferences
    slice_x, slice_y = find_objects(image > 0)[0]
    padded_x = slice(max(0, slice_x.start - boundary_width),
                     min(height, slice_x.stop + boundary_width))
    padded_y = slice(max(0, slice_y.start - boundary_width),
                     min(width, slice_y.stop + boundary_width))
    return (padded_x, padded_y)
9650f114cc9637e09550956c92a29a28d0062147
3,629,831
def addressable_list(type_constraint):
    """Marks a list's values as satisfying a given type constraint.

    Some (or all) elements of the list may be
    :class:`pants.engine.exp.objects.Resolvable` elements to resolve later.

    See :class:`AddressableDescriptor` for more details.

    :param type_constraint: The type constraint the list's values must all satisfy.
    :type type_constraint: :class:`TypeConstraint`
    """
    wrapped = _addressable_wrapper(AddressableList, type_constraint)
    return wrapped
b0769cd11cb4c15e4f1b585141809a5b715a16bf
3,629,832
def user():
    """Returns a user with name='mesh', token='token', trakt={'trakt': 'auth'}"""
    trakt_auth = {'trakt': 'auth'}
    return User('mesh', 'token', trakt_auth)
d73d4ae88af53c5b315dcad99510028c75052e6b
3,629,833
from datetime import datetime
import json
import logging


def convert_from_poolfile_to_sequence_set_and_back(inp_fp_path, op_path,
                                                   conversion_type,
                                                   description="",
                                                   run_id=None):
    """Convert between a MapTnSeq pool file (TSV) and a Sequence Set (JSON).

    Args:
        inp_fp_path: Path to either pool file (TSV) or Sequence Set (JSON).
        op_path: Output path for the converted file.
        conversion_type (int): 0 -> pool file to Sequence Set,
            1 -> Sequence Set to pool file.
        description (str): Optional string describing the set.
        run_id (str): Optional id for the sequence set; auto-generated from
            the current time when None.

    Pool file columns:
        barcode(str), rcbarcode(str), nTot(int), n(int), scaffold(str),
        strand(str +/-), pos(int), n2(int), scaffold2(str), strand2(str +/-),
        pos2(int), nPastEnd(int)

    Returns:
        None. The converted file is written to op_path.

    Raises:
        Exception: if conversion_type is not 0 or 1, or if a Sequence Set
            input path does not end in ".json".
    """
    if conversion_type not in (0, 1):
        raise Exception("Cannot recognize conversion type: Must be int." +
                        "Val: {}".format(conversion_type))
    if conversion_type == 0:
        _poolfile_to_sequence_set(inp_fp_path, op_path, description, run_id)
    else:
        _sequence_set_to_poolfile(inp_fp_path, op_path)
    return None


def _poolfile_to_sequence_set(inp_fp_path, op_path, description, run_id):
    """Read a pool-file TSV and write it out as a Sequence Set JSON."""
    if run_id is None:
        # BUG FIX: the original had a trailing comma after this expression,
        # which silently made run_id a 1-tuple instead of a string.
        run_id = "MapTnSeq_Barcodes_run_on_" + \
                 str(datetime.now()).replace(' ', '_')
    sequence_set = {
        "sequence_set_id": run_id,
        "description": "MapTnSeq (RBTNSEQ) mapping of barcodes to a " +
                       "genome. Explanations of values: 'nTot' is total " +
                       "number of times this barcode was counted." +
                       " 'n' is number of times this barcode was counted" +
                       " at this location. 'scf' is scaffold name." +
                       " 'strand' is strand (+ or -). 'pos' is nucleotide" +
                       " position. 'n2' is number of times found at second" +
                       " highest counted location. 'scf2' is second highest" +
                       " location scaffold, 'strand2' similarly, etc." +
                       " 'nPastEnd' means number of times this barcode was" +
                       " found next to the next part of the plasmid (barcode" +
                       " wasn't inserted into the genome without the rest " +
                       " of the plasmid).\n" +
                       " User Description (optional): {}".format(description),
    }
    sequences = []
    with open(inp_fp_path, "r") as pool_FH:
        pool_FH.readline()  # skip the header line
        i = 0
        for line in pool_FH:
            if not line.strip():
                # Robustness: ignore trailing blank lines.
                continue
            i += 1
            fields = line.split('\t')
            nPastEnd = fields[-1].rstrip()
            barcode, rcbarcode, nTot, n, scf, strnd, pos = fields[:7]
            n2, scf2, strnd2, pos2 = fields[7:-1]
            # desc holds all information needed to reconstruct the pool file.
            desc = "nTot:{};n:{};scf:{};strand:{};pos:{};".format(
                nTot, n, scf, strnd, pos)
            desc += "n2:{};scf2:{};strnd2:{};pos2:{};".format(
                n2, scf2, strnd2, pos2)
            desc += "nPastEnd:" + nPastEnd
            sequences.append({
                "sequence_id": "MapTnSeq_barcode_" + str(i),
                "description": desc,
                "sequence": barcode,
            })
    sequence_set["sequences"] = sequences
    with open(op_path, "w") as g:
        g.write(json.dumps(sequence_set, indent=2))
    logging.info("Wrote Sequence Set JSON file to " + op_path)


def _sequence_set_to_poolfile(inp_fp_path, op_path):
    """Read a Sequence Set JSON and write it back out as a pool-file TSV."""
    if inp_fp_path.split(".")[-1] != "json":
        raise Exception("Sequence Set indicated but not JSON file")
    with open(inp_fp_path) as f:
        sequence_set_d = json.loads(f.read())
    complement = {"A": "T", "T": "A", "G": "C", "C": "G"}
    with open(op_path, "w") as out_FH:
        out_FH.write("barcode\trcbarcode\tnTot\tn\tscaffold\tstrand\tpos\t"
                     "n2\tscaffold2\tstrand2\tpos2\tnPastEnd\n")
        for seq in sequence_set_d["sequences"]:
            barcode = seq["sequence"]
            # Reverse-complement the barcode for the rcbarcode column.
            rcbarcode = "".join(complement[b] for b in reversed(barcode))
            # The description encodes the remaining columns as "key:value;".
            values = [item.split(":")[1]
                      for item in seq["description"].split(";")]
            out_FH.write(barcode + "\t" + rcbarcode + "\t" +
                         "\t".join(values) + "\n")
    logging.info("Wrote Pool File from Sequence Set at " + op_path)
ad46eb491840aa75764b013495a113ae746144ba
3,629,834
from typing import List


def read_basis_format(basis_format: str) -> List[int]:
    """Read the basis set using the specified format."""
    inner = basis_format.replace('[', '').split(']')[0]
    numbers = [int(token) for token in inner.split(',')]
    # cp2k coefficient formats start in column 5
    return numbers[4:]
9701309ab43eb7a0227aa141653688dbdce40811
3,629,835
def _central_crop(image_list, crop_height, crop_width):
    """Performs central crops of the given image list.

    Args:
      image_list: a list of image tensors of the same dimension but possibly
        varying channel.
      crop_height: the height of the image following the crop.
      crop_width: the width of the image following the crop.

    Returns:
      the list of cropped images.
    """
    outputs = []
    for image in image_list:
        # Dynamic spatial dimensions (graph-time tensors, not Python ints).
        image_height = tf.shape(image)[0]
        image_width = tf.shape(image)[1]

        # NOTE(review): under Python 3, `/` is true division, so these offsets
        # are float tensors; presumably `_crop` casts or accepts them — confirm,
        # otherwise `//` (floor division) would be the safer choice here.
        offset_height = (image_height - crop_height) / 2
        offset_width = (image_width - crop_width) / 2

        outputs.append(
            _crop(image, offset_height, offset_width, crop_height, crop_width))
    return outputs
16481f8db0be53b08fe3b4d4173ae3c9bd71ceb2
3,629,836
def concat_combining_function(a, b):
    """
    Combines the tensor `a` and `b` by concatenating them.

    :param a: the tensor a
    :type a: tf.Tensor
    :param b: the tensor b
    :type b: tf.Tensor
    :return: a combination of the tensors
    :rtype: tf.Tensor
    """
    # Promote rank-2 tensors to rank 3 so both inputs concat along axis 1.
    tensors = []
    for tensor in (a, b):
        if tensor.shape.rank == 2:
            tensor = tf.expand_dims(tensor, 1)
        tensors.append(tensor)
    return tf.concat(tensors, axis=1)
6c96fac311ef4085f282e8419ffa8d7b98ad38ed
3,629,837
def filter_list_zones(auth_context, cloud_id, zones=None, perm='read',
                      cached=False):
    """Filter the zones of the specific cloud based on the RBAC policy"""
    if zones is None:
        zones = list_zones(auth_context.owner, cloud_id, cached=cached)
    # Owners see everything unfiltered.
    if auth_context.is_owner():
        return zones
    allowed = auth_context.get_allowed_resources(perm)
    if cloud_id not in allowed['clouds']:
        return []
    visible_zones = []
    for zone in zones:
        if zone['id'] not in allowed['zones']:
            continue
        # Drop records the caller may not see; iterate over a copy since we
        # mutate the mapping while looping.
        for record_id in list(zone['records']):
            if record_id not in allowed['records']:
                zone['records'].pop(record_id)
        visible_zones.append(zone)
    return visible_zones
11e1d099d1af9f46379f6c8f4f6b63d3d9bf4280
3,629,838
def pixel_weighted_categorical_crossentropy(weights, target, output,
                                            from_logits=False, axis=-1):
    """
    Pixel-weighted version of tf.keras.backend.categorical_crossentropy.

    Copy of
    https://github.com/tensorflow/tensorflow/blob/v2.3.1/tensorflow/python/keras/backend.py#L4640-L4708
    except for the last line, where the per-pixel `weights` factor is
    introduced into the cross-entropy sum.
    """
    target = ops.convert_to_tensor_v2(target)
    output = ops.convert_to_tensor_v2(output)
    target.shape.assert_is_compatible_with(output.shape)
    if from_logits:
        # NOTE(review): this branch returns the UNWEIGHTED loss, mirroring the
        # upstream code — confirm whether weighting was intended here too.
        return nn.softmax_cross_entropy_with_logits_v2(
            labels=target, logits=output, axis=axis)
    # If the output comes straight from a Softmax op (and is not an eager
    # tensor / variable / Keras-history tensor), use the op's logits input
    # directly for numerical stability — same shortcut as upstream Keras.
    if (not isinstance(output, (ops.EagerTensor, variables_module.Variable)) and
            output.op.type == 'Softmax') and not hasattr(output, '_keras_history'):
        assert len(output.op.inputs) == 1
        output = output.op.inputs[0]
        return nn.softmax_cross_entropy_with_logits_v2(
            labels=target, logits=output, axis=axis)
    # Scale predictions so class probabilities sum to 1 along `axis`, then
    # clip to avoid log(0).
    output = output / math_ops.reduce_sum(output, axis, True)
    epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
    output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
    # The only change vs upstream: multiply by the per-pixel weights.
    return -math_ops.reduce_sum(weights * target * math_ops.log(output), axis)
f54117bd362ff0e3e01d77f16650f98bd832402a
3,629,839
def auth_required(f):
    """
    Decorator for aiohttp web handlers with primitive auth check
    """
    @wraps(f)
    async def guarded(request):
        # Reject requests that carry no auth information at all.
        if 'auth' not in request:
            raise HTTPUnauthorized()
        return await f(request)
    return guarded
03a71418314b1caf7f40d95c41a6386f8d4eb10e
3,629,840
def p_to_stars(p, thres=(0.1, 0.05, 0.01)):
    """Return stars for significance values."""
    # One star per threshold the p-value falls below.
    return "".join("*" for t in thres if p < t)
d88c2fd6c1b4e2d75a9cb664dfc10fab308bc6ee
3,629,841
def permuteToBlocks(arr, blockshape):
    """Permute an array so that it consists of linearized blocks.

    Example: A two-dimensional array of the form

        0  1  2  3
        4  5  6  7
        8  9  10 11
        12 13 14 15

    would be turned into an array like this with (2, 2) blocks:

        0 1 4 5
        2 3 6 7
        8 9 12 13
        10 11 14 15
    """
    ndim = len(blockshape)
    if ndim < 2:
        raise ValueError("Need more than one dimension.")
    if ndim == 2:
        # Unpack (blockheight, blockwidth).
        return permuteToBlocks2d(arr, *blockshape)
    if ndim == 3:
        # Unpack (blockdepth, blockheight, blockwidth).
        return permuteToBlocks3d(arr, *blockshape)
    raise NotImplementedError("Only for dimensions 2 and 3.")
b5edc7c64ed3facf39354e7c38955cd5e218d3a3
3,629,842
from typing import Iterable
from typing import Dict
from typing import Tuple
def _polygonise_splits(
        arr: np.ndarray,
        named_slices: Dict[str, Tuple]  # was annotated Iterable[...] but the code calls .items(), so it must be a mapping
) -> Dict[str, gpd.GeoDataFrame]:
    """
    Create polygons from multiple sub-arrays of the given array.

    Note:
        Indices for `named_slices` must be given in x-y convention. x-y indexing
        is used for convenience with plotting later. The origin for x-y indexing
        is taken to be at the lower left corner of the array. The x-index
        increases horizontally to the right, y-index increases vertically to the
        top.

    Args:
        arr (np.ndarray): The array from which to select sub-arrays and
            polygonise them
        named_slices (Dict[str, Tuple]): A mapping from name to the x-y limits
            ``(x_lims, y_lims)`` of the sub-array to polygonise. x-y indices
            must be >= 0 or None.

    Returns:
        Dict[str, gpd.GeoDataFrame]: one polygonised GeoDataFrame per name.
    """
    result = {}
    # Each entry maps a name to ((x_min, x_max), (y_min, y_max)) limits.
    for name, (x_lims, y_lims) in named_slices.items():
        result[name] = polygonise_sub_array(arr, x_lims, y_lims)
    return result
41eea39a96e1dbacb161c4e160ae73ed49762deb
3,629,843
import warnings
def getPointInTheMiddle(start_point, end_point, time_diff, point_idx):
    """Deprecated alias for ``get_point_in_the_middle``.

    Calculates a new point between two points depending on the time
    difference between them and the point index.

    Parameters
    ----------
    start_point: DataFrame
    end_point: DataFrame
    time_diff: float
    point_idx: int
        Point index between the start and the end points

    Returns
    -------
    point: list
        A new point between the start and the end points.
    """
    warnings.warn("The getPointInTheMiddle function is deprecated and "
                  "will be removed in version 2.0.0. "
                  "Use the get_point_in_the_middle function instead.",
                  FutureWarning,
                  # stacklevel=2 attributes the warning to the caller of this
                  # wrapper; the previous value of 8 pointed far up the stack
                  # and usually blamed an unrelated frame.
                  stacklevel=2
                  )
    return get_point_in_the_middle(start_point, end_point, time_diff, point_idx)
b5b76bbb1bc4b5cb37e5959b009106d1d50369a6
3,629,844
def get_style_dependencies(style_name=None, base_url=DEFAULT_BASE_URL):
    """Return a style's dependency settings as a ``{name: bool}`` dict.

    Args:
        style_name (str): Name of style; default is the "default" style.
        base_url (str): Ignore unless you need a custom domain, port or
            version for the CyREST API.

    Returns:
        dict: every visual property dependency mapped to its current
        enabled state.

    Raises:
        CyError: if the style name doesn't exist.
        requests.exceptions.RequestException: if Cytoscape can't be
            reached or returns an error.
    """
    if style_name is None:
        style_name = 'default'
        narrate(f'style_name not specified, so accessing "default" style.')

    # Reject unknown style names up front with a clear error.
    if style_name not in styles.get_visual_style_names(base_url=base_url):
        raise CyError(f'No visual style named "{style_name}"')

    res = commands.cyrest_get(f'styles/{style_name}/dependencies', base_url=base_url)

    # Collapse the list of {visualPropertyDependency, enabled} records.
    return {entry['visualPropertyDependency']: entry['enabled'] for entry in res}
8d297294c7f5da5f4f2bbea80c659a57e2207aba
3,629,845
import logging
def bpt_nii(maps, ax = None, snr_min = None, deredden = False, return_figure = True,
            plot_map = False, plot_kde = False, radial_bin = None, return_data = None,
            overplot_dk_bpt = False, dk_bpt_kwargs = {}, deredden_kwargs = {},
            classification_kwargs = {}, **kwargs):
    """
    NII / HA BPT Diagram ala Krishnarao+19 for MaNGA Galaxy

    Parameters
    ----------
    maps: 'marvin.tools.maps.Maps' or 'dk_marvin_tools.DKMaps.DKMaps'
        MaNGA Maps Data
    ax: 'matplotlib.pyplot.figure.axes', optional, must be keyword
        Matplotlib axes to plot on
    snr_min: 'number', optional, must be keyword
        min SNR to use for data
    deredden: 'bool', optional, must be keyword
        if True, will deredden emission lines
    return_figure: 'bool', optional, must be keyword
        if False, will return BPT Classifications as a dictionary
    plot_map: 'bool', optional, must be keyword
        if True, will instead plot a map color coded by classifications
    plot_kde: 'bool', optional, must be keyword
        if True, will plot kde plot instead of scatter plot
    radial_bin: 'list' or 'tuple' or 'np.ndarray', optional, must be keyword
        if given, only plots points within provided radial bin in terms of R/R_e
    return_data: 'bool', optional, must be keyword
        if True, returns data instead of plotting
    overplot_dk_bpt: 'bool', optional, must be keyword
        if True, overplots dk_bpt
    dk_bpt_kwargs: 'dict', optional, must be keyword
        kwargs passed to dk_bpt
    deredden_kwargs: 'dict', optional, must be keyword
        kwargs passed to deredden
    classification_kwargs: 'dict', optional, must be keyword
        kwargs passed to draw_classification_lines
    kwargs: 'dict', optional, must be keywords
        keywords to pass to scatter plot of BPT points or keywords to pass to map.plot of plot_map
    """
    # NOTE(review): several map keys below are spelled "glfux"/"glflux"
    # instead of "gflux" — presumably typos tolerated by marvin's fuzzy
    # key matching; confirm against the Maps key names.
    # Get EmLine Data
    if deredden:
        try:
            ha = maps.deredden("emline gflux ha", **deredden_kwargs)
        except AttributeError:
            # Maps object cannot deredden — fall back to raw fluxes.
            logging.warning("provided maps object does not have a deredden method. Skipping dereddening process.")
            ha = maps["emline gflux ha"]
            hb = maps["emline gflux hb"]
            oiii = maps["emline glfux oiii 5007"]
            nii = maps["emline glflux nii 6585"]
        else:
            hb = maps.deredden("emline glfux hb", **deredden_kwargs)
            oiii = maps.deredden("emline glfux oiii 5007", **deredden_kwargs)
            nii = maps.deredden("emline glfux nii 6585", **deredden_kwargs)
    else:
        ha = maps["emline gflux ha"]
        hb = maps["emline gflux hb"]
        oiii = maps["emline glfux oiii 5007"]
        nii = maps["emline glflux nii 6585"]

    if snr_min is None:
        snr_min = 3.

    # Get masked Data
    ha_masked = ha.masked
    hb_masked = hb.masked
    oiii_masked = oiii.masked
    nii_masked = nii.masked

    # SNR Cut
    ha_masked.mask |= ha.snr < snr_min
    hb_masked.mask |= hb.snr < snr_min
    oiii_masked.mask |= oiii.snr < snr_min
    nii_masked.mask |= nii.snr < snr_min

    # Drop spaxels with zero inverse variance (no measurement).
    ha_masked.mask |= ha.ivar == 0
    hb_masked.mask |= hb.ivar == 0
    oiii_masked.mask |= oiii.ivar == 0
    nii_masked.mask |= nii.ivar == 0

    # Mask Negative Flux
    ha_masked.mask |= ha_masked.data <= 0
    hb_masked.mask |= hb_masked.data <= 0
    oiii_masked.mask |= oiii_masked.data <= 0
    nii_masked.mask |= nii_masked.data <= 0

    # masked Logarithms (the BPT diagram axes)
    log_oiii_hb = np.ma.log10(oiii_masked / hb_masked)
    log_nii_ha = np.ma.log10(nii_masked / ha_masked)

    # Calculate Masks for classification regions
    sf_mask_nii = (log_oiii_hb < nii_sf_line(log_nii_ha)) & (log_nii_ha < 0.05)
    comp_mask = ((log_oiii_hb > nii_sf_line(log_nii_ha)) & (log_nii_ha < 0.05)) & \
                ((log_oiii_hb < nii_comp_line(log_nii_ha)) & (log_nii_ha < 0.465))
    sub_agn_mask_nii = (log_oiii_hb > nii_comp_line(log_nii_ha)) | (log_nii_ha > 0.465)
    agn_mask_nii = sub_agn_mask_nii & (nii_agn_line(log_nii_ha) < log_oiii_hb)
    liner_mask_nii = sub_agn_mask_nii & (nii_agn_line(log_nii_ha) > log_oiii_hb)

    # A spaxel is invalid when any of the four lines is masked.
    invalid_mask = ha_masked.mask | oiii_masked.mask | nii_masked.mask | hb_masked.mask

    sf_classification = {"nii": sf_mask_nii}
    comp_classification = {"nii": comp_mask}
    agn_classification = {"nii": agn_mask_nii}
    liner_classification = {"nii": liner_mask_nii}
    invalid_classification = {"nii": invalid_mask}

    bpt_return_classification = {'sf': sf_classification,
                                 'comp': comp_classification,
                                 'agn': agn_classification,
                                 'liner': liner_classification,
                                 'invalid': invalid_classification}

    if not return_figure:
        return bpt_return_classification
    elif plot_map:
        # Make image: each class maps onto a half-integer bin center for
        # the 5-color discrete colorbar below.
        bpt_image = np.empty(ha.shape)
        bpt_image[:] = np.nan
        # Star Forming
        bpt_image[bpt_return_classification['sf']['nii']] = 0.5
        # Comp
        bpt_image[bpt_return_classification['comp']['nii']] = 1.5
        # Seyfert
        bpt_image[bpt_return_classification['agn']['nii']] = 2.5
        # LINER
        bpt_image[bpt_return_classification['liner']['nii']] = 3.5
        # Ambiguous
        bpt_image[bpt_return_classification['invalid']['nii']] = 4.5
        bpt_image = np.ma.masked_array(bpt_image, mask = np.isnan(bpt_image))

        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(111)
        else:
            fig = ax.figure

        if "cmap" not in kwargs:
            kwargs["cmap"] = ListedColormap(sns.color_palette([pal[1], pal[0], pal[9], pal[4], pal[8]]))
        if "title" not in kwargs:
            kwargs["title"] = "NII BPT Classification Map"

        fig, ax, cb = ha.plot(fig = fig, ax = ax, value = bpt_image, return_cb = True, cbrange = [0, 5], **kwargs)
        cb.set_ticks([0.5, 1.5, 2.5, 3.5, 4.5])
        cb.ax.set_yticklabels(['Star Forming', 'Composite', 'AGN', 'LI(N)ER', 'Invalid'])
        cb.set_label(r'BPT Classification', fontsize = 14)
        cb.ax.tick_params(labelsize=12)
        return ax, cb
    elif plot_kde:
        # KDE Map
        if return_data:
            radius = maps['spx ellcoo r_re']
            return radius, log_nii_ha, log_oiii_hb
        else:
            if ax is None:
                fig = plt.figure()
                ax = fig.add_subplot(111)
            # Draw classification lines
            ax = draw_classification_lines(ax, **classification_kwargs)
            # Default kwargs
            # Default colormap
            if "cmap" not in kwargs:
                kwargs["cmap"] = "plasma"
            if "zorder" not in kwargs:
                kwargs["zorder"] = 0
            if "shade" not in kwargs:
                kwargs["shade"] = True
            if "shade_lowest" not in kwargs:
                kwargs["shade_lowest"] = False
            if radial_bin is not None:
                # Restrict to spaxels whose R/R_e lies inside the bin.
                radius = maps['spx ellcoo r_re']
                within = radius.value >= radial_bin[0]
                within &= radius.value <= radial_bin[1]
                ax = sns.kdeplot(log_nii_ha[(np.invert(log_nii_ha.mask | log_oiii_hb.mask) & within)],
                                 log_oiii_hb[(np.invert(log_nii_ha.mask | log_oiii_hb.mask) & within)],
                                 ax = ax, **kwargs)
            else:
                ax = sns.kdeplot(log_nii_ha[np.invert(log_nii_ha.mask | log_oiii_hb.mask)],
                                 log_oiii_hb[np.invert(log_nii_ha.mask | log_oiii_hb.mask)],
                                 ax = ax, **kwargs)
            ax.set_xlabel(r'$log_{10}$([NII] $\lambda$ 6585/H$\alpha$)', fontsize=12)
            ax.set_ylabel(r'$log_{10}$([OIII] $\lambda$ 5007/H$\beta$)', fontsize=12)
            if overplot_dk_bpt:
                ax = plot_dkbpt(ax, **dk_bpt_kwargs)
                ax = scale_to_dkbpt(ax)
            return ax
    else:
        # Do the plotting (scatter diagram)
        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(111)
        # Draw classification lines
        ax = draw_classification_lines(ax, **classification_kwargs)
        # Defautl kwargs
        # Default colors
        if ("c" not in kwargs) & ("color" not in kwargs) & ("cmap" not in kwargs):
            # Default radial colormapping
            radius = maps['spx ellcoo r_re']
            kwargs["c"] = radius
            kwargs["cmap"] = sns.dark_palette(pal[8], as_cmap=True)
        # Default vmin/vmax
        if "c" in kwargs:
            if "vmin" not in kwargs:
                kwargs["vmin"] = 0.
            if "vmax" not in kwargs:
                kwargs["vmax"] = 3.
        # Default Size
        if "s" not in kwargs:
            kwargs["s"] = 5
        # plot the points
        pts = ax.scatter(log_nii_ha, log_oiii_hb, **kwargs)
        ax.set_xlabel(r'$log_{10}$([NII] $\lambda$ 6585/H$\alpha$)', fontsize=12)
        ax.set_ylabel(r'$log_{10}$([OIII] $\lambda$ 5007/H$\beta$)', fontsize=12)
        if overplot_dk_bpt:
            ax = plot_dkbpt(ax, **dk_bpt_kwargs)
            ax = scale_to_dkbpt(ax)
        return pts, ax
a45801c3672cae2750133dbf7011dd7dfad50b3d
3,629,846
def correct_dcm(dcm):
    """Fix a DICOM image whose signed pixel data was treated as unsigned.

    Shifts the pixel values by +1000, wraps anything at or above the
    12-bit range back down, writes the corrected buffer into the dataset,
    and sets the rescale intercept to -1000 accordingly.
    """
    px_mode = 4096
    shifted = dcm.pixel_array + 1000
    shifted[shifted >= px_mode] -= px_mode
    dcm.PixelData = shifted.tobytes()
    dcm.RescaleIntercept = -1000
    return dcm.pixel_array, dcm.RescaleIntercept
0186ab3fc4b606902da3a50a5835eb227a1b7733
3,629,847
def multiaz_subnets(
    name_prefix: str,
    cidr_block: str,
    region: str,
    vpc: object = None,
    vpc_id: str = None,
    no_of_subnets: int = 4,
    network_acl: object = None,
    network_acl_id: str = None,
    route_table: object = None,
    route_table_id: str = None,
) -> list:
    """Split a CIDR block into subnets spread over the region's AZs.

    Either ``vpc`` (a Troposphere resource) or ``vpc_id`` is required; the
    same resource-or-id pairing applies to ``network_acl``/``network_acl_id``
    and ``route_table``/``route_table_id``, with the ``*_id`` value taking
    precedence. If an ACL or route table is supplied, an association
    resource is created for every subnet.

    Each returned subnet carries Metadata keys:
        az: full availability zone name ("eu-west-1a")
        az_index: uppercase AZ letter, without the region part ("A")
        suffix: the numeric suffix appended to the resource title.

    Args:
        name_prefix: prepended (alphanumeric-filtered) to every resource
            title to keep them unique within the calling Template.
        cidr_block: IP range to split into subnets.
        region: AWS region.
        vpc / vpc_id: VPC resource or its ID.
        no_of_subnets: number of subnets to create; must be a power of 2.
        network_acl / network_acl_id: optional network ACL to associate.
        route_table / route_table_id: optional route table to associate.

    Raises:
        ValueError: if neither vpc nor vpc_id was specified.

    Returns:
        list: Troposphere resources to be added to a Template.
    """
    if vpc is None and vpc_id is None:
        raise ValueError("One of vpc or vpc_id must be specified")
    if vpc_id is None:
        vpc_id = Ref(vpc)

    # Fall back from resource objects to Refs for the ACL / route table.
    if network_acl_id is None and network_acl is not None:
        network_acl_id = Ref(network_acl)
    if route_table_id is None and route_table is not None:
        route_table_id = Ref(route_table)

    # Resource names only accept alphanumeric characters.
    prefix = alphanum(name_prefix).lower().capitalize()

    resources = []
    segments = split_net_across_zones(cidr_block, region, no_of_subnets)
    for position, segment in enumerate(segments, start=1):
        az_letter = segment["az"][-1:].upper()
        subnet = t_ec2.Subnet(
            title=f"{prefix}{position}",
            AvailabilityZone=segment["az"],
            CidrBlock=segment["cidr"],
            VpcId=vpc_id,
            Tags=[{"Key": "Name", "Value": f"{name_prefix} {az_letter}"}],
        )
        subnet.Metadata = {
            "az": segment["az"].lower(),
            "az_index": az_letter,
            "suffix": position,
        }
        resources.append(subnet)

        if network_acl_id is not None:
            resources.append(
                t_ec2.SubnetNetworkAclAssociation(
                    title=f"{subnet.title}NaclAssociation",
                    SubnetId=Ref(subnet),
                    NetworkAclId=network_acl_id,
                )
            )
        if route_table_id is not None:
            resources.append(
                t_ec2.SubnetRouteTableAssociation(
                    title=f"{subnet.title}RouteAssociation",
                    SubnetId=Ref(subnet),
                    RouteTableId=route_table_id,
                )
            )
    return resources
58cd2d5eea2873683202907a1c91dc4dbefcb9d6
3,629,848
def EPdiv(a, da, b, db, covar=0):
    """Error-propagated division: C = A / B.

    Returns ``(a/b, dc)`` where ``dc`` is the first-order propagated
    uncertainty, including an optional covariance term between A and B.
    """
    quotient = a / b
    variance = ((da / b) ** 2 + ((a ** 2) * (db ** 2) / (b ** 4))) - (2 * covar * a / (b ** 3))
    return quotient, np.sqrt(variance)
ec8fa19845e4c014c271badfba3d7ad6a503e096
3,629,849
def factorial_r(number):
    """Return number! computed via recursion.

    :param number: a non-negative integer.
    :return: n!
    :raises ValueError: if number is negative.
    """
    # Guard clause: factorial is undefined for negative input.
    if number < 0:
        raise ValueError
    # Base case 0! == 1; otherwise recurse on (n - 1)!.
    return 1 if number == 0 else number * factorial_r(number - 1)
e5c28edac93b965f438bd61c5bb1c0a935c96700
3,629,850
def obstacles_to_grid(obstacles):
    """
    Transforms a list of obstacles into an m x n grid where 1 represents
    the cell is covered by an obstacle and 0 represents no obstacle.

    m and n are derived by the canvas height/width and grid width. The
    assumption is that the obstacles fall precisely on grid lines.
    """
    # NOTE(review): Python 2 code (xrange; `/` is integer division here).
    # HEIGHT, WIDTH and GRID_WIDTH are module-level constants — assumes
    # they divide evenly. TODO confirm.
    m = HEIGHT / GRID_WIDTH
    n = WIDTH / GRID_WIDTH
    grid = [[0 for _ in xrange(n)] for _ in xrange(m)]
    for o in obstacles:
        # Obstacle corners (x1, y1)-(x2, y2) lie exactly on grid lines, so
        # converting to indices covers whole cells.
        jstart = o.x1 / GRID_WIDTH
        jend = o.x2 / GRID_WIDTH
        istart = o.y1 / GRID_WIDTH
        iend = o.y2 / GRID_WIDTH
        for i in xrange(istart, iend):
            for j in xrange(jstart, jend):
                grid[i][j] = 1
    return grid
15034a36bda253d6b045926291c2996c5867b272
3,629,851
def get_gps_data(filename, traversal_id):
    """
    Gets GPS time series gathered from a traversal.

    :param filename: <String> csv file from GPS team in format
        |date|time|name|latitude|longitude|heading
    :param traversal_id: <String> value of the 'name' column selecting
        which traversal to extract.
    :return: <GeoPolygon> built from time_stamp|latitude|longitude
    """
    delimiter = r"\s+"  # some of the columns are separated by a space, others by tabs, use regex to include both
    header_row = 0  # the first row has all the header names
    # parse_dates merges the 'date' and 'time' columns into one 'date_time' column.
    df = pd.read_csv(filename, sep=delimiter, header=header_row,
                     parse_dates=[['date', 'time']])
    time_lat_long = df[df['name'] == traversal_id][['date_time', 'latitude', 'longitude']]
    # .as_matrix() was removed in pandas 1.0 — .to_numpy() is the supported API.
    gp = GeoPolygon(LAT_LONG,
                    *time_lat_long[['latitude', 'longitude']].to_numpy().transpose())
    return gp
2aeaa6b06025951c48cb9213dd9994d0e1dbb61d
3,629,852
def infection_rate_symptomatic_30x60():
    """
    Real Name: b'infection rate symptomatic 30x60'
    Original Eqn: b'Susceptible 60*Infected symptomatic 30x60*contact infectivity symptomatic 30x60*(self quarantine policy SWITCH self 60\\\\ *self quarantine policy 60+(1-self quarantine policy SWITCH self 60))/non controlled pop 30x60'
    Units: b'person/Day'
    Limits: (None, None)
    Type: component

    b''
    """
    # NOTE(review): appears to be a PySD-generated model component (the
    # docstring format matches PySD output) — presumably it should only be
    # regenerated from the Vensim model, not edited by hand.
    # Product of susceptibles, symptomatic infected and infectivity, scaled
    # by the self-quarantine policy switch, normalized by the uncontrolled
    # population.
    return susceptible_60() * infected_symptomatic_30x60() * contact_infectivity_symptomatic_30x60(
    ) * (self_quarantine_policy_switch_self_60() * self_quarantine_policy_60() +
         (1 - self_quarantine_policy_switch_self_60())) / non_controlled_pop_30x60()
af8f774612cfd866b6931690ecff9c986f415769
3,629,853
def compute_rewards(rl_batch, batch_actions, episode_lengths, batch_size=None):
    """Compute rewards for each episode in the batch.

    Args:
        rl_batch: A data.RLBatch instance. This holds information about the
            task each episode is solving, and a reward function for each
            episode.
        batch_actions: Contains batch of episodes. Each sequence of actions
            will be converted into a BF program and then scored. A numpy array
            of shape [batch_size, max_sequence_length].
        episode_lengths: The sequence length of each episode in the batch.
            Iterable of length batch_size.
        batch_size: (optional) number of programs to score. Use this to limit
            the number of programs executed from this batch. For example, when
            doing importance sampling some of the on-policy episodes will be
            discarded and they should not be executed. `batch_size` can be less
            than or equal to the size of the input batch.

    Returns:
        CodeScoreInfo namedtuple instance. This holds not just the computed
        rewards, but additional information computed during code execution
        which can be used for debugging and monitoring. This includes: BF code
        strings, test cases the code was executed on, code outputs from those
        test cases, and reasons for success or failure.
    """
    # NOTE(review): Python 2 code (xrange).
    # Decode each action sequence (truncated to its episode length) into a
    # BF program string.
    code_strings = [
        ''.join([misc.bf_int2char(a) for a in action_sequence[:l]])
        for action_sequence, l in zip(batch_actions, episode_lengths)]
    if batch_size is None:
        batch_size = len(code_strings)
    else:
        assert batch_size <= len(code_strings)
        code_strings = code_strings[:batch_size]

    if isinstance(rl_batch.reward_fns, (list, tuple)):
        # reward_fns is a list of functions, same length as code_strings.
        assert len(rl_batch.reward_fns) >= batch_size
        r_fn_results = [
            rl_batch.reward_fns[i](code_strings[i]) for i in xrange(batch_size)]
    else:
        # reward_fns is allowed to be one function which processes a batch of code
        # strings. This is useful for efficiency and batch level computation.
        r_fn_results = rl_batch.reward_fns(code_strings)

    # Expecting that r_fn returns a list of rewards. Length of list equals
    # length of the code string (including EOS char).
    batch_rewards = [r.episode_rewards for r in r_fn_results]
    total_rewards = [sum(b) for b in batch_rewards]
    test_cases = [io_to_text(r.input_case, r.input_type) for r in r_fn_results]
    code_outputs = [io_to_text(r.code_output, r.output_type) for r in r_fn_results]
    reasons = [r.reason for r in r_fn_results]
    return CodeScoreInfo(
        code_strings=code_strings,
        batch_rewards=batch_rewards,
        total_rewards=total_rewards,
        test_cases=test_cases,
        code_outputs=code_outputs,
        reasons=reasons)
6d757007f74552421648f19dfd915a306bc68d86
3,629,854
def _translate_args(t=tuple(), d=dict(), unfreeze=False):
    """Build the argument portion of an S-expression for a EusLisp call.

    Helper for the wrapper-function generator. Positional arguments in *t*
    and keyword arguments in *d* are rendered in EusLisp literal syntax:
    tuples/lists become quasi-quoted Lisp lists (there is no literal hash
    syntax in EusLisp, so only sequences are converted recursively), and
    dict entries become ``:key value`` pairs. Variables are evaluated
    before being embedded. The result is empty for no arguments, and
    otherwise starts with a space so it can be appended directly after the
    function name, e.g. ``" 100 200"`` for ``(func 100 200)``.

    When *unfreeze* is True, proxy objects are spliced with ``,`` so their
    ``(lookup-registered-object ...)`` forms get evaluated (needed when a
    proxy appears inside a type constructor's arguments).

    Args:
        t (tuple or list): positional args (the caller's ``*args``).
        d (dict): keyword args (the caller's ``**kwargs``).
        unfreeze (bool): splice proxies with ``,`` when True.

    Returns:
        str: argument text to embed in the calling S-expression.

    Raises:
        TypeError: when automatic conversion fails (e.g. a user-defined
            Python class instance is passed as an argument).
    """
    return f'{_translate_tuple(t, unfreeze)}{_translate_dict(d, unfreeze)}'
25ca61d7a0682ad817a2b4d6a4103ad25ba47fbe
3,629,855
def merge_date_time2(data, date_column, time_column=None):
    """Merge separate date and time columns into a single datetime series.

    Times that fail to parse are replaced with a default of 00:00:00 (via
    the 1960-01-01 placeholder timestamp). Note: the date and time columns
    of ``data`` are converted to datetime dtype in place.

    Parameters
    ----------
    data : pandas.DataFrame
        Frame containing the date and time columns.
    date_column : str
        Name of the column holding dates.
    time_column : str
        Name of the column holding times.

    Returns
    -------
    pandas.Series
        Datetime series combining each date with its time.

    Raises
    ------
    KeyError
        If either column is missing from ``data``.
    """
    # Fail fast with a clear error instead of printing a warning and then
    # crashing on the column access below.
    for column in (date_column, time_column):
        if column not in data:
            raise KeyError("Column <%s> not found!" % column)

    # Convert dates.
    data[date_column] = pd.to_datetime(data[date_column])

    # Fill unparseable times with the default placeholder (00:00:00).
    data[time_column] = pd.to_datetime(data[time_column], errors='coerce')
    data[time_column] = data[time_column].fillna(pd.Timestamp('1960-01-01'))

    # Format each part, then combine into one datetime series.
    date = data[date_column].dt.strftime('%Y-%m-%d')
    time = data[time_column].dt.strftime('%H:%M:%S')
    return pd.to_datetime(date + ' ' + time)
3d4ecf8e67b7bfa3cb3b967332c1878f8a1d7293
3,629,856
def logistic(x):
    """Squashing function mapping [0, inf) into [0, 1) and the whole real
    line into (-1, 1). Useful for cost functions."""
    growth = 1 + exp(-x)
    return 2.0 / growth - 1.0
39c45ebbcef74c11bbb4bd554757b0c738dfc99e
3,629,857
def make_perturbed_cmtsolution(py, src_frechet_directory, cmtsolution_directory, output_directory):
    """Build the shell command that makes perturbed CMTSOLUTIONs from src_frechet output."""
    command = (
        f"ibrun -n 1 {py} -m seisflow.scripts.source_inversion.make_perturbed_cmtsolution"
        f" --src_frechet_directory {src_frechet_directory}"
        f" --cmtsolution_directory {cmtsolution_directory}"
        f" --output_directory {output_directory}; \n"
    )
    return command
07bb69751ddaee9d7aa6389c6cab9bc6021758ed
3,629,858
def weight_correct_incorrect(rslt):
    """Return (correct_weight, incorrect_weight) for EvaluationResult rslt.

    Each element is the summed instance weight over the correctly or
    incorrectly classified instances.

    >>> listInstCorrect = [Instance([],True,0.25)]
    >>> listInstIncorrect = [Instance([],False,0.50)]
    >>> rslt = EvaluationResult(listInstCorrect, listInstIncorrect, None)
    >>> weight_correct_incorrect(rslt)
    (0.25, 0.5)"""
    correct = sum(inst.dblWeight for inst in rslt.listInstCorrect)
    wrong = sum(inst.dblWeight for inst in rslt.listInstIncorrect)
    return (correct, wrong)
5a7ef1d338821f10b58ba06224059e532180c50d
3,629,859
import pathlib
import csv
def get_data_info(path):
    """Collect driving-log metadata from all CSV files under *path*.

    Each CSV row is expected to hold three image paths followed by the
    steering angle, throttle, brake and speed values. Image paths are
    rebased onto the IMG directory next to the CSV file they came from.
    Rows that fail to parse are printed and skipped.
    """
    records = []
    # Data may live in nested subfolders; scan them all.
    for csv_file in pathlib.Path(path).glob('**/*.csv'):
        folder = csv_file.resolve().parent
        with open(str(csv_file), 'r') as handle:
            for row in csv.reader(handle):
                try:
                    records.append({
                        'img_center': str(folder.joinpath('IMG', pathlib.Path(row[0]).name)),
                        'img_left': str(folder.joinpath('IMG', pathlib.Path(row[1]).name)),
                        'img_right': str(folder.joinpath('IMG', pathlib.Path(row[2]).name)),
                        'steering_angle': float(row[3]),
                        'throttle': float(row[4]),
                        'brake': float(row[5]),
                        'speed': float(row[6]),
                    })
                except Exception as error:
                    print(error)
    return records
53f3dd1b6ff18d43a656f4a3f6da26ab1e60a6c2
3,629,860
import math
def BmatPRV(q):
    """
    BmatPRV(Q)

    B = BmatPRV(Q) returns the 3x3 matrix which relates the body
    angular velocity vector w to the derivative of principal
    rotation vector Q.

        dQ/dt = [B(Q)] w
    """
    # NOTE(review): q is assumed to be a length-3 principal rotation
    # vector; no guard for a zero rotation, where the coefficient c below
    # is singular (division by p and tan(p/2)) — confirm callers avoid
    # that case.
    p = np.linalg.norm(q)  # rotation angle (norm of the PRV)
    c = 1 / p / p * (1 - p / 2 / math.tan(p / 2))
    B = np.zeros([3, 3])
    B[0, 0] = 1 - c * (q[1] * q[1] + q[2] * q[2])
    B[0, 1] = -q[2] / 2 + c * (q[0] * q[1])
    B[0, 2] = q[1] / 2 + c * (q[0] * q[2])
    B[1, 0] = q[2] / 2 + c * (q[0] * q[1])
    B[1, 1] = 1 - c * (q[0] * q[0] + q[2] * q[2])
    B[1, 2] = -q[0] / 2 + c * (q[1] * q[2])
    B[2, 0] = -q[1] / 2 + c * (q[0] * q[2])
    B[2, 1] = q[0] / 2 + c * (q[1] * q[2])
    B[2, 2] = 1 - c * (q[0] * q[0] + q[1] * q[1])
    return B
f1977d5eb0c3913454dd692861c8f32e80fbb035
3,629,861
import random
def shuffle(x, y):
    """Shuffle two parallel datasets in unison, in place.

    x and y are reordered with the same random permutation, so
    corresponding elements stay aligned.

    Args:
        x: first dataset (list), mutated in place.
        y: second dataset (list), same length as x, mutated in place.

    Returns:
        The (x, y) pair after shuffling.
    """
    # Fisher-Yates over a shared index permutation: uniformly random,
    # unlike the previous adjacent-swap loop (which was biased and mostly
    # swapped neighbours), while still keeping the x/y pairing intact.
    order = list(range(len(x)))
    random.shuffle(order)
    x[:] = [x[i] for i in order]
    y[:] = [y[i] for i in order]
    return x, y
c9f198d3796c5d64eba818753701957ea1a0e924
3,629,862
from typing import FrozenSet
def induced_subgraph(G: Graph, S: FrozenSet[Ind]) -> Graph:
    """Return the subgraph of G induced by the node set S.

    Keeps exactly those edges of G whose endpoints all lie in S.

    See Also
    --------
    https://en.wikipedia.org/wiki/Induced_subgraph

    Parameters
    ----------
    G : Graph
    S : Set of nodes

    Returns
    -------
    out : Graph
        Induced subgraph
    """
    kept_edges = {edge for edge in G.E if S.issuperset(edge.set)}
    return Graph(S, kept_edges)
488a97e278abb8961abaf4b889525123d5588ab8
3,629,863
import os
import base64
def getKey(key, namespace=None):
    """Build a base64-encoded key scoped by application id and namespace.

    The composite key is "<APPLICATION_ID>.<namespace>.<key>", with parts
    omitted when absent (APPLICATION_ID comes from the environment).

    Args:
        key: base key string.
        namespace: optional namespace to prepend.

    Returns:
        str: the base64-encoded composite key.
    """
    app_id = os.environ.get('APPLICATION_ID', '')
    if app_id:
        app_id += '.'
    if namespace:
        key = f'{namespace}.{key}'
    key = f'{app_id}{key}'
    # b64encode requires bytes on Python 3 (passing str raised TypeError);
    # decode back so callers get text.
    return base64.b64encode(key.encode('utf-8')).decode('ascii')
d29c091d7c22391b4a364e9b9be65790fa4fa15f
3,629,864
def make_peptide_bond(mol, res = None, start = "N", end = "C", delete = "OXT"):
    """Performs one condensation rxn between a molecule and residue/itself;
    default creates peptide bond

    Parameters
    ----------
    mol : rdkmol
        Main molecule on which reaction is performed
    res : rdkmol
        None or a single residue; when it is None, self-condensation
        (cyclisation) is performed on ``mol``
    start : str
        atom name of one of the two atoms to which connection is established
    end : str
        atom name of the other atom to which connection is established
    delete : str
        default to hydroxy oxygen that's eliminated during condensation

    Returns
    -------
    mol : rdkmol
        modified molecule
    """
    startIdx, endIdx, deleteIdx = -1, -1, -1
    for idx, atm in enumerate(mol.GetAtoms()):
        # get the last occurence of end and delete atomname
        if atm.GetPDBResidueInfo().GetName().strip() == end:
            endIdx = idx
        elif atm.GetPDBResidueInfo().GetName().strip() == delete:
            deleteIdx = idx
    if res is not None:  # residue addition
        # Find the residue number after the last residue in mol, so the new
        # residue continues the numbering.
        lastResNum = -1
        for idx, atm in enumerate(mol.GetAtoms()):
            lastResNum = atm.GetPDBResidueInfo().GetResidueNumber()
        lastResNum += 1
        for idx, atm in enumerate(res.GetAtoms()):
            # get the last occurence of start atomname
            atm.GetPDBResidueInfo().SetResidueNumber(lastResNum)
            if atm.GetPDBResidueInfo().GetName().strip() == start:
                startIdx = idx
        # Offset into the combined molecule: res atoms come after mol atoms.
        startIdx += mol.GetNumAtoms()
        mol = Chem.CombineMols(mol, res)
    else:  # cyclisation
        for idx, atm in enumerate(mol.GetAtoms()):
            # get the first occurence of start atomname
            if atm.GetPDBResidueInfo().GetName().strip() == start:
                startIdx = idx
                break
    # Form the new bond and eliminate the leaving atom (condensation),
    # then refresh valence/ring info before returning an immutable mol.
    mol = Chem.RWMol(mol)
    mol.AddBond(startIdx, endIdx, Chem.BondType.SINGLE)
    mol.RemoveAtom(deleteIdx)
    mol.UpdatePropertyCache()
    Chem.GetSSSR(mol)
    return mol.GetMol()
fcf167b259a016ebaf0b82d2cee219f1c24646ce
3,629,865
def get_data_names(data, data_names):
    """
    Get default names for data fields if none are given based on the data.

    Examples
    --------

    >>> import numpy as np
    >>> east, north, up = [np.arange(10)]*3
    >>> get_data_names((east,), data_names=None)
    ('scalars',)
    >>> get_data_names((east, north), data_names=None)
    ('east_component', 'north_component')
    >>> get_data_names((east, north, up), data_names=None)
    ('east_component', 'north_component', 'vertical_component')
    >>> get_data_names((up, north), data_names=('ringo', 'george'))
    ('ringo', 'george')

    """
    n_components = len(data)
    # Caller-provided names win, but must match the component count.
    if data_names is not None:
        if n_components != len(data_names):
            raise ValueError(
                "Data has {} components but only {} names provided: {}".format(
                    n_components, len(data_names), str(data_names)
                )
            )
        return data_names
    defaults = (
        ("scalars",),
        ("east_component", "north_component"),
        ("east_component", "north_component", "vertical_component"),
    )
    if n_components > len(defaults):
        raise ValueError(
            "Default data names only available for up to 3 components. "
            "Must provide custom names through the 'data_names' argument."
        )
    return defaults[n_components - 1]
e2097d6dbf2c8cc52fd4a60124727cad5fe9fbc4
3,629,866
import pandas
def get_labels_from_file(filename):
    """Read the 'summary' column of a CSV file and return it as a list.

    Args:
        filename: path to the CSV file.

    Returns:
        List[str]: the labels from the last ('summary') column.
    """
    return pandas.read_csv(filename)['summary'].tolist()
605e9a464eb9fc007d2421fadcab362b7c22ebf5
3,629,867
def css3_lists():
    """Return parallel lists of all CSS3 color names and their RGB values."""
    pairs = [(name, hex_to_rgb(code)) for code, name in CSS3_HEX_TO_NAMES.items()]
    names = [name for name, _ in pairs]
    rgb_values = [rgb for _, rgb in pairs]
    return names, rgb_values
f1656566f8099b2dd5cc9f5c5662d45e9e820f84
3,629,868
from typing import Union


def kmeans(
    embedding: np.ndarray,
    n_clusters: int = 1,
    init: Union[str, np.ndarray] = 'k-means++',
    n_init: int = 10,
    max_iter: int = 300,
    tolerance: float = 0.0001,
    precompute_distances='auto',
    verbose: int = 0,
    random_state: int = None,
    copy_x: bool = True,
    n_jobs: int = None,
    algorithm: str = 'auto'
) -> np.ndarray:
    """Perform k-means clustering on the embedding.

    :param numpy.ndarray embedding: An n x d array of vectors representing
        n labels in a d dimensional space.
    :param int n_clusters: Number of clusters / centroids to generate.
    :param init: Initialization method: 'k-means++', 'random', or an
        (n_clusters, n_features) ndarray of initial centers.
    :type init: Union[str, numpy.ndarray]
    :param int n_init: Number of runs with different centroid seeds; the
        best run (by inertia) is kept.
    :param int max_iter: Maximum iterations for a single run.
    :param float tolerance: Relative inertia tolerance to declare convergence.
    :param precompute_distances: 'auto', True, or False — precompute
        distances (faster but uses more memory).
    :param int verbose: Verbosity mode.
    :param random_state: Seed (or RandomState) for centroid initialization;
        an int makes results deterministic.
    :param copy_x: If True (default), the input data is not modified when
        distances are precomputed.
    :param n_jobs: Number of parallel jobs for the n_init runs
        (None -> 1 unless in a joblib context, -1 -> all processors).
    :param str algorithm: 'auto', 'full' (classic EM), or 'elkan'.
    :return: Cluster label for each vector in the embedding, aligned with
        the embedding's row order.
    :rtype: numpy.ndarray
    """
    classifier = KMeans(
        n_clusters=n_clusters,
        init=init,
        n_init=n_init,
        max_iter=max_iter,
        tol=tolerance,
        precompute_distances=precompute_distances,
        verbose=verbose,
        random_state=random_state,
        copy_x=copy_x,
        n_jobs=n_jobs,
        algorithm=algorithm
    )
    # fit_predict is documented as equivalent to fit(X) followed by
    # predict(X) but avoids a redundant second pass over the data.
    return classifier.fit_predict(embedding)
4738a2b9b77c8812017b37cce0d80c0d028854c2
3,629,869
import six
from datetime import datetime


def get_json_struct(jsonobj, template=None):
    """
    Build (or extend) a "template" object describing the structure of a JSON
    object so it can be loaded into BigQuery.

    :param jsonobj: Object to parse and adjust so it could be loaded into
        BigQuery.
    :param template: An input object to use as a basis for the template;
        default is no template provided.
    :return: A template object whose values are type exemplars (e.g. "" for
        strings, 0 for ints). This can be used as input to
        get_bq_schema_from_json_repr.
    """
    if template is None:
        template = {}
    for key in jsonobj:
        # BigQuery field names may not contain special characters; replace
        # invalid characters with underscores.
        newkey = INVALIDBQFIELDCHARS.sub("_", key)
        if jsonobj[key] is None:
            continue
        if newkey not in template:
            # First time this field is seen: record an exemplar value whose
            # Python type encodes the inferred BigQuery column type.
            value = None
            if isinstance(jsonobj[key], bool):
                value = False
            elif isinstance(jsonobj[key], six.string_types):
                value = ""
            elif isinstance(jsonobj[key], six.text_type):
                value = ""
            elif isinstance(jsonobj[key], int) or isinstance(jsonobj[key], long):
                # NOTE(review): `long` exists only on Python 2; this module
                # appears to target py2 (it also relies on `six`). Confirm.
                value = 0
            elif isinstance(jsonobj[key], float):
                value = 0.0
            elif isinstance(jsonobj[key], date):
                # Dates/datetimes keep the actual value as the exemplar.
                value = jsonobj[key]
            elif isinstance(jsonobj[key], datetime):
                value = jsonobj[key]
            elif isinstance(jsonobj[key], dict):
                # Nested record: recurse to build its sub-template.
                value = get_json_struct(jsonobj[key])
            elif isinstance(jsonobj[key], list):
                # Repeated field: template for a list is a one-element list
                # holding the merged record template of all items.
                value = [{}]
                if len(jsonobj[key]) > 0:
                    if not isinstance(jsonobj[key][0], dict):
                        # Lists of scalars are wrapped as records with a
                        # single "value" field (mutates jsonobj in place).
                        new_value = []
                        for vali in jsonobj[key]:
                            new_value.append({"value": vali})
                        jsonobj[key] = new_value
                    for list_item in jsonobj[key]:
                        value[0] = get_json_struct(list_item, value[0])
            else:
                raise UnexpectedType(str(jsonobj[key]))
            template[newkey] = value
        else:
            # Field already known: merge nested structures if the types
            # agree, otherwise try to loosen the scalar type.
            if isinstance(jsonobj[key], type(template[newkey])):
                if isinstance(jsonobj[key], dict):
                    # NOTE(review): this assignment uses `key` while every
                    # other write uses `newkey` — possible bug when `key`
                    # contains characters invalid for BigQuery. Confirm.
                    template[key] = get_json_struct(jsonobj[key], template[newkey])
                if isinstance(jsonobj[key], list):
                    if len(jsonobj[key]) != 0:
                        if not isinstance(jsonobj[key][0], dict):
                            new_value = []
                            for vali in jsonobj[key]:
                                new_value.append({"value": vali})
                            jsonobj[key] = new_value
                        for list_item in jsonobj[key]:
                            template[newkey][0] = get_json_struct(list_item, template[newkey][0])
            else:
                # Work out the best way to loosen types, with the worst
                # case being a change to string ("").
                newtype = ""
                if isinstance(jsonobj[key], float) and isinstance(template[newkey], int):
                    # int -> float widening.
                    newtype = 0.0
                elif isinstance(jsonobj[key], datetime) and isinstance(template[newkey], date):
                    # date -> datetime widening.
                    newtype = jsonobj[key]
                if not (isinstance(jsonobj[key], dict) or isinstance(jsonobj[key], list)) and not (
                        isinstance(template[newkey], list) or isinstance(template[newkey], dict)):
                    template[newkey] = newtype
                else:
                    # A structured/scalar mismatch cannot be loosened.
                    raise InconsistentJSONStructure(key, str(jsonobj[key]), str(template[newkey]))
    return template
f4c505fa6a593ab9fa1df5ab47f986f11f854d57
3,629,870
import logging


def pin_to_cpu(op):
    """Returns a CPU device for the given node."""
    device = "" if op.device is None else op.device
    dev = pydev.from_string(device)
    # No explicit device type: pin to the first CPU.
    if not dev.device_type:
        return set_cpu0(device)
    # Anything other than CPU is left alone, with a log note.
    if dev.device_type != "CPU":
        logging.info("Operation %s has been assigned to a non-CPU (%s), so "
                     "it will not be pinned to the CPU.", op.name, dev.device_type)
    return device
0b9f248e53f2df5e26945bb0bfb5bac2544d1d45
3,629,871
def build_df(data):
    """Convert a pandas Series to a DataFrame with its index as a column.

    The original column and index names are captured so the DataFrame can
    later be restored to its original form.

    :param data: pandas Series
    :return: tuple of
        - processed pandas DataFrame (index reset to a running number),
        - name of the column containing the time series data,
        - name of the original index ("index" when it had none).
    """
    column_name = data.name
    index_name = data.index.name
    if index_name is None:
        index_name = "index"
    # Move the original index into a regular column.
    frame = pd.DataFrame(data).reset_index()
    return frame, column_name, index_name
0f34a6787a452b6d4e1cbcf2d99807c0e7d75141
3,629,872
def _map_spectrum_weight(map, spectrum=None):
    """Weight a map with a spectrum.

    This requires map to have an "energy" axis. The weights are normalised
    so that they sum to 1. The mean and unit of the output image is the same
    as of the input cube.

    At the moment this is used to get a weighted exposure image.

    Parameters
    ----------
    map : `~gammapy.maps.Map`
        Input map with an "energy" axis.
    spectrum : `~gammapy.modeling.models.SpectralModel`
        Spectral model to compute the weights.
        Default is power-law with spectral index of 2.

    Returns
    -------
    map_weighted : `~gammapy.maps.Map`
        Weighted image
    """
    if spectrum is None:
        spectrum = PowerLawSpectralModel(index=2.0)

    # Per-bin weight: integral flux of the spectrum over each energy bin,
    # normalised to sum to one.
    # NOTE(review): the docstring says an "energy" axis, but the code reads
    # the "energy_true" axis — confirm which is intended.
    energy_edges = map.geom.axes["energy_true"].edges
    weights = spectrum.integral(
        energy_min=energy_edges[:-1], energy_max=energy_edges[1:]
    )
    weights /= weights.sum()

    # Reshape weights to (n_energy, 1, 1, ...) so they broadcast along the
    # leading (energy) axis of the map data.
    shape = np.ones(len(map.geom.data_shape))
    shape[0] = -1
    return map * weights.reshape(shape.astype(int))
c27b1c342e51ed47270648f8598b6f66c3538f23
3,629,873
def clean_date_metadata(df):
    """Clean the collection and submission date metadata.

    Copies the raw GISAID date columns into "collection_date" and
    "submission_date", drops rows whose collection date is too unspecific
    (a bare year or shorter), and parses both columns into datetimes.
    """
    # Normalise the raw date strings (stringify + strip whitespace).
    df.loc[:, "collection_date"] = df["covv_collection_date"].astype(str).str.strip()
    df.loc[:, "submission_date"] = df["covv_subm_date"].astype(str).str.strip()

    # A date of 4 characters or fewer (e.g. "2020") is only a year —
    # too unspecific, so those rows are removed.
    df = df.loc[df["collection_date"].str.len() > 4, :]

    # Parse the remaining date strings (year-first formats).
    for column in ("collection_date", "submission_date"):
        df.loc[:, column] = pd.to_datetime(df[column], yearfirst=True)

    return df
6b407c57cc998dee31d2f169c9f45a91a125a1a8
3,629,874
def countries_reaction(t, react_time, top_countries):
    """Compute how long each country takes to react once the deceased
    limit is exceeded.

    Parameters
    ----------
    t : int
        Simulation instant.
    react_time : int
        Base reaction delay added to the random component.
    top_countries : list
        Indices of the countries to close.

    Returns
    -------
    country_react : dict
        Reaction instant (size-1 int array) for each country.
    flag : int
        Always 0.
    """
    base = react_time + t
    # One exponential draw per country, truncated to int, offset by the
    # base reaction instant.
    country_react = {
        country: np.random.exponential(scale=2, size=1).astype('int') + base
        for country in top_countries
    }
    return country_react, 0
b75402686250de48f5bafd8f0f4c5ca6a1251708
3,629,875
import struct
import numpy as np


def binaryread_struct(file, vartype, shape=(1), charlen=16):
    """Read text, a scalar value, or an array of values from a binary file.

    Parameters
    ----------
    file : file object
        An open file handle in binary mode, positioned at the data.
    vartype : type
        The return variable type: ``str``, ``numpy.int32``,
        ``numpy.float32``, or ``numpy.float64``. ``str`` reads raw bytes.
    shape : tuple of int, optional
        Shape of the returned array; ``(1)`` (a single value) by default,
        e.g. ``(nlay, nrow, ncol)``. Has no effect on strings.
    charlen : int, optional
        Length of the text string when ``vartype`` is ``str``. Note that
        string arrays cannot be returned, only multi-character strings.

    Returns
    -------
    bytes, numpy scalar, or numpy.ndarray
        The data read, reshaped to ``shape`` when more than one value.
    """
    # Mapping from numpy scalar type to struct format character.
    typefmtd = {np.int32: 'i', np.float32: 'f', np.float64: 'd'}
    # Read a string (raw bytes) of length charlen.
    if vartype == str:
        result = file.read(charlen)
    # Read numeric variable types.
    else:
        fmt = typefmtd[vartype]
        # Number of bytes for one value of this type.
        numbytes = vartype(1).nbytes
        # Total number of values to read. np.prod replaces the deprecated
        # np.core.fromnumeric.prod (np.core was removed in NumPy 2.0);
        # the computed value is identical.
        nval = int(np.prod(shape))
        fmt = str(nval) + fmt
        raw = file.read(numbytes * nval)
        result = struct.unpack(fmt, raw)
        if nval == 1:
            result = vartype(result[0])
        else:
            result = np.reshape(np.array(result, dtype=vartype), shape)
    return result
c9e718e929598560206f2ee73a2b92f9347be5d6
3,629,876
def flatten_and_structure_dimensions(op, parameters, number_of_dimensions=None):
    """Unroll nested lists into flat lists, apply the operation, and roll
    the resulting flat list back into nested lists.

    Parameters
    ----------
    op :
        Operation to apply to the (tuple of) flat lists, resulting in one
        flat list.
    parameters :
        A single nested structure, or a tuple of them.
    number_of_dimensions :
        How many of the outer dimensions to peel off, leaving the inner
        dimensions nested.
    """
    multi_args = isinstance(parameters, tuple)
    params = parameters if multi_args else [parameters]
    # Materialise as a list (not a lazy map): it is indexed below and the
    # original Python 2 code relied on map() returning a list. The py2-only
    # tuple-parameter lambdas are replaced with comprehensions so this is
    # valid on Python 3 as well.
    dimensions_and_arguments = [
        flatten_dimensions(parameter, number_of_dimensions) for parameter in params
    ]
    arguments = [argument for _dimensions, argument in dimensions_and_arguments]
    # list() keeps py2 semantics, where zip() returned a concrete list.
    interleaved_arguments = list(zip(*arguments)) if multi_args else arguments[0]
    processed_elements = op(interleaved_arguments)
    # Restore the original nesting using the first parameter's dimensions.
    return structure_dimensions(dimensions_and_arguments[0][0], processed_elements)
0e52c8f19204a064bf73b15276fde8820263135f
3,629,877
from typing import Any


def create_result_scalar(name: str, item_type: str, value: Any) -> dict:
    """Build a scalar result dict for posting to the EMPAIA App API.

    :param name: Name of the result
    :param item_type: Type of result
    :param value: Value of the result
    """
    return {"name": name, "type": item_type, "value": value}
3fb16c540cc8c76cfc42e4a906e4be280346b802
3,629,878
def create_edge(source_id, target_id, relationship_type, vitrage_is_deleted=False, update_timestamp=None, metadata=None):
    """A builder to create an edge.

    :param source_id: id of the source vertex
    :type source_id: str
    :param target_id: id of the target vertex
    :type target_id: str
    :param relationship_type: also used as the edge label
    :type relationship_type: str
    :param vitrage_is_deleted: deletion marker
    :param update_timestamp: last-update timestamp, omitted when None
    :param metadata: extra properties merged in (may override the defaults)
    :type metadata: dict
    :return: the constructed edge
    :rtype: Edge
    """
    properties = {
        EConst.UPDATE_TIMESTAMP: update_timestamp,
        EConst.VITRAGE_IS_DELETED: vitrage_is_deleted,
        EConst.RELATIONSHIP_TYPE: relationship_type,
    }
    properties.update(metadata or {})
    # None-valued properties are dropped entirely.
    cleaned = {key: val for key, val in properties.items() if val is not None}
    return Edge(source_id=source_id,
                target_id=target_id,
                label=relationship_type,
                properties=cleaned)
e6a367ec0d05fbbe92a1f763d3039c2e02bf8b9d
3,629,879
def hg_ui_with_checkers(hg_ui, checkers):
    """Get test mercurial ui with checkers config set up."""
    # Register each checker under the hg_commit_sanity config section.
    for key, value in checkers.items():
        hg_ui.setconfig('hg_commit_sanity', key, value)
    # NOTE(review): `hg_repo` is not a parameter and is not defined here —
    # presumably a module-level fixture; verify it is actually in scope.
    hg_commit_sanity.reposetup(hg_ui, hg_repo)
    return hg_ui
c818ac9b77505cc6bee54a06db4b8b1556a96ba7
3,629,880
def get_inbox_status():
    """Return the current inbox status as a formatted string.

    Builds three sections — direct mail, cc'd mail, and total — each
    showing its count and unread count.
    """
    emails = get_inbox_emails(EMAIL)
    new = get_new_emails(emails)
    # Split into mail addressed directly to us vs. mail we are cc'd on.
    (direct, cced) = get_direct_emails(emails, MY_EMAILS)
    nb_total = len(emails)
    nb_direct = len(direct)
    nb_cced = len(cced)
    nb_total_new = get_nb_new(new)
    nb_direct_new = get_nb_new(get_new_emails(direct))
    nb_cced_new = get_nb_new(get_new_emails(cced))
    # The first argument of fmt_section is the section's icon/label string.
    # NOTE(review): these glyph literals may have been mangled in transit —
    # verify them against the original source.
    direct = fmt_section(' ', nb_direct, nb_direct_new)
    cced = fmt_section(' ', nb_cced, nb_cced_new)
    total = fmt_section(' ', nb_total, nb_total_new, '')
    msg = '{}{}{}'.format(direct, cced, total)
    return msg
444772b165b9cd9d868e7599b6eea5d71226ea52
3,629,881
def fill_correlation_matrix(c_vec):
    """
    Create a Theano tensor object representing a correlation matrix of a
    multivariate normal distribution.

    :param c_vec: PyMC3 model variable corresponding to the `LKJCorr` prior
        on elements of the correlation matrix
    :return: correlation matrix as a Theano tensor object
    """
    # NOTE(review): `n` is taken from the length of c_vec's test value and
    # then used as the matrix dimension — confirm this matches the LKJCorr
    # parameterisation in use (a k x k matrix has k*(k-1)/2 free elements).
    n = c_vec.tag.test_value.shape[0]
    n_layers = n - 1
    # `m` is a reusable one-hot symmetric mask: for each off-diagonal pair
    # (layer, j) a 1 is set at (layer, j) and (j, layer), the corresponding
    # c_vec entry is accumulated into `res` via the mask, and the mask is
    # cleared again for the next pair.
    m = np.zeros((n, n))
    res = tt.nlinalg.matrix_dot(m, 1)
    ind = 0
    for layer in range(n_layers):
        start_col = layer + 1
        for j in range(start_col, n):
            m[layer, j] = 1
            m[j, layer] = 1
            res += tt.nlinalg.matrix_dot(m, c_vec[ind])
            ind += 1
            m[layer, j] = 0
            m[j, layer] = 0
    # Correlation matrices have unit diagonal.
    res = tt.fill_diagonal(res, 1.)
    return res
e32037b7ce573c1a9faefab5b57f771ea7cd90a2
3,629,882
def find_isomorphism(G1, G2):
    """Search for isomorphism between two graphs

    Args:
        G1 (networkx.Graph)
        G2 (networkx.Graph)

    Returns:
        If no isomorphism is found, returns None. Otherwise, returns dict
        with keys as nodes from graph 1 and values as corresponding nodes
        from graph 2.
    """
    # Different node counts: the graphs cannot be isomorphic.
    if G1.number_of_nodes() != G2.number_of_nodes():
        return None

    # Encode the isomorphism search as a discrete quadratic model and
    # solve it on the D-Wave Leap hybrid DQM sampler.
    dqm = create_dqm(G1, G2)
    sampler = LeapHybridDQMSampler()
    results = sampler.sample_dqm(dqm, label='Example - Circuit Equivalence')
    best = results.first
    # Energy 0 means all constraints are satisfied, i.e. a valid mapping
    # from G1 nodes to G2 nodes was found.
    if np.isclose(best.energy, 0.0):
        G2_nodes = list(G2.nodes)
        return {k: G2_nodes[i] for k,i in best.sample.items()}
    else:
        # Isomorphism not found
        return None
b1688167e805f454150284ba557bd731aef37500
3,629,883
def list_bancos(request):
    """List banks ("bancos") for the logged-in employee's user.

    Raises Http404 when the user has no matching Funcionario record.
    """
    usuario = request.user
    dados = {}
    try:
        funcionario = Funcionario.objects.get(usuario_fun=usuario)
    except Exception:
        raise Http404()
    if funcionario:
        # Search term from the query string.
        termo_pesquisa = request.GET.get('pesquisa', None)
        # NOTE: searches should be performed directly on the model.
        if termo_pesquisa:
            bancos = Fin_Banco.objects.all()
            # __icontains: case-insensitive substring match.
            bancos = bancos.filter(banco__icontains=termo_pesquisa)
        else:
            bancos = Fin_Banco.objects.all()
        dados = {"bancos": bancos}
    else:
        raise Http404()
    return render(request, "list_bancos.html", dados)
2a56d07314d08cae1516a9db8514688a5b5b413f
3,629,884
def encode_jwt_token(data, api_secret_code=None):
    """Encode a Python dictionary as a signed JWT token (HS256).

    :param data: Dictionary with the payload.
    :param api_secret_code: Optional secret string; the application
        SECRET_KEY is used by default.
    :return: JWT token string with the encoded and signed data.
    """
    secret = settings.SECRET_KEY if api_secret_code is None else api_secret_code
    return jwt.encode(data, secret, algorithm='HS256', json_encoder=DjangoJSONEncoder)
a934b94687c7767bff9e5f8403f14c1ca9dae5ae
3,629,885
import logging


def setup():
    """Perform setup tasks for the program.

    Creates and configures a logger, a sqlite database connection, and a
    webdriver.

    Returns:
        A Chromium based Selenium webdriver.
    """
    logger_helper()
    db_helper()
    driver = webdriver_helper()
    logging.getLogger(LOGGER_NAME).debug("Setup complete.")
    return driver
5ebb01c2e8a8a4a6325d3d420ad35fa82e9056eb
3,629,886
from typing import Union
from typing import Sequence
from typing import Hashable
from typing import Callable
from typing import Iterable
import PIL


def handwrite(
    text: str,
    template: Union[Template, Sequence[Template]],
    seed: Hashable = None,
    mapper: Callable[[Callable, Iterable], Iterable] = map,
) -> Iterable[PIL.Image.Image]:
    """Handwrite `text` with the configurations in `template`, and return
    an Iterable of Pillow's Images.

    `template` could be a Template instance or a Sequence of Template
    instances. If pass a Template Sequence, the inside Template instances
    will be applied cyclically to the output pages.

    `seed` could be used for reproducibility.

    A different implementation of map built-in function (only accept one
    Iterable though) could be passed to `mapper` to boost the page
    rendering process, e.g. `multiprocessing.Pool.map`.
    """
    # Normalise to a tuple of templates: a single Template applies to every
    # page, a sequence is cycled across pages.
    if isinstance(template, Template):
        templates = (template,)
    else:
        templates = template
    # Lay the text out into pages first.
    pages = _draft(text, templates, seed)
    # Work on copies made picklable (resources released) so the renderer
    # can be shipped to worker processes when `mapper` is a parallel map.
    templates = copy_templates(templates)
    for t in templates:
        t.to_picklable(resources_released=True)
    renderer = _Renderer(templates, seed)
    # Rendering is driven lazily by `mapper` over the drafted pages.
    return mapper(renderer, pages)
27552d307f964f274e4c6080f183c3e99be0c3c0
3,629,887
def printMathExp(btree: BinaryTree) -> str:
    """Render the whole math expression stored in the binary tree.

    A node with a left child opens a parenthesis; a node with a right
    child closes one. Empty subtrees contribute nothing.
    """
    if btree is None:
        return ''
    pieces = []
    if btree.left is not None:
        pieces.append('(')
    pieces.append(printMathExp(btree.left))
    pieces.append(str(btree.key))
    pieces.append(printMathExp(btree.right))
    if btree.right is not None:
        pieces.append(')')
    return ''.join(pieces)
e7499d99ec55785f1a2e8744b90ddd3f1d0acdf5
3,629,888
import os
import time
import requests


def download_from_url(url: str, file_path='', attempts=28):
    """Downloads a URL's content into a file (large-file support via streaming).

    :param url: URL to download
    :param file_path: Local file name to contain the data downloaded;
        defaults to the URL's basename in the current directory.
    :param attempts: Number of download attempts before giving up
    :return: ``True`` on success; ``None`` when the file already exists or
        every attempt failed.
    """
    logger.info("Checking if path is correct and file already exists.")
    logger.info(url)
    if not file_path:
        file_path = os.path.realpath(os.path.basename(url))
    logger.info(f'Downloading {url} content to {file_path}')
    # Skip the download when a readable copy is already on disk.
    if os.path.isfile(file_path) and os.access(file_path, os.R_OK):
        logger.info("File already exists and is readable. Not downloading again.")
        return None
    logger.info("File either inexistant or unreadable. Downloading.")
    url_sections = urlparse(url)
    if not url_sections.scheme:
        logger.info('The given url is missing a scheme. Adding http scheme')
        url = f'http://{url}'
        logger.info(f'New url: {url}')
    for attempt in range(1, attempts + 1):
        try:
            if attempt > 1:
                time.sleep(60 * 6)  # 6 minute wait between retries
            # Single streamed request; the original opened the URL twice
            # via two nested `with requests.get(...)` blocks.
            with requests.get(url, stream=True) as r:
                r.raise_for_status()
                with open(file_path, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
            logger.info('Download finished successfully')
            return True
        except Exception as ex:
            logger.error(f'Attempt #{attempt} of {attempts} failed with error: {ex}')
import logging


def create_presigned_url(bucket_name, bucket_key, expiration=3600, signature_version=s3_signature['v4']):
    """Generate a presigned URL for the S3 object.

    :param bucket_name: string, name of the S3 bucket
    :param bucket_key: string, key of the object within the bucket
    :param expiration: Time in seconds for the presigned URL to remain valid
    :param signature_version: string, signature version used to sign the URL
    :return: Presigned URL as string. If error, returns None.
    """
    s3_client = boto3.client(
        's3',
        aws_access_key_id=AWS_ACCESS_KEY_ID,
        aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
        config=Config(signature_version=signature_version),
        region_name=AWS_DEFAULT_REGION,
    )
    try:
        response = s3_client.generate_presigned_url(
            'get_object',
            Params={'Bucket': bucket_name, 'Key': bucket_key},
            ExpiresIn=expiration,
        )
        # Debug prints of list_buckets()/list_objects() removed: they made
        # extra S3 API calls (cost + latency) on every invocation.
    except ClientError as e:
        logging.error(e)
        return None
    # The response contains the presigned URL
    return response
2d3198cd7db5f09ab08a8a1e0abcaa1d85adf288
3,629,890
import os


def setup():
    """Load all resources: dog quotes from every supported format and the
    dog image paths.

    Returns:
        tuple of (list of quotes, list of .jpg image paths).
    """
    quote_files = ['./_data/DogQuotes/DogQuotesTXT.txt',
                   './_data/DogQuotes/DogQuotesDOCX.docx',
                   './_data/DogQuotes/DogQuotesPDF.pdf',
                   './_data/DogQuotes/DogQuotesCSV.csv']
    # Flatten the quotes parsed from each source file.
    quotes = [quote for path in quote_files for quote in Ingestor.parse(path)]

    images_path = "./_data/photos/dog/"
    # Collect every .jpg found anywhere under the photos directory.
    imgs = [os.path.join(root, name)
            for root, _dirs, files in os.walk(images_path)
            for name in files
            if name.endswith('.jpg')]
    return quotes, imgs
a21818600ae0d9c7bc3dd4d95c44638114480d00
3,629,891
def softmax_op(node, ctx=None):
    """
    This function computes its softmax along an axis.

    Parameters:
    ----
    node : Node
        Input variable.
    ctx :
        Optional execution context forwarded to the op.

    Returns:
    ----
    A new Node instance created by Op.
    """
    # Thin functional-style wrapper around the SoftmaxOp node constructor.
    return SoftmaxOp(node, ctx=ctx)
5cca012944bb41364cc41b3311d9b6a9ce386d55
3,629,892
def circle_fit(coords):
    """Fit the least-squares circle to a set of 2D points ``(x, y)``.

    Parameters
    ----------
    coords : (N, 2) ndarray
        Set of ``x`` and ``y`` coordinates.

    Returns
    -------
    centre_i : (2,) ndarray
        The 2D coordinates of the centre of the circle.
    r_i : double
        The radius of the circle.

    References
    ----------
    .. [1] http://www.scipy.org/Cookbook/Least_Squares_Circle
    """
    def _mean_sq_radius(points, centre):
        # Mean squared distance of the points from a candidate centre.
        return np.mean(np.sum((points - centre) ** 2, axis=1))

    def _radial_residuals(centre, xs, ys):
        # Deviation of each point's distance-to-centre from the mean
        # distance; zero everywhere for a perfect circle.
        dists = np.sqrt((xs - centre[0]) ** 2 + (ys - centre[1]) ** 2)
        return dists - dists.mean()

    initial_centre = np.mean(coords, axis=0)
    centre_i, _ier = optimize.leastsq(
        _radial_residuals, initial_centre, args=(coords[:, 0], coords[:, 1]))
    r_i = np.sqrt(_mean_sq_radius(coords, centre_i))
    return centre_i, r_i
cac5275b1b3d59040c0acc94c12e492d78dfd647
3,629,893
import http


def dispatcher(request, slug, view_name, *args, **kwargs):
    """Dispatcher that loads configuration corresponding to `slug` and
    dispatches view corresponding to `view_name` on said configuration.

    The configuration will be added to the `extra_context` attribute of all
    dispatched views.
    """
    try:
        conf = options.get(slug)
        view = getattr(conf, view_name)
    except (options.ConfigurationNotRegistered, AttributeError):
        # Unknown slug or view name -> 404.
        # NOTE(review): the stdlib `http` module imported above has no
        # `Http404` — presumably `django.http` was intended; verify.
        raise http.Http404
    # TODO: It might be a better idea to add the configuration to context in
    # the configuration class itself to ensure it's available.
    extra_context = kwargs.get('extra_context', {})
    extra_context.update({
        'group_config': conf
    })
    kwargs['extra_context'] = extra_context
    return view(request, *args, **kwargs)
70c672b6802e71c7151a202a107cb5f722d7ba0c
3,629,894
def LayerSet_toLayers(ifc_file):
    """Map each IfcMaterialLayerSet id to a list of its material layers.

    Returns a dictionary where keys are the Id of the IfcMaterialLayerSet
    and where the values are a list (ListOfLayers) with an element per
    material layer. Each layer element stores the Id (number), material
    and thickness of that layer.
    """
    IfcLayerSet = ifc_file.by_type("IfcMaterialLayerSet")
    LayerSet = {}
    for ls in IfcLayerSet:
        # ls.is_a() == "IfcMaterialLayerSet"
        ListOfLayers = []
        for ml in ls.MaterialLayers:
            # ml.is_a() == "IfcMaterialLayer"; wrap its id, material and
            # thickness in the project's Layer data class.
            currentLayer = IfcLib.DataClasses.Layer(ml.id(), ml.Material, ml.LayerThickness)
            ListOfLayers.append(currentLayer)
        LayerSet[ls.id()] = ListOfLayers
    return LayerSet
eaac63c2ee264e32dcf885036671a97bdffd50ba
3,629,895
def conv2d_annotate_fn(expr):  # pylint: disable=unused-variable
    """Check if nn.conv2d is supported by TensorRT."""
    def _reject(message, value):
        # Log why the op is rejected and signal "unsupported".
        logger.info(message, value)
        return False

    attrs, args = expr.attrs, expr.args
    if not is_supported_trt_dtype(args):
        return False
    if not isinstance(args[1], Constant):
        logger.info("nn.conv2d: kernel argument must be constant.")
        return False
    if attrs.data_layout != "NCHW":
        return _reject("nn.conv2d: data_layout is %s but must be NCHW.", attrs.data_layout)
    if attrs.kernel_layout != "OIHW":
        return _reject("nn.conv2d: kernel_layout is %s but must be OIHW.", attrs.kernel_layout)
    if attrs.out_layout and attrs.out_layout != "NCHW":
        return _reject("nn.conv2d: out_layout is %s but must be NCHW.", attrs.out_layout)
    return True
b5567d4369e9f9eeaabbacb34e9b6ae7554a4d41
3,629,896
from google.cloud import bigquery
from typing import List
from datetime import datetime, timedelta


def create_bq_view_of_joined_features_and_entities(
    source: BigQuerySource, entity_source: BigQuerySource, entity_names: List[str]
) -> BigQuerySource:
    """
    Creates BQ view that joins tables from `source` and `entity_source` with
    join key derived from `entity_names`. Returns BigQuerySource with
    reference to created view. The BQ view will be created in the same BQ
    dataset as `entity_source`.
    """
    bq_client = bigquery.Client()

    source_ref = table_reference_from_string(source.bigquery_options.table_ref)
    entities_ref = table_reference_from_string(entity_source.bigquery_options.table_ref)

    # The view lives next to the entity table, with a timestamped name.
    # NOTE(review): the strftime uses %s (epoch seconds, platform-dependent)
    # rather than %S — confirm whether that is intentional.
    destination_ref = bigquery.TableReference(
        bigquery.DatasetReference(entities_ref.project, entities_ref.dataset_id),
        f"_view_{source_ref.table_id}_{datetime.now():%Y%m%d%H%M%s}",
    )

    view = bigquery.Table(destination_ref)
    join_template = """
    SELECT source.*
    FROM `{entities.project}.{entities.dataset_id}.{entities.table_id}` entities
    JOIN `{source.project}.{source.dataset_id}.{source.table_id}` source
    ON ({entity_key})"""
    view.view_query = join_template.format(
        entities=entities_ref,
        source=source_ref,
        entity_key=" AND ".join([f"source.{e} = entities.{e}" for e in entity_names]),
    )
    # Auto-expire the temporary view after one day.
    # Fix: `timedelta` was used without being imported (only `datetime`
    # was imported), which raised NameError at this line.
    view.expires = datetime.now() + timedelta(days=1)
    bq_client.create_table(view)

    return BigQuerySource(
        event_timestamp_column=source.event_timestamp_column,
        created_timestamp_column=source.created_timestamp_column,
        table_ref=f"{view.project}:{view.dataset_id}.{view.table_id}",
        field_mapping=source.field_mapping,
        date_partition_column=source.date_partition_column,
    )
import sqlite3


def get_db():
    """Connect to the application's configured database.

    The connection is unique for each request and will be reused if this
    is called again.
    """
    # Lazily open one connection per request context (Flask `g`).
    if "db" not in g:
        g.db = sqlite3.connect(
            # PARSE_DECLTYPES lets sqlite3 convert declared column types
            # (e.g. TIMESTAMP) into Python objects.
            current_app.config["DATABASE"], detect_types=sqlite3.PARSE_DECLTYPES
        )
        # Rows behave like dicts: columns are accessible by name.
        g.db.row_factory = sqlite3.Row
    return g.db
95bcd9e7338b402c040307e02d5dbbe4893453f8
3,629,898
import struct

# NOTE(review): this is Bazel Starlark (rules_go); the `import struct` line
# above is Python syntax and is not valid Starlark — presumably a dataset
# artifact, since Starlark's `struct` is a builtin. Verify against the
# original .bzl file.
def _go_test_impl(ctx):
    """go_test_impl implements go testing.

    It emits an action to run the test generator, and then compiles the
    test into a binary."""

    go = go_context(ctx)

    # Compile the library to test with internal white box tests
    internal_library = go.new_library(go, testfilter = "exclude")
    internal_source = go.library_to_source(go, ctx.attr, internal_library, ctx.coverage_instrumented())
    internal_archive = go.archive(go, internal_source)
    go_srcs = split_srcs(internal_source.srcs).go

    # Compile the library with the external black box tests
    external_library = go.new_library(
        go,
        name = internal_library.name + "_test",
        importpath = internal_library.importpath + "_test",
        testfilter = "only",
    )
    external_source = go.library_to_source(go, struct(
        srcs = [struct(files = go_srcs)],
        embedsrcs = [struct(files = internal_source.embedsrcs)],
        deps = internal_archive.direct + [internal_archive],
        x_defs = ctx.attr.x_defs,
    ), external_library, ctx.coverage_instrumented())
    external_source, internal_archive = _recompile_external_deps(go, external_source, internal_archive, [t.label for t in ctx.attr.embed])
    external_archive = go.archive(go, external_source)
    external_srcs = split_srcs(external_source.srcs).go

    # now generate the main function
    if ctx.attr.rundir:
        if ctx.attr.rundir.startswith("/"):
            run_dir = ctx.attr.rundir
        else:
            run_dir = pkg_dir(ctx.label.workspace_root, ctx.attr.rundir)
    else:
        run_dir = pkg_dir(ctx.label.workspace_root, ctx.label.package)

    # Generate testmain.go with the gentestmain builder.
    main_go = go.declare_file(go, path = "testmain.go")
    arguments = go.builder_args(go, "gentestmain")
    arguments.add("-output", main_go)
    if go.coverage_enabled:
        if go.mode.race:
            arguments.add("-cover_mode", "atomic")
        else:
            arguments.add("-cover_mode", "set")
    arguments.add(
        # the l is the alias for the package under test, the l_test must be the
        # same with the test suffix
        "-import",
        "l=" + internal_source.library.importpath,
    )
    arguments.add(
        "-import",
        "l_test=" + external_source.library.importpath,
    )
    arguments.add("-pkgname", internal_source.library.importpath)
    arguments.add_all(go_srcs, before_each = "-src", format_each = "l=%s")
    ctx.actions.run(
        inputs = go_srcs,
        outputs = [main_go],
        mnemonic = "GoTestGenTest",
        executable = go.toolchain._builder,
        arguments = [arguments],
    )

    test_gc_linkopts = gc_linkopts(ctx)
    if not go.mode.debug:
        # Disable symbol table and DWARF generation for test binaries.
        test_gc_linkopts.extend(["-s", "-w"])

    # Link in the run_dir global for bzltestutil
    test_gc_linkopts.extend(["-X", "github.com/bazelbuild/rules_go/go/tools/bzltestutil.RunDir=" + run_dir])

    # Now compile the test binary itself
    test_library = GoLibrary(
        name = go.label.name + "~testmain",
        label = go.label,
        importpath = "testmain",
        importmap = "testmain",
        importpath_aliases = (),
        pathtype = INFERRED_PATH,
        is_main = True,
        resolve = None,
    )
    test_deps = external_archive.direct + [external_archive] + ctx.attr._testmain_additional_deps
    if ctx.configuration.coverage_enabled:
        test_deps.append(go.coverdata)
    test_source = go.library_to_source(go, struct(
        srcs = [struct(files = [main_go])],
        deps = test_deps,
    ), test_library, False)
    test_archive, executable, runfiles = go.binary(
        go,
        name = ctx.label.name,
        source = test_source,
        test_archives = [internal_archive.data],
        gc_linkopts = test_gc_linkopts,
        version_file = ctx.version_file,
        info_file = ctx.info_file,
    )

    # Expand $(location ...) and make-variable references in env values.
    env = {}
    for k, v in ctx.attr.env.items():
        env[k] = ctx.expand_location(v, ctx.attr.data)

    # Bazel only looks for coverage data if the test target has an
    # InstrumentedFilesProvider. If the provider is found and at least one
    # source file is present, Bazel will set the COVERAGE_OUTPUT_FILE
    # environment variable during tests and will save that file to the build
    # events + test outputs.
    return [
        test_archive,
        DefaultInfo(
            files = depset([executable]),
            runfiles = runfiles,
            executable = executable,
        ),
        OutputGroupInfo(
            compilation_outputs = [internal_archive.data.file],
        ),
        coverage_common.instrumented_files_info(
            ctx,
            source_attributes = ["srcs"],
            dependency_attributes = ["deps", "embed"],
            extensions = ["go"],
        ),
        testing.TestEnvironment(env),
    ]
e6f5b5fd0cde681809495db4bfc9eb9fe4fe106c
3,629,899