content: string, length 35-762k
sha1: string, length 40
id: int64, range 0-3.66M
def tcl_delta_remote(curef): """ Prepare remote version for delta scanning. :param curef: PRD of the phone variant to check. :type curef: str """ remotedict = networkutilstcl.remote_prd_info() fvver = remotedict.get(curef, "AAA000") if fvver == "AAA000": print("NO REMOTE VERSION FOUND!") raise SystemExit return fvver
65d1aeb25ce58c066465c3b7eb3e560a54224ba7
32,400
from typing import Iterable def prodi(items: Iterable[float]) -> float: """Imperative product >>> prodi( [1,2,3,4,5,6,7] ) 5040 """ p: float = 1 for n in items: p *= n return p
3b8e52f40a760939d5b291ae97c4d7134a5ab450
32,401
def transformer_prepare_encoder(inputs, target_space, hparams): """Prepare one shard of the model for the encoder. Args: inputs: a Tensor. target_space: a Tensor. hparams: run hyperparameters Returns: encoder_input: a Tensor, bottom of encoder stack encoder_self_attention_bias: a Tensor, containing large negative values to implement masked attention and possibly baises for diagonal alignments encoder_padding: a Tensor """ # Flatten inputs. ishape_static = inputs.shape.as_list() encoder_input = inputs encoder_padding = common_attention.embedding_to_padding(encoder_input) encoder_self_attention_bias = common_attention.attention_bias_ignore_padding( encoder_padding) # Append target_space_id embedding to inputs. emb_target_space = common_layers.embedding( target_space, 32, ishape_static[-1], name="target_space_embedding") emb_target_space = tf.reshape(emb_target_space, [1, 1, -1]) encoder_input += emb_target_space if hparams.pos == "timing": encoder_input = common_attention.add_timing_signal_1d(encoder_input) return (encoder_input, encoder_self_attention_bias, encoder_padding)
af9c5e2ab8fe3508722af822f19671461e92e62d
32,402
from typing import Tuple def get_bottom_left_coords( text_width: int, text_height: int, text_x: int, text_y: int, ) -> Tuple[TextOrg, BoxCoords]: """Get coordinates for text and background in bottom left corner. Args: text_width: Width of the text to be drawn. text_height: Height of the text to be drawn. text_x: X coordinate of the bottom-left corner of the text. text_y: Y coordinate of the bottom-left corner of the text. Returns: A tuple consisting of tuples for text point and text box coordinates. """ text_offset_x = ( text_x + config.settings.annotation_padding + config.settings.annotation_margin ) text_offset_y = ( text_y - config.settings.annotation_padding - config.settings.annotation_margin ) box_coords = ( ( text_offset_x - config.settings.annotation_padding, text_offset_y + config.settings.annotation_padding, ), ( text_offset_x + text_width + config.settings.annotation_padding, text_offset_y - text_height - config.settings.annotation_padding, ), ) text_org = (text_offset_x, text_offset_y) return AnnotationCoords(text_org, box_coords)
8cf1f88f98990bb727de86152ea5239b725c0fbc
32,403
def read_gcs_zarr(zarr_url, token='/opt/gcsfuse_tokens/impactlab-data.json', check=False): """ takes in a GCSFS zarr url, bucket token, and returns a dataset Note that you will need to have the proper bucket authentication. """ fs = gcsfs.GCSFileSystem(token=token) store_path = fs.get_mapper(zarr_url, check=check) ds = xr.open_zarr(store_path) return ds
f6ac1e149639afbc10af052a288eb6536a43a13a
32,404
import operator from itertools import accumulate def bor(*args: int) -> int: """Bitwise or. Example: bor(0x01, 0x10) == 0x01 | 0x10 Returns: int: Bitwise OR of all inputs. """ return list(accumulate(args, operator.or_))[-1]
6e40612b0117fef5584857c947910fa4a0fa865f
32,405
from Scikit.ML.DocHelperMlExt import MamlHelper def mlnet_components_kinds(): """ Retrieves all kinds. """ kinds = list(MamlHelper.GetAllKinds()) kinds += ["argument", "command"] kinds = list(set(kinds)) titles = { 'anomalydetectortrainer': 'Anomaly Detection', 'binaryclassifiertrainer': 'Binary Classification', 'clusteringtrainer': 'Clustering', 'dataloader': 'Data Loader', 'datasaver': 'Data Saver', 'datascorer': 'Scoring', 'datatransform': 'Transforms (all)', 'ensembledataselector': 'Data Selection', 'evaluator': 'Evaluation', 'multiclassclassifiertrainer': 'Multiclass Classification', 'ngramextractorfactory': 'N-Grams', 'rankertrainer': 'Ranking', 'regressortrainer': 'Regression', 'tokenizetransform': 'Tokenization', 'argument': 'Arguments', 'command': 'Commands', } return {k: titles[k] for k in kinds if k in titles}
f5a365f2054a263786ca17a96d58d9f39c7061fe
32,406
import os import pickle def get_model(model_name, recompile=False): """ Get compiled StanModel This will compile the stan model if a cached pickle does not exist. Args: model_name (str): model name without `.stan` recompile (bool): Force recompilation. """ model_file = model_path(model_name) cache_file = model_cache_path(model_name) if (not os.path.exists(cache_file)) or recompile: model = pystan.StanModel(file=model_file) logger.info("Compiling {:s}".format(model_name)) with open(cache_file, "wb") as f: pickle.dump(model, f) else: logger.info("Reading model from disk") model = pickle.load(open(cache_file, "rb")) return model
488e939370bc0b7b1a1c36e8c2d63610f275c206
32,407
from pathlib import Path import os def get_pkr_path(raise_if_not_found=True): """Return the path of the pkr folder If the env. var 'PKR_PATH' is specified, it is returned, otherwise a KeyError exception is raised. """ full_path = Path(os.environ.get(PATH_ENV_VAR, os.getcwd())).absolute() pkr_path = full_path while pkr_path.parent != pkr_path: if is_pkr_path(pkr_path): return pkr_path pkr_path = pkr_path.parent if raise_if_not_found and not is_pkr_path(pkr_path): raise KardInitializationException( "{} path {} is not a valid pkr path, no usable env found".format( "Given" if PATH_ENV_VAR in os.environ else "Current", full_path ) ) return pkr_path
2e43dc9121db910c0b7c9bfa4e21e63203bc0411
32,408
import time import json def wait_for_aee_finish_v1(cfs_name, cfs_namespace): # noqa: C901 """ Consults k8s API for status information about our CFS/AEE instance; returns its exit code. """ try: config.load_incluster_config() except ConfigException: # pragma: no cover config.load_kube_config() # Development _api_client = client.ApiClient() k8score = client.CoreV1Api(_api_client) ansible_status = None # Wait for the AEE pod to finish up for this CFS Session while not ansible_status: # Create a stream of events and changes from k8s stream = None while not stream: try: stream = watch.Watch().stream( k8score.list_namespaced_pod, cfs_namespace, label_selector="aee=%s" % cfs_name) except (HTTPError, MaxRetryError, ApiException) as e: LOGGER.warning("Unable to chat with k8s API to obtain\ an event stream: %s" % (e)) time.sleep(5) continue LOGGER.info("Obtained an event stream from k8s...") # Process all events in the obtained stream; NOTE: we cannot rely on # the MODIFIED event types, because the event in the stream may have # already passed before we started watching or between stream initialization try: for event in stream: LOGGER.debug("RAW OBJECT: %s", json.dumps(event['raw_object'], indent=2)) obj = event['object'] # Check the container status # Container status is not guaranteed to exists, so check before iterating. if not obj.status.container_statuses: continue for cs in obj.status.container_statuses: if cs.name == "ansible": if cs.state.terminated: ansible_status = cs.state.terminated break # Status is set, no longer process events if ansible_status: break except (HTTPError, MaxRetryError, ApiException) as e: LOGGER.warning("Failed processing event stream from k8s; " "established event stream terminated: %s" % (e)) time.sleep(5) continue return ansible_status.exit_code
2acf11a74aa71c8645cd9b2118b82c2513518451
32,409
def hexString(s): """ Output the bytes of s in hex s -- string return -- string with hex value """ return ":".join("{:02x}".format(ord(c)) for c in s)
22c1e94f0d54ca3d430e0342aa5b714f28a5815b
32,410
def hydrate_board_from_model(a, radius, rect_width): """ :type a: ndarray :type radius: int :return: Board """ b = Board(radius) for cellId in b.cells: thid = get_thid_from_cellId(cellId, rect_width) value = a[thid.y][thid.x] b.change_ownership(cellId, get_player_name_from_resource(value), int(abs(value))) return b
d7486e43beb2676aed32da627d03f018b4b91d65
32,411
from pathlib import Path def tree_walk(): """Walk the source folder using pathlib. Populate 3 dicts, a folder dict, a file dict, and a stats dict. - Returns: - [dict]: k: folders; v: size - [dict]: k: files; v: size - [dict]: 'file_size' 'num_dirs' 'num_files' """ try: # ~~~ # -variables- walk_dirs_dict, walk_files_dict = dd(list), dd(list) stat_dict = {'file_size': 0, 'num_dirs': 0, 'num_files': 0} # create exdir and exfile lists if args.exdir: exdir = args.exdir.split(',') if args.exfile: exfile = args.exfile.split(',') p = Path(args.source) # ~~~ # -rglob- for item in p.rglob('*'): if item.is_dir(): # add folders if no folder exclusions if not args.exdir: walk_dirs_dict[item] = item.stat().st_size stat_dict['num_dirs'] += 1 else: # add folders if the exclusion is not in the folder path for z in exdir: if z not in item: walk_dirs_dict[item] = item.stat().st_size stat_dict['num_dirs'] += 1 else: # add files if no file exclusions if not args.exfile: walk_files_dict[item] = item.stat().st_size stat_dict['num_files'] += 1 stat_dict['file_size'] += item.stat().st_size else: # add files if the exclusion is not in the folder path for z in exfile: if z not in item: walk_files_dict[item] = item.stat().st_size stat_dict['num_files'] += 1 stat_dict['file_size'] += item.stat().st_size except OSError as e: bp([f'tree walk failure: {args.source}\n{e}', Ct.RED], err=2) return walk_dirs_dict, walk_files_dict, stat_dict
e7edf9897560ee5d0ddab5344e0993e4185e6009
32,412
def handle_response(response, content_type, file_path=None): """handle response. Extract, transform and emit/write to file""" if content_type == "application/json": if file_path is None: return response.json() else: save_json(response.json(), file_path) elif content_type == "image/jpeg": if file_path is None: return response._content else: save_bytes_as_png(response._content, file_path) elif content_type == "text/csv": if file_path is None: return response.text else: save_csv(response.text, file_path)
43291fdf367f27ee3c982235518d6bf28d600691
32,413
import logging def parse_request(env) -> tuple[str, dict]: """ Parse request: resolve method, load method config, merge request method params with defaults. Return: method name string, method params dict """ request_method = env['REQUEST_METHOD'].upper() url_parts = urlparse(unquote(env['REQUEST_URI'])) logging.debug('Start %s request parsing: %s', request_method, url_parts) query_str, method_params = "", {} if request_method in ['HEAD', 'GET']: query_str = url_parts.query elif request_method == 'POST': try: content_length = int(env.get('CONTENT_LENGTH', 0)) except ValueError: content_length = 0 if content_length: query_str = env['wsgi.input'].read(content_length).decode() else: raise HttpException(HTTPStatus.BAD_REQUEST, f'{request_method} is invalid for resource {url_parts.path}') if query_str: method = url_parts.path.rpartition("/")[-1] method_params = dict(parse_qsl(query_str, keep_blank_values=True)) else: method = APP_DEFAULT_METHOD method_params['ip'] = url_parts.path.rpartition("/")[-1] method_config = config_method(method) default_method_params = dict((key.replace('request_param_', '').lower(), method_config[key]) for key in method_config.keys() if key.startswith('request_param_')) method_params = default_method_params | method_params # merge method params with defaults from config app section [app.{method}] request_param_* values logging.debug('Request parsed: method=%s\nmethod_params=[%s]\nmethod_config=[%s]', method, jpp(method_params), jpp(method_config)) return method, method_params
36632a86abb34fef79eed6f8a0438ab5e1dc7696
32,414
import random def randomCaptchaText(char_set=CAPTCHA_LIST, captcha_size=CAPTCHA_LENGTH): """ Randomly generate a fixed-length string :param char_set: list of candidate characters :param captcha_size: length of the string :return: string """ captcha_text = [random.choice(char_set) for _ in range(captcha_size)] return ''.join(captcha_text)
ee426c26051e720636659cd013617abce2f77a5e
32,415
def integral_total(Nstrips): """ The total integral. """ return integral_4(Nstrips) + integral_1(Nstrips)
cc2468c69a3e6c98ee139125afc2f4a571cc588b
32,416
def calculate_amplitude(dem, Template, scale, age, angle): """Calculate amplitude and SNR of features using a template Parameters ---------- dem : DEMGrid Grid object of elevation data Template : WindowedTemplate Class representing template function scale : float Scale of template function in DEM cell units age : float Age parameter for template function angle : float Orientation of template in radians Returns ------- amp : np.array 2-D array of amplitudes for each DEM pixel snr : np.array 2-D array of signal-to-noise ratios for each DEM pixel """ ny, nx = dem._griddata.shape de = dem._georef_info.dx t = Template(scale, age, angle, nx, ny, de) template = t.template() curv = dem._calculate_directional_laplacian(angle) amp, age, angle, snr = match_template(curv, template) mask = t.get_window_limits() amp[mask] = 0 snr[mask] = 0 return amp, snr
b286ed97952667052a8ecfacb152b70a7a1be2ba
32,417
import os def app(request): """Session-wide Testable Flask Application """ _app.config.from_mapping( TESTING=True, SECRET_KEY=os.environ.get('SECRET_KEY'), SQLALCHEMY_DATABASE_URI=os.getenv('TEST_DATABASE_URL'), SQLALCHEMY_TRACK_MODIFICATIONS=False, WTF_CSRF_ENABLED=False, UPLOAD_FOLDER='test/test_uploads/', ) ctx = _app.app_context() ctx.push() def teardown(): ctx.pop() request.addfinalizer(teardown) return _app
bdb383cfdb5a3c857d6b2111ca2b3a1628392f69
32,418
from typing import OrderedDict def number_limit_sub_validator(entity_config: OrderedDict) -> OrderedDict: """Validate a number entity configurations dependent on configured value type.""" value_type = entity_config[CONF_TYPE] min_config: float | None = entity_config.get(NumberSchema.CONF_MIN) max_config: float | None = entity_config.get(NumberSchema.CONF_MAX) step_config: float | None = entity_config.get(NumberSchema.CONF_STEP) dpt_class = DPTNumeric.parse_transcoder(value_type) if dpt_class is None: raise vol.Invalid(f"'type: {value_type}' is not a valid numeric sensor type.") # Inifinity is not supported by Home Assistant frontend so user defined # config is required if if xknx DPTNumeric subclass defines it as limit. if min_config is None and dpt_class.value_min == float("-inf"): raise vol.Invalid(f"'min' key required for value type '{value_type}'") if min_config is not None and min_config < dpt_class.value_min: raise vol.Invalid( f"'min: {min_config}' undercuts possible minimum" f" of value type '{value_type}': {dpt_class.value_min}" ) if max_config is None and dpt_class.value_max == float("inf"): raise vol.Invalid(f"'max' key required for value type '{value_type}'") if max_config is not None and max_config > dpt_class.value_max: raise vol.Invalid( f"'max: {max_config}' exceeds possible maximum" f" of value type '{value_type}': {dpt_class.value_max}" ) if step_config is not None and step_config < dpt_class.resolution: raise vol.Invalid( f"'step: {step_config}' undercuts possible minimum step" f" of value type '{value_type}': {dpt_class.resolution}" ) return entity_config
96c33af5e3764cc6cfe0f355de216945b0ab3920
32,419
from typing import Tuple from typing import Union def patch_2D_aggregator( patches: np.ndarray, orig_shape: Tuple[int], patch_loc: np.array, count_ndarray: Union[np.array, None] = None, ) -> np.ndarray: """ Aggregate patches to a whole 2D image. Args: patches: shape is [patch_num, Channel, patch_size, patch_size] orig_shape: the image shape after aggregating patch_loc: the starting position where each patch in the original images count_ndarray: using to divide the aggregating image to average the overlapped regions """ NUM_PATCH = 4 dim_stack = [] for dim in range(patches.shape[1]): orig = np.zeros(orig_shape) for idx in range(NUM_PATCH): orig[ patch_loc[idx][0] : patch_loc[idx][0] + PATCH_SIZE, patch_loc[idx][1] : patch_loc[idx][1] + PATCH_SIZE, ] += patches[idx, dim, :, :] dim_stack.append(orig) orig = np.stack(dim_stack) if count_ndarray is not None: orig = np.divide(orig, count_ndarray) return orig.squeeze()
3fd7d98c7b792cb3df646045ad32d0cde2c94e56
32,420
from typing import Callable def vmap_grad(forward_fn: Callable, params: PyTree, samples: Array) -> PyTree: """ compute the jacobian of forward_fn(params, samples) w.r.t params as a pytree using vmapped gradients for efficiency """ complex_output = nkjax.is_complex(jax.eval_shape(forward_fn, params, samples)) real_params = not nkjax.tree_leaf_iscomplex(params) if real_params and complex_output: return vmap_grad_rc(forward_fn, params, samples) else: return vmap_grad_rr_cc(forward_fn, params, samples)
411a3ce1a38c31fef9422ed740d1a7d0a4cf887b
32,421
def random_crop_list(images, size, pad_size=0, order="CHW", boxes=None): """ Perform random crop on a list of images. Args: images (list): list of images to perform random crop. size (int): size to crop. pad_size (int): padding size. order (string): order of the 'height', 'width' and 'channel'. boxes (list): optional. Corresponding boxes to images. Dimension is 'num boxes' x 4. Returns: cropped (ndarray): the cropped list of images with dimension of 'height' x 'width' x 'channel'. boxes (list): optional. Corresponding boxes to images. Dimension is 'num boxes' x 4. """ assert order in ["CHW", "HWC"], "order {} is not supported".format(order) # explicitly dealing processing per image order to avoid flipping images. if pad_size > 0: images = [ pad_image(pad_size=pad_size, image=image, order=order) for image in images ] # image format should be CHW. if order == "CHW": if images[0].shape[1] == size and images[0].shape[2] == size: return images, boxes height = images[0].shape[1] width = images[0].shape[2] y_offset = 0 if height > size: y_offset = int(np.random.randint(0, height - size)) x_offset = 0 if width > size: x_offset = int(np.random.randint(0, width - size)) cropped = [ image[:, y_offset: y_offset + size, x_offset: x_offset + size] for image in images ] assert cropped[0].shape[1] == size, "Image not cropped properly" assert cropped[0].shape[2] == size, "Image not cropped properly" elif order == "HWC": if images[0].shape[1] == size and images[0].shape[2] == size: return images, boxes height = images[0].shape[0] width = images[0].shape[1] y_offset = 0 if height > size: y_offset = int(np.random.randint(0, height - size)) x_offset = 0 if width > size: x_offset = int(np.random.randint(0, width - size)) cropped = [ image[y_offset: y_offset + size, x_offset: x_offset + size, :] for image in images ] assert cropped[0].shape[1] == size, "Image not cropped properly" assert cropped[0].shape[2] == size, "Image not cropped properly" else: raise NotImplementedError("Unknown order {}".format(order)) if boxes is not None: boxes = [crop_boxes(proposal, x_offset, y_offset) for proposal in boxes] return cropped, boxes
e4e7933a02c356c509bbd3d7dbc54814ec1f7bc1
32,422
from typing import List def normalize_resource_paths(resource_paths: List[str]) -> List[str]: """ Takes a list of resource relative paths and normalizes to lowercase and with the "ed-fi" namespace prefix removed. Parameters ---------- resource_paths : List[str] The list of resource relative paths Returns ------- List[str] A list of normalized resource relative paths. For example: ["studentschoolassociations", "tpdm/candidates"] """ return list( map( lambda r: r.removeprefix("/").removeprefix("ed-fi/").lower(), resource_paths, ) )
ec7e5020ae180cbbdc5b35519106c0cd0697a252
32,423
def GetDegenerateSites(seq1, seq2, degeneracy=4, position=3): """returns two new sequences containing only degenerate sites. Only unmutated positions are counted. """ new_seq1 = [] new_seq2 = [] for x in range(0, len(seq1), 3): c1 = seq1[x:x + 3] c2 = seq2[x:x + 3] if c1 in GeneticCodeAA and c2 in GeneticCodeAA: if GeneticCodeAA[c1] == GeneticCodeAA[c2]: if Degeneracy[c1][position] == degeneracy \ and Degeneracy[c2][position] == degeneracy: new_seq1.append(c1[position - 1]) new_seq2.append(c2[position - 1]) return "".join(new_seq1), "".join(new_seq2)
de9aa02b6ef46cb04e64094b67436922e86e10bb
32,424
def ransac(a, b, model: str ='rigid', inlier_threshold: float = 1.0, ransac_it: int = 100): """Estimates parameters of given model by applying RANSAC on corresponding point sets A and B (preserves handedness). :param a: nx4 array of points :param b: nx4 array of points :param model: Specify the model for RANSAC. Can be 'translation', 'rigid' or 'affine' :param inlier_threshold: Specify the inlier threshold in RANSAC process :param ransac_it: number of ransac iterations :return: corresponding transformation matrix (None if no transformation was found) :raise: NotImplementedError for models which are not implemented yet""" max_ransac_it = ransac_it num_samples = 0 estimate_transformation = None assert a.shape == b.shape if a.shape[1] == 3: a = np.concatenate((a, np.ones((a.shape[0], 1))), axis=1) b = np.concatenate((b, np.ones((a.shape[0], 1))), axis=1) if model == 'translation': num_samples = 1 estimate_transformation = translation_transformation elif model == 'rigid': num_samples = 4 estimate_transformation = rigid_transformation elif model == 'affine': num_samples = 4 estimate_transformation = affine_transformation assert a.shape[0] >= num_samples best_inlier = 0 best_inlier_idx = [] best_t = None for _ in range(max_ransac_it): # random sample data for generating hypothetical inliers hyp_inliers_idx = np.random.choice(a.shape[0], size=num_samples, replace=False) hyp_inliers_a = np.array([a[i] for i in hyp_inliers_idx]) hyp_inliers_b = np.array([b[i] for i in hyp_inliers_idx]) # calculate transformation based on hypothetical inliers and selected model try: t = estimate_transformation(hyp_inliers_a, hyp_inliers_b) except AssertionError: t = np.eye(4) # calculate consensus set for this transformation b_ = np.matmul(t, a.T).T dists = [np.linalg.norm((x - y)[:3]) for x, y in zip(b_, b)] inlier_idx = [i for i, x in enumerate(dists) if x < inlier_threshold] # save better consensus set if len(inlier_idx) > best_inlier: best_inlier = len(inlier_idx) best_inlier_idx = inlier_idx best_t = t # recalculate transformation with best consensus set if len(best_inlier_idx) > 0: consensus_set_a = np.array([a[i] for i in best_inlier_idx]) consensus_set_b = np.array([b[i] for i in best_inlier_idx]) try: best_t = estimate_transformation(consensus_set_a, consensus_set_b) except AssertionError: pass return best_t, best_inlier_idx
bbbf3c7695437ef00f4fc4570033575808d84604
32,425
from typing import List from datetime import datetime def create_telescope_types(session: scoped_session, telescope_types: List, created: datetime): """Create a list of TelescopeType objects. :param session: the SQLAlchemy session. :param telescope_types: a list of tuples of telescope type id and names. :param created:the created datetime in UTC. :return: a list of TelescopeType objects. """ items = [] for type_id, name in telescope_types: item = TelescopeType(name=name, type_id=type_id, created=created, modified=created) items.append(item) session.add(item) session.commit() return items
011a8f3950fd0f4bdd3809085d74c45ae5756716
32,426
from functools import reduce def update(*p): """ Update dicts given in params with their predecessor param dict in reverse order """ return reduce(lambda x, y: x.update(y) or x, (p[i] for i in range(len(p)-1,-1,-1)), {})
de7f5adbe5504dd9b1be2bbe52e14d11e05ae86f
32,427
def alphabet_to_use(three_letter_code, parity, direction): """Return tuple of alphabet to be used for glue in given direction on tile of given parity. Note that this refers to the alphabet used for the CANONICAL direction, which may be the opposite of direction.""" if not parity in (0,1): raise ValueError('parity must be 0 or 1, cannot be %s' % parity) if not direction in directions: raise ValueError('direction must be in %s, cannot be %s' % (directions, direction)) if not three_letter_code: return ('A','C','G','T') if (parity == 1 and is_canonical(direction)) or (parity == 0 and not is_canonical(direction)): return ('A','C','T') else: return ('A','G','T')
b7bb02d5a5b9d5144ab8a6026600bd16096680aa
32,428
def get_tipranks_sentiment(collection): """ :param collection: "100-most-popular", "upcoming-earnings", "new-on-robinhood", "technology", "oil-and-gas", "finance", "software-service", "energy", "manufacturing", "consumer-products", "etf", "video-games", "social-media", "health", "entertainment" :return: pandas dataframe of collection stocks """ url = f'https://robinhood.com/collections/{collection}' [df] = pd.read_html(url) symbols = list(df.Symbol.values) for i, s in enumerate(symbols): # print("Processing {}".format(s)) url = "https://www.tipranks.com/api/stocks/getNewsSentiments/?ticker={}".format(s) s2 = pd.read_json(url, orient="index", typ="series") df2 = pd.DataFrame(s2).T # print("Processing {}: cols={}".format(s, df2.columns)) if df2.shape[1] > 0: if len(df2.buzz) > 0: df.loc[i, 'buzz'] = df2.buzz.iloc[0]['buzz'] if (df2.sentiment.any()): df.loc[i, 'bullish_pct'] = df2.sentiment.iloc[0]['bullishPercent'] df.loc[i, 'sector_avg_bullish_pct'] = df2.sectorAverageBullishPercent.iloc[0] df.loc[i, 'score'] = df2.score.iloc[0] df.loc[i, 'sector_avg_news_score'] = df2.sectorAverageNewsScore.iloc[0] return df
a6a6527314d2610f20de640aa8e17ad9234d5664
32,429
def lBoundedForward(x, lower): """ Transform from transformed (unconstrained) parameters to physical ones with a lower limit Args: x (float): vector of transformed parameters lower (float): vector with lower limits Returns: Float: transformed variables and log Jacobian """ return np.exp(x) + lower, x
af3b7613f4b08917c835c51c38b6e506f619ab6a
32,430
import datetime def date_range(begin_date, end_date): """ :param begin_date: start date, string :param end_date: end date, string :return: dates: list of dates in the given range, each element a string """ dates = [] dt = datetime.datetime.strptime(begin_date, "%Y-%m-%d") date = begin_date[:] while date <= end_date: dates.append(date) dt = dt + datetime.timedelta(days=1) date = dt.strftime("%Y-%m-%d") return dates
e168f291226fa00806992f85b3ac2c89f96b8426
32,431
import os def in_water(latitude: float, longitude: float) -> bool: """ Simple function to parse a shapefile from OpenStreet Maps. Returns a boolean signifying a location is or is not over water. LRU Caching provided via functools.lru_cache. Uses memoization to cache recently scanned results and provide large performance gains. :param latitude: float :param longitude: float :return: bool """ path = os.path.abspath('water_polygons.shp') with fiona.open(path) as fiona_collection: box_detail = 0.0001 point = Point(longitude, latitude) # here we filter to only scan results near the point in question. for record in fiona_collection.filter(bbox=( longitude+box_detail, latitude+box_detail, longitude-box_detail, latitude-box_detail)): if record['geometry']: shape = asShape(record['geometry']) if shape.contains(point): return True return False
8f2a0a5ff9adb6d16cbaefb656481e9825fb7b00
32,432
import collections def createNeighDict(rP, lP, b, c): """Finds the neighbours nearest to a lost packet in a particular tensor plane # Arguments rP: packets received in that tensor plane lp: packets lost in that tensor plane b,c : batch and channel number denoting the tensor plane # Returns Dictionary containing the neighbours nearest to the lost packets """ insertPos = np.searchsorted(rP, lP) neighDict = collections.OrderedDict() if len(rP)==0: return neighDict for i in range(len(lP)): ind = insertPos[i] #position at which lP is to be inserted in rP if ind==0: #check if insert position is at beginning i.e no top neighbour k = ((b, -1, c), (b, rP[ind], c)) # k = (tuple((b, -1, c)), tuple((b, rP[ind], c))) v = np.array([b, lP[i], c]) if k not in neighDict: neighDict[k] = v else: neighDict[k] = np.vstack((neighDict[k], v)) continue if ind==len(rP): #check if insert position is at the end i.e no bottom neighbour k = ((b, rP[-1], c), (b, 0, c)) # k = (tuple((b, rP[-1], c)), tuple((b, 0, c))) v = np.array([b, lP[i], c]) if k not in neighDict: neighDict[k] = v else: neighDict[k] = np.vstack((neighDict[k], v)) continue k = ((b, rP[ind-1], c), (b, rP[ind], c)) # k = (tuple((b, rP[ind-1], c)), tuple((b, rP[ind], c))) v = np.array([b, lP[i], c]) if tuple(k) not in neighDict: neighDict[k] = v else: neighDict[k] = np.vstack((neighDict[k], v)) return neighDict
0b67024dac04678b8cb084eb18312ed8df468d93
32,433
def get_norm_3d(norm: str, out_channels: int, bn_momentum: float = 0.1) -> nn.Module: """Get the specified normalization layer for a 3D model. Args: norm (str): one of ``'bn'``, ``'sync_bn'`` ``'in'``, ``'gn'`` or ``'none'``. out_channels (int): channel number. bn_momentum (float): the momentum of normalization layers. Returns: nn.Module: the normalization layer """ assert norm in ["bn", "sync_bn", "gn", "in", "none"], \ "Get unknown normalization layer key {}".format(norm) if norm == "gn": assert out_channels%8 == 0, "GN requires channels to separable into 8 groups" norm = { "bn": nn.BatchNorm3d, "sync_bn": nn.SyncBatchNorm, "in": nn.InstanceNorm3d, "gn": lambda channels: nn.GroupNorm(8, channels), "none": nn.Identity, }[norm] if norm in ["bn", "sync_bn", "in"]: return norm(out_channels, momentum=bn_momentum) else: return norm(out_channels)
2def355c8b775512fec9d58a4fa43a0b54734f96
32,434
import json def cancel_cheque(): """Cancel a cheque.""" user_id = '96355632' sn = request.values['sn'] result = pay_client.app_cancel_cheque(user_id, sn, ret_result=True) return render_template('sample/info.html', title='取消支票结果', msg=json.dumps({'status_code': result.status_code, 'data': result.data}))
6734aa86a1300f678a22b45407a8597255ad0a33
32,435
def to_relative_engagement(lookup_table, duration, wp_score, lookup_keys=None): """ Convert watch percentage to relative engagement. :param lookup_table: duration ~ watch percentage table, in format of dur: [1st percentile, ..., 1000th percentile] :param duration: target input duration :param wp_score: target input watch percentage score :param lookup_keys: pre-computed duration split points, for faster computation """ if lookup_keys is None: lookup_keys = lookup_table['duration'] lookup_keys = np.array(lookup_keys) if isinstance(wp_score, list): re_list = [] if isinstance(duration, list): for d, s in zip(duration, wp_score): re_list.append(to_relative_engagement(lookup_table, d, s, lookup_keys=lookup_keys)) elif isinstance(duration, int): for s in wp_score: re_list.append(to_relative_engagement(lookup_table, duration, s, lookup_keys=lookup_keys)) return re_list else: bin_idx = np.sum(lookup_keys < duration) duration_bin = np.array(lookup_table[bin_idx]) re = np.sum(duration_bin <= wp_score) / 1000 # re = (np.sum(duration_bin < wp_score) + np.sum(duration_bin <= wp_score)) / 2000 return re
cfecebe5830a7681417d6fbd14485adc6908cb5d
32,436
import dmsky.factory def factory(ptype, **kwargs): """Factory method to build `DenityProfile` objects Keyword arguments are passed to class c'tor Parameters ---------- ptype : str Density profile type Returns ------- profile : `DensityProfile` Newly created object """ prof_copy = kwargs.copy() units = prof_copy.pop('units', None) if units: density, distance = units.rsplit('_', 1) scale_density = getattr(Units, density) scale_distance = getattr(Units, distance) scale_dict_param(prof_copy, 'rhos', scale_density, DensityProfile._params['rhos'].default) scale_dict_param(prof_copy, 'rs', scale_distance, DensityProfile._params['rs'].default) scale_dict_param(prof_copy, 'rmin', scale_distance, DensityProfile._params['rmin'].default) scale_dict_param(prof_copy, 'rmax', scale_distance, DensityProfile._params['rmax'].default) scale_dict_param(prof_copy, 'rhomax', scale_density, DensityProfile._params['rhomax'].default) return dmsky.factory.factory(ptype, module=__name__, **prof_copy)
9acb4b93fc3e82e22ec0360a38def9058cea5640
32,437
import re import difflib def _search_signals(name, stype, intf): """ Given a port name and signal type find a match in the inteface. :param name: name of the port :param stype: the port signal type :param intf: the interface to search for matching signal :return: @todo: this function is hackery, not much thought was put into parsing and matching the names ... this function could use some work. """ sig = None mm = 'none' sub = intf.name # the differential signals mp = {k: v for k, v in vars(intf).items() if re.match('.*{}.*{}.*_p'.format(sub, k), name) and isinstance(v, SignalType)} # signal match ms = {k: v for k, v in vars(intf).items() if re.match('.*{}.*{}.*'.format(sub, k), name) and isinstance(v, SignalType)} if len(mp) > 0: assert len(mp) == 1, "too many {}".format(mp) nm = mp.keys()[0] sig = mp[nm] mm = '.'.join((sub, nm,)) elif len(ms) > 0: if len(ms) > 1: # @todo: need a more robust way to match signals, this works # for now but ... subname = name.split('_')[-1] names = [nm for nm, ss in ms.iteritems()] print(name, sub, names) matches = difflib.get_close_matches(subname, names) if len(matches) == 0: print("@W: a match was not found") bm = '' else: # @todo: this is odd, for the multiple matches it is always the # best-least match?? bm = matches[-1] else: bm = ms.keys()[0] sig = ms[bm] mm = '.'.join((sub, bm,)) if sig is None: pass elif not isinstance(sig, SignalType): sig = None mm = 'none' return sig, mm
d4b785ecd1da6043180f4654ae6fae8c49455b65
32,438
def r2_score(y, y_predicted): """Calculate the R2 score. Parameters ---------- y : array-like of shape = number_of_outputs Represent the target values. y_predicted : array-like of shape = number_of_outputs Target values predicted by the model. Returns ------- loss : float R2 output can be non-negative values or negative value. Becoming 1.0 means your model outputs are exactly matched by true target values. Lower values means worse results. Notes ----- This is not a symmetric function. References ---------- [1] `Wikipedia entry on the Coefficient of determination <https://en.wikipedia.org/wiki/Coefficient_of_determination>`_ Examples -------- >>> y = [3, -0.5, 2, 7] >>> y_predicted = [2.5, 0.0, 2, 8] >>> explained_variance_score(y, y_predicted) 0.948 """ numerator = ((y - y_predicted) ** 2).sum(axis=0, dtype=np.float64) denominator = ((y - np.average(y, axis=0)) ** 2).sum(axis=0, dtype=np.float64) nonzero_denominator = denominator != 0 nonzero_numerator = numerator != 0 valid_score = nonzero_denominator & nonzero_numerator output_scores = np.ones([y.shape[0]]) output_scores[valid_score] = (1 - (numerator[valid_score] / denominator[valid_score])) # arbitrary set to zero to avoid -inf scores, having a constant # y_true is not interesting for scoring a regression anyway output_scores[nonzero_numerator & ~nonzero_denominator] = 0. return np.average(output_scores)
00e8004f076e8147f70896bd5304cd73c389522e
32,439
def vcg_solve(goal): """Compute the verification conditions for a hoare triple, then solves the verification conditions using SMT. """ assert goal.is_comb("Valid", 3), "vcg_solve" P, c, Q = goal.args T = Q.get_type().domain_type() pt = vcg_norm(T, goal) vc_pt = [ProofTerm("z3", vc, []) for vc in pt.assums] return ProofTerm("vcg", goal, vc_pt)
9f496edd1a3725640582f4016a086fd5dfe70d72
32,440
def ism_extinction(av_mag: float, rv_red: float, wavelengths: np.ndarray) -> np.ndarray: """ Function for calculating the optical and IR extinction with the empirical relation from Cardelli et al. (1989). Parameters ---------- av_mag : float Extinction (mag) in the V band. rv_red : float Reddening in the V band, ``R_V = A_V / E(B-V)``. wavelengths : np.ndarray Array with the wavelengths (um) for which the extinction is calculated. Returns ------- np.ndarray Extinction (mag) at ``wavelengths``. """ x_wavel = 1./wavelengths y_wavel = x_wavel - 1.82 a_coeff = np.zeros(x_wavel.size) b_coeff = np.zeros(x_wavel.size) indices = np.where(x_wavel < 1.1)[0] if len(indices) > 0: a_coeff[indices] = 0.574*x_wavel[indices]**1.61 b_coeff[indices] = -0.527*x_wavel[indices]**1.61 indices = np.where(x_wavel >= 1.1)[0] if len(indices) > 0: a_coeff[indices] = 1. + 0.17699*y_wavel[indices] - 0.50447*y_wavel[indices]**2 - \ 0.02427*y_wavel[indices]**3 + 0.72085*y_wavel[indices]**4 + \ 0.01979*y_wavel[indices]**5 - 0.77530*y_wavel[indices]**6 + 0.32999*y_wavel[indices]**7 b_coeff[indices] = 1.41338*y_wavel[indices] + 2.28305*y_wavel[indices]**2 + \ 1.07233*y_wavel[indices]**3 - 5.38434*y_wavel[indices]**4 - \ 0.62251*y_wavel[indices]**5 + 5.30260*y_wavel[indices]**6 - 2.09002*y_wavel[indices]**7 return av_mag * (a_coeff + b_coeff/rv_red)
cb2bba0cbb396fbac900492fe9b49d70646a4255
32,441
def rangify(values): """ Given a list of integers, returns a list of tuples of ranges (integer pairs). :param values: :return: """ previous = None start = None ranges = [] for r in values: if previous is None: previous = r start = r elif r == previous + 1: pass else: # r != previous + 1 ranges.append((start, previous)) start = r previous = r ranges.append((start, previous)) return ranges
672b30d4a4ce98d2203b84db65ccebd53d1f73f5
32,442
def load_balancers_with_instance(ec2_id): """ @param ec2_id: ec2 instance id @return: list of elb names with the ec2 instance attached """ elbs = [] client = boto3.client('elb') paginator = client.get_paginator('describe_load_balancers') for resp in paginator.paginate(): for elb in resp['LoadBalancerDescriptions']: # filter for ec2_instance ec2_ids = [i['InstanceId'] for i in elb['Instances']] if ec2_id in ec2_ids: elbs.append(elb['LoadBalancerName']) return elbs
b9ad53b7cafdbc44f88044e976a700a187605b2d
32,443
def parse_json_frequency_high(df, column, key): """ Takes a JETS dataframe and column containing JSON strings and finds the highest 'Mode' or 'Config' frequency. Excludes intermediate frequencies Parameters ---------- df : pandas dataframe JETS dataframe column : str The column where the frequencies are located key : str A substring of the key you are looking for in the json record(ex. 'FREQ' or 'MIN_FREQ') Returns ------- float """ parsed_json = json_extract_with_key(df['{}'.format(column)], key) arr = [] for i in range(len(parsed_json)): if search("MODE", parsed_json[i][0]): arr.append(parsed_json[i][1]) elif search("CONFIG", parsed_json[i][0]): arr.append(parsed_json[i][1]) try: return max(arr) except: return ''
e8489ed7bca2357d4be3421932898696daf27bac
32,444
def adapter_checker(read, args): """ Retrieves the end sequences and sorts adapter information for each end. """ cigar = read.cigartuples seq = read.query_sequence leftend, check_in_softl, left_match = get_left_end(seq, cigar, args) rightend, check_in_softr, right_match = get_right_end(seq, cigar, args) left_cigar_info = cigar right_cigar_info = list(cigar)[::-1] if leftend != "no softclip": three_left = get_adapter_info(Seq(leftend).complement(), args.three_adapter, args.three_score, left_cigar_info, args, check_in_softl, Seq(left_match).complement()) five_left = get_adapter_info(leftend, args.five_adapter, args.five_score, left_cigar_info, args, check_in_softl, left_match) else: three_left = (0, 0, 0, "no softclip") five_left = (0, 0, 0, "no softclip") if rightend != "no softclip": three_right = get_adapter_info(rightend[::-1], args.three_adapter, args.three_score, right_cigar_info, args, check_in_softr, right_match[::-1]) five_right = get_adapter_info(Seq(rightend).reverse_complement(), args.five_adapter, args.five_score, right_cigar_info, args, check_in_softr, Seq(right_match).reverse_complement()) else: three_right = (0, 0, 0, "no softclip") five_right = (0, 0, 0, "no softclip") return {"l3": three_left, "r3": three_right, "l5": five_left, "r5": five_right}
096fff89eafe7ce7915daac55e95a2ea51e7f302
32,445
def create_pane(widgets, horizontal, parent_widget=None, compact=False, compact_spacing=2): """Create a widget containing an aligned set of widgets. Args: widgets (list of `QWidget`). horizontal (bool). align (str): One of: - 'left', 'right' (horizontal); - 'top', 'bottom' (vertical) parent_widget (`QWidget`): Owner widget, QWidget is created if this is not provided. Returns: `QWidget` """ pane = parent_widget or QtWidgets.QWidget() type_ = QtWidgets.QHBoxLayout if horizontal else QtWidgets.QVBoxLayout layout = type_() if compact: layout.setSpacing(compact_spacing) layout.setContentsMargins(compact_spacing, compact_spacing, compact_spacing, compact_spacing) for widget in widgets: stretch = 0 if isinstance(widget, tuple): widget, stretch = widget if isinstance(widget, int): layout.addSpacing(widget) elif widget: layout.addWidget(widget, stretch) else: layout.addStretch() pane.setLayout(layout) return pane
f291b6482c8d5bb8ecb312b5f5747cf6c4e36e53
32,446
from typing import List from typing import Dict import collections import sys def filterbyasn(ips: List[Dict], max_per_asn: Dict, max_per_net: int) -> List[Dict]: """ Prunes `ips` by (a) trimming ips to have at most `max_per_net` ips from each net (e.g. ipv4, ipv6); and (b) trimming ips to have at most `max_per_asn` ips from each asn in each net. """ # Sift out ips by type ips_ipv46 = [ip for ip in ips if ip['net'] in ['ipv4', 'ipv6']] ips_onion = [ip for ip in ips if ip['net'] == 'onion'] # Filter IPv46 by ASN, and limit to max_per_net per network result = [] net_count: Dict[str, int] = collections.defaultdict(int) asn_count: Dict[int, int] = collections.defaultdict(int) for i, ip in enumerate(ips_ipv46): if i % 10 == 0: # give progress update print(f"{i:6d}/{len(ips_ipv46)} [{100*i/len(ips_ipv46):04.1f}%]\r", file=sys.stderr, end='', flush=True) if net_count[ip['net']] == max_per_net: # do not add this ip as we already too many # ips from this network continue asn = lookup_asn(ip['net'], ip['ip']) if asn is None or asn_count[asn] == max_per_asn[ip['net']]: # do not add this ip as we already have too many # ips from this ASN on this network continue asn_count[asn] += 1 net_count[ip['net']] += 1 result.append(ip) # Add back Onions (up to max_per_net) result.extend(ips_onion[0:max_per_net]) return result
c26c3b0d5a4191ade6b4e25134bc72d0c4792b32
32,447
def image_TOKEN_search_by_word_query_TOKEN(query_snd_ix, multi_distances, snd_fnames, img_fnames, id2pic): """map a word token query into the embedding space and find images in the same space return rank of first neighbor whose TOKEN is in the picture list of the id""" n_images, n_sounds = multi_distances.shape query_id = snd_fnames[query_snd_ix] img_neighbors = np.argsort(multi_distances[:, query_snd_ix]) pictures_for_query = id2pic[query_id] rank = img_neighbors.shape[0] for i in xrange(img_neighbors.shape[0]): if img_fnames[img_neighbors[i]] in pictures_for_query: rank = i + 1 break return rank
81fcd8ef466e4712cbab396ed55626e61f297fac
32,448
import os import jinja2 def get_package_environment(): """Loads templates from the current Python package""" templates_dir = os.path.dirname(templates.__file__) template_loader = jinja2.FileSystemLoader(searchpath=templates_dir) return jinja2.Environment(loader=template_loader)
434d59d97109f369bca31d42b476a9f3810296b5
32,449
import auth import os import sys def import_token(): """ Attempts to get the discord token from auth file then env variable Returns: (string) """ try: return auth.DISCORD_HACKSPACE_TOKEN except ImportError: try: return os.environ["DISCORD_HACKSPACE_TOKEN"] except KeyError: print(""" ERROR: DISCORD_HACKSPACE_TOKEN neither provided by auth file or environmental variable!" """ ) sys.exit()
6728a6cb4d54ad66d0512fb2aacb85540dd689c5
32,450
from packaging import version def _evolve_angles_forwards( mass_1, mass_2, a_1, a_2, tilt_1, tilt_2, phi_12, f_start, final_velocity, tolerance, dt, evolution_approximant ): """Wrapper function for the SimInspiralSpinTaylorPNEvolveOrbit function Parameters ---------- mass_1: float primary mass of the binary mass_2: float secondary mass of the binary a_1: float primary spin magnitude a_2: float secondary spin magnitude tilt_1: float primary spin tilt angle from the orbital angular momentum tilt_2: float secondary spin tilt angle from the orbital angular momentum phi_12: float the angle between the in-plane spin components f_start: float frequency to start the evolution from final_velocity: float Final velocity to evolve the spins up to tolerance: float Only evolve spins if at least one spins magnitude is greater than tolerance dt: float steps in time for the integration, in terms of the mass of the binary evolution_approximant: str name of the approximant you wish to use to evolve the spins. """ if np.logical_or(a_1 > tolerance, a_2 > tolerance): # Total mass in seconds total_mass = (mass_1 + mass_2) * MTSUN_SI f_final = final_velocity ** 3 / (total_mass * np.pi) _approx = getattr(lalsimulation, evolution_approximant) if version.parse(lalsimulation.__version__) >= version.parse("2.5.2"): spinO = 6 else: spinO = 7 data = SimInspiralSpinTaylorPNEvolveOrbit( deltaT=dt * total_mass, m1=mass_1 * MSUN_SI, m2=mass_2 * MSUN_SI, fStart=f_start, fEnd=f_final, s1x=a_1 * np.sin(tilt_1), s1y=0., s1z=a_1 * np.cos(tilt_1), s2x=a_2 * np.sin(tilt_2) * np.cos(phi_12), s2y=a_2 * np.sin(tilt_2) * np.sin(phi_12), s2z=a_2 * np.cos(tilt_2), lnhatx=0., lnhaty=0., lnhatz=1., e1x=1., e1y=0., e1z=0., lambda1=0., lambda2=0., quadparam1=1., quadparam2=1., spinO=spinO, tideO=0, phaseO=7, lscorr=0, approx=_approx ) # Set index to take from array output by SimInspiralSpinTaylorPNEvolveOrbit: # -1 for evolving forward in time and 0 for evolving backward in time if f_start <= f_final: idx_use = -1 else: idx_use = 0 a_1_evolve = np.array( [ data[2].data.data[idx_use], data[3].data.data[idx_use], data[4].data.data[idx_use] ] ) a_2_evolve = np.array( [ data[5].data.data[idx_use], data[6].data.data[idx_use], data[7].data.data[idx_use] ] ) Ln_evolve = np.array( [ data[8].data.data[idx_use], data[9].data.data[idx_use], data[10].data.data[idx_use] ] ) tilt_1_evol, tilt_2_evol, phi_12_evol = \ tilt_angles_and_phi_12_from_spin_vectors_and_L( a_1_evolve, a_2_evolve, Ln_evolve ) else: tilt_1_evol, tilt_2_evol, phi_12_evol = tilt_1, tilt_2, phi_12 return tilt_1_evol, tilt_2_evol, phi_12_evol
b4a000db741aab65076ca2257230aaac45634465
32,451
import logging def get_execution(execution): """Get an execution""" logging.info('[ROUTER]: Getting execution: '+execution) include = request.args.get('include') include = include.split(',') if include else [] exclude = request.args.get('exclude') exclude = exclude.split(',') if exclude else [] try: execution = ExecutionService.get_execution(execution, current_identity) except ExecutionNotFound as e: logging.error('[ROUTER]: '+e.message) return error(status=404, detail=e.message) except Exception as e: logging.error('[ROUTER]: '+str(e)) return error(status=500, detail='Generic Error') return jsonify(data=execution.serialize(include, exclude)), 200
dfaba70a41e74423f86eca2c645f71dd2c4117ac
32,452
def fz_Kd_singlesite(K: float, p: np.ndarray, x: np.ndarray) -> np.ndarray: """Fit function for Cl titration.""" return (p[0] + p[1] * x / K) / (1 + x / K)
8054447e87c70adb4d6f505c45336ccd839a69c9
32,453
def show_exam_result(request, course_id, submission_id): """ Returns exam result template """ course_obj = get_object_or_404(Course, pk=course_id) submission_obj = get_object_or_404(Submission, pk=submission_id) submission_choices = submission_obj.choices.all() choice_ids = [choice_obj.id for choice_obj in submission_choices] max_score, question_score = 0, 0 course_questions = course_obj.question_set.all() for question in course_questions: max_score += question.grade if question.is_get_score(choice_ids): question_score += question.grade context = { "course": course_obj, "choices": submission_choices, "grade": int(question_score / max_score * 100), } print(question_score, max_score) return render(request, 'onlinecourse/exam_result_bootstrap.html', context)
e61ffc8748e3a9aa2cb62e4ed277a08f0be05c07
32,454
def createInvoiceObject(account_data: dict, invoice_data: dict) -> dict: """ example: https://wiki.wayforpay.com/view/852498 param: account_data: dict merchant_account: str merchant_password: str param: invoice_data regularMode -> one of [ 'once', 'daily', 'weekly', 'quartenly', 'monthly', 'halfyearly', 'yearly' ] merchantPassword : str amount : str currency : str dateNext -> dd.mm.yyyy : str dateEnd -> dd.mm.yyyy : str orderReference -> timestamp : str email -> client email to notify return: object for invoice creation """ return { "requestType": "CREATE", "merchantAccount": account_data['merchant_account'], "merchantPassword": account_data['merchant_password'], "regularMode": invoice_data['regularMode'], "amount": str(invoice_data['amount']), "currency": invoice_data['currency'], "dateBegin": invoice_data['dateBegin'], "dateEnd": invoice_data['dateEnd'], "orderReference": str(invoice_data['orderReference']), "email": invoice_data['email'] }
0ea61d68916c5b6f43e568cb1978bcb05b8eba04
32,455
from sklearn.cluster import KMeans def _kmeans_seed_points(points, D, d, C, K, trial=0): """A seed point generation function that puts the seed points at customer node point cluster centers using k-Means clustering.""" kmeans = KMeans(n_clusters=K, random_state=trial).fit(points[1:]) return kmeans.cluster_centers_.tolist()
7131b53b9cf0c8719daa577f7a03c41f068df90d
32,456
def site_geolocation(site): """ Obtain lat-lng coordinate of active trials in the Cancer NCI API""" try: latitude = site['org_coordinates']['lat'] longitude = site['org_coordinates']['lon'] lat_lng = tuple((latitude, longitude)) return lat_lng except KeyError: # key ['org_coordinates'] is missing return None
9fefcd3f49d82233005c88e645efd1c00e1db564
32,457
def get_scaling_desired_nodes(sg): """ Returns the number of desired nodes the scaling group will have in the future """ return sg.get_state()["desired_capacity"]
5a417f34d89c357e12d760b28243714a50a96f02
32,458
def _BitmapFromBufferRGBA(*args, **kwargs): """_BitmapFromBufferRGBA(int width, int height, buffer data) -> Bitmap""" return _gdi_._BitmapFromBufferRGBA(*args, **kwargs)
91fc08c42726ad101e1d060bf4e60d498d1f0b0f
32,459
def get_user_analysis_choice(): """ Function gets the user input to determine what kind of data quality metrics s/he wants to investigate. :return: analytics_type (str): the data quality metric the user wants to investigate percent_bool (bool): determines whether the data will be seen as 'percentage complete' or individual instances of a particular error target_low (bool): determines whether the number displayed should be considered a desirable or undesirable characteristic """ analysis_type_prompt = \ "\nWhat kind of analysis over time report would you like " \ "to generate for each site?\n\n" \ "A. Duplicates\n" \ "B. Amount of data following death dates\n" \ "C. Amount of data with end dates preceding start dates\n" \ "D. Success rate for concept_id field\n" \ "E. Population of the 'unit' field in the measurement table (" \ "only for specified measurements)\n" \ "F. Population of the 'route' field in the drug exposure table\n" \ "G. Percentage of expected drug ingredients observed\n" \ "H. Percentage of expected measurements observed\n" \ "I. Date consistency across tables \n\n" \ "Please specify your choice by typing the corresponding letter." user_command = input(analysis_type_prompt).lower() choice_dict = { 'a': 'duplicates', 'b': 'data_after_death', 'c': 'end_before_begin', 'd': 'concept', 'e': 'measurement_units', 'f': 'drug_routes', 'g': 'drug_success', 'h': 'sites_measurement', 'i': 'visit_date_disparity'} while user_command not in choice_dict.keys(): print("\nInvalid choice. Please specify a letter that corresponds " "to an appropriate analysis report.\n") user_command = input(analysis_type_prompt).lower() # NOTE: This dictionary needs to be expanded in the future percentage_dict = { 'duplicates': False, 'data_after_death': True, 'end_before_begin': True, 'concept': True, 'measurement_units': True, 'drug_routes': True, 'drug_success': True, 'sites_measurement': True, 'visit_date_disparity': True } # dictionary indicates if the target is to minimize or maximize number target_low = { 'duplicates': True, 'data_after_death': True, 'end_before_begin': True, 'concept': False, 'measurement_units': False, 'drug_routes': False, 'drug_success': False, 'sites_measurement': False, 'visit_date_disparity': False } analytics_type = choice_dict[user_command] percent_bool = percentage_dict[analytics_type] target_low = target_low[analytics_type] return analytics_type, percent_bool, target_low
58ebda03cd4eb12c92951649fc946b00eb1a8075
32,460
def points_to_segments(points): """Convert a list of points, given in clockwise order compared to the inside of the system to a list of segments. The last point being linked to the first one. Args: points (list): list of lists of size 2 Returns: [np.ndarray]: 2D-array of segments - each row is [x1,y1,x2,y2]. """ nb_of_points = len(points) points = np.array(points) first, last = points[0], points[-1] segments = np.concatenate((points[:nb_of_points-1],points[1:]), axis = 1) segments = np.concatenate((segments, np.expand_dims(np.concatenate((last,first)), axis = 0)), axis = 0) return segments # --------------------- Utils functions -------------------- #
1e560d8e752d34250f73c6e2305c7741a14afe04
32,461
def resnet_v1_34(inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, spatial_squeeze=True, reuse=None, scope='resnet_v1_34', **kwargs): """ResNet-34 model of [1]. See resnet_v1() for arg and return description.""" blocks = [ resnet_v1_simple_block('block1', out_depth=64, num_units=3, stride=1), resnet_v1_simple_block('block2', out_depth=128, num_units=4, stride=2), resnet_v1_simple_block('block3', out_depth=256, num_units=6, stride=2), resnet_v1_simple_block('block4', out_depth=512, num_units=3, stride=2), ] return resnet_v1(inputs, blocks, num_classes, is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=True, spatial_squeeze=spatial_squeeze, reuse=reuse, scope=scope, **kwargs)
26e7d866a14d6a17acf92c10e4c5d48883c6b5c7
32,462
from typing import Union def from_dlpack(x: Union[ivy.Array, ivy.NativeArray]) -> ivy.Array: """Returns a new array containing the data from another (array) object with a ``__dlpack__`` method. Parameters ---------- x object input (array) object. Returns ------- ret an array containing the data in `x`. .. admonition:: Note :class: note The returned array may be either a copy or a view. See :ref:`data-interchange` for details. """ return _cur_backend(x).from_dlpack(x)
c3213a607eb150791e74ffb8a5781e789dcd989f
32,463
def render_webpage_string(vegalite_spec: str) -> str: """ Renders the given Vega-lite specification into a string of an HTML webpage that displays the specified plots. :param vegalite_spec str: The Vega-lite plot specification to create a webpage for. :returns: A string of a webpage with the specified plots. :rtype: str """ with open(PLOTS_TEMPLATE_FILE, "r") as template_file: return chevron.render(template_file, {SPEC_TAG: vegalite_spec})
6b9c410046d6aba3b3de04bcb2cce4779f55b3d1
32,464
def _parse_blog(element): """ Parse and return general blog data (title, tagline, etc.). """ title = element.find("./title").text tagline = element.find("./description").text language = element.find("./language").text site_url = element.find("./{%s}base_site_url" % WP_NAMESPACE).text blog_url = element.find("./{%s}base_blog_url" % WP_NAMESPACE).text return { "title": title, "tagline": tagline, "language": language, "site_url": site_url, "blog_url": blog_url, }
a2678c0e55a8db5aee042744f1f343c96c7fe6f1
32,465
def summarize_block(block): """ Return the sentence that best summarizes block. """ sents = nltk.sent_tokenize(block) word_sents = map(nltk.word_tokenize, sents) d = dict((compute_score(word_sent, word_sents), sent) for sent, word_sent in zip(sents, word_sents)) return d[max(d.keys())]
991d389366f0587f7dc7fe2eaf6966d0b531012f
32,466
def weekend_subsets_3_2_rule(M, i, t, w, e, d1, d2): """ TODO: Write me :param M: Model :param i: :param t: :param w: :param e: :param d1: :param d2: :return: """ days_subset = [d1, d2] return sum(M.TourTypeDay[i, t, d, w] for d in days_subset) <= \ sum(M.MultiWeekDaysWorked[i, t, p1, p2] * min(len(days_subset), (M.A_mwdw[t, p1, w] - M.A_num_wkend_days[p2, w, t, e])) for p1 in pyo.sequence(M.num_mwdw_patterns[t]) for p2 in pyo.sequence(M.num_weekend_patterns[e, t]))
16623cac436f51ef70e5c3448737d74b0e4d47c9
32,467
def get_club_result() -> list: """ Returns the club's page. """ d = api_call("ion", "activities") while "next" in d and d["next"] is not None: for result in d["results"]: if "cube" in result["name"].lower(): return result d = api_call("ion", d["next"], False)
3f335aeb2c476dc29e0d335b00722e5b56eb6716
32,468
def DiffuserConst_get_decorator_type_name(): """DiffuserConst_get_decorator_type_name() -> std::string""" return _RMF.DiffuserConst_get_decorator_type_name()
97c9a68d35b079a1ecbf71a742c6799c4b6411bb
32,469
from tqdm import tqdm def lemmatizer(): """ Substitutes words with their lemma """ lemmatizer = WordNetLemmatizer() preprocessor = lambda text: [lemmatizer.lemmatize(w) for w in \ text.split(" ")] def preprocess(name, dataset): description = " Running NLTK Lemmatizer - preprocessing dataset " description += "{}...".format(name) data = [preprocessor(x) for x in tqdm(dataset, desc=description)] return data return preprocess
627ce460abb71969ac3f19832f2854a1a00db7c3
32,470
import io

import numpy as np


def convert_numpy_array(numpy_array: np.ndarray):
    """
    Converts a numpy array into compressed bytes
    :param numpy_array: An array that is going to be converted into bytes
    :return: A BytesIO object that contains compressed bytes
    """
    compressed_array = io.BytesIO()  # np.save() requires a file-like object to write to
    np.save(compressed_array, numpy_array, allow_pickle=True, fix_imports=False)
    return compressed_array
1fe24003d00736b86361cf5eef03da304edc6bf6
32,471
def notas(*valores, sit=False):
    """
    -> Function to analyse the grades and overall situation of several students.
    :param valores: one or more student grades (accepts several)
    :param sit: optional flag indicating whether or not to add the class situation
    :return: dictionary with several pieces of information about the class situation.
    """
    dicionario = dict()
    dicionario["Quantidade de notas"] = len(valores)
    dicionario["Maior nota"] = max(valores)
    dicionario["Menor nota"] = min(valores)
    dicionario["Média da turma"] = sum(valores) / len(valores)
    if sit:
        if dicionario["Média da turma"] >= 7.0:
            dicionario["A situação"] = 'BOA'
        elif 5.0 <= dicionario["Média da turma"] < 7.0:
            dicionario["A situação"] = 'RAZOÁVEL'
        else:
            dicionario["A situação"] = 'RUIM'
    return dicionario
a6915e9b7b1feef0db2be6fdf97b6f236d73f282
32,472
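A minimal usage sketch for notas() above, with hypothetical grades; the dictionary keys are the Portuguese strings used inside the function.

resultado = notas(5.5, 9.5, 10.0, 6.5, sit=True)
print(resultado["Média da turma"])  # 7.875
print(resultado["A situação"])      # 'BOA' (mean >= 7.0)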
def append_OrbitSection(df):
    """Use OrbitDirection flags to identify 4 sections in each orbit."""
    df["OrbitSection"] = 0
    ascending = (df["OrbitDirection"] == 1) & (df["QDOrbitDirection"] == 1)
    descending = (df["OrbitDirection"] == -1) & (df["QDOrbitDirection"] == -1)
    df["OrbitSection"].mask(
        (df["QDLat"] > 50) & ascending, 1, inplace=True
    )
    df["OrbitSection"].mask(
        (df["QDLat"] > 50) & descending, 2, inplace=True
    )
    df["OrbitSection"].mask(
        (df["QDLat"] < -50) & descending, 3, inplace=True
    )
    df["OrbitSection"].mask(
        (df["QDLat"] < -50) & ascending, 4, inplace=True
    )
    return df
4f2cad6cb2facf6a7a8c7a89ed7b3df0a56a54c2
32,473
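A hypothetical usage sketch for append_OrbitSection() with a toy DataFrame; note that the in-place .mask() calls rely on pre-copy-on-write pandas semantics, so behaviour may differ under pandas 2.x with copy-on-write enabled.

import pandas as pd

df = pd.DataFrame({
    "QDLat": [60.0, 60.0, -60.0, -60.0, 0.0],
    "OrbitDirection": [1, -1, -1, 1, 1],
    "QDOrbitDirection": [1, -1, -1, 1, 1],
})
# Expected sections for these rows: [1, 2, 3, 4, 0]
print(append_OrbitSection(df)["OrbitSection"].tolist())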
def _get_old_time(request):
    """
    Gets the alarm time the user wants to change

    Args:
        request (Request): contains info about the conversation up to this point
            (e.g. domain, intent, entities, etc)

    Returns:
        string: resolved 24-hour time in XX:XX:XX format
    """
    old_time_entity = next(
        (e for e in request.entities if e['role'] == 'old_time'), None)
    if old_time_entity:
        duckling_result = parse_numerics(old_time_entity['text'].lower(),
                                         dimensions=['time'])
        for candidate in duckling_result[0]:
            if candidate['body'] == old_time_entity['text'].lower():
                return candidate['value']['value'][TIME_START_INDEX:TIME_END_INDEX]
    else:
        return None
6a2929ccffb4b397bd9f1dd044e70c871e302e33
32,474
def sxxxxx(p, nss):
    """
    Defines a scalar wavefunction. Input momenta have shape (num events, 4).

    Parameters
    ----------
        p: tf.Tensor, scalar boson four-momenta of shape=(None,4)
        nss: tf.Tensor, final|initial state of shape=(), values=(+1|-1)

    Returns
    -------
        phi: tf.Tensor, scalar wavefunction of shape=(3,None)
    """
    v0 = tf.expand_dims(complex_tf(p[:, 0] * nss, p[:, 3] * nss), 1)
    v1 = tf.expand_dims(complex_tf(p[:, 1] * nss, p[:, 2] * nss), 1)
    v = tf.expand_dims(complex_tf(1.0, 0.0), 1)
    phi = tf.concat([v0, v1, v], axis=1)
    return tf.transpose(phi)
429fe82c9781ec8918fe57a68e899f899df8f32f
32,475
import os


def get_user_directories_directory():
    """
    Determines the directory where user directories are stored.

    This is actually not that easy, and different systems have different ways
    of doing it. So, we try adding a user called '_chaptest_' just to see where
    the directory goes, and use that.
    """
    global _udd
    if _udd is not None:
        return _udd
    try:
        testuser = "_chaptest_"
        useradd(testuser)
        userinfo = lookup_user(testuser)
        _udd = os.path.dirname(userinfo.pw_dir)
        userdel(testuser)
    except Exception:
        _udd = "/"  # default if any error occurs
    return _udd
7546d4d67b4b74fda1ce9cf80aa4fdec185b471c
32,476
import numpy as np
from scipy.optimize import minimize


def target_risk_contributions(target_risk, cov):
    """
    Returns the portfolio weights such that the contributions to portfolio risk
    are as close as possible to the target_risk, given the covariance matrix
    """
    n = cov.shape[0]
    init_guess = np.repeat(1 / n, n)
    bounds = ((0.0, 1.0),) * n  # an N-tuple of 2-tuples!
    # construct the constraints
    weights_sum_to_1 = {'type': 'eq',
                        'fun': lambda weights: np.sum(weights) - 1
                        }

    def msd_risk(weights, target_risk, cov):
        """
        Returns the Mean Squared Difference in risk contributions
        between weights and target_risk
        """
        w_contribs = risk_contribution(weights, cov)
        return ((w_contribs - target_risk) ** 2).sum()

    weights = minimize(msd_risk, init_guess,
                       args=(target_risk, cov), method='SLSQP',
                       options={'disp': False},
                       constraints=(weights_sum_to_1,),
                       bounds=bounds)
    return weights.x
82d338f2bc8c6b712e7489b70a3122eee21d0aab
32,477
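A hedged usage sketch for target_risk_contributions(); it assumes the companion risk_contribution() helper from the same module is importable, along with numpy and scipy.

import numpy as np

cov = np.array([[0.04, 0.006],
                [0.006, 0.09]])
# Ask each of the two assets to contribute 50% of total portfolio risk.
w = target_risk_contributions(target_risk=np.repeat(0.5, 2), cov=cov)
print(w)  # weights summing to 1, tilted toward the lower-variance asset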
import datetime
import urllib.request

import xarray as xr
import numpy as np


def read_monthly_indices_from_CLIMEXP(name_of_index):
    """ Try reading various monthly indices from KNMI's Climate Explorer """

    name_to_url = {
        'M1i': 'http://climexp.knmi.nl/data/iM1.dat',  # 1910 ->
        'M2i': 'http://climexp.knmi.nl/data/iM2.dat',  # 1910 ->
        'M3i': 'http://climexp.knmi.nl/data/iM3.dat',
        'M4i': 'http://climexp.knmi.nl/data/iM4.dat',  # 1910 ->
        'M5i': 'http://climexp.knmi.nl/data/iM5.dat',
        'M6i': 'http://climexp.knmi.nl/data/iM6.dat',  # 1910 ->
        'NAO': 'http://climexp.knmi.nl/data/inao.dat',  # 1821 ->
        'NINO12': 'http://climexp.knmi.nl/data/inino2.dat',
        'NINO3': 'http://climexp.knmi.nl/data/inino3.dat',
        'NINO34': 'http://climexp.knmi.nl/data/inino5.dat',
        'NINO4': 'http://climexp.knmi.nl/data/inino4.dat',
        'AMO1': 'http://climexp.knmi.nl/data/iamo_hadsst.dat',
        'AMO2': 'http://climexp.knmi.nl/data/iamo_hadsst_ts.dat',
        'PDO1': 'http://climexp.knmi.nl/data/ipdo.dat',
        'PDO2': 'http://climexp.knmi.nl/data/ipdo_hadsst3.dat',
        'SOI': 'http://climexp.knmi.nl/data/isoi.dat',
    }

    url_string = name_to_url[name_of_index]
    try:
        fp2 = urllib.request.urlopen(url_string)
        data_extracted = fp2.readlines()
    except:
        pass

    data_asarray = []
    for row in range(len(data_extracted)):
        try:
            dline = np.array(data_extracted[row].split()).astype(float)
        except:
            dline = []
        if (len(dline) > 0):
            data_asarray.append(np.array(data_extracted[row].split()).astype(float))

    data = np.array(data_asarray)
    dates = np.array([])
    data_years = data[:, 0].astype(int)

    if (data.shape[1] > 3):
        data_tser = data[:, 1:13].ravel()
        for y in data_years:
            for m in range(1, 13):
                dates = np.append(dates, datetime.date(y, m, 1))

    if (data.shape[1] <= 3):
        data_tser = data[:, 2]
        for row in data:
            dates = np.append(dates, datetime.date(int(row[0]), int(row[1]), 1))

    data_tser[data_tser < -990] = np.nan

    if (name_of_index == 'Volc'):
        data_tser[data_tser == 0] = np.nan
        data_tser = np.sqrt(data_tser)

    data_tser[np.isinf(data_tser)] = np.nan
    data_tser = inpaint_nans(data_tser)

    date_range = [1800, 2150]
    idxs = np.zeros(dates.shape, bool)
    for i, date in enumerate(dates):
        if ((date.year >= date_range[0]) & (date.year <= date_range[1])):
            idxs[i] = True

    ds = xr.Dataset(data_vars={name_of_index: ('time', data_tser[idxs])},
                    coords={'time': dates[idxs].astype(np.datetime64)})

    return ds.resample(time='1M').mean()
8ea42dbca11e587267ef8e2c13ee1787be9db430
32,478
import numpy as np


def generate_xdataEMX(parm):
    """
    Generate the x data from the parameters dictionary

    Parameters:
        parm: [dict] parameters

    Returns:
        xdata = nd.array[XNbPoints]
    """
    # Extracts the x axis data from the parameter file
    try:
        xpoints = parm['SSX']
    except KeyError:
        xpoints = parm['ANZ']
    try:
        xwid = parm['GSI']
        xstart = parm['GST']
    except KeyError:
        xwid = parm['XXWI']
        xstart = parm['XXLB']
    xdata = np.linspace(xstart, xstart + xwid, int(xpoints))
    return xdata
a65b48e51f5013fe82d0b9baafe70330b15f0477
32,479
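A small usage sketch for generate_xdataEMX() with a hypothetical EMX parameter dictionary using the 'SSX'/'GSI'/'GST' keys.

parm = {"SSX": 1024, "GSI": 100.0, "GST": 3350.0}
xdata = generate_xdataEMX(parm)
print(xdata[0], xdata[-1], xdata.size)  # 3350.0 3450.0 1024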
import csv
from math import cos, sin


def csv_2d_cartesian(filename, polar=False, scan=False):
    """extract 2d cartesian coordinates from a file"""
    x_values = []
    y_values = []
    with open(filename) as data_file:
        odom_data = csv.reader(data_file)
        for row in odom_data:
            # if scan:
            #     row[1] = pi/2 + float(row[1])
            if polar:
                x = float(row[0]) * cos(float(row[1]))
                y = float(row[0]) * sin(float(row[1]))
            else:
                x = float(row[0])
                y = float(row[1])
            x_values.append(x)
            y_values.append(y)
    return x_values, y_values
648a8f9bbee8b0b61284bf5a8b93c729bb085c9d
32,480
def get_edge_similarity(node_pos, neighbor_positions):
    """
    useful for finding approximate colinear neighbors.
    """
    displacements = get_displacement_to_neighbors(node_pos, neighbor_positions)
    n_neighbors = neighbor_positions.shape[0]
    # Quick and dirty, can reduce computation by factor 2.
    similarity = []
    for d1 in displacements:
        for d2 in displacements:
            similarity += [np.sum(d1 * d2) / (np.linalg.norm(d1) * np.linalg.norm(d2))]
    similarity = np.array(similarity).reshape(n_neighbors, n_neighbors)
    return similarity
b1b64384d84ffbdd6a042b1e2c3a8a9a2212e61e
32,481
def high_pass_filter(x_vals, y_vals, cutoff, inspectPlots=True):
    """
    Replicate Origin directly
    http://www.originlab.com/doc/Origin-Help/Smooth-Algorithm
    "rotate" the data set so it ends at 0, enforcing a periodicity in the data.
    Otherwise oscillatory artifacts result at the ends

    This uses a 50th order Butterworth filter.
    """
    x_vals, y_vals = fourier_prep(x_vals, y_vals)
    if inspectPlots:
        plt.figure("Real Space")
        plt.plot(x_vals, y_vals, label="Non-nan Data")

    # This needs to be this way because truncation is bad and actually zero padding
    zeroPadding = len(x_vals)
    print("zero padding", zeroPadding)
    N = len(x_vals)
    onePerc = int(0.01 * N)
    x1 = np.mean(x_vals[:onePerc])
    x2 = np.mean(x_vals[-onePerc:])
    y1 = np.mean(y_vals[:onePerc])
    y2 = np.mean(y_vals[-onePerc:])

    m = (y1 - y2) / (x1 - x2)
    b = y1 - m * x1

    flattenLine = m * x_vals + b
    y_vals -= flattenLine

    if inspectPlots:
        plt.figure("Real Space")
        plt.plot(x_vals, y_vals, label="Rotated Data")

    # even_data = np.column_stack((x_vals, y_vals))
    # Perform the FFT and find the appropriate frequency spacing
    x_fourier = fft.fftfreq(zeroPadding, x_vals[1] - x_vals[0])
    y_fourier = fft.fft(y_vals)  # , n=zeroPadding)

    if inspectPlots:
        plt.figure("Frequency Space")
        plt.semilogy(x_fourier, np.abs(y_fourier), label="Raw FFT")

    # Define where to remove the data
    band_start = cutoff
    band_end = int(max(abs(x_fourier))) + 1

    print(abs(y_fourier[-10:]))
    butterworth = 1 - np.sqrt(1 / (1 + (x_fourier / cutoff) ** 50))
    y_fourier *= butterworth

    if inspectPlots:
        plt.plot(x_fourier, np.abs(y_fourier), label="FFT with removed parts")
        a = plt.legend()
        a.draggable(True)

    print("y_fourier", len(y_fourier))

    # invert the FFT
    y_vals = fft.ifft(y_fourier, n=zeroPadding)

    # using fft, not rfft, so data may have some complex parts. But we can
    # assume they'll be negligible and remove them
    # ( Safer to use np.real, not np.abs? )
    # Need the [:len] to remove zero-padded stuff
    y_vals = y_vals[:len(x_vals)]

    # unshift the data
    y_vals += flattenLine
    y_vals = np.abs(y_vals)

    if inspectPlots:
        plt.figure("Real Space")
        print(x_vals.size, y_vals.size)
        plt.plot(x_vals, y_vals, label="Smoothed Data")
        a = plt.legend()
        a.draggable(True)

    return np.column_stack((x_vals, y_vals))
8a114e1868c28f1de8ee4ac445bd620cb45482ff
32,482
def hyetograph(dataframe, col="precipitation", freq="hourly", ax=None, downward=True): """Plot showing rainfall depth over time. Parameters ---------- dataframe : pandas.DataFrame Must have a datetime index. col : string, optional (default = 'precip') The name of the column in *dataframe* that contains the rainall series. freq : str, optional (default = 'hourly') The frequency to which the rainfall depth should be accumulated. ax : matplotlib.Axes object, optional The Axes on which the plot will be placed. If not provided, a new Figure and Axes will be created. downward : bool, optional (default = True) Inverts the y-axis to show the rainfall depths "falling" from the top. Returns ------- fig : matplotlib.Figure """ ylabel = "%s Rainfall Depth (in)" % freq.title() fig = _plotter( dataframe, col, ylabel, freq=freq, fillna=0, how="sum", ax=ax, downward=downward ) return fig
17deb837058ddd8ad8db9ed47c960cacfde957db
32,483
def objective(z, x):
    """ Objective. """
    return park2_3_mf(z, x)
fb65c09f084b0af8848e78582703a1bb4e11e735
32,484
import json
import re


def validate_config(crawler_path):
    """
    Validates config
    """
    with open(crawler_path) as file:
        config = json.load(file)
    if 'total_articles_to_find_and_parse' not in config:
        raise IncorrectNumberOfArticlesError
    if 'seed_urls' not in config:
        raise IncorrectURLError
    urls = config["seed_urls"]
    articles = config["total_articles_to_find_and_parse"]
    if not urls:
        raise IncorrectURLError
    if not isinstance(articles, int) or articles <= 0:
        raise IncorrectNumberOfArticlesError
    if articles > 100:
        raise NumberOfArticlesOutOfRangeError
    for url in urls:
        check = re.search(DOMAIN, url)
        if not check:
            raise IncorrectURLError
    return urls, articles
ba46667fcc0d75be6b28d19c0f5fa2d41f9123dd
32,485
def parse_FORCE_SETS(natom=None, filename="FORCE_SETS", to_type2=False):
    """Parse FORCE_SETS from file.

    to_type2 : bool
        dataset of type2 is returned when True.

    Returns
    -------
    dataset : dict
        Displacement dataset. See Phonopy.dataset.

    """
    with open(filename, "r") as f:
        return _get_dataset(
            f,
            natom=natom,
            to_type2=to_type2,
        )
54b39f8b111292f53c6231facafb177109972965
32,486
import time


def DeserializeFileAttributesFromObjectMetadata(obj_metadata, url_str):
    """Parses the POSIX attributes from the supplied metadata.

    Args:
        obj_metadata: The metadata for an object.
        url_str: File/object path that provides context if a warning is thrown.

    Returns:
        A POSIXAttribute object with the retrieved values or a default value for
        any attribute that could not be found.
    """
    posix_attrs = POSIXAttributes()
    # Parse atime.
    found, atime = GetValueFromObjectCustomMetadata(obj_metadata, ATIME_ATTR,
                                                    NA_TIME)
    try:
        atime = long(atime)
        if found and atime <= NA_TIME:
            WarnNegativeAttribute('atime', url_str)
            atime = NA_TIME
        elif atime > long(time.time()) + SECONDS_PER_DAY:
            WarnFutureTimestamp('atime', url_str)
            atime = NA_TIME
    except ValueError:
        WarnInvalidValue('atime', url_str)
        atime = NA_TIME
    posix_attrs.atime = atime
    # Parse gid.
    DeserializeIDAttribute(obj_metadata, GID_ATTR, url_str, posix_attrs)
    # Parse uid.
    DeserializeIDAttribute(obj_metadata, UID_ATTR, url_str, posix_attrs)
    found, mode = GetValueFromObjectCustomMetadata(obj_metadata, MODE_ATTR,
                                                   NA_MODE)
    if found and MODE_REGEX.match(mode):
        try:
            # Parse mode into a 3-digit base-8 number.
            posix_attrs.mode = POSIXMode(int(mode))
        except ValueError:
            WarnInvalidValue('mode', url_str)
    return posix_attrs
3fb3d3f4e45a622cc12ce1d65b6f02d59efd3f58
32,487
def hex_to_64(hexstr):
    """Convert a hex string to a base64 string.

    Keyword arguments:
    hexstr -- the hex string we wish to convert
    """
    B64CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'

    ## internals
    # bits contains the bits read off so far that don't make enough for a char
    bits = 0
    # bits_left tracks how many bits are left until a char is ready to convert
    bits_left = 6
    # output holds the accrued base64 string thus far
    output = ''

    # Read each hex char as four bits. Every time 6 are accrued,
    # convert them to base64 and continue.
    for h in hexstr:
        hbits = int(h, 16)
        if bits_left == 6:
            # h's bits aren't enough. Hold 'em and keep going.
            bits = hbits
            bits_left = 2
        elif bits_left == 4:
            # h's bits are just enough. Add 'em to the bits bin and convert.
            bits = (bits << 4) | hbits
            output += B64CHARS[bits]
            bits = 0
            bits_left = 6
        else:
            # h's top two bits finish a set of 6. Convert the set
            # and save the last two of h's bits.
            bits = (bits << 2) | (hbits >> 2)
            output += B64CHARS[bits]
            bits = hbits & 3
            bits_left = 4

    # After reading hexstr, we may need some zeroes for padding.
    # We should also add '=' chars for each pair of padding bits.
    if bits_left < 6:
        output += B64CHARS[bits << bits_left]
        output += '=' * (bits_left // 2)
    return output
ca8c48bedf4ac776288a8faad06ec80c0289c11e
32,488
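A quick usage sketch for hex_to_64(); '49276d' is the hex encoding of "I'm" and maps to 'SSdt', while an odd number of output groups triggers the '=' padding branch.

print(hex_to_64("49276d"))  # SSdt
print(hex_to_64("4d61"))    # TWE= (one padding char for the leftover bits)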
def get_frozen_graph(graph_file):
    """Read Frozen Graph file from disk."""
    with tf.gfile.FastGFile(graph_file, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    return graph_def
e3a7bb3e1abf7eb09e6e11b325176eb95109e634
32,489
def wrap_functional_unit(dct):
    """Transform functional units for effective logging.

    Turns ``Activity`` objects into their keys."""
    data = []
    for key, amount in dct.items():
        if isinstance(key, int):
            data.append({"id": key, "amount": amount})
        else:
            try:
                data.append({"database": key[0], "code": key[1], "amount": amount})
            except TypeError:
                data.append({"key": key, "amount": amount})
    return data
9c86b5c6c4f360e86f39e2bc59ce4a34804cd7fa
32,490
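A brief usage sketch for wrap_functional_unit() with a hypothetical (database, code) key and a plain integer id.

fu = {("ecoinvent", "steel-production"): 1.0, 42: 2.5}
print(wrap_functional_unit(fu))
# [{'database': 'ecoinvent', 'code': 'steel-production', 'amount': 1.0},
#  {'id': 42, 'amount': 2.5}]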
import os


def load_dataset_class(name):
    """dynamically load a class object from a dataset file.

    Return: dataset class object
    """
    base_dir = os.path.join("lmnet", "datasets")
    dataset_class = _load_class_from_name(name, base_dir)
    return dataset_class
ce7aa8fe551ddc2b1b2071ea6d744f5759806dbd
32,491
def full_fuel_requirement(mass: int) -> int:
    """Complete fuel requirements for a single module."""
    base_fuel = fuel_requirement(mass)
    return base_fuel + sum(additional_fuel_requirements(base_fuel))
62c9d7e11afd0805d476216bdd113081285b10c4
32,492
def is_private(name):
    """Check whether a Python object is private based on its name."""
    return name.startswith("_")
d04dfd84884bbc8c8be179c6bc5fc1371f426a78
32,493
import warnings


def get_suitable_output_file_name_for_current_output_format(output_file, output_format):
    """
    Renames the file given in output_file if the results for the current output
    format are returned compressed by default and the name selected by the user
    does not contain the correct extension.

    output_file : str, optional, default None
        file name selected by the user
    output_format : str, optional, default 'votable'
        results format. Available formats in TAP are: 'votable', 'votable_plain',
        'fits', 'csv', 'ecsv' and 'json'. Default is 'votable'.
        Returned results for formats 'votable', 'ecsv' and 'fits' are compressed
        gzip files.

    Returns
    -------
    A string with the new name for the file.
    """
    compressed_extension = ".gz"
    format_with_results_compressed = ['votable', 'fits', 'ecsv']
    output_file_with_extension = output_file

    if output_file is not None:
        if output_format in format_with_results_compressed:
            # In this case we will have to take also into account the .fits format
            if not output_file.endswith(compressed_extension):
                warnings.warn('By default, results in "votable", "ecsv" and "fits" format are returned in '
                              f'compressed format therefore your file {output_file} '
                              f'will be renamed to {output_file}.gz')
                if output_format == 'votable':
                    if output_file.endswith('.vot'):
                        output_file_with_extension = output_file + '.gz'
                    else:
                        output_file_with_extension = output_file + '.vot.gz'
                elif output_format == 'fits':
                    if output_file.endswith('.fits'):
                        output_file_with_extension = output_file + '.gz'
                    else:
                        output_file_with_extension = output_file + '.fits.gz'
                elif output_format == 'ecsv':
                    if output_file.endswith('.ecsv'):
                        output_file_with_extension = output_file + '.gz'
                    else:
                        output_file_with_extension = output_file + '.ecsv.gz'
        # the output type is not compressed by default by the TAP SERVER but the
        # user gives a .gz extension
        elif output_file.endswith(compressed_extension):
            output_file_renamed = output_file.removesuffix('.gz')
            warnings.warn(f'The output format selected is not compatible with compression. {output_file}'
                          f' will be renamed to {output_file_renamed}')
    return output_file_with_extension
6925d721f06e52c8177c5e4f6404629043642cc4
32,494
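A usage sketch for get_suitable_output_file_name_for_current_output_format(); a 'votable' result keeps its '.vot' stem and gains the '.gz' suffix (a compression warning is also emitted).

name = get_suitable_output_file_name_for_current_output_format("results.vot", "votable")
print(name)  # results.vot.gz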
def efficientnet_b1(
    num_classes: int = 1000,
    class_type: str = "single",
    dropout: float = 0.2,
    se_mod: bool = False,
) -> EfficientNet:
    """
    EfficientNet B1 implementation; expected input shape is (B, 3, 240, 240)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :param dropout: the amount of dropout to use while training
    :param se_mod: If true, moves squeeze-excite to the end of the block
        (after last 1x1)
    :return: The created EfficientNet B1 Module
    """
    width_mult = 1.0
    depth_mult = 1.1
    sec_settings, out_channels = _create_section_settings(
        width_mult, depth_mult, se_mod
    )

    return EfficientNet(
        sec_settings=sec_settings,
        out_channels=out_channels,
        num_classes=num_classes,
        class_type=class_type,
        dropout=dropout,
    )
e713946ec937d0beb2069f7a273a311cad4b3305
32,495
def mark_user_authenticated(user, login):
    """
    Modify a User so it knows it is logged in - checked via user.is_authenticated()
    """
    setattr(user, ACTIVATED_LOGIN_KEY, login)
    return user
dcd706204747c526c2128a49fcf24c7ef8e075bd
32,496
def assignments(bmat, order=1, ntry=10):
    """Make assignments between rows and columns.

    The objective is to have assignments satisfying the following conditions:

    - all associations are allowed in bmat,
    - each row is associated with a unique column,
    - each column is associated with a unique row,
    - all rows are associated.

    A classical use case is to assign students to a defense schedule.

    Parameters
    ----------
    bmat : array of bool or int
        Binary matrix indicating which assignments are allowed.
    order : int, optional
        Order of the greedy search. Default: 1. A higher order can be used for
        small datasets if a solution cannot be found with order 1.
    ntry : int
        Number of random tries used to solve the assignments problem.

    Returns
    -------
    AssignmentsResult
        Attributes are:

        - ``best_assignments`` contains the assignments that solve the problem,
          or those with the highest number of associated rows.
        - ``not_assigned_rows`` contains the indexes of rows not assigned in
          ``best_assignments`` (empty if the problem is solved).
        - ``problematic_rows`` contains tuples of problematic row indexes and
          scores. A higher score indicates a row is problematic for the
          assignment problem.
    """
    min_not_assigned_rows = np.inf
    best_assignments = ()
    best_not_assigned_rows = ()
    not_assigned_rows = []
    for _ in range(ntry):
        cur_assignments, cur_not_assigned_rows = _assigments_one_try(bmat, order)
        if not cur_not_assigned_rows:
            return AssignmentsResult(cur_assignments, (), ())
        if len(cur_not_assigned_rows) < min_not_assigned_rows:
            min_not_assigned_rows = len(cur_not_assigned_rows)
            best_assignments = cur_assignments
            best_not_assigned_rows = cur_not_assigned_rows
        not_assigned_rows.extend(cur_not_assigned_rows)
    pb_rows = [
        (i, sum(j == i for j in not_assigned_rows) / ntry)
        for i in set(not_assigned_rows)
    ]
    pb_rows.sort(key=lambda x: x[1], reverse=True)
    return AssignmentsResult(best_assignments, best_not_assigned_rows, tuple(pb_rows))
5d183df6b538b13f53cf4a9ec18527525b5d0383
32,497
from numpy.linalg import inv


def _tracemin_fiedler(L, X, normalized, tol, method):
    """Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm.
    """
    n = X.shape[0]

    if normalized:
        # Form the normalized Laplacian matrix and determine the eigenvector of
        # its nullspace.
        e = sqrt(L.diagonal())
        D = spdiags(1. / e, [0], n, n, format='csr')
        L = D * L * D
        e *= 1. / norm(e, 2)

    if not normalized:
        def project(X):
            """Make X orthogonal to the nullspace of L.
            """
            X = asarray(X)
            for j in range(X.shape[1]):
                X[:, j] -= X[:, j].sum() / n
    else:
        def project(X):
            """Make X orthogonal to the nullspace of L.
            """
            X = asarray(X)
            for j in range(X.shape[1]):
                X[:, j] -= dot(X[:, j], e) * e

    if method is None:
        method = 'pcg'
    if method == 'pcg':
        # See comments below for the semantics of P and D.
        def P(x):
            x -= asarray(x * X * X.T)[0, :]
            if not normalized:
                x -= x.sum() / n
            else:
                x = daxpy(e, x, a=-ddot(x, e))
            return x
        solver = _PCGSolver(lambda x: P(L * P(x)), lambda x: D * x)
    elif method == 'chol' or method == 'lu':
        # Convert A to CSC to suppress SparseEfficiencyWarning.
        A = csc_matrix(L, dtype=float, copy=True)
        # Force A to be nonsingular. Since A is the Laplacian matrix of a
        # connected graph, its rank deficiency is one, and thus one diagonal
        # element needs to be modified. Changing to infinity forces a zero in
        # the corresponding element in the solution.
        i = (A.indptr[1:] - A.indptr[:-1]).argmax()
        A[i, i] = float('inf')
        solver = (_CholeskySolver if method == 'chol' else _LUSolver)(A)
    else:
        raise nx.NetworkXError('unknown linear system solver.')

    # Initialize.
    Lnorm = abs(L).sum(axis=1).flatten().max()
    project(X)
    W = asmatrix(ndarray(X.shape, order='F'))

    while True:
        # Orthonormalize X.
        X = qr(X)[0]
        # Compute iteration matrix H.
        W[:, :] = L * X
        H = X.T * W
        sigma, Y = eigh(H, overwrite_a=True)
        # Compute the Ritz vectors.
        X *= Y
        # Test for convergence exploiting the fact that L * X == W * Y.
        res = dasum(W * asmatrix(Y)[:, 0] - sigma[0] * X[:, 0]) / Lnorm
        if res < tol:
            break
        # Depending on the linear solver to be used, two mathematically
        # equivalent formulations are used.
        if method == 'pcg':
            # Compute X = X - (P * L * P) \ (P * L * X) where
            # P = I - [e X] * [e X]' is a projection onto the orthogonal
            # complement of [e X].
            W *= Y  # L * X == W * Y
            W -= (W.T * X * X.T).T
            project(W)
            # Compute the diagonal of P * L * P as a Jacobi preconditioner.
            D = L.diagonal().astype(float)
            D += 2. * (asarray(X) * asarray(W)).sum(axis=1)
            D += (asarray(X) * asarray(X * (W.T * X))).sum(axis=1)
            D[D < tol * Lnorm] = 1.
            D = 1. / D
            # Since TraceMIN is globally convergent, the relative residual can
            # be loose.
            X -= solver.solve(W, 0.1)
        else:
            # Compute X = L \ X / (X' * (L \ X)). L \ X can have an arbitrary
            # projection on the nullspace of L, which will be eliminated.
            W[:, :] = solver.solve(X)
            project(W)
            X = (inv(W.T * X) * W.T).T  # Preserves Fortran storage order.

    return sigma, asarray(X)
9b14aa1a8973134846bcf31dabe8bb27d70d36fd
32,498
def default_char_class_join_with():
    """ default join for char_class and combine types """
    return ''
9d8f15413f56202472f2e1257382babbaa444edc
32,499