content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def compute_wolfe_gap(point_x, objective_function, feasible_region):
    """Compute the Wolfe gap given a point."""
    grad = objective_function.evaluate_grad(point_x.cartesian_coordinates)
    v = feasible_region.lp_oracle(grad)
    wolfe_gap = grad.dot(point_x.cartesian_coordinates - v)
    return wolfe_gap
5,339,700
def replace_chunk(filename, offset, length, chunk, in_place=True, max_mem=5):
    """Replace length bytes of data with chunk, starting at offset.

    Any KeyboardInterrupts arriving while replace_chunk is running are
    deferred until the operation is complete.

    If in_place is true, the operation works directly on the original file;
    this is fast and works on files that are already open, but an error or
    interrupt may lead to corrupt file contents. If in_place is false, the
    function prepares a copy first, then renames it back over the original
    file. This method is slower, but it prevents corruption on systems with
    atomic renames (UNIX), and reduces the window of vulnerability elsewhere
    (Windows).

    If there is no need to move data that is not being replaced, then we use
    the direct method irrespective of in_place. (In this case an interrupt
    may only corrupt the chunk being replaced.)
    """
    with suppress_interrupt():
        _replace_chunk(filename, offset, length, chunk, in_place, max_mem)
5,339,701
async def get(ip, community, oid, port=161, timeout=DEFAULT_TIMEOUT):
    # type: (str, str, str, int, int) -> PyType
    """
    Delegates to :py:func:`~puresnmp.aio.api.raw.get` but returns simple
    Python types.

    See the "raw" equivalent for detailed documentation & examples.
    """
    raw_value = await raw.get(ip, community, oid, port, timeout=timeout)
    return raw_value.pythonize()
5,339,702
def preprocess_image(image, params):
    """Preprocess image tensor.

    Args:
        image: tensor, input image with shape
            [cur_batch_size, height, width, depth].
        params: dict, user passed parameters.

    Returns:
        Preprocessed image tensor with shape
            [cur_batch_size, height, width, depth].
    """
    func_name = "preprocess_image"
    # Convert from [0, 255] -> [-1.0, 1.0] floats.
    image = tf.cast(x=image, dtype=tf.float32) * (2. / 255) - 1.0
    print_obj(func_name, "image", image)
    return image
5,339,703
def get_vlim(xarr: xr.DataArray, alpha: float) -> dict:
    """Get vmin, vmax using mean and std."""
    mean = xarr.mean()
    std = xarr.std()
    return {"vmin": max(0., mean - alpha * std), "vmax": mean + alpha * std}
5,339,704
def count_consumed_symbols(e):
    """Count how many symbols are consumed from each sequence by a single
    sequence diff entry."""
    op = e.op
    if op == DiffOp.ADDRANGE:
        return (0, len(e.valuelist))
    elif op == DiffOp.REMOVERANGE:
        return (e.length, 0)
    elif op == DiffOp.PATCH:
        return (1, 1)
    else:
        raise NBDiffFormatError("Invalid op '{}'".format(op))
5,339,705
def maha_dist_sq(cols, center, cov):
    """Calculate squared Mahalanobis distance of all observations (rows in the
    vectors contained in the list cols) from the center vector with respect to
    the covariance matrix cov"""
    n = len(cols[0])
    p = len(cols)
    assert len(center) == p
    # observation matrix
    obs = flex.double(flex.grid(n, p))
    for i, col in enumerate(cols):
        obs.matrix_paste_column_in_place(col, i)
    d2 = maha_dist_sq_cpp(obs, flex.double(center), cov)
    return d2
5,339,706
def get_form_target():
    """
    Returns the target URL for the comment form submission view.
    """
    if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_form_target"):
        return get_comment_app().get_form_target()
    else:
        return urlresolvers.reverse("comments.views.comments.post_comment")
5,339,707
def secret_page(username=None, password=None):
    """
    Returns the HTML for the page visited after the user has logged-in.
    """
    if username is None or password is None:
        raise ValueError("You need to pass both username and password!")

    return _wrapper("""
        <h1> Welcome, {username}! </h1>
        <p> <small> Pst! I know your password is
            <span class="spoilers"> {password}</span>.
        </small> </p>
    """.format(username=escape(username.capitalize()),
               password=escape(password)))
    # ==== Edit username and pw in secret.py ====
5,339,708
def user(username):
    """ displays a single user """
    all_badgers = loads(r_server.get('all_badgers'))
    this_badger = all_badgers[username]
    this_badger_sorted = collections.OrderedDict(sorted(this_badger.items(), reverse=True))
    days = days_in_a_row(this_badger)
    kwargs = {'badgers': {username: this_badger_sorted}, 'days': days}
    return render_template('index.html', **kwargs)
5,339,709
def ifft(a, axis):
    """
    Fourier transformation from grid to image space, along a given axis.
    (inverse Fourier transform)

    :param a: numpy array, 1D or 2D (`uv` grid to transform)
    :param axis: int; axes over which to calculate
    :return: numpy array (an image in `lm` coordinate space)
    """
    return numpy.fft.fftshift(
        numpy.fft.ifft(numpy.fft.ifftshift(a, axis), axis=axis), axis
    )
5,339,710
def transitions_and_masks_to_proposals(t1, t2, m1, m2, max_samples=10, max_ccs=6):
    """
    assumes set-based s and a... so shape should be (n_components, *component_shape)

    Takes two transitions with their masks, and combines them using
    connected-component relabeling to form proposals

    Returns a list of tuples of ((s1, a1, s2) proposal, disconnected_component_idxs).
    """
    sa1, s21 = t1
    sa2, s22 = t2

    # get_dcs_from_mask should return a set of tuples of indices, inc. the empty tuple
    # where the subgraph represented by each tuple is disconnected from the rest of
    # the graph. Note that mask should be square, so columns corresp. to action idxs are
    # dummy columns.
    #
    # E.g., if mask is [[1,0,0,0],[0,1,0,0],[0,0,1,1],[0,0,1,1]],
    # this function should return:
    # set([ (,), (0,), (1,), (0,1), (2, 3), (0, 2, 3), (1, 2, 3), (0, 1, 2, 3) ])
    dc1 = get_dcs_from_mask(m1, max_ccs)
    dc2 = get_dcs_from_mask(m2, max_ccs)

    # get shared connected components in random order
    shared_dc = list(dc1.intersection(dc2))
    random.shuffle(shared_dc)

    # subsample shared_dc down to max_samples
    if len(shared_dc) > max_samples:
        shared_dc = shared_dc[:max_samples]

    all_idxs = set(range(len(sa1)))
    res = []
    for dc in shared_dc:
        not_dc = list(all_idxs - set(dc))
        dc = list(dc)  # (0, 2)
        proposed_sa = np.zeros_like(sa1)
        proposed_s2 = np.zeros_like(sa1)
        proposed_sa[dc] = sa1[dc]
        proposed_sa[not_dc] = sa2[not_dc]
        proposed_s2[dc] = s21[dc]
        proposed_s2[not_dc] = s22[not_dc]
        proposed_t = (proposed_sa, proposed_s2)
        res.append((proposed_t, tuple(dc)))
    return res
5,339,711
def make_manifest(root: AnyPath) -> FileManifest:
    """
    Returns the file manifest for the given directory.
    """
    manifest = {}
    for (dirpath, dirnames, filenames) in os.walk(root):
        dirnames[:] = sorted(dirnames)
        for filename in sorted(filenames):
            path = Path(dirpath) / filename
            logging.info(f"PATH {path}")
            st_info = path.lstat()
            rel_path = os.fspath(path.relative_to(root))
            manifest[rel_path] = FileMetadata(
                size=st_info.st_size,
                digest=make_digest(path, st_info.st_mode),
            )
    return manifest
5,339,712
def main():
    """Drives the main script behavior."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    for filename in os.listdir(script_dir):
        basename, extension = os.path.splitext(filename)
        if basename.startswith("Test") and extension == '.py':
            source_path = os.path.join(script_dir, filename)
            dest_path = source_path + ".park"
            sys.stdout.write("renaming {} to {}\n".format(source_path, dest_path))
            os.rename(source_path, dest_path)
5,339,713
def exists(url):
    """Check based on protocol if url exists."""
    parsed_url = urlparse(url)
    if parsed_url.scheme == "":
        raise RuntimeError("Invalid url: %s" % url)
    if parsed_url.scheme in ('http', 'https'):
        r = requests.head(url, verify=False)
        if r.status_code == 200:
            return True
        elif r.status_code == 404:
            return False
        else:
            r.raise_for_status()
    elif parsed_url.scheme in ('s3', 's3s'):
        s3_eps = boto.regioninfo.load_regions()['s3']
        region = None
        for r, e in list(s3_eps.items()):
            if re.search(e, parsed_url.netloc):
                region = r
                break
        if region is None:
            raise RuntimeError("Failed to find region for endpoint %s." % parsed_url.netloc)
        conn = boto.s3.connect_to_region(region,
                                         aws_access_key_id=parsed_url.username,
                                         aws_secret_access_key=parsed_url.password)
        match = re.search(r'/(.*?)/(.*)$', parsed_url.path)
        if not match:
            raise RuntimeError("Failed to parse bucket & key from %s." % parsed_url.path)
        bn, kn = match.groups()
        try:
            bucket = conn.get_bucket(bn)
        except boto.exception.S3ResponseError as e:
            if e.status == 404:
                return False
            else:
                raise
        key = bucket.get_key(kn)
        if key is None:
            return False
        else:
            return True
    else:
        raise NotImplementedError("Failed to check existence of %s url." % parsed_url.scheme)
5,339,714
def inverse_chirality_symbol(symbol):
    """
    Inverses a chirality symbol, e.g., the 'R' character to 'S', or 'NS' to 'NR'.
    Note that chiral double bonds ('E' and 'Z') must not be inversed (they are not
    mirror images of each other).

    Args:
        symbol (str): The chirality symbol.

    Returns:
        str: The inverse chirality symbol.

    Raises:
        InputError: If ``symbol`` could not be recognized.
    """
    inversion_dict = {'R': 'S', 'S': 'R', 'NR': 'NS', 'NS': 'NR', 'E': 'E', 'Z': 'Z'}
    if symbol not in list(inversion_dict.keys()):
        raise InputError(f"Recognized chirality symbols are 'R', 'S', 'NR', 'NS', 'E', and 'Z', got {symbol}.")
    return inversion_dict[symbol]
5,339,715
def simplify_columns(df):
    """
    Simplify column labels for use as snake_case database fields.

    All columns will be re-labeled by:
    * Replacing all non-alphanumeric characters with spaces.
    * Forcing all letters to be lower case.
    * Compacting internal whitespace to a single " ".
    * Stripping leading and trailing whitespace.
    * Replacing all remaining whitespace with underscores.

    Args:
        df (pandas.DataFrame): The DataFrame to clean.

    Returns:
        pandas.DataFrame: The cleaned DataFrame.

    Todo:
        Update docstring.
    """
    df.columns = (
        df.columns.
        str.replace(r'[^0-9a-zA-Z]+', ' ', regex=True).
        str.strip().
        str.lower().
        str.replace(r'\s+', ' ', regex=True).
        str.replace(' ', '_')
    )
    return df
5,339,716
def delete(
    request: HttpRequest,
    wid: Optional[int] = None,
    workflow: Optional[Workflow] = None,
) -> JsonResponse:
    """Delete a workflow."""
    if request.method == 'POST':
        # Log the event
        Log.objects.register(
            request.user,
            Log.WORKFLOW_DELETE,
            None,
            {'id': workflow.id, 'name': workflow.name})

        # Nuke the logs pointing to the workflow
        for litem in workflow.logs.all():
            litem.workflow = None
            litem.save()

        # Perform the delete operation
        workflow.delete()

        # In this case, the form is valid anyway
        return JsonResponse({'html_redirect': reverse('home')})

    return JsonResponse({
        'html_form': render_to_string(
            'workflow/includes/partial_workflow_delete.html',
            {'workflow': workflow},
            request=request),
    })
5,339,717
def at_threshold(FPR, TPR, parameter, threshold):
    """
    False positive rate (FPR) and True positive rate (TPR) at the selected threshold.

    :param FPR: False positive rates of given receiver operating characteristic (ROC) curve
    :param TPR: True positive rate of given receiver operating characteristic (ROC) curve
    :param parameter: possible thresholds
    :param threshold: selected threshold
    """
    index = np.argmin(np.abs(parameter - threshold))
    FPR_at_threshold = FPR[index]
    TPR_at_threshold = TPR[index]
    return FPR_at_threshold, TPR_at_threshold
5,339,718
def cancel_and_close(driver, target_id):
    """
    Clicks the cancel button in the current window, confirms the alert dialog,
    waits until the current window is closed and then switches to the window
    with the given target id.
    """
    time.sleep(10)
    current_id = driver.current_window_handle
    driver.find_element_by_name("_cancel").click()
    alert = driver.switch_to_alert()
    alert.accept()
    # TODO remove this workaround when Selenium starts working again as intended
    time.sleep(1)
    wait_till_closed_and_switch(driver, current_id, target_id)
5,339,719
def _check_kl_estimator(estimator_fn, distribution_fn, num_samples=10000,
                        rtol=1e-1, atol=1e-3, grad_rtol=2e-1, grad_atol=1e-1):
    """Compares the estimator_fn output and gradient to exact KL."""
    rng_key = jax.random.PRNGKey(0)

    def expected_kl(params):
        distribution_a = distribution_fn(**params[0])
        distribution_b = distribution_fn(**params[1])
        return distribution_a.kl_divergence(distribution_b)

    def estimate_kl(params):
        distribution_a = distribution_fn(**params[0])
        distribution_b = distribution_fn(**params[1])
        return estimator_fn(distribution_a, distribution_b,
                            rng_key=rng_key, num_samples=num_samples)

    params = (
        dict(loc=0.0, scale=1.0),
        dict(loc=0.1, scale=1.0),
    )

    expected_value, expected_grad = jax.value_and_grad(expected_kl)(params)
    value, grad = jax.value_and_grad(estimate_kl)(params)

    np.testing.assert_allclose(expected_value, value, rtol=rtol, atol=atol)
    chex.assert_tree_all_close(expected_grad, grad, rtol=grad_rtol, atol=grad_atol)
5,339,720
def relative_bias(simu, reco, relative_scaling_method='s1'):
    """
    Compute the relative bias of a reconstructed variable as
    `median(reco-simu)/relative_scaling(simu, reco)`

    Parameters
    ----------
    simu: `numpy.ndarray`
    reco: `numpy.ndarray`
    relative_scaling_method: str
        see `ctaplot.ana.relative_scaling`

    Returns
    -------
    """
    assert len(reco) == len(simu)
    if len(simu) == 0:
        return 0
    return np.median((reco - simu) / relative_scaling(simu, reco, method=relative_scaling_method))
5,339,721
def addon_config() -> Dict[str, Any]:
    """Sample addon config."""
    return {
        "package-name": "djangocms-blog",
        "installed-apps": [
            "filer",
            "easy_thumbnails",
            "aldryn_apphooks_config",
            "parler",
            "taggit",
            "taggit_autosuggest",
            "meta",
            "djangocms_blog",
            "sortedm2m",
        ],
        "settings": {
            "META_SITE_PROTOCOL": "https",
            "META_USE_SITES": True,
            "MIDDLEWARE": ["django.middleware.gzip.GZipMiddleware"],
        },
        "urls": [["", "djangocms_blog.taggit_urls"]],
        "message": "Please check documentation to complete the setup",
    }
5,339,722
def print_layout24(layout):
    """ Print layout. """
    print(' {0} {1}'.format(' '.join(layout[0:4]), ' '.join(layout[12:16])))
    print(' {0} {1}'.format(' '.join(layout[4:8]), ' '.join(layout[16:20])))
    print(' {0} {1}'.format(' '.join(layout[8:12]), ' '.join(layout[20:24])))
5,339,723
def join_mutations_regions(
    out_path: str, sample1_id: int, sample2_id: int, mutations_file: File, regions_file: File
) -> File:
    """
    Join mutations and regions together to compute an allele frequency.
    """
    def iter_mut_points(muts):
        for pos, count in muts:
            yield pos, "mut", count

    def iter_region_points(regions):
        for start, end, depth in regions:
            yield start - 0.5, "region", depth

    def iter_allele_freqs(points):
        denom = 0
        for pos, kind, count in points:
            if kind == "region":
                denom = count
            elif kind == "mut":
                yield pos, count, denom, count / denom

    points1 = iter_mut_points(read_mutations(mutations_file))
    points2 = iter_region_points(read_regions(regions_file))
    points = iter_merge(points1, points2)
    allele_freqs = iter_allele_freqs(points)

    allele_freqs_path = f"{out_path}/allele_freqs/{sample1_id}_{sample2_id}.allele_freqs"
    return write_allele_freqs(allele_freqs_path, allele_freqs)
5,339,724
def credibility_interval(post, alpha=1.):
    """Calculate bayesian credibility interval.

    Parameters:
    -----------
    post : array_like
        The posterior sample over which to calculate the bayesian credibility
        interval.
    alpha : float, optional
        Confidence level.

    Returns:
    --------
    med : float
        Median of the posterior.
    low : float
        Lower part of the credibility interval.
    up : float
        Upper part of the credibility interval.
    """
    z = erf(alpha / sp.sqrt(2))
    lower_percentile = 100 * (1 - z) / 2
    upper_percentile = 100 * (1 + z) / 2
    low, med, up = sp.percentile(
        post, [lower_percentile, 50, upper_percentile]
    )
    return med, low, up
5,339,725
def test_packed_object_reader():
    """Test the functionality of the PackedObjectReader."""
    bytestream = b"0123456789abcdef"
    with tempfile.NamedTemporaryFile(mode="wb", delete=False) as tempfhandle:
        tempfhandle.write(bytestream)

    offset = 3
    length = 5
    expected_bytestream = bytestream[offset : offset + length]

    # Read 1 byte at a time
    with open(tempfhandle.name, "rb") as fhandle:
        packed_reader = utils.PackedObjectReader(fhandle, offset=offset, length=length)
        data = []
        while True:
            piece = packed_reader.read(1)
            if not piece:
                break
            data.append(piece)
        assert b"".join(data) == expected_bytestream

    # Read 2 bytes at a time (note that the length is odd, so it's not a multiple)
    with open(tempfhandle.name, "rb") as fhandle:
        packed_reader = utils.PackedObjectReader(fhandle, offset=offset, length=length)
        data = []
        while True:
            piece = packed_reader.read(2)
            if not piece:
                break
            data.append(piece)
        assert b"".join(data) == expected_bytestream

    # Offset beyond the file limit
    with open(tempfhandle.name, "rb") as fhandle:
        packed_reader = utils.PackedObjectReader(
            fhandle, offset=len(bytestream) + 10, length=length
        )
        assert packed_reader.read() == b""

    # Offset before the file limit, but longer length
    with open(tempfhandle.name, "rb") as fhandle:
        packed_reader = utils.PackedObjectReader(fhandle, offset=offset, length=1000000)
        assert packed_reader.read() == bytestream[offset:]
5,339,726
def test_create_instance_of_animal_shelter(empty_q):
    """ can we create a Queue instance with no input values """
    assert isinstance(empty_q, AnimalShelter)
5,339,727
def test_space_after(style_checker):
    """style_checker on python file with invalid No_Style_Check comment.

    The file has a No_Style_Check comment, except that the comment is not
    starting at the end of the line. So style_checker ignores it, and lets
    pep8 report the errors in that file.
    """
    p = style_checker.run_style_checker('/trunk/module', 'src/space_after.py')
    style_checker.assertNotEqual(p.status, 0, p.image)
    style_checker.assertRunOutputEqual(p, """\
src/space_after.py:1:17: W291 trailing whitespace
src/space_after.py:7:1: F821 undefined name 'space_before_paren'
src/space_after.py:7:19: E211 whitespace before '('
""")
5,339,728
def test_wild_g001_wild_g001_v(mode, save_output, output_format):
    """
    TEST :Syntax Validation - any : ANY (w/ namespace=##any) and instance
    document has elements from targetNamespace
    """
    assert_bindings(
        schema="msData/wildcards/wildG001.xsd",
        instance="msData/wildcards/wildG001.xml",
        class_name="Foo",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
5,339,729
def svn_ra_invoke_replay_revstart_callback(*args):
    """
    svn_ra_invoke_replay_revstart_callback(svn_ra_replay_revstart_callback_t _obj,
        svn_revnum_t revision, void replay_baton, svn_delta_editor_t editor,
        void edit_baton, apr_hash_t rev_props, apr_pool_t pool) -> svn_error_t
    """
    return apply(_ra.svn_ra_invoke_replay_revstart_callback, args)
5,339,730
def tree(ctx, rootpage):
    """Export metadata of a page tree."""
    if not rootpage:
        click.serror("No root page selected via --entity!")
        return 1

    outname = getattr(ctx.obj.outfile, 'name', None)
    with api.context() as cf:
        results = []
        try:
            #page = content.ConfluencePage(cf, rootpage, expand='metadata.labels,metadata.properties')
            #results.append(page.json)
            pagetree = cf.walk(rootpage, depth_1st=True, expand='metadata.labels,metadata.properties,version')
            for depth, data in pagetree:
                data.update(dict(depth=depth))
                results.append(data)
        except api.ERRORS as cause:
            # Just log and otherwise ignore any errors
            api.diagnostics(cause)
        else:
            ctx.obj.log.info('Got {} results.'.format(len(results)))

        if results:
            print_result(ctx, results)
5,339,731
def to_half_life(days):
    """ Return the constant [1/s] from the half life length [day] """
    s = days * 3600 * 24
    return -math.log(1/2) / s
5,339,732
def Send(dst_ip, data, sequence=0, spoof_source=False, dst_port=MDNS_PORT,
         src_port=MDNS_PORT, dns_name=TEST_QUERY):
    """
    Send one packet of MDNS with data.
    :param dst_ip: IP as string.
    :param data: Data as bytes/string.
    :param sequence: Number to use for sequence. Int.
    :param spoof_source: Default:False. Set as IP for spoofing.
    :param dst_port: ....
    :param src_port: ...
    :param dns_name: DNS name to put in the MDNS request.
    :return: semper vera!!!
    """
    payload = ""
    payload += "\x00"                           # TransID is 2 bytes. Using one for sequence.
    payload += struct.pack('B', sequence)
    payload += "\x00\x00"                       # Stndrt qry
    payload += "\x00\x01"                       # 1 questions
    payload += "\x00\x00"                       # 0 ans RRs
    payload += "\x00\x00"                       # 0 authority RRs
    payload += "\x00\x00"                       # 0 additional RRs
    # Start of query:
    payload += struct.pack('B', len(dns_name))  # Length? -> YES it is!
    payload += dns_name                         # name
    payload += "\x00"                           # Query Terminator
    payload += "\x00\x0c"                       # PTR request
    payload += "\x00\x01"                       # class IN

    if spoof_source is False:
        pkt = IP(
            dst=dst_ip
            # src = "1.1.1.1"
        ) / UDP(
            sport=src_port,
            dport=dst_port
        ) / Raw(
            load=payload
        )
    else:
        pkt = IP(
            dst=dst_ip,
            src=spoof_source
        ) / UDP(
            sport=src_port,
            dport=dst_port
        ) / Raw(
            load=data
        )
    send(pkt)
    return True
5,339,733
def accreds_logs_list(request):
    """Display the list of accreds"""
    from units.models import Unit

    main_unit = Unit.objects.get(pk=settings.ROOT_UNIT_PK)
    main_unit.set_rights_can_select(lambda unit: Accreditation.static_rights_can('LIST', request.user, unit))
    main_unit.set_rights_can_edit(lambda unit: Accreditation.static_rights_can('CREATE', request.user, unit))
    main_unit.check_if_can_use_hidden(request.user)

    if request.GET.get('upk'):
        update_current_unit(request, request.GET.get('upk'))

    return render(request, 'units/accreds/logs_list.html', {'main_unit': main_unit})
5,339,734
def write_pycode(CGpath, codes):
    """
    param1: string : path of file to generate with either .py or .ipynb extension.
    param2: string : Actual code strings to be written into the file.

    The function writes the provided code string into the .py file.
    """
    with open(CGpath, 'w') as f:
        f.write(codes)
5,339,735
def build_dict(file_name, max_vocab_size):
    """
    reads a list of sentences from a file and returns
    - a dictionary which maps the most frequent words to indices and
    - a table which maps indices to the most frequent words
    """
    word_freq = Counter()
    with open(file_name) as file:
        for line in file:
            word_freq.update(line.split())

    if max_vocab_size <= 0:
        max_vocab_size = len(word_freq)

    words, _ = zip(*word_freq.most_common(max_vocab_size))
    # ID of pad_string must be 0
    words = [pad_string, unk_string] + list(words)
    word2ID = {w: i for i, w in enumerate(words)}
    return word2ID, words
5,339,736
def from_net(func):
    """
    Decorator for functions that collect similarity data. It ignores the data-fetch
    mode configured in env and forces network-only data fetching for the whole task,
    then restores the previous data-fetch mode once the task is complete.

    :param func: a function that applies similarity and performs data collection
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Temporarily save g_data_fetch_mode from the env settings
        fetch_mode = ABuEnv.g_data_fetch_mode
        # Force the data-fetch mode to network-only
        ABuEnv.g_data_fetch_mode = ABuEnv.EMarketDataFetchMode.E_DATA_FETCH_FORCE_NET
        if fetch_mode != ABuEnv.EMarketDataFetchMode.E_DATA_FETCH_FORCE_NET:
            # Warn if the original setting was not already network-only
            logging.warning('data from net!!!')
        result = func(*args, **kwargs)
        # Restore the previous g_data_fetch_mode
        ABuEnv.g_data_fetch_mode = fetch_mode
        return result
    return wrapper
5,339,737
def create_index(connection, table_name, index):
    """Create index.

    Args:
        connection: pyodbc.connect() object, Connection to use when running Sql
        table_name: string, Table name including db schema (ex: my_schema.my_table)
        index: string, Column name of index (can put multiple columns comma delimited if desired)

    Returns:
        cursor object, Results of the call to pyodb.connection().cursor().execute(query)
    """
    cursor = connection.cursor()
    table_split = table_name.split('.')
    table = table_split[-1]
    if len(table_split) > 1:
        use_db = "USE {0}; ".format(table_split[0])
        run_sql(connection, use_db)
    if index is not None:
        idx_name = table + '_idx'
        sql = "SELECT name FROM sys.indexes where name = '{0}' and object_id = OBJECT_ID('{1}')".format(idx_name, table)
        log.debug("SQL to run: " + sql)
        try:
            exists = sql_get_query_data(connection, sql)
            val = exists.fetchone()[0]
            if val != idx_name:
                ddl2 = 'CREATE INDEX {0} ON {1}({2});'.format(idx_name, table_name, index)
                try:
                    cursor.execute(ddl2.encode('utf-8'))
                    connection.commit()
                except UnicodeDecodeError:
                    cursor.execute(ddl2)
                    connection.commit()
        except TypeError:
            log.info("Index does not exist, will attempt to create it")
            ddl2 = 'CREATE INDEX {0} ON {1}({2});'.format(idx_name, table_name, index)
            try:
                cursor.execute(ddl2.encode('utf-8'))
                connection.commit()
            except UnicodeDecodeError:
                cursor.execute(ddl2)
                connection.commit()
    return cursor
5,339,738
def profile(c):
    """Create an interactive CPU flame graph."""
    _, venv_bin, _ = get_venv(VENV)
    pyinstrument = venv_bin / 'pyinstrument'
    c.run(
        f'{pyinstrument.resolve()} --renderer html '
        f'{(venv_bin / project_name).resolve()} -v --format count --pages 5',
        pty=True)
5,339,739
def embedding_lookup(ids, params):
    """
    Returns the embeddings lookups.

    The difference of this function to TensorFlow's function is that this
    function expects the ids as the first argument and the parameters as the
    second; while, in TensorFlow, it is the other way around.

    :param ids: the ids
    :type ids: tf.Tensor
    :param params: the parameters
    :type params: tf.Tensor
    :return: the lookup
    :rtype: tf.Tensor
    """
    return tf.nn.embedding_lookup(params, ids)
5,339,740
def mock_pcluster_config(mocker, scheduler=None, extra_patches=None, patch_funcs=None):
    """Mock various components used to instantiate an instance of PclusterConfig."""
    mock_patches = get_mock_pcluster_config_patches(scheduler, extra_patches)
    for function, return_value in mock_patches.items():
        mocker.patch(function, return_value=return_value)
    mocker.patch.object(PclusterConfig, "_PclusterConfig__test_configuration")
5,339,741
def in_auto_mode(conx: Connection) -> bool:
    """Determine whether the controller is in AUTO or one of the MANUAL modes.

    Wraps the Karel IN_AUTO_MODE routine.

    NOTE: this method is moderately expensive, as it executes a Karel program
    on the controller.

    :returns: True if the controller is in AUTO mode
    :rtype: bool
    """
    ret = exec_karel_prg(conx, prg_name='dmh_autom')
    if not ret[JSON_SUCCESS]:
        raise DominhException("Select_TPE error: " + ret[JSON_REASON])
    return ret['in_auto_mode']
5,339,742
def pref(pref_name, default=None):
    """Return a preference value.

    Since this uses CFPreferencesCopyAppValue, Preferences can be defined
    several places. Precedence is:
        - MCX
        - /var/root/Library/Preferences/com.github.salopensource.sal.plist
        - /Library/Preferences/com.github.salopensource.sal.plist
        - default_prefs defined here.
    """
    default_prefs = {
        'ServerURL': 'http://sal',
        'osquery_launchd': 'com.facebook.osqueryd.plist',
        'SkipFacts': [],
        'SyncScripts': True,
        'BasicAuth': True,
        'GetGrains': False,
        'GetOhai': False,
        'LastRunWasOffline': False,
        'SendOfflineReport': False,
    }

    pref_value = CFPreferencesCopyAppValue(pref_name, BUNDLE_ID)
    if pref_value is None and default:
        pref_value = default
    elif pref_value is None and pref_name in default_prefs:
        pref_value = default_prefs.get(pref_name)
        # we're using a default value. We'll write it out to
        # /Library/Preferences/<BUNDLE_ID>.plist for admin
        # discoverability
        set_pref(pref_name, pref_value)

    if isinstance(pref_value, NSDate):
        # convert NSDate/CFDates to strings
        pref_value = str(pref_value)

    return pref_value
5,339,743
def tRange(tStart, tStop, *, timedelta=300):
    """
    Generate datetime list between tStart and tStop with fixed timedelta.

    Parameters
    ----------
    tStart: datetime
        start time.
    tStop: datetime
        stop time.

    Keywords
    --------
    timedelta: int
        time delta in seconds (default: 300).

    Returns
    -------
    tList: list
        datetime between tStart and tStop with fixed timedelta.

    Examples
    --------
    >>> import datetime as dt
    >>> tList = tRange(dt.datetime(2011, 1, 1), dt.datetime(2011, 1, 2),
    ...                timedelta=3600 * 12)
    >>> tList
    [datetime.datetime(2011, 1, 1, 0, 0),
     datetime.datetime(2011, 1, 1, 12, 0),
     datetime.datetime(2011, 1, 2, 0, 0)]

    History
    -------
    2020-02-25 First version.
    """
    nTimedelta = int((tStop - tStart) / dt.timedelta(seconds=timedelta)) + 1
    tList = [tStart + dt.timedelta(seconds=timedelta * i)
             for i in range(0, nTimedelta)
             if tStart + dt.timedelta(seconds=timedelta * i) <= tStop]
    return tList
5,339,744
def insert_unit_record(cnx, time, user_id, unit_name, level, gear_level, gp, stars):
    """
    Adds the specified unit to the database for a user.
    It will check previous entries and skip it if it's a duplicate record;
    since only a handful of units change every day, this will save roughly
    a bazillionmilliontrillion redundant database entries
    """
    cursor = cnx.cursor()
    query = """
        insert into unit(time, user_id, unit_name, level, gear_level, gp, stars)
        select %s, %s, %s, %s, %s, %s, %s
        from unit
        where (user_id = %s and unit_name = %s and gp = %s)
        having count(*) = 0;"""
    cursor.execute(query, (time, user_id, unit_name, level, gear_level, gp, stars,
                           user_id, unit_name, gp))
    cnx.commit()
5,339,745
def TransformInversePoints(T, points):
    """Transforms a Nxk array of points by the inverse of an affine matrix"""
    kminus = T.shape[1] - 1
    return numpy.dot(points - numpy.tile(T[0:kminus, kminus], (len(points), 1)),
                     T[0:kminus, 0:kminus])
5,339,746
def act_func(act):
    """function that can choose activation function

    Args:
        act: (str) activation function name

    Returns:
        corresponding Pytorch activation function
    """
    return nn.ModuleDict([
        ['relu', nn.ReLU(inplace=True)],
        ['leaky_relu', nn.LeakyReLU(negative_slope=0.01, inplace=True)],
        ['selu', nn.SELU(inplace=True)]
    ])[act]
5,339,747
def ajax_get_hashtags():
    """Flask Ajax Get Hashtag Route."""
    f = request.args.get('f', 0, type=int)
    t = request.args.get('t', 0, type=int)
    hashtags_list = get_hashtags()
    try:
        if t == 0:
            return jsonify(dict(hashtags_list[f:]))
        elif t > len(hashtags_list):
            return jsonify(dict(hashtags_list[f:]))
        else:
            return jsonify(dict(hashtags_list[f:t]))
    except:
        return False
5,339,748
def imprimeJogo(letrasErradas, letrasAcertadas, palavraSecreta):
    """
    Built from the global variable that holds the game's images in ASCII art,
    as well as the correctly guessed letters, the wrong letters, and the
    secret word
    """
5,339,749
def extract_pdf_information(pdf_path):
    """ Print and return pdf information """
    # read binary
    with open(pdf_path, 'rb') as f:
        pdf = PdfFileReader(f)
        information = pdf.getDocumentInfo()
        number_of_pages = pdf.getNumPages()

    txt = f"""
    Information about {pdf_path}:

    Author: {information.author}
    Creator: {information.creator}
    Producer: {information.producer}
    Subject: {information.subject}
    Title: {information.title}
    Number of pages: {number_of_pages}
    """

    print(txt)
    return information
5,339,750
def normalize_string(subject: str) -> str:
    """Deprecated function alias"""
    logger.warn("normalize_string is deprecated")
    return string_to_title(subject)
5,339,751
def get_default_args(func):
    """
    Return dict for parameter name and default value.

    Parameters
    ----------
    func : Callable
        A function to get parameter name and default value.

    Returns
    -------
    Dict
        Parameter name and default value.

    Examples
    --------
    >>> def test_func(a: int, b: str = "c") -> int:
    ...     return a+1
    >>> get_default_args(test_func)
    {'b': 'c'}
    >>> def test_func2(a: int = 1, b="c") -> int:
    ...     return a+1
    >>> get_default_args(test_func2)
    {'a': 1, 'b': 'c'}
    """
    signature = inspect.signature(func)
    return {
        k: v.default
        for k, v in signature.parameters.items()
        if v.default is not inspect.Parameter.empty
    }
5,339,752
def main():
    """
    Stub function for command line tool that launches the plasma calculator notebook.
    """
    parser = argparse.ArgumentParser(description="Plasma calculator")
    parser.add_argument(
        "--port", type=int, default=8866, help="Port to run the notebook"
    )
    parser.add_argument(
        "--dark", action="store_true", help="Turn on dark mode, reduces eye strain"
    )
    parser.add_argument(
        "--no-browser", action="store_true", help="Do not open the browser"
    )

    # module_path = plasmapy.__path__[0]
    module_path = os.path.dirname(os.path.abspath(__file__))
    computed_calculator_path = os.path.join(module_path, "plasma_calculator.ipynb")

    args = parser.parse_args()
    theme = "dark" if args.dark else "light"
    no_browser = "--no-browser" if args.no_browser else ""

    command = f"voila {no_browser} --port={args.port} --theme={theme} {computed_calculator_path} \
        --VoilaConfiguration.file_whitelist favicon.ico"
    try:
        subprocess.call(shlex.split(command))
    except KeyboardInterrupt:
        print("Stopping calculator! Bye")
5,339,753
def sec_to_time(seconds):
    """Transform seconds into a formatted time string.

    Parameters
    -----------
    seconds : int
        Seconds to be transformed.

    Returns
    -----------
    time : string
        A well formatted time string.
    """
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    return "%02d:%02d:%02d" % (h, m, s)
5,339,754
def chunks(list_of_features, n, agol_layer):
    """
    Yield successive n-sized chunks from list_of_features.

    list_of_features: list. List of features to be updated.
    n: numeric. chunk size, 1000 is the max for AGOL feature layer
    agol_layer: AGOL layer. Ex:
        flayer = gis.content.get(feature_layer_id)
        agol_layer = flayer.layers[0]
    """
    for i in range(0, len(list_of_features), n):
        chunk_list = list_of_features[i:i + n]
        agol_layer.edit_features(updates=chunk_list)
        print("update successful")
5,339,755
def list_files(root_dir, extension, min_N=None, max_N=None, exclude=[], random_seed=24):
    """Yields all files with allowed extensions.

    The absolute path is returned for each file.

    Parameters
    ----------
    root_dir : str, Path
        Top level directory
    extension : str, iterable
        One or more extensions to filter files by.
    min_N : int
        Skip sub directory if it has less files than this.
    max_N : int
        Maximum amount of files to return for each sub directory.
        If sub dir exceeds this limit, files are shuffled and then
        sliced like so [:max_N]
    exclude : iterable
        Directories to ignore

    Yields
    ------
    A new file path (Path).
    """
    if type(extension) is not list:
        extension = [extension]

    for dirpath, _, filenames in os.walk(root_dir):
        dirpath = Path(dirpath)
        if dirpath.name in exclude:
            continue
        if min_N and len(filenames) < min_N:
            continue
        if max_N and len(filenames) > max_N:
            random.seed(random_seed)
            random.shuffle(filenames)
            filenames = filenames[:max_N]
        for filename in filenames:
            filepath = dirpath / filename
            if filepath.suffix in extension:
                yield filepath.resolve()
5,339,756
def power_oos(dmap_object, Y):
    """
    Performs out-of-sample extension to calculate the values of the diffusion
    coordinates at each given point using the power-like method.

    Parameters
    ----------
    dmap_object : DiffusionMap object
        Diffusion map upon which to perform the out-of-sample extension.
    Y : array-like, shape (n_query, n_features)
        Data for which to perform the out-of-sample extension.

    Returns
    -------
    phi : numpy array, shape (n_query, n_eigenvectors)
        Transformed value of the given values.
    """
    m = int(Y.shape[0])
    # Evaluate on ref points
    k_yx, y_bandwidths = dmap_object.local_kernel.compute(Y, return_bandwidths=True)
    yy_right_norm_vec = dmap_object._make_right_norm_vec(k_yx, y_bandwidths)[1]
    k_yy_diag = dmap_object.local_kernel.kernel_fxn(0, dmap_object.epsilon_fitted)
    data_full = np.vstack([dmap_object.local_kernel.data, Y])
    k_full = sps.hstack([k_yx, sps.eye(m) * k_yy_diag])
    right_norm_full = np.hstack([dmap_object.right_norm_vec, yy_right_norm_vec])
    weights = dmap_object._compute_weights(data_full)

    P = dmap_object._left_normalize(dmap_object._right_normalize(k_full, right_norm_full, weights))
    L = dmap_object._build_generator(P, dmap_object.epsilon_fitted, y_bandwidths)
    L_yx = L[:, :-m]
    L_yy = np.array(L[:, -m:].diagonal())
    adj_evals = dmap_object.evals - L_yy.reshape(-1, 1)
    dot_part = np.array(L_yx.dot(dmap_object.dmap))
    return (1. / adj_evals) * dot_part
5,339,757
def main(token: str, nextcloud_username: str = None, nextcloud_password: str = None):
    """
    Starts the Antistasi Discord Bot 'AntiPetros'.

    Instantiates the bot, loads the extensions and starts the bot with the Token.
    This is separated from the Cli run function so the bot can be started via cli
    but also from within vscode.

    Args:
        token_file ([str]): discord token
        nextcloud_username([str]): username for dev_drive on nextcloud
        nexctcloud_password([str]): password for dev_drive on nextcloud
    """
    log.info(glog.NEWRUN())
    if nextcloud_username is not None:
        os.environ['NEXTCLOUD_USERNAME'] = nextcloud_username
    if nextcloud_password is not None:
        os.environ['NEXTCLOUD_PASSWORD'] = nextcloud_password
    os.environ['INFO_RUN'] = "0"
    anti_petros_bot = AntiPetrosBot(token=token)
    anti_petros_bot.run()
5,339,758
def test_ft_tf_queue_thre_con_unicast():
    """
    Author : Prudvi Mangadu (prudvi.mangadu@broadcom.com)
    """
    tf_data.index = 0
    tf_data.threshold = 20
    count = 1
    while True:
        result = 1
        result2 = 1
        cutils.banner_log("TEST Starts for iteration - {}".format(count))

        st.log("Unicast queue threshold config")
        tfapi.config_threshold(vars.D1, threshold_type='queue', port_alias=vars.D1T1P4,
                               index=tf_data.index, buffer_type='unicast', value=tf_data.threshold)

        st.log("Unicast queue threshold config verify")
        if not tfapi.verify_threshold(vars.D1, threshold_type='queue', buffer_type='unicast',
                                      port_alias=vars.D1T1P4, uc0=tf_data.threshold):
            st.error("Unable to configure unicast queue threshold value on unicast-queue buffer")
            result = 0

        st.log("Traffic start and stop")
        tf_tg_traffic_start_stop(tf_data.unicast, tf_data.traffic_duration)

        st.log("Checking unicast queue breach event")
        if not tfapi.verify_threshold_breaches(vars.D1, buffer='queue', port=vars.D1T1P4,
                                               index=tf_data.index, threshold_type='unicast'):
            st.error("Unicast queue threshold breach Event is not found")
            if tf_data.need_debug_prints:
                tf_collecting_debug_logs_when_test_fails('unicast', tf_data.traffic_duration, tf_data.unicast)
            result = 0
            result2 = 0

        st.log("Clear Unicast queue threshold breach")
        tfapi.clear_threshold(vars.D1, breach='all')

        st.log("Checking unicast queue breach event")
        if tfapi.verify_threshold_breaches(vars.D1, buffer='queue', port=vars.D1T1P4,
                                           index=tf_data.index, threshold_type='unicast'):
            st.error("Post clear - Unicast queue threshold breach Event is found")
            result = 0

        st.log("Unicast queue threshold config clear")
        tfapi.clear_threshold(vars.D1, threshold_type='queue', port_alias=vars.D1T1P4,
                              index=tf_data.index, buffer_type='unicast')

        st.log("Unicast queue threshold config verify")
        if tfapi.verify_threshold(vars.D1, threshold_type='queue', buffer_type='unicast',
                                  port_alias=vars.D1T1P4, uc0=tf_data.threshold):
            st.error("Unable to configure unicast queue threshold value")
            result = 0

        if not result2 and tf_data.need_debug_prints:
            st.log("As Breach events are not observed collecting logs by disabling the Thresholds")
            tf_collecting_debug_logs_when_test_fails('unicast', tf_data.traffic_duration, tf_data.unicast)
            tfapi.clear_threshold(vars.D1, breach='all')

        if result:
            st.log("Test PASSED in Iteration {}.".format(count))
            report_result(result)
            break
        if count == tf_data.test_max_retries_count:
            st.log("Test Failed in all {} Iterations. Hence Declaring as FAIL".format(count))
            report_result(result)
        st.log("Test Failed in the Iteration-{}. Hence re-testing".format(count))
        count += 1
5,339,759
def mock_get_schedules_response(
    schedule_object,
) -> Generator[MagicMock, Any, None]:
    """Fixture for mocking the get_schedules response.

    Args:
        schedule_object: Fixture of mocked ``SwitcherV2Schedule`` object.

    Yields:
        Mocked ``SwitcherV2GetScheduleResponseMSG`` object.
    """
    mock_response = MagicMock(messages.SwitcherV2GetScheduleResponseMSG)
    mock_response.successful = True
    mock_response.found_schedules = True
    mock_response.get_schedules = [schedule_object]
    mock_response.msg_type = messages.ResponseMessageType.GET_SCHEDULES

    with patch(
        "request_handlers.messages.SwitcherV2GetScheduleResponseMSG",
        new=mock_response,
    ) as patcher:
        yield patcher
5,339,760
def detect_face_landmarks(image, face_rect=None):
    """
    detect face landmarks, if face_rect is None, the face_rect is the same size as image
    -> object

    :param image:
    :param face_rect: where the face is
    """
    if face_rect is None:
        face_rect = dlib.rectangle(0, 0, image.shape[0], image.shape[1])
    return _detect_face_landmarks(image, face_rect)
5,339,761
def main_handler(event, context):
    """
    Pull the specified files from S3 and push to a Shared Folder in Google Drive.
    The payload passed in contains a list of Excel file names in the array fileList.
    """
    print('payload:', event)
    bucket = os.environ.get('REPORTS_BUCKET')
    credentials_parameter = os.environ.get('GOOGLE_CREDENTIALS_PARAMETER')
    folder_id_parameter = os.environ.get('GOOGLE_SHARED_FOLDER_ID_PARAMETER')
    credentials = get_service_account_credentials(credentials_parameter)
    folder_id = get_folder_id(folder_id_parameter)

    # note regarding cache_discovery=False
    # https://github.com/googleapis/google-api-python-client/issues/299
    service = discovery.build('drive', 'v3', credentials=credentials, cache_discovery=False)
    s3_client = boto3.client('s3')

    """
    for file_name in event['fileList']:
        download_path = '/tmp/{}'.format(file_name)
        s3_client.download_file(bucket, file_name, download_path)
        upload_file(service, download_path, file_name, file_name, folder_id,
                    'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    """

    # download from gdrive to S3
    os.chdir('/tmp/')
    request = service.files().get_media(fileId='1SrhmjVCW6dDV_vStCZHRzwI5dO_snvAo')
    fh = io.FileIO('testfile.xlsx', 'wb')
    downloader = MediaIoBaseDownload(fh, request)
    done = False
    while done is False:
        status, done = downloader.next_chunk()
        print(fh)
        print("Download %d%%." % int(status.progress() * 100))

    a = os.listdir('/tmp/')
    for x in a:
        print('file in temp directorys is ' + x)

    s3_client.upload_file("/tmp/testfile.xlsx", "gdil-s3", "testfile.xlsx")
5,339,762
def ensure_iterable(obj):
    """Ensure ``obj`` is a sequential iterable object that is not a string type.

    1. If ``obj`` is :const:`None` return an empty :class:`tuple`.
    2. If ``obj`` is an instance of :class:`str`, :class:`bytes`, or :class:`Mapping`,
       or not :class:`Iterable`, return a list containing ``obj``
    3. Return ``obj``

    Parameters
    ----------
    obj : object
        The object to ensure iterability of

    Returns
    -------
    :class:`Sequence`
        Returns either ``obj`` or a wrapper around ``obj``
    """
    if obj is None:
        return tuple()
    if not isinstance(obj, Iterable) or isinstance(obj, basestring) or isinstance(obj, Mapping):
        return [obj]
    return obj
5,339,763
def test_dist(**kwargs):
    """
    Test Distance
    """
    a = np.random.random((2, 3))
    d = ahrs.utils.metrics.euclidean(a[0], a[1])
    result = np.allclose(d, np.linalg.norm(a[0] - a[1]))
    return result
5,339,764
def get_pca(coords):
    """
    Parameters
    -----------
    coords : 2D np.array of points

    Returns
    ---------
    new_coords : 2D np.array of points
        keeps original number of dimension as input coords
    variance_ratio : tuple
    """
    pca = PCA(n_components=3)
    # pca.fit(coords)
    # new_coords = pca.transform(coords)
    new_coords = pca.fit_transform(coords)
    return new_coords, pca.explained_variance_ratio_
5,339,765
def get_form_info(email):
    """Gets all existing application form info from the database."""
    user_id = get_user_id(email)
    if not user_id:
        return (False, "Invalid user ID. Please contact the organizers.")

    query = """
        SELECT * FROM applications WHERE user_id = %s AND application_year = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, [user_id, app_year.year + "0000"])
        application = cursor.fetchone()

    query = """
        SELECT * FROM members WHERE user_id = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, [user_id])
        member = cursor.fetchone()

    query = """
        SELECT * FROM diet WHERE user_id = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, [user_id])
        diet = cursor.fetchall()

    query = """
        SELECT * FROM race WHERE user_id = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, [user_id])
        race = cursor.fetchall()

    validationForm = ValidationForm()
    validationForm.fill(application, member)

    return (FormInfo(application, member, diet, race), validationForm)
5,339,766
def getLog():
    """simple wrapper around basic logger"""
    return logging
5,339,767
def _date(defval, t):
    """
    Supported formats:
        unix timestamp
        date string in yyyy-mm-dd format
        date string in yyyy/mm/dd format
        date string in yyyymmdd format
    If any of the year/month/day parts is 0, it will be converted to 1.
    """
    if t is None:
        return defval

    if isinstance(t, (int, float)):
        return datetime.fromtimestamp(t).strftime('%Y-%m-%d %H:%M:%S')

    lt = len(t)
    if lt < 8:
        return defval
    if lt == 8:
        format_str = '%Y%m%d'
    else:
        t = t.replace('/', '-')
        format_str = '%Y-%m-%d %H:%M:%S'
        if lt > 19:
            format_str += '.%f'
    try:
        return str(datetime.strptime(t, format_str))
    except:
        return defval
5,339,768
def compute_lat_long_distance(point1, point2):
    """
    Compute the distance between two records that have fields 'lat' and 'lon'.
    See details and reference implementation at
    http://andrew.hedges.name/experiments/haversine/

    :param point1: a record with { 'lat', 'lon' }
    :param point2: a record with { 'lat', 'lon' }
    :return:
    """
    lat1 = degree_to_rad(point1['lat'])
    lat2 = degree_to_rad(point2['lat'])
    lon1 = degree_to_rad(point1['lon'])
    lon2 = degree_to_rad(point2['lon'])

    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = math.sin(dlat / 2) * math.sin(dlat / 2) + \
        math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) * math.sin(dlon / 2)
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    earth_radius = 3961  # Use 6373 for km
    d = earth_radius * c  # In miles

    return round(d, 3)
5,339,769
def entropy_analysis(data_df):
    """
    Masked Shannon entropy analysis for sequences

    Parameters
    ----------
    data_df: pandas.DataFrame
        merged Pandas dataframe

    Returns
    -------
    H_list: list
        entropy values for all positions
    null_freq_list: list
        masked percentage for all positions
    """
    seq_list = data_df['sequence'].values.tolist()
    base_set = set([])
    for seq in seq_list:
        base_set.update(set(seq))

    H_list = []
    null_freq_list = []
    STEP = ceil(len(seq_list[0]) / 10)
    for base_idx in range(len(seq_list[0])):
        if base_idx % STEP == 0:
            logging.info('Entropy analysis in progress: {}% completed.'.format(10 * base_idx // STEP))
        H, null_freq = base_entropy_masked(seq_list, base_set, base_idx)
        H_list.append(H)
        null_freq_list.append(null_freq)
    logging.info('Entropy analysis in progress: DONE.')
    return H_list, null_freq_list
5,339,770
def set_global_cfg(cfg: CfgNode) -> None:
    """
    Let the global config point to the given cfg.

    Assume that the given "cfg" has the key "KEY", after calling
    `set_global_cfg(cfg)`, the key can be accessed by:
    ::
        from detectron2.config import global_cfg
        print(global_cfg.KEY)

    By using a hacky global config, you can access these configs anywhere,
    without having to pass the config object or the values deep into the code.
    This is a hacky feature introduced for quick prototyping / research exploration.
    """
    global global_cfg
    global_cfg.clear()
    global_cfg.update(cfg)
5,339,771
def get_requirements(filename):
    """
    Helper function to read the list of requirements from a file
    """
    dependency_links = []
    with open(filename) as requirements_file:
        requirements = requirements_file.read().strip('\n').splitlines()
    requirements = [req for req in requirements if not req.startswith('#')]
    for i, req in enumerate(requirements):
        if ':' in req:
            match_obj = re.match(r"git\+(?:https|ssh|http):.*#egg=(.*)-(.*)", req)
            assert match_obj, "Cannot make sense of url {}".format(req)
            requirements[i] = "{req}=={ver}".format(req=match_obj.group(1), ver=match_obj.group(2))
            dependency_links.append(req)
    return requirements, dependency_links
5,339,772
def nigam_and_jennings_response(acc, dt, periods, xi):
    """
    Implementation of the response spectrum calculation from Nigam and Jennings (1968).

    Ref: Nigam, N. C., Jennings, P. C. (1968) Digital calculation of response spectra
    from strong-motion earthquake records. National Science Foundation.

    :param acc: acceleration in m/s2
    :param periods: response periods of interest
    :param dt: time step of the acceleration time series
    :param xi: critical damping factor
    :return: response displacement, response velocity, response acceleration
    """
    acc = -np.array(acc, dtype=np.float)
    periods = np.array(periods, dtype=np.float)
    if periods[0] == 0:
        s = 1
    else:
        s = 0
    w = 6.2831853 / periods[s:]
    dt = np.float(dt)
    xi = np.float(xi)
    # implement: delta_t should be less than period / 20
    a, b = compute_a_and_b(xi, w, dt)

    resp_u = np.zeros([len(periods), len(acc)], dtype=np.float)
    resp_v = np.zeros([len(periods), len(acc)], dtype=np.float)

    for i in range(len(acc) - 1):
        # possibly speed up using scipy.signal.lfilter
        # x_i+1 = A cross (u, v) + B cross (acc_i, acc_i+1)  # Eq 2.7a
        resp_u[s:, i + 1] = (a[0][0] * resp_u[s:, i] + a[0][1] * resp_v[s:, i] +
                             b[0][0] * acc[i] + b[0][1] * acc[i + 1])
        resp_v[s:, i + 1] = (a[1][0] * resp_u[s:, i] + a[1][1] * resp_v[s:, i] +
                             b[1][0] * acc[i] + b[1][1] * acc[i + 1])

    w2 = w ** 2
    if s:
        sdof_acc = np.zeros_like(resp_u, dtype=np.float)
        sdof_acc[s:] = -2 * xi * w[:, np.newaxis] * resp_v[s:] - w2[:, np.newaxis] * resp_u[s:]
        sdof_acc[0] = acc
    else:
        sdof_acc = -2 * xi * w[:, np.newaxis] * resp_v[s:] - w2[:, np.newaxis] * resp_u[s:]
    return resp_u, resp_v, sdof_acc
5,339,773
def rebin_BTSettl(make_unique=False):
    """
    Rebin BTSettle models to atlas ck04 resolution; this makes
    spectrophotometry MUCH faster

    makes new directory: BTSettl_rebin

    Code expects to be run in cdbs/grid directory
    """
    # Get an atlas ck04 model, we will use this to set wavelength grid
    sp_atlas = get_castelli_atmosphere()

    # Create cdbs/grid directory for rebinned models
    path = 'BTSettl_rebin/'
    if not os.path.exists(path):
        os.mkdir(path)

    # Read in the existing catalog.fits file and rebin every spectrum.
    cat = fits.getdata('BTSettl/catalog.fits')
    files_all = [cat[ii][1].split('[')[0] for ii in range(len(cat))]
    #==============================#
    #tmp = []
    #for ii in files_all:
    #    if ii.startswith('btp00'):
    #        tmp.append(ii)
    #files_all = tmp
    #=============================#

    print('Rebinning BTSettl spectra')
    if make_unique:
        print('Making unique')
        make_wavelength_unique(files_all, 'BTSettl')
        print('Done')

    for ff in range(len(files_all)):
        vals = cat[ff][0].split(',')
        temp = float(vals[0])
        metal = float(vals[1])
        logg = float(vals[2])

        # Fetch the BTSettl spectrum, rebin flux
        try:
            sp = pysynphot.Icat('BTSettl', temp, metal, logg)
            flux_rebin = rebin_spec(sp.wave, sp.flux, sp_atlas.wave)

            # Make new output
            c0 = fits.Column(name='Wavelength', format='D', array=sp_atlas.wave)
            c1 = fits.Column(name='Flux', format='E', array=flux_rebin)
            cols = fits.ColDefs([c0, c1])
            tbhdu = fits.BinTableHDU.from_columns(cols)
            prihdu = fits.PrimaryHDU()
            tbhdu.header['TUNIT1'] = 'ANGSTROM'
            tbhdu.header['TUNIT2'] = 'FLAM'

            outfile = path + files_all[ff].split('[')[0]
            finalhdu = fits.HDUList([prihdu, tbhdu])
            finalhdu.writeto(outfile, overwrite=True)
        except:
            pdb.set_trace()
            orig_file = '{0}/{1}'.format('BTSettl/', files_all[ff].split('[')[0])
            outfile = path + files_all[ff].split('[')[0]
            cmd = 'cp {0} {1}'.format(orig_file, outfile)
            os.system(cmd)

        print('Done {0} of {1}'.format(ff, len(files_all)))
    return
5,339,774
def prompt_for_asking_mfa_code(perfect_profile: ProfileTuple):
    """Show a prompt asking the user to enter the MFA token for the given profile"""
    print(PROMPT_ASK_MFA_TOKEN_FOR_PROFILE_BEFORE + perfect_profile.name + PROMPT_ASK_MFA_TOKEN_FOR_PROFILE_AFTER)
5,339,775
def binary_hamiltonian(op, nqubits, qubits1, qubits2, weights, device=None):
    """Generates tt-tensor classical Ising model Hamiltonian (two-qubit interaction
    terms in a single basis).

    Hamiltonian of the form:
        H = sum_i omega_i sigma_ind1(i) sigma_ind2(i)
    where omega_i are the Hamiltonian weights, sigma is the operator specified by op,
    and ind1, ind2 are the qubit numbers at index i of the given spins and weight values.

    Parameters
    ----------
    op : tt-tensor, single-qubit operator to encode MaxCut graph
    nqubits : int, number of qubits (vertices) to encode in MaxCut problem
    qubits1 : List/tensor of ints, qubit indices
    qubits2 : List/tensor of ints, qubit indices
    weights : List/tensor of real floats, graph weights

    Returns
    -------
    Hamiltonian encoding specified classical Ising model graph.
    """
    H, inds_min, inds_max = [], minimum(qubits1, qubits2), maximum(qubits1, qubits2)
    for i in range(0, len(qubits1)):
        #H = tt_matrix_sum(H, _two_qubit_interaction(op, op, inds_min[i], inds_max[i], weights[i], nqubits))
        H = tt_matrix_sum(H, _two_qubit_interaction(op, op, inds_min[i], inds_max[i], weights[i], nqubits), device=device)
    return [*H]
5,339,776
def map_entry(entry, fields):
    """
    Retrieve the entry from the given fields and replace it if it should
    have a different name within the database.

    :param entry: is one of the following:
        - invalid field name
        - command (e.g. $eq)
        - valid field with no attribute name
        - valid field with an attribute name to use instead
    """
    field = fields.get(entry)
    if isinstance(field, ListField) and isinstance(field.inner, EmbeddedField):
        fields = field.inner.embedded_document_cls.schema.fields
    elif isinstance(field, EmbeddedField):
        fields = field.embedded_document_cls.schema.fields
    return getattr(field, 'attribute', None) or entry, fields
5,339,777
def seqlogo_hairpin(N, target='none', ligand='theo', pam=None):
    """
    Randomize the stem linking the aptamer to the sgRNA and the parts of the
    sgRNA that were the most conserved after being randomized in previous
    screens.

    Specifically, I identified these conserved positions by looking at a
    sequence logo of the relatively few (≈20) clones I sequenced from my
    previous screen. The theory behind this strategy is that positions with a
    clear preference for some nucleotides over others are more likely to be
    important for sensor activity.

    In this case, the previous screen was ``mhf`` and the sequence logo showed
    that all three positions in the ruler that were randomized had a
    preference for a non-native nucleotide. (In fact, the preference was for C
    in all three cases.) The ``mhf`` screen kept two positions in the ruler
    fixed, but since these positions were flanked by important-seeming
    positions on both sides, I decided to randomize the whole ruler this time.

    I am also randomizing the stem (often called a communication module) that
    connects the aptamer to the sgRNA. The ``N`` parameter dictates how long
    this stem should be, in base pairs, not counting any base pairs that are
    implicitly included with the aptamer. (Note: I realized that including one
    base pair on the end of the aptamer domain makes simulating the whole
    construct easier, so all the new aptamers include one base pair like that.
    But the theophylline aptamer predates this realization, so it doesn't.)

    Parameters
    ----------
    N: int
        The length of the communication module, in base pairs. Recommended
        values are 3 and 4.
    """
    # Make sure the length of the communication module makes sense.
    if N < 0:
        raise ValueError('qh: N must be >= 0')

    # Base this library on the optimized sgRNA described by Dang et al.
    sgrna = on(pam=pam, target=target)

    # Randomize the entire ruler.
    sgrna['ruler'].seq = 'GU' + 'N' * (len(sgrna['ruler']) - 2)

    # Randomize the communication module.
    sgrna['hairpin/5'].seq = N * 'N'
    sgrna['hairpin/3'].seq = N * 'N'

    # Insert the aptamer above the communication module.
    sgrna['hairpin/o'].attachment_sites = 0, 4
    sgrna.attach(aptamer(ligand), 'hairpin/o', 0, 'hairpin/o', 4)

    return sgrna
5,339,778
def azure_firewall_ip_group_list_command(client: AzureFirewallClient, args: Dict[str, Any]) -> CommandResults:
    """
    List IP groups in resource group or subscription.

    Args:
        client (AzureFirewallClient): Azure Firewall API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.
    """
    resource = args.get('resource')

    limit = arg_to_number(args.get('limit') or '50')
    page = arg_to_number(args.get('page') or '1')
    validate_pagination_arguments(limit, page)

    readable_message = get_pagination_readable_message(header='IP Group List:', limit=limit, page=page)

    start_offset = (page - 1) * limit
    end_offset = start_offset + limit

    complete_requests = False
    total_response = {'value': []}
    response = client.azure_firewall_ip_group_list_request(resource=resource)

    while not complete_requests:
        total_response['value'].extend(response.get('value'))
        if len(total_response['value']) >= end_offset or not response.get('nextLink'):
            complete_requests = True
        else:
            response = client.azure_firewall_ip_group_list_request(resource=resource,
                                                                   next_link=response.get('nextLink'))

    return generate_ip_group_command_output(total_response.get('value')[start_offset: end_offset],
                                            readable_header=readable_message)
5,339,779
def plot_heatmap(df, title=""):
    """
    Plotly heatmap wrapper

    :param df: pd.DataFrame
    :param title: str
    """
    fig = go.Figure(
        data=go.Heatmap(z=df.values, x=df.columns, y=df.index, colorscale="RdBu")
    )
    fig.update_layout(template=_TEMPLATE, title=title, legend_orientation="h")
    return fig
5,339,780
def normalise_dir_pattern(repo_dir, d):
    """
    if d is a relative path, prepend the repo_dir to it
    """
    if not d.startswith(repo_dir):
        return os.path.join(repo_dir, d)
    else:
        return d
5,339,781
def describe_dependency():
    """
    Describe Dependency supported
    """
    print('acl - clausal modifier of noun')
    print('advcl - adverbial clause modifier')
    print('advmod - adverbial modifier')
    print('amod - adjectival modifier')
    print('appos - appositional modifier')
    print('aux - auxiliary')
    print('case - case marking')
    print('ccomp - clausal complement')
    print('compound - compound')
    print('compound:plur - plural compound')
    print('conj - conjunct')
    print('cop - cop')
    print('csubj - clausal subject')
    print('dep - dependent')
    print('det - determiner')
    print('fixed - multi-word expression')
    print('flat - name')
    print('iobj - indirect object')
    print('mark - marker')
    print('nmod - nominal modifier')
    print('nsubj - nominal subject')
    print('obj - direct object')
    print('parataxis - parataxis')
    print('root - root')
    print('xcomp - open clausal complement')
    print(
        'you can read more from https://universaldependencies.org/treebanks/id_pud/index.html'
    )
5,339,782
def patch(diff, orig_file, filename, request=None):
    """Apply a diff to a file.

    This delegates out to ``patch`` because no one except Larry Wall knows
    how to patch.

    Args:
        diff (bytes):
            The contents of the diff to apply.

        orig_file (bytes):
            The contents of the original file.

        filename (unicode):
            The name of the file being patched.

        request (django.http.HttpRequest, optional):
            The HTTP request, for use in logging.

    Returns:
        bytes:
        The contents of the patched file.

    Raises:
        reviewboard.diffutils.errors.PatchError:
            An error occurred when trying to apply the patch.
    """
    log_timer = log_timed('Patching file %s' % filename, request=request)

    if not diff.strip():
        # Someone uploaded an unchanged file. Return the one we're patching.
        return orig_file

    # Prepare the temporary directory if none is available
    tempdir = tempfile.mkdtemp(prefix='reviewboard.')

    try:
        orig_file = convert_line_endings(orig_file)
        diff = convert_line_endings(diff)

        (fd, oldfile) = tempfile.mkstemp(dir=tempdir)
        f = os.fdopen(fd, 'w+b')
        f.write(orig_file)
        f.close()

        newfile = '%s-new' % oldfile

        process = subprocess.Popen(['patch', '-o', newfile, oldfile],
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   cwd=tempdir)

        with controlled_subprocess('patch', process) as p:
            stdout, stderr = p.communicate(diff)
            failure = p.returncode

        try:
            with open(newfile, 'rb') as f:
                new_file = f.read()
        except Exception:
            new_file = None

        if failure:
            rejects_file = '%s.rej' % newfile

            try:
                with open(rejects_file, 'rb') as f:
                    rejects = f.read()
            except Exception:
                rejects = None

            error_output = force_text(stderr.strip() or stdout.strip())

            # Munge the output to show the filename instead of
            # randomly-generated tempdir locations.
            base_filename = os.path.basename(filename)

            error_output = (
                error_output
                .replace(rejects_file, '%s.rej' % base_filename)
                .replace(oldfile, base_filename)
            )

            raise PatchError(filename=filename,
                             error_output=error_output,
                             orig_file=orig_file,
                             new_file=new_file,
                             diff=diff,
                             rejects=rejects)

        return new_file
    finally:
        shutil.rmtree(tempdir)
        log_timer.done()
5,339,783
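# A hedged usage sketch for patch() above. It requires the external `patch`
# binary on PATH and the module-level helpers the function relies on
# (convert_line_endings, controlled_subprocess, log_timed, PatchError).
# The unified diff below is made up for illustration.
orig = b'hello\n'
diff = (
    b'--- file.txt\n'
    b'+++ file.txt\n'
    b'@@ -1 +1 @@\n'
    b'-hello\n'
    b'+hello, world\n'
)
try:
    patched = patch(diff, orig, 'file.txt')
except PatchError as e:
    # e.error_output and e.rejects carry diagnostics on failure.
    patched = None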
def make_docs():
    """Create the documentation and add it to ../src/docs"""
    doc_build_dir = os.path.join(THIS_DIR, '..', 'docs')
    doc_html_build = os.path.join(doc_build_dir, '_build', 'html')
    doc_dest_dir = os.path.join(THIS_DIR, '..', 'src', 'docs')
    ex("make clean && make html", cwd=doc_build_dir)
    if os.path.isdir(doc_dest_dir):
        remove_directory(doc_dest_dir, False)
    else:
        os.makedirs(doc_dest_dir)
    # Copy everything except the Sphinx build artefacts we don't want to ship.
    items = set(os.listdir(doc_html_build)) - set(['.buildinfo', 'objects.inv'])
    for item in items:
        source = os.path.abspath(os.path.join(doc_html_build, item))
        dest = os.path.abspath(os.path.join(doc_dest_dir, item))
        if os.path.isdir(source):
            shutil.copytree(source, dest)
        else:
            shutil.copyfile(source, dest)
5,339,784
def test_naptr_flood(cetp_mgr): """ Tests the establishment of CETP-H2H, CETP-C2C layer and CETPTransport(s) towards r-ces upon getting a list of NAPTR records.""" sender_info = ("10.0.3.111", 43333) l_hostid, l_hostip = "hosta1.cesa.lte.", sender_info[0] dst_id, r_cesid, r_ip, r_port, r_proto = "", "", "", "", "" naptr_records = {} naptr_records['srv1.hostb1.cesb.lte.'] = [('srv1.hostb1.cesb.lte.', 'cesb.lte.', '10.0.3.103', '49001', 'tls'), ('srv1.hostb1.cesb.lte.', 'cesb.lte.', '10.0.3.103', '49002', 'tls')] naptr_list = naptr_records['srv1.hostb1.cesb.lte.'] cb_args = ("SomeValue", sender_info) st = time.time() for it in range(0, 5000): n = naptr_list[:] dst_id, r_cesid, r_ip, r_port, r_proto = n[0] yield from asyncio.sleep(random.uniform(0, 0.001)) #cetp_mgr.process_outbound_cetp(some_cb, cb_args, dst_id, r_cesid, n) cetp_mgr.process_dns_message(some_cb, cb_args, dst_id, r_cesid=r_cesid, naptr_list=n) #et = time.time() - st #print("Total time", et) test_output(cetp_mgr) yield from asyncio.sleep(4) test_output(cetp_mgr)
5,339,785
def shift_transactions_forward(index, tindex, file, pos, opos): """Copy transactions forward in the data file This might be done as part of a recovery effort """ # Cache a bunch of methods seek=file.seek read=file.read write=file.write index_get=index.get # Initialize, pv=z64 p1=opos p2=pos offset=p2-p1 # Copy the data in two stages. In the packing stage, # we skip records that are non-current or that are for # unreferenced objects. We also skip undone transactions. # # After the packing stage, we copy everything but undone # transactions, however, we have to update various back pointers. # We have to have the storage lock in the second phase to keep # data from being changed while we're copying. pnv=None while 1: # Read the transaction record seek(pos) h=read(TRANS_HDR_LEN) if len(h) < TRANS_HDR_LEN: break tid, stl, status, ul, dl, el = unpack(TRANS_HDR,h) status = as_text(status) if status=='c': break # Oops. we found a checkpoint flag. tl=u64(stl) tpos=pos tend=tpos+tl otpos=opos # start pos of output trans thl=ul+dl+el h2=read(thl) if len(h2) != thl: raise PackError(opos) # write out the transaction record seek(opos) write(h) write(h2) thl=TRANS_HDR_LEN+thl pos=tpos+thl opos=otpos+thl while pos < tend: # Read the data records for this transaction seek(pos) h=read(DATA_HDR_LEN) oid,serial,sprev,stloc,vlen,splen = unpack(DATA_HDR, h) assert not vlen plen=u64(splen) dlen=DATA_HDR_LEN+(plen or 8) tindex[oid]=opos if plen: p=read(plen) else: p=read(8) p=u64(p) if p >= p2: p=p-offset elif p >= p1: # Ick, we're in trouble. Let's bail # to the index and hope for the best p=index_get(oid, 0) p=p64(p) # WRITE seek(opos) sprev=p64(index_get(oid, 0)) write(pack(DATA_HDR, oid, serial, sprev, p64(otpos), 0, splen)) write(p) opos=opos+dlen pos=pos+dlen # skip the (intentionally redundant) transaction length pos=pos+8 if status != 'u': index.update(tindex) # Record the position tindex.clear() write(stl) opos=opos+8 return opos
5,339,786
def draw_disturbances(seed, shocks_cov, num_periods, num_draws):
    """Create the desired number of draws from a mean-zero trivariate normal
    distribution whose diagonal covariance is given by shocks_cov."""
    # Set seed
    np.random.seed(seed)

    # Input parameters of the distribution
    mean = [0, 0, 0]
    shocks_cov_matrix = np.zeros((3, 3), float)
    np.fill_diagonal(shocks_cov_matrix, shocks_cov)

    # Create draws from the multivariate normal distribution
    draws = np.random.multivariate_normal(
        mean, shocks_cov_matrix, (num_periods, num_draws)
    )

    # Return function output
    return draws
5,339,787
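# Shape check for draw_disturbances above: the result stacks num_draws
# three-dimensional normal draws for each of num_periods periods. The
# covariance values are arbitrary example numbers.
draws = draw_disturbances(seed=123, shocks_cov=[0.5, 1.0, 1.5],
                          num_periods=10, num_draws=200)
assert draws.shape == (10, 200, 3)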
def main(): """Entrypoint function.""" parser = argparse.ArgumentParser() parser.add_argument('-u', '--username', help='Hydro Quebec username') parser.add_argument('-p', '--password', help='Password') parser.add_argument('-j', '--json', action='store_true', default=False, help='Json output') parser.add_argument('-i', '--influxdb', action='store_true', default=False, help='InfluxDb output') parser.add_argument('-c', '--contract', default=None, help='Contract number') parser.add_argument('-l', '--list-contracts', action='store_true', default=False, help='List all your contracts') parser.add_argument('-H', '--hourly', action='store_true', default=False, help='Show yesterday hourly consumption') parser.add_argument('-t', '--timeout', default=REQUESTS_TIMEOUT, help='Request timeout') parser.add_argument('-V', '--version', action='store_true', default=False, help='Show version') raw_group = parser.add_argument_group('Detailled-energy raw download option') raw_group.add_argument('--detailled-energy', action='store_true', default=False, help='Get raw json output download') raw_group.add_argument('--start-date', default=(datetime.datetime.now(HQ_TIMEZONE) - datetime.timedelta(days=1)).strftime("%Y-%m-%d"), help='Start date for detailled-output') raw_group.add_argument('--end-date', default=datetime.datetime.now(HQ_TIMEZONE).strftime("%Y-%m-%d"), help="End date for detailled-output") args = parser.parse_args() if args.version: print(VERSION) return 0 if not args.username or not args.password: parser.print_usage() print("pyhydroquebec: error: the following arguments are required: " "-u/--username, -p/--password") return 3 client = HydroQuebecClient(args.username, args.password, args.timeout) loop = asyncio.get_event_loop() if args.detailled_energy is False: async_func = client.fetch_data() else: start_date = datetime.datetime.strptime(args.start_date, '%Y-%m-%d') end_date = datetime.datetime.strptime(args.end_date, '%Y-%m-%d') async_func = client.fetch_data_detailled_energy_use(start_date, end_date) try: fut = asyncio.wait([async_func]) loop.run_until_complete(fut) except BaseException as exp: print(exp) return 1 finally: close_fut = asyncio.wait([client.close_session()]) loop.run_until_complete(close_fut) if not client.get_data(): return 2 if args.list_contracts: print("Contracts: {}".format(", ".join(client.get_contracts()))) elif args.influxdb: output_influx(client.get_data(args.contract)) elif args.json or args.detailled_energy: output_json(client.get_data(args.contract)) else: output_text(args.username, client.get_data(args.contract), args.hourly) return 0
5,339,788
def wheel(pos):
    """Generate rainbow colors across 0-1280 positions."""
    # Clamp to the last valid position; the original upper bound of 1280
    # fell through every branch and left r, g, b unassigned.
    if pos > 1279:
        pos = 1279
    if pos <= 255:
        r, g, b = 255 - pos, 0, 255
    elif pos <= 511:
        r, g, b = 0, pos - 256, 255
    elif pos <= 767:
        r, g, b = 0, 255, 255 - (pos - 512)
    elif pos <= 1023:
        r, g, b = pos - 768, 255, 0
    else:
        r, g, b = 255, 255 - (pos - 1024), 0
    return (r, g, b)
5,339,789
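# A few sample points on the 0-1280 colour wheel above; the values follow
# directly from the piecewise definition.
assert wheel(0) == (255, 0, 255)
assert wheel(256) == (0, 0, 255)
assert wheel(640) == (0, 255, 127)
assert wheel(1024) == (255, 255, 0)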
def get_physical_connectivity(port):
    """Get local_link_information from the specified port.

    @param port  a port object
    @return lli  a list of dicts of the following form:
                 {"switch_id": "MAC_of_switch",
                  "port_id": "1/1/0/1",
                  "switch_info": "switch_name"}
    """
    # TODO(yushiro): replace the following keys with constant values
    binding_profile = port['binding:profile']
    lli = binding_profile.get("local_link_information", {})
    is_all_specified = bool(lli)
    for i in lli:
        if not (i.get('switch_id') and i.get('port_id') and
                i.get('switch_info')):
            is_all_specified = False
    if is_all_specified:
        return lli
    LOG.error(_LE("Some physical network param is missing:%s"), lli)
    raise ml2_exc.MechanismDriverError(method="get_physical_connectivity")
5,339,790
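# A hedged example of the port payload get_physical_connectivity() expects,
# built only from the structure described in its docstring; the MAC address,
# port id and switch name below are placeholders.
example_port = {
    'binding:profile': {
        'local_link_information': [{
            'switch_id': '00:11:22:33:44:55',
            'port_id': '1/1/0/1',
            'switch_info': 'leaf-switch-01',
        }],
    },
}
# get_physical_connectivity(example_port) would return the list above;
# omitting any of the three keys raises MechanismDriverError instead.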
def optimize_profile(diff_matrix, x_points, dc_init, exp_norm_profiles,
                     display_result=True, labels=None):
    """
    Fit the diffusion matrix

    Parameters
    ----------
    diff_matrix : tuple
        tuple of (eigenvalues, eigenvectors) in reduced basis (dim n-1)
    x_points : 1-D array_like
        spatial coordinates
    dc_init : array
        concentration difference between endmembers
    exp_norm_profiles : list of arrays
        profiles to be fitted, of length equal to the number of experiments,
        with n profiles for each experiment. Profiles are normalized, that is,
        an estimate of the mean concentration should be subtracted.
    display_result : bool, optional
        if True, plot the fitted profiles for each experiment
    labels : sequence, optional
        labels passed on to evolve_profile when plotting
    """
    n_comp = len(dc_init[0]) - 1
    n_exp = len(x_points)

    def cost_function(coeffs, x_points, dc_init, exp_norm_profiles):
        n_comp = len(dc_init[0]) - 1
        diag = coeffs[:n_comp]
        n_exp = len(x_points)
        P = np.matrix(coeffs[n_comp: n_comp + n_comp**2].reshape((n_comp,
                                                                  n_comp)))
        adjust_cmeans = coeffs[n_comp + n_comp**2:
                               n_comp + n_comp**2 +
                               (n_comp) * n_exp].reshape((n_exp, n_comp))
        adjust_dc = coeffs[n_comp + n_comp**2 + (n_comp) * n_exp:
                           n_comp + n_comp**2 +
                           2 * (n_comp) * n_exp].reshape((n_exp, n_comp))
        errors = np.array([])
        for i in range(n_exp):
            dc_corr = np.copy(dc_init[i])
            dc_corr[:-1] -= adjust_dc[i]
            profile_corr = np.copy(exp_norm_profiles[i])
            profile_corr[:-1, :] -= adjust_cmeans[i][:, None]
            error = evolve_profile((diag, P), x_points[i], dc_corr,
                                   profile_corr, plot=False)
            errors = np.concatenate((errors, error))
        return errors

    diag, P = diff_matrix
    coeffs = np.concatenate((diag, np.array(P).ravel(),
                             np.zeros(2 * n_exp * n_comp)))
    res = optimize.leastsq(cost_function, coeffs,
                           args=(x_points, dc_init, exp_norm_profiles),
                           full_output=True, factor=10)[0]
    diags, eigvecs, shifts = res[:n_comp], \
        res[n_comp: n_comp + n_comp**2].reshape((n_comp, n_comp)), \
        res[n_comp + n_comp**2:].reshape((2, n_exp, n_comp))
    if display_result:
        for i in range(n_exp):
            dc_corr = np.copy(dc_init[i])
            dc_corr[:-1] -= shifts[1, i]
            prof_corr = np.copy(exp_norm_profiles[i])
            prof_corr[:-1] -= shifts[0, i][:, None]
            _ = evolve_profile((diags, eigvecs), x_points[i], dc_corr,
                               exp_norm_profiles=prof_corr, labels=labels)
    return diags, eigvecs, shifts
5,339,791
def getargsfromdoc(obj): """Get arguments from object doc""" if obj.__doc__ is not None: return getargsfromtext(obj.__doc__, obj.__name__)
5,339,792
def test_0007_e():
    """Irregular conjugation tests, part 1."""
    # #### "러" irregular conjugation
    pos_list = pos_E.endswithE(u"검푸르러")
    assert postag_left_check(pos_list, u"검푸르"), u"검푸르 in eojeol"
    assert postag_end_check(pos_list, u"러/EC"), u"러/EC in eojeol"

    pos_list = pos_E.endswithE(u"푸르러서")
    assert postag_left_check(pos_list, u"푸르"), u"푸르 in eojeol"
    assert postag_end_check(pos_list, u"어서/EC"), u"어서/EC in eojeol"

    # #### "ㄷ" irregular conjugation
    pos_list = pos_E.endswithE(u"걸으니")
    # check the regular form
    assert postag_left_check(pos_list, u"걸"), u"걸 in eojeol"
    assert postag_end_check(pos_list, u"으니/EC"), u"으니/EC in eojeol"
    # irregular form
    assert postag_left_check(pos_list, u"걷"), u"걷 in eojeol"
    assert postag_end_check(pos_list, u"으니/EC"), u"으니/EC in eojeol"

    pos_list = pos_E.endswithE(u"실어")
    # check the regular form
    assert postag_left_check(pos_list, u"실"), u"실 in eojeol"
    assert postag_end_check(pos_list, u"어/EC"), u"어/EC in eojeol"
    # irregular form
    assert postag_left_check(pos_list, u"싣"), u"싣 in eojeol"
    assert postag_end_check(pos_list, u"어/EC"), u"어/EC in eojeol"

    # #### "ㄹ" irregular conjugation
    pos_list = pos_E.endswithE(u"주니")  # 몸무게가 줄다 ("the weight goes down") -> 몸무게가 주니
    # check the regular form
    assert postag_left_check(pos_list, u"주"), u"주 in eojeol"
    assert postag_end_check(pos_list, u"니/EC"), u"니/EC in eojeol"
    # irregular form
    assert postag_left_check(pos_list, u"줄"), u"줄 in eojeol"
    assert postag_end_check(pos_list, u"니/EC"), u"니/EC in eojeol"

    # Very exceptionally, the irregular form also appears when 시 is attached.
    pos_list = pos_E.endswithE(u"가셨다.")  # 아버지는 칼을 가셨다 ("Father sharpened the knife.")
    # check the regular form
    assert postag_left_check(pos_list, u"가"), u"가 in eojeol"
    assert postag_end_check(
        pos_list, u"시/EP+었/EP+다/EF"), u"시/EP+었/EP+다/EF in eojeol"
    # irregular form
    assert postag_left_check(pos_list, u"갈"), u"갈 in eojeol"
    assert postag_end_check(
        pos_list, u"시/EP+었/EP+다/EF"), u"시/EP+었/EP+다/EF in eojeol"

    # "우" irregular conjugation
    pos_list = pos_E.endswithE(u"퍼")
    assert postag_left_check(pos_list, u"푸"), u"푸 in eojeol"
    assert postag_end_check(pos_list, u"어/EC"), u"어/EC in eojeol"

    pos_list = pos_E.endswithE(u"퍼서")
    assert postag_left_check(pos_list, u"푸"), u"푸 in eojeol"
    assert postag_end_check(pos_list, u"어서/EC"), u"어서/EC in eojeol"

    pos_list = pos_E.endswithE(u"펐다.")
    assert postag_left_check(pos_list, u"푸"), u"푸 in eojeol"
    assert postag_end_check(pos_list, u"었/EP+다/EF"), u"었/EP+다/EF in eojeol"
5,339,793
def nms(dets, thresh):
    """Dispatch to either the CPU or GPU NMS implementation.

    Accepts dets as a tensor.
    """
    return pth_nms(dets, thresh)
5,339,794
def get_disk_usage(): """ Handle determining disk usage on this VM """ disk = {} # Get the amount of general disk space used cmd_out = subprocess.getstatusoutput('df -h | grep "/dev/xvda1"')[1] cmd_parts = cmd_out.split() disk["gen_disk_used"] = cmd_parts[2] disk["gen_disk_total"] = cmd_parts[3] disk["gen_disk_percent"] = cmd_parts[4] # Get the amount of Docker disk space used cmd_out = subprocess.getstatusoutput('df -h | grep "tmpfs"')[1] cmd_parts = cmd_out.split() disk["docker_disk_used"] = cmd_parts[2] disk["docker_disk_total"] = cmd_parts[3] disk["docker_disk_percent"] = cmd_parts[4] return disk
5,339,795
def generate_arg_parser():
    """
    Parse command-line arguments for the webapp: the path to the disaster
    response database and the path to the trained model.

    :return: the parsed arguments and the argument parser
    """
    project_path = get_project_path()

    # Default locations for the database and the trained model
    default_db_path = "".join([project_path, "/data/DisasterResponseDataBase.db"])
    default_model_path = "".join([str(project_path), "/models/dr_trained_model.lzma"])

    parser = argparse.ArgumentParser(
        description="Load data from database, load model, and run the webapp."
    )

    parser.add_argument(
        "--db_file",
        action="store",
        dest="db_file",
        type=str,
        default=default_db_path,
        help="Path to the disaster response database",
    )

    parser.add_argument(
        "--model_file",
        action="store",
        dest="model_file",
        type=str,
        default=default_model_path,
        help="Path to the trained machine learning model.",
    )

    return parser.parse_args(), parser
5,339,796
def QFont_from_Font(font): """ Convert the given Enaml Font into a QFont. Parameters ---------- font : Font The Enaml Font object. Returns ------- result : QFont The QFont instance for the given Enaml font. """ qfont = QFont(font.family, font.pointsize, font.weight) qfont.setStyle(FONT_STYLES[font.style]) qfont.setCapitalization(FONT_CAPS[font.caps]) qfont.setStretch(FONT_STRETCH[font.stretch]) return qfont
5,339,797
def download_file(source_url, dest_path, source_path=""): """ Downloads the given archive and extracts it Currently works for: - `zip` files - `tar.gz` files Inputs: - `source_url` (str): URL to download the ZIP file - `source_path` (str): path of the file in the ZIP file - `dest_path` (str): path of the extracted file """ # Initiate the request r = requests.get(source_url, stream=True) # Measure the total size of the ZIP file total_size = int(r.headers.get("content-length", 0)) block_size = 1024 t = tqdm(total=total_size, unit="iB", unit_scale=True) file_extension = source_url.split(".")[-1] if file_extension == "zip": # Save the ZIP file in a temporary ZIP file with open(os.path.join("data", "raw", "temp.zip"), "wb") as f: for data in r.iter_content(block_size): t.update(len(data)) f.write(data) t.close() if total_size != 0 and t.n != total_size: print( colored( "ERROR: Something went wrong while downloading the ZIP file", "red" ) ) z = zipfile.ZipFile(os.path.join("data", "raw", "temp.zip")) # Extract the file from the temporary file if source_path != "": z.extract(source_path, os.path.dirname(dest_path)) os.rename(os.path.join(os.path.dirname(dest_path), source_path), dest_path) else: z.extractall(os.path.dirname(dest_path)) # z.extractall(dest_path.split(os.path.sep)[:-1]) # Remove the temporary file os.remove(os.path.join("data", "raw", "temp.zip")) elif file_extension == "gz": # Save the GZ file in a temporary GZ file with open(os.path.join("data", "raw", "temp.gz"), "wb") as temp_file: for data in r.iter_content(block_size): t.update(len(data)) temp_file.write(data) t.close() if total_size != 0 and t.n != total_size: print( colored( "ERROR: Something went wrong while downloading the GZ file", "red" ) ) with gzip.open(os.path.join("data", "raw", "temp.gz"), "rb") as file_in: with open(dest_path, "wb") as file_out: shutil.copyfileobj(file_in, file_out) # Remove the temporary file os.remove(os.path.join("data", "raw", "temp.gz"))
5,339,798
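# A hedged usage sketch for download_file above. It expects a data/raw/
# directory to exist for the temporary archive, and the URL below is a
# placeholder rather than a real dataset location.
download_file(
    source_url="https://example.com/datasets/sample.zip",
    dest_path=os.path.join("data", "processed", "sample.csv"),
    source_path="sample.csv",
)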
def test_sns_topic_created(template): """ Test for SNS Topic and Subscription: S3 Upload Event Notification """ template.resource_count_is("AWS::SNS::Subscription", 1) template.resource_count_is("AWS::SNS::Topic", 1) template.resource_count_is("AWS::SNS::TopicPolicy", 1)
5,339,799