Dataset schema: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M). Rows follow as content / sha1 / id triples.
from typing import OrderedDict


async def bulkget(ip, community, scalar_oids, repeating_oids,
                  max_list_size=1, port=161, timeout=DEFAULT_TIMEOUT):
    # type: (str, str, List[str], List[str], int, int, int) -> BulkResult
    """
    Delegates to :py:func:`~puresnmp.aio.api.raw.bulkget` but returns simple
    Python types.

    See the "raw" equivalent for detailed documentation & examples.
    """
    raw_output = await raw.bulkget(ip, community, scalar_oids, repeating_oids,
                                   max_list_size=max_list_size,
                                   port=port, timeout=timeout)
    pythonized_scalars = {oid: value.pythonize()
                          for oid, value in raw_output.scalars.items()}
    pythonized_list = OrderedDict(
        [(oid, value.pythonize()) for oid, value in raw_output.listing.items()])
    return BulkResult(pythonized_scalars, pythonized_list)
6795f7d9ff7ac1952406922395e2308346ff244d
3,645,600
def repo_version_db_key() -> bytes:
    """The db formatted key which version information can be accessed at

    Returns
    -------
    bytes
        db formatted key to use to get/set the repository software version.
    """
    db_key = c.K_VERSION.encode()
    return db_key
090a70e59d1a2c7d4a3f4589b9f4a2ef975e2585
3,645,601
import h5py


def retrieve_psd_cdf(path):
    """Interact with hdf5 file format for marginal CDFs for a set of PSDs."""
    with h5py.File(path, 'r') as obj:
        group = obj['PSD_CDF']
        Npsd = group.attrs['num_psds']
        freqs = group['frequencies'][...]
        data = group['CDFs'][...]
        vals = data[:, 0, :]
        cdfs = data[:, 1, :]
    return freqs, vals, cdfs, Npsd
f0ee184d972ddcbedeb94345f15fcab9d08e8458
3,645,602
import mgp
from heapq import nlargest


def get(context: mgp.ProcCtx) -> mgp.Record(tracks=list):
    """Returns a list of track_ids of trendy songs.

    Calculates recently popular tracks by comparing the popularity of songs
    using the `followers`, `created_at`, and proximity to other popular
    songs (pagerank).

    Example usage:
        CALL trendy_tracks.get() YIELD tracks

    Equivalent cypher query:
        MATCH (track:Track)<--(playlist:Playlist)
        WITH track, count(playlist) AS popularity
        RETURN track
        ORDER BY popularity DESC
        LIMIT 10

    :return: List of track ids that are currently trendy.
    :rtype: mgp.Record(tracks=list[dict[str][Any]])
    """
    return mgp.Record(
        tracks=list(
            map(
                lambda vertex: dict(vertex.properties),
                nlargest(
                    10,
                    filter(
                        lambda vertex: "Track" in vertex.labels,
                        context.graph.vertices,
                    ),
                    key=lambda vertex: sum(1 for _ in vertex.in_edges),
                ),
            )
        )
    )
182d8a3d26028f472f1cc64bd993b6a29635daf5
3,645,603
import ipaddress


def ip_only(value):
    """
    Returns only the IP address string of the value provided. The value
    could be either an IP address, an IP network or an IP interface as
    defined by the ipaddress module.

    Parameters
    ----------
    value : str
        The value to use

    Returns
    -------
    str
        The IP address only value, if the value provided was valid
    None
        If the value provided is not an IP thing
    """
    for test in [lambda x: str(ipaddress.ip_address(x)),
                 lambda x: str(ipaddress.ip_interface(x).ip),
                 lambda x: str(ipaddress.ip_network(x).network_address)]:
        try:
            return test(value)
        except ValueError:
            pass

    return None
149b202969c0ccb4e0c5e55417ce0231f1b5fc11
3,645,604
import logging


def GetAuth1Token():
    """Returns an Auth1Token for use with server authentication."""
    if AUTH1_TOKEN:
        return AUTH1_TOKEN

    if not OBJC_OK:
        logging.error('Objective-C bindings not available.')
        return None

    pref_value = Foundation.CFPreferencesCopyAppValue(
        'AdditionalHttpHeaders', 'ManagedInstalls')
    if pref_value is None:
        logging.error('GetAuth1Token(): AdditionalHttpHeaders not present.')
        return None

    header = 'Cookie: Auth1Token='
    for h in pref_value:
        if h.startswith(header):
            logging.debug('GetAuth1Token(): found %s', h)
            token = h[len(header):]
            if token.find(';') > -1:
                token = token[0:token.find(';')]
            token = str(token)
            return token

    logging.error('GetAuth1Token(): AdditionalHttpHeaders lacks a token.')
    return None
70134cb639e5bc1021fd04ee2524592681a5b04b
3,645,605
import os


def TemplateInputFilename(context):
    """Build template file name from config."""
    if args.templatedir:
        filename = config_lib.CONFIG.Get("PyInstaller.template_filename",
                                         context=context)
        return os.path.join(args.templatedir, filename)
    return None
4a132a5a74d96414c63aa0b6113ebb07f3d46d4b
3,645,606
import datetime  # module-level import: datetime.date and datetime.timedelta are both used
from typing import Tuple
from typing import Optional

import investpy
import numpy as np


def get_data_by_isin(isin: str, dates: Tuple[datetime.date],
                     is_etf: bool) -> Tuple[Optional[np.ndarray], str]:
    """Retrieves stock/ETF prices in EUR by ISIN for the given dates. Cached to make
    sure this is only queried once for a given currency & date-range."""
    from_date = dates[0].strftime("%d/%m/%Y")
    to_date = (dates[-1] + datetime.timedelta(days=7)).strftime("%d/%m/%Y")

    # Retrieves stock/etf information based on the ISIN
    try:
        if is_etf:
            data = investpy.search_etfs(by="isin", value=isin)
        else:
            data = investpy.search_stocks(by="isin", value=isin)
    except RuntimeError:
        print(f"[DGPC] Warning, could not retrieve {'ETF' if is_etf else 'stock'} data for ISIN {isin}.")
        return None, ""

    # When a stock/ETF is listed in multiple countries, take one of the preferred countries if found
    for country in PREFERRED_COUNTRIES:
        local_data = data[data["country"] == country]
        if local_data.shape[0] > 0:
            break
    else:
        # Taking the first country from the results if none of the preferred countries is found
        country = data["country"][0]
        local_data = data

    # Retrieves the actual historical prices for the stock/etf
    currency = list(local_data["currency"])[0]
    symbol = list(local_data["symbol"])[0]
    if is_etf:
        name = list(local_data["name"])[0]
        history = investpy.get_etf_historical_data(name, country=country,
                                                   from_date=from_date, to_date=to_date)
    else:
        history = investpy.get_stock_historical_data(symbol, country=country,
                                                     from_date=from_date, to_date=to_date)
    history = history.reset_index()
    values = densify_history(history, dates)

    # Convert the results to euro
    if currency != "EUR":
        currency_modifier = to_euro_modifier(currency, tuple(dates))
        values *= currency_modifier

    return values, symbol
d4d46b45f480488fb67d3a6116a3b2e90c736efc
3,645,607
from typing import Dict
from typing import Any


def get_result_qiskit() -> Dict[str, Dict[str, Any]]:
    """Fixture for returning sample experiment result

    Returns
    -------
    Dict[str, Dict[str, Any]]
        A dictionary of results for physics simulation and perfect gates
        A result dictionary which looks something like::

            {
                "name": name of this experiment (obtained from qobj.experiment header)
                "seed": random seed used for simulation
                "shots": number of shots used in the simulation
                "data": {
                    "counts": {'0x9': 5, ...},
                    "memory": ['0x9', '0xF', '0x1D', ..., '0x9']
                },
                "status": status string for the simulation
                "success": boolean
                "time_taken": simulation time of this single experiment
            }
    """
    # Result of physics based sim for applying X on qubit 0 in 6 qubits
    perfect_counts = {"110000": 1000}

    counts_dict = {
        "c3_qasm_perfect_simulator": perfect_counts,
    }
    return counts_dict
7dc44fe110687b92f5e8b23c24798b06dd19e71e
3,645,608
from django.db.models import Q


def all_budgets_for_student(user_id):
    """Returns a queryset for all budgets that a student can view/edit,
    i.e. is the submitter, president, or treasurer for any of the
    organization's budgets"""
    query = Q(budget__submitter=user_id) | Q(budget__president_crsid=user_id) | Q(budget__treasurer_crsid=user_id)
    orgs = Organization.objects.filter(query)
    budgets = Budget.objects.filter(organization__in=orgs)
    return budgets
2048f2b579c2e8903ca34c34990e9c2c5215f79c
3,645,609
import subprocess
import sys
import errno

from distutils.errors import DistutilsError


def exec_process(cmdline, silent=True, catch_enoent=True, input=None, **kwargs):
    """Execute a subprocess and returns the returncode, stdout buffer and stderr buffer.
    Optionally prints stdout and stderr while running."""
    try:
        sub = subprocess.Popen(args=cmdline, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               **kwargs)
        stdout, stderr = sub.communicate(input=input)
        if type(stdout) != type(""):
            # decode on Python 3; do nothing on Python 2 (it just doesn't
            # care about encoding anyway)
            stdout = stdout.decode(sys.getdefaultencoding(), "replace")
            stderr = stderr.decode(sys.getdefaultencoding(), "replace")
        returncode = sub.returncode
        if not silent:
            sys.stdout.write(stdout)
            sys.stderr.write(stderr)
    except OSError as e:
        if e.errno == errno.ENOENT and catch_enoent:
            raise DistutilsError('"%s" is not present on this system' % cmdline[0])
        else:
            raise
    if returncode != 0:
        raise DistutilsError('Got return value %d while executing "%s", stderr output was:\n%s'
                             % (returncode, " ".join(cmdline), stderr.rstrip("\n")))
    return stdout
30d81dd2cac035902bdd800d1205bcadce4421b5
3,645,610
import torch
from torch.autograd import Variable


def uniform_weights(x, x_mask):
    """Return uniform weights over non-masked x (a sequence of vectors).

    Args:
        x: batch * len * hdim
        x_mask: batch * len (1 for padding, 0 for true)
    Output:
        x_avg: batch * hdim
    """
    alpha = Variable(torch.ones(x.size(0), x.size(1)))
    if x.data.is_cuda:
        alpha = alpha.cuda()
    alpha = alpha * x_mask.eq(0).float()
    alpha = alpha / alpha.sum(1, keepdim=True).expand(alpha.size())
    return alpha
a1b88fc88ac65886283159d077e9550dab95c8de
3,645,611
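The masking convention above (1 = padding, 0 = real token) means each row of alpha spreads weight uniformly over the unmasked positions only. A minimal shape check, noting that torch.autograd.Variable is a legacy no-op wrapper on modern PyTorch:

import torch

x = torch.zeros(2, 4, 8)                       # batch=2, len=4, hdim=8
x_mask = torch.tensor([[0, 0, 1, 1],           # last two positions are padding
                       [0, 0, 0, 0]])          # no padding at all
alpha = uniform_weights(x, x_mask)
assert torch.allclose(alpha[0], torch.tensor([0.5, 0.5, 0.0, 0.0]))
assert torch.allclose(alpha[1], torch.full((4,), 0.25))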
import numpy as np
from numpy.linalg import inv  # matrix inverse; operator.inv is bitwise NOT and would be wrong here


def mixed_estimator_2(T1, T2, verbose=False):
    """
    Based on the Lavancier and Rochet (2016) article. The method combines two
    series of estimates of the same quantity taking into account their
    correlations. The individual measurements are assumed independent.
    The current implementation works only for point estimates.

    The main result corresponds to Eq. (11) from the article. Its variance is
    the equation after Eq. (9). [equation not checked]
    """
    B = 1000  # bootstrap repetitions

    # Drop nans
    not_nans = np.logical_or(np.isnan(T1), np.isnan(T2))
    T1, T2 = T1[~not_nans], T2[~not_nans]
    n = len(T1)

    # Return nan if no samples.
    # If one sample, return simple average with no variance.
    if n == 0:
        return np.nan, np.nan, np.array([np.nan, np.nan])
    elif n == 1:
        # print(T1)
        return T1[0] / 2 + T2[0] / 2, np.nan, np.array([0.5, 0.5])

    # Calculate the estimators for the data set. This is the input data for the rest
    T1_data_median = np.median(T1)
    T2_data_median = np.median(T2)

    # Estimate the covariance sigma matrix with bootstrap (with replacement,
    # as described in the article)
    sigma = np.zeros((2, 2))
    for b in range(B):
        T1_sample = np.random.choice(T1, size=n, replace=True)
        T2_sample = np.random.choice(T2, size=n, replace=True)
        # print('T1', T1_sample)
        T1_sample_median = np.median(T1_sample)
        T2_sample_median = np.median(T2_sample)
        sigma += np.array([
            [(T1_sample_median - T1_data_median)**2,
             (T1_sample_median - T1_data_median) * (T2_sample_median - T2_data_median)],
            [(T1_sample_median - T1_data_median) * (T2_sample_median - T2_data_median),
             (T2_sample_median - T2_data_median)**2]])
    sigma /= B
    # print(n, sigma)

    # Calculate the mixed estimator
    I = np.array([[1, 1]]).T
    T = np.array([[T1_data_median, T2_data_median]]).T
    weights = inv(I.T @ inv(sigma) @ I) @ I.T @ inv(sigma)
    mixed_estimator = (weights @ T)[0, 0]
    mixedV = (inv(I.T @ inv(sigma) @ I))[0, 0]

    if verbose:
        print('weights', weights)
        print(mixed_estimator, '+-', np.sqrt(mixedV))

    return mixed_estimator, mixedV, np.squeeze(weights)
8f9ee282b0756dd41ff98e9ae596e46ddf6947a3
3,645,612
def e_add_const(pub, a, n):
    """Add constant n to an encrypted integer"""
    return a * modpow(pub.g, n, pub.n_sq) % pub.n_sq
37be82c71da3114f94d8b2ebe08f54a0726ec655
3,645,613
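The multiplication in e_add_const above is the additive-homomorphic step of the Paillier scheme: multiplying a ciphertext by g^n (mod n^2) adds n to the hidden plaintext. A self-contained toy check, assuming modpow is plain modular exponentiation (Python's three-argument pow) and that pub carries g and n_sq; the p=3, q=5 keypair is for illustration only and is cryptographically worthless:

from collections import namedtuple

PublicKey = namedtuple("PublicKey", ["g", "n", "n_sq"])
modpow = pow  # assumption: modpow(base, exp, mod) == pow(base, exp, mod)

def e_add_const(pub, a, n):
    """Add constant n to an encrypted integer"""
    return a * modpow(pub.g, n, pub.n_sq) % pub.n_sq

# Toy Paillier keypair from p=3, q=5 (insecure, demonstration only).
p, q = 3, 5
n = p * q
lam = 4  # lcm(p - 1, q - 1)
pub = PublicKey(g=n + 1, n=n, n_sq=n * n)

def encrypt(m, r=2):  # r must be coprime with n
    return modpow(pub.g, m, pub.n_sq) * modpow(r, n, pub.n_sq) % pub.n_sq

def decrypt(c):
    L = lambda x: (x - 1) // n
    mu = pow(L(modpow(pub.g, lam, pub.n_sq)), -1, n)  # modular inverse (Python 3.8+)
    return L(modpow(c, lam, pub.n_sq)) * mu % n

c = encrypt(4)
assert decrypt(c) == 4
assert decrypt(e_add_const(pub, c, 3)) == 7  # 4 + 3, computed on ciphertexts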
def area_triangle(base, height):
    """Return the area of a triangle given its base and height."""
    return (base * height) / 2.0
474e1a090dc7af9d68eaab35e6b04e5e165b6777
3,645,614
from rdkit.Chem import rdMolDescriptors


def _getAtomInvariantsWithRadius(mol, radius):
    """Helper function to calculate the atom invariants for each atom
    with a given radius

    Arguments:
    - mol: the molecule of interest
    - radius: the radius for the Morgan fingerprint

    Return: list of atom invariants
    """
    inv = []
    for i in range(mol.GetNumAtoms()):
        info = {}
        fp = rdMolDescriptors.GetMorganFingerprint(mol, radius, fromAtoms=[i], bitInfo=info)
        for k in info.keys():
            if info[k][0][1] == radius:
                inv.append(k)
    return inv
8b8565a62af7f94c79604342077918a5b4261410
3,645,615
import argparse


def build_argparser():
    """Construct an argument parser for the ``translate_header.py`` script.

    Returns
    -------
    argparser : `argparse.ArgumentParser`
        The argument parser that defines the ``translate_header.py``
        command-line interface.
    """
    parser = argparse.ArgumentParser(description="Summarize headers from astronomical data files")
    parser.add_argument("files", metavar="file", type=str, nargs="+",
                        help="File(s) from which headers will be parsed."
                        " If a directory is given it will be scanned for files matching the regular"
                        " expression defined in --regex.")
    parser.add_argument("-q", "--quiet", action="store_true",
                        help="Do not report the translation content from each header. This forces "
                        "output mode 'none'.")
    parser.add_argument("-d", "--dumphdr", action="store_true",
                        help="Dump the header in YAML format to standard output rather than translating it."
                        " This is the same as using mode=yaml")
    parser.add_argument("--traceback", action="store_true",
                        help="Give detailed trace back when any errors encountered")
    parser.add_argument("-n", "--hdrnum", default=1,
                        help="HDU number to read. If the HDU can not be found, a warning is issued but "
                        "translation is attempted using the primary header. "
                        "The primary header is always read and merged with this header.")
    parser.add_argument("-m", "--mode", default="auto", choices=OUTPUT_MODES,
                        help="Display mode for translated parameters. 'verbose' displays all the information"
                        " available. 'table' displays important information in tabular form."
                        " 'yaml' dumps the header in YAML format (this is equivalent to -d option)."
                        " 'fixed' dumps the header in YAML after it has had corrections applied."
                        " Add 'native' suffix to dump YAML in PropertyList or Astropy native form."
                        " 'none' displays no translated header information and is an alias for the "
                        " '--quiet' option."
                        " 'auto' mode is 'verbose' for a single file and 'table' for multiple files.")
    parser.add_argument("-l", "--log", default="warn",
                        help="Python logging level to use.")
    re_default = r"\.fit[s]?\b"
    parser.add_argument("-r", "--regex", default=re_default,
                        help="When looking in a directory, regular expression to use to determine whether"
                        f" a file should be examined. Default: '{re_default}'")
    parser.add_argument("-p", "--packages", action="append", type=str,
                        help="Python packages to import to register additional translators")
    return parser
ec054012455a7fc0f6559d01f69e5a3b71cb346c
3,645,616
import numpy as np


def radcool(temp, zmetal):
    """
    Cooling Function

    This version redefines Lambda_sd: (rho/m_p)^2 * Lambda(T,z) is the
    cooling in erg/cm^3/s.

    Args:
        temp  : temperature in the unit of K
        zmetal: metallicity in the unit of solar metallicity

    Return:
        in the unit of erg*s*cm^3
    """
    tshape = temp.shape
    tempflt = temp.flatten()
    qlog0 = np.zeros_like(tempflt)
    qlog1 = np.zeros_like(tempflt)
    for i, t in enumerate(tempflt):
        tlog = np.log10(t)
        # zero metal cooling coefficient Lambda([Fe/H]=0)
        if tlog >= 6.1:
            qlog0[i] = -26.39 + 0.471*(np.log10(t + 3.1623e6))
        elif tlog >= 4.9:
            arg = 10.**(-(tlog-4.9)/.5) + 0.077302
            qlog0[i] = -22.16 + np.log10(arg)
        elif tlog >= 4.25:
            bump1rhs = -21.98 - ((tlog-4.25)/0.55)
            bump2lhs = -22.16 - ((tlog-4.9)/0.284)**2
            qlog0[i] = max(bump1rhs, bump2lhs)
        else:
            qlog0[i] = -21.98 - ((tlog-4.25)/0.2)**2
        if np.isnan(qlog0[i]):  # note: `qlog0[i] == np.nan` is always False
            mylog.warning('There is NaN.')
        # emission from metals alone at solar abundance
        if tlog >= 5.65:
            tlogc = 5.65
            qlogc = -21.566
            qloginfty = -23.1
            p = 0.8
            qlog1[i] = qlogc - p*(tlog - tlogc)
            qlog1[i] = max(qlog1[i], qloginfty)
        else:
            tlogm = 5.1
            qlogm = -20.85
            sig = 0.65
            qlog1[i] = qlogm - ((tlog - tlogm)/sig)**2
    qlambda0 = 10.**qlog0
    qlambda1 = 10.**qlog1
    # final cooling coefficient Lambda_sd:
    radcoolsd = qlambda0 + zmetal.flatten()*qlambda1
    radcoolsd = radcoolsd.reshape(tshape)
    return radcoolsd
720ed6625c9fe348ebe78aa80127c4bcc4e911a9
3,645,617
def L_model_forward(X, parameters):
    """
    Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation

    Arguments:
    X -- data, numpy array of shape (input size, number of examples)
    parameters -- output of initialize_parameters_deep()

    Returns:
    AL -- last post-activation value
    caches -- list of caches containing:
              every cache of linear_activation_forward()
              (there are L of them, indexed from 0 to L-1)
    """
    caches = []
    A = X
    L = len(parameters) // 2  # number of layers in the neural network

    # Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list.
    for l in range(1, L):
        A_prev = A
        A, cache = linear_activation_forward(A_prev,
                                             parameters["W" + str(l)],
                                             parameters["b" + str(l)],
                                             activation="relu")
        caches.append(cache)

    # Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list.
    AL, cache = linear_activation_forward(A,
                                          parameters["W" + str(L)],
                                          parameters["b" + str(L)],
                                          activation="sigmoid")
    caches.append(cache)

    assert(AL.shape == (1, X.shape[1]))

    return AL, caches
b086f172e1fc0d8dad2353af1b35a8f6bd3f13dc
3,645,618
import numpy as np


def linalg_multiply(a):
    """
    Multiply all elements in a vector or matrix

    Parameters:
    * a (array or matrix): The input to multiply

    Return (number): The product of all elements
    """
    return np.prod(a)
bac2457c61813cb5d662cef37fb2b48d8e65ba34
3,645,619
def lambda_handler(event, context):
    """
    This method selects 10% of the input manifest as validation and creates
    an s3 file containing the validation objects.
    """
    label_attribute_name = event['LabelAttributeName']
    meta_data = event['meta_data']
    s3_input_uri = meta_data['IntermediateManifestS3Uri']
    input_total = int(meta_data['counts']['input_total'])

    # 10% of the total input should be used for validation.
    validation_set_size = input_total // 10

    source = S3Ref.from_uri(s3_input_uri)
    validation_labeled_query = """select * from s3object[*] s where s."{}-metadata"."human-annotated" IN ('yes') LIMIT {}""".format(
        label_attribute_name, validation_set_size)
    dest = create_ref_at_parent_key(source, "validation_input.manifest")
    copy_with_query(source, dest, validation_labeled_query)
    logger.info("Uploaded validation set of size {} to {}.".format(
        validation_set_size, dest.get_uri()))

    meta_data['counts']['validation'] = validation_set_size
    meta_data['ValidationS3Uri'] = dest.get_uri()
    return meta_data
f6e0313155a47110e47567320e03e241bb6dde37
3,645,620
def get_table_6():
    """Table 6: Availability of thermal storage (可 = allowed, 不可 = not allowed)

    Args:

    Returns:
        list: Table 6, availability of thermal storage
    """
    table_6 = [
        ('不可', '不可', '可', '可', '可'),
        ('不可', '不可', '可', '可', '可'),
        ('不可', '不可', '可', '可', '可'),
        ('不可', '不可', '可', '可', '可'),
        ('不可', '不可', '可', '可', '可'),
        ('不可', '不可', '不可', '可', '可'),
        ('不可', '不可', '不可', '可', '可')
    ]
    return table_6
4ecd4526ed9ce67b7a5d22b67dd804059807e94d
3,645,621
import random


def is_prime(number, num_trials=200):
    """Determines whether a number is prime.

    Runs the Miller-Rabin probabilistic primality test many times on the
    given number.

    Args:
        number (int): Number to perform primality test on.
        num_trials (int): Number of times to perform the Miller-Rabin test.

    Returns:
        True if number is prime, False otherwise.
    """
    if number < 2:
        return False
    if number != 2 and number % 2 == 0:
        return False

    # Find largest odd factor of n-1.
    exp = number - 1
    while exp % 2 == 0:
        exp //= 2

    for _ in range(num_trials):
        rand_val = int(random.SystemRandom().randrange(1, number))
        new_exp = exp
        power = pow(rand_val, new_exp, number)
        while new_exp != number - 1 and power != 1 and power != number - 1:
            power = (power * power) % number
            new_exp *= 2
        if power != number - 1 and new_exp % 2 == 0:
            return False

    return True
78478437c08bcbd5e4c690466e4fe51bb4fad5ce
3,645,622
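A quick exercise of is_prime above: the Carmichael number 561 = 3 * 11 * 17 fools the plain Fermat test for most bases, but Miller-Rabin rejects it.

assert is_prime(2) and is_prime(97) and is_prime(2**61 - 1)  # Mersenne prime
assert not is_prime(1) and not is_prime(100)
assert not is_prime(561)  # Carmichael number, caught by Miller-Rabin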
from sklearn.preprocessing import LabelEncoder


def extract_labels(filenames):
    """
    Extract class labels of the images from image path list.

    # Arguments
        filenames: List of paths to image file.

    # Returns
        List of image labels.
    """
    return LabelEncoder().fit_transform([extract_label(filename) for filename in filenames])
53f708a0abb105d3ffce0202117b6eae812a9ede
3,645,623
def reverseList(head):
    """
    :type head: ListNode
    :rtype: ListNode
    """
    current = head  # temp is first
    counter = 0
    flag = 'Y'
    while current is not None and flag != 'N':
        # store the current element
        # print(f"Current: {current.val}")
        first = current
        if counter == 0:
            try:
                # find the next element
                second = current.next
                current.next = None
                current = second.next
                if current is None:
                    # this means we are in an edge case so we want to exit
                    head = second
                    second.next = first
                    return head
                second.next = first
                next_element = second
            except:
                # this means we are in an edge case so we want to exit
                flag = 'N'
        else:
            try:
                next_iter = current.next
                current.next = next_element
                current = next_iter
                next_element = first
                head = first
            except:
                # this means we are in an edge case so we want to exit
                flag = 'N'
        counter += 1
    return head
06f07ad9c5dbb13d2e288ea2ff14ef31febf87b9
3,645,624
def validate(data: BuildParams):
    """
    Makes sure a valid combination of params have been provided.
    """
    git_repo = bool(data.source.git_repo)
    dockerfile = bool(data.source.dockerfile)
    build_context = bool(data.source.build_context)

    git_valid = git_repo and not dockerfile and not build_context
    dockerfile_valid = dockerfile and not git_repo

    if not (git_valid or dockerfile_valid):
        return False, "Only one of build sources (git_repo, dockerfile) can be used.\n" \
                      "build_context can only be used in combination with dockerfile"
    return True, ""
708335092018339aa4f64b58d5ec8d2cb09751c3
3,645,625
import pickle


def load_model(filepath=FILEPATH) -> TrainingParams:
    """
    Load a pickled model from disk.

    :param filepath: path to the pickle file to read
    :return: the unpickled TrainingParams object
    """
    with open(filepath, "rb") as handler:
        model = pickle.load(handler)
    return model
f2a1ed631bdb7b1f7e6fd372ca604ef4ef6890f2
3,645,626
import numpy as np
from scipy import sparse


def is_symmetric(m):
    """Check if a sparse matrix is symmetric

    https://mail.python.org/pipermail/scipy-dev/2014-October/020117.html

    Parameters
    ----------
    m : sparse matrix

    Returns
    -------
    check : bool
    """
    if m.shape[0] != m.shape[1]:
        raise ValueError('m must be a square matrix')

    if not isinstance(m, sparse.coo_matrix):
        m = sparse.coo_matrix(m)

    r, c, v = m.row, m.col, m.data
    tril_no_diag = r > c
    triu_no_diag = c > r

    if triu_no_diag.sum() != tril_no_diag.sum():
        return False, "no_diag_sum", triu_no_diag.sum() - tril_no_diag.sum()

    rl = r[tril_no_diag]
    cl = c[tril_no_diag]
    vl = v[tril_no_diag]
    ru = r[triu_no_diag]
    cu = c[triu_no_diag]
    vu = v[triu_no_diag]

    sortl = np.lexsort((cl, rl))
    sortu = np.lexsort((ru, cu))
    vl = vl[sortl]
    vu = vu[sortu]

    check = np.allclose(vl, vu)
    return check
84523c1c4bf0120025d6e7a0bcc9cf2e489b1ae8
3,645,627
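Note the mixed return type of is_symmetric above: a plain bool on the happy path, but a (False, reason, diff) tuple when the off-diagonal entry counts differ, so a bare `if is_symmetric(m)` would wrongly treat that tuple as truthy. A small sketch:

import numpy as np
from scipy import sparse

A = sparse.coo_matrix(np.array([[1.0, 2.0], [2.0, 1.0]]))
assert is_symmetric(A)

B = sparse.coo_matrix(np.array([[1.0, 2.0], [0.0, 1.0]]))
result = is_symmetric(B)
assert result[0] is False and result[1] == "no_diag_sum"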
from pathlib import Path
from typing import List
import os


def get_proj_libdirs(proj_dir: Path) -> List[str]:
    """
    This function finds the library directories
    """
    proj_libdir = os.environ.get("PROJ_LIBDIR")
    libdirs = []
    if proj_libdir is None:
        libdir_search_paths = (proj_dir / "lib", proj_dir / "lib64")
        for libdir_search_path in libdir_search_paths:
            if libdir_search_path.exists():
                libdirs.append(str(libdir_search_path))
        if not libdirs:
            raise SystemExit(
                "ERROR: PROJ_LIBDIR dir not found. Please set PROJ_LIBDIR."
            )
    else:
        libdirs.append(proj_libdir)
    return libdirs
efeacd08940c1f8706cd86aa0c5da50b498608e6
3,645,628
def render_raster_map(bounds, scale, basemap_image, aoi_image, id, path, colors):
    """Render raster dataset map based on bounds.

    Merge this over basemap image and under aoi_image.

    Parameters
    ----------
    bounds : list-like of [xmin, ymin, xmax, ymax]
        bounds of map
    scale : dict
        map scale info
    basemap_image : Image object
    aoi_image : Image object
    id : str
        map ID
    path : str
        path to raster dataset
    colors : list-like of colors
        colors to render map image based on values in raster

    Returns
    -------
    id, Image object
        Image object is None if it could not be rendered or does not
        overlap bounds
    """
    raster_img = render_raster(path, bounds, scale, WIDTH, HEIGHT, colors)
    map_image = merge_maps([basemap_image, raster_img, aoi_image])
    map_image = to_base64(map_image)
    return id, map_image
f24c3b48911c7c322d3c02e9808f0013354c567d
3,645,629
def str2num(s):
    """Convert string to int or float number.

    Parameters
    ----------
    s : string
        String representing a number.

    Returns
    -------
    Number (int or float)

    Raises
    ------
    TypeError
        If `s` is not a string.
    ValueError
        If the string does not represent a (float or int) number.
    """
    try:
        x = float(s)
        if x.is_integer():
            return int(x)
        else:
            return x
    except ValueError:
        raise ValueError("'s' does not represent a number (int or float)")
5dfaed567a66fc7d3ee46cbb70d9c408d38fcbfe
3,645,630
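For instance, str2num collapses integral floats to int and keeps fractional values as float:

assert str2num("3") == 3 and isinstance(str2num("3"), int)
assert str2num("3.0") == 3          # integral float collapses to int
assert str2num("2.5") == 2.5        # fractional value stays a float
assert str2num("1e3") == 1000       # scientific notation parses via float()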
import os


def get_html_templates_path():
    """
    Return path to ABlog templates folder.
    """
    pkgdir = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(pkgdir, "templates")
e72caf4a2558298ec909aff04bfde381abba256f
3,645,631
def new_log(infile_history=None, extra_notes=None, git_repo=None):
    """Create a new command line log/history.

    Kwargs:
        infile_history (dict): keys are input file names and values are the
            logs for those files
        extra_notes (list): List containing strings of extra information
            (output is one list item per line)
        git_repo (str): Location of git repository associated with script
            executed at command line

    Returns:
        str. Command line log
    """
    log = ''

    current_entry = get_current_entry(git_repo=git_repo)
    log += current_entry + '\n'

    if extra_notes:
        log += 'Extra notes: \n'
        for line in extra_notes:
            log += line + '\n'

    if infile_history:
        assert type(infile_history) == dict
        nfiles = len(list(infile_history.keys()))
        for fname, history in infile_history.items():
            if nfiles > 1:
                log += 'History of %s: \n %s \n' % (fname, history)
            else:
                log += '%s \n' % (history)

    return log
f1bbf4b9c84442d7abf700fec98277eb9e2283ea
3,645,632
from typing import OrderedDict
import inspect


def _get_new_args_dict(func, args, kwargs):
    """Build one dict from args, kwargs and function default args

    The function signature is used to build one joint dict from args and
    kwargs and additionally from the default arguments found in the function
    signature. The order of the args in this dict is the order of the args in
    the function signature and hence the list of args can be used in cases
    where we can only supply *args, but we have to work with a mixture of
    args, kwargs and default args as in xarray.apply_ufunc in the xarray
    wrapper.
    """
    new_args_dict = OrderedDict()
    for i, (arg, parameter) in enumerate(inspect.signature(func).parameters.items()):
        if i < len(args):
            new_args_dict[arg] = args[i]
        elif arg in kwargs.keys():
            new_args_dict[arg] = kwargs[arg]
        else:
            new_args_dict[arg] = parameter.default
    return new_args_dict
ad7553e7b778b8f7b499217c7ee4ad7328958809
3,645,633
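A small illustration of the merge order in _get_new_args_dict: positional args win first, then matching kwargs, then signature defaults, with keys kept in signature order:

def example(a, b, c=3, d=4):
    return a + b + c + d

merged = _get_new_args_dict(example, (1, 2), {"c": 30})
assert list(merged.items()) == [("a", 1), ("b", 2), ("c", 30), ("d", 4)]
# Signature order means the values can be re-passed as pure positionals:
assert example(*merged.values()) == 37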
import numpy as np


def hellinger_funct(x, P, Q):
    """
    P, Q should be scipy.stats gaussian_kde objects
    """
    return np.sqrt(P(x) * Q(x))
198f0cf72ef75cece3c59248d8cd1215fa4299a1
3,645,634
import os


def clean_collection(collection):
    """Iterates through the images in the Collection and remove those
    that don't exist on disk anymore
    """
    images = collection.images()
    number_purged = 0
    for image in images:
        if not os.path.isfile(image.get_filepath()):
            logger.info('Removing Image %s from collection %s',
                        image.get_filepath(), collection)
            image.delete()
            number_purged = number_purged + 1
    return number_purged
ce4162ef6fe4670b7666e6f84b4de9b9cb01e91b
3,645,635
import datetime  # module import: the body calls datetime.datetime.today()


def human_date(date):
    """
    Return a string containing a nice human readable date/time.
    Miss out the year if it's this year
    """
    today = datetime.datetime.today()
    if today.year == date.year:
        return date.strftime("%b %d, %I:%M%P")
    return date.strftime("%Y %b %d, %I:%M%P")
7088617b58c0d3b3193e11885fcd7b7ef075f627
3,645,636
from scipy.spatial import distance


def compute_thickness(wmP, kdTreegm, kdTreewm):
    """
    Estimate thickness at a white matter surface point as the average of the
    two closest-point distances between the surfaces.

    :param wmP: white matter surface point
    :param kdTreegm: kd-tree built on the gray matter surface
    :param kdTreewm: kd-tree built on the white matter surface
    :return: thickness estimate
    """
    # Find the closest point to the gray matter surface point
    gmIndex = kdTreegm.FindClosestPoint(wmP)
    gmP = kdTreegm.GetDataSet().GetPoint(gmIndex)

    # compute the distance
    # distance from wm point to gm point
    dst1 = distance.euclidean(wmP, gmP)

    wmIndex = kdTreewm.FindClosestPoint(gmP)
    # look the point up in the tree its index came from (kdTreewm, not kdTreegm)
    wmP2 = kdTreewm.GetDataSet().GetPoint(wmIndex)

    # distance from gm to closest wm point
    dst2 = distance.euclidean(gmP, wmP2)

    # average the two distances
    thickness = (dst1 + dst2) / float(2)
    return thickness
c2c13a8c17eb997843c9e5752c6ae05f0854a7e5
3,645,637
import sys


def sync(args):
    """Synchronize your local repository with the manifest and the real world.

    This includes:
      - ensures that all projects are cloned
      - ensures that they have the correct remotes set up
      - fetches from the remotes
      - checks out the correct tracking branches
      - if the local branch is not dirty and it is a fast-forward update,
        merges the remote branch's changes in

    Options:
      -f - if you have dirty repositories, will blow away changes rather than
           failing. This does *not* reset your branch if you have local
           *committed* changes.

    Process exit code will be 0 if all projects updated correctly.
    """
    force = '-f' in args

    man = load_manifest()
    for (name, project) in man.projects.items():
        if not project.is_cloned():
            project.clone()
    ensure_remotes([])
    fetch([])
    checkout_branches(args)
    retcode = 0
    for project in man.projects.values():
        if project.is_uptodate():
            continue
        repo = project.git_repo
        if repo.is_workdir_dirty() or repo.is_index_dirty():
            if force:
                print("Blowing away changes in %s" % project.name, file=sys.stderr)
                repo.check_command(['reset', '--hard', 'HEAD'])
            else:
                print("Not syncing project %s - it is dirty." % project.name, file=sys.stderr)
                retcode = 1
                continue
        (left, right) = project.tracking_status
        if left > 0:
            print(("Not syncing project %s - you have %d unpushed changes."
                   % (project.name, left)), file=sys.stderr)
            retcode = 1
            continue
        elif right > 0:
            repo.check_command(["merge", project.tracker.remote_ref])
            project.set_uptodate()
        else:
            print("Project %s needs no update" % project.name, file=sys.stderr)
    return retcode
c7cac9892d00b91f2f812c0ec20fc33fbe06308e
3,645,638
def cytoband_interval():
    """Create test fixture for Cytoband Interval."""
    return CytobandInterval(
        start="q13.32",
        end="q13.32"
    )
d052e2dcf7276dc24c680d0b1168ebea6f779eac
3,645,639
def _get_proxy_class(request):
    """
    Return a class that is a subclass of the requests class.
    """
    cls = request.__class__
    if cls not in _proxy_classes:

        class RequestProxy(cls):
            def __init__(self, request):
                self.__dict__ = request.__dict__
                self.__request = request

            def __eq__(self, other):
                return self.__request == other

            # since we're overriding __eq__ we must override __hash__:
            def __hash__(self):
                return hash(self.__request)

            def finish(self):
                return self.__request.finish()

        _proxy_classes[cls] = RequestProxy
    return _proxy_classes[cls]
72113c9d38bdf91650fa88d4297a25457f34b9f8
3,645,640
import csv


def rebuilt_emoji_dictionaries(filename):
    """
    Rebuilds emoji dictionaries, given a csv file with labeled emojis.
    """
    emoji2unicode_name, emoji2sentiment = {}, {}
    with open(filename) as csvin:
        for emoji in csv.DictReader(csvin):
            for key, value in emoji.items():
                if key in ('Occurrences', 'Positive', 'Neutral', 'Negative'):
                    emoji[key] = int(value)
                elif key in ('Position',):
                    emoji[key] = float(value)
            emoji['Sentiment'] = (emoji['Positive'] - emoji['Negative']) / \
                max(100, (emoji['Positive'] + emoji['Neutral'] + emoji['Negative']))
            emoji2unicode_name[emoji['Emoji']] = emoji['Unicode name']
            emoji2sentiment[emoji['Emoji']] = emoji['Sentiment']
    return emoji2unicode_name, emoji2sentiment
69bf1438a524ea54bd7bef2d4537a0c61cd0bc3d
3,645,641
def send_message(receiver, message):
    """
    Send message to receivers using the Twilio account.

    :param receiver: Number of Receivers
    :param message: Message to be Sent
    :return: Sends the Message
    """
    message = client.messages.create(
        from_="whatsapp:+14155238886",
        body=message,
        to=f"whatsapp:{receiver}"
    )
    return message
05022f40104d8b38ffe096ee01941ef04da5f076
3,645,642
from nltk.stem import PorterStemmer


def stem_list(tokens: list) -> list:
    """Stems all tokens in a given list

    Arguments:
    - tokens: List of tokens

    Returns:
        List of stemmed tokens
    """
    stem = PorterStemmer().stem
    return [stem(t) for t in tokens]
6086bda0bce5ce042156a12617a2c09b4b8f9cc8
3,645,643
def unique(s):
    """Return a list of the elements in s, but without duplicates.

    For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3],
    unique("abcabc") some permutation of ["a", "b", "c"], and
    unique(([1, 2], [2, 3], [1, 2])) some permutation of
    [[2, 3], [1, 2]].

    For best speed, all sequence elements should be hashable. Then
    unique() will usually work in linear time.

    If not possible, the sequence elements should enjoy a total
    ordering, and if list(s).sort() doesn't raise TypeError it's
    assumed that they do enjoy a total ordering. Then unique() will
    usually work in O(N*log2(N)) time.

    If that's not possible either, the sequence elements must support
    equality-testing. Then unique() will usually work in quadratic
    time.
    """
    n = len(s)
    if n == 0:
        return []

    # Try using a dict first, as that's the fastest and will usually
    # work. If it doesn't work, it will usually fail quickly, so it
    # usually doesn't cost much to *try* it. It requires that all the
    # sequence elements be hashable, and support equality comparison.
    u = {}
    try:
        for x in s:
            u[x] = 1
    except TypeError:
        del u  # move on to the next method
    else:
        return list(u.keys())  # list() so the docstring's promise holds on Python 3

    # We can't hash all the elements. Second fastest is to sort,
    # which brings the equal elements together; then duplicates are
    # easy to weed out in a single pass.
    # NOTE: Python's list.sort() was designed to be efficient in the
    # presence of many duplicate elements. This isn't true of all
    # sort functions in all languages or libraries, so this approach
    # is more effective in Python than it may be elsewhere.
    try:
        t = list(s)
        t.sort()
    except TypeError:
        del t  # move on to the next method
    else:
        assert n > 0
        last = t[0]
        lasti = i = 1
        while i < n:
            if t[i] != last:
                t[lasti] = last = t[i]
                lasti += 1
            i += 1
        return t[:lasti]

    # Brute force is all that's left.
    u = []
    for x in s:
        if x not in u:
            u.append(x)
    return u
055d2d6e748e1a4ee22057fcd3e73d4e8c8e8081
3,645,644
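The three fallback tiers of unique above can each be exercised directly: hashable ints take the dict path, unhashable-but-orderable lists take the sort path, and dicts (neither hashable nor orderable in Python 3) fall through to the quadratic path:

assert sorted(unique([1, 2, 3, 1, 2, 3])) == [1, 2, 3]                 # dict path
assert unique([[1, 2], [2, 3], [1, 2]]) == [[1, 2], [2, 3]]            # sort path
assert unique([{"a": 1}, {"a": 1}, {"b": 2}]) == [{"a": 1}, {"b": 2}]  # brute force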
import subprocess


def execute(command: str, cwd: str = None, env: dict = None) -> str:
    """Executes a command and returns the stdout from it"""
    result = subprocess.run(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=cwd,
        env=env,
        shell=True,
        check=False,
    )
    if result.returncode != 0:
        stderr = result.stderr.decode("utf-8").rstrip()
        raise RuntimeError(stderr)
    return result.stdout.decode("utf-8").rstrip()
abda75f0e24f619ea35126ae3cdf8b8271e755f4
3,645,645
import jax.numpy as jnp


def get_ground_truth_assignments_for_zacharys_karate_club() -> jnp.ndarray:
    """Returns ground truth assignments for Zachary's karate club."""
    return jnp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0,
                      1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
fc2050072293d9857b50425a4f31137a0872096d
3,645,646
import os
from os.path import join as ospj

import natsort as ns
import numpy as np
from PIL import Image


def read_frames_from_dir(_dir, _type='png'):
    """
    Read frames from the dir, and return list
    """
    paths = os.listdir(_dir)
    valid_paths = []
    for path in paths:
        if _type in path:
            valid_paths.append(path)
    valid_paths = ns.natsorted(valid_paths)

    frames = []
    for path in valid_paths:
        frames.append(np.array(Image.open(ospj(_dir, path))))
    return np.array(frames)
e5e94498ec53b2099c9aa4645d34f410626d6044
3,645,647
from typing import List
from typing import Tuple


def sort_places_versus_distance_from_coordinates(
        list_places: List[Place],
        gps_coord: Tuple[float, float]
) -> List[Place]:
    """Order list of places according to the distance to a reference coordinates.

    Note: this helper compensates for unreliable ordering in the API results.
    Results from the API are generally sorted, but many cases were identified
    where the order is inconsistent (example: Montréal).

    Args:
        list_places: List of Place instances to be ordered
        gps_coord: Tuple with latitude and longitude in degrees for the
            reference point

    Returns:
        List of Place instances ordered by distance to the reference point
        (nearest first)
    """
    sorted_places = sorted(
        list_places,
        key=lambda x: haversine((float(x.latitude), float(x.longitude)), gps_coord),
    )
    return sorted_places
3089503406bf0959dd1caac5746693f812eb449c
3,645,648
from typing import List
from typing import Tuple


def solve_part2_coordinate_subdivision(boxes: List[Tuple[str, Box]]) -> int:
    """
    An alternative method to solve part 2 which uses coordinate subdivisions
    to make a new grid. On the puzzle input, this is roughly a 800x800x800
    grid, which actually takes some time to compute through (~3 min).
    It runs all the examples, however, in under 3 seconds.
    """
    # The boxes are in [a, b] form. Replace them with coordinate divisions that are [a, b)
    x_divisions = sorted({b.x0 for _, b in boxes} | {b.x1 + 1 for _, b in boxes})
    y_divisions = sorted({b.y0 for _, b in boxes} | {b.y1 + 1 for _, b in boxes})
    z_divisions = sorted({b.z0 for _, b in boxes} | {b.z1 + 1 for _, b in boxes})

    # Map of lower corner coordinates to index into the divisions
    x_index = {x: i for i, x in enumerate(x_divisions)}
    y_index = {y: i for i, y in enumerate(y_divisions)}
    z_index = {z: i for i, z in enumerate(z_divisions)}

    on = set()
    for step, box in boxes:
        points = {
            (x, y, z)
            for x in range(x_index[box.x0], x_index[box.x1 + 1])
            for y in range(y_index[box.y0], y_index[box.y1 + 1])
            for z in range(z_index[box.z0], z_index[box.z1 + 1])
        }
        if step == 'on':
            on |= points
        else:
            on -= points

    # Calculate the actual area held by all boxes
    def area(pos: Tuple[int, int, int]) -> int:
        x, y, z = pos
        return ((x_divisions[x + 1] - x_divisions[x]) *
                (y_divisions[y + 1] - y_divisions[y]) *
                (z_divisions[z + 1] - z_divisions[z]))

    return sum(map(area, on))
5a283b0de558755f7ec95ff9cde091ca95b245de
3,645,649
from numpy import zeros, unique
# Note: `find` below is used matplotlib.mlab-style (indices where a condition
# is True); it is expected to come from the module's star imports.


def decodeSignal(y, t, fclk, nbits):
    """
    This file reads in digitized voltages outputted from the QCM Antenna
    Master Controller Board and outputs time, logic code number pairs.

    The encoding scheme is as follows:
        HEADER: 3 clock cycles HIGH, followed by 3 clock cycles LOW
        SIGNAL: 1 clock cycle LOW, followed by 1 clock cycle HIGH or LOW
                depending on logic state of bit, followed by another clock
                cycle LOW
        CLOSER: 1 clock cycle LOW, followed by 2 clock cycles HIGH.

    Ex. USAGE:
        ...
        fclk=4.E6
        nbits=7
        t,y = decodeSig(y,t, fclk, nbits)

    y = array of double-precision numbers giving voltage signal with encoded numbers
    t = array of double-precision numbers giving timebase of signal
    fclk = Clock speed of output, which is master controller board's clock speed
           divided by 16, since least significant bit of counter is only toggled
           on clock positive edges [Hz]
    nbits = number of bits encoded in signal.

    Begun on Tuesday, 17 January 2012 (my 28th Birthday!), Ted Golfinopoulos
    """
    tauc = 1./fclk    # Period of master controller board clock, [s]
    taus = t[1]-t[0]  # Sampling time
    fs = 1.E0/taus    # Sampling frequency.

    onThresh = 1.0E0  # Threshold voltage above which the signal is considered ON.

    # Duration of an encoded logic transmission, including header (6 clock cycles),
    # bit encoding, and closer (3 clock cycles) [s]
    dt = (9.E0+nbits*3.E0)*tauc
    tbin = 3.E0*tauc

    # Find indices and times where board output is high
    onSamplesInHeader = int(3.E0*tauc/taus)  # Number of digitizer samples expected to be high in header.
    onSamplesInCloser = int(2.E0*tauc/taus)  # Number of digitizer samples expected to be low in closer.
    codeLength = int(dt/taus)                # Number of samples expected in whole code.

    ### Nomenclature:
    # header = characteristic pattern at the start of an encoded signal.
    #          Here, it is 3 clock counts HIGH, followed by 3 clock counts LOW
    # closer = characteristic pattern at the end of an encoded signal.
    #          Here, it is 1 clock count LOW, followed by 2 clock counts HIGH

    # Find indices at which headers and closers start.
    # The algorithm that follows looks for stretches of points where the signal is HIGH for a given
    # duration - the header is high for 3 counts, the closer for 2, and encoded signal bits for 1.
    # There may be some spread in the actual number of points registering as HIGH; as such, the algorithm
    # returns the index of the first point for which the subsequent sequence of points is HIGH for the expected
    # time period, then advances the index pointer by (a) if header, the nominal number of time points in the
    # encoded stream, less the closer, or (b) if closer, the nominal number of time points in the closer.
    # This avoids double-counting.
    # The resulting indices delimit the boundaries of encoded numbers.
    headInds = []
    closeInds = []
    bufferLength = 0
    i = 0  # Initialize index pointer
    while i < len(y):
        if(y[i] > onThresh):  # First, check if y[i] is on - save computation of comparing series.
            if(all(y[(i+bufferLength):(i+onSamplesInHeader-bufferLength)] > onThresh)):
                # Header found - store and jump to end of header ON code.
                headInds.append(i)
                i = i+codeLength-onSamplesInCloser
                continue
            # Don't start marking closers until a header has been found - this can be important
            # if MCB starts putting outputs before the outputs signal digitization starts.
            elif(all(y[(i+bufferLength):(i+onSamplesInCloser-bufferLength)] > onThresh) and len(headInds) > 0):
                closeInds.append(i)  # Start index of closer found - store. Code is between these two indices.
                i = i+onSamplesInCloser
                continue
        i = i+1  # Increment index

    print("Finished finding headers and closers.")

    # Takes an array containing a list of bits which are on in a binary number,
    # in any order, with least-significant value corresponding to 0, and returns
    # the decimal number corresponding to this number.
    def onBits2num(bitInds):
        if len(bitInds) == 0:
            return 0
        else:
            return sum([pow(2, aa) for aa in bitInds])

    # Preallocate arrays.
    codeVals = zeros(len(closeInds))  # Array to store encoded numbers
    timeVals = zeros(len(closeInds))  # Array to store timepoints at which encoded numbers were sampled

    # Loop through all indices containing the start and end times for encoded bit patterns
    for i in range(0, len(closeInds)):
        # Within each encoded segment, divide up the segment into bins of duration, tbin.
        # The state of the bits are contained in each bin. Find and number the bins for which the
        # board output was high.
        try:
            tOnInBin = t[headInds[i]+find(y[headInds[i]:closeInds[i]] > onThresh)] - t[headInds[i]]
            codeInds = find([tOnInBin[jj] > 2.E0*tbin and tOnInBin[jj] < (2.E0+nbits)*tbin
                             for jj in range(0, len(tOnInBin))])
        except:
            temp = headInds[i:i+5]
            print(i)
            print('headInds')
            print(len(headInds))
            print(temp)
            temp = closeInds[i:i+5]
            print('closeInds')
            print(len(closeInds))
            print(temp)
            temp = find(y[headInds[i]:closeInds[i]] > onThresh)
            print('length of find( y[headInds[i]:closeInds[i]]>onThresh )')
            print(len(temp))
            print('First value')
            print(temp[0])
            raise  # Re-raise the exception.

        # Don't try to index into tOnInBin with array unless array is not empty. If array is
        # empty, the logic code is 0, and the signal is low for the entire code segment.
        if(len(codeInds) > 0):
            tOnInBin = tOnInBin[codeInds]
            # Subtract time equivalent to first two time bins from signal - these are for the header.
            tOnInBin = tOnInBin-2.E0*tbin
        else:
            tOnInBin = zeros(0)  # empty array so that tOnInBin/tbin below stays valid

        onBins = unique([int(aa) for aa in tOnInBin/tbin])

        # The first two bins (i.e. t>0 and t < 2*tbin) comprise the header.
        # Remove these bins from consideration. The remaining internal bins comprise the logic signal,
        # ordered most-to-least significant bit. Turn these numbers into the 2's place to simplify
        # conversion into a decimal number.
        onBits = (nbits - 1) - onBins

        # Convert array showing which places are 1 in the binary number into a decimal number. Store.
        codeVals[i] = onBits2num(onBits)
        # Store timepoint. On average, time point is halfway between data points around the edge.
        timeVals[i] = t[headInds[i]]-0.5*taus

    print("Finished calculating codes.")

    # Return vectors of time points and corresponding code values.
    return [timeVals, codeVals]
72b812267e172ee245cb5c7f59366105baad40dc
3,645,650
import numpy as np


def sqrt_price_to_tick(sqrt_price):
    """
    TODO: finish documentation

    See formula 6.8 in the white paper. We use the change of base formula to
    compute the log as numpy doesn't have a log function with an arbitrary base.

    :param sqrt_price:
    :return:
    """
    base = np.sqrt(1.0001)
    return int(np.floor(np.log(sqrt_price) / np.log(base)))
8c57f7918d656c982e8c34e3b6952f724ab2dc18
3,645,651
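Under the usual Uniswap-v3-style convention that tick i covers square-root prices in [1.0001^(i/2), 1.0001^((i+1)/2)), a price strictly inside a bucket should map back to that tick; sampling mid-bucket keeps the floor away from floating-point edges:

import numpy as np

base = np.sqrt(1.0001)
for tick in (0, 100, -230, 50_000):
    mid_bucket_sqrt_price = base ** (tick + 0.5)  # strictly inside tick's bucket
    assert sqrt_price_to_tick(mid_bucket_sqrt_price) == tick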
import logging

from bs4 import BeautifulSoup as Soup


def getStateName(responseText: str) -> str:
    """Parse state name in title field.

    Args:
        responseText: response.text object from requests.

    Returns:
        State string name.
    """
    soup = Soup(responseText, "html.parser")
    logging.debug("Ingesting soup: %s", soup.prettify())
    if soup.title:
        return soup.title.get_text()
    else:
        return "***No state found."
3e28c9a68cb116ad5faa36bf1b4b41e00284e477
3,645,652
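A minimal check of getStateName with hand-made HTML (bs4's BeautifulSoup standing in for the Soup alias):

assert getStateName("<html><head><title>Georgia</title></head></html>") == "Georgia"
assert getStateName("<p>no title here</p>") == "***No state found."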
def find_dead_blocks(func, cfg):
    """Find all immediate dead blocks"""
    return [block for block in cfg
            if not cfg.predecessors(block)
            if block != func.startblock]
3f72e0a573b1ef617511f2b9ec3d2e30c7ba6554
3,645,653
import numpy as np


def _mcse_sd(ary):
    """Compute the Markov Chain sd error."""
    _numba_flag = Numba.numba_flag
    ary = np.asarray(ary)
    if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=1)):
        return np.nan
    ess = _ess_sd(ary)
    if _numba_flag:
        # plain float(): np.float was deprecated and removed in modern NumPy
        sd = float(_sqrt(svar(np.ravel(ary), ddof=1), np.zeros(1)))
    else:
        sd = np.std(ary, ddof=1)
    fac_mcse_sd = np.sqrt(np.exp(1) * (1 - 1 / ess) ** (ess - 1) - 1)
    mcse_sd_value = sd * fac_mcse_sd
    return mcse_sd_value
4fcf966b7ec98cad193418f7e623c96154646b5f
3,645,654
def dfs_predecessors(G, source=None):
    """Return dictionary of predecessors in depth-first-search from source."""
    return dict((t, s) for s, t in dfs_edges(G, source=source))
6929d25c981fec9b932c0f978b1ee45f37e0e565
3,645,655
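This one-liner mirrors networkx's own dfs_predecessors; with networkx's dfs_edges standing in for the dfs_edges referenced above, a path graph gives the obvious parent map:

import networkx as nx
from networkx import dfs_edges  # assumed source of the dfs_edges used above

G = nx.path_graph(4)  # 0 - 1 - 2 - 3
assert dfs_predecessors(G, source=0) == {1: 0, 2: 1, 3: 2}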
def merge_all_sections(prnt_sctns, child_sctns, merge_within_sections=False):
    """
    Merge the doc-sections of the parent's and child's attribute into a
    single docstring.

    Parameters
    ----------
    prnt_sctns: OrderedDict[str, Union[None,str]]
    child_sctns: OrderedDict[str, Union[None,str]]

    Returns
    -------
    str
        Output docstring of the merged docstrings.
    """
    doc = []

    prnt_only_raises = prnt_sctns["Raises"] and not (
        prnt_sctns["Returns"] or prnt_sctns["Yields"]
    )
    if prnt_only_raises and (child_sctns["Returns"] or child_sctns["Yields"]):
        prnt_sctns["Raises"] = None

    for key in prnt_sctns:
        sect = merge_section(
            key,
            prnt_sctns[key],
            child_sctns[key],
            merge_within_sections=merge_within_sections,
        )
        if sect is not None:
            doc.append(sect)
    return "\n\n".join(doc) if doc else None
c692d1b08db1a49545eb39e6385040fafc10e149
3,645,656
import tensorflow as tf


def input_fn(is_training, data_dir, batch_size, num_epochs=1):
    """Input_fn using the tf.data input pipeline for CIFAR-10 dataset.

    Args:
        is_training: A boolean denoting whether the input is for training.
        data_dir: The directory containing the input data.
        batch_size: The number of samples per batch.
        num_epochs: The number of epochs to repeat the dataset.

    Returns:
        A tuple of images and labels.
    """
    dataset = tf.data.Dataset.from_tensor_slices(get_filenames(is_training, data_dir))
    dataset = dataset.flat_map(tf.data.TFRecordDataset)

    if is_training:
        # When choosing shuffle buffer sizes, larger sizes result in better
        # randomness, while smaller sizes have better performance.
        dataset = dataset.shuffle(buffer_size=500)

    dataset = dataset.map(parse_record)
    dataset = dataset.map(lambda image, label: preprocess_image(image, label, is_training))

    dataset = dataset.prefetch(batch_size)

    # We call repeat after shuffling, rather than before, to prevent separate
    # epochs from blending together.
    dataset = dataset.repeat(num_epochs)
    dataset = dataset.batch(batch_size)

    iterator = dataset.make_one_shot_iterator()
    images, labels = iterator.get_next()

    return images, labels
94875b205df2d67993dd658f33fbf3be917cf701
3,645,657
from typing import List

import brax
import numpy as onp


def _eye(sys: brax.System, qp: brax.QP) -> List[float]:
    """Determines the camera location for a Brax system."""
    d = {}
    for joint in sys.config.joints:
        if joint.parent not in d:
            d[joint.parent] = []
        po, co = joint.parent_offset, joint.child_offset
        off = onp.array([po.x, po.y, po.z]) - onp.array([co.x, co.y, co.z])
        d[joint.parent].append((joint.child, onp.linalg.norm(off)))

    def max_dist(parent):
        ret = 0
        for child, dist in d.get(parent, []):
            dist += max_dist(child)
            if dist > ret:
                ret = dist
        return ret

    # TODO: improve on this rough approximation of the bounding box
    dist = max([max_dist(p) for p in d] + [1]) * 3

    off = [dist * .5, -dist, dist * .5]
    if sys.config.frozen.position.x:
        off = [dist, 0, 0]
    elif sys.config.frozen.position.y:
        off = [0, -dist, 0]
    elif sys.config.frozen.position.z:
        off = [0, 0, dist * 2]

    return list(qp.pos[0] + onp.array(off))
22eb7a1ffbd2aed810e19e842eaf9ab648a2b83b
3,645,658
def num_list(to_parse):
    """
    Creates list from its string representation

    Arguments:
        to_parse {string} -- String representation of list, can include
            'None' or internal lists, represented by separation with '#'

    Returns:
        list[int] -- List represented in to_parse
    """
    if len(to_parse) == 2:
        return []
    inter = to_parse[1:-1]
    inter = [x.strip() for x in inter.split(',')]
    result = []
    for n in inter:
        if n == "None":
            result.append(None)
        elif "#" in n:
            result.append([int(x) for x in n.split("#")])
        else:
            result.append(int(n))
    return result
b444554e37434b5ae42ebc913bcc0f9b99c65ce9
3,645,659
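Two representative round-trips through num_list, including the '#'-separated inner-list syntax:

assert num_list("[]") == []
assert num_list("[1, None, 2#3#4]") == [1, None, [2, 3, 4]]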
import torch
import os


def main():
    """Main."""
    torch.manual_seed(args.seed)

    # Experiment Information
    print_experiment_info(args)

    dataloaders, G, optimizer_g, writer = train_setup(args)
    optimizer_g, lr = lr_scheduler_withoutDecay(optimizer_g, lr=args.lr)
    # scheduler_g = optim.lr_scheduler.StepLR(optimizer_g, step_size=20, gamma=0.1, verbose=True)
    scheduler_g = optim.lr_scheduler.StepLR(optimizer_g, step_size=5, gamma=0.5, verbose=True)

    F1 = ResClassifier(num_classes=args.class_num, num_layer=1)
    F2 = ResClassifier(num_classes=args.class_num, num_layer=1)
    F1.cuda()
    F2.cuda()
    optimizer_f = optim.SGD(list(F1.parameters()) + list(F2.parameters()),
                            momentum=0.9, lr=0.01, weight_decay=0.0005)
    # scheduler_f = optim.lr_scheduler.StepLR(optimizer_f, step_size=20, gamma=0.1, verbose=True)
    scheduler_f = optim.lr_scheduler.StepLR(optimizer_f, step_size=5, gamma=0.5, verbose=True)

    # G_ckpt = os.path.join(args.out, f'ckpts/MCD_G.pkl')
    # if os.path.exists(G_ckpt):
    #     checkpoint = torch.load(G_ckpt, map_location='cuda')
    #     G.load_state_dict(checkpoint, strict=False)
    # F1_ckpt = os.path.join(args.out, f'ckpts/MCD_F1.pkl')
    # if os.path.exists(F1_ckpt):
    #     checkpoint = torch.load(F1_ckpt, map_location='cuda')
    #     F1.load_state_dict(checkpoint, strict=False)
    # F2_ckpt = os.path.join(args.out, f'ckpts/MCD_F2.pkl')
    # if os.path.exists(F2_ckpt):
    #     checkpoint = torch.load(F2_ckpt, map_location='cuda')
    #     F2.load_state_dict(checkpoint, strict=False)

    if args.show_feat:
        G_ckpt = os.path.join(args.out, f'ckpts/MCD_G.pkl')
        if os.path.exists(G_ckpt):
            checkpoint = torch.load(G_ckpt, map_location='cuda')
            G.load_state_dict(checkpoint, strict=False)
        F1_ckpt = os.path.join(args.out, f'ckpts/MCD_F1.pkl')
        if os.path.exists(F1_ckpt):
            checkpoint = torch.load(F1_ckpt, map_location='cuda')
            F1.load_state_dict(checkpoint, strict=False)
        F2_ckpt = os.path.join(args.out, f'ckpts/MCD_F2.pkl')
        if os.path.exists(F2_ckpt):
            checkpoint = torch.load(F2_ckpt, map_location='cuda')
            F2.load_state_dict(checkpoint, strict=False)
        Test_MCD_tsne(args, G, F1, F2, dataloaders, 30,
                      splits=['test_source', 'train_target', 'test_target'])
        return

    if args.criterion == 'ce':
        criterion = nn.CrossEntropyLoss()
    elif args.criterion == 'focal':
        criterion = FocalLoss(gamma=1)
    elif args.criterion == 'weighted_focal':
        if args.source == 'RAF_balanced':
            cls_num_list = np.array([713, 262, 713, 713, 713, 682, 713])
        else:  # RAF
            cls_num_list = np.array([1259, 262, 713, 4705, 1885, 682, 2465])
        beta = 0.9999
        effective_num = 1.0 - np.power(beta, cls_num_list)
        per_cls_weights = (1.0 - beta) / np.array(effective_num)
        per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
        # [0.65831665 3.01150101 1.13164193 0.20750166 0.45330163 1.18126904 0.35646808]
        per_cls_weights = [1.75, 3.0, 2.0, 1.0, 1.5, 2.0, 1.25]
        print(per_cls_weights)
        class_weights = torch.FloatTensor(per_cls_weights).cuda()
        criterion = FocalLoss(weight=class_weights, gamma=1)
    elif args.criterion == 'ldam':
        if args.source == 'RAF_balanced':
            cls_num_list = np.array([713, 262, 713, 713, 713, 682, 713])
        else:  # RAF
            cls_num_list = np.array([1259, 262, 713, 4705, 1885, 682, 2465])
        idx = 0
        betas = [0, 0.9999]
        effective_num = 1.0 - np.power(betas[idx], cls_num_list)
        per_cls_weights = (1.0 - betas[idx]) / np.array(effective_num)
        per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
        per_cls_weights = [1.75, 3.0, 2.0, 1.0, 1.5, 2.0, 1.25]
        per_cls_weights = torch.FloatTensor(per_cls_weights).cuda()

    def get_drw_weights(args, epoch, cls_num_list):
        if True:
            idx = 0 if epoch <= 5 else 1
            betas = [0, 0.9999]
            effective_num = 1.0 - np.power(betas[idx], cls_num_list)
            per_cls_weights = (1.0 - betas[idx]) / np.array(effective_num)
            per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
            per_cls_weights = torch.FloatTensor(per_cls_weights).cuda()
            return per_cls_weights

    print(f'Using {args.criterion} loss')

    # Running Experiment
    print("Run Experiment...")
    for epoch in range(1, args.epochs + 1):
        # if epoch < 5 and args.criterion == 'weighted_focal':  # Try delayed reweighting
        #     criterion = FocalLoss(gamma=1)
        if args.criterion == 'ldam':
            if epoch > 4:
                per_cls_weights = [1.75, 3.0, 2.0, 1.0, 1.5, 2.0, 1.25]
            else:
                per_cls_weights = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
            # per_cls_weights = get_drw_weights(args, epoch, cls_num_list)
            print(f'Epoch: {epoch}, per cls weights: {per_cls_weights}')
            per_cls_weights = torch.FloatTensor(per_cls_weights).cuda()
            criterion = LDAMLoss(cls_num_list, weight=per_cls_weights)

        print(f'Epoch : {epoch}')
        Train_MCD(args, G, F1, F2, dataloaders['train_source'], dataloaders['train_target'],
                  optimizer_g, optimizer_f, epoch, writer, criterion)
        scheduler_g.step()
        scheduler_f.step()

        print('\nEvaluation ...')
        Test_MCD_tsne(args, G, F1, F2, dataloaders, epoch,
                      splits=['test_source', 'train_target', 'test_target'])
        # Test_MCD(args, G, F1, F2, dataloaders, epoch, splits=['train_source', 'test_source'])
        # Test_MCD(args, G, F1, F2, dataloaders, epoch, splits=['train_source', 'train_target'])
        # Test_MCD(args, G, F1, F2, dataloaders, epoch, splits=['train_source', 'test_target'])
        # Test_MCD(args, G, F1, F2, dataloaders, epoch, splits=['train_source', 'train_target', 'test_source', 'test_target'])

        if args.save_checkpoint and epoch % 5:
            torch.save(G.state_dict(), os.path.join(args.out, f'ckpts/MCD_G_{epoch}.pkl'))
            torch.save(F1.state_dict(), os.path.join(args.out, f'ckpts/MCD_F1_{epoch}.pkl'))
            torch.save(F2.state_dict(), os.path.join(args.out, f'ckpts/MCD_F2_{epoch}.pkl'))

    writer.close()
aa042529b8903ac800824e82b1f06c7480da2107
3,645,660
import numpy as np


def total_scatter_matrix(data):
    """
    Total sum of squares (TSS): sum of squared distances of points around
    the barycentre

    References: Clustering Indices, Bernard Desgraupes (April 2013)
    """
    X = np.array(data.T.copy(), dtype=np.float64)
    for feature_i in range(data.shape[1]):
        X[feature_i] = X[feature_i] - np.mean(X[feature_i])
    T = np.dot(X, X.T)
    return T
829bfdbf838d087517465e7173e480796e52cf8e
3,645,661
def save_camera_zip(camera_id, year, month, file_path=None):
    """
    Download a camera ZIP archive.

    :param camera_id: int, camera ID
    :param year: int, year
    :param month: int, month
    :param file_path: str, optional, path to save file
    :return: bool, status of download
    """
    # Setup file name
    file_name = "{0:04d}.{1:02d}.zip".format(year, month)
    if file_path is None:
        file_path = "./{0}".format(file_name)

    # Download
    save_buffer(get_zip_url(camera_id, year, month), file_path)
    return True
41e4ef0f5f412850266de2d136da719adae08e04
3,645,662
def read_user(s):
    """
    s: str -> dict
    (parameter renamed from `str`, which shadowed the builtin)
    """
    pieces = s.split()
    return {
        'first': pieces[0],
        'last': pieces[1],
        'username': pieces[5],
        'custID': pieces[3],
        'password': pieces[7],
        'rank': 0,
        'total': 0,
    }
fcb24a2b791f0df8f40ea4080cdabe83d51fe068
3,645,663
import calendar


def lweekdate(weekday, year, month, nextDay=0):
    """
    Usage
        lastDate = lweekdate(weekday, year, month, nextDay)

    Notes
        Date of last occurrence of weekday in month: returns the serial date
        number for the last occurrence of Weekday in the given year and month
        and in a week that also contains NextDay.

        Weekday: Weekday whose date you seek. Enter as an integer from 1
        through 7:
            1 Sunday
            2 Monday
            3 Tuesday
            4 Wednesday
            5 Thursday
            6 Friday
            7 Saturday
        Year: Year. Enter as a four-digit integer.
        Month: Month. Enter as an integer from 1 through 12.

        Not Implemented:
        NextDay: (Optional) Weekday that must occur after Weekday in the same
        week. Enter as an integer from 0 through 7, where 0 = ignore (default)
        and 1 through 7 are the same as for Weekday.

        Any input can contain multiple values, but if so, all other inputs
        must contain the same number of values or a single value that applies
        to all.

    See Also
        Use the function datestr to convert serial date numbers to formatted
        date strings.
    """
    assert weekday in range(1, 8), "weekday must be in range(1,8)"
    assert month in range(1, 13), "month must be in range(1,13)"
    assert year in range(0, 10000), "year must be in range(0,10000)"
    assert nextDay in range(0, 8), "weekday must be in range(0,8)"
    day = calendar.monthcalendar(year, month)[-1][weekday-1]
    if day == 0:
        day = calendar.monthcalendar(year, month)[-2][weekday-1]
    return datenum(year, month, day)
60367db7223f104260e2b7d757b367d6388d222b
3,645,664
import geopandas as gpd
from shapely.geometry import Polygon


def multi_polygon_gdf(basic_polygon):
    """
    A GeoDataFrame containing the basic polygon geometry.

    Returns
    -------
    GeoDataFrame containing the basic_polygon polygon.
    """
    poly_a = Polygon([(3, 5), (2, 3.25), (5.25, 6), (2.25, 2), (2, 2)])
    gdf = gpd.GeoDataFrame(
        [1, 2],
        geometry=[poly_a.buffer(0), basic_polygon.buffer(0)],
        crs="epsg:4326",
    )
    multi_gdf = gpd.GeoDataFrame(
        geometry=gpd.GeoSeries(gdf.unary_union), crs="epsg:4326"
    )
    return multi_gdf
9acfa76ca3a51603d96e1388dc7c7a1178ec3fa1
3,645,665
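A usage sketch (assuming geopandas/shapely are installed; basic_polygon is normally supplied by a pytest fixture):

from shapely.geometry import Polygon

square = Polygon([(6, 6), (8, 6), (8, 8), (6, 8)])  # stands in for the fixture
gdf = multi_polygon_gdf(square)
print(gdf.geometry.iloc[0].geom_type)  # MultiPolygon (the two parts are disjoint)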
import logging
import traceback

from github import GithubException  # PyGithub exception type (assumed dependency)

_LOGGER = logging.getLogger(__name__)


def get_or_create_pull(github_repo, title, body, head, base, *, none_if_no_commit=False):
    """Try to create the PR. If the PR exists, try to find it instead. Raises otherwise.

    You should always use the complete head syntax "org:branch", since that
    syntax is required when listing pull requests.

    If "none_if_no_commit" is set, return None instead of raising an exception
    if the problem is that head and base are the same.
    """
    try:  # Try to create or get a PR
        return github_repo.create_pull(
            title=title,
            body=body,
            head=head,
            base=base
        )
    except GithubException as err:
        err_message = err.data['errors'][0].get('message', '')
        if err.status == 422 and err_message.startswith('A pull request already exists'):
            _LOGGER.info('PR already exists, get this PR')
            return list(github_repo.get_pulls(head=head, base=base))[0]
        elif none_if_no_commit and err.status == 422 and err_message.startswith('No commits between'):
            _LOGGER.info('No PR possible since head %s and base %s are the same', head, base)
            return None
        else:
            _LOGGER.warning("Unable to create PR:\n%s", err.data)
            raise
    except Exception:
        response = traceback.format_exc()
        _LOGGER.warning("Unable to create PR:\n%s", response)
        raise
3968fc99c006c45e20eabce1329d95247ad855c8
3,645,666
import json
from collections import OrderedDict


def get_board_properties(board, board_path):
    """Parse the board file and return the properties of the specified board."""
    # helper is assumed to be provided by the surrounding package.
    with open(helper.linux_path(board_path)) as f:
        board_data = json.load(f, object_pairs_hook=OrderedDict)

    return board_data[board]
e5fa5542c540c643ecf8b57314e227d14e193a56
3,645,667
import re def parse_command(command): """ Parse the given one-line QF command analogously to parse_file(). """ m = re.match(r'^\#?([bdpq]|build|dig|place|query)\s+(.+)', command) if m is None: raise ParametersError("Invalid command format '%s'." % command) # set up details dict details = { 'build_type': buildconfig.get_full_build_type_name(m.group(1)), 'start': (0, 0), 'start_comment': '', 'comment': '' } # break apart lines by # and cells by , lines = [[cell.strip() for cell in line.split(',')] for line in m.group(2).split('#') ] # break up lines into z-layers separated by #> or #< # TODO: actually support this properly, right now we are just # calling this to do conversion to FileLayers for us filelayers = split_zlayers(lines) # tidy up the layers for fl in filelayers: fl.fixup() fl.clean_cells() return filelayers, details
05f0e12b413b3e26f2c73734e3d6e8f10d124c0d
3,645,668
from typing import List, Union
import shlex
import subprocess


def _run(cmd: Union[str, List[str]]) -> List[str]:
    """Run a 'cmd', returning stdout as a list of strings."""
    cmd_list = shlex.split(cmd) if isinstance(cmd, str) else cmd
    result = subprocess.run(cmd_list, capture_output=True)
    return result.stdout.decode('utf-8').split("\n")
74fc47f531e8eef9f77b80798f0b5505b57968da
3,645,669
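For example (POSIX shell utilities assumed):

lines = _run("echo hello")
assert lines == ["hello", ""]  # trailing "" comes from the final newline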
def get_repository_username(repo_url): """ Returns the repository username :return: (str) Repository owner username """ repo_path = _get_repo_path(repo_url) return repo_path[0]
008e67435c11e4fbb12ca19149e795dd50c12526
3,645,670
from IPython.config import Application
import logging

_logger = None


def get_logger():
    """Grab the global logger instance.

    If a global IPython Application is instantiated, grab its logger.
    Otherwise, grab the root logger.
    """
    global _logger
    if _logger is None:
        if Application.initialized():
            _logger = Application.instance().log
        else:
            logging.basicConfig()
            _logger = logging.getLogger()
    return _logger
717487ac1c94c09ab7831e405255283aea4570a5
3,645,671
import pandas as pd


def process_y(y_train: pd.Series, max_mult=20, large_sampsize=50000):
    """
    Drop missing values, then downsample the negative class if the sample
    size is large and there is significant class imbalance.
    """
    # Remove missing labels
    ytr = y_train.dropna()

    # The code below assumes the negative class is over-represented.
    assert ytr.mean() < 0.5

    # If there are too many negative samples, downsample
    if len(ytr) > large_sampsize:
        label_counts = ytr.value_counts()
        # Keep at most max_mult negatives per positive, but never fewer
        # than large_sampsize negatives.
        max_neg = max(label_counts.loc[1.0] * max_mult, large_sampsize)
        y_neg = ytr[ytr == 0.0]
        y_pos = ytr[ytr == 1.0]
        new_y = pd.concat(
            [y_neg.sample(frac=1.0, replace=False).iloc[:max_neg], y_pos]
        ).sample(frac=1.0, replace=False)
        return new_y
    else:
        return ytr
2f36ba3bce93d47f944784f83fd731b9aa315acc
3,645,672
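A small demonstration of the downsampling behaviour (synthetic labels; thresholds shrunk so the branch triggers):

import numpy as np
import pandas as pd

y = pd.Series(np.r_[np.zeros(900), np.ones(10)]).sample(frac=1.0, random_state=0)

out = process_y(y, max_mult=5, large_sampsize=100)
# 10 positives kept, negatives capped at max(10 * 5, 100) = 100
assert out.sum() == 10 and len(out) == 110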
import numpy as np


def _distance(y1, y2):
    """1D distance calculator (equivalent to the absolute difference)."""
    inner = (y2 - y1) ** 2
    d = np.sqrt(inner)
    return d
696c5ccbe720301d22d9b142e9a5d5f3c507b738
3,645,673
def create_container(context, values): """Create a new container. :param context: The security context :param values: A dict containing several items used to identify and track the container, and several dicts which are passed into the Drivers when managing this container. For example: :: { 'uuid': uuidutils.generate_uuid(), 'name': 'example', 'type': 'virt' } :returns: A container. """ return _get_dbdriver_instance().create_container(context, values)
b9047467a0a96c1b08bc92b4e74399e0a413ba45
3,645,674
from typing import Callable, Dict

from google.protobuf.json_format import MessageToDict
from pydantic import Field, create_model


def build_model_from_pb(name: str, pb_model: Callable):
    """
    Build a pydantic model from a protobuf message class.

    :param name: Name of the model.
    :param pb_model: protobuf message class.
    :return: Model.
    """
    dp = MessageToDict(pb_model(), including_default_value_fields=True)

    all_fields = {
        k: (name if k in ('chunks', 'matches') else type(v), Field(default=v))
        for k, v in dp.items()
    }

    # QueryLangProto is assumed to come from the surrounding package's
    # generated protobuf module.
    if pb_model == QueryLangProto:
        all_fields['parameters'] = (Dict, Field(default={}))

    return create_model(name, **all_fields)
a1de965b13b6cbbe33a08a52561f699042dd93f8
3,645,675
def create_proof_of_time_pietrzak(discriminant, x, iterations, int_size_bits):
    """
    Returns the serialized result and proof as a (y, proof) tuple of blobs.
    """
    delta = 8
    powers_to_calculate = proof_pietrzak.cache_indeces_for_count(iterations)
    powers = iterate_squarings(x, powers_to_calculate)
    y = powers[iterations]
    proof = proof_pietrzak.generate_proof(x, iterations, delta, y, powers,
                                          x.identity(), generate_r_value,
                                          int_size_bits)
    return y.serialize(), serialize_proof(proof)
d31ac6eadcc3ce155d682e2cef4b392561a1412b
3,645,676
def resample_dataset ( fname, x_factor, y_factor, method="mean", \ data_min=-1000, data_max=10000 ): """This function resamples a GDAL dataset (single band) by a factor of (``x_factor``, ``y_factor``) in x and y. By default, the only method used is to calculate the mean. The ``data_min`` and ``data_max`` parameters are used to mask out pixels in value""" QA_OK = np.array([0, 1, 4, 12, 8, 64, 512, 2048] )# VI OK # Table in http://gis.cri.fmach.it/modis-ndvi-evi/ # First open the NDVI file fname = 'HDF4_EOS:EOS_GRID:"%s":' % fname + \ 'MOD_Grid_monthly_CMG_VI:CMG 0.05 Deg Monthly NDVI' gdal_data = gdal.Open ( fname ) # Get raster sizes nx = gdal_data.RasterXSize ny = gdal_data.RasterYSize # Calculate output raster size nnx = nx/x_factor nny = ny/y_factor # Reshape the raster data... B = np.reshape ( gdal_data.ReadAsArray(), ( nny, y_factor, nnx, x_factor ) ) # Now open QA file fname = fname.replace ("NDVI", "VI Quality" ) gdal_data = gdal.Open ( fname ) qa = gdal_data.ReadAsArray() # Check what goes through QA qa_pass = np.logical_or.reduce([qa==x for x in QA_OK ]) B = np.ma.array ( B, mask=qa_pass ) # Re-jiggle the dimensions so we can easily average over then C = np.transpose ( B, (0, 2, 1, 3 ) ) if method == "mean": reduced_raster = np.mean ( np.mean ( C, axis=-1), axis=-1 ) else: raise NotImplemented( "Only mean reduction supported by now") return reduced_raster
f32465711d7dae3a8e7350676cb0e90f084bf5c5
3,645,677
from struct import pack

# ZERO_BYTE, b(), get_word_alignment() and bytes_leading() are assumed to be
# provided by the surrounding package's byte/compat helpers.


def int2bytes(number, fill_size=None, chunk_size=None, overflow=False):
    """
    Convert an unsigned integer to bytes (base-256 representation).

    Does not preserve leading zeros if you don't specify a chunk size or
    fill size.

    .. NOTE:
        You must not specify both fill_size and chunk_size. Only one
        of them is allowed.

    :param number:
        Integer value
    :param fill_size:
        If the optional fill size is given the length of the resulting
        byte string is expected to be the fill size and will be padded
        with prefix zero bytes to satisfy that length.
    :param chunk_size:
        If optional chunk size is given and greater than zero, pad the front
        of the byte string with binary zeros so that the length is a multiple
        of ``chunk_size``.
    :param overflow:
        ``False`` (default). If this is ``True``, no ``OverflowError``
        will be raised when the fill_size is shorter than the length
        of the generated byte sequence. Instead the byte sequence will
        be returned as is.
    :returns:
        Raw bytes (base-256 representation).
    :raises:
        ``OverflowError`` when fill_size is given and the number takes up
        more bytes than fit into the block. This requires the ``overflow``
        argument to this function to be set to ``False``; otherwise, no
        error will be raised.
    """
    if number < 0:
        raise ValueError("Number must be an unsigned integer: %d" % number)

    if fill_size and chunk_size:
        raise ValueError("You can either fill or pad chunks, but not both")

    # Raises a TypeError early if number is not integer-like.
    number & 1

    raw_bytes = b('')

    # Pack the integer one machine word at a time into bytes.
    num = number
    word_bits, _, max_uint, pack_type = get_word_alignment(num)
    pack_format = ">%s" % pack_type
    while num > 0:
        raw_bytes = pack(pack_format, num & max_uint) + raw_bytes
        num >>= word_bits
    # Obtain the index of the first non-zero byte.
    zero_leading = bytes_leading(raw_bytes)
    if number == 0:
        raw_bytes = ZERO_BYTE
    # De-padding.
    raw_bytes = raw_bytes[zero_leading:]

    length = len(raw_bytes)
    if fill_size and fill_size > 0:
        if not overflow and length > fill_size:
            raise OverflowError(
                "Need %d bytes for number, but fill size is %d" %
                (length, fill_size)
            )
        raw_bytes = raw_bytes.rjust(fill_size, ZERO_BYTE)
    elif chunk_size and chunk_size > 0:
        remainder = length % chunk_size
        if remainder:
            padding_size = chunk_size - remainder
            raw_bytes = raw_bytes.rjust(length + padding_size, ZERO_BYTE)
    return raw_bytes
091764ffeb9a15036b484380750f04496db36da1
3,645,678
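A worked example of the padding behaviour (assuming the helpers above are in place):

assert int2bytes(0x1234) == b'\x12\x34'
assert int2bytes(0x1234, fill_size=4) == b'\x00\x00\x12\x34'
assert int2bytes(0x1234, chunk_size=3) == b'\x00\x12\x34'
assert int2bytes(0) == b'\x00'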
def compFirstFivePowOf2(iset={0, 1, 2, 3, 4}): """ task 0.5.6 a comprehension over the given set whose value is the set consisting of the first five powers of two, starting with 2**0 """ return {2**x for x in iset}
a7b04ab6b127ef5ee7fdd3598b1569e171fd009e
3,645,679
import types
import pandas


def sdc_pandas_dataframe_getitem(self, idx):
    """
    Intel Scalable Dataframe Compiler User Guide
    ********************************************
    Pandas API: pandas.DataFrame.getitem

    Get data from a DataFrame by indexer.

    Limitations
    -----------
    Supported ``key`` can be one of the following:

    * String literal, e.g. :obj:`df['A']`
    * A slice, e.g. :obj:`df[2:5]`
    * A tuple of strings, e.g. :obj:`df[('A', 'B')]`
    * An array of booleans, e.g. :obj:`df[True,False]`
    * A series of booleans, e.g. :obj:`df[series([True,False])]`

    Getting a column through attribute access is also supported.

    Examples
    --------
    .. literalinclude:: ../../../examples/dataframe/getitem/df_getitem_attr.py
       :language: python
       :lines: 37-
       :caption: Getting Pandas DataFrame column through getting attribute.
       :name: ex_dataframe_getitem_attr

    .. command-output:: python ./dataframe/getitem/df_getitem_attr.py
       :cwd: ../../../examples

    .. literalinclude:: ../../../examples/dataframe/getitem/df_getitem.py
       :language: python
       :lines: 37-
       :caption: Getting Pandas DataFrame column where key is a string.
       :name: ex_dataframe_getitem

    .. command-output:: python ./dataframe/getitem/df_getitem.py
       :cwd: ../../../examples

    .. literalinclude:: ../../../examples/dataframe/getitem/df_getitem_slice.py
       :language: python
       :lines: 34-
       :caption: Getting slice of Pandas DataFrame.
       :name: ex_dataframe_getitem_slice

    .. command-output:: python ./dataframe/getitem/df_getitem_slice.py
       :cwd: ../../../examples

    .. literalinclude:: ../../../examples/dataframe/getitem/df_getitem_tuple.py
       :language: python
       :lines: 37-
       :caption: Getting Pandas DataFrame elements where key is a tuple of strings.
       :name: ex_dataframe_getitem_tuple

    .. command-output:: python ./dataframe/getitem/df_getitem_tuple.py
       :cwd: ../../../examples

    .. literalinclude:: ../../../examples/dataframe/getitem/df_getitem_array.py
       :language: python
       :lines: 34-
       :caption: Getting Pandas DataFrame elements where key is an array of booleans.
       :name: ex_dataframe_getitem_array

    .. command-output:: python ./dataframe/getitem/df_getitem_array.py
       :cwd: ../../../examples

    .. literalinclude:: ../../../examples/dataframe/getitem/df_getitem_series.py
       :language: python
       :lines: 34-
       :caption: Getting Pandas DataFrame elements where key is series of booleans.
       :name: ex_dataframe_getitem_series

    .. command-output:: python ./dataframe/getitem/df_getitem_series.py
       :cwd: ../../../examples

    .. seealso::
        :ref:`Series.getitem <pandas.Series.getitem>`
            Get value(s) of Series by key.
        :ref:`Series.setitem <pandas.Series.setitem>`
            Set value to Series by index
        :ref:`Series.loc <pandas.Series.loc>`
            Access a group of rows and columns by label(s) or a boolean array.
        :ref:`Series.iloc <pandas.Series.iloc>`
            Purely integer-location based indexing for selection by position.
        :ref:`Series.at <pandas.Series.at>`
            Access a single value for a row/column label pair.
        :ref:`Series.iat <pandas.Series.iat>`
            Access a single value for a row/column pair by integer position.
        :ref:`DataFrame.setitem <pandas.DataFrame.setitem>`
            Set value to DataFrame by index
        :ref:`DataFrame.loc <pandas.DataFrame.loc>`
            Access a group of rows and columns by label(s) or a boolean array.
        :ref:`DataFrame.iloc <pandas.DataFrame.iloc>`
            Purely integer-location based indexing for selection by position.
        :ref:`DataFrame.at <pandas.DataFrame.at>`
            Access a single value for a row/column label pair.
        :ref:`DataFrame.iat <pandas.DataFrame.iat>`
            Access a single value for a row/column pair by integer position.

    Intel Scalable Dataframe Compiler Developer Guide
    *************************************************
    Pandas DataFrame method :meth:`pandas.DataFrame.getitem` implementation.

    .. only:: developer
        Test: python -m sdc.runtests -k sdc.tests.test_dataframe.TestDataFrame.test_df_getitem*
    """
    ty_checker = TypeChecker('Operator getitem().')

    if not isinstance(self, DataFrameType):
        return None

    if isinstance(idx, types.StringLiteral):
        col_loc = self.column_loc.get(idx.literal_value)
        if col_loc is None:
            key_error = True
        else:
            type_id, col_id = col_loc.type_id, col_loc.col_id
            key_error = False

        def _df_getitem_str_literal_idx_impl(self, idx):
            if key_error == False:  # noqa
                data = self._data[type_id][col_id]
                return pandas.Series(data, index=self._index, name=idx)
            else:
                raise KeyError('Column is not in the DataFrame')

        return _df_getitem_str_literal_idx_impl

    if isinstance(idx, types.UnicodeType):
        def _df_getitem_unicode_idx_impl(self, idx):
            # http://numba.pydata.org/numba-doc/dev/developer/literal.html#specifying-for-literal-typing
            # literally raises special exception to call getitem with literal idx value got from unicode
            return literally(idx)

        return _df_getitem_unicode_idx_impl

    if isinstance(idx, types.Tuple):
        if all([isinstance(item, types.StringLiteral) for item in idx]):
            return gen_df_getitem_tuple_idx_impl(self, idx)

    if isinstance(idx, types.SliceType):
        return gen_df_getitem_slice_idx_impl(self, idx)

    if isinstance(idx, SeriesType) and isinstance(idx.dtype, types.Boolean):
        self_index_is_none = isinstance(self.index, types.NoneType)
        idx_index_is_none = isinstance(idx.index, types.NoneType)

        if self_index_is_none and not idx_index_is_none:
            if not check_index_is_numeric(idx):
                ty_checker.raise_exc(idx.index.dtype, 'number', 'idx.index.dtype')

        if not self_index_is_none and idx_index_is_none:
            if not check_index_is_numeric(self):
                ty_checker.raise_exc(idx.index.dtype, self.index.dtype, 'idx.index.dtype')

        if not self_index_is_none and not idx_index_is_none:
            if not check_types_comparable(self.index, idx.index):
                ty_checker.raise_exc(idx.index.dtype, self.index.dtype, 'idx.index.dtype')

        return gen_df_getitem_bool_series_idx_impl(self, idx)

    if isinstance(idx, types.Array) and isinstance(idx.dtype, types.Boolean):
        return gen_df_getitem_bool_array_idx_impl(self, idx)

    expected_types = 'str, tuple(str), slice, series(bool), array(bool)'
    ty_checker.raise_exc(idx, expected_types, 'idx')
90b335db7327da883561665909a2b335437efc83
3,645,680
from IPython.display import HTML


def display_finds_meta(r):
    """A list of urls in r is displayed as HTML"""
    rows = ["<tr><td><img src='{row}'/></td>"
            "<td><a href='{meta}' target='_blank'>{meta}</a></td></tr>".format(row=row, meta=row)
            for row in r]
    return HTML("""<html><head></head>
    <body>
    <table>
    {rows}
    </table>
    </body>
    </html>
    """.format(rows=' '.join(rows)))
bd769bc4b0b6d4d55ec721e02c623f30d5eb5e1f
3,645,681
def __pairwise__(iterable):
    """
    Converts a list of elements into a list of pairs like:
    list -> (list[0], list[1]), (list[2], list[3]), (list[4], list[5]), ...

    :param iterable: Input list.
    :return: Iterator over pairs of the given list's elements.
    """
    a = iter(iterable)
    return zip(a, a)
59eae23e0e6f9ccba528f9632caf77fe28698c5b
3,645,682
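For example:

assert list(__pairwise__([1, 2, 3, 4])) == [(1, 2), (3, 4)]
# A trailing odd element is silently dropped:
assert list(__pairwise__([1, 2, 3])) == [(1, 2)]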
def _generateTriangleSequence(): """ Generates list of elements following sequence of triangle numbers. Returns: sequenceElements - List of elements following the sequence. """ sequenceElements = [] totalCharactersInNewSequence = 0 total = 1 currentAddend = 2 while totalCharactersInNewSequence <= _MAX_NUMBER_OF_CHARACTERS_TO_PRINT: currentSequenceMember = str(total) sequenceElements.append(currentSequenceMember) totalCharactersInNewSequence += len(currentSequenceMember) total += currentAddend currentAddend += 1 return sequenceElements
453b5e672a4817281f5e4ba51ca3ea426fcdb3d2
3,645,683
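The first few members it produces are the triangle numbers:

_MAX_NUMBER_OF_CHARACTERS_TO_PRINT = 7  # hypothetical value for the module constant
print(_generateTriangleSequence())  # ['1', '3', '6', '10', '15', '21']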
import math

import numpy as np


def normalize_neurons_range(neurons, standard_diagonal_line: float):
    """
    :param neurons: nested list of [x, y, z] points to be rescaled.
    :param standard_diagonal_line: pre-defined standard length of the
        diagonal of the xoy plate.
    :return: neurons, width_scale, [width_span, height_span, z_span]

    width_scale: the widths and heights differ across volumes, so scaling
    the width turns an elliptical shell into a circular one, which makes
    counting on the xoy plate convenient.
    """
    regions = np.array([re for res in neurons.copy() for re in res], dtype=np.float32)  # [x, y, z]
    width, height = np.max(regions[:, 0]) - np.min(regions[:, 0]), np.max(regions[:, 1]) - np.min(regions[:, 1])
    scale = standard_diagonal_line / math.sqrt(width * width + height * height)
    neurons = [[[p[0] * scale, p[1] * scale, p[2] * scale] for p in pp] for pp in neurons]  # for knn feature
    width_scale = height / width
    width_span = width * width_scale * scale
    height_span = height * scale
    z_span = (np.max(regions[:, 2]) - np.min(regions[:, 2])) * scale
    return neurons, width_scale, [width_span, height_span, z_span]
1709b68054aa10ccd9d065b04d809e2df4d3a8e2
3,645,684
import tensorflow as tf  # uses pre-1.0 APIs: tf.unpack/tf.pack (later renamed unstack/stack)


def distort_image(image):
    """Perform random distortions to the given 4D image and return result"""
    # Switch to 3D as that's what these operations require
    slices = tf.unpack(image)
    output = []

    # Perform pixel-wise distortions
    for image in slices:
        image = tf.image.random_flip_left_right(image)
        image = tf.image.random_saturation(image, .2, 2.)
        image += tf.truncated_normal(image.get_shape(), stddev=.05)
        image = tf.image.random_contrast(image, .85, 1.15)
        image = tf.image.random_brightness(image, .3)
        output.append(image)

    # Go back to 4D
    image = tf.pack(output)

    return image
70db49a2a3dfe31c0b511824342a95ad5da30430
3,645,685
from io import StringIO


def tablebyname(filehandle, header):
    """fast extraction of the table using the header to identify the table

    This function reads only one table from the HTML file. This is in
    contrast to `results.readhtml.titletable` that will read all the tables
    into memory and allows you to interactively look thru them. The function
    `results.readhtml.titletable` can be very slow on large HTML files. This
    function is useful when you know which table you are looking for. It
    looks for the title line that is in bold just before the table. Some
    tables don't have such a title in bold. This function will not work for
    tables that don't have a title in bold

    Parameters
    ----------
    filehandle : file like object
        A file handle to the E+ HTML table file
    header: str
        This is the title of the table you are looking for

    Returns
    -------
    titleandtable : (str, list)
        - (title, table)
        - title = previous item with a <b> tag
        - table = rows -> [[cell1, cell2, ..], [cell1, cell2, ..], ..]
    """
    htmlheader = f"<b>{header}</b><br><br>"
    thetable = None
    with filehandle:
        for line in filehandle:
            line = _decodeline(line)
            if line.strip() == htmlheader:
                justtable = getnexttable(filehandle)
                thetable = f"{htmlheader}\n{justtable}"
                break
    if thetable is None:
        return None
    filehandle = StringIO(thetable)
    htables = readhtml.titletable(filehandle)
    try:
        return list(htables[0])
    except IndexError:
        return None
ba1c228843f631b0441fc69c66b0d9ae7acbf813
3,645,686
import numpy as np
from sklearn.datasets import load_iris


def BuildDataset():
    """Create the dataset"""
    #  Get the dataset keeping the first two features
    iris = load_iris()
    x = iris["data"][:, :2]
    y = iris["target"]

    #  Standardize and keep only classes 0 and 1
    x = (x - x.mean(axis=0)) / x.std(axis=0)
    i0 = np.where(y == 0)[0]
    i1 = np.where(y == 1)[0]
    x = np.vstack((x[i0], x[i1]))

    #  Train and test data
    xtrn = np.vstack((x[:35], x[50:85]))
    ytrn = np.array([0]*35 + [1]*35)
    xtst = np.vstack((x[35:50], x[85:]))
    ytst = np.array([0]*15 + [1]*15)

    idx = np.argsort(np.random.random(70))
    xtrn = xtrn[idx]
    ytrn = ytrn[idx]
    idx = np.argsort(np.random.random(30))
    xtst = xtst[idx]
    ytst = ytst[idx]

    # One-hot encode the labels
    y_train = np.zeros((len(ytrn), 2))
    for i in range(len(ytrn)):
        if ytrn[i] == 1:
            y_train[i, :] = [0, 1]
        else:
            y_train[i, :] = [1, 0]
    y_test = np.zeros((len(ytst), 2))
    for i in range(len(ytst)):
        if ytst[i] == 1:
            y_test[i, :] = [0, 1]
        else:
            y_test[i, :] = [1, 0]

    return (xtrn.reshape((xtrn.shape[0], 1, 2)), y_train,
            xtst.reshape((xtst.shape[0], 1, 2)), y_test)
f6b3cc216262899880f048dd6d4823596d111c1a
3,645,687
from tifffile import imread  # assumed reader; any 'imread' returning ndarrays works


def read_tile(file, config):
    """Read a codex-specific 5D image tile"""
    # When saving tiles in ImageJ compatible format, any unit length
    # dimensions are lost so when reading them back out, it is simplest
    # to conform to 5D convention by reshaping if necessary.
    # None (np.newaxis) re-inserts the dropped unit axes; modern numpy
    # requires a tuple (not a list) for this kind of mixed index.
    slices = tuple(None if dim == 1 else slice(None) for dim in config.tile_dims)
    return imread(file)[slices]
3e0e61d8fa5ac497377c02c90a03bcbd176752ab
3,645,688
import numpy as np


def rmsd(V, W):
    """
    Calculate Root-mean-square deviation from two sets of vectors V and W.
    """
    D = len(V[0])
    N = len(V)
    rmsd = 0.0
    for v, w in zip(V, W):
        rmsd += sum([(v[i] - w[i]) ** 2.0 for i in range(D)])
    return np.sqrt(rmsd / N)
dc537f1cc742f7c4c5231af4c45d470e582be623
3,645,689
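For example, two point sets offset by (1, 0) everywhere give an RMSD of 1:

V = [[0.0, 0.0], [1.0, 1.0], [2.0, 0.0]]
W = [[1.0, 0.0], [2.0, 1.0], [3.0, 0.0]]
assert rmsd(V, W) == 1.0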
from hask import L  # hask's lazy-list constructor (assumed; L[...] builds a hask List)


def partition(f, xs):
    """
    partition :: (a -> Bool) -> [a] -> ([a], [a])

    The partition function takes a predicate and a list and returns the
    pair of lists of elements which do and do not satisfy the predicate.
    """
    yes, no = [], []
    for item in xs:
        if f(item):
            yes.append(item)
        else:
            no.append(item)
    return L[yes], L[no]
7fde3557fa9d1e3bdf232885dda360a6695dabc0
3,645,690
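A usage sketch (assuming the hask library, whose lists are iterable):

evens, odds = partition(lambda x: x % 2 == 0, [1, 2, 3, 4, 5])
print(list(evens))  # [2, 4]
print(list(odds))   # [1, 3, 5]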
from math import pi, sin

import numpy as np
import matplotlib.pyplot as plt
import scikits.bvp_solver as bvp_solver


def bvp2_check():
    """
    Using scikits.bvp_solver to solve the bvp
    y'' + y' + sin y = 0, y(0) = y(2*pi) = 0

    y0 = y, y1 = y'
    y0' = y1, y1' = y'' = -sin(y0) - y1
    """
    lbc, rbc = .1, .1

    def function1(x, y):
        return np.array([y[1], -sin(y[0]) - y[1]])

    def boundary_conditions(ya, yb):
        # Evaluate the difference between the temperature of the hot stream
        # on the left and the required boundary condition, and likewise for
        # the cold stream on the right.
        return (np.array([ya[0] - lbc]),
                np.array([yb[0] - rbc]))

    problem = bvp_solver.ProblemDefinition(num_ODE=2,
                                           num_parameters=0,
                                           num_left_boundary_conditions=1,
                                           boundary_points=(0, 2. * pi),
                                           function=function1,
                                           boundary_conditions=boundary_conditions)

    guess = np.linspace(0., 2. * pi, 10)
    guess = np.array([.1 - np.sin(2 * guess), np.sin(2 * guess)])
    # plt.plot(guess, np.sin(guess))
    # plt.show()
    solution = bvp_solver.solve(problem, solution_guess=guess)

    A = np.linspace(0., 2. * pi, 200)
    T = solution(A)
    plt.plot(A, T[0, :], '-k', linewidth=2.0)
    plt.show()
    plt.clf()

    N = 150
    x = (2. * np.pi / N) * np.arange(1, N + 1).reshape(N, 1)
    print(x.shape)
    print(solution(x)[0, :].shape)
    plt.plot(x, solution(x)[0, :])
    plt.show()
    # np.save('sol', solution(x)[0, :])
    return
6fe48b76945d3c322c21938049ab74099d022c7d
3,645,691
import zarr


def get_zarr_store(file_path):
    """Return a zarr store instance chosen from the file path's sub-suffix
    (e.g. ``.lmdb.zarr`` -> LMDBStore), defaulting to a DirectoryStore."""
    ZARR_STORE_MAP = {"lmdb": zarr.LMDBStore,
                      "zip": zarr.ZipStore,
                      "dbm": zarr.DBMStore,
                      "default": zarr.DirectoryStore}
    suffix, subsuffix = get_subsuffix(file_path)
    if suffix != 'zarr' or subsuffix is None or subsuffix not in ZARR_STORE_MAP:
        return ZARR_STORE_MAP['default'](file_path)
    else:
        return ZARR_STORE_MAP[subsuffix](file_path)
ecc17168d56bd9cc725a2db51914cddd098aa1af
3,645,692
import sys

PY2 = sys.version_info[0] == 2


def convert_string(inpt):
    """Return the string value of the input

    >>> convert_string(1)
    '1'
    """
    if PY2:
        return str(inpt).decode()
    else:
        return str(inpt)
a88ee436726a6587e673fb71673c771851b83cea
3,645,693
import numpy as np


def get_ipc_kernel(imdark, tint, boxsize=5, nchans=4, bg_remove=True,
                   hotcut=[5000, 50000], calc_ppc=False,
                   same_scan_direction=False, reverse_scan_direction=False):
    """ Derive IPC/PPC Convolution Kernels

    Find the IPC and PPC kernels used to convolve detector pixel data.
    Finds all hot pixels within hotcut parameters and measures the
    average relative flux within adjacent pixels.

    Parameters
    ==========
    imdark : ndarray
        Dark current (slope) image used to locate hot pixels.
    tint : float
        Integration time; multiplied into ``imdark`` to get accumulated
        counts.

    Keyword Parameters
    ==================
    boxsize : int
        Size of the box. Should be odd, but if even, then adds +1.
    nchans : int
        Number of detector output channels (amplifiers).
    bg_remove : bool
        Remove the average dark current values for each hot pixel cut-out.
        Only works if boxsize>3.
    hotcut : array-like
        Min and max values of hot pixels (above bg and bias) to consider.
    calc_ppc : bool
        Calculate and return post-pixel coupling?
    same_scan_direction : bool
        Are all the output channels read in the same direction?
        By default fast-scan readout direction is ``[-->,<--,-->,<--]``
        If ``same_scan_direction``, then all ``-->``
    reverse_scan_direction : bool
        If ``reverse_scan_direction``, then ``[<--,-->,<--,-->]`` or all ``<--``
    """

    ny, nx = imdark.shape
    chsize = int(ny / nchans)

    imtemp = imdark * tint

    boxhalf = int(boxsize/2)
    boxsize = int(2*boxhalf + 1)
    distmin = np.ceil(np.sqrt(2.0) * boxhalf)

    # Get rid of pixels around border
    pixmask = ((imtemp > hotcut[0]) & (imtemp < hotcut[1]))
    pixmask[0:4+boxhalf, :] = False
    pixmask[-4-boxhalf:, :] = False
    pixmask[:, 0:4+boxhalf] = False
    pixmask[:, -4-boxhalf:] = False

    # Ignore borders between amplifiers
    for ch in range(1, nchans):
        x1 = ch*chsize - boxhalf
        x2 = x1 + 2*boxhalf
        pixmask[:, x1:x2] = False
    indy, indx = np.where(pixmask)
    nhot = len(indy)
    if nhot < 2:
        print("No hot pixels found!")
        return None

    # Only want isolated pixels:
    # get distances for every pixel and, if any neighbour is too close,
    # drop the pixel from the mask.
    for i in range(nhot):
        d = np.sqrt((indx-indx[i])**2 + (indy-indy[i])**2)
        ind_close = np.where((d > 0) & (d < distmin))[0]
        if len(ind_close) > 0:
            pixmask[indy[i], indx[i]] = 0
    indy, indx = np.where(pixmask)
    nhot = len(indy)
    if nhot < 2:
        print("No hot pixels found!")
        return None

    # Stack all hot pixels in a cube
    hot_all = []
    for iy, ix in zip(indy, indx):
        x1, y1 = np.array([ix, iy]) - boxhalf
        x2, y2 = np.array([x1, y1]) + boxsize
        sub = imtemp[y1:y2, x1:x2]

        # Flip channels along x-axis for PPC
        if calc_ppc:
            # Is the pixel in an even-indexed output channel (index 0)?
            even = any((ix > ch*chsize) and (ix < (ch+1)*chsize-1)
                       for ch in np.arange(0, nchans, 2))

            if same_scan_direction:
                flip = True if reverse_scan_direction else False
            elif even:
                flip = True if reverse_scan_direction else False
            else:
                flip = False if reverse_scan_direction else True

            if flip:
                sub = sub[:, ::-1]

        hot_all.append(sub)
    hot_all = np.array(hot_all)

    # Remove average dark current values
    if boxsize > 3 and bg_remove == True:
        for im in hot_all:
            im -= np.median([im[0, :], im[:, 0], im[-1, :], im[:, -1]])

    # Normalize by sum in 3x3 region
    norm_all = hot_all.copy()
    for im in norm_all:
        im /= im[boxhalf-1:boxhalf+2, boxhalf-1:boxhalf+2].sum()

    # Take average of normalized stack
    ipc_im_avg = np.median(norm_all, axis=0)
    # ipc_im_sig = robust.medabsdev(norm_all, axis=0)

    corner_val = (ipc_im_avg[boxhalf-1, boxhalf-1] + ipc_im_avg[boxhalf+1, boxhalf+1] +
                  ipc_im_avg[boxhalf+1, boxhalf-1] + ipc_im_avg[boxhalf-1, boxhalf+1]) / 4
    if corner_val < 0:
        corner_val = 0

    # Determine post-pixel coupling value?
    if calc_ppc:
        # With scan directions aligned, the right-hand neighbor holds the
        # PPC signal, so the IPC estimate uses only the other three direct
        # neighbors.
        ipc_val = (ipc_im_avg[boxhalf-1, boxhalf] +
                   ipc_im_avg[boxhalf, boxhalf-1] +
                   ipc_im_avg[boxhalf+1, boxhalf]) / 3
        if ipc_val < 0:
            ipc_val = 0

        ppc_val = ipc_im_avg[boxhalf, boxhalf+1] - ipc_val
        if ppc_val < 0:
            ppc_val = 0

        k_ipc = np.array([[corner_val, ipc_val, corner_val],
                          [ipc_val, 1-4*ipc_val, ipc_val],
                          [corner_val, ipc_val, corner_val]])
        k_ppc = np.zeros([3, 3])
        k_ppc[1, 1] = 1 - ppc_val
        k_ppc[1, 2] = ppc_val

        return (k_ipc, k_ppc)

    # Just determine IPC
    else:
        ipc_val = (ipc_im_avg[boxhalf-1, boxhalf] + ipc_im_avg[boxhalf, boxhalf-1] +
                   ipc_im_avg[boxhalf, boxhalf+1] + ipc_im_avg[boxhalf+1, boxhalf]) / 4
        if ipc_val < 0:
            ipc_val = 0

        kernel = np.array([[corner_val, ipc_val, corner_val],
                           [ipc_val, 1-4*ipc_val, ipc_val],
                           [corner_val, ipc_val, corner_val]])
        return kernel
4646ccc138d7f625941e9bc43382e1c5ef57e5c5
3,645,694
import datetime


def get_description():
    """ Return a dict describing how to call this plotter """
    desc = dict()
    desc['data'] = True
    desc['cache'] = 86400
    desc['description'] = """This plot presents the trailing X number of days
    temperature or precipitation departure from long term average. You can
    express this departure either in Absolute Departure or as a Standard
    Deviation. The Standard Deviation option along with precipitation is
    typically called the "Standardized Precipitation Index".

    <p>The plot also contains an underlay with the weekly US Drought Monitor
    that is valid for the station location. If you plot a climate district
    station, you get the US Drought Monitor valid for the district centroid.
    If you plot a statewide average, you get no USDM included.
    """
    today = datetime.date.today()
    sts = today - datetime.timedelta(days=720)
    desc['arguments'] = [
        dict(type='station', name='station', default='IA0200',
             label='Select Station:', network='IACLIMATE'),
        dict(type='int', name='p1', default=31,
             label='First Period of Days'),
        dict(type='int', name='p2', default=91,
             label='Second Period of Days'),
        dict(type='int', name='p3', default=365,
             label='Third Period of Days'),
        dict(type='date', name='sdate',
             default=sts.strftime("%Y/%m/%d"),
             min='1893/01/01', label='Start Date of Plot'),
        dict(type='date', name='edate',
             default=today.strftime("%Y/%m/%d"),
             min='1893/01/01', label='End Date of Plot'),
        dict(type='select', name='pvar', default='precip', options=PDICT,
             label='Which variable to plot?'),
        dict(type='select', name='how', default='diff', options=PDICT2,
             label='How to Express Departure?'),
    ]
    return desc
04d19dde79c25bfc3cc606cd1a2b09ecd8a6408b
3,645,695
def SpringH(z, m, k):
    """Spring Hamiltonian H(q, p) = K(p, m) + V(q, k) for a batch of states
    z of shape (bs, 2nd), where d = num_particles * space_dim: the first
    half of the last axis holds q, the second half p."""
    D = z.shape[-1]  # number of ODE dims, 2*num_particles*space_dim
    q = z[:, :D//2].reshape(*m.shape, -1)
    p = z[:, D//2:].reshape(*m.shape, -1)
    return EuclideanK(p, m) + SpringV(q, k)
0895d44933a0390a65da42d596fcf1e822f0f93c
3,645,696
def write_sushi_input_files(lhafile):
    """ Add SusHi-related blocks to LHA file """
    outfiles = {}
    for higgsname, higgstype in {'H': 12, 'A': 21}.items():
        lha = LHA(lhafile)

        sushi = Block('SUSHI', comment='SusHi specific')
        sushi.add(Entry([1, 2], comment='Select 2HDM'))
        sushi.add(Entry([2, higgstype], comment='h / H / A'))
        sushi.add(Entry([3, 0], comment='p-p collisions'))
        sushi.add(Entry([4, 13000], comment='E_cm'))
        sushi.add(Entry([5, 2], comment='ggH at NNLO'))
        sushi.add(Entry([6, 2], comment='bbH at NNLO'))
        sushi.add(Entry([7, 2], comment='SM EW content'))
        sushi.add(Entry([19, 1], comment='Verbosity'))
        sushi.add(Entry([20, 0], comment='All processes'))
        lha.add_block(sushi)

        thdm = Block('2HDM', '2HDM parameters')
        # thdm.add(Entry([1], comment='Type I'))
        # thdm.add(Entry([2], comment='Type II'))
        thdm.add(Entry([4], comment='Type IV'))
        lha.add_block(thdm)

        distrib = Block('DISTRIB', comment='Kinematic requirements')
        distrib.add(Entry([1, 0], comment='Sigma total'))
        distrib.add(Entry([2, 0], comment='Disable pT cut'))
        # distrib.add(Entry([21, GENER_SETTINGS['higgs_pt_min']], comment='Min higgs pT'))
        distrib.add(Entry([3, 0], comment='Disable eta cut'))
        # distrib.add(Entry([32, GENER_SETTINGS['higgs_eta_max']], comment='Max eta'))
        distrib.add(Entry([4, 1], comment='Use eta, not y'))
        lha.add_block(distrib)

        pdfspec = Block('PDFSPEC')
        pdfspec.add(Entry([1, 'MMHT2014lo68cl.LHgrid'], comment='Name of pdf (lo)'))
        pdfspec.add(Entry([2, 'MMHT2014nlo68cl.LHgrid'], comment='Name of pdf (nlo)'))
        pdfspec.add(Entry([3, 'MMHT2014nnlo68cl.LHgrid'], comment='Name of pdf (nnlo)'))
        pdfspec.add(Entry([4, 'MMHT2014nnlo68cl.LHgrid'], comment='Name of pdf (n3lo)'))
        pdfspec.add(Entry([10, 0], comment='Set number'))
        lha.add_block(pdfspec)

        lha.get_block('SMINPUTS').add(Entry([8, 1.275], comment='m_c'))

        # Write output
        suffix = '_%s_sushi.in' % higgsname
        outname = lhafile.replace('.lha', suffix)
        lha.write(outname)
        outfiles[higgsname] = outname
    return outfiles
f2f88c79d19de05109748c5c839550bfab905581
3,645,697
import functools
from functools import wraps

# Registry of every lru_cache-decorated function seen during collection;
# assumed to live at module level so tests can clear the caches later.
CACHED_FUNCTIONS = []


def pytest_collection(session):  # pylint: disable=unused-argument
    """Monkey patch lru_cache, before any module imports occur."""
    # Gotta hold on to this before we patch it away
    old_lru_cache = functools.lru_cache

    @wraps(functools.lru_cache)
    def lru_cache_wrapper(*args, **kwargs):
        """Wrap lru_cache decorator, to track which functions are decorated."""
        # Apply lru_cache params (maxsize, typed)
        decorated_function = old_lru_cache(*args, **kwargs)

        # Mimicking lru_cache: https://github.com/python/cpython/blob/v3.7.2/Lib/functools.py#L476-L478
        @wraps(decorated_function)
        def decorating_function(user_function):
            """Wraps the user function, which is what everyone is actually using. Including us."""
            wrapper = decorated_function(user_function)
            CACHED_FUNCTIONS.append(wrapper)
            return wrapper

        return decorating_function

    # Monkey patch the wrapped lru_cache decorator
    functools.lru_cache = lru_cache_wrapper

    yield

    # Be a good citizen and undo our monkeying
    functools.lru_cache = old_lru_cache
56f218c06c1d8cc64d884e94f503ae51be135c7f
3,645,698
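A standalone sketch of the same tracking trick, outside pytest:

import functools
from functools import wraps

tracked = []
_old = functools.lru_cache

@wraps(functools.lru_cache)
def _tracking_lru_cache(*args, **kwargs):
    decorated = _old(*args, **kwargs)

    @wraps(decorated)
    def decorating_function(user_function):
        wrapper = decorated(user_function)
        tracked.append(wrapper)
        return wrapper

    return decorating_function

functools.lru_cache = _tracking_lru_cache

@functools.lru_cache(maxsize=None)
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

assert fib.__wrapped__ and tracked == [fib]
functools.lru_cache = _old  # undo the monkey patch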
import numpy as np


def __graph_laplacian(mtx):
    """
    Compute the (unnormalized) graph Laplacian of the adjacency matrix
    (assumed symmetric).

    .. math:: L = D - W, \\quad D_{ii} = \\sum_j W_{ij}
    """
    L = np.diag(np.sum(mtx, 0)) - mtx
    return L
54f7fd0a359863bcf31ca9800c30e9eadf32ba8f
3,645,699
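For the path graph on three nodes, for instance:

import numpy as np

W = np.array([[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]])
L = np.diag(W.sum(0)) - W
# [[ 1, -1,  0],
#  [-1,  2, -1],
#  [ 0, -1,  1]]
print(L)
assert (L.sum(1) == 0).all()  # rows of a Laplacian sum to zero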