content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def process_2d_sawtooth(data, period, samplerate, resolution, width, verbose=0, start_zero=True, fig=None):
    """ Extract a 2D image from a double sawtooth signal

    Args:
        data (numpy array): measured trace
        period (float): period of the full signal
        samplerate (float): sample rate of the acquisition device
        resolution (list): resolution nx, ny. The nx corresonds to the fast oscillating sawtooth
        width (list of float): width paramter of the sawtooth signals
        verbose (int): verbosity level
        start_zero (bool): Default is True
        fig (int or None): figure handle

    Returns
        processed_data (list of arrays): the extracted 2D arrays
        results (dict): contains metadata
    """
    npoints_expected = int(period * samplerate)  # expected number of points
    npoints = data.shape[0]
    nchannels = data.shape[1]
    # fast axis repeats resolution[1] times within one full period
    period_x = period / (resolution[1])
    period_y = period
    if verbose:
        print('process_2d_sawtooth: expected %d data points, got %d' % (npoints_expected, npoints,))
    # a trace of the wrong length cannot be reshaped into the 2D image
    if np.abs(npoints - npoints_expected) > 0:
        raise Exception('process_2d_sawtooth: expected %d data points, got %d' % (npoints_expected, npoints,))
    full_trace = False
    if start_zero and (not full_trace):
        # signal starts at zero: the forward slope is preceded by padding
        # of duration ((1 - width) / 2) on each axis
        padding_x_time = ((1 - width[0]) / 2) * period_x
        padding_y_time = ((1 - width[1]) / 2) * period_y
        sawtooth_centre_pixels = ((1 - width[1]) / 2 + .5 * width[1]) * period * samplerate
        start_forward_slope_step_pixels = ((1 - width[1]) / 2) * period * samplerate
        end_forward_slope_step_pixels = ((1 - width[1]) / 2 + width[1]) * period * samplerate
    else:
        if full_trace:
            padding_x_time = 0
            padding_y_time = 0
            sawtooth_centre_pixels = .5 * width[1] * period * samplerate
        else:
            padding_x_time = 0
            padding_y_time = 0
            sawtooth_centre_pixels = .5 * width[1] * period * samplerate
        start_forward_slope_step_pixels = 0
        end_forward_slope_step_pixels = (width[1]) * period * samplerate
    padding_x = int(padding_x_time * samplerate)
    padding_y = int(padding_y_time * samplerate)
    width_horz = width[0]
    width_vert = width[1]
    res_horz = int(resolution[0])
    res_vert = int(resolution[1])
    if resolution[0] % 32 != 0 or resolution[1] % 32 != 0:
        # send out warning, due to rounding of the digitizer memory buffers
        # this is not supported
        raise Exception(
            'resolution for digitizer is not a multiple of 32 (%s) ' % (resolution,))
    if full_trace:
        npoints_forward_x = int(res_horz)
        npoints_forward_y = int(res_vert)
    else:
        # only the forward (rising) part of each sawtooth carries image data
        npoints_forward_x = int(width_horz * res_horz)
        npoints_forward_y = int(width_vert * res_vert)
    if verbose:
        print('process_2d_sawtooth: number of points in forward trace (horizontal) %d, vertical %d' % (npoints_forward_x, npoints_forward_y,))
        print(' horizontal mismatch %d/%.1f' % (npoints_forward_x, width_horz * period_x * samplerate))
    processed_data = []
    # starting sample index of each image row in the raw trace
    row_offsets = res_horz * np.arange(0, npoints_forward_y).astype(int) + int(padding_y) + int(padding_x)
    for channel in range(nchannels):
        # slice one forward-sweep worth of samples per row
        row_slices = [data[idx:(idx + npoints_forward_x), channel] for idx in row_offsets]
        processed_data.append(np.array(row_slices))
    if verbose:
        print('process_2d_sawtooth: processed_data shapes: %s' % ([array.shape for array in processed_data]))
    if fig is not None:
        # diagnostic plot of the raw trace with the extracted row boundaries
        pixel_to_axis = 1. / samplerate
        times = np.arange(npoints) / samplerate
        plt.figure(fig)
        plt.clf()
        plt.plot(times, data[:, :], '.-', label='raw data')
        plt.title('Processing of digitizer trace')
        plt.axis('tight')
        for row_offset in row_offsets:
            plot2Dline([-1, 0, pixel_to_axis * row_offset, ], ':', color='r', linewidth=.8, alpha=.5)
        plot2Dline([-1, 0, pixel_to_axis * sawtooth_centre_pixels], '-c', linewidth=1, label='centre of sawtooth', zorder=-10)
        plot2Dline([0, -1, 0, ], '-', color=(0, 1, 0, .41), linewidth=.8)
        plot2Dline([-1, 0, pixel_to_axis * start_forward_slope_step_pixels], ':k', label='start of step forward slope')
        plot2Dline([-1, 0, pixel_to_axis * end_forward_slope_step_pixels], ':k', label='end of step forward slope')
        plot2Dline([-1, 0, 0, ], '-', color=(0, 1, 0, .41), linewidth=.8, label='start trace')
        plot2Dline([-1, 0, period, ], '-', color=(0, 1, 0, .41), linewidth=.8, label='end trace')
        # plot2Dline([0, -1, data[0,3], ], '--', color=(1, 0, 0, .41), linewidth=.8, label='first value of data')
        plt.legend(numpoints=1)
        if verbose >= 2:
            # show first/middle/last extracted rows of the last channel
            plt.figure(fig + 10)
            plt.clf()
            plt.plot(row_slices[0], '.-r', label='first trace')
            plt.plot(row_slices[-1], '.-b', label='last trace')
            plt.plot(row_slices[int(len(row_slices) / 2)], '.-c')
            plt.legend()
    return processed_data, {'row_offsets': row_offsets, 'period': period}
f7b212d54637e04294cda336e154910eb718b3e5
26,800
def lower_threshold_projection(projection, thresh=1e3):
    """ An ugly but effective work around to get a higher-resolution
    curvature of the great-circle paths. This is useful when plotting
    the great-circle paths in a relatively small region.

    Parameters
    ----------
    projection : class
        Should be one of the cartopy projection classes, e.g.,
        cartopy.crs.Mercator

    thresh : float
        Smaller values achieve higher resolutions. Default is 1e3

    Returns
    -------
    Instance of the input (`projection`) class

    Example
    -------
    proj = lower_threshold_projection(cartopy.crs.Mercator, thresh=1e3)

    Note that the cartopy.crs.Mercator was not initialized (i.e., there
    are no brackets after the word `Mercator`)
    """
    # Build the subclass dynamically: identical to declaring a class with a
    # `threshold` property that returns the captured `thresh` value.
    patched = type(
        'LowerThresholdProjection',
        (projection,),
        {'threshold': property(lambda self: thresh)},
    )
    return patched()
165c657f1ec875f23df21ef412135e27e9e443c6
26,801
def post_benchmark(name, kwargs=None):
    """Run the post-processing hook registered for benchmark `name`.

    `kwargs` (optional dict) is forwarded as keyword arguments to the
    benchmark's `post` callable.
    """
    post_args = {} if kwargs is None else kwargs
    return benchmarks[name].post(**post_args)
900f60435ae31e8ec09312f23ab9f98a723d22af
26,802
def tag_view_pagination_counts(request,hc,urls,tag_with_items):
    """ retrieve the pagination counts for a tag

    Navigates the browser to the tags page, searches for
    `tag_with_items`, and reads the pagination counts from the
    resulting tag view page.

    Returns a dict with keys 'start', 'end', 'total' (pagination
    counts) and 'url' (the page the counts were read from).
    """
    hc.browser.get(urls['https_authority'])
    try:
        po = hc.catalog.load_pageobject('TagsPage')
        po.goto_page()
        po.search_for_content([tag_with_items])
        # the search lands on the tag view page; switch page objects
        po = hc.catalog.load_pageobject('TagsViewPage')
        # counts has (pagination_start, pagination_end, pagination_total)
        counts = po.get_pagination_counts()
        current_url = po.current_url()
    finally:
        # always release the browser, even if a page object call raised
        hc.browser.close()
    r = {'start' : counts[0], 'end' : counts[1], 'total' : counts[2], 'url' : current_url}
    return r
7130ffdbb2b8f90fadf1a159d15726012511ddee
26,803
def arts_docserver_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Create a link to ARTS docserver.

    Parameters:
        name (str): The role name used in the document.
        rawtext (str): The entire markup snippet, with role.
        text (str): The text marked with the role.
        lineno (str): The line number where rawtext appears in the input.
        inliner (str): The inliner instance that called us.
        options (dict): Directive options for customization.
        content (list): The directive content for customization.

    Returns:
        list, list: Nodes to insert into the document, System messages.
    """
    content = [] if content is None else content
    options = {} if options is None else options
    url = 'http://radiativetransfer.org/docserver-trunk/all/{}'.format(text)
    # a single reference node pointing at the docserver entry; no messages
    return [nodes.reference(rawtext, text, refuri=url, **options)], []
8f20cc4adb9f7fa17514116fe984562d9f0174f3
26,804
def arima_model(splits, arima_order, graph=False):
    """ Evaluate an ARIMA model for a given order (p,d,q) and also forecast the next one time step.

    Split data in train and test. Train the model using t = (1, ..., t) and
    predict next time step (t+1). Then add (t+1) value from test dataset to
    history and fit again the model using t = (1, ..., t, t+1). Then, it
    predicts for the next instant (t+2), and so on up to t=N where N=len(test)
    Finally, with the predictions and observables from the test dataset, the
    metrics MSE, MAE, MAPE y R2 are calculated.

    Args:
        splits: Dictionary with training, validation and test data.
        arima_order: Tuple. Contains the argument p, d, q for ARIMA model.
        graph: Boolean. Plot the predictions and test dataset.

    Returns:
        Metrics dict with keys RMSE, MSE, MAE, MAPE, R2, or None if the
        evaluation failed.
    """
    try:
        # prepare training dataset
        train = splits['train']['y'].astype('float32')
        val_y = splits['val']['y'].astype('float32')
        test_y = splits['test']['y'].astype('float32')
        # validation + test form the walk-forward evaluation series
        test = pd.concat([val_y, test_y])
        history = [x for x in train]
        predictions = list()
        for t in range(len(test)):
            model = sm.tsa.ARIMA(history, order=arima_order)
            # fixed: the original passed `dis=-1`, an invalid keyword
            # (presumably a typo for `disp`); `disp=0` silences output
            model_fit = model.fit(disp=0)
            # Predict one step in future from the last value in history variable.
            yhat = model_fit.forecast()[0]
            predictions.append(yhat)
            history.append(test[t])
        # Metrics (MSE computed once and reused for RMSE)
        mse = mean_squared_error(test, predictions)
        rmse = sqrt(mse)
        mae = mean_absolute_error(test, predictions)
        mape = mean_absolute_percentage_error(test, predictions)
        r2 = r2_score(test, predictions)
        metrics = {"RMSE": rmse, "MSE": mse, "MAE": mae, "MAPE": mape, "R2": r2}
        if graph:
            # plot forecasts against actual outcomes
            test.plot()
            plt.plot(predictions, color='red')
            plt.title('ARIMA Fit')
            plt.ylabel(test.name)
            plt.xlabel('Time [days]')
            plt.legend(['Data test', 'Forecast'], loc='upper left')
            # plt.show()
        return metrics
    except Exception as e:
        # fixed: a bare `pass` silently returned None with no trace of why;
        # keep the None return (callers may rely on it) but report the cause
        print('arima_model: evaluation failed: %s' % (e,))
        return None
07ca1349e5ebe02793fbf798cbd4e5ce20128a74
26,805
import ctypes


def srfscc(srfstr, bodyid):
    """
    Translate a surface string, together with a body ID code, to the
    corresponding surface ID code. The input surface string may contain
    a name or an integer ID code.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/srfscc_c.html

    :param srfstr: Surface name or ID string.
    :type srfstr: str
    :param bodyid: ID code of body associated with surface.
    :type bodyid: int
    :return: Surface ID code, and whether the string was recognised as a name.
    :rtype: tuple of (int, bool)
    """
    surface = stypes.stringToCharP(srfstr)
    body = ctypes.c_int(bodyid)
    code = ctypes.c_int()
    isname = ctypes.c_int()
    # outputs are written through the byref pointers by the CSPICE call
    libspice.srfscc_c(surface, body, ctypes.byref(code), ctypes.byref(isname))
    return code.value, bool(isname.value)
9f32b6929c2fd5d482db5f79f4b00f1fe41b114b
26,806
import json

import requests


def pr_comment(
    message: str,
    repo: str = None,
    issue: int = None,
    token=None,
    server=None,
    gitlab=False,
):
    """push comment message to Git system PR/issue

    :param message: test message
    :param repo: repo name (org/repo)
    :param issue: pull-request/issue number
    :param token: git system security token
    :param server: url of the git system
    :param gitlab: set to True for GitLab (MLRun will try to auto detect the Git system)
    """
    # NOTE(review): relies on `environ` (os.environ) and `mlrun` being
    # imported elsewhere in this module — confirm.
    # auto-detect GitLab from CI environment or the server url
    if ("CI_PROJECT_ID" in environ) or (server and "gitlab" in server):
        gitlab = True
    token = token or environ.get("GITHUB_TOKEN") or environ.get("GIT_TOKEN")
    if gitlab:
        server = server or "gitlab.com"
        headers = {"PRIVATE-TOKEN": token}
        repo = repo or environ.get("CI_PROJECT_ID")
        # auto detect GitLab pr id from the environment
        issue = issue or environ.get("CI_MERGE_REQUEST_IID")
        # project path must be URL-encoded for the GitLab REST API
        repo = repo.replace("/", "%2F")
        url = f"https://{server}/api/v4/projects/{repo}/merge_requests/{issue}/notes"
    else:
        server = server or "api.github.com"
        repo = repo or environ.get("GITHUB_REPOSITORY")
        # auto detect pr number if not specified, in github the pr id is identified as an issue id
        # we try and read the pr (issue) id from the github actions event file/object
        if not issue and "GITHUB_EVENT_PATH" in environ:
            with open(environ["GITHUB_EVENT_PATH"]) as fp:
                data = fp.read()
                event = json.loads(data)
                if "issue" not in event:
                    raise mlrun.errors.MLRunInvalidArgumentError(
                        f"issue not found in github actions event\ndata={data}"
                    )
                issue = event["issue"].get("number")
        headers = {
            "Accept": "application/vnd.github.v3+json",
            "Authorization": f"token {token}",
        }
        url = f"https://{server}/repos/{repo}/issues/{issue}/comments"
    resp = requests.post(url=url, json={"body": str(message)}, headers=headers)
    if not resp.ok:
        errmsg = f"bad pr comment resp!!\n{resp.text}"
        raise IOError(errmsg)
    # the created comment/note id
    return resp.json()["id"]
7115ec59fca36459a15e07906ce5c58b305bcfdc
26,807
import subprocess
import sys


def check_output(command, catch_errors=True):
    """Execute a command and return the output as a stripped string.

    With `catch_errors` (the default), a failing command prints its
    captured output in red and exits the process; otherwise the
    CalledProcessError is re-raised for the caller to handle.
    """
    try:
        raw = subprocess.check_output(
            command, shell=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as suberror:
        if not catch_errors:
            raise suberror
        # `Fore` (colorama) is expected to be imported elsewhere in the module
        print("\n" + Fore.RED + suberror.stdout.decode('utf-8'))
        sys.exit(1)
    else:
        return raw.decode('utf-8').strip()
b12b37fabc205636a5d9e7fc67f0808c8337fcf2
26,808
def list_kickstart_profiles(name):
    """
    returns the list of profile name matching the name of kickstart file
    e.g. http://host:port/cm/v1/cobbler/kickstarts/default.ks/profile
    """
    provision = CobblerProvision()
    # only GET is supported; anything else yields None
    if request.method != "GET":
        return None
    # MUST know how to get user token, empty token ONLY for debug
    kwargs = {"user_token": ""}
    return provision.get_profile_match_kickstart(name)
05b734bd6142ef6260a18cd4961c32f26c71b068
26,809
from typing import Sequence


def windowed_run_count_1d(arr: Sequence[bool], window: int) -> int:
    """Return the number of consecutive true values in array for runs at least as long as given duration.

    Parameters
    ----------
    arr : Sequence[bool]
      Input array (bool).
    window : int
      Minimum duration of consecutive run to accumulate values.

    Returns
    -------
    int
      Total number of true values part of a consecutive run at least `window` long.
    """
    values, run_lengths = rle_1d(arr)[:2]
    # keep a run's length only when it is a True-run of at least `window`
    kept = np.where(values * run_lengths >= window, run_lengths, 0)
    return kept.sum()
ff7b18380cd77ad046fc5d5aa678e7393c72d495
26,810
def archived_attendance_overview():
    """ Show archived attendance view

    Builds per-house (NA / NCOP) committee attendance tables for 2019,
    with the change against 2018, ranked by average attendance.
    """
    this_year = 2019
    last_year = 2018
    attendance = CommitteeMeetingAttendance.annual_attendance_trends(
        period='historical')
    # index by year and cte id
    years = {
        year: {
            cte_id: list(cte_group)[0]
            for cte_id, cte_group in groupby(group, lambda r: r.committee_id)
        }
        for year, group in groupby(attendance, lambda r: r.year)
    }
    attendance = {
        'NA': [],
        'NCOP': [],
    }
    for cte in Committee.list().all():
        curr = years[this_year].get(cte.id)
        prev = years[last_year].get(cte.id)
        if cte.house.sphere != 'national':
            continue
        if not curr or cte.ad_hoc or cte.house.name_short == 'Joint':
            continue
        attendance[cte.house.name_short].append({
            'committee': cte.name,
            'committee_id': cte.id,
            'n_meetings': curr.n_meetings,
            'avg_attendance': curr.avg_attendance * 100,
            # committees with no previous-year data count as coming from 0
            'change': (curr.avg_attendance - (prev.avg_attendance if prev else 0)) * 100,
        })
    # rank them
    # fixed: dict.itervalues() is Python 2 only; .values() works on 2 and 3
    for att in attendance.values():
        att.sort(key=lambda a: a['avg_attendance'], reverse=True)
        for i, item in enumerate(att):
            att[i]['rank'] = len(att) - i
    # (removed an unreachable trailing `pass` after the return)
    return render_template(
        'archive_attendance_overview.html',
        year=this_year,
        attendance_na=attendance['NA'],
        attendance_ncop=attendance['NCOP'])
6d895a63e9b248b135385fd316112e4b749792a3
26,811
import platform
import subprocess


def start_daemon(args: list) -> int:
    """Starts daemon process.

    Arguments:
        args: Arguments for `subprocess.Popen` (first item must be the daemon filename)

    Returns:
        PID for started daemon, or None if start failed.

    Note:
        Gracefull shutdown on Windows is tricky. To allow shutdown via SIGINT
        signal handler (note that SIGINT is not available on Windows, you have
        to use CTRL_C_EVENT), it's necessry to start new shell with new console
        in background (i.e. detached from console of this daemon starting script).
    """
    if platform.system() == 'Windows':
        # new console detaches the daemon from this script's console
        popen_kwargs = dict(shell=True, creationflags=subprocess.CREATE_NEW_CONSOLE)
    else:  # Unix: a new session detaches from the controlling terminal
        popen_kwargs = dict(start_new_session=True)
    proc = subprocess.Popen(args, **popen_kwargs)
    # poll() is None while the child is still running
    return proc.pid if proc.poll() is None else None
b21fb344b3173c5d3f49d1dcd4f7b0890290df35
26,812
def _get_medical_image_blob(roidb):
    """ Builds an input blob from the medical image in the roidb

    Each entry is loaded as a raw slice; when cfg.THREE_SLICES is set the
    neighbouring (previous/next) slices are stacked into the 3 channels,
    otherwise the same slice is replicated 3 times. Returns the image blob
    and a per-image abdominal mask.
    """
    num_images = len(roidb)
    processed_ims = []
    pre_ims = []
    post_ims = []
    abdo_masks = []
    for i in range(num_images):
        im = raw_reader(roidb[i]["image"], cfg.MET_TYPE, [roidb[i]["height"], roidb[i]["width"]])
        if roidb[i]['flipped']:
            # horizontal flip for augmentation
            im = im[:, ::-1]
        processed_ims.append(im)
        mask = abdominal_mask(im.copy())
        abdo_masks.append(mask)
        if cfg.THREE_SLICES:
            # get pre-image
            # slice index is the last "_"-separated token of the filename
            basename = osp.basename(roidb[i]["image"])
            names = basename[:-4].split("_")
            slice_num = int(names[-1])
            if slice_num == 0:
                # first slice: reuse the current (already flipped) image
                pre_im = im
            else:
                slice_num -= 1
                names[-1] = str(slice_num)
                basename = "_".join(names) + ".raw"
                pre_path = osp.join(osp.dirname(roidb[i]["image"]), basename)
                pre_im = raw_reader(pre_path, cfg.MET_TYPE, [roidb[i]["height"], roidb[i]["width"]])
                if roidb[i]['flipped']:
                    pre_im = pre_im[:, ::-1]
            pre_ims.append(pre_im)
            # get post-image
            basename = osp.basename(roidb[i]["image"])
            names = basename[:-4].split("_")
            names[-1] = str(int(names[-1]) + 1)
            basename = "_".join(names) + ".raw"
            post_path = osp.join(osp.dirname(roidb[i]["image"]), basename)
            try:
                post_im = raw_reader(post_path, cfg.MET_TYPE, [roidb[i]["height"], roidb[i]["width"]])
                if roidb[i]['flipped']:
                    post_im = post_im[:, ::-1]
            except FileNotFoundError:
                # last slice of the series: fall back to the current image
                post_im = im
            post_ims.append(post_im)
    num_images = len(processed_ims)
    blob = np.zeros((num_images, cfg.TRAIN.MAX_SIZE, cfg.TRAIN.MAX_SIZE, 3), dtype=np.float32)
    # NOTE(review): np.bool is removed in NumPy >= 1.24; would need bool/np.bool_
    abdo_mask = np.zeros((num_images, cfg.TRAIN.MAX_SIZE, cfg.TRAIN.MAX_SIZE), dtype=np.bool)
    if cfg.THREE_SLICES:
        # channels = (previous, current, next) slice
        for i in range(num_images):
            blob[i,:,:,0] = pre_ims[i]
            blob[i,:,:,1] = processed_ims[i]
            blob[i,:,:,2] = post_ims[i]
            abdo_mask[i,:,:] = abdo_masks[i]
    else:
        # replicate the single slice across the 3 channels
        for i in range(num_images):
            blob[i,:,:,0] = processed_ims[i]
            blob[i,:,:,1] = processed_ims[i]
            blob[i,:,:,2] = processed_ims[i]
            abdo_mask[i,:,:] = abdo_masks[i]
    if cfg.USE_WIDTH_LEVEL:
        # window/level intensity normalization
        win, wind2, lev = cfg.WIDTH, cfg.WIDTH / 2, cfg.LEVEL
        blob = (np.clip(blob, lev - wind2, lev + wind2) - (lev - wind2)) / 2**16 * win
    else:
        blob /= cfg.MED_IMG_UPPER
        blob = np.clip(blob, -1., 1.)
    return blob, abdo_mask
9b09d53fbf08ff785eeaadacf3733b8b5bb995b4
26,813
def is_weak(key: object) -> bool:
    """
    Check whether key is weak or not.

    Key is weak if it is a basic key - not an action and not a modifier.
    """
    # De Morgan form of: (not is_action(key)) and (not is_mod(key))
    return not (is_action(key) or is_mod(key))
9493afd199fad665e97ae18da973c8a15671bfbb
26,814
def mock_cei_get_trades(mocker: MockerFixture) -> MockerFixture:
    """Fixture for mocking report_reader.get_trades.

    Returns the mock object installed in place of
    ``irpf_investidor.report_reader.get_trades`` so tests can set its
    return value and inspect calls; pytest-mock reverts the patch on teardown.
    """
    return mocker.patch("irpf_investidor.report_reader.get_trades")
fcceb00df6443a30544b92d001983c7bb243650a
26,815
def pql_breakpoint(state: State):
    """Creates a Python breakpoint.

    For a Preql breakpoint, use debug() instead
    """
    # drops into the host Python debugger (pdb by default) at this call site;
    # `state` follows the interpreter's builtin-function convention but is unused
    breakpoint()
    return objects.null
a826c75ddb7a665d85f379a90b0e17b2ce2fc572
26,816
def _normalize(op1, op2, prec = 0): """Normalizes op1, op2 to have the same exp and length of coefficient. Done during addition. """ if op1.exp < op2.exp: tmp = op2 other = op1 else: tmp = op1 other = op2 # Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1). # Then adding 10**exp to tmp has the same effect (after rounding) # as adding any positive quantity smaller than 10**exp; similarly # for subtraction. So if other is smaller than 10**exp we replace # it with 10**exp. This avoids tmp.exp - other.exp getting too large. tmp_len = len(str(tmp.int)) other_len = len(str(other.int)) exp = tmp.exp + min(-1, tmp_len - prec - 2) if other_len + other.exp - 1 < exp: other.int = 1 other.exp = exp tmp.int *= 10 ** (tmp.exp - other.exp) tmp.exp = other.exp return op1, op2
f0ed5a47e6fa466a8fb7aacf837c42f092ff031e
26,817
def change_tags(project_id):
    """Change the list of tags on a project.

    Request body: JSON list of tags. Only the project creator or an admin
    may modify tags, and only while the project is ongoing or finalizing.
    Responds 400 on validation/stage/integrity errors, 403 on permission.
    """
    project = Project.query.get_or_404(project_id)
    if project.lifetime_stage not in (LifetimeStage.ongoing, LifetimeStage.finalizing):
        abort(400, {'message': 'Tags can only be modified in ongoing and finalizing stages.'})
    if current_user != project.creator and not current_user.is_admin:
        abort(403)
    # deserialize only the `tags` field onto the existing instance
    in_schema = ProjectSchema(only=('tags',))
    try:
        updated_project = in_schema.load({'tags': request.json}, instance=project)
    except ValidationError as err:
        abort(400, {'message': err.messages})
    try:
        db.session.add(updated_project)
        db.session.commit()
    except IntegrityError as err:
        # roll back so the session stays usable after the failed commit
        db.session.rollback()
        log.exception(err)
        abort(400, {'message': 'Data integrity violated.'})
    return NO_PAYLOAD
0f723eb4816066f8f3a4353b1261c207c2754100
26,818
from typing import List from typing import Dict from typing import Any import six from typing import Tuple from re import T from typing import Union def sort_db_results(results: List[Dict[str, Any]]) -> List[Dict[str, Any]]: """Deterministically sort DB results. Args: results: List[Dict], results from a DB. Returns: List[Dict], sorted DB results. """ sort_order: List[str] = [] if len(results) > 0: sort_order = sorted(six.iterkeys(results[0])) def sort_key(result: Dict[str, Any]) -> Tuple[Tuple[bool, Any], ...]: """Convert None/Not None to avoid comparisons of None to a non-None type.""" return tuple((result[col] is not None, result[col]) for col in sort_order) def sorted_value(value: T) -> Union[List[Any], T]: """Return a sorted version of a value, if it is a list.""" if isinstance(value, list): return sorted(value) return value return sorted( [{k: sorted_value(v) for k, v in six.iteritems(row)} for row in results], key=sort_key )
27a8a206ee08810cf7391a1d4d6e0cf68d06fe14
26,819
def classify_one(models, cooccurence_csv):
    """Classify one case from its co-occurrence / frequency feature CSVs.

    Args:
        models (dict): fitted PCA and classifier objects keyed by
            "<experiment> PCA" and "<experiment> model"
        cooccurence_csv (str): the CSV file path for the co-occurrence feautres

    Output:
        dx (int): the class for diagnosis
        dx_name (str): the name for diagnosis class

    Raises:
        RuntimeError: if the cascade reaches an inconsistent state.
    """
    # Get the features from the CSV files (with-blocks close the handles,
    # which the original left open)
    with open(cooccurence_csv, "r") as f:
        cooc_features = f.read().split(",")
    with open(cooccurence_csv.replace("Cooccurrence", "Frequency"), "r") as f:
        freq_features = f.read().split(",")
    cooc_features = np.array([float(_.strip().rstrip()) for _ in cooc_features])
    freq_features = np.array([float(_.strip().rstrip()) for _ in freq_features])
    # Get the results only for upper triangle matrix (including diagonal).
    # Other parts will be all zero
    cooc_features = np.triu(cooc_features.reshape(8, 8), k=0).reshape(-1)
    # Normalize features
    cooc_features = cooc_features / np.sum(cooc_features)
    freq_features = freq_features / np.sum(freq_features)
    features = np.array(cooc_features.tolist() + freq_features.tolist()).reshape(1, -1)
    # Cascade of binary decisions, most severe diagnosis first.
    # NOTE(review): each PCA transform overwrites `features`, so later
    # experiments see the previous experiment's projection — confirm intended.
    for experiment in ["Invasive v.s. Noninvasive", "Atypia and DCIS v.s. Benign", "DCIS v.s. Atypia"]:
        pca = models[experiment + " PCA"]
        if pca is not None:
            features = pca.transform(features).reshape(1, -1)
        model = models[experiment + " model"]
        rst = model.predict(features)[0]
        if rst:
            if experiment == "Invasive v.s. Noninvasive":
                return 4, "Invasive"
            if experiment == "Atypia and DCIS v.s. Benign":
                return 1, "Benign"
            if experiment == "DCIS v.s. Atypia":
                return 3, "DCIS"
            # fixed: `raise("...")` raised a TypeError (str is not an exception)
            raise RuntimeError("programming error! unknown experiment")
    if experiment == "DCIS v.s. Atypia" and not rst:
        return 2, "Atypia"
    raise RuntimeError("programming error 2! Unknown experiment and rst")
6761a01ab832df15f88ee9032e4b1423a2d235cd
26,820
import math


def discretize_q(q, tol=None, lat_range=(-math.pi / 2, math.pi / 2), points=None):
    """
    simulate feature database by giving closest lat & roll with given tolerance
    and set lon to zero as feature detectors are rotation invariant (in opengl coords)
    """
    # exactly one of `tol` / `points` must be supplied
    if (tol is None) == (points is None):
        assert False, 'Give either tol or points'
    if points is None:
        points = bf2_lat_lon(tol, lat_range=lat_range)
    lat, lon, roll = q_to_ypr(q)
    # snap (lat, roll) to the nearest database point; lon is discarded
    (nlat, nroll), idx = find_nearest_arr(
        points,
        np.array((lat, roll)),
        ord=2,
        fun=wrap_rads,
    )
    nq0 = ypr_to_q(nlat, 0, nroll)
    return nq0, idx
a72653753d48e53bcebab94088828e9cecf19f34
26,821
import hmac
import sys


def constant_time_compare(val1, val2):
    """
    Returns True if the two strings are equal, False otherwise.

    The time taken is independent of the number of characters that match.
    """
    if len(val1) != len(val2):
        return False
    # fixed: the hand-rolled XOR loop broke on Python 3 `str` inputs
    # (str chars don't support ^); hmac.compare_digest (available since
    # 2.7.7/3.3) is the vetted constant-time primitive for bytes and
    # ASCII str alike.
    return hmac.compare_digest(val1, val2)
c2b8c8d4a8b5c5f1b6b106211eeabe9b51a30fbe
26,822
from typing import Union

import scipy


def generate_testdata(
    pars: dataclass,
    ntrials: int = 1,
    baseclass: Union[object, None] = None,
    func: Union[object, None] = None,
    exp_test_set: bool=True,
):
    """Generate synthetic PSC traces: Poisson event trains convolved with a template.

    meanrate is in Hz(events/second)
    maxt is in seconds - duration of trace
    bigevent is a dict {'t': delayinsec, 'I': amplitudeinA}

    Returns (timebase, clean traces, noisy traces, event indices, event times).
    """
    # if baseclass is None and func is not None:
    #     raise ValueError("Need base class definition")
    timebase = np.arange(0.0, pars.maxt, pars.dt)  # in ms
    t_psc = np.arange(
        0.0, pars.tdur, pars.dt
    )  # time base for single event template in ms
    if baseclass is None and func is None:  # make double-exp event
        print('Using our template - which may make a different psc than you want')
        tau_1 = pars.taus[0]  # ms
        tau_2 = pars.taus[1]  # ms
        # Aprime normalizes the double-exponential so its peak is 1
        Aprime = (tau_2 / tau_1) ** (tau_1 / (tau_1 - tau_2))
        g = Aprime * (-np.exp(-t_psc / tau_1) + np.exp((-t_psc / tau_2)))
        gmax = np.max(g)
        g = pars.sign * g * pars.amp / gmax
    elif baseclass is not None:  # use template from the class
        baseclass._make_template()
        gmax = np.max(pars.sign*baseclass.template)
        g = pars.amp * baseclass.template / gmax
    else:
        raise ValueError("Need base class or func definition")
    testpsc = np.zeros((ntrials, timebase.shape[0]))
    testpscn = np.zeros((ntrials, timebase.shape[0]))
    i_events = [None] * ntrials
    t_events = [None] * ntrials
    for i in range(ntrials):
        if exp_test_set:
            # reproducible per-trial seeds
            pars.expseed = i * 47  # starting seed for intervals
            pars.noiseseed = i  # starting seed for background noise
        if pars.expseed is None:
            eventintervals = np.random.exponential(
                1.0 / pars.meanrate, int(pars.maxt * pars.meanrate)
            )
        else:
            np.random.seed(pars.expseed + i)
            eventintervals = np.random.exponential(
                1.0 / pars.meanrate, int(pars.maxt * pars.meanrate)
            )
        eventintervals = eventintervals[eventintervals < 10.0]
        events = np.cumsum(eventintervals)
        if pars.bigevent is not None:
            events = np.append(events, pars.bigevent["t"])
            events = np.sort(events)
        # time of events with exp distribution:
        t_events[i] = events[events < pars.maxt]
        i_events[i] = np.array([int(x / pars.dt) for x in t_events[i]])
        # unit impulses with amplitude jitter at the event positions
        testpsc[i][i_events[i]] = np.random.normal(
            1.0, pars.ampvar / pars.amp, len(i_events[i])
        )
        if pars.bigevent is not None:
            ipos = int(pars.bigevent["t"] / pars.dt)  # position in array
            # NOTE(review): indexes the trial axis, not the time axis —
            # looks like this was meant to be testpsc[i][ipos]; confirm.
            testpsc[ipos] = pars.bigevent["I"]
        # convolve impulse train with the event template, truncated to trace length
        testpsc[i] = scipy.signal.convolve(testpsc[i], g, mode="full")[
            : timebase.shape[0]
        ]
        if pars.noise > 0:
            if pars.noiseseed is None:
                testpscn[i] = testpsc[i] + np.random.normal(
                    0.0, pars.noise, testpsc.shape[1]
                )
            else:
                np.random.seed(pars.noiseseed)
                testpscn[i] = testpsc[i] + np.random.normal(
                    0.0, pars.noise, testpsc.shape[1]
                )
        else:
            testpscn[i] = testpsc[i]
    # print(t_events)
    # print(i_events)
    # print(testpscn[0][i_events])
    # mpl.plot(timebase, testpscn[0])
    # mpl.show()
    # exit()
    return timebase, testpsc, testpscn, i_events, t_events
2c2fdd81608ca1df1564e7b6a7121c164d06e8d9
26,823
def has_param(param):
    """
    Generate function, which will check `param` is in html element.

    This function can be used as parameter for .find() method in HTMLElement.
    """
    def has_param_closure(element):
        """
        Look for `param` in `element`.
        """
        # non-empty (after stripping whitespace) parameter value counts as present
        return bool(element.params.get(param, "").strip())

    return has_param_closure
6800725c378714b5161772f0a2f9ef89ae278400
26,824
import torch


def neg_log_likeDUN(output, target, sigma, cat):
    """ function to compute expected loss across different network depths

    inputs:
    - output, predictions stacked over depths (depth, N, features)
    - target, single set of targets
    - sigma, aleatoric noise per output dimension
    - cat, variational categorical distribution over depths
    """
    targets = target.reshape(1, len(target), -1)
    noise = sigma.reshape(1, 1, len(sigma))
    # Gaussian log-density split into exponent and (constant) coefficient
    exponent = -0.5 * torch.sum((targets - output) ** 2 / noise ** 2, 2)
    log_coeff = -torch.sum(torch.log(sigma)) - len(sigma) * torch.log(
        torch.sqrt(torch.tensor(2 * np.pi)))
    scale = 1 / (exponent.size()[1])
    # per-depth average negative log-likelihood ...
    per_depth = -scale * torch.sum(log_coeff + exponent, 1)
    # ... weighted by the depth distribution
    return (per_depth * cat).sum()
8c2bcd9833e9a309270087331b67223b5e4ce74e
26,825
def shoot(C, D, b, maxiter=1000, abs_tol=1e-7):
    """Return random equality set of P that projects on a projection facet.

    Returns randomly selected equality set E_0 of P such
    that the projection of the equality set is a facet of the projection.

    @param C: Matrix defining the polytope Cx+Dy <= b
    @param D: Matrix defining the polytope Cx+Dy <= b
    @param b: Vector defining the polytope Cx+Dy <= b

    @return: `E_0,af,bf`: Equality set and affine hull
    """
    d = C.shape[1]
    k = D.shape[1]
    iter = 0
    while True:
        if iter > maxiter:
            raise Exception(
                "shoot: could not find starting equality set")
        # random direction to "shoot" along in the projected space
        gamma = np.random.rand(d) - 0.5
        # LP: maximize r (via c[0] = -1) s.t. r*C*gamma + D*y <= b
        c = np.zeros(k + 1)
        c[0] = -1
        G = np.hstack([np.array([np.dot(C, gamma)]).T, D])
        sol = solvers.lpsolve(c, G, b, solver='glpk')
        opt_sol = np.array(sol['x']).flatten()
        opt_dual = np.array(sol['z']).flatten()
        r_opt = opt_sol[0]
        y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten()
        x_opt = r_opt * gamma
        # constraints active (within tolerance) at the optimum
        E_0 = np.nonzero(
            np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol)[0]
        DE0 = D[E_0, :]
        CE0 = C[E_0, :]
        b0 = b[E_0]
        # accept only if the active set projects onto a facet (rank-1 condition)
        if rank(np.dot(null_space(DE0.T).T, CE0)) == 1:
            break
        iter += 1
    af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol)
    if is_dual_degenerate(c, G, b, None, None, opt_sol, opt_dual, abs_tol=abs_tol):
        # degenerate LP: recompute a unique equality set for the facet
        E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol)
        af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0])
    if len(bf) > 1:
        raise Exception("shoot: wrong dimension of affine hull")
    return E_0, af.flatten(), bf
1cd85fbd4752ff859553d7ef907ee85b20a99210
26,826
def bwdist(
    img,
    method=cv2.DIST_L2,
    dist_mask=cv2.DIST_MASK_5,
    label_type=cv2.DIST_LABEL_CCOMP,
    ravel=True,
):
    """Mimics Matlab's bwdist function, similar to OpenCV's distanceTransform()
    but with different output.

    https://www.mathworks.com/help/images/ref/bwdist.html

    Available metrics:
        https://docs.opencv.org/3.4/d7/d1b/group__imgproc__misc.html#gaa2bfbebbc5c320526897996aafa1d8eb

    Available distance masks:
        https://docs.opencv.org/3.4/d7/d1b/group__imgproc__misc.html#gaaa68392323ccf7fad87570e41259b497

    Available label types:
        https://docs.opencv.org/3.4/d7/d1b/group__imgproc__misc.html#ga3fe343d63844c40318ee627bd1c1c42f
    """
    # invert so the distance is measured to the nonzero pixels of `img`
    inverted = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV)[1]
    # NOTE: `label_type` is accepted but not forwarded (kept for API
    # compatibility with the original)
    dist, labeled = cv2.distanceTransformWithLabels(inverted, method, dist_mask)
    if ravel:  # return linear indices if ravel == True (default)
        idx = np.zeros(img.shape, dtype=np.intp)
        idx_func = np.flatnonzero
    else:  # return two-channel indices if ravel == False
        idx = np.zeros((*img.shape, 2), dtype=np.intp)
        idx_func = lambda masked: np.dstack(np.where(masked))
    # for each connected-component label, record the index of its seed pixel
    for label in np.unique(labeled):
        mask = labeled == label
        idx[mask] = idx_func(img * mask)
    return dist, idx
9889e731dab572846936f7cc79b88cb794cd9852
26,827
def calculate_cumulative_conf(areaP90: float=1., areaP10: float=10., pdP90: float=10., pdP10: float=24):
    """Calculate cumulative confidence level for expected development size in MW

    Args:
        areaP90 (float): pessimistic area in sqkm
        areaP10 (float): optimistic area in sqkm
        pdP90 (float): pessimistic power density in MWe/sqkm
        pdP10 (float): optimistic power density in MWe/sqkm

    Returns:
        prob_df (pandas Dataframe): cumulative confidence curve in Reservoir Size
    """
    # the P90/P10 pair pins down a lognormal: mu is the log-midpoint and
    # sigma comes from the 10%-90% normal quantile spread
    z_spread = norm.ppf(0.9) - norm.ppf(0.1)
    # calculate area > 250 deg C
    area_mu = (np.log(areaP90) + np.log(areaP10)) / 2
    area_sigma = (np.log(areaP10) - np.log(areaP90)) / z_spread
    # calculate powerdensity mean and standard dev
    powerdens_mu = (np.log(pdP90) + np.log(pdP10)) / 2
    powerdens_sigma = (np.log(pdP10) - np.log(pdP90)) / z_spread
    # capacity = area * power density, so the log-parameters add in quadrature
    capacity_mu = area_mu + powerdens_mu
    capacity_sigma = (area_sigma ** 2 + powerdens_sigma ** 2) ** 0.5
    expected_sizes = [
        lognorm.ppf(pct / 100, capacity_sigma, loc=0, scale=np.exp(capacity_mu))
        for pct in range(0, 100)
    ]
    confidences = list(np.arange(0, 100)[::-1])
    prob_df = pd.DataFrame(
        list(zip(confidences, expected_sizes)),
        columns=['Cumulative confidence (%)', 'Expected development size (MWe)'],
    )
    return prob_df
9231e1d7942ad8c0e36e606c3a7ddd25439016fb
26,828
def get_admins_timeseries_chart_data():
    """
    Return admins timeseries data to frontend
    :return: dict
    """
    chart_data = {}
    try:
        # Per-region summaries, oldest first; the national aggregate ('ITA')
        # is excluded.
        cursor = vax_admins_summary_coll.aggregate(pipeline=[
            {'$match': {VAX_AREA_KEY: {'$ne': 'ITA'}}},
            {'$sort': {VAX_DATE_KEY: 1}}
        ])
        df = pd.DataFrame(list(cursor))

        dates = (
            df[VAX_DATE_KEY]
            .apply(lambda d: d.strftime(CHART_DATE_FMT))
            .unique()
            .tolist()
        )

        def _region_series(region):
            # Cumulative first doses as a percentage of the region population.
            sel = df[df[VAX_AREA_KEY] == region]
            pct = sel[VAX_FIRST_DOSE_KEY].cumsum() / sel[POP_KEY] * 100
            return {'name': OD_TO_PC_MAP[region], 'data': pct.round(2).to_list()}

        chart_data = {
            "dates": dates,
            "data": [_region_series(r) for r in sorted(df[VAX_AREA_KEY].unique())],
        }
        app.logger.debug(f"Time series chart data {chart_data}")
    except Exception as e:
        app.logger.error(f"While getting vax timeseries chart data {e}")
    return chart_data
0b06612e6be332797f0c7b715810e3ddd61e0c62
26,829
def get_all_keys(data):
    """Get all keys from json data file.

    Args:
        data: List of dict rows; may be empty.

    Returns:
        list: Every distinct key appearing in any row (order not guaranteed).
    """
    # set().union(...) handles the empty-data case -- the original indexed
    # data[0] and raised IndexError on an empty list -- and unions every
    # row's keys in a single call.
    return list(set().union(*(row.keys() for row in data)))
5532af993f87bf4e00c7bec13eb971e0114e736c
26,830
def submitmq(request):
    """Message queue submit page.

    GET renders the fruit-preference form.  POST saves a Preference row,
    stores its id in the session, then publishes the id to an AMQP queue so a
    background worker can send a confirmation email.  On any failure an
    ActivityLog row is written and the error page is rendered.

    :param request: Django HttpRequest
    :return: HttpResponse (form page, error page, or redirect to 'thanks')
    """
    context = {
        "type": "mq",
        "choices": FRUIT_CHOICES,
    }
    if request.method == 'POST':  # If the form has been submitted...
        p = None
        try:
            # save preference
            p = Preference()
            p.name = request.POST['name']
            p.email = request.POST['email']
            p.fruit = request.POST['fruit']
            p.save()

            # Save the newly created preference in the session
            # so it can be used again
            request.session['preference_id'] = p.id
        except:
            # NOTE(review): bare except hides the actual failure (missing POST
            # key vs. DB error); consider narrowing and logging the exception.
            msg_error = u'Something went wrong saving the message queue preference task'
            logger.error(msg_error)
            messages.add_message(request, messages.ERROR, msg_error)
            a = ActivityLog(pref=p, message=msg_error)
            a.save()
            return render(request, 'error.html', context)

        # Add a task to the message queue to send a confirmation email
        # NOTE(review): `credentials` is built but not passed to
        # ConnectionParameters (see the commented-out line below) -- the
        # connection currently uses pika's default guest credentials.
        credentials = pika.credentials.PlainCredentials(
            settings.AMQP_USERNAME,
            settings.AMQP_PASSWORD
        )
        # credentials=credentials,
        connection = pika.BlockingConnection(
            pika.ConnectionParameters(
                host=settings.AMQP_HOST,
            )
        )
        try:
            channel = connection.channel()
            # Durable queue so tasks survive a broker restart.
            channel.queue_declare(queue=settings.AMQP_QUEUE, durable=True)
            channel.basic_publish(exchange='',
                                  routing_key=settings.AMQP_QUEUE,
                                  body=str(p.id),
                                  properties=pika.BasicProperties(
                                      delivery_mode = 2, # make message persistent
                                  ),
                                  mandatory=True,
                                  immediate=False)
            logger.info("Queued message task for {} {} {}".format(p.id, p.name, p.email))
        except:
            # NOTE(review): same generic message as the save failure above;
            # the connection is also left open on this path.
            msg_error = u'Something went wrong saving the message queue preference task'
            logger.error(msg_error)
            messages.add_message(request, messages.ERROR, msg_error)
            a = ActivityLog(pref=p, message=msg_error)
            a.save()
            return render(request, 'error.html', context)

        channel.close()
        connection.close()
        return HttpResponseRedirect(reverse('thanks'))

    return render(request, 'refactortoscale/choices.html', context)
2b84e7f8ee9a118d0989024dcd58f26d52f30351
26,831
def plot_projections(img: np.ndarray, spacing: int = 5, zoom: float = None) -> np.ndarray:
    """
    Plot three maximum intensity projections of the given image side-by-side.

    Parameters
    ----------
    img : np.ndarray
        Input 3D image.
    spacing : int
        Number of pixels to separate the projections in the output image.
        Default: 5
    zoom : float, optional
        Scale factor to interpolate the image along the z axis.
        Default: None (no interpolation).

    Returns
    -------
    np.ndarray
        Output image with the three maximum projections.
    """
    proj_z = np.max(img, axis=0)
    proj_y = np.max(img, axis=1)
    proj_x = np.max(img, axis=2)

    if zoom is not None:
        # Stretch the z axis of the two side projections by `zoom`.
        factors = [1.] * proj_y.ndim
        factors[0] = zoom
        proj_y = ndimage.interpolation.zoom(proj_y * 1., factors, order=1)
        proj_x = ndimage.interpolation.zoom(proj_x * 1., factors, order=1)

    # Layout: z-projection top-left, y-projection below it, x-projection
    # (transposed) to the right, separated by `spacing` blank pixels.
    out_shape = (proj_z.shape[0] + proj_y.shape[0] + spacing,
                 proj_z.shape[1] + proj_x.shape[0] + spacing) + img.shape[3:]
    canvas = np.zeros(out_shape)
    canvas[:proj_z.shape[0], :proj_z.shape[1]] = proj_z
    canvas[proj_z.shape[0] + spacing:, :proj_z.shape[1]] = proj_y
    canvas[:proj_z.shape[0], proj_z.shape[1] + spacing:] = np.swapaxes(proj_x, 0, 1)
    return canvas
cc68f75bb10b249107b90ef6f8d664ff09aee4f2
26,832
def _ngl_write_atom( num, species, x, y, z, group=None, num2=None, occupancy=1.0, temperature_factor=0.0, ): """ Writes a PDB-formatted line to represent an atom. Args: num (int): Atomic index. species (str): Elemental species. x, y, z (float): Cartesian coordinates of the atom. group (str): A...group name? (Default is None, repeat elemental species.) num2 (int): An "alternate" index. (Don't ask me...) (Default is None, repeat first number.) occupancy (float): PDB occupancy parameter. (Default is 1.) temperature_factor (float): PDB temperature factor parameter. (Default is 0. Returns: (str): The line defining an atom in PDB format Warnings: * The [PDB docs](https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html) indicate that the xyz coordinates might need to be in some sort of orthogonal basis. If you have weird behaviour, this might be a good place to investigate. """ if group is None: group = species if num2 is None: num2 = num return "ATOM {:>6} {:>4} {:>4} {:>5} {:10.3f} {:7.3f} {:7.3f} {:5.2f} {:5.2f} {:>11} \n".format( num, species, group, num2, x, y, z, occupancy, temperature_factor, species )
92a5d62f3c4f6d927aa5a6010b217344d0d241d3
26,833
def get_insert_components(options):
    """
    Takes a list of 2-tuple in the form (option, value) and returns a
    triplet (colnames, placeholders, values) that permits making a database
    query as follows: c.execute('INSERT INTO Table ({colnames}) VALUES
    {placeholders}', values).

    Both colnames and placeholders carry a leading comma when non-empty so
    they can be appended directly after fixed columns in the SQL template.
    """
    names = [name for name, _ in options]
    col_names = ','.join(names)
    placeholders = ','.join(['?'] * len(options))
    if col_names:
        col_names = ',' + col_names
    if placeholders:
        placeholders = ',' + placeholders
    values = tuple(value for _, value in options)
    return col_names, placeholders, values
3e1deecd39b0e519124278f47713d5b3a1571815
26,834
from typing import List


def send_recording_to_gist(table: str, results: List['FormattedResult'], assignment: str):
    """Publish a table/result pair to a private gist"""
    # the "-" at the front is so that github sees it first and names the gist
    # after the homework
    files = {
        '-stograde report {} table.txt'.format(assignment): {'content': table},
    }

    # One file per student, named <student>.<result type>.
    for result in results:
        filename = '{}.{}'.format(result.student, result.type.name.lower())
        files[filename] = {'content': result.content.strip()}

    return post_gist('log for ' + assignment, files)
41cd2dc428018893659a0aaa5c67b50e37375dc8
26,835
def append_composite_importance(composite_terms, ebm, X, composite_name=None, global_exp=None, global_exp_name=None, contributions=None):
    """Computes the importance of the composite_terms and appends it to a global explanation.

    In case a global explanation is provided, the composite importance will be
    appended to it and returned. Otherwise, a new global explanation will be
    created and returned. The composite importance will only be displayed in
    the Summary graph.

    Args:
        composite_terms: A list of term names or term indices
        ebm: A fitted EBM
        X (numpy array): Samples used to compute the composite importance
        composite_name (str, optional): User-defined composite Name
        global_exp (EBMExplanation, optional): User-defined global explanation object
        global_exp_name (str, optional): User-defined name when creating a new global explanation
        contributions (numpy array, optional): Contributions of all terms per X's row

    Returns:
        EBMExplanation: A global explanation with the composite importance appended to it
    """
    check_is_fitted(ebm, "has_fitted_")

    # Reuse the caller's explanation only if it is a complete global one;
    # otherwise build a fresh global explanation from the EBM.
    if global_exp is not None:
        if global_exp.explanation_type != "global":
            raise ValueError("The provided explanation is {} but a global explanation is expected.".format(global_exp.explanation_type))
        elif global_exp._internal_obj is None or global_exp._internal_obj["overall"] is None:
            raise ValueError("The global explanation object is incomplete.")
        else:
            global_explanation = global_exp
    else:
        global_explanation = ebm.explain_global(global_exp_name)

    # Derive a display name from the term names when none was supplied.
    if composite_name is None:
        composite_name = _get_composite_name(composite_terms, ebm.term_names_)
    composite_importance = compute_composite_importance(composite_terms, ebm, X, contributions)

    # Mutates the explanation in place; the "overall" entry feeds the
    # Summary graph.
    global_explanation._internal_obj["overall"]["names"].append(composite_name)
    global_explanation._internal_obj["overall"]["scores"].append(composite_importance)

    return global_explanation
12c8eeb6ad9dd49c96163ed349236fbc8cdf8ed5
26,836
from collections import abc
from itertools import repeat


def apply_pair(main, other, *, fn):
    """`Apply` optimized for use with paired structured containers.

    Recursively walks `main` (nested mappings/sequences) together with the
    matching structure in `other`, calling ``fn(m, o)`` on each pair of
    leaves.  `other` may be None at any level, in which case leaves of
    `main` are paired with None.

    BUG FIXES vs. the original:
      * ``import abc`` was the wrong module -- the stdlib `abc` module has no
        `Sequence`/`Mapping`; `collections.abc` is required.
      * `repeat` was used without being imported (`itertools.repeat`).
    """
    # special sequence types must precede generic `Sequence` check
    if isinstance(main, (str, bytes)):
        return fn(main, other)

    # delegate other types to the function (int, float, etc.)
    elif not isinstance(main, (abc.Sequence, abc.Mapping)):
        return fn(main, other)

    # `objects` is Iterable[Union[Sequence, Mapping]], but not `str` or `bytes`
    # check that the current level data are objects of the same type
    if not (other is None or isinstance(other, type(main))):
        raise TypeError(f'`{other}` does not match type `{type(main)}`')

    # (dict, OrderedDict, etc) -> dict
    if isinstance(main, abc.Mapping):
        other = {} if other is None else other
        # recurse and rebuild the mapping as dict (main left-joins with others)
        return {k: apply_pair(main[k], other.get(k), fn=fn)
                for k in main.keys()}

    # (tuple, namedtuple, list, etc) -> list*
    elif isinstance(main, abc.Sequence):
        if other is None:
            other = repeat(None)
        else:
            # sequences must conform in length (raises AssertionError)
            assert len(main) == len(other)

        values = [apply_pair(m, o, fn=fn) for m, o in zip(main, other)]

        # demote mutable sequences to lists
        if isinstance(main, abc.MutableSequence):
            return values

        # ... and immutable to tuple, keeping in mind special namedtuples
        return getattr(main, '_make', tuple)(values)
c547182fd33fe97136fccba635fa61d96ec73447
26,837
import requests
import json


def get_info(connector, host, key, datasetid):
    """Get basic dataset information from UUID.

    Keyword arguments:
    connector -- connector information, used to get missing parameters and send status updates
    host -- the clowder host, including http and port, should end with a /
    key -- the secret key to login to clowder
    datasetid -- the dataset to get info of
    """
    url = "%sapi/datasets/%s?key=%s" % (host, datasetid, key)
    # Skip TLS verification only when the connector explicitly disables it.
    verify = connector.ssl_verify if connector else True

    response = requests.get(url, verify=verify)
    response.raise_for_status()
    return json.loads(response.text)
982e3b9304b3c2ca04348efb794b8b8d34b86b89
26,838
from typing import Union
from typing import Tuple
from typing import List


def solve_network(
    NWN: nx.Graph,
    source_node: Union[Tuple, List[Tuple]],
    drain_node: Union[Tuple, List[Tuple]],
    input: float,
    type: str = "voltage",
    solver: str = "spsolve",
    **kwargs
) -> np.ndarray:
    """
    Solve for the voltages of each node in a given NWN. Each drain node will
    be grounded. If the type is "voltage", each source node will be at the
    specified input voltage. If the type is "current", current will be
    sourced from each source node.

    Parameters
    ----------
    NWN : Graph
        Nanowire network.
    source_node : tuple, or list of tuples
        Voltage/current source nodes.
    drain_node : tuple, or list of tuples
        Grounded output nodes.
    input : float
        Supplied voltage (current) in units of v0 (i0).
    type : {"voltage", "current"}, optional
        Input type. Default: "voltage".
    solver: str, optional
        Name of sparse matrix solving algorithm to use. Default: "spsolve".
    **kwargs
        Keyword arguments passed to the solver.

    Returns
    -------
    out : ndarray
        Output array containing the voltages of each node. If the input type
        is voltage, the current is also in this array as the last element.
    """
    # Normalize single nodes into one-element lists.
    if isinstance(source_node, tuple):
        source_node = [source_node]
    if isinstance(drain_node, tuple):
        drain_node = [drain_node]

    # Dispatch on the input type.
    if type == "voltage":
        solve = _solve_voltage
    elif type == "current":
        solve = _solve_current
    else:
        raise ValueError("Invalid source type.")

    return solve(NWN, input, source_node, drain_node, solver, **kwargs)
c5678fe12cc9abdd9404694c513155e7a85f0713
26,839
def d2s_func(va, vb, ka, wa, wb, pa):
    """Build a DNMR lineshape function for two uncoupled spin-half nuclei.

    All frequency-independent quantities are evaluated once here; the
    returned closure only computes the frequency-dependent terms, which
    avoids repeating the same work across an array of frequencies.

    :param va: frequency of nucleus 'a' at the slow exchange limit (va > vb)
    :param vb: frequency of nucleus 'b' at the slow exchange limit (vb < va)
    :param ka: rate constant for state a --> state b
    :param wa: width at half height of signal a at the slow exchange limit
    :param wb: width at half height of signal b at the slow exchange limit
    :param pa: fraction of the population in state a
    :returns: a function taking v (scalar or numpy linspace) and returning
        the intensity at v
    """
    pi = np.pi
    pi_squared = pi ** 2
    # Transverse relaxation times derived from the slow-exchange linewidths.
    T2a = 1 / (pi * wa)
    T2b = 1 / (pi * wb)
    pb = 1 - pa
    tau = pb / ka
    dv = va - vb
    Dv = (va + vb) / 2
    # Frequency-independent pieces of the lineshape expression (P, Q, R and
    # the lowercase companions follow the literature symbols).
    P = tau * (1 / (T2a * T2b) + pi_squared * (dv ** 2)) + (pa / T2a + pb / T2b)
    p = 1 + tau * ((pb / T2a) + (pa / T2b))
    Q = tau * (- pi * dv * (pa - pb))
    R = pi * dv * tau * ((1 / T2b) - (1 / T2a)) + pi * dv * (pa - pb)
    r = 2 * pi * (1 + tau * ((1 / T2a) + (1 / T2b)))

    def intensity(v):
        """Intensity at frequency v.

        The leading underscores create fresh locals so the closed-over
        constants are never rebound between calls (rebinding them corrupted
        repeated evaluations in an earlier version).
        """
        _Dv = Dv - v
        _P = P - tau * 4 * pi_squared * (_Dv ** 2)
        _Q = Q + tau * 2 * pi * _Dv
        _R = R + _Dv * r
        return (_P * p + _Q * _R) / (_P ** 2 + _R ** 2)

    return intensity
c3af0fc4d07a8b35b0147c18a15d968d2680b7bb
26,840
def network_allocations_get_for_share_server(context, share_server_id,
                                             session=None):
    """Get network allocation for share server.

    Thin delegation to the configured backend implementation (IMPL).
    """
    return IMPL.network_allocations_get_for_share_server(
        context, share_server_id, session=session)
7fd30c9249ed167ecf17b2902879de992a023db9
26,841
def round_corner(radius, fill):
    """Draw a round corner.

    Returns a greyscale ('L') tile of size radius x radius containing a
    quarter-circle pie slice, suitable for masking rounded rectangle corners.
    """
    tile = Image.new('L', (radius, radius), 0)  # (0, 0, 0, 0))
    # The 180-270 degree slice of a circle twice the tile size lands the
    # rounded edge in this quadrant.
    ImageDraw.Draw(tile).pieslice((0, 0, radius * 2, radius * 2), 180, 270, fill=fill)
    return tile
b5f781123ed0dd45dcf61b5d130a9c38c4d3d38f
26,842
def hindcasts(hcsts, obsvs=None, hists=None, shade=False, ax=None, figsize=(15, 4)):
    """
    Plot sets of hindcasts. Where multiple variables are provided, it is
    assumed that all inputs contain the same variables.

    Parameters
    ----------
    hcsts : dict
        Dictionary of hindcasts to plot with the format {"name": hcst}, where
        hcst is an xarray.Dataset with dimensions "init" and "lead"
    obsvs : dict, optional
        Dictionary of observations to plot with the format {"name": obsv},
        where obsv is an xarray.Dataset with dimension "time"
    hists : dict, optional
        Dictionary of historical runs to plot with the format {"name": hist},
        where hist is an xarray.Dataset with dimension "time"
    shade : bool, optional
        If True, shade background according to change in bias correction in
        CAFE60v1
    ax : matplotlib Axes, optional
        Plot into this single axes instead of creating a new figure.
    figsize : tuple, optional
        (width, per-variable height) of the created figure when ax is None.

    Returns
    -------
    matplotlib Figure when a new figure was created, otherwise the given ax.
    """

    def _shading(ax):
        # Grey background band between 1992 and 2040 (CAFE60v1 bias-correction
        # change); x/y limits are saved and restored around fill_between.
        trans = cftime.datetime(1992, 1, 1)
        end = cftime.datetime(2040, 1, 1)
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        ax.fill_between(
            [trans, end],
            [ylim[1], ylim[1]],
            [ylim[0], ylim[0]],
            color=[0.9, 0.9, 0.9],
        )
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)

    # One subplot per variable, inferred from the first hindcast dataset.
    n_vars = len(hcsts[list(hcsts.keys())[0]].data_vars)
    if ax is None:
        fig = plt.figure(figsize=(figsize[0], n_vars * figsize[1]))
        axs = fig.subplots(n_vars, 1, sharex=True)
        if n_vars == 1:
            axs = [axs]
    else:
        axs = [ax]

    # Plot the hindcasts
    colormaps = ["autumn", "winter", "cool"]
    colormapcycler = cycle(colormaps)
    for name, hcst in hcsts.items():
        # Work out which coordinate holds the valid times.
        if "time" in hcst:
            hcst_time = "time"
        elif "valid_time" in hcst:
            hcst_time = "valid_time"
        else:
            raise ValueError("I can't work out the time variable in hcsts")
        # One colour per init date, drawn from this dataset's colormap.
        color = getattr(cm, next(colormapcycler))(np.linspace(0, 0.9, len(hcst.init)))
        for a, var in enumerate(hcst.data_vars):
            for idx, (i, c) in enumerate(zip(hcst[var].init, color)):
                # Only the first init carries the legend label.
                if idx == 0:
                    label = name
                else:
                    label = "_nolabel_"

                h = hcst[var].sel(init=i)
                if "member" in h.dims:
                    # Plot individual members in light grey behind the mean.
                    h_mean = h.mean("member", keep_attrs=True)
                    for m in h.member:
                        axs[a].plot(
                            h[hcst_time],
                            h.sel(member=m),
                            color=[0.8, 0.8, 0.8],
                            linestyle="-",
                            label="_nolabel_",
                            zorder=-1,
                        )
                else:
                    h_mean = h
                # Marker at the init point, then the forecast trajectory.
                axs[a].plot(
                    h_mean[hcst_time][0],
                    h_mean[0],
                    color=c,
                    marker="o",
                    label="_nolabel_",
                )
                axs[a].plot(
                    h_mean[hcst_time], h_mean, color=c, linestyle="-", label=label
                )
        # NOTE(review): xlim reflects the last dataset in the loop; assumes
        # all hindcasts span a similar time range.
        xlim = (hcst[hcst_time].min().item(), hcst[hcst_time].max().item())

    # Plot the observations
    if obsvs is not None:
        lines = ["-", "--", "-.", ":"]
        linecycler = cycle(lines)
        for name, obsv in obsvs.items():
            line = next(linecycler)
            for a, var in enumerate(hcst.data_vars):
                axs[a].plot(
                    obsv.time, obsv[var], color="black", label=name, linestyle=line
                )

    # Plot the historical runs
    if hists is not None:
        for name, hist in hists.items():
            for a, var in enumerate(hist.data_vars):
                # Ensemble mean when members are present.
                h_mean = (
                    hist[var].mean("member", keep_attrs=True)
                    if "member" in hist[var].dims
                    else hist[var]
                )
                axs[a].plot(h_mean.time, h_mean, label=name)

    # Format plots: 5-yearly ticks, x-limits padded by a year either side.
    ticks = xr.cftime_range(start=xlim[0], end=xlim[-1], freq="5AS", calendar="julian")
    years = xr.cftime_range(start=xlim[0], end=xlim[-1], freq="AS", calendar="julian")
    xlim = (years.shift(-1, "AS")[0], years.shift(2, "AS")[-1])
    for a, var in enumerate(hcst.data_vars):
        axs[a].set_xticks(ticks.values)
        axs[a].set_xticklabels(ticks.year, rotation=40)
        axs[a].set_xlim(xlim)
        axs[a].set_ylabel(hcst[var].attrs["long_name"])
        axs[a].grid()
        if a == 0:
            axs[a].legend()
        if a == (n_vars - 1):
            axs[a].set_xlabel("year")
        else:
            axs[a].set_xlabel("")
        if shade:
            _shading(axs[a])
    plt.tight_layout()

    if ax is None:
        fig.patch.set_facecolor("w")
        return fig
    else:
        return ax
da483ad5f39ca200eb6191cf10e77e3dc2b589c2
26,843
def Main(block_index):
    """Fetch a block, print its timestamp, and scan its transactions for a
    specific invocation-transaction hash.

    :param block_index: index (height) of the block to inspect
    :return: the block header timestamp
    """
    header = GetHeader(block_index)
    print("got block!")
    ts = header.Timestamp
    print("got timestamp")

    block = GetBlock(block_index)
    txlist = block.Transactions
    # @TODO iterating block.Transactions directly does not work,
    # hence the intermediate txlist variable.
    for tx in txlist:
        # Renamed from `type`/`hash` to avoid shadowing builtins; the unused
        # `is_invoke` flag from the original has been removed.
        tx_type = tx.Type
        tx_hash = tx.Hash
        Notify(tx_type)

        if tx_type == INVOKE_TX_TYPE:
            if tx_hash == b'\xa0ljY\xd8n\x1b\xb5\xdb\xa0\xf5d\xd8\xb3\xd8\xec\xf2\xfb\xe3E\xe3|3\xba\x83\xf2$jW\xa24':
                print("correct hash!")
            else:
                print("hash does not match")
    return ts
be63ad2741c2d9d4de56739916924a927cf238bd
26,844
def _create_integration_test_executable(
    orig_target_name, target_suffix, executable):
    """Create _integration_test_executable rule and return its names.

    Args:
      orig_target_name: The name given to the sut_component or the
          integration_test.
      target_suffix: A suffix to append to the orig_target_name to make a
          unique target name for the _integration_test_executable.
      executable: Can be either a string, which is interpretted as a program,
          or a dictionary that has a mandatory "program" field (whose value is
          a string), and some optional fields. If a string is provided or if
          the optional field 'timeout_seconds' is not provided, it will
          default to DEFAULT_EXECUTABLE_TIMEOUT_SECONDS.

    Returns:
      The target name of the _integration_test_executable rule.
    """
    # Create a target name for the _integration_test_executable rule.
    target_name = "_%s_%s" % (orig_target_name, target_suffix)

    # Shorthand form: a bare string is just the program; everything else
    # takes its default.
    # isinstance is not supported in skylark
    # pylint: disable=unidiomatic-typecheck
    if type(executable) == "string":
        _integration_test_executable(
            name = target_name,
            program = executable,
        )
        return target_name

    # Validate that executable is a valid dictionary.
    # isinstance is not supported in skylark
    # pylint: disable=unidiomatic-typecheck
    if type(executable) != "dict":
        fail("Error in target %s: %s is neither a string nor a dictionary." % (orig_target_name, target_suffix))
    # Reject unknown keys so typos surface at analysis time.
    for key in executable:
        if key not in ["program", "args", "input_files", "data", "deps", "output_properties", "output_files", "timeout_seconds"]:
            fail("Error in target %s: %s has an invalid key %s." % (orig_target_name, target_suffix, key))

    # dict.get returns None for absent optional fields; the underlying rule
    # applies its own defaults (e.g. DEFAULT_EXECUTABLE_TIMEOUT_SECONDS).
    _integration_test_executable(
        name = target_name,
        timeout_seconds = executable.get("timeout_seconds"),
        program = executable.get("program"),
        args = executable.get("args"),
        input_files = executable.get("input_files"),
        data = executable.get("data"),
        deps = executable.get("deps"),
        output_properties = executable.get("output_properties"),
        output_files = executable.get("output_files"),
    )
    return target_name
761a0c586784ce37c3cd22feafb755d42908bca1
26,845
import torch


def pgd(model, inp, label, epsilon=0.3, step_size=0.01, num_steps=40, random_start=True,
        pixel_range=(-0.5, 0.5)):
    """Projected gradient descent (PGD) adversarial attack.

    Parameters
    ----------
    model : nn.Module
        Model to attack
    inp : tensor
        Input to perturb adversarially.
    label : tensor
        Target label to minimize score of.
    epsilon : float
        Magnitude of perturbation (the default is 0.3).
    step_size : float
        Size of PGD step (the default is 0.01).
    num_steps : int
        Number of PGD steps for one attack. Note that the model is called
        this many times for each attack. (the default is 40).
    random_start : bool
        Whether or not to add a uniform random (-epsilon to epsilon)
        perturbation before performing PGD. (the default is True).
    pixel_range : tuple
        Range to clip the output. (the default is (-0.5, 0.5)).

    Returns
    -------
    tensor
        Adversarially perturbed input.
    """
    adv_inp = inp.clone().detach()

    if epsilon == 0:
        return adv_inp

    if random_start:
        adv_inp += torch.rand(*adv_inp.shape, device=inp.device) * 2 * epsilon - epsilon

    for _ in range(num_steps):
        inp_var = adv_inp.clone().requires_grad_(True)
        output = model(inp_var)
        # BUG FIX: the original called nn.CrossEntropyLoss() but never
        # imported `nn`; torch.nn.functional.cross_entropy is equivalent.
        loss = torch.nn.functional.cross_entropy(output, label)
        loss.backward()

        # Ascend the loss, then project back into the epsilon ball around
        # the clean input and into the valid pixel range.
        adv_inp += inp_var.grad.sign() * step_size
        adv_inp = torch.max(torch.min(adv_inp, inp + epsilon), inp - epsilon)
        adv_inp = torch.clamp(adv_inp, *pixel_range)

    return adv_inp.clone().detach()
3c59fdf7f4f9d6941a1622c9dc9a9a378c5120c9
26,846
def enumerate_trials(perievents):
    """
    adds an index to perievents_2D that counts the number of trials per
    session and event starting with 1, removes FrameCounter index
    :param perievents: perievents df, non-column based format
    :return: perievents df with additional index Trial
    """
    # Drop FrameCounter; trials are counted within the remaining index levels.
    perievents = perievents.reset_index('FrameCounter', drop=True)
    group_levels = list(perievents.index.names)
    # cumcount is 0-based, so shift to start trial numbering at 1.
    perievents['Trial'] = perievents.groupby(group_levels).cumcount() + 1
    return perievents.set_index('Trial', append=True)
d469a823b6af60d305dc37b1ce42176f16b21f8d
26,847
import torch


def add_dims_right(tensor, ndims, right_indent=0):
    """Add empty dimensions to the right of tensor shape.

    `right_indent` existing trailing dimensions are kept to the right of the
    inserted singleton dimensions.
    """
    assert right_indent >= 0
    insert_at = -1 - right_indent
    for _ in range(ndims):
        tensor = tensor.unsqueeze(insert_at)
    return tensor
7d4c1b47eb659f0bcfc9dbcf7f7b04c1ccbafb80
26,848
def ciClim( imData, sigma = 2.5 ):
    """
    Confidence interval color limits, for images.  Most useful for highly
    spikey data: limits are mean +/- sigma standard deviations.
    """
    center = np.mean(imData)
    spread = sigma * np.std(imData)
    return np.array([center - spread, center + spread])
64debb8fea07c60da0d9af825482959312ef09d5
26,849
def get_submodel_name(history = 60, lag = 365, num_neighbors = 20, margin_in_days = None,
                      metric = "cos"):
    """Returns submodel name for a given setting of model parameters
    """
    return (f'{metric}-autoknn-hist{history}-nbrs{num_neighbors}'
            f'-margin{margin_in_days}-lag{lag}')
69fa276a86c39f342ffceba72408ab6970dd0a41
26,850
def create_db_scoped_session(connection_string=None):
    """Create scoped session."""
    # we use NullPool, so that SQLAlchemy doesn't pool local connections
    # and only really uses connections while writing results
    conn_str = connection_string or configuration.POSTGRES_CONNECTION
    engine = create_engine(conn_str, poolclass=NullPool)
    return scoped_session(sessionmaker(bind=engine))
da0583601147dbebb97c25d945f05fdb19859709
26,851
import torch


def sens_reduce_precise(x: torch.Tensor, sens_maps: torch.Tensor) -> torch.Tensor:
    """
    Combine num_coils individual coil images into a single image using
    estimates of the sensitivity maps, then replace the magnitude with the
    RSS estimate to reduce inaccuracies caused by the sensitivity map
    estimates.

    Args:
        x: Tensor of shape (num_coils, H, W, 2) that contains the individual
            coil images.
        sens_maps: Sensitivity maps of shape (num_coils, H, W, 2).

    Returns:
        The combined image of shape (H, W, 2).
    """
    # Phase comes from the sensitivity-weighted combination...
    combined = sens_reduce(x, sens_maps)
    phase = to_polar(combined)[..., 1]
    # ...magnitude from the root-sum-of-squares estimate.
    magnitude = fastmri.rss_complex(x)
    return to_cartesian(torch.stack((magnitude, phase), dim=-1))
5d40c20dc9dd276a734378602f8ce7a42f402087
26,852
from typing import Callable


def dict_to_object(item, callback: Callable = None):
    """Convert a Python dict into a Python object.

    Nested dicts become attributes of dynamically created classes (named
    'automatic'); lists are converted element-wise; other values pass
    through unchanged.  (The original docstring was garbled by extraction
    artifacts; this restores a runnable example.)

    Args:
        item: The value to convert (dict, list, or scalar).
        callback: Unused; kept for backward compatibility with callers.

    Returns:
        A class whose attributes mirror the dict's keys, a list, or the
        original scalar.

    Example:
        >>> data = {"name": "John Smith", "hometown": {"name": "New York", "id": 123}}
        >>> c = dict_to_object(data)
        >>> c.name
        'John Smith'
        >>> c.hometown.name
        'New York'
    """
    def convert(node):
        if isinstance(node, dict):
            # Build a throwaway class whose attributes are the converted values.
            return type('automatic', (), {
                k: convert(v) for k, v in node.items()
            })
        if isinstance(node, list):
            return [convert(element) for element in node]
        return node

    return convert(item)
6a44ccdf53e54609cac2954f60e080a148daa1ed
26,853
def show_vm(vm_id):
    """
    Executes the onevm show command and returns the xml output.

    :param int vm_id: vm id number
    :return: XML output from onevm show command
    """
    cmd = ["onevm", "show", "-x", str(vm_id)]
    return _get_subp_out(cmd)
962b98bb061b5e647c2f11e2447b7b838994d3d4
26,854
def isragged(arr):
    """Test if an array is ragged (i.e. unequal-length records).

    Parameters
    ----------
    arr : array

    Returns
    -------
    ragged : bool
        Returns True if not all entries in arr are of the same length
        (i.e. arr is indeed ragged). False otherwise.
    """
    # A single distinct record length means the array is rectangular.
    distinct_lengths = {len(record) for record in arr}
    return len(distinct_lengths) != 1
1fdc0c23be00a8b7535d7a85df05e5d1b323c298
26,855
def compress_public_key(public_key):
    """Compresses a given uncompressed public key.

    :param public_key: the key to compress, as bytes
        (65 bytes: 0x04 prefix, 32-byte X, 32-byte Y)
    :return: the compressed key, as bytes (0x02/0x03 prefix + 32-byte X)
    :raises ValueError: if the input is not a valid uncompressed public key
    """
    if public_key[0] != 0x04 or len(public_key) != 65:
        raise ValueError('invalid uncompressed public key')

    # The prefix byte encodes the parity of the y coordinate.
    # BUG FIX: the original took the parity of the entire 65-byte key, which
    # only gave the right answer by coincidence (big-endian parity is decided
    # by the final byte, which happens to be Y's least significant byte).
    # Slice out the actual y coordinate instead.
    y = int.from_bytes(public_key[33:], 'big')
    prefix = b'\x03' if y % 2 else b'\x02'
    return prefix + public_key[1:33]
8ff7c609a216e29b9cc31584e3cd824f7ab2879d
26,856
import re


def sphinx_doc_parser(doc: str) -> DocParserRetType:
    """A doc parser handling Sphinx-style and epydoc-style docstrings

    Args:
        doc: The function doc string

    Returns:
        The parsed doc string and parsed parameter list
    """
    main_doc, param_docs = plain_doc_parser(doc)

    # Split off everything from the first field marker line (":param x:",
    # "@param x:", ":returns:", ...) onward; the capture keeps the marker.
    # NOTE(review): maxsplit/flags are passed positionally, which is
    # deprecated for re.split in newer Python versions.
    parts = re.split(r'(^[:@].*:)', main_doc, 1, re.M)
    if len(parts) <= 1:
        # No field markers: the whole docstring is the main description.
        return main_doc, param_docs

    main_doc = parts[0]
    # Each indented group is one field (marker line plus continuation lines).
    for group in indented_groups(''.join(parts[1:])):
        match = re.match(r'[:@]param\s+([^:]+):\s*(.*)$', ' '.join(group))
        if not match:
            # Skip non-param fields (e.g. :returns:, :raises:).
            continue

        # Strip backtick quoting and leading * / ** from the parameter name.
        name = match.group(1).strip('`').lstrip('*')
        param_docs[name] = ParamInfo(match.group(2).strip())

    return main_doc, param_docs
1179149b7215838a7cfaa720b1ba6262170031c1
26,857
def bilinear_interpolate(X, x, y):
    """
    Estimates of the pixel values at the coordinates (x, y) in `X` via
    bilinear interpolation.

    Notes
    -----
    Assumes the current entries in X reflect equally-spaced samples from a 2D
    integer grid.

    Modified from https://bit.ly/2NMb1Dr

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape `(in_rows, in_cols, in_channels)`
        An input image sampled along a grid of `in_rows` by `in_cols`.
    x : list of length `k`
        A list of x-coordinates for the samples we wish to generate
    y : list of length `k`
        A list of y-coordinates for the samples we wish to generate

    Returns
    -------
    samples : :py:class:`ndarray <numpy.ndarray>` of shape `(k, in_channels)`
        The samples for each (x,y) coordinate computed via bilinear
        interpolation
    """
    # Integer corner coordinates surrounding each (x, y) sample.
    x_lo = np.floor(x).astype(int)
    y_lo = np.floor(y).astype(int)
    x_hi = x_lo + 1
    y_hi = y_lo + 1

    # Clamp to the image bounds (edge samples reuse the border pixel).
    x_lo = np.clip(x_lo, 0, X.shape[1] - 1)
    y_lo = np.clip(y_lo, 0, X.shape[0] - 1)
    x_hi = np.clip(x_hi, 0, X.shape[1] - 1)
    y_hi = np.clip(y_hi, 0, X.shape[0] - 1)

    # Pixel values at the four corners, transposed to (channels, k).
    top_left = X[y_lo, x_lo, :].T
    bot_left = X[y_hi, x_lo, :].T
    top_right = X[y_lo, x_hi, :].T
    bot_right = X[y_hi, x_hi, :].T

    # Bilinear weights: each corner weighted by the opposite sub-rectangle.
    w_tl = (x_hi - x) * (y_hi - y)
    w_bl = (x_hi - x) * (y - y_lo)
    w_tr = (x - x_lo) * (y_hi - y)
    w_br = (x - x_lo) * (y - y_lo)

    return ((top_left * w_tl).T + (bot_left * w_bl).T
            + (top_right * w_tr).T + (bot_right * w_br).T)
76692e28fe68e6af5266de8672ee0b32754297c3
26,858
import time


def do_prediction(intbl, pbms, gene_names,
                  filteropt="p-value", filterval=0.0001, spec_ecutoff=0.4, nonspec_ecutoff=0.35):
    """Run TF-binding predictions over all PBM files in parallel.

    intbl: preprocessed table
    pbms: list of PBM prediction file names (relative to config.PREDDIR)
    gene_names: gene names used when post-processing the results
    filteropt: p-value or z-score
    filterval: # TFs for opt z-score and p-val cutoff for p-value
    spec_ecutoff / nonspec_ecutoff: E-score cutoffs forwarded to `predict`

    Returns (colnames, datavalues) as produced by `postprocess`.
    """
    # intbl: #rowidx,seq,val,diff,t,pbmname,escore_seq

    start_time = time.time()  # NOTE(review): recorded but never used
    # move the comment here for testing
    predfiles = [config.PREDDIR + "/" + pbm for pbm in pbms] # os.listdir(preddir)
    # Split the prediction files into one chunk per worker process.
    preds = utils.chunkify(predfiles,config.PCOUNT) # chunks the predfiles for each process

    # need to use manager here -- a plain Value is not shareable across the
    # pool's worker processes.
    shared_ready_sum = mp.Manager().Value('i', 0)

    pool = mp.Pool(processes=config.PCOUNT)
    # Normalize the filter value: float cutoff for p-values, integer count
    # of TFs for z-scores.
    if filteropt == "p-value":
        filterval = float(filterval)
    else: #z-score
        filterval = int(filterval)
    async_pools = [pool.apply_async(predict, (preds[i], intbl, shared_ready_sum, filteropt, filterval, spec_ecutoff, nonspec_ecutoff)) for i in range(0,len(preds))]

    total = len(predfiles)  # NOTE(review): computed but never used
    # Poll until every chunk has finished (2 s between checks).
    while not all([p.ready() for p in async_pools]):
        time.sleep(2)

    res = [p.get() for p in async_pools]
    pool.terminate()

    colnames,datavalues = postprocess(res,gene_names,filteropt,filterval)
    return colnames,datavalues
70bcd1ed8ffdaf1b97d64ab2afdb1ad48f4a2047
26,859
def fetch_res(n, k, dim, ampl_noise, type_mat, scaled, norm_laplacian, n_avrg,
              save_res_dir, embedding_method):
    """
    Get the results from a given experiment when they were saved to a file,
    in order to use them for plotting in plot_from_res, or not to redo the
    same computation twice in run_synthetic_exps.
    """
    # File name encodes every experiment parameter.
    fn = ("n_{}-k_{}-dim_{}-ampl_{}-type_mat_{}-embedding_{}"
          "-scaled_{}-norm_laplacian_{}-n_avrg_{}.res".format(
              n, k, dim, ampl_noise, type_mat, embedding_method,
              scaled, norm_laplacian, n_avrg))
    path = save_res_dir + "/" + fn
    # The first line holds "<mean> <std>".
    with open(path, 'r') as f:
        tokens = f.readlines()[0].split()
    res_mean, res_std = (float(t) for t in tokens)
    return (res_mean, res_std)
ce8cac77d36b5c6bd2920426e054899681318508
26,860
def load_gramar_from_SYGUS_spec(spec):
    """Creates Grammar from the given SYGUS grammar specification
    (http://sygus.seas.upenn.edu/files/SyGuS-IF.pdf, last access 3.08.2016).

    :param spec: SYGUS grammar specification.
    :return: New grammar object.
    """
    grammar = Grammar()
    # Pad parentheses with spaces so split() yields them as standalone tokens.
    spec = spec.replace('(', '( ')
    spec = spec.replace(')', ' )')
    words = spec.split()
    # Nesting depth of '(' seen so far; depth decides how a token is parsed.
    brackets_open = 0
    grammar_rule = None
    rule_symb = None
    i = 0

    def get_production_complex_body(start):
        # Collect the tokens of one parenthesized production body, returning
        # the body tokens (including the outer parens) and the index of the
        # closing paren so the caller can resume after it.
        body = []
        opened = 0
        k = start
        while k < len(words):
            if words[k] == '(':
                opened += 1
            elif words[k] == ')':
                opened -= 1
            body.append(words[k])
            if opened == 0:
                return body, k
            k += 1
        return body, k

    while i < len(words):
        w = words[i]
        # print('processing: ' + ' ('+str(brackets_open)+') ' + w )
        if w == '(' and brackets_open < 3:
            brackets_open += 1
            i += 1
            continue
        elif w == ')':
            brackets_open -= 1
            i += 1
            continue

        if brackets_open == 2:
            # On the level of 2 opened brackets there are always production
            # names (symb) and their sorts.
            rule_symb = words[i]
            sort = words[i + 1]
            if sort in {"(", ")"}:
                raise Exception("Malformed grammar!")
            grammar_rule = GrammarRule(rule_symb, sort, grammar)
            i += 2
            continue
        elif brackets_open == 3:
            # On the level of 3 opened brackets there are defined concrete
            # right hands for the current production.
            j = i
            while j < len(words):
                if words[j] == '(':
                    body, j = get_production_complex_body(j)
                    grammar_rule.add(Production(rule_symb, body, grammar_rule))
                elif words[j] == ')':
                    # End of the right-hand-side list; resume outer scan here.
                    i = j
                    brackets_open -= 1
                    break
                else:
                    # Simple symbol not requiring surrounding it with brackets.
                    body = [words[j]]
                    grammar_rule.add(Production(rule_symb, body, grammar_rule))
                j += 1
            i += 1
            if i < len(words) and words[i] == ")":
                # The rule's own closing paren follows: the rule is complete.
                grammar.add_rule(grammar_rule)
                continue
            else:
                raise Exception("Malformed grammar!")
        i += 1

    if brackets_open != 0:
        raise Exception("Malformed grammar!")
    # print('Final grammar:\n' + str(grammar))
    return grammar
b909fdb2df0fd005a6ddc1c3069e1ba7659619c2
26,861
def im_entropy(im):
    """
    Calculate the Shannon entropy (natural log) of a single greyscale image.

    :param im: a greyscale image as a 2-D numpy array
    :return: entropy of the image
    """
    h, w = im.shape
    # 256-bin histogram over the flattened pixel values.
    counts = np.histogram(im.reshape(-1), bins=256)[0]
    # Keep only occupied bins; empty bins contribute nothing to the entropy.
    probs = counts[counts > 0] / (h * w)
    return np.sum(-probs * np.log(probs))
aeaddd3c087e2f621ce0b35c108d6d1de630d477
26,862
def forward(layers, x):
    """
    Run one forward pass through the conv -> pool -> dense layer stack.

    Parameters:
        layers : list of three layer objects (conv, pool, dense), each
            exposing a ``forward`` method
        x : numpy array of raw pixel values

    Returns:
        list : outputs of the conv, pool and dense layers, in order
    """
    conv, pool, dense = layers[0], layers[1], layers[2]

    # Normalize raw pixels from [0, 255] to [-0.5, 0.5] before the first layer.
    out_conv = conv.forward((x / 255) - 0.5)
    out_pool = pool.forward(out_conv)
    out_dense = dense.forward(out_pool)

    return [out_conv, out_pool, out_dense]
2e7ae3c8ab513ca5f138c77a868783dc942063ee
26,863
def gen_word_vec_matrix(doc, nrows=100, stop_min=4, return_df=False):
    """Build a fixed-height matrix of token word vectors from a parsed doc.

    Non-alphabetic tokens are dropped, as are stop words unless they are
    longer than `stop_min` characters. The result is padded (by resampling
    existing rows) or downsampled to exactly `nrows` rows.

    Args:
        doc: iterable of tokens exposing `.vector`, `.is_alpha`, `.is_stop`
            and `len()` (e.g. a spaCy Doc) — assumed, confirm with callers.
        nrows: desired number of rows in the output matrix.
        stop_min: stop words longer than this many characters are kept.
        return_df: if True return the pandas DataFrame, else its .values.

    Returns:
        pandas DataFrame of shape (nrows, dim), or the underlying ndarray.
        Note: at least one token must survive filtering for padding to work.
    """
    # filter stop words and non-alpha tokens
    vecs = [token.vector for token in doc
            if token.is_alpha and ((not token.is_stop) or
                                   (token.is_stop and len(token) > stop_min))]
    vec_df = pd.DataFrame(vecs)

    # pad or downsample to desired numbers of rows
    nvecs = vec_df.shape[0]
    if nvecs < nrows:
        # Fix: sample with replacement — the deficit (nrows - nvecs) can
        # exceed the population size, which previously raised ValueError.
        vec_df = pd.concat([vec_df, vec_df.sample(nrows - nvecs, replace=True)])
    elif nvecs > nrows:
        vec_df = vec_df.sample(nrows)

    # return output in desired format
    if return_df:
        return vec_df
    else:
        return vec_df.values
98ddf6efc3ddadda8d72151a2d63860355aa6769
26,864
def __same_axes(x_axis, y_axis, xlim, ylim):
    """Check if two axes are the same, used to determine squared plots"""
    # Both axes must refer to the same (non-None) quantity...
    same_axis = x_axis is not None and x_axis == y_axis
    # ...and share identical display limits.
    return same_axis and xlim == ylim
ce7538ffa17e15df0fc055809103bf69af88e7aa
26,865
import csv


def create_column_dicts(path_to_input):
    """Creates dictionaries: {column_index: column_name} and
    {column_name: column_index} from the header row of a CSV file."""
    with open(path_to_input, newline="") as csvfile:
        # Only the first (header) row is needed.
        header = next(csv.reader(csvfile, delimiter=","))

    cols = dict(enumerate(header))
    # reverse dictionary: {column_name: column_index}
    cols_inds = {name: index for index, name in enumerate(header)}
    return cols, cols_inds
13bf2e3c99fea9b1d2580b7bfc34e445af8b7e98
26,866
from typing import Type
from typing import Any


def autocommit(session_cls: Type[Any], do_commit_at_outermost: bool = True) -> Any:
    """
    Auto-commit decorator: wraps the decorated function in a CommitContext
    so the session is committed when the outermost context exits.

    :param session_cls: SQLAlchemy Session class; must be resolvable via `inject`
    :param do_commit_at_outermost: whether to commit when the outermost
        context ends
    :return: the decorator
    """
    def decorator(func: FunctionType):
        @wraps(func)
        def wrap(*args, **kwargs):
            # Resolve the session from the DI container at call time, not at
            # decoration time, so each call gets the currently bound session.
            session: session_cls = inject.instance(session_cls)
            with CommitContext(session, do_commit_at_outermost=do_commit_at_outermost):
                return func(*args, **kwargs)
        return wrap
    return decorator
f974ba9d28353ba7cf6f676d08b6b6bcbb751d0e
26,867
from operator import add


def multiply(multiplicand, multiplier):
    """Multiplies the multiplicand by the multiplier

    This function multiplies the multiplicand by the multiplier only when
    they are either integers or floats

    Args:
        multiplicand(int/float): The quantity that is to be multiplied by another
        multiplier(int/float): The quantity by which a given number is to be
            multiplied

    >>> multiply(6, 3)
    18

    Returns:
        The product of the multiplicand multiplied by the multiplier

    Raises:
        TypeError if the multiplicand or multiplier is neither an integer
        or float
    """
    if not isinstance(multiplicand, (int, float)):
        raise TypeError(f"{multiplicand} is not an integer or float")
    if not isinstance(multiplier, (int, float)):
        raise TypeError(f"{multiplier} is not an integer or float")

    # NOTE: the previous `is None` checks were dead code — None already fails
    # the isinstance() validation above.

    if isinstance(multiplier, int) and multiplier >= 0:
        # Repeated addition, as before, for non-negative integer multipliers.
        product = 0
        for _ in range(multiplier):
            product = add(product, multiplicand)
        return product

    # Fix: float multipliers previously crashed on range(), and negative
    # integer multipliers silently returned 0. Fall back to arithmetic.
    return multiplicand * multiplier
ad5da56004fd6ad6df327b892ea7cff2c09e9ac6
26,868
from typing import Optional
from typing import Container


def rolling_enveloped_dashboard(
        channel_df_dict: dict, desired_num_points: int = 250, num_rows: Optional[int] = None,
        num_cols: Optional[int] = 3, width_for_subplot_row: int = 400, height_for_subplot_row: int = 400,
        subplot_colors: Optional[Container] = None, min_points_to_plot: int = 1, plot_as_bars: bool = False,
        plot_full_single_channel: bool = False, opacity: float = 1, y_axis_bar_plot_padding: float = 0.06
) -> go.Figure:
    """
    A function to create a Plotly Figure with sub-plots for each of the available data sub-channels, designed to
    reduce the number of points/data being plotted without minimizing the insight available from the plots. It will
    plot either an envelope for rolling windows of the data (plotting the max and the min as line plots), or a bar
    based plot where the top of the bar (rectangle) is the highest value in the time window that bar spans, and the
    bottom of the bar is the lowest point in that time window (choosing between them is done with the `plot_as_bars`
    parameter).

    :param channel_df_dict: A dictionary mapping channel names to Pandas DataFrames of that channels data
    :param desired_num_points: The desired number of points to be plotted in each subplot. The number of points
     will be reduced from it's original sampling rate by applying metrics (e.g. min, max) over sliding windows
     and then using that information to represent/visualize the data contained within the original data. If less than
     the desired number of points are present, then a sliding window will NOT be used, and instead the points will be
     plotted as they were originally recorded (also the subplot will NOT be plotted as a bar based plot even if
     `plot_as_bars` was set to true).
    :param num_rows: The number of columns of subplots to be created inside the Plotly figure. If None is given, (then
     `num_cols` must not be None), then this number will automatically be determined by what's needed. If more rows
     are specified than are needed, the number of rows will be reduced to the minimum needed to contain all the subplots
    :param num_cols: The number of columns of subplots to be created inside the Plotly figure. See the description of
     the `num_rows` parameter for more details on this parameter, and how the two interact. This also follows the same
     approach to handling None when given
    :param width_for_subplot_row: The width of the area used for a single subplot (in pixels).
    :param height_for_subplot_row: The height of the area used for a single subplot (in pixels).
    :param subplot_colors: An 'array-like' object of strings containing colors to be cycled through for the subplots.
     If None is given (which is the default), then the `colorway` variable in Plotly's current theme/template will
     be used to color the data on each of the subplots uniquely, repeating from the start of the `colorway` if
     all colors have been used.
    :param min_points_to_plot: The minimum number of data points required to be present to create a subplot for a
     channel/subchannel (NOT including `NaN` values).
    :param plot_as_bars: A boolean value indicating if the plot should be visualized as a set of rectangles, where a
     shaded rectangle is used to represent the maximum and minimum values of the data during the time window
     covered by the rectangle. These maximum and minimum values are visualized by the locations of the top and bottom
     edges of the rectangle respectively, unless the height of the rectangle would be 0, in which case a line segment
     will be displayed in it's place. If this parameter is `False`, two lines will be plotted for each of the
     subplots in the figure being created, creating an 'envelope' around the data. An 'envelope' around the data
     consists of a line plotted for the maximum values contained in each of the time windows, and another line plotted
     for the minimum values. Together these lines create a boundary which contains all the data points recorded in the
     originally recorded data.
    :param plot_full_single_channel: If instead of a dashboard of subplots a single plot with multiple sub-channels
     should be created. If this is True, only one (key, value) pair can be given for the `channel_df_dict` parameter
    :param opacity: The opacity to use for plotting bars/lines
    :param y_axis_bar_plot_padding: Due to some unknown reason the bar subplots aren't having their y axis ranges
     automatically scaled so this is the ratio of the total y-axis data range to pad both the top and bottom of the
     y axis with. The default value is the one it appears Plotly uses as well.
    :return: The Plotly Figure containing the subplots of sensor data (the 'dashboard')
    """
    if not (num_rows is None or isinstance(num_rows, (int, np.integer))):
        raise TypeError(f"`num_rows` is of type `{type(num_rows)}`, which is not allowed. "
                        "`num_rows` can either be `None` or some type of integer.")
    elif not (num_cols is None or isinstance(num_cols, (int, np.integer))):
        raise TypeError(f"`num_cols` is of type `{type(num_cols)}`, which is not allowed. "
                        "`num_cols` can either be `None` or some type of integer.")

    # This removes any channels with less than `min_points_to_plot` data points per sub-channel, and removes any
    # sub-channels which have less than `min_points_to_plot` non-NaN data points
    channel_df_dict = {
        k: v.drop(columns=v.columns[v.notna().sum(axis=0) < min_points_to_plot])
        for (k, v) in channel_df_dict.items() if v.shape[0] >= min_points_to_plot}

    subplot_titles = [' '.join((k, col)) for (k, v) in channel_df_dict.items() for col in v.columns]

    if num_rows is None and num_cols is None:
        raise TypeError("Both `num_rows` and `num_columns` were given as `None`! "
                        "A maximum of one of these two parameters may be given as None.")
    elif num_rows is None:
        num_rows = 1 + (len(subplot_titles) - 1) // num_cols
    elif num_cols is None:
        num_cols = 1 + (len(subplot_titles) - 1) // num_rows
    elif len(subplot_titles) > num_rows * num_cols:
        raise ValueError("The values given for `num_rows` and `num_columns` result in a maximum "
                         f"of {num_rows * num_cols} avaialable sub-plots, but {len(subplot_titles)} subplots need "
                         "to be plotted! Try setting one of these variables to `None`, it will then "
                         "automatically be set to the optimal number of rows/columns.")
    else:
        num_rows = 1 + (len(subplot_titles) - 1) // num_cols
        num_cols = int(np.ceil(len(subplot_titles)/num_rows))

    if subplot_colors is None:
        colorway = pio.templates[pio.templates.default]['layout']['colorway']
    else:
        colorway = subplot_colors

    if plot_full_single_channel:
        if len(channel_df_dict) != 1:
            raise ValueError("The 'channel_df_dict' parameter must be length 1 when "
                             "'plot_full_single_channel' is set to true!")

        num_rows = 1
        num_cols = 1
        fig = go.Figure(layout_title_text=list(channel_df_dict.keys())[0])
    else:
        fig = make_subplots(
            rows=num_rows,
            cols=num_cols,
            subplot_titles=subplot_titles,
            figure=go.Figure(
                layout_height=height_for_subplot_row * num_rows,
                layout_width=width_for_subplot_row * num_cols,
            ),
        )

    # A counter to keep track of which subplot is currently being worked on
    subplot_num = 0

    # A dictionary to be used to modify the Plotly Figure layout all at once after all the elements have been added
    layout_changes_to_make = {}

    for channel_data in channel_df_dict.values():
        window = int(np.around((channel_data.shape[0]-1) / desired_num_points, decimals=0))

        # If a window size of 1 is determined, it sets the stride to 1 so we don't get an error as a result of the
        # 0 length stride
        stride = 1 if window == 1 else window - 1

        rolling_n = channel_data.rolling(window)
        min_max_tuple = (rolling_n.min()[::stride], rolling_n.max()[::stride])
        min_max_equal = min_max_tuple[0] == min_max_tuple[1]

        is_nan_min_max_mask = np.logical_not(
            np.logical_and(
                pd.isnull(min_max_tuple[0]),
                pd.isnull(min_max_tuple[1])))

        # If it's going to be plotted as bars, force it's time stamps to be uniformly spaced
        # so that the bars don't have any discontinuities in the X-axis
        if len(channel_data) >= desired_num_points and plot_as_bars:
            if isinstance(channel_data.index, pd.core.indexes.datetimes.DatetimeIndex):
                new_index = pd.date_range(
                    channel_data.index.values[0],
                    channel_data.index.values[-1],
                    periods=len(channel_data),
                )
            else:
                new_index = np.linspace(
                    channel_data.index.values[0],
                    channel_data.index.values[-1],
                    num=len(channel_data),
                )

            # Fix: `set_index` returns a new DataFrame (it is not in-place by
            # default); the result was previously discarded, so the uniform
            # index was never actually applied.
            channel_data = channel_data.set_index(new_index)

        # Loop through each of the sub-channels, and their respective '0-height rectangle mask'
        # (`.items()` replaces `.iteritems()`, which was removed in pandas 2.x)
        for subchannel_name, cur_min_max_equal in min_max_equal[channel_data.columns].items():

            traces = []
            cur_color = colorway[subplot_num % len(colorway)]
            cur_subchannel_non_nan_mask = is_nan_min_max_mask[subchannel_name]

            # If there are less data points than the desired number of points
            # to be plotted, just plot the data as a line plot
            if len(channel_data) < desired_num_points:
                not_nan_mask = np.logical_not(pd.isnull(channel_data[subchannel_name]))
                traces.append(
                    go.Scatter(
                        x=channel_data.index[not_nan_mask],
                        y=channel_data.loc[not_nan_mask, subchannel_name],
                        name=subchannel_name,
                        opacity=opacity,
                        line_color=cur_color,
                        showlegend=plot_full_single_channel,
                    )
                )
            # If it's going to plot the data with bars
            elif plot_as_bars:
                # If there are any 0-height rectangles
                if np.any(cur_min_max_equal):
                    equal_data_df = min_max_tuple[0].loc[cur_min_max_equal.values, subchannel_name]

                    # Half of the sampling period
                    half_dt = np.diff(cur_min_max_equal.index[[0, -1]])[0] / (2 * (len(cur_min_max_equal) - 1))

                    # Initialize the arrays we'll use for creating line segments where
                    # rectangles would have 0 width so it will end up formatted as follows
                    # (duplicate values for y since line segements are horizontal):
                    # x = [x1, x2, None, x3, x4, None, ...]
                    # y = [y12, y12, None, y34, y34, None, ...]
                    x_patch_line_segs = np.repeat(equal_data_df.index.values, 3)
                    y_patch_line_segs = np.repeat(equal_data_df.values, 3)

                    # All X axis values are the same, but these values are supposed to represent pairs of start and end
                    # times for line segments, so the time stamp is shifted half it's duration backwards for the start
                    # time, and half it's duration forward for the end time
                    x_patch_line_segs[::3] -= half_dt
                    x_patch_line_segs[1::3] += half_dt

                    # This is done every third value so that every two pairs of points is unconnected from eachother,
                    # since the (None, None) point will not connect to either the point before it nor behind it
                    x_patch_line_segs[2::3] = None
                    y_patch_line_segs[2::3] = None

                    traces.append(
                        go.Scatter(
                            x=x_patch_line_segs,
                            y=y_patch_line_segs,
                            name=subchannel_name,
                            opacity=opacity,
                            mode='lines',
                            line_color=cur_color,
                            showlegend=plot_full_single_channel,
                        )
                    )

                min_data_point = np.min(min_max_tuple[0][subchannel_name])
                max_data_point = np.max(min_max_tuple[1][subchannel_name])
                y_padding = (max_data_point - min_data_point) * y_axis_bar_plot_padding

                traces.append(
                    go.Bar(
                        x=min_max_tuple[0].index[cur_subchannel_non_nan_mask],
                        y=(min_max_tuple[1].loc[cur_subchannel_non_nan_mask, subchannel_name] -
                           min_max_tuple[0].loc[cur_subchannel_non_nan_mask, subchannel_name]),
                        marker_color=cur_color,
                        opacity=opacity,
                        marker_line_width=0,
                        base=min_max_tuple[0].loc[cur_subchannel_non_nan_mask, subchannel_name],
                        showlegend=plot_full_single_channel,
                        name=subchannel_name,
                    )
                )

                # Adds a (key, value) pair to the dict for setting this subplot's Y-axis display range (applied later)
                min_y_range = min_data_point - y_padding
                max_y_range = max_data_point + y_padding
                y_axis_id = f'yaxis{1 + subplot_num}_range'
                if plot_full_single_channel:
                    y_axis_id = 'yaxis_range'
                    if layout_changes_to_make:
                        min_y_range = min(min_y_range, layout_changes_to_make[y_axis_id][0])
                        max_y_range = max(max_y_range, layout_changes_to_make[y_axis_id][1])
                layout_changes_to_make[y_axis_id] = [min_y_range, max_y_range]
            else:
                for cur_df in min_max_tuple:
                    traces.append(
                        go.Scatter(
                            x=cur_df.index[cur_subchannel_non_nan_mask],
                            y=cur_df.loc[cur_subchannel_non_nan_mask, subchannel_name],
                            name=subchannel_name,
                            opacity=opacity,
                            line_color=cur_color,
                            showlegend=plot_full_single_channel,
                        )
                    )

            # Add the traces created for the current subchannel of data to the plotly figure
            if plot_full_single_channel:
                fig.add_traces(traces)
            else:
                fig.add_traces(
                    traces,
                    rows=1 + subplot_num // num_cols,
                    cols=1 + subplot_num % num_cols,
                )

            subplot_num += 1

    fig.update_layout(
        **layout_changes_to_make,
        bargap=0,
        barmode='overlay'
    )
    return fig
bd26531b9f941e340c03ce519f977d55f860d345
26,869
def compare_clusterers(clusterer_1, clusterer_2, ground_truth, reduction,
                       figsize=(15, 5), **kwargs):
    """Utility function for comparing two clustering approaches on a problem
    for which we have a ground truth.

    Args:
        - clusterer_1: dict mapping a clustering method's name to its
          predicted labels.
        - clusterer_2: dict mapping a clustering method's name to its
          predicted labels.
        - ground_truth: numpy array with the ground truth labels.
        - reduction: numpy array holding the low dimensional projection of
          the original input data.
        - figsize: tuple specifying the size of the figure.
        - **kwargs: keyword arguments passed to plt.scatter.

    Returns:
        - None
    """
    fig, panels = plt.subplots(1, 3, figsize=figsize)

    # Each clusterer dict holds exactly one (name, labels) entry.
    name_1, labels_1 = next(iter(clusterer_1.items()))
    name_2, labels_2 = next(iter(clusterer_2.items()))

    ari_1 = adjusted_rand_score(labels_true=ground_truth, labels_pred=labels_1)
    ari_2 = adjusted_rand_score(labels_true=ground_truth, labels_pred=labels_2)

    xs, ys = reduction[:, 0], reduction[:, 1]

    panels[0].scatter(xs, ys, c=ground_truth, **kwargs)
    panels[0].set_title('Ground Truth')

    panels[1].scatter(xs, ys, c=labels_1, **kwargs)
    panels[1].set_title(f'Clust {name_1} - ARScore {round(ari_1, 2)}')

    panels[2].scatter(xs, ys, c=labels_2, **kwargs)
    panels[2].set_title(f'Clust {name_2} - ARScore {round(ari_2, 2)}')

    plt.show()
    return None
2406894a354255e48d5e8777b54eed2e50391a25
26,870
def generate_and_write_ed25519_keypair(password, filepath=None):
    """Generates ed25519 key pair and writes custom JSON-formatted keys to disk.

    The private key is encrypted with AES-256-CTR, using the passed password
    strengthened via PBKDF2-HMAC-SHA256. The custom key format records
    'ed25519' as the signing scheme.

    Arguments:
        password: An encryption password.
        filepath (optional): Destination path for the private key; defaults to
            CWD with the keyid as filename. The public key is written next to
            it with a '.pub' suffix.

    Raises:
        UnsupportedLibraryError: pyca/pynacl or pyca/cryptography is not
            available.
        FormatError: Arguments are malformed.
        ValueError: An empty string is passed as 'password'.
        StorageError: Key files cannot be written.

    Side Effects:
        Writes key files to disk.

    Returns:
        The private key filepath.
    """
    # Validate the password shape up front, before any key material is created.
    formats.PASSWORD_SCHEMA.check_match(password)

    # Non-interactive variant: the caller supplies the password directly.
    return _generate_and_write_ed25519_keypair(
        password=password, filepath=filepath, prompt=False)
3c9249d6e72ce442562b9dcb1af8cea59111d369
26,871
def scale_bicubic(data, scale=(1., 1., 1.)):
    """
    returns a bicubic-interpolated, scaled version of data

    the output shape is scaled too.

    Parameters
    ----------
    data: ndarray
        3d input array
    scale: float, tuple
        scaling factor along each axis (x,y,z); a scalar is broadcast to
        all three axes

    Returns
    -------
        scaled output
    """
    if not (isinstance(data, np.ndarray) and data.ndim == 3):
        raise ValueError("input data has to be a 3d array!")

    # Map supported numpy dtypes to the OpenCL typedef / image-read function
    # compile options the kernel requires.
    options_types = {np.uint8: ["-D", "TYPENAME=uchar", "-D", "READ_IMAGE=read_imageui"],
                     np.uint16: ["-D", "TYPENAME=short", "-D", "READ_IMAGE=read_imageui"],
                     np.float32: ["-D", "TYPENAME=float", "-D", "READ_IMAGE=read_imagef"],
                     }

    dtype = data.dtype.type
    if not dtype in options_types:
        raise ValueError("type %s not supported! Available: %s" % (dtype, str(list(options_types.keys()))))

    if not isinstance(scale, (tuple, list, np.ndarray)):
        scale = (scale,) * 3
    if len(scale) != 3:
        raise ValueError("scale = %s misformed" % scale)

    # Upload input as an OpenCL image, allocate the scaled output buffer, and
    # run the bicubic kernel (note: kernel global size is z,y,x reversed).
    d_im = OCLImage.from_array(data)
    nshape = _scale_shape(data.shape, scale)
    res_g = OCLArray.empty(nshape, dtype)
    prog = OCLProgram(abspath("kernels/scale.cl"), build_options=options_types[dtype])
    prog.run_kernel("scale_bicubic", res_g.shape[::-1], None, d_im, res_g.data)
    return res_g.get()
95b400b5cd05fb8e476bf5f5936641bca159015f
26,872
def test_calc_functions_multiple_arguments():
    """Tests parse/eval handling functions with multiple arguments correctly"""
    def h1(x): return x

    def h2(x, y): return x * y

    def h3(x, y, z): return x * y * z

    # Matching arity: each helper evaluates with its exact argument count.
    assert evaluator("h(2)", {}, {"h": h1}, {})[0] == 2.0
    assert evaluator("h(2, 3)", {}, {"h": h2}, {})[0] == 6.0
    assert evaluator("h(2, 3, 4)", {}, {"h": h3}, {})[0] == 24.0
    # A scalar-by-array call yields an elementwise MathArray result.
    assert equal_as_arrays(evaluator("h(2, [1, 2, 3])", {}, {"h": h2})[0],
                           MathArray([2, 4, 6]))
    # Wrong arity raises ArgumentError; an empty argument list does not parse.
    with raises(ArgumentError):
        evaluator("h(2, 1)", {}, {"h": h1}, {})
    with raises(UnableToParse):
        evaluator("h()", {}, {"h": h1}, {})
    with raises(ArgumentError):
        evaluator("h(1)", {}, {"h": h2}, {})
    with raises(ArgumentError):
        evaluator("h(1,2,3)", {}, {"h": h2}, {})
    with raises(UnableToParse):
        evaluator("h()", {}, {"h": h3}, {})
    with raises(ArgumentError):
        evaluator("h(1)", {}, {"h": h3}, {})
    with raises(ArgumentError):
        evaluator("h(1,2)", {}, {"h": h3}, {})
cce31c7386e954389a4d59402d881f602f16373a
26,873
import string


def get_sentiment(text, word_map):
    """
    Identifies the overall sentiment of the text by taking the average of
    each word.

    Note: Words not found in the word_map dict are given zero value.
    Returns 0 for text that contains no words at all (previously this
    raised ZeroDivisionError).
    """
    # remove all punctuation
    text = text.translate(str.maketrans("", "", string.punctuation))

    # split into tokens
    tokens = text.split()

    # Fix: guard against empty / punctuation-only input.
    if not tokens:
        return 0

    # get score for each word, put zero if not found, then average
    total_score = sum(word_map.get(token.lower(), 0) for token in tokens)
    return total_score / len(tokens)
ee9e57c999539c0126e5c0d38711a617e82dab10
26,874
def point_line_seg_distance(p1: tuple, p2: tuple, p: tuple, extend_line: bool = False):
    """
    Shortest distance from point ``p`` to the segment ``p1``-``p2``.

    Returns ``(distance, (x, y))`` where ``(x, y)`` is the closest point on
    the segment (or on the infinite line when ``extend_line`` is True).
    """
    assert isinstance(p1, tuple) and len(p1) == 2
    assert isinstance(p2, tuple) and len(p2) == 2
    assert isinstance(p, tuple) and len(p) == 2

    # Code adapted from
    # https://stackoverflow.com/questions/849211/shortest-distance-between-a-point-and-a-line-segment
    ax, ay = float(p1[0]), float(p1[1])
    bx, by = float(p2[0]), float(p2[1])
    qx, qy = float(p[0]), float(p[1])

    seg_x = bx - ax
    seg_y = by - ay
    seg_len_sq = seg_x * seg_x + seg_y * seg_y

    if seg_len_sq == 0:
        # Degenerate segment: both endpoints coincide at p1.
        dx = ax - qx
        dy = ay - qy
        return (dx * dx + dy * dy) ** .5, (ax, ay)

    # Parametric position of the projection of p onto the line.
    u = ((qx - ax) * seg_x + (qy - ay) * seg_y) / seg_len_sq
    if not extend_line:
        # Clamp to the segment's endpoints.
        u = min(1, max(0, u))

    cx = ax + u * seg_x
    cy = ay + u * seg_y
    dx = cx - qx
    dy = cy - qy
    return (dx * dx + dy * dy) ** .5, (cx, cy)
47c669bb5fc3db9d97cf2ac03179b2fc700ad0a5
26,875
def Tanh_Derivative(x):
    """
    Derivative of the Tanh activation function: 1 - tanh(x)^2.

    :param x: data to evaluate the derivative at
    :return: value of the Tanh derivative at x
    """
    # Reuse the sibling Tanh helper rather than recomputing tanh directly.
    t = Tanh(x)
    return 1.0 - t * t
9142765acc9299ef3b35a16538956dfa3d61efc8
26,876
async def api_update_donation(
    data: CreateDonation, donation_id=None, g: WalletTypeInfo = Depends(get_key_type)
):
    """Update a donation with the data given in the request.

    Responds 400 when no donation id is supplied, 404 when the donation
    does not exist, and 403 when it belongs to a different wallet.
    """
    if donation_id:
        donation = await get_donation(donation_id)

        if not donation:
            raise HTTPException(
                status_code=HTTPStatus.NOT_FOUND, detail="Donation does not exist."
            )

        # Only the wallet that owns the donation may modify it.
        if donation.wallet != g.wallet.id:
            raise HTTPException(
                status_code=HTTPStatus.FORBIDDEN, detail="Not your donation."
            )

        donation = await update_donation(donation_id, **data.dict())
    else:
        raise HTTPException(
            status_code=HTTPStatus.BAD_REQUEST, detail="No donation ID specified"
        )
    return donation.dict()
d60f59d50e2515a8e5402c04b13c51a53855fa80
26,877
def random_monoalpha_cipher(pool=None):
    """Generate a Monoalphabetic Cipher: a dict mapping each symbol of the
    pool to a randomly chosen, distinct symbol of the same pool."""
    if pool is None:
        pool = letters + digits

    source = list(pool)
    target = list(pool)
    # Shuffle only the target alphabet; zipping against the untouched source
    # yields the substitution table.
    shuffle(target)
    return dict(zip(source, target))
a12976552fc57183e939bd04f2c61cb175d3adec
26,878
def mrf_slice_example(slice):
    """convert mrf slice data to tensorflow

    Args:
        slice: Dict. Contains keys: immrf/data, immrf/path, tmap/data,
            tmap/path, mask/data, mask/path

    Returns:
        tf.train.Example holding, for each of immrf/tmap/mask: the raw
        serialized array bytes, its shape, and its source path.
    """
    immrf = slice["immrf/data"]
    tmap = slice["tmap/data"]
    mask = slice["mask/data"]
    slice_path = slice["immrf/path"]
    tmap_path = slice["tmap/path"]
    mask_path = slice["mask/path"]

    # Arrays are stored as raw bytes with their shapes recorded alongside so
    # the reading side can reconstruct them.
    feature = {
        "immrf/data": record_feature.bytes_feature(immrf.tostring()),
        "immrf/shape": record_feature.int64_list_feature(immrf.shape),
        "immrf/path": record_feature.bytes_feature(tf.compat.as_bytes(slice_path)),
        "tmap/data": record_feature.bytes_feature(tmap.tostring()),
        "tmap/shape": record_feature.int64_list_feature(tmap.shape),
        "tmap/path": record_feature.bytes_feature(tf.compat.as_bytes(tmap_path)),
        "mask/data": record_feature.bytes_feature(mask.tostring()),
        "mask/shape": record_feature.int64_list_feature(mask.shape),
        "mask/path": record_feature.bytes_feature(tf.compat.as_bytes(mask_path)),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature))
9c8995cd8a68793e505e525da144dabccaba5ff7
26,879
from typing import Union


def get_comment(entity: Union[Table, TableProtocol, Column, Constraint]) -> str:
    """Get comment on entity.

    Returns the attached comment, or "" when none is set; raises ValueError
    for unsupported entity types.
    """
    # TableProtocol is checked before Table so protocol objects are resolved
    # to their underlying Table first.
    if isinstance(entity, TableProtocol):
        return to_table(entity).comment or ""
    elif isinstance(entity, Table):
        return entity.comment or ""
    elif isinstance(entity, Column):
        return entity.comment or ""
    elif isinstance(entity, Constraint):
        # Constraints keep their comment inside the `info` dict, when present.
        if hasattr(entity, "info"):
            return getattr(entity, "info").get("comment") or ""
        return ""
    raise ValueError("invalid entity passed to get_comment")
4517d43980f596d1a31773a4322805331d871890
26,880
from typing import List def merge_neighbor_spans(spans: List[Span], distance) -> List[Span]: """Merge neighboring spans in a list of un-overlapped spans: when the gaps between neighboring spans is not larger than the specified distance, they are considered as the neighbors. Args: spans (List[Span]): The input list of spans. distance (int, optional): The upper bound of interval gaps between two neighboring spans. Defaults to 1. Returns: List[Span]: A list of merged spans """ is_neighboring_spans = ( lambda span1, span2: min( abs(span1.start - span2.end), abs(span1.end - span2.start) ) <= distance ) # It assumes non-overlapped intervals within the list merge_neighboring_spans = lambda span1, span2: Span( min(span1.start, span2.start), max(span1.end, span2.end) ) spans = sorted(spans, key=lambda ele: ele.start) # When sorted, only one iteration round is needed. if len(spans) == 0: return [] if len(spans) == 1: return spans cur_merged_spans = [spans[0]] for cur_span in spans[1:]: prev_span = cur_merged_spans.pop() if is_neighboring_spans(cur_span, prev_span): cur_merged_spans.append(merge_neighboring_spans(prev_span, cur_span)) else: # In this case, the prev_span should be moved to the bottom of the stack cur_merged_spans.extend([prev_span, cur_span]) return cur_merged_spans
a059228aefefe52c7417d414ed517cb36ee8b753
26,881
def handle_all_serials(oid, *args):
    """Return dict of oid to serialno from store() and tpc_vote().

    Raises an exception if one of the calls raised an exception.

    The storage interface got complicated when ZEO was introduced.
    Any individual store() call can return None or a sequence of
    2-tuples where the 2-tuple is either oid, serialno or an
    exception to be raised by the client.

    The original interface just returned the serialno for the
    object.

    The updated multi-commit API returns nothing from store(), and
    returns a sequence of resolved oids from tpc_vote.

    NOTE: This function is removed entirely in ZODB 5.
    """
    result = {}
    for arg in args:
        if isinstance(arg, bytes):
            # Old protocol: a bare serial for the oid passed in.
            result[oid] = arg
        elif arg:
            for item in arg:
                if isinstance(item, bytes):
                    # New protocol. The caller will use the tid
                    # returned from tpc_finish if we return a dict
                    # missing the oid.
                    continue
                # NB: rebinds `oid`, mirroring the original semantics for any
                # bytes argument that follows a tuple sequence.
                oid, serial = item
                if not isinstance(serial, bytes):
                    raise serial  # error from ZEO server
                result[oid] = serial
    return result
f2ff56d43f40f4bad5a802acbbe7a8fa869831d3
26,882
def remove_null_fields(data):
    """Recursively remove all keys with falsy values from a dict, in place.

    Note: any falsy value (None, 0, "", [], {}) is removed, matching the
    historical behavior, not just None. Dicts nested in lists are cleaned
    recursively; list elements themselves are never removed.

    Returns the same (mutated) dict for convenience.
    """
    # Fix: iterate over a snapshot of the keys — deleting from a dict while
    # iterating data.items() raises RuntimeError in Python 3.
    for k in list(data):
        v = data[k]
        if isinstance(v, dict):
            remove_null_fields(v)
        if isinstance(v, list):
            for element in v:
                # Fix: only recurse into dict elements; recursing into
                # scalars crashed with AttributeError before.
                if isinstance(element, dict):
                    remove_null_fields(element)
        if not data[k]:
            del data[k]
    return data
3dc90d215d899afb2316acb92b5755fd93da204f
26,883
from scipy.ndimage.filters import gaussian_filter1d
from math import log
import numpy


def epsautoconfeval(epsilon, plotTitle):
    """
    Investigate distance properties for clustering autoconfiguration:
    plots the k-nearest-neighbor distance function and locates its "knee"
    (maximum upward curvature) as an automatic epsilon estimate, drawn
    next to the manually determined epsilon for comparison.

    See SegmentedMessages#autoconfigureDBSCAN

    :param plotTitle: Part of plot's filename and header
    :param epsilon: The manually determined "best" epsilon for comparison
    :return: the automatically determined epsilon (distance value of the
        smoothed k-nearest-neighbor curve at the curvature maximum)
    """
    # # distribution of all distances in matrix
    # hstplt = SingleMessagePlotter(specimens, tokenizer+'-distance-distribution-histo', args.interactive)
    # hstplt.histogram(tril(sm.distances), bins=[x / 50 for x in range(50)])
    # plt.axvline(epsilon, label="manually determined eps={:0.2f}".format(epsilon), c="red")
    # hstplt.text('max {:.3f}, mean {:.3f}'.format(sm.distances.max(), sm.distances.mean()))
    # hstplt.writeOrShowFigure()
    # del hstplt

    neighbors = tyl.sm.neighbors()  # list of tuples: (index from sm.distances, distance) sorted by distance

    # Two subplots: [0] the selected smoothed curve + diagnostics,
    # [1] the raw k-nearest curves for k in krange.
    mmp = MultiMessagePlotter(specimens, "knn-distance-funtion_" + plotTitle, 1, 2, isInteractive=False)
    mmp.axes[0].axhline(epsilon, label="manually determined eps={:0.2f}".format(epsilon), c="red")
    mmp.axes[1].axhline(epsilon, label="manually determined eps={:0.2f}".format(epsilon), c="red")

    krange = (0, 16, 1)

    for k in range(*krange):
        knearest = sorted([nfori[k][1] for nfori in neighbors])
        mmp.plotToSubfig(1, knearest, alpha=.4, label="k={}".format(k))

    # kneedle approach: yields unusable results. does not find a knee!

    # smoothing approach
    sigma = log(len(neighbors))
    knearest = dict()
    smoothknearest = dict()
    seconddiff = dict()  # type: Dict[int, numpy.ndarray]
    # Best candidate so far: (k, argmax index of f'', relative curvature).
    seconddiffMax = (0, 0, 0)

    # ksteepeststats = list()

    # can we omit k = 0 ?
    # --> No - recall and even more so precision deteriorates for dns and dhcp (1000s)
    for k in range(0, len(neighbors) // 10):  # round(2*log(len(neighbors)))
        knearest[k] = sorted([nfori[k][1] for nfori in neighbors])
        smoothknearest[k] = gaussian_filter1d(knearest[k], sigma)
        # max of second difference (maximum upwards curvature) as knee
        seconddiff[k] = numpy.diff(smoothknearest[k], 2)
        seconddiffargmax = seconddiff[k].argmax()
        # noinspection PyArgumentList
        diffrelmax = seconddiff[k].max() / smoothknearest[k][seconddiffargmax]
        # Ignore knees too close to either end of the curve (within 2*sigma).
        if 2*sigma < seconddiffargmax < len(neighbors) - 2*sigma and diffrelmax > seconddiffMax[2]:
            seconddiffMax = (k, seconddiffargmax, diffrelmax)
        # ksteepeststats.append((k, seconddiff[k].max(), diffrelmax))
    # print(tabulate(ksteepeststats, headers=("k", "max(f'')", "max(f'')/f")))

    # prepare to plot the smoothed nearest neighbor distribution and its second derivative
    k = seconddiffMax[0]
    # +1 because numpy.diff(.., 2) shortens the array by two elements,
    # shifting the argmax relative to the smoothed curve.
    x = seconddiffMax[1] + 1

    # # calc mean of first derivative to estimate the noisiness (closer to 1 is worse)
    # firstdiff = numpy.diff(smoothknearest[k], 1)
    # # alt: integral
    # diag = numpy.empty_like(smoothknearest[k])
    # for i in range(diag.shape[0]):
    #     diag[i] = smoothknearest[k][0] + i*(smoothknearest[k][-1] - smoothknearest[k][0])/smoothknearest[k][-1]
    # belowdiag = diag - smoothknearest[k]
    # print("f' median={:.2f}".format(numpy.median(firstdiff)))
    # print("diag-f={:.2f}".format(sum(belowdiag)))

    mmp.plotToSubfig(0, smoothknearest[k], label="smooth k={}, sigma={:.2f}".format(k, sigma), alpha=.4)
    mmp.plotToSubfig(1, smoothknearest[k], label="smooth k={}, sigma={:.2f}".format(k, sigma), alpha=1, color='blue')
    mmp.plotToSubfig(0, knearest[k], alpha=.4)
    ax0twin = mmp.axes[0].twinx()
    # mmp.plotToSubfig(ax0twin, seconddiff[k], linestyle='dotted', color='cyan', alpha=.4)
    # noinspection PyTypeChecker
    mmp.plotToSubfig(ax0twin, [None] + seconddiff[k].tolist(), linestyle='dotted', color='magenta', alpha=.4)

    # epsilon = knearest[k][x]
    epsilon = smoothknearest[k][x]

    mmp.axes[0].axhline(epsilon, linestyle='dashed', color='blue', alpha=.4,
                        label="curvature max {:.2f} of k={}".format(
                            epsilon, k))
    mmp.axes[0].axvline(x, linestyle='dashed', color='blue', alpha=.4)

    mmp.writeOrShowFigure(filechecker.reportFullPath)
    del mmp

    # if args.interactive:
    #     from tabulate import tabulate
    #     IPython.embed()
    # exit(0)

    return epsilon
4c23d8c50dd182c0d045b047e44c2d2484482eec
26,884
def equal_value_solution(par_dict1=params ,par_dict2=params1 , mean=-0.4, variance=0.35,N=1000): """ This function sets average tax revenue equal under different sets of parameters by changing the base tax rate. This is done by minizing the squared difference between old and new average revenue using the base tax rate tg as the argument. Args: mean: Distribution mean par_dict1: Original parameters - will be fixed after calculation par_dict2: New parameters from which a new base tax rate is calculated var: Distribution variance N: Number of draws tg: level of base housing tax - set separately from other parameters Returns: Tax rate required for different parameters to give the same average tax revenue. """ #Set seed np.random.seed(1) #Draw m m_draw = np.random.lognormal(mean, variance, N) #Set parameters tg_set = par_dict1["tg"] #Calculate original revenue original_rev = calc_rev_5(tg_set,par_dict1, m_draw, N) print(f'Tax revenue under the first set of parameters: {original_rev}') #Define value objective as squared difference between old and new revenue def equal_value_objective(tg_set): value_of_choice=(calc_rev_5(tg_set, par_dict2, m_draw, N)-original_rev)**2 return value_of_choice #Minimize squared distance solution_g=optimize.minimize_scalar(equal_value_objective,method='bounded', bounds=(0,np.inf)) tg_final=solution_g.x print(f'Tax revenue under new set of parameters with solution base tax rate {calc_rev_5(tg_final, par_dict2, m_draw, N)}') print(f' Solution tax rate {tg_final}') return tg_final
8a6d2b15c2b6b751647f62a3d0172cde45f23f94
26,885
def dist_l2(v, w):
    """
    Euclidean (L2) distance between two vectors.

    :param v: first vector (indexable sequence of numbers); drives the length
    :param w: second vector; must have at least len(v) elements
        (a shorter w raises IndexError, same contract as before)
    :return: the distance as a numpy scalar (np.sqrt of a Python number)
    """
    # Sum of squared component differences via a generator expression
    # instead of a manual accumulator loop.
    squared = sum((v[i] - w[i]) ** 2 for i in range(len(v)))
    return np.sqrt(squared)
c65dced806a0e121263f3a4e26e88bd88e43fa43
26,886
def convert_single_example_dual(ex_index, example, max_seq_length, tokenizer, label_list):
    """Converts a single `InputExample` into one or more `DualInputFeatures`.

       Args:
         ex_index: index of the example (examples with index < 5 are logged)
         example: InputExample
         max_seq_length: maximal sequence length of query + one passage in tokens
         tokenizer: mapping a string to a list of tokens
         label_list: labels for all passage candidates

       Returns:
           DualInputFeatures representing the records

    This flattens out the tokens of all candidates in a single sequence for the passages
    The query is only listed once and has form [CLS] q [SEP]
    Passages are as many as candidates and have form [CLS] p [SEP]
    """
    # Token budget left for each passage after reserving FLAGS.max_query_length
    # positions for the query.
    max_seq_length_passage = max_seq_length - FLAGS.max_query_length
    # NOTE(review): label_map is built but never read below -- presumably kept
    # for parity with sibling converters; confirm before removing.
    label_map = {}
    for (i, label) in enumerate(label_list):
        label_map[label] = i

    tokens_a = tokenizer.tokenize(example.text_a)
    # Reserve 2 positions for [CLS] and [SEP] around the query.
    _truncate_seq(tokens_a, FLAGS.max_query_length - 2)

    assert (len(example.text_b_list)) == FLAGS.num_candidates
    tokens_b_list = []
    for text_b in example.text_b_list:
        tokens_b_list.append(tokenizer.tokenize(text_b))
    for tokens_b in tokens_b_list:
        # Modifies `tokens_a` and `tokens_b` in place so that the total
        # length is less than the specified length.
        # Account for [CLS], [SEP], [SEP] with "- 3"
        # _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 4)
        _truncate_seq(tokens_b, max_seq_length_passage - 2)

    # The convention in BERT is:
    # (a) For sequence pairs:
    #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
    #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
    # (b) For single sequences:
    #  tokens:   [CLS] the dog is hairy . [SEP]
    #  type_ids: 0     0   0   0  0     0 0
    #
    # Where "type_ids" are used to indicate whether this is the first
    # sequence or the second sequence. The embedding vectors for `type=0` and
    # `type=1` were learned during pre-training and are added to the wordpiece
    # embedding vector (and position vector). This is not *strictly* necessary
    # since the [SEP] token unambiguously separates the sequences, but it makes
    # it easier for the model to learn the concept of sequences.
    #
    # For classification tasks, the first vector (corresponding to [CLS]) is
    # used as as the "sentence vector". Note that this only makes sense because
    # the entire model is fine-tuned.

    # Accumulators for the flattened passage candidates (all candidates
    # concatenated in one sequence, see docstring).
    all_segment_ids = []
    all_input_ids = []
    all_input_mask = []
    all_unique_ids = []
    all_label_ids = []
    all_tokens = []

    query_tokens = []
    query_input_ids = []
    query_input_mask = []
    query_segment_ids = []

    # first process the query: [CLS] q [SEP], segment id 0, padded to
    # FLAGS.max_query_length.
    tokens = []
    segment_ids = []
    tokens.append("[CLS]")
    segment_ids.append(0)
    for token in tokens_a:
        tokens.append(token)
        segment_ids.append(0)
    tokens.append("[SEP]")
    segment_ids.append(0)
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    input_mask = [1] * len(input_ids)
    while len(input_ids) < FLAGS.max_query_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)
    assert len(input_ids) == FLAGS.max_query_length
    assert len(input_mask) == FLAGS.max_query_length
    assert len(segment_ids) == FLAGS.max_query_length
    # copy to query variables
    query_tokens.extend(tokens)
    query_input_ids.extend(input_ids)
    query_segment_ids.extend(segment_ids)
    query_input_mask.extend(input_mask)

    # Then each passage candidate: [CLS] p [SEP], segment id 1, padded to
    # max_seq_length_passage, appended onto the flat accumulators.
    for cand_index, tokens_b in enumerate(tokens_b_list):
        tokens = []
        segment_ids = []
        tokens.append("[CLS]")
        segment_ids.append(1)
        if tokens_b:
            for token in tokens_b:
                tokens.append(token)
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)

        # Zero-pad up to the sequence length.
        while len(input_ids) < max_seq_length_passage:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(1)

        assert len(input_ids) == max_seq_length_passage
        assert len(input_mask) == max_seq_length_passage
        assert len(segment_ids) == max_seq_length_passage

        label_id = int(example.labels[cand_index])
        all_input_ids.extend(input_ids)
        all_input_mask.extend(input_mask)
        all_unique_ids.append(example.guid + "-" + example.text_b_guids[cand_index])
        all_label_ids.append(label_id)
        all_segment_ids.extend(segment_ids)
        all_tokens.extend(tokens)

    # Log the first few examples for debugging.
    if ex_index < 5:
        tf.logging.info("*** Example ***")
        tf.logging.info("guid: %s" % (example.guid))
        # query info
        tf.logging.info(
            "query tokens: %s" % " ".join([tokenization.printable_text(x) for x in query_tokens]))
        tf.logging.info("query input_ids: %s" % " ".join([str(x) for x in query_input_ids]))
        tf.logging.info("query input_mask: %s" % " ".join([str(x) for x in query_input_mask]))
        tf.logging.info("query segment_ids: %s" % " ".join([str(x) for x in query_segment_ids]))

        tf.logging.info(
            "tokens: %s" % " ".join([tokenization.printable_text(x) for x in all_tokens]))
        tf.logging.info("input_ids: %s" % " ".join([str(x) for x in all_input_ids]))
        tf.logging.info("input_mask: %s" % " ".join([str(x) for x in all_input_mask]))
        tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in all_segment_ids]))
        tf.logging.info("labels ids: %s" % " ".join([str(x) for x in all_label_ids]))
        tf.logging.info("prior scores: %s" % " ".join([str(x) for x in example.scores]))
        tf.logging.info("labels str: %s" % " ".join([str(x) for x in example.labels]))

    feature = DualInputFeatures(
        input_ids_1=query_input_ids,
        input_mask_1=query_input_mask,
        segment_ids_1=query_segment_ids,
        input_ids_2=all_input_ids,
        input_masks_2=all_input_mask,
        segment_ids_2=all_segment_ids,
        label_ids=all_label_ids,
        unique_ids=all_unique_ids,
        scores=example.scores)
    return feature
4c89637846ad9b79db1450bd1af4bfa38f80e08c
26,887
def find_recipes(rounds):
    """
    Calculate the ten recipes that follow the first `rounds` recipes.

    :param rounds: the number of rounds
    :return: a list of the 10 recipe scores after position `rounds`

    >>> find_recipes(5)
    [0, 1, 2, 4, 5, 1, 5, 8, 9, 1]
    >>> find_recipes(18)
    [9, 2, 5, 1, 0, 7, 1, 0, 8, 5]
    >>> find_recipes(2018)
    [5, 9, 4, 1, 4, 2, 9, 8, 8, 2]
    """
    scoreboard = [3, 7]
    pos_a, pos_b = 0, 1
    target = rounds + 10
    while len(scoreboard) < target:
        combined = scoreboard[pos_a] + scoreboard[pos_b]
        # combined is at most 18, so it has at most two digits.
        if combined >= 10:
            scoreboard.append(combined // 10)
        scoreboard.append(combined % 10)
        pos_a = (pos_a + scoreboard[pos_a] + 1) % len(scoreboard)
        pos_b = (pos_b + scoreboard[pos_b] + 1) % len(scoreboard)
    return scoreboard[rounds:rounds + 10]
281cd92094cf9c4be608de5a57075f7f38a531d0
26,888
def query_abs_over_wiki(abstract):
    """
    Build the Elasticsearch query matching `abstract` against the
    `entity` field of wiki_entity documents.
    """
    highlight_spec = {
        "pre_tags": ["@@"],
        "post_tags": ["@@"],
        "fields": {"*": {}},
        "fragment_size": 2147483647,
    }
    return {
        "track_total_hits": "true",
        "version": "true",
        "size": 1000,
        "sort": [{"_score": {"order": "desc"}}],
        "_source": {"includes": ["entity", "stem", "description"]},
        "stored_fields": ["*"],
        "script_fields": {},
        "docvalue_fields": [],
        "query": {"match": {"entity": abstract}},
        "highlight": highlight_spec,
    }
7e35af5b210cd3485113d7cb5b72f7dd32745d94
26,889
def mitm_digest_preimage(tshash_params, digest, except_bitstrings=(), max_length=_MITM_DEFAULT_MAX_LENGTH):
    """Find a preimage whose hash arrives at `digest`, via a
    meet-in-the-middle attack delegated to `mitm`."""
    # We currently don't discard bits from the state to get a digest
    # possible_truncations = ('0b01', '0b11')
    candidate_suffixes = ('',)
    end_states = tuple(digest + suffix for suffix in candidate_suffixes)
    return mitm(tshash_params=tshash_params,
                end_states=end_states,
                except_bitstrings=except_bitstrings,
                max_length=max_length)
7e5dc01a0f6f16d9cf064f7ba8070819c0543706
26,890
import re


def similarityWithResult(Address_1: Address, Address_2: Address):
    """Compute the similarity between two addresses.

    :param Address_1: first address, an Address returned by Geocoding.normalizing
    :param Address_2: second address, an Address returned by Geocoding.normalizing
    :return: the similarity as a float, parsed from the Java library's
        textual output
    :raises TypeError: if either argument is not an Address
    """
    # Guard clause with isinstance (accepts Address subclasses too) instead
    # of the previous exact type(...) == Address comparison.
    if not (isinstance(Address_1, Address) and isinstance(Address_2, Address)):
        raise TypeError(
            "Geocoding.similarityWithResult仅支持计算两个由 Geocoding.normalizing 方法返回的Address类之间的相似度")
    geocoding = jpype.JClass('io.patamon.geocoding.Geocoding')
    # Raw string for the regex; captures the "similarity=<number>" line of
    # the Java toString() output.
    pattern = re.compile(r"similarity=(.*?)\n\)", re.S)
    result_text = str(geocoding.similarityWithResult(
        Address_1.__java__, Address_2.__java__).toString())
    # float(...) replaces eval(...): the captured group is a plain number,
    # so evaluating it as code was both unnecessary and unsafe.
    return float(re.findall(pattern, result_text)[0])
b4bf880eeb5b0666c06ef987d89beeb952ad6e1a
26,891
def longitudinal_summary(ds, lat_dim='lat', lon_dim='lon', lon_res=5):
    """Compute longitudinal (bin) statistics, averaging over latitude.

    Bins the longitude coordinate into `lon_res`-degree bins spanning
    [-180, 180] and averages over all dimensions of the longitude
    coordinate (i.e. including latitude).
    """
    # Check lat_dim and lon_dim
    # Check lon_res < 180
    # TODO: lon between -180 and 180 , lat between -90 and 90
    reduce_dims = list(ds[lon_dim].dims)
    bin_edges = np.arange(-180, 180 + lon_res, step=lon_res)
    # Label each bin with its midpoint.
    midpoints = bin_edges[:-1] + lon_res / 2
    binned = ds.groupby_bins(lon_dim, bin_edges, labels=midpoints)
    return binned.mean(reduce_dims)
7b2c6abf8eaf399587c58aea5783c474542602ec
26,892
def validFilename(string):
    """
    Return a valid filename derived from the input string by stripping
    characters that are not allowed (as defined by _allowedfilenamechars()).

    :param string: the candidate filename
    :return: `string` with all disallowed characters removed
    """
    allowed = _allowedfilenamechars()
    # str.join over a generator avoids the quadratic cost of repeated
    # string concatenation in the original loop.
    return ''.join(ch for ch in string if ch in allowed)
14dd0cb10dd775f61035ec1a85c74f981c5432aa
26,893
def contains_source_code(http_response):
    """
    :param http_response: The HTTP response object
    :return: A tuple with:
                - re.match object if the file_content matches a source code file
                - A tuple containing the programming language names
             (None, None) when no genuine source-code match is found.
    """
    body = http_response.get_body()
    # Return the first match that is not a known false positive.
    for match, _, _, lang in _multi_re.query(body):
        if not is_false_positive(http_response, match, lang):
            return match, lang
    return None, None
eadabdf1782e6785b34b7c65853e1cecfc81ab81
26,894
def wht(at1, at2, ints):
    """
    Assign weights for the Hessian matrix given 1-2, 1-3 and 1-4 interactions.

    :param at1: first atom
    :param at2: second atom
    :param ints: tuple of the 1-2, 1-3 and 1-4 interacting pair lists
    :return: 0.0 on the diagonal (at1 == at2); otherwise the weight constant
        for the matching interaction class, falling back to the generic weight
    """
    pair = [at1, at2]
    bonds, angles, torsions = ints
    if at1 == at2:
        return 0.0
    if pair in bonds:
        return co.WEIGHTS['h12']
    if pair in angles:
        return co.WEIGHTS['h13']
    if pair in torsions:
        return co.WEIGHTS['h14']
    return co.WEIGHTS['h']
b9e5e01743a6f732c63fbfc71dc264293d890d1b
26,895
def _get_users_with_perms(obj, attach_perms=False, with_group_users=None):
    """
    Returns a list of users with their permissions on an object obj.

    Falls back to get_users_with_perms when get_user_perms(obj) yields None.
    With attach_perms, returns a dict mapping each user to the set of their
    permission codenames; otherwise a flat list of distinct users.
    """
    user_obj_perms = get_user_perms(obj)
    if user_obj_perms is None:
        return get_users_with_perms(
            obj, attach_perms=attach_perms, with_group_users=with_group_users)

    if attach_perms:
        user_perms = {}
        for perm in user_obj_perms:
            # Group permission codenames per user.
            user_perms.setdefault(perm.user, set()).add(perm.permission.codename)
    else:
        user_perms = [
            perm.user for perm in user_obj_perms.only('user').distinct('user')
        ]

    if with_group_users:
        user_perms = _get_group_users_with_perms(obj, attach_perms, user_perms)

    return user_perms
79e0f80d28133e2dc1aef69bcf727d572d259d5b
26,896
import base64


def get_file_data(request, oid, version=None, status=None):
    """Get document data.

    Returns the raw bytes wrapped in Binary for text/xml requests,
    otherwise the base64-encoded text representation.
    """
    document = get_document(request, oid, version, status)
    raw = document.data.data
    if request.content_type.startswith('text/xml'):
        payload = Binary(raw)
    else:
        payload = base64.b64encode(raw).decode()
    return {'data': payload}
1195671fb7f428e624cccb1f7dc89e92b7ab5d13
26,897
def batch_get_all_movies():
    """Attempt to get all movies downloaded and serialized. Pickup where the last
    attempt left off."""
    # Total number of movies to process.
    goal = len(movie_id_df.movie_id)
    # Index where the previous run stopped -- presumably derived from row
    # files already on disk; TODO confirm check_row_files() semantics.
    pickup = check_row_files()
    for i in range(5000, goal, 5000):
        print(f"Batch serializing the next 5000 movies (to {i}).")
        # NOTE(review): `pickup` never advances inside this loop, so every
        # iteration passes the range [pickup, i); confirm batch_serialize
        # skips already-serialized rows, otherwise work is repeated.
        batch_serialize(pickup, i)
    # NOTE(review): when goal is an exact multiple of 5000, range() above
    # stops at goal-5000 and this branch is skipped, so the final chunk up
    # to `goal` is never serialized -- verify this edge case.
    if goal % 5000 != 0:
        remainder = goal % 5000
        batch_serialize(goal - remainder, goal)
    return True
0225c89dfa76223d1354df48c5cb441dbb171461
26,898
def dataset_from_datasource(dsdict, *, datasource_name, dataset_name=None, **dsrc_args):
    """Transformer: Create a Dataset from a DataSource object

    This is a thin wrapper around Dataset.from_datasource that conforms to
    the transformer API.

    Parameters
    ----------
    dsdict: dict, ignored.
        Because this is a source, this argument is unnecessary (except to
        conform to the transformer function API) and is ignored
    datasource_name: str, required
        Name of datasource in DataSource catalog
    dataset_name: str
        Name of the generated Dataset. If None, this will be the
        `datasource_name`
    dsrc_args: dict
        Arguments are the same as the `Dataset.from_datasource()` constructor

    Returns
    -------
    dict: {dataset_name: Dataset}
    """
    # Explicit None check (an empty-string name would be kept as-is).
    name = datasource_name if dataset_name is None else dataset_name
    dataset = Dataset.from_datasource(dataset_name=name,
                                      datasource_name=datasource_name,
                                      **dsrc_args)
    return {name: dataset}
644bb001c874c0e3d4552363b5edb8ecb369ee05
26,899