content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
from typing import Any


def check_int(data: Any) -> int:
    """Validate that *data* is an ``int`` and pass it through unchanged.

    Raises:
        TypeError: if *data* is not an instance of ``int``.
    """
    if isinstance(data, int):
        return data
    raise TypeError(data)
814155f2407cd0e8b580372679f4cecfcc087d9e
32,100
def UpdateStatus(key_name, status):
    """Update the status/state of the specified bug.

    Args:
        key_name: Key name of the bug to update.
        status: A string containing the new status of the bug.

    Returns:
        Bug object with the updated target_element information.
    """
    bug = GetBugByKey(key_name)
    bug.status = status
    # Derive the provider-specific state from the new status string.
    bug.state = bugs_util.StateFromStatus(status, bug.provider)
    bug.put()  # persist the updated entity
    return bug
614075959da3603cefd220d45c1e6c5c110bf17d
32,101
def onehot_encoding(categories, max_categories):
    """One-hot encode a sequence of integer category labels.

    Given a list of integer categories (out of a set of max_categories),
    return a matrix with a 1.0 in each row at the label's column.
    """
    n_rows = len(categories)
    encoded = np.zeros((n_rows, max_categories))
    columns = [int(label) for label in categories]
    encoded[np.arange(n_rows), columns] = 1.0
    return encoded
89203b285faed64b4519a2ad5234b77b4fa837aa
32,102
def canny(gl_image, low_threshold=50, high_threshold=150):
    """Applies the Canny transform.

    :param gl_image: single-channel (grayscale) image array.
    :param low_threshold: lower hysteresis threshold for edge linking.
    :param high_threshold: upper hysteresis threshold for strong edges.
    :return: binary edge map produced by ``cv2.Canny``.
    """
    return cv2.Canny(gl_image, low_threshold, high_threshold)
de2d7194e9df6ab4cc7cda25a0b5ccd37f81822b
32,103
def res_stage(block:nn.Module, ic:int, oc:int, num_layers:int, dflag:bool=True, btype:str='basic', fdown:bool=False):
    """
    Build one residual stage: a list of ``num_layers`` stacked resblocks.

    Arguments
    ---------
    block : nn.Module
        the block type to be stacked one upon another
    ic : int
        # of input channels
    oc : int
        # of output channels
    num_layers : int
        # of blocks to be stacked
    dflag : bool
        Whether the first resblock needs to perform downsampling. Defaults to True.
    btype : str, should be one of ['basic', 'bottleneck']
        The type of resblock to be used. Defaults to 'basic'
    fdown : bool
        If true the side branch *must* contain a conv block, whether it
        performs downsampling or not. Defaults to False.

    Returns
    -------
    layers : list
        A list containing all the nn.Module that is required for this layer.
    """
    # The first block maps ic -> oc and may downsample; the rest keep oc -> oc.
    stage = [block(ic, oc, dflag=dflag, btype=btype, fdown=fdown)]
    stage.extend(block(oc, oc, btype=btype) for _ in range(num_layers - 1))
    return stage
6cd95ba6d923265093daca2bdf888bde248dfd12
32,104
def getFirstValid(opts, default):
    """Returns the first valid entry from `opts`, or `default` if none found.

    Valid is defined as ``if o`` returns true.
    """
    return next((candidate for candidate in opts if candidate), default)
799a6ea4a993f0a112fa38b882566d72a0d223e0
32,105
from typing import Dict
from typing import Any
import json


def read_json(path: str) -> Dict[Any, Any]:
    """
    Read a JSON file from disk and return its parsed contents.

    Args:
    - path: The path of the JSON file.

    Return:
    - The data of the JSON file.
    """
    _logger.debug(f"Reading JSON file: {path}")
    with open(path, encoding="utf-8") as fh:
        payload = json.load(fh)
    _logger.debug("JSON file loaded.")
    return payload
7f0b2f8cf35a6edebf46caeb805efb855fbb3d37
32,106
import os


def upload_thumb():
    """
    Used when another app has uploaded the main file to the cloud, and is
    sending the thumb for local display.

    Reads the thumbnail from the ``thumb_file`` field of the multipart
    request and saves it under ``IMAGE_FOLDER/thumbs/<filename>``.

    Returns:
        The literal string "OK" on success.
    """
    # Flask request context: uploaded file object plus its target file name.
    image = request.files["thumb_file"]
    fname = request.form["filename"]
    # NOTE(review): fname is user-supplied and joined into a path unchecked —
    # consider guarding against path traversal (e.g. secure_filename).
    tpath = os.path.join(IMAGE_FOLDER, "thumbs", fname)
    image.save(tpath)
    return "OK"
2638b8f6a1bf717120c2c8f16dc8702c0b5d88e9
32,107
def grid_density_gaussian_filter(data, size, resolution=None, smoothing_window=None):
    """Smoothing grid values with a Gaussian filter.

    :param [(float, float, float)] data: list of 3-dimensional grid coordinates
    :param int size: grid size
    :param int resolution: desired grid resolution
    :param int smoothing_window: size of the gaussian kernels for smoothing
    :return: smoothed grid values
    :rtype: numpy.ndarray
    """
    resolution = resolution if resolution else size
    # Scale factor mapping input coordinates onto the output resolution.
    k = (resolution - 1) / size
    w = smoothing_window if smoothing_window else int(0.01 * resolution)  # Heuristic
    # Pad the image by w on every side so the Gaussian kernel does not suffer
    # edge effects inside the region of interest; the padding is cropped below.
    imgw = (resolution + 2 * w)
    img = np.zeros((imgw, imgw))
    # Accumulate z weights at the scaled, padded (x, y) grid positions.
    for x, y, z in data:
        ix = int(x * k) + w
        iy = int(y * k) + w
        if 0 <= ix < imgw and 0 <= iy < imgw:
            img[iy][ix] += z
    z = ndi.gaussian_filter(img, (w, w))  # Gaussian convolution
    z[z <= BLANK_THRESH] = np.nan  # Making low values blank
    # Crop the padding back off before returning.
    return z[w:-w, w:-w]
d4c833aee72d28a760584cbd995595497d740531
32,108
from typing import List
from typing import Dict
from typing import Set
from typing import Tuple
import logging


def channel_message_to_zerver_message(
    realm_id: int,
    users: List[ZerverFieldsT],
    slack_user_id_to_zulip_user_id: SlackToZulipUserIDT,
    slack_recipient_name_to_zulip_recipient_id: SlackToZulipRecipientT,
    all_messages: List[ZerverFieldsT],
    zerver_realmemoji: List[ZerverFieldsT],
    subscriber_map: Dict[int, Set[int]],
    added_channels: AddedChannelsT,
    dm_members: DMMembersT,
    domain_name: str,
    long_term_idle: Set[int],
) -> Tuple[
    List[ZerverFieldsT],
    List[ZerverFieldsT],
    List[ZerverFieldsT],
    List[ZerverFieldsT],
    List[ZerverFieldsT],
]:
    """
    Convert a batch of Slack messages into Zulip import records.

    Returns:
    1. zerver_message, which is a list of the messages
    2. zerver_usermessage, which is a list of the usermessages
    3. zerver_attachment, which is a list of the attachments
    4. uploads_list, which is a list of uploads to be mapped in uploads records.json
    5. reaction_list, which is a list of all user reactions
    """
    zerver_message = []
    zerver_usermessage: List[ZerverFieldsT] = []
    uploads_list: List[ZerverFieldsT] = []
    zerver_attachment: List[ZerverFieldsT] = []
    reaction_list: List[ZerverFieldsT] = []

    total_user_messages = 0
    total_skipped_user_messages = 0
    for message in all_messages:
        slack_user_id = get_message_sending_user(message)
        if not slack_user_id:
            # Ignore messages without slack_user_id
            # These are Sometimes produced by Slack
            continue

        subtype = message.get("subtype", False)
        if subtype in [
            # Zulip doesn't have a pinned_item concept
            "pinned_item",
            "unpinned_item",
            # Slack's channel join/leave notices are spammy
            "channel_join",
            "channel_leave",
            "channel_name",
        ]:
            continue

        try:
            content, mentioned_user_ids, has_link = convert_to_zulip_markdown(
                message["text"], users, added_channels, slack_user_id_to_zulip_user_id
            )
        except Exception:
            print("Slack message unexpectedly missing text representation:")
            print(orjson.dumps(message, option=orjson.OPT_INDENT_2).decode())
            continue
        rendered_content = None

        # Resolve the Zulip recipient for the three Slack conversation kinds:
        # public/private channel, multi-party IM, and 1:1 DM.
        if "channel_name" in message:
            is_private = False
            recipient_id = slack_recipient_name_to_zulip_recipient_id[message["channel_name"]]
        elif "mpim_name" in message:
            is_private = True
            recipient_id = slack_recipient_name_to_zulip_recipient_id[message["mpim_name"]]
        elif "pm_name" in message:
            is_private = True
            sender = get_message_sending_user(message)
            members = dm_members[message["pm_name"]]
            # For a DM, deliver to the *other* party; remember the sender's
            # recipient too so a self-copy can be created below.
            if sender == members[0]:
                recipient_id = slack_recipient_name_to_zulip_recipient_id[members[1]]
                sender_recipient_id = slack_recipient_name_to_zulip_recipient_id[members[0]]
            else:
                recipient_id = slack_recipient_name_to_zulip_recipient_id[members[0]]
                sender_recipient_id = slack_recipient_name_to_zulip_recipient_id[members[1]]

        message_id = NEXT_ID("message")

        if "reactions" in message.keys():
            build_reactions(
                reaction_list,
                message["reactions"],
                slack_user_id_to_zulip_user_id,
                message_id,
                zerver_realmemoji,
            )

        # Process different subtypes of slack messages
        # Subtypes which have only the action in the message should
        # be rendered with '/me' in the content initially
        # For example "sh_room_created" has the message 'started a call'
        # which should be displayed as '/me started a call'
        if subtype in ["bot_add", "sh_room_created", "me_message"]:
            content = f"/me {content}"
        if subtype == "file_comment":
            # The file_comment message type only indicates the
            # responsible user in a subfield.
            message["user"] = message["comment"]["user"]

        file_info = process_message_files(
            message=message,
            domain_name=domain_name,
            realm_id=realm_id,
            message_id=message_id,
            slack_user_id=slack_user_id,
            users=users,
            slack_user_id_to_zulip_user_id=slack_user_id_to_zulip_user_id,
            zerver_attachment=zerver_attachment,
            uploads_list=uploads_list,
        )

        content += file_info["content"]
        has_link = has_link or file_info["has_link"]

        has_attachment = file_info["has_attachment"]
        has_image = file_info["has_image"]

        topic_name = "imported from Slack"

        zulip_message = build_message(
            topic_name,
            float(message["ts"]),
            message_id,
            content,
            rendered_content,
            slack_user_id_to_zulip_user_id[slack_user_id],
            recipient_id,
            has_image,
            has_link,
            has_attachment,
        )
        zerver_message.append(zulip_message)

        (num_created, num_skipped) = build_usermessages(
            zerver_usermessage=zerver_usermessage,
            subscriber_map=subscriber_map,
            recipient_id=recipient_id,
            mentioned_user_ids=mentioned_user_ids,
            message_id=message_id,
            is_private=is_private,
            long_term_idle=long_term_idle,
        )
        total_user_messages += num_created
        total_skipped_user_messages += num_skipped

        # For 1:1 DMs (other than messages to oneself), also create the
        # sender's own copy of the message.
        if "pm_name" in message and recipient_id != sender_recipient_id:
            (num_created, num_skipped) = build_usermessages(
                zerver_usermessage=zerver_usermessage,
                subscriber_map=subscriber_map,
                recipient_id=sender_recipient_id,
                mentioned_user_ids=mentioned_user_ids,
                message_id=message_id,
                is_private=is_private,
                long_term_idle=long_term_idle,
            )
            total_user_messages += num_created
            total_skipped_user_messages += num_skipped

    logging.debug(
        "Created %s UserMessages; deferred %s due to long-term idle",
        total_user_messages,
        total_skipped_user_messages,
    )
    return zerver_message, zerver_usermessage, zerver_attachment, uploads_list, reaction_list
3abd517aa2b84aaa3db803777e768324c7dac227
32,109
def print_train_time(start, end, device=None):
    """Prints difference between start and end time.

    Args:
        start (float): Start time of computation (preferred in timeit format).
        end (float): End time of computation.
        device ([type], optional): Device that compute is running on. Defaults to None.

    Returns:
        float: time between start and end in seconds (higher is longer).
    """
    elapsed = end - start
    print(f"\nTrain time on {device}: {elapsed:.3f} seconds")
    return elapsed
9935f2c12bac8e8beca38075dd6f80b7211318b7
32,110
def filter_false_positive_matches(
    matches,
    trace=TRACE_REFINE or TRACE_FILTER_FALSE_POSITIVE,
    reason=DiscardReason.FALSE_POSITIVE,
):
    """
    Return a filtered list of kept LicenseMatch matches and a list of
    discardable matches given a ``matches`` list of LicenseMatch by removing
    matches to false positive rules.
    """
    kept = []
    discarded = []
    for candidate in matches:
        if not candidate.rule.is_false_positive:
            kept.append(candidate)
            continue
        if trace:
            logger_debug(' ==> DISCARDING FALSE POSITIVE:', candidate)
        # Record why this match was dropped before setting it aside.
        candidate.discard_reason = reason
        discarded.append(candidate)
    return kept, discarded
4bf9ec5c88db99c8989ca740f41b726603b892a5
32,111
def Moving_Average_ADX(data, period=14, smooth=14, limit=18):
    """
    Moving Average ADX
    ADX Smoothing Trend Color Change on Moving Average and ADX Cross.
    Use on Hourly Charts - Green UpTrend - Red DownTrend - Black Choppy No Trend

    Source: https://www.tradingview.com/script/owwws7dM-Moving-Average-ADX/
    Translator: 阿财(Rgveda@github)(4910163#qq.com)

    Parameters
    ----------
    data : (N,) array_like
        The OHLC Kline sequence.
    period : int or None, optional
        DI length period. Default value is 14.
    smooth : int or None, optional
        ADX smoothing length period. Default value is 14.
    limit : int or None, optional
        ADX MA active limit threshold. Default value is 18.

    Returns
    -------
    adx, ADXm : ndarray
        ADXm indicator and trend direction sequence. (-1, 0, 1) means
        (Negative, No Trend, Positive).
    """
    up = data.high.pct_change()
    down = data.low.pct_change() * -1
    # True range smoothed with a Hull MA; denominator for +DI / -DI below.
    trur = TA_HMA(talib.TRANGE(data.high.values, data.low.values, data.close.values) , period)
    plus = 100 * TA_HMA(np.where(((up > down) & (up > 0)), up, 0), period) / trur
    minus = 100 * TA_HMA(np.where(((down > up) & (down > 0)), down, 0), period) / trur

    # Zero-padding is used here instead of dropna: dropping entries from the
    # ndarray could desynchronize it from data.index, so the warm-up region
    # is overwritten with zeros instead.
    plus = np.r_[np.zeros(period + 2), plus[(period + 2):]]
    minus = np.r_[np.zeros(period + 2), minus[(period + 2):]]
    # NOTE: `sum` shadows the builtin here; kept as-is for byte-compatibility.
    sum = plus + minus
    adx = 100 * TA_HMA(abs(plus - minus) / (np.where((sum == 0), 1, sum)), smooth)
    adx = np.r_[np.zeros(smooth + 2), adx[(smooth + 2):]]

    # Direction: +1 when trending up (adx above limit, +DI > -DI), -1 when
    # trending down, 0 when no clear trend.
    ADXm = np.where(((adx > limit) & (plus > minus)), 1, np.where(((adx > limit) & (plus < minus)), -1, 0))
    return adx, ADXm
63595d9cc53999ae1e4b75c971b4388f092da649
32,112
def forbid_end(interval, function):
    """ Forbids an interval variable to end during specified regions.

    In the declaration of an interval variable it is only possible to specify
    a range of possible end times. This function allows the user to specify
    more precisely when the interval variable can end. In particular, the
    interval variable can end only at point *t* such that the function has
    non-zero value at *t-1*. When the interval variable is absent then this
    constraint is automatically satisfied, since such interval variable does
    not have any start at all.

    Note the difference between *t* (end time of the interval variable) and
    *t-1* (the point when the function value is checked). It simplifies the
    sharing of the same function in constraints *forbid_start* and
    *forbid_end*. It also allows one to use the same function as *intensity*
    parameter of interval variable.

    Args:
        interval: Interval variable being restricted.
        function: If the function has value 0 at point *t*-1 then the interval
            variable interval cannot end at *t*.

    Returns:
        Constraint expression
    """
    # Build the solver-side forbidEnd(interval, function) call, coercing both
    # arguments to the CPO types the operator expects.
    return CpoFunctionCall(Oper_forbid_end, Type_Constraint, (_convert_arg(interval, "interval", Type_IntervalVar), _convert_arg(function, "function", Type_StepFunction)))
fee64b27578f78632ed84d2575a7b2dfb6de39e2
32,113
import re


def get_all_anime(total_pages: int) -> list:
    """
    Get all the anime listed on all the pages of the website.

    :param total_pages: Total number of pages of HorribleSubs.
    :return: List of (title, anime_id) tuples for each 720p release found.
    """
    titles = []
    for page in range(1, total_pages + 1):
        print(f"Processing page: {page}/{total_pages}")
        url = f"https://nyaa.si/?f=0&c=0_0&q=[HorribleSubs]&p={page}"
        soup = open_url(url)
        for tag in soup('a'):
            href = tag.get('href', None)
            title = tag.get('title', None)
            # Keep only HorribleSubs 720p release links.
            if title and title.startswith("[HorribleSubs]") and title.endswith("[720p].mkv"):
                # temp = re.findall("\[HorribleSubs\] (.*?) - ([0-9]*) \[720p\].mkv", temp)
                anime_id = re.findall("view/([0-9]+)", href)[0]
                titles.append((title, anime_id))
        print("Done!")
    print("Anime retrieval complete!")
    return titles
91160353c7b488f21fbc7ed0a50974193a4b45bf
32,114
def network_generator(
    rw: int,
    cl: int,
    b: float,
    xi: float,
    P: float,
    mu: float,
    bipartite: bool,
) -> ArrayLike:
    """
    function to generate synthetic networks with nested, modular and in-block
    nested structures. Generates networks with a fixed block size and
    increasing number of blocks (hence, increasing network size), instead of
    networks with fixed size. This benchmark is a modification of the one
    introduced by ASR et al (PRE 2018). If the number of columns nodes is not
    given, the function will assume that we want to generate a unipartite
    network. The parameters must be passed respecting the following order.

    inputs:
    ----------
    rw: int >1
        number of row nodes that form a block
    cl: int >1
        number of col nodes that form a block
    b: number >=1
        number of blocks on which the main matrix will be divided
    xi: number, >=1
        shape parameter to indicate how stylised is the nested curve
    P: number in [0, 1]
        parameter that controls the amount of noise outside a perfectly
        nested structure
    mu: number in [0, 1]
        parameter that controls the amount of noise outside the blocks
    bipartite:
        a boolean to indicate if you want to generate bipartite (True) or
        unipartite (False) networks

    output:
    ----------
    M: array
        The synthetic network matrix with the predefined structure

    example:
    ---------
    network_matrix = network_generator(rw, cl, b, xi, P, mu, bipartite)
    """
    if rw < 3 or cl < 3:
        raise ValueError("MATRIX TOO SMALL: row and col sizes should be larger than 3")
    # Random threshold matrix; a link exists where probability > this noise.
    Mij = uniform(0, 1, size=(int(rw * b), int(cl * b)))
    cy, cx = mod_param(int(rw * b), int(cl * b), b)
    M_no = zeros(Mij.shape)
    le = []  # per-block link counts of the noiseless nested structure
    Pi = ((b - 1) * mu) / b
    lb = 0  # total number of in-block cells
    # for each block generate a nested structure
    for ii in range(int(b)):
        j, i = indices(M_no[cy * ii : cy * (ii + 1), cx * ii : cx * (ii + 1)].shape)
        # heaviside function to produce the nested structure
        H = ((j[::-1, :] + 1) / cy) > ballcurve((i / cx), xi)
        M_no[cy * ii : cy * (ii + 1), cx * ii : cx * (ii + 1)] = H
        le += [M_no[cy * ii : cy * (ii + 1), cx * ii : cx * (ii + 1)].sum()]
        lb += cy * cx
    # generate the nested structure of the remaining block
    # NOTE(review): this section deliberately reuses the final loop value of
    # `ii` to address the residual bottom-right region left over by integer
    # block sizing — confirm against the upstream benchmark implementation.
    j, i = indices(M_no[(ii + 1) * cy :, (ii + 1) * cx :].shape)
    # heaviside function to produce the nested structure
    H = ((j[::-1, :] + 1) / j.shape[0]) > ballcurve((i / i.shape[1]), xi)
    M_no[(ii + 1) * cy :, (ii + 1) * cx :] = H
    le += [M_no[(ii + 1) * cy :, (ii + 1) * cx :].sum()]
    lb += (int(rw * b) - ((ii + 1) * cy)) * (int(cl * b) - ((ii + 1) * cx))
    Et = M_no.sum(dtype=int)
    # prob of having a link outside blocks
    p_inter = (mu * Et) / (lb * b) if ((lb * b) != 0) else 0
    M_no[M_no == 0] = p_inter
    for ix in range(int(b)):
        j, i = indices(M_no[cy * ix : cy * (ix + 1), cx * ix : cx * (ix + 1)].shape)
        Pr = (
            (P * le[ix]) / ((cx * cy) - le[ix] + (P * le[ix]))
            if ((cx * cy) - le[ix] + (P * le[ix])) != 0
            else 0
        )
        # heaviside function to produce the nested structure
        H = ((j[::-1, :] + 1) / cy) > ballcurve((i / cx), xi)
        # prob of having a link within blocks
        p_intra = ((1 - P + (P * Pr)) * H + Pr * (1 - H)) * (1 - Pi)
        M_no[cy * ix : cy * (ix + 1), cx * ix : cx * (ix + 1)] = p_intra
    # calculate to the remaining block (again using the final `ix`;
    # le[ix + 1] is the residual block's link count appended above)
    j, i = indices(M_no[(ix + 1) * cy :, (ix + 1) * cx :].shape)
    Pr = (
        (P * le[ix + 1])
        / (
            ((int(rw * b) - (ix + 1) * cy) * (int(cl * b) - (ix + 1) * cx))
            - le[ix + 1]
            + (P * le[ix + 1])
        )
        if (le[ix + 1] > 0) & (P != 0)
        else 0
    )
    # heaviside function to produce the nested structure
    H = ((j[::-1, :] + 1) / j.shape[0]) > ballcurve((i / i.shape[1]), xi)
    # prob of having a link within blocks
    p_intra = ((1 - P + (P * Pr)) * H + Pr * (1 - H)) * (1 - Pi)
    M_no[(ix + 1) * cy :, (ix + 1) * cx :] = p_intra
    # Realize the probability matrix into a binary adjacency matrix.
    M = (M_no > Mij).astype(int)
    if not bipartite:
        # Unipartite: drop self-loops and symmetrize the upper triangle.
        fill_diagonal(M, 0)
        M = triu(M, k=1) + (triu(M, k=1)).T
    return M
f9c6d615b117a2aa7ee22b7615ddcdbcaf628a71
32,115
def convert_rgb_to_hex(rgb: "tuple[int, int, int]") -> "str | None":
    """Take an RGB value and convert it to hex code.

    The original annotation was ``tuple([int, int, int])``, which evaluates a
    throwaway list at definition time and carries no type information; the
    return annotation also omitted the ``None`` failure path. Both are fixed
    with string annotations (no runtime cost, no new imports).

    Args:
        rgb: a color represented as a tuple of three ints, each in [0, 255].

    Returns:
        Six-digit lowercase hex code of the color (no leading '#'), or None
        if any channel fails validation.
    """
    # Validate each channel is not negative or greater than 255.
    for value in rgb:
        if not 256 > value >= 0:
            # Return nothing if any of the RGB values fail validation
            return None
    return "%02x%02x%02x" % rgb
c20c6a96dbe577eff4421df28403a57e1f038e4e
32,116
def localVarName(value, position):
    """Check that a local variable name starts with a lower case letter.

    (The previous docstring — "A name of a class." — described the wrong
    entity; this check is for local variable names.)

    Args:
        value: the identifier to validate (non-empty string).
        position: source position used for error reporting.

    Returns:
        An Error describing the violation, or None if the name is valid.
    """
    if not value[0].islower():
        return Error('BadLocalVariableName', 'Local variable must start with a lower case letter', position, LINES)
    return None
d4f8838497109fcf41e9c904aaddc31edd69eadc
32,117
def nodeAndOutputFromScenegraphLocationString(string, dag):
    """
    Returns a tuple containing the node defined in a location string and its
    corresponding Output.

    Returns (None, None) when the string cannot be parsed, the node is not in
    the dag, or the named output does not exist.
    """
    try:
        outputNodeUUID = uuidFromScenegraphLocationString(string)
        outputNode = dag.node(nUUID=outputNodeUUID)
        # Location strings are colon-delimited; field 3 is the output name.
        outputNodeOutputName = string.split(":")[3]
        return (outputNode, outputNode.outputNamed(outputNodeOutputName))
    except Exception:
        # Was a bare `except:`, which would also swallow KeyboardInterrupt
        # and SystemExit; narrowed to ordinary errors only.
        return (None, None)
d7fa1f03b3121f4dbcc9baac8c94bb5113fbc378
32,118
def rivers_with_station(stations):
    """Returns the names of rivers on which a station is situated."""
    rivers_to_stations = stations_by_river(stations)
    # Iterating a dict yields its keys, so this is the set of river names.
    return set(rivers_to_stations)
c2e78ff18c3fdc73f04e145beae77d914a0ee287
32,119
def check_string_is_nonempty(string, string_type='string'):
    """Ensures input is a string of non-zero length"""
    is_valid = isinstance(string, str) and len(string) >= 1
    if not is_valid:
        raise ValueError('name of the {} must not be empty!'.format(string_type))
    return string
527e60b35f6a827ee9b1eae3c9a3f7abc596b7ff
32,120
def last_frame_with_txt(vid, txt, duration):
    """Take the last frame from vid, show it for duration with txt overlay.

    :param vid: source clip (moviepy) whose final frame is captured.
    :param txt: text drawn at (10, 10), black on a white background.
    :param duration: how long (in seconds) the still frame is shown.
    :return: CompositeVideoClip of the frozen frame plus the text overlay.
    """
    # NOTE(review): iter_frames() materializes every frame just to take the
    # last one — fine for short clips, memory-heavy for long ones.
    frame = list(vid.iter_frames())[-1]
    clip = ImageClip(frame, duration=duration)
    return CompositeVideoClip([
        clip,
        TextClip(txt, font=MOVIEPY_FONT, color='black', bg_color='white', fontsize=40)
        .set_pos((10, 10)).set_duration(duration)])
e7a62332cb4ae69addc12bc679eb30479432caf2
32,121
import torch


def load_dataset(dataset_size=100, dataset_start=0, shuffle=True, sentence_level=False, n_authors=15, k=5, features=u""):
    """
    Load the Reuters C50 dataset and build train/dev/test loaders.

    :param dataset_size: number of samples to load.
    :param dataset_start: offset of the first sample to load.
    :param shuffle: whether the data loaders shuffle their samples.
    :param sentence_level: load the sentence-level dataset variant.
    :param n_authors: number of authors to keep.
    :param k: number of cross-validation folds.
    :param features: name of precomputed features to load (document-level only).
    :return: (dataset, train_loader, dev_loader, test_loader)
    """
    # Load from directory
    if sentence_level:
        reutersc50_dataset = torchlanguage.datasets.ReutersC50SentenceDataset(
            n_authors=n_authors,
            download=True,
            dataset_size=dataset_size,
            dataset_start=dataset_start
        )
    else:
        reutersc50_dataset = torchlanguage.datasets.ReutersC50Dataset(
            n_authors=n_authors,
            download=True,
            dataset_size=dataset_size,
            dataset_start=dataset_start,
            load_features=features
        )
    # end if

    # All three loaders share the same dataset; the CrossValidationWithDev
    # wrapper selects which split ('train'/'dev'/'test') each one iterates.
    # Reuters C50 dataset training
    reuters_loader_train = torch.utils.data.DataLoader(
        torchlanguage.utils.CrossValidationWithDev(reutersc50_dataset, train='train', k=k),
        batch_size=1,
        shuffle=shuffle
    )

    # Reuters C50 dataset dev
    reuters_loader_dev = torch.utils.data.DataLoader(
        torchlanguage.utils.CrossValidationWithDev(reutersc50_dataset, train='dev', k=k),
        batch_size=1,
        shuffle=shuffle
    )

    # Reuters C50 dataset test
    reuters_loader_test = torch.utils.data.DataLoader(
        torchlanguage.utils.CrossValidationWithDev(reutersc50_dataset, train='test', k=k),
        batch_size=1,
        shuffle=shuffle
    )
    return reutersc50_dataset, reuters_loader_train, reuters_loader_dev, reuters_loader_test
2ace76a461699e9f0bdf7ca838d414a3c618898a
32,122
def df_as_table(dataframe, size='50'):
    """
    Render a pandas DataFrame as an HTML table string.

    :param dataframe: pandas dataframe to be displayed as a HTML table
    :param size: string to set relative table size in percent, standard 50%
    :return: string containing a html table
    """
    n_rows, n_cols = dataframe.shape
    headers = list(dataframe.columns)
    sides = list(dataframe.index.values)
    parts = ["<table style=\"width:" + size + "%\">"]
    if headers != []:
        if sides != []:
            # Corner cell over the row-label column.
            headers.insert(0, "-")
        parts.append("<tr>")
        for element in headers:
            parts.append("<th>" + element + "</th>")
        parts.append("</tr>")
    for row in range(n_rows):
        parts.append("<tr>")
        if sides != []:
            parts.append("<th>" + str(sides[row]) + "</th>")
        for col in range(n_cols):
            parts.append("<td>" + str(dataframe.iloc[row][col]) + "</td>")
        parts.append("</tr>")
    parts.append("</table>")
    return "".join(parts)
3634a90b3e3d4ef5c8cc737e19a0540305528959
32,123
def ecdh(privkey, pubkey):
    """
    Given a loaded private key and a loaded public key, perform an ECDH exchange

    :param privkey: loaded private key object
    :param pubkey: loaded public key object
    :return: the shared secret produced by ``ecdsa.ecdh``
    """
    return ecdsa.ecdh(privkey, pubkey)
650607024f3fcd10fd7649897461c69a3d80596b
32,124
import getpass


def update_config_cli():
    """Get config from command line and write to a file.

    Prompts for each key in CFG_KEYS, masking input (and hiding the current
    value) for keys listed in ENCRYPTED. Blank input keeps the current value.

    Returns:
        The updated, saved Config object.
    """
    cfg = Config()
    # FIXME: improve CLI experience
    print('Note: Enter blank to keep the current value.')
    for key in CFG_KEYS:
        # Show (or hide) current value
        if key in ENCRYPTED:
            current = '*' if cfg.smtp[key] else ''
            # BUG FIX: with `import getpass`, the name `getpass` is the
            # module, so calling it raised TypeError. The prompt function
            # is getpass.getpass.
            cfg.smtp[key] = (
                getpass.getpass(f'{capwords(key, sep=". ")} [{current}]? ').strip()
                or cfg.smtp[key])
        else:
            current = cfg.smtp[key]
            cfg.smtp[key] = (
                input(f'{capwords(key, sep=". ")} [{current}]? ').strip()
                or cfg.smtp[key])
    cfg.save()
    return cfg
981f843f25171cab69554658f9866a3551d13770
32,125
def portfolio_margin_account(self, **kwargs):
    """Get Portfolio Margin Account Info (USER_DATA)

    GET /sapi/v1/portfolio/account

    https://binance-docs.github.io/apidocs/spot/en/#get-portfolio-margin-account-info-user_data

    Keyword Args:
        recvWindow (int, optional): The value cannot be greater than 60000
    """
    url_path = "/sapi/v1/portfolio/account"
    payload = {**kwargs}
    return self.sign_request("GET", url_path, payload)
88a1087d44187ed130211ab7d42fdcbb54a038f3
32,126
from typing import Any
from typing import Mapping


def init_hyperparams(*, class_name: str, hyperparams, hyperparams_class) -> Any:
    """
    Construct a hyperparams object from either a mapping or another
    hyperparams object.
    """
    # Guard clause: the target class must be a dataclass type at all.
    if not (isinstance(hyperparams_class, type) and is_dataclass(hyperparams_class)):
        raise YouForgotTheHyperparams(class_name=class_name)
    if hyperparams is None:
        return hyperparams_class()
    if isinstance(hyperparams, hyperparams_class):
        return hyperparams
    if isinstance(hyperparams, Mapping):
        return hyperparams_class(**hyperparams)
    raise WrongHyperparamsType(hyperparams=hyperparams, class_name=class_name)
2aa4ebc5ec9e6d4502f7873e6517dc5285f8604e
32,127
import sys


def get_theta(k, lamada, sequence, alphabet):
    """Get the theta list which use frequency to replace physicochemical
    properties (the kernel of ZCPseKNC method).

    :param k: k-mer length.
    :param lamada: number of correlation tiers (max distance between k-mers).
    :param sequence: the DNA sequence string.
    :param alphabet: the sequence alphabet; must be DNA for this method.
    :return: list of lamada theta values, one per tier distance.
    """
    theta = []
    L = len(sequence)
    kmer = make_km_list(k, alphabet)
    fre_list = [frequency_p(sequence, str(key))[0] for key in kmer]
    fre_sum = float(sum(fre_list))
    for i in range(1, lamada + 1):
        temp_sum = 0.0
        for j in range(0, L - k - i + 1):
            nucleotide1 = sequence[j: j + k]
            nucleotide2 = sequence[j + i: j + i + k]
            if alphabet == DNA:
                fre_nucleotide1 = frequency_p(sequence, str(nucleotide1))[0] / fre_sum
                fre_nucleotide2 = frequency_p(sequence, str(nucleotide2))[0] / fre_sum
                # Squared difference of normalized k-mer frequencies stands in
                # for the physicochemical-property correlation.
                temp_sum += pow(float(fre_nucleotide1) - float(fre_nucleotide2), 2)
            else:
                # NOTE(review): sys.exit(0) signals success to the shell even
                # though this is an error path — confirm a non-zero code (or
                # an exception) wasn't intended.
                sys.stderr.write("The ZCPseKNC method just for DNA.")
                sys.exit(0)
        theta.append(temp_sum / (L - k - i + 1))
    return theta
48624bb1c51315315874c9f7bff4d69f5dc15285
32,128
def _is_y(filename):
    """
    Checks whether a file is a Nanometrics Y file or not.

    :type filename: str
    :param filename: Name of the Nanometrics Y file to be checked.
    :rtype: bool
    :return: ``True`` if a Nanometrics Y file.

    .. rubric:: Example

    >>> _is_y("/path/to/YAYT_BHZ_20021223.124800")  #doctest: +SKIP
    True
    """
    try:
        # get first tag (16 bytes)
        with open(filename, 'rb') as fh:
            _, tag_type, _, _ = _parse_tag(fh)
    except Exception:
        # Unreadable, missing, or too-short files simply are not Y files.
        return False
    # The first tag in a Y-file must be the TAG_Y_FILE tag (tag type 0)
    if tag_type != 0:
        return False
    return True
adbb75533934d5050658b8a5078e66438b3381df
32,129
def is_scheduler_filter_enabled(filter_name):
    """Check the list of enabled compute scheduler filters from config.

    A configured value of 'all' enables every filter.
    """
    enabled = CONF.compute_feature_enabled.scheduler_available_filters
    if len(enabled) == 0:
        return False
    return 'all' in enabled or filter_name in enabled
f40e99f49a49aa24e66de72bad82b87ccf6ae8a2
32,130
from m2py.numerical.roots import nraphson


def juros_price(PV, PMT, n, PV0=0):
    """
    Compute the interest rate of an installment plan under the Price table
    (French amortization system), as commonly used in consumer credit.

    :param PV: Cash price / present value
    :param PV0: Down payment
    :param PMT: Installment amount
    :param n: Number of installments
    :return: Periodic interest rate (decimal) of the plan, rounded to 5 places
    """
    # Annuity factor the rate must satisfy: (1 - (1+i)^-n) / i == c.
    c = (PV - PV0) / PMT
    f = lambda i: (1 - 1 / (1 + i) ** n) / i - c
    # NOTE(review): df is meant to be f'(i), but the "(... - 1 * n)" term
    # does not match d/di[(1-(1+i)^-n)/i] = -n(1+i)^(-n-1)/i - (1-(1+i)^-n)/i^2.
    # Newton-Raphson may still converge with an inexact derivative — confirm
    # against the original derivation before relying on it.
    df = lambda i: ((i + 1) ** -n - 1 * n) / i - (1 - 1 / (i + 1) ** n) / i ** 2
    root, _, _ = nraphson(f, df, 2, tol=1e-5, maxit=1000)
    return round(root, 5)
d593f27616c7028b39e80dff47a446a40fe43338
32,131
import json
import re


def delexicalisation(out_src, out_trg, category, properties_objects):
    """
    Perform delexicalisation.

    Replaces subject mentions with the upper-cased DBPedia category and
    object mentions with their upper-cased property names, in both strings.

    :param out_src: source string
    :param out_trg: target string
    :param category: DBPedia category
    :param properties_objects: dictionary mapping properties to objects
    :return: delexicalised strings of the source and target; dictionary
        containing mappings of the replacements made
    """
    with open('delex_dict.json') as data_file:
        data = json.load(data_file)
    # replace all occurrences of Alan_Bean to ASTRONAUT in input
    delex_subj = data[category]
    delex_src = out_src
    delex_trg = out_trg
    # for each instance, we save the mappings between nondelex and delex
    replcments = {}
    for subject in delex_subj:
        # Normalize the entity name into space-separated tokens so it can be
        # matched against the tokenized input strings.
        clean_subj = ' '.join(re.split('(\W)', subject.replace('_', ' ')))
        if clean_subj in out_src:
            delex_src = out_src.replace(clean_subj + ' ', category.upper() + ' ')
            replcments[category.upper()] = ' '.join(clean_subj.split())  # remove redundant spaces
        if clean_subj in out_trg:
            delex_trg = out_trg.replace(clean_subj + ' ', category.upper() + ' ')
            replcments[category.upper()] = ' '.join(clean_subj.split())

    # replace all occurrences of objects by PROPERTY in input
    for pro, obj in sorted(properties_objects.items()):
        obj_clean = ' '.join(re.split('(\W)', obj.replace('_', ' ').replace('"', '')))
        if obj_clean in delex_src:
            delex_src = delex_src.replace(obj_clean + ' ', pro.upper() + ' ')
            replcments[pro.upper()] = ' '.join(obj_clean.split())  # remove redundant spaces
        if obj_clean in delex_trg:
            delex_trg = delex_trg.replace(obj_clean + ' ', pro.upper() + ' ')
            replcments[pro.upper()] = ' '.join(obj_clean.split())

    # possible enhancement for delexicalisation:
    # do delex triple by triple
    # now building | location | New_York_City New_York_City | isPartOf | New_York
    # is converted to
    # BUILDING location ISPARTOF City ISPARTOF City isPartOf ISPARTOF
    return delex_src, delex_trg, replcments
55108ff40e8739571a99a3481221c82c0fcbf255
32,132
def create_attention_mask_from_input_mask_v1(from_tensor, to_mask):
    """Create 3D attention mask from a 2D tensor mask.

    Args:
        from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
        to_mask: int32 Tensor of shape [batch_size, to_seq_length].

    Returns:
        float Tensor of shape [batch_size, from_seq_length, to_seq_length].
    """
    from_shape = bert_utils.get_shape_list(from_tensor, expected_rank=[2, 3])
    batch_size = from_shape[0]
    from_seq_length = from_shape[1]

    to_shape = bert_utils.get_shape_list(to_mask, expected_rank=2)
    to_seq_length = to_shape[1]

    # [batch_size, 1, to_seq_length] so it broadcasts over from positions.
    to_mask_boradcast = tf.cast(
        tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)

    # We don't assume that `from_tensor` is a mask (although it could be). We
    # don't actually care if we attend *from* padding tokens (only *to* padding)
    # tokens so we create a tensor of all ones.
    #
    # `broadcast_ones` = [batch_size, from_seq_length, 1]
    # NOTE(review): unlike the comment above, this v1 variant expands to_mask
    # on the last axis instead of using all-ones, so padded *from* positions
    # are masked too, and it implicitly requires from_seq_length ==
    # to_seq_length — confirm this is intentional for self-attention use.
    broadcast_ones = tf.cast(tf.expand_dims(to_mask, -1), tf.float32)
    # tf.ones(
    #     shape=[batch_size, from_seq_length, 1], dtype=tf.float32)

    # Here we broadcast along two dimensions to create the mask.
    mask = broadcast_ones * to_mask_boradcast

    return mask
0f83dd4a2e5cf904f19f89ce5b42e562c4ba401e
32,133
import hashlib


def md5(filename):
    """Hash function for files to be uploaded to Fl33t.

    Streams the file in 4 KiB chunks so large files are never fully loaded
    into memory.

    :param filename: path of the file to hash
    :return: hex digest string of the file's MD5
    """
    digest = hashlib.md5()
    with open(filename, "rb") as filehandle:
        while True:
            chunk = filehandle.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
35068abafee2c5c4b1ac672f603b0e720a8c9a8c
32,134
from typing import Iterable
from typing import Any
from typing import Tuple


def pairwise(iterable: Iterable[Any]) -> Iterable[Tuple[Any, Any]]:
    """
    Yield consecutive overlapping pairs from the iterable.
    s -> (s0,s1), (s1,s2), (s2, s3), ...
    """
    first, second = tee(iterable)
    # Advance the second iterator by one so zip pairs each element with
    # its successor; empty/singleton inputs simply yield nothing.
    next(second, None)
    return zip(first, second)
c9867a51d238ee51a993465b1757b387d0c9be6a
32,135
def dustSurfaceDensitySingle(R, Rin, Sig0, p):
    """
    Calculates the dust surface density (Sigma d) from single power law.
    """
    scaled_radius = R / Rin
    return Sig0 * scaled_radius ** -p
441466f163a7b968cf193e503d43a1b014be7c5d
32,136
def rightToPurchase(
    symbol="", refid="", token="", version="", filter="", **timeseries_kwargs
):
    """Right to purchase up-to-date and detailed information on all new
    announcements, as well as 12+ years of historical records.

    Updated at 5am, 10am, 8pm UTC daily

    https://iexcloud.io/docs/api/#right-to-purchase

    Args:
        symbol (str): Symbol to look up
        refid (str): Optional. Id that matches the refid field returned in
            the response object. This allows you to pull a specific event
            for a symbol.
        token (str): Access token
        version (str): API version
        filter (str): filters: https://iexcloud.io/docs/api/#filter-results

    Supports all kwargs from `pyEX.stocks.timeseries.timeSeries`

    Returns:
        dict or DataFrame: result
    """
    _raiseIfNotStr(symbol)
    symbol = _quoteSymbols(symbol)
    # Validate the pass-through time-series kwargs before issuing the call.
    _timeseriesWrapper(timeseries_kwargs)
    return timeSeries(
        id="advanced_right_to_purchase",
        key=symbol,
        subkey=refid,
        token=token,
        version=version,
        filter=filter,
        **timeseries_kwargs
    )
8897902c7b729642cdd5e89658f62b70bccf2133
32,137
def compute_edits(old, new):
    """Compute the in-place edits needed to convert from old to new

    Returns a list ``[(index_1,change_1), (index_2,change_2)...]`` where
    ``index_i`` is an offset into old, and ``change_1`` is the new bytes to
    replace.

    For example, calling ``compute_edits("abcdef", "qbcdzw")`` will return
    ``[(0, "q"), (4, "zw")]``.

    That is, the update should be preformed as (abusing notation):

    ``new[index:index+len(change)] = change``

    :param str old: The old data
    :param str new: The new data
    :returns: A list of tuples (index_i, change_i)
    """
    edits = []
    run_start = None   # offset where the current mismatch run began
    run_chars = []     # replacement characters collected for that run
    for pos, (new_ch, old_ch) in enumerate(zip(new, old)):
        if new_ch != old_ch:
            if run_start is None:
                run_start = pos
            run_chars.append(new_ch)
        elif run_start is not None:
            # End of a mismatch run: flush it.
            edits.append((run_start, "".join(run_chars)))
            run_start, run_chars = None, []
    if run_start is not None:
        edits.append((run_start, "".join(run_chars)))
    return edits
f729addf84207f526e27d67932bb5300ced24b54
32,138
def gpx_to_lat_lon_list(filename):
    """
    Summary: parse a .gpx file and collect the coordinates of its tracks
    (or, failing that, its routes).

    Returns: list of tuples (longitude, latitude).

    NOTE(review): despite the original "(latitude, longitude)" wording, the
    code appends (point.longitude, point.latitude) -- confirm which order
    callers expect before changing either.
    """
    # NOTE(review): the file handle is never closed; consider a `with` block.
    gpx_file = open(filename, "r")
    gpx = gpxpy.parse(gpx_file)
    latlonlist = []
    # Tracks take precedence over routes; waypoints are not handled.
    if len(gpx.tracks) > 0:
        print("tracks")
        for track in gpx.tracks:
            for segment in track.segments:
                for point in segment.points:
                    latlonlist.append((point.longitude, point.latitude))
    elif len(gpx.routes) > 0:
        print("routes")
        for route in gpx.routes:
            for point in route.points:
                latlonlist.append((point.longitude, point.latitude))
    else:
        print("sorry mate, didn't care enough to implement this")
    return latlonlist
4d413a5894a30bb176a103b8a3933685490f30fe
32,139
def pre_order(size):
    """List in pre order of integers ranging from 0 to size in a balanced
    binary tree.

    The list is built in place: each slot first holds an interval
    (start, end), which is later overwritten by that subtree's root
    (the interval midpoint) once its children have been queued.
    """
    out = [None] * size
    out[0] = (0, size)
    write = 1
    for read in range(size):
        lo, hi = out[read]
        mid = (lo + hi) // 2
        # Queue the left and right half-intervals, if non-empty.
        if lo < mid:
            out[write] = (lo, mid)
            write += 1
        if mid + 1 < hi:
            out[write] = (mid + 1, hi)
            write += 1
        # Replace the interval with its root value.
        out[read] = mid
    return out
45ab688c627c19cd0b9c1200830a91b064d46bda
32,140
def LockPrefix():
    """Returns the lock prefix as an operand set.

    The set holds a single Operands instance whose disassembly is the
    x86 'lock' prefix.
    """
    return set([Operands(disasms=('lock',))])
d4f84027494ad176efcb8c01f14876474aaca57f
32,141
def distance(pt, pts):
    """Distances of one point `pt` to a set of points `pts`.

    `pts` is an (N, 2) array; returns an (N,) array of Euclidean distances.
    """
    dx = pts[:, 0] - pt[0]
    dy = pts[:, 1] - pt[1]
    return np.sqrt(dx * dx + dy * dy)
06512472ac6c0e58182ad58190c82fa619d66d40
32,142
def prepare_rw_output_stream(output):
    """
    Prepare an output stream that supports both reading and writing.
    Intended to be used for writing & updating signed files:
    when producing a signature, we render the PDF to a byte buffer with
    placeholder values for the signature data, or straight to the provided
    output stream if possible.

    More precisely: this function will return the original output stream
    if it is writable, readable and seekable.
    If the ``output`` parameter is ``None``, not readable or not seekable,
    this function will return a :class:`.BytesIO` instance instead.
    If the ``output`` parameter is not ``None`` and not writable,
    :class:`.IOError` will be raised.

    NOTE(review): this wrapper itself never raises; presumably
    `assert_writable_and_random_access` raises IOError for non-writable
    streams and returns falsy for non-readable/non-seekable ones -- confirm.

    :param output:
        A writable file-like object, or ``None``.
    :return:
        A file-like object that supports reading, writing and seeking.
    """
    if output is None:
        output = BytesIO()
    else:
        # Fall back to an in-memory buffer when the stream is not
        # readable/seekable enough to be updated in place.
        if not assert_writable_and_random_access(output):
            output = BytesIO()

    return output
af1afe87e5de12cad9eb72b93da069327c1fffb5
32,143
from pathlib import Path


def add_references(md, tmp_dir, args):
    """
    Resolve the citations in *md* through the node-based pipeline and
    return the updated markdown.

    Remember that this function is run for main, review, and editor.
    """
    citations_to_do = tmp_dir / 'citations.json'
    # The bibliography JSON sits next to the user's library file.
    biblio = Path(args.library).with_suffix('.json')
    _prepare_node_input(md, citations_to_do)
    _check_citation_keys(citations_to_do, biblio)
    _process_node(tmp_dir, biblio, args)
    md = _read_node_output(md, tmp_dir)
    return md
5c4319720a809c9e6543ef078598b7a3539c3492
32,144
import argparse


def get_args():
    """Get command-line arguments.

    Returns the parsed argparse Namespace (file, seed, num, easy).
    Exits with a usage error when --num is not a positive integer.
    """

    parser = argparse.ArgumentParser(
        description='Purpose: Create Workout Of (the) Day (WOD)',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('-f',
                        '--file',
                        help='CSV input file of excercise',
                        metavar='FILE',
                        type=argparse.FileType('rt'),
                        default='inputs/exercises.csv')

    parser.add_argument('-s',
                        '--seed',
                        help='Random seed',
                        metavar='int',
                        type=int,
                        default=None)

    parser.add_argument('-n',
                        '--num',
                        help='Number of exercises',
                        metavar='excercises',
                        type=int,
                        default=4)

    parser.add_argument('-e',
                        '--easy',
                        help='Halve the reps',
                        action='store_true',
                        default=False)

    args = parser.parse_args()

    # Bug fix: the old check (args.num < 0) let 0 slip through even though
    # the error message demands a value greater than 0.
    if args.num < 1:
        parser.error(f'--num "{args.num}" must be greater than 0')

    # Bug fix: return the Namespace we already parsed instead of calling
    # parse_args() a second time, which re-opened the --file argument.
    return args
2f320b4c4093a263c5eb69f0784344299389465c
32,145
from pandas import Timestamp


def timestamp_now() -> Timestamp:
    """Returns a pandas timezone (UTC) aware Timestamp for the current time.

    Returns:
        pandas.Timestamp: Timestamp at current time
    """
    # Timestamp.now() is naive by default; timestamp_tzaware presumably
    # attaches/normalises the UTC timezone -- confirm in its definition.
    return timestamp_tzaware(Timestamp.now())
545b0cb72691d3db127ccfc847295a4bc4902004
32,146
def readFlat4D(fn,interp=None): """ Load in data from 4D measurement of flat mirror. Scale to microns, remove misalignments, strip NaNs. Distortion is bump positive looking at surface from 4D. Imshow will present distortion in proper orientation as if viewing the surface. """ #Get xpix value in mm l = getline(fn,9) dx = float(l.split()[1])*1000. #Remove NaNs and rescale d = np.genfromtxt(fn,skip_header=12,delimiter=',') d = man.stripnans(d) d = d *.6328 d = d - np.nanmean(d) d = np.fliplr(d) #Interpolate over NaNs if interp is not None: d = man.nearestNaN(d,method=interp) return d,dx
8443ab4943bb571d1ead1f8f4342efec8e426139
32,147
def inner_product(D1, D2):
    """
    Take the inner product of the frequency maps.

    Only keys present in both maps contribute; returns 0.0 when the
    maps share no keys.
    """
    shared_keys = D1.keys() & D2.keys()
    return sum((D1[key] * D2[key] for key in shared_keys), 0.0)
95efb9f63d6a379e1c5f7c8f6ad4bfd4061e2032
32,148
def list_launch_daemons():
    """
    Return an array of the files that are present in /Library/LaunchDaemons/
    and /System/Library/LaunchDaemons/ (third-party and system macOS
    launch daemons, respectively).
    """
    files = list_files_in_dir("/Library/LaunchDaemons/")
    files += list_files_in_dir("/System/Library/LaunchDaemons/")
    return files
8e1f0ab1bb78a9121f5c00f032a5c8dc089f39b0
32,149
import six
import sys


def decode(input, errors='strict'):
    """
    convert from wtf-8 encoded bytes to unicode text.  If this is a python
    narrow build this will actually produce UTF-16 encoded unicode text
    (e.g. with surrogates).

    NOTE(review): `errors` is accepted for codec-API symmetry but never
    consulted; malformed input always raises ValueError.  The second element
    of the returned tuple is the number of *output* code units, not input
    bytes consumed -- confirm against the codec contract.  Also note the
    inner `>=` against sys.maxunicode surrogate-encodes the maximum code
    point itself -- verify that is intended.
    """
    buf = []
    try:
        it = six.iterbytes(input)
        c = None
        while True:
            c = next(it)
            if c < 0x80:
                # 1-byte sequence: ASCII, use the byte as-is.
                pass
            elif c < 0xE0:
                # 2-byte sequence: 5 + 6 payload bits.
                c = (((c & 0x1F) << 6) + (next(it) & 0x3F))
            elif c >= 0xE0 and c <= 0xEF:
                # 3-byte sequence: 4 + 6 + 6 payload bits.
                c = (((c & 0x0F) << 12) +
                     ((next(it) & 0x3F) << 6) +
                     (next(it) & 0x3F))
            elif c >= 0xF0 and c <= 0xF4:
                # 4-byte sequence: 3 + 6 + 6 + 6 payload bits.
                c = (((c & 0x07) << 18) +
                     ((next(it) & 0x3F) << 12) +
                     ((next(it) & 0x3F) << 6) +
                     (next(it) & 0x3F))
                if c >= sys.maxunicode:
                    # use a surrogate pair
                    buf.append(((c - 0x10000) >> 10) + 0xD800)
                    c = ((c - 0x10000) & 0x3FF) + 0xDC00
            else:
                # Continuation byte or > 0xF4 in lead position.
                raise ValueError("Invalid wtf sequence")
            buf.append(c)
            c = None
    except StopIteration:
        # A non-None c here means the input ended mid-sequence.
        if c is not None:
            raise ValueError("Malformed WTF-8 sequence")
    return six.text_type().join(map(six.unichr, buf)), len(buf)
ddb96ea0e5a12cd5e5b60f868dcd6522cecce86a
32,150
import struct


def us_varchar_encode(text):
    """
    Encode *text* as a UShort *Varchar: a little-endian uint16 character
    count followed by the UTF-16-LE encoded text.

    :param str text: string to encode (may be empty or None)
    :return: the encoded value
    :rtype: bytes
    """
    if not text:
        # Bug fix: previously returned the *str* '\x00\x00' on Python 3,
        # which cannot be concatenated with the bytes produced for
        # non-empty input; return bytes consistently.
        return b'\x00\x00'
    length = len(text)
    return struct.pack('<H', length) + text.encode('utf-16-le')
07b232cd83e023d770fc4e7cd63250ad746aae19
32,151
from typing import List


def graph_to_diagonal_h(n: int, nodes: List[int]) -> np.ndarray:
    """Construct diag(H).

    Sums, elementwise, the 2**n diagonals produced by `tensor_diag` for each
    graph edge.

    NOTE(review): each `node` is indexed as node[0], node[1], node[2], so the
    real element type is a triple, not int as the hint claims; the function
    also returns a plain list, not np.ndarray -- confirm and fix the hints.
    """
    h = [0.0] * 2**n
    for node in nodes:
        diag = tensor_diag(n, node[0], node[1], node[2])
        for idx, val in enumerate(diag):
            h[idx] += val
    return h
5c73d4b4a98465f3f03d9a423f867479b48da8fe
32,152
def condensational_heating(dQ2):
    """
    Latent-heating tendency implied by a moisture tendency.

    Args:
        dQ2: rate of change in moisture in kg/kg/s, negative corresponds
            to condensation

    Returns:
        heating rate in degK/s
    """
    # Scale by -LV/CPD: condensation (negative dQ2) yields positive heating.
    return tf.math.scalar_mul(tf.constant(-LV / CPD, dtype=dQ2.dtype), dQ2)
55d5ec36bf1f4a217e239e35fb95e14060b07fb8
32,153
def collect_stats(cube, store, datasets=None):
    """
    Collect statistics for given cube.

    Parameters
    ----------
    cube: Cube
        Cube specification.
    store: simplekv.KeyValueStore
        KV store that preserves the cube.
    datasets: Union[None, Iterable[str], Dict[str, kartothek.core.dataset.DatasetMetadata]]
        Datasets to query, must all be part of the cube. May be either the
        result of :meth:`discover_datasets`, a list of Ktk_cube dataset ID or
        ``None`` (in which case auto-discovery will be used).

    Returns
    -------
    stats: Dict[str, Dict[str, int]]
        Statistics per ktk_cube dataset ID.
    """
    # `store` may be a zero-arg factory; resolve it to an instance.
    if callable(store):
        store = store()
    # Unless a full metadata dict was supplied, (re-)discover the datasets,
    # optionally filtered by the given list of dataset IDs.
    if not isinstance(datasets, dict):
        datasets = discover_datasets_unchecked(
            uuid_prefix=cube.uuid_prefix,
            store=store,
            filter_ktk_cube_dataset_ids=datasets,
        )

    all_metapartitions = get_metapartitions_for_stats(datasets)
    # Single synchronous block; reduce_stats merges per-block partials.
    return reduce_stats([collect_stats_block(all_metapartitions, store)])
526405128e95e13fb6f011300ddcda922ebe8582
32,154
def post_move_subject(subject_uuid: SubjectId, target_report_uuid: ReportId, database: Database):
    """Move the subject to another report.

    Copies the subject into the target report, deletes it from the source
    report, and persists both changed reports together with a human-readable
    delta description.
    """
    data_model = latest_datamodel(database)
    reports = latest_reports(database)
    source = SubjectData(data_model, reports, subject_uuid)
    target = ReportData(data_model, reports, target_report_uuid)
    # Move: insert into target first, then remove from source.
    target.report["subjects"][subject_uuid] = source.subject
    del source.report["subjects"][subject_uuid]
    # {user} is left as a literal placeholder to be filled in later.
    delta_description = (
        f"{{user}} moved the subject '{source.subject_name}' from report "
        f"'{source.report_name}' to report '{target.report_name}'."
    )
    source_uuids = [source.report_uuid, subject_uuid]
    target_uuids = [target_report_uuid, subject_uuid]
    return insert_new_report(database, delta_description, (source.report, source_uuids), (target.report, target_uuids))
405be11279fe3fa2a65b75ac46518cdaabcb5e90
32,155
import sympy


def makefunction(exprs, assignto, funcname='func', returncodestr=False, usenumba=True):
    """
    Given sympy expressions list `expr` and a list of variable names
    `assignto`, it creates a function.  It returns a function object if
    `returncodestr` = False. Otherwise, it returns a formatted function code
    as a string with the name of the function given by `funcname`. If
    `usenumba` is False it will not produce a Numba Jitted function.

    NOTE(review): the generated source is executed via exec() into globals();
    only call with trusted expressions.  The generated function signature is
    fixed to (x, y).
    """
    codestr = [
        'import math',
        'from math import sqrt',  # Bug in Sympy, need to handle sqrt separately
    ]
    if usenumba:
        codestr += [
            'import numba',
            '\n@numba.njit'
        ]
    else:
        codestr.append('')  # Just to introduce a line break

    codestr += [
        'def {0}(x, y):'.format(funcname),
        '\n ############## Sub-expressions ##############'
    ]

    # Now the codegeneration part, first eliminate common sub-expressions
    replacements, reduced_exprs = sympy.cse(exprs, optimizations='basic')
    for lhs, rhs in replacements:
        codestr.append(' {} = {}'.format(lhs, sympy.pycode(rhs)))

    codestr.append('\n ############## Final Expressions ##############')
    for lhs, rhs in zip(assignto, reduced_exprs):
        codestr.append(' {} = {}'.format(lhs, sympy.pycode(rhs)))

    codestr.append('\n return {}'.format(', '.join(assignto)))
    funccode = '\n'.join(codestr)

    if returncodestr:
        return funccode
    else:
        # Materialise the generated function and hand it back.
        exec(funccode, globals())
        return globals()[funcname]
1ba503589fe18f6678f1ddc1a535978433837276
32,156
def open_instrument(instr_type):
    """open_visa_instrument implements the public api for each of the drivers
    for discovering and opening a connection

    :param instr_type:
        The abstract base class to implement
        A dictionary containing the technical specifications of the required
        equipment
    :return:
        A instantiated class connected to a valid dmm
    """
    # Keep only configured drivers of this type that are actually connected.
    instruments = filter_connected(
        fixate.config.INSTRUMENTS, fixate.config.DRIVERS.get(instr_type, {})
    )
    try:
        # Arbitrarily pick the first connected instrument.
        instrument = list(instruments.values())[0]
    except IndexError:
        raise InstrumentNotConnected("No valid {} found".format(instr_type))
    else:
        instrument_name = type(instrument).__name__
        # Announce the opened driver so subscribers (UI/log) see its identity.
        pub.sendMessage(
            "driver_open",
            instr_type=instrument_name,
            identity=instrument.get_identity(),
        )
        return instrument
1741b94a527a0283efee7466ccc15be09abe1622
32,157
import jinja2
from datetime import datetime


def thisyear():
    """The current year, wrapped as jinja2 Markup.

    Bug fix: the module imports the ``datetime`` *class*, so the previous
    ``datetime.date.today().year`` raised AttributeError at call time
    (``datetime.date`` is an instance method, not the ``date`` class);
    use ``datetime.today()`` directly instead.
    """
    return jinja2.Markup(datetime.today().year)
3de970398e1fb55f98a968c0c83411d18e8cd423
32,158
from unicodedata import east_asian_width


def display_width(str):
    """Return the required over-/underline length for str.

    Uses vim's own measurement when available; otherwise counts East Asian
    Wide/Fullwidth characters as two columns and everything else as one.

    NOTE(review): the parameter shadows the builtin ``str``.
    """
    try:
        # Respect &ambiwidth and &tabstop, but old vim may not support this
        return vim.strdisplaywidth(str)
    except AttributeError:  # Fallback
        result = 0
        for c in str:
            # 'W' (Wide) and 'F' (Fullwidth) occupy two terminal columns.
            result += 2 if east_asian_width(c) in ('W', 'F') else 1
        return result
ebeedd159de5c31ea435d44a88fe6fe16ccbcb54
32,159
def get_short_int(filename, ptr):
    """Jump to position 'ptr' in file and read a 16-bit integer.

    Delegates to `get_val` with dtype np.int16 and converts the result to a
    plain Python int.  NOTE(review): byte order is whatever np.int16 implies
    inside `get_val` (native order unless specified) -- confirm.
    """
    val = get_val(filename, ptr, np.int16)
    return int(val)
42377a73df1dfbff2593fa43e571e3d269db6449
32,160
def u32_from_dto(dto: U32DTOType) -> int:
    """Convert DTO to 32-bit int.

    `check_overflow` is expected to raise when the value falls outside
    [0, U32_MAX]; the value itself is returned unchanged.
    """
    check_overflow(0 <= dto <= U32_MAX)
    return dto
066ab2c2ed70d69ac8e37515ea815e1305574eea
32,161
def diagonal_basis_commutes(pauli_a, pauli_b):
    """
    Test if `pauli_a` and `pauli_b` share a diagonal basis

    Example: Check if [A, B] with the constraint that A & B must share a
    one-qubit diagonalizing basis. If the inputs were [sZ(0), sZ(0) * sZ(1)]
    then this function would return True. If the inputs were [sX(5), sZ(4)]
    this function would return True. If the inputs were [sX(0), sY(0) * sZ(2)]
    this function would return False.

    :param pauli_a: Pauli term to check commutation against `pauli_b`
    :param pauli_b: Pauli term to check commutation against `pauli_a`
    :return: Boolean of commutation result
    :rtype: Bool
    """
    # Only qubits acted on by both terms can spoil a shared diagonal basis.
    overlapping_active_qubits = set(pauli_a.get_qubits()) & set(pauli_b.get_qubits())
    for qubit_index in overlapping_active_qubits:
        # Two non-identity single-qubit operators must be equal to share
        # a diagonalizing basis on that qubit.
        if (pauli_a[qubit_index] != 'I' and pauli_b[qubit_index] != 'I' and
                pauli_a[qubit_index] != pauli_b[qubit_index]):
            return False

    return True
b95ac0cfe22233432df3a0e0f814c4e0e7af6d0f
32,162
def cost_using_SigmoidCrossEntropyWithLogits(logits, labels): """     Computes the cost using the sigmoid cross entropy          Arguments:     logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)     labels -- vector of labels y (1 or 0) Note: What we've been calling "z" and "y" in this class are respectively called "logits" and "labels" in the TensorFlow documentation. So logits will feed into z, and labels into y.          Returns:     cost -- runs the session of the cost (formula (2)) """ # Create the placeholders for "logits" (z) and "labels" (y) lgt = tf.placeholder(tf.float32,name="lgt") lbl = tf.placeholder(tf.float32,name="lbl") # Use the loss function # sigmoid型交叉熵和逻辑 loss_func = tf.nn.sigmoid_cross_entropy_with_logits(logits=lgt,labels=lbl) # Create a session. See method 1 above. sess = tf.Session() # Run the session cost = sess.run(loss_func,feed_dict={lgt:logits,lbl:labels}) # Close the session. See method 1 above. sess.close() return cost
7990ee4cb4b4ebfc7b5f1f580be2315ee6667fa5
32,163
def clone_to_df(clone):
    """Convert a clone to a pandas.DataFrame.

    One row per mutation (indexed like clone.deltas); the clone-level
    frequency vector is repeated on every row, with 'alt_cn' (per-mutation
    copy-number delta) and 'clone_id' columns appended.
    """
    number_of_mutations = clone.deltas.shape[0]
    clone_stats = pd.DataFrame(
        # Repeat the clone-wide frequencies once per mutation row.
        np.stack([clone.frequencies for _ in range(number_of_mutations)]),
        columns=clone.frequencies.index,
        index=clone.deltas.index
    )
    clone_stats['alt_cn'] = clone.deltas
    clone_stats['clone_id'] = clone.clone_id
    return clone_stats
e383241b024d5deef7022be3d04b36f4ffcee587
32,164
import os


def get_points_on_rim(mesh_dir, scale=1.2, obj_name='cup'):
    """This will return me points which lie on the rim of the cup or bowl

    Loads the convex collision meshes under `mesh_dir`, scales and combines
    them, samples random points on the top face of the scaled bounding box,
    and keeps only those whose nearest mesh-surface point is within 3e-4
    (model units) -- i.e. points on the rim.

    NOTE(review): `obj_name` is unused; the five non-top planes and the
    first trimesh.Scene are built only for (commented-out) visualisation.
    Returns None when no convex meshes are found.
    """
    assert os.path.exists(mesh_dir)
    meshes = [osp.join(mesh_dir, m) for m in os.listdir(mesh_dir) if "visual" not in m]
    meshes = [m for m in meshes if "convex" in m]
    if len(meshes) == 0:
        return None
    loaded_meshes = [trimesh.load(m) for m in meshes]
    print(f'applying scale {scale}')
    # scale the mesh
    scaled_meshes = [l.apply_scale(scale) for l in loaded_meshes]
    # combine the meshes
    combined_scaled_mesh = np.sum(scaled_meshes)
    # now get the corners of the bounding box
    bbox_corners = get_bbox_from_mesh(mesh_dir, scale=scale)
    lx, ly, lz = bbox_corners[0]
    rx, ry, rz = bbox_corners[-2]
    # the arrangement of the bounding box is as follows
    # (lx, ly, lz), (rx, ly, lz), (rx, ry, lz), (lx, ry, lz)
    # (lx, ly, rz), (rx, ly, rz), (rx, ry, rz), (lx, ry, rz)
    # the up plane is formed by the following vertices.
    # (2, 3, 6, 7)
    # now I need to sample points in this 2D bounding box
    xs = np.random.uniform(low=lx, high=rx, size=1000)
    ys = np.random.uniform(low=ly, high=ry, size=1000)
    zs = np.random.uniform(low=lz, high=rz, size=1000)
    up_plane = np.c_[xs, ys, np.ones(len(xs))*rz]
    down_plane = np.c_[xs, ys, np.ones(len(xs))*lz]
    left_plane = np.c_[np.ones(len(ys))*lx, ys, zs]
    right_plane = np.c_[np.ones(len(ys))*rx, ys, zs]
    front_plane = np.c_[xs, np.ones(len(xs))*ly, zs]
    back_plane = np.c_[xs, np.ones(len(xs))*ry, zs]
    # plot the mesh and the points, if this is right
    # then I need to find the intersecting points
    up_cloud = trimesh.points.PointCloud(up_plane)
    down_cloud = trimesh.points.PointCloud(down_plane)
    left_cloud = trimesh.points.PointCloud(left_plane)
    right_cloud = trimesh.points.PointCloud(right_plane)
    front_cloud = trimesh.points.PointCloud(front_plane)
    back_cloud = trimesh.points.PointCloud(back_plane)
    scene = trimesh.Scene([combined_scaled_mesh, up_cloud, down_cloud,
                           left_cloud, right_cloud, front_cloud, back_cloud])
    # scene.show()
    # now compute the distance of all the points on the up-plane
    # to the mesh surface
    closest_points, distances, triangle_id = combined_scaled_mesh.nearest.on_surface(
        up_plane)
    # keep samples essentially touching the surface (tolerance 3e-4 units)
    pts_idx = distances < 3e-4
    filtered_points = closest_points[pts_idx]
    # draw the spheres, of small radius
    spheres_list = list()
    for p in filtered_points:
        tmesh = trimesh.creation.icosphere(radius=0.003,
                                           color=np.asarray([1, 0, 0]).astype(np.uint8))
        # apply the translation
        trans_mat = np.eye(4)
        trans_mat[:3, 3] = p
        tmesh = tmesh.apply_transform(trans_mat)
        spheres_list.append(tmesh)
    # draw it on my friend
    scene = trimesh.Scene([combined_scaled_mesh]+[spheres_list])
    # scene.show()
    return filtered_points
b654aa85057c61fd20753f0660565e9c35d8670b
32,165
def reorder_kernel_weight(torch_weight):
    """
    Reorder a torch kernel weight into a tf format.

    Permutes axes from (out_ch, in_ch, *spatial) to
    (*spatial, in_ch, out_ch).
    """
    ndim = len(torch_weight.shape)
    spatial_axes = list(range(2, ndim))
    # Spatial dims first, then the two channel dims reversed (in, out).
    return torch_weight.transpose(spatial_axes + [1, 0])
2e289d768d31d3ed875fbb3613ec0e3061b65cd9
32,166
def make_windows(x, window_size, horizon): """ Creates a window out of """ # Create a window of specific window size window_step = np.expand_dims(np.arange(window_size+horizon), axis=0) # Create a 2D array of multiple window steps window_indices = window_step + np.expand_dims(np.arange(len(x)-(window_size+horizon-1)), axis=0).T # Index on the target array (a time series) with 2D array of multiple window steps windowed_array = x[window_indices] windows, labels = create_window_labels(windowed_array, horizon) return windows, labels
4e226e2ee2c3951cd2dfe6cb4a92b9d66e9376bf
32,167
def interpolate(arr_old, arr_new, I_old, J_old):  # deprecated 2013-08-26
    """
    input: array, i, j
    output: value

    (int(x), int(y)+1)   +     + (int(x)+1, int(y)+1)
                   (x,y)
    (int(x), int(y))     +     + (int(x)+1, int(y))

    be careful - floor(x)=ceil(x)=x for integer x, so we really want
    floor(x) and floor(x)+1

    NOTE(review): kept verbatim because it is marked deprecated, but as
    written this is NOT bilinear interpolation: i_frac/j_frac come from the
    *integer* loop indices (i % 1 == 0 always), and the last two weight
    terms reuse f00 instead of f10/f11 (which are computed but unused), so
    every output is just f00 at the clamped source cell.
    """
    I = I_old.copy()
    J = J_old.copy()
    # Initialise output to the sentinel -999.
    arr_new2 = arr_new * 0
    arr_new2 += (-999)
    height_new, width_new = arr_new.shape
    height_old, width_old = arr_old.shape
    # set all out-of-bounds to (0,0) for convenience
    I = (I>=0) * (I<height_old-1) * I  #e.g. i>=0 and i<=4 for i=[0,1,2,3,4], width=5
    J = (J>=0) * (J<width_old -1) * J
    # the loopings are necessary since we don't know beforehand where the
    # (i_old, j_old) would land
    for i in range(height_new):
        for j in range(width_new):
            i0 = int(I[i,j])
            j0 = int(J[i,j])
            i1 = i0 + 1
            j1 = j0 + 1
            i_frac = i % 1
            j_frac = j % 1
            f00 = arr_old[i0,j0]
            f01 = arr_old[i0,j1]
            f10 = arr_old[i1,j0]
            f11 = arr_old[i1,j1]
            arr_new2[i, j] = (1-i_frac)*(1-j_frac) * f00 + \
                             (1-i_frac)*( j_frac) * f01 + \
                             ( i_frac)*(1-j_frac) * f00 + \
                             ( i_frac)*( j_frac) * f00
    return arr_new2
bcb34c33ca462c43390ff0dd8802d05dc0512dd3
32,168
from typing import Optional
from typing import Union
import os


def create_tif_file(left: float, bottom: float, right: float, top: float,
                    to_file: Optional[str] = None,
                    cache_dir: str = CACHE_DIR,
                    nodata: int = 0) -> Union[str, MemoryFile]:
    """Create a TIF file using SRTM data for the box defined by left, bottom,
    right, top.

    If to_file is provided, saves the resulting file to to_file and returns
    the path; otherwise, creates a rasterio.io.MemoryFile and returns that.
    """
    if not os.path.exists(cache_dir):
        os.mkdir(cache_dir)
    # SRTM tile (x, y) indices intersecting the requested bounding box.
    xy = get_all_xy_components(left, bottom, right, top)
    zip_fnames = {}
    for x, y in xy:
        zip_fnames[(x, y)] = ZIP_FNAME.format(x=x, y=y)
    # Download (or reuse cached) tile archives and unpack them.
    zip_fpaths = fetch_all_zips(zip_fnames, cache_dir)
    unzip_all(zip_fpaths.values(), cache_dir)
    srcs = [rasterio.open(get_tif_fpath(x, y, cache_dir), 'r') for x, y in xy]
    print(f'Creating TIF file from following files: {[s.name for s in srcs]}.')
    #print(f'Heights are: {[s.height for s in srcs]}.')
    #print(f'Widths are: {[s.width for s in srcs]}.')
    profile = srcs[0].profile
    # Mosaic all tiles, cropped to the requested bounds.
    data, transform = rasterio.merge.merge(srcs, (left, bottom, right, top),
                                           nodata=nodata)
    for src in srcs:
        src.close()
    clear_cache(cache_dir, True)
    bands, height, width = data.shape
    # No idea if this is the correct order for height and width, but they are
    # both the same so it doesn't matter in this case
    profile.update({
        'height': height,
        'width': width,
        'transform': transform
    })
    print(f'Created TIF file with dimensions {width}x{height}.')
    if to_file:
        print(f'Writing TIF file to {to_file}.')
        with rasterio.open(to_file, 'w', **profile) as dst:
            dst.write(data)
        return to_file
    else:
        memfile = MemoryFile()
        with memfile.open(**profile) as dst:
            dst.write(data)
        return memfile
025eb5c5c70e0c2e9d4583bb291ff6caf25bedc4
32,169
def point_seg_sep(ar, br1, br2):
    """Return the minimum separation vector between a point and a line
    segment, in 3 dimensions.

    Parameters
    ----------
    ar: array-like, shape (3,)
        Coordinates of a point.
    br1, br2: array-like, shape (3,)
        Coordinates for the points of a line segment

    Returns
    -------
    sep: float array, shape (3,)
        Separation vector between point and line segment.
    """
    seg = br2 - br1
    rel = ar - br1
    # Scalar projection (unnormalised) of the point onto the segment.
    proj = np.dot(rel, seg)
    if proj <= 0.0:
        # Closest to the first endpoint.
        return ar - br1
    seg_len_sq = np.sum(np.square(seg))
    if seg_len_sq <= proj:
        # Closest to the second endpoint.
        return ar - br2
    # Closest to an interior point of the segment.
    closest = br1 + (proj / seg_len_sq) * seg
    return ar - closest
a036f4ea9e9c308002e18e75111aed4408d75cf4
32,170
def get_creds():
    """
    Function which will take no arguments, but look for API credentials which
    are stored on the internet. Unfortunately, this is the most recognizable
    and weakest part of the code, since it fetches the creds from the same
    places over and over again.

    TODO: Rework this so we're not hard-coding locations for API keys

    :return: API keys for whichever method should be tried next.
    """
    method, keys = apis.get_keys()
    if method is None:
        # NOTE(review): panic() presumably aborts/exits; if it ever returns,
        # execution falls through and (None, keys) is returned -- confirm.
        panic()
    return method, keys
0d7c002aa9b04df1baaa6258d84ca6ca8d3c40da
32,171
from typing import Callable


def shd(node_1: BinaryTreeNode, node_2: BinaryTreeNode,
        hd: Callable[[BinaryTreeNode, BinaryTreeNode], float]) -> float:
    """Structural Hamming distance (SHD) between two binary trees.

    Distance is 1 when either node is missing or the arities differ; for
    matching leaves it is hd(node_1, node_2); otherwise the node distance
    and the children's SHDs are averaged over (arity + 1).

    :param node_1:
    :param node_2:
    :param hd: per-node Hamming distance function
    :return: distance in [0, 1]
    """
    if node_1 is None or node_2 is None:
        return 1

    def _arity(node):
        # Number of children: 0, 1, or 2.
        count = 0
        if node.has_left_child():
            count += 1
        if node.has_right_child():
            count += 1
        return count

    arity = _arity(node_1)
    if arity != _arity(node_2):
        return 1
    if arity == 0:
        # Both are leaves: plain per-node distance.
        return hd(node_1, node_2)
    children_sum = (shd(node_1.left, node_2.left, hd) +
                    shd(node_1.right, node_2.right, hd))
    return (1 / (arity + 1)) * (hd(node_1, node_2) + children_sum)
c6aef0189d41887fc4e63991d0176a27b0e1dd8a
32,172
import numpy


def movmeanstd(ts, m=0):
    """
    Calculate the mean and standard deviation within a moving window passing
    across a time series.

    Parameters
    ----------
    ts: Time series to evaluate.
    m: Width of the moving window.

    Returns a [mov_mean, mov_std] pair of arrays, one entry per window.
    """
    if m <= 1:
        raise ValueError("Query length must be longer than one")

    window = int(m)
    ts = ts.astype(numpy.longdouble)

    # Prefix sums with a leading 0 turn windowed sums into cumsum differences.
    csum = numpy.insert(numpy.cumsum(ts), 0, 0)
    csum_sq = numpy.insert(numpy.cumsum(ts ** 2), 0, 0)

    win_sum = csum[window:] - csum[:-window]
    win_sum_sq = csum_sq[window:] - csum_sq[:-window]

    mov_mean = win_sum / m
    # E[x^2] - E[x]^2; abs() guards tiny negative values from rounding.
    variance = (win_sum_sq / m) - (win_sum / m) ** 2

    if numpy.all(variance == 0):
        mov_std = variance
    else:
        mov_std = numpy.sqrt(numpy.abs(variance))

    return [mov_mean, mov_std]
8a9e56db4f26862bff972a3dbfac87f6ea5b8c35
32,173
def importing_regiondata():
    """
    Loads the regiondata from data/regiondata.dta (Stata format).

    TODO(review): the original notes said this *should* convert the year
    column to a proper year and immediately build a geopandas GeoDataFrame;
    neither is implemented yet -- the raw pandas DataFrame is returned.

    Returns: a dataframe
    """
    regiondata = pd.read_stata("data/regiondata.dta")
    return regiondata
132e4076e941f4451b6bb52c5d81c5895dde0154
32,174
from typing import List, Optional, Tuple


def load_docked_ligands(
        pdbqt_output: str) -> "Tuple[List[RDKitMol], List[float]]":
    """This function loads ligands docked by autodock vina.

    Autodock vina writes outputs to disk in a PDBQT file format. This PDBQT
    file can contain multiple docked "poses". Recall that a pose is an
    energetically favorable 3D conformation of a molecule. This utility
    function reads and loads the structures for multiple poses from vina's
    output file.

    Parameters
    ----------
    pdbqt_output: str
        Should be the filename of a file generated by autodock vina's docking
        software.

    Returns
    -------
    Tuple[List[rdkit.Chem.rdchem.Mol], List[float]]
        Tuple of `molecules, scores`. `molecules` is a list of rdkit molecules
        with 3D information. `scores` is the associated vina score.

    Notes
    -----
    This function requires RDKit to be installed.
    """
    try:
        # Bug fix: this import belongs *inside* the try block -- it is the
        # statement the ModuleNotFoundError handler is meant to guard, and
        # the previous empty try body was a SyntaxError.
        from rdkit import Chem
    except ModuleNotFoundError:
        raise ImportError("This function requires RDKit to be installed.")

    # Bug fix: close the file handle instead of leaking it.
    with open(pdbqt_output) as f:
        lines = f.readlines()

    molecule_pdbqts = []
    scores = []
    current_pdbqt: Optional[List[str]] = None
    for line in lines:
        if line[:5] == "MODEL":
            current_pdbqt = []
        elif line[:19] == "REMARK VINA RESULT:":
            words = line.split()
            # the line has format
            # REMARK VINA RESULT: score ...
            # There is only 1 such line per model so we can append it
            scores.append(float(words[3]))
        elif line[:6] == "ENDMDL":
            molecule_pdbqts.append(current_pdbqt)
            current_pdbqt = None
        elif current_pdbqt is not None:
            # Robustness fix: previously any line before the first MODEL was
            # appended to None, raising AttributeError; such lines are now
            # ignored.
            current_pdbqt.append(line)

    molecules = []
    for pdbqt_data in molecule_pdbqts:
        pdb_block = pdbqt_to_pdb(pdbqt_data=pdbqt_data)
        mol = Chem.MolFromPDBBlock(str(pdb_block), sanitize=False,
                                   removeHs=False)
        molecules.append(mol)
    return molecules, scores
bf7f6def099ccd3f3b54e431db6b944f85e49a2e
32,175
def rotationFromQuaternion(*args):
    """rotationFromQuaternion(float pA, float pB, float pC, float pD) -> Rotation

    SWIG-generated wrapper: builds an almath Rotation from quaternion
    components by delegating to the native _almath extension.
    """
    return _almath.rotationFromQuaternion(*args)
e418bf864246ef209291d970e9cf33f0edc3fe8f
32,176
import re


def get_username(identifier):
    """Resolve *identifier* to a username.

    If the identifier looks like an email address, look up the matching User
    and return its username (raising Http404 when the lookup fails);
    otherwise return the identifier unchanged.
    """
    # NOTE(review): the pattern is loose and not a raw string; the bare
    # except below also masks errors other than User.DoesNotExist
    # (e.g. MultipleObjectsReturned) as a 404.
    pattern = re.compile('.+@\w+\..+')
    if pattern.match(identifier):
        try:
            user = User.objects.get(email=identifier)
        except:
            raise Http404
        else:
            return user.username
    else:
        return identifier
de5eb0db99b9580cd210f733cd2e829c84593573
32,177
def halo_particles(mock_dm_halo):
    """Spherical mock halo (fixture-style factory).

    Returns a `make(N_part=100, seed=None)` callable producing
    (mass, position, velocity) arrays; masses/positions come from the
    injected `mock_dm_halo`, velocities are uniform random in [0, 1)^3.
    """
    def make(N_part=100, seed=None):
        # Seeded RNG keeps the velocities reproducible across calls.
        random = np.random.RandomState(seed=seed)
        mass_dm, pos_dm = mock_dm_halo(N_part=N_part)
        vel_dm = random.random_sample(size=(N_part, 3))
        return mass_dm, pos_dm, vel_dm

    return make
36c980c0d81c4a1edf09feec9aafcf1605968bb3
32,178
from typing import Dict
from typing import Any
import torch


def TensorRTCompileSpec(compile_spec: Dict[str, Any]) -> torch.classes.tensorrt.CompileSpec:
    """Utility to create a formatted spec dictionary for using the PyTorch TensorRT backend

    Args:
        compile_spec (dict): Compilation settings including operating precision,
            target device, etc.  One key is required, ``input_shapes``,
            describing the input sizes or ranges for inputs to the graph as
            well as expected types and formats for those inputs.  All other
            keys are optional.  Input sizes can be specified as torch sizes,
            tuples or lists.  Op precisions can be specified using torch
            datatypes or trtorch datatypes; either torch devices or the
            trtorch device type enum select the device type.

    Note:
        Partial compilation of TorchScript modules is not supported through
        the PyTorch TensorRT backend.  If you need this feature, use
        trtorch.compile to compile your module.  Usage of the resulting
        module is as if you were using the TensorRT integration.

    Returns:
        torch.classes.tensorrt.CompileSpec: List of methods and formatted
        spec objects to be provided to ``torch._C._jit_to_tensorrt``
    """
    parsed_spec = _parse_compile_spec(compile_spec)

    backend_spec = torch.classes.tensorrt.CompileSpec()

    # Mirror each parsed input into a backend-native _Input object.
    for i in parsed_spec.inputs:
        clone = torch.classes.tensorrt._Input()
        clone._set_min(i.min)
        clone._set_opt(i.opt)
        clone._set_max(i.max)
        clone._set_dtype(i.dtype)
        clone._set_format(i.format)
        clone._set_input_is_dynamic(i.input_is_dynamic)
        backend_spec._append_input(clone)

    d = torch.classes.tensorrt._Device()
    d._set_device_type(int(parsed_spec.device.device_type))
    d._set_gpu_id(parsed_spec.device.gpu_id)
    d._set_dla_core(parsed_spec.device.dla_core)
    d._set_allow_gpu_fallback(parsed_spec.device.allow_gpu_fallback)

    if parsed_spec.torch_fallback.enabled:
        raise RuntimeError(
            "Partial module compilation is not currently supported via the PyTorch TensorRT backend. If you need partial compilation, use trtorch.compile"
        )

    torch_fallback = torch.classes.tensorrt._TorchFallback()
    torch_fallback._set_enabled(parsed_spec.torch_fallback.enabled)
    torch_fallback._set_min_block_size(parsed_spec.torch_fallback.min_block_size)
    torch_fallback._set_forced_fallback_operators(parsed_spec.torch_fallback.forced_fallback_operators)

    backend_spec._set_device(d)
    backend_spec._set_torch_fallback(torch_fallback)
    backend_spec._set_precisions([int(i) for i in parsed_spec.enabled_precisions])

    backend_spec._set_disable_tf32(parsed_spec.disable_tf32)
    # BUGFIX: _set_refit was previously called twice; once is enough.
    backend_spec._set_refit(parsed_spec.refit)
    backend_spec._set_debug(parsed_spec.debug)
    backend_spec._set_strict_types(parsed_spec.strict_types)
    backend_spec._set_capability(int(parsed_spec.capability))
    backend_spec._set_num_min_timing_iters(parsed_spec.num_min_timing_iters)
    backend_spec._set_num_avg_timing_iters(parsed_spec.num_avg_timing_iters)
    backend_spec._set_workspace_size(parsed_spec.workspace_size)
    backend_spec._set_max_batch_size(parsed_spec.max_batch_size)
    backend_spec._set_truncate_long_and_double(parsed_spec.truncate_long_and_double)
    backend_spec._set_ptq_calibrator(parsed_spec._get_calibrator_handle())

    return backend_spec
0274d315d97eb3b138b53db73374642207abefc5
32,179
def get_sec(hdr, key='BIASSEC'):
    """Return the numpy index range for a FITS section.

    The header entry is expected in the standard format
    ``{key} = '[{col1}:{col2},{row1}:{row2}]'`` where
    1 <= col <= NAXIS1 and 1 <= row <= NAXIS2.  Returns None when the
    key is absent.
    """
    if key not in hdr:
        return None
    spec = hdr.get(key)  # without card comment
    ny = hdr['NAXIS2']
    cols = spec[spec.index('[') + 1:spec.index(',')].split(':')
    rows = spec[spec.index(',') + 1:spec.index(']')].split(':')
    # Row indices are flipped relative to NAXIS2; columns go 0-based.
    return [ny - int(rows[1]), ny - int(rows[0]) + 1,
            int(cols[0]) - 1, int(cols[1])]
3927e6f5d62818079fa9475976f04dda1824e976
32,180
import numpy


def _fetch_object_array(cursor, type_tree=None):
    """Fetch an array whose base type is not considered scalar.

    Walks the cursor over every array element, fetching each subtree
    into a numpy object array of the cursor's dimensions.
    """
    shape = cursor_get_array_dim(cursor)

    # A rank-0 array is treated as a 1-dimensional array of size 1.
    if not shape:
        shape.append(1)

    result = numpy.empty(dtype=object, shape=shape)

    cursor_goto_first_array_element(cursor)

    # Every element except the last is followed by a cursor advance;
    # after the final element we return to the parent scope instead.
    flat = result.flat
    last = result.size - 1
    for idx in range(last):
        flat[idx] = _fetch_subtree(cursor, type_tree)
        cursor_goto_next_array_element(cursor)
    flat[last] = _fetch_subtree(cursor, type_tree)
    cursor_goto_parent(cursor)

    return result
b4e262ec7fc4dba943ab2f8420add12f59aed4eb
32,181
import pickle


def load_training_batch(batch_id, batch_size):
    """Load the preprocessed training data for one CIFAR batch.

    Args:
        batch_id: numeric id of the pickled batch file to read.
        batch_size: maximum number of samples per yielded batch.

    Returns:
        Batches of (features, labels) of ``batch_size`` or less, as
        produced by ``batch_features_labels``.
    """
    filename = 'data/cifar_pickle/' + 'batch_' + str(batch_id) + '.pkl'
    # BUGFIX: use a context manager -- the previous open() leaked the
    # file handle.
    with open(filename, mode='rb') as handle:
        features, labels = pickle.load(handle)
    return batch_features_labels(features, labels, batch_size)
4aa762a80dde638d71076a888613606a1ee11a48
32,182
import subprocess


def check_gzip(f):
    """Check that a local gzipped file is intact.

    Runs ``gzip -t`` on the file using subprocess.

    Args:
        f: path to the gzip file to test.

    Returns:
        bool: True when ``gzip -t`` exits with status 0, False otherwise.
    """
    status = subprocess.run(["gzip", "-t", f],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    # Idiom: return the comparison directly instead of if/True/False.
    return status.returncode == 0
c703e1dc39631c581cf9d6b4658160237b6ca27f
32,183
def reason_key_for_alert(alert):
    """Compute the grouping key for an alert.

    Alerts sharing a step name and failure reason are grouped together;
    when the reason is unknown the builder name is used instead so the
    alert is at least unique per builder.
    """
    # FIXME: May need something smarter for reason_key.
    suffix = alert['reason'] if alert['reason'] else alert['builder_name']
    return '%s:%s' % (alert['step_name'], suffix)
199d19360d45a7eeb1cbd09fa320d93c215a4be7
32,184
import logging
import json


def get_record(params, record_uid):
    """Return the referenced record from the record cache.

    Args:
        params: session/state object carrying ``record_cache``.
        record_uid (str): UID of the record to load.

    Returns:
        Record or None: the decrypted record; None when the UID is
        empty, the cache is missing, or the UID is not cached.  On a
        decryption/parsing error a bare ``Record`` is returned and the
        error is logged.
    """
    record_uid = record_uid.strip()
    if not record_uid:
        logging.warning('No record UID provided')
        return
    if not params.record_cache:
        logging.warning('No record cache. Sync down first.')
        return
    if not record_uid in params.record_cache:
        logging.warning('Record UID %s not found in cache.' % record_uid)
        return
    cached_rec = params.record_cache[record_uid]
    rec = Record()
    try:
        data = json.loads(cached_rec['data_unencrypted'])
        rec = Record(record_uid)
        extra = None
        if 'extra_unencrypted' in cached_rec:
            extra = json.loads(cached_rec['extra_unencrypted'])
        rec.load(data, revision=cached_rec['revision'], extra=extra)
        # Mask the password unless the caller is allowed to view it.
        if not resolve_record_view_path(params, record_uid):
            rec.mask_password()
        # V3 records expose type/login/url through the RecordV3 helpers.
        if cached_rec.get('version') == 3:
            rec.record_type = RecordV3.get_record_type_name(data)
            rec.login = RecordV3.get_record_field_value(cached_rec.get('data_unencrypted'), 'login')
            rec.login_url = RecordV3.get_record_field_value(cached_rec.get('data_unencrypted'), 'url')
    except Exception:
        # BUGFIX: was a bare ``except:`` that also swallowed
        # SystemExit/KeyboardInterrupt and discarded the traceback.
        logging.exception('**** Error decrypting record %s', record_uid)
    return rec
7fce71c2f90387272a9c9b0a61ad4cccabf830f5
32,185
def get_probabilities(path, seq_len, model, outfile, mode):
    """Get network-assigned probabilities for the test set.

    Parameters:
        path: dict of input file locations (must include a 'labels' key)
        seq_len (int): length of the input DNA sequence
        model: the keras model to evaluate
        outfile: file the per-example probabilities are written to
        mode: evaluation mode passed through to ``test_on_batch``

    Returns:
        (true_labels, probas): arrays of true test-set labels and the
        probabilities assigned by the network.
    """
    # Batch size of 1000 is a default; adjust here if needed.
    generator = merge_generators(path=path, batchsize=1000,
                                 seqlen=seq_len, mode='nr')
    test_on_batch(generator, model, outfile, mode)
    probas = np.loadtxt(outfile)
    true_labels = np.loadtxt(path['labels'])
    return true_labels, probas
a75bc11704538d082ecf91a61765f4412ec2c75d
32,186
import aiohttp
import virtualenv_support
import async_timeout
import chardet
import multidict
import yarl
import idna
import pip
import setuptools
import virtualenv
import os
import shutil


def install_dependencies(python) -> str:
    """
    Copy aiohttp and virtualenv install locations (and their transitive
    dependencies) into a new virtualenv so that the update server can
    install without access to full system site-packages or a connection
    to the internet.

    Full access to system site-packages causes the install inside the
    virtualenv to fail quietly because it does not have permission to
    overwrite a package by the same name and then it picks up the system
    version of otupdate.  Also, we have to do a copy rather than a
    symlink because a non-admin Windows account does not have
    permissions to create symlinks.

    Args:
        python: path to the python executable inside the virtualenv.

    Returns:
        str: the virtualenv's site-packages directory.
    """
    # The packages that must exist in the virtualenv are imported at
    # module level so they can be located via their __file__ attribute.

    # Determine where the site-packages directory exists in the virtualenv
    tmpdirname = python.split(VENV_NAME)[0]
    # Hardening: invoke the interpreter with an argument list instead of
    # shell=True + str.format, so interpreter paths containing spaces or
    # shell metacharacters cannot break (or inject into) the command.
    paths_raw = sp.check_output(
        [python, '-c', 'import sys; [print(p) for p in sys.path]'])
    paths = paths_raw.decode().split()
    venv_site_pkgs = list(
        filter(
            lambda x: tmpdirname in x and 'site-packages' in x,
            paths))[-1]

    dependencies = [
        ('aiohttp', aiohttp),
        ('virtualenv_support', virtualenv_support),
        ('async_timeout', async_timeout),
        ('chardet', chardet),
        ('multidict', multidict),
        ('yarl', yarl),
        ('idna', idna),
        ('pip', pip),
        ('setuptools', setuptools),
        ('virtualenv.py', virtualenv)]

    # Copy each dependency from its system-install location to the
    # site-packages directory of the virtualenv.
    for dep_name, dep in dependencies:
        src_dir = os.path.abspath(os.path.dirname(dep.__file__))
        dst = os.path.join(venv_site_pkgs, dep_name)
        if os.path.exists(dst):
            log.debug('{} already exists--skipping'.format(dst))
        else:
            log.debug('Copying {} to {}'.format(dep_name, dst))
            if dep_name.endswith('.py'):
                shutil.copy2(os.path.join(src_dir, dep_name), dst)
            else:
                shutil.copytree(src_dir, dst)
    return venv_site_pkgs
f2378db840c1c59b41adb292cc0640380593003f
32,187
from azure.mgmt.sql import SqlManagementClient
from azure.cli.core.commands.client_factory import get_mgmt_service_client


def get_sql_management_client(_):
    """Build and return the Azure SQL management client."""
    client = get_mgmt_service_client(SqlManagementClient)
    return client
6f67408fdecbe9b1a70ffbc34a4871c796e0f9f6
32,188
def string_to_gast(node):
    """Handle the primitive string base case.

    example: "hello"
    exampleIn: Str(s='hello')
    exampleOut: {'type': 'str', 'value': 'hello'}
    """
    result = {}
    result["type"] = "str"
    result["value"] = node.s
    return result
a3dcd89e893c6edd4a9ba6095cd107bb48cc9782
32,189
def ed25519_generate_key_pair_from_secret(secret):
    """Deterministically derive a new Ed25519 key pair from a secret.

    Args:
        secret (:class:`string`): A secret that serves as a seed

    Returns:
        A tuple of (private_key, public_key) encoded in base58.
    """
    # NOTE: if you want to do this correctly, use a key derivation function!
    seed = secret if isinstance(secret, bytes) else secret.encode()
    digest = sha3.keccak_256(seed).digest()
    signing_key = Ed25519SigningKeyFromHash.generate(hash_bytes=digest)
    # Private key
    private_value_base58 = signing_key.encode(encoding='base58')
    # Public key
    public_value_compressed_base58 = signing_key.get_verifying_key().encode(encoding='base58')
    return private_value_base58, public_value_compressed_base58
25b8c18289c4cf8f09a7ba937fc8f9645406e9f2
32,190
from typing import List
import math


def align_tiles_naive(request: AlignNaiveRequest, tiles: List[TileModelDB]) -> List[AlignedTiledModel]:
    """Naively align tiles from the given rows and method.

    Performs no advanced stitching or pixel checking, and ignores the
    tiles' own row/column indices -- tiles are laid out in the order
    they are received.  Meant to be called in a separate thread due to
    being cpu bound.
    """
    if not tiles:
        return []

    # All tiles are assumed to share the dimensions of the first one.
    width_px = tiles[0].width_px
    height_px = tiles[0].height_px
    columns = math.ceil(len(tiles) / request.rows)

    aligned: List[AlignedTiledModel] = []
    for index, tile in enumerate(tiles):
        # Grid position follows directly from the flat index:
        # row-major fills columns first, column-major fills rows first.
        if request.method == "byRow":
            row, col = divmod(index, columns)
        else:
            col, row = divmod(index, request.rows)
        payload = tile.dict()
        payload["offset_x"] = col * width_px
        payload["offset_y"] = row * height_px
        aligned.append(AlignedTiledModel.parse_obj(payload))
    return aligned
b279273d800a6884ad95f43f0a6a6f3be1ac3243
32,191
def estimate_operating_empty_mass(mtom, fuse_length, fuse_width, wing_area, wing_span, TURBOPROP):
    """Estimate the operating empty mass (OEM).

    Source: Raymer, D.P. "Aircraft design: a conceptual approach"
    AIAA educational Series, Fourth edition (2006).

    Args:
        mtom (float): Maximum take off mass [kg]
        fuse_length (float): Fuselage length [m]
        fuse_width (float): Fuselage width [m]
        wing_area (float): Wing area [m^2]
        wing_span (float): Wing span [m]
        TURBOPROP (bool): True if the engines are turboprop, False otherwise.

    Returns:
        oem (float): Operating empty mass [kg]
    """
    G = 9.81   # [m/s^2] Acceleration of gravity.
    KC = 1.04  # [-] Wing with variable sweep (1.0 otherwise).

    if TURBOPROP:
        C = -0.05  # [-] General aviation twin turboprop
        if fuse_length < 15.00:
            A = 0.96
        elif fuse_length < 30.00:
            A = 1.07
        else:
            A = 1.0
    else:
        C = -0.08  # [-] General aviation twin engines
        if fuse_length < 30.00:
            A = 1.45
        elif fuse_length < 35.00:
            A = 1.63
        elif fuse_length < 60.00:
            A = 1.63 if wing_span > 61 else 1.57
        else:
            A = 1.63

    return round((A * KC * (mtom * G) ** (C)) * mtom, 3)
5b9bed8cef76f3c10fed911087727f0164cffab2
32,192
def var_gaussian(r, level=5, modified=False):
    """
    Return the Parametric Gaussian VaR of a Series or DataFrame.

    When ``modified`` is True the z-score is adjusted with the
    Cornish-Fisher expansion based on the observed skewness and
    kurtosis of ``r``.
    """
    # z-score under the Gaussian assumption
    z = norm.ppf(level / 100)
    if modified:
        skew = skewness(r)
        kurt = kurtosis(r)
        z = (z
             + (z ** 2 - 1) * skew / 6
             + (z ** 3 - 3 * z) * (kurt - 3) / 24
             - (2 * z ** 3 - 5 * z) * (skew ** 2) / 36)
    return -(r.mean() + z * r.std(ddof=0))
18d3b1ee2228fafaaf977b216245c8217e77396b
32,193
def grid_search_serial(data, greens, misfit, grid):
    """
    Grid search over moment tensors.  For each moment tensor in grid,
    generates synthetics and evaluates data misfit.

    Args:
        data: dict of observed data keyed by component/station.
        greens: dict of Green's functions with the same keys as data.
        misfit: dict of misfit callables with the same keys as data.
        grid: iterable of moment tensors exposing ``size`` and ``index``.

    Returns:
        numpy.ndarray: total misfit per grid point.
    """
    results = np.zeros(grid.size)
    # BUGFIX: ``print grid.index`` was Python-2 syntax and a
    # SyntaxError under Python 3.
    for count, mt in enumerate(grid):
        print(grid.index)
        for key in data:
            results[count] += misfit[key](data[key], greens[key], mt)
    return results
fa0a2c19cfbfa685d59f3effea7b3f7478999f88
32,194
def getSqTransMoment(system):
    """Return the square electronic transition moment |Re|^2 for a band system.

    Input SYSTEM is a string with both the molecular species AND the
    band "system".  |Re|^2 is needed for the "Line strength",
    S = |R_e|^2*q_v'v" or just |R_e|^2.

    Sources: Allen's Astrophysical Quantities, 4.12.2 - 4.13.1;
    rotational & vibrational constants for TiO states: p. 87, Table 4.17.
    As of Feb 2017 - uses the band-head value R_00^2 from the last
    column of the table.

    Returns:
        float: |Re|^2 for the known systems, 0.0 for unknown systems.
    """
    RSqu = 0.0  # default for unrecognized systems

    # TiO alpha system
    if ("TiO_C3Delta_X3Delta" == system):
        RSqu = 0.84
    # TiO beta system
    if ("TiO_c1Phi_a1Delta" == system):
        RSqu = 4.63
    # TiO gamma system
    if ("TiO_A3Phi_X3Delta" == system):
        RSqu = 5.24
    # CH A^2Delta_X^2Pi system - "G band" at 4300 A
    if ("CH_A2Delta_X2Pi" == system):
        RSqu = 0.081  # mean of two values given

    # BUGFIX: the return statement was commented out, so the function
    # always returned None despite the documented contract.
    return RSqu
19c5311f7d8fde4bb834d809fd2f6ed7dd2c036e
32,195
def volumes(assets, start, end, frequency='daily', symbol_reference_date=None, start_offset=0, use_amount=False):
    """Fetch traded volume (or traded amount) for assets over a period.

    Parameters
    ----------
    assets : int/str/Asset or iterable of same
        Identifiers for assets to load.  Integers are interpreted as
        sids, strings as symbols.
    start : str or pd.Timestamp
        Start date of data to load.
    end : str or pd.Timestamp
        End date of data to load.
    frequency : {'minute', 'daily'}, optional
        Frequency at which to load data.  Default is 'daily'.
    symbol_reference_date : pd.Timestamp, optional
        Date as of which to resolve strings as tickers.  Default is the
        current day.
    start_offset : int, optional
        Number of periods before start to fetch.  Default is 0.  Most
        often useful for calculating returns.
    use_amount : bool
        When True, load the traded-amount field instead of volume.

    Returns
    -------
    pd.Series or pd.DataFrame
        Volumes (or amounts) for the requested asset(s) and dates; a
        Series for a single asset, a DataFrame for multiple assets.
    """
    if use_amount:
        field = 'amount'
    else:
        field = 'volume'
    return prices(assets, start, end, frequency, field,
                  symbol_reference_date, start_offset)
e2e0a7d6bd8b659e070299d00699d8cae6ed3c9f
32,196
from datetime import datetime
import os
import requests
import re
import zlib


def fetch(path, url=None):
    """Fetch a file from either a url or the accession id in its filename,
    updating the local copy if the remote version is newer.

    Checks ``path`` for the file and downloads it when a url is given or
    when the file name is an accession id; also downloads when the
    remote location has a more recent version.

    Args:
        path (str): Path to the file to be fetched.
        url (str, optional): Url to update the local file from.
            Defaults to None.

    Returns:
        bool: True if the file is updated or downloaded, False otherwise.
    """

    def sync():
        """Download ``url`` when no Last-Modified date is available or the
        remote copy is more recent than the local file."""
        if not path_exists:
            return download(path, url)
        # Check last modified dates of file and url, download if url is newer.
        # BUGFIX: the module does ``from datetime import datetime``, so the
        # previous ``datetime.datetime.fromtimestamp`` / ``.strptime`` calls
        # raised AttributeError at runtime.
        filemodtime = datetime.fromtimestamp(os.path.getmtime(path))
        r = requests.get(url, stream=True)
        if 'Last-Modified' not in r.headers:
            print('Last modified header not found in url, downloading...')
            # no last-modified header in url, downloading file
            return download(path, url)
        urlmodstr = r.headers['Last-Modified']
        urlmodtime = datetime.strptime(urlmodstr, '%a, %d %b %Y %H:%M:%S %Z')
        if filemodtime < urlmodtime:
            # Url file is more recent, downloading url.
            print('Url is more recent than file, downloading')
            return download(path, url)
        print('File is up-to-date')
        return False

    def gbsync():
        """Download an accession file from the nuccore database when the
        local file is missing or its date differs from the Entrez copy."""
        print('Trying to fetch from Entrez')
        regex = r'([A-Z]{1,2}_\w+)'
        filename = os.path.basename(path)
        matches = re.search(regex, filename)
        if not matches:
            print('Filename does not match an accession')
            return False
        acc = matches.groups()[0]
        url = ('https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?'
               'rettype=gbwithparts&tool=biopython&db=nuccore'
               '&id={}&email=example%40example.com'.format(acc))
        r = requests.get(url, stream=True)
        if r.status_code != requests.codes.ok:
            print('Bad Status code returned from Entrez')
            r.close()
            return False
        if not path_exists:
            print('Path given does not exist, downloading from url')
            r.close()
            return download(path, url)
        # Path exists: compare the date in the local file with the
        # Entrez version and download when they differ.
        regex = r'(\d{2}-\w{3}-\d{4})'
        with open(path, 'r') as f:
            print('Checking accession file for date')
            fline = f.readline()
            matches = re.search(regex, fline)
            if not matches:
                print('No date found in accession file {}, '
                      'overwriting with Entrez entry'.format(filename))
                return download(path, url)
            gbdate = matches.groups()[0]
            # Date found in file; download the first chunk of the url
            # and gzip-decompress it to extract the remote date.
            chunk_size = 256
            fchunk = r.raw.read(chunk_size)
            r.close()
            gzip_decomp = zlib.decompressobj(16 + zlib.MAX_WBITS)
            decomp_chunk = gzip_decomp.decompress(fchunk).decode()
            urldate = re.search(regex, decomp_chunk).groups()[0]
            if gbdate != urldate:
                print("Dates don't match for accession file {}, "
                      "downloading".format(filename))
                return download(path, url)
            print('Dates are matching for accession file {} '
                  'returning'.format(filename))
            return False

    filename = os.path.basename(path)
    path_exists = os.path.isfile(path)
    if url:
        return sync()
    # No url given: try to fetch the file as an accession id,
    # returning False on failure.
    return gbsync()
32,197
def qipy_action(cd_to_tmpdir):
    """Fixture providing a fresh QiPy action."""
    action = QiPyAction()
    return action
7c6d828c4baf29d2f457f02b0b54e6c967d96cb3
32,198
from twistedcaldav.directory.calendaruserproxy import ProxyDBService


def recordProxyAccessInfo(directory, record):
    """
    Group membership info for a record.

    Generator written in inlineCallbacks style: yields Deferreds from
    ProxyDBService and the directory, then delivers its result via
    ``returnValue``.  Collects every proxy assignment held by ``record``
    or any group it (transitively) belongs to and returns a formatted
    Table of (UID, record type, short name, full name, access) rows, or
    None when no proxy access exists.
    """
    # FIXME: This proxy finding logic should be in DirectoryRecord.
    def meAndMyGroups(record=record, groups=set((record,))):
        # Transitive closure of the record's group memberships,
        # accumulated into ``groups``.  NOTE(review): the mutable
        # default for ``groups`` is shared across zero-argument calls --
        # confirm this helper is only invoked once per outer call.
        for group in record.groups():
            groups.add(group)
            meAndMyGroups(group, groups)
        return groups

    # FIXME: This module global is really gross.

    rows = []
    # Tracks (proxyUID, proxyType) pairs already emitted so a proxy
    # reachable via several groups is listed only once.
    proxyInfoSeen = set()
    for record in meAndMyGroups():
        proxyUIDs = (yield ProxyDBService.getMemberships(record.uid))

        for proxyUID in proxyUIDs:
            # These are of the form: F153A05B-FF27-4B6C-BD6D-D1239D0082B0#calendar-proxy-read
            # I don't know how to get DirectoryRecord objects for the
            # proxyUID here, so, let's cheat for now.
            proxyUID, proxyType = proxyUID.split("#")
            if (proxyUID, proxyType) not in proxyInfoSeen:
                proxyRecord = yield directory.recordWithUID(proxyUID)
                rows.append((proxyUID, proxyRecord.recordType, proxyRecord.shortNames[0], proxyRecord.fullName, proxyType))
                proxyInfoSeen.add((proxyUID, proxyType))

    if not rows:
        returnValue(None)

    # Sort by record type, then short name, then access level.
    rows = sorted(
        rows,
        key=lambda row: (row[1], row[2], row[4])
    )

    table = Table()
    table.addHeader(("UID", "Record Type", "Short Name", "Full Name", "Access"))
    for row in rows:
        table.addRow(row)

    returnValue(table.toString())
280e7340aa21ec4fd64a8ea2d892b7f67d9a4da5
32,199