content: string (lengths 22 – 815k)
id: int64 (0 – 4.91M)
def executeCmd(cmd,arg): """ the meat: how we react to the SNI-based logic and execute the underlying command """ global currentPath global currentDirList global currentFileList global currentFileSizeList global agentName commands = initCmd(cmd) for testedCommand, alias in commands.items(): if testedCommand == cmd == "WHERE": currentPath = encodeString(cmdHandler(alias)) return cmdHandler(alias) elif testedCommand == cmd == 'CB': returnedOutput = cmdHandler(alias) currentPath = encodeString(returnedOutput) return returnedOutput elif testedCommand == cmd == 'ALIVE': return (str(agentName)).encode('utf-8') elif testedCommand == cmd == 'LS': returnedOutput = cmdHandler(alias) currentFileList,returnedOutput = emptyListCheck(returnedOutput) return returnedOutput elif testedCommand == cmd == 'SIZE': returnedOutput = cmdHandler(alias) currentFileSizeList = emptyListCheck(returnedOutput) return returnedOutput elif testedCommand == cmd == 'CD': try: target_dir = ('%s' % currentDirList[int(arg)]) except IndexError: print("(!) Invalid directory number!") return alias = (alias % target_dir).replace("'","") returnedOutput = (cmdHandler(alias)) currentPath = encodeString(returnedOutput) return returnedOutput elif testedCommand == cmd == 'EX': try: targetFile = ('%s' % currentFileList[int(arg)]) except IndexError: print("(!) Invalid file number!") return targetFilePath = ('%s/%s' % (currentPath,targetFile)) with open(targetFilePath, 'rb') as f: content = base64.b32encode(f.read()) return content elif testedCommand == cmd == "LD": returnedOutput = cmdHandler(alias) currentDirList,returnedOutput = emptyListCheck(returnedOutput) return returnedOutput elif testedCommand == cmd == "LIST": returnedOutput = cmdHandler(alias) return returnedOutput
22,100
async def text2image( text: str, auto_parse: bool = True, font_size: int = 20, color: Union[str, Tuple[int, int, int], Tuple[int, int, int, int]] = "white", font: str = "CJGaoDeGuo.otf", font_color: Union[str, Tuple[int, int, int]] = "black", padding: Union[int, Tuple[int, int, int, int]] = 0, ) -> BuildImage: """ 说明: 解析文本并转为图片 使用标签 <f> </f> 可选配置项 font: str -> 特殊文本字体 fs / font_size: int -> 特殊文本大小 fc / font_color: Union[str, Tuple[int, int, int]] -> 特殊文本颜色 示例 在不在,<f font=YSHaoShenTi-2.ttf font_size=30 font_color=red>HibiKi小姐</f>, 你最近还好吗,<f font_size=15 font_color=black>我非常想你</f>,这段时间我非常不好过, <f font_size=25>抽卡抽不到金色</f>,这让我很痛苦 参数: :param text: 文本 :param auto_parse: 是否自动解析,否则原样发送 :param font_size: 普通字体大小 :param color: 背景颜色 :param font: 普通字体 :param font_color: 普通字体颜色 :param padding: 文本外边距,元组类型时为 (上,左,下,右) """ pw = ph = top_padding = left_padding = 0 if padding: if isinstance(padding, int): pw = padding * 2 ph = padding * 2 top_padding = left_padding = padding elif isinstance(padding, tuple): pw = padding[0] + padding[2] ph = padding[1] + padding[3] top_padding = padding[0] left_padding = padding[1] if auto_parse and re.search(r"<f(.*)>(.*)</f>", text): _data = [] new_text = "" placeholder_index = 0 for s in text.split("</f>"): r = re.search(r"<f(.*)>(.*)", s) if r: start, end = r.span() if start != 0 and (t := s[:start]): new_text += t _data.append( [ (start, end), f"[placeholder_{placeholder_index}]", r.group(1).strip(), r.group(2), ] ) new_text += f"[placeholder_{placeholder_index}]" placeholder_index += 1 new_text += text.split("</f>")[-1] image_list = [] current_placeholder_index = 0 # 切分换行,每行为单张图片 for s in new_text.split("\n"): _tmp_text = s img_height = BuildImage(0, 0, font_size=font_size).getsize("正")[1] img_width = 0 _tmp_index = current_placeholder_index for _ in range(s.count("[placeholder_")): placeholder = _data[_tmp_index] if "font_size" in placeholder[2]: r = re.search(r"font_size=['\"]?(\d+)", placeholder[2]) if r: w, h = BuildImage(0, 0, font_size=int(r.group(1))).getsize( placeholder[3] ) img_height = img_height if img_height > h else h img_width += w else: img_width += BuildImage(0, 0, font_size=font_size).getsize( placeholder[3] )[0] _tmp_text = _tmp_text.replace(f"[placeholder_{_tmp_index}]", "") _tmp_index += 1 img_width += BuildImage(0, 0, font_size=font_size).getsize(_tmp_text)[0] # img_width += len(_tmp_text) * font_size # 开始画图 A = BuildImage( img_width, img_height, color=color, font=font, font_size=font_size ) basic_font_h = A.getsize("正")[1] current_width = 0 # 遍历占位符 for _ in range(s.count("[placeholder_")): if not s.startswith(f"[placeholder_{current_placeholder_index}]"): slice_ = s.split(f"[placeholder_{current_placeholder_index}]") await A.atext( (current_width, A.h - basic_font_h - 1), slice_[0], font_color ) current_width += A.getsize(slice_[0])[0] placeholder = _data[current_placeholder_index] # 解析配置 _font = font _font_size = font_size _font_color = font_color for e in placeholder[2].split(): if e.startswith("font="): _font = e.split("=")[-1] if e.startswith("font_size=") or e.startswith("fs="): _font_size = int(e.split("=")[-1]) if _font_size > 1000: _font_size = 1000 if _font_size < 1: _font_size = 1 if e.startswith("font_color") or e.startswith("fc="): _font_color = e.split("=")[-1] text_img = BuildImage( 0, 0, plain_text=placeholder[3], font_size=_font_size, font_color=_font_color, font=_font, ) _img_h = ( int(A.h / 2 - text_img.h / 2) if new_text == "[placeholder_0]" else A.h - text_img.h ) await A.apaste(text_img, (current_width, _img_h - 1), True) 
current_width += text_img.w s = s[ s.index(f"[placeholder_{current_placeholder_index}]") + len(f"[placeholder_{current_placeholder_index}]") : ] current_placeholder_index += 1 if s: slice_ = s.split(f"[placeholder_{current_placeholder_index}]") await A.atext((current_width, A.h - basic_font_h), slice_[0]) current_width += A.getsize(slice_[0])[0] A.crop((0, 0, current_width, A.h)) # A.show() image_list.append(A) height = 0 width = 0 for img in image_list: height += img.h width = width if width > img.w else img.w width += pw height += ph A = BuildImage(width + left_padding, height + top_padding, color=color) current_height = top_padding for img in image_list: await A.apaste(img, (left_padding, current_height), True) current_height += img.h else: width = 0 height = 0 _tmp = BuildImage(0, 0, font_size=font_size) for x in text.split("\n"): w, h = _tmp.getsize(x) height += h width = width if width > w else w width += pw height += ph A = BuildImage( width + left_padding, height + top_padding, font_size=font_size, color=color, font=font, ) await A.atext((left_padding, top_padding), text, font_color) # A.show() return A
22,101
def get_dosage_ann():
    """ Convenience function for getting the dosage and snp annotation
    """
    dos = {}
    s_ann = {}
    dos_path =\
        ("/export/home/barnarj/CCF_1000G_Aug2013_DatABEL/CCF_1000G_Aug2013_Chr"
         "{0}.dose.double.ATB.RNASeq_MEQTL.txt")
    SNP_ANNOT =\
        ("/proj/genetics/Projects/shared/Studies/Impute_CCF_Arrythmia/"
         "Projects/CCF/Projects/ATB/Projects/ATB_RNASeq/OutputData/"
         "ATB.RNASeq_Variant_Ann.bed.gz")
    return(dos, s_ann)
22,102
def function_arguments(function_name: str, services_module: types.ModuleType) -> typing.List[str]:
    """Get function arguments for stan::services `function_name`.

    This function parses a function's docstring to get argument names. This is
    an inferior method to using `inspect.Signature.from_callable(function)`.
    Unfortunately, pybind11 does not support this use of `inspect`.

    A compiled `services_module` is required for the lookup. Only simple
    function arguments are returned. For example, callback writers and
    var_context arguments are dropped.

    Arguments:
        function_name: Name of the function.
        services_module (module): Compiled model-specific services extension module.

    Returns:
        Argument names for `function_name`.
    """
    function = getattr(services_module, f"{function_name}_wrapper")
    docstring = function.__doc__
    # first line looks something like this: function_name(arg1: int, arg2: int, ...) -> int
    function_name_with_arguments = docstring.split(" -> ", 1).pop(0)
    parameters = re.findall(r"(\w+): \w+", function_name_with_arguments)
    # remove arguments which are specific to the wrapper
    arguments_exclude = {"socket_filename"}
    return list(filter(lambda arg: arg not in arguments_exclude, parameters))
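As a rough, self-contained sketch of the docstring-parsing trick used above (the wrapper docstring below is made up for illustration, not taken from a real compiled services module):

import re

# Hypothetical first line of a pybind11-generated docstring.
docstring = "sample_wrapper(num_samples: int, num_warmup: int, socket_filename: str) -> int"

signature = docstring.split(" -> ", 1).pop(0)
params = re.findall(r"(\w+): \w+", signature)
params = [p for p in params if p != "socket_filename"]
print(params)  # ['num_samples', 'num_warmup']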
22,103
def cost_n_moves(prev_cost: int, weight: int = 1) -> int:
    """ 'g(n)' cost function that adds a 'weight' to each move."""
    return prev_cost + weight
22,104
def get_email_dict(txt_dir):
    """
    :param txt_dir: the input directory containing all text files.
    :return: a dictionary where the key is the publication ID and the value is
             the list of authors' email addresses.
    """
    def chunk(text_file, page_limit=2000):
        fin = codecs.open(text_file, encoding='utf-8')
        doc = []
        n = 0
        for line in fin:
            line = line.strip().lower()
            if line:
                doc.append(line)
                n += len(line)
                if n > page_limit:
                    break
        return ' '.join(doc)

    re_email = re.compile(
        r'[({\[]?\s*([a-z0-9\.\-_]+(?:\s*[,;|]\s*[a-z0-9\.\-_]+)*)\s*[\]})]?\s*@\s*([a-z0-9\.\-_]+\.[a-z]{2,})')
    email_dict = {}

    for txt_file in glob.glob(os.path.join(txt_dir, '*.txt')):
        # print(txt_file)
        try:
            doc = chunk(txt_file)
        except UnicodeDecodeError:
            continue
        emails = []
        for m in re_email.findall(doc):
            ids = m[0].replace(';', ',').replace('|', ',')
            domain = m[1]
            if ',' in ids:
                emails.extend([ID.strip() + '@' + domain for ID in ids.split(',') if ID.strip()])
            else:
                emails.append(ids + '@' + domain)
        if emails:
            key = os.path.basename(txt_file)[:-4]
            email_dict[key] = emails

    return email_dict
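For illustration only (the input string is invented), the e-mail regular expression above groups bracketed, delimiter-separated local parts that share one domain:

import re

re_email = re.compile(r'[({\[]?\s*([a-z0-9\.\-_]+(?:\s*[,;|]\s*[a-z0-9\.\-_]+)*)\s*[\]})]?\s*@\s*([a-z0-9\.\-_]+\.[a-z]{2,})')
text = "{alice, bob}@example.org and carol@lab.example.edu"
print(re_email.findall(text))
# [('alice, bob', 'example.org'), ('carol', 'lab.example.edu')]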
22,105
def get_gaussian_fundamentals(s, nfreq=None): """ Parses harmonic and anharmonic frequencies from gaussian log file. Input: s: String containing the log file output. nfreq : number of vibrational frequencies Returns: If successful: Numpy 2D array of size: nfreq x 2 1st column for harmonic frequencies in cm-1 2nd column for anharmonic frequencies in cm-1 else: A string showing the error. Portion of the relevant output: Fundamental Bands (DE w.r.t. Ground State) 1(1) 3106.899 2957.812 -0.042978 -0.008787 -0.008920 2(1) 3106.845 2959.244 -0.042969 -0.008924 -0.008782 3(1) 3082.636 2934.252 -0.043109 -0.008543 -0.008705 4(1) 3082.581 2935.702 -0.043101 -0.008709 -0.008539 5(1) 3028.430 2918.529 -0.048859 -0.008796 -0.008794 6(1) 3026.064 2926.301 -0.048438 -0.008788 -0.008785 7(1) 1477.085 1438.911 -0.044573 -0.001097 -0.007855 8(1) 1477.063 1439.122 -0.044576 -0.007858 -0.001089 9(1) 1474.346 1432.546 -0.043241 0.000678 -0.007062 10(1) 1474.318 1432.981 -0.043245 -0.007065 0.000691 11(1) 1410.843 1377.548 -0.028060 -0.016937 -0.016944 12(1) 1387.532 1356.818 -0.027083 -0.016001 -0.016001 13(1) 1205.022 1177.335 -0.029813 -0.010333 -0.011188 14(1) 1204.977 1177.775 -0.029806 -0.011191 -0.010328 15(1) 1011.453 988.386 -0.037241 -0.014274 -0.014270 16(1) 821.858 814.503 -0.025712 -0.008603 -0.010446 17(1) 821.847 814.500 -0.025693 -0.010449 -0.008599 18(1) 317.554 296.967 -0.035184 -0.010866 -0.010861 Overtones (DE w.r.t. Ground State) """ if nfreq == None: nfreq = get_gaussian_nfreq(s) freqs = np.zeros((nfreq, 2)) lines = s.splitlines() key = 'Fundamental Bands (DE w.r.t. Ground State)' iline = io.get_line_number(key, lines=lines) if iline > 0: for i in range(nfreq): iline += 1 line = lines[iline] cols = line.split() freqs[i, :] = [float(cols[-5]), float(cols[-4])] return freqs[freqs[:, 0].argsort()]
22,106
def dt_diff_file_reversions(): """ Compute diffs checking for reversions: (invert file order to simulate reverse filename progression) >>> old_state = test_config.setup() >>> DiffScript("crds.diff data/hst_0002.pmap data/hst_0001.pmap --check-diffs")() (('data/hst_0002.pmap', 'data/hst_0001.pmap'), ('data/hst_acs_0002.imap', 'data/hst_acs_0001.imap'), ('data/hst_acs_biasfile_0002.rmap', 'data/hst_acs_biasfile_0001.rmap'), ('HRC', 'A', '1.0', '*', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A'), ('1992-01-02', '00:00:00'), 'replaced data/hst_acs_biasfile_0002.fits with data/hst_acs_biasfile_0001.fits') (('data/hst_0002.pmap', 'data/hst_0001.pmap'), ('data/hst_acs_0002.imap', 'data/hst_acs_0001.imap'), ('data/hst_acs_biasfile_0002.rmap', 'data/hst_acs_biasfile_0001.rmap'), ('HRC', 'A', '4.0', '*', '1062', '1044', '19.0', '20.0', 'N/A', 'N/A', 'N/A'), ('1992-01-01', '00:00:00'), 'added Match rule for m991609tj_bia.fits') (('data/hst_0002.pmap', 'data/hst_0001.pmap'), ('data/hst_acs_0002.imap', 'data/hst_acs_0001.imap'), ('data/hst_acs_biasfile_0002.rmap', 'data/hst_acs_biasfile_0001.rmap'), ('HRC', 'A', '4.0', '*', '1062', '1044', '19.0', '20.0', 'N/A', 'N/A', 'N/A'), ('2006-07-04', '11:32:35'), 'added Match rule for q9e1206kj_bia.fits') (('data/hst_0002.pmap', 'data/hst_0001.pmap'), ('data/hst_acs_0002.imap', 'data/hst_acs_0001.imap'), ('data/hst_acs_biasfile_0002.rmap', 'data/hst_acs_biasfile_0001.rmap'), ('HRC', 'A', '4.0', '*', '1062', '1044', '19.0', '20.0', 'N/A', 'N/A', 'N/A'), ('2006-07-15', '04:42:53'), 'added Match rule for q9e12071j_bia.fits') (('data/hst_0002.pmap', 'data/hst_0001.pmap'), ('data/hst_acs_0002.imap', 'data/hst_acs_0001.imap'), ('data/hst_acs_biasfile_0002.rmap', 'data/hst_acs_biasfile_0001.rmap'), ('HRC', 'A', '5.0', '*', '1062', '1044', '19.0', '20.0', 'N/A', 'N/A', 'N/A'), ('1992-01-01', '00:00:00'), 'deleted Match rule for m991609tj_bia.fits') (('data/hst_0002.pmap', 'data/hst_0001.pmap'), ('data/hst_acs_0002.imap', 'data/hst_acs_0001.imap'), ('data/hst_acs_biasfile_0002.rmap', 'data/hst_acs_biasfile_0001.rmap'), ('HRC', 'A', '5.0', '*', '1062', '1044', '19.0', '20.0', 'N/A', 'N/A', 'N/A'), ('2006-07-04', '11:32:35'), 'deleted Match rule for q9e1206kj_bia.fits') (('data/hst_0002.pmap', 'data/hst_0001.pmap'), ('data/hst_acs_0002.imap', 'data/hst_acs_0001.imap'), ('data/hst_acs_biasfile_0002.rmap', 'data/hst_acs_biasfile_0001.rmap'), ('HRC', 'A', '5.0', '*', '1062', '1044', '19.0', '20.0', 'N/A', 'N/A', 'N/A'), ('2006-07-15', '04:43:54'), 'deleted Match rule for q9e12071j_bia.fits') (('data/hst_0002.pmap', 'data/hst_0001.pmap'), ('data/hst_acs_0002.imap', 'data/hst_acs_0001.imap'), ('biasfile',), 'replaced data/hst_acs_biasfile_0002.rmap with data/hst_acs_biasfile_0001.rmap') (('data/hst_0002.pmap', 'data/hst_0001.pmap'), ('acs',), 'replaced data/hst_acs_0002.imap with data/hst_acs_0001.imap') CRDS - WARNING - Rule change at ('data/hst_acs_biasfile_0001.rmap', ('HRC', 'A', '4.0', '*', '1062', '1044', '19.0', '20.0', 'N/A', 'N/A', 'N/A'), ('1992-01-01', '00:00:00')) added Match rule for 'm991609tj_bia.fits' CRDS - WARNING - Rule change at ('data/hst_acs_biasfile_0001.rmap', ('HRC', 'A', '4.0', '*', '1062', '1044', '19.0', '20.0', 'N/A', 'N/A', 'N/A'), ('2006-07-04', '11:32:35')) added Match rule for 'q9e1206kj_bia.fits' CRDS - WARNING - Rule change at ('data/hst_acs_biasfile_0001.rmap', ('HRC', 'A', '4.0', '*', '1062', '1044', '19.0', '20.0', 'N/A', 'N/A', 'N/A'), ('2006-07-15', '04:42:53')) added Match rule for 'q9e12071j_bia.fits' CRDS - WARNING - Rule change at 
('data/hst_acs_biasfile_0001.rmap', ('HRC', 'A', '5.0', '*', '1062', '1044', '19.0', '20.0', 'N/A', 'N/A', 'N/A'), ('1992-01-01', '00:00:00')) deleted Match rule for 'm991609tj_bia.fits' CRDS - WARNING - Rule change at ('data/hst_acs_biasfile_0001.rmap', ('HRC', 'A', '5.0', '*', '1062', '1044', '19.0', '20.0', 'N/A', 'N/A', 'N/A'), ('2006-07-04', '11:32:35')) deleted Match rule for 'q9e1206kj_bia.fits' CRDS - WARNING - Rule change at ('data/hst_acs_biasfile_0001.rmap', ('HRC', 'A', '5.0', '*', '1062', '1044', '19.0', '20.0', 'N/A', 'N/A', 'N/A'), ('2006-07-15', '04:43:54')) deleted Match rule for 'q9e12071j_bia.fits' CRDS - WARNING - Reversion at ('data/hst_acs_biasfile_0001.rmap', ('HRC', 'A', '1.0', '*', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A'), ('1992-01-02', '00:00:00')) replaced 'data/hst_acs_biasfile_0002.fits' with 'data/hst_acs_biasfile_0001.fits' CRDS - WARNING - Reversion at ('data/hst_acs_0001.imap', ('biasfile',)) replaced 'data/hst_acs_biasfile_0002.rmap' with 'data/hst_acs_biasfile_0001.rmap' CRDS - WARNING - Reversion at ('data/hst_0001.pmap', ('acs',)) replaced 'data/hst_acs_0002.imap' with 'data/hst_acs_0001.imap' 2 >>> test_config.cleanup(old_state) """
22,107
def uniform_selection_tensor(tensor_data: np.ndarray,
                             p: int,
                             n_bits: int,
                             per_channel: bool = False,
                             channel_axis: int = 1,
                             n_iter: int = 10,
                             min_threshold: float = MIN_THRESHOLD,
                             quant_error_method: qc.QuantizationErrorMethod = qc.QuantizationErrorMethod.MSE) -> dict:
    """
    Compute the optimal quantization range based on the provided QuantizationErrorMethod
    to uniformly quantize the tensor. A different search is applied, depending on the value
    of the selected QuantizationErrorMethod.

    Args:
        tensor_data: Tensor content as Numpy array.
        p: p-norm to use for the Lp-norm distance.
        n_bits: Number of bits to quantize the tensor.
        per_channel: Whether the quantization should be per-channel or not.
        channel_axis: Output channel index.
        n_iter: Number of iterations to search for the optimal threshold (not used for this method).
        min_threshold: Minimal threshold to use if threshold is too small (not used for this method).
        quant_error_method: an error function to optimize the range parameters' selection accordingly.

    Returns:
        Optimal quantization range to quantize the tensor uniformly.
    """
    tensor_min = get_tensor_min(tensor_data, per_channel, channel_axis)
    tensor_max = get_tensor_max(tensor_data, per_channel, channel_axis)

    if quant_error_method == qc.QuantizationErrorMethod.NOCLIPPING:
        mm = tensor_min, tensor_max
    else:
        error_function = get_threshold_selection_tensor_error_function(QuantizationMethod.UNIFORM,
                                                                       quant_error_method, p, norm=False)
        mm = qparams_uniform_selection_tensor_search(error_function,
                                                     tensor_data,
                                                     tensor_min,
                                                     tensor_max,
                                                     n_bits,
                                                     per_channel,
                                                     channel_axis)

    return {RANGE_MIN: mm[0], RANGE_MAX: mm[1]}
22,108
def writefile_latticemap(map_, file):
    """Writes a lattice map text file.

    The structure of the lattice map file is as follows::

            -----------------------------------------
            |  <nnodes_dim>                         |
            |  <nodes_x>                            |
            |  <nodes_y>                            |
            |  (<nodes_z>)                          |
            |  values(x=0,...,n-1; y=0,   (z=0))    |
            |  values(x=0,...,n-1; y=1,   (z=0))    |
            |  ...                                  |
            |  values(x=0,...,n-1; y=m-1, (z=0))    |
            |  values(x=0,...,n-1; y=0,   (z=1))    |
            |  ...                                  |
            |  values(x=0,...,n-1; y=0,   (z=r-1))  |
            -----------------------------------------

    In the case of two-dimensional maps, the quantities in parentheses are omitted.

    Args:
        map_ (LatticeMap): Lattice map to write.
        file (str or pathlib.Path): File or filename.

    Returns:
        None
    """
    with open(file, 'w+') as lfile:
        lattice = map_.lattice

        # Write nnodes_dim
        line = ' '.join(f'{n:d}' for n in lattice.nnodes_dim)
        lfile.write(line + '\n')

        # Write nodes
        for nodes in lattice.nodes:
            line = ' '.join(f'{n}' for n in nodes)
            lfile.write(line + '\n')

        # Write values
        newshape = (lattice.nnodes_dim[0], -1)
        values = np.reshape(map_.values, newshape, order=NP_ORDER).transpose()
        for vals in values:
            line = ' '.join(f'{v}' for v in vals)
            lfile.write(line + '\n')
22,109
def _get_mock_dataset(root_dir):
    """
    root_dir: directory to the mocked dataset
    """
    base_dir = os.path.join(root_dir, "PennTreebank")
    os.makedirs(base_dir, exist_ok=True)

    seed = 1
    mocked_data = defaultdict(list)
    for file_name in ("ptb.train.txt", "ptb.valid.txt", "ptb.test.txt"):
        txt_file = os.path.join(base_dir, file_name)
        with open(txt_file, "w", encoding="utf-8") as f:
            for i in range(5):
                rand_string = get_random_unicode(seed)
                dataset_line = f"{rand_string}"
                # append line to correct dataset split
                split = file_name.replace("ptb.", "").replace(".txt", "")
                mocked_data[split].append(dataset_line)
                f.write(f"{rand_string}\n")
                seed += 1

    return mocked_data
22,110
def calculate_bin_P(P, x, cal_type='pes'):
    """
    Calculate the virtual, binary transition function, i.e. the transition
    function under which a state-action pair may visit the virtual state $z$.
    """
    n, m = x.world_shape
    # P_z is defined for the n*m states $s$ and a virtual state $z$
    # index 0 - n*m-1: real state
    # n*m: virtual state
    P_z = np.zeros((5, n*m+1, n*m+1))
    ind_a, ind_s, ind_sp = np.where(P)

    if cal_type == 'pes':
        safe_space = x.S_hat
    elif cal_type == 'opt':
        safe_space = x.S_bar

    for i in range(len(ind_a)):
        if safe_space[ind_s[i], ind_a[i]]:
            P_z[ind_a[i], ind_s[i], ind_sp[i]] = 1
        else:
            P_z[ind_a[i], ind_s[i], -1] = 1

    # For any action, transition probability from z to z is equal to 1
    P_z[:, -1, -1] = 1
    return P_z
22,111
def makeYbus(baseMVA, bus, branch): """Builds the bus admittance matrix and branch admittance matrices. Returns the full bus admittance matrix (i.e. for all buses) and the matrices C{Yf} and C{Yt} which, when multiplied by a complex voltage vector, yield the vector currents injected into each line from the "from" and "to" buses respectively of each line. Does appropriate conversions to p.u. @see: L{makeSbus} @author: Ray Zimmerman (PSERC Cornell) @author: Richard Lincoln """ ## constants nb = bus.shape[0] ## number of buses nl = branch.shape[0] ## number of lines ## for each branch, compute the elements of the branch admittance matrix where ## ## | If | | Yff Yft | | Vf | ## | | = | | * | | ## | It | | Ytf Ytt | | Vt | ## Ytt, Yff, Yft, Ytf = branch_vectors(branch, nl) ## compute shunt admittance ## if Psh is the real power consumed by the shunt at V = 1.0 p.u. ## and Qsh is the reactive power injected by the shunt at V = 1.0 p.u. ## then Psh - j Qsh = V * conj(Ysh * V) = conj(Ysh) = Gs - j Bs, ## i.e. Ysh = Psh + j Qsh, so ... ## vector of shunt admittances Ysh = (bus[:, GS] + 1j * bus[:, BS]) / baseMVA ## build connection matrices f = real(branch[:, F_BUS]).astype(int) ## list of "from" buses t = real(branch[:, T_BUS]).astype(int) ## list of "to" buses ## connection matrix for line & from buses Cf = csr_matrix((ones(nl), (range(nl), f)), (nl, nb)) ## connection matrix for line & to buses Ct = csr_matrix((ones(nl), (range(nl), t)), (nl, nb)) ## build Yf and Yt such that Yf * V is the vector of complex branch currents injected ## at each branch's "from" bus, and Yt is the same for the "to" bus end i = hstack([range(nl), range(nl)]) ## double set of row indices Yf = csr_matrix((hstack([Yff, Yft]), (i, hstack([f, t]))), (nl, nb)) Yt = csr_matrix((hstack([Ytf, Ytt]), (i, hstack([f, t]))), (nl, nb)) # Yf = spdiags(Yff, 0, nl, nl) * Cf + spdiags(Yft, 0, nl, nl) * Ct # Yt = spdiags(Ytf, 0, nl, nl) * Cf + spdiags(Ytt, 0, nl, nl) * Ct ## build Ybus Ybus = Cf.T * Yf + Ct.T * Yt + \ csr_matrix((Ysh, (range(nb), range(nb))), (nb, nb)) Ybus.sort_indices() Ybus.eliminate_zeros() return Ybus, Yf, Yt
22,112
def list_blob(math_engine, batch_len, batch_width, list_size, channels, dtype="float32"):
    """Creates a blob with one-dimensional Height * Width * Depth elements.

    Parameters
    ----------
    math_engine : object
        The math engine that works with this blob.
    batch_len : int, > 0
        The BatchLength dimension of the new blob.
    batch_width : int, > 0
        The BatchWidth dimension of the new blob.
    list_size : int, > 0
        The ListSize dimension of the new blob.
    channels : int, > 0
        The Channels dimension of the new blob.
    dtype : {"float32", "int32"}, default="float32"
        The type of data in the blob.
    """
    if dtype != "float32" and dtype != "int32":
        raise ValueError('The `dtype` must be one of {`float32`, `int32`}.')
    if batch_len < 1:
        raise ValueError('The `batch_len` must be > 0.')
    if batch_width < 1:
        raise ValueError('The `batch_width` must be > 0.')
    if list_size < 1:
        raise ValueError('The `list_size` must be > 0.')
    if channels < 1:
        raise ValueError('The `channels` must be > 0.')

    shape = numpy.array((batch_len, batch_width, list_size, 1, 1, 1, channels),
                        dtype=numpy.int32, copy=False)

    return Blob(PythonWrapper.tensor(math_engine._internal, shape, dtype))
22,113
def test_len(test_row):
    """Test len method."""
    assert len(test_row) == 4
    empty_row = py3odb.row.Row()
    assert not empty_row
22,114
def downgrade_data(): """Data migration to retrieve the ids of old sensors. Note that downgraded ids are not guaranteed to be the same as during upgrade.""" # To support data downgrade, cascade upon updating ids recreate_sensor_fks(recreate_with_cascade_on_update=True) # Declare ORM table views t_markets = sa.Table( "market", sa.MetaData(), sa.Column("id", sa.Integer), sa.Column("name", sa.String(80)), ) # Use Alchemy's connection and transaction to go through the data connection = op.get_bind() # Get the max id used by assets and markets max_asset_id = get_max_id( connection, "asset" ) # may be different than during upgrade! max_market_id = get_max_id( connection, "market" ) # may be different than during upgrade! # Select all existing ids that need migrating market_results = connection.execute( sa.select( [ t_markets.c.id, t_markets.c.name, ] ) ).fetchall() # Iterate over all selected data tuples for id_, name in market_results: # Determine the new id new_id = id_ - max_asset_id # Update the id connection.execute( t_markets.update().where(t_markets.c.name == name).values(id=new_id) ) # Repeat steps for weather sensors t_weather_sensors = sa.Table( "weather_sensor", sa.MetaData(), sa.Column("id", sa.Integer), sa.Column("name", sa.String(80)), ) weather_sensor_results = connection.execute( sa.select( [ t_weather_sensors.c.id, t_weather_sensors.c.name, ] ) ).fetchall() for id_, name in weather_sensor_results: # Determine the new id new_id = id_ - max_market_id # Update the id connection.execute( t_weather_sensors.update() .where(t_weather_sensors.c.name == name) .values(id=new_id) ) # After supporting data downgrade, stop cascading upon updating ids recreate_sensor_fks(recreate_with_cascade_on_update=False)
22,115
def p_command_create(p):
    """
    command : CREATE ident asinputformat block
            | CREATE ident asinputformat string
    """
    p[0] = (p.lineno(1), Create(dialect=p[3], block=p[4]), p[2])
22,116
def skip_on_pypy_because_cache_next_works_differently(func):
    """Not sure what happens there but on PyPy CacheNext doesn't work like on
    CPython.
    """
    return _skipif_wrapper(func, IS_PYPY,
                           reason='PyPy works differently with __next__ cache.')
22,117
def get_life_of_brian():
    """ Get lines from test_LifeOfBrian. """
    count = 0
    monty_list = ['coconut']
    try:
        with open(LIFE_OF_BRIAN_SCRIPT) as f:
            lines = f.readlines()
            for line in lines:
                count += 1
                # print(line)
                monty_list.append(line)
        random_line = random.randrange(0, count)
        picked_line = monty_list[random_line]
        return picked_line
    except:
        # print(f"file at : {LIFE_OF_BRIAN_SCRIPT} could not be opened.")
        return 'but it has FAAANNNGGsss'
22,118
def ackley_func(x):
    """Ackley's objective function.

    Has a global minimum at :code:`f(0,0,...,0)` with a search
    domain of [-32, 32]

    Parameters
    ----------
    x : numpy.ndarray
        set of inputs of shape :code:`(n_particles, dimensions)`

    Returns
    -------
    numpy.ndarray
        computed cost of size :code:`(n_particles, )`

    Raises
    ------
    ValueError
        When the input is out of bounds with respect to the function domain
    """
    if not np.logical_and(x >= -32, x <= 32).all():
        raise ValueError('Input for Ackley function must be within [-32, 32].')

    d = x.shape[1]
    j = (-20.0 * np.exp(-0.2 * np.sqrt((1/d) * (x**2).sum(axis=1)))
         - np.exp((1/float(d)) * np.cos(2 * np.pi * x).sum(axis=1))
         + 20.0
         + np.exp(1))

    return j
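A quick usage sketch, assuming numpy is imported as np and ackley_func is in scope; at the origin the cost is (numerically) the global minimum of 0:

import numpy as np

x = np.zeros((3, 2))      # 3 particles in 2 dimensions, all at the origin
print(ackley_func(x))     # ~[4.44e-16, 4.44e-16, 4.44e-16], i.e. the global minimum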
22,119
def validate_listable_type(*atype):
    """Validate a list of atype.

    @validate_listable_type(str)
    def example_func(a_list):
        return a_list

    @validate_listable_type(int)
    def example_int_func(a_list):
        return a_list
    """
    if len(atype) != 1:
        raise ValueError("Expected one arg. Got {n} args.".format(n=len(atype)))
    type_ = atype[0]

    def wrap(f):
        def wrapped_f(*args, **kw):
            for arg in args[0]:
                if not isinstance(arg, type_):
                    raise TypeError("Expected type {t}. Got type {x} for {v}.".format(t=type_, x=type(arg), v=args))
            return f(*args)
        return wrapped_f
    return wrap
22,120
def meh(captcha):
    """Returns the sum of the digits which match the next one in the captcha input string.

    >>> meh('1122')
    3
    >>> meh('1111')
    4
    >>> meh('1234')
    0
    >>> meh('91212129')
    9
    """
    result = 0
    for n in range(len(captcha)):
        if captcha[n] == captcha[(n + 1) % len(captcha)]:
            result += int(captcha[n])
    return result
22,121
def check_cli(module, cli):
    """
    This method checks if vRouter exists on the target node.
    This method also checks for idempotency using the vrouter-bgp-show command.
    If the given vRouter exists, return VROUTER_EXISTS as True else False.
    If the given neighbor exists on the given vRouter,
    return NEIGHBOR_EXISTS as True else False.
    :param module: The Ansible module to fetch input parameters
    :param cli: The CLI string
    :return Booleans: VROUTER_EXISTS, NEIGHBOR_EXISTS
    """
    vrouter_name = module.params['pn_vrouter_name']
    neighbor = module.params['pn_neighbor']

    # Check for vRouter
    check_vrouter = cli + ' vrouter-show format name no-show-headers'
    out = run_commands(module, check_vrouter)[1]
    if out:
        out = out.split()

    VROUTER_EXISTS = True if vrouter_name in out else False

    if neighbor:
        # Check for BGP neighbor
        show = cli + ' vrouter-bgp-show vrouter-name %s ' % vrouter_name
        show += 'format neighbor no-show-headers'
        out = run_commands(module, show)[1]

        if out and neighbor in out.split():
            NEIGHBOR_EXISTS = True
        else:
            NEIGHBOR_EXISTS = False

    return VROUTER_EXISTS, NEIGHBOR_EXISTS
22,122
def plot_stat(ratio=0.1, num=100, stat="test MSE", method="ols", n_boot=1000, k_fold=1000, ridge_lmb=122.0, lasso_lmb=112.2): """ Create heatmap for given statistical indicator and sampling method :param ratio: ratio of the dataset to be used for testing :param num: length of dataset :param stat: statistical indicator :param method: resampling method :param n_boot: number of times bootstrap is performed if method=*_bootstrap :param k_fold: number of folds for cross-validation if method=*_crossvalidation :param ridge_lmb: lambda for ridge regression :param lasso_lmb: lambda for lasso regression """ # Path to example data data_path = get_data_path() # Load data order = np.load(data_path + "order.npy") num_points = np.load(data_path + "num_points.npy") noise_var = np.load(data_path + "noise_var.npy") test_ratio = np.load(data_path + "test_ratio.npy") ridge_lambda = np.load(data_path + "ridge_lambda.npy") k_folds = np.load(data_path + "k_folds.npy") n_boots = np.load(data_path + "n_boots.npy") lasso_lambda = np.load(data_path + "lasso_lambda.npy") # Load data for statistical indicator data = get_data_statistic(data_path, stat, method) n_ind = 0 for i in range(len(num_points)): if num == num_points[i]: n_ind = i r_ind = 0 for i in range(len(test_ratio)): if ratio == test_ratio[i]: r_ind = i rlambda_ind = 0 for i in range(len(ridge_lambda)): if ridge_lmb == ridge_lambda[i]: rlambda_ind = i llambda_ind = 0 for i in range(len(lasso_lambda)): if lasso_lmb == lasso_lambda[i]: llambda_ind = i nb_ind = 0 for i in range(len(n_boots)): if n_boot == n_boots[i]: nb_ind = i cv_ind = 0 for i in range(len(k_folds)): if k_fold == k_folds[i]: cv_ind = i if "crossvalidation" in method: r_ind = 0 else: cv_ind = 0 if "bootstrap" not in method: nb_ind = 0 if "ridge" not in method: rlambda_ind = 0 if "lasso" not in method: llambda_ind = 0 # Select subset of data for given ratio, lambda, number of bootstraps and/or folds for cross-validation and plot heatmap data_sub = data[:, n_ind, :, r_ind, rlambda_ind, llambda_ind, nb_ind, cv_ind] sns.heatmap(data_sub, annot=True, cmap="mako", vmax=np.amax(data_sub), vmin=np.amin(data_sub), xticklabels=noise_var, yticklabels=order) plt.ylabel('Polynomial Order') plt.xlabel('Noise Variance')
22,123
def is_prime(n):
    """
    from https://stackoverflow.com/questions/15285534/isprime-function-for-python-language
    """
    if n == 2 or n == 3:
        return True
    if n < 2 or n % 2 == 0:
        return False
    if n < 9:
        return True
    if n % 3 == 0:
        return False
    r = int(n**0.5)
    f = 5
    while f <= r:
        if n % f == 0:
            return False
        if n % (f + 2) == 0:
            return False
        f += 6
    return True
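A small sanity check of the primality test above:

print([n for n in range(2, 30) if is_prime(n)])
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]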
22,124
def load(file, encoding=None): """load(file,encoding=None) -> object This function reads a tnetstring from a file and parses it into a python object. The file must support the read() method, and this function promises not to read more data than necessary. """ # Read the length prefix one char at a time. # Note that the netstring spec explicitly forbids padding zeros. c = file.read(1) if not c.isdigit(): raise ValueError("not a tnetstring: missing or invalid length prefix") datalen = ord(c) - ord("0") c = file.read(1) if datalen != 0: while c.isdigit(): datalen = (10 * datalen) + (ord(c) - ord("0")) if datalen > 999999999: errmsg = "not a tnetstring: absurdly large length prefix" raise ValueError(errmsg) c = file.read(1) if c != ":": raise ValueError("not a tnetstring: missing or invalid length prefix") # Now we can read and parse the payload. # This repeats the dispatch logic of pop() so we can avoid # re-constructing the outermost tnetstring. data = file.read(datalen) if len(data) != datalen: raise ValueError("not a tnetstring: length prefix too big") type = file.read(1) if type == ",": if encoding is not None: return data.decode(encoding) return data if type == "#": try: return int(data) except ValueError: raise ValueError("not a tnetstring: invalid integer literal") if type == "^": try: return float(data) except ValueError: raise ValueError("not a tnetstring: invalid float literal") if type == "!": if data == "true": return True elif data == "false": return False else: raise ValueError("not a tnetstring: invalid boolean literal") if type == "~": if data: raise ValueError("not a tnetstring: invalid null literal") return None if type == "]": l = [] while data: (item, data) = pop(data, encoding) l.append(item) return l if type == "}": d = {} while data: (key, data) = pop(data, encoding) (val, data) = pop(data, encoding) d[key] = val return d raise ValueError("unknown type tag")
22,125
def enable(configuration_file, section="ispyb"): """Enable access to features that are currently under development.""" global _db, _db_cc, _db_config if _db_config: if _db_config == configuration_file: # This database connection is already set up. return logging.getLogger("ispyb").warn( "__future__ configuration file change requested" ) disable() logging.getLogger("ispyb").info( "NOTICE: This code uses __future__ functionality in the ISPyB API. " "This enables unsupported and potentially unstable code, which may " "change from version to version without warnings. Here be dragons." ) cfgparser = configparser.RawConfigParser() if not cfgparser.read(configuration_file): raise RuntimeError( "Could not read from configuration file %s" % configuration_file ) cfgsection = dict(cfgparser.items(section)) host = cfgsection.get("host") port = cfgsection.get("port", 3306) database = cfgsection.get("database", cfgsection.get("db")) username = cfgsection.get("username", cfgsection.get("user")) password = cfgsection.get("password", cfgsection.get("pw")) # Open a direct MySQL connection _db = mysql.connector.connect( host=host, port=port, user=username, password=password, database=database, use_pure=True, ) _db_config = configuration_file _db.autocommit = True class DictionaryCursorContextManager(object): """This class creates dictionary cursors for mysql.connector connections. By using a context manager it is ensured that cursors are closed immediately after use. Cursors created with this context manager return results as a dictionary and offer a .run() function, which is an alias to .execute that accepts query parameters as function parameters rather than a list. """ def __enter__(cm): """Enter context. Ensure the database is alive and return a cursor with an extra .run() function.""" _db.ping(reconnect=True) cm.cursor = _db.cursor(dictionary=True) def flat_execute(stmt, *parameters): """Pass all given function parameters as a list to the existing .execute() function.""" return cm.cursor.execute(stmt, parameters) setattr(cm.cursor, "run", flat_execute) return cm.cursor def __exit__(cm, *args): """Leave context. Close cursor. Destroy reference.""" cm.cursor.close() cm.cursor = None _db_cc = DictionaryCursorContextManager import ispyb.model.datacollection ispyb.model.datacollection.DataCollection.integrations = ( _get_linked_autoprocintegration_for_dc ) ispyb.model.datacollection.DataCollection.pdb = _get_linked_pdb_for_dc import ispyb.model.processingprogram ispyb.model.processingprogram.ProcessingProgram.reload = _get_autoprocprogram
22,126
def MakeMsgCmd(cmdName, argList):
    """
    Take a command name and an argList of tuples consisting of pairs
    of the form (argName, argValue), and return a string representing
    the corresponding dibs command.
    """
    body = MakeStartTag(dibs_constants.cmdTagName, {'id': cmdName}) + '\n'
    for argPair in argList:
        body += (MakeStartTag(dibs_constants.argTagName, {'id': argPair[0]}) +
                 argPair[1] + MakeEndTag(dibs_constants.argTagName) + '\n')
    body += (MakeStartTag(dibs_constants.argTagName,
                          {'id': dibs_constants.cmdTimeArgName}) +
             repr(time.time()) + MakeEndTag(dibs_constants.argTagName) + '\n' +
             MakeEndCmdTag())
    return body
22,127
def preprocess(tweet):
    """
    Substitutes urls with the string URL.
    Removes leading and trailing whitespaces.
    Removes non latin characters.
    :param tweet:
    :return:
    """
    # remove URL
    line = remove_url(str(tweet.strip()))

    # remove non Latin characters
    stripped_text = ''
    for c in line:
        stripped_text += c if len(c.encode(encoding='utf_8')) == 1 else ''

    return stripped_text.translate(table).strip()
22,128
def read_xml_file(input_file, elem):
    """Reads xml data and extracts specified elements

    Parameters
    ----------
    input_file : str
        The OTA xml file
    elem : str
        Specified elements to be extracted

    Returns
    -------
    list
        a list of xml seat data
    """
    tree = ET.parse(input_file)
    root = tree.findall(elem)
    return root
22,129
def _generate_supersize_archive(supersize_input_file, make_chromium_output_path,
                                make_staging_path):
    """Creates a .size file for the given .apk or .minimal.apks"""
    subprocess.run([_CLANG_UPDATE_PATH, '--package=objdump'], check=True)
    supersize_input_path = make_chromium_output_path(supersize_input_file)
    size_path = make_staging_path(supersize_input_file) + '.size'
    supersize_script_path = os.path.join(_BINARY_SIZE_DIR, 'supersize')
    subprocess.run(
        [
            supersize_script_path,
            'archive',
            size_path,
            '-f',
            supersize_input_path,
            '-v',
        ],
        check=True,
    )
22,130
def report_mean_overall(nutrients_mean):
    """Report mean overall"""
    overall_carb_trend.append(nutrients_mean[0])
    overall_fiber_trend.append(nutrients_mean[1])
    overall_fat_trend.append(nutrients_mean[2])
    overall_prot_trend.append(nutrients_mean[3])
22,131
def env(pip_packages: Optional[Union[str, List[str]]] = None):
    """A decorator that adds an environment specification to either Operator or Application.

    Args:
        pip_packages (Optional[Union[str, List[str]]]): A string that is a path to requirements.txt file
            or a list of packages to install.

    Returns:
        A decorator that adds an environment specification to either Operator or Application.
    """
    # Import the classes here to avoid circular import.
    from .application import Application, ApplicationEnv
    from .operator import Operator, OperatorEnv

    def decorator(cls):
        if hasattr(cls, "_env") and cls._env:
            raise ItemAlreadyExistsError(f"@env decorator is already specified for {cls}.")

        if issubclass(cls, Operator):
            environment = OperatorEnv(pip_packages=pip_packages)
        elif issubclass(cls, Application):
            environment = ApplicationEnv(pip_packages=pip_packages)
        else:
            raise UnknownTypeError(f"@env decorator cannot be specified for {cls}.")

        cls._env = environment
        return cls

    return decorator
22,132
def test_trace_propagation( endpoint, transport, encoding, enabled, expect_spans, expect_baggage, http_patchers, tracer, mock_server, thrift_service, app, http_server, base_url, http_client): """ Main TChannel-OpenTracing integration test, using basictracer as implementation of OpenTracing API. The main logic of this test is as follows: 1. Start a new trace with a root span 2. Store a random value in the baggage 3. Call the first service at the endpoint from `endpoint` parameter. The first service is either tchannel or http, depending on the value if `transport` parameter. 4. The first service calls the second service using pre-defined logic that depends on the endpoint invoked on the first service. 5. The second service accesses the tracing span and returns the value of the baggage item as the response. 6. The first service responds with the value from the second service. 7. The main test validates that the response is equal to the original random value of the baggage, proving trace & baggage propagation. 8. The test also validates that all spans have been finished and recorded, and that they all have the same trace ID. We expect 5 spans to be created from each test run: * top-level (root) span started in the test * client span (calling service-1) * service-1 server span * service-1 client span (calling service-2) * service-2 server span :param endpoint: name of the endpoint to call on the first service :param transport: type of the first service: tchannel or http :param enabled: if False, channels are instructed to disable tracing :param expect_spans: number of spans we expect to be generated :param http_patchers: monkey-patching of tornado AsyncHTTPClient :param tracer: a concrete implementation of OpenTracing Tracer :param mock_server: tchannel server (from conftest.py) :param thrift_service: fixture that creates a Thrift service from fake IDL :param app: tornado.web.Application fixture :param http_server: http server (provided by pytest-tornado) :param base_url: address of http server (provided by pytest-tornado) :param http_client: Tornado's AsyncHTTPClient (provided by pytest-tornado) """ # mock_server is created as a fixture, so we need to set tracer on it mock_server.tchannel._dep_tchannel._tracer = tracer mock_server.tchannel._dep_tchannel._trace = enabled register(tchannel=mock_server.tchannel, thrift_service=thrift_service, http_client=http_client, base_url=base_url) tchannel = TChannel(name='test', tracer=tracer, trace=enabled) app.add_handlers(".*$", [ (r"/", HttpHandler, {'client_channel': tchannel}) ]) with mock.patch('opentracing.tracer', tracer),\ mock.patch.object(tracing.log, 'exception') as log_exception: assert opentracing.tracer == tracer # sanity check that patch worked span = tracer.start_span('root') baggage = 'from handler3 %d' % time.time() span.set_baggage_item(BAGGAGE_KEY, baggage) if not enabled: span.set_tag('sampling.priority', 0) with span: # use span as context manager so that it's always finished response_future = None with tchannel.context_provider.span_in_context(span): if transport == 'tchannel': if encoding == 'json': response_future = tchannel.json( service='test-client', endpoint=endpoint, hostport=mock_server.hostport, body=mock_server.hostport, ) elif encoding == 'thrift': if endpoint == 'thrift1': response_future = tchannel.thrift( thrift_service.X.thrift1(mock_server.hostport), hostport=mock_server.hostport, ) elif endpoint == 'thrift3': response_future = tchannel.thrift( thrift_service.X.thrift3(mock_server.hostport), 
hostport=mock_server.hostport, ) elif endpoint == 'thrift4': response_future = tchannel.thrift( thrift_service.X.thrift4(mock_server.hostport), hostport=mock_server.hostport, ) else: raise ValueError('wrong endpoint %s' % endpoint) else: raise ValueError('wrong encoding %s' % encoding) elif transport == 'http': response_future = http_client.fetch( request=HTTPRequest( url='%s%s' % (base_url, endpoint), method='POST', body=mock_server.hostport, ) ) else: raise NotImplementedError( 'unknown transport %s' % transport) response = yield response_future assert log_exception.call_count == 0 body = response.body if expect_baggage: assert body == baggage def get_sampled_spans(): return [s for s in tracer.reporter.get_spans() if s.is_sampled] # Sometimes the test runs into weird race condition where the # after_send_response() hook is executed, but the span is not yet # recorded. To prevent flaky test runs we check and wait until # all spans are recorded, for up to 1 second. for i in range(0, 1000): spans = get_sampled_spans() if len(spans) >= expect_spans: break yield tornado.gen.sleep(0.001) # yield execution and sleep for 1ms spans = get_sampled_spans() assert expect_spans == len(spans), 'Unexpected number of spans reported' # We expect all trace IDs in collected spans to be the same if expect_spans > 0: spans = tracer.reporter.get_spans() assert 1 == len(set([s.trace_id for s in spans])), \ 'all spans must have the same trace_id'
22,133
def cost_stage_grads(x, u, target, lmbda):
    """
    x: (n_states, )
    u: (n_controls,)
    target: (n_states, )
    lmbda: penalty on controls
    """
    dL = jacrev(cost_stage, (0, 1))   # l_x, l_u
    d2L = jacfwd(dL, (0, 1))          # l_xx etc.

    l_x, l_u = dL(x, u, target, lmbda)
    d2Ldx, d2Ldu = d2L(x, u, target, lmbda)
    l_xx, l_xu = d2Ldx
    l_ux, l_uu = d2Ldu

    return l_x, l_u, l_xx, l_ux, l_uu
22,134
def slope_field(f: Function2d, range: list = None, xlim: list = None, ylim: list = None, normalize: bool = True, plot_type: str = 'quiver', density: int = 20, color: bool = True, show: bool = True, cmap: str = 'viridis'): """ Slope Field =========== Graphical representation of the solutions to a first-order differential equation `y' = f(t,y)` Parameters ---------- f : Function2d Function with 2 parameters `y' = f(t,y)` range : list, optional Sets both limits x/y of the plot, by default [-5, 5] xlim : list, optional Sets the x limits of the plot, if `range` is defined, xlim is already set, by default [-5, 5] ylim : list, optional Sets the y limits of the plot, if `range` is defined, ylim is already set, by default [-5, 5] normalize : bool, optional Normalize the slope field, by default True plot_type : str, optional Defines the plot type quiver: -> plt.quiver() streamplot: -> plt.streamplot() by default 'quiver' density : int, optional Density of arrows, by default 20 color : bool, optional Color of the arrows, by default True show : bool, optional Shows the plot, by default True cmap : str, optional https://matplotlib.org/stable/tutorials/colors/colormaps.html Examples -------- >>> def fun(x,y): return x + np.sin(y) >>> slope_field(fun, range=[-2,2], plot_type='streamplot', cmap='plasma') plot figure >>> slope_field(fun, xlim=[-3,2], ylim=[-1,1], color=False, normalize=False, density=30, show=False) >>> T, U = odeEuler(fun, -3, 2, 1000, 0.1) >>> import matplotlib.pyplot as plt >>> plt.plot(T,U) >>> plt.show() plot figure """ if range is None and xlim is None and ylim is None: range = [-5,5] x1, x2 = range y1, y2 = range elif xlim is None and ylim is None: x1, x2 = range y1, y2 = range elif range is None: x1, x2 = xlim y1, y2 = ylim else: raise ValueError('Must speciefy either range or xlim/ylim') x = np.linspace(x1, x2, density) y = np.linspace(y1, y2, density) X, Y = np.meshgrid(x, y) dx, dy = np.ones(X.shape), f(X,Y) if normalize: norm = np.sqrt(dx**2 + dy**2) dx, dy = dx/norm , dy/norm if plot_type == 'quiver': #color = np.sqrt(((dx+4)/2)*2 + ((dy+4)/2)*2) if color: plt.quiver(X, Y, dx, dy, dy, cmap=cmap) else: plt.quiver(X, Y, dx, dy) elif plot_type == 'streamplot': if color: plt.streamplot(X, Y, dx, dy, color=dy, cmap=cmap) else: plt.streamplot(X, Y, dx, dy, color='k') else: raise ValueError("It only accepts either 'quiver' or 'streamplot'") plt.title(f'Slope Field ({plot_type})') plt.xlabel('x') plt.ylabel('y') if show: plt.show()
22,135
def create_rotation_matrix(angles):
    """
    Returns a rotation matrix that will produce the given Euler angles
    :param angles: (roll, pitch, yaw)
    """
    R_x = Matrix([[1, 0, 0],
                  [0, cos(q), -sin(q)],
                  [0, sin(q), cos(q)]]).evalf(subs={q: angles[0]})
    R_y = Matrix([[cos(q), 0, sin(q)],
                  [0, 1, 0],
                  [-sin(q), 0, cos(q)]]).evalf(subs={q: angles[1]})
    R_z = Matrix([[cos(q), -sin(q), 0],
                  [sin(q), cos(q), 0],
                  [0, 0, 1]]).evalf(subs={q: angles[2]})
    return R_z * R_y * R_x
22,136
def goodput_for_range(endpoint, first_packet, last_packet):
    """Computes the goodput (in bps) achieved between observing two specific packets"""
    if first_packet == last_packet or \
       first_packet.timestamp_us == last_packet.timestamp_us:
        return 0

    byte_count = 0
    seen_first = False
    for packet in endpoint.packets:
        if packet == last_packet:
            break
        if packet == first_packet:
            seen_first = True
        if not seen_first:
            continue

        # Packet contributes to goodput if it was not retransmitted
        if not packet.is_lost():
            byte_count += packet.data_len

    time_us = last_packet.timestamp_us - first_packet.timestamp_us
    return byte_count * 8 * 1E6 / time_us
22,137
def validate_scopes(
    required_scopes: Sequence[str], token_scopes: Sequence[str]
) -> bool:
    """Validates that all required scopes are present in the token scopes"""
    missing_scopes = set(required_scopes) - set(token_scopes)
    if missing_scopes:
        raise SecurityException(f"Missing required scopes: {missing_scopes}")

    return not missing_scopes
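A hypothetical call pattern (scope names are invented for illustration):

try:
    validate_scopes(required_scopes=["read:user", "write:user"],
                    token_scopes=["read:user"])
except SecurityException as exc:
    print(exc)  # Missing required scopes: {'write:user'}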
22,138
def make_term_structure(rates, dt_obs):
    """
    rates is a dictionary-like structure with labels as keys
    and rates (decimal) as values.

    TODO: Make it more generic
    """
    settlement_date = pydate_to_qldate(dt_obs)

    rate_helpers = []
    for label in rates.keys():
        r = rates[label]
        h = make_rate_helper(label, r, settlement_date)
        rate_helpers.append(h)

    ts_day_counter = ActualActual(ISDA)
    tolerance = 1.0e-15

    ts = PiecewiseYieldCurve.from_reference_date(
        BootstrapTrait.Discount,
        Interpolator.LogLinear,
        settlement_date,
        rate_helpers,
        ts_day_counter,
        tolerance
    )

    return ts
22,139
def mark_emails_as_sent(automatic_email, emails):
    """
    Context manager to mark users who have the given emails as sent after successful sending of email

    Args:
        automatic_email (AutomaticEmail): An instance of AutomaticEmail
        emails (iterable): An iterable of emails

    Yields:
        queryset of user id: A queryset of user ids which represent users who haven't been sent emails yet
    """
    user_ids = list(User.objects.filter(email__in=emails).values_list('id', flat=True))

    # At any point the SentAutomaticEmail will be in three possible states:
    # it doesn't exist, status=PENDING, and status=SENT. They should only change state in that direction, ie
    # we don't delete SentAutomaticEmail anywhere or change status from SENT to pending.
    for user_id in user_ids:
        # If a SentAutomaticEmail doesn't exist, create it with status=PENDING.
        # No defaults because the default status is PENDING which is what we want
        SentAutomaticEmail.objects.get_or_create(
            user_id=user_id,
            automatic_email=automatic_email,
        )

    with transaction.atomic():
        # Now all SentAutomaticEmails are either PENDING or SENT.
        # If SENT it was already handled by a different thread, so filter on PENDING.
        sent_queryset = SentAutomaticEmail.objects.filter(
            user_id__in=user_ids,
            automatic_email=automatic_email,
            status=SentAutomaticEmail.PENDING,
        )
        user_ids_left = list(sent_queryset.select_for_update().values_list('user_id', flat=True))
        # We yield the list of user ids here to let the block know which emails have not yet been sent
        yield user_ids_left
        sent_queryset.update(status=SentAutomaticEmail.SENT)
22,140
def region(x: float, n0: int, n1: int, func='cdf', exhaustive=False): """Region of integration for bbprop_cdf or bbprop_test Parameters ---------- x : float difference of proportions n0, n1 : int number of trials for the two samples func : str function to determine region for. should be "cdf" or "test". exhaustive : bool if set to true, check value in exhaustive mode Yields ------- r0, r1 tuple of coordinates """ if func not in {'cdf', 'test'}: raise RuntimeError('invalid "func" option') if not exhaustive: if func == 'cdf': yield from ( (r0, r1) for r0 in range(n0 + 1) for r1 in ( range(floor(n1/n0 * (r0 + x * n0)) + 1) if r0 < x * n0 else range(ceil(n1/n0 * (r0 - x * n0)), n1 + 1) if r0 > (1 - x) * n0 else range( ceil(n1/n0 * (r0 - x * n0)), floor(n1/n0 * (r0 + x * n0)) + 1 ) ) ) elif func == 'test': yield from ( (r0, r1) for r0 in range(n0 + 1) for r1 in ( range(ceil(n1/n0 * (r0 + x * n0)), n1 + 1) if r0 < x * n0 else range(floor(n1/n0 * (r0 - x * n0)) + 1) if r0 > (1 - x) * n0 else chain( range(floor(n1/n0 * (r0 - x * n0)) + 1), range(ceil(n1/n0 * (r0 + x * n0)), n1 + 1) ) ) ) else: if func == 'cdf': yield from ( (r0, r1) for r0 in range(n0 + 1) for r1 in range(n1 + 1) if abs(r0*n1 - r1*n0) <= n1 * n0 * x ) elif func == 'test': yield from ( (r0, r1) for r0 in range(n0 + 1) for r1 in range(n1 + 1) if abs(r0*n1 - r1*n0) >= n1 * n0 * x )
22,141
def _validate_metadata(metadata_list):
    """
    Make sure the metadata_list does not have data for the following core metadata elements.
    An exception is raised if any of the following elements is present in metadata_list:

    title - (endpoint has a title parameter which should be used for specifying resource title)
    subject (keyword) - (endpoint has a keywords parameter which should be used for specifying resource keywords)
    description (abstract) - (endpoint has an abstract parameter which should be used for specifying resource abstract)
    publisher - this element is created upon resource publication
    format - this element is created by the system based on the resource content files
    date - this element is created by the system
    type - this element is created by the system

    :param metadata_list: list of dicts each representing data for a specific metadata element
    :return:
    """
    err_message = "Metadata validation failed. Metadata element '{}' was found in value passed " \
                  "for parameter 'metadata'. Though it's a valid element it can't be passed " \
                  "as part of 'metadata' parameter."
    for element in metadata_list:
        # here k is the name of the element
        # v is a dict of all element attributes/field names and field values
        k, v = list(element.items())[0]
        if k.lower() in ('title', 'subject', 'description', 'publisher', 'format', 'date', 'type'):
            err_message = err_message.format(k.lower())
            raise ValidationError(detail=err_message)
22,142
def skip_after_postgres(*ver):
    """Skip a test on PostgreSQL after (including) a certain version."""
    ver = ver + (0,) * (3 - len(ver))

    def skip_after_postgres_(f):
        @wraps(f)
        def skip_after_postgres__(self):
            if self.conn.server_version >= int("%d%02d%02d" % ver):
                return self.skipTest("skipped because PostgreSQL %s"
                                     % self.conn.server_version)
            else:
                return f(self)
        return skip_after_postgres__
    return skip_after_postgres_
22,143
def validateParameters(options):
    """
    Who needs documentation
    TODO: Add some...
    """
    # options.identity should be a valid file
    if os.path.isfile(options.identity):
        try:
            f = open(options.identity, "r")
        except IOError as err:
            print("Could not open the identity file %s for reading, exiting." % options.identity)
            sys.exit(1)
        finally:
            f.close()
    else:
        print("Could not find the identity file %s, exiting." % options.identity)
        sys.exit(1)

    # options.rport, options.lport, and options.port should be numeric
    if not options.rport.isdigit() or not options.lport.isdigit() or not options.port.isdigit():
        print("rport:%s lport:%s port:%s" % (options.rport, options.lport, options.port))
        print("rport, lport, and port options must all be numbers, exiting.")
        sys.exit(1)

    # options.host should be an IP or a hostname
    validIpAddressRegex = r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$"
    validHostnameRegex = r"^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])$"
    if not re.match(validIpAddressRegex, options.host) and not re.match(validHostnameRegex, options.host):
        print("Supplied host: %s" % options.host)
        print("Host appears to not be a valid host, exiting.")
        sys.exit(1)

    # If we made it this far, we can return True
    return True
22,144
def make_order_embeddings(max_word_length, order_arr):
    """
    Build stroke-order embeddings from the stroke-order table, constrained to a
    maximum word length.
    :param max_word_length: maximum word length
    :param order_arr: stroke-order table (list of per-word stroke-order lists)
    :return: stroke-order embeddings tensor
    """
    order_arr = [
        row + [0] * (max_word_length - len(row))
        if len(row) <= max_word_length
        else row[:max_word_length - 1] + [row[-1]]
        for row in order_arr
    ]
    order_arr = np.array(order_arr)
    order_embeddings = tf.convert_to_tensor(order_arr)
    return order_embeddings
22,145
def test_repo_config_userpass() -> None:
    """Test using repo with username/password."""
    password = "pa55word"  # noqa: S105
    repo = RepositoryConfiguration(
        name="pypi",
        base_url="https://private.repo.org/pypi",
        username="fred",
        password=password,
    )
    assert repo.get_access_url() == f"https://fred:{password}@private.repo.org/pypi"
22,146
def test_too_new_solver_methods_raise_error(X_y_data, solver):
    """Test that highs solver raises for scipy<1.6.0."""
    X, y = X_y_data
    with pytest.raises(ValueError, match="scipy>=1.6.0"):
        QuantileRegressor(solver=solver).fit(X, y)
22,147
def agg_double_list(l):
    """
    @param l: [ [...], [...], [...] ], where each inner list holds the
        per-step results of one episode
    @return: mean and standard deviation of the per-episode sums
    """
    # l: [ [...], [...], [...] ]
    # l_i: result of each step in the i-th episode
    s = [numpy.sum(numpy.array(l_i), 0) for l_i in l]
    s_mu = numpy.mean(numpy.array(s), 0)
    s_std = numpy.std(numpy.array(s), 0)
    return s_mu, s_std
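An illustrative call with made-up per-step rewards for three episodes:

episodes = [[1, 2, 3], [2, 2, 2], [0, 5, 1]]   # per-step rewards, 3 episodes
mu, std = agg_double_list(episodes)
print(mu, std)  # 6.0 0.0  (episode sums are 6, 6, 6)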
22,148
def get_string_from_bytes(byte_data, encoding="ascii"):
    """Decodes a string from DAT file byte data.

    Note that in byte form these strings are 0 terminated and this 0 is removed.

    Args:
        byte_data (bytes) : the binary data to convert to a string
        encoding (string) : optional, the encoding type to use when converting
    """
    string_bytes = byte_data[0:(len(byte_data) - 1)]  # strip off the 0 at the end of the string
    string = string_bytes.decode(encoding)
    return string
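For example, with a null-terminated ASCII payload:

print(get_string_from_bytes(b"HELLO\x00"))  # -> HELLO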
22,149
def default_component():
    """Return a default component."""
    return {
        'host': '192.168.0.1',
        'port': 8090,
        'name': 'soundtouch'
    }
22,150
def check_series_duplicates(patches_dir, series_path=Path('series')):
    """
    Checks if there are duplicate entries in the series file

    series_path is a pathlib.Path to the series file relative to the patches_dir

    returns True if there are duplicate entries; False otherwise.
    """
    entries_seen = set()
    for entry in _read_series_file(patches_dir, series_path):
        if entry in entries_seen:
            get_logger().warning('Patch appears more than once in series: %s', entry)
            return True
        entries_seen.add(entry)
    return False
22,151
def is_free(board: list, pos: int) -> bool: """checks if pos is free or filled""" return board[pos] == " "
22,152
def rc_from_blocks(blocks): """ Computes the x and y dimensions of each block :param blocks: :return: """ dc = np.array([np.diff(b[:, 0]).max() for b in blocks]) dr = np.array([np.diff(b[:, 1]).max() for b in blocks]) return dc, dr
22,153
def _create_parser(): """ Creates argparser for SISPO which can be used for CLI and options """ parser = argparse.ArgumentParser(usage="%(prog)s [OPTION] ...", description=__file__.__doc__) parser.add_argument("-i", "--inputdir", action="store", default=None, type=str, help="Path to 'definition.json' file") parser.add_argument("-o", "--outputdir", action="store", default=None, type=str, help="Path to results directory") parser.add_argument("-n", "--name", action="store", default=None, type=str, help="Name of simulation scenario") parser.add_argument("--verbose", action="store_true", help="Verbose output, displays log also on STDOUT") parser.add_argument("--with-sim", action="store_true", dest="with_sim", help="If set, SISPO will simulate the scenario") parser.add_argument("--with-render", action="store_true", dest="with_render", help="If set, SISPO will render the scenario") parser.add_argument("--with-compression", action="store_true", dest="with_compression", help="If set, SISPO will compress images") parser.add_argument("--with-reconstruction", action="store_true", dest="with_reconstruction", help="If set, SISPO will attempt reconstruction.") parser.add_argument("--restart", action="store_true", help="Use cProfiler and write results to log.") parser.add_argument("--profile", action="store_true", help="Use cProfiler and write results to log.") parser.add_argument("-v", "--version", action="store_true", help="Prints version number.") parser.add_argument("--with-plugins", action="store_true", dest="with_plugins", help="Plugins that are run before rendering.") return parser
22,154
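Since the parser is plain argparse, it can be exercised directly; the paths below are placeholders, not real SISPO inputs.

parser = _create_parser()
args = parser.parse_args(["-i", "definition.json", "-o", "results", "--with-sim", "--with-render"])
print(args.inputdir, args.outputdir, args.with_sim, args.with_render)
# definition.json results True True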
def serialize_measurement(measurement): """Serializes a `openff.evaluator.unit.Measurement` into a dictionary of the form `{'value', 'error'}`. Parameters ---------- measurement : openff.evaluator.unit.Measurement The measurement to serialize Returns ------- dict of str and str A dictionary representation of a openff.evaluator.unit.Measurement with keys of {"value", "error"} """ return {"value": measurement.value, "error": measurement.error}
22,155
def nikon_ev_bias(seq):
    """
    http://tomtia.plala.jp/DigitalCamera/MakerNote/index.asp
    First digit seems to be in steps of 1/6 EV.
    Does the third value mean the step size? It is usually 6,
    but it is 12 for the ExposureDifference.
    Check for an error condition that could cause a crash.
    This only happens if something has gone really wrong in
    reading the Nikon MakerNote.
    """
    if len(seq) < 4:
        return ""
    #
    if seq == [252, 1, 6, 0]:
        return "-2/3 EV"
    if seq == [253, 1, 6, 0]:
        return "-1/2 EV"
    if seq == [254, 1, 6, 0]:
        return "-1/3 EV"
    if seq == [0, 1, 6, 0]:
        return "0 EV"
    if seq == [2, 1, 6, 0]:
        return "+1/3 EV"
    if seq == [3, 1, 6, 0]:
        return "+1/2 EV"
    if seq == [4, 1, 6, 0]:
        return "+2/3 EV"
    # Handle combinations not in the table.
    a = seq[0]
    # Causes headaches for the +/- logic, so special case it.
    if a == 0:
        return "0 EV"
    if a > 127:
        a = 256 - a
        ret_str = "-"
    else:
        ret_str = "+"
    b = seq[2]  # Assume third value means the step size
    whole = a // b  # integer number of whole EV steps
    a = a % b
    if whole != 0:
        ret_str = ret_str + str(whole) + " "
    if a == 0:
        ret_str += "EV"
    else:
        from exifpy.objects import Ratio
        r = Ratio(a, b)
        ret_str = ret_str + repr(r) + " EV"
    return ret_str
22,156
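A usage sketch covering the lookup table and the whole-EV branch (the fractional branch additionally imports exifpy's Ratio class, so it is not exercised here); the inputs follow the Nikon MakerNote encoding described in the docstring.

assert nikon_ev_bias([252, 1, 6, 0]) == "-2/3 EV"   # table entry
assert nikon_ev_bias([0, 1, 6, 0]) == "0 EV"
assert nikon_ev_bias([6, 1, 6, 0]) == "+1 EV"       # 6/6 -> one full step up
assert nikon_ev_bias([250, 1, 6, 0]) == "-1 EV"     # 256 - 250 = 6 -> one step down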
def process_message(schema, publisher, data):
    """
    Method to process messages for all the bases that use Google's Pub/Sub.

    Args:
        schema (:obj:`dict`, required): A JSON schema for contract validation.
            JSON Schema is a vocabulary that allows you to annotate and validate JSON documents.
        publisher (:obj:`PubSub`, optional): Instance of the '.manager.PubSub'.
        data (:obj:`dict`, required): A dictionary representing the message body.
    """
    try:
        data = json.loads(request.data)
        validate(data, schema, format_checker=FormatChecker())
        publisher.publish(data)
        return data, 202
    except ValidationError as validate_error:
        return str(validate_error), 400
22,157
def rename_record_columns(records, columns_to_rename): """ Renames columns for better desc and to match Socrata column names :param records: list - List of record dicts :param columns_to_rename: dict - Dict of Hasura columns and matching Socrata columns """ for record in records: for column, rename_value in columns_to_rename.items(): if column in record.keys(): record[rename_value] = record.pop(column) return records
22,158
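A usage sketch with made-up Hasura/Socrata column names; the rename happens in place and the list is also returned.

records = [{"crash_id": 1, "crash_date": "2021-01-01"}, {"crash_id": 2}]
renamed = rename_record_columns(records, {"crash_id": "id", "crash_date": "date"})
print(renamed)   # [{'id': 1, 'date': '2021-01-01'}, {'id': 2}]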
def request_similar_resource(token, data_): """If a similar resource to the data_ passed exists, this method gets and returns it """ headers = {'Authorization': 'Token {}'.format(token.token)} # get the resource endpoint url_check_res = URL.DB_URL + 'getSimilarResource/' # only res code if shadow id not passed resource_code = data_['resource_accessing'].split('/')[1] url_check_res += '{}/'.format(resource_code) if "shadow_id" in data_: url_check_res += "{}/".format(data_['shadow_id']) req = requests.get(url=url_check_res, headers=headers) code_to_return = HTTPStatus.NOT_FOUND data_to_return = {"success": False} if req.status_code == HTTPStatus.OK: code_to_return = HTTPStatus.OK data_to_return = json.loads(req.text) return code_to_return, data_to_return
22,159
def retrieve_uniprot_data_for_acc_list_in_xlsx_file(excelfile_with_uniprot_accessions, input_uniprot_flatfile, selected_uniprot_records_flatfile, logging): """ From a list of uniprot accessions in excel, select out desired records from a large UniProt flatfile. Parameters ---------- excelfile_with_uniprot_accessions : str Path to excel input file. logging : logging.Logger Logger for printing to console and logfile. selected_uniprot_records_flatfile : str Path to output UniProt flatfile containing selected records for analysis. """ logging.info('~~~~~~~~~~~~ starting retrieve_uniprot_data_for_acc_list_in_xlsx_file ~~~~~~~~~~~~') # take list of acc, search in default uniprot flatfile. If missing, download from uniprot server. df_uniprot_accessions = pd.read_excel(excelfile_with_uniprot_accessions, sheetname='uniprot_numbers') # remove proteins that are marked as 'not included in analysis' df_uniprot_accessions = df_uniprot_accessions[df_uniprot_accessions['include_in_analysis'] == True] # accession_list = [line.strip() for line in open(input_accession_list, "r")] uniprot_index_handle = SeqIO.index(input_uniprot_flatfile, "swiss") with open(selected_uniprot_records_flatfile, "wb") as output: for uniprot_accession in df_uniprot_accessions['uniprot_acc']: try: # this adds the selected records to the file, but adds a new line after each line! # Doesn't affect conversion to SeqRecord object) assert isinstance(uniprot_index_handle, object) output.write(uniprot_index_handle.get_raw(uniprot_accession)) except KeyError: sys.stdout.write("No SwissProt record found in %s for %s." % (input_uniprot_flatfile, uniprot_accession))
22,160
def total (initial, *positionals, **keywords): """ Simply sums up all the passed numbers. """ count = initial for n in positionals: count += n for n in keywords: count += keywords[n] return count
22,161
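A small usage sketch: positional and keyword numbers are all folded into the running sum.

assert total(1, 2, 3) == 6
assert total(10, 1, 2, bonus=3, extra=4) == 20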
def login_required(f): """Ensures user is logged in before action Checks of token is provided in header decodes the token then returns current user info """ @wraps(f) def wrap(*args, **kwargs): token = None if 'x-access-token' in request.headers: token = request.headers['x-access-token'] if not token: return jsonify({ 'warning': 'Missing token. Please register or login' }), 401 is_token_valid = versions.v2.models.AuthToken.query.filter_by(token=token).first() is_token_valid = is_token_valid.valid if is_token_valid else True if not is_token_valid: return jsonify({ 'warning': 'Login again'}), 401 try: data = jwt.decode(token, app.config['SECRET_KEY']) current_user = data['id'] except jwt.ExpiredSignatureError: return jsonify({ 'warning': 'Expired token. Please login to get a new token' }), 401 except ValueError: return jsonify({ 'warning': 'Invalid token. Please register or login' }), 401 return f(current_user, *args, **kwargs) return wrap
22,162
def loop_to_unixtime(looptime, timediff=None): """Convert event loop time to standard Unix time.""" if timediff is None: timediff = _get_timediff() return looptime + timediff
22,163
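A usage sketch that passes the offset explicitly, so the private _get_timediff helper is not needed.

import asyncio
import time

loop = asyncio.new_event_loop()
offset = time.time() - loop.time()                     # loop-time -> wall-clock offset
print(loop_to_unixtime(loop.time() + 5.0, offset))     # roughly 5 seconds from now
loop.close()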
def groups(column: str) -> "pli.Expr": """ Syntactic sugar for `pl.col("foo").agg_groups()`. """ return col(column).agg_groups()
22,164
def create_app(testing=False, cli=False) -> Flask: """Application factory, used to create application """ from authentek.internal import app app.config.from_object(os.getenv('APP_SETTINGS', 'authentek.server.config.DevelopmentConfig')) if testing is True: app.config["TESTING"] = True app = configure_extensions(app, cli) register_blueprints(app) return app
22,165
def validation_all(system='AuAu200'): """ Emulator validation: normalized residuals and RMS error for each observable. """ fig, (ax_box, ax_rms) = plt.subplots( nrows=2, figsize=figsize(1.25, aspect=.4), gridspec_kw=dict(height_ratios=[1.5, 1]) ) index = 1 ticks = [] ticklabels = [] vdata = model.validation_data[system] emu = emulators[system] mean, cov = emu.predict( Design(system, validation=True).array, return_cov=True ) for obs, subobslist in emu.observables: for subobs in subobslist: color = obs_color(obs, subobs) Y = vdata[obs][subobs]['Y'] Y_ = mean[obs][subobs] S_ = np.sqrt(cov[(obs, subobs), (obs, subobs)].T.diagonal()) Z = (Y_ - Y)/S_ for i, percentiles in enumerate( np.percentile(Z, [10, 25, 50, 75, 90], axis=0).T, start=index ): boxplot(ax_box, percentiles, x=i, box_width=.8, color=color) rms = 100*np.sqrt(np.square(Y_/Y - 1).mean(axis=0)) ax_rms.plot( np.arange(index, index + rms.size), rms, 'o', color=color ) ticks.append(.5*(index + i)) ticklabels.append(obs_label(obs, subobs)) index = i + 2 ax_box.set_xticks(ticks) ax_box.set_xticklabels(ticklabels) ax_box.tick_params('x', bottom=False, labelsize=plt.rcParams['font.size']) ax_box.set_ylim(-2.25, 2.25) ax_box.set_ylabel(r'Normalized residuals') q, p = np.sqrt(2) * special.erfinv(2*np.array([.75, .90]) - 1) ax_box.axhspan(-q, q, color='.85', zorder=-20) for s in [-1, 0, 1]: ax_box.axhline(s*p, color='.5', zorder=-10) ax_q = ax_box.twinx() ax_q.set_ylim(ax_box.get_ylim()) ax_q.set_yticks([-p, -q, 0, q, p]) ax_q.set_yticklabels([10, 25, 50, 75, 90]) ax_q.tick_params('y', right=False) ax_q.set_ylabel( 'Normal quantiles', fontdict=dict(rotation=-90), labelpad=4*plt.rcParams['axes.labelpad'] ) ax_rms.set_xticks([]) ax_rms.set_yticks(np.arange(0, 16, 5)) ax_rms.set_ylim(0, 15) ax_rms.set_ylabel('RMS % error') for y in ax_rms.get_yticks(): ax_rms.axhline(y, color='.5', zorder=-10) for ax in fig.axes: ax.set_xlim(0, index - 1) ax.spines['bottom'].set_visible(False)
22,166
def filter_roidb(roidb, config):
    """ remove roidb entries without usable rois """

    def is_valid(entry):
        """ valid images have at least 1 fg or bg roi """
        overlaps = entry['max_overlaps']
        fg_inds = np.where(overlaps >= config.TRAIN.FG_THRESH)[0]
        bg_inds = np.where((overlaps < config.TRAIN.BG_THRESH_HI) & (overlaps >= config.TRAIN.BG_THRESH_LO))[0]
        valid = len(fg_inds) > 0 or len(bg_inds) > 0
        return valid

    num = len(roidb)
    filtered_roidb = [entry for entry in roidb if is_valid(entry)]
    num_after = len(filtered_roidb)
    print('filtered %d roidb entries: %d -> %d' % (num - num_after, num, num_after))

    return filtered_roidb
22,167
def get_res(url):
    """
    Fetch a URL with requests and return the response.
    :param url: the URL to request
    :return: the Response object on HTTP 200, otherwise None
    """
    try:
        requests.adapters.DEFAULT_RETRIES = 5
        res = requests.get(url)
        time.sleep(random.randint(0, 3))
        if res.status_code == 200:
            return res
        return None
    except Exception as e:
        time.sleep(20)
        log.debug(str(e) + ' error')
        return None
22,168
def seebeck_thermometry(T_Kelvin): """ This function returns the Seebeck coefficient of the thermocouple concerned (by default type "E") at a certain temperature. The input of the function is a temperature in Kelvin, but the coefficient below are for a polynomial function with T in Celsius. The output is S in [V / K] """ coeff_E_below_270K = np.array([ 0, 5.8665508708E1, 4.5410977124E-2, -7.7998048686E-4, -2.5800160843E-5, -5.9452583057E-7, -9.3214058667E-9, -1.0287605534E-10, -8.0370123621E-13, -4.3979497391E-15, -1.6414776355E-17, -3.9673619516E-20, -5.5827328721E-23, -3.4657842013E-26 ])[::-1] # Reverse for poly1d coeff_E_above_270K = np.array([ 0, 5.8665508710E1, 4.5032275582E-2, 2.8908407212E-5, -3.3056896652E-7, 6.5024403270E-10, -1.9197495504E-13, -1.2536600497E-15, 2.1489217569E-18, -1.4388041782E-21, 3.5960899481E-25 ])[::-1] # Reverse for poly1d T_Celsius = T_Kelvin - 273.15 ## Selection of coefficients for temperature regime index_below = np.where(T_Celsius <= 0) index_above = np.where(T_Celsius > 0) S_values = np.zeros(np.size(T_Kelvin)) E_below = np.poly1d(coeff_E_below_270K) # is a poly1d object in microVolt S_below = np.polyder(E_below) # is a poly1d object in microVolt / Celsius S_values[index_below] = S_below(T_Celsius[index_below])*1e-6 # is in Volt / K E_above = np.poly1d(coeff_E_above_270K) # is a poly1d object in microVolt S_above = np.polyder(E_above) # is a poly1d object in microVolt / Celsius S_values[index_above] = S_above(T_Celsius[index_above])*1e-6 # is in Volt / K return S_values
22,169
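A usage sketch (assumes numpy imported as np, as in the function body); note the np.where-based indexing expects an array of temperatures rather than a scalar.

T = np.array([4.2, 77.0, 300.0])        # temperatures in kelvin
S = seebeck_thermometry(T)              # Seebeck coefficients in V/K
print(S * 1e6)                          # same values in microvolt per kelvin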
def correlation_permutation_test( x, y, f, side, n=10000, confidence=0.99, plot=None, cores=1, seed=None ): """This function carries out Monte Carlo permutation tests comparing whether the correlation between two variables is statistically significant :param x: An iterable of X values observed :param y: An iterable of Y values observed :param f: The function for calculating the relationship strength between X and Y :param side: The side to use for hypothesis testing :param n: The number of permutations to sample, defaults to 10000 :type n: int, optional :param confidence: The probability that the true p-value is contained in the intervals returned, defaults to 0.99 :type confidence: float, optional :param plot: The name of a file to draw a plot of permuted correlations to, defaults to None :type plot: str, optional :param cores: The number of logical CPUs to use, defaults to 1 :type cores: int, optional :param seed: The seed for randomisation, defaults to None :type seed: int, optional :return: Named tuple containing upper and lower bounds of p-value at the given confidence """ if seed: rng = _rd.Random(seed) else: rng = _rd.Random() if callable(f): _f = f elif f == "pearsonr": _f = _pearsonr elif f == "spearmanr": _f = _spearmanr else: raise ValueError( "{} not valid for f -- must be a function, 'pearsonr', or 'spearmanr'".format( f ) ) _x = list(x) _y = list(y) if side in _GT: stat_0 = _f(_x, _y) elif side in _LT: stat_0 = _f(_x, _y) elif side in _BOTH: stat_0 = abs(_f(_x, _y)) else: raise ValueError( "{} not valid for side -- should be 'greater', 'lower', or 'both'".format( side ) ) jobs = ((_x[:], _y[:], stat_0, _f, rng.randint(0, 1e100)) for _ in range(n)) if side in _GT: result = _job_hander(_correlation_greater, jobs, cores) elif side in _LT: result = _job_hander(_correlation_lower, jobs, cores) else: result = _job_hander(_correlation_both, jobs, cores) v = [] p = 0 for truth, val in result: p += truth v.append(val) p /= n if plot: plot_histogram(x=v, x0=stat_0, outfile=plot, side=side) lower, upper = wilson(p, n, confidence) return _RESULT(lower, upper, confidence)
22,170
def value_iteration(model, maxiter=100): """ Solves the supplied environment with value iteration. Parameters ---------- model : python object Holds information about the environment to solve such as the reward structure and the transition dynamics. maxiter : int The maximum number of iterations to perform. Return ------ val_ : numpy array of shape (N, 1) Value function of the environment where N is the number of states in the environment. pi : numpy array of shape (N, 1) Optimal policy of the environment. """ # initialize the value function and policy pi = np.ones((model.num_states, 1)) val_ = np.zeros((model.num_states, 1)) for i in range(maxiter): # initialize delta delta = 0 # perform Bellman update for each state for state in range(model.num_states): # store old value tmp = val_[state].copy() # compute the value function val_[state] = np.max( np.sum((model.R[state] + model.gamma * val_) * model.P[state,:,:], 0) ) # find maximum change in value delta = np.max( (delta, np.abs(tmp - val_[state])) ) # stopping criteria if delta <= EPS * (1 - model.gamma) / model.gamma: print("Value iteration converged after %d iterations." % i) break # compute the policy for state in range(model.num_states): pi[state] = np.argmax(np.sum(val_ * model.P[state,:,:],0)) return val_, pi
22,171
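A minimal sketch of the model interface the function assumes (attribute names are read off the function body; EPS is a module-level tolerance that must exist alongside the function, assumed here to be 1e-8): a 2-state, 2-action MDP where action 1 moves from state 0 to the rewarding, absorbing state 1.

import numpy as np
from types import SimpleNamespace

EPS = 1e-8   # assumed module-level constant used in the stopping criterion

P = np.zeros((2, 2, 2))      # P[s, s', a] = transition probability
P[0, :, 0] = [1.0, 0.0]      # state 0, action 0: stay
P[0, :, 1] = [0.0, 1.0]      # state 0, action 1: move to state 1
P[1, :, 0] = [0.0, 1.0]      # state 1 is absorbing
P[1, :, 1] = [0.0, 1.0]
model = SimpleNamespace(num_states=2, gamma=0.9,
                        R=np.array([0.0, 1.0]), P=P)

val_, pi = value_iteration(model, maxiter=500)
print(val_.ravel())   # approximately [9., 10.]
print(pi.ravel())     # action 1 is optimal in state 0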
def node(function: Callable): """A decorator that registers a function to execute when a node runs""" sig = inspect.signature(function) args = [] for (name, param) in sig.parameters.items(): value = param.default if value is inspect.Parameter.empty: raise TypeError(f"{name} must have a type (e.g. {name}=InputTable)") if inspect.isclass(value) and issubclass(value, _NodeInterfaceEntry): if value.__class__ in (type, abc.ABCMeta): value = value() # noinspection PyCallingNonCallable args.append(value(name)) else: raise TypeError(f"{name} is not a valid node parameter type") return NodeFunction(function, args)
22,172
def tick_bars_test(dataframe):
    """
    This validates that the tick_bars() function works
    """
    list0 = standard_bars.tick_bars(dataframe, 'close', 33)
    if len(list0) > 0:
        print('tick_bars_test() pass')
    else:
        print('error with tick_bars_test()')
22,173
def animate_edge_vect(data, param, res, fig, ax, k, curvature=False): """Animate an image of the contour as generated by show_plots.show_edge_scatter(). Parameters ---------- data : data object param : param object res : result object fig : matplotlib figure ax : matplotlib axis k : int frame curvature : bool, optional use curvature for coloring, by default False """ ax.set_title(f'Frame {k-1} to {k}') image = data.load_frame_morpho(k) for a in ax.get_children(): if isinstance(a, FancyArrow): a.remove() if curvature: f = spline_curvature(res.spline[k], np.linspace(0, 1, param.n_curve + 1)) else: f = res.displacement[:, k] ax.get_images()[0].set_data(image) ax.lines.pop(0) ax.lines.pop(0) fig, ax = show_edge_scatter( param.n_curve, res.spline[k - 1], # res.spline[k], res.spline[k], # res.spline[k + 1], res.param0[k], res.param[k], f, fig_ax=(fig, ax), )
22,174
def test_game(): """Test function to validate functionality of TicTacToe class. Plays a game of Tic-Tac-Toe by randomly selecting moves for both players. """ ttt = TicTacToe() ttt.print_board() while not ttt.done: legal_next_states = ttt.get_legal_next_states(ttt.state) move_idx = np.random.randint(0,len(legal_next_states)) ttt.step(legal_next_states[move_idx]) ttt.print_board()
22,175
def _change_agent_position(state: State): """changes agent position""" state.agent.position = dataclasses.replace( state.agent.position, y=(state.agent.position.y + 1) % state.grid.shape.height, x=(state.agent.position.x + 1) % state.grid.shape.width, )
22,176
def mg_refractive(m, mix): """Maxwell-Garnett EMA for the refractive index. Args: m: Tuple of the complex refractive indices of the media. mix: Tuple of the volume fractions of the media, len(mix)==len(m) (if sum(mix)!=1, these are taken relative to sum(mix)) Returns: The Maxwell-Garnett approximation for the complex refractive index of the effective medium If len(m)==2, the first element is taken as the matrix and the second as the inclusion. If len(m)>2, the media are mixed recursively so that the last element is used as the inclusion and the second to last as the matrix, then this mixture is used as the last element on the next iteration, and so on. """ if len(m) == 2: cF = float(mix[1]) / (mix[0]+mix[1]) * \ (m[1]**2-m[0]**2) / (m[1]**2+2*m[0]**2) er = m[0]**2 * (1.0+2.0*cF) / (1.0-cF) m = np.sqrt(er) else: m_last = mg_refractive(m[-2:], mix[-2:]) mix_last = mix[-2] + mix[-1] m = mg_refractive(m[:-2] + (m_last,), mix[:-2] + (mix_last,)) return m
22,177
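A usage sketch for the two-component branch: a dilute mixture of ice inclusions in an air matrix (the refractive indices here are rough illustrative values, not authoritative constants).

m_air = complex(1.0, 0.0)
m_ice = complex(1.78, 0.004)
m_eff = mg_refractive((m_air, m_ice), (0.8, 0.2))   # 80% air matrix, 20% ice inclusions
print(m_eff)   # effective complex index, between m_air and m_ice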
def locateObjLocation(data, questionDict, questionIdict):
    """
    Locate the object of "where" questions.
    Very naive heuristic: take the noun immediately after "where".
    """
    where = questionDict['where']
    for t in range(data.shape[0] - 1):
        if data[t, 0] == where:
            for u in range(t + 1, data.shape[0]):
                word = questionIdict[data[u, 0] - 1]
                lexname = lookupLexname(word)
                if (lexname is not None and \
                    lexname.startswith('noun')) or \
                    (lexname is None):
                    return data[u, 0]
    print('not found')
    return data[-1, 0]
22,178
def main(config, model, stid, forecast_date): """ Produce a Forecast object from bufkit data. """ # Get parameters from the config try: bufr = config['BUFKIT']['BUFR'] except KeyError: raise KeyError('bufkit: missing BUFR executable path in config BUFKIT options') try: bufkit_directory = config['BUFKIT']['BUFKIT_directory'] except KeyError: bufkit_directory = '%s/site_data' % config['THETAE_ROOT'] if config['debug'] > 50: print('bufkit warning: setting bufkit file directory to %s' % bufkit_directory) try: run_time = config['Models'][model]['run_time'] except KeyError: raise KeyError('bufkit: no run_time parameter defined for model %s in config!' % model) try: bufr_name = config['Models'][model]['bufr_name'] except KeyError: raise KeyError('bufkit: no bufr_name parameter defined for model %s in config!' % model) if 'bufr_stid' in config['Stations'][stid]: bufr_stid = config['Stations'][stid]['bufr_stid'] else: bufr_stid = str(stid) # Delete yesterday's bufkit files try: if not(config['BUFKIT']['archive']): bufr_delete_yesterday(bufkit_directory, bufr_stid, forecast_date - timedelta(days=1)) except KeyError: bufr_delete_yesterday(bufkit_directory, bufr_stid, forecast_date - timedelta(days=1)) # Get bufkit forecasts forecast = get_bufkit_forecast(config, bufr, bufkit_directory, model, bufr_name, run_time, bufr_stid, forecast_date) forecast.set_stid(str(stid)) return forecast
22,179
def mkshex(shapes: Union[CSVShape, List[CSVShape]]) -> Schema: """Convert list of csv2shape Shapes to ShExJSG Schema object.""" # pylint: disable=invalid-name # One- and two-letter variable names do not conform to snake-case naming style if isinstance(shapes, CSVShape): shapes = [shapes] schema_shexjsg = Schema() for s in shapes: shape_id = IRIREF(s.shapeID) if s.start: if schema_shexjsg.start: print(f"Multiple start shapes: <{schema_shexjsg.start}>, <{shape_id}>") else: schema_shexjsg.start = shape_id shape = Shape(id=shape_id) for csv_tc in s.tc_list: add_triple_constraint(shape, csv_tc) if not schema_shexjsg.shapes: schema_shexjsg.shapes = [shape] else: schema_shexjsg.shapes.append(shape) return schema_shexjsg
22,180
def get_unsigned_js_val(abs_val: int, max_unit: int, abs_limit: int) -> int:
    """Get the unsigned, remapped joystick value in reverse range. (For
    example, if the limit is 2000 and the input value is also 2000, the value
    returned will be 1. With the same limit, if the input value is 1, the
    output value will be close to 2000. The same applies to the values in
    between.) This evenly divides the value so that the maximum js range is
    remapped to a value in the range of the specified limit.

    abs_val - The current joystick value
    max_unit - The maximum value to remap the joystick value
    abs_limit - The maximum range of the joystick
    """
    inc = abs_limit / max_unit

    # ignoring signs to keep results positive
    if abs_val > 0:
        abs_val *= -1

    val = int((abs_val / inc) + max_unit)

    # if the value is zero, return 1 (maximum range)
    if val == 0:
        val = 1

    return val
22,181
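A usage sketch matching the docstring's example numbers (the small input maps to 1999 rather than exactly 2000 because of the integer truncation).

print(get_unsigned_js_val(2000, 2000, 2000))   # 1     (maximum input -> minimum output)
print(get_unsigned_js_val(1, 2000, 2000))      # 1999  (minimum input -> near-maximum output)
print(get_unsigned_js_val(1000, 2000, 2000))   # 1000  (midpoint stays at the midpoint)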
def to_xyz(struct, extended_xyz: bool = True, print_stds: bool = False, print_forces: bool = False, print_max_stds: bool = False, print_energies: bool = False, predict_energy=None, dft_forces=None, dft_energy=None, timestep=-1, write_file: str = '', append: bool = False, labels=None) -> str: """ Function taken from the FLARE python package by Vandermause et al. at: https://github.com/mir-group/flare Reference: Vandermause, J., Torrisi, S. B., Batzner, S., Xie, Y., Sun, L., Kolpak, A. M. & Kozinsky, B. On-the-fly active learning of interpretable Bayesian force fields for atomistic rare events. npj Comput Mater 6, 20 (2020). https://doi.org/10.1038/s41524-020-0283-z Convenience function which turns a structure into an extended .xyz file; useful for further input into visualization programs like VESTA or Ovito. Can be saved to an output file via write_file. :param print_stds: Print the stds associated with the structure. :param print_forces: :param extended_xyz: :param print_max_stds: :param write_file: :return: """ species_list = [Z_to_element(x) for x in struct.coded_species] xyz_str = '' xyz_str += f'{len(struct.coded_species)} \n' # Add header line with info about lattice and properties if extended # xyz option is called. if extended_xyz: cell = struct.cell xyz_str += f'Lattice="{cell[0,0]} {cell[0,1]} {cell[0,2]}' xyz_str += f' {cell[1,0]} {cell[1,1]} {cell[1,2]}' xyz_str += f' {cell[2,0]} {cell[2,1]} {cell[2,2]}"' if timestep > 0: xyz_str += f' Timestep={timestep}' if predict_energy: xyz_str += f' PE={predict_energy}' if dft_energy is not None: xyz_str += f' DFT_PE={dft_energy}' xyz_str += ' Properties=species:S:1:pos:R:3' if print_stds: xyz_str += ':stds:R:3' stds = struct.stds if print_forces: xyz_str += ':forces:R:3' forces = struct.forces if print_max_stds: xyz_str += ':max_std:R:1' stds = struct.stds if labels: xyz_str += ':tags:R:1' clustering_labels = struct.local_energy_stds if print_energies: if struct.local_energies is None: print_energies = False else: xyz_str += ':local_energy:R:1' local_energies = struct.local_energies if dft_forces is not None: xyz_str += ':dft_forces:R:3' xyz_str += '\n' else: xyz_str += '\n' for i, pos in enumerate(struct.positions): # Write positions xyz_str += f"{species_list[i]} {pos[0]} {pos[1]} {pos[2]}" # If extended XYZ: Add in extra information if print_stds and extended_xyz: xyz_str += f" {stds[i,0]} {stds[i,1]} {stds[i,2]}" if print_forces and extended_xyz: xyz_str += f" {forces[i,0]} {forces[i,1]} {forces[i,2]}" if print_energies and extended_xyz: xyz_str += f" {local_energies[i]}" if print_max_stds and extended_xyz: xyz_str += f" {np.max(stds[i,:])} " if labels and extended_xyz: xyz_str += f" {clustering_labels[i]} " if dft_forces is not None: xyz_str += f' {dft_forces[i, 0]} {dft_forces[i,1]} ' \ f'{dft_forces[i, 2]}' if i < (len(struct.positions) - 1): xyz_str += '\n' # Write to file, optionally if write_file: if append: fmt = 'a' else: fmt = 'w' with open(write_file, fmt) as f: f.write(xyz_str) f.write("\n") return xyz_str
22,182
def load_file(filename: str):
    """Load a tab-delimited text file and return it as a DataFrame object."""
    df = pd.read_csv(filename, delimiter='\t')
    return df
22,183
def loadRegexList(regexListFile): """Returns regexList, registries, internetSources""" regexList = [] registries = set() internetSourceTypes = set() libLF.log('Loading regexes from {}'.format(regexListFile)) with open(regexListFile, 'r') as inStream: for line in inStream: line = line.strip() if len(line) == 0: continue try: # Build the Regex regex = libLF.Regex() regex.initFromNDJSON(line) regexList.append(regex) registries = registries.union(regex.registriesUsedIn()) internetSourceTypes = internetSourceTypes.union(regex.internetSourcesAppearedIn()) except KeyboardInterrupt: raise except BaseException as err: libLF.log('Exception parsing line:\n {}\n {}'.format(line, err)) libLF.log('Loaded {} Regex\'es'.format(len(regexList))) return regexList, list(registries), list(internetSourceTypes)
22,184
def _decode_and_center_crop(image_bytes, image_size, resize_method=None): """Crops to center of image with padding then scales image_size.""" shape = tf.shape(image_bytes) image_height = shape[0] image_width = shape[1] padded_center_crop_size = tf.cast( ((image_size / (image_size + CROP_PADDING)) * tf.cast(tf.minimum(image_height, image_width), tf.float32)), tf.int32) offset_height = ((image_height - padded_center_crop_size) + 1) // 2 offset_width = ((image_width - padded_center_crop_size) + 1) // 2 image = tf.image.crop_to_bounding_box(image_bytes, offset_height, offset_width, padded_center_crop_size, padded_center_crop_size) image = _resize_image(image, image_size, resize_method) return image
22,185
def match_beacons_translate_only( sensor_a_beacons: typing.Set[typing.Tuple[int, int, int]], sensor_b_beacons: numpy.ndarray, min_matching: int, ) -> typing.Optional[numpy.ndarray]: """ Search for matching beacons between `sensor_a_beacons` and `sensor_b_beacons`, assuming their orientation matches. Returns either the offset of sensor_b relative to sensor_a, or None if no 12 matching beacons were found. """ # naive approach: full search for beacon_a in sensor_a_beacons: for beacon_b_num in range(sensor_b_beacons.shape[0]): # assume sensor_a_beacons[beacon_a_num] is the same beacon as # sensor_b_beacons[beacon_b_num] sensor_b_relative_to_sensor_a = beacon_a - sensor_b_beacons[beacon_b_num] sensor_b_beacons_relative_to_sensor_a = sensor_b_beacons + sensor_b_relative_to_sensor_a m = num_matching_beacons(sensor_a_beacons, sensor_b_beacons_relative_to_sensor_a) if m >= min_matching: return sensor_b_relative_to_sensor_a return None
22,186
def txm_log(): """ Return the logger. """ return __log__
22,187
def remove_list_by_name(listslist, name): """ Finds a list in a lists of lists by it's name, removes and returns it. :param listslist: A list of Twitter lists. :param name: The name of the list to be found. :return: The list with the name, if it was found. None otherwise. """ for i in range(len(listslist)): if listslist[i].name == name: return listslist.pop(i)
22,188
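A usage sketch: any object exposing a .name attribute can stand in for a real Twitter list.

from types import SimpleNamespace

lists = [SimpleNamespace(name="news"), SimpleNamespace(name="friends")]
found = remove_list_by_name(lists, "friends")
print(found.name, len(lists))   # friends 1  (the matched list is removed and returned)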
def song_clicks_metric(ranking):
    """
    Spotify clicks metric: number of ten-track blocks to page through before
    the first relevant track (51 if no relevant track is in the ranking).
    :param ranking:
    :return:
    """
    if 1 in ranking:
        first_idx = ranking.index(1)
        return math.floor(first_idx / 10)
    return 51


# The two helpers below are static methods of the enclosing metrics class in
# the original source; they are kept here as plain functions.
def print_subtest_results(sub_test_names, metric_names, results):
    (num_subtest, num_metrics) = results.shape
    print('{0: <15}'.format("Subtest"), "\t", end="")
    for i in range(num_metrics):
        print(metric_names[i], "\t", end="")
    print()
    for st in range(num_subtest):
        print('{0: <15}'.format(sub_test_names[st]), "\t", end="")
        for m in range(num_metrics):
            print(np.round(results[st][m], decimals=3), "\t", end="")
        print()


def print_overall_results(metric_names, results):
    print('{0: <15}'.format(""), "\t", end="")
    for i in range(len(metric_names)):
        print(metric_names[i], "\t", end="")
    print()
    print('{0: <15}'.format("Overall"), "\t", end="")
    for m in range(len(metric_names)):
        print(np.round(results[m], decimals=3), "\t", end="")
    print()
22,189
def merge_reports(master: dict, report: dict): """ Merge classification reports into a master list """ keys = master.keys() ret = copy.deepcopy(master) for key in keys: scores = report[key] for score, value in scores.items(): ret[key][score] += [value] return ret
22,190
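A usage sketch: the master report keeps lists of scores per label, and each merged report appends one value per metric.

master = {"cat": {"precision": [0.80], "recall": [0.70]}}
report = {"cat": {"precision": 0.90, "recall": 0.75}}
print(merge_reports(master, report))
# {'cat': {'precision': [0.8, 0.9], 'recall': [0.7, 0.75]}}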
def compatible_tags( python_version: Optional[PythonVersion] = None, interpreter: Optional[str] = None, platforms: Optional[Iterable[str]] = None, ) -> Iterator[Tag]: """ Yields the sequence of tags that are compatible with a specific version of Python. The tags consist of: - py*-none-<platform> - <interpreter>-none-any # ... if `interpreter` is provided. - py*-none-any """ if not python_version: python_version = sys.version_info[:2] platforms = list(platforms or _platform_tags()) for version in _py_interpreter_range(python_version): for platform_ in platforms: yield Tag(version, "none", platform_) if interpreter: yield Tag(interpreter, "none", "any") for version in _py_interpreter_range(python_version): yield Tag(version, "none", "any")
22,191
def parse_xeasy_peaks(peak_file): """ Parse Xeasy3D peakfile to Peaks object Xeasy file format stores a column labled 'unused' to indicate rather the peak has been used in a structure calculation procedure (0 or 1). This column is, however, not assigned automatically and may not be set at all by the user. :param peak_file: Xeasy3D peak file path :type peak_file: string """ assert os.path.exists(peak_file), 'Xeasy3D peakfile {0} does not exist.'.format(peak_file) logging.debug('Parsing Xeasy3D peakfile {0}'.format(peak_file)) peaks = Peaks() with open(peak_file) as peakfile: for line in peakfile.readlines(): if not line.startswith('#') and len(line): line = line.strip().split() if len(line) > 10: peak = { 'id':int(line[0]), 'w1':float(line[1]), 'w2':float(line[2]), 'w3':float(line[3]), 'spec_type':line[5], 'vol':float(line[6]), 'vol_err':float(line[7]), 'intm':line[8], 'unused':int(line[9]), 'ass1':int(line[10]), 'ass2':int(line[11]), 'ass3':int(line[12]) } peaks.add(peak) return peaks
22,192
def detect_objects(): """ Detect objects in frame(from sharred array) and return target information """ detector = yolo() detect_fps = FPS() shm_to_yolo = shared_memory.SharedMemory(name='img_to_yolo') shared_img_to_yolo = np.ndarray((720, 960, 3), dtype=np.uint8, buffer=shm_to_yolo.buf) # existing_shm_detect = shared_memory.SharedMemory(name='detection') # shared_detection_array = np.ndarray((4,), dtype=np.int32, buffer=existing_shm_detect.buf) existing_shm_detect_all = shared_memory.SharedMemory(name='detection_all') shared_detection_array_all = np.ndarray((6,4), dtype=np.int32, buffer=existing_shm_detect_all.buf) shm_from_yolo = shared_memory.SharedMemory(name='img_from_yolo') shared_img_from_yolo = np.ndarray((720, 960, 3), dtype=np.uint8, buffer=shm_from_yolo.buf) while True: frame, all_array = detector.detect(shared_img_to_yolo, show_img=False) #shared_detection_array[:] = [target_x, target_y, target_width, target_height] shared_img_from_yolo[:] = frame[:] shared_detection_array_all[:] = all_array[:] detect_fps.update() #print(f'detect_fps: {detect_fps.get()}')
22,193
def main(): """Session manager""" __remove_temp_session() # **** Collect command line options **** # Note regarding Options: # It's important to collect options before monkey patching sys.exit, # otherwise, optparse won't be able to exit if --help option is passed options, args = get_options() if set_attached_console_visible is not None: set_attached_console_visible(DEBUG or options.show_console\ or options.reset_session\ or options.reset_to_defaults\ or options.optimize) app = initialize() if options.reset_session: # <!> Remove all configuration files! reset_session() # CONF.reset_to_defaults(save=True) return elif options.reset_to_defaults: # Reset Spyder settings to defaults CONF.reset_to_defaults(save=True) return elif options.optimize: # Optimize the whole Spyder's source code directory import spyderlib programs.run_python_script(module="compileall", args=[spyderlib.__path__[0]], p_args=['-O']) return if CONF.get('main', 'crash', False): CONF.set('main', 'crash', False) SPLASH.hide() QMessageBox.information(None, "Spyder", "Spyder crashed during last session.<br><br>" "If Spyder does not start at all and <u>before submitting a " "bug report</u>, please try to reset settings to defaults by " "running Spyder with the command line option '--reset':<br>" "<span style=\'color: #555555\'><b>python spyder --reset" "</b></span><br><br>" "<span style=\'color: #ff5555\'><b>Warning:</b></span> " "this command will remove all your Spyder configuration files " "located in '%s').<br><br>" "If restoring the default settings does not help, please take " "the time to search for <a href=\"%s\">known bugs</a> or " "<a href=\"%s\">discussions</a> matching your situation before " "eventually creating a new issue <a href=\"%s\">here</a>. " "Your feedback will always be greatly appreciated." "" % (get_conf_path(), __project_url__, __forum_url__, __project_url__)) next_session_name = options.startup_session while is_text_string(next_session_name): if next_session_name: error_message = load_session(next_session_name) if next_session_name == TEMP_SESSION_PATH: __remove_temp_session() if error_message is None: CONF.load_from_ini() else: print(error_message) QMessageBox.critical(None, "Load session", u("<b>Unable to load '%s'</b><br><br>Error message:<br>%s") % (osp.basename(next_session_name), error_message)) mainwindow = None try: mainwindow = run_spyder(app, options, args) except BaseException: CONF.set('main', 'crash', True) import traceback traceback.print_exc(file=STDERR) traceback.print_exc(file=open('spyder_crash.log', 'w')) if mainwindow is None: # An exception occured SPLASH.hide() return next_session_name = mainwindow.next_session_name save_session_name = mainwindow.save_session_name if next_session_name is not None: #-- Loading session # Saving current session in a temporary file # but only if we are not currently trying to reopen it! if next_session_name != TEMP_SESSION_PATH: save_session_name = TEMP_SESSION_PATH if save_session_name: #-- Saving session error_message = save_session(save_session_name) if error_message is not None: QMessageBox.critical(None, "Save session", u("<b>Unable to save '%s'</b><br><br>Error message:<br>%s") % (osp.basename(save_session_name), error_message)) ORIGINAL_SYS_EXIT()
22,194
def ingestion_before_request(): """ Custom checks for login requirements """ pass
22,195
def multi_layer_images(): """ Returns complex images (with sizes) for push and pull testing. """ # Note: order is from base layer down to leaf. layer1_bytes = layer_bytes_for_contents( "layer 1 contents", mode="", other_files={"file1": "from-layer-1",} ) layer2_bytes = layer_bytes_for_contents( "layer 2 contents", mode="", other_files={"file2": "from-layer-2",} ) layer3_bytes = layer_bytes_for_contents( "layer 3 contents", mode="", other_files={"file1": "from-layer-3", "file3": "from-layer-3",} ) layer4_bytes = layer_bytes_for_contents( "layer 4 contents", mode="", other_files={"file3": "from-layer-4",} ) layer5_bytes = layer_bytes_for_contents( "layer 5 contents", mode="", other_files={"file4": "from-layer-5",} ) return [ Image( id="layer1", bytes=layer1_bytes, parent_id=None, size=len(layer1_bytes), config={"internal_id": "layer1"}, ), Image( id="layer2", bytes=layer2_bytes, parent_id="layer1", size=len(layer2_bytes), config={"internal_id": "layer2"}, ), Image( id="layer3", bytes=layer3_bytes, parent_id="layer2", size=len(layer3_bytes), config={"internal_id": "layer3"}, ), Image( id="layer4", bytes=layer4_bytes, parent_id="layer3", size=len(layer4_bytes), config={"internal_id": "layer4"}, ), Image( id="someid", bytes=layer5_bytes, parent_id="layer4", size=len(layer5_bytes), config={"internal_id": "layer5"}, ), ]
22,196
def display_all(eventfile,diag_var,lc_t,lc_counts,diag_t,diag_counts,filetype): """ To display the plots for desired time interval. Whether to save or show the plots is determined in Lv3_diagnostics. eventfile - path to the event file. Will extract ObsID from this for the NICER files. diag_var - the diagnostic variable we are looking at lc_t - array corresponding to time values for the light curve lc_counts - array corresponding to counts for the light curve diag_t - array corresponding to times for the diagnostic variable diag_counts - array corresponding to counts for the diagnostic variable filetype = '.att', '.mkf', '.cl' or ['.hk',mpuno] """ if type(diag_var) != str: raise TypeError("diag_var should be a string!") if filetype not in ['.att','.mkf','.cl'] and type(filetype) != list and type(filetype) != np.ndarray: raise ValueError("filetype should be one of '.att','.mkf','.hk','.eventcl'! Or filetype = ['.hk',mpuno]") event_header = fits.open(eventfile)[1].header obj_name = event_header['OBJECT'] obsid = event_header['OBS_ID'] fig, (ax1,ax2) = plt.subplots(2,1,figsize=(10,8)) if filetype == '.att' or filetype == '.mkf' or filetype == '.cl': fig.suptitle('Diagnostic plots for ' + obj_name + ', ObsID ' + str(obsid) + '\n Comparing binned light curve and ' + diag_var + ' from ' + filetype + '\n for whole time interval and energy range', fontsize=12) elif len(filetype) == 2: fig.suptitle('Diagnostic plots for ' + obj_name + ', ObsID ' + str(obsid) + '\n MPU='+filetype[1] + ' - Comparing binned light curve and ' + diag_var + ' from ' + filetype[0] + '\n for whole time interval and energy range', fontsize=12) ax1.plot(lc_t[:-1],lc_counts,'b') ax1.set_xlabel('Time (s)',fontsize=12) ax1.set_ylabel('Counts',fontsize=12) ax2.plot(diag_t,diag_counts,'rx-') ax2.set_xlabel('Time (s)',fontsize=12) ax2.set_ylabel(diag_var) plt.subplots_adjust(hspace=0.2)
22,197
def user_teams(config: Config, email: str) -> Iterable[Team]:
    """Return the teams a user is expected to be a member of.

    Only the teams in which the user is a direct member are returned. The
    ancestors of these teams are not returned.
    """
    names = config.by_member.get(email)
    if not names:
        return []
    return (_get_team_exists(config, x) for x in names)
22,198
def se3_transform(g, a, normals=None): """ Applies the SE3 transform Args: g: SE3 transformation matrix of size ([1,] 3/4, 4) or (B, 3/4, 4) a: Points to be transformed (N, 3) or (B, N, 3) normals: (Optional). If provided, normals will be transformed Returns: transformed points of size (N, 3) or (B, N, 3) """ R = g[..., :3, :3] # (B, 3, 3) p = g[..., :3, 3] # (B, 3) if len(g.size()) == len(a.size()): b = torch.matmul(a, R.transpose(-1, -2)) + p[..., None, :] else: raise NotImplementedError b = R.matmul(a.unsqueeze(-1)).squeeze(-1) + p # No batch. Not checked if normals is not None: rotated_normals = normals @ R.transpose(-1, -2) return b, rotated_normals else: return b
22,199
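A usage sketch (assumes torch is importable): a pure translation with identity rotation applied to a small, unbatched point set.

import torch

g = torch.eye(4)[:3]                        # (3, 4): identity rotation, zero translation
g[:, 3] = torch.tensor([1.0, 2.0, 3.0])     # set the translation part
a = torch.zeros(5, 3)                       # five points at the origin
print(se3_transform(g, a))                  # every point moves to (1., 2., 3.)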