content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def isempty(s):
    """Return True when *s* is one of the conventional "empty" markers.

    An object counts as empty when it equals None, the empty string,
    the placeholder dash "-", or the empty list.
    """
    empty_markers = (None, "", "-", [])
    return s in empty_markers
9c3ffd6ab818e803c1c0129588c345361c58807f
21,200
import sys


def get_client(host, port=None, username=None, password=None, tenant=None,
               auth_url=None, auth_strategy=None, auth_token=None, region=None,
               is_silent_upload=False, insecure=True, aws_access_key=None,
               aws_secret_key=None):
    """ Returns a new boto Cloudformation client connection to a heat server """
    # Note we pass None/None for the keys by default
    # This means boto reads /etc/boto.cfg, or ~/.boto
    # set is_secure=0 in the config to disable https
    # NOTE(review): most parameters (host, username, password, ...) are accepted
    # for interface compatibility but are not used here -- boto resolves its own
    # endpoint/credential configuration. Confirm before removing any of them.
    cloudformation = BotoClient(aws_access_key_id=aws_access_key,
                                aws_secret_access_key=aws_secret_key,
                                port=port, path="/v1")
    if cloudformation:
        logger.debug("Got CF connection object OK")
    else:
        # presumably unreachable in practice (the constructor raises on
        # failure rather than returning a falsy object) -- TODO confirm;
        # kept as a hard exit on a falsy client.
        logger.error("Error establishing Cloudformation connection!")
        sys.exit(1)
    return cloudformation
b1b7068787aebeaa5087e5f1d5334ad68debac3d
21,201
def raw(text):
    """Return *text* with each character replaced via ``trans_map``.

    Characters without an entry in ``trans_map`` are kept unchanged, so
    the result is a "raw" rendering of the input string.
    """
    pieces = []
    for ch in text:
        try:
            pieces.append(trans_map[ch])
        except KeyError:
            pieces.append(ch)
    return ''.join(pieces)
528e88837bba76411b44044b566e2a645db4433e
21,202
def airtovac(wave_air):
    """Convert air wavelengths to vacuum wavelengths (Angstroms).

    Uses the Ciddor (1996, Applied Optics) refraction relation, iterated
    twice for precision, following the IDL astrolib AIRTOVAC routine.
    Wavelengths at or below 2000 A are returned unchanged. Accepts a
    scalar or a numpy array; a new value/array is always returned.
    """
    wave_vac = wave_air * 1.0  # force float and copy the input
    above_cutoff = wave_vac > 2000  # only modify above 2000 A
    if np.sum(above_cutoff):
        # Two fixed-point iterations refine the wavenumber estimate.
        for _ in range(2):
            if isinstance(above_cutoff, np.ndarray):
                sigma2 = (1e4 / wave_vac[above_cutoff]) ** 2.  # wavenumber squared
                factor = (1. + 5.792105e-2 / (238.0185 - sigma2)
                          + 1.67917e-3 / (57.362 - sigma2))
                wave_vac[above_cutoff] = wave_air[above_cutoff] * factor
            else:
                # scalar input
                sigma2 = (1e4 / wave_vac) ** 2.  # wavenumber squared
                factor = (1. + 5.792105e-2 / (238.0185 - sigma2)
                          + 1.67917e-3 / (57.362 - sigma2))
                wave_vac = wave_air * factor
    return wave_vac
68d71855f0fa8256acc23bfd24d68985cfc1f3a7
21,203
def clamp(val, min_, max_):
    """Clamp *val* into the closed interval [min_, max_]."""
    if val < min_:
        return min_
    return max_ if val > max_ else val
31f2441ba03cf765138a7ba9b41acbfe21b7bda7
21,204
def GetUserLink(provider, email):
    """Build a URL to the profile of the given user on the given provider.

    Args:
      provider: The name of the provider.
      email: The email alias of the user.

    Returns:
      ASCII-encoded str of the profile URL; empty string when no link
      can be built (missing email or unsupported provider).
    """
    if email and provider == Provider.ISSUETRACKER:
        alias = email.split('@')[0]
        link = 'http://code.google.com/u/' + alias
    else:
        link = ''
    return encoding_util.EncodeToAscii(link)
ad5f30e7e04000369d45d242b18afc59922da9bc
21,205
def _as_bytes0(path):
    """Convert *path* to bytes, crashing translation on embedded NUL characters."""
    converted = _as_bytes(path)
    rstring.check_str0(converted)
    return converted
76c9c130d1a74f9cacb34e30141db74400f6ea33
21,206
def get_ip(request):
    """Determine the requesting machine's IP address.

    Args:
        request: request object

    Returns:
        The address reported by ``request.remote_addr``. NOTE: behind a
        reverse proxy this is the proxy's address, not the end client's.
    """
    return request.remote_addr
84e1540bc8b79fd2043a8fb6f107f7bcd8d7cc8c
21,207
def _is_valid_new_style_arxiv_id(identifier): """Determine if the given identifier is a valid new style arXiv ID.""" split_identifier = identifier.split('v') if len(split_identifier) > 2: return False elif len(split_identifier) == 2: identifier, version = split_identifier if not version.isnumeric(): return False else: identifier = split_identifier[0] split_identifier = identifier.split('.') if len(split_identifier) != 2: return False prefix, suffix = split_identifier if not prefix.isnumeric() or not suffix.isnumeric(): return False if len(prefix) != 4 or len(suffix) not in {4, 5}: return False month = prefix[2:4] if int(month) > 12: return False return True
71171984ad1497fa45e109b9657352c20bfe7682
21,208
def download_suite(request, domain, app_id):
    """
    See Application.create_suite

    Serves the app's generated suite.xml as an HTTP response. The app
    object is already attached to the request (``request.app``); `domain`
    and `app_id` come from the URL route.
    """
    # For a current (non-copied) app, reset form versions before building
    # the suite -- presumably so the suite reflects unversioned forms.
    # NOTE(review): confirm the intent of set_form_versions(None).
    if not request.app.copy_of:
        request.app.set_form_versions(None)
    return HttpResponse(
        request.app.create_suite()
    )
382817e3a790d59c33c69eb5334841d2d9a1a7af
21,209
def get_graph(mol):
    """ Converts `rdkit.Chem.Mol` object to `PreprocessingGraph`. """
    if mol is not None:
        # When aromatic bonds are disabled in the global constants,
        # kekulize in place so bond orders are explicit.
        if not C.use_aromatic_bonds:
            rdkit.Chem.Kekulize(mol, clearAromaticFlags=True)
        molecular_graph = PreprocessingGraph(molecule=mol, constants=C)
    # NOTE(review): if `mol` is None, `molecular_graph` is never bound and
    # this raises UnboundLocalError -- callers appear to guarantee a
    # non-None mol; confirm before relying on a None input.
    return molecular_graph
3d105de313ab1aed6ed0fff598e791cd903e94de
21,210
def dict_fetchall(cursor):
    """Return every remaining row from *cursor* as a dict keyed by column name."""
    column_names = [col[0] for col in cursor.description]
    return [dict(zip(column_names, row)) for row in cursor.fetchall()]
6d5e6621ac2cb6229f7caf6714cbc0124a33c271
21,211
def count_vowels(s):
    """Count the vowels (a, e, i, o, u — case-insensitive) in *s*."""
    return sum(1 for ch in s.lower() if ch in 'aeiou')
236500c76b22510e6f0d97a4200865e2a18b47c3
21,212
def _is_valid_dtype(matrix, complex_dtype=False, all_dtype=False):
    """Check whether *matrix* has a usable float (or complex) dtype.

    With ``all_dtype`` both float and complex dtypes are accepted; with
    ``complex_dtype`` only complex dtypes; otherwise only float dtypes.
    """
    if all_dtype:
        allowed = NUMPY_FLOAT_DTYPES + NUMPY_COMPLEX_DTYPES
    elif complex_dtype:
        allowed = NUMPY_COMPLEX_DTYPES
    else:
        allowed = NUMPY_FLOAT_DTYPES
    return matrix.dtype in allowed
1ca4a79082f170f53a905773b42fa8cb833e4016
21,213
def assess_edge(self, edge, fsmStack, request, **kwargs):
    """
    Try to transition to ASSESS, or WAIT_ASSESS if not ready,
    or jump to ASK if a new question is being asked.

    Returns the FSM node to transition to, based on the linked
    (instructor) FSM state and the student's self-evaluation.
    """
    fsm = edge.fromNode.fsm
    if not fsmStack.state.linkState:  # instructor detached
        return fsm.get_node('END')
    elif fsmStack.state.linkState.fsmNode.node_name_is_one_of('QUESTION'):
        # Instructor is still on a question node.
        if fsmStack.state.unitLesson == fsmStack.state.linkState.unitLesson:
            # Same question as ours: wait for assessment to open.
            return fsm.get_node('WAIT_ASSESS')
        else:  # jump to the new question
            fsmStack.state.unitLesson = fsmStack.state.linkState.unitLesson
            fsmStack.state.save()
            return fsm.get_node('TITLE')
    else:  # pragma: no cover
        # Route on the student's self-evaluation of their response.
        if not fsmStack.next_point.response_to_check.selfeval:
            return edge.toNode
        if fsmStack.next_point.response_to_check.selfeval != 'correct':
            return fsm.get_node('INCORRECT_ANSWER')
        elif fsmStack.next_point.response_to_check.selfeval == 'correct':
            return fsm.get_node('CORRECT_ANSWER')
        # NOTE(review): unreachable -- the two branches above are exhaustive;
        # kept as a defensive fallthrough.
        return edge.toNode
e09d07afbc37c73188bbb5e6fa3436c88fce9bd7
21,214
def viable_source_types_for_generator_real (generator):
    """ Returns the list of source types, which, when passed to 'run'
        method of 'generator', has some chance of being eventually used
        (probably after conversion by other generators)
    """
    source_types = generator.source_types ()
    if not source_types:
        # If generator does not specify any source types,
        # it might be special generator like builtin.lib-generator
        # which just relays to other generators. Return '*' to
        # indicate that any source type is possibly OK, since we don't
        # know for sure.
        return ['*']
    else:
        result = []
        for s in source_types:
            viable_sources = viable_source_types(s)
            if viable_sources == "*":
                # One wildcard source type makes the whole answer a wildcard.
                result = ["*"]
                break
            else:
                # A declared source type also admits all types derived from it.
                result.extend(type.all_derived(s) + viable_sources)
        return unique(result)
e946663241fb77d3632f88b2f879d650e65f6d73
21,215
def negSamplingCostAndGradient(predicted, target, outputVectors, dataset,
                               K=10):
    """ Negative sampling cost function for word2vec models

    Implement the cost and gradients for one predicted word vector
    and one target word vector as a building block for word2vec
    models, using the negative sampling technique. K is the sample
    size.

    Note: See test_word2vec below for dataset's initialization.

    Arguments/Return Specifications: same as softmaxCostAndGradient

    Fix: the original returned the undefined names ``cost``, ``gradPred``
    and ``grad`` (NameError) and left debug prints; this implements the
    standard negative-sampling loss
    J = -log s(u_o . v_c) - sum_k log s(-u_k . v_c) and its gradients.
    """
    # Sampling of indices is done for you. Do not modify this if you
    # wish to match the autograder and receive points!
    indices = [target]
    indices.extend(getNegativeSamples(target, dataset, K))

    gradPred = np.zeros(predicted.shape)       # dJ/dv_c
    grad = np.zeros(outputVectors.shape)       # dJ/dU

    # Positive (target) term: -log s(u_o . v_c)
    z = sigmoid(np.dot(outputVectors[target], predicted))
    cost = -np.log(z)
    gradPred += (z - 1.0) * outputVectors[target]
    grad[target] += (z - 1.0) * predicted

    # Negative samples: -log s(-u_k . v_c); duplicates accumulate via +=.
    for k in indices[1:]:
        z_neg = sigmoid(-np.dot(outputVectors[k], predicted))
        cost -= np.log(z_neg)
        # 1 - s(-x) == s(x)
        gradPred += (1.0 - z_neg) * outputVectors[k]
        grad[k] += (1.0 - z_neg) * predicted

    return cost, gradPred, grad
d4b8a16166406f9e13296b8b5c53c56f95ff9d6b
21,216
def get_start_block(block):
    """Walk down the first-child chain and return the deepest block.

    The deepest descendant reached by repeatedly taking the first child
    is used as the starting block.
    """
    children = block.get('children')
    while children:
        block = children[0]
        children = block.get('children')
    return block
e658954bb69f88f10c2f328c605d6da094ba065d
21,217
def transform_rank_list(lam_ref, A, b, rank):
    """Build a rank-limited affine transform from a list of matrices.

    *A* is a list of matrices; the first ``rank`` entries are summed to
    produce a matrix of the desired rank, which is then applied to
    ``lam_ref`` with offset ``b``.
    """
    combined = sum(A[:rank])
    shifted = combined @ lam_ref + b
    assert np.linalg.matrix_rank(combined) == rank, "Unexpected rank mismatch"
    return combined, b, shifted
2b77db3cb27ce3b66d0038042d649226e5a231d2
21,218
def FindWindowsWithTitle(title_to_search):
    """Finds windows with given title.

    Args:
      title_to_search: Window title substring to search, case-insensitive.

    Returns:
      A list of HWND that match the search condition.
    """
    # A None desktop handle means "search from the desktop root".
    desktop_handle = None
    return FindWindowsWithText(desktop_handle, title_to_search)
bcc75a4351969cfdbb475032d556e73a4b0ceb92
21,219
import time


def main_update(next_image_step):
    """
    This includes some functionality for image / file writing at a
    specified frequency,

    Assumes global variables: time, step, files_freq, next_image_step
    if numerical dt exceeds next specified writing point override dt
    make sure we hit that point
    Set some flags so that image / file writing proceeds

    NOTE(review): the global `time` used below shadows the `time` module
    imported above -- it is expected to be a numeric simulation time set
    elsewhere; confirm before refactoring.
    """
    # Pick the stable timestep from whichever solver is active.
    if md.thermal:
        dt = advDiff.get_max_dt()*md.courantFac #additional md.courantFac helps stabilise advDiff
        advDiff.integrate(dt)
    else:
        dt = advector.get_max_dt()
    #This relates to file writing at set period:
    #override dt make sure we hit certain time values
    #Set some flags so that image / file writing proceeds
    if step == 0:
        files_this_step = True
    else:
        files_this_step = False
    if time + dt >= next_image_step:
        # Shorten the step so we land exactly on the output time.
        dt = next_image_step - time
        files_this_step = True
        next_image_step += files_freq #increment time for our next image / file dump

    #Do advection
    advector.integrate(dt)
    marker.advection(dt)

    #remove drift in pressure
    pressureSurf = _pressure.evaluate()[0]
    pressureField.data[:] -= pressureSurf/surfLength

    return time+dt, step+1, files_this_step, next_image_step
6a402c70948dc90aade783b94b054ef5abf8225c
21,220
from typing import Iterable
import time
import os


def record(packets: Iterable[Packet],
           pcap_path: str,
           *,
           src_ip: str = "127.0.0.1",
           dst_ip: str = "127.0.0.1",
           lidar_port: int = 7502,
           imu_port: int = 7503,
           use_sll_encapsulation: bool = False) -> int:
    """Record a sequence of sensor packets to a pcap file.

    Args:
        packets: A (finite!) sequence of packets
        pcap_path: Path of the output pcap file
        src_ip: Source IP to use for all packets
        dst_ip: Destination IP to use for all packets
        lidar_port: Src/dst port to use for lidar packets
        imu_port: Src/dst port to use for imu packets
        use_sll_encapsulation: Use sll encapsulation for pcaps
            (ouster studio can not read)

    Returns:
        Number of packets captured
    """
    has_timestamp = None
    error = False
    buf_size = 2**16
    n = 0
    handle = _pcap.record_initialize(pcap_path, src_ip, dst_ip, buf_size,
                                     use_sll_encapsulation)
    try:
        for packet in packets:
            # Choose ports from the packet type; anything else is rejected.
            if isinstance(packet, LidarPacket):
                src_port = lidar_port
                dst_port = lidar_port
            elif isinstance(packet, ImuPacket):
                src_port = imu_port
                dst_port = imu_port
            else:
                raise ValueError("Unexpected packet type")
            # All packets in one capture must agree on whether they carry
            # their own capture timestamp.
            if has_timestamp is None:
                has_timestamp = (packet.capture_timestamp is not None)
            elif has_timestamp != (packet.capture_timestamp is not None):
                raise ValueError("Mixing timestamped/untimestamped packets")
            # Fall back to wall-clock time when no capture timestamp exists.
            ts = packet.capture_timestamp or time.time()
            _pcap.record_packet(handle, src_port, dst_port, packet._data, ts)
            n += 1
    except Exception:
        error = True
        raise
    finally:
        _pcap.record_uninitialize(handle)
        # Remove an empty capture file left behind by a failure before the
        # first packet was written.
        if error and os.path.exists(pcap_path) and n == 0:
            os.remove(pcap_path)
    return n
0a02f7d919352e1b75c8af59cd807706ee0d131e
21,221
def assemble_batches(inputs, crop_mode='center_only'):
    """
    Assemble DataFrame of image crops for feature computation.

    Input:
    inputs: list of filenames (center_only, corners, and selective_search mode)
        OR input DataFrame (list mode)
    mode: string
        'list': take the image windows from the input as-is
        'center_only': take the CROPPED_DIM middle of the image windows
        'corners': take CROPPED_DIM-sized boxes at 4 corners and center of
            the image windows, as well as their flipped versions: a total of 10.
        'selective_search': run Selective Search region proposal on the image
            windows, and take each enclosing subwindow.

    Output:
    df_batches: list of DataFrames, each one of BATCH_SIZE rows.
        Each row has 'image', 'filename', and 'window' info.
        Column 'image' contains (X x 3 x CROPPED_DIM x CROPPED_IM) ndarrays.
        Column 'filename' contains source filenames.
        Column 'window' contains [ymin, xmin, ymax, xmax] ndarrays.
        If 'filename' is None, then the row is just for padding.

    Note: for increased efficiency, increase the batch size (to the limit of gpu
    memory) to avoid the communication cost
    """
    if crop_mode == 'list':
        images_df = _assemble_images_list(inputs)
    elif crop_mode == 'center_only':
        images_df = _assemble_images_center_only(inputs)
    elif crop_mode == 'corners':
        images_df = _assemble_images_corners(inputs)
    elif crop_mode == 'selective_search':
        images_df = _assemble_images_selective_search(inputs)
    else:
        raise Exception("Unknown mode: not in {}".format(CROP_MODES))

    # Make sure the DataFrame has a multiple of BATCH_SIZE rows:
    # just fill the extra rows with NaN filenames and all-zero images.
    N = images_df.shape[0]
    remainder = N % BATCH_SIZE
    if remainder > 0:
        zero_image = np.zeros_like(images_df['image'].iloc[0])
        zero_window = np.zeros((1, 4), dtype=int)
        remainder_df = pd.DataFrame([{
            'filename': None,
            'image': zero_image,
            'window': zero_window
        }] * (BATCH_SIZE - remainder))
        # DataFrame.append was removed in pandas 2.0; concat is equivalent.
        images_df = pd.concat([images_df, remainder_df])
        N = images_df.shape[0]

    # Split into batches of BATCH_SIZE.
    # Use floor division: under Python 3, `/` yields floats, which broke
    # both the batch-index mask and range().
    ind = np.arange(N) // BATCH_SIZE
    df_batches = [images_df[ind == i] for i in range(N // BATCH_SIZE)]
    return df_batches
f22f3ed33b339a4375a1e3319d26cb2946762978
21,222
def __VF2_feasible(graph1, graph2, vertex1, vertex2, map21, map12,
                   terminals1, terminals2, subgraph):
    """
    Returns :data:`True` if two vertices `vertex1` and `vertex2` from
    graphs `graph1` and `graph2`, respectively, are feasible matches.
    `mapping21` and `mapping12` are the current state of the mapping from
    `graph1` to `graph2` and vice versa, respectively. `terminals1` and
    `terminals2` are lists of the vertices that are directly connected to
    the already-mapped vertices. `subgraph` is :data:`True` if graph2 is
    to be treated as a potential subgraph of graph1. i.e. graph1 is a
    specific case of graph2.

    Uses the VF2 algorithm of Vento and Foggia. The feasibility is assessed
    through a series of semantic and structural checks. Only the combination
    of the semantic checks and the level 0 structural check are both
    necessary and sufficient to ensure feasibility. (This does *not* mean
    that vertex1 and vertex2 are always a match, although the level 1 and
    level 2 checks preemptively eliminate a number of false positives.)
    """
    # Cython typed-variable declarations (no-ops under pure Python).
    cython.declare(vert1=Vertex, vert2=Vertex, edge1=Edge, edge2=Edge, edges1=dict, edges2=dict)
    cython.declare(i=cython.int)
    cython.declare(term1Count=cython.int, term2Count=cython.int, neither1Count=cython.int, neither2Count=cython.int)

    if not subgraph:
        # To be feasible the connectivity values must be an exact match
        if vertex1.connectivity1 != vertex2.connectivity1: return False
        if vertex1.connectivity2 != vertex2.connectivity2: return False
        if vertex1.connectivity3 != vertex2.connectivity3: return False

    # Semantic check #1: vertex1 and vertex2 must be equivalent
    if subgraph:
        if not vertex1.isSpecificCaseOf(vertex2): return False
    else:
        if not vertex1.equivalent(vertex2): return False

    # Get edges adjacent to each vertex
    edges1 = graph1.edges[vertex1]
    edges2 = graph2.edges[vertex2]

    # Semantic check #2: adjacent vertices to vertex1 and vertex2 that are
    # already mapped should be connected by equivalent edges
    for vert2 in edges2:
        if vert2 in map12:
            vert1 = map12[vert2]
            if not vert1 in edges1: # atoms not joined in graph1
                return False
            edge1 = edges1[vert1]
            edge2 = edges2[vert2]
            if subgraph:
                if not edge1.isSpecificCaseOf(edge2): return False
            else: # exact match required
                if not edge1.equivalent(edge2): return False

    # there could still be edges in graph1 that aren't in graph2.
    # this is ok for subgraph matching, but not for exact matching
    if not subgraph:
        for vert1 in edges1:
            if vert1 in map21:
                vert2 = map21[vert1]
                if not vert2 in edges2: return False

    # Count number of terminals adjacent to vertex1 and vertex2
    term1Count = 0; term2Count = 0; neither1Count = 0; neither2Count = 0
    for vert1 in edges1:
        if vert1 in terminals1: term1Count += 1
        elif vert1 not in map21: neither1Count += 1
    for vert2 in edges2:
        if vert2 in terminals2: term2Count += 1
        elif vert2 not in map12: neither2Count += 1

    # Level 2 look-ahead: the number of adjacent vertices of vertex1 and
    # vertex2 that are non-terminals must be equal
    if subgraph:
        if neither1Count < neither2Count: return False
    else:
        if neither1Count != neither2Count: return False

    # Level 1 look-ahead: the number of adjacent vertices of vertex1 and
    # vertex2 that are terminals must be equal
    if subgraph:
        if term1Count < term2Count: return False
    else:
        if term1Count != term2Count: return False

    # Level 0 look-ahead: all adjacent vertices of vertex2 already in the
    # mapping must map to adjacent vertices of vertex1
    for vert2 in edges2:
        if vert2 in map12:
            vert1 = map12[vert2]
            if vert1 not in edges1: return False
    # Also, all adjacent vertices of vertex1 already in the mapping must map to
    # adjacent vertices of vertex2, unless we are subgraph matching
    if not subgraph:
        for vert1 in edges1:
            if vert1 in map21:
                vert2 = map21[vert1]
                if vert2 not in edges2: return False

    # All of our tests have been passed, so the two vertices are a feasible
    # pair
    return True
f3ebfa379d710f5e1c6651713c15e9c6148d576d
21,223
async def place_rectangle(
    interface, element, x, y, width, height, include_all_sides=True, variant=None
):
    """Place a rectangle of an element via the editor interface.

    Parameters
    ----------
    interface
        The editor interface.
    x, y
        Coordinates of the upper left corner.
    width, height
        Size of the rectangle.
    include_all_sides
        If False, skip returning the left, right and lower sides. Use this
        when placing walls and you are only interested in the insides.
    variant
        The variant of element to place.

    Returns
    -------
    A list of tuples (element, direction, x, y, variant) of placed
    elements. Some may be omitted, if `include_all_sides` is false.
    """
    await interface.place_element(
        element,
        Direction.NONE,
        (x, y),
        (x + width - 1, y + height - 1),
        variant=variant,
    )
    if include_all_sides:
        columns = range(x, x + width)
        rows = range(y, y + height)
    else:
        # Drop the left/right columns and the bottom row from the result.
        columns = range(x + 1, x + width - 1)
        rows = range(y, y + height - 1)
    return [
        (element, Direction.NONE, col, row, variant)
        for col in columns
        for row in rows
    ]
e9b2c16e77627dc3e0cac5f0abfcdce23db5eb29
21,224
def rainbow_strokes(strokes: bpy.types.GPencilStrokes):
    """Set each stroke's vertex color according to its index."""
    results = []
    for index, stroke in enumerate(strokes):
        results.append(colorize_stroke(stroke, index, True))
    return results
2ad0f0811e7b3c4f925a1cb21c2d0cfa374bdcf0
21,225
def calc_Q_hat_hs_d_t(Q, A_A, V_vent_l_d_t, V_vent_g_i, mu_H, mu_C, J_d_t, q_gen_d_t, n_p_d_t, q_p_H, q_p_CS, q_p_CL, X_ex_d_t, w_gen_d_t, Theta_ex_d_t, L_wtr, region):
    """(40-1a)(40-1b)(40-2a)(40-2b)(40-2c)(40-3)

    Args:
      Q: Heat loss coefficient of the dwelling (W/(m2*K))
      A_A: Total floor area (m2)
      V_vent_l_d_t: Local ventilation volume at date d, hour t (m3/h)
      V_vent_g_i: General ventilation volume of HVAC zone i (m3/h)
      mu_H: Solar heat gain coefficient in the heating season ((W/m2)/(W/m2))
      mu_C: Solar heat gain coefficient in the cooling season ((W/m2)/(W/m2))
      J_d_t: Horizontal global solar irradiance at date d, hour t (W/m2)
      q_gen_d_t: Internal heat generation at date d, hour t (W)
      n_p_d_t: Number of occupants at date d, hour t (persons)
      q_p_H: Sensible heat emitted per person in the heating season (W/person)
      q_p_CS: Sensible heat emitted per person in the cooling season (W/person)
      q_p_CL: Latent heat emitted per person in the cooling season (W/person)
      X_ex_d_t: Outdoor absolute humidity at date d, hour t (kg/kg(DA))
      w_gen_d_t: Internal moisture generation at date d, hour t
      Theta_ex_d_t: Outdoor temperature at date d, hour t (deg C)
      L_wtr: Latent heat of evaporation of water (kJ/kg)
      region: Region number

    Returns:
      Hourly heat-source-unit output used to size its airflow, at date d,
      hour t (MJ/h)
    """
    # Season masks (heating H, cooling C, intermediate M) per hour of year.
    H, C, M = get_season_array_d_t(region)
    c_p_air = get_c_p_air()
    rho_air = get_rho_air()
    Theta_set_H = get_Theta_set_H()
    Theta_set_C = get_Theta_set_C()
    X_set_C = get_X_set_C()

    Q_hat_hs_d_t = np.zeros(24 * 365)
    Q_hat_hs_H_d_t = np.zeros(24 * 365)
    Q_hat_hs_CS_d_t = np.zeros(24 * 365)
    Q_hat_hs_CL_d_t = np.zeros(24 * 365)

    # Heating season (40-1b).
    # NOTE(review): mu_H is guarded against None but mu_C below is not --
    # callers presumably always supply mu_C; confirm.
    if mu_H is not None:
        Q_hat_hs_H_d_t[H] = (((Q - 0.35 * 0.5 * 2.4) * A_A + (c_p_air * rho_air * (V_vent_l_d_t[H] + np.sum(V_vent_g_i[:5]))) / 3600) * (Theta_set_H - Theta_ex_d_t[H]) \
            - mu_H * A_A * J_d_t[H] - q_gen_d_t[H] - n_p_d_t[H] * q_p_H) * 3600 * 10 ** -6

        # (40-1a): negative (no-demand) hours clipped to zero.
        Q_hat_hs_d_t[H] = np.clip(Q_hat_hs_H_d_t[H], 0, None)

    # Cooling season, sensible load (40-2b)
    Q_hat_hs_CS_d_t[C] = (((Q - 0.35 * 0.5 * 2.4) * A_A + (c_p_air * rho_air * (V_vent_l_d_t[C] + np.sum(V_vent_g_i[:5]))) / 3600) * (Theta_ex_d_t[C] - Theta_set_C) \
        + mu_C * A_A * J_d_t[C] + q_gen_d_t[C] + n_p_d_t[C] * q_p_CS) * 3600 * 10 ** -6

    # Cooling season, latent load (40-2c)
    Q_hat_hs_CL_d_t[C] = ((rho_air * (V_vent_l_d_t[C] + np.sum(V_vent_g_i[:5])) * (X_ex_d_t[C] - X_set_C) * 10 ** 3 + w_gen_d_t[C]) \
        * L_wtr + n_p_d_t[C] * q_p_CL * 3600) * 10 ** -6

    # (40-2a): total cooling output is clipped sensible + clipped latent.
    Q_hat_hs_d_t[C] = np.clip(Q_hat_hs_CS_d_t[C], 0, None) + np.clip(Q_hat_hs_CL_d_t[C], 0, None)

    # Intermediate season (40-3): no heat-source output.
    Q_hat_hs_d_t[M] = 0

    return Q_hat_hs_d_t
64dd272673507b15a2d2c1782a0c3db88c3f8d76
21,226
def get_all_infoproviders():
    """Endpoint `/infoproviders`.

    The response contains information about every infoprovider stored in
    the database; on failure a JSON error payload with status 400.
    """
    try:
        infoproviders = queries.get_infoprovider_list()
        return flask.jsonify(infoproviders)
    except Exception:
        logger.exception("An error occurred: ")
        err = flask.jsonify({"err_msg": "An error occurred while loading all infoproviders"})
        return err, 400
73966c2a5b171baead9edccec27f6380f61fb2ab
21,227
import math


def maidenhead(dec_lat, dec_lon):
    """Convert latitude and longitude to Maidenhead grid locators.

    Returns an 8-character locator (field, square, subsquare, extended
    square), or the empty string for unparsable, non-finite, or
    out-of-range coordinates.
    """
    try:
        dec_lat = float(dec_lat)
        dec_lon = float(dec_lon)
    except ValueError:
        return ''
    if _non_finite(dec_lat) or _non_finite(dec_lon):
        return ''
    if 90 < math.fabs(dec_lat) or 180 < math.fabs(dec_lon):
        return ''
    if 89.99999 < dec_lat:
        # force North Pole to just inside lat_sq 'R'
        dec_lat = 89.99999
    if 179.99999 < dec_lon:
        # force 180 to just inside lon_sq 'R'
        dec_lon = 179.99999
    # Shift to non-negative ranges: lat 0..180, lon 0..360.
    adj_lat = dec_lat + 90.0
    adj_lon = dec_lon + 180.0
    # divide into 18 zones (fields) each 20 degrees lon, 10 degrees lat
    grid_lat_sq = chr(int(adj_lat / 10) + 65)
    grid_lon_sq = chr(int(adj_lon / 20) + 65)
    # divide into 10 zones (squares) each 2 degrees lon, 1 degrees lat
    grid_lat_field = str(int(adj_lat % 10))
    grid_lon_field = str(int((adj_lon / 2) % 10))
    # remainder in minutes
    adj_lat_remainder = (adj_lat - int(adj_lat)) * 60
    adj_lon_remainder = ((adj_lon) - int(adj_lon / 2) * 2) * 60
    # divide into 24 zones (subsquares) each 5 degrees lon, 2.5 degrees lat
    grid_lat_subsq = chr(97 + int(adj_lat_remainder / 2.5))
    grid_lon_subsq = chr(97 + int(adj_lon_remainder / 5))
    # remainder in seconds
    adj_lat_remainder = (adj_lat_remainder % 2.5) * 60
    adj_lon_remainder = (adj_lon_remainder % 5.0) * 60
    # divide into 10 zones (extended squares) each 30 secs lon, 15 secs lat
    grid_lat_extsq = chr(48 + int(adj_lat_remainder / 15))
    grid_lon_extsq = chr(48 + int(adj_lon_remainder / 30))
    # Locator interleaves longitude-first pairs at each precision level.
    return (grid_lon_sq + grid_lat_sq + grid_lon_field + grid_lat_field
            + grid_lon_subsq + grid_lat_subsq + grid_lon_extsq + grid_lat_extsq)
63e44fffbf113f7c8a195b58556eef80a66690f7
21,228
def parse_worker_string(miner, worker):
    """Parse a worker string into its coin address and worker ID.

    A dotted worker string has the coin address before the first dot and
    the worker ID after the last dot; an undotted string is used as both.
    The parsed values are also written back onto *miner* when they differ
    from (or fill in) its current attributes.

    Returns:
        String, String
    """
    parts = worker.split('.')
    if len(parts) > 1:
        coin_address = parts[0]
        worker = parts[-1]
    else:
        coin_address = worker

    if coin_address is not None:
        if not miner.coin_address or miner.coin_address != coin_address:
            miner.coin_address = coin_address
    if worker is not None:
        if not miner.worker_name or miner.worker_name != worker:
            miner.worker_name = worker
    return coin_address, worker
3492716fc9f5290a161de0b46e7af87afbe6b348
21,229
import json


def get_inference_sequence(file_path):
    """Load detected 2D boxes for one frame and package them for inference.

    :param file_path: path of 2D bounding boxes (without extension; the
        detections are read from ``file_path + '.json'`` and the image is
        assumed to live at ``file_path + '.jpg'``)
    :return: dict with keys 'rgb_path', 'camera', 'boxes', 'sequence_id'
    """
    with open(file_path + '.json', 'r') as f:
        detected_bdbs = json.load(f)
        f.close()  # redundant: the with-block already closes the file
    boxes = list()
    for j, bdb2d in enumerate(detected_bdbs):
        box = bdb2d['bbox']
        box = {'x1': box[0], 'y1': box[1], 'x2': box[2], 'y2': box[3]}
        box['score'] = bdb2d['score']
        box['classname'] = bdb2d['class']
        boxes.append({'2dbdb': box})
    camera = dict()
    # Fixed intrinsics: focal length 529.5, principal point (365, 265).
    camera['K'] = np.array([[529.5, 0., 365.], [0, 529.5, 265.], [0, 0, 1]])
    boxes_out = list()
    for box in boxes:
        box_set = dict()
        box_set['bdb_pos'] = [box['2dbdb']['x1'], box['2dbdb']['y1'], box['2dbdb']['x2'], box['2dbdb']['y2']]
        # Skip classes outside the test vocabulary.
        if box['2dbdb']['classname'] not in OBJ_CATEGORY_TEST:
            continue
        box_set['size_cls'] = OBJ_CATEGORY_CLEAN.index(box['2dbdb']['classname'])
        boxes_out.append(box_set)
    data = dict()
    data['rgb_path'] = file_path + '.jpg'
    data['camera'] = camera
    data['boxes'] = list_of_dict_to_dict_of_list(boxes_out)
    # presumably the filename stem is a numeric frame id -- TODO confirm.
    data['sequence_id'] = int(file_path.split('/')[-1])
    return data
a77b5f24004acf9839881cd52ce06b6f785f9bfb
21,230
def _DC_GetBoundingBox(self): """ GetBoundingBox() -> (x1,y1, x2,y2) Returns the min and max points used in drawing commands so far. """ return (self.MinX(), self.MinY(), self.MaxX(), self.MaxY())
47dc9e8bbc429dbd079695844c9bbcfc79b26229
21,231
from typing import Union
from typing import Iterable
from typing import List


def map_text(
        text: Union[str, Text, Iterable[str], Iterable[Text]],
        mapping: StringMapper
) -> Union[str, List[str]]:
    """Replace text if it matches one of the dictionary keys.

    :param text: Text instance(s) to map.
    :param mapping: Mappings to replace text — a dict/Series, a callable,
        or None for no mapping.
    """
    if isinstance(text, Text):
        text = text.get_text()
    if not isinstance(text, str):
        # Non-string iterable: map each element individually.
        return [map_text(str(item), mapping) for item in text]
    if mapping is None:
        return text
    if isinstance(mapping, (dict, Series)):
        return mapping[text] if text in mapping.keys() else text
    if callable(mapping):
        return mapping(text)
    raise TypeError('mapping must be a dict or callable')
63c9dc6803d1aad572e76cb2a6554363ae358e9c
21,232
def _load_components(config: ConfigType) -> ConfigType:
    """Load the different components in a config.

    When the config contains the special "_load" key, the referenced file
    is read and the current config is merged on top of it.

    Args:
        config (ConfigType)

    Returns:
        ConfigType
    """
    load_key = "_load"
    if config is None or load_key not in config:
        return config
    base_config = read_config_file(config.pop(load_key))
    merged_config = OmegaConf.merge(base_config, config)
    assert isinstance(merged_config, ConfigType)
    return merged_config
b00e2225df4d493636c509380c3c19c107ad32e6
21,233
import typing


def _value_to_variant(value: typing.Union[bytes, int, float, str]) -> GLib.Variant:
    """
    Automatically convert a Python value to a GLib.Variant by guessing
    the matching variant type.
    """
    # bool is tested before int because bool is an int subclass.
    if isinstance(value, bool):
        return GLib.Variant("b", value)
    if isinstance(value, bytes):
        return GLib.Variant("y", value)
    if isinstance(value, int):
        return GLib.Variant("x", value)
    if isinstance(value, float):
        return GLib.Variant("d", value)
    if isinstance(value, str):
        return GLib.Variant("s", value)
    raise ValueError("Unknown value type", value)
8b16bad781954238174a160df5239c0b8cb88e2e
21,234
import math


def ha_rise_set(el_limit, lat, dec):
    """ Hour angle from transit for rising and setting.

    Returns pi for a source that never sets and 0 for a source always
    below the horizon.

    @param el_limit : the elevation limit in radians
    @type  el_limit : float

    @param lat : the observatory latitude in radians
    @type  lat : float

    @param dec : the source declination in radians
    @type  dec : float

    @return: hour angle from transit in radians
    """
    cos_ha = (math.sin(el_limit) - math.sin(lat)*math.sin(dec)) \
             / (math.cos(lat)*math.cos(dec))
    if cos_ha <= -1:
        # circumpolar: never sets
        # Fix: the original returned bare `pi`, a NameError with only
        # `import math` in scope.
        return math.pi
    elif cos_ha >= 1:
        # never visible
        return 0
    else:
        return math.acos(cos_ha)
648de7a69039d73f3947706ecc4ee90e1d05597e
21,235
def create(transactions, user=None):
    """# Create Transactions

    Send a list of Transaction objects for creation in the Stark Bank API.

    ## Parameters (required):
    - transactions [list of Transaction objects]: list of Transaction objects to be created in the API

    ## Parameters (optional):
    - user [Project object]: Project object. Not necessary if starkbank.user was set before function call

    ## Return:
    - list of Transaction objects with updated attributes
    """
    created = rest.post_multi(resource=_resource, entities=transactions, user=user)
    return created
32573a0e569fde73c6eaf228ad6a07849297c7b9
21,236
def get_quote(symbol):
    """Return today's stock price for *symbol*, scraped from the quote page."""
    page = get_content(symbol)
    return page('.time_rtq_ticker span').text()
546ac10e5f7d5b3cc661dde5dceec8c4a8b0fae0
21,237
def load_pil(data, is_file = False):
    """Parse a string or file written in PIL notation.

    Returns copies of the parsed complexes, macrostates and reaction
    sets, then clears the parser's own containers.
    """
    # We only assign reactions in a postprocessing step,
    # because there are no macrostates in nuskell.
    set_io_objects(D = NuskellDomain, C = NuskellComplex)
    parsed = dsd_read_pil(data, is_file)
    clear_io_objects()
    complexes = dict(parsed['complexes'])
    macrostates = dict(parsed['macrostates'])
    detailed = set(parsed['det_reactions'])
    condensed = set(parsed['con_reactions'])
    # Drop the parser's references; our copies above survive the clears.
    for container in parsed.values():
        container.clear()
    parsed.clear()
    return complexes, macrostates, detailed, condensed
0fe0b507d19595f71d18d24e1f003fbaa59485fc
21,238
def get(obj, key, default=None, pattern_default=(), apply_transforms=True):
    """
    Get a value specified by the dotted key. If dotted is a pattern,
    return a tuple of all matches

    >>> d = {'hello': {'there': [1, '2', 3]}}
    >>> get(d, 'hello.there[1]|int')
    2
    >>> get(d, 'hello.there[1:]')
    ['2', 3]
    >>> get([{'a': 1}, {'a':2}], '[*].a')
    (1, 2)
    """
    ops = parse(key)
    matches = el.gets(ops, obj)
    if apply_transforms:
        matches = (ops.apply(m) for m in matches)
    results = tuple(matches)
    if is_pattern(ops):
        return results if results else pattern_default
    return results[0] if results else default
b6b84a357e18fa0e78d6520ba50ff5668a97067c
21,239
def str_view(request):
    """A simple test view; always responds with a fixed TwiML-style string."""
    reply = '<Response><Message>Hi!</Message></Response>'
    return reply
fd9d150afdf0589cdb4036bcb31243b2e22ef1e2
21,240
import atexit


def _run_script(script, start_with_ctty, args, kwargs):
    """Run *script* inside the pty of a python subprocess.

    Meant to be called inside a python subprocess, do NOT call directly.
    """
    enter_pty(start_with_ctty)
    outcome = script(*args, **kwargs)
    # Python-spawned subprocesses do not call exit funcs -
    # https://stackoverflow.com/q/34506638/2907819
    atexit._run_exitfuncs()
    return outcome
15507307bb85013d9354b7506569b69806bdf06a
21,241
def get_feed_list(feeds):
    """Return a proto FeedList built from the given feeds."""
    feed_messages = []
    for feed in feeds:
        valid_fields = _get_valid_fields_feed(feed)
        feed_messages.append(feeds_pb2.Feed(**valid_fields))
    return feeds_pb2.FeedList(data=feed_messages)
6e79c563649aef60396f0c8944d3532fabc17bc0
21,242
def group_interpellet_interval_plot(FEDs, groups, kde, logx, **kwargs):
    """
    FED3 Viz: Plot the interpellet intervals as a histogram, first aggregating
    the values for devices in a Groups.

    Parameters
    ----------
    FEDs : list of FED3_File objects
        FED3 files (loaded by load.FED3_File)
    groups : list of strings
        Groups to plot (based on the group attribute of each FED3_File)
    kde : bool
        Whether or not to include kernel density estimation, which plots
        probability density (rather than count) and includes a fit line (see
        seaborn.distplot)
    logx : bool
        When True, plots on a logarithmic x-axis
    **kwargs :
        ax : matplotlib.axes.Axes
            Axes to plot on, a new Figure and Axes are
            created if not passed
        date_filter : array
            A two-element array of datetimes (start, end) used to filter
            the data
        **kwargs also allows FED3 Viz to pass all settings to all functions.

    Returns
    -------
    fig : matplotlib.figure.Figure
    """
    # Accept a single file as well as a list of files.
    if not isinstance(FEDs, list):
        FEDs = [FEDs]
    for FED in FEDs:
        assert isinstance(FED, FED3_File),'Non FED3_File passed to interpellet_interval_plot()'
    if 'ax' not in kwargs:
        fig, ax = plt.subplots(figsize=(4,5), dpi=125)
    else:
        ax = kwargs['ax']
    bins=[]
    if logx:
        # Log-scale histogram: bins of width 0.1 in log10-minutes from
        # 10^-2 to 10^5; tick labels show the de-logged values.
        lowest = -2
        highest = 5
        ax.set_xticks(range(lowest,highest))
        ax.set_xticklabels([10**num for num in range(-2,5)])
        c=0
        while c <= highest:
            bins.append(round(lowest+c,2))
            c+=0.1
    else:
        # Linear scale: 50 evenly-spaced bins covering 0-900 minutes.
        ax.set_xticks([0,300,600,900])
        div = 900/50
        bins = [i*div for i in range(50)]
        ax.set_xlim(-100,1000)
    for group in groups:
        # Pool the interval values of every device belonging to the group.
        all_vals = []
        for FED in FEDs:
            if group in FED.group:
                df = FED.data
                if 'date_filter' in kwargs:
                    s, e = kwargs['date_filter']
                    df = df[(df.index >= s) & (df.index <= e)].copy()
                # Zero/negative intervals are artifacts and are excluded.
                y = list(df['Interpellet_Intervals'][df['Interpellet_Intervals'] > 0])
                if logx:
                    y = [np.log10(val) for val in y if not pd.isna(val)]
                all_vals += y
        sns.distplot(all_vals,bins=bins,label=group,ax=ax,norm_hist=False,
                     kde=kde)
    ax.legend(fontsize=8)
    ylabel = 'Density Estimation' if kde else 'Count'
    ax.set_ylabel(ylabel)
    ax.set_xlabel('minutes between pellets')
    ax.set_title('Interpellet Interval Plot')
    plt.tight_layout()
    # Only return the figure when this function created it.
    return fig if 'ax' not in kwargs else None
5c0ada4fdf71af7cfed8ffe7ec8b656c8984de9b
21,243
def _beta(x, p):
    """Helper function for `pdf_a`, beta = pi * d(1 - omega(x), omega(p))."""
    return np.pi * _circ_dist(1 - _amplitude_to_angle(x), _amplitude_to_angle(p))
9f0defbff0567ba8c181a9565570d0c7444ddc94
21,244
def set_reporting_max_width(w):
    """
    Set the max width for reported parameters. This is used so that failures
    don't overflow terminals in the event arguments are dumped.

    :param w: The new max width to enforce for the module
    :type w: int
    :return: True
    """
    # _REPR_MAX_WIDTH is a module-level single-element list; mutating it in
    # place keeps every existing reference up to date.
    _REPR_MAX_WIDTH[0] = int(w)
    return True
5da03b359fc823919bf2782907a0717c1d303a31
21,245
def get_version():
    """Get LanguageTool version.

    Prefers the version advertised by the tool itself; falls back to
    parsing it out of the installation directory name.
    """
    version = _get_attrib().get('version')
    if version:
        return version
    match = re.search(r"LanguageTool-?.*?(\S+)$", get_directory())
    return match.group(1) if match else version
1223b13b23eb4dadafbc5a3e8bf3b6e7f521ab5b
21,246
def get_mnsp_offer_index(data) -> list:
    """Get MNSP offer index.

    Returns a list of (InterconnectorID, RegionID) pairs, one per offer
    entry of every MNSP interconnector in the case file.
    """
    interconnectors = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
                       .get('PeriodCollection').get('Period')
                       .get('InterconnectorPeriodCollection')
                       .get('InterconnectorPeriod'))

    # Non-MNSP interconnectors lack an MNSPOfferCollection and are skipped.
    return [
        (inter['@InterconnectorID'], offer['@RegionID'])
        for inter in interconnectors
        if inter.get('MNSPOfferCollection') is not None
        for offer in inter.get('MNSPOfferCollection').get('MNSPOffer')
    ]
46211e9a29f1fd1fd3148deaaaa064b6d6b05ca7
21,247
def _find_clusters(
    data,
    cluster_range: Tuple[int, int] = None,
    metric: str = "silhouette_score",
    target=None,
    **kwargs,
):
    """Finds the optimal number of clusters for K-Means clustering using the selected metric.

    Args:
        data: The data.
        cluster_range: A tuple of the minimum and maximum cluster search range. Defaults to (2, 20).
        metric: The metric to optimize (from sklearn.metrics).
        target: (For supervised clustering) The labels, as a 1-D array.
        **kwargs: Keyword arguments to be passed into the K-Means estimator.

    Raises:
        ValueError: Max of cluster range greater than the min.

    Returns:
        clusters, KMeansFit
    """
    cluster_range = cluster_range or (2, 20)
    if not cluster_range[0] < cluster_range[1]:
        raise ValueError(
            "cluster_range expected to be (min_cluster, max_cluster), but the min was >= the max"
        )
    unsupervised_metrics = [
        "silhouette_score",
        "davies_bouldin_score",
        "calinski_harabasz_score",
    ]

    scores = []
    widgets = []
    for n in range(*cluster_range):
        clusterwidget = _fit_kmeans(data, n, **kwargs)
        analysis_func = getattr(sklearn.metrics, metric)
        if metric in unsupervised_metrics:
            score = analysis_func(data, clusterwidget.clusters)
        else:
            if target is None:
                raise ValueError("'target' must be specified for supervised clustering")
            score = analysis_func(target, clusterwidget.clusters)
        scores.append(score)
        widgets.append(clusterwidget)

    # Bug fix: davies_bouldin_score is *minimized* (lower is better), so
    # argmax previously selected the worst clustering for that metric.
    if metric == "davies_bouldin_score":
        best_idx = np.argmin(scores)
    else:
        best_idx = np.argmax(scores)
    clusterwidget = widgets[best_idx]
    clusterwidget.search = True
    clusterwidget.cluster_range = cluster_range
    clusterwidget.metric = metric
    clusterwidget.scores = scores
    if target is not None:
        clusterwidget.target = target

    return clusterwidget
a73afd74a6401799b6418e45372aee04cf353cb3
21,248
def _gate_objectives_li_pe(basis_states, gate, H, c_ops):
    """Objectives for two-qubit local-invariants or perfect-entangler optimization."""
    if len(basis_states) != 4:
        raise ValueError(
            "Optimization towards a two-qubit gate requires 4 basis_states"
        )
    # Bell basis as in "Theorem 1" of
    # Y. Makhlin, Quantum Inf. Process. 1, 243 (2002)
    b0, b1, b2, b3 = basis_states
    sqrt2 = np.sqrt(2)
    bell_states = [
        (b0 + b3) / sqrt2,
        (1j * b1 + 1j * b2) / sqrt2,
        (b1 - b2) / sqrt2,
        (1j * b0 - 1j * b3) / sqrt2,
    ]
    return [
        Objective(initial_state=psi, target=gate, H=H, c_ops=c_ops)
        for psi in bell_states
    ]
76be659f97396384102706fe0bc101a7d85d6521
21,249
def get_pip_package_list(path: str) -> Generator[pkg_resources.Distribution, None, None]:
    """Get the Pip package list of a Python virtual environment.

    ``path`` must point at a site-packages directory, e.g.
    ``/project/venv/lib/python3.9/site-packages``.
    """
    return pkg_resources.find_distributions(path)
9e73e27c2b50186dedeedd1240c28ef4f4d50e03
21,250
def hasGLUExtension( specifier ):
    """Given a string specifier, check for extension being available"""
    # Populate the module-level cache on first use.
    if not AVAILABLE_GLU_EXTENSIONS:
        AVAILABLE_GLU_EXTENSIONS[:] = gluGetString( GLU_EXTENSIONS )
    normalized = specifier.replace(as_8_bit('.'), as_8_bit('_'))
    return normalized in AVAILABLE_GLU_EXTENSIONS
cf938ec4d0ec16ae96faa10c50ac5b4bc541a062
21,251
def do_slots_information(parser, token):
    """Calculates some context variables based on displayed slots.

    The tag accepts no arguments; anything after the tag name is an error.
    """
    bits = token.contents.split()
    if len(bits) != 1:
        raise TemplateSyntaxError(_('%s tag needs no argument') % bits[0])
    return SlotsInformationNode()
e52d724abb435c1b8cba68c352977a1d6c1e1c12
21,252
def get_region_of_interest(img, sx=0.23, sy=0.15, delta=200, return_vertices=False):
    """
    :param img: image to extract ROI from
    :param sx: X-axis factor for ROI bottom base
    :param sy: Y-axis factor for ROI top base
    :param delta: ROI top base length
    :param return_vertices: whether to return the ROI vertices
    :return: ROI (optional: vertices)
    """
    assert len(img.shape) == 2
    h, w = img.shape
    top_y = sy * h
    bottom_y = h - 1
    # Trapezoid: short top base of length ``delta`` centred horizontally,
    # long bottom base inset by ``sx`` from each side.
    vertices = np.array(
        [
            [0.5 * (w - delta), top_y],
            [0.5 * (w + delta), top_y],
            [(1 - sx) * w, bottom_y],
            [sx * w, bottom_y],
        ]
    )
    mask = np.zeros(img.shape)
    cv2.fillPoly(mask, np.array([vertices], dtype=np.int32), 255)
    roi = mask.astype(np.uint8) & img.astype(np.uint8)
    return (roi, vertices) if return_vertices else roi
932588f34ba9cd7e4e71b35df60cf03f40574fad
21,253
def load_search_freq(fp=SEARCH_FREQ_JSON):
    """Load the search-frequency counter from a JSON file.

    Returns an empty Counter when the file does not exist yet.
    """
    try:
        with open(fp, encoding="utf-8") as handle:
            return Counter(json.load(handle))
    except FileNotFoundError:
        return Counter()
5d5e1d1106a88379eab43ce1e533a7cbb5da7eb6
21,254
def _sum_of_squares(a, axis=0):
    """
    Square each element of the input array, and return the sum(s) of that.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    sum_of_squares : ndarray
        The sum along the given axis for (a**2).

    See also
    --------
    _square_of_sums : The square(s) of the sum(s) (the opposite of
        `_sum_of_squares`).
    """
    a, axis = _chk_asarray(a, axis)
    return np.sum(np.square(a), axis)
5271d40b096e4f6f47e010bf0974bc77804a3108
21,255
def get_preprocess_fn(pp_pipeline, remove_tpu_dtypes=True):
    """Transform an input string into the preprocessing function.

    The minilanguage is as follows:
        fn1|fn2(arg, arg2,...)|...
    And describes the successive application of the various `fn`s to the
    input, where each function can optionally have one or more arguments, which
    are either positional or key/value, as dictated by the `fn`.

    The output preprocessing function expects a dictinary as input. This
    dictionary should have a key "image" that corresponds to a 3D tensor
    (height x width x channel).

    Args:
        pp_pipeline: A string describing the pre-processing pipeline.
        remove_tpu_dtypes: Whether to remove TPU incompatible types of data.

    Returns:
        preprocessing function.

    Raises:
        ValueError: if preprocessing function name is unknown
    """

    def _preprocess_fn(data):
        """The preprocessing function that is returned."""
        # Validate input
        if not isinstance(data, dict):
            raise ValueError("Argument `data` must be a dictionary, "
                             "not %s" % str(type(data)))
        # Apply all the individual steps in sequence.
        # SECURITY NOTE: each pipeline segment is dispatched via eval(), so
        # pp_pipeline must never come from untrusted input.
        logging.info("Data before pre-processing:\n%s", data)
        for fn_name in pp_pipeline.split("|"):
            data = eval(fn_name)(data)  # pylint: disable=eval-used
        if remove_tpu_dtypes:
            # Remove data that are TPU-incompatible (e.g. filename of type tf.string).
            for key in list(data.keys()):
                if data[key].dtype not in TPU_SUPPORTED_DTYPES:
                    tf.logging.warning(
                        "Removing key '{}' from data dict because its dtype {} is not in "
                        " the supported dtypes: {}".format(key, data[key].dtype,
                                                           TPU_SUPPORTED_DTYPES))
                    del data[key]
        logging.info("Data after pre-processing:\n%s", data)
        return data

    return _preprocess_fn
ef3065252b3aa67cebc6a041eba33711e7a17f82
21,256
def nodeset(v):
    """Convert a value to a node-set.

    Raises:
        XPathTypeError: if ``v`` is not a node-set.
    """
    if not nodesetp(v):
        # Bug fix: the original used Python 2 raise syntax
        # (``raise XPathTypeError, "..."``), a SyntaxError under Python 3.
        raise XPathTypeError("value is not a node-set")
    return v
ccaada2ad8610e0b3561663aab8e90665f6c23de
21,257
def get_char_embs(char_emb_path, char_emb_size, alphabet_size=1422):
    """Get pretrained character embeddings and a dictionary mapping characters to their IDs.

    Skips IDs 0 and 1, since these are reserved for PAD and UNK, respectively.

    Input:
      char_emb_path: path to glove.840B.{char_embedding_size}d-char.txt. If None, use random initialization.
      char_embedding_size: Size of character embeddings

    Returns:
      char_emb_matrix: Numpy array shape (1426, char_embedding_size) containing char embeddings.
      char2id: dict. Maps chars (string) to their IDs (int).
    """
    print("Loading char embeddings from file: {}...".format(char_emb_path))
    rows = []
    char2id = {}
    with open(char_emb_path, 'r') as fh:
        for idx, line in enumerate(tqdm(fh, total=alphabet_size)):
            tokens = line.lstrip().rstrip().split(" ")
            char = tokens[0]
            vector = [float(t) for t in tokens[1:]]
            if char_emb_size != len(vector):
                raise Exception("Expected vector of size {}, but got vector of size {}.".format(char_emb_size, len(vector)))
            rows.append(vector)
            char2id[char] = idx
    char_emb_matrix = np.array(rows, dtype=np.float32)
    print("Loaded char embedding matrix with shape {}.".format(char_emb_matrix.shape))
    return char_emb_matrix, char2id
d4be3ed7780efb3ca378c18d805ff7c5550d98d7
21,258
def _get_reverse_complement(seq): """ Get the reverse compliment of a DNA sequence. Parameters: ----------- seq Returns: -------- reverse_complement_seq Notes: ------ (1) No dependencies required. Pure python. """ complement_seq = "" for i in seq: if i == "C": complement_seq += "G" elif i == "G": complement_seq += "C" elif i == "A": complement_seq += "T" elif i == "T": complement_seq += "A" elif i == "N": complement_seq += "N" reverse_complement_seq = complement_seq[::-1] return reverse_complement_seq
31408767c628ab7b0e6e63867e37f11eb6e19560
21,259
def wave_reduce_min_all(val):
    """
    All threads get the result
    """
    # Reduce ``val`` across the wave, then broadcast lane 0's value so that
    # every lane observes the minimum. NOTE(review): semantics of
    # wave_reduce_min/broadcast assumed from their names -- confirm against
    # their definitions.
    res = wave_reduce_min(val)
    return broadcast(res, 0)
dfac75ecd9aeb75dc37cbaa7d04ce2a2732b9ce9
21,260
def predict_class(all_headlines):
    """
    Predict whether each headline is negative or positive.

    :param all_headlines: all headlines
    :return: headlines with predictions (``predicted_class`` set on each)
    """
    clf, v = load_classifier("SVM")
    headlines = [h.to_array() for h in all_headlines]
    df = pd.DataFrame(headlines)
    df.columns = [
        'headline',
        'origin',
        'semantic_value',
        'pos',
        'neg',
        'neu',
        'published_at'
    ]
    # Normalize the headline text before vectorizing.
    df['headline'] = df['headline'].map(lambda x: strip_punctuation(x))
    df['headline'] = df['headline'].map(lambda x: x.lower())
    df['headline'] = df['headline'].map(lambda x: filter_stop_words(x))
    df['published_at'] = df['published_at'].map(lambda x: to_epoch(x))
    df = normalise_column(df, 'published_at')
    tr_counts = v.transform(df['headline'])
    tr = pd.DataFrame(tr_counts.todense())
    # Bug fix: DataFrame.join returns a new frame and does not mutate in
    # place; the result was previously discarded, so the vectorized text
    # features never reached the classifier.
    df = df.join(tr)
    output = clf.predict(df.drop(["headline", "origin"], axis=1)).astype(int)
    df['predicted_class'] = output
    for i, h in enumerate(all_headlines):
        h.predicted_class = df['predicted_class'].loc[i]
    return all_headlines
38839eba678659529b7fe83d6dc09ffd3cf87e48
21,261
def find_tickets_for_seat_manager(
    user_id: UserID, party_id: PartyID
) -> list[DbTicket]:
    """Return the tickets for that party whose respective seats the user
    is entitled to manage.

    A user manages a seat when either the ticket has no explicit seat
    manager and the user owns the ticket, or the user is the explicit
    seat manager.
    """
    # NOTE: ``== False`` and ``== None`` are intentional -- SQLAlchemy
    # overloads comparison operators to build SQL expressions, so
    # ``is None`` / ``not`` must not be used here.
    return db.session \
        .query(DbTicket) \
        .filter(DbTicket.party_id == party_id) \
        .filter(DbTicket.revoked == False) \
        .filter(
            (
                (DbTicket.seat_managed_by_id == None)
                & (DbTicket.owned_by_id == user_id)
            )
            | (DbTicket.seat_managed_by_id == user_id)
        ) \
        .options(
            db.joinedload(DbTicket.occupied_seat),
        ) \
        .all()
c59af6629a402f3844e01c5dd86553b8e5d33d64
21,262
def insert_features_from_iters(dataset_path, insert_features, field_names, **kwargs):
    """Insert features into dataset from iterables.

    Args:
        dataset_path (str): Path of the dataset.
        insert_features (iter of iter): Collection of iterables representing
            features.
        field_names (iter): Collection of field names to insert. These must
            match the order of their attributes in the insert_features items.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        use_edit_session (bool): Flag to perform updates in an edit session.
            Default is False.
        log_level (str): Level to log the function at. Defaults to 'info'.

    Returns:
        collections.Counter: Count of features inserted (key 'inserted').
    """
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.get('log_level', 'info'))
    log("Start: Insert features into %s from iterables.", dataset_path)
    meta = {'dataset': dataset_metadata(dataset_path)}
    keys = {'row': tuple(contain(field_names))}
    # A generator *function* (not a generator) is accepted and invoked here
    # so the caller can pass a reusable producer.
    if inspect.isgeneratorfunction(insert_features):
        insert_features = insert_features()
    session = Editor(meta['dataset']['workspace_path'], kwargs['use_edit_session'])
    cursor = arcpy.da.InsertCursor(dataset_path, field_names=keys['row'])
    feature_count = Counter()
    # Both the edit session and the cursor are context managers; entering
    # them together ensures edits are committed/rolled back consistently.
    with session, cursor:
        for row in insert_features:
            cursor.insertRow(tuple(row))
            feature_count['inserted'] += 1
    log("%s features inserted.", feature_count['inserted'])
    log("End: Insert.")
    return feature_count
d6f4547b33a09391188beb96cf408f3148ef643e
21,263
def check_table(conn, table, interconnect):
    """
    searches if Interconnect exists in table in database

    :param conn: connect instance for database
    :param table: name of table you want to check
    :param interconnect: name of the Interconnect you are looking for
    :return: first matching row, or None when not found
    """
    cur = conn.cursor()
    # Security fix: bind the interconnect value as a SQL parameter instead
    # of interpolating it into the statement (SQL injection). The table
    # name cannot be bound, so it is still formatted in -- callers must
    # only pass trusted table names.
    sql_search = "SELECT * FROM %s WHERE Interconnect=?" % table
    found = cur.execute(sql_search, (interconnect,)).fetchone()
    return found
0888146d5dfe20e7bdfbfe078c58e86fda43d6a5
21,264
def get_host_config_tar_response(host):
    """
    Build the tar.gz attachment response for the GetHostConfig view.
    Note: This is re-used to download host config from the admin interface.

    :returns: HttpResponseAttachment
    """
    filename = '{host}_v{version}.tar.gz'.format(
        host=host.path_str(), version=host.config_version)

    # Use the response as file-like object to write the tar; the archive is
    # streamed directly into the HTTP body instead of a temp file.
    resp = HttpResponseAttachment(filename=filename, content_type='application/gzip')
    # closing() guarantees the gzip stream is finalized even on error.
    with closing(tarfile.open(mode='w:gz', fileobj=resp)) as tar:
        config_tar.generate_host_config_tar(host, TarWriter(tar))

    return resp
8a968885bb197f781faf65abf100aa40568f6354
21,265
async def update_product_remove_tag_by_id(
    *,
    product_id: int,
    session: Session = Depends(get_session),
    db_product: Product = Depends(get_product_or_404),
    db_tag: Tag = Depends(get_tag_or_404),
):
    """
    Remove tag from product.

    The product and tag are resolved (or rejected with 404) by the
    dependency injectors; this handler detaches the tag, persists the
    change and records profiling information.
    """
    existing_product = db_product["db_product"]
    existing_tag = db_tag["db_tag"]
    try:
        existing_product.tags.remove(existing_tag)
        session.add(existing_product)
        session.commit()
        session.refresh(existing_product)
        profiling_api(
            f"Product:update:remove_tag:by_id:{product_id}",
            db_product["start_time"],
            db_product["username"],
        )
    except Exception as message:
        # NOTE(review): any failure (including a tag not attached to the
        # product, i.e. list.remove raising ValueError) is reported as 404.
        logger.error(message)
        logger.exception(message)
        raise HTTPException(
            status_code=404,
            detail="Impossible to remove the tag: product or tag not existing",
        )
    return existing_product
41893e64fa02f24df26ed39128657218cbc87231
21,266
def aggregate_results_data(results, include_raw=False):
    """This function aggregates the results of an archive/unarchive operation
    into an easy-to-parse dictionary.

    .. versionchanged:: 4.1.1
       This function can now properly handle the ``ARCHIVED`` status when returned.

    .. versionadded:: 4.1.0

    :param results: The results from an archive or unarchive operation
    :type results: list, dict
    :param include_raw: Includes the raw API response in the aggregated data
                        dictionary under the ``raw`` key (``False`` by default)
    :type include_raw: bool
    :returns: A dictionary with fields for ``status``, ``archived``,
              ``unarchived``, ``failed`` and ``unknown`` or the raw response
              when the API call completely fails, with the optional raw data
              when requested
    """
    aggregate_data = {'status': 'success'}
    archived_values = ('ARCHIVING', 'ARCHIVED')
    archived = []
    unarchived = []
    failed = []
    unknown = 0

    if isinstance(results, dict) and results.get('status') == 'error':
        # Entire API call failed; surface the raw error response.
        # TODO: Record a log entry for the failed API call
        aggregate_data.update(results)
    elif isinstance(results, list):
        for message in results:
            if not isinstance(message, dict):
                # TODO: Record a log entry for the unknown result
                unknown += 1
            elif message.get('archivalStatus') in archived_values:
                archived.append(f"{message.get('msgUid')}")
            elif message.get('unarchivalStatus') == 'UNARCHIVING':
                unarchived.append(f"{message.get('msgUid')}")
            elif message.get('msgUid'):
                failed.append(f"{message.get('msgUid')}")
            else:
                # TODO: Record a log entry for the unknown result
                unknown += 1

    # Attach the parsed buckets regardless of outcome.
    aggregate_data['archived'] = archived
    aggregate_data['unarchived'] = unarchived
    aggregate_data['failed'] = failed
    aggregate_data['unknown'] = unknown
    if include_raw:
        aggregate_data['raw'] = results
    return aggregate_data
5d5a48ad054fc61ebbc6b68530d51ac865fa6f6a
21,267
def hard_sigmoid(x: tf.Tensor) -> tf.Tensor:
    """Hard sigmoid activation function.

    ```plot-activation
    activations.hard_sigmoid
    ```

    # Arguments
        x: Input tensor.

    # Returns
        Hard sigmoid activation.
    """
    # Piecewise-linear approximation: clip(x + 0.5) into [0, 1].
    # NOTE(review): this uses unit slope, unlike Keras' hard_sigmoid
    # (0.2 * x + 0.5) -- confirm the slope is intentional.
    return tf.clip_by_value(x+0.5, 0.0, 1.0)
203a41d52888b42b643df84986c5fbc8967222c6
21,268
def get_image_as_np_array(filename: str):
    """Return the image stored at ``filename`` as a numpy array."""
    # Bug fix: Image.open is lazy and keeps the file handle open; use the
    # context manager so the handle is closed deterministically.
    # np.asarray forces the pixel data to load while the file is open.
    with Image.open(filename) as img:
        return np.asarray(img)
8d3cc1c5311e675c6c710cbd7633a66748308e7d
21,269
def unreduced_coboundary(morse_complex, akq, cell_ix):
    """Restrict the coboundary of cell ``cell_ix`` to unreduced cells.

    Helper around ``unreduced_cells``. NOTE(review): ``akq`` is presumably
    the A/K/Q cell classification of a discrete Morse reduction -- confirm
    against the callers.
    """
    return unreduced_cells(akq, morse_complex.get_coboundary(cell_ix))
c074a9b7df35f961e66e31a88c8a7f95f48912c7
21,270
def __align(obj: Union[Trace, EventLog], pt: ProcessTree, max_trace_length: int = 1,
            max_process_tree_height: int = 1, parameters=None):
    """
    this function approximates alignments for a given event log or trace and a process tree

    :param obj: event log or single trace
    :param pt: process tree
    :param max_trace_length: specifies when the recursive splitting stops based on the trace's length
    :param max_process_tree_height: specifies when the recursive splitting stops based on the tree's height
    :param parameters: optional parameters dictionary; a new one is created when omitted
    :return: approximated alignments
    """
    assert isinstance(pt, ProcessTree)
    if isinstance(obj, Trace):
        # Wrap a single trace into a one-element event log so the rest of
        # the pipeline only deals with logs.
        e = EventLog()
        e.append(obj)
        obj = e
    assert isinstance(obj, EventLog)
    # Bug fix: with the default parameters=None, the item assignment below
    # raised TypeError; fall back to a fresh dict.
    if parameters is None:
        parameters = {}
    pt = process_tree_to_binary_process_tree(pt)
    pt = EfficientTree(pt)

    # Fresh per-call cache for subtree alignment results.
    parameters[Parameters.SUBTREE_ALIGN_CACHE] = {}

    return __approximate_alignments_for_log(obj, pt, max_trace_length, max_process_tree_height,
                                            parameters=parameters)
0f684403bb70a158c463b4babcada115e908ee88
21,271
def scanProgramTransfersCount(program, transfersCount=None, address=None, args=None):
    """ Scan pools by active program, sort by transfersCount.

    :param program: active program to index on
    :param transfersCount: sort value (transfer count)
    :param address: key value (pool address)
    :param args: optional extra query fields that override the defaults
    """
    # Bug fix: the original used a mutable default argument (args={});
    # None is used as the "no extra args" sentinel instead.
    query = {
        'type': 'pool',
        'index': 'activeProgram',
        'indexValue': program,
        'sort': 'transfersCount',
        'sortValue': transfersCount,
        'keyValue': address,
    }
    if args:
        query.update(args)
    return resource.scan(**query)
b40a0ff2ea62f840a6c2fd858516dc8998aac30b
21,272
def def_use_error(node, report=MAIN_REPORT):
    """
    Checks if node is a name and has a def_use_error

    Args:
        node (str or AstNode or CaitNode): The Name node to look up.
        report (Report): The report to attach data to. Defaults to MAIN_REPORT.
    Returns:
        True if the given name has a def_use_error
    """
    if not isinstance(node, str) and node.ast_name != "Name":
        raise TypeError
    node_id = node if isinstance(node, str) else node.id
    return any(
        issue.fields['name'] == node_id
        for issue in get_issues(initialization_problem)
    )
6e0113c451a2c09fdb84392060b672ffb3bc19d3
21,273
def get_ref_inst(ref):
    """
    If value is part of a port on an instance, return that instance,
    otherwise None.
    """
    root = ref.root()
    return root.inst if isinstance(root, InstRef) else None
55f1a84131451a2032b7012b00f9336f12fee554
21,274
def not_found(error):
    """
    Renders 404 page

    :returns: HTML
    :rtype: flask.Response
    """
    view_args["title"] = "Not found"
    return render_template("404.html", args=view_args), 404
8882f171c5e68f3b24a1a7bd57dbd025a4b3a070
21,275
def xml_escape(x):
    """Paranoid XML escaping suitable for content and attributes.

    Every character outside a conservative allowlist (ASCII letters, digits
    and selected punctuation) is replaced by a numeric character reference,
    so the result is safe in element content and in attribute values.
    """
    safe = frozenset(
        'abcdefghijklmnopqrstuvwxyz'
        'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        '0123456789'
        ' !#$%()*+,-./:;=?@\\^_`{|}~'
    )
    # Build the result with join(): repeated str += is quadratic.
    return ''.join(c if c in safe else '&#%d;' % ord(c) for c in x)
018dc7d1ca050641b4dd7198e17911b8d17ce5fc
21,276
def read_tab(filename):
    """Read information from a TAB file and return a list.

    Parameters
    ----------
    filename : str
        Full path and name for the tab file.

    Returns
    -------
    list
        The file's lines, each keeping its trailing newline.
    """
    with open(filename) as handle:
        return handle.readlines()
8a6a6b0ec693130da7f036f4673c89f786dfb230
21,277
def build_model(stage_id, batch_size, real_images, **kwargs):
    """Builds progressive GAN model.

    Args:
        stage_id: An integer of training stage index.
        batch_size: Number of training images in each minibatch.
        real_images: A 4D `Tensor` of NHWC format.
        **kwargs: A dictionary of
            'start_height': An integer of start image height.
            'start_width': An integer of start image width.
            'scale_base': An integer of resolution multiplier.
            'num_resolutions': An integer of number of progressive resolutions.
            'stable_stage_num_images': An integer of number of training images
                in the stable stage.
            'transition_stage_num_images': An integer of number of training
                images in the transition stage.
            'total_num_images': An integer of total number of training images.
            'kernel_size': Convolution kernel size.
            'colors': Number of image channels.
            'to_rgb_use_tanh_activation': Whether to apply tanh activation when
                output rgb.
            'fmap_base': Base number of filters.
            'fmap_decay': Decay of number of filters.
            'fmap_max': Max number of filters.
            'latent_vector_size': An integer of latent vector size.
            'gradient_penalty_weight': A float of gradient norm target for
                wasserstein loss.
            'gradient_penalty_target': A float of gradient penalty weight for
                wasserstein loss.
            'real_score_penalty_weight': A float of Additional penalty to keep
                the scores from drifting too far from zero.
            'adam_beta1': A float of Adam optimizer beta1.
            'adam_beta2': A float of Adam optimizer beta2.
            'generator_learning_rate': A float of generator learning rate.
            'discriminator_learning_rate': A float of discriminator learning
                rate.

    Returns:
        An internal object that wraps all information about the model.
    """
    kernel_size = kwargs['kernel_size']
    colors = kwargs['colors']
    resolution_schedule = make_resolution_schedule(**kwargs)

    num_blocks, num_images = get_stage_info(stage_id, **kwargs)

    # The global step doubles as the count of images seen so far; each
    # train step advances it by the batch size.
    current_image_id = tf.train.get_or_create_global_step()
    current_image_id_inc_op = current_image_id.assign_add(batch_size)
    tf.summary.scalar('current_image_id', current_image_id)

    # Progress in [0, num_blocks] drives the fade-in between resolutions.
    progress = networks.compute_progress(
        current_image_id, kwargs['stable_stage_num_images'],
        kwargs['transition_stage_num_images'], num_blocks)
    tf.summary.scalar('progress', progress)

    real_images = networks.blend_images(
        real_images, progress, resolution_schedule, num_blocks=num_blocks)

    def _num_filters_fn(block_id):
        """Computes number of filters of block `block_id`."""
        return networks.num_filters(block_id, kwargs['fmap_base'],
                                    kwargs['fmap_decay'], kwargs['fmap_max'])

    def _generator_fn(z):
        """Builds generator network."""
        to_rgb_act = tf.tanh if kwargs['to_rgb_use_tanh_activation'] else None
        return networks.generator(
            z, progress, _num_filters_fn, resolution_schedule,
            num_blocks=num_blocks, kernel_size=kernel_size, colors=colors,
            to_rgb_activation=to_rgb_act)

    def _discriminator_fn(x):
        """Builds discriminator network."""
        return networks.discriminator(
            x, progress, _num_filters_fn, resolution_schedule,
            num_blocks=num_blocks, kernel_size=kernel_size)

    ########## Define model.
    z = make_latent_vectors(batch_size, **kwargs)

    # The network fns return tuples; only the first element (the output
    # tensor) is fed to tfgan.
    gan_model = tfgan.gan_model(
        generator_fn=lambda z: _generator_fn(z)[0],
        discriminator_fn=lambda x, unused_z: _discriminator_fn(x)[0],
        real_data=real_images,
        generator_inputs=z)

    ########## Define loss.
    gan_loss = define_loss(gan_model, **kwargs)

    ########## Define train ops.
    gan_train_ops, optimizer_var_list = define_train_ops(gan_model, gan_loss,
                                                         **kwargs)
    # Tie the image counter increment to the train step.
    gan_train_ops = gan_train_ops._replace(
        global_step_inc_op=current_image_id_inc_op)

    ########## Generator smoothing.
    generator_ema = tf.train.ExponentialMovingAverage(decay=0.999)
    gan_train_ops, generator_vars_to_restore = add_generator_smoothing_ops(
        generator_ema, gan_model, gan_train_ops)

    # Plain namespace object bundling everything the training loop needs.
    class Model(object):
        pass

    model = Model()
    model.stage_id = stage_id
    model.batch_size = batch_size
    model.resolution_schedule = resolution_schedule
    model.num_images = num_images
    model.num_blocks = num_blocks
    model.current_image_id = current_image_id
    model.progress = progress
    model.num_filters_fn = _num_filters_fn
    model.generator_fn = _generator_fn
    model.discriminator_fn = _discriminator_fn
    model.gan_model = gan_model
    model.gan_loss = gan_loss
    model.gan_train_ops = gan_train_ops
    model.optimizer_var_list = optimizer_var_list
    model.generator_ema = generator_ema
    model.generator_vars_to_restore = generator_vars_to_restore
    return model
d188ef5672e928b1935a97ade3d26614eb700681
21,278
def int2(c):
    """Parse a string as a binary (base-2) number."""
    return int(c, base=2)
dd1fb1f4c194e159b227c77c4246136863646707
21,279
def from_serializer(
    serializer: serializers.Serializer,
    api_type: str,
    *,
    id_field: str = "",
    **kwargs: Any,
) -> Type[ResourceObject]:
    """
    Generate a schema from a DRF serializer.

    :param serializer: The serializer instance.
    :param api_type: The JSON API resource type.
    :param id_field: The 'id" field of the resource. If left empty, it is
                     either "id" for non-model serializers, or for model
                     serializers, it is looked up on the model.
    :param kwargs: Extra options (like links and transforms) passed to the
                   schema.
    :return: The new schema class.
    """
    # get_fields() should return them in the order of Meta.fields
    serializer_name = type(serializer).__name__
    attrs: List[str] = []
    rels: List[str] = []
    if not id_field:
        # If this is a model serializer, we can reach in to the model
        # and look for the model's PK.
        if isinstance(serializer, serializers.ModelSerializer):
            model = serializer.Meta.model
            for db_field in model._meta.get_fields():
                if getattr(db_field, "primary_key", False):
                    id_field = db_field.attname
                    break
            if not id_field:
                raise ValueError(f"Unable to find primary key from model: {model}")
        else:
            # Otherwise, just assume it's "id"
            id_field = "id"

    # Related fields become JSON:API relationships; everything else (except
    # the id field itself) becomes an attribute.
    for field_name, field in serializer.get_fields().items():
        if field_name != id_field:
            if isinstance(field, serializers.RelatedField):
                rels.append(field_name)
            else:
                attrs.append(field_name)

    values: Dict[str, Any] = {
        "id": id_field,
        "type": api_type,
        "attributes": attrs,
        "relationships": rels,
    }
    values.update(**kwargs)
    # Dynamically create the schema class with the collected options.
    return type(f"{serializer_name}_AutoSchema", (ResourceObject,), values)
4fb2c0fb83c26d412de5582a8ebfeb4c72ac7add
21,280
def inv_rotate_pixpts(pixpts_rot, angle):
    """
    Inverse rotate rotated pixel points to their original positions.

    Keyword arguments:
    pixpts_rot -- namedtuple of numpy arrays of x,y pixel points rotated
    angle -- rotation angle in degrees

    Return value:
    pixpts -- namedtuple of numpy arrays of pixel x,y points in original
              positions
    """
    angle_rad = angle * (np.pi / 180.)
    cos_a = np.cos(angle_rad)
    sin_a = np.sin(angle_rad)
    # Apply the inverse (transpose) of the rotation matrix.
    xpix_pts = pixpts_rot.x * cos_a + pixpts_rot.y * sin_a
    ypix_pts = -pixpts_rot.x * sin_a + pixpts_rot.y * cos_a
    PixPoints = namedtuple('PixPoints', 'x y')
    return PixPoints(xpix_pts, ypix_pts)
793b148a0c37d321065dc590343de0f4093abcff
21,281
def logged(func=None, level=logging.DEBUG, name=None, msg=None):
    """Decorator to log the function, with the duration.

    Args:
    -----
        func (function): the function to log
        level (logging.OBJECT): INFO, DEBUG, WARNING ...
        name: logger to use (see note below)
        msg (str): message to log

    Return:
    -------
        log the duration of the function
    """
    # Called without arguments: return a partially-applied decorator.
    if func is None:
        return functools.partial(logged, level=level, name=name, msg=msg)
    # NOTE(review): despite the docstring calling it a "name", ``name`` is
    # used as a logger *object* here -- passing a plain string would break
    # the later ``logger.log`` calls. Confirm intended usage.
    logger = name if name else Logger(
        func.__name__ + ".log", logging.INFO)
    logmsg = msg if msg else func.__name__
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        """Wrapper to use it as a decorator."""
        start = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        # Log the fixed message first, then "<name>:<duration seconds>".
        msg = ":".join([str(func.__name__), str(end - start)])
        logger.log(level, logmsg)
        logger.log(level, msg)
        return result
    return wrapper
3110b7744618c56a42516409dc73c590e97c9d18
21,282
def properties(classes):
    """get all property (p-*, u-*, e-*, dt-*) classnames

    Returns the class names with their microformats prefix stripped,
    preserving input order.
    """
    # str.startswith accepts a tuple of prefixes -- one call replaces the
    # chained ``or`` tests.
    prefixes = ("p-", "u-", "e-", "dt-")
    return [c.partition("-")[2] for c in classes if c.startswith(prefixes)]
417562d19043f4b98068ec38cc010061b612fef3
21,283
def adapt_p3_histogram(codon_usages, purge_unwanted=True):
    """Returns P3 from each set of codon usage for feeding to hist().

    :param codon_usages: iterable of collections of codon-usage objects
    :param purge_unwanted: forwarded to ``positionalGC``
    """
    # Bug fix: ``purge_unwanted`` was previously ignored -- the call always
    # hard-coded purge_unwanted=True.
    return [array([c.positionalGC(purge_unwanted=purge_unwanted)[3] for c in curr])
            for curr in codon_usages]
d5b0b0b387c3a98f584ca82dad79effbb9aa7a31
21,284
def handle_logout_response(response):
    """Handle a SAML2 logout response.

    :param response: mapping of entity id -> logout info.  Each value is
        either a ``(binding, http_info)`` tuple describing how to complete
        the logout at the IdP, or a plain (non-tuple) logout result.
    :return: an HTTP response finishing the logout flow, or an HTTP 500
        response for unsupported or failed cases.
    """
    if len(response) > 1:
        # Currently only one source is supported
        return HttpResponseServerError("Logout from several sources not supported")
    for entityid, logout_info in response.items():
        if isinstance(logout_info, tuple):
            # logout_info is a tuple containing header information and a HTML message.
            binding, http_info = logout_info
            if binding == BINDING_HTTP_POST:
                # Display content defined in logout response
                body = "".join(http_info["data"])
                return HttpResponse(body)
            elif binding == BINDING_HTTP_REDIRECT:
                # Redirect to address defined in logout response
                return HttpResponseRedirect(_get_location(http_info))
            else:
                # Unknown binding
                return HttpResponseServerError("Logout binding not supported")
        else:
            # result from logout, should be OK
            # NOTE(review): this successful (non-tuple) result falls through
            # to the "Failed to log out" response below -- confirm that is
            # intended rather than returning a success response here.
            pass
    return HttpResponseServerError("Failed to log out")
18d8983a3e01905e1c7c6b41b65eb7e9191a4bf5
21,285
def get_value_beginning_of_year(idx, col, validate=False):
    """Return the value of the series df[col] on January 1st of the year
    of the time index ``idx``.
    """
    year_start = date(idx.date().year, 1, 1)
    return get_value(year_start, col, validate)
b3a267620f19cabe1492aea671d34f1142580a5d
21,286
from typing import List


def doc2vec_embedder(corpus: List[str], size: int = 100, window: int = 5) -> List[float]:
    """Train a Doc2Vec model on ``corpus`` and return an embedding function.

    The returned callable maps a list of documents to their (scaled)
    Doc2Vec vector representations.

    ref: https://radimrehurek.com/gensim/models/doc2vec.html
    """
    logger.info(f"Training Doc2Vec with: size={size}, window={window}")

    tagged = [
        TaggedDocument(text.split(), [idx])
        for idx, text in enumerate(corpus)
    ]
    model = Doc2Vec(tagged, vector_size=size, window=window, min_count=3, workers=16)

    def embedder(documents: List[str]) -> List[float]:
        """Generates an embedding using a Doc2Vec"""
        vectors = [model.infer_vector(text.split()) for text in documents]
        return scale_vectors(vectors)

    return embedder
e14eb3c1daca1c24f9ebeaa04f44091cc12b03ff
21,287
def PremIncome(t):
    """Premium income received in period ``t``."""
    premium = SizePremium(t)
    return premium * PolsIF_Beg1(t)
1673f5a18171989e15bdfd7fa3e814f8732fd732
21,288
def _setter_name(getter_name): """ Convert a getter name to a setter name. """ return 'set' + getter_name[0].upper() + getter_name[1:]
d4b55afc10c6d79a1432d2a8f3077eb308ab0f76
21,289
def get_bel_node_by_pathway_name():
    """Get Reactome related eBEL nodes by pathway name.

    Reads ``pathway_name`` from the request query string and returns the
    paginated result of a query over pure protein nodes whose
    ``reactome_pathways`` list contains that name.
    """
    pathway_name = request.args.get('pathway_name')
    # NOTE(review): pathway_name comes from an untrusted query string and is
    # interpolated directly into the SQL below -- injection risk; prefer
    # parameterized queries or escape the value before shipping this.
    sql = f'''SELECT @rid.asString() as rid, namespace, name, bel, reactome_pathways FROM protein WHERE pure=true AND "{pathway_name}" in reactome_pathways '''
    return _get_paginated_ebel_query_result(sql)
930bb79f70c050acaa052d684de389fc2eee9c36
21,290
def get_model(model_file, log=True):
    """Load and return the model stored at ``model_file``.

    When ``log`` is true, print a confirmation that includes the Horovod
    rank of this process.
    """
    loaded = load_model(model_file)
    if log:
        print('Model successfully loaded on rank ' + str(hvd.rank()))
    return loaded
ad699c409588652ac98da0f29b2cb25c53216a46
21,291
def variable_op(shape, dtype, name="Variable", set_shape=True, container="",
                shared_name=""):
    """Deprecated. Use variable_op_v2 instead.

    Creates a variable op; when ``set_shape`` is false the variable is
    created with an unknown static shape.
    """
    effective_shape = shape if set_shape else tensor_shape.unknown_shape()
    ret = gen_state_ops.variable(shape=effective_shape, dtype=dtype,
                                 name=name, container=container,
                                 shared_name=shared_name)
    # TODO(mrry): Move this to where it is used, so we can get rid of this op
    # wrapper?
    if set_shape:
        ret.set_shape(effective_shape)
    return ret
3a799880ed22d6983906c72f939b46f404362288
21,292
def _sample_weight(kappa, dim, num_samples):
    """Rejection sampling scheme for sampling distance from center on
    surface of the sphere.

    Draws ``num_samples`` values of the radial component ``w`` for a
    distribution with concentration ``kappa`` on the ``dim``-dimensional
    sphere.  The structure matches a Wood-style von Mises-Fisher rejection
    sampler with a Beta-distributed envelope -- TODO confirm the intended
    reference.  Returns a 1-D numpy array of length ``num_samples``.
    """
    dim = dim - 1  # since S^{n-1}
    # Envelope parameters of the rejection sampler.
    b = dim / (np.sqrt(4.0 * kappa ** 2 + dim ** 2) + 2 * kappa)
    x = (1.0 - b) / (1.0 + b)
    c = kappa * x + dim * np.log(1 - x ** 2)

    results = []
    n = 0  # number of accepted samples so far
    while True:
        # Propose num_samples candidates per round from a symmetric Beta.
        z = np.random.beta(dim / 2.0, dim / 2.0, size=num_samples)
        w = (1.0 - (1.0 + b) * z) / (1.0 - (1.0 - b) * z)
        u = np.random.uniform(low=0, high=1, size=num_samples)
        # Keep the candidates that pass the log-domain acceptance test.
        mask = kappa * w + dim * np.log(1.0 - x * w) - c >= np.log(u)
        results.append(w[mask])
        n += sum(mask)
        if n >= num_samples:
            break
    # The final round may over-accept; truncate to exactly num_samples.
    results = np.concatenate(results)[:num_samples]
    return results
5760bfe205468e9d662ad0e8d8afa641fa45db2c
21,293
import torch


def variable_time_collate_fn3(
    batch,
    args,
    device=torch.device("cpu"),
    data_type="train",
    data_min=None,
    data_max=None,
):
    """
    Expects a batch of time series data in the form of (record_id, tt, vals, mask, labels)
    where
    - record_id is a patient id
    - tt is a 1-dimensional tensor containing T time values of observations.
    - vals is a (T, D) tensor containing observed values for D variables.
    - mask is a (T, D) tensor containing 1 where values were observed and 0 otherwise.
    - labels is a list of labels for the current patient, if labels are available. Otherwise None.
    Returns:
    combined_tt: The union of all time observations.
    combined_vals: (M, T, D) tensor containing the observed values.
    combined_mask: (M, T, D) tensor containing 1 where values were observed and 0 otherwise.
    """
    # Number of observed variables, taken from the first record's vals tensor.
    D = batch[0][2].shape[1]
    # Pad every record up to the longest time series in the batch.
    len_tt = [ex[1].size(0) for ex in batch]
    maxlen = np.max(len_tt)
    enc_combined_tt = torch.zeros([len(batch), maxlen]).to(device)
    enc_combined_vals = torch.zeros([len(batch), maxlen, D]).to(device)
    enc_combined_mask = torch.zeros([len(batch), maxlen, D]).to(device)
    # Copy each record into its padded row; trailing entries stay zero and
    # are marked unobserved by the (also zero) mask.
    for b, (record_id, tt, vals, mask, labels) in enumerate(batch):
        currlen = tt.size(0)
        enc_combined_tt[b, :currlen] = tt.to(device)
        enc_combined_vals[b, :currlen] = vals.to(device)
        enc_combined_mask[b, :currlen] = mask.to(device)

    # Rescale values with the provided global min/max; presumably only
    # masked-in entries are normalized -- see utils.normalize_masked_data.
    enc_combined_vals, _, _ = utils.normalize_masked_data(
        enc_combined_vals, enc_combined_mask, att_min=data_min, att_max=data_max
    )

    # Scale time stamps into [0, 1]; skip when all time values are zero to
    # avoid division by zero.
    if torch.max(enc_combined_tt) != 0.0:
        enc_combined_tt = enc_combined_tt / torch.max(enc_combined_tt)

    data_dict = {
        "observed_data": enc_combined_vals,
        "observed_tp": enc_combined_tt,
        "observed_mask": enc_combined_mask,
    }
    return data_dict
5158f7ab642ab33100ec5fc1c044e20edd90687c
21,294
import operator


def run_map_reduce(files, mapper, n):
    """Execute a map-reduce reduction of cowrie log files.

    Args:
        files (list of files): the cowrie log files to reduce.
        mapper (MapReduce): processes the files using its map/reduce
            functions.
        n (int): keep the n most frequent commands / ips / etc.

    Returns:
        list: JSON-ready, map-reduced cowrie log data.
    """
    counts = mapper(files)
    # Most frequent events first: stable ascending sort in place, then flip
    # (kept as two steps to preserve the original tie ordering).
    counts.sort(key=operator.itemgetter(1))
    counts.reverse()
    grouped = split_data_by_events(counts, n)
    return build_json(grouped)
a46779fa5546c0e414a6dd4921f52c28cc80535e
21,295
import select
# NOTE(review): ``import select`` pulls in the Python stdlib select module,
# but the code below calls ``select([...])`` in SQLAlchemy style; this import
# looks wrong -- confirm it should be e.g. ``from sqlalchemy import select``.


def metadata_record_dictize(pkg, context):
    """
    Based on ckan.lib.dictization.model_dictize.package_dictize

    Dictize a metadata record (CKAN package), either at its latest state or
    at the revision given in ``context`` ('revision_id'/'revision_date').
    Returns a dict of the package's columns plus a 'display_name' and the
    dictized 'extras'.
    """
    model = context['model']
    # Latest state unless a specific revision id/date was requested.
    is_latest_revision = not(context.get('revision_id') or
                             context.get('revision_date'))
    execute = _execute if is_latest_revision else _execute_with_revision
    # package
    if is_latest_revision:
        if isinstance(pkg, model.PackageRevision):
            pkg = model.Package.get(pkg.id)
        result = pkg
    else:
        # Query the revision table so the dict reflects the requested revision.
        package_rev = model.package_revision_table
        q = select([package_rev]).where(package_rev.c.id == pkg.id)
        result = execute(q, package_rev, context).first()
    if not result:
        raise tk.ObjectNotFound
    result_dict = d.table_dictize(result, context)
    if result_dict.get('title'):
        result_dict['title'] = result_dict['title'].strip()
    # Fall back from title to name to id for a human-readable label.
    result_dict['display_name'] = result_dict['title'] or result_dict['name'] or result_dict['id']
    # extras
    if is_latest_revision:
        extra = model.package_extra_table
    else:
        extra = model.extra_revision_table
    q = select([extra]).where(extra.c.package_id == pkg.id)
    result = execute(q, extra, context)
    result_dict['extras'] = ckan_model_dictize.extras_list_dictize(result, context)
    return result_dict
f049faf30322d5d4da45e2a424a6977c894db67c
21,296
def is_data_by_filename(fname):
    """Heuristically decide from the filename whether this is a data file.

    TODO this is super adhoc. FIXME
    """
    marker = "Run201"
    return marker in fname
f6fd006809dff852b4acf8987aa09bafd28bf3e3
21,297
def colorbar_set_label_parallel(cbar, label_list, hpos=1.2, vpos=-0.3,
                                ha='left', va='center',
                                force_position=None, **kwargs):
    """
    This is to set colorbar labels beside the colorbar, one label per
    colorbar segment, replacing the normal tick labels.

    Parameters:
    -----------
    cbar: the colorbar used to set.
    label_list: one label per colorbar segment; its length must match the
        number of text positions.
    hpos: the left position of labels, used in vertical colorbar.
    vpos: the below position of labels, used in horizontal colorbar.
    ha, va: horizontal/vertical alignment passed to cbar.ax.text.
    force_position:
        1. In case of a tuple, should be the fraction of the first small
           one and the number of remaining equal-length sections.
           Eg., (0.3,12)
        2. In case of a np.ndarray or list with values in the unit of axes
           fraction, will be directly used to position the texts.

    Example:
    --------
    /homel/ychao/python/script/set_label_parallel_colorbar.py
    """
    def get_yloc(first, num):
        """
        first is the fraction of the first small downward arrow;
        num is the number of remaining equal-length sections on the colorbar.
        Returns positions (axes fraction) for the centers of all sections.
        """
        first_pos = first/2.
        second_pos = np.arange(first + 0.5, num, 1)
        all_pos = np.array([first_pos] + list(second_pos))
        return all_pos/(first+num)

    # Remove the normal tick labels/marks; the text labels replace them.
    cbar.set_ticklabels([])
    cbar.ax.tick_params(right='off', left='off')
    # Get the text positions: fraction of each segment's value within the
    # colorbar's boundary range.
    yloc = (cbar.values-cbar.boundaries[0])/(cbar.boundaries[-1]-cbar.boundaries[0])
    if force_position is not None:
        if isinstance(force_position, (tuple)) and len(force_position) == 2:
            yloc = get_yloc(*force_position)
        elif isinstance(force_position, (np.ndarray, list)):
            yloc = force_position
        else:
            raise ValueError("Cannot understand force_position")
    if len(label_list) != len(yloc):
        raise ValueError("the lenght of cbar segments and label list are not equal!")
    else:
        # Place one text per segment, along the colorbar's long axis.
        if cbar.orientation == 'vertical':
            for label, ypos in zip(label_list, yloc):
                cbar.ax.text(hpos, ypos, label, ha=ha, va=va, **kwargs)
        elif cbar.orientation == 'horizontal':
            for label, ypos in zip(label_list, yloc):
                cbar.ax.text(ypos, vpos, label, ha=ha, va=va, **kwargs)
811358f254b05d7fa243c96d91c94ed3cb1d1fcd
21,298
def read_csv(file, tz):
    """Read a CTC heat-pump log into a cleaned, renamed pandas dataframe.

    :param file: file (path or buffer) to be read
    :param tz: timezone used to localize the datetime index
    :return: pandas dataframe with named columns, a tz-aware de-duplicated
        index, and 'Compressor' converted from ON/OFF to 1/0
    """
    ctc_columns = {1: 'unknown_1',
                   2: 'Tank upper',  # temperature [deg C]
                   3: 'unknown_3',
                   4: 'Tank lower',  # temperature [deg C]
                   5: 'unknown_5',
                   6: 'unknown_6',
                   7: 'Primary flow 1',  # temperature [deg C]
                   8: 'Return flow',  # temperature [deg C]
                   9: 'unknown_9',
                   10: 'Heater',  # electric power [kW]
                   11: 'L1',  # electric current [A]
                   12: 'L2',  # electric current [A]
                   13: 'L3',  # electric current [A]
                   14: 'unknown_14',
                   15: 'unknown_15',
                   16: 'unknown_16',
                   17: 'unknown_17',
                   18: 'unknown_18',
                   19: 'unknown_19',
                   20: 'unknown_20',
                   21: 'Charge pump',  # speed [%]
                   22: 'unknown_22',
                   23: 'Heat pump flow',  # temperature [deg C]
                   24: 'Heat pump return',  # temperature [deg C]
                   25: 'unknown_25',
                   26: 'unknown_26',
                   27: 'unknown_27',
                   28: 'unknown_28',
                   29: 'unknown_29',
                   30: 'unknown_30',
                   31: 'unknown_31',
                   32: 'Compressor L1',  # electric current [A]
                   33: 'Compressor'  # on/off [-]
                   }
    frame = pd.read_csv(file, header=None, index_col=0, parse_dates=True,
                        usecols=list(range(34)))
    # Localize the index; ambiguous DST stamps become NaT and are dropped,
    # then duplicate timestamps are reduced to their first occurrence.
    frame.index = frame.index.tz_localize(tz, ambiguous='NaT')
    frame = frame.loc[frame.index.notnull()]
    frame = frame.loc[~frame.index.duplicated(keep='first')]
    frame.rename(columns=ctc_columns, inplace=True)
    # Map the textual ON/OFF flag to a numeric 1/0.
    frame['Compressor'] = np.where(frame['Compressor'] == 'ON', 1, 0)
    return frame
9e9ed864dcba6878562ae8686dab1d1f2650f5b3
21,299