content
stringlengths
22
815k
id
int64
0
4.91M
def heat_degree_day(Tcolumn, base=65):
    """Compute daily heating degree day (HDD) values from outdoor temperatures.

    The temperature series is grouped into daily means and each day's HDD is
    ``base - mean_daily_temperature``
    (see https://www.weather.gov/key/climate_heat_cool).

    params:
    Tcolumn -- pandas Series of outdoor temperature in Fahrenheit with a
        DatetimeIndex.
    base -- temperature base for the heating degree day value,
        e.g. 65 for 65 degrees Fahrenheit (default 65).

    Returns:
    hdd -- pandas Series named 'hdd' of heating degree day values per day.
    """
    # Bug fix: the original referenced an undefined global `BASE` although the
    # docstring already documented a `base` parameter; it is now a real
    # parameter with the conventional 65 F default.
    daily_mean = Tcolumn.groupby(pd.Grouper(freq='D')).mean()
    # Negative values (days warmer than base) are deliberately kept.
    hdd = base - daily_mean
    hdd.name = 'hdd'
    return hdd
36,500
def quantile_loss(y_true, y_pred, taus):
    """The quantile (pinball) loss summed over a list of quantiles.

    Sums up the error contribution from each of the per-quantile
    skewed-absolute-error terms; column ``i`` of ``y_pred`` is the
    prediction for quantile ``taus[i]``.
    """
    # First quantile initialises the accumulator.
    e = skewed_absolute_error(
        K.flatten(y_true), K.flatten(y_pred[:, 0]), taus[0])
    for i, tau in enumerate(taus[1:]):
        # Remaining quantiles accumulate; note the column offset i + 1.
        e += skewed_absolute_error(K.flatten(y_true),
                                   K.flatten(y_pred[:, i + 1]), tau)
    return e
36,501
def parse_a3m(a3m_string: str) -> Tuple[Sequence[str], DeletionMatrix]:
    """Parses sequences and deletion matrix from a3m format alignment.

    Args:
      a3m_string: The string contents of a a3m file. The first sequence in the
        file should be the query sequence.

    Returns:
      A tuple of:
        * A list of sequences that have been aligned to the query. These
          might contain duplicates.
        * The deletion matrix for the alignment as a list of lists. The element
          at `deletion_matrix[i][j]` is the number of residues deleted from
          the aligned sequence i at residue position j.
    """
    sequences, _ = parse_fasta(a3m_string)
    deletion_matrix = []
    for msa_sequence in sequences:
        deletion_vec = []
        deletion_count = 0
        for j in msa_sequence:
            if j.islower():
                # Lowercase residues mark insertions relative to the query;
                # they accumulate as deletions from the aligned sequence.
                deletion_count += 1
            else:
                deletion_vec.append(deletion_count)
                deletion_count = 0
        deletion_matrix.append(deletion_vec)
    # Make the MSA matrix out of aligned (deletion-free) sequences.
    deletion_table = str.maketrans('', '', string.ascii_lowercase)
    aligned_sequences = [s.translate(deletion_table) for s in sequences]
    return aligned_sequences, deletion_matrix
36,502
def PyEval_GetFuncName(space, func):
    """Return the name of func if it is a function, class or instance object,
    else the name of funcs type."""
    # Stub: this CPython C-API shim is not implemented yet.
    raise NotImplementedError
36,503
def build_sub_lattice(lattice, symbol):
    """Generate a sub-lattice of the lattice based on equivalent atomic species.

    Args:
        lattice (ASE crystal class): Input lattice
        symbol (string): Symbol of species identifying sub-lattice

    Returns:
        list of lists: sub_lattice: Cartesian coordinates of the sub-lattice
        of symbol
    """
    atomic_labels = lattice.get_chemical_symbols()
    positions = lattice.get_scaled_positions()
    # Idiom fix: pair labels with positions via zip instead of maintaining a
    # manual index counter alongside the loop.
    return [pos for atom, pos in zip(atomic_labels, positions)
            if atom == symbol]
36,504
def make_url(connection_str):
    """Parse an RFC 1738 style connection string (e.g.
    ``dialect://user:password@host/dbname``) into a URL object.

    Thin wrapper around ``_parse_rfc1738_args``.
    """
    return _parse_rfc1738_args(connection_str)
36,505
def get_graphs_within_cutoff(structure: Union[Structure, MEGNetMolecule, Molecule],
                             cutoff: float = 5.0,
                             numerical_tol: float = 1e-8) -> Tuple[np.ndarray]:
    """
    Get graph representations from structure within cutoff

    Args:
        structure: (pymatgen Structure) periodic structure, or a molecule
        cutoff: (float) cutoff radius
        numerical_tol: (float) numerical tolerance

    Returns:
        center_indices, neighbor_indices, images, distances
    """
    if isinstance(structure, Structure):
        # Periodic case: real lattice with PBC in all three directions.
        lattice_matrix = np.ascontiguousarray(np.array(structure.lattice.matrix), dtype=float)
        pbc = np.array([1, 1, 1], dtype=int)
    elif isinstance(structure, MEGNetMolecule) or isinstance(structure, Molecule):
        # Molecule case: a large fictitious box, no periodicity.
        lattice_matrix = np.array([[1000.0, 0., 0.], [0., 1000., 0.], [0., 0., 1000.]], dtype=float)
        pbc = np.array([0, 0, 0], dtype=int)
    else:
        raise ValueError('structure type not supported')
    r = float(cutoff)
    cart_coords = np.ascontiguousarray(np.array(structure.cart_coords), dtype=float)
    center_indices, neighbor_indices, images, distances = \
        find_points_in_spheres(cart_coords, cart_coords, r=r, pbc=pbc,
                               lattice=lattice_matrix, tol=numerical_tol)
    # Drop self-pairs: keep pairs with different indices, or same-index pairs
    # that are genuine periodic images (distance above the tolerance).
    exclude_self = (center_indices != neighbor_indices) | (distances > numerical_tol)
    return center_indices[exclude_self], neighbor_indices[exclude_self], images[exclude_self], distances[exclude_self]
36,506
def add_cals1():
    """Search products to add to the logged-in user's daily intake.

    Reads the ``keyword`` form field, looks up matching products and renders
    the search results page. Renders the failure page when no user is logged
    in or when no products are found.
    """
    if 'username' not in session:
        return render_template("failure.html")
    food = request.form.get("keyword")
    # Bug fix: the original ran `for i in lst: lyst.append(i)` over the
    # undefined name `lyst`, raising NameError on every non-empty search
    # result; the loop had no other effect and was removed.
    lst = Product(food).get_products()
    if lst:
        return render_template('productsearch.html',
                               username=escape(session['username']),
                               vars=lst)
    return render_template("failure.html")
36,507
def format_percent(percentage, pos):
    """Axis-tick formatter that turns a fraction into a percent label.

    :param percentage: The fraction between 0.0 and 1.0
    :type percentage: float
    :param pos: The tick position (required by matplotlib, unused)
    :type pos: int
    :return: A formatted percentage string such as ``'25%'``
    :rtype: str
    """
    # pylint: disable=unused-argument
    return f"{percentage * 100.:.0f}%"
36,508
def iterate_minibatches_u(datasize, batchsize, shuffle=False):
    """Yield mini-batch index selections over unlabeled data.

    for batch_idx in iterate_minibatches_u(datasize, batchsize, True):
        # process data[batch_idx]

    :param datasize: total number of samples
    :param batchsize: samples per mini-batch (a trailing partial batch is
        dropped)
    :param shuffle: when True yield randomly permuted index arrays; when
        False yield ``slice`` objects over contiguous ranges
    """
    if shuffle:
        indices = np.arange(datasize)
        # A throwaway RandomState keeps this iterator from disturbing the
        # global numpy RNG stream (beyond one randint draw for the seed).
        np.random.RandomState(np.random.randint(1, 2147462579)).shuffle(indices)
    # Bug fix: `xrange` is Python 2 only; `range` works on Python 3.
    for start_idx in range(0, datasize - batchsize + 1, batchsize):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batchsize]
        else:
            excerpt = slice(start_idx, start_idx + batchsize)
        yield excerpt
36,509
def simple_repr(obj, attrs: tp.Optional[tp.Sequence[str]] = None,
                overrides: tp.Optional[dict] = None):
    """
    Return a simple representation string for `obj`.
    If `attrs` is not None, it should be a list of attributes to include.

    Attributes equal to their constructor default are omitted.

    :param obj: object to represent; its class ``__init__`` signature is
        inspected for parameter defaults
    :param attrs: attribute names to include (default: all constructor
        parameters)
    :param overrides: mapping of attribute name to display value, taking
        precedence over the object's own attribute values
    """
    # Bug fix: `overrides` previously used a mutable default argument (`{}`),
    # which is shared across all calls; use the None sentinel instead.
    if overrides is None:
        overrides = {}
    params = inspect.signature(obj.__class__).parameters
    attrs_repr = []
    if attrs is None:
        attrs = list(params.keys())
    for attr in attrs:
        display = False
        if attr in overrides:
            value = overrides[attr]
        elif hasattr(obj, attr):
            value = getattr(obj, attr)
        else:
            continue
        if attr in params:
            param = params[attr]
            # Show the attribute only when it differs from its default (or
            # the parameter has no default at all).
            if param.default is inspect._empty or value != param.default:  # type: ignore
                display = True
        else:
            display = True
        if display:
            attrs_repr.append(f"{attr}={value}")
    return f"{obj.__class__.__name__}({','.join(attrs_repr)})"
36,510
def verify_empty_search_table(self):
    """
    Verifies that the local group member search table has no rows, then
    closes the search popup with its "CANCEL" button.

    :param self: MainController object
    :return: None
    """
    empty_table_visible = self.wait_until_visible(
        type=By.XPATH,
        element=popups.LOCAL_GROUP_EMPTY_MEMBERS_SEARCH_TABLE_XPATH).is_displayed()
    assert empty_table_visible is True
    self.log('Members search table is empty')
    self.log('Click on "CANCEL" button')
    self.wait_until_visible(
        type=By.XPATH,
        element=popups.LOCAL_GROUP_SEARCH_MEMBERS_TABLE_CANCEL_BTN_XPATH).click()
36,511
def Gaussian_RadialBasis(basis_size: int, max_radius: float, min_radius=0.,
                         num_layers: int = 0, num_units: int = 0,
                         activation_function='relu'):
    """
    Gaussian radial basis on evenly spaced reference points.

    Note: based on e3nn.radial.GaussianRadialModel.

    :param basis_size: number of Gaussian basis functions
        (NOTE(review): `basis_size == 1` divides by zero below — confirm
        callers guard against it)
    :param max_radius: largest reference point
    :param min_radius: smallest reference point
    :param num_layers: forwarded to FiniteElement_RadialBasis
    :param num_units: forwarded to FiniteElement_RadialBasis
    :param activation_function: name resolved via get_scalar_non_linearity
    :return: FiniteElement_RadialBasis with a Gaussian radial profile
    """
    activation_function = get_scalar_non_linearity(activation_function)
    """exp(-x^2 /spacing)"""
    spacing = (max_radius - min_radius) / (basis_size - 1)
    reference_points = torch.linspace(min_radius, max_radius, basis_size)
    # Gaussian width tied to the spacing of the reference points.
    sigma = 0.8 * spacing
    basis = partial(gaussian_basis_fn, sigma=sigma)
    return FiniteElement_RadialBasis(reference_points, radial_basis_fn=basis,
                                     radial_basis_type_name='φ_gauss',
                                     num_layers=num_layers,
                                     num_units=num_units,
                                     activation_function=activation_function)
36,512
def _get_column_outliers_std(column, m=3): """ given a pandas Series representing a column in a dataframe returns pandas Series without the values which are further than m*std :param column: pandas Series representing a column in a dataframe :param m: num of std as of to remove outliers :return: pandas Series with the values which exceeds m*std """ outliers = column[abs(column - np.mean(column)) > m * np.std(column)].index return outliers
36,513
def scrape_website(url):
    """Sends a GET request to a certain url.

    Returns the Response object if the status code is 200, and None if the
    server responds with any other code.
    """
    response = requests.get(url)
    if response.status_code != 200:
        return None
    return response
36,514
def set_filters(request, query, result, static_items=None):
    """
    Sets filters in the query

    Mutates ``query['filter']['and']['filters']`` and ``result['filters']``
    in place and returns the mapping of field name -> list of terms applied.

    :param request: request whose query-string parameters drive the filters
    :param query: search query dict exposing a filter/and/filters list
    :param result: result dict; removable-filter links are appended to its
        'filters' list
    :param static_items: extra (field, term) pairs applied in addition to
        the query string
    :return: dict of used filters keyed by field
    """
    query_filters = query['filter']['and']['filters']
    used_filters = {}
    if static_items is None:
        static_items = []
    # Get query string items plus any static items, then extract all the fields
    qs_items = list(request.params.items())
    total_items = qs_items + static_items
    qs_fields = [item[0] for item in qs_items]
    fields = [item[0] for item in total_items]
    # Now make lists of terms indexed by field
    all_terms = {}
    for item in total_items:
        if item[0] in all_terms:
            all_terms[item[0]].append(item[1])
        else:
            all_terms[item[0]] = [item[1]]
    for field in fields:
        if field in used_filters:
            continue
        terms = all_terms[field]
        # Reserved, non-filter parameters are skipped outright.
        if field in ['type', 'limit', 'y.limit', 'x.limit', 'mode',
                     'annotation', 'format', 'frame', 'datastore', 'field',
                     'region', 'genome', 'sort', 'from', 'referrer']:
            continue
        # Add filter to result
        if field in qs_fields:
            for term in terms:
                # Rebuild the query string with this one (field, term) pair
                # removed, so the UI link can "remove" the filter.
                qs = urlencode([
                    (k.encode('utf-8'), v.encode('utf-8'))
                    for k, v in qs_items
                    if '{}={}'.format(k, v) != '{}={}'.format(field, term)
                ])
                result['filters'].append({
                    'field': field,
                    'term': term,
                    'remove': '{}?{}'.format(request.path, qs)
                })
        if field == 'searchTerm':
            continue
        # Add to list of active filters
        used_filters[field] = terms
        # Add filter to query
        query_filters.append(build_terms_filter(field, terms))
    return used_filters
36,515
def generate_random_initial_population(population_size, n_nodes, al):
    """
    Randomly create an initial population of candidate map colourings.

    :param population_size: population size
    :type population_size: int
    :param n_nodes: number of nodes
    :type n_nodes: int
    :param al: adjacency list
    :type al: list of lists
    :return: random population
    :rtype: list of World_Map
    """
    def _random_individual():
        # Each node independently receives one of three colours.
        colors = np.random.choice(['r', 'b', 'g'], n_nodes, replace=True)
        return World_Map("".join(colors), al)

    input_population = [_random_individual() for _ in range(population_size)]
    print('A random population of ' + str(population_size) + ' people was created')
    return input_population
36,516
def main():
    """
    Main entry point for module execution

    Gathers facts from an AOS-CX switch, preferring the pyaoscx SDK when it
    is importable and falling back to the legacy connection-based path.

    :returns: ansible_facts
    """
    argument_spec = {
        'gather_subset': dict(default=['software_info', 'software_images',
                                       'host_name', 'platform_name',
                                       'management_interface',
                                       'software_version', 'fans',
                                       'power_supplies', 'product_info',
                                       'physical_interfaces',
                                       'resource_utilization', 'domain_name'],
                              type='list',
                              choices=['software_info', 'software_images',
                                       'host_name', 'platform_name',
                                       'management_interface',
                                       'software_version', 'config', 'fans',
                                       'power_supplies', 'product_info',
                                       'physical_interfaces',
                                       'resource_utilization', 'domain_name']),
        'gather_network_resources': dict(type='list',
                                         choices=['interfaces', 'vlans',
                                                  'vrfs'])
    }
    # Version Management: probe for the pyaoscx SDK; its absence selects the
    # legacy code path below.
    try:
        from ansible.module_utils.aoscx_pyaoscx import Session
        from pyaoscx.session import Session as Pyaoscx_Session
        from pyaoscx.interface import Interface
        from pyaoscx.vlan import Vlan
        from pyaoscx.device import Device
        from pyaoscx.vrf import Vrf
        USE_PYAOSCX_SDK = True
    except ImportError:
        USE_PYAOSCX_SDK = False
    # Use the PYAOSCX SDK
    if USE_PYAOSCX_SDK:
        argument_spec.update(aoscx_http_argument_spec)
        ansible_module = AnsibleModule(argument_spec=argument_spec,
                                       supports_check_mode=True)
        # Get session
        session = Session(ansible_module)
        # Session info
        session_info = session.get_session()
        # Create pyaoscx session object
        s = Pyaoscx_Session.from_session(
            session_info['s'], session_info['url'])
        warnings = []
        if ansible_module.params["gather_subset"] == "!config":
            warnings.append(
                'default value for `gather_subset` will be changed '
                'to `min` from `!config` v2.11 onwards')
        # Declare the Ansible facts
        ansible_facts = {}
        # Retrieve variables from module parameters
        network_resource_list = ansible_module.params['gather_network_resources']
        subset_list = ansible_module.params['gather_subset']
        # Retrieve ansible_network_resources
        ansible_network_resources = {}
        if network_resource_list is not None:
            for resource in network_resource_list:
                if resource == 'interfaces':
                    ansible_network_resources.update(
                        {'interfaces': Interface.get_facts(s)})
                elif resource == 'vlans':
                    ansible_network_resources.update(
                        {'vlans': Vlan.get_facts(s)})
                elif resource == 'vrfs':
                    ansible_network_resources.update(
                        {'vrfs': Vrf.get_facts(s)})
            ansible_facts.update(
                {'ansible_network_resources': ansible_network_resources})
        # Retrieve ansible_net_gather_network_resources
        ansible_facts.update(
            {'ansible_net_gather_network_resources': network_resource_list})
        # Retrieve ansible_net_gather_subset
        ansible_facts.update({'ansible_net_gather_subset': subset_list})
        # Retrieve device facts
        switch = Device(s)
        switch.get()
        switch.get_subsystems()  # subsystem
        # Set the subsystem attributes allowed to retrieve as facts
        allowed_subsystem_attributes = [
            'product_info',
            'power_supplies',
            'interfaces',
            'fans',
            'resource_utilization'
        ]
        # Set the default subsets that are always retrieved as facts
        default_subset_list = [
            'management_interface',
            'software_version'
        ]
        # Extend subset_list with default subsets
        subset_list.extend(default_subset_list)
        # Delete duplicates
        subset_list = list(dict.fromkeys(subset_list))
        # Iterate through given subset arguments in the gather_subset parameter
        # in argument_spec
        for subset in subset_list:
            # Argument translation for management_interface and
            # physical_interfaces
            if subset == 'management_interface':
                subset = 'mgmt_intf_status'
            elif subset == 'physical_interfaces':
                subset = 'interfaces'
            elif subset == 'host_name':
                subset = 'hostname'
            str_subset = 'ansible_net_' + subset
            # Check if current subset is inside the Device object
            if hasattr(switch, subset):
                # Get attribute value and add it to Ansible facts dictionary
                ansible_facts[str_subset] = getattr(switch, subset)
            # Check if current subset is inside the allowed subsystem
            # attributes
            elif subset in allowed_subsystem_attributes:
                ansible_facts.update({str_subset: {}})
                # Iterate through Device subsystems
                for subsystem, value in switch.subsystems.items():
                    # Get attribute value and update the Ansible facts
                    # dictionary
                    ansible_facts[str_subset].update(
                        {subsystem: switch.subsystems[subsystem][subset]})
        ansible_module.exit_json(
            ansible_facts=ansible_facts, warnings=warnings)
    # USE OLD VERSION
    else:
        argument_spec.update(aoscx_http_argument_spec)
        module = AnsibleModule(argument_spec=argument_spec,
                               supports_check_mode=True)
        module._connection = get_connection(module)  # noqa
        warnings = []
        if module.params["gather_subset"] == "!config":
            warnings.append(
                'default value for `gather_subset` will be changed '
                'to `min` from `!config` v2.11 onwards')
        result = Facts(module).get_facts()
        ansible_facts, additional_warnings = result
        warnings.extend(additional_warnings)
        module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
36,517
def initialization():
    """Initialize the game's pygame resources (window icon and caption)."""
    pygame.init()
    pygame.display.set_icon(pygame.image.load("data/icon.bmp"))
    pygame.display.set_caption('SPACE')
36,518
def test_new_valid_is_pack_display_name_already_exist():
    """
    Given
    - pack_metadata file with a pack name that does not exist in our repo.
    When
    - _is_pack_display_name_already_exist is called
    Then
    - Ensure it is valid and no error is returned.
    """
    validator = IDSetValidations(is_circle=False, is_test_run=True, configuration=CONFIG)
    # The only pack in the id-set shares the same id as the metadata below,
    # so the display name does not collide with a *different* pack.
    validator.packs_set = {
        "VMware": {
            "name": "VMware",
            "current_version": "1.1.0",
            "author": "Cortex XSOAR",
            "certification": "certified",
            "tags": [],
            "use_cases": [],
            "categories": [
                "IT Services"
            ],
            "id": "VMware"
        }
    }
    pack_metadata_data = {
        "VMware": {
            "name": "VMware",
            "current_version": "1.1.0",
            "author": "Cortex XSOAR",
            "certification": "certified",
            "tags": [],
            "use_cases": [],
            "categories": [
                "IT Services"
            ],
            "id": "VMware"
        }
    }
    is_valid, error = validator._is_pack_display_name_already_exist(pack_metadata_data=pack_metadata_data)
    assert is_valid
    assert not error
36,519
def import_mlp_args(hyperparameters):
    """
    Returns parsed config for MultiLayerPerceptron classifier from provided settings
    *Grid-search friendly

    :param hyperparameters: config section exposing ``.get(key, fallback=...)``
        (presumably a configparser section — TODO confirm)
    :return: dict of typed values per hyperparameter, expanded by
        ``cast_to_typed_list`` for grid search
    """
    # Target Python type for each hyperparameter value.
    types = {
        'hidden_layer_sizes': make_tuple,
        'activation': str,
        'solver': str,
        'alpha': float,
        'batch_size': int,
        'learning_rate': str,
        'learning_rate_init': float,
        'max_iter': int,
        'tol': float,
    }
    args = {
        'hidden_layer_sizes': hyperparameters.get('hidden_layer_sizes', fallback='(100,)'),  # Formatting matters!
        'activation': hyperparameters.get('activation', fallback='relu'),
        'solver': hyperparameters.get('solver', fallback='adam'),
        'alpha': hyperparameters.get('alpha', fallback='0.0001'),
        'batch_size': hyperparameters.get('batch_size', fallback='200'),
        'learning_rate': hyperparameters.get('learning_rate', fallback='constant'),
        'learning_rate_init': hyperparameters.get('learning_rate_init', fallback='0.001'),
        'max_iter': hyperparameters.get('max_iter', fallback='200'),
        'tol': hyperparameters.get('tol', fallback='1e-4'),
    }
    # NOTE(review): every raw string is passed through cast_to_typed_list,
    # presumably turning comma-separated values into grid-search candidate
    # lists — verify against cast_to_typed_list's implementation.
    for key in args.keys():
        args[key] = cast_to_typed_list(args[key], types[key])
    return args
36,520
def download_map(
    location: List[str],
    node_tags: Optional[List[str]] = None,
    edge_tags: Optional[List[str]] = None,
    api_key: Optional[str] = None,
) -> networkx.DiGraph:
    """
    Download map from OSM for specific locations.

    :param location: place names forwarded to the downloader
    :param node_tags: OSM node tags to keep, if any
    :param edge_tags: OSM edge tags to keep, if any
    :param api_key: optional API key forwarded to the downloader
    :return: directed graph of the downloaded map
    """
    logger.info(f"Download map for {location}")
    return custom_graph_from_x(
        location, node_tags=node_tags, edge_tags=edge_tags, api_key=api_key
    )
36,521
def getLayers(model):
    """
    Collect every leaf module (a module with no children) of the model.

    Note: the model itself is never included, even when it has no children —
    only descendants reached through ``named_children`` are collected.

    :param model: torch.nn.Module to flatten
    :return: list of leaf sub-modules, in traversal order
    """
    layers = []

    def _walk(parent):
        # Depth-first traversal: leaves are collected, containers recursed.
        for _, child in parent.named_children():
            if len(list(child.named_children())) == 0:
                layers.append(child)
            elif isinstance(child, torch.nn.Module):
                _walk(child)

    _walk(model)
    return layers
36,522
def test_statement_dunder_getitem(sample_statement_object):
    """Verifies `s.__getitem__()`."""
    expected = sample_statement_object.start_time
    assert sample_statement_object.__getitem__(item="start_time") == expected
36,523
def get_config_pdf_version(config_version: str, max_input_version: str) -> str:
    """
    From the PDF version as set in the configuration and the maximum version of all input files,
    checks for the best PDF output version. Logs a warning, if the version set in the configuration
    is lower than any of the input files.

    >>> get_config_pdf_version('auto', '1.6')
    '1.6'
    >>> get_config_pdf_version('1.3', '1.5')
    '1.3'
    >>> get_config_pdf_version('1.x', '1.5')
    Traceback (most recent call last):
        ...
    ValueError: ('Invalid PDF version in configuration', '1.x')

    :param config_version: Version string from the configuration.
                           Set to ``auto`` to just use ``max_input_version``.
    :param max_input_version: Maximum version from all input files.
    :return: ``config_version``, unless set to ``auto``, then ``max_input_version``.
             However, the automatic version setting will never be lower than ``1.3``.
    :raises ValueError: If the configuration-set version is an invalid pattern.
    """
    if config_version == 'auto':
        # Lexicographic max is safe here for single-digit "1.x" style versions.
        return max(max_input_version, '1.3')
    if not PDF_VERSION_PATTERN.fullmatch(config_version):
        raise ValueError("Invalid PDF version in configuration", config_version)
    if max_input_version > config_version:
        log.warning("PDF version specified in config (%s) is lower than at least one of the input documents (%s). "
                    "The resulting PDF may not be displayed correctly in all viewers.",
                    config_version, max_input_version)
    return config_version
36,524
def threshold_otsu(hist):
    """Return threshold value based on Otsu's method.

    hist : 2-tuple ``(counts, bin_centers)``; ``bin_centers`` carries one
    extra trailing entry which is dropped before use.

    Returns
    -------
    threshold : float
        Upper threshold value. All pixels with an intensity higher than
        this value are assumed to be foreground.

    References
    ----------
    .. [1] Wikipedia, https://en.wikipedia.org/wiki/Otsu's_Method
    """
    counts, centers = hist
    centers = centers[:-1]
    # Cumulative class probabilities for every candidate threshold.
    weight_lo = np.cumsum(counts)
    weight_hi = np.cumsum(counts[::-1])[::-1]
    # Cumulative class means for every candidate threshold.
    mean_lo = np.cumsum(counts * centers) / weight_lo
    mean_hi = (np.cumsum((counts * centers)[::-1]) / weight_hi[::-1])[::-1]
    # Pair class-1 stats at t with class-2 stats at t+1 so both classes are
    # non-empty: between-class variance for each split point.
    between_var = weight_lo[:-1] * weight_hi[1:] * (mean_lo[:-1] - mean_hi[1:]) ** 2
    if len(between_var) == 0:
        return 0
    best = np.nanargmax(between_var)
    return centers[best]
36,525
def filter_options(v):
    """Disable option v

    Builds dropdown options from the iris dataframe columns, marking the
    column equal to ``v`` as disabled.
    """
    iris = dataframe()
    return [
        {"label": col, "value": col, "disabled": col == v}
        for col in iris.columns
    ]
36,526
def scatter(x):
    """Compute the scatter matrix x^T x (the outer product for a 1-D vector).

    :param x: 1-D array of length n
    :return: (n, n) matrix
    """
    row = np.atleast_2d(x)
    col = np.transpose(row)
    result = np.dot(col, row)
    # Sanity check: the scatter matrix of an n-vector must be n x n.
    assert np.array_equal(np.shape(result), [len(x), len(x)])
    return result
36,527
def assert_allclose(
    actual: Tuple[complex, complex],
    desired: Tuple[numpy.complex128, numpy.complex128],
    err_msg: str,
):
    """
    usage.scipy: 1
    """
    # Auto-generated API usage stub (signature record only); intentionally
    # has no body.
    ...
36,528
def get_owned_object_or_40x(klass, owner, include_staff=False, include_superuser=True, *args, **kwargs):
    """
    Returns an object if it can be found (using get_object_or_404).
    If the object is not owned by the supplied owner a 403 will be raised.

    The include_staff / include_superuser flags are forwarded to the model's
    ``is_not_owned_by`` ownership check.
    """
    obj = get_object_or_404(klass, *args, **kwargs)
    if obj.is_not_owned_by(owner, include_staff, include_superuser):
        raise PermissionDenied()
    return obj
36,529
def ampMeritFunction2(voltages, **kwargs):
    """Simple merit function calculator.

    voltages is a 1D array of weights for the influence functions;
    kwargs['inp'] holds (distortion map, influence function matrix).
    Computes mean((ifuncs . voltages - distortion)**2).

    :return: (mean squared residual, empty list, 0)
    """
    distortion, ifuncs = kwargs['inp'][0], kwargs['inp'][1]
    residual = np.dot(ifuncs, voltages) - distortion
    return np.mean(residual ** 2), [], 0
36,530
def get_transpose_graph(graph):
    """Return the transpose (edge-reversed) graph.

    `graph` maps node -> set of successors; every target node must itself
    appear as a key of `graph`.
    """
    reversed_edges = {node: set() for node in graph}
    for source, targets in graph.items():
        for target in targets:
            reversed_edges[target].add(source)
    return reversed_edges
36,531
def _game_data_path(game_id):
    """
    Find the path to the data file for a given game.

    This fully trusts game_id, and is not safe on unsanitised input.
    """
    filename = "{}{}".format(game_id, _EXTENSION)
    return os.path.join(_DATA_STORES, filename)
36,532
def varimax(x, iteration=14):
    """Varimax rotation of a factor-loading matrix.

    http://www.real-statistics.com/linear-algebra-matrix-topics/varimax/

    :param x: np.array of shape (m_features, c_factors); rows are normalized
        before rotation
    :param iteration: number of full pairwise rotation sweeps
    :return: rotated loading matrix
    """
    # TODO: set more intelligent angle evaluator

    def _calculate_rotation_angle(x, y):
        # Closed-form optimal planar rotation angle for the varimax criterion.
        u = np.square(x) - np.square(y)
        v = 2 * x * y
        A = np.sum(u)
        B = np.sum(v)
        C = np.sum(np.square(u) - np.square(v))
        D = np.sum(u * v)
        X = D - (2 * A * B) / len(x)
        Y = C - (A ** 2 - B ** 2) / len(x)
        return np.arctan(X / Y) / 4

    x = _normalize_numpy(x, axis=1)
    for _ in range(iteration):
        for factorLoad1 in range(x.shape[1]):
            for factorLoad2 in range(factorLoad1 + 1, x.shape[1]):
                # Bug fix: removed a dead `np.sum(np.square(...) - ...)`
                # statement whose result was discarded.
                angle = _calculate_rotation_angle(x[:, factorLoad1],
                                                  x[:, factorLoad2])
                rotationMatrix = np.array([[np.cos(angle), -np.sin(angle)],
                                           [np.sin(angle), np.cos(angle)]])
                # Rotate the two selected factor columns in their plane.
                x[:, factorLoad1], x[:, factorLoad2] = np.dot(
                    np.concatenate(([x[:, factorLoad1]],
                                    [x[:, factorLoad2]])).T,
                    rotationMatrix).T
    return x
36,533
def websocket_trace_contexts(opp, connection, msg):
    """Retrieve contexts we have traces for.

    When the message carries `domain`/`item_id`, only traces for that key
    are considered; otherwise all stored traces are scanned.
    """
    key = (msg["domain"], msg["item_id"]) if "item_id" in msg else None
    if key is not None:
        values = {key: opp.data[DATA_TRACE].get(key, {})}
    else:
        values = opp.data[DATA_TRACE]
    # Map each trace's context id to the run/domain/item it belongs to.
    contexts = {
        trace.context.id: {"run_id": trace.run_id, "domain": key[0], "item_id": key[1]}
        for key, traces in values.items()
        for trace in traces.values()
    }
    connection.send_result(msg["id"], contexts)
36,534
def inventory_to_kml_string(
        inventory,
        icon_url="https://maps.google.com/mapfiles/kml/shapes/triangle.png",
        icon_size=1.5, label_size=1.0, cmap="Paired", encoding="UTF-8",
        timespans=True, strip_far_future_end_times=True):
    """
    Convert an :class:`~obspy.core.inventory.inventory.Inventory` to a KML
    string representation.

    :type inventory: :class:`~obspy.core.inventory.inventory.Inventory`
    :param inventory: Input station metadata.
    :type icon_url: str
    :param icon_url: Internet URL of icon to use for station (e.g. PNG image).
    :type icon_size: float
    :param icon_size: Icon size.
    :type label_size: float
    :param label_size: Label size.
    :param cmap: Name of the colormap used to assign one color per network.
    :type encoding: str
    :param encoding: Encoding used for XML string.
    :type timespans: bool
    :param timespans: Whether to add timespan information to the single
        station elements in the KML or not. If timespans are used, the
        displayed information in e.g. Google Earth will represent a snapshot
        in time, such that using the time slider different states of the
        inventory in time can be visualized. If timespans are not used, any
        station active at any point in time is always shown.
    :type strip_far_future_end_times: bool
    :param strip_far_future_end_times: Leave out likely fictitious end times
        of stations (more than twenty years after current time). Far future
        end times may produce time sliders with bad overall time span in
        third party applications viewing the KML file.
    :rtype: byte string
    :return: Encoded byte string containing KML information of the station
        metadata.
    """
    twenty_years_from_now = UTCDateTime() + 3600 * 24 * 365 * 20

    # construct the KML file
    kml = Element("kml")
    kml.set("xmlns", "http://www.opengis.net/kml/2.2")
    document = SubElement(kml, "Document")
    SubElement(document, "name").text = "Inventory"

    # style definition: one icon/label style per network index
    cmap = get_cmap(name=cmap, lut=len(inventory.networks))
    for i in range(len(inventory.networks)):
        color = _rgba_tuple_to_kml_color_code(cmap(i))
        style = SubElement(document, "Style")
        style.set("id", "station_%i" % i)
        iconstyle = SubElement(style, "IconStyle")
        SubElement(iconstyle, "color").text = color
        SubElement(iconstyle, "scale").text = str(icon_size)
        icon = SubElement(iconstyle, "Icon")
        SubElement(icon, "href").text = icon_url
        hotspot = SubElement(iconstyle, "hotSpot")
        hotspot.set("x", "0.5")
        hotspot.set("y", "0.5")
        hotspot.set("xunits", "fraction")
        hotspot.set("yunits", "fraction")
        labelstyle = SubElement(style, "LabelStyle")
        SubElement(labelstyle, "color").text = color
        SubElement(labelstyle, "scale").text = str(label_size)

    for i, net in enumerate(inventory):
        folder = SubElement(document, "Folder")
        SubElement(folder, "name").text = str(net.code)
        SubElement(folder, "open").text = "1"
        SubElement(folder, "description").text = str(net)
        style = SubElement(folder, "Style")
        liststyle = SubElement(style, "ListStyle")
        SubElement(liststyle, "listItemType").text = "check"
        SubElement(liststyle, "bgColor").text = "00ffff"
        SubElement(liststyle, "maxSnippetLines").text = "5"
        # add one marker per station code
        for sta in net:
            placemark = SubElement(folder, "Placemark")
            SubElement(placemark, "name").text = ".".join((net.code, sta.code))
            SubElement(placemark, "styleUrl").text = "#station_%i" % i
            # NOTE(review): `color` here is left over from the style loop
            # above (always the last network's color) — confirm intended.
            SubElement(placemark, "color").text = color
            if sta.longitude is not None and sta.latitude is not None:
                point = SubElement(placemark, "Point")
                SubElement(point, "coordinates").text = "%.6f,%.6f,0" % \
                    (sta.longitude, sta.latitude)
            SubElement(placemark, "description").text = str(sta)
            if timespans:
                start = sta.start_date
                end = sta.end_date
                if start is not None or end is not None:
                    timespan = SubElement(placemark, "TimeSpan")
                    if start is not None:
                        SubElement(timespan, "begin").text = str(start)
                    if end is not None:
                        if not strip_far_future_end_times or \
                                end < twenty_years_from_now:
                            SubElement(timespan, "end").text = str(end)
        if timespans:
            start = net.start_date
            end = net.end_date
            if start is not None or end is not None:
                timespan = SubElement(folder, "TimeSpan")
                if start is not None:
                    SubElement(timespan, "begin").text = str(start)
                if end is not None:
                    if not strip_far_future_end_times or \
                            end < twenty_years_from_now:
                        SubElement(timespan, "end").text = str(end)

    # generate and return KML string
    return tostring(kml, pretty_print=True, xml_declaration=True,
                    encoding=encoding)
36,535
def concatenate_recordings(logPath, fileList, outFileName, odourClassesRecorded,
                           nrSamples, nrInputNeurons, nrVR, spikeLengthSample,
                           alpha, baselineValues):
    """Concatenates spike times vectors for all recording session.

    Input:
        -path of .csv files containing the arduino data for the desired
         recording sessions
        -name of .csv files containing the arduino data for the desired
         recording session
        -name of the output file where the concatenated spike times are saved
        -class labels for the recordings to be concatenated
        -number of times the e-nose was sampled during one recording session
        -number of input neurons (= number of sensors)
        -number of virtual receptors
        -length of Poisson process for each sample
        -alpha parameter regulating spiking rate
        -baseline values for each sensor
    Ouput:
        -saves the concatenated spike times matrices to file
    """
    # One row per virtual receptor; sessions are laid out back to back
    # along the column (time) axis.
    spikeTrainsRec = np.zeros((nrVR, nrSamples*spikeLengthSample*len(fileList)))
    for idx, fileName in enumerate(fileList):
        spikeTrainsRec[:, nrSamples*spikeLengthSample*idx:nrSamples*spikeLengthSample*(idx+1)] = sensor_spike_train(logPath, fileName, nrSamples, nrInputNeurons, nrVR, spikeLengthSample, alpha, baselineValues)
    with open(outFileName, 'wb') as csvfile:
        # timesWriter = csv.writer(csvfile, dialect = 'excel', delimiter=',',
        # quoting = csv.QUOTE_ALL)
        for neuron in range(nrVR):
            # Non-zero entries mark spikes; their column indices are the
            # spike times written out, one CSV row per virtual receptor.
            spikeTimes = np.nonzero(spikeTrainsRec[neuron, :])[0]
            np.savetxt(csvfile, [spikeTimes], delimiter=',', fmt = '%u')
36,536
def get_feature_embedding(config, data_loader, topk):
    """Iterate through all items in the data loader and maintain a list of
    top k highest entropy items and their embeddings

    topk - the max number of samples to keep.  If None, don't bother with
    entropy, and just return embeddings for items in the data loader.

    Return the embeddings (topk_points x feature_dimension) and the indexes
    of each embedding in the original data loader.

    - Only 1 forward pass to get entropy and feature embedding
    - Done in a streaming fashion to be ram conscious
    """
    config.model.eval()
    # The registered forward hook appends each batch's embedding tensor to
    # this list during config.model(X); it is drained (pop) every iteration.
    _batched_embeddings = []
    with torch.no_grad(), register_embedding_hook(
            config.get_feature_embedding_layer(), _batched_embeddings):
        entropy = torch.tensor([]).to(config.device)
        embeddings = torch.tensor([]).to(config.device)
        loader_idxs = torch.tensor([], dtype=torch.long).to(config.device)
        N = 0  # number of samples consumed so far; basis for loader indices
        for X, y in data_loader:
            # get entropy and embeddings for this batch
            X, y = X.to(config.device), y.to(config.device)
            yhat = config.model(X)
            assert torch.isnan(yhat).sum() == 0
            # Exactly one tensor should have been pushed by the hook.
            embeddings = torch.cat([embeddings, _batched_embeddings.pop()])
            assert len(_batched_embeddings) == 0  # sanity check forward hook
            loader_idxs = torch.cat([
                loader_idxs,
                torch.arange(N, N + X.shape[0], device=config.device)])
            # select only top k values
            if topk is not None:
                # Binary entropy of the prediction; assumes yhat values lie
                # in [0, 1] (e.g. sigmoid outputs) — TODO confirm.
                _entropy = -yhat*torch.log2(yhat) - (1-yhat)*torch.log2(1-yhat)
                # Work around when yhat == 1 and entropy is nan instead of 0
                _m = torch.isnan(_entropy)
                _entropy[_m] = 0
                # check for other unexplained nan bugs
                assert ((yhat[_m] == 1) | (yhat[_m] == 0)).all()
                entropy = torch.cat([entropy, _entropy])
                assert torch.isnan(entropy).sum() == 0
                assert len(entropy) == len(embeddings)
                assert len(entropy) == len(loader_idxs)
                # Trim the running buffers down to the current top-k rows so
                # memory stays bounded while streaming.
                if len(entropy) > topk:
                    entropy2, idxs = torch.topk(entropy, topk, dim=0)
                    idxs = idxs.cpu().numpy().ravel()
                    assert torch.isnan(entropy2).sum() == 0
                    assert max(idxs) < len(entropy)
                    assert len(idxs) == len(entropy2)
                    assert len(idxs) == topk
                    embeddings = embeddings[idxs]
                    loader_idxs = loader_idxs[idxs]
                    entropy = entropy2
            N += X.shape[0]
    # Flatten any trailing embedding dimensions to (num_points, feature_dim).
    embeddings = embeddings.reshape(embeddings.shape[0], -1)
    return embeddings, loader_idxs
36,537
def extract_yelp_data(term, categories, price, location, limit, sort_by,
                      attributes, yelp_api_key=yelp):
    """Query Yelp and return matching businesses as a pandas DataFrame.

    Runs a Yelp search with the given parameters and collects, for each
    business that has every field populated, its zip code, name, address,
    phone, price, latitude and longitude.

    Inputs:
        - yelp_api_key: a string of the Yelp API Key
        - term: a string of search terms input by the user
        - categories / price / location / attributes: Yelp search filters
        - limit: an integer of maximum number of Yelp results that will be
          returned from the query
        - sort_by: string representing a user's sorting preference
          (options are: distance, best_match, rating, review_count)

    Outputs:
        - a pandas dataframe with columns zip_code, name, addr, phone,
          price, latitude, longitude; or None when the query returns
          nothing.
    """
    api = YelpAPI(yelp_api_key)
    search_results = api.search_query(term=term, categories=categories,
                                      price=price, location=location,
                                      limit=limit, sort_by=sort_by,
                                      attributes=attributes)
    # No payload at all -> nothing to build a frame from.
    if not search_results:
        return None

    columns = ('zip_code', 'name', 'addr', 'phone', 'price',
               'latitude', 'longitude')
    records = {column: [] for column in columns}

    for business in search_results['businesses']:
        # Some Yelp businesses lack fields entirely; skip those records.
        try:
            fields = {
                'addr': business['location']['display_address'][0],
                'name': business['name'],
                'zip_code': business['location']['zip_code'],
                'latitude': business['coordinates']['latitude'],
                'longitude': business['coordinates']['longitude'],
                'phone': business['phone'],
                'price': business['price'],
            }
        except KeyError:
            print("Key Error, some missing field from the Yelp return!")
            continue
        # Keep only businesses whose every field is non-empty.
        if all(value != "" for value in fields.values()):
            for key, value in fields.items():
                records[key].append(value)

    yelp_results = pd.DataFrame()
    for column in columns:
        yelp_results[column] = records[column]
    # zip codes arrive as strings; callers expect numbers
    yelp_results['zip_code'] = pd.to_numeric(yelp_results['zip_code'])
    return yelp_results
36,538
def collect_photo_info(api_key, tag, max_count):
    """Collects some interesting info about some photos from Flickr.com for
    a given tag.

    Args:
        api_key: Flickr REST API key.
        tag: Tag string to search photos for.
        max_count: Maximum number of photos to process.

    Returns:
        list[dict]: One dict per photo with keys ``flickrid``, ``title``,
        ``description``, ``page_url``, ``farm``, ``server``, ``secret`` and,
        when the photo has comments, ``comment`` (a list of dicts with keys
        ``body``, ``authorid``, ``authorname``).
    """
    base = "http://api.flickr.com/services/rest/"
    photo_collection = []
    url = (base + "?method=flickr.photos.search&tags=%s"
           "&format=json&nojsoncallback=1&api_key=%s" % (tag, api_key))
    resp = requests.get(url)
    results = resp.json()
    count = 0
    for p in results['photos']['photo']:
        if count >= max_count:
            return photo_collection
        # Python 3 print function (was a Python 2 print statement).
        print('Processing photo: "%s"' % p['title'])
        photo = {}
        url = (base + "?method=flickr.photos.getInfo&photo_id=" + p['id'] +
               "&format=json&nojsoncallback=1&api_key=" + api_key)
        info = requests.get(url).json()
        photo["flickrid"] = p['id']
        photo["title"] = info['photo']['title']['_content']
        photo["description"] = info['photo']['description']['_content']
        photo["page_url"] = info['photo']['urls']['url'][0]['_content']
        photo["farm"] = info['photo']['farm']
        photo["server"] = info['photo']['server']
        photo["secret"] = info['photo']['secret']
        # Fetch the comment list only when the photo has comments.
        numcomments = int(info['photo']['comments']['_content'])
        if numcomments:
            url = (base + "?method=flickr.photos.comments.getList"
                   "&photo_id=" + p['id'] +
                   "&format=json&nojsoncallback=1&api_key=" + api_key)
            comments = requests.get(url).json()
            photo["comment"] = []
            for c in comments['comments']['comment']:
                photo["comment"].append({
                    "body": c['_content'],
                    "authorid": c['author'],
                    "authorname": c['authorname'],
                })
        photo_collection.append(photo)
        count += 1
    return photo_collection
36,539
def yolo_eval(yolo_outputs, image_shape=(720., 1280.), max_boxes=10,
              score_threshold=.6, iou_threshold=.5):
    """Turn raw YOLO encodings into final scored, class-labelled boxes.

    The pipeline: unpack the model outputs, convert box centres/sizes to
    corner coordinates, drop low-confidence boxes, rescale boxes to the
    original image shape, then apply non-max suppression.

    Arguments:
    yolo_outputs -- output of the encoding model; contains 4 tensors:
                    box_confidence (None, 19, 19, 5, 1),
                    box_xy (None, 19, 19, 5, 2),
                    box_wh (None, 19, 19, 5, 2),
                    box_class_probs (None, 19, 19, 5, 80)
    image_shape -- tensor of shape (2,) with the input image shape (float32)
    max_boxes -- integer, maximum number of predicted boxes you'd like
    score_threshold -- boxes whose best class score is below this are dropped
    iou_threshold -- "intersection over union" threshold used for NMS

    Returns:
    scores -- tensor of shape (None,), predicted score for each box
    boxes -- tensor of shape (None, 4), predicted box coordinates
    classes -- tensor of shape (None,), predicted class for each box
    """
    box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs

    # The filtering helpers expect corner-format boxes.
    boxes = yolo_boxes_to_corners(box_xy, box_wh)

    # Discard boxes whose best class score falls below the threshold.
    scores, boxes, classes = yolo_filter_boxes(
        box_confidence, boxes, box_class_probs, threshold=score_threshold)

    # Map box coordinates from network space back onto the input image.
    boxes = scale_boxes(boxes, image_shape)

    # Resolve overlapping detections via non-max suppression.
    scores, boxes, classes = yolo_non_max_suppression(
        scores, boxes, classes, max_boxes=max_boxes,
        iou_threshold=iou_threshold)

    return scores, boxes, classes
36,540
def flattenLists(l):
    """Recursively flatten nested lists, yielding non-list elements in order.

    Only ``list`` instances are descended into; tuples, strings and other
    iterables are yielded unchanged.

    Args:
        l: An iterable possibly containing nested lists.

    Yields:
        The leaf (non-list) elements in depth-first order.
    """
    for el in l:
        # `types.ListType` was removed in Python 3; isinstance() is the
        # idiomatic check.
        if isinstance(el, list):
            yield from flattenLists(el)
        else:
            yield el
36,541
def task(name, required=None):
    """
    A decorator for creating new tasks

    Args:
        name (str): Name of the task
        required (list): A list of required message keys that the task
            expects to be present

    Returns:
        The wrapped function; as a side effect a ``Task`` wrapping it is
        registered in the module-level ``registry`` under ``name``.
    """
    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            logger.debug('Executing task %s', name)
            try:
                result = fn(*args, **kwargs)
            except Exception as e:
                # Capture the traceback and return a failure payload rather
                # than propagating the exception to the caller.
                tb = format_exc()
                # NOTE(review): 'success': 1 on the *failure* path looks
                # inverted; presumably 1 means "error" in this codebase's
                # convention — confirm against consumers of this payload.
                result = {
                    'success': 1,
                    'msg': 'Task {} failed'.format(name),
                    'traceback': tb
                }
                logger.warning('Task %s failed: %s', name, tb)
            finally:
                logger.debug('Returning result from task %s: %s', name, result)
            return result
        # Register the wrapped callable so it can be looked up by name.
        t = Task(name=name, function=wrapper, required=required)
        registry.register(t)
        return wrapper
    return decorator
36,542
def heterograph(g, max_level=4):
    """ Constructing hypergraph from homograph.

    Builds a DGL heterograph whose node types "n1".."n{max_level}" are the
    subgraphs (paths) of the input graph of the corresponding size, plus a
    global node type "g", connected by 'has'/'in' relations.

    Parameters
    ----------
    g : `dgl.DGLGraph`
        Input graph.
    max_level : `int`
        (Default value = 4)
        Highest level of hypernodes.

    Returns
    -------
    hg : `dgl.DGLHeteroGraph`
        Output graph.
    """
    # ==============
    # initialization
    # ==============
    # initialize hypergraph as a dictionary:
    # (src_type, relation, dst_type) -> edge index pairs
    hg = {}

    # ========
    # indexing
    # ========
    # get adjacency matrix
    a = g.adjacency_matrix()

    # get indices
    idxs = get_indices_from_adjacency_matrix(a)

    # make them all numpy
    idxs = {key: value.numpy() for key, value in idxs.items()}

    # also include n1 (single nodes; one-column index array)
    idxs["n1"] = np.arange(g.number_of_nodes())[:, None]

    # build a mapping between indices and the ordering
    idxs_to_ordering = {}

    for term in ["n%s" % level for level in range(1, max_level)]:
        idxs_to_ordering[term] = {
            tuple(subgraph_idxs): ordering
            for (ordering, subgraph_idxs) in enumerate(list(idxs[term]))
        }

    # NOTE:
    # here we define all the possible
    # 'has' and 'in' relationships.
    # TODO:
    # we'll test later to see if this adds too much overhead
    for small_idx in range(1, max_level+1):  # child level
        for big_idx in range(small_idx + 1, max_level+1):  # parent level
            for pos_idx in range(big_idx - small_idx + 1):  # position offset
                # `in` relationship: the child subgraph occurs inside the
                # parent subgraph starting at column pos_idx.
                hg[
                    # (source, relationship, destination)
                    (
                        "n%s" % small_idx,
                        "n%s_as_%s_in_n%s" % (small_idx, pos_idx, big_idx),
                        "n%s" % big_idx,
                    )
                ] = np.stack(
                    # use `np.array` here but convert to list later
                    [
                        np.array(
                            [
                                idxs_to_ordering["n%s" % small_idx][tuple(x)]
                                for x in idxs["n%s" % big_idx][
                                    :, pos_idx : pos_idx + small_idx
                                ]
                            ]
                        ),
                        np.arange(idxs["n%s" % big_idx].shape[0]),
                    ],
                    axis=1,
                )

                # define the same for `has` relationship (reversed direction)
                hg[
                    (
                        "n%s" % big_idx,
                        "n%s_has_%s_n%s" % (big_idx, pos_idx, small_idx),
                        "n%s" % small_idx,
                    )
                ] = np.stack(
                    [
                        np.arange(idxs["n%s" % big_idx].shape[0]),
                        np.array(
                            [
                                idxs_to_ordering["n%s" % small_idx][tuple(x)]
                                for x in idxs["n%s" % big_idx][
                                    :, pos_idx : pos_idx + small_idx
                                ]
                            ]
                        ),
                    ],
                    axis=1,
                )

    # connect every hypernode level to the single global node "g"
    for term in ['n%s' % idx for idx in range(1, max_level+1)]:
        hg[
            (
                term,
                "%s_in_g" % term,
                "g",
            )] = np.stack(
                [
                    np.arange(len(idxs[term])),
                    np.zeros(len(idxs[term]))
                ],
                axis=1,
            )

        hg[
            (
                "g",
                "g_has_%s" % term,
                term
            )] = np.stack(
                [
                    np.zeros(len(idxs[term])),
                    np.arange(len(idxs[term])),
                ],
                axis=1,
            )

    # convert all to python `List`
    hg = dgl.heterograph({key: list(value) for key, value in hg.items()})

    # include indices in the nodes themselves
    for term in ["n%s" % level for level in range(1, max_level+1)]:
        hg.nodes[term].data["idxs"] = torch.tensor(idxs[term])
        # a subgraph whose first and last node coincide closes a ring
        hg.nodes[term].data["is_ring"] = torch.eq(
            hg.nodes[term].data["idxs"][:, 0],
            hg.nodes[term].data["idxs"][:, -1],
        )[:, None]

    # carry the original node features over to the "n1" hypernodes
    for key in g.ndata.keys():
        hg.nodes['n1'].data[key] = g.ndata[key]

    return hg
36,543
def test_cpphello():
    """Verify that cpphello() returns the expected greeting string."""
    expected = "Greetings from C++!"
    assert cpphello() == expected
36,544
def ireduce_ufunc(arrays, ufunc, axis=-1, dtype=None, ignore_nan=False, **kwargs):
    """
    Streaming reduction generator function from a binary NumPy ufunc.
    Generator version of `reduce_ufunc`.

    ``ufunc`` must be a NumPy binary Ufunc (i.e. it takes two arguments).
    Moreover, for performance reasons, ufunc must have the same return types
    as input types. This precludes the use of ``numpy.greater``, for example.

    Note that performance is much better for the default ``axis = -1``. In
    such a case, reduction operations can occur in-place. This also allows
    to operate in constant-memory.

    Parameters
    ----------
    arrays : iterable
        Arrays to be reduced.
    ufunc : numpy.ufunc
        Binary universal function.
    axis : int or None, optional
        Reduction axis. Default is to reduce the arrays in the stream as if
        they had been stacked along a new axis, then reduce along this new
        axis. If None, arrays are flattened before reduction. If `axis` is
        an int larger that the number of dimensions in the arrays of the
        stream, arrays are reduced along the new axis. Note that not all of
        NumPy Ufuncs support ``axis = None``, e.g. ``numpy.subtract``.
    dtype : numpy.dtype or None, optional
        Overrides the dtype of the calculation and output arrays.
    ignore_nan : bool, optional
        If True and ufunc has an identity value (e.g. ``numpy.add.identity``
        is 0), then NaNs are replaced with this identity. An error is raised
        if ``ufunc`` has no identity (e.g. ``numpy.maximum.identity`` is
        ``None``).
    kwargs
        Keyword arguments are passed to ``ufunc``. Note that some valid
        ufunc keyword arguments (e.g. ``keepdims``) are not valid for all
        streaming functions. Also, contrary to NumPy v. 1.10+,
        ``casting = 'unsafe`` is the default in npstreams.

    Yields
    ------
    reduced : ndarray or scalar

    Raises
    ------
    TypeError : if ``ufunc`` is not NumPy ufunc.
    ValueError : if ``ignore_nan`` is True but ``ufunc`` has no identity
    ValueError : if ``ufunc`` is not a binary ufunc
    ValueError : if ``ufunc`` does not have the same input type as output type
    """
    # Fold the explicit arguments into kwargs so they reach the helpers.
    kwargs.update({"dtype": dtype, "axis": axis})

    _check_binary_ufunc(ufunc)

    if ignore_nan:
        if ufunc.identity is None:
            raise ValueError(
                f"Cannot ignore NaNs because {ufunc.__name__} has no identity value"
            )
        # Replace NaNs with the ufunc's identity so they are no-ops in the
        # reduction.
        # TODO: use the ``where`` keyword in ufuncs instead
        arrays = map(partial(nan_to_num, fill_value=ufunc.identity, copy=False), arrays)

    # Since ireduce_ufunc is primed, we need to wait here
    # Priming is a way to start error checking before actually running
    # any computations.
    yield

    # Fast path: stack-then-reduce along a new trailing axis (in-place,
    # constant memory).
    if kwargs["axis"] == -1:
        yield from _ireduce_ufunc_new_axis(arrays, ufunc, **kwargs)
        return

    # axis=None: flatten each array before reducing.
    if kwargs["axis"] is None:
        yield from _ireduce_ufunc_all_axes(arrays, ufunc, **kwargs)
        return

    # Peek at the first array to learn the stream's dimensionality without
    # consuming it; an out-of-range axis degrades to the new-axis case.
    first, arrays = peek(arrays)
    if kwargs["axis"] >= first.ndim:
        kwargs["axis"] = -1
        yield from ireduce_ufunc(arrays, ufunc, **kwargs)
        return

    yield from _ireduce_ufunc_existing_axis(arrays, ufunc, **kwargs)
36,545
def check_date_mention(tweet):
    """Return the single valid event date mentioned in *tweet*.

    Scans the whitespace-separated tokens of the tweet for one of the three
    dates of pyconopenspaces: 5/11, 5/12, 5/13.  Quick fix to override
    SUTime defaulting to today's date and missing numeric info about the
    event's date.

    Args:
        tweet (str): The tweet text to scan.

    Returns:
        list[str] | bool: A one-element list with the matched date when
        exactly one valid date is mentioned, otherwise ``False``.
    """
    # Raw string avoids invalid-escape warnings ("\/" and "\d" in a plain
    # string); the redundant `[5]{1}` is simplified to a literal `5`.
    date_pat = re.compile(r"5/\d{2}")
    valid_dates = ["5/11", "5/12", "5/13"]
    dates = [d for d in tweet.split()
             if date_pat.match(d) and d in valid_dates]
    return dates if len(dates) == 1 else False
36,546
def parse(args):
    """[--starved <int>] [--control <int>] [--other <int>]"""
    parser = argparse.ArgumentParser()
    # Register the three integer level flags with their defaults.
    for flag, default in (('--control', 2), ('--other', 1), ('--starved', 0)):
        parser.add_argument(flag, metavar='level', type=int, default=default)
    return parser.parse_args(args)
36,547
def make_nointer_beta():
    """Make two random non-intersecting triangles in R^3 that pass the beta test.

    Triangle B is sampled uniformly; triangle A is then constructed so that
    two of its corners straddle B's plane beyond B's far corner and the
    third lies even further out in the plane, so the triangles cannot
    intersect.  The two triangles are swapped at random for extra fuzzing.

    Returns:
        (A, B): two (3, 3) float arrays, one corner per row.
    """
    # Corners of triangle B (random draws kept in the original order).
    b1 = np.random.random(3)
    b2 = np.random.random(3)
    b3 = np.random.random(3)
    # Two edges of B and the unit normal of its plane.
    p1 = b2 - b1
    p2 = b3 - b1
    n = np.cross(p1, p2)
    n /= np.linalg.norm(n)
    # A point beyond B's far corner within B's plane.
    u = 0.5 + 0.5 * np.random.random()
    v = 0.5 + 0.5 * np.random.random()
    T = b1 + u * p1 + v * p2
    # Two corners of A straddle the plane through T ...
    a1 = T + np.random.random() * n
    a2 = T - np.random.random() * n
    # ... and the third lies further out in the plane.
    w = 1.5 + 0.5 * np.random.random()
    s = 1.5 + 0.5 * np.random.random()
    a3 = b1 + w * p1 + s * p2
    A = np.array([a1, a2, a3])
    B = np.array([b1, b2, b3])
    # More fuzzing: randomly exchange the roles of A and B.
    if np.random.randint(2) == 1:
        A, B = B, A
    return A, B
36,548
def get_all_with_given_response(rdd, response='404'):
    """
    Return a rdd only with those requests
    that received the response code entered. Default set to '404'.

    The status code is taken to be the second-to-last whitespace-separated
    field of each log line; malformed lines (too few fields, non-string
    elements) are silently dropped rather than failing the job.

    Args:
        rdd: RDD of raw log lines.
        response (str): HTTP status code to keep.

    return type: pyspark.rdd.PipelinedRDD
    """
    def status_iterator(ln):
        # Purpose: predicate keeping lines whose status field matches.
        # Only the expected failure modes are swallowed (short lines,
        # non-string records) — previously a bare `except` hid every error.
        try:
            return ln.split(' ')[-2] == response
        except (IndexError, AttributeError):
            return False

    return rdd.filter(status_iterator)
36,549
async def get_neighbourhood(postcode_like: PostCodeLike) -> Optional[Neighbourhood]:
    """
    Gets a police neighbourhood from the database.
    Acts as a middleware between us and the API, caching results.

    :param postcode_like: The UK postcode to look up.
    :return: The Neighbourhood or None if the postcode does not exist.
    :raises CachingError: If the needed neighbourhood is not in cache, and the fetch isn't responding.

    todo save locations/links
    """
    try:
        postcode = await get_postcode(postcode_like)
    except CachingError as e:
        raise e
    else:
        # Unknown postcode -> nothing to look up.
        if postcode is None:
            return None
        # Cache hit: the neighbourhood was already resolved and stored.
        elif postcode.neighbourhood is not None:
            return postcode.neighbourhood
    # Cache miss: fetch from the police API; API failures are surfaced as
    # CachingError so callers see a single error type.
    try:
        data = await fetch_neighbourhood(postcode.lat, postcode.long)
    except ApiError as e:
        raise CachingError(f"Neighbourhood not in cache, and could not reach API: {e.status}")
    if data is not None:
        neighbourhood = Neighbourhood.from_dict(data)
        locations = [Location.from_dict(neighbourhood, postcode, location) for location in data["locations"]]
        links = [Link.from_dict(neighbourhood, link) for link in data["links"]]
        # Persist neighbourhood, postcode link, locations and links in one
        # transaction so the cache never ends up half-written.
        with Neighbourhood._meta.database.atomic():
            neighbourhood.save()
            postcode.neighbourhood = neighbourhood
            postcode.save()
            for location in locations:
                location.save()
            for link in links:
                link.save()
    else:
        neighbourhood = None
    return neighbourhood
36,550
def test_MAE_sldiff():
    """
    MAE is 1/3 for two arrays that are the same but for one
    entry which differs by one unit.
    """
    predicted = np.array([[1, 2, 3]])
    actual = np.array([[1, 1, 3]])
    observed = MAE(predicted, actual)
    expected = 1 / 3
    assert approx(observed, 0.01) == expected
36,551
def get_refl_weight(value, source_node):
    """Returns the reflection weight for Redshift Material

    The weight is 1.0 when the reflection colour has any intensity or a
    reflection texture map is assigned, otherwise 0.0.

    :param value: reflection colour value exposing ``GetIntensity()``
    :param source_node: material node whose reflection texmap is inspected
    :return: 1.0 or 0.0
    """
    texmap = source_node.ParameterBlock.texmap_reflection.Value
    # GetName() raises RuntimeError when no map is assigned.
    try:
        texmap_name = texmap.GetName()
    except RuntimeError:
        texmap_name = None

    if value.GetIntensity() > 0.0 or texmap_name is not None:
        return 1.0
    return 0.0
36,552
def createSMAbasis(delta, pistonMode, pistonProj):
    """
    Input args:
    <delta> is the geometric covariance matrix of actuators, it is computed
    elsewhere.  It is a square, symmetric matrix (n_act x n_act).
    <pistonMode> : piston mode (will be used in sparta)
    <pistonProj> : projector used to filter piston out of the identity

    This will create a basis orthogonal to piston with last modes having
    large voltages and only small phase variance.

    Returns:
        ndarray of the same shape as <delta>: column 0 is the piston mode,
        the remaining columns are the piston-filtered eigenmodes of delta
        (the last eigenmode is dropped to make room for piston).
    """
    # Derive the actuator count from delta instead of hard-coding 60, so
    # the routine works for any deformable-mirror size.
    n_act = delta.shape[0]
    m = filterOutPiston(np.identity(n_act), pistonMode, pistonProj)
    # Diagonalise delta in the piston-free subspace ...
    lam, mo = diagonalisation(np.dot(m.T, np.dot(delta, m)))
    # ... and map the eigenmodes back to actuator space.
    mo = np.dot(m, mo)
    SMAbasis = np.zeros(delta.shape)
    SMAbasis[:, 0] = pistonMode
    SMAbasis[:, 1:] = mo[:, :-1]
    return SMAbasis
36,553
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well.  By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    # The URL is built from conn_info; the previous unused read of
    # config.get_main_option("sqlalchemy.url") was removed.
    context.configure(
        url=create_sqlalchemy_url(conn_info),
        target_metadata=target_metadata,
        literal_binds=True)

    with context.begin_transaction():
        context.run_migrations()
36,554
def s3_put_bucket_website(s3_obj, bucketname, website_config):
    """
    Boto3 client based Put bucket website function

    Args:
        s3_obj (obj): MCG or OBC object
        bucketname (str): Name of the bucket
        website_config (dict): Website configuration info

    Returns:
        dict : PutBucketWebsite response

    """
    client = s3_obj.s3_client
    response = client.put_bucket_website(
        Bucket=bucketname,
        WebsiteConfiguration=website_config,
    )
    return response
36,555
def admin_route():
    """Route to admin portal for uploading and managing datasets.

    .. :quickref: admin; Route to admin portal for uploading and managing
    datasets.

    Notes:
    - flash() uses Bootstrap 4.0 alert categories,
      https://getbootstrap.com/docs/4.0/components/alerts/

    # GET REQUESTS
    Args: n/a
    Query Args: n/a

    Returns:
        flask.render_template(): A rendered HTML template.

    Examples: n/a

    # POST REQUESTS
    Receives a file uploaded, which is of the type:
    ImmutableMultiDict([('file', <FileStorage: 'FILENAME' ('FILETYPE')>)])
    """
    # Imported lazily inside the view, presumably to avoid import cycles
    # at app start-up — confirm before hoisting to module level.
    from pma_api.manage.db_mgmt import list_cloud_datasets, download_dataset, \
        delete_dataset
    from pma_api.models import ApiMetadata, Task
    from pma_api.task_utils import upload_dataset

    # upload: POST carries a dataset file to push to cloud storage
    if request.method == 'POST':
        try:
            file = request.files['file']
            filename = secure_filename(file.filename)
            file_url: str = upload_dataset(filename=filename, file=file)
            return jsonify({'success': bool(file_url)})
        except ExistingDatasetError as err:
            return jsonify({'success': False, 'message': str(err)})
        except Exception as err:
            msg = 'An unexpected error occurred.\n' + \
                err.__class__.__name__ + ': ' + str(err)
            return jsonify({'success': False, 'message': msg})
    elif request.method == 'GET':
        # Query args select dataset actions; without them the portal renders.
        if request.args:
            args = request.args.to_dict()
            if 'download' in args:
                # TODO: Delete tempfile after sending
                tempfile_path: str = download_dataset(
                    version_number=int(args['download']))
                return send_file(
                    filename_or_fp=tempfile_path,
                    attachment_filename=os.path.basename(tempfile_path),
                    as_attachment=True)
            if 'delete' in args:
                try:
                    delete_dataset(version_number=int(args['delete']))
                except FileNotFoundError as err:
                    msg = 'FileNotFoundError: ' + str(err)
                    flash(message=msg, category='danger')
                # Redirect back to the portal whether or not delete worked.
                return redirect(url_for('root.admin_route'))

        active_api_dataset: Dict = \
            ApiMetadata.get_current_api_data(as_json=True)
        # TODO 2019.04.18-jef: active_dataset_version seems messy / breakable
        # (parses the "-v<NUM>" suffix out of the dataset name string)
        active_dataset_version: str = \
            re.findall(r'-v[0-9]*', active_api_dataset['name'])[0]\
            .replace('-v', '')

        # Map each in-flight task id to its status-polling URL for the page.
        present_task_list: List[str] = Task.get_present_tasks()
        task_id_url_map: Dict[str, str] = {
            task_id: url_for('root.taskstatus', task_id=task_id)
            for task_id in present_task_list}
        present_tasks: str = json.dumps(task_id_url_map)

        # Listing cloud datasets needs the storage backend; degrade to an
        # empty list with a flashed warning when it is unreachable.
        try:
            datasets: List[Dict[str, str]] = list_cloud_datasets()
        except EndpointConnectionError:
            msg = 'Connection Error: Unable to connect to data storage ' \
                  'server to retrieve list of datasets.'
            datasets: List[Dict[str, str]] = []
            flash(message=msg, category='danger')

        return render_template(
            'admin.html',
            datasets=datasets,  # List[Dict[str, str]]
            active_dataset_version=active_dataset_version,  # int
            active_tasks=present_tasks,  # str(json({id: url}))
            this_env=os.getenv('ENV_NAME', 'development'))
36,556
def check_assignment(tokenlist : List[str], current_line : int) -> Tuple[bool, List[Token.Token]]:
    """Checks if the given construction is of the type 'assignment'. If it
    is, the first value will return True and the second value will return a
    list of tokens. If it isn't of the type 'assignment', the first value
    will return False and the second value wil return None or an error
    token.

    Args:
        tokenlist (List[str]): A list of strings consisting of an
            instruction and their parameters.
            NOTE(review): the annotation says List[str], but the code calls
            ``tokenlist.next()`` returning (item, rest) pairs — this looks
            like a custom stream type; confirm and fix the annotation.
        current_line (int): Line number attached to the produced tokens.

    Returns(either):
        bool, List[Token.Token]: Returns a bool(whether the token is of
            this type) and a list of tokens, which is the instruction and
            the parameters.
        bool, None : Returns a bool(whether the token is of this type)
            and None
    """
    # Supported variable types, mapped to the Python type used to validate
    # the assigned literal value.
    variable_keywords = {
        "int": int
    }
    assignment_operators = ['=']

    # Expected shape: <type> <name> <op> <value>
    variable_keyword, tokenlist = tokenlist.next()
    if variable_keyword not in variable_keywords:
        return False, [Token.Token('ERROR', "Token is not of type 'location'", current_line)]

    name, tokenlist = tokenlist.next()

    assignment_operator, tokenlist = tokenlist.next()
    if assignment_operator not in assignment_operators:
        return True, [Token.Token('ERROR', "Unknown assignment operator", current_line)]

    value, tokenlist = tokenlist.next()
    # SECURITY NOTE(review): eval() on source text executes arbitrary code
    # if the input is untrusted — consider ast.literal_eval() instead.
    if type(eval(value)) != variable_keywords[variable_keyword]:
        return True, [Token.Token('ERROR', 'Error: Value does not match type', current_line)]

    tokens = [Token.Token('TYPE', variable_keyword, current_line),
              Token.Token('IDENTIFIER', name, current_line),
              Token.Token('ASSIGNMENT', assignment_operator, current_line),
              Token.Token('VALUE', value, current_line)]
    return True, tokens
36,557
def test_view_empty_posts(app, database, test_client, default_groups):
    """Test to show empty product posts page"""
    response = test_client.get(url_for('product.posts'))
    assert response.status_code == 200
36,558
def load_nifc_fires():
    """load nifc data for 2020/2021 fire season

    NB this is a bit of an undocumented NIFC feature -- the data supposedly
    only cover 2021 but there are definitely 2020 fires included at the
    endpoint.  This might not be true in the future.

    https://data-nifc.opendata.arcgis.com/datasets/nifc::wfigs-wildland-fire-perimeters-full-history/about
    """
    uri = "https://storage.googleapis.com/carbonplan-data/raw/nifc/WFIGS_-_Wildland_Fire_Perimeters_Full_History.geojson"
    perimeters = geopandas.read_file(uri)
    perimeters = perimeters.rename(
        columns={"poly_IncidentName": "name", "poly_Acres_AutoCalc": "acres"}
    )
    # Keep only fires discovered in 2020 or 2021 (year prefix of timestamp).
    discovery_year = perimeters["irwin_FireDiscoveryDateTime"].str[:4]
    perimeters = perimeters[discovery_year.isin(["2020", "2021"])]
    # Truncate each discovery timestamp to midnight of its date.
    perimeters["ignite_at"] = perimeters["irwin_FireDiscoveryDateTime"].apply(
        lambda ts: pd.Timestamp(pd.Timestamp(ts).date())
    )
    return perimeters.to_crs(crs)[["name", "acres", "ignite_at", "geometry"]]
36,559
def test_post_an_asset_with_invalid_data(client, setup_api_test_data):
    """
    Add an asset with some fields having invalid data and one field missing.
    The right error messages should be in the response and the number of assets
    has not increased.
    """
    with UserContext("test_admin_user@seita.nl") as prosumer:
        num_assets_before = len(prosumer.assets)

        auth_token = get_auth_token(client, "test_admin_user@seita.nl", "testtest")

        # Build a payload with an out-of-range longitude and a missing
        # required field.
        post_data = get_asset_post_data()
        post_data["name"] = "Something new"
        post_data["longitude"] = 300.9
        del post_data["generic_asset_type_id"]

        response = client.post(
            url_for("AssetAPI:post"),
            json=post_data,
            headers={"content-type": "application/json", "Authorization": auth_token},
        )
        print("Server responded with:\n%s" % response.json)
        assert response.status_code == 422
        json_errors = response.json["message"]["json"]
        assert "exceeds the maximum longitude" in json_errors["longitude"][0]
        assert "required field" in json_errors["generic_asset_type_id"][0]
        # The invalid request must not have created an asset.
        assert (
            GenericAsset.query.filter_by(account_id=prosumer.id).count()
            == num_assets_before
        )
36,560
def setup(client):
    """This is called when the cog is loaded via load_extension"""
    cog = Jail(client)
    client.add_cog(cog)
36,561
def test_remove_layer_shapefile(tmpdir):
    """Removal of layer in shapefile actually deletes the datasource"""
    shp_path = str(tmpdir.join("a_filename.shp"))
    create_sample_data(shp_path, "ESRI Shapefile")
    fiona.remove(shp_path, layer=0)
    assert os.path.exists(shp_path) is False
36,562
def test_adjacent_nodes(graph_with_edges):
    """Ensure we get adjacent edges."""
    result = graph_with_edges.adjacent('A', 'B')
    assert result
36,563
def test_api_query_paginated_trades_pagination(mock_bitstamp):
    """Test pagination logic for trades works as expected.

    First request: 2 results, 1 valid trade (id 2)
    Second request: 2 results, no trades
    Third request: 2 results, 1 valid trade (id 5) and 1 invalid trade (id 6)

    Trades with id 2 and 5 are expected to be returned.
    """
    # Not a trade
    user_transaction_1 = """
    {
        "id": 1,
        "type": -1,
        "datetime": "2020-12-02 09:00:00"
    }
    """
    # First trade, buy BTC with USD, within timestamp range
    user_transaction_2 = """
    {
        "id": 2,
        "type": 2,
        "datetime": "2020-12-02 09:30:00",
        "btc": "0.50000000",
        "usd": "-10000.00000000",
        "btc_usd": "0.00005000",
        "fee": "20.00000000",
        "order_id": 2
    }
    """
    # Not a trade
    user_transaction_3 = """
    {
        "id": 3,
        "type": -1,
        "datetime": "2020-12-02 18:00:00"
    }
    """
    # Not a trade
    user_transaction_4 = """
    {
        "id": 4,
        "type": -1,
        "datetime": "2020-12-03 9:00:00"
    }
    """
    # Second trade, sell EUR for USD, within timestamp range
    user_transaction_5 = """
    {
        "id": 5,
        "type": 2,
        "datetime": "2020-12-03 11:30:00",
        "eur": "-1.00000000",
        "usd": "1.22000000",
        "eur_usd": "0.81967213",
        "fee": "0.00610000",
        "order_id": 3
    }
    """
    # Third trade, buy ETH with USDC, out of timestamp range
    user_transaction_6 = """
    {
        "id": 6,
        "type": 2,
        "datetime": "2020-12-03 12:00:01",
        "eth": "1.00000000",
        "usdc": "-750.00000000",
        "eth_usdc": "0.00133333",
        "fee": "3.75000000",
        "order_id": 1
    }
    """
    # Force the exchange to paginate after every 2 results.
    api_limit = 2
    now = datetime.now()
    now_ts = int(now.timestamp())
    options = {
        'since_id': USER_TRANSACTION_MIN_SINCE_ID,
        'limit': api_limit,
        'sort': USER_TRANSACTION_SORTING_MODE,
        'offset': 0,
    }
    # The three requests the pagination logic is expected to issue: since_id
    # advances past the last seen trade id, and offset advances when a page
    # yields no new trades.
    expected_calls = [
        call(
            endpoint='user_transactions',
            method='post',
            options={
                'since_id': 1,
                'limit': 2,
                'sort': 'asc',
                'offset': 0,
            },
        ),
        call(
            endpoint='user_transactions',
            method='post',
            options={
                'since_id': 3,
                'limit': 2,
                'sort': 'asc',
                'offset': 0,
            },
        ),
        call(
            endpoint='user_transactions',
            method='post',
            options={
                'since_id': 3,
                'limit': 2,
                'sort': 'asc',
                'offset': 2,
            },
        ),
    ]

    def get_paginated_response():
        # Generator serving one JSON page per API call, in order.
        results = [
            f'[{user_transaction_1},{user_transaction_2}]',
            f'[{user_transaction_3},{user_transaction_4}]',
            f'[{user_transaction_5},{user_transaction_6}]',
        ]
        for result_ in results:
            yield result_

    def mock_api_query_response(endpoint, method, options):  # pylint: disable=unused-argument
        return MockResponse(HTTPStatus.OK, next(get_response))

    get_response = get_paginated_response()

    # Patch the exchange's page size and its raw query method, then run the
    # paginated trade fetch over [0, now].
    with patch(
        'rotkehlchen.exchanges.bitstamp.API_MAX_LIMIT',
        new_callable=MagicMock(return_value=api_limit),
    ):
        with patch.object(
            mock_bitstamp,
            '_api_query',
            side_effect=mock_api_query_response,
        ) as mock_api_query:
            result = mock_bitstamp._api_query_paginated(
                start_ts=Timestamp(0),
                end_ts=Timestamp(now_ts),
                options=options,
                case='trades',
            )
            assert mock_api_query.call_args_list == expected_calls

    # Only transactions 2 and 5 are valid in-range trades.
    expected_result = [
        Trade(
            timestamp=1606901400,
            location=Location.BITSTAMP,
            pair=TradePair('BTC_USD'),
            trade_type=TradeType.BUY,
            amount=FVal("0.50000000"),
            rate=FVal("0.00005000"),
            fee=FVal("20.00000000"),
            fee_currency=Asset('USD'),
            link='2',
            notes='',
        ),
        Trade(
            timestamp=1606995000,
            location=Location.BITSTAMP,
            pair=TradePair('EUR_USD'),
            trade_type=TradeType.SELL,
            amount=FVal("1.22000000"),
            rate=FVal("0.81967213"),
            fee=FVal("0.00610000"),
            fee_currency=Asset('EUR'),
            link='5',
            notes='',
        ),
    ]
    assert result == expected_result
36,564
def eigenvalue_nonunitary_entanglement_infidelity(a, b, mx_basis):
    """
    Returns (d^2 - 1)/d^2 * (1 - sqrt(U)), where U is the eigenvalue-unitarity
    of a*b^{-1}

    Parameters
    ----------
    a : numpy.ndarray
        The first process (transfer) matrix.

    b : numpy.ndarray
        The second process (transfer) matrix.

    mx_basis : Basis or {'pp', 'gm', 'std'}
        the basis that `a` and `b` are in.

    Returns
    -------
    float
    """
    dim_sq = a.shape[0]
    unitarity = eigenvalue_unitarity(a, b)
    prefactor = (dim_sq - 1.0) / dim_sq
    return prefactor * (1.0 - _np.sqrt(unitarity))
36,565
def blah():
    """Print the string "blah" to stdout and return None.

    Placeholder/demo function; the redundant trailing ``pass`` was removed.
    """
    print("blah")
36,566
def project_create_folder_structures(project_folder_path):
    """Creates a project structure.  Assumes current directory is root of
    project.

    Args:
        project_folder_path (str): Root folder of the project; the "pm",
            "wp" and "workspaces" sub-folders are created beneath it.

    Returns:
        None
    """
    # create the pm folder structure
    project_init_pm_folder(project_folder_path)
    # create other folder structures
    make_folder_if_doesnt_exist(os.path.join(project_folder_path, "wp"))
    make_folder_if_doesnt_exist(
        os.path.join(project_folder_path, "workspaces"))
36,567
def upload_to_s3(local_filepath, file_name, s3_path, bucket_name=BUCKET_NAME):
    """Upload a local file to the matching S3 key, printing progress dots.

    Parameters
    ----------
    local_filepath : str
        Directory containing the file, e.g. 'my/local/path'.
    file_name : str
        Name of the file, e.g. 'cleaned_data.csv' or 'model.pkl'.
    s3_path : str
        Key prefix inside the bucket, e.g. 'NC-Cabarrus/cleaned_data'.
    bucket_name : str
        Target bucket, e.g. 'dsapp-edu-data'.
    """
    def _progress(complete, total):
        """Write one dot per upload-progress callback."""
        sys.stdout.write('.')
        sys.stdout.flush()

    connection = boto.connect_s3()
    target_bucket = connection.get_bucket(bucket_name)
    key = target_bucket.new_key(os.path.join(s3_path, file_name))
    key.set_contents_from_filename(
        os.path.join(local_filepath, file_name),
        cb=_progress,
        num_cb=10,
    )
    return None
36,568
def morph(word, rootlist, Indo = False, n = 5):
    """
    For a given word ("word"), return the n most likely morphological analyses
    based on a list of roots ("rootlist").
    Output format: root, word, proclitic/prefix, suffix/enclitic, circumfix, reduplication
    @param Indo: If true, the prefix N- and the suffix -in are also included in the analysis.
    @param n: Number of candidates returned.
    """
    cand = set()
    check = set()
    # Candidate analyses after stripping circumfixes / prefixes / suffixes.
    cand1 = NyahApitan(word, rootlist)
    cand2 = NyahAwalan(word, rootlist)
    cand3 = NyahAkhiran(word, rootlist)
    if Indo:
        cand1 = NyahApitan(word, rootlist, Indo = True)
        cand2 = NyahAwalan(word, rootlist, Indo = True)
        cand3 = NyahAkhiran(word, rootlist, Indo = True)
    # Without affixes: all three analyses agree on the root and none found a circumfix.
    for (c1, c2, c3) in [(c1, c2, c3) for c1 in cand1 for c2 in cand2 for c3 in cand3]:
        if c1[0] == c2[0] == c3[0] and (c1[4], c2[4], c3[4]) == ("0", "0", "0"):
            cand.add((c1[0], c1[1], "0", "0", "0", c1[5]))
    # With affixes.
    # NOTE(review): this is a for...else; since the loop has no break, the
    # else block always runs -- confirm that was the intent.
    else:
        for c1 in cand1:
            # No prefix, no suffix
            if not c1[2] and not c1[3]:
                cand.add((c1[0], c1[1], "0", "0", c1[4], c1[5]))
            # No prefix
            elif not c1[2]:
                temp = c1[1] + c1[3]  # form without the circumfix letters
                cand3c = NyahAkhiran(temp, rootlist)
                if Indo:
                    cand3c = NyahAkhiran(temp, rootlist, Indo = True)
                for c3 in cand3c:
                    if c1[1] == c3[0][0] and c1[3] == c3[0][2] and not c3[3]:
                        cand.add((c1[0], c1[1], "0", c3[4], c1[4], c1[5]))
            # No suffix
            elif not c1[3]:
                temp = c1[2] + c1[1]  # form without the circumfix letters
                cand2c = NyahAwalan(temp, rootlist)
                if Indo:
                    cand2c = NyahAwalan(temp, rootlist, Indo = True)
                for c2 in cand2c:
                    if c1[1] == c2[0][0] and c1[2] == c2[0][1] and not c2[2]:
                        cand.add((c1[0], c1[1], c2[4], "0", c1[4], c1[5]))
            # With prefix and suffix
            else:
                temp = c1[2] + c1[1] + c1[3]  # form without the circumfix letters
                cand2c = NyahAwalan(temp, rootlist)
                cand3c = NyahAkhiran(temp, rootlist)
                if Indo:
                    cand2c = NyahAwalan(temp, rootlist, Indo = True)
                    cand3c = NyahAkhiran(temp, rootlist, Indo = True)
                for c2 in cand2c:
                    if c1[1] == c2[0][0] and c1[2] == c2[0][1] and not c2[2]:# and c1[3] == c2[0][2]:
                        for c3 in cand3c:
                            if c1[1] == c3[0][0] and c1[3] == c3[0][2] and not c3[3]:
                                cand.add((c1[0], c1[1], c2[4], c3[4], c1[4], c1[5]))
    # Prefer roots that already exist in the root list.
    cand4 = set([c for c in cand if c[1] in rootlist])
    if cand4:
        cand = cand4
    # If no analysis was found, try again with the lowercased word.
    if not cand:
        if not word.islower():
            kecil = morph(word.lower(), rootlist)
            for k in kecil:
                check.add((k[0], word, k[2], k[3], k[4], k[5]))
        else:
            # NOTE(review): relies on `c1` leaking from the loop above;
            # raises NameError when cand1 is empty -- confirm intent.
            check.add((word, word, "0", "0", "0", c1[5]))
    # Sort by syllable count (preference order 2 > 3 > 1 > 4 ...) and root length.
    cand = sorted(cand, key = lambda x: SylCount(x[1], root = True, mono = True) + len(x[1])/100)
    # Add the n most likely results to the check set.
    for c in cand[:n]:
        check.add((c[1], word, c[2], c[3], c[4], c[5]))
    return check
36,569
def get_logger(name):
    """
    Look up a logger in the registry by name.

    Parameters
    ----------
    name : str
        Registry key identifying the logger to return.

    Returns
    -------
    :class:`delira.logging.base_logger.Logger`
        The logger registered under ``name``.
    """
    logger = _AVAILABLE_LOGGERS[name]
    return logger
36,570
def load_yaml(path: Union[str, Path], pure: bool = False) -> dict:
    """config.yaml file loader.

    This function converts the config.yaml file to a `dict` object.

    Args:
        path: .yaml configuration filepath
        pure: If True, just load the .yaml without converting to EasyDict
            and exclude extra info.

    Returns:
        `dict` object containing configuration parameters.

    Example:
        .. code-block:: python

            from dlp import CNF_PATH

            config = load_yaml(CNF_PATH)
            print(config["project_name"])
    """
    path = str(Path(path).absolute().resolve())

    # * Load config file. ``safe_load`` avoids the arbitrary object
    # construction allowed by the deprecated bare ``yaml.load`` call.
    with open(path) as file:
        config = yaml.safe_load(file)

    if not pure:
        # Add extra features: convert plain dict to EasyDict for
        # attribute-style access.
        config = edict(config)

    return config
36,571
def InterpolatedCurveOnSurfaceUV1(thisSurface, points, tolerance, closed, closedSurfaceHandling, multiple=False):
    """Return a curve interpolating UV points on a surface; the interpolant lies on the surface.

    Args:
        points (System.Collections.Generic.IEnumerable<Point2d>): At least two
            UV parameter locations on the surface.
        tolerance (double): Fit tolerance for the push-up curve; the result is
            generally within this tolerance of the surface.
        closed (bool): If True the interpolating curve is closed (the last and
            first points should generally not be equal); if False it is open.
        closedSurfaceHandling (int): 0 requires all points to lie in the
            surface's rectangular domain (points on closed directions are
            placed in the covering space automatically -- the simple option);
            1 treats the points as covering-space coordinates, giving finer
            control over curves crossing seams, e.g. wrapping multiple times
            around a closed direction. With closed=True the last point should
            equal the first plus an integer multiple of the period on a
            closed direction.
        multiple (bool): If True, every argument is a sequence and one remote
            call is made per zipped argument tuple.

    Returns:
        NurbsCurve: A new NURBS curve if successful, or None on error.
    """
    url = "rhino/geometry/surface/interpolatedcurveonsurfaceuv-surface_point2darray_double_bool_int"
    args = [thisSurface, points, tolerance, closed, closedSurfaceHandling]
    if multiple:
        # Batch mode: one request entry per zipped tuple of arguments.
        url += "?multiple=true"
        args = list(zip(thisSurface, points, tolerance, closed, closedSurfaceHandling))
    response = Util.ComputeFetch(url, args)
    return Util.DecodeToCommonObject(response)
36,572
def show(request, url, alias_model, template):
    """List all vouched users with this group.

    ``alias_model`` is the alias table for the group-like object; requests
    against an outdated alias URL are redirected to the canonical one.
    """
    group_alias = get_object_or_404(alias_model, url=url)
    # Redirect stale alias URLs to the canonical group URL.
    if group_alias.alias.url != url:
        return redirect('groups:show_group', url=group_alias.alias.url)

    group = group_alias.alias
    in_group = group.members.filter(user=request.user).exists()
    profiles = group.members.vouched()

    # Paginate the member list; invalid page numbers fall back to the
    # first page, overly large ones to the last.
    page = request.GET.get('page', 1)
    paginator = Paginator(profiles, settings.ITEMS_PER_PAGE)

    try:
        people = paginator.page(page)
    except PageNotAnInteger:
        people = paginator.page(1)
    except EmptyPage:
        people = paginator.page(paginator.num_pages)

    show_pagination = paginator.count > settings.ITEMS_PER_PAGE

    # Stewards may not leave their own group.
    profile = request.user.userprofile
    hide_leave_group_button = (hasattr(group, 'steward') and
                               profile == group.steward)

    data = dict(people=people,
                group=group,
                in_group=in_group,
                show_pagination=show_pagination,
                hide_leave_group_button=hide_leave_group_button)

    if isinstance(group, Group) and group.steward:
        """
        Get the most globally popular skills that appear in the group
        Sort them with most members first
        """
        skills = (Skill.objects
                       .filter(members__in=profiles)
                       .annotate(no_users=Count('members'))
                       .order_by('-no_users'))
        data.update(skills=skills)
        data.update(irc_channels=group.irc_channel.split(' '))
        data.update(members=profiles.count())

    return render(request, template, data)
36,573
def index():
    """ Home page. Displays subscription info and smart-sorted episodes. """
    client = JsonClient(session["username"], session["password"])
    subs = get_subscriptions(client, session["username"])
    recent_episodes = smart_sort(client, session["username"])

    # Clean up episode metadata for display: strip URLs from descriptions
    # and keep only the date part of the ISO release timestamp.
    for ep in recent_episodes:
        ep['description'] = re.sub(r'http\S+', '', ep['description'])
        ep['released'] = ep['released'].split('T', 1)[0]

    # Form submissions: search fetch, advanced search, or suggestions.
    if request.method == 'POST':
        if request.form['submit'] == 'fetch':
            # Empty query: re-render the page instead of searching.
            if not request.form['queryvalue']:
                return render_template('index.html', subs=subs)
            else:
                return redirect(url_for('searchresults',
                                        query=request.form['queryvalue']))
        elif request.form['submit'] == 'advanced':
            return redirect(url_for('advancedsearch'))
        elif request.form['submit'] == 'sugg':
            return redirect(url_for('suggestions'))

    return render_template('index.html', subs=subs,
                           recent_episodes=recent_episodes)
36,574
def range(
    lower: int, upper: int, step: Optional[int] = None, name: Optional[str] = None
) -> Series:
    """
    Create a Series spanning ``[lower, upper)``.

    Parameters
    ----------
    lower
        Lower bound value (inclusive).
    upper
        Upper bound value (exclusive).
    step
        Optional step size; defaults to 1 when omitted.
    name
        Name of the Series; defaults to the empty string.
    """
    series_name = name if name is not None else ""
    values = np.arange(lower, upper, step)
    return Series(series_name, values, nullable=False)
36,575
def local_gpu_masked_careduce(node): """ Detects eligible CAReduce{add}(GpuElemwise{Switch}) instances and replaces them with a masked CAReduce. """ # TODO: Probably don't need this hack checking for both GpuCAReduce and its # non-gpu counterpart anymore. Just the GPU should be fine. if not isinstance(node.op, GpuCAReduce): # Send this off to local_gpu_careduce first. # HACK: This happens outside of the standard optimization sequence. ret = local_gpu_careduce.transform(node) if not ret: return False print "local_gpu_careduce returned with", ret if isinstance(ret[0].owner.op, HostFromGpu): ret = ret[0].owner.inputs[0].owner else: ret = ret[0].owner node = ret if node.op.scalar_op.__class__ != theano.scalar.Add: return False above = node.inputs[0].owner if above is None or not isinstance(above.op, GpuElemwise): return False # The graph looks okay. Check the dims. if node.op.reduce_mask != (1, 0, 0): return False if node.op.pre_scalar_op: return False # Check switch op. # TODO: Check that it's actually a switch .. ! if len(above.inputs) != 3: return False mask, ift, iff = above.inputs if not mask.broadcastable: return False if not (not mask.broadcastable[0] and all(mask.broadcastable[1:])): return False if any(ift.broadcastable) or any(iff.broadcastable): return False new_op = GpuMaskedCAReduce() return [new_op(mask, ift, iff)]
36,576
def cli():
    """Interact with the https://n26.com API via the command line."""
    # NOTE(review): the body is intentionally empty -- this looks like a CLI
    # group entry point to which subcommands are attached via a decorator
    # defined elsewhere in the file; confirm against the module's imports.
36,577
def ticks_lt(exact_price):
    """
    Yield every tick strictly below the given price, highest first.

    >>> list(ticks_lt(Decimal('0.35')))
    [Decimal('0.34'), Decimal('0.33'), Decimal('0.20'), Decimal('0.10'), Decimal('0.01')]

    >>> list(ticks_lt(Decimal('0.20')))
    [Decimal('0.10'), Decimal('0.01')]

    >>> list(ticks_lt(Decimal('0.0001')))
    []
    """
    # Index of the first tick >= exact_price; everything before it is below.
    cutoff = bisect.bisect_left(_ALL_TICKS, exact_price)
    return (tick for tick in reversed(_ALL_TICKS[:cutoff]))
36,578
def _decode_to_string(to_decode): """ This function is needed for Python 3, because a subprocess can return bytes instead of a string. """ try: return to_decode.decode("utf-8") except AttributeError: # bytesToDecode was of type string before return to_decode
36,579
def nn(x_dict):
    """Shallow neural network: two 256-unit dense layers and a 10-way output.

    ``x_dict`` must contain the input tensor under the key ``"images"``.
    Returns the un-activated output logits.
    """
    inputs = x_dict["images"]
    hidden_1 = tf.layers.dense(inputs, 256)
    hidden_2 = tf.layers.dense(hidden_1, 256)
    logits = tf.layers.dense(hidden_2, 10)
    return logits
36,580
def find_negamax_move_alphabeta(game_state, valid_moves, depth, alpha, beta, turn_multiplier):
    """
    NegaMax algorithm with alpha beta pruning. Alpha beta pruning eliminates
    the need to check all moves within the game_state tree when a better branch
    has been found or a branch has too low of a score.
    alpha: upper bound (max possible); beta: lower bound (min possible)
    If max score is greater than alpha, that becomes the new alpha value.
    If alpha becomes >= beta, break out of branch.
    White is always trying to maximise score and black is always trying to
    minimise score. Once the possibility of a higher max or lower min has been
    eliminated, there is no need to check further branches.

    Side effect: writes the chosen root move into the module-level
    ``next_move`` when ``depth == set_depth`` (i.e. at the root call).
    Returns the best score from the current player's point of view.
    """
    global next_move
    # Leaf: evaluate statically from the current player's perspective.
    if depth == 0:
        return turn_multiplier * score_board(game_state)

    max_score = -checkmate_points
    for move in valid_moves:
        game_state.make_move(move)
        next_moves = game_state.get_valid_moves()
        # Negate the child's score and swap/negate the alpha-beta window.
        score = -find_negamax_move_alphabeta(game_state, next_moves, depth - 1,
                                             -beta, -alpha, -turn_multiplier)
        if score > max_score:
            max_score = score
            # Only record the move at the root of the search tree.
            if depth == set_depth:
                next_move = move
        game_state.undo_move()
        # Pruning
        if max_score > alpha:
            alpha = max_score
        if alpha >= beta:
            break
    return max_score
36,581
def create_segment(context_dir: str, segment: dict, is_be: bool):
    """Create segment in IDA and map in the data from the file

    :param context_dir: Parent directory of the context files
    :param segment: Segment information from _index.json
    :param is_be: True if processor is big endian, otherwise False
    """
    input_name = get_input_name()
    # Skip segment creation for the input binary itself -- IDA has
    # already created its segments when the file was loaded.
    if Path(segment['name']).name != input_name:
        ida_seg = idaapi.segment_t()
        ida_seg.start_ea = segment['start']
        ida_seg.end_ea = segment['end']
        # NOTE(review): bitness is set from endianness here, which looks
        # like a field mix-up (bitness usually encodes 16/32/64) -- confirm.
        ida_seg.bitness = 1 if is_be else 0
        if segment['permissions']['r']:
            ida_seg.perm |= ida_segment.SEGPERM_READ
        if segment['permissions']['w']:
            ida_seg.perm |= ida_segment.SEGPERM_WRITE
        if segment['permissions']['x']:
            ida_seg.perm |= ida_segment.SEGPERM_EXEC
            # Executable segments are created as CODE, others as DATA.
            idaapi.add_segm_ex(ida_seg, Path(segment['name']).name, 'CODE',
                               idaapi.ADDSEG_OR_DIE)
        else:
            idaapi.add_segm_ex(ida_seg, Path(segment['name']).name, 'DATA',
                               idaapi.ADDSEG_OR_DIE)
    # Map in the segment's saved bytes when a content file was captured.
    if segment['content_file']:
        write_segment_bytes(segment['start'],
                            PurePath(context_dir, segment['content_file']))
36,582
def sum_kernel(X, Y, kernels = None):
    """
    Meta kernel: evaluate each configured kernel on (X, Y) and return the sum.

    Each entry of ``kernels`` is a dict with a ``"class"`` key naming a kernel
    function in this module's globals and a ``"parameters"`` dict of keyword
    arguments for it.
    """
    total = 0
    for spec in kernels:
        print("Doing", spec["class"], "with parameters:", spec["parameters"])
        # Resolve the kernel function by name from module globals and
        # accumulate its value for (X, Y).
        kernel_fn = globals()[spec["class"]]
        total = total + kernel_fn(X, Y, **spec["parameters"])
    return total
36,583
def pretvori_v_sekunde(niz):
    """
    Convert a string representing a track length in hh:mm:ss format
    into the total number of seconds.
    """
    hours, minutes, seconds = (int(part) for part in niz.split(":"))
    return hours * 3600 + minutes * 60 + seconds
36,584
def _get_profiling_data(filename): """Read a given file and parse its content for profiling data.""" data, timestamps = [], [] try: with open(filename, "r") as f: file_data = f.readlines() except Exception: logging.error("Could not read profiling data.", exc_info=True) raise SystemExit(1) for line in file_data: if line == "\n": continue line = line.strip() line_data = line.split(" ") if len(line_data) != 3: continue _, mem_usage, timestamp = line.split(" ") data.append(float(mem_usage)) timestamps.append(float(timestamp)) if not data: logging.error("No samples to parse in {}.".format(filename)) raise SystemExit(1) return {"data": data, "timestamp": timestamps}
36,585
def parse_dataset_name(dataset_name: str) -> "tuple[str, DatasetPart]":
    """
    Split the string of the dataset name into two parts: dataset source name
    (e.g., cnc_in_domain) and dataset part (e.g., train).

    The text after the last underscore is looked up (upper-cased) in the
    ``DatasetPart`` enum; everything before it is the source name.

    :param dataset_name: full dataset name, e.g. ``cnc_in_domain_train``.
    :return: dataset source name (e.g., cnc_in_domain) and the matching
        ``DatasetPart`` member (e.g., ``DatasetPart.TRAIN``).
    """
    # rsplit with maxsplit=1 so underscores inside the source name survive.
    name_parts = dataset_name.rsplit('_', 1)
    dataset_source = name_parts[0]
    dataset_part = DatasetPart[name_parts[1].upper()]
    return dataset_source, dataset_part
36,586
def test_filter_schema():
    """Test that FILTER_SCHEMA accepts a valid filter config unchanged."""
    conf = {
        'include_domains': ['light'],
        'include_entities': ['switch.kitchen'],
        'exclude_domains': ['cover'],
        'exclude_entities': ['light.kitchen']
    }
    filt = FILTER_SCHEMA(conf)
    # The validated filter object should echo the input config back verbatim.
    assert filt.config == conf
36,587
def get_analytics_zoo_classpath():
    """
    Get and return the jar path for analytics-zoo if it exists.

    Resolution order:
      1. the ``BIGDL_CLASSPATH`` environment variable, when set;
      2. the single jar under ``share/lib`` relative to the package root.

    :return: the jar path, or an empty string when no jar can be found.
    :raises AssertionError: if more than one jar exists under ``share/lib``.
    """
    # Single env lookup (the original read the variable twice via
    # os.getenv followed by os.environ[...]).
    classpath = os.environ.get("BIGDL_CLASSPATH")
    if classpath:
        return classpath
    jar_dir = os.path.abspath(__file__ + "/../../")
    jar_paths = glob.glob(os.path.join(jar_dir, "share/lib/*.jar"))
    if jar_paths:
        assert len(jar_paths) == 1, "Expecting one jar: %s" % len(jar_paths)
        return jar_paths[0]
    return ""
36,588
def add_predictions_to_tsv(file: str):
    """
    takes a tsv file that already contains the following columns
    ['id', 'target_domain', 'source_domain', 'in_sent', 'out_sent',
     'lemma_pos', 'word_type']
    and adds predictions from our metaphor generation models.
    The resulting extended file is stored separately (same path with a
    '_w_preds.tsv' suffix).

    :param file: path to a tsv file without predictions from controlled and
        free metaphor generation
    """
    '''
    # create data sets for controlled metaphor generation
    contr_train_set, _, contr_eval_set = \
        split_train_dev_test(CSV_FULL_METAPHOR_SET, True)

    # create data sets for free metaphor generation
    free_train_set, _, free_eval_set = \
        split_train_dev_test(CSV_FULL_METAPHOR_SET, False)
    '''
    # redeclare the eval sets, since we are predicting on Kevin's data
    kevin_frame = \
        pd.read_csv(file, sep='\t', header=None,
                    names=['id', 'target_domain', 'source_domain', 'in_sent',
                           'out_sent', 'lemma_pos', 'word_type'])
    contr_eval_set = MetaphorDataset(kevin_frame, True)
    free_eval_set = MetaphorDataset(kevin_frame, False)

    # load the fine-tuned model for controlled metaphor generation
    contr_model = T5ForConditionalGeneration.from_pretrained(
        RESOURCES_PATH + 'fine_tuned_models/t5-base-meta-gen-controlled/')

    # load the fine-tuned model for free metaphor generation
    free_model = T5ForConditionalGeneration.from_pretrained(
        RESOURCES_PATH + 'fine_tuned_models/t5-base-meta-gen-free/')

    # load the gold standard frame for comparison
    eval_frame = contr_eval_set.metaphors_frame

    # add controlled predictions to dataframe
    eval_frame['out_pred_controlled'] = _predict(contr_model, contr_eval_set)

    # add free predictions to dataframe
    eval_frame['out_pred_free'] = _predict(free_model, free_eval_set)

    # attach metaphor_level column to metaphor_frame
    # NOTE(review): seq() is applied to a DataFrame selection here; confirm
    # the pyfunctional wrapper iterates it row-wise as intended.
    metaphor_level = (seq(
        eval_frame[['target_domain', 'source_domain']])
        .map(lambda arr: _get_metaphor_level(arr[0], arr[1]))
        .list())
    eval_frame['metaphor_level'] = metaphor_level

    # attach metaphor_type column to metaphor_frame
    metaphor_type = (seq(
        eval_frame[['target_domain', 'source_domain']])
        .map(lambda arr: _get_metaphor_type(arr[0], arr[1]))
        .list())
    eval_frame['metaphor_type'] = metaphor_type

    # attach unseen column to metaphor_frame (whether metaphor was in training set)
    train_frame = get_train_frame(CSV_FULL_METAPHOR_SET)
    train_metas = (train_frame[['target_domain', 'source_domain']]
                   .drop_duplicates()
                   .values
                   .tolist())
    metaphor_unseen = (seq(
        eval_frame[['target_domain', 'source_domain']].values.tolist())
        .map(lambda lst: lst not in train_metas)
        .list())
    eval_frame['metaphor_unseen'] = metaphor_unseen

    # store eval_frame next to the input, with a '_w_preds' suffix
    eval_frame.to_csv(file[:-4] + '_w_preds.tsv', sep='\t', index=False)
36,589
def _add_layers(sequential, num_layers, num_units, kernel_init, activation,
                normalizer, residual_connections):
    """Append ``num_layers`` `_Layer` instances to a tf.keras.Sequential.

    Only layers after the first may use residual connections; each layer
    receives its own fresh ``normalizer()`` instance.
    """
    for layer_index in range(num_layers):
        use_residual = layer_index > 0 and residual_connections
        sequential.add(
            _Layer(num_units, kernel_init, activation, normalizer(),
                   use_residual))
36,590
def plot_pq(df_pq, df_pq_std=None, columns=('mae', 'r2s'), title='Performance-Quantile'):
    """Plot the quantile performance plot from the prepared metrics table.

    Args:
        df_pq (pd.DataFrame): The QP table information with mean values.
        df_pq_std (pd.DataFrame): The QP table information with std values;
            when given, shaded +/- std bands are drawn around each curve.
        columns (tuple): Which column of the qp table to be plotted,
            limited to 2 items.
        title (str): An optional name of the figure.

    Returns:
        plt.Figure: A figure of the resulting QP plot.

    Raises:
        ValueError: if more than two columns are requested.
    """
    fig, ax1 = plt.subplots(figsize=(5, 3))
    if len(columns) == 1:
        # Single metric: one red curve on the primary axis.
        ax1.plot(df_pq['quantile'], df_pq[columns[0]], 'r', label=columns[0])
        ax1.set_ylabel(columns[0].upper())
        ax1.legend(loc=1)
        if df_pq_std is not None:
            ax1.fill_between(df_pq['quantile'],
                             df_pq[columns[0]] - df_pq_std[columns[0]],
                             df_pq[columns[0]] + df_pq_std[columns[0]],
                             color='r', alpha=0.5
                             )
    elif len(columns) == 2:
        # Two metrics: red on the left axis, green on a twinned right axis.
        _ = ax1.plot(df_pq['quantile'], df_pq[columns[0]], 'r', label=columns[0])
        ax1.set_ylabel(columns[0].upper())
        ax2 = ax1.twinx()
        _ = ax2.plot(df_pq['quantile'], df_pq[columns[1]], 'g', label=columns[1])
        ax2.set_ylabel(columns[1].upper())
        ax1.legend(loc=1)
        ax2.legend(loc=4)
        if df_pq_std is not None:
            ax1.fill_between(df_pq['quantile'],
                             df_pq[columns[0]] - df_pq_std[columns[0]],
                             df_pq[columns[0]] + df_pq_std[columns[0]],
                             color='r', alpha=0.5
                             )
            ax2.fill_between(df_pq['quantile'],
                             df_pq[columns[1]] - df_pq_std[columns[1]],
                             df_pq[columns[1]] + df_pq_std[columns[1]],
                             color='g', alpha=0.5
                             )
    else:
        raise ValueError('Too many columns. Currently only two are allowed.')
    ax1.set_xlabel('Quantile')
    ax1.set_title(title)
    plt.show()
    return fig
36,591
def protoToOpenAPISchemaRecursive(lines, schemas, schemaPrefix, basename):
    """
    Recursively create a schema from lines read from a proto file. This method is recursive
    because proto messages can contain internal messages and enums. If this is the case the
    method will call itself recursively.
    :param lines: list of lines read from a proto file.
    :param schemas: dictionary of schemas to which the new definitions will be added.
    :param basename: basename respectively prefix which is added before the name of a schema.
        This is used to prefix internal messages/enums with the name of the message containing it.
    :return: the filled schemas dictionary and the current processing index. The return value
        should not be used because it deals with parameters only required for the recursion.
    """
    # create a new schema
    schema = {}
    # save the current name for the schema
    name = ""
    # index for the current line parsed
    i = 0;
    # iterate till end of file
    while (i < len(lines)):
        # get current line and remove whitespaces at front and end
        line = lines[i].strip()
        # replace multiple whitepaces with a single one, see
        # https://stackoverflow.com/questions/2077897/substitute-multiple-whitespace-with-single-whitespace-in-python
        line = ' '.join(line.split())
        # increase index
        i += 1
        # if the line is irrelevant for parsing, continue the loop
        if skipLine(line):
            continue
        # closing curly brackets indicate that a message/enum definition has ended
        if line.startswith('}'):
            # return schemas and current index so that loop which recursively
            # called this can resume at the correct location
            return schemas, i
        # test if line indicates an internal message/enum
        if name != "" and (line.startswith('message') or line.startswith('enum')):
            # name is already specified but there is a message/enum, so it is internal
            # recursively call this method but splice the lines to begin at the
            # definition of the internal type
            _, processedLines = protoToOpenAPISchemaRecursive(
                lines[(i-1):len(lines)-1], schemas, schemaPrefix,
                basename=(name + '.'))
            # move the index of this iteration after the definition of the internal type
            i += processedLines
            continue
        # type is a message
        if line.startswith('message'):
            # set message flag
            isMessage = True
            # extract name
            name = basename + line.split(' ')[1]
            if basename == '':
                name = schemaPrefix + name
            # create schema and add to schemas
            schemas[name] = schema
            schema['type'] = 'object'
            schema['properties'] = {}
            continue
        # type is an enum
        if line.startswith('enum'):
            # set message flag to false
            isMessage = False
            # extract name
            name = basename + line.split(' ')[1]
            if basename == '':
                name = schemaPrefix + name
            # create schema for enum and add to schemas
            schemas[name] = schema
            schema['type'] = 'string'
            schema['enum'] = []
            continue
        # if item is an enum, parse lines as its values
        # NOTE(review): ``isMessage`` is only assigned when a message/enum
        # header has been seen; a field line before any header raises
        # NameError -- confirm input always starts with a header.
        if not isMessage:
            enumValue = line.split('=')[0].strip()
            # ignore values called unknown
            if enumValue == "UNKNOWN":
                continue
            else:
                schema['enum'].append(enumValue)
            continue
        # extract information for field
        split = line.split(' ')
        # option is repeated, optional, ...
        option = split[0]
        # fieldType is string, uint64, reference to another type, ...
        fieldType = split[1]
        # the name of the field
        fieldName = split[2]
        # create a property for the field
        prop = {}
        # if the field option is repeated add the property as an array, else normally
        if option == "repeated":
            properties = schema['properties']
            properties[fieldName] = {}
            properties[fieldName]['type'] = 'array'
            properties[fieldName]['items'] = prop
        else:
            schema['properties'][fieldName] = prop
        # add property fields based on field type and print an error if it could not be done
        if not addTypeToProp(fieldType, prop, schemaPrefix, schemas):
            print('Could not parser fieldType[' + fieldType + '] into an openAPI property')
    return schemas, i
36,592
def to_binary(s: typing.Union[str, bytes], encoding='utf8') -> bytes:
    """Cast function.

    Encode ``s`` with ``encoding`` unless it is already ``bytes``.

    :param s: object to be converted to bytes.
    :param encoding: codec used when ``s`` is a string (default ``utf8``).
    """
    if isinstance(s, bytes):
        return s
    return bytes(s, encoding=encoding)
36,593
def _generate_proto():
    """Generate MetricsPayload for global_monitor.send().

    Generator: yields a payload every time METRICS_DATA_LENGTH_LIMIT data
    entries have been accumulated, plus a final partial payload.

    NOTE(review): uses dict.iteritems(), so this module is Python 2.
    """
    proto = metrics_pb2.MetricsPayload()

    # Key: Target, value: MetricsCollection.
    collections = {}

    # Key: (Target, metric name) tuple, value: MetricsDataSet.
    data_sets = {}

    count = 0
    for (target, metric, start_time, end_time, fields_values
         ) in state.store.get_all():
        for fields, value in fields_values.iteritems():
            # Flush the payload once it reaches the size limit and start
            # a fresh one (bookkeeping caches must be reset with it).
            if count >= METRICS_DATA_LENGTH_LIMIT:
                yield proto
                proto = metrics_pb2.MetricsPayload()
                collections.clear()
                data_sets.clear()
                count = 0

            # One MetricsCollection per target, created lazily.
            if target not in collections:
                collections[target] = proto.metrics_collection.add()
                target.populate_target_pb(collections[target])
            collection = collections[target]

            # One MetricsDataSet per (target, metric), created lazily.
            key = (target, metric.name)
            new_data_set = None
            if key not in data_sets:
                new_data_set = metrics_pb2.MetricsDataSet()
                metric.populate_data_set(new_data_set)

            data = metrics_pb2.MetricsData()
            metric.populate_data(data, start_time, end_time, fields, value)

            # All required data protos have been successfully populated. Now we can
            # insert them in serialized proto and bookeeping data structures.
            if new_data_set is not None:
                collection.metrics_data_set.add().CopyFrom(new_data_set)
                data_sets[key] = collection.metrics_data_set[-1]
            data_sets[key].data.add().CopyFrom(data)
            count += 1

    # Emit whatever is left after the loop.
    if count > 0:
        yield proto
36,594
def visualize_q_percolation(Q):
    """Plot Q-value percolation as a line chart and show the figure.

    :param Q: values to plot (passed straight to ``plt.plot``).
        NOTE(review): the original docstring called this "a python dictionary
        of variables (keys) and values", but no ``.values()`` extraction
        happens -- confirm the expected input type.
    :return: None; displays a 10x8-inch figure via ``plt.show()``.
    """
    values = Q
    plt.ylabel('Number of Components for Q-Dimension')
    plt.title('Q-Value Percolation')
    plt.plot(values)
    fig = matplotlib.pyplot.gcf()
    fig.set_size_inches(10, 8)
    plt.show()
36,595
def multilabel(ctx, text_column, label_names, balance):
    """Train Multilabel classification

    This will use columns passed as the last argument [LABEL_NAMES]
    Each column should be binary integer encoded

    ``ctx`` supplies shared CLI options (input_data, max_seq_len, batch_size,
    epochs, model_name, export_path). ``balance`` enables positive-class
    weighting computed from the label frequencies.
    """
    texts, labels = load_multilabel_data(
        ctx["input_data"], text_column=text_column, label_names=list(label_names)
    )
    if balance:
        # Per-label positive weight derived from label frequency.
        pos_weight = (labels.sum(axis=0) / labels.shape[1]).tolist()
    num_labels = labels.shape[1]
    n_train_examples = len(texts)
    training_dataloader = create_training_dataloader(
        texts, labels, ctx["max_seq_len"], ctx["batch_size"], multilabel=True
    )
    bert_model = get_bert_multilabel_model(num_labels=num_labels)
    bert_opt = get_bert_opt(
        bert_model, n_train_examples, ctx["batch_size"], ctx["epochs"]
    )
    # Train, passing pos_weight only when balancing was requested.
    if balance:
        trained_model = run_model_training(
            bert_model,
            bert_opt,
            training_dataloader,
            ctx["epochs"],
            pos_weight=pos_weight,
        )
    else:
        trained_model = run_model_training(
            bert_model, bert_opt, training_dataloader, ctx["epochs"]
        )
    # Default model name when the caller did not provide one.
    if not ctx["model_name"]:
        ctx["model_name"] = "multilabel"
    training_parameters = create_training_parameters(
        num_labels=num_labels,
        problem_type="multilabel",
        max_seq_len=ctx["max_seq_len"],
        epochs=ctx["epochs"],
        label_names=label_names,
    )
    save_model(
        trained_model, training_parameters, ctx["export_path"], ctx["model_name"]
    )
36,596
def test_config_from_toml_001():
    """
    Arrange: Create an `ElastalkConf` instance.
    Act: Configure the instance from configuration 001.
    Assert: The configuration matches expectations (global values, global
        blobbing, and the per-index settings for 'cats', 'dogs' and 'pigs').
    """
    config = ElastalkConf()
    # Apply every TOML fragment of configuration 001 in order.
    for toml in get_config('001'):
        config.from_toml(toml_=toml)

    # Assert: global values
    assert config.sniff_on_start is False, \
        '`sniff_on_start` should match the value in the configuration file.'
    assert config.maxsize == 200, \
        '`maxsize` should match the value in the configuration file.'

    # Assert: global blobbing
    assert config.blobs.enabled is True, \
        'Blobbing should be enabled.'
    assert config.blobs.excluded == {"owner_", "group_"}, \
        'The list of global exclusions should match the expectations.'

    # Assert: index mappings for the 'cats' index
    assert config.indexes['cats'].mappings == "cats/mappings.json", \
        'The path to the mappings document should match the expectation.'
    # Validate the contents of the mappings document.
    mappings_doc = config.indexes['cats'].mappings_document(
        root=get_config_root('001')
    )
    assert 'mappings' in mappings_doc, \
        "The mappings document should contain the 'mappings' key."

    # Assert: blobbing for the 'cats' index
    assert not config.indexes['cats'].blobs.enabled, \
        "Blobbing should be disabled for the 'cats' index."
    assert config.indexes['cats'].blobs.excluded == set(), \
        "The list of exclusions for the 'cats' index should be empty."
    assert config.blobs_enabled(index='cats'), \
        "Blobbing for the 'cats' index should be enabled per the global " \
        "configuration."
    assert (
        config.blob_exclusions(index='cats') == config.blobs.excluded
    ), "Blobs exclusions for the 'cats' index should match the global " \
       "exclusions."

    # Assert: index mappings for the 'cats' index
    assert config.indexes['cats'].mappings == "cats/mappings.json", \
        'The path to the mappings document should match the expectation.'
    # Validate the contents of the mappings document.
    assert config.indexes['dogs'].mappings_document(
        root=get_config_root('001')) is None, \
        "Index mapping should not be configured for the 'dogs' index."

    # Assert: blobbing for the 'dogs' index
    assert config.indexes['dogs'].blobs.enabled is True, \
        "Blobbing should be enabled for the 'dogs' index."
    assert config.indexes['dogs'].blobs.excluded == {"name", "breed"}, \
        "The list of blob exclusions for the 'dogs' index should match " \
        "the expectations."
    assert config.blobs_enabled(index='dogs'), \
        "Blobbing for the 'dogs' index should be enabled."
    assert (
        config.blob_exclusions(index='dogs')
        == config.blobs.excluded | config.indexes['dogs'].blobs.excluded
    ), "Blobs exclusions for the 'dogs' index should contain the global " \
       "exclusions and those configured for the index."

    # Assert: mappings for the 'pigs' index
    assert config.indexes['pigs'].mappings == "pigs/mappings.json", \
        'The path to the mappings document should match the expectation.'
    assert config.indexes['pigs'].mappings_document(
        root=get_config_root('001')
    ) is None, \
        'The mapping document should be `None` because the file does not ' \
        'exist.'
36,597
def load_numpy(data_path, save_disk_flag=True):
    """Load a NumPy array, optionally from a gzip-compressed copy.

    :param data_path: path to the ``.npy`` file; with ``save_disk_flag``
        the compressed file is expected at ``<data_path>.gz``.
    :param save_disk_flag: if True, read the gzip-compressed variant
        (saves disk space but is slower to load).
    :return: the loaded array.
    """
    if save_disk_flag:
        # Save space but slow. Use a context manager so the file handle is
        # always closed (the original leaked the GzipFile).
        with gzip.GzipFile(f'{data_path}.gz', "r") as f_data:
            data = np.load(f_data)
    else:
        data = np.load(data_path)
    return data
36,598
def _teardown_logger(logger: logging.Logger, handler_list: List[logging.Handler]) -> None: """Closes and removes log handlers from the logger.""" for handler in handler_list: logger.removeHandler(handler) handler.close()
36,599