content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def format_priorities(df):
    """Prepare work-management priority rows for insertion into the SQL table.

    Keeps only the columns the table expects and removes duplicate rows.
    """
    columns = [
        'WellName', 'Corp_ID', 'Facility_ID', 'Area', 'Route',
        'Latitude', 'Longitude', 'Priority', 'Priority_Level',
        'Description', 'Assigned_To', 'chokeStatusCreatedBy',
        'chokeStatusDate', 'chokeStatusType', 'chokeStatusAction',
        'chokeStatusComments', 'Gas_Production', 'CleanAvgGas',
    ]
    return df[columns].drop_duplicates()
8bbbb918578b765594604ce557ffa8fc5eb99095
73,236
def normalize(value, dict_type=dict):
    """Normalize values.

    Recursively normalizes dict keys to be lower case, no surrounding
    whitespace, underscore-delimited strings.  Lists and tuples are
    normalized element-wise; every other value passes through unchanged.
    """
    if isinstance(value, dict):
        result = dict_type()
        for raw_key, item in value.items():
            clean_key = raw_key.strip().lower().replace(' ', '_')
            result[clean_key] = normalize(item, dict_type)
        return result
    if isinstance(value, list):
        return [normalize(item, dict_type) for item in value]
    if isinstance(value, tuple):
        return tuple(normalize(item, dict_type) for item in value)
    return value
d926d49c2c742597ed01959c4e410f3e949c0ee0
73,239
import torch


def gather_one(x, indices, *, dim):
    """Gather a single element along ``dim`` at every other position.

    ``indices`` has one fewer dimension than ``x``; the gathered
    dimension is dropped from the result.
    """
    expanded = indices.unsqueeze(dim)
    picked = torch.gather(x, dim=dim, index=expanded)
    return picked.squeeze(dim)
60cdecc91561508dbc6a44a8a2129a4059f03173
73,240
def bin_from_float(number: float):
    """Return the binary representation of a float as a string.

    The integer part keeps Python's ``bin()`` formatting (``0b`` prefix,
    truncated toward zero); the fractional part is built by repeated
    doubling, emitting one bit per *decimal* digit of the fraction.

    >>> bin_from_float(446.15625)
    '0b110111110.00101'
    """
    # Integer part, truncated toward zero.
    integer = int(number)
    # Fractional part as a positive value in [0, 1).
    fractional = abs(number) % 1
    # Number of bits to emit: one per decimal digit of the fraction.
    # NOTE(review): this relies on str() producing plain decimal notation;
    # scientific notation (very small fractions) or float-repr noise would
    # skew the count — confirm inputs stay in a benign range.
    count = len(str(fractional)[2:])
    binary = []
    while (count > 0):
        # Double the fraction; carrying past 1 means the next bit is 1.
        fractional = fractional * 2
        if fractional >= 1:
            fractional = fractional - 1
            binary.append('1')
        else:
            binary.append('0')
        count -= 1
    if binary:
        return '{}.{}'.format(bin(integer), ''.join(binary))
    return bin(integer)
cac5aa3ace34248e2eb706cadd65b4384f8cbb4b
73,243
def matches(character):
    """Build a predicate that tests whether a token stream starts with
    the given character (falsy for an empty stream)."""
    def predicate(tokens):
        return tokens and tokens[0] == character
    return predicate
f38be7196b6214a50d211c79af94ea8861048c7c
73,244
def binomial(n, k):
    """Number of ways to choose ``k`` objects from ``n``.

    C(n, k) = n! / (k! * (n - k)!)

    Computed with the multiplicative formula; every intermediate division
    is exact, so the result is an exact integer.

    :param n: total number of objects (non-negative integer)
    :param k: number of objects chosen
    :return: the binomial coefficient, or 0 when ``k`` is out of range
    """
    # Out-of-range k: no ways to choose.  The original returned 1 for
    # negative k (the loop simply never ran) — that was a latent bug.
    if k < 0 or k > n:
        return 0
    # Symmetry keeps the loop short.
    k = min(k, n - k)
    res = 1
    for i in range(1, k + 1):
        # Multiply by (n - i + 1) / i; the division is always exact here.
        res = res * (n - i + 1) // i
    return res
a94226ad56de79cd598196eb0cc04430daa3993d
73,246
def _get_base_href(tree): """ Return href value of a base tag """ base_hrefs = tree.xpath('//base/@href') if base_hrefs: return base_hrefs[0]
e1c7d4e2125f9dbb30eda651d690312cfd96d5a7
73,249
import colorsys


def hsv_to_rgb(h: float, s: float, v: float) -> int:
    """Convert hue, saturation and value fractions to a packed RGB int.

    :param h: hue part in percents
    :param s: saturation part in percents
    :param v: value part in percents
    :return: 0xRRGGBB integer
    """
    red, green, blue = (int(c * 255) for c in colorsys.hsv_to_rgb(h, s, v))
    # The three channels occupy disjoint bit ranges, so OR == addition.
    return (red << 16) | (green << 8) | blue
899a5d15b73f6dfdd00eb95f9c4ecba9b5b3aa68
73,250
def is_actor_alive(actor, time=0.5):
    """Wait up to ``time`` seconds for the actor to terminate.

    :param actor: object exposing ``join(timeout)`` and ``is_alive()``
    :param time: maximum number of seconds to wait (default 0.5)
    :return: the actor's ``is_alive()`` status after the wait
    """
    actor.join(time)
    return actor.is_alive()
0da52e131c6fdd82055a01242d499f68042d4242
73,254
def initializeStateFlags(*args):
    """Return an initial state set built from zero or more state flags.

    Example from natspeak_spell:
        state = nsformat.initializeStateFlags(nsformat.flag_no_space_next)
    """
    return {flag for flag in args}
6835d5ea97ed85a46eec7876d417cf4197866de6
73,255
import pickle


def do_unpickle(filename):
    """Unpickle and return the object stored in ``filename``.

    NOTE: unpickling is only safe for trusted files.
    """
    with open(filename, "rb") as handle:
        return pickle.load(handle)
71b483f4764a790cb5c9beeb862a247afa13d6af
73,260
def is_host_name_unique(yaml, hostname):
    """Return True if at most one TAP's host interface uses ``hostname``."""
    if "taps" not in yaml:
        return True
    occurrences = sum(
        1
        for tap_iface in yaml["taps"].values()
        if tap_iface["host"]["name"] == hostname
    )
    return occurrences < 2
57dc2e7d4056e7088a84875ac5dfff0fd6792fb5
73,265
def broadcast_shape(shape, length):
    """Left-pad ``shape`` with ones to ``length``, per NumPy broadcasting.

    Shapes already at least ``length`` long are returned unchanged.
    """
    missing = length - len(shape)
    if missing <= 0:
        return shape
    return tuple([1] * missing + list(shape))
6b20779d06d583c1c77de784ac575c6137df3821
73,266
def children_of_model(self, model):
    """Return all children of this entity whose type matches ``model``.

    This method extends the mosaik.scenario.Entity class.

    Args:
        model: The model (type) to filter the children by.

    Returns:
        A list of matching child entities.
    """
    return [child for child in self.children if child.type == model]
4845fe98f9dd7951e0653f26d1e34e15af57b0fa
73,269
def parse_line_update_success(tokens):
    """Parse a log line recording a successful write/update request.

    :param tokens: tokenized log line
    :return: (latency, name, name_server, local_name_server)
    """
    return (
        float(tokens[2]),
        tokens[1],
        int(tokens[4]),
        int(tokens[5]),
    )
7cec3ef424a4b606b9e836e54d20a155d6bb016c
73,271
import requests


def get_json_resource(resource_uri):
    """Fetch a JSON resource and return the decoded Python object.

    :param resource_uri: uri to request
    :return: decoded JSON (dict/list), or None on any non-200 status
    """
    response = requests.get(resource_uri)
    if response.status_code != requests.codes.ok:
        return None
    return response.json()
41e97ab464a12298fc718d8328e90ae4e8efd884
73,276
def _is_in_placeholders(op, func_arg_placeholders): """Checks whether any output of this op is in func_arg_placeholders.""" return op.values() and any(x.name in func_arg_placeholders for x in op.values())
3da651b2924680324d702963f9c09aa5c9b316d9
73,277
def bool_(value: bool) -> bytearray:
    """Pack a bool into Starbound's single-byte wire format.

    :param value: bool
    :return: one-byte bytearray holding 0 or 1
    """
    packed = bytearray(1)
    packed[0] = int(value)
    return packed
d92a92eda79d60110e35c5932b064e66f106d415
73,278
def convindicetoplanetype(listindice):
    """Convert a Miller plane (3 indices) into its plane-type code.

    Absolute indices are sorted in decreasing order and packed into a
    three-digit integer, e.g. [1, -2, -1] -> 211.
    """
    magnitudes = sorted((abs(idx) for idx in listindice), reverse=True)
    return 100 * magnitudes[0] + 10 * magnitudes[1] + magnitudes[2]
af5a589e8d6284c4a4ed2bf478a85a0440c5ce22
73,286
def process_inputs(data):
    """Process user inputs and apply default values if parameters not specified.

    Parameters
    ----------
    data : dict
        User input containing model parameters

    Returns
    -------
    out : dict
        Model data in format that allows construction of the Pyomo model
    """
    # Asset symbols come from the initial-weights mapping.
    assets = list(data['initial_weights'].keys())

    # Horizon length: number of intervals given for the first asset.
    periods = [len(v.keys()) for k, v in data['estimated_returns'].items()][0]

    # Flatten estimated returns to {(asset, period): value}.
    estimated_returns = {
        (asset, int(period)): ret
        for asset, series in data['estimated_returns'].items()
        for period, ret in series.items()
    }

    parameters = data.get('parameters', {})

    return {
        'S_ASSETS': assets,
        'S_PERIODS': range(1, periods + 1),
        'S_TIME_INDEX': range(1, periods + 2),
        'P_RETURN': estimated_returns,
        'P_INITIAL_WEIGHT': data['initial_weights'],
        'P_MIN_WEIGHT': parameters.get('min_weight', -1),
        'P_MAX_WEIGHT': parameters.get('max_weight', 1),
        'P_MIN_CASH_BALANCE': parameters.get('min_cash_balance', 0),
        'P_MAX_LEVERAGE': parameters.get('max_leverage', 1),
        'P_MAX_TRADE_SIZE': parameters.get('max_trade_size', 1),
        'P_TRADE_AVERSION': parameters.get('trade_aversion', 1),
        'P_TRANSACTION_COST': parameters.get('transaction_cost', 0.001),
    }
8ebd4d54d24c51a97ca9fa1a269623846e085ecd
73,288
def nan_check(test_dataframe):
    """Report whether the data frame contains any missing value.

    :param test_dataframe: pandas data frame
    :return: bool
    """
    missing_mask = test_dataframe.isnull()
    return missing_mask.values.any()
c3eaf976d4d10001800f1e0946da4ae36f6887a7
73,289
def V_d2bV_by_V_Approx(V):
    """Approximate V*d^2(bV)/dV^2 for a single-mode fiber (Marcuse 1979).

    Good to 1% for the fundamental mode when 1.4 < V < 2.4; the value is
    needed to determine the waveguide dispersion.

    Args:
        V: V-parameter of the fiber [--]

    Returns:
        V*d^2(bV)/dV^2 [--]
    """
    deviation = 2.834 - V
    return 0.080 + 0.549 * deviation ** 2
c4432a58de60619b8780c866a430667f2e5dc72b
73,290
def read_parallel_batches(response):
    """Number of parallel batches in which to split the requests.

    ``response`` is a dyJSON-translated dynamoDB ``get_item`` result: the
    table item lives under the 'Item' key.  Defaults to 1 unless the item
    specifies a value greater than 1.

    :param response: dict with the dynamoDB item under 'Item'
    :return: number of parallel batches (>= 1)
    """
    config = response['Item']
    # .get() covers the missing-key case; also treat None and values <= 1
    # as "no parallelism".  (The original used the non-idiomatic
    # ``== None`` and ``not in config.keys()``; behavior is unchanged.)
    parallel = config.get('parallel_batches')
    if parallel is None or parallel <= 1:
        return 1
    return parallel
50d4ea5327c47b69766d261fbb90adcf10ee9fa8
73,293
def calculate_probabilities(previous_probabilities):
    """Calculate next-generation genotype probabilities (AA, Aa, aa).

    Each output entry is the probability mass flowing into that genotype
    from the previous generation's genotypes.

    :param previous_probabilities: dict with keys "AA", "Aa", "aa"
    :return: dict with the new probabilities for "AA", "Aa" and "aa"
    """
    prev_AA = previous_probabilities["AA"]
    prev_Aa = previous_probabilities["Aa"]
    prev_aa = previous_probabilities["aa"]

    probability_AA = prev_AA * 1/2 + prev_Aa * 1/4 + prev_aa * 0
    probability_Aa = prev_AA * 1/2 + prev_Aa * 1/2 + prev_aa * 1/2
    # BUG FIX: the last term previously read previous_probabilities["AA"];
    # the aa row must draw from the previous *aa* probability, otherwise
    # the three probabilities do not sum to 1.
    probability_aa = prev_AA * 0 + prev_Aa * 1/4 + prev_aa * 1/2

    return {
        "AA": probability_AA,
        "Aa": probability_Aa,
        "aa": probability_aa,
    }
e59ec6393d2640de1e54055707356fd3e3c58e80
73,296
def make_twin_ax(ax):
    """Create a twin y-axis sharing ``ax``'s x-axis.

    The twin's grid is disabled, and the patches/z-orders are arranged so
    the original axis draws in front of the twin.
    """
    twin_ax = ax.twinx()
    # No grid on the twin — the primary axis already provides one.
    twin_ax.grid(False)
    # Hide the primary patch, show the twin's underneath...
    ax.patch.set_visible(False)
    twin_ax.patch.set_visible(True)
    # ...then raise the primary axis above the twin.
    ax.set_zorder(twin_ax.get_zorder() + 1)
    return twin_ax
3a46cb36aeca522be43781a6cc947778192731a0
73,298
import yaml


def load_yaml_conf(yaml_file):
    """Load and return the configuration stored in a YAML file."""
    with open(yaml_file, 'r') as stream:
        return yaml.safe_load(stream)
93c4d02395767525042902b3266117ed50f720c8
73,303
def write_out(filename, data=b""):
    """Write ``data`` to ``filename`` in binary mode, best effort.

    str data is UTF-8 encoded first.  Returns True on success, False if
    anything went wrong (errors are deliberately swallowed).

    like: write_out(r"c:\\1.txt", data)
    """
    try:
        payload = data.encode("utf-8") if isinstance(data, str) else data
        with open(filename, "wb") as handle:
            handle.write(payload)
    except Exception:
        return False
    return True
9497726c0e83aa6312162f4965cbd96bc6569c0a
73,306
import re def _rgb_txt_line(string): """Parse a line from an X11 rgb.txt file Gives a name and 3 integers (RGB values) """ regexp = re.compile( r"([ 0-9][ 0-9][ 0-9])\s+([ 0-9][ 0-9][ 0-9])\s+([ 0-9][ 0-9][ 0-9])" r"\s+([a-zA-Z0-9 ]+)\s*" ) match = regexp.match(string) if not match: return "", (-1, -1, -1) red, green, blue, name = match.groups() return name.strip(), (int(red), int(green), int(blue))
f05c1df8b03dfe8b65c3b51851a0b8041aff9d9a
73,307
import random


def nth_element(x, n):
    """Return the nth order statistic of a list via randomized quickselect.

    Stability note: if multiple order statistics are equal, the position
    returned is the first index of that value in the list.

    Args:
        x (list): The list from which to choose
        n (nonnegative int): The order statistic to get

    Returns:
        A tuple of (element, position of element in list)

    Raises:
        IndexError: if n is greater than or equal to the length of x

    DocTests:
    >>> nth_element([4, 2, 5, 1, 3], 0)
    (1, 3)
    >>> nth_element([5, 15, 3, 6], 3)
    (15, 1)
    >>> nth_element([7, 7, 7, 7, 7, 7, 7], 3)
    (7, 0)
    >>> nth_element([4, 3, 5, 2], 2)
    (4, 0)
    """
    # Make sure the index exists.  (The original also carried an unused
    # ``position`` variable; it has been removed.)
    if n >= len(x):
        raise IndexError

    candidates = x
    rank = n
    while candidates:
        # Random pivot keeps the expected running time linear.
        pivot = candidates[random.randint(0, len(candidates) - 1)]
        # Partition into strictly-smaller and strictly-larger values;
        # everything equal to the pivot sits between the two.
        smaller = [v for v in candidates if v < pivot]
        larger = [v for v in candidates if v > pivot]
        lo = len(smaller)
        hi = len(candidates) - len(larger)
        if rank < lo:
            candidates = smaller
        elif rank >= hi:
            candidates = larger
            rank -= hi
        else:
            # Rank falls inside the run of pivot-equal values.
            return pivot, x.index(pivot)
7802869daf7cae84aa582bb9230cbb58f7350d84
73,309
def strip_transient(nb):
    """Strip transient values that shouldn't be stored in files.

    Called on both read and write; mutates ``nb`` in place and returns it.
    """
    for key in ('orig_nbformat', 'orig_nbformat_minor', 'signature'):
        nb.metadata.pop(key, None)
    for cell in nb.cells:
        cell.metadata.pop('trusted', None)
    return nb
560a06532273c2a983692c17e898351c4a78130b
73,315
def _get_works(df, run, project, GEN=None): """ Get set of work values from a dataframe Parameters ---------- df : pandas.DataFrame Information generated from folding at home run : str run to collect data for (i.e. 'RUN0') project : str project to collect data for (i.e. 'PROJ13420') GEN : str, optional default=None if provided, will only return work values from a given run, (i.e. 'GEN0'), otherwise all generations are returned Returns ------- list, list Returns lists for forwards and backwards works """ works = df[df["RUN"] == run] if GEN: works = works[works["GEN"] == GEN] f = works[works["PROJ"] == project].forward_work r = works[works["PROJ"] == project].reverse_work return f, r
f91161405d3b2ababcbf992ed2a32a32ac4010d4
73,316
import pathlib
import re
from typing import List, Optional, Tuple


def output_files(output_dir: pathlib.Path, jobid: str) -> Tuple[List[pathlib.Path], Optional[str]]:
    """Give the list of output files from a slurm array job.

    Args:
        output_dir: The directory containing output files
        jobid: A str containing an integer slurm job ID

    Returns:
        A tuple of (all output files matching the jobid, the job name
        parsed from the first file, or None when nothing could be parsed).
    """
    found = list(output_dir.glob("*-{}-r*.out".format(jobid)))
    if not found:
        return [], None
    parsed = re.search("(.+)-" + jobid, found[0].name)
    if parsed is None:
        return found, None
    return found, parsed.groups()[0]
df04d323cc05e2e54975b1b0e0653695493fe0ee
73,324
import re


def extract_hashtags(tweets):
    """Extract all hashtags included in the input tweet data.

    Parameters
    ----------
    tweets : list
        List of tweet strings

    Returns
    -------
    list
        Hashtag texts (without the '#') found across all tweets

    Examples
    --------
    >>> extract_hashtags(["It's rocket-science~~ #LoveElonMusk"])
    ['LoveElonMusk']
    """
    if not isinstance(tweets, list):
        raise TypeError("'tweets' should be of type 'list'.")
    combined = " ".join(tweets)
    return [tag.replace('#', '') for tag in re.findall(r'(#[A-Za-z0-9]*)', combined)]
87f648770bd7a828b6366fdd9e52dcd15e09c7a2
73,326
def linear_search_iterative(array, item):
    """Return the index of ``item`` in ``array``, or None when absent.

    Best case O(1) (item first); worst case O(n) (item last or missing).
    """
    position = 0
    for value in array:
        if value == item:
            return position
        position += 1
    return None
09b4c01926e327baf4f9cf324fcea202ebe1a836
73,329
import socket


def parse_ip(ip):
    """Parse and normalize an IPv4/IPv6 address string.

    >>> parse_ip("192.0.2.1") == (socket.AF_INET, "192.0.2.1")
    True
    >>> parse_ip("not-an-ip")
    Traceback (most recent call last):
        ...
    ValueError: 'not-an-ip' is not a valid IPv4/6 address
    """
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            packed = socket.inet_pton(family, ip)
        except socket.error:
            continue
        return family, socket.inet_ntop(family, packed)
    raise ValueError("{0!r} is not a valid IPv4/6 address".format(ip))
6a019f7623b746e9a2522db2d7cdc2048b77ed76
73,331
def mergeOptions(defaults, options):
    """Return a dict where ``options`` values override matching ``defaults``.

    Keys present only in ``options`` are ignored; ``defaults`` is copied,
    not mutated.
    """
    merged_options = defaults.copy()
    # BUG FIX: ``.iteritems()`` is Python-2-only and raises AttributeError
    # on Python 3; ``.items()`` is the correct, equivalent call.
    for key, val in options.items():
        if key in merged_options:
            merged_options[key] = val
    return merged_options
d9479d0ce38b0a5813b9d41e2aa3719156eb832d
73,334
def get_property_names(obj):
    """Return the set of attribute names defined as properties on obj's class.

    Inspiration:
    https://stackoverflow.com/questions/17735520/determine-if-given-class-attribute-is-a-property-or-not-python-object
    """
    cls = type(obj)
    return {
        name
        for name in dir(obj)
        if isinstance(getattr(cls, name, None), property)
    }
430c80eee4bf0cd615ad49fd725519aef4866aeb
73,337
def sign(n):
    """Return sign of number (-1, 0, 1)."""
    if n > 0:
        return 1
    if n < 0:
        return -1
    return 0
d174c55de0b61489bbb6c9ce72622c4c8f3f2632
73,338
def sorted_container_nodes(containers):
    """Sort containers descending by lowercased label (falling back to id).

    Arguments:
        containers (iterable): The containers to sort

    Returns:
        iterable: The sorted containers
    """
    def sort_key(container):
        return (container.label or container.id or '').lower()

    return sorted(containers, key=sort_key, reverse=True)
950c0bcdc643f3cc65b60409f2081a2bbbf146c5
73,345
import re def _strip_ansi(source): """ Remove ANSI escape codes from text. Parameters ---------- source : str Source to remove the ANSI from """ ansi_re = re.compile('\x1b\\[(.*?)([@-~])') return ansi_re.sub('', source)
c93fa87610ec412b66a56876f6297893e896935f
73,348
import glob


def read_labels(root_path):
    """Read label image paths and return them as a tuple of sorted lists.

    :param root_path: dataset root (with trailing separator); map ``i``
        lives under ``Maps{i}_T/Maps{i}_T/``
    :return: tuple of six sorted lists of .png paths (Maps1..Maps6)
    """
    # The original repeated the glob six times and carried an unused
    # ``label_list`` variable; a single loop keeps the pattern in one place.
    return tuple(
        sorted(glob.glob('{0}Maps{1}_T/Maps{1}_T/*.png'.format(root_path, i)))
        for i in range(1, 7)
    )
a383dd4ab3bd63e837e91c820be27e72e0654d78
73,352
import requests


def is_private(username):
    """Check whether an Instagram account is private.

    :param username: account handle
    :return: bool
    """
    response = requests.get(f"https://www.instagram.com/{username}/?__a=1")
    # NOTE(review): the ?__a=1 JSON endpoint has historically been
    # unstable/deprecated by Instagram — confirm it still responds.
    return response.json()['graphql']['user']['is_private']
93265f746a021f8d6afd2f5d64c7c7ee9e9a3de7
73,354
from typing import Any, Optional, Tuple


def top_color_and_depth(container: tuple) -> Optional[Tuple[Any, int]]:
    """Return the top color and the length of its contiguous run.

    Returns None for an empty container.
    """
    if len(container) == 0:
        return None
    top = container[0]
    for depth, color in enumerate(container):
        if color != top:
            return top, depth
    return top, len(container)
c0041014e1116e1b9fdc78db90947f9856864d05
73,355
def cpf_has_correct_length(value):
    """Return True when the Brazilian CPF string has exactly 11 characters.

    :param value: A string with the number of Brazilian CPF
    :return: bool
    """
    # A direct comparison replaces the verbose if/else over the same test.
    return len(value) == 11
defb573148545683e0b62ae79c0b798f00dfbf1b
73,357
import re


def make_wildcard_domain(d):
    """Produce the wildcard and bare forms of a domain.

    >>> make_wildcard_domain('foo-bar.example.com')
    ['*.example.com', 'example.com']
    """
    if d.count('.') == 1:
        # Already a bare domain: just prepend the wildcard label.
        bare = d
        wildcard = f'*.{bare}'
    else:
        # Replace the left-most label with '*' and derive the bare form.
        wildcard = re.sub(r'^([^.]+)(?=.)', '*', d, 1)
        bare = wildcard.replace('*.', '')
    return [wildcard, bare]
35419b714622e0ac046a77702ee2260addce0e65
73,359
def lemmatize(sentence_vector):
    """Lemmatize an array of tokenized sentences.

    Args:
        sentence_vector (array): Series of spaCy token lists.

    Returns:
        numpy.array: Array containing the lemmatized words.
    """
    def lemmas(token_list):
        return [token.lemma_ for token in token_list]

    return sentence_vector.progress_apply(lemmas)
b3a236da69b32f217ada0fef90af7ff978d8e245
73,360
def get_billing_data(order):
    """Extract the order's billing address into payment-friendly billing data.

    Returns an empty dict when the order carries no billing address.
    """
    address = order.billing_address
    if not address:
        return {}
    return {
        'billing_first_name': address.first_name,
        'billing_last_name': address.last_name,
        'billing_company_name': address.company_name,
        'billing_address_1': address.street_address_1,
        'billing_address_2': address.street_address_2,
        'billing_city': address.city,
        'billing_postal_code': address.postal_code,
        'billing_country_code': address.country.code,
        'billing_email': order.user_email,
        'billing_country_area': address.country_area,
    }
3f020b13905f33c215d32ec1a3b2e4f4af369e15
73,363
def levels_to_graph(levels):
    """Convert an array of levels into a unicode braille bar graph.

    Each level is an integer 0-3 (or -1 for an empty half-column) drawn
    as 1-4 braille dots; every output rune encodes two adjacent levels.
    """
    if len(levels) % 2 == 1:
        # Left pad uneven-length arrays with an empty column
        levels = [-1] + levels

    # https://en.wikipedia.org/wiki/Braille_Patterns#Identifying.2C_naming_and_ordering
    LEFT_BAR = [0x00, 0x40, 0x44, 0x46, 0x47]
    RIGHT_BAR = [0x00, 0x80, 0xA0, 0xB0, 0xB8]

    # NOTE: the original carried a Python-2 ``unichr`` fallback; on
    # Python 3 ``chr`` is always correct, so the shim has been removed.
    graph = ""
    for index in range(0, len(levels) - 1, 2):
        left_level = levels[index] + 1
        right_level = levels[index + 1] + 1
        graph += chr(0x2800 + LEFT_BAR[left_level] + RIGHT_BAR[right_level])
    return graph
8994d6d6eea5c741f706b78e6754bab7850909b2
73,365
def extract_data(result):
    """Extract plain text data, defaulting to an empty string when absent."""
    return result.get('data', "")
44d7b7cb44f299b14d5caeb70781a4d88c96db5b
73,367
def receive(socket, msglen, chunksize=2048):
    """Receive an entire message from a socket, which may be chunked.

    Parameters
    ----------
    socket : socket.socket
        object from which to receive data
    msglen : int
        length of the entire message in bytes
    chunksize : int, optional
        messages will be received in chunksize lengths and joined together

    Returns
    -------
    bytes
        the entire message

    Raises
    ------
    RuntimeError
        if the peer closes the connection before ``msglen`` bytes arrive
    """
    chunks = []
    bytes_received = 0
    while bytes_received < msglen:
        recv_len = min(msglen - bytes_received, chunksize)
        chunk = socket.recv(recv_len)
        # BUG FIX: recv() returns *bytes*; the old ``chunk == ''`` test
        # could never be true on Python 3, so a closed connection made
        # this loop spin forever.  Empty bytes signal EOF.
        if not chunk:
            raise RuntimeError("socket connection broken")
        chunks.append(chunk)
        bytes_received = bytes_received + len(chunk)
    return b''.join(chunks)
ae7f77384c5ab092feaf6c0774c1154c1c430534
73,368
def setup(setup_method): """Decorator for declaring setup methods for test cases.""" setup_method._is_setup_method = True return setup_method
6ac658e6cefb2c8874027897b3541f84cb1f215b
73,373
def FRET_efficiency_from_lifetime(fret_lifetime, donor_lifetime):
    """Compute FRET efficiency from FRET and donor-only states lifetimes."""
    lifetime_ratio = fret_lifetime / donor_lifetime
    return 1 - lifetime_ratio
31fc56e3749c411030fb32110b9cd75c7b8e7b44
73,378
import logging
import math
import collections


def get_simulation_parameters(open_rocket_helper, sim, rocket_components,
                              random_parameters, randomize=True):
    """Collect all global simulation parameters for export.

    Reads launch-rod geometry from the OpenRocket simulation options and,
    when ``randomize`` is set, draws perturbation factors from
    ``random_parameters``; otherwise neutral factors are used.
    Per-component values (stage separation delays, fin cant angles,
    parachute CDs, motor ignition delays) come from ``rocket_components``.

    NOTE(review): ``open_rocket_helper`` is unused in this body —
    presumably kept for signature parity with sibling helpers; confirm.

    Returns a ``Parameters`` namedtuple with all collected values.
    """
    logging.info("Used global parameters...")
    options = sim.getOptions()
    # Launch-rod angles come back in radians; export/log in degrees.
    tilt = math.degrees(options.getLaunchRodAngle())
    azimuth = math.degrees(options.getLaunchRodDirection())
    if randomize:
        thrust_factor = random_parameters.thrust_factor()
        mass_factor = random_parameters.mass_factor()
        CG_shift = random_parameters.CG_shift()
        Caxial_factor = random_parameters.Caxial_factor()
        CN_factor = random_parameters.CN_factor()
        Cside_factor = random_parameters.Cside_factor()
    else:
        # Neutral values: no perturbation of thrust, mass, CG or
        # aerodynamic coefficients.
        thrust_factor = 1.
        mass_factor = 1.
        CG_shift = 0.
        Caxial_factor = 1.
        CN_factor = 1.
        Cside_factor = 1.
    logging.info("Launch rail tilt = {:6.2f}°".format(tilt))
    logging.info("Launch rail azimuth = {:6.2f}°".format(azimuth))
    logging.info("Thrust factor = {:6.2f}".format(thrust_factor))
    logging.info("Mass factor = {:6.2f}".format(mass_factor))
    logging.info("CG shift = {:6.2f}m".format(CG_shift))
    logging.info("Caxial factor = {:6.2f}".format(Caxial_factor))
    logging.info("CN factor = {:6.2f}".format(CN_factor))
    logging.info("Cside factor = {:6.2f}".format(Cside_factor))
    mcid = options.getMotorConfigurationID()
    # NOTE(review): ``rocket`` is never used below; dead assignment?
    rocket = options.getRocket()
    # stage sepration
    separationDelays = []
    for stage in rocket_components.stages:
        # Separation configuration is looked up per motor configuration.
        separationEventConfiguration = stage.getStageSeparationConfiguration().get(mcid)
        separationDelays.append(
            separationEventConfiguration.getSeparationDelay())
        logging.info("Separation delay of stage {} = {:6.2f}s".format(
            stage, separationDelays[-1]))
    fin_cants = []
    for fins in rocket_components.fin_sets:
        fin_cants.append(math.degrees(fins.getCantAngle()))
    # There can be more than one parachute -> add unbiased
    # normaldistributed value
    parachute_cds = []
    for parachute in rocket_components.parachutes:
        parachute_cds.append(parachute.getCD())
    # motor ignition
    ignitionDelays = []
    for motor in rocket_components.motors:
        ignitionDelays.append(
            motor.getIgnitionConfiguration().get(mcid).getIgnitionDelay())
        logging.info("Ignition delay of stage {} = {:6.2f}s".format(
            motor.getStage(), ignitionDelays[-1]))
    # Bundle everything into an immutable, named record for export.
    Parameters = collections.namedtuple("Parameters", [
        "tilt", "azimuth", "thrust_factor", "separation_delays",
        "fin_cants", "parachute_cds", "ignition_delays", "mass_factor",
        "CG_shift", "Caxial_factor", "CN_factor", "Cside_factor"])
    return Parameters(
        tilt=tilt,
        azimuth=azimuth,
        thrust_factor=thrust_factor,
        separation_delays=separationDelays,
        fin_cants=fin_cants,
        parachute_cds=parachute_cds,
        ignition_delays=ignitionDelays,
        mass_factor=mass_factor,
        CG_shift=CG_shift,
        Caxial_factor=Caxial_factor,
        CN_factor=CN_factor,
        Cside_factor=Cside_factor)
afde0113309917592dc9bf1109056d189e501818
73,380
def egg_drop(n: int, k: int) -> int:
    """
    What is the minimum number of trials we need to drop eggs to determine
    which floors of a building are safe for dropping eggs, given n eggs
    and k floors?

    :param n: number of eggs
    :param k: number of floors
    :return: the minimum number of trials

    >>> egg_drop(1, 5)
    5
    >>> egg_drop(2, 36)
    8
    """
    # Memoized recursion: the naive version recomputes the same
    # (eggs, floors) states exponentially often.
    memo = {}

    def best(eggs, floors):
        # One egg: every floor must be tried.  Zero/one floor: that many
        # trials suffice.
        if eggs == 1:
            return floors
        if floors <= 1:
            return floors
        cached = memo.get((eggs, floors))
        if cached is not None:
            return cached
        # Drop from floor x:
        # 1. Breaks  -> answer is below x: best(eggs - 1, x - 1).
        # 2. Intact  -> answer is above x: best(eggs, floors - x).
        # Take the adversarial worst case, minimized over x.
        result = 1 + min(
            max(best(eggs - 1, x - 1), best(eggs, floors - x))
            for x in range(1, floors + 1)
        )
        memo[(eggs, floors)] = result
        return result

    return best(n, k)
c12426c93bedb721393b5ef4e165c834d018784e
73,384
def removeSlash(p):
    """Return ``p`` without a single trailing '/' or backslash, if present."""
    return p[:-1] if p.endswith(('/', '\\')) else p
cd24c506b270ef8b732578f76c2dd0cc11934ce1
73,388
def get_s3_keys(prefix, s3_client, bucket='fcp-indi'):
    """Retrieve all keys in an S3 bucket matching the given prefix.

    Parameters
    ----------
    prefix : string
        S3 prefix designating the S3 "directory" in which to search
    s3_client : boto3 client object
    bucket : string
        AWS S3 bucket in which to search

    Returns
    -------
    list
        All keys matching the prefix in the S3 bucket

    Raises
    ------
    ValueError
        when the first page contains no objects
    """
    # Avoid duplicate trailing slash in prefix.
    prefix = prefix.rstrip('/')
    response = s3_client.list_objects_v2(Bucket=bucket, Prefix=prefix)
    try:
        keys = [entry['Key'] for entry in response.get('Contents')]
    except TypeError:
        raise ValueError(
            'There are no subject files in the S3 bucket with prefix '
            '{pfix:s}'.format(pfix=prefix)
        )
    # Page through the remaining results, if any.
    while response['IsTruncated']:
        response = s3_client.list_objects_v2(
            Bucket=bucket,
            Prefix=prefix,
            ContinuationToken=response['NextContinuationToken'],
        )
        keys.extend(entry['Key'] for entry in response.get('Contents'))
    return keys
4d0dc604bb6c4a447c50845ba540798b61aedacc
73,390
def _has_non_ascii_characters(data_string): """ Check if provided string contains non ascii characters :param data_string: str or unicode object """ try: data_string.encode('ascii') except UnicodeEncodeError: return True return False
8bb17ba61c503f6e861383d3717b60a178fcffe5
73,398
def rotate_list(numbers, cursor):
    """Rotate the list so the element ``cursor`` places from the end
    becomes the new head."""
    pivot = len(numbers) - cursor
    return numbers[pivot:] + numbers[:pivot]
691a4ba53445e67409b5faa2760c6a0eca967437
73,399
def iter_is_empty(i):
    """Test for an empty iterator without consuming more than one element.

    Returns True only when the iterator is exhausted; otherwise the first
    element is consumed and False is returned.
    """
    try:
        # BUG FIX: Python 3 iterators have no ``.next()`` method; the
        # builtin ``next()`` works on both protocol generations.
        next(i)
    except StopIteration:
        return True
    return False
d4cae6634daf379ec33de23623715749e665646c
73,400
def get_bbox_from_gt(gt_root):
    """Collect all supervised (ground-truth) bounding boxes under ``gt_root``.

    :param gt_root: root element of the VOC-style annotation XML
    :return: dict mapping object name to a list of (xmin, xmax, ymin, ymax)
    """
    all_objs = dict()
    for obj_elem in gt_root.findall("./object"):
        name = obj_elem.find("./name").text
        bbox_elem = obj_elem.find("./bndbox")
        # VOC coordinates are 1-based; shift the minima to 0-based.
        box = (
            int(bbox_elem.find("./xmin").text) - 1,
            int(bbox_elem.find("./xmax").text),
            int(bbox_elem.find("./ymin").text) - 1,
            int(bbox_elem.find("./ymax").text),
        )
        all_objs.setdefault(name, []).append(box)
    return all_objs
ce8f2963f22089b8979ca7dbdf0c8fe3c5a82c5a
73,402
def load_input(filename):
    """Load comma-separated quoted names from the first line of ``filename``."""
    with open(filename) as handle:
        first_line = handle.readlines()[0]
    return [token[1:-1] for token in first_line.split(",")]
da8382204f8f521bf9b9bbbd71af9f093bb92c6f
73,403
def get_project_folder_name(project_file_name: str) -> str:
    """Get the name of the folder associated with a project.

    :param project_file_name: Name of the project file.
    :return: '<project>.Files', per Simio's nomenclature.
    """
    base_name = project_file_name.partition('.')[0]
    return base_name + '.Files'
4540fcbb40395918b75a29e81b207d1c773d28f0
73,407
def calc_num_metric(predicted_number, actual_number):
    """Percentage difference between predicted and actual reference counts.

    When the actual count is 0 (a document may have a reference section
    but no references), it is clamped to 1 to avoid a division by zero,
    at the cost of a small error.
    """
    denominator = actual_number if actual_number != 0 else 1
    return abs(100 * ((predicted_number - denominator) / denominator))
305838e141f3767eebb4b880b273f096b30ef70b
73,408
import hashlib def _compute_file_md5(filename, block_size=2 ** 20): """ Compute the md5 hash of a file, catching any errors. Adapted from http://stackoverflow.com/a/1131255/256798 :param filename: :param block_size: :return: string version of hash, or empty string if IOError. """ md5 = hashlib.md5() try: with open(filename, 'rb') as f: while True: data = f.read(block_size) if not data: break md5.update(data) return md5.hexdigest() except IOError as e: print('Could not open file: {}'.format(e)) return ''
a89358d135ec148682e79851a578fcb69526dd08
73,411
import itertools


def one_to_one(graph, nodes):
    """Return True if ``graph`` contains only one-to-one mappings.

    The directed graph is a dict mapping each node to its edge list;
    ``nodes`` is a plain list.  One-to-one means one entry per node and
    as many distinct edge targets as nodes.
    """
    targets = set(itertools.chain.from_iterable(graph.values()))
    return len(graph) == len(nodes) and len(targets) == len(nodes)
d320e1b018b296dc8b6e50e191a75843ea59201e
73,413
def normalize_series(series, min_max):
    """Min-max normalize a pandas series to the [0, 1] range.

    Values outside the supplied bounds map below 0 or above 1.

    :param series: Pandas series to normalize.
    :param min_max: (min, max) tuple used for scaling, or None to derive
        the bounds from the series itself.
    :return: Normalized series.
    """
    if min_max is None:
        min_max = series.agg(['min', 'max']).values
    low, high = min_max[0], min_max[1]
    assert low < high
    return (series - low) / (high - low)
47f9376817b30176bdda5bf9a01aa58773575f2d
73,416
import torch


def pdist(X, Y):
    """Squared Euclidean distance between every pair (x, y), x in X, y in Y.

    Uses the expansion ||x - y||^2 = ||x||^2 - 2<x, y> + ||y||^2.
    Adapted from https://github.com/clinicalml/cfrnet/blob/master/cfr/util.py
    """
    sq_x = torch.sum(X ** 2, 1, keepdim=True)
    sq_y = torch.sum(Y ** 2, 1, keepdim=True)
    cross = torch.matmul(X, torch.transpose(Y, 0, 1))
    return -2 * cross + torch.transpose(sq_y, 0, 1) + sq_x
2ea9988cd8270e50ad86b5f6a154ba670a59a835
73,417
def format_coordinates_for_cql(tile_coordinates: list) -> str:
    """Build the CQL POLYGON filter string from five tile corner pairs.

    Bugfix note: the parameter was previously annotated ``str``, but it is
    indexed as a sequence of coordinate pairs — presumably five (lat, lon)
    tuples with the first corner repeated last to close the ring; confirm
    against callers.

    :param tile_coordinates: sequence of five (lat, lon) corner pairs
    :return: "POLYGON((lon+lat,...))" string for a cql_filter parameter
    """
    # Each corner is rendered "lon+lat" ('+' is the URL-encoded separator).
    corners = ",".join(
        f"{tile_coordinates[i][1]}+{tile_coordinates[i][0]}" for i in range(5)
    )
    return f"POLYGON(({corners}))"
e0062d436330ba03e84c0a18ec47bb5b1299a752
73,418
from typing import Any


def force_string(s: Any) -> str:
    """Coerce any value into its default string form (same as ``f'{s}'``)."""
    return format(s)
651db34b62952623eb74d5a9c0ecd18cdc6e9891
73,424
import torch from typing import Optional def merge_features(xyz: torch.Tensor, features: Optional[torch.Tensor]) -> torch.Tensor: """Merge xyz coordinates and features to point cloud.""" if features is None: return xyz.transpose(1, 2) else: return torch.cat((xyz.transpose(1, 2), features), dim=1)
43c59fb16b1bd03d6607a22fa0e5b22962f6ee04
73,426
def prob(counts, variables=None, condition=(), transform=False):
    """Calculate (conditional) probabilities from counts.

    The levels of the result series will always be sorted to ensure proper
    behavior under multiplication and addition.

    :param counts: a series of counts with the variables as index. Typically,
        it is the result of an operation similar to
        ``df.groupby(variables).size()``.
    :param variables: the variables for which to calculate the probability.
        If not given all variables are used.
    :param condition: the variables to condition on.
    :param transform: if ``True`` return the result with the original index,
        similar to how ``.groupby().transform()`` operates.
    """
    condition = list(condition)
    if variables is None:
        # Default: use every index level as a variable.
        variables = [*counts.index.names]
    else:
        variables = list(variables)
    if transform:
        # In transform mode, normalize within the levels NOT in `variables`
        # so the original index is preserved.
        complement = [name for name in counts.index.names if name not in {*variables}]
    else:
        complement = condition
    grouped = counts.groupby(level=condition + variables)
    # agg collapses to one row per group; transform keeps the original index.
    counts = grouped.agg("sum") if not transform else grouped.transform("sum")
    if not complement:
        # Unconditional probability: normalize by the grand total.
        result = counts / counts.sum()
    else:
        # Conditional probability: normalize within each conditioning group.
        result = counts / counts.groupby(level=complement).transform("sum")
    if len(result.index.names) > 1:
        # Sort levels so equally-indexed results align under arithmetic ops.
        return result.reorder_levels(sorted(result.index.names))
    else:
        return result
a36e7cc30a07eb1fef5b4458c870ca4887eb0850
73,438
def _topological_sort(key_less_than_values):
    """Topological sort for monotonicities.

    Args:
      key_less_than_values: A defaultdict from index to a list of indices,
        such that for j in key_less_than_values[i] we must have
        output(i) <= output(j).

    Returns:
      A topologically sorted list of indices.

    Raises:
      ValueError: If monotonicities are circular.
    """
    # Collect every index that appears as a "greater" value.
    all_values = set()
    for values in key_less_than_values.values():
        all_values.update(values)
    # Roots: keys that are never a target. A cycle leaves no roots.
    q = [k for k in key_less_than_values if k not in all_values]
    if not q:
        raise ValueError(
            "Circular monotonicity constraints: {}".format(key_less_than_values))
    result = []
    seen = set()
    # Iterative DFS: a node is prepended to `result` only once all of its
    # successors have been visited (post-order), yielding a topological order.
    while q:
        v = q[-1]
        seen.add(v)
        expand = [x for x in key_less_than_values[v] if x not in seen]
        if not expand:
            result = [v] + result
            q.pop()
        else:
            # Descend into one unvisited successor; v stays on the stack.
            q.append(expand[0])
    return result
17153499da6f6b4aa6d4af2661741c64eefbe357
73,446
def is_record(data):
    """Return whether *data* looks like an OCDS record.

    A record must contain both the ``releases`` and ``ocid`` fields.
    """
    return all(field in data for field in ('releases', 'ocid'))
3f5a1ed2d4c84f92d99126a9c07d65b776495de0
73,447
def strage_to_intage(str_age):
    """Convert an age string such as '65' or '65+' into an integer."""
    # A single trailing '+' (e.g. open-ended age bands) is dropped.
    if str_age.endswith('+'):
        str_age = str_age[:-1]
    return int(str_age)
fc85e7c419847936762337dae1e588600bcdca6e
73,451
def hcp_coord_test(x, y, z):
    """Return True if (x, y, z) lies on the HCP lattice grid."""
    total = x + y + z
    if total % 6 != 0:
        return False
    offset = total // 12
    return (x - offset) % 3 == 0 and (y - offset) % 3 == 0
f82ead7b5ffd429e020b5d0c5d982a213c50306a
73,453
from typing import List


def check_args_validate(args: List[str]):
    """Validate an option argument list.

    Expects exactly two elements where the second is a digit string in
    the open interval (0, 100).

    :return: (True, value) on success, otherwise (False, 0).
    """
    if len(args) == 2 and args[1].isdigit():
        value = int(args[1])
        if 0 < value < 100:
            return True, value
    return False, 0
de07a93e2f13d1bf11167fcc0fc6ead72b9949f1
73,454
def set_robust(robust):
    """Return a generic name when no robust option was supplied.

    Note: any falsy value (None, '') is treated as "not supplied"; validity
    of a non-empty name is assumed to have been checked by check_robust().

    Parameters
    ----------
    robust : string or None
        Object passed by the user to a regression class.

    Returns
    -------
    robust : string
    """
    return robust if robust else 'unadjusted'
3e7a0498f7e63b373fcf4e0b624c8d0cfa0cc4ae
73,462
def contains_npmi_metric(metrics):
    """Return True if any header metric is usable by the nPMI plugin.

    A metric qualifies when it starts with "nPMI@" or "nPMI_diff@".

    Args:
      metrics: The metrics in the header of the csv.
    """
    return any(m.startswith(("nPMI@", "nPMI_diff@")) for m in metrics)
f1a54d02412dcc0d4864a23e27538d20939dbfda
73,466
def labels_from(labels_df):
    """Extract the unique labels from the `tags` column of a dataframe.

    Tags are space-separated within each row; the returned list preserves
    the order of first appearance.
    """
    # dict.fromkeys() de-duplicates while keeping insertion order.
    ordered = dict.fromkeys(
        label
        for tag_str in labels_df.tags.values
        for label in tag_str.split(' ')
    )
    return list(ordered)
05ed873d7c7af8e8de7b5ccd22fba3bc1e46b33b
73,467
import re


def extract_tags_links(string):
    """Split #tags and ^links out of a narration string.

    Args:
        string: A string, possibly containing tags (`#tag`) and
            links (`^link`).

    Returns:
        A triple (new_string, tags, links) where `new_string` is `string`
        stripped of tags and links, and tags/links are frozensets.
    """
    tag_matches = re.findall(r'(?:^|\s)#([A-Za-z0-9\-_/.]+)', string)
    link_matches = re.findall(r'(?:^|\s)\^([A-Za-z0-9\-_/.]+)', string)
    cleaned = re.sub(r'(?:^|\s)[#^]([A-Za-z0-9\-_/.]+)', '', string).strip()
    return cleaned, frozenset(tag_matches), frozenset(link_matches)
1c416c87d31a54836c5b208f1a25f0464ed5314d
73,470
def _get_ids_for_cameras(cameras): """Get list of camera IDs from cameras""" return list(map(lambda camera: camera.id, cameras))
336ce0e3d14b79046c645fec294905a0950b7e01
73,472
def findIndexInList (L: list, item) -> int:
    """Return the index of the first occurrence of *item* in *L*, or -1."""
    for idx, element in enumerate(L):
        if element == item:
            return idx
    return -1
152692cd6af04667771edfb5f7bc78a110debad6
73,473
from typing import List from typing import Dict from typing import Any from typing import Callable from functools import reduce def pluck( keys: List[str], items: List[Dict[str, Any]], key_sep: str = ".", key_func: Callable[[str], str] = lambda k: k, ) -> List[Dict[str, Any]]: """Pluck `keys` from `items`. Nested keys can be specified by concatenating with `key_sep` (i.e. `key1.key2`). Returned keys are set via `key_func`.""" return [ { key_func(k): reduce( lambda d, k: d[k] if isinstance(d, dict) else getattr(d, k), k.split(key_sep), d ) for k in keys } for d in items ]
9d0c11d908559a284fe404cf7131c8bce4922b50
73,474
def make_board(board_string):
    """Make a 5x5 board (list of 5 rows of 5 letters) from a string.

    For example::

        >>> board = make_board('''
        ... N C A N E
        ... O U I O P
        ... Z Q Z O N
        ... F A D P L
        ... E D E A Z
        ... ''')

        >>> len(board)
        5

        >>> board[0]
        ['N', 'C', 'A', 'N', 'E']
    """
    letters = board_string.split()
    # Slice the flat 25-letter list into five rows of five.
    return [letters[row:row + 5] for row in range(0, 25, 5)]
546ea30ae23e3709860d4c9eef51c97e14354951
73,475
def EMPTY(value):
    """Constant validator: accept only falsy values ('' , 0, [], None, ...)."""
    if value:
        return False
    return True
dfa069b29e9802658390bf4843c33263f9ae1e94
73,478
def detectFileFormat(filename):
    """Return the format name implied by the file name's last suffix.

    Generalization: the suffix comparison is now case-insensitive
    ('x.FA' and 'x.fa' both map to FASTA); lowercase names behave as before.

    :param filename: file name whose extension identifies the format
    :return: one of 'GenBank', 'FASTA', 'GFF3'
    :raises KeyError: if the suffix is not recognized
    """
    suffixFormatDict = {
        'gb': 'GenBank',
        'fa': 'FASTA',
        'fasta': 'FASTA',
        'gff3': 'GFF3'}
    suffix = filename.split('.')[-1].lower()
    return suffixFormatDict[suffix]
8a52af75e0a87a3bebfa41536aeca8be043bcfcd
73,486
def createTexFile(filename):
    """Create a .tex file named ``<filename>.tex`` and return it open.

    Bugfix: the previous version ignored *filename* entirely, always
    writing a hard-coded '(unknown).tex' with title '(unknown)' — directly
    contradicting its own docstring. The file name and \\title now use the
    given *filename*.

    Parameters:
        filename (string): Base name of the .tex file created (no extension).

    Returns:
        file: the opened .tex file handle (caller is responsible for closing).
    """
    out_file = open(f'{filename}.tex', "w")
    header = "\\documentclass{article}\n\\begin{document}\n"
    header += f"\\title{{{filename}}}\n"
    header += "\\maketitle\n"
    out_file.write(header)
    return out_file
f8bec9eef4e378dc26662467b98f8a401cb99079
73,489
def remove_paren(token: str):
    """Strip every '(' and ')' character from *token*."""
    # translate() deletes both characters in a single C-level pass.
    return token.translate(str.maketrans('', '', '()'))
cad04ee2b42c2ef66c0c0097ebcbe52a70511191
73,494
def id_to_loc(gdf, fid):
    """Return the [x, y] coordinates of feature *fid* in a GeoDataFrame.

    Args:
        gdf: A GeoDataFrame containing the input points.
        fid: The identifier of a feature in the GeoDataFrame.

    Returns:
        A two-element list [x, y] with the feature's coordinates.
    """
    point = gdf.loc[fid]['geometry']
    return [point.x, point.y]
7ea84db6d2a5d49e4f834e55ae4a7ae3595eb2f9
73,500
def groupbykey(iterable, key=None):
    """Return a list of sublists of *iterable* grouped by key.

    All elements x of a given sublist share the same value key(x), which
    must be hashable. If not given, key() is the identity function.

    Performance note: the previous implementation re-scanned the entire
    input once per distinct key (O(n*k)); this version groups in a single
    pass. Groups are now returned in order of first occurrence (previously
    the order was arbitrary set-iteration order).
    """
    if key is None:
        key = lambda x: x
    groups = {}
    for x in iterable:
        groups.setdefault(key(x), []).append(x)
    return list(groups.values())
bc0c8b8f53d95b0731634e19d32f547bc25ce842
73,509
import io


def PIL_im_to_BytesIO(im):
    """Serialize a Pillow image into an in-memory PNG buffer.

    ___
    im: PIL.Image
    ___
    Output: io.BytesIO object with the PNG-encoded image data
    """
    buffer = io.BytesIO()
    im.save(buffer, format="PNG")
    return buffer
3644c1c21b8ce16c2f6d18296b9993d7e9f9691e
73,511
def bytescl(img, bottom, top):
    """Linearly rescale a pixel image to the range [bottom, top].

    NOTE(review): the original docstring claimed the output range is (0, 1),
    but the formula maps img.min() -> bottom and img.max() -> top; the
    output is [0, 1] only when bottom=0 and top=1.

    Keyword arguments:
    img    -- Original pixel image (array-like with .min()/.max()).
    bottom -- Value the minimum of img is mapped to.
    top    -- Value the maximum of img is mapped to.

    Output(s):
    scl_img -- Rescaled image with new limits bottom(min) - top(max).
    """
    scl_img = (((top - bottom) * (img - img.min())) / (img.max() - img.min())) + bottom
    return scl_img
e9b62e982920165be0a6897fb7f152c32737a14b
73,514
from typing import OrderedDict import json def load_recording(frame_file_tuples, session_id=None, camera=None, width=None, height=None, verbose=False): """ Load all frames of a single recording and return the dict representation of the recording. :param frame_file_tuples: List of (frame index, filename) tuples. Files are loaded in frame order. :param session_id: The identifier of this multi-camera recording session. :param camera: The identifier of this specific recording (i.e. camera angle) within the session. :param width: An integer representing the pixel width of the video file with which the OpenPose data was computed. :param height: An integer representing the pixel height of the video file with which the OpenPose data was computed. :param verbose: If true, print script progress information. :return: """ if verbose: print('Loading {} frames for recording "{}" of session "{}"'.format(len(frame_file_tuples), camera, session_id)) # Set up recording dictionary recording_dict = OrderedDict() if session_id is not None: recording_dict['id'] = str(session_id) if camera is not None: recording_dict['camera'] = str(camera) if width is not None: recording_dict['width'] = int(width) if height is not None: recording_dict['height'] = int(height) # Load frame data recording_dict['frames'] = frame2data = OrderedDict() for frame, filename in sorted(frame_file_tuples): with open(filename) as f: frame2data[frame] = json.load(f, object_pairs_hook=OrderedDict) return recording_dict
b83c5c2d3a9c6ef78ddb92d65be523bf1c533ec8
73,516
def list2p(paragraphs):
    """Wrap each paragraph in <p> tags and join into one HTML string."""
    return ''.join(f'<p>{p}</p>' for p in paragraphs)
4f89f8ed0bcb308fd0d0f76686b404f857d98b3a
73,523
def valid_transfer_value(transfer_value):
    """Return True if transfer_value can be converted to an int.

    Bugfix note: the bare ``except:`` previously swallowed *every*
    exception (including KeyboardInterrupt/SystemExit); only the
    conversion failures are caught now.
    """
    try:
        int(transfer_value)
    except (ValueError, TypeError):
        return False
    return True
d48f030419c28a26ea6fce3e99eb90609a5e9c87
73,527
import random


def shuffle(l):
    """Return a new randomly shuffled list built from *l*.

    Bugfix note: the previous implementation popped every element out of
    the caller's list, leaving *l* empty as a side effect. random.sample
    draws a full-length shuffled copy without mutating the input.
    """
    return random.sample(l, len(l))
435361d51f1b90c1921973fcea42a6f04f1c3018
73,529
from typing import Tuple


def parse_mode(mode: str) -> Tuple[str, bool, bool]:
    """Parse a mode string as used by ``open``.

    Character order is irrelevant and the binary flag (b) is ignored.
    Valid modes are: r, r+, w, w+, x, x+.

    Returns a tuple (normalized, is_read, is_write).

    Raises ValueError for duplicated characters or an unknown base mode.
    """
    unique = set(mode)
    # Duplicate characters (e.g. "rr") are invalid.
    if len(unique) != len(mode):
        raise ValueError(f"invalid mode: {mode}")
    updatable = "+" in unique
    base_set = unique - {"b", "+"}
    base = base_set.pop() if base_set else "invalid"
    # More than one base character, or a base outside r/w/x, is invalid.
    if base_set or base not in "rwx":
        raise ValueError(f"invalid mode: {mode}")
    if updatable:
        return (f"{base}+", True, True)
    return (base, base == "r", base != "r")
78f072eee4c32286e03ea76a83be793b60e3452f
73,530
def filter_not_in(values, vmin=None, vmax=None):
    """Handle 'not in' filters against a range boundary.

    values: iterable of values (must support len() and membership tests)
    vmin, vmax: the range endpoints to compare against

    Returns True when either endpoint appears in *values* (vmax checked
    first), False for an empty collection or no match.
    """
    if not values:
        return False
    if vmax is not None and vmax in values:
        return True
    return vmin is not None and vmin in values
d9e29e94368866205c4fff0289fb95b892171db8
73,538
from typing import Tuple


def mapRange(a: Tuple[float, float], b: Tuple[float, float], s: float) -> float:
    """Linearly map *s* from interval *a* onto interval *b*.

    Used to derive an extrusion-flow multiplier from the distance to the
    perimeter.

    Args:
        a: (a1, a2) — the input interval; a1 is the minimum distance to
            the perimeter, a2 the maximum distance where interpolation
            is performed.
        b: (b1, b2) — the output interval; b1 the maximum flow fraction,
            b2 the minimum flow fraction.
        s: euclidean distance from the middle of a segment to the
            nearest perimeter.

    Returns:
        The interpolated multiplier for the modified extrusion value.
    """
    a1, a2 = a
    b1, b2 = b
    return b1 + ((s - a1) * (b2 - b1) / (a2 - a1))
1cb3c27f2b117a9f89bd67b597d143fd80dcf314
73,539
def import_txt(file_name, two_dimensional=False, **kwargs):
    """Read control points from a text file into a 1-D list.

    Keyword arguments ``separator`` (default ",") and ``col_separator``
    (default ";") set, respectively, the delimiter between the coordinates
    of a control point and — for two-dimensional files only — the delimiter
    between control points on one row. The function does not check whether
    the two delimiters were set to the same value.

    Improvements over the previous version: the redundant
    ``except Exception: raise`` clause is removed, ``raise e`` is replaced
    by a bare ``raise`` (preserves the original traceback), and the
    per-row point count is taken with ``len()`` instead of a manual
    counter.

    :param file_name: file name of the text file
    :type file_name: str
    :param two_dimensional: type of the text file
    :type two_dimensional: bool
    :return: list of control points; if two_dimensional, also returns
        size in u- and v-directions
    :rtype: list
    :raises IOError: an error occurred reading the file
    """
    # File delimiters
    col_sep = kwargs.get('col_separator', ";")
    sep = kwargs.get('separator', ",")

    ctrlpts = []
    try:
        with open(file_name, 'r') as fp:
            if two_dimensional:
                size_u = 0
                size_v = 0
                for line in fp:
                    # Each line is one u-row; split it into its v-points.
                    rows = line.strip().split(col_sep)
                    size_v = len(rows)
                    for row in rows:
                        ctrlpts.append([float(c.strip()) for c in row.split(sep)])
                    size_u += 1
                return ctrlpts, size_u, size_v
            for line in fp:
                ctrlpts.append([float(c.strip()) for c in line.strip().split(sep)])
            return ctrlpts
    except IOError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise
7733ce80bd891f97d2131cb7f826cdbe2e2e5b36
73,540