content: string (39 – 9.28k chars) · sha1: string (40 chars) · id: int64 (8 – 710k)
def get_loss(preds, target, model_dict):
    """Returns the average loss value"""
    loss = model_dict["loss"](preds, target)
    return loss.mean()
8d5359aa7c0d137dfdf70ab3a297b6844493c9f3
492,489
import tkinter
from typing import Callable
from typing import Any
import functools


def bind_tab_key(
    widget: tkinter.Widget,
    on_tab: Callable[["tkinter.Event[Any]", bool], Any],
    **bind_kwargs: Any,
) -> None:
    """A convenience function for binding Tab and Shift+Tab.

    Use this function like this::

        def on_tab(event, shifted):
            # shifted is True if the user held down shift while pressing
            # tab, and False otherwise
            ...

        utils.bind_tab_key(some_widget, on_tab, add=True)

    The ``event`` argument and ``on_tab()`` return values are treated
    just like with regular bindings.

    Binding ``'<Tab>'`` works just fine everywhere, but binding
    ``'<Shift-Tab>'`` only works on Windows and Mac OSX. This function
    also works on X11.
    """
    # there's something for this in more_functools, but it's a big
    # dependency for something this simple imo
    def callback(shifted: bool, event: tkinter.Event[tkinter.Misc]) -> Any:
        return on_tab(event, shifted)

    if widget.tk.call("tk", "windowingsystem") == "x11":
        # even though the event keysym says Left, holding down the right
        # shift and pressing tab also works :D
        shift_tab = "<ISO_Left_Tab>"
    else:
        shift_tab = "<Shift-Tab>"

    widget.bind("<Tab>", functools.partial(callback, False), **bind_kwargs)  # bindcheck: ignore
    widget.bind(shift_tab, functools.partial(callback, True), **bind_kwargs)
751d79ba4b53ed5280c64822cb4d74815aba4df1
565,524
def format_time(seconds, total=None, short=False):
    """
    Format ``seconds`` (number of seconds) as a string representation.

    When ``short`` is False (the default) the format is: HH:MM:SS.
    Otherwise, the format is exactly 6 characters long and of the form:

        1w 3d
        2d 4h
        1h 5m
        1m 4s
           15s

    If ``total`` is not None it will also be formatted and appended to
    the result separated by ' / '.
    """
    def time_tuple(ts):
        if ts is None or ts < 0:
            ts = 0
        hours = ts / 3600
        mins = (ts % 3600) / 60
        secs = (ts % 3600) % 60
        tstr = '%02d:%02d' % (mins, secs)
        if int(hours):
            tstr = '%02d:%s' % (hours, tstr)
        return (int(hours), int(mins), int(secs), tstr)

    if not short:
        hours, mins, secs, curr_str = time_tuple(seconds)
        retval = curr_str
        if total:
            hours, mins, secs, total_str = time_tuple(total)
            retval += ' / %s' % total_str
        return retval
    else:
        units = [
            (u'y', 60 * 60 * 24 * 7 * 52),
            (u'w', 60 * 60 * 24 * 7),
            (u'd', 60 * 60 * 24),
            (u'h', 60 * 60),
            (u'm', 60),
            (u's', 1),
        ]
        seconds = int(seconds)
        if seconds < 60:
            return u'00m {0:02d}s'.format(seconds)
        for i in range(len(units) - 1):
            unit1, limit1 = units[i]
            unit2, limit2 = units[i + 1]
            if seconds >= limit1:
                return u'{0:02d}{1} {2:02d}{3}'.format(
                    seconds // limit1, unit1,
                    (seconds % limit1) // limit2, unit2)
        return u' ~inf'
456438e356b375af77fe1527f35ea22d230450f0
77,607
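A quick sanity check of both output formats, derived from the function as written above:

format_time(3725)              # '01:02:05'
format_time(90, total=300)     # '01:30 / 05:00'
format_time(3725, short=True)  # '01h 02m'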
import typing


def exclude_header(
    headers: typing.Sequence[str], exclude: typing.Set[str]
) -> typing.Sequence[typing.Optional[str]]:
    """
    Exclude header.

    Exclude columns from header by changing the entry to None.

    :param headers: headers
    :param exclude: columns to be excluded
    :returns: header with columns excluded
    """
    excluded_headers = [None if current in exclude else current
                        for current in headers]
    return excluded_headers
29d432abcae17848c42b6d238b744157a51ba133
41,940
def format_schema_tf(schema):
    """Format schema for an Athena table for terraform.

    Args:
        schema (dict): Equivalent Athena schema used for generating
            create table statement

    Returns:
        formatted_schema (list(tuple))
    """
    # Construct the main Athena Schema
    formatted_schema = []
    for key_name in sorted(schema.keys()):
        key_type = schema[key_name]
        if isinstance(key_type, str):
            formatted_schema.append((key_name.lower(), key_type))
        # Account for nested structs
        elif isinstance(key_type, dict):
            struct_schema = ','.join(
                '{0}:{1}'.format(sub_key.lower(), key_type[sub_key])
                for sub_key in sorted(key_type.keys())
            )
            formatted_schema.append((key_name.lower(), 'struct<{}>'.format(struct_schema)))

    return formatted_schema
ba808b145c16f2b13419f7dcdd0c55901da4ec8f
663,776
def stats_to_list(stats_dict):
    """
    Parse the output of ``SESConnection.get_send_statistics()`` in to an
    ordered list of 15-minute summaries.
    """
    result = stats_dict['GetSendStatisticsResponse']['GetSendStatisticsResult']
    datapoints = [dp for dp in result['SendDataPoints']]
    datapoints.sort(key=lambda x: x['Timestamp'])
    return datapoints
9a0b1709eedec66edca722d60578a2ba2385cc9d
26,153
from typing import List


def percentile(values: List[float], n: float) -> float:
    """
    Calculate the linear interpolated percentile from an array of values

    :param values: an array of values
    :param n: percentage / 100 : 0 < n < 1
    :return: percentile value
    """
    if len(values) == 1:
        return values[0]
    values = sorted(values)
    float_index = n * (len(values) - 1)
    i = int(float_index)
    w = abs(float_index - i)
    return values[i] + w * (values[i + 1] - values[i])
d050e62f8220276611b5ef6161341d93c15b1d4a
237,773
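Two worked values, following the interpolation above (index 1.5 falls halfway between the second and third elements):

percentile([1.0, 2.0, 3.0, 4.0], 0.5)   # 2.5
percentile([1.0, 2.0, 3.0, 4.0], 0.25)  # 1.75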
def prepare_prediction_column(prediction):
    """ Return the cluster label of the highest probability. """
    return prediction.argmax(axis=-1)
a0a4dd8af6974d1da0ca186bd7265752265b6abe
267,543
def in_region(pos, b, e):
    r"""Returns pos \in [b, e]"""
    return b <= pos <= e
274d65c05a3af0d70ce61ed50052d57bbbcef000
121,402
def _bop_or(obj1, obj2):
    """Boolean or."""
    return bool(obj1) or bool(obj2)
9638d7680cf71f81839b825c81bd844d9820f545
621,808
def in_costmap_bounds(data, map_x, map_y):
    """
    whether a pixel at (map_x, map_y) is inside the costmap area

    :param data: data array
    :param map_x int: x coordinate
    :param map_y int: y coordinate
    :return bool: whether a pixel at (map_x, map_y) is inside the costmap area
    """
    return not (map_x < 0 or map_y < 0 or map_x >= data.shape[1] or map_y >= data.shape[0])
754c01f7c4af4ac19167dbddc0204dcf7ef4bd52
445,208
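A minimal usage sketch, assuming a numpy array costmap where shape[0] is the y extent and shape[1] the x extent, as the bounds check above implies:

import numpy as np

grid = np.zeros((5, 10))        # 5 rows (y), 10 columns (x)
in_costmap_bounds(grid, 9, 4)   # True: last valid pixel
in_costmap_bounds(grid, 10, 0)  # False: x is out of range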
def _IntegerCeilingDivide(numerator, denominator):
    """Returns numerator/denominator rounded up if there is any remainder."""
    return -(-numerator // denominator)
a30875fde1d864b749b2d881915c9c463e79ed83
579,378
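The double negation works because Python's // floors toward negative infinity, so negating the operands turns the floor into a ceiling:

_IntegerCeilingDivide(7, 2)  # 4, since -(-7 // 2) == -(-4)
_IntegerCeilingDivide(6, 2)  # 3: exact division is unchanged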
def string_prepend(prefix: str, string: str):
    """Prepends each line in `string` with `prefix`."""
    sub = "\n" + prefix
    return prefix + string.replace("\n", sub)
4a9e78d482fab74c16b97a182b76ed64db4a0b72
275,998
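For example:

string_prepend("> ", "first\nsecond")  # '> first\n> second'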
import re


def strip_between(a, b, string):
    """Removes anything between (and including) string a and b inside the
    given string.

    Note: a and b are compiled as regular expression patterns, so escape
    them with re.escape() if literal matching is intended.
    """
    p = "%s.*?%s" % (a, b)
    p = re.compile(p, re.DOTALL | re.I)
    return re.sub(p, "", string)
d550b8a025bb123a6ec3d04faa446dd774d3b4c1
377,104
def getCustodian(cur, custodian):
    """
    The getCustodian function checks the database for a custodian and
    returns the ID if present

    :param cur: The sqlite3 database cursor object
    :param custodian: The name of the custodian
    :return: The custodian ID
    """
    # Use a parameterized query rather than string formatting to avoid
    # SQL injection through the custodian name.
    sql = "SELECT id FROM Custodians WHERE name=?;"
    cur.execute(sql, (custodian,))
    data = cur.fetchone()
    return data
2198af45330637d7bdea03e5197faacaf9f88daa
405,750
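A minimal round trip against an in-memory SQLite database (the table name and query come from the function above; the schema here is an assumption for illustration):

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE Custodians (id INTEGER PRIMARY KEY, name TEXT)")
cur.execute("INSERT INTO Custodians (name) VALUES (?)", ("Alice",))
print(getCustodian(cur, "Alice"))  # (1,)
print(getCustodian(cur, "Bob"))    # None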
import io
import math


def build_pretty_binary_heap(heap: list, total_width=36, fill=" ") -> str:
    """Returns a string (which can be printed) representing heap as a tree.

    To increase/decrease the horizontal space between nodes, just
    increase/decrease the float number h_space. To increase/decrease the
    vertical space between nodes, just increase/decrease the integer number
    v_space. Note: v_space must be an integer.

    To change the length of the line under the heap, you can simply change
    the line_length variable."""
    if not isinstance(heap, list):
        raise TypeError("heap must be a list object")
    if len(heap) == 0:
        return "Nothing to print: heap is empty."

    output = io.StringIO()
    last_row = -1
    h_space = 3.0
    v_space = 2

    for i, heap_node in enumerate(heap):
        if i != 0:
            row = int(math.floor(math.log(i + 1, 2)))
        else:
            row = 0
        if row != last_row:
            output.write("\n" * v_space)

        columns = 2 ** row
        column_width = int(math.floor((total_width * h_space) / columns))
        output.write(str(heap_node).center(column_width, fill))
        last_row = row

    s = output.getvalue() + "\n"
    line_length = total_width + 15
    s += ('-' * line_length + "\n")
    return s
3192eb35813ee2f54231165c9cd7879bc78d5a88
68,245
import click


def get_checkpoint(db, id_or_alias):
    """Returns checkpoint entry in `db` indicated by `id_or_alias`.

    First tries to match an ID, then an alias. For the case of repeated
    aliases, will first match local checkpoints and then remotes. In both
    cases, matching will be newest first.
    """
    # Go through the checkpoints ordered by creation date. There shouldn't be
    # repeated aliases, but if there are, prioritize the newest one.
    local_checkpoints = sorted(
        [c for c in db['checkpoints'] if c['source'] == 'local'],
        key=lambda c: c['created_at'], reverse=True
    )
    remote_checkpoints = sorted(
        [c for c in db['checkpoints'] if c['source'] == 'remote'],
        key=lambda c: c['created_at'], reverse=True
    )

    selected = []
    for cp in local_checkpoints:
        if cp['id'] == id_or_alias or cp['alias'] == id_or_alias:
            selected.append(cp)
    for cp in remote_checkpoints:
        if cp['id'] == id_or_alias or cp['alias'] == id_or_alias:
            selected.append(cp)

    if len(selected) < 1:
        return None

    if len(selected) > 1:
        click.echo(
            "Multiple checkpoints found for '{}' ({}). Returning '{}'.".format(
                id_or_alias, len(selected), selected[0]['id']
            )
        )

    return selected[0]
84eff1deb3df8c4829cc177aacd89b09501d6b33
615,682
def to_hex(s):
    """
    Transforms a string to hexadecimal notation.
    """
    hex_str = ' '.join("{0:02x}".format(ord(c)) for c in s)
    return '\n'.join([hex_str[i:i + 48] for i in range(0, len(hex_str), 48)])
0c71b398291ac9081a7194509fc65218af404cb2
99,983
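For example ('H' is 0x48, 'i' is 0x69):

to_hex("Hi")  # '48 69'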
def _remove_model_weights(model: dict, to_delete=None) -> dict:
    """
    Removes certain weights of a given model. The weights to remove are given
    by the to_delete argument. If there is also a bias term, that is deleted
    as well.

    Args:
        model: Loaded detectron2 model
        to_delete (list): Names of the weights to delete from the model,
            by default: ['roi_heads.box_predictor.cls_score',
            'roi_heads.box_predictor.bbox_pred']
    """
    assert isinstance(model, dict)
    assert 'model' in model
    # print("Removing model weights with to_delete = None\n It is recommended to specify the to_delete weights directly, or use remove_model_weights_fsdet etc")
    # to_delete default values written here in order for default args to be immutable.
    if to_delete is None:
        # Heads in the bbox predictor:
        to_delete = ['roi_heads.box_predictor.cls_score',
                     'roi_heads.box_predictor.bbox_pred']

    for param_name in to_delete:
        del model['model'][param_name + '.weight']
        if param_name + '.bias' in model['model']:
            del model['model'][param_name + '.bias']
    return model
70f88910db2fae52893869fc7acda8161b5df61e
44,037
def normalise_email(email):
    """
    The local part of an email address is case-sensitive, the domain part
    isn't. This function lowercases the host and should be used in all
    email handling.
    """
    clean_email = email.strip()
    if '@' in clean_email:
        # Split on the last '@' so a (quoted) local part containing '@'
        # doesn't raise a ValueError.
        local, host = clean_email.rsplit('@', 1)
        return local + '@' + host.lower()
    return clean_email
1d768cbd601ce4a8d5c7624c490eb6a2491a7ba9
226,591
import functools


def lazy_property(fn):
    """A property decorator that caches the returned value."""
    key = fn.__name__ + "_cache_val_"

    @functools.wraps(fn)
    def _lazy(self):
        """Very simple cache."""
        if not hasattr(self, key):
            setattr(self, key, fn(self))
        return getattr(self, key)

    return property(_lazy)
7ea8d0679b268b2db34193f0c0d52a1f1814b6fa
571,347
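Typical usage, with a hypothetical Circle class for illustration: the decorated body runs once, after which the cached attribute value is returned:

class Circle:
    def __init__(self, r):
        self.r = r

    @lazy_property
    def area(self):
        print("computing...")
        return 3.14159 * self.r ** 2

c = Circle(2.0)
c.area  # prints "computing...", returns 12.56636
c.area  # cache hit: returns 12.56636 without printing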
def get_publishers_of_articles_missing_type(articles):
    """
    :param articles: PyMongo collection of articles
    :return: Set: set of publishers of articles that are missing the doc_type
        property, defined as not having the field at all, or having it equal
        to the empty string.
    """
    return articles.distinct(
        "publisher",
        {
            "$or": [{"doc_type": {"$exists": False}}, {"doc_type": ""}]
        }
    )
ab5319856c26a349a37d70f65c5292b6ea6a0e59
302,036
def create_sequence_string(cluster):
    """
    Creates a string containing the cluster's sequences with their respective
    spectrum counts.

    :param cluster: The cluster to process
    :return: A string containing the cluster's sequences with their counts.
    """
    sequence_strings = list()

    if cluster.identified_spectra == 0:
        return "[]"

    for sequence in cluster.sequence_counts.keys():
        sequence_strings.append(sequence + ":" + str(cluster.sequence_counts.get(sequence)))

    final_string = "[" + ",".join(sequence_strings) + "]"
    return final_string
c7e5f8c5b55b193fa620de13ad4388dbef0dd701
499,295
def is_left(p0, p1, p2):
    """
    is_left(): tests if a point is Left|On|Right of an infinite line.

    Input:  three points P0, P1, and P2
    Return: >0 for P2 left of the line through P0 and P1
            =0 for P2 on the line
            <0 for P2 right of the line

    See: Algorithm 1 "Area of Triangles and Polygons"
         http://geomalgorithms.com/a03-_inclusion.html

    :param p0: point [x,y] array
    :param p1: point [x,y] array
    :param p2: point [x,y] array
    :return:
    """
    v = (p1[0] - p0[0]) * (p2[1] - p0[1]) - (p2[0] - p0[0]) * (p1[1] - p0[1])
    return v
8f7ff0161716e38ae91a9296fd84e730f0dcca0f
94,679
def mock_id_formatter(issue, format='long', **kwargs):
    """
    Always return the issue id converted to its string representation.
    """
    return str(issue['id'])
1a0f3232879c7b7123cd369292f299535ca0f2e8
567,384
def tariff_transform(value):
    """Transform tariff from number to description."""
    if value == "1":
        return "low"
    return "high"
5a32cc3416f9d5998c3dd6594d3c33d2578d875a
575,965
def wid_to_gid(wid):
    """Calculate gid of a worksheet from its wid."""
    widval = wid[1:] if len(wid) > 3 else wid
    xorval = 474 if len(wid) > 3 else 31578
    return str(int(widval, 36) ^ xorval)
bf18e51ccc60fa19848b2cb090fb257991920b47
339,477
def truncate_chars_end_word(value: str, max_length: int):
    """
    Truncates a string after a specified character length but does not cut
    off mid-word.
    """
    if len(value) > max_length:
        truncd_val = value[:max_length]
        if value[max_length] != " ":
            truncd_val = truncd_val[:truncd_val.rfind(" ")]
        return truncd_val + "..."
    return value
4348293177e88b5f1bc453431f676c3734217553
111,465
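Tracing the function above: with max_length=12 the cut would land inside "brown", so the string is trimmed back to the previous space:

truncate_chars_end_word("the quick brown fox", 12)  # 'the quick...'
truncate_chars_end_word("short", 12)                # 'short'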
def rgbToCMYK(r, g, b):
    """
    Converts RGB to CMYK

    :param r: red value (from 0.0 to 1.0)
    :param g: green value (from 0.0 to 1.0)
    :param b: blue value (from 0.0 to 1.0)
    :return: tuple containing CMYK color values (from 0.0 to 1.0 each)
    """
    k = 1 - max([r, g, b])
    if k == 1:
        # Pure black: avoid dividing by zero below.
        return 0, 0, 0, 1
    c = (1 - r - k) / (1 - k)
    m = (1 - g - k) / (1 - k)
    y = (1 - b - k) / (1 - k)
    return c, m, y, k
d03ad0164aea91e9664e39b60567bc1484032b41
279,499
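Two worked conversions, the second exercising the k == 1 guard added above:

rgbToCMYK(1.0, 0.0, 0.0)  # (0.0, 1.0, 1.0, 0) -- pure red
rgbToCMYK(0.0, 0.0, 0.0)  # (0, 0, 0, 1) -- pure black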
def data(prod, data):
    """Parse data using production and return the unboxed value from the
    result of read()."""
    (result, _) = prod.read(data)
    return result
870a28d5a09e92b9642ec16465be2f6441da69ca
74,901
def _InsertString(original_string, inserted_string, index):
    """Insert a string into another string at a given index."""
    return original_string[0:index] + inserted_string + original_string[index:]
7b12a8cfb19f3304127cc8a461d0a56cfed64d54
555,395
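For example:

_InsertString("hello world", " there", 5)  # 'hello there world'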
def remove_stop_words(stop_words, words):
    """Remove stop words from a list of words."""
    wl = []
    for word in words:
        word = word.lower()
        if word not in stop_words:
            wl.append(word)
    return wl
45df31d7b7bf749c96f3473813d73a6b5e47953e
566,228
import click


def running_command_name() -> str:
    """
    Returns the current CLI command name as a space-separated string, or
    ``id3c`` if not running under any command.
    """
    appname = None

    context = click.get_current_context(silent=True)
    if context:
        appname = context.command_path

    if not appname:
        appname = "id3c"

    return appname
a7a4ceb306240032c87b1c9433e4e597ef1c35ff
331,821
import torch


def weighted_dice_loss(
    prediction,
    target_seg,
    weighted_val: float = 1.0,
    reduction: str = "mean",
    eps: float = 1e-8,
):
    """
    Weighted version of Dice Loss

    Args:
        prediction: prediction
        target_seg: segmentation target
        weighted_val: values of k positives,
        reduction: 'none' | 'mean' | 'sum'
            'none': No reduction will be applied to the output.
            'mean': The output will be averaged.
            'sum' : The output will be summed.
        eps: the minimum eps,
    """
    target_seg_fg = target_seg == 1
    target_seg_bg = target_seg == 0
    target_seg = torch.stack([target_seg_bg, target_seg_fg], dim=1).float()

    n, _, h, w = target_seg.shape
    prediction = prediction.reshape(-1, h, w)
    target_seg = target_seg.reshape(-1, h, w)
    prediction = torch.sigmoid(prediction)
    prediction = prediction.reshape(-1, h * w)
    target_seg = target_seg.reshape(-1, h * w)

    # calculate dice loss
    loss_part = (prediction ** 2).sum(dim=-1) + (target_seg ** 2).sum(dim=-1)
    loss = 1 - 2 * (target_seg * prediction).sum(dim=-1) / torch.clamp(loss_part, min=eps)
    # normalize the loss
    loss = loss * weighted_val

    if reduction == "sum":
        loss = loss.sum() / n
    elif reduction == "mean":
        loss = loss.mean()
    return loss
d7163342c7280b60287307b4445eded624b1b01b
58,875
def count_interests(rows):
    """counts how many rows have non-None interests"""
    return len([row for row in rows if row["interest"] is not None])
1676ea037836b5e0e0bc8043b690b6cbfb418f1f
281,775
from typing import List
from typing import Tuple


def concatenate_shapes(shapes: List[Tuple[int, ...]], axis: int):
    """Concatenate shapes along axis"""
    out = list(shapes[0])
    out[axis] = sum(s[axis] for s in shapes)
    return tuple(out)
959a66fec11fa7d67218f2fb1b76d5bcf990d463
26,386
def make_header_id_unique(header_id, used_ids):
    """Make the given ID unique given a dictionary of used_ids

    Arguments:
    header_id - Slugified header ID
    used_ids - Dictionary associating each header ID without suffixes to the
               number of times that such ID has been used.
    """
    if header_id in used_ids:
        unique_header_id = header_id + '-' + str(used_ids[header_id])
        used_ids[header_id] += 1
    else:
        unique_header_id = header_id
        used_ids[header_id] = 1
    return unique_header_id
03aecc6a529364150836cface521b3937b67463c
197,141
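Repeated calls with the same slug show the suffixing; note that the function mutates used_ids in place:

used_ids = {}
make_header_id_unique("intro", used_ids)  # 'intro'
make_header_id_unique("intro", used_ids)  # 'intro-1'
make_header_id_unique("intro", used_ids)  # 'intro-2'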
def read_peaks(path):
    """Read in a list of peaks from a file.

    Parameters
    ----------
    path : str
        path of the file to read

    Returns
    -------
    a : list
        list with the elements on each line separated
    """
    with open(path, "rt") as f:
        a = f.readlines()
    a = [x.split() for x in a]
    return a
81958a9bd665ff3c040d95cb7ec2f50734ad3444
234,497
def from_dero(value_in_dero):
    """Convert number in dero to smallest unit"""
    return int(value_in_dero * 10 ** 12)
6a27469721cbd9851312f73a971caf279c95ffa8
698,072
def dict_has_keys(dct, *keys):
    """Return True if the dictionary has all keys."""
    for k in keys:
        if k not in dct:
            return False
    return True
577e94a82d19df0a9c87f76740c6c85c282378a9
383,858
def _prune_folders(folders, includes, excludes):
    """
    Given a list of folders, return a copy of it that includes just the
    folders that should be considered part of the build based on the given
    list of include and exclude patterns.

    The filters are applied in the order: "include, exclude" such that if
    there are no includes, we assume that everything is included by default.
    """
    result = []
    for folder in folders:
        if not includes or folder in includes:
            if folder not in excludes:
                result.append(folder)

    return result
9ee3062d2c104b9c38426363e00a0f37116d4f65
480,187
def group_cc_emails(audit_ccs, assessment_ccs):
    """Returns grouped cc emails between audit and assessment.

    Args:
        audit_ccs: List of audit ccs
        assessment_ccs: List of assessment ccs

    Returns:
        Grouped list of ccs
    """
    audit_ccs = frozenset(audit_ccs)
    assessment_ccs = frozenset(assessment_ccs)
    grouped_ccs = list(audit_ccs.union(assessment_ccs))
    return grouped_ccs
adc4014a915e2ea191aefbfdc3af48ae4f17ae7f
68,828
def address_to_text(addr, kb):
    """
    Properly convert an address to text for a label.

    :param int addr: The address to convert.
    :param angr.KnowledgeBase kb: The knowledgebase in use.
    :return: Text representation of the address.
    :rtype: str
    """
    if addr in kb.labels:
        return kb.labels[addr]
    return "loc_%#x" % addr
9a3689a77be4fb0c489f867c50bd4df5e796b606
513,519
import torch


def create_optimizer(optim_cfg: dict, model: torch.nn.Module) -> torch.optim.Optimizer:
    """
    Take in optimizer config and create the optimizer for training.
    """
    name = optim_cfg.get("type", None)
    # Guard against a missing "type" key before calling .lower() on it.
    if name is None:
        raise ValueError("No optimizer type supplied.")

    if name.lower() == "sgd":
        lr = float(optim_cfg["lr"])
        momentum = float(optim_cfg["momentum"])
        weight_decay = float(optim_cfg["weight_decay"])
        optimizer = torch.optim.SGD(
            model.parameters(),
            lr=lr,
            momentum=momentum,
            weight_decay=weight_decay,
            nesterov=True,
        )
    elif name.lower() == "rmsprop":
        lr = float(optim_cfg["lr"])
        momentum = float(optim_cfg["momentum"])
        weight_decay = float(optim_cfg["weight_decay"])
        optimizer = torch.optim.RMSprop(
            model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay
        )
    else:
        raise ValueError(f"Improper optimizer supplied {name}.")

    return optimizer
1f55b4c07cb6911aeda8aba4bcffcdb5626dcb13
568,924
def get_products(num1, num2):
    """Takes two numbers as lists of integers and multiplies them to yield
    a list of partial products as lists of integers"""
    products = [[] for i in range(len(num2))]
    for i, dig2 in enumerate(num2[::-1]):
        carry = 0
        for dig1 in num1[::-1]:
            pp = dig2 * dig1 + carry
            pp, carry = pp % 10, pp // 10
            products[i].insert(0, pp)
        products[i].insert(0, carry)
    return products
813d5acf3c9b1ed485759f15ac80c7cf3fbf0c28
430,341
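For 12 x 13, the partial products are 12*3 = 36 and 12*1 = 12; each result list carries a leading carry digit, so:

get_products([1, 2], [1, 3])  # [[0, 3, 6], [0, 1, 2]]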
import inspect


def get_object_classes(classes_or_instances, expected_base_class=None):
    """Given a list of instances or class objects, return the list of their
    classes.

    :param classes_or_instances: mixed list to parse
    :type classes_or_instances: list[type or object]
    :param expected_base_class: if given, only subclasses or instances of
        this type will be returned
    :type expected_base_class: type
    :return: list of classes
    :rtype: list
    """
    classes_or_instances = classes_or_instances or []
    result = []
    for obj in classes_or_instances:
        if inspect.isclass(obj):
            if not expected_base_class or issubclass(obj, expected_base_class):
                result.append(obj)
        else:
            if not expected_base_class or isinstance(obj, expected_base_class):
                result.append(type(obj))
    return result
738b99224111d940c0a14c753fad69eb66f16271
672,871
def parse_purl(purl):
    """
    Finds the purl in the body of the Eiffel event message and parses it.

    :param purl: the purl from the Eiffel ArtC event
    :return: tuple: the artifact filename and the substring from the build path
    """
    # pkg:<build_path>/<intermediate_directories>/<artifact_filename>@<build_number>
    # artifact_filename_and_build = purl.split('/')[-1]
    # artifact_filename = artifact_filename_and_build.split('@')[0]
    # build_path = purl.split('pkg:')[1].split('/artifacts')[0]

    # pkg:<intermediate_directories>/<artifact_filename>@<build_number>?build_path=<build_path>
    # for when Eiffel Broadcaster is updated
    artifact_filename = purl.split('@')[0].split('/')[-1]
    build_path = purl.split('?build_path=')[-1]
    return artifact_filename, build_path
9071f2a88ea5879999da04babf11b1d4c8228cea
185,829
def tlv_file_mapping(tlv_data_mapping, tmp_path):
    """Return the path to an XML file containing the mapping."""
    path = tmp_path / "mapping.xml"
    path.write_bytes(tlv_data_mapping)
    return path
ba24e3711c2e90f76b34cb719c2110cc59f91c3e
569,217
from operator import and_


def get_tool_version_association(app, parent_tool_version, tool_version):
    """Return a ToolVersionAssociation if one exists that associates the two
    received tool_versions"""
    sa_session = app.model.context.current
    return sa_session.query(app.model.ToolVersionAssociation) \
        .filter(and_(app.model.ToolVersionAssociation.table.c.parent_id == parent_tool_version.id,
                     app.model.ToolVersionAssociation.table.c.tool_id == tool_version.id)) \
        .first()
6aa86e0e293dda54e26942111f584c060ece5997
165,552
def in_box(coords, box):
    """
    Find if a coordinate tuple is inside a bounding box.

    :param coords: Tuple containing latitude and longitude.
    :param box: Two tuples, where first is the bottom left, and the second
        is the top right of the box.
    :return: Boolean indicating if the coordinates are in the box.
    """
    if box[0][0] < coords[0] < box[1][0] and box[0][1] < coords[1] < box[1][1]:
        return True
    return False
ed4082b6311929e4982b4196ceaa566b05dfd714
682,804
def _GetClearedFieldsForCorsPolicy(cors_policy, field_prefix):
    """Gets a list of fields cleared by the user for CorsPolicy."""
    cleared_fields = []
    if not cors_policy.allowOrigins:
        cleared_fields.append(field_prefix + 'allowOrigins')
    if not cors_policy.allowOriginRegexes:
        cleared_fields.append(field_prefix + 'allowOriginRegexes')
    if not cors_policy.allowMethods:
        cleared_fields.append(field_prefix + 'allowMethods')
    if not cors_policy.allowHeaders:
        cleared_fields.append(field_prefix + 'allowHeaders')
    if not cors_policy.exposeHeaders:
        cleared_fields.append(field_prefix + 'exposeHeaders')
    if not cors_policy.maxAge:
        cleared_fields.append(field_prefix + 'maxAge')
    if not cors_policy.allowCredentials:
        cleared_fields.append(field_prefix + 'allowCredentials')
    if not cors_policy.disabled:
        cleared_fields.append(field_prefix + 'disabled')
    return cleared_fields
54fb0e874689b3ba621e59732d6f0e515efb1007
280,000
import re


def untokenize(words):
    """
    Untokenizing a text undoes the tokenizing operation, restoring
    punctuation and spaces to the places that people expect them to be.
    Ideally, `untokenize(tokenize(text))` should be identical to `text`,
    except for line breaks.
    """
    text = ' '.join(words)
    step1 = text.replace("`` ", '"').replace(" ''", '"').replace('. . .', '...')
    step2 = step1.replace(" ( ", " (").replace(" ) ", ") ")
    step3 = re.sub(r' ([.,:;?!%]+)([ \'"`])', r"\1\2", step2)
    step4 = re.sub(r' ([.,:;?!%]+)$', r"\1", step3)
    step5 = step4.replace(" '", "'").replace(" n't", "n't").replace(
        "can not", "cannot")
    step6 = step5.replace(" ` ", " '")
    return step6.strip()
dd6ffc0937104c23bbf59b5d71947f7aa5f47f01
554,969
def get_final_stage(args):
    """
    Gets the final stage number, which depends on PGO or a stage one only
    build.

    :param args: The args variable generated by parse_parameters
    :return: The final stage number
    """
    if args.build_stage1_only:
        return 1
    elif args.pgo:
        return 3
    else:
        return 2
d57db351b89a5e8e9a996802e997895b0aa792d5
585,927
def get_merge_targets(module):
    """Return a set of basic blocks that are targets of merge instructions."""
    merge_targets = set()
    for function in module.functions:
        for basic_block in function.basic_blocks:
            if (len(basic_block.insts) > 1 and
                    basic_block.insts[-2] in ['OpLoopMerge', 'OpSelectionMerge']):
                # The merge instruction is the second-to-last instruction in
                # the block; its first operand names the merge target.
                target_id = basic_block.insts[-2].operands[0]
                merge_targets.add(target_id.inst.basic_block)
    return merge_targets
df6f9741e1f2de17ebca4e7653bc967919a258c7
592,192
def not_found(e):
    """Return a 404 if the route is not found."""
    status_code = 404
    response = {
        'success': False,
        'message': 'Not a valid endpoint.',
    }, status_code
    return response
1714990cc4155cecd574928a7ac2f937b5f3124b
622,339
def create_dict_lookup(driver_names, key):
    """
    Creates a dict lookup from the list of Models.

    :param driver_names: List[leapp.models.Model[K, V]]
    :param key: a key value, that will be used as a primary key to generate a result Dict
    :return: Dict[leapp.models.Model[K,V].key, leapp.models.Model[K,V]]
    :raises ValueError: if there are duplicates of leapp.models.Model[K,V].key
    :raises AttributeError: if key not in K (Model attributes)

    Example:
        >>> create_dict_lookup([Model(a=1, b=2), Model(a=2, b=3)], key="b") == {
        >>>     2: Model(a=1, b=2),
        >>>     3: Model(a=2, b=3),
        >>> }
    """
    lookup = {getattr(item, key): item for item in driver_names}
    if len(lookup) != len(driver_names):
        raise ValueError("Duplicate key(s) found")
    return lookup
230b684574e066249f211bfe5e5a417db6c874cf
499,994
def lower_tree(tree):
    """
    Change all element names and attribute names to lower case.
    """
    root = tree.getroot()
    for node in root.iter():
        node.tag = node.tag.lower()
        attributes = dict()
        for attribute in node.attrib:
            attributes[attribute.lower()] = node.attrib[attribute]
        node.attrib = attributes
    tree._setroot(root)
    return tree
c61737154a86ee62f0c825de5d006913c624d817
668,558
def separate_categories(data):
    """
    Separate the rows concerning "script-identifiable edits" from those
    concerning "other edits". Also, in each category, and for each line,
    calculate the sum of levenshtein distances across all edits for that
    line.

    Return two separate DataFrames.
    """
    # Line 6 should have an entry in both other and script.
    data = data.groupby(["category", "line"], axis=0).sum()
    other = data.loc["other", :].reset_index()\
        .astype(int).sort_values(by=["line"])
    script = data.loc["script-identifiable", :]\
        .reset_index().astype(int).sort_values(by=["line"])
    # print(script.head(), other.head())
    return other, script
6d7b844bd69402fc32c2deb4c0ee3df131fdf6aa
21,538
def map_x_to_qubit(i, j, num_variables):
    """Map indices of x{i, j} variable to corresponding qubit number."""
    return i * num_variables + j
61896be2c0ba6b89af97c72b3eeeb1ee152a8e26
503,619
import torch


def flow_warp(img: torch.Tensor, flow: torch.Tensor) -> torch.Tensor:
    """Warp an image or feature map with optical flow.

    From: https://www.programcreek.com/python/example/104458/torch.nn.functional.grid_sample

    Args:
        img (Tensor): size (N, C, H, W)
        flow (Tensor): size (N, 2, H, W), normal value

    Returns:
        Tensor: warped image or feature map
    """
    B, C, H, W = img.size()
    # mesh grid
    grid_y, grid_x = torch.meshgrid(torch.arange(0, H), torch.arange(0, W))
    grid = torch.stack((grid_x, grid_y), 2).float()  # W(x), H(y), 2
    grid.requires_grad = False
    grid = grid.type_as(img)
    vgrid = grid[None] + flow.permute(0, 2, 3, 1)

    # scale grid to [-1,1]
    vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(W - 1, 1) - 1.0
    vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(H - 1, 1) - 1.0
    vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)
    output = torch.nn.functional.grid_sample(
        img, vgrid_scaled, mode="bilinear", padding_mode="border"
    )
    return output
2a10e33f87613b3b167daa346f98099733367fb6
324,926
def parse_int_list(text):
    """Parse a string into a list of integers.

    For example, the string "1,2,3,4" will be parsed to [1, 2, 3, 4].

    Parameters
    ----------
    text : str
        String to parse

    Returns
    -------
    List[int]
        Parsed integer list
    """
    result = [int(i) for i in text.split(',')]
    return result
f83b21c9038e5ea8eb2c0d53ad94479cab9258f3
14,485
def compose(*funcs):
    """Compose `funcs` to a single function.

    >>> compose(operator.abs, operator.add)(-2, -3)
    5
    >>> compose()('nada')
    'nada'
    >>> compose(sorted, set, partial(filter, None))(range(3)[::-1] * 2)
    [1, 2]
    """
    # slightly optimized for most common cases and hence verbose
    if len(funcs) == 2:
        f0, f1 = funcs
        return lambda *a, **kw: f0(f1(*a, **kw))
    elif len(funcs) == 3:
        f0, f1, f2 = funcs
        return lambda *a, **kw: f0(f1(f2(*a, **kw)))
    elif len(funcs) == 0:
        return lambda x: x  # XXX single kwarg
    elif len(funcs) == 1:
        return funcs[0]
    else:
        def composed(*args, **kwargs):
            y = funcs[-1](*args, **kwargs)
            # Apply the remaining functions right-to-left; funcs[-1] has
            # already been applied, so start from the second-to-last.
            for f in funcs[-2::-1]:
                y = f(y)
            return y
        return composed
9283e125a8c3c1f7e05d04df07d7314bb92fe5b6
478,102
import torch


def template_edge_loss(meshes, template_mesh):
    """
    Computes mesh edge length regularization loss averaged across all meshes
    in a batch. Each mesh contributes equally to the final loss, regardless
    of the number of edges per mesh in the batch by weighting each mesh with
    the inverse number of edges. For example, if mesh 3 (out of N) has only
    E=4 edges, then the loss for each edge in mesh 3 should be multiplied by
    1/E to contribute to the final loss.

    Args:
        meshes: Meshes object with a batch of meshes.
        template_mesh: Meshes object serving as the resting reference for
            the edge lengths.

    Returns:
        loss: Average loss across the batch. Returns 0 if meshes contains
        no meshes or all empty meshes.
    """
    if meshes.isempty():
        return torch.tensor(
            [0.0], dtype=torch.float32, device=meshes.device, requires_grad=True
        )

    N = len(meshes)
    edges_packed = meshes.edges_packed()  # (sum(E_n), 3)
    temp_edges_packed = template_mesh.edges_packed()  # (sum(E_n), 3)
    verts_packed = meshes.verts_packed()  # (sum(V_n), 3)
    temp_verts_packed = template_mesh.verts_packed()  # (sum(V_n), 3)

    verts_edges = verts_packed[edges_packed]
    temp_verts_edges = temp_verts_packed[temp_edges_packed]

    v0, v1 = verts_edges.unbind(1)
    t_v0, t_v1 = temp_verts_edges.unbind(1)
    edge_distance = (v0 - v1).norm(dim=1, p=2) ** 2.0
    t_edge_distance = (t_v0 - t_v1).norm(dim=1, p=2) ** 2.0
    loss = (edge_distance - t_edge_distance).norm(p=2)
    return loss / N
09fd636b21b6fe4cfe45a220b3f61c3c8cda42d1
586,979
def single_total(iterable, odd=False):
    """This example function sums the data contained in a single iterable,
    i.e. a list, tuple, set, pd.Series, etc.

    Args:
        iterable (list, tuple, set, pd.Series, etc): Any iterable that holds
            data that can be added together.
        odd (bool): If True, only odd items contribute to the total.

    Returns:
        total (int): Sum of passed iterable.
    """
    total = 0
    for item in iterable:
        if odd:
            if item % 2 != 0:
                total += int(item)
        else:
            total += int(item)
    return total
ff3fa5967772a86086930b0f8085a07ed6a7562a
469,417
import re


def replace_mods(sequence, remove_heavy=True):
    """
    Encloses modifications with brackets, e.g. ox -> [ox].

    Removes heavy labels from modifications (can be turned off with
    remove_heavy=False).
    """
    if remove_heavy:
        return re.sub(r'([a-z0-9]+)(-d[0-9]+)?([a-z0-9]+)', r'[\1\3]', sequence)
    else:
        return re.sub(r'([a-z\-0-9]+)', r'[\1]', sequence)
9fb9dd1616383bf8314524b6a8f0ca76f03d05e3
346,464
def construct_format_field_str(fname, fspec, conv):
    """
    Constructs a format field string from the field name, spec, and
    conversion character (``fname``, ``fspec``, ``conv``). See Python
    String Formatting for more info.
    """
    if fname is None:
        return ""
    ret = "{" + fname
    if conv:
        ret += "!" + conv
    if fspec:
        ret += ":" + fspec
    ret += "}"
    return ret
ac41890ad21acf828aff0d775d98a39b9adf63bf
456,297
def get_tags_from_message(message):
    """
    Given a message string, extracts hashtags and returns a comma-separated
    list.

    :param message: a Hipchat message body
    """
    tags = {word.strip('#') for word in message.split() if word.startswith('#')}
    return ','.join(tags)
528f7702f43f8f81adf942c79b292f508773d205
701,967
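For example (with several tags the order of the result is unspecified, since a set is used internally):

get_tags_from_message("fixed the build #ci")  # 'ci'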
import pickle


def load_metrics(filestr: str):
    """
    Load metric dict from filestr.

    Args:
        filestr (str): path of the file to load the dict from
    Return:
        metrics (dict): loaded metrics
    """
    # pickle load
    with open(filestr, 'rb') as f:
        metrics = pickle.load(f)
    return metrics
199a30d7391e81a78ba7f4567de35e8c4f0a13a5
672,956
def require_atlas(f):
    """Decorator that ensures a subject (the first argument) is from the
    ATLAS survey.
    """
    def g(subject, *args, **kwargs):
        if subject['metadata']['survey'] != 'atlas':
            raise ValueError('Subject not from ATLAS survey.')
        return f(subject, *args, **kwargs)
    return g
1fe2e88ae190ffde179e1c2f5945604f235c9382
665,589
def read_configfile(filename, separator=':'):
    """read_configfile(filename, separator=':')

    This function reads a configuration file, extracting <keyword>, <value>
    pairs to a dictionary.

    Parameters:
    - filename: name of the file to be read
    - separator: character used to separate the <keyword> from the <value>

    Return value:
    - a dictionary containing the <keyword>, <value> pairs.

    Notes about the configuration file format:
    - A <keyword>, <value> pair must fit on one line.
    - A line, a keyword, or a value may start with as many blank characters
      as fit, those will be ignored and won't appear in the result.
    - Comments start with a "#".
    - One cannot mix values and comment on a line."""
    # This will store values read in the file as strings.
    result = {}

    # Opening the file for reading
    file = open(filename, 'r')
    # try/finally block to ensure the file is ALWAYS correctly closed.
    try:
        # Reading the file line by line.
        while 1:
            # Reading a line. A line may start/end with blanks that we wish
            # to ignore.
            line_with_blanks = file.readline()
            # If nothing is read, the end of file has been reached, we get
            # out of the loop.
            if not line_with_blanks:
                break
            # Stripping the leading/trailing blanks.
            line = line_with_blanks.strip()
            # If the line was only made of blanks, or starts with a "#", it
            # is of no interest.
            if not line or (line[0] == '#'):
                continue
            # Separating the <keyword> from the <value>.
            split_line = line.split(separator, 1)
            # Stripping leading/trailing blanks from the keyword and the
            # value. Storing the value as a string in "result".
            keyword = split_line[0].strip()
            result[keyword] = split_line[1].strip()
    # The file is now useless. We close it.
    finally:
        file.close()

    return result
e3ac53acf4451c76f665b6389858735464861ca6
233,062
def field_model(key):
    """
    Model to replace each row with the value of a single field in the row,
    with the specified key.
    """
    def model_fn(row):
        return row[key]
    return model_fn
a3d39e227b8b6288ed63a96733ba81432a25dbbd
569,253
from datetime import datetime
import uuid


def generate_ami_name(prefix):
    """
    Generate AMI image name.
    """
    # current date and time
    now = datetime.now()
    s2 = now.strftime("%Y%m%d%H%M%S")

    x = uuid.uuid4().hex
    postfix = str(x)[:10]

    ami_name = prefix + "-" + s2 + "-" + postfix
    return ami_name
50cf4e6f8ab55b252503319429594242cea9a77e
695,980
def cast_bool(value):
    """Cast a value to a bool.

    Will properly detect strings such as "True", "T", "1".
    """
    if isinstance(value, str):
        return value.strip().lower() in ["t", "true", "1"]
    return bool(value)
b4eb6ac47b67898f29826a90b291bac14a5019e8
468,110
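A few values, following the logic above:

cast_bool(" True ")  # True
cast_bool("yes")     # False -- only "t", "true", and "1" are recognized
cast_bool(0)         # False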
from typing import Mapping
from typing import Any


def update_analytics_params(stream_slice: Mapping[str, Any]) -> Mapping[str, Any]:
    """
    Produces the date range parameters from input stream_slice.
    """
    return {
        # Start date range
        "dateRange.start.day": stream_slice["dateRange"]["start.day"],
        "dateRange.start.month": stream_slice["dateRange"]["start.month"],
        "dateRange.start.year": stream_slice["dateRange"]["start.year"],
        # End date range
        "dateRange.end.day": stream_slice["dateRange"]["end.day"],
        "dateRange.end.month": stream_slice["dateRange"]["end.month"],
        "dateRange.end.year": stream_slice["dateRange"]["end.year"],
        # Chunk of fields
        "fields": stream_slice["fields"],
    }
ce3f4938dae50cae35f6f83c6e3f6e621e7db0cf
632,627
def direktmandate(votes_by_party):
    """Determine party that wins Direktmandat.

    Input:
        votes_by_party (DataFrame): By party the number of votes
    Output:
        direktmandat (DataFrame): Indicates the party that wins the Direktmandat
    """
    direktmandat = votes_by_party == votes_by_party.max()
    direktmandat = direktmandat.astype(int)
    return direktmandat
85d8e8ced7fc2439c6369e7b610fa162bfdfd366
275,180
def cleanjoin(listlike, join_on=""):
    """
    returns string of joined items in list, removing whitespace
    """
    return join_on.join([text.strip() for text in listlike]).strip()
026e847f645d052e722c02a443e68598b98085b7
439,516
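For example:

cleanjoin([" a ", "b ", " c"], join_on="-")  # 'a-b-c'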
def _apply_ith_rotation(i, args):
    """
    Applies the two-dimensional ("Givens") rotation matrix specified by
    cs[i] and sn[i] (the cosine and sine factors of the rotation) to the
    two-vector specified by the ith and i+1th components of h. Returns the
    rotated vector along with the unmodified cs and sn, which is necessary
    for Jax typing reasons.
    """
    h, cs, sn = args
    x1 = h[i]
    y1 = h[i + 1]
    x2 = cs[i].conj() * x1 - sn[i].conj() * y1
    y2 = sn[i] * x1 + cs[i] * y1
    h = h.at[i].set(x2)
    h = h.at[i + 1].set(y2)
    return h, cs, sn
130c0d220ed05dfcb050f4657d04f3431304d8ec
273,417
def date_name_converter(date):
    """Convert date strings like "DD-MonthName3Letters-YY" to "MM-DD-YY"."""
    for month_num, month in enumerate(
            ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
             'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']):
        num_str = str(month_num + 1)
        if len(num_str) == 1:
            num_str = '0' + num_str
        date = date.replace(month, num_str)

    date_dd, date_mm, date_yy = date.split('-')
    return '%s-%s-%s' % (date_mm, date_dd, date_yy)
208416fee6fb11b40de69f29b9bf215638b3ae21
84,137
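For example:

date_name_converter("07-Mar-21")  # '03-07-21'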
import json


def loadJSONFromFile(filename):
    """A generic function to read a JSON file.

    Args:
        filename: The full filename for the JSON file

    Returns:
        The object loaded from JSON
    """
    with open(filename, "r") as jsonFile:
        theObject = json.load(jsonFile)
    return theObject
d7a280e96ba890f58bd48e8d7a0c16f894a76772
290,891
def chunk_sent(text1: str, n_words_per_chunk: int, n_prev: int):
    """
    Chunks sentences into chunks having n_words_per_chunk using a window
    that considers the last n_prev words of the previous chunk.

    >>> some_text = "w1 w2 w3 w5. w6 w7 w8"
    >>> chunk_sent(some_text, 3, 1)
    ['w1 w2 w3', 'w3 w5. w6', 'w6 w7 w8']
    """
    alpha = n_words_per_chunk - n_prev
    l_total = []
    l_parcial = []
    if len(text1.split()) // alpha > 0:
        n = len(text1.split()) // alpha
    else:
        n = 1
    for w in range(n):
        if w == 0:
            l_parcial = text1.split()[:n_words_per_chunk]
            l_total.append(" ".join(l_parcial))
        else:
            l_parcial = text1.split()[w * alpha:w * alpha + n_words_per_chunk]
            l_total.append(" ".join(l_parcial))
    return l_total
13eb124ba6bf60cf86b103b928f39cbe0529d51a
179,711
def sorted_components(layout_area, row_height, comps):
    """
    Sort the components by row.

    :param layout_area: a list [x, y] that stores the area of the layout
    :param row_height: the height of each row
    :param comps: a list of components that need to be sorted
    :return: a list of rows, each containing a list of components in that row.
    """
    num_rows = layout_area[1] // row_height + 1
    rows = []
    for i in range(num_rows):
        rows.append([])
    for comp in comps:
        comp_y = comp.placed[1]
        row_dest = comp_y // row_height
        rows[row_dest].append(comp)
    # sort components in each row based on x-coordinate
    for each_row in rows:
        each_row.sort(key=lambda x: x.placed[0])
    return rows
bf320e9414405192752ccfcced360d8eb06ec0de
636,682
def asymmetry(A, B, C):
    """
    Ray's asymmetry parameter for molecular rotation.

    For a prolate symmetric top (B = C), kappa = -1.
    For an oblate symmetric top (B = A), kappa = +1.
    See Townes and Schawlow, Ch. 4.
    """
    return (2. * B - A - C) / (A - C)
4c339fb244821c4a6a8d6f7360a3f941baa6a6d3
166,339
def merge_words(sequences):
    """Merge successive words into phrases, putting a space between each word.

    Arguments
    ---------
    sequences : list
        Each item contains a list, and this list contains a word sequence.

    Returns
    -------
    The list contains phrase sequences.
    """
    results = []
    for seq in sequences:
        words = " ".join(seq)
        results.append(words)
    return results
802a863206a40fb997c38ee543be01f80de8fcf5
178,566
def Thumbnailer____getitem__(self, alias):
    """
    Retrieve a thumbnail matching the alias options (or raise a
    ``KeyError`` if no such alias exists).
    """
    options = self.get_full_options(alias)
    return self.get_thumbnail(options, silent_template_exception=True)
b532fdb68d22a48a668ed0dc820663df34d08588
182,588
def character_distance(left, right):
    """Return the alphabetic distance between two uppercase one-character
    strings.

    For example, character_distance('R', 'B') returns 16, since 'B' has
    value 66 and 'R' has value 82.
    """
    return abs(ord(left) - ord(right))
280ff1c3d020e1f13a49ce63a72a09de858e61e7
480,053
import torch


def rbf_kernel(X, Y=None, gamma=None):
    """Compute rbf Gram matrix between X and Y (or X).

    Parameters
    ----------
    X: torch.Tensor of shape (n_samples_1, n_features)
        First input on which Gram matrix is computed
    Y: torch.Tensor of shape (n_samples_2, n_features), default None
        Second input on which Gram matrix is computed. X is reused if None
    gamma: float
        Gamma parameter of the kernel (see sklearn implementation)

    Returns
    -------
    K: torch.Tensor of shape (n_samples_1, n_samples_2)
        Gram matrix on X/Y
    """
    if Y is None:
        Y = X
    if gamma is None:
        gamma = 1.0 / X.shape[1]
    X_norm = (X ** 2).sum(1).view(-1, 1)
    Y_norm = (Y ** 2).sum(1).view(1, -1)
    K_tmp = X_norm + Y_norm - 2. * torch.mm(X, torch.t(Y))
    K_tmp *= -gamma
    K = torch.exp(K_tmp)
    return K
60673a25871bab8a5c9ad8f5602144408c5f8466
378,338
def interleave(list1, list2):
    """Convert two lists to a single interleaved list."""
    if len(list1) != len(list2):
        raise ValueError("Lists are not the same length")
    return [val for pair in zip(list1, list2) for val in pair]
44ac3989fca219a8fbd4be222bb21d8a3d96e359
459,192
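For example:

interleave([1, 3, 5], [2, 4, 6])  # [1, 2, 3, 4, 5, 6]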
def is_not_in(value, other):
    """Inexistence"""
    return value not in other
1fddbf1bd15609c9b7c3a155a66c6233f68bf83b
473,203
def compute_TF(doc_info):
    """
    tf = (frequency of the term in the doc / total number of terms in the doc)
    """
    tf_scores = []
    for idx, doc in enumerate(doc_info):
        tf_score_table = {}
        for word in doc['freq_dict'].keys():
            count = doc['freq_dict'][word]
            tf_score_table[word] = count / doc_info[idx]['doc_length']
        tf_scores.append(tf_score_table)
    return tf_scores
85affe7db684fa929c54c09db9914f5b5bebaa09
418,054
def below(prec, other_prec):
    """Whether `prec` is entirely below `other_prec`."""
    return prec[1] < other_prec[0]
20337d8d507fc3b4a85c5c2abe5c8c014437d3af
634,673
from datetime import datetime


def epoch_secs_to_local_time_str(epoch_secs) -> str:
    """Get a string of local time representation from integer epoch in seconds"""
    return datetime.fromtimestamp(epoch_secs).strftime("%Y-%m-%d %H:%M:%S")
7eedb9a76decfeeafbb0d0dbd9de14ffd061a367
297,670
def factor_conv(string, *, conv, factors):
    """Convert a string with a factor.

    The symbols from ``factors`` are compared to the end of ``string``
    until one matches. The ``string`` is then shortened by the length of
    the symbol and the rest converted with ``conv`` and multiplied by the
    factor that corresponds to the symbol.

    >>> factors = {'h': 3600, 'm': 60, 's': 1}
    >>> factor_conv('10m', conv=int, factors=factors)
    600

    :param str string: input string
    :param conv: converter function
    :param dict factors: mapping from symbol to factor
    :return: converted value
    :raises ValueError: if the string cannot be converted
    """
    for sym in factors:
        if string.endswith(sym):
            if sym:
                return conv(string[:-len(sym)]) * factors[sym]
            else:
                return conv(string) * factors[sym]
    raise ValueError(f'invalid value: {string!r}')
49b9586c724df6645836396470041962102c9fab
451,518
def intersect_lines(p1, p2, p3, p4):
    """
    Calculates intersection point between two lines defined as (p1, p2)
    and (p3, p4).

    Args:
        p1: (float, float)
            Point 1 as (x, y) coordinates.
        p2: (float, float)
            Point 2 as (x, y) coordinates.
        p3: (float, float)
            Point 3 as (x, y) coordinates.
        p4: (float, float)
            Point 4 as (x, y) coordinates.

    Returns:
        (float, float) or None
            Coordinates of the intersection point. Returns None if there
            is no intersection.
    """
    a_dx = p2[0] - p1[0]
    a_dy = p1[1] - p2[1]
    a_sq = p2[0] * p1[1] - p1[0] * p2[1]

    b_dx = p4[0] - p3[0]
    b_dy = p3[1] - p4[1]
    b_sq = p4[0] * p3[1] - p3[0] * p4[1]

    d = a_dy * b_dx - a_dx * b_dy
    dx = a_sq * b_dx - a_dx * b_sq
    dy = a_dy * b_sq - a_sq * b_dy

    if d == 0:
        return None

    return dx / d, dy / d
f4e4cb6bc2826b83a2e82df3cb8510fec5cb52e1
110,523
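Two checks derived from the determinants above: the lines y = x and y = 2 - x cross at (1, 1), and parallel lines make the denominator d vanish:

intersect_lines((0, 0), (2, 2), (0, 2), (2, 0))  # (1.0, 1.0)
intersect_lines((0, 0), (1, 0), (0, 1), (1, 1))  # None (parallel)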
import re
from datetime import datetime
import time


def extract_date(str_date):
    """Find the first %Y-%m-%d string and return the datetime and the
    remainder of the string.
    """
    rgx = re.compile(r'\d{4}-\d{2}-\d{2}')
    o_match = rgx.search(str_date)
    if o_match is not None:
        i_start = o_match.start()
        i_end = i_start + 10
        return (datetime(
            *(time.strptime(str_date[i_start:i_end], "%Y-%m-%d")[0:6])),
            str_date[0:i_start] + str_date[i_end:])
    else:
        return (None, str_date)
3f3407490eec4e3d65e289b5e2ebef3246c9c63f
29,996
def filter_tagged_vocabulary(tagged_vocabulary, vocabulary, split="|"):
    """Filters tagged_vocabulary (tokens merged with tags) for tokens
    occurring in vocabulary.

    Parameters
    ----------
    tagged_vocabulary : collection
        vocabulary of tokens (can be merged with tags)
    vocabulary : collection
        target vocabulary of tokens without tags
    split : str
        string delimiting tags and tokens in tagged_vocabulary
    """
    targets = set()
    for tagged_word in tagged_vocabulary:
        word, *tag = tagged_word.split(split)
        if word in vocabulary:
            targets.add(tagged_word)
    return targets
a84ded5b44db2a4075591fd56847dc9529eefc7a
46,184
import itertools


def drop_last(n: int):
    """Drops the last n elements of a sequence.

    >>> tuple(drop_last(1)([1, 2, 3, 4, 5]))
    (1, 2, 3, 4)
    """
    def drop_last(seq):
        # Note: requires a sized sequence, since len() is used.
        return itertools.islice(seq, len(seq) - n)
    return drop_last
3fccc612d58bbe69dac544e24f77bdbdf643511a
541,120
def loc_offset_to_scaled_value(v_g, v_d, d_s):
    """Offset training value for location distances.

    Idea: v_d + z = v_g, but learn 'z/d_s' instead of z directly.

    Parameters
    ----------
    v_g : float
        location ground truth, either x_g or y_g
    v_d : float
        location of default box, either d_x or d_y
    d_s : float
        default box size, either h or w

    Returns
    -------
    float
        l: scaled value to be learned by the network
    """
    # v_d + z = v_g
    z = (v_g - v_d)
    return z / d_s
f5eb53185ca8152f026d52175b02f4aeba618c20
179,256
def _compute_moc_with_v_at_cst_depth(v, grid, X="X", Z="Z"):
    """
    Compute the meridional overturning streamfunction.
    """
    v_x_dx = grid.integrate(v, axis=X)  # (vo_to * domcfg_to['y_f_dif']).sum(dim='x_c')
    # integrate from top to bot
    psi = grid.cumint(v_x_dx, axis=Z, boundary="fill", fill_value=0) * 1e-6  # convert
    # -> from bot to top
    psi = psi - psi.isel({grid.axes[Z]._get_axis_coord(psi)[1]: -1})
    return psi
c8cffc0a1481aec7b09a844efc93ca694332676c
350,899
def dataset_word_frequencies(nodes):
    """
    Get frequency of words from an extracted dataset.
    """
    freqs = {}
    for node in nodes.values():
        for t in node.tokens:
            freqs[t.lower()] = freqs.get(t.lower(), 0) + 1
    return freqs
8d163ccd4f4f7b37c7162840d176668e3b02a6fb
496,682
def huber(x, y, scaling=0.1):
    """
    A helper function for evaluating the smooth L1 (huber) loss between the
    rendered silhouettes and colors.
    """
    diff_sq = (x - y) ** 2
    loss = ((1 + diff_sq / (scaling ** 2)).clamp(1e-4).sqrt() - 1) * float(scaling)
    return loss
fe9ba23d60ddd6effbeec9c9b49634a443ba1ae9
647,682