content
stringlengths
22
815k
id
int64
0
4.91M
async def test_cleanup(exp_log_records, timeout, recon_client, caplog, mocker, monkeypatch):
    """Proper cleanup with or without pending tasks.

    Parametrized via fixtures: `timeout` drives `recon_client.cleanup_timeout`
    and `exp_log_records` is the expected number of log records emitted.
    """
    recon_client.cleanup_timeout = timeout

    # mocked methods names must match those in reconciler._ASYNC_METHODS
    async def publish_change_messages():
        await asyncio.sleep(0)

    async def validate_rrsets_by_zone():
        await asyncio.sleep(0)

    # Schedule both coroutines so cleanup() has real pending tasks to wait on.
    coro1 = asyncio.ensure_future(publish_change_messages())
    coro2 = asyncio.ensure_future(validate_rrsets_by_zone())

    mock_task = mocker.MagicMock(asyncio.Task)
    mock_task.all_tasks.side_effect = [
        # in the `while iterations` loop twice
        # timeout of `0` will never hit this loop
        [coro1, coro2],
        [coro1.done(), coro2.done()]
    ]
    # Patch the module-level asyncio.Task so cleanup() sees the mocked task list.
    monkeypatch.setattr(
        'gordon_gcp.plugins.janitor.reconciler.asyncio.Task', mock_task)

    await recon_client.cleanup()

    assert exp_log_records == len(caplog.records)
    # Both pending coroutines must have been awaited to completion by cleanup().
    assert coro1.done()
    assert coro2.done()
    # cleanup() is expected to publish one final message on the changes channel.
    assert 1 == recon_client.changes_channel.qsize()
26,600
def _check_alignment(beh_events, alignment, candidates, candidates_set,
                     resync_i, check_i=None):
    """Check the alignment, account for misalignment accumulation.

    Starting from the behavioral event closest to a candidate (the anchor),
    walks outward in both directions, matching each shifted behavioral event
    to a candidate via `_event_dist` and re-synchronizing the remaining
    events whenever the matching error is within `resync_i`.

    Returns the (possibly resynced) copy of `beh_events` and the matched
    candidate events array.
    """
    check_i = resync_i if check_i is None else check_i
    beh_events = beh_events.copy()  # don't modify original
    events = np.zeros((beh_events.size))
    # Anchor on the behavioral event whose shifted time is nearest to any candidate.
    start = np.argmin([abs(beh_e - candidates).min()
                       for beh_e in beh_events + alignment])
    # Walk forward from the anchor, propagating the residual error onward.
    for i, beh_e in enumerate(beh_events[start:]):
        error, events[start + i] = \
            _event_dist(beh_e + alignment, candidates_set, candidates[-1],
                        check_i)
        if abs(error) <= resync_i and start + i + 1 < beh_events.size:
            beh_events[start + i + 1:] -= error
    # Walk backward from the anchor, propagating the residual error toward index 0.
    for i, beh_e in enumerate(beh_events[:start][::-1]):
        error, events[start - i - 1] = \
            _event_dist(beh_e + alignment, candidates_set, candidates[-1],
                        check_i)
        # NOTE(review): `> 0` (not `>= 0`) means index 0 is never resynced here
        # — confirm this exclusion is intentional.
        if abs(error) <= resync_i and start - i - 2 > 0:
            beh_events[:start - i - 2] -= error
    return beh_events, events
26,601
def user_info():
    """Render the user profile page.

    Redirects anonymous visitors to the site root; otherwise passes the
    logged-in user's serialized data to the profile template.
    """
    current_user = g.user
    if not current_user:
        # Not logged in -- send back to the home page.
        return redirect('/')
    context = {
        'user': current_user.to_dict()
    }
    return render_template('blogs/user.html', data=context)
26,602
def test_laplace_boundary_fmm(helpers, grid):
    """Test Laplace boundary operators."""
    space = bempp.api.function_space(grid, "P", 1)
    vec = helpers.load_npy_data("fmm_p1_vec")
    # Each case pairs a reference-data file with the operator under test.
    cases = (
        ("fmm_laplace_single", bempp.api.operators.boundary.laplace.single_layer),
        ("fmm_laplace_double", bempp.api.operators.boundary.laplace.double_layer),
        (
            "fmm_laplace_adjoint",
            bempp.api.operators.boundary.laplace.adjoint_double_layer,
        ),
        ("fmm_laplace_hyper", bempp.api.operators.boundary.laplace.hypersingular),
    )
    for filename, operator in cases:
        fmm = operator(space, space, space, assembler="fmm").weak_form()
        dense = helpers.load_npy_data(filename)
        # FMM mat-vec must agree with the stored dense result.
        np.testing.assert_allclose(dense, fmm @ vec, rtol=TOL)
    bempp.api.clear_fmm_cache()
26,603
def build_config_tests_list():
    """Return the names (first field) of all registered config tests."""
    # Transpose the 4-field test tuples and keep only the name column.
    transposed = zip(*config_tests)
    test_names, _, _, _ = transposed
    return test_names
26,604
def writefits(filename, array):
    """Write `array` to `filename` as a FITS file, overwriting any existing file.

    This is a fast wrapper for fits.writeto, with overwrite enabled. The
    target path is validated/normalised via the project's check_path helper.
    """
    # Local import keeps astropy off the module import path for callers
    # that never write FITS files.
    import astropy.io.fits as fits
    filename = check_path(filename, 'filename in writefits()')
    fits.writeto(filename, array, overwrite=True)
26,605
def get_layer_type_from_name(name=None):
    """ get the layer type from the long form layer name

    Expects `name` of the form "Phase LayerName" (single space separator) and
    returns a (layer_type, phase, layer_name) tuple. Matching rules are tried
    in order; the first hit returns immediately.
    """
    if name is None:
        print("Error get_layer_type_from_name - Bad args - exiting...")
        sys.exit(1)
    #print(name)
    phase, layer_name = name.split(' ')
    layer_type = layer_name
    ## @@@ For new NVTX - make the convension 'Phase LayerType,UniqueLayerName'
    pattern = re.compile(r"([a-zA-Z0-9]+),(\S+)")
    res = re.match(pattern, layer_name)
    if res is not None:
        layer_type = "{}".format(res.group(1))
        layer_name = "{}".format(res.group(2))
        return layer_type, phase, layer_name
    '''
    ## @@@ For Deep Bench - Remove this - make Deep Bench follow 'Phase Type,UniqueName' pattern
    pattern = re.compile(r"(Conv_\d+x\d+)")
    res = re.match(pattern, layer_name)
    if res is not None:
        layer_type = "{}".format(res.group(1))
        return layer_type, phase, layer_name
    '''
    ### All remaining pattern matches are there to support KNF naming convention
    pattern = re.compile(r"layer_\d+_\d+_(\w+)")
    res = re.match(pattern, layer_name)
    if res is not None:
        layer_type = "{}".format(res.group(1))
        return layer_type, phase, layer_name
    ## Look for res_branch_relu tag
    #pattern = re.compile(r"res\w+_branch\w+_(relu)")
    pattern = re.compile(r"res\w+[_]+(relu)")
    res = re.match(pattern, layer_name)
    if res is not None:
        layer_type = "{}".format(res.group(1))
        return layer_type, phase, layer_name
    ## Look for res_branch tag
    pattern = re.compile(r"res\w+_branch\w+")
    res = re.match(pattern, layer_type)
    if res is not None:
        layer_type = "conv"
        return layer_type, phase, layer_name
    ## Look for bn_branch tag
    pattern = re.compile(r"(bn)\w+_branch\w+")
    res = re.match(pattern, layer_type)
    if res is not None:
        layer_type = "{}".format(res.group(1))
        return layer_type, phase, layer_name
    # Bare residual names (e.g. "res2a") are element-wise add layers.
    pattern = re.compile(r"res\d+[a-f]")
    res = re.match(pattern, layer_type)
    if res is not None:
        if Debug:
            print ("Found elt layer type from {}".format(layer_type))
        layer_type = "elt"
        return layer_type, phase, layer_name
    # Get rid of numbers
    layer_type = re.sub(r"\d+", "", layer_type)
    # Special case - conv_expand - is a conv layer
    pattern = re.compile(r"(\w+)_expand")
    res = re.match(pattern, layer_type)
    if res is not None:
        layer_type = "{}".format(res.group(1))
        return layer_type, phase, layer_name
    ## Look for bn_conv - V1 prototxt format has bn as first field V2 has it as 2nd field
    pattern = re.compile(r"bn_(conv)")
    res = re.match(pattern, layer_type)
    if res is not None:
        layer_type = "bn"
        return layer_type, phase, layer_name
    ## Look for compound layer names - use the 2nd field for the name
    layer_type = re.sub(r".*_(\w+)", "\g<1>", layer_type)
    return layer_type, phase, layer_name
26,606
def annotate_filter(**decargs):
    """Add input and output watermarks to filtered events.

    All keyword arguments are forwarded to ``annotate_event`` for both the
    entry and the exit annotation.
    """
    import functools

    def decorator(func):
        """Annotate events with entry and/or exit timestamps."""
        # wraps() preserves the decorated function's __name__/__doc__/etc.,
        # which the original version lost.
        @functools.wraps(func)
        def wrapper(event, *args, **kwargs):
            """Add enter and exit annotations to the processed event."""
            funcname = ":".join([func.__module__, func.__name__])
            enter_key = funcname + "|enter"
            annotate_event(event, enter_key, **decargs)
            out = func(event, *args, **kwargs)
            exit_key = funcname + "|exit"
            annotate_event(event, exit_key, **decargs)
            return out
        return wrapper
    return decorator
26,607
def cplot(*args, **kwargs):
    """cplot - Plot on the current graphe

    This is an "alias" to gcf().gca().plot()
    """
    current_axes = gcf().gca()
    return current_axes.plot(*args, **kwargs)
26,608
def need_to_flush_metrics(time_now):
    """Check if metrics need flushing, and update the timestamp of last flush.

    Even though the caller of this function may not successfully flush the
    metrics, we still update the last_flushed timestamp to prevent too much
    work being done in user requests.

    Also, this check-and-update has to happen atomically, to ensure only
    one thread can flush metrics at a time.
    """
    # Flushing may be globally disabled (e.g. in tests or certain deployments).
    if not interface.state.flush_enabled_fn():
        return False
    datetime_now = datetime.datetime.utcfromtimestamp(time_now)
    minute_ago = datetime_now - datetime.timedelta(seconds=60)
    # Lock guards the read-compare-write on last_flushed so at most one
    # caller per minute gets True.
    with _flush_metrics_lock:
        if interface.state.last_flushed > minute_ago:
            return False
        interface.state.last_flushed = datetime_now
    return True
26,609
def path_to_url(path):
    """Return the URL corresponding to a given path."""
    # On platforms where the separator is not '/', rebuild the path with
    # URL-style separators; otherwise the path already is the URL.
    if os.sep != '/':
        return '/'.join(split_all(path))
    return path
26,610
def get_store_contents(request, store_name):
    """Returns a list containing all the contents of the store in the form of:
    [
        {
            "oid": <document_id>,
            "data": <document_data>,
        },
        ...
    ]

    Keyword arguments:
    request -- Django HttpRequest object
    store_name -- the name of the store
    """
    # NOTE(review): this definition has no body beyond the docstring and so
    # returns None — the implementation appears to be missing or was
    # truncated; confirm against the original source.
26,611
def create_instances_from_lists(x, y=None, name="data"):
    """
    Allows the generation of an Instances object from a list of lists for X and a list for Y (optional).
    All data must be numerical. Attributes can be converted to nominal with the
    weka.filters.unsupervised.attribute.NumericToNominal filter.

    :param x: the input variables (row wise)
    :type x: list of list
    :param y: the output variable (optional)
    :type y: list
    :param name: the name of the dataset
    :type name: str
    :return: the generated dataset
    :rtype: Instances
    """
    if y is not None:
        if len(x) != len(y):
            raise Exception("Dimensions of x and y differ: " + str(len(x)) + " != " + str(len(y)))
    # create header (range replaces the Python-2-only xrange)
    atts = [Attribute.create_numeric("x" + str(i + 1)) for i in range(len(x[0]))]
    if y is not None:
        atts.append(Attribute.create_numeric("y"))
    result = Instances.create_instances(name, atts, len(x))
    # add data; copy each row so appending y does not mutate the caller's lists
    for i, row in enumerate(x):
        values = row[:]
        if y is not None:
            values.append(y[i])
        result.add_instance(Instance.create_instance(values))
    return result
26,612
def plot_mission(results, line_style='bo-'):
    """This function plots the results of the mission analysis and saves those results to png files."""
    # Flight conditions, aero forces/coefficients, drag breakdown,
    # altitude/sfc/weight, and velocities -- one plot call each.
    plotters = (
        plot_flight_conditions,
        plot_aerodynamic_forces,
        plot_aerodynamic_coefficients,
        plot_drag_components,
        plot_altitude_sfc_weight,
        plot_aircraft_velocities,
    )
    for plotter in plotters:
        plotter(results, line_style)
    return
26,613
def jitdevice(func, link=(), debug=None, inline=False):
    """Wrapper for device-jit.

    :param func: function to compile as a device function
    :param link: not supported for device functions; any truthy value raises
    :param debug: override for config.CUDA_DEBUGINFO_DEFAULT
    :param inline: whether to force inlining
    :raises ValueError: if link files are supplied
    """
    # Guard first: link is meaningless for device functions.
    # (Default changed from the mutable [] to the immutable () -- same
    # truthiness, no shared-state hazard.)
    if link:
        raise ValueError("link keyword invalid for device function")
    debug = config.CUDA_DEBUGINFO_DEFAULT if debug is None else debug
    return compile_device_template(func, debug=debug, inline=inline)
26,614
def median_filter_(img, mask):
    """Apply a (mask x mask) median filter to every channel of an image.

    Generalized from the original hard-coded 3 channels to any number of
    channels (img.shape[2]); behavior is unchanged for RGB input.

    :param img: H x W x C array
    :param mask: side length of the square median-filter window
    :return: filtered image as uint8, same shape as input
    """
    filtered_channels = [
        median_filter(img[:, :, d], size=(mask, mask))
        for d in range(img.shape[2])
    ]
    return np.stack(filtered_channels, axis=2).astype("uint8")
26,615
def test_get_images_404(
    api_client: TestClient,
    return_encoded: EncodedImage,
    return_user: User,
    image_service: ImageService,
    access_token,
    image_name: str,
) -> None:
    """Test a successful get request for decoded images without finding any results."""
    # Service finds nothing for this name.
    image_service.get_encoded.return_value = []

    response = api_client.get(f"{URL}/{image_name}", headers=access_token)

    # The lookup must be scoped to the authenticated user and requested name.
    image_service.get_encoded.assert_called_once_with(
        user_id=return_user.id,
        image_name=image_name,
    )
    assert response.status_code == 404
    body = response.json()
    expected_detail = f"no encoded image(s) with name {image_name!r} found"
    assert body["detail"] == expected_detail
26,616
def replace_lines(inst, clean_lines, norm_lines):
    """
    Given an instance and a list of clean lines and normal lines,
    add a cleaned tier and normalized if they do not already exist,
    otherwise, replace them.

    :param inst:
    :type inst: xigt.Igt
    :param clean_lines:
    :type clean_lines: list[dict]
    :param norm_lines:
    :type norm_lines: list[dict]
    """
    # Drop any pre-existing clean tier, then any pre-existing norm tier.
    existing_clean = cleaned_tier(inst)
    if existing_clean is not None:
        inst.remove(existing_clean)
    existing_norm = normalized_tier(inst)
    if existing_norm is not None:
        inst.remove(existing_norm)

    # Rebuild the tiers from the provided lines, when any were given.
    if clean_lines:
        inst.append(
            create_text_tier_from_lines(inst, clean_lines, CLEAN_ID, CLEAN_STATE))
    if norm_lines:
        inst.append(
            create_text_tier_from_lines(inst, norm_lines, NORM_ID, NORM_STATE))

    return inst
26,617
def get_html(link: Link, path: Path) -> str:
    """
    Try to find wget, singlefile and then dom files.
    If none is found, download the url again.
    """
    canonical = link.canonical_outputs()
    base_dir = path.absolute()
    sources = [
        canonical["singlefile_path"],
        canonical["wget_path"],
        canonical["dom_path"],
    ]
    for source in sources:
        try:
            with open(base_dir / source, "r", encoding="utf-8") as fh:
                return fh.read()
        except (FileNotFoundError, TypeError):
            # Missing archive file (or a None path) -- try the next source.
            continue
    # No archived copy available: fetch a fresh one.
    return download_url(link.url)
26,618
def normalization(X, degree):
    """Min-max scale the first `degree` columns of X into [0, 1], in place.

    A scaling technique in which values are shifted and rescaled so that they
    end up ranging between 0 and 1. It is also known as Min-Max scaling.

    :param X: 2-D numpy array; modified in place and also returned
    :param degree: polynomial regression degree, or attribute/feature number
                   (how many leading columns to scale)
    :return: X with its first `degree` columns scaled
    """
    cols = X[:, :degree]
    # Compute the per-column extrema once (the original recomputed amin twice).
    col_min = np.amin(cols, axis=0)
    col_max = np.amax(cols, axis=0)
    X[:, :degree] = (cols - col_min) / (col_max - col_min)
    return X
26,619
def markdown_file(source_markdown_file, version, tmp_dir, new_path=None):
    """
    Given a markdown file path, generate an HTML partial in a directory
    nested by the path on the URL itself.

    NOTE(review): uses the Python-2-only `unicode` builtin; this function
    cannot run unchanged on Python 3 — confirm the target interpreter.
    """
    if not new_path:
        # Derive the output path from settings, the docs version, and the
        # source file's path relative to tmp_dir (extension swapped to .html).
        new_path = settings.OTHER_PAGE_PATH % (
            settings.EXTERNAL_TEMPLATE_DIR, version,
            os.path.splitext(
                source_markdown_file.replace(tmp_dir, ''))[0] + '.html')
    # Create the nested directories if they don't exist.
    if not os.path.exists(os.path.dirname(new_path)):
        os.makedirs(os.path.dirname(new_path))
    with open(source_markdown_file) as original_md_file:
        markdown_body = original_md_file.read()
    with codecs.open(new_path, 'w', 'utf-8') as new_html_partial:
        # Strip out the wrapping HTML
        # The rendered markdown is wrapped in {% verbatim %} so Django's
        # template engine does not interpret its contents.
        new_html_partial.write(
            '{% verbatim %}\n' +
            markdown.markdown(
                unicode(markdown_body, 'utf-8'),
                extensions=['markdown.extensions.fenced_code',
                            'markdown.extensions.tables']
            ) + '\n{% endverbatim %}'
        )
26,620
def select_without_source_table_privilege(self, node=None):
    """Check that user is unable to select from a view without SELECT privilege
    for the source table.

    Creates a fresh table/user/view (unique names via getuid), grants the user
    SELECT only on the view, and expects the select to fail with a
    not-enough-privileges error because the user lacks SELECT on the table.
    """
    user_name = f"user_{getuid()}"
    view_name = f"view_{getuid()}"
    table_name = f"table_{getuid()}"
    # Expected error for a user lacking the required privilege.
    exitcode, message = errors.not_enough_privileges(name=f"{user_name}")
    if node is None:
        node = self.context.node
    with table(node, f"{table_name}"):
        with user(node, f"{user_name}"):
            try:
                with When("I create a view from the source table"):
                    node.query(f"DROP VIEW IF EXISTS {view_name}")
                    node.query(f"CREATE VIEW {view_name} AS SELECT * FROM {table_name}")
                with And("I grant view select privilege to the user"):
                    node.query(f"GRANT SELECT ON {view_name} TO {user_name}")
                with Then(
                    "I attempt to select from view without privilege on the source table"
                ):
                    # Must fail: view access requires privilege on the
                    # underlying table as well.
                    node.query(
                        f"SELECT count(*) FROM {view_name}",
                        settings=[("user", f"{user_name}")],
                        exitcode=exitcode,
                        message=message,
                    )
            finally:
                with Finally("I drop the view"):
                    node.query(f"DROP VIEW IF EXISTS {view_name}")
26,621
def get_tags_from_match(child_span_0, child_span_1, tags):
    """
    Given two entities spans, check if both are within one of the tags span,
    and return the first match or O
    """
    matched = [
        info["tag"]
        for info in tags.values()
        if parent_relation(child_span_0, child_span_1,
                           (info["start"], info["end"]))
    ]
    # First containing tag wins; "O" when nothing contains both spans.
    return matched[0] if matched else "O"
26,622
def test_1_1_1_7_file_group(host):
    """
    CIS Ubuntu 20.04 v1.0.0 - Rule # 1.1.1.7
    Tests if /etc/modprobe.d/1.1.1.7_vfat.conf is owned by group root
    """
    mod_file = host.file(VFAT_MOD_FILE)
    assert mod_file.group == 'root'
26,623
def _rfc822_escape(header): """Return a version of the string escaped for inclusion in an RFC-822 header, by ensuring there are 8 spaces space after each newline. """ lines = header.split('\n') header = ('\n' + 8 * ' ').join(lines) return header
26,624
def getCgiBaseHref():
    """Return value for <cgiBaseHref/> configuration parameter."""
    configured = sciflo.utils.ScifloConfigParser().getParameter('cgiBaseHref')
    if configured is not None:
        return configured
    # Fall back to a URL built from this host's fully-qualified domain name.
    return "http://%s/sciflo/cgi-bin/" % socket.getfqdn()
26,625
def _add_remote_resources(resources):
    """Retrieve remote resources like GATK/MuTect jars present in S3.

    Returns a deep copy of `resources` where each remote "jar" entry is
    replaced by a local "dir" (download location) plus a "version" string;
    the version is computed once and cached in version.txt alongside the jar.
    """
    out = copy.deepcopy(resources)
    for prog, info in resources.items():
        for key, val in info.items():
            if key == "jar" and objectstore.is_remote(val):
                # Download into inputs/jars/<prog> under the working directory.
                store_dir = utils.safe_makedir(os.path.join(os.getcwd(), "inputs", "jars", prog))
                fname = objectstore.download(val, store_dir, store_dir)
                version_file = os.path.join(store_dir, "version.txt")
                if not utils.file_exists(version_file):
                    # First download: probe the jar for its version and cache it.
                    version = install.get_gatk_jar_version(prog, fname)
                    with open(version_file, "w") as out_handle:
                        out_handle.write(version)
                else:
                    with open(version_file) as in_handle:
                        version = in_handle.read().strip()
                # Replace the remote jar reference with local dir + version.
                del out[prog][key]
                out[prog]["dir"] = store_dir
                out[prog]["version"] = version
    return out
26,626
def dijkstra(gph: GraphState, algo: AlgoState, txt: VisText, start: Square, end: Square,
             ignore_node: Square = None, draw_best_path: bool = True, visualize: bool = True) \
        -> [dict, bool]:
    """Code for the dijkstra algorithm

    Returns True when the best path was drawn, the came_from mapping when
    draw_best_path is False, or False when no path to `end` exists.
    """
    # Used to determine the order of squares to check. Order of args helper decide the priority.
    queue_pos: int = 0
    open_set = PriorityQueue()
    open_set.put((0, queue_pos, start))
    open_set_hash: set = {start}

    # Determine what is the best square to check
    g_score: dict = {square: float('inf') for row in gph.graph for square in row}
    g_score[start] = 0

    # Keeps track of next node for every node in graph. A linked list basically.
    came_from: dict = {}

    # Continues until every node has been checked or best path found
    i = 0
    while not open_set.empty():
        # If uses closes window the program terminates
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()

        # Gets the square currently being checked
        curr_square: Square = open_set.get()[2]
        open_set_hash.remove(curr_square)

        # Terminates if found the best path
        if curr_square == end:
            if draw_best_path:
                best_path(gph, algo, txt, came_from, end, visualize=visualize)
                return True
            return came_from

        # Decides the order of neighbours to check
        for nei in curr_square.neighbours:
            # Uniform edge weight of 1 between adjacent squares.
            temp_g_score: int = g_score[curr_square] + 1
            if temp_g_score < g_score[nei]:
                came_from[nei] = curr_square
                g_score[nei] = temp_g_score
                if nei not in open_set_hash:
                    queue_pos += 1
                    open_set.put((g_score[nei], queue_pos, nei))
                    open_set_hash.add(nei)
                    if nei != end and nei.color != CLOSED_COLOR and nei != ignore_node:
                        nei.set_open()

        # Only visualize if called. Checks if square is closed to not repeat when mid node included.
        i += 1
        if visualize and not curr_square.is_closed():
            # Redraw only every speed_multiplier-th iteration to keep it fast.
            if i % gph.speed_multiplier == 0:
                i = 0
                draw(gph, txt, display_update=False)
                draw_vis_text(txt, is_dijkstra=True)

        # Sets square to closed after finished checking
        if curr_square != start and curr_square != ignore_node:
            curr_square.set_closed()
    return False
26,627
def list_members(infile):
    """Print list of projects stored as JSON in `infile`.

    Prints each project's title followed by its name, time, object and cash
    fields; prints "Empty" when the file contains an empty list.

    :param infile: path to a UTF-8 encoded JSON file holding a list of
                   project dicts with keys title/name/time/object/cash
    """
    # `with` guarantees the file is closed even if json.load raises
    # (the original leaked the handle on parse errors).
    with open(infile, mode='r', encoding='utf-8') as member_list:
        jlist = json.load(member_list)  # load information from json
    if jlist:
        for project in jlist:  # create cycle for
            print(str(project['title']))  # print "border" between projects
            print("Name: " + str(project['name']))  # print attributes of project
            print("Time: " + str(project['time']))
            print("Object: " + str(project['object']))
            print("Cash: " + str(project['cash']))
    else:
        print("Empty")
26,628
def calc_window_features(dict_features, signal_window, fs, **kwargs):
    """This function computes features matrix for one window.

    Parameters
    ----------
    dict_features : dict
        Dictionary with features
    signal_window: pandas DataFrame
        Input from which features are computed, window
    fs : int
        Sampling frequency
    \**kwargs:
        See below:
            * *features_path* (``string``) --
                Directory of script with personal features
            * *header_names* (``list or array``) --
                Names of each column window

    Returns
    -------
    pandas DataFrame
        (columns) names of the features
        (data) values of each features for signal
    """
    features_path = kwargs.get('features_path', None)
    names = kwargs.get('header_names', None)
    # Execute imports
    # Feature functions are resolved by name at eval() time, so tsfel (and
    # optionally the user's personal-features module) must be importable here.
    exec("import tsfel")
    domain = dict_features.keys()

    if features_path:
        # Make the personal-features script importable, (re)load it, and pull
        # its names into this scope for eval() below.
        sys.path.append(features_path[:-len(features_path.split(os.sep)[-1])-1])
        exec("import "+features_path.split(os.sep)[-1][:-3])
        importlib.reload(sys.modules[features_path.split(os.sep)[-1][:-3]])
        exec("from " + features_path.split(os.sep)[-1][:-3]+" import *")

    # Create global arrays
    func_total = []
    func_names = []
    imports_total = []
    parameters_total = []
    feature_results = []
    feature_names = []

    for _type in domain:
        domain_feats = dict_features[_type].keys()
        for feat in domain_feats:
            # Only returns used functions
            if dict_features[_type][feat]['use'] == 'yes':
                # Read Function Name (generic name)
                func_names = [feat]
                # Read Function (real name of function)
                func_total = [dict_features[_type][feat]['function']]
                # Check for parameters
                if dict_features[_type][feat]['parameters'] != '':
                    param = dict_features[_type][feat]['parameters']
                    # Check assert fs parameter:
                    if 'fs' in param:
                        # Select which fs to use
                        if fs is None:
                            # Check if features dict has default sampling frequency value
                            if type(param['fs']) is int or type(param['fs']) is float:
                                parameters_total = [str(key) + '=' + str(value)
                                                    for key, value in param.items()]
                            else:
                                raise Exception('No sampling frequency assigned.')
                        else:
                            # Caller-supplied fs overrides the dict default.
                            parameters_total = [str(key) + '=' + str(value)
                                                for key, value in param.items()
                                                if key not in 'fs']
                            parameters_total += ['fs =' + str(fs)]
                    # feature has no fs parameter
                    else:
                        parameters_total = []
                        for key, value in param.items():
                            if type(value) is str:
                                # Re-quote string parameters for the eval() call.
                                value = '"'+value+'"'
                            parameters_total.append([str(key) + '=' + str(value)])
                else:
                    parameters_total = ''

                # To handle object type signals
                signal_window = np.array(signal_window).astype(float)

                # Name of each column to be concatenate with feature name
                if not isinstance(signal_window, pd.DataFrame):
                    signal_window = pd.DataFrame(data=signal_window)
                if names is not None:
                    if len(names) != len(list(signal_window.columns.values)):
                        raise Exception('header_names dimension does not match input columns.')
                    else:
                        header_names = names
                else:
                    header_names = signal_window.columns.values

                for ax in range(len(header_names)):
                    window = signal_window.iloc[:, ax]
                    # Build the call string "func(window, k=v, ...)" and evaluate it.
                    execf = func_total[0] + '(window'
                    if parameters_total != '':
                        execf += ', ' + str(parameters_total).translate(
                            str.maketrans({'[': '', ']': '', "'": ''}))
                    execf += ')'
                    eval_result = eval(execf, locals())

                    # Function returns more than one element
                    if type(eval_result) == tuple:
                        if np.isnan(eval_result[0]):
                            # NaN first element: zero out the whole tuple.
                            eval_result = np.zeros(len(eval_result))
                        for rr in range(len(eval_result)):
                            feature_results += [eval_result[rr]]
                            feature_names += [str(header_names[ax]) + '_' + func_names[0] + '_' + str(rr)]
                    else:
                        feature_results += [eval_result]
                        feature_names += [str(header_names[ax]) + '_' + func_names[0]]

    features = pd.DataFrame(data=np.array(feature_results).reshape(1, len(feature_results)),
                            columns=np.array(feature_names))
    return features
26,629
def nowIso8601():
    """ Returns time now in ISO 8601 format
    use now(timezone.utc)

    YYYY-MM-DDTHH:MM:SS.ffffff+HH:MM[:SS[.ffffff]]
    .strftime('%Y-%m-%dT%H:%M:%S.%f%z')
    '2020-08-22T17:50:09.988921+00:00'

    Assumes TZ aware
    For nanosecond use instead attotime or datatime64 in pandas or numpy
    """
    current = nowUTC()
    return current.isoformat(timespec='microseconds')
26,630
def get_trip_data(tripdata_path, output_path, start=None, stop=None):
    """
    Read raw tripdata csv and filter unnecessary info.

    1 - Try to load a previously cleaned csv from output_path
    2 - If that fails:
        2.1 - Select columns ("pickup_datetime", "passenger_count",
              "pickup_longitude", "pickup_latitude",
              "dropoff_longitude", "dropoff_latitude")
        2.2 - If start and stop are not None, get excerpt
        2.3 - Drop missing values, sort chronologically, save to output_path
    3 - Return dataframe

    Arguments:
        tripdata_path {string} -- Raw trip data csv path
        output_path {string} -- Cleaned trip data csv path
        start {string} -- Datetime where tripdata should start (e.g., 2011-02-01 12:23:00)
        stop {string} -- Datetime where tripdata should end (e.g., 2011-02-01 14:00:00)

    Returns:
        Dataframe -- Cleaned tripdata dataframe
    """
    print("files:", output_path, tripdata_path)

    try:
        # Reuse the cleaned file when it already exists and parses.
        tripdata_dt_excerpt = pd.read_csv(
            output_path, parse_dates=True, index_col="pickup_datetime")
        print("Loading file '{}'.".format(output_path))
    except Exception:
        # (Narrowed from a bare `except:` so Ctrl-C/SystemExit still propagate.)
        # No usable cleaned file -- build it from the raw data.
        filtered_columns = ["pickup_datetime",
                            "passenger_count",
                            "pickup_longitude",
                            "pickup_latitude",
                            "dropoff_longitude",
                            "dropoff_latitude"]
        tripdata_dt = pd.read_csv(tripdata_path,
                                  parse_dates=True,
                                  index_col="pickup_datetime",
                                  usecols=filtered_columns,
                                  na_values='0')
        # Get excerpt
        if start and stop:
            in_window = (tripdata_dt.index >= start) & (tripdata_dt.index <= stop)
            tripdata_dt_excerpt = pd.DataFrame(tripdata_dt.loc[in_window])
        else:
            tripdata_dt_excerpt = pd.DataFrame(tripdata_dt)
        # Remove None values
        tripdata_dt_excerpt.dropna(inplace=True)
        # Sort
        tripdata_dt_excerpt.sort_index(inplace=True)
        # Save day data
        tripdata_dt_excerpt.to_csv(output_path)

    return tripdata_dt_excerpt
26,631
def _qparams2url(qparams):
    """
    parse qparams to make url segment
    :param qparams: expected as a list of four (key, values-list) pairs with
        equal-length value lists; any other shape falls back to urlencode
    :return: parsed url segment
    """
    try:
        if qparams == []:
            return ""
        assert len(qparams) == 4
        num = len(qparams[0][1])
        path = ""
        # Interleave: for each value index emit all four key=value pairs.
        for i in range(num):
            for j in range(4):
                path += str(qparams[j][0]) + '=' + str(qparams[j][1][i]) + "&"
        # Drop the trailing '&'.
        path = path[:-1]
        return path
    except:
        # NOTE(review): bare except is the deliberate fallback for any shape
        # mismatch above -- it delegates to the generic encoder; narrowing it
        # would change behavior for unexpected inputs.
        return urllib.parse.urlencode(qparams, doseq=True)
26,632
def confusion_matrix(y_pred: IntTensor,
                     y_true: IntTensor,
                     normalize: bool = True,
                     labels: IntTensor = None,
                     title: str = 'Confusion matrix',
                     cmap: str = 'Blues',
                     show: bool = True):
    """Plot confusion matrix

    Args:
        y_pred: Model prediction returned by `model.match()`
        y_true: Expected class_id.
        normalize: Normalizes matrix values between 0 and 1.
        Defaults to True.
        labels: List of class string label to display instead of the class
        numerical ids. Defaults to None.
        title: Title of the confusion matrix. Defaults to 'Confusion matrix'.
        cmap: Color schema as CMAP. Defaults to 'Blues'.
        show: If the plot is going to be shown or not. Defaults to True.

    Returns:
        The matplotlib.pyplot module when `show` is False, otherwise None.
    """
    with tf.device("/cpu:0"):
        # Ensure we are working with integer tensors.
        y_pred = tf.cast(tf.convert_to_tensor(y_pred), dtype='int32')
        y_true = tf.cast(tf.convert_to_tensor(y_true), dtype='int32')

        cm = tf.math.confusion_matrix(y_true, y_pred)
        cm = tf.cast(cm, dtype='float')
        # Accuracy is the trace (correct predictions) over all predictions.
        accuracy = tf.linalg.trace(cm) / tf.math.reduce_sum(cm)
        misclass = 1 - accuracy

        if normalize:
            # Row-normalize; divide_no_nan guards classes with zero samples.
            cm = tf.math.divide_no_nan(
                cm,
                tf.math.reduce_sum(cm, axis=1)[:, np.newaxis]
            )

        plt.figure(figsize=(8, 6))
        plt.imshow(cm, interpolation='nearest', cmap=cmap)
        plt.title(title)
        plt.colorbar()

        if labels is not None:
            tick_marks = np.arange(len(labels))
            plt.xticks(tick_marks, labels, rotation=45)
            plt.yticks(tick_marks, labels)

        # Threshold picks white vs. black text for contrast against the cell color.
        cm_max = tf.math.reduce_max(cm)
        thresh = cm_max / 1.5 if normalize else cm_max / 2.0
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            val = cm[i, j]
            color = "white" if val > thresh else "black"
            txt = "%.2f" % val if val > 0.0 else "0"
            plt.text(j, i, txt, horizontalalignment="center", color=color)

        plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(
            accuracy, misclass))

        if show:
            plt.show()
        else:
            return plt
26,633
def from_smiles(smiles: str) -> Molecule:
    """Load a molecule from SMILES."""
    molecule = cdk.fromSMILES(smiles)
    return molecule
26,634
def main(log_dir='../../log/', x_axis='num episodes', y_axis=None, hue='algorithm',
         env_filter_func=None, alg_filter_func=None):
    """plot performance of all environments and algorithms

    1. traverse all environments and plots for all algos tensorboard logs in
       that environment
    2. generate a dataframe for each environment, which contains all algos
       tensorboard log information

    Args:
        log_dir (str, optional): Directory of tensorboard log files.
            Defaults to '../../log/'.
        x_axis (str, optional): X label of plot. Defaults to 'num episodes'.
        y_axis (list, optional): Y label of plot.
            Defaults to ['average reward'].
        hue (str, optional): Grouping variable. Defaults to 'algorithm'.
        env_filter_func ([type], optional): Filter function to select
            environments. Defaults to None.
        alg_filter_func ([type], optional): Filter function to select
            algorithms. Defaults to None.
    """
    # None sentinel replaces the mutable default list (['average reward'])
    # so the default cannot be shared/mutated across calls.
    if y_axis is None:
        y_axis = ['average reward']
    plot_all_logs(log_dir=log_dir, x_axis=x_axis, y_axis=y_axis, hue=hue, smooth=11,
                  env_filter_func=env_filter_func, alg_filter_func=alg_filter_func)
26,635
def parse_template_config(template_config_data: Dict[str, Any]) -> EmailTemplateConfig:
    """Parse an e-mail template config dict into an ``EmailTemplateConfig``.

    >>> from tests import doctest_utils
    >>> convert_html_to_text = registration_settings.VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER  # noqa: E501
    >>> parse_template_config({})  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    ImproperlyConfigured
    >>> parse_template_config({
    ...     'subject': 'blah',
    ... })  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    ImproperlyConfigured
    >>> parse_template_config({
    ...     'subject': 'blah',
    ...     'body': 'blah',
    ... })  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    ImproperlyConfigured
    >>> doctest_utils.equals(
    ...     parse_template_config({
    ...         'subject': 'rest_registration/register/subject.txt',
    ...         'html_body': 'rest_registration/register/body.html',
    ...         'text_body': 'rest_registration/register/body.txt',
    ...     }),
    ...     EmailTemplateConfig(
    ...         'rest_registration/register/subject.txt',
    ...         'rest_registration/register/body.txt',
    ...         'rest_registration/register/body.html',
    ...         identity))
    OK
    >>> doctest_utils.equals(
    ...     parse_template_config({
    ...         'subject': 'rest_registration/register/subject.txt',
    ...         'html_body': 'rest_registration/register/body.html',
    ...     }),
    ...     EmailTemplateConfig(
    ...         'rest_registration/register/subject.txt',
    ...         'rest_registration/register/body.html',
    ...         'rest_registration/register/body.html',
    ...         convert_html_to_text))
    OK
    >>> doctest_utils.equals(
    ...     parse_template_config({
    ...         'subject': 'rest_registration/register/subject.txt',
    ...         'text_body': 'rest_registration/register/body.txt',
    ...     }),
    ...     EmailTemplateConfig(
    ...         'rest_registration/register/subject.txt',
    ...         'rest_registration/register/body.txt', None,
    ...         identity))
    OK
    >>> doctest_utils.equals(
    ...     parse_template_config({
    ...         'subject': 'rest_registration/register/subject.txt',
    ...         'body': 'rest_registration/register/body.txt',
    ...     }),
    ...     EmailTemplateConfig(
    ...         'rest_registration/register/subject.txt',
    ...         'rest_registration/register/body.txt', None,
    ...         identity))
    OK
    >>> doctest_utils.equals(
    ...     parse_template_config({
    ...         'subject': 'rest_registration/register/subject.txt',
    ...         'body': 'rest_registration/register/body.html',
    ...         'is_html': True,
    ...     }),
    ...     EmailTemplateConfig(
    ...         'rest_registration/register/subject.txt',
    ...         'rest_registration/register/body.html',
    ...         'rest_registration/register/body.html',
    ...         convert_html_to_text))
    OK
    """
    # 'subject' is the only strictly required key.
    try:
        subject_template_name = template_config_data['subject']
    except KeyError:
        raise ImproperlyConfigured(_("No 'subject' key found")) from None
    body_template_name = template_config_data.get('body')
    text_body_template_name = template_config_data.get('text_body')
    html_body_template_name = template_config_data.get('html_body')
    is_html_body = template_config_data.get('is_html')
    convert_html_to_text = registration_settings.VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER  # noqa: E501
    # Branch priority: explicit text+html pair, then html-only (text derived
    # by conversion), then text-only, then the legacy single 'body' key.
    if html_body_template_name and text_body_template_name:
        config = EmailTemplateConfig(
            subject_template_name=subject_template_name,
            text_body_template_name=text_body_template_name,
            html_body_template_name=html_body_template_name,
            text_body_processor=identity,
        )
    elif html_body_template_name:
        # Only HTML supplied: reuse it for the text part and strip markup
        # with the configured HTML-to-text converter.
        config = EmailTemplateConfig(
            subject_template_name=subject_template_name,
            text_body_template_name=html_body_template_name,
            html_body_template_name=html_body_template_name,
            text_body_processor=convert_html_to_text,
        )
    elif text_body_template_name:
        config = EmailTemplateConfig(
            subject_template_name=subject_template_name,
            text_body_template_name=text_body_template_name,
            html_body_template_name=None,
            text_body_processor=identity,
        )
    elif body_template_name:
        # Legacy 'body' key: 'is_html' decides whether it is treated as
        # HTML (converted for the text part) or as plain text.
        if is_html_body:
            config = EmailTemplateConfig(
                subject_template_name=subject_template_name,
                text_body_template_name=body_template_name,
                html_body_template_name=body_template_name,
                text_body_processor=convert_html_to_text,
            )
        else:
            config = EmailTemplateConfig(
                subject_template_name=subject_template_name,
                text_body_template_name=body_template_name,
                html_body_template_name=None,
                text_body_processor=identity,
            )
    else:
        # No body key at all - the config is unusable.
        raise ImproperlyConfigured(
            'Could not parse template config data: {template_config_data}'.format(  # noqa: E501
                template_config_data=template_config_data))
    # Fail fast if any referenced template does not actually exist.
    _validate_template_name_existence(config.subject_template_name)
    _validate_template_name_existence(config.text_body_template_name)

    if config.html_body_template_name:
        _validate_template_name_existence(config.html_body_template_name)

    assert callable(config.text_body_processor)

    return config
26,636
def stringify_message(message):
    """Serialize *message* to canonical JSON text.

    Keys are sorted alphabetically and separators carry no extra
    whitespace, so equal messages always produce identical strings.

    Args:
        message: Any JSON-serializable object.

    Returns:
        The compact, key-sorted JSON representation of *message*.
    """
    canonical = json.dumps(message, sort_keys=True, separators=(',', ':'))
    return canonical
26,637
def compute_similarity_transform(X, Y, compute_optimal_scale=False):
    """
    A port of MATLAB's `procrustes` function to Numpy.
    Adapted from http://stackoverflow.com/a/18927641/1884420

    Args
        X: array NxM of targets, with N number of points and M point dimensionality
        Y: array NxM of inputs
        compute_optimal_scale: whether we compute optimal scale or force it to be 1

    Returns:
        d: squared error after transformation
        Z: transformed Y
        T: computed rotation
        b: scaling
        c: translation
    """
    import numpy as np

    # Center both point clouds on their centroids.
    mean_X = X.mean(0)
    mean_Y = Y.mean(0)
    Xc = X - mean_X
    Yc = Y - mean_Y

    ssX = (Xc**2.).sum()
    ssY = (Yc**2.).sum()

    # Centred Frobenius norms, used to bring both clouds to unit scale.
    norm_X = np.sqrt(ssX)
    norm_Y = np.sqrt(ssY)
    Xc = Xc / norm_X
    Yc = Yc / norm_Y

    # Optimal rotation of Y onto X via the SVD of the cross-covariance.
    U, s, Vt = np.linalg.svd(np.dot(Xc.T, Yc), full_matrices=False)
    V = Vt.T
    T = np.dot(V, U.T)

    # Force a proper rotation (det(T) == +1) by flipping the last axis
    # and its singular value when the determinant is negative.
    flip = np.sign(np.linalg.det(T))
    V[:, -1] *= flip
    s[-1] *= flip
    T = np.dot(V, U.T)

    traceTA = s.sum()

    if compute_optimal_scale:
        # Closed-form optimal scaling of Y.
        b = traceTA * norm_X / norm_Y
        d = 1 - traceTA**2
        Z = norm_X * traceTA * np.dot(Yc, T) + mean_X
    else:
        # Scale forced to 1.
        b = 1
        d = 1 + ssY / ssX - 2 * traceTA * norm_Y / norm_X
        Z = norm_Y * np.dot(Yc, T) + mean_X

    c = mean_X - b * np.dot(mean_Y, T)

    return d, Z, T, b, c
26,638
def simplefenestration(idf, fsd, deletebsd=True, setto000=False):
    """Convert a bsd (fenestrationsurface:detailed) into a simple fenestration.

    Each simple-object builder (window, door, glazed door) is tried in
    turn; the first truthy result wins. Returns None when no builder
    produces a fenestration.
    """
    for build in (window, door, glazeddoor):
        result = build(idf, fsd, deletebsd=deletebsd, setto000=setto000)
        if result:
            return result
    return None
26,639
def observe_birds(observations_file: TextIO) -> Set[str]:
    """Return a set of the bird species listed in observations_file,
    which has one bird species per line.

    >>> file = StringIO("bird 1\\nbird 2\\nbird 1\\n")
    >>> birds = observe_birds(file)
    >>> 'bird 1' in birds
    True
    >>> 'bird 2' in birds
    True
    >>> len(birds) == 2
    True
    """
    # A set comprehension deduplicates while stripping the trailing newline.
    return {line.strip() for line in observations_file}
26,640
def convert_sheet(sheet, result_dict, is_enum_mode=False):
    """Convert the data of a single sheet.

    Args:
        sheet: openpyxl.worksheet.worksheet.Worksheet
        result_dict: [dict] all results are stored here; key is data_name,
            value is a SheetResult.
        is_enum_mode: [bool] whether this is enum conversion mode.

    Returns:
        bool, whether the conversion succeeded.
    """
    # The data name is derived differently depending on the conversion mode.
    if is_enum_mode:
        data_name = convert.excel_handler.get_enum_class_name(sheet)
    else:
        data_name = convert.excel_handler.get_data_name(sheet)
    sheet_name = convert.excel_handler.get_sheet_name(sheet)
    # An empty or non-conforming data name means the sheet is deliberately
    # skipped; this is not an error (log message: "sheet's data name is
    # empty or violates the naming rule; not converted").
    if not data_name:
        ec_converter.logger.info('sheet \'%s\' 的data名字为空或不符合命名规则,不导表', sheet_name)
        return True
    # Duplicate data names across sheets are a hard error (log message:
    # "data name is duplicated").
    if data_name in result_dict:
        ec_converter.logger.error('data名字 \'%s\' 重复, sheet name = \'%s\'', data_name, sheet_name)
        return False
    name_schema_dict = {}
    col_schema_dict = {}
    # Collect the sheet's schema metadata, keyed both by field name and by
    # column index (log message on failure: "failed to get field info").
    if not _get_sheet_schema_meta_info(sheet, name_schema_dict, col_schema_dict):
        ec_converter.logger.error('sheet \'%s\' 获取字段信息失败', sheet_name)
        return False
    sheet_result = result_dict.setdefault(data_name, convert.sheet_result.SheetResult(data_name))
    sheet_result.name_schema_dict = name_schema_dict
    sheet_result.col_schema_dict = col_schema_dict
    # Convert every data row, skipping the header rows (ROW_OFFSET);
    # any failing row aborts the whole sheet.
    for row_data in convert.excel_handler.get_row_generator(sheet, settings.ROW_OFFSET):
        if not _convert_row(row_data, sheet_name, sheet_result):
            return False
    return True
26,641
def _nsimage_from_file(filename, dimensions=None, template=None):
    """Take a path to an image file and return an NSImage object.

    The path is probed with ``open`` first because ``NSImage`` fails
    silently on missing files; if the literal path does not exist, the
    path is retried relative to the main script's directory.
    """
    try:
        _log('attempting to open image at {0}'.format(filename))
        with open(filename):
            pass
    except IOError:
        # literal file path didn't work -- try to locate image based on main script path
        try:
            from __main__ import __file__ as main_script_path
            main_script_path = os.path.dirname(main_script_path)
            filename = os.path.join(main_script_path, filename)
        except ImportError:
            pass
        _log('attempting (again) to open image at {0}'.format(filename))
        with open(filename):  # file doesn't exist
            pass  # otherwise silently errors in NSImage which isn't helpful for debugging
    image = NSImage.alloc().initByReferencingFile_(filename)
    image.setScalesWhenResized_(True)
    image.setSize_((20, 20) if dimensions is None else dimensions)
    # PEP 8 idiom fix: "template is not None" instead of "not template is None".
    if template is not None:
        image.setTemplate_(template)
    return image
26,642
def bytes_to_int(b: bytes, order: str = 'big') -> int:
    """Interpret the byte sequence *b* as an unsigned integer.

    Args:
        b: Bytes to convert; an empty sequence yields 0.
        order: Byte order, 'big' (default) or 'little'.

    Returns:
        The integer value represented by *b*.
    """
    return int.from_bytes(b, byteorder=order)
26,643
def test_mock_pp():
    """test with mocked populator."""
    # Importing `start.main` with the populator patched must still raise
    # IOError; both context managers are combined into one statement.
    with mock.patch('melissa.profile_populator.profile_populator'), \
            pytest.raises(IOError):
        from start import main
26,644
def test_consumer_offset_resume(sdc_builder, sdc_executor, database):
    """
    Ensure that the Query consumer can resume where it ended and stop the
    pipeline when it reads all the data. We use a pipeline:
        SAP HANA Query Consumer >> wiretap
        SAP HANA Query Consumer >> Finisher

    The test run the pipeline three times. Each time, before it runs the
    pipeline it inserts one row to the database and the test ensures the
    pipeline outputs the new record inserted to test the offset works fine
    after resuming the pipeline.
    """
    # Random table name so concurrent test runs never collide.
    metadata = sqlalchemy.MetaData()
    table_name = get_random_string(string.ascii_lowercase, 20)
    table = sqlalchemy.Table(
        table_name,
        metadata,
        sqlalchemy.Column('ID', sqlalchemy.Integer, primary_key=True),
        sqlalchemy.Column('NAME', sqlalchemy.String(32))
    )
    pipeline_builder = sdc_builder.get_pipeline_builder()
    origin = pipeline_builder.add_stage('SAP HANA Query Consumer')
    # Incremental mode: the consumer persists the last seen offset and the
    # ${OFFSET} placeholder in the query is replaced with it on each run.
    origin.incremental_mode = True
    origin.sql_query = 'SELECT * FROM {0} WHERE '.format(table_name) + 'id > ${OFFSET} ORDER BY id'
    origin.initial_offset = '0'
    origin.offset_column = 'ID'
    wiretap = pipeline_builder.add_wiretap()
    finisher = pipeline_builder.add_stage("Pipeline Finisher Executor")
    origin >> wiretap.destination
    # `>=` wires the origin's events (e.g. no-more-data) to the finisher so
    # the pipeline stops by itself once all rows are read.
    origin >= finisher
    pipeline = pipeline_builder.build().configure_for_environment(database)
    sdc_executor.add_pipeline(pipeline)
    try:
        logger.info('Creating table %s in %s database ...', table_name, database.type)
        table.create(database.engine)
        connection = database.engine.connect()
        for i in range(len(ROWS_IN_DATABASE)):
            # Reset the wiretap so each iteration only observes the records
            # produced by its own pipeline run.
            wiretap.reset()

            # Insert one row to the database
            connection.execute(table.insert(), [ROWS_IN_DATABASE[i]])

            sdc_executor.start_pipeline(pipeline).wait_for_finished()

            # Exactly the newly inserted row must come through: the stored
            # offset filtered out everything read in earlier runs.
            assert len(wiretap.output_records) == 1
            assert wiretap.output_records[0].get_field_data('/ID') == i + 1

            sdc_executor.get_pipeline_status(pipeline).wait_for_status('FINISHED')
    finally:
        # Always drop the table, even if an assertion above failed.
        logger.info('Dropping table %s in %s database...', table_name, database.type)
        table.drop(database.engine)
26,645
def fit_model(params,param_names,lam_gal,galaxy,noise,gal_temp, feii_tab,feii_options, temp_list,temp_fft,npad,line_profile,fwhm_gal,velscale,npix,vsyst,run_dir, fit_type,output_model): """ Constructs galaxy model by convolving templates with a LOSVD given by a specified set of velocity parameters. Parameters: pars: parameters of Markov-chain lam_gal: wavelength vector used for continuum model temp_fft: the Fourier-transformed templates npad: velscale: the velocity scale in km/s/pixel npix: number of output pixels; must be same as galaxy vsyst: dv; the systematic velocity fr """ # Construct dictionary of parameter names and their respective parameter values # param_names = [param_dict[key]['name'] for key in param_dict ] # params = [param_dict[key]['init'] for key in param_dict ] keys = param_names values = params p = dict(zip(keys, values)) c = 299792.458 # speed of light host_model = np.copy(galaxy) comp_dict = {} # Perform linear interpolation on the fwhm_gal array as a function of wavelength # We will use this to determine the fwhm resolution as a fucntion of wavelenth for each # emission line so we can correct for the resolution at every iteration. 
fwhm_gal_ftn = interp1d(lam_gal,fwhm_gal,kind='linear',bounds_error=False,fill_value=(0,0)) # Re-directed line_profile function def line_model(line_profile,*args): """ This function maps the user-chosen line profile to the correct line_model """ if (line_profile=='Gaussian'): line = gaussian(*args) return line elif (line_profile=='Lorentzian'): line = lorentzian(*args) return line ############################# Power-law Component ###################################################### # if all(comp in param_names for comp in ['power_amp','power_slope','power_break'])==True: if all(comp in param_names for comp in ['power_amp','power_slope'])==True: # Create a template model for the power-law continuum # power = simple_power_law(lam_gal,p['power_amp'],p['power_slope'],p['power_break']) # power = simple_power_law(lam_gal,p['power_amp'],p['power_slope']) # host_model = (host_model) - (power) # Subtract off continuum from galaxy, since we only want template weights to be fit comp_dict['power'] = {'comp':power,'pcolor':'xkcd:orange red','linewidth':1.0} ######################################################################################################## ############################# Fe II Component ########################################################## if (feii_tab is not None): if (feii_options['template']['type']=='VC04'): # Unpack feii_tab na_feii_tab = (feii_tab[0],feii_tab[1]) br_feii_tab = (feii_tab[2],feii_tab[3]) # Parse FeII options if (feii_options['amp_const']['bool']==False): # if amp not constant na_feii_amp = p['na_feii_amp'] br_feii_amp = p['br_feii_amp'] elif (feii_options['amp_const']['bool']==True): # if amp constant na_feii_amp = feii_options['amp_const']['na_feii_val'] br_feii_amp = feii_options['amp_const']['br_feii_val'] if (feii_options['fwhm_const']['bool']==False): # if amp not constant na_feii_fwhm = p['na_feii_fwhm'] br_feii_fwhm = p['br_feii_fwhm'] elif (feii_options['fwhm_const']['bool']==True): # if amp constant na_feii_fwhm = 
feii_options['fwhm_const']['na_feii_val'] br_feii_fwhm = feii_options['fwhm_const']['br_feii_val'] if (feii_options['voff_const']['bool']==False): # if amp not constant na_feii_voff = p['na_feii_voff'] br_feii_voff = p['br_feii_voff'] elif (feii_options['voff_const']['bool']==True): # if amp constant na_feii_voff = feii_options['voff_const']['na_feii_val'] br_feii_voff = feii_options['voff_const']['br_feii_val'] na_feii_template = VC04_feii_template(lam_gal,fwhm_gal,na_feii_tab,na_feii_amp,na_feii_fwhm,na_feii_voff,velscale,run_dir) br_feii_template = VC04_feii_template(lam_gal,fwhm_gal,br_feii_tab,br_feii_amp,br_feii_fwhm,br_feii_voff,velscale,run_dir) host_model = (host_model) - (na_feii_template) - (br_feii_template) comp_dict['na_feii_template'] = {'comp':na_feii_template,'pcolor':'xkcd:yellow','linewidth':1.0} comp_dict['br_feii_template'] = {'comp':br_feii_template,'pcolor':'xkcd:orange','linewidth':1.0} elif (feii_options['template']['type']=='K10'): # Unpack tables for each template f_trans_tab = (feii_tab[0],feii_tab[1],feii_tab[2]) s_trans_tab = (feii_tab[3],feii_tab[4],feii_tab[5]) g_trans_tab = (feii_tab[6],feii_tab[7],feii_tab[8]) z_trans_tab = (feii_tab[9],feii_tab[10]) # Parse FeII options if (feii_options['amp_const']['bool']==False): # if amp not constant f_feii_amp = p['feii_f_amp'] s_feii_amp = p['feii_s_amp'] g_feii_amp = p['feii_g_amp'] z_feii_amp = p['feii_z_amp'] elif (feii_options['amp_const']['bool']==True): # if amp constant f_feii_amp = feii_options['amp_const']['f_feii_val'] s_feii_amp = feii_options['amp_const']['s_feii_val'] g_feii_amp = feii_options['amp_const']['g_feii_val'] z_feii_amp = feii_options['amp_const']['z_feii_val'] # if (feii_options['fwhm_const']['bool']==False): # if fwhm not constant feii_fwhm = p['feii_fwhm'] elif (feii_options['fwhm_const']['bool']==True): # if fwhm constant feii_fwhm = feii_options['fwhm_const']['val'] # if (feii_options['voff_const']['bool']==False): # if voff not constant feii_voff = 
p['feii_voff'] elif (feii_options['voff_const']['bool']==True): # if voff constant feii_voff = feii_options['voff_const']['val'] # if (feii_options['temp_const']['bool']==False): # if temp not constant feii_temp = p['feii_temp'] elif (feii_options['temp_const']['bool']==True): # if temp constant feii_temp = feii_options['temp_const']['val'] f_trans_feii_template = K10_feii_template(lam_gal,'F',fwhm_gal,f_trans_tab,f_feii_amp,feii_temp,feii_fwhm,feii_voff,velscale,run_dir) s_trans_feii_template = K10_feii_template(lam_gal,'S',fwhm_gal,s_trans_tab,s_feii_amp,feii_temp,feii_fwhm,feii_voff,velscale,run_dir) g_trans_feii_template = K10_feii_template(lam_gal,'G',fwhm_gal,g_trans_tab,g_feii_amp,feii_temp,feii_fwhm,feii_voff,velscale,run_dir) z_trans_feii_template = K10_feii_template(lam_gal,'IZw1',fwhm_gal,z_trans_tab,z_feii_amp,feii_temp,feii_fwhm,feii_voff,velscale,run_dir) host_model = (host_model) - (f_trans_feii_template) - (s_trans_feii_template) - (g_trans_feii_template) - (z_trans_feii_template) comp_dict['F_feii_template'] = {'comp':f_trans_feii_template,'pcolor':'xkcd:rust orange','linewidth':1.0} comp_dict['S_feii_template'] = {'comp':s_trans_feii_template,'pcolor':'xkcd:rust orange','linewidth':1.0} comp_dict['G_feii_template'] = {'comp':g_trans_feii_template,'pcolor':'xkcd:rust orange','linewidth':1.0} comp_dict['Z_feii_template'] = {'comp':z_trans_feii_template,'pcolor':'xkcd:rust orange','linewidth':1.0} ######################################################################################################## ############################# Emission Line Components ################################################# # Narrow lines #### [OII]3727,3729 ################################################################################# if all(comp in param_names for comp in ['na_oii3727_core_amp','na_oii3727_core_fwhm','na_oii3727_core_voff','na_oii3729_core_amp'])==True: # Narrow [OII]3727 na_oii3727_core_center = 3727.092 # Angstroms na_oii3727_core_amp = 
p['na_oii3727_core_amp'] # flux units na_oii3727_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oii3727_core_center,p['na_oii3727_core_voff']) na_oii3727_core_fwhm = np.sqrt(p['na_oii3727_core_fwhm']**2+(na_oii3727_core_fwhm_res)**2) # km/s na_oii3727_core_voff = p['na_oii3727_core_voff'] # km/s na_oii3727_core = gaussian(lam_gal,na_oii3727_core_center,na_oii3727_core_amp,na_oii3727_core_fwhm,na_oii3727_core_voff,velscale) host_model = host_model - na_oii3727_core comp_dict['na_oii3727_core'] = {'comp':na_oii3727_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # Narrow [OII]3729 na_oii3729_core_center = 3729.875 # Angstroms na_oii3729_core_amp = p['na_oii3729_core_amp'] # flux units na_oii3729_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oii3729_core_center,na_oii3727_core_voff) na_oii3729_core_fwhm = np.sqrt(p['na_oii3727_core_fwhm']**2+(na_oii3729_core_fwhm_res)**2) # km/s # km/s na_oii3729_core_voff = na_oii3727_core_voff # km/s na_oii3729_core = gaussian(lam_gal,na_oii3729_core_center,na_oii3729_core_amp,na_oii3729_core_fwhm,na_oii3729_core_voff,velscale) host_model = host_model - na_oii3729_core comp_dict['na_oii3729_core'] = {'comp':na_oii3729_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # If tie_narrow=True, and includes [OIII]5007 elif (all(comp in param_names for comp in ['na_oii3727_core_amp','na_oii3727_core_voff','na_oii3729_core_amp','na_oiii5007_core_fwhm'])==True) & \ (all(comp not in param_names for comp in ['na_neiii_core_fwhm','na_Hg_fwhm','oiii4363_core_fwhm'])==True): # Narrow [OII]3727 na_oii3727_core_center = 3727.092 # Angstroms na_oii3727_core_amp = p['na_oii3727_core_amp'] # flux units na_oii3727_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oii3727_core_center,p['na_oii3727_core_voff']) na_oii3727_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_oii3727_core_fwhm_res)**2) # km/s na_oii3727_core_voff = p['na_oii3727_core_voff'] # km/s na_oii3727_core = 
gaussian(lam_gal,na_oii3727_core_center,na_oii3727_core_amp,na_oii3727_core_fwhm,na_oii3727_core_voff,velscale) host_model = host_model - na_oii3727_core comp_dict['na_oii3727_core'] = {'comp':na_oii3727_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # Narrow [OII]3729 na_oii3729_core_center = 3729.875 # Angstroms na_oii3729_core_amp = p['na_oii3729_core_amp'] # flux units na_oii3729_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oii3729_core_center,na_oii3727_core_voff) na_oii3729_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_oii3729_core_fwhm_res)**2) # km/s na_oii3729_core_voff = na_oii3727_core_voff # km/s na_oii3729_core = gaussian(lam_gal,na_oii3729_core_center,na_oii3729_core_amp,na_oii3729_core_fwhm,na_oii3729_core_voff,velscale) host_model = host_model - na_oii3729_core comp_dict['na_oii3729_core'] = {'comp':na_oii3729_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # If tie_narrow=True, but doesn't include [OIII]5007 elif (all(comp in param_names for comp in ['na_oii3727_core_amp','na_oii3727_core_voff','na_oii3729_core_amp','na_Hg_fwhm'])==True) & \ (all(comp not in param_names for comp in ['na_neiii_core_fwhm','oiii4363_core_fwhm','na_oiii5007_core_fwhm'])==True): # Narrow [OII]3727 na_oii3727_core_center = 3727.092 # Angstroms na_oii3727_core_amp = p['na_oii3727_core_amp'] # flux units na_oii3727_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oii3727_core_center,p['na_oii3727_core_voff']) na_oii3727_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_oii3727_core_fwhm_res)**2) # km/s na_oii3727_core_voff = p['na_oii3727_core_voff'] # km/s na_oii3727_core = gaussian(lam_gal,na_oii3727_core_center,na_oii3727_core_amp,na_oii3727_core_fwhm,na_oii3727_core_voff,velscale) host_model = host_model - na_oii3727_core comp_dict['na_oii3727_core'] = {'comp':na_oii3727_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # Narrow [OII]3729 na_oii3729_core_center = 3729.875 # Angstroms na_oii3729_core_amp = p['na_oii3729_core_amp'] # flux units na_oii3729_core_fwhm_res 
= get_fwhm_res(fwhm_gal_ftn,na_oii3729_core_center,na_oii3727_core_voff) na_oii3729_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_oii3729_core_fwhm_res)**2) # km/s na_oii3729_core_voff = na_oii3727_core_voff # km/s na_oii3729_core = gaussian(lam_gal,na_oii3729_core_center,na_oii3729_core_amp,na_oii3729_core_fwhm,na_oii3729_core_voff,velscale) host_model = host_model - na_oii3729_core comp_dict['na_oii3729_core'] = {'comp':na_oii3729_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} #### [NeIII]3870 ################################################################################# if all(comp in param_names for comp in ['na_neiii_core_amp','na_neiii_core_fwhm','na_neiii_core_voff'])==True: # Narrow H-gamma na_neiii_core_center = 3869.810 # Angstroms na_neiii_core_amp = p['na_neiii_core_amp'] # flux units na_neiii_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_neiii_core_center,p['na_neiii_core_voff']) na_neiii_core_fwhm = np.sqrt(p['na_neiii_core_fwhm']**2+(na_neiii_core_fwhm_res)**2) # km/s na_neiii_core_voff = p['na_neiii_core_voff'] # km/s na_neiii_core = gaussian(lam_gal,na_neiii_core_center,na_neiii_core_amp,na_neiii_core_fwhm,na_neiii_core_voff,velscale) host_model = host_model - na_neiii_core comp_dict['na_neiii_core'] = {'comp':na_neiii_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # If tie_narrow=True, and includes [OIII]5007 elif (all(comp in param_names for comp in ['na_neiii_core_amp','na_neiii_core_voff','na_oiii5007_core_fwhm'])==True) & \ (all(comp not in param_names for comp in ['na_neiii_core_fwhm','na_Hg_fwhm','oiii4363_core_fwhm'])==True): # Narrow H-gamma na_neiii_core_center = 3869.810 # Angstroms na_neiii_core_amp = p['na_neiii_core_amp'] # flux units na_neiii_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_neiii_core_center,p['na_neiii_core_voff']) na_neiii_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_neiii_core_fwhm_res)**2) # km/s na_neiii_core_voff = p['na_neiii_core_voff'] # km/s na_neiii_core = 
gaussian(lam_gal,na_neiii_core_center,na_neiii_core_amp,na_neiii_core_fwhm,na_neiii_core_voff,velscale) host_model = host_model - na_neiii_core comp_dict['na_neiii_core'] = {'comp':na_neiii_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # If tie_narrow=True, but doesn't include [OIII]5007 elif (all(comp in param_names for comp in ['na_neiii_core_amp','na_neiii_core_voff','na_Hg_fwhm'])==True) & \ (all(comp not in param_names for comp in ['na_neiii_core_fwhm','oiii4363_core_fwhm','na_oiii5007_core_fwhm'])==True): # Narrow H-gamma na_neiii_core_center = 3869.810 # Angstroms na_neiii_core_amp = p['na_neiii_core_amp'] # flux units na_neiii_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_neiii_core_center,p['na_neiii_core_voff']) na_neiii_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_neiii_core_fwhm_res)**2) # km/s na_neiii_core_voff = p['na_neiii_core_voff'] # km/s na_neiii_core = gaussian(lam_gal,na_neiii_core_center,na_neiii_core_amp,na_neiii_core_fwhm,na_neiii_core_voff,velscale) host_model = host_model - na_neiii_core comp_dict['na_neiii_core'] = {'comp':na_neiii_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} #### H-delta ##################################################################################### if all(comp in param_names for comp in ['na_Hd_amp','na_Hd_fwhm','na_Hd_voff'])==True: # Narrow H-gamma na_hd_core_center = 4102.890 # Angstroms na_hd_core_amp = p['na_Hd_amp'] # flux units na_hd_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hd_core_center,p['na_Hd_voff']) na_hd_core_fwhm = np.sqrt(p['na_Hd_fwhm']**2+(na_hd_core_fwhm_res)**2) # km/s na_hd_core_voff = p['na_Hd_voff'] # km/s na_Hd_core = gaussian(lam_gal,na_hd_core_center,na_hd_core_amp,na_hd_core_fwhm,na_hd_core_voff,velscale) host_model = host_model - na_Hd_core comp_dict['na_Hd_core'] = {'comp':na_Hd_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # If tie_narrow=True, and includes [OIII]5007 elif (all(comp in param_names for comp in ['na_Hd_amp','na_Hd_voff','na_oiii5007_core_fwhm'])==True) & 
\ (all(comp not in param_names for comp in ['na_Hd_fwhm','na_Hg_fwhm','oiii4363_core_fwhm'])==True): # Narrow H-gamma na_hd_core_center = 4102.890 # Angstroms na_hd_core_amp = p['na_Hd_amp'] # flux units na_hd_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hd_core_center,p['na_Hd_voff']) na_hd_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_hd_core_fwhm_res)**2) # km/s na_hd_core_voff = p['na_Hd_voff'] # km/s na_Hd_core = gaussian(lam_gal,na_hd_core_center,na_hd_core_amp,na_hd_core_fwhm,na_hd_core_voff,velscale) host_model = host_model - na_Hd_core comp_dict['na_Hd_core'] = {'comp':na_Hd_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # If tie_narrow=True, but doesn't include [OIII]5007 elif (all(comp in param_names for comp in ['na_Hd_amp','na_Hd_voff','na_Hg_fwhm'])==True) & \ (all(comp not in param_names for comp in ['na_Hg_fwhm','oiii4363_core_fwhm','na_oiii5007_core_fwhm'])==True): # Narrow H-gamma na_hd_core_center = 4102.890 # Angstroms na_hd_core_amp = p['na_Hd_amp'] # flux units na_hd_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hd_core_center,p['na_Hd_voff']) na_hd_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_hd_core_fwhm_res)**2) # km/s na_hd_core_voff = p['na_Hd_voff'] # km/s na_Hd_core = gaussian(lam_gal,na_hd_core_center,na_hd_core_amp,na_hd_core_fwhm,na_hd_core_voff,velscale) host_model = host_model - na_Hd_core comp_dict['na_Hd_core'] = {'comp':na_Hd_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} #### H-gamma/[OIII]4363 ########################################################################## if all(comp in param_names for comp in ['na_Hg_amp','na_Hg_fwhm','na_Hg_voff','na_oiii4363_core_amp','na_oiii4363_core_fwhm','na_oiii4363_core_voff'])==True: # Narrow H-gamma na_hg_core_center = 4341.680 # Angstroms na_hg_core_amp = p['na_Hg_amp'] # flux units na_hg_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hg_core_center,p['na_Hg_voff']) na_hg_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_hg_core_fwhm_res)**2) # km/s na_hg_core_voff = p['na_Hg_voff'] # 
km/s na_Hg_core = gaussian(lam_gal,na_hg_core_center,na_hg_core_amp,na_hg_core_fwhm,na_hg_core_voff,velscale) host_model = host_model - na_Hg_core comp_dict['na_Hg_core'] = {'comp':na_Hg_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # Narrow [OIII]4363 core na_oiii4363_core_center = 4364.436 # Angstroms na_oiii4363_core_amp = p['na_oiii4363_core_amp'] # flux units na_oiii4363_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oiii4363_core_center,p['na_oiii4363_core_voff']) na_oiii4363_core_fwhm = np.sqrt(p['na_oiii4363_core_fwhm']**2+(na_oiii4363_core_fwhm_res)**2) # km/s na_oiii4363_core_voff = p['na_oiii4363_core_voff'] # km/s na_oiii4363_core = gaussian(lam_gal,na_oiii4363_core_center,na_oiii4363_core_amp,na_oiii4363_core_fwhm,na_oiii4363_core_voff,velscale) host_model = host_model - na_oiii4363_core comp_dict['na_oiii4363_core'] = {'comp':na_oiii4363_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # If tie_narrow=True, and includes [OIII]5007 elif (all(comp in param_names for comp in ['na_Hg_amp','na_Hg_voff','na_oiii4363_core_amp','na_oiii4363_core_voff','na_oiii5007_core_fwhm'])==True) & \ (all(comp not in param_names for comp in ['na_Hg_fwhm','oiii4363_core_fwhm'])==True): # Narrow H-gamma na_hg_core_center = 4341.680 # Angstroms na_hg_core_amp = p['na_Hg_amp'] # flux units na_hg_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hg_core_center,p['na_Hg_voff']) na_hg_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_hg_core_fwhm_res)**2) # km/s na_hg_core_voff = p['na_Hg_voff'] # km/s na_Hg_core = gaussian(lam_gal,na_hg_core_center,na_hg_core_amp,na_hg_core_fwhm,na_hg_core_voff,velscale) host_model = host_model - na_Hg_core comp_dict['na_Hg_core'] = {'comp':na_Hg_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # Narrow [OIII]4363 core na_oiii4363_core_center = 4364.436 # Angstroms na_oiii4363_core_amp = p['na_oiii4363_core_amp'] # flux units na_oiii4363_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oiii4363_core_center,p['na_oiii4363_core_voff']) 
na_oiii4363_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_oiii4363_core_fwhm_res)**2) # km/s na_oiii4363_core_voff = p['na_oiii4363_core_voff'] # km/s na_oiii4363_core = gaussian(lam_gal,na_oiii4363_core_center,na_oiii4363_core_amp,na_oiii4363_core_fwhm,na_oiii4363_core_voff,velscale) host_model = host_model - na_oiii4363_core comp_dict['na_oiii4363_core'] = {'comp':na_oiii4363_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # If tie_narrow=True, but doesn't include [OIII]5007 elif (all(comp in param_names for comp in ['na_Hg_amp','na_Hg_fwhm','na_Hg_voff','na_oiii4363_core_amp','na_oiii4363_core_voff'])==True) & \ (all(comp not in param_names for comp in ['oiii4363_core_fwhm','na_oiii5007_core_fwhm'])==True): # Narrow H-gamma na_hg_core_center = 4341.680 # Angstroms na_hg_core_amp = p['na_Hg_amp'] # flux units na_hg_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hg_core_center,p['na_Hg_voff']) na_hg_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_hg_core_fwhm_res)**2) # km/s na_hg_core_voff = p['na_Hg_voff'] # km/s na_Hg_core = gaussian(lam_gal,na_hg_core_center,na_hg_core_amp,na_hg_core_fwhm,na_hg_core_voff,velscale) host_model = host_model - na_Hg_core comp_dict['na_Hg_core'] = {'comp':na_Hg_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # Narrow [OIII]4363 core na_oiii4363_core_center = 4364.436 # Angstroms na_oiii4363_core_amp = p['na_oiii4363_core_amp'] # flux units na_oiii4363_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oiii4363_core_center,p['na_oiii4363_core_voff']) na_oiii4363_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_oiii4363_core_fwhm_res)**2) # km/s na_oiii4363_core_voff = p['na_oiii4363_core_voff'] # km/s na_oiii4363_core = gaussian(lam_gal,na_oiii4363_core_center,na_oiii4363_core_amp,na_oiii4363_core_fwhm,na_oiii4363_core_voff,velscale) host_model = host_model - na_oiii4363_core comp_dict['na_oiii4363_core'] = {'comp':na_oiii4363_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} #### H-beta/[OIII] 
######################################################################################### if all(comp in param_names for comp in ['na_oiii5007_core_amp','na_oiii5007_core_fwhm','na_oiii5007_core_voff'])==True: # Narrow [OIII]5007 Core na_oiii5007_core_center = 5008.240 # Angstroms na_oiii5007_core_amp = p['na_oiii5007_core_amp'] # flux units na_oiii5007_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oiii5007_core_center,p['na_oiii5007_core_voff']) na_oiii5007_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_oiii5007_core_fwhm_res)**2) # km/s na_oiii5007_core_voff = p['na_oiii5007_core_voff'] # km/s na_oiii5007_core = gaussian(lam_gal,na_oiii5007_core_center,na_oiii5007_core_amp,na_oiii5007_core_fwhm,na_oiii5007_core_voff,velscale) # na_oiii5007_core = line_model(line_profile,lam_gal,na_oiii5007_core_center,na_oiii5007_core_amp,na_oiii5007_core_fwhm,na_oiii5007_core_voff,velscale) host_model = host_model - na_oiii5007_core comp_dict['na_oiii5007_core'] = {'comp':na_oiii5007_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # Narrow [OIII]4959 Core na_oiii4959_core_center = 4960.295 # Angstroms na_oiii4959_core_amp = (1.0/3.0)*na_oiii5007_core_amp # flux units na_oiii4959_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oiii4959_core_center,na_oiii5007_core_voff) na_oiii4959_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_oiii4959_fwhm_res)**2) # km/s na_oiii4959_core_voff = na_oiii5007_core_voff # km/s na_oiii4959_core = gaussian(lam_gal,na_oiii4959_core_center,na_oiii4959_core_amp,na_oiii4959_core_fwhm,na_oiii4959_core_voff,velscale) # na_oiii4959_core = line_model(line_profile,lam_gal,na_oiii4959_core_center,na_oiii4959_core_amp,na_oiii4959_core_fwhm,na_oiii4959_core_voff,velscale) host_model = host_model - na_oiii4959_core comp_dict['na_oiii4959_core'] = {'comp':na_oiii4959_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} if all(comp in param_names for comp in ['na_Hb_core_amp','na_Hb_core_voff'])==True: # Narrow H-beta na_hb_core_center = 4862.680 # Angstroms 
na_hb_core_amp = p['na_Hb_core_amp'] # flux units na_hb_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hb_core_center,p['na_Hb_core_voff']) na_hb_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_hb_core_fwhm_res)**2) # km/s na_hb_core_voff = p['na_Hb_core_voff'] # km/s na_Hb_core = gaussian(lam_gal,na_hb_core_center,na_hb_core_amp,na_hb_core_fwhm,na_hb_core_voff,velscale) # na_Hb_core = line_model(line_profile,lam_gal,na_hb_core_center,na_hb_core_amp,na_hb_core_fwhm,na_hb_core_voff,velscale) host_model = host_model - na_Hb_core comp_dict['na_Hb_core'] = {'comp':na_Hb_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} #### H-alpha/[NII]/[SII] #################################################################################### if all(comp in param_names for comp in ['na_Ha_core_amp','na_Ha_core_fwhm','na_Ha_core_voff', 'na_nii6585_core_amp', 'na_sii6732_core_amp','na_sii6718_core_amp'])==True: # Narrow H-alpha na_ha_core_center = 6564.610 # Angstroms na_ha_core_amp = p['na_Ha_core_amp'] # flux units na_ha_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_ha_core_center,p['na_Ha_core_voff']) na_ha_core_fwhm = np.sqrt(p['na_Ha_core_fwhm']**2+(na_ha_core_fwhm_res)**2) # km/s na_ha_core_voff = p['na_Ha_core_voff'] # km/s na_Ha_core = gaussian(lam_gal,na_ha_core_center,na_ha_core_amp,na_ha_core_fwhm,na_ha_core_voff,velscale) host_model = host_model - na_Ha_core comp_dict['na_Ha_core'] = {'comp':na_Ha_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # Narrow [NII]6585 Core na_nii6585_core_center = 6585.270 # Angstroms na_nii6585_core_amp = p['na_nii6585_core_amp'] # flux units na_nii6585_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6585_core_center,na_ha_core_voff) na_nii6585_core_fwhm = np.sqrt(p['na_Ha_core_fwhm']**2+(na_nii6585_core_fwhm_res)**2) # km/s na_nii6585_core_voff = na_ha_core_voff na_nii6585_core = gaussian(lam_gal,na_nii6585_core_center,na_nii6585_core_amp,na_nii6585_core_fwhm,na_nii6585_core_voff,velscale) host_model = host_model - na_nii6585_core 
comp_dict['na_nii6585_core'] = {'comp':na_nii6585_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # Narrow [NII]6549 Core na_nii6549_core_center = 6549.860 # Angstroms na_nii6549_core_amp = (1.0/2.93)*na_nii6585_core_amp # flux units na_nii6549_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6549_core_center,na_ha_core_voff) na_nii6549_core_fwhm = np.sqrt(p['na_Ha_core_fwhm']**2+(na_nii6549_core_fwhm_res)**2) # km/s # km/s na_nii6549_core_voff = na_ha_core_voff na_nii6549_core = gaussian(lam_gal,na_nii6549_core_center,na_nii6549_core_amp,na_nii6549_core_fwhm,na_nii6549_core_voff,velscale) host_model = host_model - na_nii6549_core comp_dict['na_nii6549_core'] = {'comp':na_nii6549_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # Narrow [SII]6718 na_sii6718_core_center = 6718.290 # Angstroms na_sii6718_core_amp = p['na_sii6718_core_amp'] # flux units na_sii6718_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6718_core_center,na_ha_core_voff) na_sii6718_core_fwhm = np.sqrt(p['na_Ha_core_fwhm']**2+(na_sii6718_core_fwhm_res)**2) # km/s #na_sii6732_fwhm # km/s na_sii6718_core_voff = na_ha_core_voff na_sii6718_core = gaussian(lam_gal,na_sii6718_core_center,na_sii6718_core_amp,na_sii6718_core_fwhm,na_sii6718_core_voff,velscale) host_model = host_model - na_sii6718_core comp_dict['na_sii6718_core'] = {'comp':na_sii6718_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # Narrow [SII]6732 na_sii6732_core_center = 6732.670 # Angstroms na_sii6732_core_amp = p['na_sii6732_core_amp'] # flux units na_sii6732_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6732_core_center,na_ha_core_voff) na_sii6732_core_fwhm = np.sqrt(p['na_Ha_core_fwhm']**2+(na_sii6732_core_fwhm_res)**2) # km/s na_sii6732_core_voff = na_ha_core_voff na_sii6732_core = gaussian(lam_gal,na_sii6732_core_center,na_sii6732_core_amp,na_sii6732_core_fwhm,na_sii6732_core_voff,velscale) host_model = host_model - na_sii6732_core comp_dict['na_sii6732_core'] = {'comp':na_sii6732_core,'pcolor':'xkcd:dodger 
blue','linewidth':1.0} elif (all(comp in param_names for comp in ['na_Ha_core_amp','na_Ha_core_voff', 'na_nii6585_core_amp', 'na_sii6732_core_amp','na_sii6718_core_amp', 'na_oiii5007_core_fwhm'])==True) & ('na_Ha_core_fwhm' not in param_names): # If all narrow line widths are tied to [OIII]5007 FWHM... # Narrow H-alpha na_ha_core_center = 6564.610 # Angstroms na_ha_core_amp = p['na_Ha_core_amp'] # flux units na_ha_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_ha_core_center,p['na_Ha_core_voff']) na_ha_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_ha_core_fwhm_res)**2) # km/s na_ha_core_voff = p['na_Ha_core_voff'] # km/s na_Ha_core = gaussian(lam_gal,na_ha_core_center,na_ha_core_amp,na_ha_core_fwhm,na_ha_core_voff,velscale) host_model = host_model - na_Ha_core comp_dict['na_Ha_core'] = {'comp':na_Ha_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # Narrow [NII]6585 Core na_nii6585_core_center = 6585.270 # Angstroms na_nii6585_core_amp = p['na_nii6585_core_amp'] # flux units na_nii6585_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6585_core_center,na_ha_core_voff) na_nii6585_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_nii6585_core_fwhm_res)**2) # km/s na_nii6585_core_voff = na_ha_core_voff na_nii6585_core = gaussian(lam_gal,na_nii6585_core_center,na_nii6585_core_amp,na_nii6585_core_fwhm,na_nii6585_core_voff,velscale) host_model = host_model - na_nii6585_core comp_dict['na_nii6585_core'] = {'comp':na_nii6585_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # Narrow [NII]6549 Core na_nii6549_core_center = 6549.860 # Angstroms na_nii6549_core_amp = (1.0/2.93)*na_nii6585_core_amp # flux units na_nii6549_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6549_core_center,na_ha_core_voff) na_nii6549_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_nii6549_core_fwhm_res)**2) # km/s na_nii6549_core_voff = na_ha_core_voff na_nii6549_core = gaussian(lam_gal,na_nii6549_core_center,na_nii6549_core_amp,na_nii6549_core_fwhm,na_nii6549_core_voff,velscale) 
host_model = host_model - na_nii6549_core comp_dict['na_nii6549_core'] = {'comp':na_nii6549_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # Narrow [SII]6732 na_sii6732_core_center = 6732.670 # Angstroms na_sii6732_core_amp = p['na_sii6732_core_amp'] # flux units na_sii6732_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6732_core_center,na_ha_core_voff) na_sii6732_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_sii6732_core_fwhm_res)**2) # km/s na_sii6732_core_voff = na_ha_core_voff na_sii6732_core = gaussian(lam_gal,na_sii6732_core_center,na_sii6732_core_amp,na_sii6732_core_fwhm,na_sii6732_core_voff,velscale) host_model = host_model - na_sii6732_core comp_dict['na_sii6732_core'] = {'comp':na_sii6732_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} # Narrow [SII]6718 na_sii6718_core_center = 6718.290 # Angstroms na_sii6718_core_amp = p['na_sii6718_core_amp'] # flux units na_sii6718_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6718_core_center,na_ha_core_voff) na_sii6718_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_sii6718_core_fwhm_res)**2) # km/s na_sii6718_core_voff = na_ha_core_voff na_sii6718_core = gaussian(lam_gal,na_sii6718_core_center,na_sii6718_core_amp,na_sii6718_core_fwhm,na_sii6718_core_voff,velscale) host_model = host_model - na_sii6718_core comp_dict['na_sii6718_core'] = {'comp':na_sii6718_core,'pcolor':'xkcd:dodger blue','linewidth':1.0} ######################################################################################################## # Outflow Components #### Hb/[OIII] outflows ################################################################################ if (all(comp in param_names for comp in ['na_oiii5007_outflow_amp','na_oiii5007_outflow_fwhm','na_oiii5007_outflow_voff'])==True): # Broad [OIII]5007 Outflow; na_oiii5007_outflow_center = 5008.240 # Angstroms na_oiii5007_outflow_amp = p['na_oiii5007_outflow_amp'] # flux units na_oiii5007_outflow_fwhm_res = 
get_fwhm_res(fwhm_gal_ftn,na_oiii5007_outflow_center,p['na_oiii5007_outflow_voff']) na_oiii5007_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_oiii5007_outflow_fwhm_res)**2) # km/s na_oiii5007_outflow_voff = p['na_oiii5007_outflow_voff'] # km/s na_oiii5007_outflow = gaussian(lam_gal,na_oiii5007_outflow_center,na_oiii5007_outflow_amp,na_oiii5007_outflow_fwhm,na_oiii5007_outflow_voff,velscale) host_model = host_model - na_oiii5007_outflow comp_dict['na_oiii5007_outflow'] = {'comp':na_oiii5007_outflow,'pcolor':'xkcd:magenta','linewidth':1.0} # Broad [OIII]4959 Outflow; na_oiii4959_outflow_center = 4960.295 # Angstroms na_oiii4959_outflow_amp = na_oiii4959_core_amp*na_oiii5007_outflow_amp/na_oiii5007_core_amp # flux units na_oiii4959_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oiii4959_outflow_center,na_oiii5007_outflow_voff) na_oiii4959_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_oiii4959_outflow_fwhm_res)**2) # km/s na_oiii4959_outflow_voff = na_oiii5007_outflow_voff # km/s if (na_oiii4959_outflow_amp!=na_oiii4959_outflow_amp/1.0) or (na_oiii4959_outflow_amp==np.inf): na_oiii4959_outflow_amp=0.0 na_oiii4959_outflow = gaussian(lam_gal,na_oiii4959_outflow_center,na_oiii4959_outflow_amp,na_oiii4959_outflow_fwhm,na_oiii4959_outflow_voff,velscale) host_model = host_model - na_oiii4959_outflow comp_dict['na_oiii4959_outflow'] = {'comp':na_oiii4959_outflow,'pcolor':'xkcd:magenta','linewidth':1.0} if (all(comp in param_names for comp in ['na_oiii5007_outflow_amp','na_oiii5007_outflow_fwhm','na_oiii5007_outflow_voff','na_Hb_core_amp','na_Hb_core_voff'])==True): # Broad H-beta Outflow; only a model, no free parameters, tied to [OIII]5007 na_hb_core_center = 4862.680 # Angstroms na_hb_outflow_amp = na_hb_core_amp*na_oiii5007_outflow_amp/na_oiii5007_core_amp na_hb_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hb_core_center,na_hb_core_voff+na_oiii5007_outflow_voff) na_hb_outflow_fwhm = 
np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_hb_outflow_fwhm_res)**2) # km/s na_hb_outflow_voff = na_hb_core_voff+na_oiii5007_outflow_voff # km/s if (na_hb_outflow_amp!=na_hb_outflow_amp/1.0) or (na_hb_outflow_amp==np.inf): na_hb_outflow_amp=0.0 na_Hb_outflow = gaussian(lam_gal,na_hb_core_center,na_hb_outflow_amp,na_hb_outflow_fwhm,na_hb_outflow_voff,velscale) host_model = host_model - na_Hb_outflow comp_dict['na_Hb_outflow'] = {'comp':na_Hb_outflow,'pcolor':'xkcd:magenta','linewidth':1.0} #### Ha/[NII]/[SII] outflows ########################################################################### # Outflows in H-alpha/[NII] are poorly constrained due to the presence of a broad line and/or blending of narrow lines # First, we check if the fit includes Hb/[OIII] outflows, if it does, we use the outflow in [OIII] to constrain the outflows # in the Ha/[NII]/[SII] region. If the fi does NOT include Hb/[OIII] outflows (*not recommended*), we then allow the outflows # in the Ha/[NII]/[SII] region to be fit as free parameters. 
if (all(comp in param_names for comp in ['na_Ha_core_amp','na_Ha_core_voff','na_nii6585_core_amp','na_sii6732_core_amp','na_sii6718_core_amp', 'na_oiii5007_outflow_amp','na_oiii5007_outflow_fwhm','na_oiii5007_outflow_voff'])==True) and \ (all(comp not in param_names for comp in ['na_Ha_outflow_amp','na_Ha_outflow_fwhm','na_Ha_outflow_voff'])==True): # H-alpha Outflow; na_ha_outflow_center = 6564.610 # Angstroms na_ha_outflow_amp = p['na_Ha_core_amp']*p['na_oiii5007_outflow_amp']/p['na_oiii5007_core_amp'] # flux units na_ha_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_ha_outflow_center,p['na_oiii5007_outflow_voff']) na_ha_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_ha_outflow_fwhm_res)**2) # km/s na_ha_outflow_voff = p['na_oiii5007_outflow_voff'] # km/s # km/s if (na_ha_outflow_amp!=na_ha_outflow_amp/1.0) or (na_ha_outflow_amp==np.inf): na_ha_outflow_amp=0.0 na_Ha_outflow = gaussian(lam_gal,na_ha_outflow_center,na_ha_outflow_amp,na_ha_outflow_fwhm,na_ha_outflow_voff,velscale) host_model = host_model - na_Ha_outflow comp_dict['na_Ha_outflow'] = {'comp':na_Ha_outflow,'pcolor':'xkcd:magenta','linewidth':1.0} # [NII]6585 Outflow; na_nii6585_outflow_center = 6585.270 # Angstroms na_nii6585_outflow_amp = na_nii6585_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units na_nii6585_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6585_outflow_center,na_ha_outflow_voff) na_nii6585_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_nii6585_outflow_fwhm_res)**2) na_nii6585_outflow_voff = na_ha_outflow_voff if (na_nii6585_outflow_amp!=na_nii6585_outflow_amp/1.0) or (na_nii6585_outflow_amp==np.inf): na_nii6585_outflow_amp=0.0 na_nii6585_outflow = gaussian(lam_gal,na_nii6585_outflow_center,na_nii6585_outflow_amp,na_nii6585_outflow_fwhm,na_nii6585_outflow_voff,velscale) host_model = host_model - na_nii6585_outflow comp_dict['na_nii6585_outflow'] = {'comp':na_nii6585_outflow,'pcolor':'xkcd:magenta','linewidth':1.0} # [NII]6549 Outflow; 
na_nii6549_outflow_center = 6549.860 # Angstroms na_nii6549_outflow_amp = na_nii6549_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units na_nii6549_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6549_outflow_center,na_ha_outflow_voff) na_nii6549_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_nii6549_outflow_fwhm_res)**2) # km/s na_nii6549_outflow_voff = na_ha_outflow_voff # km/s if (na_nii6549_outflow_amp!=na_nii6549_outflow_amp/1.0) or (na_nii6549_outflow_amp==np.inf): na_nii6549_outflow_amp=0.0 na_nii6549_outflow = gaussian(lam_gal,na_nii6549_outflow_center,na_nii6549_outflow_amp,na_nii6549_outflow_fwhm,na_nii6549_outflow_voff,velscale) host_model = host_model - na_nii6549_outflow comp_dict['na_nii6549_outflow'] = {'comp':na_nii6549_outflow,'pcolor':'xkcd:magenta','linewidth':1.0} # Broad [SII]6718 Outflow; na_sii6718_outflow_center = 6718.290 # Angstroms na_sii6718_outflow_amp = na_sii6718_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units na_sii6718_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6718_outflow_center,na_ha_outflow_voff) na_sii6718_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_sii6718_outflow_fwhm_res)**2) # km/s na_sii6718_outflow_voff = na_ha_outflow_voff # km/s if (na_sii6718_outflow_amp!=na_sii6718_outflow_amp/1.0) or (na_sii6718_outflow_amp==np.inf): na_sii6718_outflow_amp=0.0 na_sii6718_outflow = gaussian(lam_gal,na_sii6718_outflow_center,na_sii6718_outflow_amp,na_sii6718_outflow_fwhm,na_sii6718_outflow_voff,velscale) host_model = host_model - na_sii6718_outflow comp_dict['na_sii6718_outflow'] = {'comp':na_sii6718_outflow,'pcolor':'xkcd:magenta','linewidth':1.0} # [SII]6732 Outflow; na_sii6732_outflow_center = 6732.670 # Angstroms na_sii6732_outflow_amp = na_sii6732_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units na_sii6732_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6732_outflow_center,na_ha_outflow_voff) na_sii6732_outflow_fwhm = 
np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_sii6732_outflow_fwhm_res)**2) # km/s na_sii6732_outflow_voff = na_ha_outflow_voff # km/s if (na_sii6732_outflow_amp!=na_sii6732_outflow_amp/1.0) or (na_sii6732_outflow_amp==np.inf): na_sii6732_outflow_amp=0.0 na_sii6732_outflow = gaussian(lam_gal,na_sii6732_outflow_center,na_sii6732_outflow_amp,na_sii6732_outflow_fwhm,na_sii6732_outflow_voff,velscale) host_model = host_model - na_sii6732_outflow comp_dict['na_sii6732_outflow'] = {'comp':na_sii6732_outflow,'pcolor':'xkcd:magenta','linewidth':1.0} elif (all(comp in param_names for comp in ['na_Ha_outflow_amp','na_Ha_outflow_fwhm','na_Ha_outflow_voff'])==True) and \ (all(comp not in param_names for comp in ['na_oiii5007_outflow_amp','na_oiii5007_outflow_fwhm','na_oiii5007_outflow_voff'])==True): # H-alpha Outflow; na_ha_outflow_center = 6564.610 # Angstroms na_ha_outflow_amp = p['na_Ha_outflow_amp'] # flux units na_ha_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_ha_outflow_center,p['na_Ha_outflow_voff']) na_ha_outflow_fwhm = np.sqrt(p['na_Ha_outflow_fwhm']**2+(na_ha_outflow_fwhm_res)**2) # km/s na_ha_outflow_voff = p['na_Ha_outflow_voff'] # km/s # km/s if (na_ha_outflow_amp!=na_ha_outflow_amp/1.0) or (na_ha_outflow_amp==np.inf): na_ha_outflow_amp=0.0 na_Ha_outflow = gaussian(lam_gal,na_ha_outflow_center,na_ha_outflow_amp,na_ha_outflow_fwhm,na_ha_outflow_voff,velscale) host_model = host_model - na_Ha_outflow comp_dict['na_Ha_outflow'] = {'comp':na_Ha_outflow,'pcolor':'xkcd:magenta','linewidth':1.0} # [NII]6585 Outflow; na_nii6585_outflow_center = 6585.270 # Angstroms na_nii6585_outflow_amp = na_nii6585_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units na_nii6585_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6585_outflow_center,na_ha_outflow_voff) na_nii6585_outflow_fwhm = np.sqrt(p['na_Ha_outflow_fwhm']**2+(na_nii6585_outflow_fwhm_res)**2) na_nii6585_outflow_voff = na_ha_outflow_voff if (na_nii6585_outflow_amp!=na_nii6585_outflow_amp/1.0) or 
(na_nii6585_outflow_amp==np.inf): na_nii6585_outflow_amp=0.0 na_nii6585_outflow = gaussian(lam_gal,na_nii6585_outflow_center,na_nii6585_outflow_amp,na_nii6585_outflow_fwhm,na_nii6585_outflow_voff,velscale) host_model = host_model - na_nii6585_outflow comp_dict['na_nii6585_outflow'] = {'comp':na_nii6585_outflow,'pcolor':'xkcd:magenta','linewidth':1.0} # [NII]6549 Outflow; na_nii6549_outflow_center = 6549.860 # Angstroms na_nii6549_outflow_amp = na_nii6549_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units na_nii6549_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6549_outflow_center,na_ha_outflow_voff) na_nii6549_outflow_fwhm = np.sqrt(p['na_Ha_outflow_fwhm']**2+(na_nii6549_outflow_fwhm_res)**2) # km/s na_nii6549_outflow_voff = na_ha_outflow_voff # km/s if (na_nii6549_outflow_amp!=na_nii6549_outflow_amp/1.0) or (na_nii6549_outflow_amp==np.inf): na_nii6549_outflow_amp=0.0 na_nii6549_outflow = gaussian(lam_gal,na_nii6549_outflow_center,na_nii6549_outflow_amp,na_nii6549_outflow_fwhm,na_nii6549_outflow_voff,velscale) host_model = host_model - na_nii6549_outflow comp_dict['na_nii6549_outflow'] = {'comp':na_nii6549_outflow,'pcolor':'xkcd:magenta','linewidth':1.0} # Broad [SII]6718 Outflow; na_sii6718_outflow_center = 6718.290 # Angstroms na_sii6718_outflow_amp = na_sii6718_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units na_sii6718_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6718_outflow_center,na_ha_outflow_voff) na_sii6718_outflow_fwhm = np.sqrt(p['na_Ha_outflow_fwhm']**2+(na_sii6718_outflow_fwhm_res)**2) # km/s na_sii6718_outflow_voff = na_ha_outflow_voff # km/s if (na_sii6718_outflow_amp!=na_sii6718_outflow_amp/1.0) or (na_sii6718_outflow_amp==np.inf): na_sii6718_outflow_amp=0.0 na_sii6718_outflow = gaussian(lam_gal,na_sii6718_outflow_center,na_sii6718_outflow_amp,na_sii6718_outflow_fwhm,na_sii6718_outflow_voff,velscale) host_model = host_model - na_sii6718_outflow comp_dict['na_sii6718_outflow'] = 
{'comp':na_sii6718_outflow,'pcolor':'xkcd:magenta','linewidth':1.0} # [SII]6732 Outflow; na_sii6732_outflow_center = 6732.670 # Angstroms na_sii6732_outflow_amp = na_sii6732_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units na_sii6732_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6732_outflow_center,na_ha_outflow_voff) na_sii6732_outflow_fwhm = np.sqrt(p['na_Ha_outflow_fwhm']**2+(na_sii6732_outflow_fwhm_res)**2) # km/s na_sii6732_outflow_voff = na_ha_outflow_voff # km/s if (na_sii6732_outflow_amp!=na_sii6732_outflow_amp/1.0) or (na_sii6732_outflow_amp==np.inf): na_sii6732_outflow_amp=0.0 na_sii6732_outflow = gaussian(lam_gal,na_sii6732_outflow_center,na_sii6732_outflow_amp,na_sii6732_outflow_fwhm,na_sii6732_outflow_voff,velscale) host_model = host_model - na_sii6732_outflow comp_dict['na_sii6732_outflow'] = {'comp':na_sii6732_outflow,'pcolor':'xkcd:magenta','linewidth':1.0} ######################################################################################################## # Broad Lines #### Br. H-gamma ####################################################################################### if all(comp in param_names for comp in ['br_Hg_amp','br_Hg_fwhm','br_Hg_voff'])==True: br_hg_center = 4341.680 # Angstroms br_hg_amp = p['br_Hg_amp'] # flux units br_hg_fwhm_res = get_fwhm_res(fwhm_gal_ftn,br_hg_center,p['br_Hg_voff']) br_hg_fwhm = np.sqrt(p['br_Hg_fwhm']**2+(br_hg_fwhm_res)**2) # km/s br_hg_voff = p['br_Hg_voff'] # km/s # br_Hg = gaussian(lam_gal,br_hg_center,br_hg_amp,br_hg_fwhm,br_hg_voff,velscale) br_Hg = line_model(line_profile,lam_gal,br_hg_center,br_hg_amp,br_hg_fwhm,br_hg_voff,velscale) host_model = host_model - br_Hg comp_dict['br_Hg'] = {'comp':br_Hg,'pcolor':'xkcd:turquoise','linewidth':1.0} #### Br. 
H-beta ######################################################################################## if all(comp in param_names for comp in ['br_Hb_amp','br_Hb_fwhm','br_Hb_voff'])==True: br_hb_center = 4862.68 # Angstroms br_hb_amp = p['br_Hb_amp'] # flux units br_hb_fwhm_res = get_fwhm_res(fwhm_gal_ftn,br_hb_center,p['br_Hb_voff']) br_hb_fwhm = np.sqrt(p['br_Hb_fwhm']**2+(br_hb_fwhm_res)**2) # km/s br_hb_voff = p['br_Hb_voff'] # km/s # br_Hb = gaussian(lam_gal,br_hb_center,br_hb_amp,br_hb_fwhm,br_hb_voff,velscale) br_Hb = line_model(line_profile,lam_gal,br_hb_center,br_hb_amp,br_hb_fwhm,br_hb_voff,velscale) host_model = host_model - br_Hb comp_dict['br_Hb'] = {'comp':br_Hb,'pcolor':'xkcd:turquoise','linewidth':1.0} #### Br. H-alpha ####################################################################################### if all(comp in param_names for comp in ['br_Ha_amp','br_Ha_fwhm','br_Ha_voff'])==True: br_ha_center = 6564.610 # Angstroms br_ha_amp = p['br_Ha_amp'] # flux units br_ha_fwhm_res = get_fwhm_res(fwhm_gal_ftn,br_ha_center,p['br_Ha_voff']) br_ha_fwhm = np.sqrt(p['br_Ha_fwhm']**2+(br_ha_fwhm_res)**2) # km/s br_ha_voff = p['br_Ha_voff'] # km/s # br_Ha = gaussian(lam_gal,br_ha_center,br_ha_amp,br_ha_fwhm,br_ha_voff,velscale) br_Ha = line_model(line_profile,lam_gal,br_ha_center,br_ha_amp,br_ha_fwhm,br_ha_voff,velscale) host_model = host_model - br_Ha comp_dict['br_Ha'] = {'comp':br_Ha,'pcolor':'xkcd:turquoise','linewidth':1.0} ######################################################################################################## ######################################################################################################## ############################# Host-galaxy Component ###################################################### if all(comp in param_names for comp in ['gal_temp_amp'])==True: gal_temp = p['gal_temp_amp']*(gal_temp) host_model = (host_model) - (gal_temp) # Subtract off continuum from galaxy, since we only want template weights to be fit 
comp_dict['host_galaxy'] = {'comp':gal_temp,'pcolor':'xkcd:lime green','linewidth':1.0} ######################################################################################################## ############################# LOSVD Component #################################################### if all(comp in param_names for comp in ['stel_vel','stel_disp'])==True: # Convolve the templates with a LOSVD losvd_params = [p['stel_vel'],p['stel_disp']] # ind 0 = velocity*, ind 1 = sigma* conv_temp = convolve_gauss_hermite(temp_fft,npad,float(velscale),\ losvd_params,npix,velscale_ratio=1,sigma_diff=0,vsyst=vsyst) # Fitted weights of all templates using Non-negative Least Squares (NNLS) host_model[host_model/host_model!=1] = 0 weights = nnls(conv_temp,host_model) # scipy.optimize Non-negative Least Squares host_galaxy = (np.sum(weights*conv_temp,axis=1)) comp_dict['host_galaxy'] = {'comp':host_galaxy,'pcolor':'xkcd:lime green','linewidth':1.0} ######################################################################################################## # The final model gmodel = np.sum((d['comp'] for d in comp_dict.values() if d),axis=0) ########################## Measure Emission Line Fluxes ################################################# # Fluxes of components are stored in a dictionary and returned to emcee as metadata blob. # This is a vast improvement over the previous method, which was storing fluxes in an # output file at each iteration, which is computationally expensive for opening, writing to, and closing # a file nwalkers x niter times. 
if (fit_type=='final') and (output_model==False): fluxes = {} for key in comp_dict: # compute the integrated flux flux = simps(comp_dict[key]['comp'],lam_gal) # add key/value pair to dictionary fluxes[key+'_flux'] = flux ################################################################################## # Add last components to comp_dict for plotting purposes # Add galaxy, sigma, model, and residuals to comp_dict comp_dict['data'] = {'comp':galaxy ,'pcolor':'xkcd:white', 'linewidth':0.5} comp_dict['wave'] = {'comp':lam_gal ,'pcolor':'xkcd:black', 'linewidth':0.5} comp_dict['noise'] = {'comp':noise ,'pcolor':'xkcd:cyan' , 'linewidth':0.5} comp_dict['model'] = {'comp':gmodel ,'pcolor':'xkcd:red' , 'linewidth':1.0} comp_dict['resid'] = {'comp':galaxy-gmodel ,'pcolor':'xkcd:white', 'linewidth':0.5} ################################################################################## ################################################################################## if (fit_type=='init') and (output_model==False): # For max. likelihood fitting return gmodel if (fit_type=='init') and (output_model==True): # For max. likelihood fitting return comp_dict elif (fit_type=='outflow_test'): return comp_dict elif (fit_type=='final') and (output_model==False): # For emcee return gmodel, fluxes elif (fit_type=='final') and (output_model==True): # output all models for best-fit model return comp_dict
26,646
def main():
    """Initialize a tastyworks session and stream quotes until the loop exits.

    Side effects: reads TW_USER / TW_PASSWORD from the environment, opens a
    network session and a data streamer, and drives an asyncio event loop.
    """
    # Credentials come from the environment; empty strings if unset.
    tasty_client = tasty_session.create_new_session( environ.get('TW_USER', "") , environ.get('TW_PASSWORD', ""))
    streamer = DataStreamer(tasty_client)
    LOGGER.info('Streamer token: %s' % streamer.get_streamer_token())
    loop = asyncio.get_event_loop()
    try:
        #loop.run_until_complete(main_loop(tasty_client, streamer))
        loop.run_until_complete(getquote(tasty_client, streamer))
    except Exception:
        # Broad catch is deliberate here: log the failure, then fall through
        # to the cleanup below so pending tasks are drained before exit.
        LOGGER.exception('Exception in main loop')
    finally:
        # find all futures/tasks still running and wait for them to finish
        pending_tasks = [task for task in asyncio.all_tasks() if not task.done()]
        loop.run_until_complete(asyncio.gather(*pending_tasks))
        loop.close()
26,647
def _append_pretty(parent, child):
    """Append *child* to *parent* and fix up tail whitespace so the
    serialized XML stays consistently indented.

    NB: works only if *parent* already has at least one child — the
    indentation level is inferred from the last sibling's tail.
    """
    # Infer the indentation level from the last sibling's tail (2 spaces per
    # level).  `parent[-1]` replaces the deprecated `parent.getchildren()[-1]`,
    # which was removed from ElementTree in Python 3.9.
    level = int(len((parent[-1].tail).strip("\n")) / 2 + 1)
    util.indent_xml(child, level)
    # Point the previous sibling's tail at the child's indent, and give the
    # child a tail that closes back out to the parent's level.
    parent[-1].tail = "\n" + " " * level
    child.tail = "\n" + " " * (level - 1)
    parent.append(child)
26,648
def generate_numbers():
    """Return three distinct random digits, each as a one-character string.

    The digits are drawn without replacement from '0'-'9' by shuffling the
    full pool and taking the first three entries.

    Return:
        list[str]: three distinct digit strings, e.g. ['7', '0', '3'].
    """
    # Full digit pool as single-character strings.
    pool = list("0123456789")
    # In-place random permutation of the pool.
    random.shuffle(pool)
    # The first three entries of a shuffled pool are a uniform sample
    # without replacement.
    return pool[:3]
26,649
def get_media():
    """Retrieves metadata for all of this server's uploaded media.

    Can use the following query parameters:

    * max: The maximum number of records to return (page size)
    * page: The 1-based page of records

    Returns a JSON payload with ``total`` and ``uploads``.  Raises
    APIError(422) for out-of-range or non-integer query parameters.
    """
    error_on_unauthorized()
    media = Upload.query.order_by(Upload.id)
    total_num = media.count()
    if total_num == 0:
        return jsonify(total=0, uploads=[])
    try:
        # Default page size is "everything"; int() may raise ValueError,
        # handled below.
        count = int(request.args.get('max', total_num))
        page = int(request.args.get('page', 1))
        if count <= 0 or page <= 0:
            raise APIError(422, "Query parameters out of range")
        # Clamp the slice end so the last page can be short.
        begin = (page - 1) * count
        end = min(begin + count, total_num)
        # NOTE(review): media.all() materializes every record before slicing;
        # limit()/offset() would avoid that — confirm expected dataset sizes.
        return jsonify(total=total_num,
                       uploads=[upload_to_dict(u) for u in media.all()[begin:end]]), 200
    except ValueError:
        raise APIError(422, "Invalid query parameter")
26,650
def celery():
    """Start a celery worker (dev channel only).

    Creates/enters the app's ``celery`` run directory and launches the
    worker against the ``sirepo.celery_tasks`` app with its queue names.
    """
    # Only permitted on the dev channel.
    assert pkconfig.channel_in('dev')
    # Imported lazily so other commands don't pay the celery import cost.
    import celery.bin.celery
    import sirepo.celery_tasks
    run_dir = _run_dir().join('celery').ensure(dir=True)
    # Run the worker from inside its run directory.
    with pkio.save_chdir(run_dir):
        celery.bin.celery.main(argv=[
            'celery',
            'worker',
            '--app=sirepo.celery_tasks',
            '--no-color',
            '-Ofair',
            '--queue=' + ','.join(sirepo.celery_tasks.QUEUE_NAMES),
        ])
26,651
async def test_occupation_not_sure():
    """
    Selecting "not sure" should give a description of the occupation
    categories, then ask the question again (state and session unchanged).
    """
    # User currently at the occupation question, mid-session.
    u = User(
        addr="27820001001", state=StateData(name="state_occupation"), session_id="1"
    )
    app = Application(u)
    msg = Message(
        content="not sure",
        to_addr="27820001002",
        from_addr="27820001001",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    # Expect two outbound messages: the explanation, then the re-asked question.
    [info, reply] = await app.process_message(msg)
    assert info.content == "\n".join(
        [
            "*Health Care Workers* include doctors, nurses, dentists, pharmacists, "
            "medical specialists and all people involved in providing health "
            "services such as cleaning, security, medical waste disposal and "
            "administrative work.",
            "",
            "*Essential Workers* include police officers, miners, teachers, people "
            "working in security, retail, food, funeral, banking and essential "
            "muncipal and home affairs, border control and port health services.",
        ]
    )
    assert reply.content == "\n".join(
        [
            "◼️◻️◻️◻️◻️",
            "",
            "Which of these positions or job titles describes your current "
            "employment:",
        ]
    )
    # WhatsApp list-message metadata with the selectable occupation rows.
    assert reply.helper_metadata == {
        "button": "Select Employment",
        "sections": [
            {
                "rows": [
                    {"id": "Health Care Worker", "title": "Health Care Worker"},
                    {"id": "Essential Worker", "title": "Essential Worker"},
                    {"id": "Other", "title": "Other"},
                    {"id": "Not Sure", "title": "Not Sure"},
                ]
            }
        ],
    }
    # The user stays on the same state and session.
    assert u.state.name == "state_occupation"
    assert u.session_id == "1"
26,652
def start_EC2(ec2_resource, ami_id, i_type, key_path, label, envs, dry_run):
    """Launch, tag, and report a new EC2 instance.

    First probes permissions via create_EC2 (a DryRunOperation error means
    access IS granted); then launches the instance with user data that
    mounts EFS and installs the caller's public key, waits until running,
    and tags it with *label*.

    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/ec2-example-managing-instances.html#start-and-stop-instances
    """
    # Permissions probe: DryRunOperation in the error text means we have
    # permission; any other ClientError is a real failure.
    try:
        create_EC2(ec2_resource, ami_id, i_type, envs)
    except botocore.exceptions.ClientError as e:
        if 'DryRunOperation' not in str(e):
            print(e.response['Error']['Message'])
            print(perm_error)
            exit(1)
        elif dry_run:
            # Caller only wanted the dry run; stop successfully here.
            print(e.response['Error']['Message'])
            exit(0)
        else:
            pass
    # Build the cloud-init user data: mount EFS, authorize the SSH key.
    mnt_str = gen_mnt_str(envs['EFS_IP'])
    key_str = read_key(key_path)
    auth_key_str = 'printf "{}" >> /home/admin/.ssh/authorized_keys;'.format(
        key_str)
    u_data = '#!/bin/bash\n{mnt_str}\n{auth_key_str}\n'.format(mnt_str=mnt_str,
                                                              auth_key_str=auth_key_str)
    print('Creating EC2...')
    try:
        instances = create_EC2(ec2_resource, ami_id, i_type, envs, u_data, False)
    except botocore.exceptions.ClientError as bce:
        print(bce)
        print('\nUnable to launch EC2. \nExiting.')
        exit(1)
    # Fixed: was `len(instances) is not 1` — identity comparison against an
    # int literal, which is implementation-defined; use value inequality.
    if len(instances) != 1:
        msg = 'Instances launched: %s' % str(instances)
        raise NepheleError.UnableToStartEC2Exception(msg=msg)
    instance = instances[0]
    instance.wait_until_running()
    instance.create_tags(Tags=[{'Key': 'Name', 'Value': label}])
    print(str(instance) + ' has been created.')
    # NOTE(review): this prints the instance *ID* where an IP address seems
    # intended ("ssh {ip_addr}") — confirm whether public_ip_address was meant.
    print('To connect type:\nssh {ip_addr}'.format(
        ip_addr=instance.instance_id))
    print('To terminate instance type:')
    print('awssume aws ec2 terminate-instances --instance-ids ' +
          instance.instance_id)
26,653
def summate2(phasevec):
    """Combine two phase vectors into the summed vector b'(j^vec).

    Parameter:
        phasevec: a pair of (values, length) tuples,
            (([b_1(0),...,b_1(L-1)], L), ([b_2(0),...,b_2(L'-1)], L'))

    Returns the flattened outer sum
        [b_1(0)+b_2(0), b_1(0)+b_2(1), ..., b_1(L-1)+b_2(L'-1)],
    i.e. the first vector's index varies slowest.
    """
    (vals_a, len_a), (vals_b, len_b) = phasevec
    # Nested comprehension mirrors the double loop over all index pairs.
    return [vals_a[i] + vals_b[j]
            for i in range(len_a)
            for j in range(len_b)]
26,654
def get_children():
    """ Return IDs of LIST which currently is zero-based index
        Modelled after treeview for future alphabetic IDs
        TODO: Should probably return list of all DICT?

        Returns a list of iid strings (treeview needs string indices),
        one per entry in the module-level LIST, produced by ndx_to_iid().
    """
    iid_list = []
    #read()
    #print('location.get_children() LIST count:',len(LIST))
    # Map each zero-based position to its iid; the entry value `ndx` itself
    # is not used, only the position.
    for i, ndx in enumerate(LIST):
        iid = ndx_to_iid(i)
        iid_list.append(iid)  # treeview uses string for indices
        # print('location.get_children:',i,ndx)
    return iid_list
26,655
def resume():
    """ Resume a saved game.

    Lists every world archive (*.zip) found by file_manager, asks the user
    to pick one, records the chosen world's path in the module-level
    WORLD_PATH, then starts that world.
    """
    world_menu = ui.Menu("Select a World")
    worlds = file_manager.find(".zip")
    world_names = []
    for world in worlds:
        # remove everything but the world name
        world_name = world.replace("worlds", "").replace(os.getcwd(), "").replace("\\", "").replace("/", "").replace(".zip", "")
        world_menu.add(world_name)
        world_names.append(world_name)
    world_menu.display(False)
    if not worlds:
        print("\nNo worlds found \n")
        return
    # Only numbers in [1, len(worlds)] are accepted as input.
    options = list(range(1, len(worlds) + 1))
    choice = int(ui.accepted_input(*[str(s) for s in options]))
    # Remember the selection globally (without the .zip suffix).
    global WORLD_PATH
    WORLD_PATH = worlds[choice - 1].replace(".zip", "")
    start(world_names[choice - 1], worlds[choice - 1])
26,656
def makeTracker(path, args=(), kwargs=None):
    """Retrieve an instantiated tracker and its associated code.

    :param path: path identifying the tracker to instantiate
    :param args: positional arguments passed to instantiation
    :param kwargs: keyword arguments passed to instantiation; a fresh dict
        is used when omitted.  (Previously the default was a shared mutable
        ``{}``, which could leak state between calls if a callee mutated it.)
    :return: a tuple (code, tracker)
    """
    if kwargs is None:
        kwargs = {}
    obj, module, pathname, cls = makeObject(path, args, kwargs)
    code = getCode(cls, pathname)
    return code, obj
26,657
def info(widget):
    """ Pretty-print a widget's structure to stdout for debugging. """
    pprint(widget)
26,658
def format_each_linpla_subaxis(ax, xticks=None, sess_ns=None, kind="reg",
                               single_lab=True, sess_text=True):
    """
    format_each_linpla_subaxis(ax)

    Formats each subaxis separately, specifically:
    - Adds session numbers if provided
    - Removes bottom lines and ticks for top plots
    - Adds x tick labels to bottom plots
    - Adds y tick labels to correct plots

    Required args:
        - ax (plt Axis): plt axis

    Optional args:
        - xticks (list)    : x tick labels (if None, none are added)
                             default: None
        - sess_ns (list)   : list of session numbers
                             default: None
        - kind (str)       : kind of plot
                             "reg" for single plot per layer/line,
                             "traces" for traces plot per session (rows),
                             "prog" for progression plot per session (cols),
                             "idx" for unexpected data index plot per session
                             (rows), "map" for axis-mark-free maps
                             default: "reg"
        - single_lab (bool): if True, only one set of session labels is added
                             to the graph
                             default: True
        - sess_text (bool) : if True, session numbers are included as text in
                             the subplots
                             default: True
    """
    # make sure to autoscale subplots after this, otherwise bugs emerge
    for sub_ax in ax.reshape(-1):
        sub_ax.autoscale()

    # get information based on kind of graph
    n_rows, n_cols = ax.shape
    col_per_grp = 1
    pad_p = 0
    if kind == "reg":
        if xticks is not None:
            div = len(xticks)
            pad_p = 1.0 / div
        if n_rows != 2 or n_cols != 2:
            raise RuntimeError(
                "Regular plots should have 2 rows and 2 columns."
                )
    elif kind == "prog":
        # "prog" lays out two groups of sessions side by side.
        if n_cols % 2 != 0:
            raise RuntimeError("Expected even number of columns")
        col_per_grp = int(n_cols/2)
    elif kind == "map":
        # Maps: strip all axis marks but keep the full frame visible.
        for sub_ax in ax.reshape(-1):
            plot_util.remove_axis_marks(sub_ax)
            for spine in ["right", "left", "top", "bottom"]:
                sub_ax.spines[spine].set_visible(True)
    elif kind not in ["traces", "idx"]:
        gen_util.accepted_values_error(
            "kind", kind, ["reg", "traces", "prog", "idx", "map"]
            )
    if kind == "map":
        # Nothing further to format for maps.
        return

    for r in range(n_rows):
        for c in range(n_cols):
            sub_ax = ax[r, c]
            # set x ticks
            if xticks is not None:
                plot_util.set_ticks(sub_ax, axis="x", min_tick=min(xticks),
                    max_tick=max(xticks), n=len(xticks), pad_p=pad_p)
                # always set ticks (even again) before setting labels
                sub_ax.set_xticklabels(xticks, weight="bold")
                # to avoid very wide plot features
                if len(xticks) == 1:
                    sub_ax.set_xlim(xticks[0] - 1, xticks[0] + 1)
            # add session numbers
            if kind in ["traces", "idx", "prog"] and sess_ns is not None:
                if sess_text:
                    # place session labels in right/top subplots
                    if kind == "prog":
                        sess_idx = c % len(sess_ns)
                        if r != 0 or c < len(sess_ns):
                            sess_idx = None
                    else:
                        sess_idx = r
                        if c != 1 or r >= len(sess_ns):
                            sess_idx = None
                    if sess_idx is not None:
                        sess_lab = f"sess {sess_ns[sess_idx]}"
                        sub_ax.text(0.65, 0.75, sess_lab, fontsize="x-large",
                            transform=sub_ax.transAxes, style="italic")
                elif kind == "prog":
                    # alternative session labels for "prog"
                    if (sub_ax.is_last_row() and
                        (c < len(sess_ns) or not(single_lab))):
                        # BOTTOM
                        sub_ax.text(0.5, -0.5, sess_ns[c % len(sess_ns)],
                            fontsize="x-large", transform=sub_ax.transAxes,
                            weight="bold")
            # remove x ticks and spines from graphs
            if not sub_ax.is_last_row() and kind != "idx":
                # NOT BOTTOM
                sub_ax.tick_params(axis="x", which="both", bottom=False)
                sub_ax.spines["bottom"].set_visible(False)
            # remove y ticks and spines from graphs
            colNum = sub_ax.get_subplotspec().colspan.start
            if kind == "prog" and not colNum in [0, col_per_grp]:
                sub_ax.tick_params(axis="y", which="both", left=False)
                sub_ax.spines["left"].set_visible(False)
            # Round away float noise before comparing/limiting y ticks.
            yticks = [np.around(v, 10) for v in sub_ax.get_yticks()]
            if kind in ["traces", "idx"] and len(yticks) > 3:
                # Reduce to at most [-max, 0, max], keeping only values that
                # were already ticks (or 0).
                max_abs = np.max(np.absolute(yticks))
                new = [-max_abs, 0, max_abs]
                yticks = list(filter(lambda x: x == 0 or x in yticks, new))
            # always set ticks (even again) before setting labels
            sub_ax.set_yticks(yticks)
            sub_ax.set_yticklabels(yticks, weight="bold")
26,659
def get_anchor_generator(anchor_size: Tuple[tuple] = None,
                         aspect_ratios: Tuple[tuple] = None):
    """Returns the anchor generator.

    Args:
        anchor_size: one tuple of sizes per feature map level; defaults to
            ((16,), (32,), (64,), (128,)) when None.
        aspect_ratios: one tuple of ratios per anchor size; defaults to
            (0.5, 1.0, 2.0) repeated for each anchor size when None.
    """
    if anchor_size is None:
        anchor_size = ((16,), (32,), (64,), (128,))
    if aspect_ratios is None:
        # Same ratio set for every feature map level.
        aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_size)

    anchor_generator = AnchorGenerator(sizes=anchor_size,
                                       aspect_ratios=aspect_ratios)
    return anchor_generator
26,660
def tensor_to_longs(tensor: torch.Tensor) -> list:
    """Convert a long tensor to a (possibly nested) list of Python ints.

    :param tensor: tensor with dtype ``torch.long`` (asserted)
    :return: nested Python list mirroring the tensor's shape

    Previously this returned a numpy array despite the ``-> list``
    annotation (and a docstring describing the opposite conversion);
    ``.tolist()`` makes the return value match the declared type.
    """
    assert tensor.dtype == torch.long
    return tensor.detach().cpu().tolist()
26,661
def test_fixed(SNRs):
    """ Fixed (infinite T1) qubit.

    For each SNR, simulates numShots measurement records with an
    effectively infinite T1 (1e9, so decay is negligible), integrates each
    record over time, and estimates readout fidelity from the integrated
    signals.

    :param SNRs: iterable of signal-to-noise ratios to sweep
    :return: list of fidelity estimates, one per SNR
    """
    fidelities = []
    numShots = 10000
    dt = 1e-3  # integration time step
    for SNR in SNRs:
        # T1=1e9 makes the qubit effectively "fixed" over the record.
        fakeData = create_fake_data(SNR, dt, 1, numShots, T1=1e9)
        # Integrate each shot along the time axis.
        signal = dt*np.sum(fakeData, axis=1)
        fidelities.append(fidelity_est(signal))
    return fidelities
26,662
def setup(app):
    """
    Add the ``fica`` directive to the Sphinx app.

    :param app: the Sphinx application object
    :return: extension metadata (version and parallel read/write safety)
    """
    app.add_directive("fica", FicaDirective)

    return {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
26,663
def auth_add(action_id, auth_type, uid, pid=None):
    """Persist an always-allow authorization rule for a user/action pair.

    action_id : Action ID
    auth_type : Authorization type. Should be one of SCOPE_ONE_SHOT,
                SCOPE_PROCESS, SCOPE_SESSION or SCOPE_ALWAYS.
                NOTE(review): not referenced in the body — the written rule
                is always "yes" for any/inactive/active; confirm intent.
    uid       : User ID
    pid       : Process ID of process to grant authorization to. Normally
                one wants to pass result of os.getpid().
                NOTE(review): not referenced in the body either.
    """
    user = pwd.getpwuid(uid).pw_name
    cp = configparser.ConfigParser()
    # Keep option names case-sensitive (ConfigParser lowercases by default).
    cp.optionxform = str
    cp.read(DB_FILE)
    title = "user:%s:%s" % (user, action_id)
    # Replace any existing rule for this user/action pair.
    if title in cp.sections():
        cp.remove_section(title)
    cp.add_section(title)
    cp.set(title, "Action", action_id)
    cp.set(title, "Identity", "unix-user:%s" % user)
    cp.set(title, "ResultAny", "yes")
    cp.set(title, "ResultInactive", "yes")
    cp.set(title, "ResultActive", "yes")
    # Rewrites the whole DB file with the updated rule set.
    with open(DB_FILE, "w") as configfile:
        cp.write(configfile)
26,664
def set_fit_state(dag: dtfcordag.DAG, fit_state: NodeState) -> None:
    """
    Initialize a DAG with pre-fit node state.

    :param dag: dataflow DAG consisting of `FitPredictNode`s
    :param fit_state: result of node `get_fit_state()` keyed by nid; must
        contain exactly one entry per DAG node
    """
    hdbg.dassert_isinstance(dag, dtfcordag.DAG)
    hdbg.dassert_isinstance(fit_state, collections.OrderedDict)
    # Require a one-to-one correspondence between nodes and saved states.
    hdbg.dassert_eq(len(dag.dag.nodes()), len(fit_state.keys()))
    # Scan the nodes.
    for nid in dag.dag.nodes():
        node = dag.get_node(nid)
        # Set the info for the fit state.
        hdbg.dassert_isinstance(node, dtfconobas.FitPredictNode)
        hdbg.dassert_in(nid, fit_state.keys())
        # Shallow-copy so the caller's dict entries aren't shared with nodes.
        node_fit_state = copy.copy(fit_state[nid])
        node.set_fit_state(node_fit_state)
26,665
def get_summaries(ignore_hidden=True, ordered=True):
    """Yield (command name, command summary) tuples.

    :param ignore_hidden: when True, skip commands whose class is marked
        ``hidden``.
    :param ordered: when True, yield tuples sorted by command name.
        (Previously this parameter was accepted but ignored, so the
        documented "sorted" behavior never happened.)
    """
    cmditems = commands.items()
    if ordered:
        # Sort by command name; names are unique dict keys, so the command
        # classes themselves are never compared.
        cmditems = sorted(cmditems, key=lambda item: item[0])
    for name, command_class in cmditems:
        if ignore_hidden and command_class.hidden:
            continue
        yield (name, command_class.summary)
26,666
def get_binary_statistics(
    outputs: Tensor,
    targets: Tensor,
    label: int = 1,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """
    Computes confusion-matrix counts (true negative, false positive,
    false negative, true positive) and support for one class ``label``
    of a binary classification problem.

    Args:
        outputs: estimated targets as predicted by a model
            with shape [bs; ..., 1]
        targets: ground truth (correct) target values
            with shape [bs; ..., 1]
        label: integer, that specifies label of interest for statistics compute

    Returns:
        Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
            scalar long tensors (tn, fp, fn, tp, support)

    Example:
        >>> y_pred = torch.tensor([[0, 0, 1, 1, 0, 1, 0, 1]])
        >>> y_true = torch.tensor([[0, 1, 0, 1, 0, 0, 1, 1]])
        >>> tn, fp, fn, tp, support = get_binary_statistics(y_pred, y_true)
        tensor(2) tensor(2) tensor(2) tensor(2) tensor(4)
    """
    predicted_positive = outputs == label
    actual_positive = targets == label

    def _count(mask: Tensor) -> Tensor:
        # Scalar long tensor counting the True entries of a boolean mask.
        return mask.to(torch.long).sum()

    tn = _count(~predicted_positive & ~actual_positive)
    fp = _count(predicted_positive & ~actual_positive)
    fn = _count(~predicted_positive & actual_positive)
    tp = _count(predicted_positive & actual_positive)
    support = _count(actual_positive)
    return tn, fp, fn, tp, support
26,667
def threshold_and_mask(min_normed_weight, W, Mc, coords):
    # =np.arange(Wc.shape[0])*stride + start):
    """Normalize the weights W, threshold to min_normed_weight and remove
    diagonal, reduce DX and DY to the columns and rows still containing
    weights.

    Returns
    -------
    coords : array_like
        the indices of these columns in terms of original image indices
    W_n_m : array_like
        the thresholded, normalized weights, reduced to surviving rows/cols
    DX_m : array_like
        The reduced DX (Mc[0])
    DY_m : array_like
        The reduced DY (Mc[1])
    row_mask : array_like
        Boolean mask of surviving columns, in terms of calculated arrays.
    """
    #coords = np.arange(Wc.shape[0])*stride + start
    # Normalize: W_n[i, j] = W[i, j] / sqrt(W[i, i] * W[j, j]).
    wcdiag = np.atleast_2d(np.diag(W))
    W_n = W / np.sqrt(wcdiag.T*wcdiag)
    # Threshold off-diagonal entries; subtracting the diagonal first
    # guarantees diagonal entries (== 1 after normalization) never pass.
    mask = W_n - np.diag(np.diag(W_n)) > min_normed_weight
    # Keep any column that has at least one above-threshold entry.
    row_mask = np.any(mask, axis=0)
    # Zero out everything below threshold (including the diagonal).
    W_n = np.where(mask, W_n, 0)
    DX, DY = Mc[0], Mc[1]
    # Reduce all arrays to the surviving rows and columns.
    W_n_m = W_n[:, row_mask][row_mask, :]
    coords = coords[row_mask]
    #mask_red = mask[row_mask, :][:, row_mask]
    DX_m, DY_m = DX[row_mask, :][:, row_mask], DY[row_mask, :][:, row_mask]
    return coords, W_n_m, DX_m, DY_m, row_mask
26,668
def strings():
    """
    String manipulation examples (comments translated from Portuguese).

    Demonstrates, by printing, common string transformations: substring
    replacement, regex substitution with ``re.sub`` (group reordering and
    case-insensitive matching), and trimming with
    ``strip``/``lstrip``/``rstrip``.

    Other notes from the original author:
      - access each character: ``nome = 'roberto'``
      - replace one character: convert to a ``list`` (needed for item
        assignment), assign, then rebuild with ``''.join(lista)``
      - check a file extension / prefix: ``file.endswith('.txt')`` and
        ``file.startswith('arq')`` both return booleans
    """
    texto = 'eu amo estudar javascript, mas no meu tempo livre estudo python'
    print(texto.replace('javascript', 'php'))  # replace a word inside the string

    texto02 = 'eu nasci na data de 26/02/1987 e minha irmã nasceu em 10/12/1980'
    print(re.sub(r'(\d+)/(\d+)/(\d+)', r'\3-\1-\2', texto02))  # substitute a pattern inside the string
    print(re.sub(r'(\d+)/(\d+)/(\d+)', r'\3-\1-\2', texto02, flags=re.IGNORECASE))  # ignore case sensitivity

    texto03 = 'eu nasci em 1987-02-26'
    print(re.sub(r'(\d+)-(\d+)-(\d+)', r'\3/\2/\1', texto03))

    texto04 = ' python '
    print(texto04.strip())  # remove surrounding whitespace

    texto05 = '----------javascript---------'
    print(texto05.strip('-'))  # remove the given character from both ends

    texto06 = '------------php'
    print(texto06.lstrip('-'))  # remove only from the left side

    texto07 = 'mysql----------'
    print(texto07.rstrip('-'))  # remove only from the right side
26,669
def train_cnn(data, model_name, nb_epoch, batch_size):
    """
    Trains a Convolutional Neural Network on the MNIST dataset

    Largely inspired from
    https://github.com/fchollet/keras/blob/master/examples/mnist_{cnn, mlp}.py

    Args:
        data: tuple ``(X_train, X_test, Y_train, Y_test)`` of preprocessed
            arrays; images are expected to fit
            ``(constants.IMG_ROWS, constants.IMG_COLS, 1)`` and labels to be
            one-hot encoded for the 10-way softmax -- presumably prepared by
            the caller; TODO confirm.
        model_name: file name used to save the trained model under
            ``constants.MODELS_DIR``.
        nb_epoch: number of training epochs.
        batch_size: mini-batch size.

    Side effects:
        Trains the model (verbose output), saves it to disk and logs the
        destination path.

    NOTE(review): this uses the Keras 1.x API (``Convolution2D``,
    ``border_mode``, ``nb_epoch``) and will not run unchanged on Keras 2+.
    """
    X_train, X_test, Y_train, Y_test = data
    np.random.seed(constants.RANDOM_STATE)  # for reproducibility
    input_shape = (constants.IMG_ROWS, constants.IMG_COLS, 1)
    model = Sequential()
    # Feature extractor: conv -> relu -> max-pool -> dropout.
    model.add(Convolution2D(constants.NB_FILTERS, constants.KERNEL_SIZE[0],
                            constants.KERNEL_SIZE[1], border_mode='valid',
                            input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=constants.POOL_SIZE))
    model.add(Dropout(0.25))
    # Classifier head: two dense+relu layers with dropout, then softmax.
    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adadelta',
                  metrics=['accuracy'])
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, validation_data=(X_test, Y_test))
    filepath = os.path.join(constants.MODELS_DIR, model_name)
    model.save(filepath)
    logging.info("Successfully saved new model at %s", filepath)
26,670
def readCGcsv(filename, levels):
    """
    Read a .csv file of a callgraph into a dictionary keyed by callgraph
    level.

    Args:
        filename: path to a CSV with (at least) the columns ``Level``,
            ``Samp%`` and ``Calltree/PE=HIDE``.
        levels: maximum number of levels to keep; a value <= 0 keeps all.

    Returns:
        dict mapping each level (int) to a list of ``CGNode`` objects.
        Every node deeper than level 0 is also registered as a callee of
        the most recently read node one level up (the rows are assumed to
        be in depth-first order -- TODO confirm against the producer).
    """
    cgdict = {}
    with open(filename, "r") as cgcsv:
        for row in csv.DictReader(cgcsv):
            lvl = int(row['Level'])
            if lvl < levels or levels <= 0:
                cost = row[r'Samp%']
                fname = row[r'Calltree/PE=HIDE']
                node = CGNode(fname, cost)
                # setdefault replaces the `lvl not in cgdict.keys()` check;
                # membership on the dict itself is the idiomatic form.
                cgdict.setdefault(lvl, []).append(node)
                if lvl > 0:
                    # Link to the caller: the last node seen on the parent
                    # level.
                    cgdict[lvl - 1][-1].addCallee(node)
    return cgdict
26,671
def stderr_print(msg):
    """Write *msg* to the standard error stream, followed by a newline."""
    sys.stderr.write("%s\n" % (msg,))
26,672
def use_colors(tones, i=None):
    """
    Use specific color tones for plotting.

    If ``i`` is specified, this function returns color ``i`` (cyclically)
    from the corresponding palette instead of installing the palette as
    the matplotlib axes color cycle.

    For custom color palettes generation check:
    http://colorbrewer2.org/#type=sequential&scheme=YlGnBu&n=8

    Args:
        tones : 'hot' or 'cold' for the built-in palettes, or any sequence
            of color specs to use as-is
        i : optional index into the palette

    Returns:
        The full color list when ``i`` is None (after setting the axes
        prop cycle), otherwise color ``i % len(colors)``.
    """
    hot = ['#fed976', '#feb24c', '#fd8d3c', '#fc4e2a', '#e31a1c', '#b10026']
    cold = ['#a6bddb', '#67a9cf', '#3690c0', '#02818a', '#016c59', '#014636']
    # cold = ['#44AE7E', '#388A8D', '#397187', '#3E568E', '#463883', '#461167']
    # BUG FIX: the original used `tones is 'hot'`, which compares object
    # identity and only works by accident of string interning; use `==`.
    # The palette selection was also duplicated in both branches.
    if tones == 'hot':
        colors = hot
    elif tones == 'cold':
        colors = cold
    else:
        colors = tones
    if i is None:
        plt.rc('axes', prop_cycle=(cycler('color', colors)))
        return colors
    return colors[i % len(colors)]
26,673
def main():
    """
    Add or remove comments in a .tex file, driven by the command line.

    Usage:
    python commenter.py -i <path to input.tex> -o <path to output.tex> -comment    # to add comments
    python commenter.py -i <path to input.tex> -o <path to output.tex> -uncomment  # to remove comments
    """
    parser = argparse.ArgumentParser(description='Add remove comments in tex file.')
    parser.add_argument('-i', '--input', dest='in_tex', type=str, required=True,
                        help='input tex file.')  # pfo_study_nifti_output
    parser.add_argument('-o', '--output', dest='out_tex', type=str, required=True,
                        help='output tex file.')
    parser.add_argument('-comment', dest='comment', action='store_true',
                        help='add comments between c-start and c-end.')
    parser.add_argument('-uncomment', dest='uncomment', action='store_true',
                        help='remove comments between c-start and c-end.')
    args = parser.parse_args()
    print('Input:')
    print(args.in_tex, args.out_tex, args.comment, args.uncomment)
    # Exactly one of -comment / -uncomment must be supplied.
    if args.comment == args.uncomment:
        raise IOError
    if args.comment:
        print('Add comments to file {} into {}.'.format(args.in_tex, args.out_tex))
        # NOTE(review): the mode is passed as the *string* 'True', not the
        # boolean -- presumably commenter() expects a string; confirm.
        commenter(args.in_tex, args.out_tex, 'True')
        time.sleep(3)  # add some suspense!
        print('Done!')
    else:
        print('Remove comments to file {} into {}.'.format(args.in_tex, args.out_tex))
        commenter(args.in_tex, args.out_tex, 'False')
        time.sleep(3)  # add some suspense!
        print('Done!')
26,674
def fill_form(forms, form):
    """Fills a given form given a set of known forms.

    :param forms: A set of known forms.
    :param form: The form to fill. Expected to be a mapping with a
        ``'features'`` key (labels to fill) and a ``'form'`` key mapping
        each label to a dict that carries at least an ``'id'``.
    :return: A mapping from form element IDs to suggested values for the
        form (``None`` when no suggestion could be found).
    """
    forms = list(forms)
    new_form = {}  # accumulated result, shared by all recursion levels

    def rec_fill_form(form, labels):
        # Recursively fill `labels`, asking get_neighbor() for the closest
        # known form each round and retrying the still-unfilled labels.
        if not labels:
            return new_form
        unfilled_labels = []
        neighbor = get_neighbor(forms, labels)
        if not neighbor:
            # No known form covers any of these labels: give up on them.
            LOGGER.info('No neighbors found', labels)
            for label in labels:
                new_form[form['form'][label]['id']] = None
            return new_form
        LOGGER.info('Neighbor', neighbor)
        # Copy every value the neighbor can provide; remember the rest.
        for label in labels:
            if label in neighbor['form']:
                new_form[form['form'][label]['id']] = neighbor['form'][label]['value']
            else:
                unfilled_labels.append(label)
        # LOGGER.info('unfilled', unfilled_labels)
        if len(labels) == len(unfilled_labels):
            # The neighbor contributed nothing: stop recursing to avoid an
            # infinite loop and mark the remaining labels as unfillable.
            for label in unfilled_labels:
                new_form[form['form'][label]['id']] = None
            return new_form
        return rec_fill_form(form, unfilled_labels)

    return rec_fill_form(form, list(form['features']))
26,675
def read_sky_model_from_csv(path: str) -> SkyModel:
    """
    Build a :class:`SkyModel` from a CSV file.

    The CSV should have the following columns (when no information is
    available, set the value to 0, or None for the source id):

    - right ascension (deg)
    - declination (deg)
    - stokes I Flux (Jy)
    - stokes Q Flux (Jy)
    - stokes U Flux (Jy)
    - stokes V Flux (Jy)
    - reference_frequency (Hz)
    - spectral index (N/A)
    - rotation measure (rad / m^2)
    - major axis FWHM (arcsec)
    - minor axis FWHM (arcsec)
    - position angle (deg)
    - source id (object)

    :param path: file to read in
    :return: SkyModel
    """
    # TODO: add validation of csv
    source_array = pd.read_csv(path).to_numpy()
    return SkyModel(source_array)
26,676
def main():
    """Count valid triangles read column-wise from the day-3 input.

    Reads whitespace-separated integer triples from ``data/day3data.txt``,
    transposes them so each *column* becomes one sequence, and counts how
    many consecutive groups of three satisfy ``isTriangle``.
    """
    with open('data/day3data.txt', 'r') as f:
        rows = [[int(tok) for tok in line.split()] for line in f]
    # Transpose the data, then flatten; consecutive groups of three in the
    # flattened list are the candidate triangles.
    columns = [list(col) for col in zip(*rows)]
    tri_list = [item for sublist in columns for item in sublist]
    triangles = 0
    for i in range(0, len(tri_list), 3):
        candidate = [tri_list[i], tri_list[i + 1], tri_list[i + 2]]
        print(candidate)
        if isTriangle(candidate):
            triangles += 1
    # BUG FIX: the original wrote `print('...{}...').format(triangles)`,
    # which calls .format() on print()'s return value (None) and raises
    # AttributeError; also fixed the "triagles" typo.
    print('There are {} valid triangles.'.format(triangles))
26,677
def integer_to_vector(x, options_per_element, n_elements, index_to_element):
    """Convert an integer into the vector (action/state) it encodes.

    Thin wrapper around :func:`integer_to_vector_multiple_numbers` for the
    single-number case.

    Args:
        x (int): the integer to convert.
        options_per_element: number of options for each element in the
            vector.
        n_elements (int): the number of elements in the vector to return.
        index_to_element: callable mapping an option index to whatever the
            vector actually stores -- e.g. a function returning 'UP' for 0,
            or one returning (2, 2) for 10 on a 4x4 grid (the 10-th cell).
    """
    vector = integer_to_vector_multiple_numbers(
        x, options_per_element, n_elements, index_to_element)
    return vector
26,678
def samples_for_each_class(dataset_labels, task):
    """Count the samples available for each class of a task.

    Args:
        dataset_labels: label matrix; ``np.nonzero`` pairs each sample row
            with its active class column (one-hot encoding, presumably --
            TODO confirm against the caller).
        task: iterable of class labels making up the task.

    Returns:
        float32 array with one sample count per label in *task*.
    """
    num_samples = np.zeros([len(task)], dtype=np.float32)
    # (sample, class) index pairs -- invariant, so computed once.
    sample_class_pairs = np.column_stack(np.nonzero(dataset_labels))
    for i, label in enumerate(task):
        matching = sample_class_pairs[sample_class_pairs[:, 1] == label]
        sample_ids = np.squeeze(matching[:, np.array([True, False])])
        # axis=None flattens, so even a 0-d squeeze result becomes 1-d.
        num_samples[i] = len(np.sort(sample_ids, axis=None))
    return num_samples
26,679
def requiredOneInGroup(col_name, group, dm, df, *args):
    """Report whether *col_name* satisfies its group requirement in *df*.

    If col_name is present in df, this particular column satisfies the
    group validation and ``None`` is returned.  Otherwise the column name
    itself is returned; a later pass checks that at least one column of
    the group produced ``None`` (i.e. was present).

    Returns:
        None when the column exists in ``df.columns``, else ``col_name``.
    """
    return None if col_name in df.columns else col_name
26,680
def persist(session, obj, return_id=True):
    """Store *obj* in the database via *session*, then detach it.

    Flushing assigns the database id; expunging removes the object from
    the session so a subsequent load yields a clean instance rather than
    the cached one.

    Args:
        session: SQLAlchemy session (anything with add/flush/expunge).
        obj: mapped object to persist.
        return_id: when True, return ``obj.id``.

    Returns:
        The object's id, or None when *return_id* is false.
    """
    session.add(obj)
    session.flush()
    if return_id:
        result = obj.id  # must be read before expunge detaches the object
    else:
        result = None
    session.expunge(obj)
    return result
26,681
def test_bittrex_query_asset_movement_int_transaction_id(bittrex):
    """Test that if an integer is returned for bittrex transaction id we handle it properly

    Bittrex deposit withdrawals SHOULD NOT return an integer for
    transaction id according to their docs
    https://bittrex.github.io/api/v3#definition-Order
    but as we saw in practise they sometimes can.

    Regression test for https://github.com/rotki/rotki/issues/2175
    """
    # A deposit whose txId is a bare JSON integer instead of a string.
    problematic_deposit = """
[
    {
      "id": 1,
      "status": "COMPLETED",
      "quantity": 2.12345678,
      "currencySymbol": "RISE",
      "confirmations": 2,
      "completedAt": "2014-02-13T07:38:53.883Z",
      "txId": 9875231951530679373,
      "cryptoAddress": "15VyEAT4uf7ycrNWZVb1eGMzrs21BH95Va",
      "source": "foo"
    }
]
"""

    def mock_get_deposit_withdrawal(
            url,
            method,
            json,
            **kwargs,
    ):  # pylint: disable=unused-argument
        # Serve the problematic deposit for deposit queries and an empty
        # list for withdrawal queries.
        if 'deposit' in url:
            response_str = problematic_deposit
        else:
            response_str = '[]'
        return MockResponse(200, response_str)

    with patch.object(bittrex.session, 'request', side_effect=mock_get_deposit_withdrawal):
        movements = bittrex.query_deposits_withdrawals(
            start_ts=0,
            end_ts=TEST_END_TS,
            only_cache=False,
        )

    # The integer txId must not produce errors or warnings.
    errors = bittrex.msg_aggregator.consume_errors()
    warnings = bittrex.msg_aggregator.consume_warnings()
    assert len(errors) == 0
    assert len(warnings) == 0

    assert len(movements) == 1
    assert movements[0].location == Location.BITTREX
    assert movements[0].category == AssetMovementCategory.DEPOSIT
    assert movements[0].timestamp == 1392277134
    assert isinstance(movements[0].asset, Asset)
    assert movements[0].asset == Asset('RISE')
    assert movements[0].amount == FVal('2.12345678')
    assert movements[0].fee == ZERO
    # The integer id must have been coerced to a string.
    assert movements[0].transaction_id == '9875231951530679373'

    # also make sure they are written in the db
    db_movements = bittrex.db.get_asset_movements(
        filter_query=AssetMovementsFilterQuery.make(),
        has_premium=True,
    )
    assert len(db_movements) == 1
    assert db_movements[0] == movements[0]
26,682
def create_merged_files(ecbplus_original, ecbstart_new, outfile1, outfile2):
    """
    Merge ECB+ coreference chains with ESC plot-link data.

    :param ecbplus_original: ECB+ CAT data (XML file path)
    :param ecbstart_new: ESC CAT data (XML file path)
    :param outfile1: event mention extended output (appended to)
    :param outfile2: event extended coref chain output (appended to)
    :return: None
    """
    parser = etree.XMLParser(remove_blank_text=True)

    # ECB+ side: event mentions and their coreference relations.
    root_ecbplus = etree.parse(ecbplus_original, parser).getroot()
    ecb_event_mentions = extract_event_CAT(root_ecbplus)
    ecb_coref_relations = extract_corefRelations(root_ecbplus, ecb_event_mentions)

    # ECB-star side: events and their plot links.
    ecbstar_root = etree.parse(ecbstart_new, parser).getroot()
    ecb_star_events = extract_event_CAT(ecbstar_root)
    ecbstar_events_plotLink = extract_plotLink(ecbstar_root, ecb_star_events)

    # Expand every plot link over the coref chains of its endpoints,
    # remembering each mention pair for the second output file.
    # FIX: open each output file once with a context manager instead of
    # re-opening it in append mode on every loop iteration.
    get_extended_mention = {}
    with open(outfile1, "a") as output:
        for (source_id, target_id), relation in ecbstar_events_plotLink.items():
            source = check_entry_dict(source_id, ecb_coref_relations).split(" ")
            target = check_entry_dict(target_id, ecb_coref_relations).split(" ")
            output.write(check_entry_dict(source_id, ecb_coref_relations) + "\t"
                         + check_entry_dict(target_id, ecb_coref_relations) + "\t"
                         + relation + "\n")
            for pair in [(x, y) for x in source for y in target]:
                get_extended_mention[((source_id, target_id), pair)] = relation

    # Normalize mention-pair order by document id, flipping the direction
    # of FALLING_ACTION/PRECONDITION when the pair is reversed.
    mention_elem = {}
    for key, relation in get_extended_mention.items():
        first, second = key[1]
        first_doc = int(first.split("_")[0])
        second_doc = int(second.split("_")[0])
        if first_doc < second_doc:
            mention_elem[(first, second)] = relation
        elif first_doc > second_doc:
            if relation == "FALLING_ACTION":
                mention_elem[(second, first)] = "PRECONDITION"
            elif relation == "PRECONDITION":
                mention_elem[(second, first)] = "FALLING_ACTION"

    with open(outfile2, "a") as output2:
        for (mention1, mention2), relation in mention_elem.items():
            output2.write(mention1 + "\t" + mention2 + "\t" + relation + "\n")
26,683
def _fixed_point(
    searcher: 'AbstractSearcher',
    parsed: parsed_file.ParsedFile,
    initial_substitutions: Sequence[substitution.Substitution],
    start: int,
    end: int,
    max_iterations: int,
):
  """Repeatedly apply searcher until there are no more changes.

  Applies the initial substitutions to parsed.text[start:end], re-parses,
  re-searches, and repeats until nothing new is found, parsing fails, or
  max_iterations rewrites have been applied.  Returns either the original
  substitutions (when nothing further was discovered or we had to give
  up), an empty list (first rewrite failed to parse), or a single combined
  substitution covering [start, end).
  """
  if max_iterations <= 1:
    # One pass only: the caller's substitutions are already the answer.
    return initial_substitutions
  # TODO(b/116068515): sort the substitutions here and below.
  new_substitutions = [
      s.relative_to_span(start, end) for s in initial_substitutions
  ]
  if None in new_substitutions:
    logging.error('Out of bounds substitution after filtering: %s',
                  initial_substitutions[new_substitutions.index(None)])
    return initial_substitutions  # give up
  text = parsed.text[start:end]
  logging.debug(
      'Applying _fixed_point with initial subs=%r on on parsed.text[%d:%d]: %r',
      new_substitutions, start, end, text)
  all_substitutions = []
  # max_iterations + 1 to get the extra iteration before the break,
  # and then - 1 to account for the fact that we already did an iteration.
  for i in range(max_iterations):
    rewritten = formatting.apply_substitutions(text, new_substitutions)
    try:
      parsed = _matcher.parse_ast(rewritten, parsed.path)
    except _matcher.ParseError as e:
      # The rewrite produced unparseable code: discard this round's
      # substitutions and stop iterating.
      logging.error(
          'Could not parse rewritten substitution in %s: %s\n'
          'Tried to rewrite text[%s:%s] == %r\n'
          'Rewrite was: %r\n'
          'Substitutions: %r', parsed.path, e, start, end, text, rewritten,
          new_substitutions)
      break
    # These substitutions parsed and were valid, add them to the list:
    all_substitutions.extend(new_substitutions)
    # Set up the variables for the next rewrite attempt
    logging.debug('_fixed_point Iteration %d: rewrote %r -> %r', i, text,
                  rewritten)
    text = rewritten
    if i == max_iterations:
      # no point bothering to get the next substitution
      # NOTE(review): i ranges over [0, max_iterations), so this branch is
      # unreachable as written -- presumably `max_iterations - 1` was
      # intended; confirm before changing.
      break
    new_substitutions = list(searcher.find_iter_parsed(parsed))
    if not new_substitutions:
      # Fixed point reached: the search found nothing further.
      break
  if not all_substitutions:
    # even the first rewrite failed to parse
    return []
  elif len(all_substitutions) == len(initial_substitutions):
    # We didn't discover any new substitutions.
    return initial_substitutions
  else:
    return [_compile_substitutions(all_substitutions, start, end, text)]
26,684
def masker(mask, val):
    """Enforce the defined bits in the <mask> on <val>.

    Each '1' in *mask* sets the corresponding bit of *val*, each '0'
    clears it, and any other character leaves that bit untouched.
    """
    set_pattern = int("".join("1" if c == "1" else "0" for c in mask), 2)
    keep_pattern = int("".join("0" if c == "0" else "1" for c in mask), 2)
    return (val | set_pattern) & keep_pattern
26,685
def ct_lt_u32(val_a, val_b):
    """
    Returns 1 if val_a < val_b, 0 otherwise. Constant time.

    :type val_a: int
    :type val_b: int
    :param val_a: an unsigned integer representable as a 32 bit value
    :param val_b: an unsigned integer representable as a 32 bit value
    :rtype: int
    """
    # NOTE: deliberately a single branch-free bitwise expression; do not
    # "simplify" to a comparison operator, which would leak timing.
    val_a &= 0xffffffff
    val_b &= 0xffffffff
    # Bit 31 of the expression captures the borrow of (val_a - val_b)
    # without a data-dependent branch (the standard constant-time
    # less-than construction, presumably); shifting it down yields 0 or 1.
    return (val_a^((val_a^val_b)|(((val_a-val_b)&0xffffffff)^val_b)))>>31
26,686
async def is_banned(ctx: Context, user: typing.Union[discord.Member, discord.User]) -> bool:
    """Return True if *user* appears in the guild's ban list.

    Args:
        ctx: command context whose guild is queried.
        user: the member or user to look for.

    Returns:
        bool: whether any ban entry's user id matches ``user.id``.
    """
    bans = await ctx.guild.bans()
    # any() short-circuits on the first matching entry, replacing the
    # manual loop-and-return of the original.
    return any(entry.user.id == user.id for entry in bans)
26,687
def test_ce_aftertax_income(cps_subsample):
    """
    Test ce_aftertax_income method.
    """
    records = Records.cps_constructor(data=cps_subsample)
    policy = Policy()
    baseline_calc = Calculator(policy=policy, records=records)
    policy.implement_reform({'SS_Earnings_c': {2013: 9e99}})
    reform_calc = Calculator(policy=policy, records=records)
    # The method should return its results as a dictionary.
    assert isinstance(baseline_calc.ce_aftertax_income(reform_calc), dict)
26,688
def _create_transformation_vectors_for_pixel_offsets(
    detector_group: h5py.Group, wrapper: nx.NexusWrapper
) -> List[QVector3D]:
    """
    Construct a transformation (as a QVector3D) for each pixel offset
    """
    offsets = {
        axis: wrapper.get_field_value(detector_group, axis + "_pixel_offset")
        for axis in ("x", "y", "z")
    }
    if offsets["x"] is None or offsets["y"] is None:
        raise Exception(
            "In pixel_shape_component expected to find x_pixel_offset and y_pixel_offset datasets"
        )
    if offsets["z"] is None:
        # z offsets are optional; default them to zero.
        offsets["z"] = np.zeros_like(offsets["x"])
    # offsets datasets can be 2D to match dimensionality of detector, so
    # flatten to 1D before pairing up the per-pixel components.
    components = zip(
        offsets["x"].flatten(), offsets["y"].flatten(), offsets["z"].flatten()
    )
    return [QVector3D(x, y, z) for x, y, z in components]
26,689
def buildIterator(spec_name, param_spec, global_state, random_selection=False):
    """
    Build a permute-group element for one parameter specification.

    :param spec_name: name of the parameter being iterated
    :param param_spec: argument specification; its 'type' selects the
        branch ('list', contains 'int'/'float' with an optional
        '[beg:end:step]' suffix, 'yesno', or starts with 'donor')
    :param global_state: unused here -- presumably carried for interface
        compatibility; confirm with callers
    :param random_selection: produce a continuous stream of random
        selections instead of an exhaustive iterator
    :return: an iterator-like element used to construct an iterator over
        possible values

    NOTE(review): this is Python 2 code (``xrange``); the donor branch
    reads ``local().current_local_state``, which on a *fresh*
    ``threading.local`` instance would raise AttributeError -- presumably
    ``local`` here is a project-level factory; confirm.
    """
    if param_spec['type'] == 'list':
        # Fixed list of values: iterate or sample uniformly.
        if not random_selection:
            return ListPermuteGroupElement(spec_name, param_spec['values'])
        else:
            return PermuteGroupElement(spec_name, randomGeneratorFactory(lambda: random.choice(param_spec['values'])))
    elif 'int' in param_spec['type']:
        # Parse '[beg:end(:step)]' bounds out of the type string.
        v = param_spec['type']
        vals = [int(x) for x in v[v.rfind('[') + 1:-1].split(':')]
        beg = vals[0] if len(vals) > 0 else 0
        end = vals[1] if len(vals) > 1 else beg + 1
        if not random_selection:
            increment = 1
            if len(vals) > 2:
                increment = vals[2]
            # Inclusive upper bound for the exhaustive case.
            return IteratorPermuteGroupElement(spec_name, lambda: xrange(beg, end + 1, increment).__iter__())
        else:
            return PermuteGroupElement(spec_name, randomGeneratorFactory(lambda: random.randint(beg, end)))
    elif 'float' in param_spec['type']:
        # Same bound parsing as the int branch, with float values.
        v = param_spec['type']
        vals = [float(x) for x in v[v.rfind('[') + 1:-1].split(':')]
        beg = vals[0] if len(vals) > 0 else 0
        end = vals[1] if len(vals) > 1 else beg + 1.0
        if not random_selection:
            increment = 1
            if len(vals) > 2:
                increment = vals[2]
            return IteratorPermuteGroupElement(spec_name, lambda: np.arange(beg, end, increment).__iter__())
        else:
            return PermuteGroupElement(spec_name, randomGeneratorFactory(lambda: beg + random.random() * (end - beg)))
    elif param_spec['type'] == 'yesno':
        if not random_selection:
            return ListPermuteGroupElement(spec_name, ['yes', 'no'])
        else:
            return PermuteGroupElement(spec_name, randomGeneratorFactory(lambda: random.choice(['yes', 'no'])))
    elif param_spec['type'].startswith('donor'):
        # Candidate donors are the graph's root nodes (no predecessors).
        mydata = local()
        local_state = mydata.current_local_state
        choices = [node for node in local_state.getGraph().nodes() \
                   if len(local_state.getGraph().predecessors(node)) == 0]
        if not random_selection:
            # do not think we can save this state since it is tied to the
            # local project
            return PermuteGroupElement(spec_name, choices.__iter__)
        else:
            return PermuteGroupElement(spec_name, randomGeneratorFactory(lambda: random.choice(choices)))
    # Unknown type: fall back to an element that always yields None.
    return PermuteGroupElement(spec_name, randomGeneratorFactory(lambda: None))
26,690
def logged(obj):
    """Attach a logger member to a decorated class or function.

    When *obj* is a class, a name-mangled ``__log`` member is added
    (accessed as ``self.__log`` inside the class), named
    "<module-name>.<class-name>" (the class's ``__qualname__`` is
    preferred when available, so nested classes include their outer
    class in the name).  When *obj* is a function, a ``_log`` member is
    added, named after the module; inside the function it must be
    qualified by the function name (``my_function._log``).

    When *obj* is a :class:`logging.Logger`, a decorator is returned
    that uses *obj* as the **parent** logger instead of the default
    module-named one::

        @logged
        class Sample:
            def test(self):
                self.__log.debug("This is a test.")

        @logged(logging.getLogger("test.parent"))
        def test():
            test._log.debug("This is a test.")

    Notes:
        * Subclasses of a ``@logged`` class that want ``self.__log``
          must themselves be decorated with ``@logged``, so log records
          carry the subclass's name rather than the parent's.
        * ``@logged`` simply sets the attribute and returns the original
          object, so it is safe to combine with other decorators; use it
          as the innermost decorator alongside ``@traced``.
        * Jython/IronPython report mangled names for "internal" classes,
          which is reflected in the default logger name; IronPython's
          limited frame support may also misreport line numbers/files.

    :arg obj: the class or function being decorated, or an optional
        :class:`logging.Logger` to use as the parent logger
    :return: *obj* itself; or, when a logger was given, a one-argument
        decorator that sets the logger attribute and returns its argument
    """
    if isinstance(obj, logging.Logger):
        # Invoked as `@logged(logger)` -- build the actual decorator,
        # wiring the supplied logger in as the parent.
        def _decorator(class_or_fn):
            return _add_logger_to(
                class_or_fn,
                logger_name=_generate_logger_name(
                    class_or_fn, parent_name=obj.name))
        return _decorator
    # Invoked as plain `@logged` -- decorate directly.
    return _add_logger_to(obj)
26,691
def outlier_removal_mean(dataframe, colname, low_cut, high_cut):
    """Replace outliers in ``dataframe[colname]`` with the in-range mean.

    A cell counts as an outlier when it is numeric (int/float) and lies
    strictly outside ``[low_cut, high_cut]``; it is overwritten with the
    mean of the numeric in-range cells.  Non-numeric cells are left
    untouched.  The dataframe is modified in place and also returned.
    """
    column = dataframe[colname]

    def _is_number(value):
        return isinstance(value, (int, float))

    in_range = column.apply(
        lambda v: _is_number(v) and low_cut <= v <= high_cut
    )
    out_of_range = column.apply(
        lambda v: _is_number(v) and (v < low_cut or v > high_cut)
    )
    dataframe.loc[out_of_range, colname] = column.loc[in_range].mean()
    return dataframe
26,692
def _on_rmtree_error(func, path, exc_info): """ Error handler for ``shutil.rmtree``. If the error is due to an access error (read only file) it attempts to add write permission and then retries. If the error is for another reason it re-raises the error. Usage : ``shutil.rmtree(path, onerror=onerror)`` """ import stat import os if not os.access(path, os.W_OK): # Is the error an access error ? os.chmod(path, stat.S_IWUSR) func(path) else: raise exc_info
26,693
def reac_depr(filename, output):
    """
    Extract and transform a table with deprecated reaction identifiers.

    INPUT FILE is the path to the raw MetaNetX source table.

    OUTPUT FILE is the path for the transformed table output.

    """
    # BUG FIX: the log line said "compartment" although this command
    # processes deprecated *reaction* identifiers (see the reac_depr
    # config used below).
    logger.info("Processing deprecated reaction identifiers.")
    config = TableConfigurationModel.load()
    logger.info("Extracting...")
    deprecated = extract.extract_table(
        Path(filename), config.reac_depr.columns, config.reac_depr.skip
    )
    logger.info("Transforming...")
    logger.info("Loading...")
    deprecated.to_csv(Path(output), **api.OUTPUT_OPTIONS)
    logger.info("Complete.")
26,694
def non_repeating(value, counts, q):
    """Track a stream and return its first non-repeating string.

    Args:
        value (str): Latest string received in the stream
        counts (dict): Occurrence counts for every string seen so far
        q (Queue): Strings not yet proven to be repeated, oldest first

    Return:
        str: First non-repeating string. None if all strings are repeated.
    """
    q.put(value)
    counts[value] = counts.get(value, 0) + 1
    # Drop proven-repeated strings from the front of the queue; the first
    # string still counted once (if any) is the answer.
    while not q.empty():
        head = q.queue[0]
        if counts[head] == 1:
            return head
        q.get()
    return None
26,695
def test_run(s3_stubber):
    """Test that the command updates the relevant records ignoring ones with errors.

    A CSV of (project id, old sector, new sector) rows is served from a
    stubbed S3 bucket; the command must update only the rows whose project
    exists and whose data parses, leaving the rest untouched.
    """
    sectors = SectorFactory.create_batch(5)
    investment_projects = [
        # investment project in CSV doesn't exist so row should fail
        # sector should get updated
        InvestmentProjectFactory(sector_id=sectors[0].id),
        # sector should get updated
        InvestmentProjectFactory(sector_id=None),
        # sector should not get updated
        InvestmentProjectFactory(sector_id=None),
        # should be ignored
        InvestmentProjectFactory(sector_id=sectors[3].id),
        # should be skipped because of an error
        InvestmentProjectFactory(sector_id=sectors[4].id),
    ]
    new_sectors = SectorFactory.create_batch(5)
    bucket = 'test_bucket'
    object_key = 'test_key'
    # Row 1: unknown project id; last row: unparseable sector ids.
    csv_content = f"""id,old_sector,new_sector
00000000-0000-0000-0000-000000000000,NULL,NULL
{investment_projects[0].id},{sectors[0].id},{new_sectors[0].id}
{investment_projects[1].id},NULL,{new_sectors[1].id}
{investment_projects[2].id},{new_sectors[2].id},{new_sectors[2].id}
{investment_projects[4].id},invalid_id,another_invalid_id
"""
    s3_stubber.add_response(
        'get_object',
        {
            'Body': BytesIO(bytes(csv_content, encoding='utf-8')),
        },
        expected_params={
            'Bucket': bucket,
            'Key': object_key,
        },
    )

    call_command('update_investment_project_sector', bucket, object_key)

    for investment_project in investment_projects:
        investment_project.refresh_from_db()

    # Only the two valid rows with matching old sectors changed anything.
    assert investment_projects[0].sector == new_sectors[0]
    assert investment_projects[1].sector == new_sectors[1]
    assert investment_projects[2].sector is None
    assert investment_projects[3].sector == sectors[3]
    assert investment_projects[4].sector == sectors[4]
26,696
def test_default(carousels: List[element.Tag]):
    """Check the class lists of the first three carousels."""
    expected_classes = [
        ["scbs-carousel", "scbs-slide", "scbs-carousel-fade"],
        ["scbs-carousel", "scbs-slide"],
        ["scbs-carousel", "scbs-slide"],
    ]
    for carousel, classes in zip(carousels[:3], expected_classes):
        assert carousel["class"] == classes
26,697
async def insult(e):
    """Edit the triggering message to a random line from RAPE_STRINGS."""
    reply = choice(RAPE_STRINGS)
    await e.edit(reply)
26,698
def test_cli_generate_tests():
    """Test tests command.

    Invokes ``generate-tests`` against the bundled 'Example' source and
    checks that the emitted YAML parses into a five-entry mapping.
    """
    runner = CliRunner()
    gen_tests_result = runner.invoke(
        cli.main, ["generate-tests", "--from", "bundled", "Example"]
    )
    assert gen_tests_result.exit_code == 0
    yaml_ = yaml.safe_load(StringIO(gen_tests_result.output))
    # isinstance instead of `type(...) == dict`: idiomatic and accepts
    # dict subclasses the YAML loader may return.
    assert isinstance(yaml_, dict)
    assert len(yaml_) == 5
26,699