Dataset schema:
  _id               string (2-7 chars)
  title             string (1-88 chars)
  partition         string (3 distinct values)
  text              string (75-19.8k chars)
  language          string (1 distinct value)
  meta_information  dict
q28400
define_passive_branch_flows_with_kirchhoff
train
def define_passive_branch_flows_with_kirchhoff(network, snapshots, skip_vars=False):
    """Define passive branch flows with the Kirchhoff method."""
    for sub_network in network.sub_networks.obj:
        find_tree(sub_network)
        find_cycles(sub_network)
        # following is necessary to calculate angles post-facto
        find_bus_controls(sub_network)
        if len(sub_network.branches_i()) > 0:
            calculate_B_H(sub_network)

    passive_branches = network.passive_branches()

    if not skip_vars:
        network.model.passive_branch_p = Var(list(passive_branches.index), snapshots)

    cycle_index = []
    cycle_constraints = {}

    for subnetwork in network.sub_networks.obj:
        branches = subnetwork.branches()
        attribute = "r_pu_eff" if network.sub_networks.at[subnetwork.name, "carrier"] == "DC" else "x_pu_eff"

        sub_network_cycle_index, sub_network_cycle_constraints = \
            define_sub_network_cycle_constraints(subnetwork, snapshots,
                                                 network.model.passive_branch_p,
                                                 attribute)

        cycle_index.extend(sub_network_cycle_index)
        cycle_constraints.update(sub_network_cycle_constraints)

    l_constraint(network.model, "cycle_constraints", cycle_constraints,
                 cycle_index, snapshots)
python
{ "resource": "" }
q28401
network_lopf_build_model
train
def network_lopf_build_model(network, snapshots=None, skip_pre=False,
                             formulation="angles", ptdf_tolerance=0.):
    """
    Build pyomo model for linear optimal power flow for a group of snapshots.

    Parameters
    ----------
    snapshots : list or index slice
        A list of snapshots to optimise, must be a subset of
        network.snapshots, defaults to network.snapshots
    skip_pre : bool, default False
        Skip the preliminary steps of computing topology, calculating
        dependent values and finding bus controls.
    formulation : string
        Formulation of the linear power flow equations to use; must be one of
        ["angles", "cycles", "kirchhoff", "ptdf"]
    ptdf_tolerance : float
        Value below which PTDF entries are ignored

    Returns
    -------
    network.model
    """
    if not skip_pre:
        network.determine_network_topology()
        calculate_dependent_values(network)
        for sub_network in network.sub_networks.obj:
            find_slack_bus(sub_network)
        logger.info("Performed preliminary steps")

    snapshots = _as_snapshots(network, snapshots)

    logger.info("Building pyomo model using `%s` formulation", formulation)
    network.model = ConcreteModel("Linear Optimal Power Flow")

    define_generator_variables_constraints(network, snapshots)

    define_storage_variables_constraints(network, snapshots)

    define_store_variables_constraints(network, snapshots)

    define_branch_extension_variables(network, snapshots)

    define_link_flows(network, snapshots)

    define_nodal_balances(network, snapshots)

    define_passive_branch_flows(network, snapshots, formulation, ptdf_tolerance)

    define_passive_branch_constraints(network, snapshots)

    if formulation in ["angles", "kirchhoff"]:
        define_nodal_balance_constraints(network, snapshots)
    elif formulation in ["ptdf", "cycles"]:
        define_sub_network_balance_constraints(network, snapshots)

    define_global_constraints(network, snapshots)

    define_linear_objective(network, snapshots)

    # tidy up auxiliary expressions
    del network._p_balance

    # force solver to also give us the dual prices
    network.model.dual = Suffix(direction=Suffix.IMPORT)

    return network.model
python
{ "resource": "" }
q28402
network_lopf_prepare_solver
train
def network_lopf_prepare_solver(network, solver_name="glpk", solver_io=None): """ Prepare solver for linear optimal power flow. Parameters ---------- solver_name : string Must be a solver name that pyomo recognises and that is installed, e.g. "glpk", "gurobi" solver_io : string, default None Solver Input-Output option, e.g. "python" to use "gurobipy" for solver_name="gurobi" Returns ------- None """ network.opt = SolverFactory(solver_name, solver_io=solver_io) patch_optsolver_record_memusage_before_solving(network.opt, network) if isinstance(network.opt, PersistentSolver): network.opt.set_instance(network.model) return network.opt
python
{ "resource": "" }
q28403
network_lopf_solve
train
def network_lopf_solve(network, snapshots=None, formulation="angles",
                       solver_options={}, solver_logfile=None, keep_files=False,
                       free_memory={'pyomo'}, extra_postprocessing=None):
    """
    Solve linear optimal power flow for a group of snapshots and extract
    results.

    Parameters
    ----------
    snapshots : list or index slice
        A list of snapshots to optimise, must be a subset of
        network.snapshots, defaults to network.snapshots
    formulation : string
        Formulation of the linear power flow equations to use; must be one of
        ["angles", "cycles", "kirchhoff", "ptdf"]; must match the formulation
        used for building the model.
    solver_options : dictionary
        A dictionary with additional options that get passed to the solver.
        (e.g. {'threads': 2} tells gurobi to use only 2 cpus)
    solver_logfile : None|string
        If not None, sets the logfile option of the solver.
    keep_files : bool, default False
        Keep the files that pyomo constructs from OPF problem construction,
        e.g. .lp file - useful for debugging
    free_memory : set, default {'pyomo'}
        Any subset of {'pypsa', 'pyomo'}. Allows stashing `pypsa` time-series
        data away while the solver runs (as a pickle to disk) and/or freeing
        `pyomo` data after the solution has been extracted.
    extra_postprocessing : callable function
        This function must take three arguments
        `extra_postprocessing(network, snapshots, duals)` and is called after
        the model has solved and the results are extracted. It allows the user
        to extract further information about the solution, such as additional
        shadow prices.

    Returns
    -------
    status, termination_condition
    """
    snapshots = _as_snapshots(network, snapshots)

    logger.info("Solving model using %s", network.opt.name)

    if isinstance(network.opt, PersistentSolver):
        args = []
    else:
        args = [network.model]

    if isinstance(free_memory, string_types):
        free_memory = {free_memory}

    if 'pypsa' in free_memory:
        with empty_network(network):
            network.results = network.opt.solve(*args, suffixes=["dual"],
                                                keepfiles=keep_files,
                                                logfile=solver_logfile,
                                                options=solver_options)
    else:
        network.results = network.opt.solve(*args, suffixes=["dual"],
                                            keepfiles=keep_files,
                                            logfile=solver_logfile,
                                            options=solver_options)

    if logger.isEnabledFor(logging.INFO):
        network.results.write()

    status = network.results["Solver"][0]["Status"].key
    termination_condition = network.results["Solver"][0]["Termination condition"].key

    if status == "ok" and termination_condition == "optimal":
        logger.info("Optimization successful")
        extract_optimisation_results(network, snapshots, formulation,
                                     free_pyomo='pyomo' in free_memory,
                                     extra_postprocessing=extra_postprocessing)
    elif status == "warning" and termination_condition == "other":
        logger.warning("WARNING! Optimization might be sub-optimal. Writing output anyway")
        extract_optimisation_results(network, snapshots, formulation,
                                     free_pyomo='pyomo' in free_memory,
                                     extra_postprocessing=extra_postprocessing)
    else:
        logger.error("Optimisation failed with status %s and termination condition %s"
                     % (status, termination_condition))

    return status, termination_condition
python
{ "resource": "" }
q28404
network_lopf
train
def network_lopf(network, snapshots=None, solver_name="glpk", solver_io=None, skip_pre=False, extra_functionality=None, solver_logfile=None, solver_options={}, keep_files=False, formulation="angles", ptdf_tolerance=0., free_memory={},extra_postprocessing=None): """ Linear optimal power flow for a group of snapshots. Parameters ---------- snapshots : list or index slice A list of snapshots to optimise, must be a subset of network.snapshots, defaults to network.snapshots solver_name : string Must be a solver name that pyomo recognises and that is installed, e.g. "glpk", "gurobi" solver_io : string, default None Solver Input-Output option, e.g. "python" to use "gurobipy" for solver_name="gurobi" skip_pre: bool, default False Skip the preliminary steps of computing topology, calculating dependent values and finding bus controls. extra_functionality : callable function This function must take two arguments `extra_functionality(network,snapshots)` and is called after the model building is complete, but before it is sent to the solver. It allows the user to add/change constraints and add/change the objective function. solver_logfile : None|string If not None, sets the logfile option of the solver. solver_options : dictionary A dictionary with additional options that get passed to the solver. (e.g. {'threads':2} tells gurobi to use only 2 cpus) keep_files : bool, default False Keep the files that pyomo constructs from OPF problem construction, e.g. .lp file - useful for debugging formulation : string Formulation of the linear power flow equations to use; must be one of ["angles","cycles","kirchhoff","ptdf"] ptdf_tolerance : float Value below which PTDF entries are ignored free_memory : set, default {'pyomo'} Any subset of {'pypsa', 'pyomo'}. Allows to stash `pypsa` time-series data away while the solver runs (as a pickle to disk) and/or free `pyomo` data after the solution has been extracted. extra_postprocessing : callable function This function must take three arguments `extra_postprocessing(network,snapshots,duals)` and is called after the model has solved and the results are extracted. It allows the user to extract further information about the solution, such as additional shadow prices. Returns ------- None """ snapshots = _as_snapshots(network, snapshots) network_lopf_build_model(network, snapshots, skip_pre=skip_pre, formulation=formulation, ptdf_tolerance=ptdf_tolerance) if extra_functionality is not None: extra_functionality(network,snapshots) network_lopf_prepare_solver(network, solver_name=solver_name, solver_io=solver_io) return network_lopf_solve(network, snapshots, formulation=formulation, solver_logfile=solver_logfile, solver_options=solver_options, keep_files=keep_files, free_memory=free_memory, extra_postprocessing=extra_postprocessing)
python
{ "resource": "" }
q28405
replace_gen
train
def replace_gen(network, gen_to_replace):
    """Replace the generator gen_to_replace with a bus for the energy
    carrier, a link for the conversion from the energy carrier to electricity
    and a store to keep track of the depletion of the energy carrier and its
    CO2 emissions."""

    gen = network.generators.loc[gen_to_replace]

    bus_name = "{} {}".format(gen["bus"], gen["carrier"])
    link_name = "{} converter {} to AC".format(gen_to_replace, gen["carrier"])
    store_name = "{} store {}".format(gen_to_replace, gen["carrier"])

    network.add("Bus", bus_name, carrier=gen["carrier"])

    network.add("Link", link_name,
                bus0=bus_name,
                bus1=gen["bus"],
                capital_cost=gen["capital_cost"] * gen["efficiency"],
                p_nom=gen["p_nom"] / gen["efficiency"],
                p_nom_extendable=gen["p_nom_extendable"],
                p_nom_max=gen["p_nom_max"] / gen["efficiency"],
                p_nom_min=gen["p_nom_min"] / gen["efficiency"],
                p_max_pu=(network.generators_t.p_max_pu.loc[:, gen_to_replace]
                          if gen_to_replace in network.generators_t.p_max_pu.columns
                          else gen["p_max_pu"]),
                p_min_pu=(network.generators_t.p_min_pu.loc[:, gen_to_replace]
                          if gen_to_replace in network.generators_t.p_min_pu.columns
                          else gen["p_min_pu"]),
                marginal_cost=gen["marginal_cost"] * gen["efficiency"],
                efficiency=gen["efficiency"])

    network.add("Store", store_name,
                bus=bus_name,
                e_nom_min=-float("inf"),
                e_nom_max=0,
                e_nom_extendable=True,
                e_min_pu=1.,
                e_max_pu=0.)

    network.remove("Generator", gen_to_replace)

    return bus_name, link_name, store_name
python
{ "resource": "" }
q28406
get_switchable_as_dense
train
def get_switchable_as_dense(network, component, attr, snapshots=None, inds=None):
    """
    Return a Dataframe for a time-varying component attribute with values for
    all non-time-varying components filled in with the default values for the
    attribute.

    Parameters
    ----------
    network : pypsa.Network
    component : string
        Component object name, e.g. 'Generator' or 'Link'
    attr : string
        Attribute name
    snapshots : pandas.Index
        Restrict to these snapshots rather than network.snapshots.
    inds : pandas.Index
        Restrict to these components rather than network.components.index

    Returns
    -------
    pandas.DataFrame

    Examples
    --------
    >>> get_switchable_as_dense(network, 'Generator', 'p_max_pu')
    """
    df = network.df(component)
    pnl = network.pnl(component)

    index = df.index
    varying_i = pnl[attr].columns
    fixed_i = df.index.difference(varying_i)

    if inds is not None:
        index = index.intersection(inds)
        varying_i = varying_i.intersection(inds)
        fixed_i = fixed_i.intersection(inds)
    if snapshots is None:
        snapshots = network.snapshots

    return (pd.concat([
        pd.DataFrame(np.repeat([df.loc[fixed_i, attr].values], len(snapshots), axis=0),
                     index=snapshots, columns=fixed_i),
        pnl[attr].loc[snapshots, varying_i]
    ], axis=1, sort=False).reindex(columns=index))
python
{ "resource": "" }
q28407
get_switchable_as_iter
train
def get_switchable_as_iter(network, component, attr, snapshots, inds=None):
    """
    Return an iterator over snapshots for a time-varying component attribute
    with values for all non-time-varying components filled in with the default
    values for the attribute.

    Parameters
    ----------
    network : pypsa.Network
    component : string
        Component object name, e.g. 'Generator' or 'Link'
    attr : string
        Attribute name
    snapshots : pandas.Index
        Restrict to these snapshots rather than network.snapshots.
    inds : pandas.Index
        Restrict to these items rather than all of network.{generators,..}.index

    Returns
    -------
    iterator of pandas.Series, one per snapshot

    Examples
    --------
    >>> get_switchable_as_iter(network, 'Generator', 'p_max_pu', snapshots)
    """
    df = network.df(component)
    pnl = network.pnl(component)

    index = df.index
    varying_i = pnl[attr].columns
    fixed_i = df.index.difference(varying_i)

    if inds is not None:
        inds = pd.Index(inds)
        index = inds.intersection(index)
        varying_i = inds.intersection(varying_i)
        fixed_i = inds.intersection(fixed_i)

    # Short-circuit only fixed
    if len(varying_i) == 0:
        return repeat(df.loc[fixed_i, attr], len(snapshots))

    def is_same_indices(i1, i2):
        return len(i1) == len(i2) and (i1 == i2).all()

    if is_same_indices(fixed_i.append(varying_i), index):
        def reindex_maybe(s):
            return s
    else:
        def reindex_maybe(s):
            return s.reindex(index)

    return (
        reindex_maybe(df.loc[fixed_i, attr].append(pnl[attr].loc[sn, varying_i]))
        for sn in snapshots
    )
python
{ "resource": "" }
q28408
allocate_series_dataframes
train
def allocate_series_dataframes(network, series):
    """
    Populate time-varying outputs with default values.

    Parameters
    ----------
    network : pypsa.Network
    series : dict
        Dictionary of components and their attributes to populate (see example)

    Returns
    -------
    None

    Examples
    --------
    >>> allocate_series_dataframes(network, {'Generator': ['p'],
                                             'Load': ['p']})
    """
    for component, attributes in iteritems(series):

        df = network.df(component)
        pnl = network.pnl(component)

        for attr in attributes:
            pnl[attr] = pnl[attr].reindex(columns=df.index,
                                          fill_value=network.components[component]["attrs"].at[attr, "default"])
python
{ "resource": "" }
q28409
network_pf
train
def network_pf(network, snapshots=None, skip_pre=False, x_tol=1e-6, use_seed=False):
    """
    Full non-linear power flow for generic network.

    Parameters
    ----------
    snapshots : list-like|single snapshot
        A subset or an element of network.snapshots on which to run the power
        flow, defaults to network.snapshots
    skip_pre : bool, default False
        Skip the preliminary steps of computing topology, calculating
        dependent values and finding bus controls.
    x_tol : float
        Tolerance for Newton-Raphson power flow.
    use_seed : bool, default False
        Use a seed for the initial guess for the Newton-Raphson algorithm.

    Returns
    -------
    Dictionary with keys 'n_iter', 'converged', 'error' and dataframe values
    indicating number of iterations, convergence status, and iteration error
    for each snapshot (rows) and sub_network (columns)
    """
    return _network_prepare_and_run_pf(network, snapshots, skip_pre,
                                       linear=False, x_tol=x_tol,
                                       use_seed=use_seed)
python
{ "resource": "" }
q28410
network_lpf
train
def network_lpf(network, snapshots=None, skip_pre=False):
    """
    Linear power flow for generic network.

    Parameters
    ----------
    snapshots : list-like|single snapshot
        A subset or an element of network.snapshots on which to run the power
        flow, defaults to network.snapshots
    skip_pre : bool, default False
        Skip the preliminary steps of computing topology, calculating
        dependent values and finding bus controls.

    Returns
    -------
    None
    """
    _network_prepare_and_run_pf(network, snapshots, skip_pre, linear=True)
python
{ "resource": "" }
q28411
apply_line_types
train
def apply_line_types(network):
    """Calculate line electrical parameters x, r, b, g from standard types."""

    lines_with_types_b = network.lines.type != ""
    # .zsum() is an empty-safe sum that PyPSA patches onto pd.Series elsewhere
    # in this module; it returns 0 for an empty Series.
    if lines_with_types_b.zsum() == 0:
        return

    missing_types = (pd.Index(network.lines.loc[lines_with_types_b, 'type'].unique())
                     .difference(network.line_types.index))
    assert missing_types.empty, ("The type(s) {} do(es) not exist in network.line_types"
                                 .format(", ".join(missing_types)))

    # Get a copy of the lines data
    l = (network.lines.loc[lines_with_types_b, ["type", "length", "num_parallel"]]
         .join(network.line_types, on='type'))

    for attr in ["r", "x"]:
        l[attr] = l[attr + "_per_length"] * l["length"] / l["num_parallel"]
    l["b"] = 2*np.pi*1e-9*l["f_nom"] * l["c_per_length"] * l["length"] * l["num_parallel"]

    # now set calculated values on live lines
    for attr in ["r", "x", "b"]:
        network.lines.loc[lines_with_types_b, attr] = l[attr]
python
{ "resource": "" }
q28412
apply_transformer_types
train
def apply_transformer_types(network):
    """Calculate transformer electrical parameters x, r, b, g from standard
    types."""

    trafos_with_types_b = network.transformers.type != ""
    if trafos_with_types_b.zsum() == 0:
        return

    missing_types = (pd.Index(network.transformers.loc[trafos_with_types_b, 'type'].unique())
                     .difference(network.transformer_types.index))
    assert missing_types.empty, ("The type(s) {} do(es) not exist in network.transformer_types"
                                 .format(", ".join(missing_types)))

    # Get a copy of the transformers data
    # (joining pulls in "phase_shift", "s_nom", "tap_side" from TransformerType)
    t = (network.transformers.loc[trafos_with_types_b, ["type", "tap_position", "num_parallel"]]
         .join(network.transformer_types, on='type'))

    t["r"] = t["vscr"] / 100.
    t["x"] = np.sqrt((t["vsc"]/100.)**2 - t["r"]**2)

    # NB: b and g are per unit of s_nom
    t["g"] = t["pfe"] / (1000. * t["s_nom"])

    # for some bizarre reason, some of the standard types in pandapower have i0^2 < g^2
    t["b"] = - np.sqrt(((t["i0"]/100.)**2 - t["g"]**2).clip(lower=0))

    for attr in ["r", "x"]:
        t[attr] /= t["num_parallel"]

    for attr in ["b", "g"]:
        t[attr] *= t["num_parallel"]

    # deal with tap positions
    t["tap_ratio"] = 1. + (t["tap_position"] - t["tap_neutral"]) * (t["tap_step"]/100.)

    # now set calculated values on live transformers
    for attr in ["r", "x", "g", "b", "phase_shift", "s_nom", "tap_side", "tap_ratio"]:
        network.transformers.loc[trafos_with_types_b, attr] = t[attr]
python
{ "resource": "" }
q28413
apply_transformer_t_model
train
def apply_transformer_t_model(network):
    """Convert given T-model parameters to PI-model parameters using wye-delta
    transformation."""

    z_series = network.transformers.r_pu + 1j*network.transformers.x_pu
    y_shunt = network.transformers.g_pu + 1j*network.transformers.b_pu

    ts_b = (network.transformers.model == "t") & (y_shunt != 0.)

    if ts_b.zsum() == 0:
        return

    za, zb, zc = wye_to_delta(z_series.loc[ts_b]/2, z_series.loc[ts_b]/2,
                              1/y_shunt.loc[ts_b])

    network.transformers.loc[ts_b, "r_pu"] = zc.real
    network.transformers.loc[ts_b, "x_pu"] = zc.imag
    network.transformers.loc[ts_b, "g_pu"] = (2/za).real
    network.transformers.loc[ts_b, "b_pu"] = (2/za).imag
python
{ "resource": "" }
q28414
calculate_dependent_values
train
def calculate_dependent_values(network):
    """Calculate per unit impedances and append voltages to lines and shunt
    impedances."""

    apply_line_types(network)
    apply_transformer_types(network)

    network.lines["v_nom"] = network.lines.bus0.map(network.buses.v_nom)
    network.lines["x_pu"] = network.lines.x / (network.lines.v_nom**2)
    network.lines["r_pu"] = network.lines.r / (network.lines.v_nom**2)
    network.lines["b_pu"] = network.lines.b * network.lines.v_nom**2
    network.lines["g_pu"] = network.lines.g * network.lines.v_nom**2
    network.lines["x_pu_eff"] = network.lines["x_pu"]
    network.lines["r_pu_eff"] = network.lines["r_pu"]

    # convert transformer impedances from base power s_nom to base = 1 MVA
    network.transformers["x_pu"] = network.transformers.x / network.transformers.s_nom
    network.transformers["r_pu"] = network.transformers.r / network.transformers.s_nom
    network.transformers["b_pu"] = network.transformers.b * network.transformers.s_nom
    network.transformers["g_pu"] = network.transformers.g * network.transformers.s_nom
    network.transformers["x_pu_eff"] = network.transformers["x_pu"] * network.transformers["tap_ratio"]
    network.transformers["r_pu_eff"] = network.transformers["r_pu"] * network.transformers["tap_ratio"]

    apply_transformer_t_model(network)

    network.shunt_impedances["v_nom"] = network.shunt_impedances["bus"].map(network.buses.v_nom)
    network.shunt_impedances["b_pu"] = network.shunt_impedances.b * network.shunt_impedances.v_nom**2
    network.shunt_impedances["g_pu"] = network.shunt_impedances.g * network.shunt_impedances.v_nom**2
python
{ "resource": "" }
q28415
find_slack_bus
train
def find_slack_bus(sub_network):
    """Find the slack bus in a connected sub-network."""

    gens = sub_network.generators()

    if len(gens) == 0:
        logger.warning("No generators in sub-network {}, better hope power is already balanced".format(sub_network.name))
        sub_network.slack_generator = None
        sub_network.slack_bus = sub_network.buses_i()[0]

    else:
        slacks = gens[gens.control == "Slack"].index

        if len(slacks) == 0:
            sub_network.slack_generator = gens.index[0]
            sub_network.network.generators.loc[sub_network.slack_generator, "control"] = "Slack"
            logger.debug("No slack generator found in sub-network {}, using {} as the slack generator".format(sub_network.name, sub_network.slack_generator))

        elif len(slacks) == 1:
            sub_network.slack_generator = slacks[0]
        else:
            sub_network.slack_generator = slacks[0]
            sub_network.network.generators.loc[slacks[1:], "control"] = "PV"
            logger.debug("More than one slack generator found in sub-network {}, using {} as the slack generator".format(sub_network.name, sub_network.slack_generator))

        sub_network.slack_bus = gens.bus[sub_network.slack_generator]

    # also put it into the dataframe
    sub_network.network.sub_networks.at[sub_network.name, "slack_bus"] = sub_network.slack_bus

    logger.info("Slack bus for sub-network {} is {}".format(sub_network.name, sub_network.slack_bus))
python
{ "resource": "" }
q28416
find_bus_controls
train
def find_bus_controls(sub_network):
    """Find slack and all PV and PQ buses for a sub_network.
    This function also fixes sub_network.buses_o, a DataFrame ordered by
    control type."""

    network = sub_network.network

    find_slack_bus(sub_network)

    gens = sub_network.generators()
    buses_i = sub_network.buses_i()

    # default bus control is PQ
    network.buses.loc[buses_i, "control"] = "PQ"

    # find all buses with one or more gens with PV
    pvs = gens[gens.control == 'PV'].index.to_series()
    if len(pvs) > 0:
        pvs = pvs.groupby(gens.bus).first()
        network.buses.loc[pvs.index, "control"] = "PV"
        network.buses.loc[pvs.index, "generator"] = pvs

    network.buses.loc[sub_network.slack_bus, "control"] = "Slack"
    network.buses.loc[sub_network.slack_bus, "generator"] = sub_network.slack_generator

    buses_control = network.buses.loc[buses_i, "control"]
    sub_network.pvs = buses_control.index[buses_control == "PV"]
    sub_network.pqs = buses_control.index[buses_control == "PQ"]

    sub_network.pvpqs = sub_network.pvs.append(sub_network.pqs)

    # order buses
    sub_network.buses_o = sub_network.pvpqs.insert(0, sub_network.slack_bus)
python
{ "resource": "" }
q28417
calculate_B_H
train
def calculate_B_H(sub_network, skip_pre=False):
    """Calculate B and H matrices for AC or DC sub-networks."""

    network = sub_network.network

    if not skip_pre:
        calculate_dependent_values(network)
        find_bus_controls(sub_network)

    if network.sub_networks.at[sub_network.name, "carrier"] == "DC":
        attribute = "r_pu_eff"
    else:
        attribute = "x_pu_eff"

    # following leans heavily on pypower.makeBdc

    # susceptances
    b = 1. / np.concatenate([(c.df.loc[c.ind, attribute]).values
                             for c in sub_network.iterate_components(network.passive_branch_components)])

    if np.isnan(b).any():
        logger.warning("Warning! Some series impedances are zero - this will cause a singularity in LPF!")
    b_diag = csr_matrix((b, (r_[:len(b)], r_[:len(b)])))

    # incidence matrix
    sub_network.K = sub_network.incidence_matrix(busorder=sub_network.buses_o)

    sub_network.H = b_diag * sub_network.K.T

    # weighted Laplacian
    sub_network.B = sub_network.K * sub_network.H

    sub_network.p_branch_shift = -b * np.concatenate([(c.df.loc[c.ind, "phase_shift"]).values * np.pi / 180.
                                                      if c.name == "Transformer"
                                                      else np.zeros((len(c.ind),))
                                                      for c in sub_network.iterate_components(network.passive_branch_components)])

    sub_network.p_bus_shift = sub_network.K * sub_network.p_branch_shift
python
{ "resource": "" }
q28418
find_tree
train
def find_tree(sub_network, weight='x_pu'):
    """Get the spanning tree of the graph, choose the node with the
    highest degree as a central "tree slack" and then see for each
    branch which paths from the slack to each node go through the branch.
    """
    branches_bus0 = sub_network.branches()["bus0"]
    branches_i = branches_bus0.index
    buses_i = sub_network.buses_i()

    graph = sub_network.graph(weight=weight, inf_weight=1.)
    sub_network.tree = nx.minimum_spanning_tree(graph)

    # find bus with highest degree to use as slack
    tree_slack_bus, slack_degree = max(degree(sub_network.tree), key=itemgetter(1))
    logger.info("Tree slack bus is %s with degree %d.", tree_slack_bus, slack_degree)

    # determine which buses are supplied in tree through branch from slack

    # matrix to store tree structure
    sub_network.T = dok_matrix((len(branches_i), len(buses_i)))

    for j, bus in enumerate(buses_i):
        path = nx.shortest_path(sub_network.tree, bus, tree_slack_bus)
        for i in range(len(path) - 1):
            branch = next(iterkeys(graph[path[i]][path[i+1]]))
            branch_i = branches_i.get_loc(branch)
            sign = +1 if branches_bus0.iat[branch_i] == path[i] else -1
            sub_network.T[branch_i, j] = sign
python
{ "resource": "" }
q28419
find_cycles
train
def find_cycles(sub_network, weight='x_pu'):
    """
    Find all cycles in the sub_network and record them in sub_network.C.

    networkx collects the cycles with more than 2 edges; then the 2-edge
    cycles from the MultiGraph must be collected separately (for cases where
    there are multiple lines between the same pairs of buses).

    Cycles with infinite impedance are skipped.
    """
    branches_bus0 = sub_network.branches()["bus0"]
    branches_i = branches_bus0.index

    # reduce to a non-multi-graph for cycles with > 2 edges
    mgraph = sub_network.graph(weight=weight, inf_weight=False)
    graph = nx.OrderedGraph(mgraph)

    cycles = nx.cycle_basis(graph)

    # number of 2-edge cycles
    num_multi = len(mgraph.edges()) - len(graph.edges())

    sub_network.C = dok_matrix((len(branches_bus0), len(cycles) + num_multi))

    for j, cycle in enumerate(cycles):
        for i in range(len(cycle)):
            branch = next(iterkeys(mgraph[cycle[i]][cycle[(i+1) % len(cycle)]]))
            branch_i = branches_i.get_loc(branch)
            sign = +1 if branches_bus0.iat[branch_i] == cycle[i] else -1
            sub_network.C[branch_i, j] += sign

    # counter for multis
    c = len(cycles)

    # add multi-graph 2-edge cycles for multiple branches between
    # same pairs of buses
    for u, v in graph.edges():
        bs = list(mgraph[u][v].keys())
        if len(bs) > 1:
            first = bs[0]
            first_i = branches_i.get_loc(first)
            for b in bs[1:]:
                b_i = branches_i.get_loc(b)
                sign = -1 if branches_bus0.iat[b_i] == branches_bus0.iat[first_i] else +1
                sub_network.C[first_i, c] = 1
                sub_network.C[b_i, c] = sign
                c += 1
python
{ "resource": "" }
q28420
network_lpf_contingency
train
def network_lpf_contingency(network, snapshots=None, branch_outages=None):
    """
    Computes linear power flow for a selection of branch outages.

    Parameters
    ----------
    snapshots : list-like|single snapshot
        A subset or an element of network.snapshots on which to run the power
        flow, defaults to network.snapshots
        NB: currently this only works for a single snapshot
    branch_outages : list-like
        A list of passive branches which are to be tested for outages.
        If None, it's taken as all network.passive_branches_i()

    Returns
    -------
    p0 : pandas.DataFrame
        num_passive_branch x num_branch_outages DataFrame of new power flows
    """
    if snapshots is None:
        snapshots = network.snapshots

    if isinstance(snapshots, collections.Iterable):
        logger.warning("Apologies LPF contingency, this only works for single snapshots at the moment, taking the first snapshot.")
        snapshot = snapshots[0]
    else:
        snapshot = snapshots

    network.lpf(snapshot)

    # Store the flows from the base case
    passive_branches = network.passive_branches()

    if branch_outages is None:
        branch_outages = passive_branches.index

    p0_base = pd.Series(index=passive_branches.index)
    for c in network.passive_branch_components:
        pnl = network.pnl(c)
        p0_base[c] = pnl.p0.loc[snapshot]

    for sn in network.sub_networks.obj:
        sn._branches = sn.branches()
        sn.calculate_BODF()

    p0 = pd.DataFrame(index=passive_branches.index)

    p0["base"] = p0_base

    for branch in branch_outages:
        if type(branch) is not tuple:
            logger.warning("No type given for {}, assuming it is a line".format(branch))
            branch = ("Line", branch)

        sn = network.sub_networks.obj[passive_branches.sub_network[branch]]

        branch_i = sn._branches.index.get_loc(branch)

        p0_new = p0_base + pd.Series(sn.BODF[:, branch_i] * p0_base[branch], sn._branches.index)

        p0[branch] = p0_new

    return p0
python
{ "resource": "" }
q28421
compute_bbox_with_margins
train
def compute_bbox_with_margins(margin, x, y):
    'Helper function to compute bounding box for the plot'

    # set margins
    pos = np.asarray((x, y))
    minxy, maxxy = pos.min(axis=1), pos.max(axis=1)
    xy1 = minxy - margin * (maxxy - minxy)
    xy2 = maxxy + margin * (maxxy - minxy)
    return tuple(xy1), tuple(xy2)
python
{ "resource": "" }
q28422
projected_area_factor
train
def projected_area_factor(ax, original_crs):
    """
    Helper function to get the area scale of the current projection in
    reference to the default projection.
    """
    if not hasattr(ax, 'projection'):
        return 1
    if isinstance(ax.projection, ccrs.PlateCarree):
        return 1
    x1, x2, y1, y2 = ax.get_extent()
    pbounds = get_projection_from_crs(original_crs).transform_points(
        ax.projection, np.array([x1, x2]), np.array([y1, y2]))

    return np.sqrt(abs((x2 - x1) * (y2 - y1))
                   / abs((pbounds[0] - pbounds[1])[:2].prod()))
python
{ "resource": "" }
q28423
_export_to_exporter
train
def _export_to_exporter(network, exporter, basename, export_standard_types=False):
    """
    Export to exporter.

    Both static and series attributes of components are exported, but only
    if they have non-default values.

    Parameters
    ----------
    exporter : Exporter
        Initialized exporter instance
    basename : str
        Basename, used for logging
    export_standard_types : boolean, default False
        If True, then standard types are exported too (upon reimporting you
        should then set "ignore_standard_types" when initialising the
        network).
    """
    # exportable component types
    # what about None???? - nan is float?
    allowed_types = (float, int, bool) + string_types + tuple(np.typeDict.values())

    # first export network properties
    attrs = dict((attr, getattr(network, attr))
                 for attr in dir(network)
                 if (not attr.startswith("__") and
                     isinstance(getattr(network, attr), allowed_types)))
    exporter.save_attributes(attrs)

    # now export snapshots
    snapshots = pd.DataFrame(dict(weightings=network.snapshot_weightings),
                             index=pd.Index(network.snapshots, name="name"))
    exporter.save_snapshots(snapshots)

    exported_components = []
    for component in network.all_components - {"SubNetwork"}:

        list_name = network.components[component]["list_name"]
        attrs = network.components[component]["attrs"]

        df = network.df(component)
        pnl = network.pnl(component)

        if not export_standard_types and component in network.standard_type_components:
            df = df.drop(network.components[component]["standard_types"].index)

        # first do static attributes
        df.index.name = "name"
        if df.empty:
            exporter.remove_static(list_name)
            continue

        col_export = []
        for col in df.columns:
            # do not export derived attributes
            if col in ["sub_network", "r_pu", "x_pu", "g_pu", "b_pu"]:
                continue
            if col in attrs.index and pd.isnull(attrs.at[col, "default"]) and pd.isnull(df[col]).all():
                continue
            if (col in attrs.index
                and df[col].dtype == attrs.at[col, 'dtype']
                and (df[col] == attrs.at[col, "default"]).all()):
                continue

            col_export.append(col)

        exporter.save_static(list_name, df[col_export])

        # now do varying attributes
        for attr in pnl:
            if attr not in attrs.index:
                col_export = pnl[attr].columns
            else:
                default = attrs.at[attr, "default"]

                if pd.isnull(default):
                    col_export = pnl[attr].columns[(~pd.isnull(pnl[attr])).any()]
                else:
                    col_export = pnl[attr].columns[(pnl[attr] != default).any()]

            if len(col_export) > 0:
                df = pnl[attr][col_export]
                exporter.save_series(list_name, attr, df)
            else:
                exporter.remove_series(list_name, attr)

        exported_components.append(list_name)

    logger.info("Exported network {} has {}".format(basename, ", ".join(exported_components)))
python
{ "resource": "" }
q28424
import_from_csv_folder
train
def import_from_csv_folder(network, csv_folder_name, encoding=None, skip_time=False):
    """
    Import network data from CSVs in a folder.

    The CSVs must follow the standard form, see pypsa/examples.

    Parameters
    ----------
    csv_folder_name : string
        Name of folder
    encoding : str, default None
        Encoding to use for UTF when reading (ex. 'utf-8').
        `List of Python standard encodings
        <https://docs.python.org/3/library/codecs.html#standard-encodings>`_
    skip_time : bool, default False
        Skip reading in time dependent attributes
    """
    basename = os.path.basename(csv_folder_name)
    with ImporterCSV(csv_folder_name, encoding=encoding) as importer:
        _import_from_importer(network, importer, basename=basename,
                              skip_time=skip_time)
python
{ "resource": "" }
q28425
export_to_csv_folder
train
def export_to_csv_folder(network, csv_folder_name, encoding=None,
                         export_standard_types=False):
    """
    Export network and components to a folder of CSVs.

    Both static and series attributes of components are exported, but only
    if they have non-default values.

    If csv_folder_name does not already exist, it is created.

    Parameters
    ----------
    csv_folder_name : string
        Name of folder to which to export.
    encoding : str, default None
        Encoding to use for UTF when reading (ex. 'utf-8').
        `List of Python standard encodings
        <https://docs.python.org/3/library/codecs.html#standard-encodings>`_
    export_standard_types : boolean, default False
        If True, then standard types are exported too (upon reimporting you
        should then set "ignore_standard_types" when initialising the
        network).

    Examples
    --------
    >>> export_to_csv_folder(network, csv_folder_name)
    OR
    >>> network.export_to_csv_folder(csv_folder_name)
    """
    basename = os.path.basename(csv_folder_name)
    with ExporterCSV(csv_folder_name=csv_folder_name, encoding=encoding) as exporter:
        _export_to_exporter(network, exporter, basename=basename,
                            export_standard_types=export_standard_types)
python
{ "resource": "" }
q28426
import_from_hdf5
train
def import_from_hdf5(network, path, skip_time=False):
    """
    Import network data from HDF5 store at `path`.

    Parameters
    ----------
    path : string
        Name of HDF5 store
    skip_time : bool, default False
        Skip reading in time dependent attributes
    """
    basename = os.path.basename(path)
    with ImporterHDF5(path) as importer:
        _import_from_importer(network, importer, basename=basename,
                              skip_time=skip_time)
python
{ "resource": "" }
q28427
export_to_hdf5
train
def export_to_hdf5(network, path, export_standard_types=False, **kwargs):
    """
    Export network and components to an HDF store.

    Both static and series attributes of components are exported, but only
    if they have non-default values.

    If path does not already exist, it is created.

    Parameters
    ----------
    path : string
        Name of hdf5 file to which to export (if it exists, it is overwritten)
    **kwargs
        Extra arguments for pd.HDFStore to specify, for instance, compression
        (default: complevel=4)

    Examples
    --------
    >>> export_to_hdf5(network, filename)
    OR
    >>> network.export_to_hdf5(filename)
    """
    kwargs.setdefault('complevel', 4)

    basename = os.path.basename(path)
    with ExporterHDF5(path, **kwargs) as exporter:
        _export_to_exporter(network, exporter, basename=basename,
                            export_standard_types=export_standard_types)
python
{ "resource": "" }
q28428
import_from_netcdf
train
def import_from_netcdf(network, path, skip_time=False):
    """
    Import network data from netCDF file or xarray Dataset at `path`.

    Parameters
    ----------
    path : string|xr.Dataset
        Path to netCDF dataset or instance of xarray Dataset
    skip_time : bool, default False
        Skip reading in time dependent attributes
    """
    assert has_xarray, "xarray must be installed for netCDF support."

    basename = os.path.basename(path) if isinstance(path, string_types) else None
    with ImporterNetCDF(path=path) as importer:
        _import_from_importer(network, importer, basename=basename,
                              skip_time=skip_time)
python
{ "resource": "" }
q28429
export_to_netcdf
train
def export_to_netcdf(network, path=None, export_standard_types=False,
                     least_significant_digit=None):
    """Export network and components to a netCDF file.

    Both static and series attributes of components are exported, but only
    if they have non-default values.

    If path does not already exist, it is created.

    If no path is passed, no file is exported, but the xarray.Dataset
    is still returned.

    Be aware that this cannot export boolean attributes on the Network
    class, e.g. network.my_bool = False is not supported by netCDF.

    Parameters
    ----------
    path : string|None
        Name of netCDF file to which to export (if it exists, it is
        overwritten); if None is passed, no file is exported.
    least_significant_digit
        This is passed to the netCDF exporter, but currently makes no
        difference to file size or float accuracy. We're working on improving
        this...

    Returns
    -------
    ds : xarray.Dataset

    Examples
    --------
    >>> export_to_netcdf(network, "my_file.nc")
    OR
    >>> network.export_to_netcdf("my_file.nc")
    """
    assert has_xarray, "xarray must be installed for netCDF support."

    basename = os.path.basename(path) if path is not None else None
    with ExporterNetCDF(path, least_significant_digit) as exporter:
        _export_to_exporter(network, exporter, basename=basename,
                            export_standard_types=export_standard_types)
        return exporter.ds
python
{ "resource": "" }
q28430
_import_from_importer
train
def _import_from_importer(network, importer, basename, skip_time=False):
    """
    Import network data from importer.

    Parameters
    ----------
    skip_time : bool
        Skip importing time
    """
    attrs = importer.get_attributes()

    current_pypsa_version = [int(s) for s in network.pypsa_version.split(".")]
    pypsa_version = None

    if attrs is not None:
        network.name = attrs.pop('name')

        try:
            pypsa_version = [int(s) for s in attrs.pop("pypsa_version").split(".")]
        except KeyError:
            pypsa_version = None

        for attr, val in iteritems(attrs):
            setattr(network, attr, val)

    ##https://docs.python.org/3/tutorial/datastructures.html#comparing-sequences-and-other-types
    if pypsa_version is None or pypsa_version < current_pypsa_version:
        logger.warning(dedent("""
                Importing network from an older version of PyPSA than the current version {}.
                Please read the release notes at https://pypsa.org/doc/release_notes.html
                carefully to prepare your network for import.
        """).format(network.pypsa_version))

    importer.pypsa_version = pypsa_version
    importer.current_pypsa_version = current_pypsa_version

    # if there is snapshots.csv, read in snapshot data
    df = importer.get_snapshots()
    if df is not None:
        network.set_snapshots(df.index)
        if "weightings" in df.columns:
            network.snapshot_weightings = df["weightings"].reindex(network.snapshots)

    imported_components = []

    # now read in other components; make sure buses and carriers come first
    for component in ["Bus", "Carrier"] + sorted(network.all_components - {"Bus", "Carrier", "SubNetwork"}):
        list_name = network.components[component]["list_name"]

        df = importer.get_static(list_name)
        if df is None:
            if component == "Bus":
                logger.error("Error, no buses found")
                return
            else:
                continue

        import_components_from_dataframe(network, df, component)

        if not skip_time:
            for attr, df in importer.get_series(list_name):
                import_series_from_dataframe(network, df, component, attr)

        logger.debug(getattr(network, list_name))

        imported_components.append(list_name)

    logger.info("Imported network{} has {}".format(" " + basename, ", ".join(imported_components)))
python
{ "resource": "" }
q28431
import_series_from_dataframe
train
def import_series_from_dataframe(network, dataframe, cls_name, attr):
    """
    Import time series from a pandas DataFrame.

    Parameters
    ----------
    dataframe : pandas.DataFrame
    cls_name : string
        Name of class of component
    attr : string
        Name of series attribute

    Examples
    --------
    >>> import_series_from_dataframe(dataframe, "Load", "p_set")
    """
    df = network.df(cls_name)
    pnl = network.pnl(cls_name)
    list_name = network.components[cls_name]["list_name"]

    diff = dataframe.columns.difference(df.index)
    if len(diff) > 0:
        logger.warning("Components {} for attribute {} of {} are not in main components dataframe {}".format(diff, attr, cls_name, list_name))

    attr_series = network.components[cls_name]["attrs"].loc[attr]
    columns = dataframe.columns

    diff = network.snapshots.difference(dataframe.index)
    if len(diff):
        logger.warning("Snapshots {} are missing from {} of {}. Filling with default value '{}'".format(diff, attr, cls_name, attr_series["default"]))
        dataframe = dataframe.reindex(network.snapshots, fill_value=attr_series["default"])

    if not attr_series.static:
        pnl[attr] = pnl[attr].reindex(columns=df.index | columns,
                                      fill_value=attr_series.default)
    else:
        pnl[attr] = pnl[attr].reindex(columns=(pnl[attr].columns | columns))

    pnl[attr].loc[network.snapshots, columns] = dataframe.loc[network.snapshots, columns]
python
{ "resource": "" }
q28432
graph
train
def graph(network, branch_components=None, weight=None, inf_weight=False):
    """
    Build NetworkX graph.

    Arguments
    ---------
    network : Network|SubNetwork
    branch_components : [str]
        Components to use as branches. The default are
        passive_branch_components in the case of a SubNetwork and
        branch_components in the case of a Network.
    weight : str
        Branch attribute to use as weight
    inf_weight : bool|float
        How to treat infinite weights (default: False). True keeps the
        infinite weight. False skips edges with infinite weight. If a float
        is given it is used instead.

    Returns
    -------
    graph : OrderedGraph
        NetworkX graph
    """
    from . import components

    if isinstance(network, components.Network):
        if branch_components is None:
            branch_components = network.branch_components
        buses_i = network.buses.index
    elif isinstance(network, components.SubNetwork):
        if branch_components is None:
            branch_components = network.network.passive_branch_components
        buses_i = network.buses_i()
    else:
        raise TypeError("graph must be called with a Network or a SubNetwork")

    graph = OrderedGraph()

    # add nodes first, in case there are isolated buses not connected with branches
    graph.add_nodes_from(buses_i)

    # Multigraph uses the branch type and name as key
    def gen_edges():
        for c in network.iterate_components(branch_components):
            for branch in c.df.loc[slice(None) if c.ind is None else c.ind].itertuples():
                if weight is None:
                    data = {}
                else:
                    data = dict(weight=getattr(branch, weight, 0))
                    if np.isinf(data['weight']) and inf_weight is not True:
                        if inf_weight is False:
                            continue
                        else:
                            data['weight'] = inf_weight

                yield (branch.bus0, branch.bus1, (c.name, branch.Index), data)

    graph.add_edges_from(gen_edges())

    return graph
python
{ "resource": "" }
q28433
l_constraint
train
def l_constraint(model, name, constraints, *args):
    """A replacement for pyomo's Constraint that quickly builds linear
    constraints.

    Instead of

    model.name = Constraint(index1, index2, ..., rule=f)

    call instead

    l_constraint(model, name, constraints, index1, index2, ...)

    where constraints is a dictionary of constraints of the form:

    constraints[i] = LConstraint object

    OR using the soon-to-be-deprecated list format:

    constraints[i] = [[(coeff1, var1), (coeff2, var2), ...], sense, constant_term]

    i.e. the first argument is a list of tuples with the variables and their
    coefficients, the second argument is the sense string (must be one of
    "==", "<=", ">=", "><") and the third argument is the constant term
    (a float). The sense "><" allows lower and upper bounds and requires
    `constant_term` to be a 2-tuple.

    Variables may be repeated with different coefficients, which pyomo
    will sum up.

    Parameters
    ----------
    model : pyomo.environ.ConcreteModel
    name : string
        Name of constraints to be constructed
    constraints : dict
        A dictionary of constraints (see format above)
    *args :
        Indices of the constraints
    """
    setattr(model, name, Constraint(*args, noruleinit=True))
    v = getattr(model, name)

    for i in v._index:
        c = constraints[i]
        if type(c) is LConstraint:
            variables = c.lhs.variables + [(-item[0], item[1]) for item in c.rhs.variables]
            sense = c.sense
            constant = c.rhs.constant - c.lhs.constant
        else:
            variables = c[0]
            sense = c[1]
            constant = c[2]

        v._data[i] = pyomo.core.base.constraint._GeneralConstraintData(None, v)
        v._data[i]._body = _build_sum_expression(variables)

        if sense == "==":
            v._data[i]._equality = True
            v._data[i]._lower = pyomo.core.base.numvalue.NumericConstant(constant)
            v._data[i]._upper = pyomo.core.base.numvalue.NumericConstant(constant)
        elif sense == "<=":
            v._data[i]._equality = False
            v._data[i]._lower = None
            v._data[i]._upper = pyomo.core.base.numvalue.NumericConstant(constant)
        elif sense == ">=":
            v._data[i]._equality = False
            v._data[i]._lower = pyomo.core.base.numvalue.NumericConstant(constant)
            v._data[i]._upper = None
        elif sense == "><":
            v._data[i]._equality = False
            v._data[i]._lower = pyomo.core.base.numvalue.NumericConstant(constant[0])
            v._data[i]._upper = pyomo.core.base.numvalue.NumericConstant(constant[1])
        else:
            raise KeyError('`sense` must be one of "==","<=",">=","><"; got: {}'.format(sense))
python
{ "resource": "" }
q28434
l_objective
train
def l_objective(model, objective=None):
    """
    A replacement for pyomo's Objective that quickly builds linear objectives.

    Instead of

    model.objective = Objective(expr=sum(vars[i]*coeffs[i] for i in index) + constant)

    call instead

    l_objective(model, objective)

    where objective is an LExpression.

    Variables may be repeated with different coefficients, which pyomo
    will sum up.

    Parameters
    ----------
    model : pyomo.environ.ConcreteModel
    objective : LExpression
    """
    if objective is None:
        objective = LExpression()

    # initialise with a dummy
    model.objective = Objective(expr=0.)

    model.objective._expr = _build_sum_expression(objective.variables,
                                                  constant=objective.constant)
python
{ "resource": "" }
q28435
Network._build_dataframes
train
def _build_dataframes(self):
    """Function called when network is created to build component
    pandas.DataFrames."""

    for component in self.all_components:

        attrs = self.components[component]["attrs"]

        static_dtypes = attrs.loc[attrs.static, "dtype"].drop(["name"])

        df = pd.DataFrame({k: pd.Series(dtype=d) for k, d in static_dtypes.iteritems()},
                          columns=static_dtypes.index)

        df.index.name = "name"

        setattr(self, self.components[component]["list_name"], df)

        # it's currently hard to imagine non-float series,
        # but this could be generalised
        pnl = Dict({k: pd.DataFrame(index=self.snapshots,
                                    columns=[],
                                    dtype=np.dtype(float))
                    for k in attrs.index[attrs.varying]})

        setattr(self, self.components[component]["list_name"] + "_t", pnl)
python
{ "resource": "" }
q28436
Network.set_snapshots
train
def set_snapshots(self, snapshots):
    """
    Set the snapshots and reindex all time-dependent data.

    This will reindex all pandas.Panels of time-dependent data; NaNs are
    filled with the default value for that quantity.

    Parameters
    ----------
    snapshots : list or pandas.Index
        All time steps.

    Returns
    -------
    None
    """
    self.snapshots = pd.Index(snapshots)

    self.snapshot_weightings = self.snapshot_weightings.reindex(self.snapshots, fill_value=1.)

    if isinstance(snapshots, pd.DatetimeIndex) and _pd_version < '0.18.0':
        snapshots = pd.Index(snapshots.values)

    for component in self.all_components:
        pnl = self.pnl(component)
        attrs = self.components[component]["attrs"]

        for k, default in attrs.default[attrs.varying].iteritems():
            pnl[k] = pnl[k].reindex(self.snapshots).fillna(default)
python
{ "resource": "" }
q28437
Network.add
train
def add(self, class_name, name, **kwargs):
    """
    Add a single component to the network.

    Adds it to component DataFrames.

    Parameters
    ----------
    class_name : string
        Component class name in ["Bus", "Generator", "Load", "StorageUnit",
        "Store", "ShuntImpedance", "Line", "Transformer", "Link"]
    name : string
        Component name
    kwargs
        Component attributes, e.g. x=0.1, length=123

    Examples
    --------
    >>> network.add("Line", "line 12345", x=0.1)
    """
    assert class_name in self.components, "Component class {} not found".format(class_name)

    cls_df = self.df(class_name)
    cls_pnl = self.pnl(class_name)

    name = str(name)

    assert name not in cls_df.index, "Failed to add {} component {} because there is already an object with this name in {}".format(class_name, name, self.components[class_name]["list_name"])

    attrs = self.components[class_name]["attrs"]

    static_attrs = attrs[attrs.static].drop("name")

    # This guarantees that the correct attribute type is maintained
    obj_df = pd.DataFrame(data=[static_attrs.default], index=[name],
                          columns=static_attrs.index)
    new_df = cls_df.append(obj_df, sort=False)

    setattr(self, self.components[class_name]["list_name"], new_df)

    for k, v in iteritems(kwargs):
        if k not in attrs.index:
            logger.warning("{} has no attribute {}, ignoring this passed value.".format(class_name, k))
            continue
        typ = attrs.at[k, "typ"]
        if not attrs.at[k, "varying"]:
            new_df.at[name, k] = typ(v)
        elif attrs.at[k, "static"] and not isinstance(v, (pd.Series, np.ndarray, list)):
            new_df.at[name, k] = typ(v)
        else:
            cls_pnl[k][name] = pd.Series(data=v, index=self.snapshots, dtype=typ)

    for attr in ["bus", "bus0", "bus1"]:
        if attr in new_df.columns:
            bus_name = new_df.at[name, attr]
            if bus_name not in self.buses.index:
                logger.warning("The bus name `{}` given for {} of {} `{}` does not appear in network.buses".format(bus_name, attr, class_name, name))
python
{ "resource": "" }
q28438
Network.remove
train
def remove(self, class_name, name):
    """
    Removes a single component from the network.

    Removes it from component DataFrames.

    Parameters
    ----------
    class_name : string
        Component class name
    name : string
        Component name

    Examples
    --------
    >>> network.remove("Line", "my_line 12345")
    """
    if class_name not in self.components:
        logger.error("Component class {} not found".format(class_name))
        return None

    cls_df = self.df(class_name)

    cls_df.drop(name, inplace=True)

    pnl = self.pnl(class_name)

    for df in itervalues(pnl):
        if name in df:
            df.drop(name, axis=1, inplace=True)
python
{ "resource": "" }
q28439
Network.madd
train
def madd(self, class_name, names, suffix='', **kwargs):
    """
    Add multiple components to the network, along with their attributes.

    Make sure when adding static attributes as pandas Series that they are
    indexed by names. Make sure when adding time-varying attributes as pandas
    DataFrames that their index is a superset of network.snapshots and their
    columns are a subset of names.

    Parameters
    ----------
    class_name : string
        Component class name in ["Bus", "Generator", "Load", "StorageUnit",
        "Store", "ShuntImpedance", "Line", "Transformer", "Link"]
    names : list-like or pandas.Index
        Component names
    suffix : string, default ''
        All components are named after names with this added suffix. It is
        assumed that all Series and DataFrames are indexed by the original
        names.
    kwargs
        Component attributes, e.g. x=[0.1, 0.2]; can be a list, a
        pandas.Series or a pandas.DataFrame for time-varying attributes

    Returns
    -------
    new_names : pandas.Index
        Names of new components (including suffix)

    Examples
    --------
    >>> network.madd("Load", ["load 1", "load 2"],
    ...              bus=["1", "2"],
    ...              p_set=np.random.rand(len(network.snapshots), 2))
    """
    if class_name not in self.components:
        logger.error("Component class {} not found".format(class_name))
        return None

    if not isinstance(names, pd.Index):
        names = pd.Index(names)

    new_names = names.astype(str) + suffix

    static = {}
    series = {}
    for k, v in iteritems(kwargs):
        if isinstance(v, pd.DataFrame):
            series[k] = v.rename(columns=lambda i: str(i) + suffix)
        elif isinstance(v, pd.Series):
            static[k] = v.rename(lambda i: str(i) + suffix)
        elif isinstance(v, np.ndarray) and v.shape == (len(self.snapshots), len(names)):
            series[k] = pd.DataFrame(v, index=self.snapshots, columns=new_names)
        else:
            static[k] = v

    self.import_components_from_dataframe(pd.DataFrame(static, index=new_names), class_name)

    for k, v in iteritems(series):
        self.import_series_from_dataframe(v, class_name, k)

    return new_names
python
{ "resource": "" }
q28440
Network.mremove
train
def mremove(self, class_name, names):
    """
    Removes multiple components from the network.

    Removes them from component DataFrames.

    Parameters
    ----------
    class_name : string
        Component class name
    names : list-like
        Component names

    Examples
    --------
    >>> network.mremove("Line", ["line x", "line y"])
    """
    if class_name not in self.components:
        logger.error("Component class {} not found".format(class_name))
        return None

    if not isinstance(names, pd.Index):
        names = pd.Index(names)

    cls_df = self.df(class_name)

    cls_df.drop(names, inplace=True)

    pnl = self.pnl(class_name)

    for df in itervalues(pnl):
        df.drop(df.columns.intersection(names), axis=1, inplace=True)
python
{ "resource": "" }
q28441
Network.copy
train
def copy(self, with_time=True, ignore_standard_types=False):
    """
    Returns a deep copy of the Network object with all components and
    time-dependent data.

    Parameters
    ----------
    with_time : boolean, default True
        Copy snapshots and time-varying network.component_names_t data too.
    ignore_standard_types : boolean, default False
        Ignore the PyPSA standard types.

    Returns
    -------
    network : pypsa.Network

    Examples
    --------
    >>> network_copy = network.copy()
    """
    override_components, override_component_attrs = self._retrieve_overridden_components()

    network = self.__class__(ignore_standard_types=ignore_standard_types,
                             override_components=override_components,
                             override_component_attrs=override_component_attrs)

    for component in self.iterate_components(["Bus", "Carrier"] + sorted(self.all_components - {"Bus", "Carrier"})):
        df = component.df
        # drop the standard types to avoid them being read in twice
        if not ignore_standard_types and component.name in self.standard_type_components:
            df = component.df.drop(network.components[component.name]["standard_types"].index)

        import_components_from_dataframe(network, df, component.name)

    if with_time:
        network.set_snapshots(self.snapshots)
        for component in self.iterate_components():
            pnl = getattr(network, component.list_name + "_t")
            for k in iterkeys(component.pnl):
                pnl[k] = component.pnl[k].copy()

    # catch all remaining attributes of network
    for attr in ["name", "srid"]:
        setattr(network, attr, getattr(self, attr))

    network.snapshot_weightings = self.snapshot_weightings.copy()

    return network
python
{ "resource": "" }
q28442
Network.determine_network_topology
train
def determine_network_topology(self):
    """
    Build sub_networks from topology.
    """
    adjacency_matrix = self.adjacency_matrix(self.passive_branch_components)

    n_components, labels = csgraph.connected_components(adjacency_matrix, directed=False)

    # remove all old sub_networks
    for sub_network in self.sub_networks.index:
        obj = self.sub_networks.at[sub_network, "obj"]
        self.remove("SubNetwork", sub_network)
        del obj

    for i in np.arange(n_components):
        # index of first bus
        buses_i = (labels == i).nonzero()[0]
        carrier = self.buses.carrier.iat[buses_i[0]]

        if carrier not in ["AC", "DC"] and len(buses_i) > 1:
            logger.warning("Warning, sub network {} is not electric but "
                           "contains multiple buses\nand branches. Passive "
                           "flows are not allowed for non-electric networks!".format(i))

        if (self.buses.carrier.iloc[buses_i] != carrier).any():
            # NB: both format fields are filled here; the original passed the
            # value counts as a stray second argument to logger.warning,
            # which raised an IndexError in str.format.
            logger.warning("Warning, sub network {} contains buses with "
                           "mixed carriers! Value counts:\n{}".format(
                               i, self.buses.carrier.iloc[buses_i].value_counts()))

        self.add("SubNetwork", i, carrier=carrier)

    # add objects
    self.sub_networks["obj"] = [SubNetwork(self, name) for name in self.sub_networks.index]

    self.buses.loc[:, "sub_network"] = labels.astype(str)

    for c in self.iterate_components(self.passive_branch_components):
        c.df["sub_network"] = c.df.bus0.map(self.buses["sub_network"])
python
{ "resource": "" }
q28443
logger_init
train
def logger_init():
    """Initialize logger instance."""
    log = logging.getLogger("pyinotify")
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(
        logging.Formatter("[%(asctime)s %(name)s %(levelname)s] %(message)s"))
    log.addHandler(console_handler)
    log.setLevel(20)  # 20 is the numeric value of logging.INFO
    return log
python
{ "resource": "" }
q28444
INotifyWrapper.create
train
def create():
    """
    Factory method instantiating and returning the right wrapper.
    """
    # First, try to use ctypes.
    if ctypes:
        inotify = _CtypesLibcINotifyWrapper()
        if inotify.init():
            return inotify
    # Second, see if C extension is compiled.
    if inotify_syscalls:
        inotify = _INotifySyscallsWrapper()
        if inotify.init():
            return inotify
python
{ "resource": "" }
q28445
Stats.my_init
train
def my_init(self):
    """
    Method automatically called from base class constructor.
    """
    self._start_time = time.time()
    self._stats = {}
    self._stats_lock = threading.Lock()
python
{ "resource": "" }
q28446
Stats.process_default
train
def process_default(self, event): """ Processes |event|. """ self._stats_lock.acquire() try: events = event.maskname.split('|') for event_name in events: count = self._stats.get(event_name, 0) self._stats[event_name] = count + 1 finally: self._stats_lock.release()
python
{ "resource": "" }
q28447
ThreadedNotifier.stop
train
def stop(self): """ Stop notifier's loop. Stop notification. Join the thread. """ self._stop_event.set() os.write(self._pipe[1], b'stop') threading.Thread.join(self) Notifier.stop(self) self._pollobj.unregister(self._pipe[0]) os.close(self._pipe[0]) os.close(self._pipe[1])
python
{ "resource": "" }
q28448
TornadoAsyncNotifier.handle_read
train
def handle_read(self, *args, **kwargs): """ See comment in AsyncNotifier. """ self.read_events() self.process_events() if self.handle_read_callback is not None: self.handle_read_callback(self)
python
{ "resource": "" }
q28449
WatchManager.del_watch
train
def del_watch(self, wd): """ Remove watch entry associated to watch descriptor wd. @param wd: Watch descriptor. @type wd: int """ try: del self._wmd[wd] except KeyError as err: log.error('Cannot delete unknown watch descriptor %s' % str(err))
python
{ "resource": "" }
q28450
WatchManager.__add_watch
train
def __add_watch(self, path, mask, proc_fun, auto_add, exclude_filter): """ Add a watch on path, build a Watch object and insert it in the watch manager dictionary. Return the wd value. """ path = self.__format_path(path) if auto_add and not mask & IN_CREATE: mask |= IN_CREATE wd = self._inotify_wrapper.inotify_add_watch(self._fd, path, mask) if wd < 0: return wd watch = Watch(wd=wd, path=path, mask=mask, proc_fun=proc_fun, auto_add=auto_add, exclude_filter=exclude_filter) # wd are _always_ indexed with their original unicode paths in wmd. self._wmd[wd] = watch log.debug('New %s', watch) return wd
python
{ "resource": "" }
q28451
WatchManager.update_watch
train
def update_watch(self, wd, mask=None, proc_fun=None, rec=False,
                 auto_add=False, quiet=True):
    """
    Update existing watch descriptors |wd|. The |mask| value, the
    processing object |proc_fun|, the recursive param |rec| and the
    |auto_add| and |quiet| flags can all be updated.

    @param wd: Watch Descriptor to update. Also accepts a list of
               watch descriptors.
    @type wd: int or list of int
    @param mask: Optional new bitmask of events.
    @type mask: int
    @param proc_fun: Optional new processing function.
    @type proc_fun: function or ProcessEvent instance or instance of
                    one of its subclasses or callable object.
    @param rec: Optionally adds watches recursively on all
                subdirectories contained in the directory watched by |wd|.
    @type rec: bool
    @param auto_add: Automatically adds watches on newly created
                     directories in the watch's path corresponding to
                     |wd|. If |auto_add| is True, IN_CREATE is OR'ed with
                     |mask| when the watch is updated.
    @type auto_add: bool
    @param quiet: If False raises a WatchManagerError exception on
                  error. See example not_quiet.py
    @type quiet: bool
    @return: dict of watch descriptors associated to booleans values.
             True if the corresponding wd has been successfully
             updated, False otherwise.
    @rtype: dict of {int: bool}
    """
    lwd = self.__format_param(wd)
    if rec:
        lwd = self.__get_sub_rec(lwd)

    ret_ = {}  # return {wd: bool, ...}
    for awd in lwd:
        apath = self.get_path(awd)
        if not apath or awd < 0:
            err = 'update_watch: invalid WD=%d' % awd
            if quiet:
                log.error(err)
                continue
            raise WatchManagerError(err, ret_)

        if mask:
            wd_ = self._inotify_wrapper.inotify_add_watch(self._fd, apath,
                                                          mask)
            if wd_ < 0:
                ret_[awd] = False
                err = ('update_watch: cannot update %s WD=%d, %s' %
                       (apath, wd_, self._inotify_wrapper.str_errno()))
                if quiet:
                    log.error(err)
                    continue
                raise WatchManagerError(err, ret_)

            assert(awd == wd_)

        if proc_fun or auto_add:
            watch_ = self._wmd[awd]

        if proc_fun:
            watch_.proc_fun = proc_fun

        if auto_add:
            watch_.auto_add = auto_add

        ret_[awd] = True
        log.debug('Updated watch - %s', self._wmd[awd])
    return ret_
python
{ "resource": "" }
q28452
WatchManager.get_path
train
def get_path(self, wd): """ Returns the path associated to WD, if WD is unknown it returns None. @param wd: Watch descriptor. @type wd: int @return: Path or None. @rtype: string or None """ watch_ = self._wmd.get(wd) if watch_ is not None: return watch_.path
python
{ "resource": "" }
q28453
WatchManager.__walk_rec
train
def __walk_rec(self, top, rec):
    """
    Yields each subdirectory of top, doesn't follow symlinks.
    If rec is false, only yield top.

    @param top: root directory.
    @type top: string
    @param rec: recursive flag.
    @type rec: bool
    @return: path of one subdirectory.
    @rtype: string
    """
    if not rec or os.path.islink(top) or not os.path.isdir(top):
        yield top
    else:
        for root, dirs, files in os.walk(top):
            yield root
python
{ "resource": "" }
q28454
_SysProcessEvent.process_IN_CREATE
train
def process_IN_CREATE(self, raw_event):
    """
    If the event affects a directory and the auto_add flag of the
    targeted watch is set to True, a new watch is added on this
    new directory, with the same attribute values as those of this
    watch.
    """
    if raw_event.mask & IN_ISDIR:
        watch_ = self._watch_manager.get_watch(raw_event.wd)
        created_dir = os.path.join(watch_.path, raw_event.name)
        if watch_.auto_add and not watch_.exclude_filter(created_dir):
            addw = self._watch_manager.add_watch
            # The newly monitored directory inherits attributes from its
            # parent directory.
            addw_ret = addw(created_dir, watch_.mask,
                            proc_fun=watch_.proc_fun,
                            rec=False, auto_add=watch_.auto_add,
                            exclude_filter=watch_.exclude_filter)

            # Trick to handle mkdir -p /d1/d2/t3 where d1 is watched and
            # d2 and t3 (directory or file) are created.
            # Since the directory d2 is new, then everything inside it must
            # also be new.
            created_dir_wd = addw_ret.get(created_dir)
            if ((created_dir_wd is not None) and (created_dir_wd > 0) and
                os.path.isdir(created_dir)):
                try:
                    for name in os.listdir(created_dir):
                        inner = os.path.join(created_dir, name)
                        if self._watch_manager.get_wd(inner) is not None:
                            continue
                        # Generate (simulate) creation events for sub-
                        # directories and files.
                        if os.path.isfile(inner):
                            # symlinks are handled as files.
                            flags = IN_CREATE
                        elif os.path.isdir(inner):
                            flags = IN_CREATE | IN_ISDIR
                        else:
                            # This path should not be taken.
                            continue
                        rawevent = _RawEvent(created_dir_wd, flags, 0, name)
                        self._notifier.append_event(rawevent)
                except OSError as err:
                    msg = "process_IN_CREATE, invalid directory %s: %s"
                    log.debug(msg % (created_dir, str(err)))
    return self.process_default(raw_event)
python
{ "resource": "" }
q28455
Notifier.check_events
train
def check_events(self, timeout=None):
    """
    Check for new events available to read, blocks up to timeout
    milliseconds.

    @param timeout: If specified it overrides the corresponding
                    instance attribute _timeout. timeout must be
                    specified in milliseconds.
    @type timeout: int

    @return: New events to read.
    @rtype: bool
    """
    while True:
        try:
            # blocks up to 'timeout' milliseconds
            if timeout is None:
                timeout = self._timeout
            ret = self._pollobj.poll(timeout)
        except select.error as err:
            if err.args[0] == errno.EINTR:
                continue  # interrupted, retry
            else:
                raise
        else:
            break

    if not ret or (self._pipe[0] == ret[0][0]):
        return False
    # only one fd is polled
    return ret[0][1] & select.POLLIN
python
{ "resource": "" }
q28456
Notifier.read_events
train
def read_events(self):
    """
    Read events from device, build _RawEvents, and enqueue them.
    """
    buf_ = array.array('i', [0])
    # get event queue size
    if fcntl.ioctl(self._fd, termios.FIONREAD, buf_, 1) == -1:
        return
    queue_size = buf_[0]
    if queue_size < self._threshold:
        log.debug('(fd: %d) %d bytes available to read but threshold is '
                  'fixed to %d bytes', self._fd, queue_size,
                  self._threshold)
        return

    try:
        # Read content from file
        r = os.read(self._fd, queue_size)
    except Exception as msg:
        raise NotifierError(msg)
    log.debug('Event queue size: %d', queue_size)
    rsum = 0  # counter
    while rsum < queue_size:
        s_size = 16
        # Retrieve wd, mask, cookie and fname_len
        wd, mask, cookie, fname_len = struct.unpack('iIII',
                                                    r[rsum:rsum+s_size])
        # Retrieve name
        fname, = struct.unpack('%ds' % fname_len,
                               r[rsum + s_size:rsum + s_size + fname_len])
        rawevent = _RawEvent(wd, mask, cookie, fname)
        if self._coalesce:
            # Only enqueue new (unique) events.
            raweventstr = str(rawevent)
            if raweventstr not in self._eventset:
                self._eventset.add(raweventstr)
                self._eventq.append(rawevent)
        else:
            self._eventq.append(rawevent)
        rsum += s_size + fname_len
python
{ "resource": "" }
q28457
ensure_conf
train
def ensure_conf(app):
    """
    Ensure that the given app's redbeat_conf attribute is set to an
    instance of the RedBeatConfig class.
    """
    name = 'redbeat_conf'
    app = app_or_default(app)
    try:
        config = getattr(app, name)
    except AttributeError:
        config = RedBeatConfig(app)
        setattr(app, name, config)
    return config
python
{ "resource": "" }
q28458
http2time
train
def http2time(text): """Returns time in seconds since epoch of time represented by a string. Return value is an integer. None is returned if the format of str is unrecognized, the time is outside the representable range, or the timezone string is not recognized. If the string contains no timezone, UTC is assumed. The timezone in the string may be numerical (like "-0800" or "+0100") or a string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the timezone strings equivalent to UTC (zero offset) are known to the function. The function loosely parses the following formats: Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format 09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday) 08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday) 08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday) The parser ignores leading and trailing whitespace. The time may be absent. If the year is given with only 2 digits, the function will select the century that makes the year closest to the current date. """ # fast exit for strictly conforming string m = STRICT_DATE_RE.search(text) if m: g = m.groups() mon = MONTHS_LOWER.index(g[1].lower()) + 1 tt = (int(g[2]), mon, int(g[0]), int(g[3]), int(g[4]), float(g[5])) return _timegm(tt) # No, we need some messy parsing... # clean up text = text.lstrip() text = WEEKDAY_RE.sub("", text, 1) # Useless weekday # tz is time zone specifier string day, mon, yr, hr, min, sec, tz = [None]*7 # loose regexp parse m = LOOSE_HTTP_DATE_RE.search(text) if m is not None: day, mon, yr, hr, min, sec, tz = m.groups() else: return None # bad format return _str2time(day, mon, yr, hr, min, sec, tz)
python
{ "resource": "" }
q28459
unmatched
train
def unmatched(match): """Return unmatched part of re.Match object.""" start, end = match.span(0) return match.string[:start]+match.string[end:]
python
{ "resource": "" }
q28460
split_header_words
train
def split_header_words(header_values):
    r"""Parse header values into a list of lists containing key,value pairs.

    The function knows how to deal with ",", ";" and "=" as well as quoted
    values after "=".  A list of space separated tokens are parsed as if they
    were separated by ";".

    If the header_values passed as argument contains multiple values, then
    they are treated as if they were a single value separated by comma ",".

    This means that this function is useful for parsing header fields that
    follow this syntax (BNF as from the HTTP/1.1 specification, but we relax
    the requirement for tokens).

      headers       = #header
      header        = (token | parameter) *( [";"] (token | parameter))

      token         = 1*<any CHAR except CTLs or separators>
      separators    = "(" | ")" | "<" | ">" | "@"
                    | "," | ";" | ":" | "\" | <">
                    | "/" | "[" | "]" | "?" | "="
                    | "{" | "}" | SP | HT

      quoted-string = ( <"> *(qdtext | quoted-pair ) <"> )
      qdtext        = <any TEXT except <">>
      quoted-pair   = "\" CHAR

      parameter     = attribute "=" value
      attribute     = token
      value         = token | quoted-string

    Each header is represented by a list of key/value pairs.  The value for a
    simple token (not part of a parameter) is None.  Syntactically incorrect
    headers will not necessarily be parsed as you would want.

    This is easier to describe with some examples:

    >>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz'])
    [[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]]
    >>> split_header_words(['text/html; charset="iso-8859-1"'])
    [[('text/html', None), ('charset', 'iso-8859-1')]]
    >>> split_header_words([r'Basic realm="\"foo\bar\""'])
    [[('Basic', None), ('realm', '"foobar"')]]

    """
    assert not isinstance(header_values, str)
    result = []
    for text in header_values:
        orig_text = text
        pairs = []
        while text:
            m = HEADER_TOKEN_RE.search(text)
            if m:
                text = unmatched(m)
                name = m.group(1)
                m = HEADER_QUOTED_VALUE_RE.search(text)
                if m:  # quoted value
                    text = unmatched(m)
                    value = m.group(1)
                    value = HEADER_ESCAPE_RE.sub(r"\1", value)
                else:
                    m = HEADER_VALUE_RE.search(text)
                    if m:  # unquoted value
                        text = unmatched(m)
                        value = m.group(1)
                        value = value.rstrip()
                    else:
                        # no value, a lone token
                        value = None
                pairs.append((name, value))
            elif text.lstrip().startswith(","):
                # concatenated headers, as per RFC 2616 section 4.2
                text = text.lstrip()[1:]
                if pairs:
                    result.append(pairs)
                    pairs = []
            else:
                # skip junk
                non_junk, nr_junk_chars = re.subn(r"^[=\s;]*", "", text)
                assert nr_junk_chars > 0, (
                    "split_header_words bug: '%s', '%s', %s" %
                    (orig_text, text, pairs))
                text = non_junk
        if pairs:
            result.append(pairs)
    return result
python
{ "resource": "" }
q28461
parse_ns_headers
train
def parse_ns_headers(ns_headers): """Ad-hoc parser for Netscape protocol cookie-attributes. The old Netscape cookie format for Set-Cookie can for instance contain an unquoted "," in the expires field, so we have to use this ad-hoc parser instead of split_header_words. XXX This may not make the best possible effort to parse all the crap that Netscape Cookie headers contain. Ronald Tschalar's HTTPClient parser is probably better, so could do worse than following that if this ever gives any trouble. Currently, this is also used for parsing RFC 2109 cookies. """ known_attrs = ("expires", "domain", "path", "secure", # RFC 2109 attrs (may turn up in Netscape cookies, too) "version", "port", "max-age") result = [] for ns_header in ns_headers: pairs = [] version_set = False for ii, param in enumerate(re.split(r";\s*", ns_header)): param = param.rstrip() if param == "": continue if "=" not in param: k, v = param, None else: k, v = re.split(r"\s*=\s*", param, 1) k = k.lstrip() if ii != 0: lc = k.lower() if lc in known_attrs: k = lc if k == "version": # This is an RFC 2109 cookie. v = strip_quotes(v) version_set = True if k == "expires": # convert expires date to seconds since epoch v = http2time(strip_quotes(v)) # None if invalid pairs.append((k, v)) if pairs: if not version_set: pairs.append(("version", "0")) result.append(pairs) return result
python
{ "resource": "" }
q28462
is_HDN
train
def is_HDN(text): """Return True if text is a host domain name.""" # XXX # This may well be wrong. Which RFC is HDN defined in, if any (for # the purposes of RFC 2965)? # For the current implementation, what about IPv6? Remember to look # at other uses of IPV4_RE also, if change this. if IPV4_RE.search(text): return False if text == "": return False if text[0] == "." or text[-1] == ".": return False return True
python
{ "resource": "" }
q28463
domain_match
train
def domain_match(A, B): """Return True if domain A domain-matches domain B, according to RFC 2965. A and B may be host domain names or IP addresses. RFC 2965, section 1: Host names can be specified either as an IP address or a HDN string. Sometimes we compare one host name with another. (Such comparisons SHALL be case-insensitive.) Host A's name domain-matches host B's if * their host name strings string-compare equal; or * A is a HDN string and has the form NB, where N is a non-empty name string, B has the form .B', and B' is a HDN string. (So, x.y.com domain-matches .Y.com but not Y.com.) Note that domain-match is not a commutative operation: a.b.c.com domain-matches .c.com, but not the reverse. """ # Note that, if A or B are IP addresses, the only relevant part of the # definition of the domain-match algorithm is the direct string-compare. A = A.lower() B = B.lower() if A == B: return True if not is_HDN(A): return False i = A.rfind(B) if i == -1 or i == 0: # A does not have form NB, or N is the empty string return False if not B.startswith("."): return False if not is_HDN(B[1:]): return False return True
python
{ "resource": "" }
q28464
request_path
train
def request_path(request): """Path component of request-URI, as defined by RFC 2965.""" url = request.get_full_url() parts = urlsplit(url) path = escape_path(parts.path) if not path.startswith("/"): # fix bad RFC 2396 absoluteURI path = "/" + path return path
python
{ "resource": "" }
q28465
escape_path
train
def escape_path(path): """Escape any invalid characters in HTTP URL, and uppercase all escapes.""" # There's no knowing what character encoding was used to create URLs # containing %-escapes, but since we have to pick one to escape invalid # path characters, we pick UTF-8, as recommended in the HTML 4.0 # specification: # http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1 # And here, kind of: draft-fielding-uri-rfc2396bis-03 # (And in draft IRI specification: draft-duerst-iri-05) # (And here, for new URI schemes: RFC 2718) path = quote(path, HTTP_PATH_SAFE) path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path) return path
python
{ "resource": "" }
q28466
reach
train
def reach(h): """Return reach of host h, as defined by RFC 2965, section 1. The reach R of a host name H is defined as follows: * If - H is the host domain name of a host; and, - H has the form A.B; and - A has no embedded (that is, interior) dots; and - B has at least one embedded dot, or B is the string "local". then the reach of H is .B. * Otherwise, the reach of H is H. >>> reach("www.acme.com") '.acme.com' >>> reach("acme.com") 'acme.com' >>> reach("acme.local") '.local' """ i = h.find(".") if i >= 0: #a = h[:i] # this line is only here to show what a is b = h[i+1:] i = b.find(".") if is_HDN(h) and (i >= 0 or b == "local"): return "."+b return h
python
{ "resource": "" }
q28467
deepvalues
train
def deepvalues(mapping): """Iterates over nested mapping, depth-first, in sorted order by key.""" values = vals_sorted_by_key(mapping) for obj in values: mapping = False try: obj.items except AttributeError: pass else: mapping = True for subobj in deepvalues(obj): yield subobj if not mapping: yield obj
python
{ "resource": "" }
q28468
lwp_cookie_str
train
def lwp_cookie_str(cookie):
    """Return string representation of Cookie in the LWP cookie file format.

    Actually, the format is extended a bit -- see module docstring.

    """
    h = [(cookie.name, cookie.value),
         ("path", cookie.path),
         ("domain", cookie.domain)]
    if cookie.port is not None: h.append(("port", cookie.port))
    if cookie.path_specified: h.append(("path_spec", None))
    if cookie.port_specified: h.append(("port_spec", None))
    if cookie.domain_initial_dot: h.append(("domain_dot", None))
    if cookie.secure: h.append(("secure", None))
    if cookie.expires: h.append(("expires", time2isoz(float(cookie.expires))))
    if cookie.discard: h.append(("discard", None))
    if cookie.comment: h.append(("comment", cookie.comment))
    if cookie.comment_url: h.append(("commenturl", cookie.comment_url))

    keys = sorted(cookie._rest.keys())
    for k in keys:
        h.append((k, str(cookie._rest[k])))

    h.append(("version", str(cookie.version)))

    return join_header_words([h])
python
{ "resource": "" }
q28469
DefaultCookiePolicy.set_allowed_domains
train
def set_allowed_domains(self, allowed_domains): """Set the sequence of allowed domains, or None.""" if allowed_domains is not None: allowed_domains = tuple(allowed_domains) self._allowed_domains = allowed_domains
python
{ "resource": "" }
q28470
CookieJar._cookies_for_request
train
def _cookies_for_request(self, request): """Return a list of cookies to be returned to server.""" cookies = [] for domain in self._cookies.keys(): cookies.extend(self._cookies_for_domain(domain, request)) return cookies
python
{ "resource": "" }
q28471
CookieJar._cookie_attrs
train
def _cookie_attrs(self, cookies): """Return a list of cookie-attributes to be returned to server. like ['foo="bar"; $Path="/"', ...] The $Version attribute is also added when appropriate (currently only once per request). """ # add cookies in order of most specific (ie. longest) path first cookies.sort(key=lambda a: len(a.path), reverse=True) version_set = False attrs = [] for cookie in cookies: # set version of Cookie header # XXX # What should it be if multiple matching Set-Cookie headers have # different versions themselves? # Answer: there is no answer; was supposed to be settled by # RFC 2965 errata, but that may never appear... version = cookie.version if not version_set: version_set = True if version > 0: attrs.append("$Version=%s" % version) # quote cookie value if necessary # (not for Netscape protocol, which already has any quotes # intact, due to the poorly-specified Netscape Cookie: syntax) if ((cookie.value is not None) and self.non_word_re.search(cookie.value) and version > 0): value = self.quote_re.sub(r"\\\1", cookie.value) else: value = cookie.value # add cookie-attributes to be returned in Cookie header if cookie.value is None: attrs.append(cookie.name) else: attrs.append("%s=%s" % (cookie.name, value)) if version > 0: if cookie.path_specified: attrs.append('$Path="%s"' % cookie.path) if cookie.domain.startswith("."): domain = cookie.domain if (not cookie.domain_initial_dot and domain.startswith(".")): domain = domain[1:] attrs.append('$Domain="%s"' % domain) if cookie.port is not None: p = "$Port" if cookie.port_specified: p = p + ('="%s"' % cookie.port) attrs.append(p) return attrs
python
{ "resource": "" }
q28472
CookieJar._normalized_cookie_tuples
train
def _normalized_cookie_tuples(self, attrs_set):
    """Return list of tuples containing normalised cookie information.

    attrs_set is the list of lists of key,value pairs extracted from
    the Set-Cookie or Set-Cookie2 headers.

    Tuples are name, value, standard, rest, where name and value are the
    cookie name and value, standard is a dictionary containing the standard
    cookie-attributes (discard, secure, version, expires or max-age,
    domain, path and port) and rest is a dictionary containing the rest of
    the cookie-attributes.

    """
    cookie_tuples = []

    boolean_attrs = "discard", "secure"
    value_attrs = ("version",
                   "expires", "max-age",
                   "domain", "path", "port",
                   "comment", "commenturl")

    for cookie_attrs in attrs_set:
        name, value = cookie_attrs[0]

        # Build dictionary of standard cookie-attributes (standard) and
        # dictionary of other cookie-attributes (rest).

        # Note: expiry time is normalised to seconds since epoch.  V0
        # cookies should have the Expires cookie-attribute, and V1 cookies
        # should have Max-Age, but since V1 includes RFC 2109 cookies (and
        # since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
        # accept either (but prefer Max-Age).
        max_age_set = False

        bad_cookie = False

        standard = {}
        rest = {}
        for k, v in cookie_attrs[1:]:
            lc = k.lower()
            # don't lose case distinction for unknown fields
            if lc in value_attrs or lc in boolean_attrs:
                k = lc
            if k in boolean_attrs and v is None:
                # boolean cookie-attribute is present, but has no value
                # (like "discard", rather than "port=80")
                v = True
            if k in standard:
                # only first value is significant
                continue
            if k == "domain":
                if v is None:
                    _debug("   missing value for domain attribute")
                    bad_cookie = True
                    break
                # RFC 2965 section 3.3.3
                v = v.lower()
            if k == "expires":
                if max_age_set:
                    # Prefer max-age to expires (like Mozilla)
                    continue
                if v is None:
                    _debug("   missing or invalid value for expires "
                           "attribute: treating as session cookie")
                    continue
            if k == "max-age":
                max_age_set = True
                try:
                    v = int(v)
                except ValueError:
                    _debug("   missing or invalid (non-numeric) value for "
                           "max-age attribute")
                    bad_cookie = True
                    break
                # convert RFC 2965 Max-Age to seconds since epoch
                # XXX Strictly you're supposed to follow RFC 2616
                #   age-calculation rules.  Remember that zero Max-Age is a
                #   request to discard (old and new) cookie, though.
                k = "expires"
                v = self._now + v
            if (k in value_attrs) or (k in boolean_attrs):
                if (v is None and
                    k not in ("port", "comment", "commenturl")):
                    _debug("   missing value for %s attribute" % k)
                    bad_cookie = True
                    break
                standard[k] = v
            else:
                rest[k] = v

        if bad_cookie:
            continue

        cookie_tuples.append((name, value, standard, rest))

    return cookie_tuples
python
{ "resource": "" }
q28473
CookieJar.make_cookies
train
def make_cookies(self, response, request): """Return sequence of Cookie objects extracted from response object.""" # get cookie-attributes for RFC 2965 and Netscape protocols headers = response.info() rfc2965_hdrs = headers.get_all("Set-Cookie2", []) ns_hdrs = headers.get_all("Set-Cookie", []) rfc2965 = self._policy.rfc2965 netscape = self._policy.netscape if ((not rfc2965_hdrs and not ns_hdrs) or (not ns_hdrs and not rfc2965) or (not rfc2965_hdrs and not netscape) or (not netscape and not rfc2965)): return [] # no relevant cookie headers: quick exit try: cookies = self._cookies_from_attrs_set( split_header_words(rfc2965_hdrs), request) except Exception: _warn_unhandled_exception() cookies = [] if ns_hdrs and netscape: try: # RFC 2109 and Netscape cookies ns_cookies = self._cookies_from_attrs_set( parse_ns_headers(ns_hdrs), request) except Exception: _warn_unhandled_exception() ns_cookies = [] self._process_rfc2109_cookies(ns_cookies) # Look for Netscape cookies (from Set-Cookie headers) that match # corresponding RFC 2965 cookies (from Set-Cookie2 headers). # For each match, keep the RFC 2965 cookie and ignore the Netscape # cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are # bundled in with the Netscape cookies for this purpose, which is # reasonable behaviour. if rfc2965: lookup = {} for cookie in cookies: lookup[(cookie.domain, cookie.path, cookie.name)] = None def no_matching_rfc2965(ns_cookie, lookup=lookup): key = ns_cookie.domain, ns_cookie.path, ns_cookie.name return key not in lookup ns_cookies = filter(no_matching_rfc2965, ns_cookies) if ns_cookies: cookies.extend(ns_cookies) return cookies
python
{ "resource": "" }
q28474
CookieJar.set_cookie_if_ok
train
def set_cookie_if_ok(self, cookie, request): """Set a cookie if policy says it's OK to do so.""" self._cookies_lock.acquire() try: self._policy._now = self._now = int(time.time()) if self._policy.set_ok(cookie, request): self.set_cookie(cookie) finally: self._cookies_lock.release()
python
{ "resource": "" }
q28475
CookieJar.set_cookie
train
def set_cookie(self, cookie): """Set a cookie, without checking whether or not it should be set.""" c = self._cookies self._cookies_lock.acquire() try: if cookie.domain not in c: c[cookie.domain] = {} c2 = c[cookie.domain] if cookie.path not in c2: c2[cookie.path] = {} c3 = c2[cookie.path] c3[cookie.name] = cookie finally: self._cookies_lock.release()
python
{ "resource": "" }
q28476
CookieJar.extract_cookies
train
def extract_cookies(self, response, request): """Extract cookies from response, where allowable given the request.""" _debug("extract_cookies: %s", response.info()) self._cookies_lock.acquire() try: self._policy._now = self._now = int(time.time()) for cookie in self.make_cookies(response, request): if self._policy.set_ok(cookie, request): _debug(" setting cookie: %s", cookie) self.set_cookie(cookie) finally: self._cookies_lock.release()
python
{ "resource": "" }
q28477
CookieJar.clear
train
def clear(self, domain=None, path=None, name=None): """Clear some cookies. Invoking this method without arguments will clear all cookies. If given a single argument, only cookies belonging to that domain will be removed. If given two arguments, cookies belonging to the specified path within that domain are removed. If given three arguments, then the cookie with the specified name, path and domain is removed. Raises KeyError if no matching cookie exists. """ if name is not None: if (domain is None) or (path is None): raise ValueError( "domain and path must be given to remove a cookie by name") del self._cookies[domain][path][name] elif path is not None: if domain is None: raise ValueError( "domain must be given to remove cookies by path") del self._cookies[domain][path] elif domain is not None: del self._cookies[domain] else: self._cookies = {}
python
{ "resource": "" }
q28478
CookieJar.clear_session_cookies
train
def clear_session_cookies(self): """Discard all session cookies. Note that the .save() method won't save session cookies anyway, unless you ask otherwise by passing a true ignore_discard argument. """ self._cookies_lock.acquire() try: for cookie in self: if cookie.discard: self.clear(cookie.domain, cookie.path, cookie.name) finally: self._cookies_lock.release()
python
{ "resource": "" }
q28479
CookieJar.clear_expired_cookies
train
def clear_expired_cookies(self): """Discard all expired cookies. You probably don't need to call this method: expired cookies are never sent back to the server (provided you're using DefaultCookiePolicy), this method is called by CookieJar itself every so often, and the .save() method won't save expired cookies anyway (unless you ask otherwise by passing a true ignore_expires argument). """ self._cookies_lock.acquire() try: now = time.time() for cookie in self: if cookie.is_expired(now): self.clear(cookie.domain, cookie.path, cookie.name) finally: self._cookies_lock.release()
python
{ "resource": "" }
q28480
FileCookieJar.load
train
def load(self, filename=None, ignore_discard=False, ignore_expires=False): """Load cookies from a file.""" if filename is None: if self.filename is not None: filename = self.filename else: raise ValueError(MISSING_FILENAME_TEXT) f = open(filename) try: self._really_load(f, filename, ignore_discard, ignore_expires) finally: f.close()
python
{ "resource": "" }
q28481
FileCookieJar.revert
train
def revert(self, filename=None, ignore_discard=False, ignore_expires=False): """Clear all cookies and reload cookies from a saved file. Raises LoadError (or IOError) if reversion is not successful; the object's state will not be altered if this happens. """ if filename is None: if self.filename is not None: filename = self.filename else: raise ValueError(MISSING_FILENAME_TEXT) self._cookies_lock.acquire() try: old_state = copy.deepcopy(self._cookies) self._cookies = {} try: self.load(filename, ignore_discard, ignore_expires) except (LoadError, IOError): self._cookies = old_state raise finally: self._cookies_lock.release()
python
{ "resource": "" }
q28482
LWPCookieJar.as_lwp_str
train
def as_lwp_str(self, ignore_discard=True, ignore_expires=True): """Return cookies as a string of "\\n"-separated "Set-Cookie3" headers. ignore_discard and ignore_expires: see docstring for FileCookieJar.save """ now = time.time() r = [] for cookie in self: if not ignore_discard and cookie.discard: continue if not ignore_expires and cookie.is_expired(now): continue r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie)) return "\n".join(r+[""])
python
{ "resource": "" }
q28483
Address.addr_spec
train
def addr_spec(self): """The addr_spec (username@domain) portion of the address, quoted according to RFC 5322 rules, but with no Content Transfer Encoding. """ nameset = set(self.username) if len(nameset) > len(nameset-parser.DOT_ATOM_ENDS): lp = parser.quote_string(self.username) else: lp = self.username if self.domain: return lp + '@' + self.domain if not lp: return '<>' return lp
python
{ "resource": "" }
q28484
BaseHeader.fold
train
def fold(self, **kwargs):
    """Fold header according to policy.

    The parsed representation of the header is folded according to
    RFC5322 rules, as modified by the policy.  If the parse tree
    contains surrogateescaped bytes, the bytes are CTE encoded using
    the charset 'unknown-8bit".

    Any non-ASCII characters in the parse tree are CTE encoded using
    charset utf-8. XXX: make this a policy setting.

    The returned value is an ASCII-only string possibly containing
    linesep characters, and ending with a linesep character.  The
    string includes the header name and the ': ' separator.

    """
    policy = kwargs.pop('policy')
    # At some point we need to only put fws here if it was in the source.
    header = parser.Header([
        parser.HeaderLabel([
            parser.ValueTerminal(self.name, 'header-name'),
            parser.ValueTerminal(':', 'header-sep')]),
        parser.CFWSList([parser.WhiteSpaceTerminal(' ', 'fws')]),
        self._parse_tree])
    return header.fold(policy=policy)
python
{ "resource": "" }
q28485
nobody_uid
train
def nobody_uid(): """Internal routine to get nobody's uid""" global nobody if nobody: return nobody try: import pwd except ImportError: return -1 try: nobody = pwd.getpwnam('nobody')[2] except KeyError: nobody = 1 + max(x[2] for x in pwd.getpwall()) return nobody
python
{ "resource": "" }
q28486
HTTPServer.server_bind
train
def server_bind(self): """Override server_bind to store the server name.""" socketserver.TCPServer.server_bind(self) host, port = self.socket.getsockname()[:2] self.server_name = socket.getfqdn(host) self.server_port = port
python
{ "resource": "" }
q28487
BaseHTTPRequestHandler.handle
train
def handle(self): """Handle multiple requests if necessary.""" self.close_connection = 1 self.handle_one_request() while not self.close_connection: self.handle_one_request()
python
{ "resource": "" }
q28488
BaseHTTPRequestHandler.send_response
train
def send_response(self, code, message=None): """Add the response header to the headers buffer and log the response code. Also send two standard headers with the server software version and the current date. """ self.log_request(code) self.send_response_only(code, message) self.send_header('Server', self.version_string()) self.send_header('Date', self.date_time_string())
python
{ "resource": "" }
q28489
BaseHTTPRequestHandler.send_response_only
train
def send_response_only(self, code, message=None): """Send the response header only.""" if message is None: if code in self.responses: message = self.responses[code][0] else: message = '' if self.request_version != 'HTTP/0.9': if not hasattr(self, '_headers_buffer'): self._headers_buffer = [] self._headers_buffer.append(("%s %d %s\r\n" % (self.protocol_version, code, message)).encode( 'latin-1', 'strict'))
python
{ "resource": "" }
q28490
BaseHTTPRequestHandler.send_header
train
def send_header(self, keyword, value): """Send a MIME header to the headers buffer.""" if self.request_version != 'HTTP/0.9': if not hasattr(self, '_headers_buffer'): self._headers_buffer = [] self._headers_buffer.append( ("%s: %s\r\n" % (keyword, value)).encode('latin-1', 'strict')) if keyword.lower() == 'connection': if value.lower() == 'close': self.close_connection = 1 elif value.lower() == 'keep-alive': self.close_connection = 0
python
{ "resource": "" }
q28491
BaseHTTPRequestHandler.end_headers
train
def end_headers(self): """Send the blank line ending the MIME headers.""" if self.request_version != 'HTTP/0.9': self._headers_buffer.append(b"\r\n") self.flush_headers()
python
{ "resource": "" }
q28492
BaseHTTPRequestHandler.log_message
train
def log_message(self, format, *args): """Log an arbitrary message. This is used by all other logging functions. Override it if you have specific logging wishes. The first argument, FORMAT, is a format string for the message to be logged. If the format string contains any % escapes requiring parameters, they should be specified as subsequent arguments (it's just like printf!). The client ip and current date/time are prefixed to every message. """ sys.stderr.write("%s - - [%s] %s\n" % (self.address_string(), self.log_date_time_string(), format%args))
python
{ "resource": "" }
q28493
BaseHTTPRequestHandler.log_date_time_string
train
def log_date_time_string(self): """Return the current time formatted for logging.""" now = time.time() year, month, day, hh, mm, ss, x, y, z = time.localtime(now) s = "%02d/%3s/%04d %02d:%02d:%02d" % ( day, self.monthname[month], year, hh, mm, ss) return s
python
{ "resource": "" }
q28494
CGIHTTPRequestHandler.is_cgi
train
def is_cgi(self): """Test whether self.path corresponds to a CGI script. Returns True and updates the cgi_info attribute to the tuple (dir, rest) if self.path requires running a CGI script. Returns False otherwise. If any exception is raised, the caller should assume that self.path was rejected as invalid and act accordingly. The default implementation tests whether the normalized url path begins with one of the strings in self.cgi_directories (and the next character is a '/' or the end of the string). """ collapsed_path = _url_collapse_path(self.path) dir_sep = collapsed_path.find('/', 1) head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] if head in self.cgi_directories: self.cgi_info = head, tail return True return False
python
{ "resource": "" }
q28495
CGIHTTPRequestHandler.is_python
train
def is_python(self, path): """Test whether argument path is a Python script.""" head, tail = os.path.splitext(path) return tail.lower() in (".py", ".pyw")
python
{ "resource": "" }
q28496
newrange.index
train
def index(self, value): """Return the 0-based position of integer `value` in the sequence this range represents.""" try: diff = value - self._start except TypeError: raise ValueError('%r is not in range' % value) quotient, remainder = divmod(diff, self._step) if remainder == 0 and 0 <= quotient < self._len: return abs(quotient) raise ValueError('%r is not in range' % value)
python
{ "resource": "" }
q28497
newrange.__getitem_slice
train
def __getitem_slice(self, slce): """Return a range which represents the requested slce of the sequence represented by this range. """ scaled_indices = (self._step * n for n in slce.indices(self._len)) start_offset, stop_offset, new_step = scaled_indices return newrange(self._start + start_offset, self._start + stop_offset, new_step)
python
{ "resource": "" }
q28498
RobotFileParser.set_url
train
def set_url(self, url): """Sets the URL referring to a robots.txt file.""" self.url = url self.host, self.path = urllib.parse.urlparse(url)[1:3]
python
{ "resource": "" }
q28499
RobotFileParser.read
train
def read(self): """Reads the robots.txt URL and feeds it to the parser.""" try: f = urllib.request.urlopen(self.url) except urllib.error.HTTPError as err: if err.code in (401, 403): self.disallow_all = True elif err.code >= 400: self.allow_all = True else: raw = f.read() self.parse(raw.decode("utf-8").splitlines())
python
{ "resource": "" }