hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0ef084880aa5a686fc78250f687244510146698b | 1,235 | py | Python | slothql/types/fields/shortcuts.py | IndioInc/slothql | 64a574013e249968746044555bd8779ac353b13f | [
"MIT"
] | 2 | 2018-02-09T19:12:00.000Z | 2018-04-13T01:50:15.000Z | slothql/types/fields/shortcuts.py | IndioInc/slothql | 64a574013e249968746044555bd8779ac353b13f | [
"MIT"
] | 29 | 2018-02-02T01:07:07.000Z | 2018-05-28T23:04:28.000Z | slothql/types/fields/shortcuts.py | IndioInc/slothql | 64a574013e249968746044555bd8779ac353b13f | [
"MIT"
] | null | null | null | from slothql.types.scalars import IntegerType, FloatType, StringType, BooleanType, IDType
from slothql.types.json import JsonStringType
from slothql.types.datetime import DateTimeType, DateType, TimeType
from .field import Field
| 24.7 | 89 | 0.687449 | from slothql.types.scalars import IntegerType, FloatType, StringType, BooleanType, IDType
from slothql.types.json import JsonStringType
from slothql.types.datetime import DateTimeType, DateType, TimeType
from .field import Field
class Integer(Field):
def __init__(self, **kwargs):
super().__init__(of_type=IntegerType, **kwargs)
class Float(Field):
def __init__(self, **kwargs):
super().__init__(of_type=FloatType, **kwargs)
class String(Field):
def __init__(self, **kwargs):
super().__init__(of_type=StringType, **kwargs)
class Boolean(Field):
def __init__(self, **kwargs):
super().__init__(of_type=BooleanType, **kwargs)
class ID(Field):
def __init__(self, **kwargs):
super().__init__(of_type=IDType, **kwargs)
class JsonString(Field):
def __init__(self, **kwargs):
super().__init__(of_type=JsonStringType, **kwargs)
class DateTime(Field):
def __init__(self, **kwargs):
super().__init__(of_type=DateTimeType, **kwargs)
class Date(Field):
def __init__(self, **kwargs):
super().__init__(of_type=DateType, **kwargs)
class Time(Field):
def __init__(self, **kwargs):
super().__init__(of_type=TimeType, **kwargs)
| 566 | -10 | 441 |
50578585f1b1c3714387270db94790d2634d62a0 | 2,573 | py | Python | db/createDB.py | DigasNikas/PyRecommender | fb056929bba45431a5fc98691332b9bf91e730bb | [
"MIT"
] | 2 | 2017-05-27T15:06:04.000Z | 2018-11-23T06:43:25.000Z | db/createDB.py | DigasNikas/PyRecommender | fb056929bba45431a5fc98691332b9bf91e730bb | [
"MIT"
] | null | null | null | db/createDB.py | DigasNikas/PyRecommender | fb056929bba45431a5fc98691332b9bf91e730bb | [
"MIT"
] | null | null | null | import psycopg2
import json
import os
import sys
def create_tables():
""" create tables in the PostgreSQL database"""
commands = (
"""DROP TABLE main;
""",
"""
CREATE TABLE main (
package VARCHAR NOT NULL,
category VARCHAR NOT NULL,
downloads BIGINT NOT NULL,
description TEXT NOT NULL,
developer VARCHAR NOT NULL
);
""")
conn = None
try:
# read the connection parameters
params = "dbname='app_data' user='postgres' host='localhost' password='postgres'"
# connect to the PostgreSQL server
conn = psycopg2.connect(params)
cur = conn.cursor()
# create table one by one
for command in commands:
cur.execute(command)
# close communication with the PostgreSQL database server
cur.close()
# commit the changes
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
if __name__ == '__main__':
create_tables()
read_jason(sys.argv[1]) | 32.1625 | 107 | 0.551108 | import psycopg2
import json
import os
import sys
def read_jason(path):
conn = None
part_files = os.listdir(path)
try:
# read the connection parameters
params = "dbname='app_data' user='postgres' host='localhost' password='postgres'"
# connect to the PostgreSQL server
conn = psycopg2.connect(params)
cur = conn.cursor()
for part_file in part_files:
part = path + part_file
print ("reading "+part)
with open(part, 'r') as p:
alllines = p.readlines()
for line in alllines:
json_line = ""
json_line = json.loads(line)
cur.execute('''INSERT INTO main (package,category,downloads,description,developer)
VALUES (%s,%s,%s,%s,%s);''',
(json_line['package'], json_line['category'],
json_line['downloads'], json_line['description'], json_line['developer']))
conn.commit()
print part + "\n"
# close communication with the PostgreSQL database server
cur.close()
# commit the changes
conn.commit()
print("operation commited")
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
print("operation commited")
def create_tables():
""" create tables in the PostgreSQL database"""
commands = (
"""DROP TABLE main;
""",
"""
CREATE TABLE main (
package VARCHAR NOT NULL,
category VARCHAR NOT NULL,
downloads BIGINT NOT NULL,
description TEXT NOT NULL,
developer VARCHAR NOT NULL
);
""")
conn = None
try:
# read the connection parameters
params = "dbname='app_data' user='postgres' host='localhost' password='postgres'"
# connect to the PostgreSQL server
conn = psycopg2.connect(params)
cur = conn.cursor()
# create table one by one
for command in commands:
cur.execute(command)
# close communication with the PostgreSQL database server
cur.close()
# commit the changes
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
if __name__ == '__main__':
create_tables()
read_jason(sys.argv[1]) | 1,385 | 0 | 23 |
37b72cb9ac1bd75472e0e7f41ef60e09200233d3 | 1,821 | py | Python | mlqm/models/GaussianBoundaryCondition.py | Nuclear-Physics-with-Machine-Learning/MLQM | 69472921b130abb530b11840ab8c1b8c608b5089 | [
"Apache-2.0"
] | 8 | 2021-05-13T13:58:56.000Z | 2022-02-28T22:11:06.000Z | mlqm/models/GaussianBoundaryCondition.py | coreyjadams/AI-for-QM | 69472921b130abb530b11840ab8c1b8c608b5089 | [
"Apache-2.0"
] | 1 | 2021-09-23T01:44:26.000Z | 2021-09-23T17:51:43.000Z | mlqm/models/GaussianBoundaryCondition.py | coreyjadams/AI-for-QM | 69472921b130abb530b11840ab8c1b8c608b5089 | [
"Apache-2.0"
] | 1 | 2022-03-15T07:18:24.000Z | 2022-03-15T07:18:24.000Z | import tensorflow as tf
import numpy
class GaussianBoundaryCondition(tf.keras.layers.Layer):
"""A simple module for applying an exponential boundary condition in N dimensions
Note that the exponent is *inside* of the power of 2 in the exponent.
This is to prevent divergence when it is trainable and goes negative.
Extends:
tf.keras.layers.Layer
"""
def __init__(self, n : int, exp : float=0.1, trainable : bool=True, dtype = tf.float64):
"""Initializer
Create a new exponentional boundary condition
Arguments:
n {int} -- Number of dimensions
Keyword Arguments:
exp {float} -- Starting value of exponents. Must be broadcastable to the number of dimensions (default: {1.0})
trainable {bool} -- Whether to allow the boundary condition to be trainable (default: {True})
"""
tf.keras.layers.Layer.__init__(self, dtype=dtype)
self.mean_subtract = True
if n < 1:
raise Exception("Dimension must be at least 1 for GaussianBoundaryCondition")
# This is the parameter controlling the shape of the exponent:
self.exponent = tf.Variable(exp, trainable=True, dtype=dtype)
self.exponent2 = tf.Variable(0.02, trainable=True, dtype=dtype)
@tf.function
| 34.358491 | 123 | 0.643053 | import tensorflow as tf
import numpy
class GaussianBoundaryCondition(tf.keras.layers.Layer):
"""A simple module for applying an exponential boundary condition in N dimensions
Note that the exponent is *inside* of the power of 2 in the exponent.
This is to prevent divergence when it is trainable and goes negative.
Extends:
tf.keras.layers.Layer
"""
def __init__(self, n : int, exp : float=0.1, trainable : bool=True, dtype = tf.float64):
"""Initializer
Create a new exponentional boundary condition
Arguments:
n {int} -- Number of dimensions
Keyword Arguments:
exp {float} -- Starting value of exponents. Must be broadcastable to the number of dimensions (default: {1.0})
trainable {bool} -- Whether to allow the boundary condition to be trainable (default: {True})
"""
tf.keras.layers.Layer.__init__(self, dtype=dtype)
self.mean_subtract = True
if n < 1:
raise Exception("Dimension must be at least 1 for GaussianBoundaryCondition")
# This is the parameter controlling the shape of the exponent:
self.exponent = tf.Variable(exp, trainable=True, dtype=dtype)
self.exponent2 = tf.Variable(0.02, trainable=True, dtype=dtype)
@tf.function
def call(self, inputs):
# Mean subtract for all particles:
if self.mean_subtract:
mean = tf.reduce_mean(inputs, axis=1)
xinputs = inputs - mean[:,None,:]
else:
xinputs = inputs
exponent_term1 = tf.reduce_sum((xinputs)**2, axis=(1,2))
exponent_term2 = tf.reduce_sum((xinputs)**4, axis=(1,2))
result = - self.exponent * exponent_term1 - self.exponent2*exponent_term2
return tf.reshape(result, [-1,1])
| 472 | 0 | 26 |
a380d47347ebec46f0b4459ad3d764d33d8e18ac | 70,890 | py | Python | gridsim/grid_sim_linear_program.py | mfastudillo/energysimulation | 09c40bc52fd41e00be0fcafca40e2f5cb9e8ef8f | [
"Apache-2.0"
] | null | null | null | gridsim/grid_sim_linear_program.py | mfastudillo/energysimulation | 09c40bc52fd41e00be0fcafca40e2f5cb9e8ef8f | [
"Apache-2.0"
] | null | null | null | gridsim/grid_sim_linear_program.py | mfastudillo/energysimulation | 09c40bc52fd41e00be0fcafca40e2f5cb9e8ef8f | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simulate cost-optimal electrical grid construction under different policies.
Code contains GridElements: Power Sources, Demands and Storage. Grid
Elements are placed in different grid regions. Grid regions are
separated from each other so only sources with grid_region_id == x can
power Demands with grid_region_id == x
The costs of constructing GridElements are based upon:
nameplate_unit_cost: The cost to build one unit (e.g. Megawatt) of power.
variable_unit_cost: The cost to provide one unit of power over time.
(e.g. Megawatt-Hour)
The code simulates the grid over multiple time-slices. e.g. Hourly
over a one year period which would map to 24 * 365 = 8760 time-slices.
The code is based upon a linear-program which contains:
- An objective which is to minimize costs.
- Constraints which must be met before the solution can converge.
- conserve_power_constraint: Ensure that sum(power[t]) >=
demand[t] for all t in each grid-region
This code will work with any set of consistent units. For the
purposes of documentation, the units chosen are:
Power: Megawatts
Time: Hours
(Derived) Energy = Power * Time => Megawatt-Hours
Cost: Dollars ($)
CO2 Emissions: Tonnes
(Derived) CO2 Emitted per Energy => Tonnes / Megawatt-Hours
Carbon Tax: $ / Tonnes
"""
import logging
import numpy as np
from ortools.linear_solver import pywraplp
class Constraint(object):
"""Holds an LP Constraint object with extra debugging information.
Attributes:
constraint: underlying pywraplp.Constraint object
name: name of constraint
formula: hashtable that maps names of variables to coefficients
pywraplp.Constraint doesn't surface a list of variables/coefficients, so
we have to keep track ourselves.
"""
def __init__(self, lp, lower_bound, upper_bound, name=None, debug=False):
"""Initializes Constraint.
Args:
lp: LinearProgramContainer that wraps the LP solver which
creates the constraint.
lower_bound: (float) Lower bound on product between coeffs and variables.
upper_bound: (float) Upper bound on product between coeffs and variables.
name: Optional human readable string.
debug: Boolean which if set, logs constraint info.
"""
self.constraint = lp.solver.Constraint(lower_bound, upper_bound)
self.name = name
self.formula = {}
self.debug = debug
if self.debug:
logging.debug("CONSTRAINT: %f <= %s <= %f", lower_bound, name, upper_bound)
def set_coefficient(self, variable, coefficient):
"""Adds variable * coefficient to LP Coefficient.
Wraps pywrap.SetCoefficient(variable, coefficient) method and
saves variable, coefficient to formula dict.
After calling this method, Objective += variable * coefficient
Args:
variable: (Lp Variable) The Variable multiplicand.
coefficient: (float) The coefficient multiplicand.
"""
self.constraint.SetCoefficient(variable, coefficient)
self.formula[variable.name()] = coefficient
if self.debug:
logging.debug("%s += %s * %f", self.name, variable.name(), coefficient)
class Objective(object):
"""Holds an LP Objective object with extra debugging information.
Attributes:
objective: Underlying pywraplp.Objective object.
"""
def __init__(self, lp, minimize=True):
"""Initializes Objective.
Args:
lp: LinearProgramContainer that wraps the LP solver which
creates the Objective.
minimize: boolean, True if objective should be minimized
otherwise objective is maximizied.
"""
self.objective = lp.solver.Objective()
self.formula = {}
if minimize:
self.objective.SetMinimization()
else:
self.objective.SetMaximization()
def set_coefficient(self, variable, coefficient):
"""Adds variable * coefficient to LP Objective.
Wraps pywrap.SetCoefficient(variable, coefficient) method and
saves variable, coefficient to formula dict.
After calling this method, Objective += variable * coefficient
Args:
variable: (Lp Variable) The Variable multiplicand.
coefficient: (float) The coefficient multiplicand.
"""
self.objective.SetCoefficient(variable, coefficient)
self.formula[variable.name()] = coefficient
class GridDemand(object):
"""Simple place-holder object which represents load on the grid."""
def __init__(self, name, grid_region_id=0):
"""Initializes GridDemand object.
Args:
name: name of the demand object
grid_region_id: An int specifying the grid region of the demand.
Only sources with the same grid_region_id can power this demand.
"""
self.name = name
self.grid_region_id = grid_region_id
class GridSource(object):
"""Denotes Costs, co2, region, power and energy limitations of a power source.
Grid Sources may either be dispatchable or non-dispatchable.
- Dispatchable sources may power at any time, e.g. fossil fuel plants.
- Non-dispatchable sources are dependent on the environment to
generate power. e.g. Solar or Wind plants.
If there is a time-slice power profile indexed by the same name as
this source in LinearProgramContainer.profiles. The source is
considered Non-dispatchable. Otherwise, it is considered dispatchable.
Attributes:
name: (str) name of the object.
nameplate_unit_cost: (float) Cost to build a unit of
dispatchable power. ($ / Megawatt of capacity)
variable_unit_cost: (float) Cost to supply a unit of dispatchable power
per time. ($ / Megawatt-Hour)
grid_region_id: An int specifying the grid region of the source.
Only demands with the same grid_region_id can sink the power
from this source.
max_power: (float) Optional Maximum power which object can supply.
(Megawatt). Set < 0 if there is no limit.
max_energy: (float) Optional maximum energy which object can
supply. (Megawatt-Hours) Set < 0 if there is no limit.
co2_per_electrical_energy: (float) (Tonnes of CO2 / Megawatt Hour).
power_coefficient: (float) ratio of how much power is supplied by
object vs. how much power gets on the grid. 0 <
power_coefficient < 1. Nominally 1.0.
is_rps_source: Boolean which denotes if the source is included
in the Renewable Portfolio Standard.
solver: Either a _GridSourceDispatchableSolver or
_GridSourceNonDispatchableSolver. Used to setup LP
Constraints, Objectives and variables for the source and to
report results.
timeslice_variables: An array of LP variables, one per time-slice
of simulation. Array is mapped so that variable for
time-slice t is at index t.
e.g.
Variable for first time-slice is timeslice_variable[0].
Variable for last time-slice is timeslice_variable[-1].
Variable for time-slice at time t is timeslice_variable[t].
Only gets declared if GridSource is a DispatchableSource.
nameplate_variable: LP variable representing the nameplate or
maximum power the GridSource can output at any given
time.
"""
def __init__(
self,
name,
nameplate_unit_cost,
variable_unit_cost,
grid_region_id=0,
max_power=-1.0,
max_energy=-1.0,
co2_per_electrical_energy=0,
power_coefficient=1.0,
is_rps_source=False,
):
"""Sets characteristics of a GridSource object.
Args:
name: (str) name of the object.
nameplate_unit_cost: (float) Cost to build a unit of
dispatchable power. ($ / Megawatt of capacity)
variable_unit_cost: (float) Cost to supply a unit of dispatchable power
per time. ($ / Megawatt-Hour)
grid_region_id: An int specifying the grid region of the demand.
Only demands with the same grid_region_id can sink the power
from this source.
max_power: (float) Maximum power which object can supply. (Megawatt)
max_energy: (float) Maximum energy which object can
supply. (Megawatt-Hours)
co2_per_electrical_energy: (float) (Tonnes of CO2 / Megawatt Hour).
power_coefficient: (float) ratio of how much power is supplied by
object vs. how much power gets on the grid. 0 <
power_coefficient < 1. Nominally 1.0.
is_rps_source: Boolean which denotes if the source is included
in the Renewable Portfolio Standard.
"""
self.name = name
self.nameplate_unit_cost = nameplate_unit_cost
self.variable_unit_cost = variable_unit_cost
self.max_energy = max_energy
self.max_power = max_power
self.grid_region_id = grid_region_id
self.co2_per_electrical_energy = co2_per_electrical_energy
self.power_coefficient = power_coefficient
self.is_rps_source = is_rps_source
self.solver = None
self.timeslice_variables = None
self.nameplate_variable = None
def configure_lp_variables_and_constraints(self, lp):
"""Declare lp variables, and set constraints.
Args:
lp: The LinearProgramContainer.
Defers to self.solver which properly configures variables and
constraints in this object.
See Also:
_GridSourceDispatchableSolver, _GridSourceNonDispatchableSolver
"""
self.solver.configure_lp_variables_and_constraints(lp)
def post_process(self, lp):
"""Update lp post_processing result variables.
This is done post lp.solve() so that sanity data checks can be done
on RPS before returning results.
Args:
lp: The LinearProgramContainer where the post processing variables reside.
"""
if lp.rps_percent > 0.0 and self.is_rps_source:
lp.rps_total[self.grid_region_id] += self.get_solution_values()
else:
lp.non_rps_total[self.grid_region_id] += self.get_solution_values()
def get_solution_values(self):
"""Gets the linear program solver results.
Must be called after lp.solve() to ensure solver has properly
converged and has generated results.
Returns:
np.array of solutions for each timeslice variable.
"""
return self.solver.get_solution_values()
def get_nameplate_solution_value(self):
"""Gets the linear program solver results for nameplate.
Must be called after lp.solve() to ensure solver has properly
converged and has generated results.
Raises:
RuntimeError: If called before LinearProgramContainer.solve().
Returns:
Float value representing solved nameplate value.
"""
nameplate_variable = self.nameplate_variable
if nameplate_variable is None:
raise RuntimeError("Get_nameplate_solution_value called before solve().")
return nameplate_variable.solution_value()
class _GridSourceDispatchableSolver(object):
"""Power Source which can provide power at any time.
Attributes:
source: GridSource object where self generates LP variables
"""
def configure_lp_variables_and_constraints(self, lp):
"""Declare lp variables, and set constraints in grid_source.
Args:
lp: The LinearProgramContainer.
Variables Declared include:
- timeslice variables: represent how much power the source is
outputting at each time-slice.
- nameplate variable: represents the maximum power sourced.
The values of these variables are solved by the linear program to
optimize costs subject to some constraints.
The overall objective is to minimize cost. Herein, the overall
cost is increased by:
- nameplate cost: nameplate_unit_cost * nameplate variable
- variable cost: variable_unit_cost * sum(timeslice_variables)
- carbon cost: lp.carbon_tax * sum(timeslice_variables) *
co2_per_electrical_energy
Since variable and carbon costs accrue on a periodic basis, we
multiply them by lp.cost_of_money to make periodic and
one-time costs comparable.
Constraints created / modified here include:
- Maximum Energy: Ensure sum timeslice-variables < max_energy if
self.max_energy >= 0.
This constraint is only for sources where there are limits
to the total amount of generation which can be built.
E.g. There are only a limited number of places where one can
build hydropower.
- Maximum Power: Ensure no timeslice-variables > max_power if
self.max_power is >= 0.
This constraint is only for sources where there are limits
to the maximum amount of power which can be built.
E.g. hydropower which can only discharge at a maximum rate.
- Conserve Power: Ensure that sum(power) > demand for all
time-slices. Colloquially called "Keeping the Lights on."
- Ensure nameplate variable > power(t) for all t. We must make
sure that we've priced out a plant which can supply the
requested power.
"""
source = self.source
# setup LP variables.
source.timeslice_variables = lp.declare_timeslice_variables(
source.name, source.grid_region_id
)
source.nameplate_variable = lp.declare_nameplate_variable(
source.name, source.grid_region_id
)
solver = lp.solver
# Configure maximum energy if it is >= 0. Otherwise do not
# create a constraint.
max_energy_constraint = (
lp.constraint(0.0, source.max_energy) if source.max_energy >= 0 else None
)
# Configure maximum nameplate if it is >= 0. Otherwise do not
# create a constraint.
max_power = source.max_power
if max_power >= 0:
lp.constraint(0.0, max_power).set_coefficient(
source.nameplate_variable, 1.0
)
# Total_cost includes nameplate cost.
cost_objective = lp.minimize_costs_objective
cost_objective.set_coefficient(
source.nameplate_variable, source.nameplate_unit_cost
)
# Add timeslice variables to coefficients.
for t, var in enumerate(source.timeslice_variables):
# Total_cost also includes variable and carbon cost.
variable_coef = (
source.variable_unit_cost
+ source.co2_per_electrical_energy * lp.carbon_tax
) * lp.cost_of_money
cost_objective.set_coefficient(var, variable_coef)
# Keep the lights on at all times. Power_coefficient is usually
# 1.0, but is -1.0 for GridStorage.sink and discharge_efficiency
# for GridStorage.source.
lp.conserve_power_constraint[source.grid_region_id][t].set_coefficient(
var, source.power_coefficient
)
# Constrain rps_credit if needed.
if source.is_rps_source:
lp.rps_source_constraints[source.grid_region_id][t].set_coefficient(
var, source.power_coefficient
)
# Ensure total energy is less than source.max_energy.
if max_energy_constraint is not None:
max_energy_constraint.set_coefficient(var, 1.0)
# Ensure power doesn't exceed source.max_power.
if max_power >= 0:
lp.constraint(0.0, max_power).set_coefficient(var, 1.0)
# Nameplate must be bigger than largest power.
# If nameplate_unit_cost > 0, Cost Optimization will push
# Nameplate near max(timeslice_variables).
nameplate_constraint = lp.constraint(0.0, solver.infinity())
nameplate_constraint.set_coefficient(var, -1.0)
nameplate_constraint.set_coefficient(source.nameplate_variable, 1.0)
# Constrain maximum nameplate if max_power is set.
if source.max_power >= 0:
lp.constraint(0.0, source.max_power).set_coefficient(
source.nameplate_variable, 1.0
)
def get_solution_values(self):
"""Gets the linear program solver results.
Must be called after lp.solve() to ensure solver has properly
converged and has generated results.
Raises:
RuntimeError: If called before LinearProgramContainer.solve().
Returns:
np.array of solutions for each timeslice variable.
"""
timeslice_variables = self.source.timeslice_variables
if timeslice_variables is None:
raise RuntimeError("get_solution_values called before solve.")
return np.array([v.solution_value() for v in timeslice_variables])
class _GridSourceNonDispatchableSolver(object):
"""Power Source which can provide nameplate multiple of its profile.
Attributes:
source: GridSource object where self generates LP variables
profile: pandas Series which represents what fraction of the
nameplate the source can provide at any given time.
"""
def configure_lp_variables_and_constraints(self, lp):
"""Declare lp variables, and set constraints in grid_source.
Args:
lp: The LinearProgramContainer.
Variables Declared include:
- nameplate variable: represents the maximum power sourced.
The values of these variables are solved by the linear program to
optimize costs subject to some constraints.
The overall objective is to minimize cost. Herein, the overall
cost is increased by:
- nameplate cost: nameplate_unit_cost * nameplate variable
- variable cost: variable_unit_cost * nameplate variable * sum(profile)
- carbon cost: lp.carbon_tax * nameplate variable * sum(profile)
Since variable and carbon costs accrue on a yearly basis, we
multiply them by lp.cost_of_money to make yearly and
one-time costs comparable.
Constraints created / modified here include:
- Maximum Energy: Ensure nameplate * sum(profile) < max_energy if
self.max_energy >= 0.
This constraint is only for sources where there are limits
to the total amount of generation which can be built.
E.g. There are only a limited number of places where one can
build hydropower.
- Maximum Power: Ensure nameplate <= max_power if
self.max_power >= 0.
This constraint is only for sources where there are limits
to the maximum amount of power which can be built.
E.g. hydropower which can only discharge at a maximum rate.
- Conserve Power: Ensure that sum(power) > demand for all
time-slices. Colloquially called "Keeping the Lights on."
"""
source = self.source
# setup LP variables.
source.nameplate_variable = lp.declare_nameplate_variable(
source.name, source.grid_region_id
)
sum_profile = sum(self.profile)
# Configure maximum energy if it is >= 0. Otherwise do not
# create a constraint.
if source.max_energy >= 0:
lp.constraint(0.0, source.max_energy).set_coefficient(
source.nameplate_variable, sum_profile
)
# Configure maximum energy if it is >= 0. Otherwise do not
# create a constraint.
max_power = source.max_power
if max_power >= 0:
lp.constraint(0.0, max_power).set_coefficient(
source.nameplate_variable, 1.0
)
# Total_cost includes nameplate cost.
cost_objective = lp.minimize_costs_objective
cost_coefficient = source.nameplate_unit_cost + lp.cost_of_money * (
source.variable_unit_cost * sum_profile
+ source.co2_per_electrical_energy * sum_profile * lp.carbon_tax
)
cost_objective.set_coefficient(source.nameplate_variable, cost_coefficient)
# Add timeslice variables to coefficients.
for t, profile_t in enumerate(self.profile):
# Keep the lights on at all times.
try:
constraint = lp.conserve_power_constraint[source.grid_region_id]
except KeyError:
raise KeyError(
"No Demand declared in grid_region %d." % (source.grid_region_id)
)
constraint[t].set_coefficient(source.nameplate_variable, profile_t)
# Constrain rps_credit if needed.
if source.is_rps_source:
lp.rps_source_constraints[source.grid_region_id][t].set_coefficient(
source.nameplate_variable, profile_t
)
def get_solution_values(self):
"""Gets the linear program solver results.
Must be called after lp.solve() to ensure solver has properly
converged and has generated results.
Raises:
RuntimeError: If called before LinearProgramContainer.solve().
Returns:
np.array of solutions for each timeslice variable.
"""
nameplate_variable = self.source.nameplate_variable
if nameplate_variable is None:
raise RuntimeError("get_solution_values called before solve.")
return nameplate_variable.solution_value() * self.profile.values
class GridStorage(object):
"""Stores energy from the grid and returns it when needed subject to losses.
Attributes:
name: A string which is the name of the object.
storage_nameplate_cost: A float which is the cost per nameplate of
energy storage. E.g. The cost of batteries.
charge_nameplate_cost: A float which is the cost per nameplate
power to charge the storage. E.g. The rectifier cost to convert
an AC grid to DC storage.
discharge_nameplate_cost: A float which is the cost per nameplate
power to recharge the grid. E.g. The cost of a power inverter to
convert DC storage back to AC
grid_region_id: An int specifying the grid region of the storage.
The storage can only store energy generated by sources with the
same grid_region_id. Only demands with the same grid_region_id
can sink power from this.
charge_efficiency: A float ranging from 0.0 - 1.0 which describes
the energy loss between the grid and the storage element. 0.0
means complete loss, 1.0 means no loss.
storage_efficiency: A float ranging from 0.0 - 1.0 which describes
how much stored energy remains from previous stored energy after
one time-cycle. 1.0 means no loss. 0.0 means all stored energy
is lost.
discharge_efficiency: A float ranging from 0.0 - 1.0 which describes
the energy loss between storage and grid when recharging the grid.
0.0 means complete loss, 1.0 means no loss.
max_charge_power: A float which represents the maximum power that
can charge storage (calculated before any efficiency losses.).
A value < 0 means there is no charge power limit.
max_discharge_power: A float which represents the maximum power
that can discharge storage (calculated before any efficiency
losses.). A value < 0 means there is no discharge power limit.
max_storage: An optional float which represents the maximum energy
that can be stored. A value < 0 means there is no maximum
storage limit.
is_rps: Boolean; if true, keeps track of rps_credit as storage is
charged / discharged. Amount charging[t] is subtracted from
rps_credit[t] from rps_credit[t]. Amount discharging[t] is
added to rps_credit[t]. If false, no rps_credits are adjusted.
"""
def configure_lp_variables_and_constraints(self, lp):
"""Declare lp variables, and set constraints.
Args:
lp: LinearProgramContainer, contains lp solver and constraints.
"""
# Set up LP variables.
self.energy_variables = lp.declare_timeslice_variables(
self.name, self.grid_region_id
)
if self.storage_nameplate_cost:
self.energy_nameplate = lp.declare_nameplate_variable(
self.name, self.grid_region_id
)
# Set up source and configure LP variables.
self.source = GridSource(
name=self.name + " source",
nameplate_unit_cost=self.discharge_nameplate_cost,
variable_unit_cost=0.0,
grid_region_id=self.grid_region_id,
max_power=self.max_discharge_power,
co2_per_electrical_energy=0.0,
power_coefficient=self.discharge_efficiency,
is_rps_source=self.is_rps,
)
self.source.solver = _GridSourceDispatchableSolver(self.source)
self.source.configure_lp_variables_and_constraints(lp)
# Set up sink and configure LP variables.
self.sink = GridSource(
name=self.name + " sink",
nameplate_unit_cost=self.discharge_nameplate_cost,
variable_unit_cost=0.0,
grid_region_id=self.grid_region_id,
max_power=self.max_charge_power,
co2_per_electrical_energy=0.0,
power_coefficient=-1.0,
is_rps_source=self.is_rps,
)
self.sink.solver = _GridSourceDispatchableSolver(self.sink)
self.sink.configure_lp_variables_and_constraints(lp)
# Add energy nameplate costs to the objective. Other costs are
# added by source/sink.configure_lp_variables_and_constraints.
if self.storage_nameplate_cost:
nameplate = self.energy_nameplate
lp.minimize_costs_objective.set_coefficient(
nameplate, self.storage_nameplate_cost
)
# Constrain Energy Storage to be Energy Last time plus sink minus source.
# Storage is circular so variables at t=0 depend on variables at t=-1
# which is equivalent to last value in python indexing scheme.
variables = self.energy_variables
for t in lp.time_index_iterable:
# Ce = charge_efficiency,
# Se = storage_efficiency.
# Stored[i] = se * Stored[i-1] + ce * sink[i-1] - source[i-1]
# 0 = -Stored[i] + se * Stored[i-1] + ce * sink[i-1] - source[i-1]
c = lp.constraint(0.0, 0.0)
c.set_coefficient(variables[t], -1.0) # -Stored[i]
c.set_coefficient(variables[t - 1], self.storage_efficiency)
# Source and sink are relative to the grid, so opposite here:
# Sink adds to storage, source subtracts from storage.
c.set_coefficient(self.source.timeslice_variables[t - 1], -1.0)
c.set_coefficient(
self.sink.timeslice_variables[t - 1], self.charge_efficiency
)
# Ensure nameplate is larger than stored_value.
if self.storage_nameplate_cost:
nameplate_constraint = lp.constraint(0.0, lp.solver.infinity())
nameplate_constraint.set_coefficient(nameplate, 1.0)
nameplate_constraint.set_coefficient(variables[t], -1.0)
# Constrain maximum storage if max_storage >= 0
if self.max_storage >= 0.0:
max_storage_constraint = lp.constraint(0.0, self.max_storage)
max_storage_constraint.set_coefficient(variables[t], 1.0)
def post_process(self, lp):
"""Update lp post_processing result variables.
This is done post lp.solve() so that sanity data checks can be done
on RPS before returning results.
Args:
lp: The LinearProgramContainer where the post processing variables reside.
"""
sink_vals = self.sink.get_solution_values()
source_vals = self.source.get_solution_values() * self.discharge_efficiency
if self.is_rps:
lp.rps_total[self.grid_region_id] += source_vals - sink_vals
else:
lp.non_rps_total[self.grid_region_id] += source_vals - sink_vals
def get_nameplate_solution_value(self):
"""Gets the linear program solver results for nameplate.
Must be called after lp.solve() to ensure solver has properly
converged and has generated results.
Raises:
RuntimeError: If called before LinearProgramContainer.solve().
Returns:
Float value representing solved nameplate value.
"""
if self.storage_nameplate_cost:
nameplate_variable = self.energy_nameplate
if nameplate_variable is None:
raise RuntimeError(
"Get_nameplate_solution_value called before solve()."
)
return nameplate_variable.solution_value()
else:
return max(self.get_solution_values())
def get_solution_values(self):
"""Gets the linear program solver results.
Must be called after lp.solve() to ensure solver has properly
converged and has generated results.
Raises:
RuntimeError: If called before LinearProgramContainer.solve().
Returns:
np.array of solutions for each timeslice variable.
"""
timeslice_variables = self.energy_variables
if timeslice_variables is None:
raise RuntimeError("get_solution_values called before solve.")
return np.array([v.solution_value() for v in timeslice_variables])
class GridRecStorage(object):
    """Stores energy from the grid and returns it when needed subject to losses.

    This is a wrapper around two GridStorage objects, one which stores
    "clean" energy (is_rps) and one which stores "dirty" energy (not
    is_rps). There is a need for both types of storage to keep track of
    renewable energy credits.

    Attributes:
      name: A string which is the name of the object.
      storage_nameplate_cost: A float which is the cost per nameplate of
        energy storage. E.g. The cost of batteries.
      charge_nameplate_cost: A float which is the cost per nameplate
        power to charge the storage. E.g. The rectifier cost to convert
        an AC grid to DC storage.
      discharge_nameplate_cost: A float which is the cost per nameplate
        power to recharge the grid. E.g. The cost of a power inverter to
        convert DC storage back to AC.
      grid_region_id: An int specifying the grid region of the storage.
        The storage can only store energy generated by sources with the
        same grid_region_id. Only demands with the same grid_region_id
        can sink power from this.
      charge_efficiency: A float ranging from 0.0 - 1.0 which describes
        the energy loss between the grid and the storage element. 0.0
        means complete loss, 1.0 means no loss.
      storage_efficiency: A float ranging from 0.0 - 1.0 which describes
        how much stored energy remains from previous stored energy after
        one time-cycle. 1.0 means no loss. 0.0 means all stored energy
        is lost.
      discharge_efficiency: A float ranging from 0.0 - 1.0 which describes
        the energy loss between storage and grid when recharging the grid.
        0.0 means complete loss, 1.0 means no loss.
      max_charge_power: A float which represents the maximum power that
        can charge storage (calculated before any efficiency losses.).
        A value < 0 means there is no charge power limit.
      max_discharge_power: A float which represents the maximum power
        that can discharge storage (calculated before any efficiency
        losses.). A value < 0 means there is no discharge power limit.
      max_storage: An optional float which represents the maximum energy
        that can be stored. A value < 0 means there is no maximum
        storage limit.
      rec_storage: GridStorage object which stores "clean" energy.
      no_rec_storage: GridStorage object which stores "dirty" energy.
    """
    def configure_lp_variables_and_constraints(self, lp):
        """Declare lp variables, and set constraints.

        Builds the two wrapped GridStorage objects (clean / dirty), then
        declares aggregate nameplate variables (energy, charge power,
        discharge power) whose costs and limits apply to the *sum* of
        both wrapped stores.

        Args:
          lp: LinearProgramContainer, contains lp solver and constraints.
        """
        # For rec_storage and no_rec_storage storage, set all costs to 0
        # and with no limits. Calculate costs and limits after
        # declaration.
        self.rec_storage = GridStorage(
            name=self.name + " REC_STORAGE",
            storage_nameplate_cost=0,
            grid_region_id=self.grid_region_id,
            charge_efficiency=self.charge_efficiency,
            discharge_efficiency=self.discharge_efficiency,
            storage_efficiency=self.storage_efficiency,
            is_rps=True,
        )
        self.no_rec_storage = GridStorage(
            name=self.name + " NO_REC_STORAGE",
            storage_nameplate_cost=0,
            grid_region_id=self.grid_region_id,
            charge_efficiency=self.charge_efficiency,
            discharge_efficiency=self.discharge_efficiency,
            storage_efficiency=self.storage_efficiency,
            is_rps=False,
        )
        self.rec_storage.configure_lp_variables_and_constraints(lp)
        self.no_rec_storage.configure_lp_variables_and_constraints(lp)
        # Calculate costs and limits based on the sum of both rec_storage
        # and no_rec_storage.
        # Set up LP variables.
        self.energy_variables = lp.declare_timeslice_variables(
            self.name, self.grid_region_id
        )
        self.energy_nameplate = lp.declare_nameplate_variable(
            self.name, self.grid_region_id
        )
        self.charge_nameplate = lp.declare_nameplate_variable(
            self.name + " charge nameplate", self.grid_region_id
        )
        self.discharge_nameplate = lp.declare_nameplate_variable(
            self.name + " discharge nameplate", self.grid_region_id
        )
        # Set limits if needed.
        if self.max_storage >= 0:
            lp.constraint(0.0, self.max_storage).set_coefficient(
                self.energy_nameplate, 1.0
            )
        if self.max_charge_power >= 0:
            lp.constraint(0.0, self.max_charge_power).set_coefficient(
                self.charge_nameplate, 1.0
            )
        if self.max_discharge_power >= 0:
            lp.constraint(0.0, self.max_discharge_power).set_coefficient(
                self.discharge_nameplate, 1.0
            )
        # Add energy nameplate costs to the objective.
        lp.minimize_costs_objective.set_coefficient(
            self.energy_nameplate, self.storage_nameplate_cost
        )
        lp.minimize_costs_objective.set_coefficient(
            self.charge_nameplate, self.charge_nameplate_cost
        )
        lp.minimize_costs_objective.set_coefficient(
            self.discharge_nameplate, self.discharge_nameplate_cost
        )
        rec_storage_energy_variables = self.rec_storage.energy_variables
        no_rec_storage_energy_variables = self.no_rec_storage.energy_variables
        for t in lp.time_index_iterable:
            # Ensure nameplate is >= sum(stored_values)[t].
            nameplate_constraint = lp.constraint(0.0, lp.solver.infinity())
            nameplate_constraint.set_coefficient(self.energy_nameplate, 1.0)
            nameplate_constraint.set_coefficient(rec_storage_energy_variables[t], -1.0)
            nameplate_constraint.set_coefficient(
                no_rec_storage_energy_variables[t], -1.0
            )
            # NOTE(review): the four aliases below are loop-invariant and
            # could be hoisted out of the loop (no behavior change).
            rec_storage_charge_variables = self.rec_storage.sink.timeslice_variables
            no_rec_storage_charge_variables = (
                self.no_rec_storage.sink.timeslice_variables
            )
            rec_storage_discharge_variables = (
                self.rec_storage.source.timeslice_variables
            )
            no_rec_storage_discharge_variables = (
                self.no_rec_storage.source.timeslice_variables
            )
            # charge_nameplate >= net charging power at t:
            # sum(charge[t]) - sum(discharge[t]).
            max_charge_constraint = lp.constraint(0.0, lp.solver.infinity())
            max_charge_constraint.set_coefficient(self.charge_nameplate, 1.0)
            max_charge_constraint.set_coefficient(rec_storage_charge_variables[t], -1.0)
            max_charge_constraint.set_coefficient(
                no_rec_storage_charge_variables[t], -1.0
            )
            max_charge_constraint.set_coefficient(
                rec_storage_discharge_variables[t], 1.0
            )
            max_charge_constraint.set_coefficient(
                no_rec_storage_discharge_variables[t], 1.0
            )
            # discharge_nameplate >= net discharging power at t:
            # sum(discharge[t]) - sum(charge[t]).
            max_discharge_constraint = lp.constraint(0.0, lp.solver.infinity())
            max_discharge_constraint.set_coefficient(self.discharge_nameplate, 1.0)
            max_discharge_constraint.set_coefficient(
                rec_storage_charge_variables[t], 1.0
            )
            max_discharge_constraint.set_coefficient(
                no_rec_storage_charge_variables[t], 1.0
            )
            max_discharge_constraint.set_coefficient(
                rec_storage_discharge_variables[t], -1.0
            )
            max_discharge_constraint.set_coefficient(
                no_rec_storage_discharge_variables[t], -1.0
            )
    def get_nameplate_solution_value(self):
        """Gets the linear program solver results for nameplate.

        Must be called after lp.solve() to ensure solver has properly
        converged and has generated results.

        Raises:
          RuntimeError: If called before LinearProgramContainer.solve().

        Returns:
          Float value representing solved nameplate value.
        """
        # NOTE(review): energy_nameplate is always declared in
        # configure_lp_variables_and_constraints regardless of
        # storage_nameplate_cost; confirm the zero-cost fallback to
        # get_solution_values (not defined in this class; presumably
        # provided elsewhere in the file) is intended.
        if self.storage_nameplate_cost:
            nameplate_variable = self.energy_nameplate
            if nameplate_variable is None:
                raise RuntimeError(
                    "Get_nameplate_solution_value called before solve()."
                )
            return nameplate_variable.solution_value()
        else:
            return max(self.get_solution_values())
class _GridTransmission(GridSource):
    """Shuttles power from one time-zone to another."""
    def __init__(
        self,
        name,
        nameplate_unit_cost,
        source_grid_region_id=0,
        sink_grid_region_id=1,
        max_power=-1.0,
        efficiency=1.0,
        is_rps=False,
    ):
        """Init function.

        Args:
          name: String name of the object.
          nameplate_unit_cost: (float) Cost to build a unit of
            transmission capacity. ($ / Megawatt of capacity)
          source_grid_region_id: An int specifying which grid_region
            power gets power added.
          sink_grid_region_id: An int specifying which grid_region
            power gets power subtracted.
          max_power: (float) Optional Maximum power which can be transmitted.
            (Megawatt). Set < 0 if there is no limit.
          efficiency: (float) ratio of how much power gets moved one
            grid_region to the other grid_region. Acceptable values are
            0. < efficiency < 1.
          is_rps: (bool) Whether the shuttled power counts as renewable
            (RPS).  Added (default False, backward compatible):
            GridTransmission constructs the rec_* instances with
            is_rps=True, which previously raised TypeError because this
            parameter did not exist.
        """
        super(_GridTransmission, self).__init__(
            name,
            nameplate_unit_cost=nameplate_unit_cost,
            variable_unit_cost=0,
            grid_region_id=source_grid_region_id,
            max_power=max_power,
            max_energy=-1,
            co2_per_electrical_energy=0,
            power_coefficient=efficiency,
            is_rps_source=is_rps,
        )
        self.sink_grid_region_id = sink_grid_region_id
        self.solver = _GridSourceDispatchableSolver(self)
    def configure_lp_variables_and_constraints(self, lp):
        """Declare lp variables, and set constraints.

        Args:
          lp: LinearProgramContainer, contains lp solver and constraints.
        """
        super(_GridTransmission, self).configure_lp_variables_and_constraints(lp)
        # Handle Constraints.
        sink_id = self.sink_grid_region_id
        for t, var in enumerate(self.timeslice_variables):
            # Whatever the super-class is sourcing in source_grid_region_id,
            # sink it from sink_grid_region_id.
            lp.conserve_power_constraint[sink_id][t].set_coefficient(var, -1.0)
            if self.is_rps_source:
                lp.rps_source_constraints[sink_id][t].set_coefficient(var, -1.0)
    def post_process(self, lp):
        """Update lp post_processing result variables.

        This is done so that sanity data checks can be done on RPS before
        returning results.

        Args:
          lp: The LinearProgramContainer where the post processing variables
            reside.
        """
        # Normal source post_process credits the source grid region.
        super(_GridTransmission, self).post_process(lp)
        # Sink post_process: debit the region the power was drawn from.
        sink_id = self.sink_grid_region_id
        if lp.rps_percent > 0.0 and self.is_rps_source:
            lp.rps_total[sink_id] -= self.get_solution_values()
        else:
            lp.non_rps_total[sink_id] -= self.get_solution_values()
class GridTransmission(object):
    """Transmits power bidirectionally between two grid_regions.

    At interface level, transmitting from region-m to region-n is
    identical to transmitting from region-n to region-m.

    Attributes:
      name: (str) name of the object.
      nameplate_unit_cost: (float) Cost to build a unit of
        transmission capacity. ($ / Megawatt of capacity)
      grid_region_id_a: An int specifying one grid_region transmission
        terminus.
      grid_region_id_b: An int specifying a different grid_region
        transmission terminus.
      max_power: (float) Optional Maximum power which can be transmitted.
        (Megawatt). Set < 0 if there is no limit.
      efficiency: (float) ratio of how much power gets moved one
        grid_region to the other grid_region. Acceptable values are
        0. < efficiency < 1.
      a_to_b: _GridTransmission object which moves dirty power from
        grid_region_a to grid_region_b.
      b_to_a: _GridTransmission object which moves dirty power from
        grid_region_b to grid_region_a.
      rec_a_to_b: _GridTransmission object which moves clean power
        from grid_region_a to grid_region_b.
      rec_b_to_a: _GridTransmission object which moves clean power
        from grid_region_b to grid_region_a.
    """
    def configure_lp_variables_and_constraints(self, lp):
        """Declare lp variables, and set constraints.

        Args:
          lp: LinearProgramContainer, contains lp solver and constraints.
        """
        # Per-direction unit cost is 0: the build cost is applied once,
        # below, through a single shared nameplate variable that covers
        # both directions.  "a_to_b" power is sourced in region_b and
        # sunk from region_a (_GridTransmission takes source first).
        self.a_to_b = _GridTransmission(
            self.name + " a_to_b",
            0,
            self.grid_region_id_b,
            self.grid_region_id_a,
            self.max_power,
            self.efficiency,
        )
        self.b_to_a = _GridTransmission(
            self.name + " b_to_a",
            0,
            self.grid_region_id_a,
            self.grid_region_id_b,
            self.max_power,
            self.efficiency,
        )
        # NOTE(review): the rec_* constructions below pass is_rps=True;
        # _GridTransmission.__init__ must accept an is_rps keyword (and
        # forward it to GridSource's is_rps_source) — verify.
        self.rec_a_to_b = _GridTransmission(
            self.name + " rec a_to_b",
            0,
            self.grid_region_id_b,
            self.grid_region_id_a,
            self.max_power,
            self.efficiency,
            is_rps=True,
        )
        self.rec_b_to_a = _GridTransmission(
            self.name + " rec b_to_a",
            0,
            self.grid_region_id_a,
            self.grid_region_id_b,
            self.max_power,
            self.efficiency,
            is_rps=True,
        )
        self.a_to_b.configure_lp_variables_and_constraints(lp)
        self.b_to_a.configure_lp_variables_and_constraints(lp)
        self.rec_a_to_b.configure_lp_variables_and_constraints(lp)
        self.rec_b_to_a.configure_lp_variables_and_constraints(lp)
        # Make sure nameplate >= sum(a_to_b) and nameplate >= sum(b_to_a)
        self.nameplate_variable = lp.declare_nameplate_variable(
            self.name, "%d_%d" % (self.grid_region_id_a, self.grid_region_id_b)
        )
        lp.minimize_costs_objective.set_coefficient(
            self.nameplate_variable, self.nameplate_unit_cost
        )
        for t in lp.time_index_iterable:
            # nameplate >= a_to_b[t] + rec_a_to_b[t] - b_to_a[t] - rec_b_to_a[t]
            a_to_b_constraint = lp.constraint(0.0, lp.solver.infinity())
            a_to_b_constraint.set_coefficient(self.nameplate_variable, 1.0)
            a_to_b_constraint.set_coefficient(self.a_to_b.timeslice_variables[t], -1.0)
            a_to_b_constraint.set_coefficient(
                self.rec_a_to_b.timeslice_variables[t], -1.0
            )
            a_to_b_constraint.set_coefficient(self.b_to_a.timeslice_variables[t], 1.0)
            a_to_b_constraint.set_coefficient(
                self.rec_b_to_a.timeslice_variables[t], 1.0
            )
            # nameplate >= b_to_a[t] + rec_b_to_a[t] - a_to_b[t] - rec_a_to_b[t]
            b_to_a_constraint = lp.constraint(0.0, lp.solver.infinity())
            b_to_a_constraint.set_coefficient(self.nameplate_variable, 1.0)
            b_to_a_constraint.set_coefficient(self.b_to_a.timeslice_variables[t], -1.0)
            b_to_a_constraint.set_coefficient(
                self.rec_b_to_a.timeslice_variables[t], -1.0
            )
            b_to_a_constraint.set_coefficient(self.a_to_b.timeslice_variables[t], 1.0)
            b_to_a_constraint.set_coefficient(
                self.rec_a_to_b.timeslice_variables[t], 1.0
            )
    def post_process(self, lp):
        """Update lp post_processing result variables.

        This is done so that sanity data checks can be done on RPS before
        returning results.

        Args:
          lp: The LinearProgramContainer where the post processing variables
            reside.
        """
        # Delegate to each directional sub-transmission in turn.
        self.a_to_b.post_process(lp)
        self.b_to_a.post_process(lp)
        self.rec_a_to_b.post_process(lp)
        self.rec_b_to_a.post_process(lp)
    def get_nameplate_solution_value(self):
        """Gets the linear program solver results for nameplate.

        Must be called after lp.solve() to ensure solver has properly
        converged and has generated results.

        Raises:
          RuntimeError: If called before LinearProgramContainer.solve().

        Returns:
          Float value representing solved nameplate value.
        """
        nameplate_variable = self.nameplate_variable
        if nameplate_variable is None:
            raise RuntimeError("Get_nameplate_solution_value called before solve().")
        return nameplate_variable.solution_value()
class LinearProgramContainer(object):
    """Instantiates and interfaces to LP Solver.

    Example Usage:
      Initialize: lp = LinearProgramContainer()
      Add objects:
        lp.add_demands(<GridDemand>)
        lp.add_sources(<GridSource>)
        lp.add_transmissions(<GridTransmission>)
        lp.solve()

    Attributes:
      carbon_tax: The amount to tax 1 unit of co2 emissions.
      cost_of_money: The amount to multiply variable costs by to
        make yearly costs and fixed costs comparable.
      profiles: time-series profiles indexed by name which map to
        GridDemands and GridNonDispatchableSources.
      number_of_timeslices: int representing one timeslice per profile index.
      time_index_iterable: A simple int range from 0 - number_of_timeslices.

      Constraints:
        conserve_power_constraint: Dict keyed by grid_region_id. Value
          is a list of LP Constraints which ensures that power > demand
          at all times in all grid_regions.
        minimize_costs_objective: The LP Objective which is to minimize costs.
        rps_source_constraints: Dict keyed by grid_region_id. Value is a
          list of LP Constraints which ensures that
          rps_credit[grid_region, t] <= sum(rps_sources[grid_region, t])
        rps_demand_constraints: Dict keyed by grid_region_id. Value is
          a list of LP Constraints which ensures that
          rps_credit[grid_region, t] <= demand[grid_region, t]

      RPS Variables:
        rps_credit_variables: Dict object keyed by grid_region_id. Value is a
          list of rps_credit[grid_region, t] variables for calculating rps.

      Post Processing Variables. Computed after LP converges:
        rps_total: Dict object keyed by grid_region_id. Value is sum
          (GridSource_power[grid_region, t]) of all rps sources.
        non_rps_total: Dict object keyed by grid_region_id. Value is sum
          (GridSource_power[grid_region, t]) of all non_rps sources.
        adjusted_demand: Dict object keyed by grid_region_id. Value is
          Demand[grid_region, t]
        rps_credit_values: Dict object keyed by grid_region_id. Value is
          rps_credit.value[grid_region, t]

      Grid Elements:
        demands: A list of GridDemand(s).
        sources: A list of GridSource(s).
        storage: A list of GridStorage(s).
        transmission: A list of GridTransmission(s).

      solver: The wrapped pywraplp.Solver.
      solver_precision: A float representing estimated precision of the solver.
    """
    def __init__(self, profiles):
        """Initializes LP Container.

        Args:
          profiles: Time-series pandas dataframe profiles indexed by name
            which map to GridDemands and GridNonDispatchableSources.

        Raises:
          ValueError: If any value in profiles is < 0 or Nan / None.
        """
        self.carbon_tax = 0.0
        self.cost_of_money = 1.0
        self.rps_percent = 0.0
        self.profiles = profiles
        # Constraints
        self.conserve_power_constraint = {}
        self.minimize_costs_objective = None
        # RPS Constraints
        self.rps_source_constraints = {}
        self.rps_demand_constraints = {}
        # RPS Variables
        self.rps_credit_variables = {}
        # Post Processing Variables
        self.rps_total = {}
        self.non_rps_total = {}
        self.adjusted_demand = {}
        self.total_demand = 0
        self.rps_demand = 0
        self.rps_credit_values = {}
        self.demands = []
        self.sources = []
        self.storage = []
        self.transmission = []
        self.solver = None
        self.solver_precision = 1e-3
        # Validate profiles
        if profiles is None:
            raise ValueError("No profiles specified.")
        if profiles.empty:
            raise ValueError("No Data in Profiles.")
        if profiles.isnull().values.any():
            raise ValueError("Profiles may not be Null or None")
        profiles_lt_0 = profiles.values < 0
        if profiles_lt_0.any():
            raise ValueError("Profiles must not be < 0.")
        self.number_of_timeslices = len(profiles)
        self.time_index_iterable = range(self.number_of_timeslices)
    def add_demands(self, *demands):
        """Add all GridDemands in Args to self.demands."""
        for d in demands:
            self.demands.append(d)
    def add_dispatchable_sources(self, *sources):
        """Verify source has no profile associated with it and add to self.sources.

        Args:
          *sources: arbitrary number of GridSources.

        Raises:
          KeyError: if Source has a profile associated with it which would
            indicate the source was non-dispatchable instead of
            dispatchable.
        """
        for source in sources:
            if source.name in self.profiles:
                raise KeyError(
                    "Dispatchable Source %s has a profile associated with it"
                    % (source.name)
                )
            source.solver = _GridSourceDispatchableSolver(source)
            self.sources.append(source)
    def add_nondispatchable_sources(self, *sources):
        """Verify source has a profile associated with it and add to self.sources.

        Args:
          *sources: arbitrary number of GridSources.

        Raises:
          KeyError: if Source has no profile associated with it which would
            indicate the source was dispatchable instead of
            non-dispatchable.
        """
        for source in sources:
            if source.name not in self.profiles:
                known_sources = ",".join(sorted(self.profiles.columns))
                known_source_string = "Known sources are (%s)." % known_sources
                raise KeyError(
                    "Nondispatchable Source %s has no profile. %s"
                    % (source.name, known_source_string)
                )
            source.solver = _GridSourceNonDispatchableSolver(
                source, self.profiles[source.name]
            )
            self.sources.append(source)
    def add_storage(self, *storage):
        """Add storage to lp."""
        self.storage.extend(storage)
    def add_transmissions(self, *transmission):
        """Add transmission to lp."""
        self.transmission.extend(transmission)
    def constraint(self, lower, upper, name=None, debug=False):
        """Build a new Constraint which with valid range between lower and upper."""
        return Constraint(self, lower, upper, name, debug)
    def _initialize_solver(self):
        """Initializes solver, declares objective and set constraints.

        Solver is pywraplp.solver.
        Objective is to minimize costs subject to constraints.

        One constraint declared here is to ensure that
        power[grid_region][t] > demand[grid_region][t] for all t and
        grid_regions.

        Also configures GridElements.
        """
        self.solver = pywraplp.Solver(
            "SolveEnergy", pywraplp.Solver.CLP_LINEAR_PROGRAMMING
        )
        self.minimize_costs_objective = Objective(self, minimize=True)
        # Initialize GridDemands and GridSources
        demand_sum = 0.0
        for d in self.demands:
            try:
                profiles = self.profiles[d.name]
                self.adjusted_demand[d.grid_region_id] = np.array(profiles.values)
            except KeyError:
                profile_names = str(self.profiles.keys())
                error_string = (
                    "GridDemand %s. No profile found! Known profiles:(%s)"
                    % (d.name, profile_names)
                )
                raise KeyError(error_string)
            self.conserve_power_constraint[d.grid_region_id] = [
                self.constraint(
                    p,
                    self.solver.infinity(),
                    "Conserve Power gid:%d t:%d" % (d.grid_region_id, t),
                )
                for t, p in enumerate(profiles)
            ]
            demand_sum += sum(profiles)
        # Handle RPS which is tricky. It requires special credit
        # variables[grid_region][time] and 3 constraints.
        #
        # Constraint #1:
        # The overall goal is to have RPS exceed rps_percent of total
        # demand. Given that:
        #   total_rps_credit := sum(rps_credit[g][t])
        #   total_demand := sum(demand[g][t])
        #
        # The constraint named total_rps_credit_gt_rps_percent_constraint
        # is:
        #   total_rps_credit >= (self.rps_percent / 100) * total_demand
        #
        # Constraint #2:
        # rps_credit[g][t] cannot exceed sum of rps_sources - sum of
        # rps_sinks at each g,t. An example of rps_sink is the 'REC_STORAGE'
        # part of GridRecStorage which stores rps energy off the grid only
        # to put it back on the grid later as a rps_source. This is
        # reflected in the constraint named
        # rps_source_constraints[g][t]:
        #   rps_credit[g][t] <= sum(rps_sources[g][t]) - sum(rps_sinks[g][t])
        #
        # Constraint #3
        # rps_credit[g][t] cannot exceed what can be used at each g,t. if
        # rps_sources generate a Gigawatt at g,t = 0,0 and only 1MW can be
        # used at g,t then we don't want to credit the unused 999 MW.
        #
        # The constraint named rps_demand_constraints is:
        #   rps_credit[g][t] <= demand[g][t]
        #
        self.total_demand = demand_sum
        self.rps_demand = demand_sum * self.rps_percent / 100.0
        solver = self.solver
        total_rps_credit_gt_rps_percent_constraint = self.constraint(
            self.rps_demand, solver.infinity()
        )
        for d in self.demands:
            profiles = self.profiles[d.name]
            if self.rps_percent > 0.0:
                rps_credit_variables = self.declare_timeslice_variables(
                    "__rps_credit__", d.grid_region_id
                )
            else:
                # No RPS requirement: pin credits to 0 with degenerate
                # variables so downstream code can treat both cases alike.
                rps_credit_variables = [
                    solver.NumVar(
                        0.0, 0.0, "__bogus rps_credit__ %d %d" % (d.grid_region_id, t)
                    )
                    for t in self.time_index_iterable
                ]
            rps_demand_constraints = []
            rps_source_constraints = [
                self.constraint(0.0, solver.infinity())
                for t in self.time_index_iterable
            ]
            self.rps_source_constraints[d.grid_region_id] = rps_source_constraints
            self.rps_credit_variables[d.grid_region_id] = rps_credit_variables
            for t in self.time_index_iterable:
                # Sum(rps_credit[grid_region, t]) >= rps_percent * total demand.
                total_rps_credit_gt_rps_percent_constraint.set_coefficient(
                    rps_credit_variables[t], 1.0
                )
                # Rps_credit[grid_region, t] <= demand[grid_region, t].
                rps_credit_less_than_demand = self.constraint(
                    -solver.infinity(), profiles[t]
                )
                rps_credit_less_than_demand.set_coefficient(
                    rps_credit_variables[t], 1.0
                )
                rps_demand_constraints.append(rps_credit_less_than_demand)
                # Rps_credit[grid_region, t] <= (sum(rps_sources[grid_region, t])
                # Constraint also gets adjusted by _GridSource(Non)DispatchableSolver.
                # configure_lp_variables_and_constraints
                rps_source_constraints[t].set_coefficient(rps_credit_variables[t], -1.0)
            self.rps_demand_constraints[d.grid_region_id] = rps_demand_constraints
        # Configure sources and storage.
        for s in self.sources + self.storage + self.transmission:
            s.configure_lp_variables_and_constraints(self)
    def solve(self):
        """Initializes and runs linear program.

        This is the main routine to call after __init__.

        Returns:
          True if linear program gave an optimal result. False otherwise.
        """
        self._initialize_solver()
        status = self.solver.Solve()
        converged = status == self.solver.OPTIMAL
        if converged:
            self._post_process()
        return converged
    def _post_process(self):
        """Generates data used for calculating consumed rps/non-rps values.

        Also double-checks results to make sure they match constraints.

        Raises:
          RuntimeError: If double-checked results do not match constraints.
        """
        # Initialize post_processing totals.
        for d in self.demands:
            # Total amount of rps_sources[g][t] power.
            self.rps_total[d.grid_region_id] = np.zeros(self.number_of_timeslices)
            # Total amount of non-rps_sources[g][t] power.
            self.non_rps_total[d.grid_region_id] = np.zeros(self.number_of_timeslices)
        for s in self.sources + self.storage + self.transmission:
            s.post_process(self)
        # Sanity error check results against constraints. If any of these
        # get raised, it indicates a bug in the code.
        solver_precision = self.solver_precision
        sum_rps_credits = 0.0
        for g_id in [d.grid_region_id for d in self.demands]:
            power_deficit = self.adjusted_demand[g_id] - (
                self.rps_total[g_id] + self.non_rps_total[g_id]
            )
            # .all() is correct here: power must cover demand at *every* t.
            lights_kept_on = (power_deficit < solver_precision).all()
            rps_credits = np.array(
                [rcv.solution_value() for rcv in self.rps_credit_variables[g_id]]
            )
            sum_rps_credits += sum(rps_credits)
            self.rps_credit_values[g_id] = rps_credits
            # Fixed: the violation checks below use .any() — a constraint
            # violated in any single timeslice is an error.  They previously
            # used .all(), which only fired when *every* timeslice violated,
            # silently masking bugs.
            rps_credit_gt_demand = (
                rps_credits > self.adjusted_demand[g_id] + solver_precision
            ).any()
            rps_credit_gt_rps_sources = (
                rps_credits > self.rps_total[g_id] + solver_precision
            ).any()
            storage_exceeds_demand = (
                self.adjusted_demand[g_id] < -solver_precision
            ).any()
            if not lights_kept_on:
                raise DemandNotSatisfiedError(
                    "Demand not satisfied by %f for region %d"
                    % (max(power_deficit), g_id)
                )
            if rps_credit_gt_demand:
                raise RpsExceedsDemandError(
                    "RPS Credits Exceed Demand for region %d" % g_id
                )
            if rps_credit_gt_rps_sources:
                raise RpsCreditExceedsSourcesError(
                    "RPS Credits Exceed RPS Sources for region %d" % g_id
                )
            if storage_exceeds_demand:
                raise StorageExceedsDemandError(
                    "Storage Exceeds Demand for region %d" % g_id
                )
        # Scale solver_precision by number of timeslices to get precision
        # for a summed comparison.
        sum_solver_precision = solver_precision * self.number_of_timeslices
        if sum_solver_precision + sum_rps_credits < self.rps_demand:
            raise RpsPercentNotMetError(
                "Sum RPS credits (%f) < demand * (%f rps_percent) (%f)"
                % (sum_rps_credits, float(self.rps_percent), self.rps_demand)
            )
    def declare_timeslice_variables(self, name, grid_region_id):
        """Declares timeslice variables for a grid_region.

        Args:
          name: String to be included in the generated variable name.
          grid_region_id: Int which identifies which grid these variables affect.

        Do Not call this function with the same (name, grid_region_id)
        pair more than once. There may not be identically named variables
        in the same grid_region.

        Returns:
          Array of lp variables, each which range from 0 to infinity.
          Array is mapped so that variable for time-slice x is at index x.
          e.g. variable for first time-slice is variable[0]. variable for
          last time-slice is variable[-1]
        """
        solver = self.solver
        variables = []
        for t in self.time_index_iterable:
            var_name = "__".join(
                [name, "grid_region_id", str(grid_region_id), "at_t", str(t)]
            )
            variables.append(solver.NumVar(0.0, solver.infinity(), var_name))
        return variables
    def declare_nameplate_variable(self, name, grid_region_id):
        """Declares a nameplate variable for a grid_region.

        Args:
          name: String to be included in the generated variable name.
          grid_region_id: Stringifyable object which identifies which grid
            these variables affect.

        Do Not call this function with the same (name, grid_region_id)
        pair more than once. There may not be identically named variables
        in the same grid_region.

        Returns:
          A lp variable which values range from 0 to infinity.
        """
        nameplate_name = "__".join(
            [name, "grid_region_id", str(grid_region_id), "peak"]
        )
        solver = self.solver
        return solver.NumVar(0.0, solver.infinity(), nameplate_name)
def extrapolate_cost(cost, discount_rate, time_span_1, time_span_2):
    """Extrapolate cost from one time span to another.

    Args:
      cost: cost incurred during time_span_1 (in units of currency).
      discount_rate: rate that money decays, per year (as decimal, e.g., .06).
      time_span_1: time span when cost incurred (in units of years).
      time_span_2: time span to extrapolate cost (in units of years).

    Returns:
      Cost extrapolated to time_span_2, units of currency.

    Model parameters are costs over time spans. For example, the demand
    may be a time series that lasts 1 year; the variable cost to fulfill
    that demand is then for 1 year of operation, while the GridModel
    computes total cost over a longer span (e.g., 30 years).

    If there were no time value of money, the extrapolated cost would be
    cost * time_span_2 / time_span_1 (e.g. 30x in the example). Because
    future payments cost less than present ones, we instead find the
    equivalent continuous stream of payments over time_span_1 and assume
    that same stream runs over time_span_2:

        cost * (1 - (1 + d)^-time_span_2) / (1 - (1 + d)^-time_span_1)

    which tends to the simple ratio as discount_rate d tends to 0.
    """
    growth_rate = 1.0 + discount_rate
    # Discount factor at the end of each span.  (Previously these were
    # named value_decay_1/value_decay_2 with the span subscripts swapped.)
    span_1_decay = pow(growth_rate, -time_span_1)
    span_2_decay = pow(growth_rate, -time_span_2)
    try:
        return cost * (1.0 - span_2_decay) / (1.0 - span_1_decay)
    except ZeroDivisionError:
        # growth_rate ** -time_span_1 == 1.0, i.e. discount_rate == 0
        # (or time_span_1 == 0).  Bug fix: with a zero discount rate the
        # correct extrapolation is the plain ratio of the time spans,
        # not the unscaled cost the old fallback returned.
        if time_span_1:
            return cost * float(time_span_2) / time_span_1
        # Degenerate time_span_1 == 0: keep the old best-effort result.
        return cost
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simulate cost-optimal electrical grid construction under different policies.
Code contains GridElements: Power Sources, Demands and Storage. Grid
Elements are placed in different grid regions. Grid regions are
separated from each other so only sources with grid_region_id == x can
power Demands with grid_region_id == x
The costs of constructing GridElements are based upon:
nameplate_unit_cost: The cost to build one unit (e.g. Megawatt) of power.
variable_unit_cost: The cost to provide one unit of power over time.
(e.g. Megawatt-Hour)
The code simulates the grid over multiple time-slices. e.g. Hourly
over a one year period which would map to 24 * 365 = 8760 time-slices.
The code is based upon a linear-program which contains:
- An objective which is to minimize costs.
- Constraints which must be met before the solution can converge.
- conserve_power_constraint: Ensure that sum(power[t]) >=
demand[t] for all t in each grid-region
This code will work with any set of consistent units. For the
purposes of documentation, the units chosen are:
Power: Megawatts
Time: Hours
(Derived) Energy = Power * Time => Megawatt-Hours
Cost: Dollars ($)
CO2 Emissions: Tonnes
(Derived) CO2 Emitted per Energy => Tonnes / Megawatt-Hours
Carbon Tax: $ / Tonnes
"""
import logging
import numpy as np
from ortools.linear_solver import pywraplp
class GridSimError(RuntimeError):
    """Base class for all errors raised by the grid simulation."""

    pass
class DemandNotSatisfiedError(GridSimError):
    """Raised when grid demand cannot be satisfied."""

    pass
class RpsExceedsDemandError(GridSimError):
    """Raised when the Renewable Portfolio Standard requirement exceeds demand."""

    pass
class RpsCreditExceedsSourcesError(GridSimError):
    """Raised when granted RPS credits exceed what RPS sources supplied."""

    pass
class StorageExceedsDemandError(GridSimError):
    """Raised when storage in a grid region exceeds that region's demand."""

    pass
class RpsPercentNotMetError(GridSimError):
    """Raised when summed RPS credits fall short of rps_percent * demand."""

    pass
class Constraint(object):
    """LP constraint wrapper that remembers its own formula.

    pywraplp.Constraint does not expose the variables/coefficients it
    holds, so this wrapper records them itself for debugging.

    Attributes:
      constraint: underlying pywraplp.Constraint object.
      name: human readable name of the constraint.
      formula: dict mapping variable names to their coefficients.
    """

    def __init__(self, lp, lower_bound, upper_bound, name=None, debug=False):
        """Builds the LP constraint and its debug bookkeeping.

        Args:
          lp: LinearProgramContainer that wraps the LP solver which
            creates the constraint.
          lower_bound: (float) Lower bound on product between coeffs and
            variables.
          upper_bound: (float) Upper bound on product between coeffs and
            variables.
          name: Optional human readable string.
          debug: Boolean which if set, logs constraint info.
        """
        self.name = name
        self.debug = debug
        self.formula = {}
        self.constraint = lp.solver.Constraint(lower_bound, upper_bound)
        if debug:
            logging.debug("CONSTRAINT: %f <= %s <= %f", lower_bound, name, upper_bound)

    def set_coefficient(self, variable, coefficient):
        """Adds variable * coefficient to the LP constraint.

        Wraps pywrap.SetCoefficient(variable, coefficient) and records
        (variable, coefficient) in the formula dict; after this call the
        constraint includes variable * coefficient.

        Args:
          variable: (Lp Variable) The Variable multiplicand.
          coefficient: (float) The coefficient multiplicand.
        """
        var_name = variable.name()
        self.constraint.SetCoefficient(variable, coefficient)
        self.formula[var_name] = coefficient
        if self.debug:
            logging.debug("%s += %s * %f", self.name, var_name, coefficient)
class Objective(object):
    """LP objective wrapper that remembers its own formula.

    Attributes:
      objective: Underlying pywraplp.Objective object.
      formula: dict mapping variable names to their coefficients.
    """

    def __init__(self, lp, minimize=True):
        """Builds the LP objective.

        Args:
          lp: LinearProgramContainer that wraps the LP solver which
            creates the Objective.
          minimize: boolean, True to minimize the objective, False to
            maximize it.
        """
        objective = lp.solver.Objective()
        self.objective = objective
        self.formula = {}
        if minimize:
            objective.SetMinimization()
        else:
            objective.SetMaximization()

    def set_coefficient(self, variable, coefficient):
        """Adds variable * coefficient to the LP objective.

        Wraps pywrap.SetCoefficient(variable, coefficient) and records
        (variable, coefficient) in the formula dict; after this call the
        objective includes variable * coefficient.

        Args:
          variable: (Lp Variable) The Variable multiplicand.
          coefficient: (float) The coefficient multiplicand.
        """
        self.formula[variable.name()] = coefficient
        self.objective.SetCoefficient(variable, coefficient)

    def value(self):
        """Returns the solved value of the objective."""
        return self.objective.Value()
class GridDemand(object):
    """Place-holder describing a load on the grid.

    Attributes:
      name: name of the demand object.
      grid_region_id: int grid region of the demand; only sources with
        a matching grid_region_id can power this demand.
    """

    def __init__(self, name, grid_region_id=0):
        """Initializes GridDemand.

        Args:
          name: name of the demand object.
          grid_region_id: An int specifying the grid region of the
            demand.
        """
        self.grid_region_id = grid_region_id
        self.name = name
class GridSource(object):
    """Describes costs, CO2, region, power and energy limits of a source.

    A GridSource is either dispatchable (may output power at any time,
    e.g. a fossil fuel plant) or non-dispatchable (output follows an
    environmental profile, e.g. solar or wind).  If
    LinearProgramContainer.profiles holds a time-slice profile indexed
    by this source's name, the source is treated as non-dispatchable;
    otherwise it is dispatchable.

    Attributes:
      name: (str) name of the object.
      nameplate_unit_cost: (float) Cost to build a unit of dispatchable
        power. ($ / Megawatt of capacity)
      variable_unit_cost: (float) Cost to supply a unit of dispatchable
        power per time. ($ / Megawatt-Hour)
      grid_region_id: int region this source feeds; only demands with
        the same grid_region_id can sink power from this source.
      max_power: (float) Optional maximum power the source can supply
        (Megawatt); set < 0 for no limit.
      max_energy: (float) Optional maximum energy the source can supply
        (Megawatt-Hours); set < 0 for no limit.
      co2_per_electrical_energy: (float) Tonnes of CO2 / Megawatt Hour.
      power_coefficient: (float) ratio of power supplied by the object
        vs. power reaching the grid, 0 < power_coefficient < 1.
        Nominally 1.0.
      is_rps_source: Bool, True if the source counts toward the
        Renewable Portfolio Standard.
      solver: _GridSourceDispatchableSolver or
        _GridSourceNonDispatchableSolver.  Sets up LP constraints,
        objectives and variables for this source and reports results.
      timeslice_variables: list of LP variables, one per simulation
        time-slice, with the variable for time-slice t at index t.
        Only declared for dispatchable sources.
      nameplate_variable: LP variable for the maximum power this source
        can output at any given time.
    """

    def __init__(
        self,
        name,
        nameplate_unit_cost,
        variable_unit_cost,
        grid_region_id=0,
        max_power=-1.0,
        max_energy=-1.0,
        co2_per_electrical_energy=0,
        power_coefficient=1.0,
        is_rps_source=False,
    ):
        """Stores the source characteristics.

        Args mirror the class attributes of the same names; see the
        class docstring for units and semantics.
        """
        self.name = name
        self.grid_region_id = grid_region_id
        self.nameplate_unit_cost = nameplate_unit_cost
        self.variable_unit_cost = variable_unit_cost
        self.max_power = max_power
        self.max_energy = max_energy
        self.co2_per_electrical_energy = co2_per_electrical_energy
        self.power_coefficient = power_coefficient
        self.is_rps_source = is_rps_source
        # Assigned later: solver by the program container, the LP
        # variables by configure_lp_variables_and_constraints.
        self.solver = None
        self.timeslice_variables = None
        self.nameplate_variable = None

    def configure_lp_variables_and_constraints(self, lp):
        """Declares LP variables and sets constraints via self.solver.

        Args:
          lp: The LinearProgramContainer.

        See Also:
          _GridSourceDispatchableSolver, _GridSourceNonDispatchableSolver
        """
        self.solver.configure_lp_variables_and_constraints(lp)

    def post_process(self, lp):
        """Accumulates this source's solved power into the lp totals.

        Done after lp.solve() so sanity checks can run on RPS before
        results are returned.

        Args:
          lp: The LinearProgramContainer where the post processing
            variables reside.
        """
        counts_toward_rps = self.is_rps_source and lp.rps_percent > 0.0
        totals = lp.rps_total if counts_toward_rps else lp.non_rps_total
        totals[self.grid_region_id] += self.get_solution_values()

    def get_solution_values(self):
        """Gets the linear program solver results.

        Must be called after lp.solve() so the solver has converged and
        generated results.

        Returns:
          np.array of solutions for each timeslice variable.
        """
        return self.solver.get_solution_values()

    def get_nameplate_solution_value(self):
        """Gets the linear program solver result for nameplate.

        Must be called after lp.solve() so the solver has converged and
        generated results.

        Raises:
          RuntimeError: If called before LinearProgramContainer.solve().

        Returns:
          Float value representing the solved nameplate value.
        """
        if self.nameplate_variable is None:
            raise RuntimeError("Get_nameplate_solution_value called before solve().")
        return self.nameplate_variable.solution_value()
class _GridSourceDispatchableSolver(object):
    """Power Source which can provide power at any time.

    Attributes:
      source: GridSource object where self generates LP variables
    """

    def __init__(self, source):
        self.source = source

    def configure_lp_variables_and_constraints(self, lp):
        """Declare lp variables, and set constraints in grid_source.

        Args:
          lp: The LinearProgramContainer.

        Variables declared:
          - timeslice variables: how much power the source outputs at
            each time-slice.
          - nameplate variable: the maximum power sourced.

        The overall objective is to minimize cost; this method adds:
          - nameplate cost: nameplate_unit_cost * nameplate variable.
          - variable cost: variable_unit_cost * sum(timeslice_variables).
          - carbon cost: lp.carbon_tax * co2_per_electrical_energy *
            sum(timeslice_variables).
          Variable and carbon costs accrue periodically, so they are
          scaled by lp.cost_of_money to be comparable with one-time
          nameplate costs.

        Constraints created / modified here:
          - Maximum Energy: sum(timeslice_variables) <= max_energy, only
            when max_energy >= 0 (sources with limited total generation,
            e.g. limited hydropower sites).
          - Maximum Power: nameplate variable and each timeslice
            variable <= max_power, only when max_power >= 0.
          - Conserve Power: sum(power) >= demand for every time-slice.
            Colloquially, "keeping the lights on".
          - Nameplate >= power(t) for all t, so the priced plant is big
            enough to supply the requested power.
        """
        source = self.source
        # Set up LP variables.
        source.timeslice_variables = lp.declare_timeslice_variables(
            source.name, source.grid_region_id
        )
        source.nameplate_variable = lp.declare_nameplate_variable(
            source.name, source.grid_region_id
        )
        solver = lp.solver
        # Configure maximum energy if it is >= 0. Otherwise do not
        # create a constraint.
        max_energy_constraint = (
            lp.constraint(0.0, source.max_energy) if source.max_energy >= 0 else None
        )
        # Constrain maximum nameplate if max_power is set.  (Bug fix:
        # this identical constraint used to be created a second time
        # after the loop below; one constraint suffices.)
        max_power = source.max_power
        if max_power >= 0:
            lp.constraint(0.0, max_power).set_coefficient(
                source.nameplate_variable, 1.0
            )
        # Total_cost includes nameplate cost.
        cost_objective = lp.minimize_costs_objective
        cost_objective.set_coefficient(
            source.nameplate_variable, source.nameplate_unit_cost
        )
        # Add timeslice variables to coefficients.
        for t, var in enumerate(source.timeslice_variables):
            # Total_cost also includes variable and carbon cost,
            # scaled by cost_of_money (periodic vs. one-time costs).
            variable_coef = (
                source.variable_unit_cost
                + source.co2_per_electrical_energy * lp.carbon_tax
            ) * lp.cost_of_money
            cost_objective.set_coefficient(var, variable_coef)
            # Keep the lights on at all times. Power_coefficient is usually
            # 1.0, but is -1.0 for GridStorage.sink and discharge_efficiency
            # for GridStorage.source.
            lp.conserve_power_constraint[source.grid_region_id][t].set_coefficient(
                var, source.power_coefficient
            )
            # Constrain rps_credit if needed.
            if source.is_rps_source:
                lp.rps_source_constraints[source.grid_region_id][t].set_coefficient(
                    var, source.power_coefficient
                )
            # Ensure total energy is less than source.max_energy.
            if max_energy_constraint is not None:
                max_energy_constraint.set_coefficient(var, 1.0)
            # Ensure power doesn't exceed source.max_power.
            if max_power >= 0:
                lp.constraint(0.0, max_power).set_coefficient(var, 1.0)
            # Nameplate must be bigger than largest power.
            # If nameplate_unit_cost > 0, Cost Optimization will push
            # Nameplate near max(timeslice_variables).
            nameplate_constraint = lp.constraint(0.0, solver.infinity())
            nameplate_constraint.set_coefficient(var, -1.0)
            nameplate_constraint.set_coefficient(source.nameplate_variable, 1.0)

    def get_solution_values(self):
        """Gets the linear program solver results.

        Must be called after lp.solve() to ensure solver has properly
        converged and has generated results.

        Raises:
          RuntimeError: If called before LinearProgramContainer.solve().

        Returns:
          np.array of solutions for each timeslice variable.
        """
        timeslice_variables = self.source.timeslice_variables
        if timeslice_variables is None:
            raise RuntimeError("get_solution_values called before solve.")
        return np.array([v.solution_value() for v in timeslice_variables])
class _GridSourceNonDispatchableSolver(object):
    """Power Source which can provide nameplate multiple of its profile.

    Attributes:
      source: GridSource object where self generates LP variables
      profile: pandas Series which represents what fraction of the
        nameplate the source can provide at any given time.
    """

    def __init__(self, source, profile):
        """Initializes the solver.

        Args:
          source: GridSource this solver configures.
          profile: pandas Series of relative source output, one entry
            per time-slice.

        Raises:
          ValueError: If profile is identically zero.
        """
        self.source = source
        # check profile isn't all zeros
        if (profile.values == 0.0).all():
            raise ValueError("%s profile may not be all zero." % source.name)
        # Rescale so the profile peaks at power_coefficient.
        # NOTE(review): this inflates profiles whose peak is below 1.0
        # (e.g. a capacity factor that never reaches 1) — confirm that
        # incoming profiles are meant to be peak-normalized.
        self.profile = source.power_coefficient * profile / max(profile)

    def configure_lp_variables_and_constraints(self, lp):
        """Declare lp variables, and set constraints in grid_source.

        Args:
          lp: The LinearProgramContainer.

        Variables Declared include:
          - nameplate variable: represents the maximum power sourced.

        The values of these variables are solved by the linear program to
        optimize costs subject to some constraints.

        The overall objective is to minimize cost. Herein, the overall
        cost is increased by:
          - nameplate cost: nameplate_unit_cost * nameplate variable
          - variable cost: variable_unit_cost * nameplate variable * sum(profile)
          - carbon cost: lp.carbon_tax * nameplate variable * sum(profile)

        Since variable and carbon costs accrue on a yearly basis, we
        multiply them by lp.cost_of_money to make yearly and
        one-time costs comparable.

        Constraints created / modified here include:
          - Maximum Energy: Ensure nameplate * sum(profile) < max_energy if
            self.max_energy >= 0.  This constraint is only for sources where
            there are limits to the total amount of generation which can be
            built, e.g. hydropower.
          - Maximum Power: Ensure nameplate <= max_power if
            self.max_power >= 0.  This constraint is only for sources where
            there are limits to the maximum amount of power which can be
            built, e.g. hydropower which can only discharge at a maximum rate.
          - Conserve Power: Ensure that sum(power) > demand for all
            time-slices. Colloquially called "Keeping the Lights on."
        """
        source = self.source
        # setup LP variables.
        source.nameplate_variable = lp.declare_nameplate_variable(
            source.name, source.grid_region_id
        )
        sum_profile = sum(self.profile)
        # Configure maximum energy if it is >= 0. Otherwise do not
        # create a constraint.
        if source.max_energy >= 0:
            lp.constraint(0.0, source.max_energy).set_coefficient(
                source.nameplate_variable, sum_profile
            )
        # Configure maximum power if it is >= 0. Otherwise do not
        # create a constraint.  (Comment fixed: this branch caps power,
        # not energy.)
        max_power = source.max_power
        if max_power >= 0:
            lp.constraint(0.0, max_power).set_coefficient(
                source.nameplate_variable, 1.0
            )
        # Total_cost includes nameplate cost.
        cost_objective = lp.minimize_costs_objective
        cost_coefficient = source.nameplate_unit_cost + lp.cost_of_money * (
            source.variable_unit_cost * sum_profile
            + source.co2_per_electrical_energy * sum_profile * lp.carbon_tax
        )
        cost_objective.set_coefficient(source.nameplate_variable, cost_coefficient)
        # Add timeslice variables to coefficients.
        for t, profile_t in enumerate(self.profile):
            # Keep the lights on at all times.
            try:
                constraint = lp.conserve_power_constraint[source.grid_region_id]
            except KeyError:
                raise KeyError(
                    "No Demand declared in grid_region %d." % (source.grid_region_id)
                )
            constraint[t].set_coefficient(source.nameplate_variable, profile_t)
            # Constrain rps_credit if needed.
            if source.is_rps_source:
                lp.rps_source_constraints[source.grid_region_id][t].set_coefficient(
                    source.nameplate_variable, profile_t
                )

    def get_solution_values(self):
        """Gets the linear program solver results.

        Must be called after lp.solve() to ensure solver has properly
        converged and has generated results.

        Raises:
          RuntimeError: If called before LinearProgramContainer.solve().

        Returns:
          np.array of solutions for each timeslice variable.
        """
        nameplate_variable = self.source.nameplate_variable
        if nameplate_variable is None:
            raise RuntimeError("get_solution_values called before solve.")
        return nameplate_variable.solution_value() * self.profile.values
class GridStorage(object):
    """Stores energy from the grid and returns it when needed subject to losses.

    Attributes:
      name: A string which is the name of the object.
      storage_nameplate_cost: A float which is the cost per nameplate of
        energy storage. E.g. The cost of batteries.
      charge_nameplate_cost: A float which is the cost per nameplate
        power to charge the storage. E.g. The rectifier cost to convert
        an AC grid to DC storage.
      discharge_nameplate_cost: A float which is the cost per nameplate
        power to recharge the grid. E.g. The cost of a power inverter to
        convert DC storage back to AC.
      grid_region_id: An int specifying the grid region of the storage.
        The storage can only store energy generated by sources with the
        same grid_region_id. Only demands with the same grid_region_id
        can sink power from this.
      charge_efficiency: A float ranging from 0.0 - 1.0 which describes
        the energy loss between the grid and the storage element. 0.0
        means complete loss, 1.0 means no loss.
      storage_efficiency: A float ranging from 0.0 - 1.0 which describes
        how much stored energy remains from previous stored energy after
        one time-cycle. 1.0 means no loss. 0.0 means all stored energy
        is lost.
      discharge_efficiency: A float ranging from 0.0 - 1.0 which describes
        the energy loss between storage and grid when recharging the grid.
        0.0 means complete loss, 1.0 means no loss.
      max_charge_power: A float which represents the maximum power that
        can charge storage (calculated before any efficiency losses).
        A value < 0 means there is no charge power limit.
      max_discharge_power: A float which represents the maximum power
        that can discharge storage (calculated before any efficiency
        losses). A value < 0 means there is no discharge power limit.
      max_storage: An optional float which represents the maximum energy
        that can be stored. A value < 0 means there is no maximum
        storage limit.
      is_rps: Boolean; if true, keeps track of rps_credit as storage is
        charged / discharged. Amount charging[t] is subtracted from
        rps_credit[t]. Amount discharging[t] is added to rps_credit[t].
        If false, no rps_credits are adjusted.
    """

    def __init__(
        self,
        name,
        storage_nameplate_cost,
        charge_nameplate_cost=0.0,
        discharge_nameplate_cost=0.0,
        grid_region_id=0,
        charge_efficiency=1.0,
        storage_efficiency=1.0,
        discharge_efficiency=1.0,
        max_charge_power=-1,
        max_discharge_power=-1,
        max_storage=-1,
        is_rps=False,
    ):
        """Initializes GridStorage.

        Args mirror the class attributes of the same names; see the
        class docstring for units and semantics.
        """
        self.name = name
        self.storage_nameplate_cost = storage_nameplate_cost
        self.charge_nameplate_cost = charge_nameplate_cost
        self.discharge_nameplate_cost = discharge_nameplate_cost
        self.grid_region_id = grid_region_id
        self.charge_efficiency = charge_efficiency
        self.storage_efficiency = storage_efficiency
        self.discharge_efficiency = discharge_efficiency
        self.max_charge_power = max_charge_power
        self.max_discharge_power = max_discharge_power
        self.max_storage = max_storage
        self.is_rps = is_rps
        # Sink is a power element which sinks from the grid into storage.
        # Source is a power element which sources to the grid from storage.
        # Both are constructed in configure_lp_variables_and_constraints
        self.sink = None
        self.source = None

    def configure_lp_variables_and_constraints(self, lp):
        """Declare lp variables, and set constraints.

        Args:
          lp: LinearProgramContainer, contains lp solver and constraints.
        """
        # Set up LP variables.
        self.energy_variables = lp.declare_timeslice_variables(
            self.name, self.grid_region_id
        )
        if self.storage_nameplate_cost:
            self.energy_nameplate = lp.declare_nameplate_variable(
                self.name, self.grid_region_id
            )
        # Set up source and configure LP variables.  The source
        # discharges storage onto the grid, so its nameplate is priced
        # at discharge_nameplate_cost.
        self.source = GridSource(
            name=self.name + " source",
            nameplate_unit_cost=self.discharge_nameplate_cost,
            variable_unit_cost=0.0,
            grid_region_id=self.grid_region_id,
            max_power=self.max_discharge_power,
            co2_per_electrical_energy=0.0,
            power_coefficient=self.discharge_efficiency,
            is_rps_source=self.is_rps,
        )
        self.source.solver = _GridSourceDispatchableSolver(self.source)
        self.source.configure_lp_variables_and_constraints(lp)
        # Set up sink and configure LP variables.  The sink charges the
        # storage, so its nameplate is priced at charge_nameplate_cost.
        # (Bug fix: this previously reused discharge_nameplate_cost,
        # which left charge_nameplate_cost unused in the model.)
        self.sink = GridSource(
            name=self.name + " sink",
            nameplate_unit_cost=self.charge_nameplate_cost,
            variable_unit_cost=0.0,
            grid_region_id=self.grid_region_id,
            max_power=self.max_charge_power,
            co2_per_electrical_energy=0.0,
            power_coefficient=-1.0,
            is_rps_source=self.is_rps,
        )
        self.sink.solver = _GridSourceDispatchableSolver(self.sink)
        self.sink.configure_lp_variables_and_constraints(lp)
        # Add energy nameplate costs to the objective. Other costs are
        # added by source/sink.configure_lp_variables_and_constraints.
        if self.storage_nameplate_cost:
            nameplate = self.energy_nameplate
            lp.minimize_costs_objective.set_coefficient(
                nameplate, self.storage_nameplate_cost
            )
        # Constrain Energy Storage to be Energy Last time plus sink minus source.
        # Storage is circular so variables at t=0 depend on variables at t=-1
        # which is equivalent to last value in python indexing scheme.
        variables = self.energy_variables
        for t in lp.time_index_iterable:
            # Ce = charge_efficiency,
            # Se = storage_efficiency.
            # Stored[i] = se * Stored[i-1] + ce * sink[i-1] - source[i-1]
            # 0 = -Stored[i] + se * Stored[i-1] + ce * sink[i-1] - source[i-1]
            c = lp.constraint(0.0, 0.0)
            c.set_coefficient(variables[t], -1.0)  # -Stored[i]
            c.set_coefficient(variables[t - 1], self.storage_efficiency)
            # Source and sink are relative to the grid, so opposite here:
            # Sink adds to storage, source subtracts from storage.
            c.set_coefficient(self.source.timeslice_variables[t - 1], -1.0)
            c.set_coefficient(
                self.sink.timeslice_variables[t - 1], self.charge_efficiency
            )
            # Ensure nameplate is larger than stored_value.
            if self.storage_nameplate_cost:
                nameplate_constraint = lp.constraint(0.0, lp.solver.infinity())
                nameplate_constraint.set_coefficient(nameplate, 1.0)
                nameplate_constraint.set_coefficient(variables[t], -1.0)
            # Constrain maximum storage if max_storage >= 0
            if self.max_storage >= 0.0:
                max_storage_constraint = lp.constraint(0.0, self.max_storage)
                max_storage_constraint.set_coefficient(variables[t], 1.0)

    def post_process(self, lp):
        """Update lp post_processing result variables.

        This is done post lp.solve() so that sanity data checks can be done
        on RPS before returning results.

        Args:
          lp: The LinearProgramContainer where the post processing variables
            reside.
        """
        sink_vals = self.sink.get_solution_values()
        source_vals = self.source.get_solution_values() * self.discharge_efficiency
        if self.is_rps:
            lp.rps_total[self.grid_region_id] += source_vals - sink_vals
        else:
            lp.non_rps_total[self.grid_region_id] += source_vals - sink_vals

    def get_nameplate_solution_value(self):
        """Gets the linear program solver results for nameplate.

        Must be called after lp.solve() to ensure solver has properly
        converged and has generated results.

        Raises:
          RuntimeError: If called before LinearProgramContainer.solve().

        Returns:
          Float value representing solved nameplate value.
        """
        if self.storage_nameplate_cost:
            nameplate_variable = self.energy_nameplate
            if nameplate_variable is None:
                raise RuntimeError(
                    "Get_nameplate_solution_value called before solve()."
                )
            return nameplate_variable.solution_value()
        else:
            # No nameplate variable was declared; the effective
            # nameplate is the peak stored energy.
            return max(self.get_solution_values())

    def get_solution_values(self):
        """Gets the linear program solver results.

        Must be called after lp.solve() to ensure solver has properly
        converged and has generated results.

        Raises:
          RuntimeError: If called before LinearProgramContainer.solve().

        Returns:
          np.array of solutions for each timeslice variable.
        """
        timeslice_variables = self.energy_variables
        if timeslice_variables is None:
            raise RuntimeError("get_solution_values called before solve.")
        return np.array([v.solution_value() for v in timeslice_variables])
class GridRecStorage(object):
"""Stores energy from the grid and returns it when needed subject to losses.
This is a wrapper around two GridStorage objects, one which stores
"clean" energy (is_rps) and one which stores "dirty" energy (not
is_rps). There is a need for both types of storage to keep track of
renewable energy credits.
Attributes:
name: A string which is the name of the object.
storage_nameplate_cost: A float which is the cost per nameplate of
energy storage. E.g. The cost of batteries.
charge_nameplate_cost: A float which is the cost per nameplate
power to charge the storage. E.g. The rectifier cost to convert
an AC grid to DC storage.
discharge_nameplate_cost: A float which is the cost per nameplate
power to recharge the grid. E.g. The cost of a power inverter to
convert DC storage back to AC
grid_region_id: An int specifying the grid region of the storage.
The storage can only store energy generated by sources with the
same grid_region_id. Only demands with the same grid_region_id
can sink power from this.
charge_efficiency: A float ranging from 0.0 - 1.0 which describes
the energy loss between the grid and the storage element. 0.0
means complete loss, 1.0 means no loss.
storage_efficiency: A float ranging from 0.0 - 1.0 which describes
how much stored energy remains from previous stored energy after
one time-cycle. 1.0 means no loss. 0.0 means all stored energy
is lost.
discharge_efficiency: A float ranging from 0.0 - 1.0 which describes
the energy loss between storage and grid when recharging the grid.
0.0 means complete loss, 1.0 means no loss.
max_charge_power: A float which represents the maximum power that
can charge storage (calculated before any efficiency losses.).
A value < 0 means there is no charge power limit.
max_discharge_power: A float which represents the maximum power
that can discharge storage (calculated before any efficiency
losses.). A value < 0 means there is no discharge power limit.
max_storage: An optional float which represents the maximum energy
that can be stored. A value < 0 means there is no maximum
storage limit.
rec_storage: GridStorage object which stores "clean" energy.
no_rec_storage: GridStorage object which stores "dirty" energy.
"""
    def __init__(
        self,
        name,
        storage_nameplate_cost,
        charge_nameplate_cost=0.0,
        discharge_nameplate_cost=0.0,
        grid_region_id=0,
        charge_efficiency=1.0,
        storage_efficiency=1.0,
        discharge_efficiency=1.0,
        max_charge_power=-1,
        max_discharge_power=-1,
        max_storage=-1,
    ):
        """Initializes GridRecStorage.

        Args mirror the class attributes of the same names; see the
        class docstring for units and semantics.
        """
        self.name = name
        self.storage_nameplate_cost = storage_nameplate_cost
        self.charge_nameplate_cost = charge_nameplate_cost
        self.discharge_nameplate_cost = discharge_nameplate_cost
        self.grid_region_id = grid_region_id
        self.charge_efficiency = charge_efficiency
        self.storage_efficiency = storage_efficiency
        self.discharge_efficiency = discharge_efficiency
        self.max_charge_power = max_charge_power
        self.max_discharge_power = max_discharge_power
        self.max_storage = max_storage
        # Wrapped "clean" (RPS) and "dirty" (non-RPS) GridStorage
        # objects; constructed in configure_lp_variables_and_constraints.
        self.rec_storage = None
        self.no_rec_storage = None
def configure_lp_variables_and_constraints(self, lp):
"""Declare lp variables, and set constraints.

Splits this storage into a REC ("clean") and a non-REC ("dirty")
GridStorage child, then ties shared nameplate, charge and discharge
capacity variables to the sum of both children.

Args:
lp: LinearProgramContainer, contains lp solver and constraints.
"""
# For rec_storage and no_rec_storage storage, set all costs to 0
# and with no limits. Calculate costs and limits after
# declaration.
self.rec_storage = GridStorage(
name=self.name + " REC_STORAGE",
storage_nameplate_cost=0,
grid_region_id=self.grid_region_id,
charge_efficiency=self.charge_efficiency,
discharge_efficiency=self.discharge_efficiency,
storage_efficiency=self.storage_efficiency,
is_rps=True,
)
self.no_rec_storage = GridStorage(
name=self.name + " NO_REC_STORAGE",
storage_nameplate_cost=0,
grid_region_id=self.grid_region_id,
charge_efficiency=self.charge_efficiency,
discharge_efficiency=self.discharge_efficiency,
storage_efficiency=self.storage_efficiency,
is_rps=False,
)
# Let each child declare its own variables/constraints first.
self.rec_storage.configure_lp_variables_and_constraints(lp)
self.no_rec_storage.configure_lp_variables_and_constraints(lp)
# Calculate costs and limits based on the sum of both rec_storage
# and no_rec_storage.
# Set up LP variables.
self.energy_variables = lp.declare_timeslice_variables(
self.name, self.grid_region_id
)
self.energy_nameplate = lp.declare_nameplate_variable(
self.name, self.grid_region_id
)
self.charge_nameplate = lp.declare_nameplate_variable(
self.name + " charge nameplate", self.grid_region_id
)
self.discharge_nameplate = lp.declare_nameplate_variable(
self.name + " discharge nameplate", self.grid_region_id
)
# Set limits if needed.
# A negative max_* means "unlimited", so no constraint is added.
if self.max_storage >= 0:
lp.constraint(0.0, self.max_storage).set_coefficient(
self.energy_nameplate, 1.0
)
if self.max_charge_power >= 0:
lp.constraint(0.0, self.max_charge_power).set_coefficient(
self.charge_nameplate, 1.0
)
if self.max_discharge_power >= 0:
lp.constraint(0.0, self.max_discharge_power).set_coefficient(
self.discharge_nameplate, 1.0
)
# Add energy nameplate costs to the objective.
lp.minimize_costs_objective.set_coefficient(
self.energy_nameplate, self.storage_nameplate_cost
)
lp.minimize_costs_objective.set_coefficient(
self.charge_nameplate, self.charge_nameplate_cost
)
lp.minimize_costs_objective.set_coefficient(
self.discharge_nameplate, self.discharge_nameplate_cost
)
rec_storage_energy_variables = self.rec_storage.energy_variables
no_rec_storage_energy_variables = self.no_rec_storage.energy_variables
for t in lp.time_index_iterable:
# Ensure nameplate is >= sum(stored_values)[t].
nameplate_constraint = lp.constraint(0.0, lp.solver.infinity())
nameplate_constraint.set_coefficient(self.energy_nameplate, 1.0)
nameplate_constraint.set_coefficient(rec_storage_energy_variables[t], -1.0)
nameplate_constraint.set_coefficient(
no_rec_storage_energy_variables[t], -1.0
)
# NOTE(review): the aliases below look loop-invariant; indentation was
# lost in this copy of the file, so confirm whether they belong inside
# or above the `for t` loop (either placement yields the same result,
# inside merely repeats the attribute lookups).
rec_storage_charge_variables = self.rec_storage.sink.timeslice_variables
no_rec_storage_charge_variables = (
self.no_rec_storage.sink.timeslice_variables
)
rec_storage_discharge_variables = (
self.rec_storage.source.timeslice_variables
)
no_rec_storage_discharge_variables = (
self.no_rec_storage.source.timeslice_variables
)
# charge_nameplate >= net charging (charges - discharges) at t.
max_charge_constraint = lp.constraint(0.0, lp.solver.infinity())
max_charge_constraint.set_coefficient(self.charge_nameplate, 1.0)
max_charge_constraint.set_coefficient(rec_storage_charge_variables[t], -1.0)
max_charge_constraint.set_coefficient(
no_rec_storage_charge_variables[t], -1.0
)
max_charge_constraint.set_coefficient(
rec_storage_discharge_variables[t], 1.0
)
max_charge_constraint.set_coefficient(
no_rec_storage_discharge_variables[t], 1.0
)
# discharge_nameplate >= net discharging (discharges - charges) at t.
max_discharge_constraint = lp.constraint(0.0, lp.solver.infinity())
max_discharge_constraint.set_coefficient(self.discharge_nameplate, 1.0)
max_discharge_constraint.set_coefficient(
rec_storage_charge_variables[t], 1.0
)
max_discharge_constraint.set_coefficient(
no_rec_storage_charge_variables[t], 1.0
)
max_discharge_constraint.set_coefficient(
rec_storage_discharge_variables[t], -1.0
)
max_discharge_constraint.set_coefficient(
no_rec_storage_discharge_variables[t], -1.0
)
def get_solution_values(self):
    """Return the combined per-timeslice stored energy of both children."""
    clean = self.rec_storage.get_solution_values()
    dirty = self.no_rec_storage.get_solution_values()
    return clean + dirty
def get_source_solution_values(self):
    """Return net power delivered to the grid: discharges minus charges."""
    discharged = (
        self.rec_storage.source.get_solution_values()
        + self.no_rec_storage.source.get_solution_values()
    )
    charged = (
        self.rec_storage.sink.get_solution_values()
        + self.no_rec_storage.sink.get_solution_values()
    )
    return discharged - charged
def get_sink_solution_values(self):
    """Return net power drawn from the grid; mirror of the source values."""
    net_sourced = self.get_source_solution_values()
    return -net_sourced
def get_nameplate_solution_value(self):
    """Gets the linear program solver results for nameplate.

    Must be called after lp.solve() to ensure solver has properly
    converged and has generated results.

    Raises:
      RuntimeError: If called before LinearProgramContainer.solve().

    Returns:
      Float value representing solved nameplate value.
    """
    if not self.storage_nameplate_cost:
        # Free storage: the LP leaves the nameplate variable unconstrained
        # by cost, so report the peak of the actual storage profile instead.
        return max(self.get_solution_values())
    lp_variable = self.energy_nameplate
    if lp_variable is None:
        raise RuntimeError(
            "Get_nameplate_solution_value called before solve()."
        )
    return lp_variable.solution_value()
def post_process(self, lp):
    """Forward post-processing to both underlying GridStorage children."""
    for child in (self.rec_storage, self.no_rec_storage):
        child.post_process(lp)
class _GridTransmission(GridSource):
    """Shuttles power from one time-zone to another.

    Power sourced in grid_region_id (inherited from GridSource) is sunk
    from sink_grid_region_id; efficiency is forwarded as the inherited
    power_coefficient.
    """

    def __init__(
        self,
        name,
        nameplate_unit_cost,
        source_grid_region_id=0,
        sink_grid_region_id=1,
        max_power=-1.0,
        efficiency=1.0,
        is_rps=False,
    ):
        """Init function.

        Args:
          name: String name of the object.
          nameplate_unit_cost: (float) Cost to build a unit of
            transmission capacity. ($ / Megawatt of capacity)
          source_grid_region_id: An int specifying which grid_region
            power gets power added.
          sink_grid_region_id: An int specifying which grid_region
            power gets power subtracted.
          max_power: (float) Optional Maximum power which can be transmitted.
            (Megawatt). Set < 0 if there is no limit.
          efficiency: (float) ratio of how much power gets moved one
            grid_region to the other grid_region. Acceptable values are
            0. < efficiency < 1.
          is_rps: (bool) True if this link carries renewable (RPS) power.
            BUG FIX: GridTransmission builds its REC links with
            is_rps=True; without this keyword those constructor calls
            raised TypeError.
        """
        super(_GridTransmission, self).__init__(
            name,
            nameplate_unit_cost=nameplate_unit_cost,
            variable_unit_cost=0,
            grid_region_id=source_grid_region_id,
            max_power=max_power,
            max_energy=-1,
            co2_per_electrical_energy=0,
            # Forwarded so self.is_rps_source (read below) reflects the
            # caller's choice.
            is_rps_source=is_rps,
            power_coefficient=efficiency,
        )
        self.sink_grid_region_id = sink_grid_region_id
        self.solver = _GridSourceDispatchableSolver(self)

    def configure_lp_variables_and_constraints(self, lp):
        """Declare lp variables, and set constraints.

        Args:
          lp: LinearProgramContainer, contains lp solver and constraints.
        """
        super(_GridTransmission, self).configure_lp_variables_and_constraints(lp)

        # Handle Constraints.  (An unused `source_id` local was removed.)
        for t, var in enumerate(self.timeslice_variables):
            sink_id = self.sink_grid_region_id
            # Whatever the super-class is sourcing in source_grid_region_id,
            # sink it from sink_grid_region_id.
            lp.conserve_power_constraint[sink_id][t].set_coefficient(var, -1.0)
            if self.is_rps_source:
                lp.rps_source_constraints[sink_id][t].set_coefficient(var, -1.0)

    def post_process(self, lp):
        """Update lp post_processing result variables.

        This is done so that sanity data checks can be done on RPS before
        returning results.

        Args:
          lp: The LinearProgramContainer where the post processing variables reside.
        """
        # Normal source post_process.
        super(_GridTransmission, self).post_process(lp)

        # Sink post_process: subtract the transmitted power from the sink
        # region's running totals.
        sink_id = self.sink_grid_region_id
        if lp.rps_percent > 0.0 and self.is_rps_source:
            lp.rps_total[sink_id] -= self.get_solution_values()
        else:
            lp.non_rps_total[sink_id] -= self.get_solution_values()
class GridTransmission(object):
"""Transmits power bidirectionally between two grid_regions.

At interface level, transmitting from region-m to region-n is
identical to transmitting from region-n to region-m.

Attributes:
name: (str) name of the object.
nameplate_unit_cost: (float) Cost to build a unit of
transmission capacity. ($ / Megawatt of capacity)
grid_region_id_a: An int specifying one grid_region transmission
terminus
grid_region_id_b: An int specifying a different grid_region
transmission terminus
max_power: (float) Optional Maximum power which can be transmitted.
(Megawatt). Set < 0 if there is no limit.
efficiency: (float) ratio of how much power gets moved one
grid_region to the other grid_region. Acceptable values are
0. < efficiency < 1.
a_to_b: _GridTransmission object which moves dirty power from
grid_region_a to grid_region_b
b_to_a: _GridTransmission object which moves dirty power from
grid_region_b to grid_region_a
rec_a_to_b: _GridTransmission object which moves clean power
from grid_region_a to grid_region_b
rec_b_to_a: _GridTransmission object which moves clean power
from grid_region_b to grid_region_a
nameplate_variable: LP variable created in
configure_lp_variables_and_constraints; a single shared capacity
bound constrained against the net flow in either direction.
"""
def __init__(
    self,
    name,
    nameplate_unit_cost,
    grid_region_id_a,
    grid_region_id_b,
    efficiency=1.0,
    max_power=-1.0,
):
    """Record the transmission-link configuration.

    The four directional _GridTransmission children are created later,
    in configure_lp_variables_and_constraints.
    """
    self.name = name
    self.nameplate_unit_cost = nameplate_unit_cost
    self.grid_region_id_a = grid_region_id_a
    self.grid_region_id_b = grid_region_id_b
    self.efficiency = efficiency
    self.max_power = max_power

    # Directional links (dirty and REC/clean), filled in at LP-config time.
    for link_attr in ("a_to_b", "b_to_a", "rec_a_to_b", "rec_b_to_a"):
        setattr(self, link_attr, None)
def configure_lp_variables_and_constraints(self, lp):
"""Declare lp variables, and set constraints.

Builds four directional _GridTransmission links (dirty and REC in each
direction) with zero nameplate cost, then charges capacity cost on a
single shared nameplate variable bounded by net flow in either
direction.

Args:
lp: LinearProgramContainer, contains lp solver and constraints.
"""
# Each _GridTransmission sources power in its first region argument and
# sinks it from the second, so "a_to_b" sources in region b / sinks
# from region a.  Nameplate cost is 0 here: cost is charged once on the
# shared nameplate_variable below.
self.a_to_b = _GridTransmission(
self.name + " a_to_b",
0,
self.grid_region_id_b,
self.grid_region_id_a,
self.max_power,
self.efficiency,
)
self.b_to_a = _GridTransmission(
self.name + " b_to_a",
0,
self.grid_region_id_a,
self.grid_region_id_b,
self.max_power,
self.efficiency,
)
# NOTE(review): is_rps=True requires _GridTransmission.__init__ to
# accept an is_rps keyword — confirm the signature matches.
self.rec_a_to_b = _GridTransmission(
self.name + " rec a_to_b",
0,
self.grid_region_id_b,
self.grid_region_id_a,
self.max_power,
self.efficiency,
is_rps=True,
)
self.rec_b_to_a = _GridTransmission(
self.name + " rec b_to_a",
0,
self.grid_region_id_a,
self.grid_region_id_b,
self.max_power,
self.efficiency,
is_rps=True,
)
self.a_to_b.configure_lp_variables_and_constraints(lp)
self.b_to_a.configure_lp_variables_and_constraints(lp)
self.rec_a_to_b.configure_lp_variables_and_constraints(lp)
self.rec_b_to_a.configure_lp_variables_and_constraints(lp)
# Make sure nameplate >= sum(a_to_b) and nameplate >= sum(b_to_a)
self.nameplate_variable = lp.declare_nameplate_variable(
self.name, "%d_%d" % (self.grid_region_id_a, self.grid_region_id_b)
)
lp.minimize_costs_objective.set_coefficient(
self.nameplate_variable, self.nameplate_unit_cost
)
for t in lp.time_index_iterable:
# nameplate >= a_to_b[t] + rec_a_to_b[t] - b_to_a[t] - rec_b_to_a[t]
a_to_b_constraint = lp.constraint(0.0, lp.solver.infinity())
a_to_b_constraint.set_coefficient(self.nameplate_variable, 1.0)
a_to_b_constraint.set_coefficient(self.a_to_b.timeslice_variables[t], -1.0)
a_to_b_constraint.set_coefficient(
self.rec_a_to_b.timeslice_variables[t], -1.0
)
a_to_b_constraint.set_coefficient(self.b_to_a.timeslice_variables[t], 1.0)
a_to_b_constraint.set_coefficient(
self.rec_b_to_a.timeslice_variables[t], 1.0
)
# nameplate >= b_to_a[t] + rec_b_to_a[t] - a_to_b[t] - rec_a_to_b[t]
b_to_a_constraint = lp.constraint(0.0, lp.solver.infinity())
b_to_a_constraint.set_coefficient(self.nameplate_variable, 1.0)
b_to_a_constraint.set_coefficient(self.b_to_a.timeslice_variables[t], -1.0)
b_to_a_constraint.set_coefficient(
self.rec_b_to_a.timeslice_variables[t], -1.0
)
b_to_a_constraint.set_coefficient(self.a_to_b.timeslice_variables[t], 1.0)
b_to_a_constraint.set_coefficient(
self.rec_a_to_b.timeslice_variables[t], 1.0
)
def post_process(self, lp):
    """Update lp post_processing result variables.

    This is done so that sanity data checks can be done on RPS before
    returning results.

    Args:
      lp: The LinearProgramContainer where the post processing variables reside.
    """
    for link in (self.a_to_b, self.b_to_a, self.rec_a_to_b, self.rec_b_to_a):
        link.post_process(lp)
def get_nameplate_solution_value(self):
    """Gets the linear program solver results for nameplate.

    Must be called after lp.solve() to ensure solver has properly
    converged and has generated results.

    Raises:
      RuntimeError: If called before LinearProgramContainer.solve().

    Returns:
      Float value representing solved nameplate value.
    """
    if self.nameplate_variable is None:
        raise RuntimeError("Get_nameplate_solution_value called before solve().")
    return self.nameplate_variable.solution_value()
class LinearProgramContainer(object):
"""Instantiates and interfaces to LP Solver.

Example Usage:
Initialize: lp = LinearProgramContainer()
Add objects:
lp.add_demands(<GridDemand>)
lp.add_sources(<GridSource>)
lp.add_transmissions(<GridTransmission>)
lp.solve()

Attributes:
carbon_tax: The amount to tax 1 unit of co2 emissions.
cost_of_money: The amount to multiply variable costs by to
make yearly costs and fixed costs comparable.
rps_percent: Percent (0 - 100) of total demand which must be met by
RPS credits; defaults to 0.0 in __init__.
profiles: time-series profiles indexed by name which map to
GridDemands and GridNonDispatchableSources.
number_of_timeslices: int representing one timeslice per profile index.
time_index_iterable: A simple int range from 0 - number_of_timeslices.
Constraints:
conserve_power_constraint: Dict keyed by grid_region_id. Value
is a list of LP Constraints which ensures that power > demand
at all times in all grid_regions.
minimize_costs_objective: The LP Objective which is to minimize costs.
rps_source_constraints: Dict keyed by grid_region_id. Value is a
list of LP Constraints which ensures that
rps_credit[grid_region, t] <= sum(rps_sources[grid_region, t])
rps_demand_constraints: Dict keyed by grid_region_id. Value is
a list of LP Constraints which ensures that
rps_credit[grid_region, t] <= demand[grid_region, t]
RPS Variables:
rps_credit_variables: Dict object keyed by grid_region_id. Value is a
list of rps_credit[grid_region, t] variables for calculating rps.
Post Processing Variables. Computed after LP converges:
rps_total: Dict object keyed by grid_region_id. Value is sum
(GridSource_power[grid_region, t]) of all rps sources.
non_rps_total: Dict object keyed by grid_region_id. Value is sum
(GridSource_power[grid_region, t]) of all non_rps sources.
adjusted_demand: Dict object keyed by grid_region_id. Value is
Demand[grid_region, t]
rps_credit_values: Dict object keyed by grid_region_id. Value is
rps_credit.value[grid_region, t]
Grid Elements:
demands: A list of GridDemand(s).
sources: A list of GridSource(s).
storage: A list of GridStorage(s).
transmission: A list of GridTransmission(s).
solver: The wrapped pywraplp.Solver.
solver_precision: A float representing estimated precision of the solver.
"""
def __init__(self, profiles):
    """Initializes LP Container.

    Args:
      profiles: Time-series pandas dataframe profiles indexed by name
        which map to GridDemands and GridNonDispatchableSources.

    Raises:
      ValueError: If any value in profiles is < 0 or Nan / None.
    """
    self.carbon_tax = 0.0
    self.cost_of_money = 1.0
    self.rps_percent = 0.0
    self.profiles = profiles

    # Constraints.
    self.conserve_power_constraint = {}
    self.minimize_costs_objective = None

    # RPS constraints and variables.
    self.rps_source_constraints = {}
    self.rps_demand_constraints = {}
    self.rps_credit_variables = {}

    # Post-processing results.
    self.rps_total = {}
    self.non_rps_total = {}
    self.adjusted_demand = {}
    self.total_demand = 0
    self.rps_demand = 0
    self.rps_credit_values = {}

    # Grid elements.
    self.demands = []
    self.sources = []
    self.storage = []
    self.transmission = []

    self.solver = None
    self.solver_precision = 1e-3

    # Validate profiles so that misconfiguration fails fast.
    if profiles is None:
        raise ValueError("No profiles specified.")
    if profiles.empty:
        raise ValueError("No Data in Profiles.")
    if profiles.isnull().values.any():
        raise ValueError("Profiles may not be Null or None")
    if (profiles.values < 0).any():
        raise ValueError("Profiles must not be < 0.")

    self.number_of_timeslices = len(profiles)
    self.time_index_iterable = range(self.number_of_timeslices)
def add_demands(self, *demands):
    """Add all GridDemands in Args to self.demands."""
    self.demands.extend(demands)
def add_dispatchable_sources(self, *sources):
    """Verify source has no profile associated with it and add to self.sources.

    Args:
      *sources: arbitrary number of GridSources.

    Raises:
      KeyError: if Source has a profile associated with it which would
        indicate the source was non-dispatchable instead of
        dispatchable.
    """
    for src in sources:
        # A dispatchable source must not appear in the profile table.
        if src.name in self.profiles:
            raise KeyError(
                "Dispatchable Source %s has a profile associated with it"
                % (src.name)
            )
        src.solver = _GridSourceDispatchableSolver(src)
        self.sources.append(src)
def add_nondispatchable_sources(self, *sources):
    """Verify source has a profile associated with it and add to self.sources.

    Args:
      *sources: arbitrary number of GridSources.

    Raises:
      KeyError: if Source has no profile associated with it which would
        indicate the source was dispatchable instead of
        non-dispatchable.
    """
    for src in sources:
        # A non-dispatchable source must have a generation profile.
        if src.name not in self.profiles:
            known_sources = ",".join(sorted(self.profiles.columns))
            known_source_string = "Known sources are (%s)." % known_sources
            raise KeyError(
                "Nondispatchable Source %s has no profile. %s"
                % (src.name, known_source_string)
            )
        src.solver = _GridSourceNonDispatchableSolver(
            src, self.profiles[src.name]
        )
        self.sources.append(src)
def add_storage(self, *storage):
    """Add storage to lp."""
    for element in storage:
        self.storage.append(element)
def add_transmissions(self, *transmission):
    """Add transmission to lp."""
    for element in transmission:
        self.transmission.append(element)
def constraint(self, lower, upper, name=None, debug=False):
"""Build a new Constraint which with valid range between lower and upper.

Args:
lower: Lower bound for the constraint's valid range.
upper: Upper bound for the constraint's valid range.
name: Optional identifier, passed through to Constraint.
debug: Passed through to Constraint; presumably enables debug
output — confirm against the Constraint class.

Returns:
A Constraint bound to this container.
"""
return Constraint(self, lower, upper, name, debug)
def _initialize_solver(self):
"""Initializes solver, declares objective and set constraints.

Solver is pywraplp.solver.
Objective is to minimize costs subject to constraints.

One constraint declared here is to ensure that
power[grid_region][t] > demand[grid_region][t] for all t and
grid_regions.

Also configures GridElements.

Raises:
KeyError: If a GridDemand has no matching profile.
"""
self.solver = pywraplp.Solver(
"SolveEnergy", pywraplp.Solver.CLP_LINEAR_PROGRAMMING
)
self.minimize_costs_objective = Objective(self, minimize=True)
# Initialize GridDemands and GridSources
demand_sum = 0.0
for d in self.demands:
try:
profiles = self.profiles[d.name]
self.adjusted_demand[d.grid_region_id] = np.array(profiles.values)
except KeyError:
profile_names = str(self.profiles.keys())
error_string = (
"GridDemand %s. No profile found! Known profiles:(%s)"
% (d.name, profile_names)
)
raise KeyError(error_string)
# One conservation constraint per timeslice: supplied power must be
# at least the demand profile value p at time t.
self.conserve_power_constraint[d.grid_region_id] = [
self.constraint(
p,
self.solver.infinity(),
"Conserve Power gid:%d t:%d" % (d.grid_region_id, t),
)
for t, p in enumerate(profiles)
]
demand_sum += sum(profiles)
# Handle RPS which is tricky. It requires special credit
# variables[grid_region][time] and 3 constraints.
#
# Constraint #1:
# The overall goal is to have RPS exceed rps_percent of total
# demand. Given that:
# total_rps_credit := sum(rps_credit[g][t])
# total_demand := sum(demand[g][t])
#
# The constraint named total_rps_credit_gt_rps_percent_constraint
# is:
# total_rps_credit >= (self.rps_percent / 100) * total_demand
#
# Constraint #2:
# rps_credit[g][t] cannot exceed sum of rps_sources - sum of
# rps_sinks at each g,t. An example of rps_sink is the 'REC_STORAGE'
# part of GridRecStorage which stores rps energy off the grid only
# to put it back on the grid later as a rps_source. This is
# reflected in the constraint named
# rps_source_constraints[g][t]:
# rps_credit[g][t] <= sum(rps_sources[g][t]) - sum(rps_sinks[g][t])
#
# Constraint #3
# rps_credit[g][t] cannot exceed what can be used at each g,t. if
# rps_sources generate a Gigawatt at g,t = 0,0 and only 1MW can be
# used at g,t then we don't want to credit the unused 999 MW.
#
# The constraint named rps_demand_constraints is:
# rps_credit[g][t] <= demand[g][t]
#
self.total_demand = demand_sum
self.rps_demand = demand_sum * self.rps_percent / 100.0
solver = self.solver
total_rps_credit_gt_rps_percent_constraint = self.constraint(
self.rps_demand, solver.infinity()
)
for d in self.demands:
profiles = self.profiles[d.name]
if self.rps_percent > 0.0:
rps_credit_variables = self.declare_timeslice_variables(
"__rps_credit__", d.grid_region_id
)
else:
# No RPS requirement: pin the credit variables to exactly 0.
rps_credit_variables = [
solver.NumVar(
0.0, 0.0, "__bogus rps_credit__ %d %d" % (d.grid_region_id, t)
)
for t in self.time_index_iterable
]
rps_demand_constraints = []
rps_source_constraints = [
self.constraint(0.0, solver.infinity())
for t in self.time_index_iterable
]
self.rps_source_constraints[d.grid_region_id] = rps_source_constraints
self.rps_credit_variables[d.grid_region_id] = rps_credit_variables
for t in self.time_index_iterable:
# Sum(rps_credit[grid_region, t]) >= rps_percent * total demand.
total_rps_credit_gt_rps_percent_constraint.set_coefficient(
rps_credit_variables[t], 1.0
)
# Rps_credit[grid_region, t] <= demand[grid_region, t].
rps_credit_less_than_demand = self.constraint(
-solver.infinity(), profiles[t]
)
rps_credit_less_than_demand.set_coefficient(
rps_credit_variables[t], 1.0
)
rps_demand_constraints.append(rps_credit_less_than_demand)
# Rps_credit[grid_region, t] <= (sum(rps_sources[grid_region, t])
# Constraint also gets adjusted by _GridSource(Non)DispatchableSolver.
# configure_lp_variables_and_constraints
rps_source_constraints[t].set_coefficient(rps_credit_variables[t], -1.0)
self.rps_demand_constraints[d.grid_region_id] = rps_demand_constraints
# Configure sources and storage.
for s in self.sources + self.storage + self.transmission:
s.configure_lp_variables_and_constraints(self)
def solve(self):
    """Initializes and runs linear program.

    This is the main routine to call after __init__.

    Returns:
      True if linear program gave an optimal result. False otherwise.
    """
    self._initialize_solver()
    optimal = self.solver.Solve() == self.solver.OPTIMAL
    if optimal:
        # Only derive result tables when the solver actually converged.
        self._post_process()
    return optimal
def _post_process(self):
    """Generates data used for calculating consumed rps/non-rps values.

    Also double-checks results to make sure they match constraints.

    Raises:
      RuntimeError: If double-checked results do not match constraints.
    """
    # Initialize post_processing totals.
    for d in self.demands:
        # Total amount of rps_sources[g][t] power.
        self.rps_total[d.grid_region_id] = np.zeros(self.number_of_timeslices)
        # Total amount of non-rps_sources[g][t] power.
        self.non_rps_total[d.grid_region_id] = np.zeros(self.number_of_timeslices)
    for s in self.sources + self.storage + self.transmission:
        s.post_process(self)
    # Sanity error check results against constraints. If any of these
    # get raised, it indicates a bug in the code.
    solver_precision = self.solver_precision
    sum_rps_credits = 0.0
    for g_id in [d.grid_region_id for d in self.demands]:
        power_deficit = self.adjusted_demand[g_id] - (
            self.rps_total[g_id] + self.non_rps_total[g_id]
        )
        # Demand must be met at *every* timeslice, hence .all().
        lights_kept_on = (power_deficit < solver_precision).all()
        rps_credits = np.array(
            [rcv.solution_value() for rcv in self.rps_credit_variables[g_id]]
        )
        sum_rps_credits += sum(rps_credits)
        self.rps_credit_values[g_id] = rps_credits
        # BUG FIX: the three checks below detect violations of per-timeslice
        # constraints, so a single offending timeslice must trigger the
        # error.  They previously used .all(), which only fired when every
        # timeslice violated the constraint, silently passing partial
        # violations.
        rps_credit_gt_demand = (
            rps_credits > self.adjusted_demand[g_id] + solver_precision
        ).any()
        rps_credit_gt_rps_sources = (
            rps_credits > self.rps_total[g_id] + solver_precision
        ).any()
        storage_exceeds_demand = (
            self.adjusted_demand[g_id] < -solver_precision
        ).any()
        if not lights_kept_on:
            raise DemandNotSatisfiedError(
                "Demand not satisfied by %f for region %d"
                % (max(power_deficit), g_id)
            )
        if rps_credit_gt_demand:
            raise RpsExceedsDemandError(
                "RPS Credits Exceed Demand for region %d" % g_id
            )
        if rps_credit_gt_rps_sources:
            raise RpsCreditExceedsSourcesError(
                "RPS Credits Exceed RPS Sources for region %d" % g_id
            )
        if storage_exceeds_demand:
            raise StorageExceedsDemandError(
                "Storage Exceeds Demand for region %d" % g_id
            )
    # Scale solver_precision by number of timeslices to get precision
    # for a summed comparison.
    sum_solver_precision = solver_precision * self.number_of_timeslices
    if sum_solver_precision + sum_rps_credits < self.rps_demand:
        raise RpsPercentNotMetError(
            "Sum RPS credits (%f) < demand * (%f rps_percent) (%f)"
            % (sum_rps_credits, float(self.rps_percent), self.rps_demand)
        )
def declare_timeslice_variables(self, name, grid_region_id):
    """Declares timeslice variables for a grid_region.

    Args:
      name: String to be included in the generated variable name.
      grid_region_id: Int which identifies which grid these variables affect.

    Do Not call this function with the same (name, grid_region_id)
    pair more than once. There may not be identically named variables
    in the same grid_region.

    Returns:
      Array of lp variables, each which range from 0 to infinity.
      Array is mapped so that variable for time-slice x is at index x.
      e.g. variable for first time-slice is variable[0]. variable for
      last time-slice is variable[-1]
    """
    solver = self.solver
    return [
        solver.NumVar(
            0.0,
            solver.infinity(),
            "__".join(
                [name, "grid_region_id", str(grid_region_id), "at_t", str(t)]
            ),
        )
        for t in self.time_index_iterable
    ]
def declare_nameplate_variable(self, name, grid_region_id):
    """Declares a nameplate variable for a grid_region.

    Args:
      name: String to be included in the generated variable name.
      grid_region_id: Stringifyable object which identifies which grid
        these variables affect.

    Do Not call this function with the same (name, grid_region_id)
    pair more than once. There may not be identically named variables
    in the same grid_region.

    Returns:
      A lp variable which values range from 0 to infinity.
    """
    label = "__".join([name, "grid_region_id", str(grid_region_id), "peak"])
    return self.solver.NumVar(0.0, self.solver.infinity(), label)
def extrapolate_cost(cost, discount_rate, time_span_1, time_span_2):
    """Extrapolate cost from one time span to another.

    Args:
      cost: cost incurred during time_span_1 (in units of currency)
      discount_rate: rate that money decays, per year (as decimal, e.g., .06)
      time_span_1: time span when cost incurred (in units of years)
      time_span_2: time span to extrapolate cost (in units of years)

    Returns:
      Cost extrapolated to time_span_2, units of currency.

    Model parameters are costs over time spans. For example, the demand
    may be a time series that lasts 1 year. The variable cost to fulfill
    that demand would then be for 1 year of operation. However, the
    GridModel is supposed to compute the total cost over a longer time
    span (e.g., 30 years).

    If there were no time value of money, the extrapolated cost would be
    the ratio of time_span_2 to time_span_1 (e.g., 30 in the
    example). However, payments in the future are less costly than
    payments in the present. We extrapolate the cost by first finding
    the equivalent continuous stream of payments over time_span_1 that
    is equivalent to the cost, then assume that stream of payments
    occurs over time_span_2, instead.
    """
    growth_rate = 1.0 + discount_rate
    # Present-value factor of a payment deferred by each span.  (The old
    # locals had swapped names: value_decay_1 was computed from time_span_2.)
    span_1_decay = pow(growth_rate, -time_span_1)
    span_2_decay = pow(growth_rate, -time_span_2)
    try:
        # Ratio of present values of continuous payment streams over
        # time_span_2 vs. time_span_1 (standard annuity scaling).
        return cost * (1.0 - span_2_decay) / (1.0 - span_1_decay)
    except ZeroDivisionError:
        # discount_rate == 0 makes the annuity formula 0/0.  BUG FIX: the
        # correct zero-discount limit is the plain ratio of the spans, as
        # the docstring above states; previously `cost` was returned
        # unchanged.
        if time_span_1 == 0:
            # Degenerate input; preserve the old best-effort behavior.
            return cost
        return cost * time_span_2 / time_span_1
| 3,654 | 184 | 408 |
d5ab3a7720db01daa3de43c7dfde773e221acba3 | 899 | py | Python | tests/test_tfidf.py | eig-2017/dedupe | 7f90bcbbf345534b8692df726e9ba2479856bba1 | [
"MIT"
] | null | null | null | tests/test_tfidf.py | eig-2017/dedupe | 7f90bcbbf345534b8692df726e9ba2479856bba1 | [
"MIT"
] | null | null | null | tests/test_tfidf.py | eig-2017/dedupe | 7f90bcbbf345534b8692df726e9ba2479856bba1 | [
"MIT"
] | null | null | null | import dedupe
import unittest
if __name__ == "__main__":
unittest.main()
| 29 | 69 | 0.575083 | import dedupe
import unittest
class ParsingTest(unittest.TestCase):
    """Exercise TfIdfIndex search over inputs that stress the query parser."""

    def setUp(self):
        self.index = dedupe.tfidf.TfIdfIndex()

    def test_keywords(self):
        # Query-language keywords must be treated as plain tokens.
        self.index.index(('AND', 'OR', 'EOF', 'NOT'))
        self.index._index.initSearch()
        assert self.index.search(('AND', 'OR', 'EOF', 'NOT'))[0] == 1

    def test_keywords_title(self):
        self.index.index(('And', 'Or', 'Eof', 'Not'))
        self.index._index.initSearch()
        assert self.index.search(('And', 'Or', 'Eof', 'Not'))[0] == 1

    def test_empty_search(self):
        self.index._index.initSearch()
        assert self.index.search(()) == []

    def test_wildcards(self):
        # 'f\\o' spells the two-character-escape string f\o; the original
        # wrote 'f\o', whose unrecognized escape sequence now raises
        # SyntaxWarning in CPython and is slated to become a SyntaxError.
        self.index.index(('f\\o',))
        self.index.index(('f*',))
        self.index._index.initSearch()
        assert len(self.index.search(('f*',))) == 1
if __name__ == "__main__":
unittest.main()
| 638 | 17 | 165 |
c34d95a85f6568f6c9a18b0c92ccbd0ba5e083c9 | 2,788 | py | Python | get_data.py | Jack07310/Project-2-Image-Classifier-Part-2 | cafb78ca541faae45851281e87b99c2cdc493f04 | [
"MIT"
] | null | null | null | get_data.py | Jack07310/Project-2-Image-Classifier-Part-2 | cafb78ca541faae45851281e87b99c2cdc493f04 | [
"MIT"
] | null | null | null | get_data.py | Jack07310/Project-2-Image-Classifier-Part-2 | cafb78ca541faae45851281e87b99c2cdc493f04 | [
"MIT"
] | null | null | null | #get_data.py
#234567890123456789012345678901234567890123456789012345678901234567890123456789
# Imports here
import torch
from torchvision import datasets, transforms
# The command line parser for train.py | 40.405797 | 79 | 0.532999 | #get_data.py
#234567890123456789012345678901234567890123456789012345678901234567890123456789
# Imports here
import torch
from torchvision import datasets, transforms
# The command line parser for train.py
def get_dataloaders(data_dir):
    """Build ImageFolder datasets and DataLoaders for the three splits.

    Args:
        data_dir: Root directory containing 'train', 'valid' and 'test'
            subdirectories laid out for torchvision's ImageFolder.

    Returns:
        (image_datasets, dataloaders): two dicts keyed by 'train',
        'valid' and 'test'.
    """
    split_dirs = {
        'train': data_dir + '/train',
        'valid': data_dir + '/valid',
        'test': data_dir + '/test',
    }

    # ImageNet channel statistics used by the pretrained backbones.
    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])

    # Training uses random augmentation; validation/test use a
    # deterministic resize + center crop.
    data_transforms = {
        'train': transforms.Compose([transforms.RandomRotation(30),
                                     transforms.RandomResizedCrop(224),
                                     transforms.RandomHorizontalFlip(),
                                     transforms.ToTensor(),
                                     normalize]),
        'valid': transforms.Compose([transforms.Resize(255),
                                     transforms.CenterCrop(224),
                                     transforms.ToTensor(),
                                     normalize]),
        'test': transforms.Compose([transforms.Resize(255),
                                    transforms.CenterCrop(224),
                                    transforms.ToTensor(),
                                    normalize]),
    }

    image_datasets = {
        split: datasets.ImageFolder(split_dirs[split],
                                    transform=data_transforms[split])
        for split in ('train', 'valid', 'test')
    }

    # Shuffle only where the ordering matters for optimization.
    dataloaders = {
        'train': torch.utils.data.DataLoader(image_datasets['train'],
                                             batch_size=64, shuffle=True),
        'valid': torch.utils.data.DataLoader(image_datasets['valid'],
                                             batch_size=64, shuffle=True),
        'test': torch.utils.data.DataLoader(image_datasets['test'],
                                            batch_size=64),
    }

    return (image_datasets, dataloaders)
cceaa1dde18416d633f3357f1ad781584903faad | 1,421 | py | Python | Backend/migrations/alembic/versions/f8791d49d830_create_bed_capacity_table.py | dbvis-ukon/coronavis | f00374ac655c9d68541183d28ede6fe5536581dc | [
"Apache-2.0"
] | 15 | 2020-04-24T20:18:11.000Z | 2022-01-31T21:05:05.000Z | Backend/migrations/alembic/versions/f8791d49d830_create_bed_capacity_table.py | dbvis-ukon/coronavis | f00374ac655c9d68541183d28ede6fe5536581dc | [
"Apache-2.0"
] | 2 | 2021-05-19T07:15:09.000Z | 2022-03-07T08:29:34.000Z | Backend/migrations/alembic/versions/f8791d49d830_create_bed_capacity_table.py | dbvis-ukon/coronavis | f00374ac655c9d68541183d28ede6fe5536581dc | [
"Apache-2.0"
] | 4 | 2020-04-27T16:20:13.000Z | 2021-02-23T10:39:42.000Z | """create bed_capacity table
Revision ID: f8791d49d830
Revises: b84312f6532e
Create Date: 2020-11-26 15:22:19.299937
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'f8791d49d830'
down_revision = '4fcda072e8c6'
branch_labels = None
depends_on = None
| 28.42 | 82 | 0.635468 | """create bed_capacity table
Revision ID: f8791d49d830
Revises: b84312f6532e
Create Date: 2020-11-26 15:22:19.299937
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'f8791d49d830'
down_revision = '4fcda072e8c6'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: create the ``bed_capacity`` table and its index.

    Executes raw SQL on the current Alembic connection rather than using
    ``op.create_table`` — the DDL was auto-generated, including a foreign-key
    constraint on ``county`` and a descending index on ``datenbestand``.
    """
    op.get_bind().execute("""
    -- auto-generated definition
    create table bed_capacity
    (
        datenbestand timestamp with time zone,
        bl varchar,
        bl_id varchar(255),
        county varchar
            constraint bed_capacity_bed_capacity2landkreise_extended_bed_capacity_name
                references bed_capacity2landkreise_extended,
        anzahl_standorte integer,
        anzahl_meldebereiche integer,
        betten_frei integer,
        betten_belegt integer,
        betten_gesamt integer,
        anteil_betten_frei double precision,
        faelle_covid_aktuell integer,
        faelle_covid_aktuell_beatmet integer,
        anteil_covid_beatmet integer,
        anteil_covid_betten double precision,
        id serial not null
            constraint bed_capacity_pkey
                primary key
    );
    create index bed_capacity_datenbestand_index
        on bed_capacity (datenbestand desc);
    """)
def downgrade():
    """Revert the migration by dropping the ``bed_capacity`` table."""
    op.drop_table('bed_capacity')
| 1,089 | 0 | 46 |
cdb565b081ef8900b32c772239390e1a1c75c7d1 | 4,897 | py | Python | openjij/sampler/response.py | Atsushi-Machida/OpenJij | e4bddebb13536eb26ff0b7b9fc6b1c75659fe934 | [
"Apache-2.0"
] | null | null | null | openjij/sampler/response.py | Atsushi-Machida/OpenJij | e4bddebb13536eb26ff0b7b9fc6b1c75659fe934 | [
"Apache-2.0"
] | null | null | null | openjij/sampler/response.py | Atsushi-Machida/OpenJij | e4bddebb13536eb26ff0b7b9fc6b1c75659fe934 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Jij Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import openjij
class Response:
    """A class of response from samplers.

    Args:
        var_type (str):
            Type of variables: 'SPIN' or 'BINARY' which mean {-1, 1} or {0, 1}.
        indices (int):
            Indices of `openjij.sampler.response.Response` object.

    Attributes:
        states (list):
            States of the system.
        energies (list):
            Energies for the states.
        q_states (list):
            Quantum states of the system.
        q_energies (list):
            Quantum energies for the quantum states.
        min_samples (list):
            Samples with minimum energy.
        info (dict):
            Other information.
    """

    def update_ising_states_energies(self, states, energies):
        """Update states and energies.

        Args:
            states (list):
                Updated states.
            energies (list):
                Updated energies.

        Attributes:
            min_samples (dict):
                Minimun energies, states, and number of occurrences.
        """
        if self.var_type == openjij.SPIN:
            self.states = states
        else:
            # Map spin values {-1, 1} onto binary values {0, 1}.
            # Fix: `np.int` (a deprecated alias of the builtin `int`) was
            # removed in NumPy 1.20+; use `int` directly.
            self.states = [
                list(np.array((np.array(state) + 1)/2).astype(int)) for state in states]
        self.energies = energies
        self.min_samples = self._minimum_sample()

    def update_trotter_ising_states_energies(self, trotter_states, q_energies):
        """Update quantum states and energies.

        Args:
            trotter_states (list):
                Updated trotter states.
            q_energies (list):
                Updated quantum energies.

        Attributes:
            min_samples (dict):
                Minimun energies, states, and number of occurrences.
        """
        if self.var_type == openjij.SPIN:
            # Fix: `int` replaces the removed `np.int` alias (NumPy 1.20+).
            self.q_states = list(np.array(trotter_states).astype(int))
        else:
            self.q_states = [[list(np.array((np.array(state) + 1)/2).astype(int))
                              for state in t_state] for t_state in trotter_states]
        self.q_energies = q_energies
        # save minimum energy of each trotter_state
        min_e_indices = np.argmin(q_energies, axis=1)
        self.energies = [q_e[min_ind]
                         for min_ind, q_e in zip(min_e_indices, q_energies)]
        self.states = [list(t_state[min_ind]) for min_ind,
                       t_state in zip(min_e_indices, self.q_states)]
        self.min_samples = self._minimum_sample()

    @property
    def samples(self):
        """Returns samples as list.

        Returns:
            list: all the samples.
        """
        return [dict(zip(self.indices, state)) for state in self.states]
| 32.430464 | 91 | 0.602001 | # Copyright 2019 Jij Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import openjij
class Response:
    """A class of response from samplers.

    Args:
        var_type (str):
            Type of variables: 'SPIN' or 'BINARY' which mean {-1, 1} or {0, 1}.
        indices (int):
            Indices of `openjij.sampler.response.Response` object.

    Attributes:
        states (list):
            States of the system.
        energies (list):
            Energies for the states.
        q_states (list):
            Quantum states of the system.
        q_energies (list):
            Quantum energies for the quantum states.
        min_samples (list):
            Samples with minimum energy.
        info (dict):
            Other information.
    """

    def __init__(self, var_type, indices):
        """Initialize an empty response for the given variable type/indices."""
        self.states = []
        self.energies = []
        self.var_type = openjij.cast_var_type(var_type)
        self.q_states = []
        self.q_energies = []
        self.indices = indices
        self.min_samples = {}
        self.info = {}

    def __repr__(self):
        """Return a human-readable summary (iterations, ground energy/state)."""
        if len(self.states) == 0:
            return "Response\n\tvar_type: {}\n\tstates: empty".format(self.var_type)
        if len(self.min_samples) == 0:
            # Lazily compute the minimum-energy summary on first display.
            self.min_samples = self._minimum_sample()
        min_energy_index = np.argmin(self.energies) if len(
            self.energies) != 0 else None
        ground_energy = self.energies[min_energy_index]
        ground_state = self.states[min_energy_index]
        res_str = "Response\n\titeration : {},\n\t".format(len(self.states))
        res_str += "minimum energy: {}\n\t".format(ground_energy)
        res_str += "var_type: {}\n\t".format(self.var_type)
        res_str += "indices: {} \n\tminimum energy state : {}".format(
            self.indices, ground_state)
        return res_str

    def update_ising_states_energies(self, states, energies):
        """Update states and energies.

        Args:
            states (list):
                Updated states.
            energies (list):
                Updated energies.

        Attributes:
            min_samples (dict):
                Minimun energies, states, and number of occurrences.
        """
        if self.var_type == openjij.SPIN:
            self.states = states
        else:
            # Map spin values {-1, 1} onto binary values {0, 1}.
            # Fix: `np.int` (a deprecated alias of the builtin `int`) was
            # removed in NumPy 1.20+; use `int` directly.
            self.states = [
                list(np.array((np.array(state) + 1)/2).astype(int)) for state in states]
        self.energies = energies
        self.min_samples = self._minimum_sample()

    def update_trotter_ising_states_energies(self, trotter_states, q_energies):
        """Update quantum states and energies.

        Args:
            trotter_states (list):
                Updated trotter states.
            q_energies (list):
                Updated quantum energies.

        Attributes:
            min_samples (dict):
                Minimun energies, states, and number of occurrences.
        """
        if self.var_type == openjij.SPIN:
            # Fix: `int` replaces the removed `np.int` alias (NumPy 1.20+).
            self.q_states = list(np.array(trotter_states).astype(int))
        else:
            self.q_states = [[list(np.array((np.array(state) + 1)/2).astype(int))
                              for state in t_state] for t_state in trotter_states]
        self.q_energies = q_energies
        # save minimum energy of each trotter_state
        min_e_indices = np.argmin(q_energies, axis=1)
        self.energies = [q_e[min_ind]
                         for min_ind, q_e in zip(min_e_indices, q_energies)]
        self.states = [list(t_state[min_ind]) for min_ind,
                       t_state in zip(min_e_indices, self.q_states)]
        self.min_samples = self._minimum_sample()

    def _minimum_sample(self):
        """Return the minimum energy with its states and occurrence counts.

        Returns:
            dict: keys ``'states'`` (unique minimum-energy states),
            ``'num_occurrences'`` (their counts) and ``'min_energy'``.
        """
        min_energy_ind = np.argmin(self.energies) if len(
            self.energies) != 0 else None
        min_energy = self.energies[min_energy_ind]
        min_e_indices = np.where(np.array(self.energies) == min_energy)[0]
        min_states = np.array(self.states)[min_e_indices]
        # Collapse duplicate minimum-energy states, counting occurrences.
        min_states, counts = np.unique(min_states, axis=0, return_counts=True)
        return {'states': min_states, 'num_occurrences': counts, 'min_energy': min_energy}

    @property
    def samples(self):
        """Returns samples as list.

        Returns:
            list: all the samples.
        """
        return [dict(zip(self.indices, state)) for state in self.states]
| 1,484 | 0 | 81 |
559b08125c8dd5044f6a3227a31f3322c215c5f1 | 231 | py | Python | Round #534 (Div 2)/B.py | julianferres/Codeforces | ac80292a4d53b8078fc1a85e91db353c489555d9 | [
"MIT"
] | 4 | 2020-01-31T15:49:25.000Z | 2020-07-07T11:44:03.000Z | Round #534 (Div 2)/B.py | julianferres/CodeForces | 14e8369e82a2403094183d6f7824201f681c9f65 | [
"MIT"
] | null | null | null | Round #534 (Div 2)/B.py | julianferres/CodeForces | 14e8369e82a2403094183d6f7824201f681c9f65 | [
"MIT"
] | null | null | null |
def B(s=None):
    """Cancel adjacent equal characters with a stack and report parity.

    Scans the string, removing each pair of equal adjacent characters
    (stack top == current char) and counting the removals.  Prints "Yes"
    when the number of removed pairs is odd, otherwise "No".

    Args:
        s: Input string.  When None (the default, matching the original
           script's behavior) the string is read from stdin via input().
    """
    if s is None:
        s = input()
    stack = []
    removed_pairs = 0
    for ch in s:
        if stack and stack[-1] == ch:
            # Current char cancels the one on top of the stack.
            stack.pop()
            removed_pairs += 1
        else:
            stack.append(ch)
    print("Yes" if removed_pairs % 2 else "No")
B() | 205 | 0 | 22 |
6b14c5d2b9d6119d7b2f3a785b2ddfa6e6e4344f | 1,799 | py | Python | py_se_day03/py_fucntions.py | letscodedjango/py-se-bootcamp | 302e2acb69f4aaefbb2fe6361083c9135b000394 | [
"MIT"
] | null | null | null | py_se_day03/py_fucntions.py | letscodedjango/py-se-bootcamp | 302e2acb69f4aaefbb2fe6361083c9135b000394 | [
"MIT"
] | null | null | null | py_se_day03/py_fucntions.py | letscodedjango/py-se-bootcamp | 302e2acb69f4aaefbb2fe6361083c9135b000394 | [
"MIT"
] | 1 | 2020-05-20T14:48:48.000Z | 2020-05-20T14:48:48.000Z | #
# add_numbers()
# x = add_numbers_version_01(20,30)
# print(x) # ?
# not returning anything to you
# add_numbers_version_01(50, 10)
# add_numbers_version_01(300, 29)
# add_numbers_version_01(20.78, 56.89)
# # Average of two numbers
# Add 2 nos and then divide the sum with nos. of value
# no_one = 50
# no_two = 60
# nos_sum = no_one + no_two
# nos_sum = add_numbers_version_01(50, 50) ### This particular line of code will throw error
# avg = nos_sum/2 # 50.0
# print(avg) ##
# nos_sum = add_numbers_version_01(40, 50) ### This particular line of code will throw error
# avg = nos_sum/2 # 50.0
# print(avg) ##
# nos_sum = add_numbers_version_01(140, 150) ### This particular line of code will throw error
# avg = nos_sum/2 # 50.0
# print(avg) ##
output = calculate_avg_of_two_numbers()
print(output)
output = calculate_avg_of_two_numbers_version_01(30, 30)
print(output)
output = calculate_avg_of_two_numbers_version_01(40, 50)
print(output)
output = calculate_avg_of_two_numbers_version_01(140, 150)
print(output)
output = calculate_avg_of_two_numbers_version_01(230, 30)
print(output)
| 19.344086 | 96 | 0.712062 | #
def add_numbers():
    """Print the sum of the fixed pair 20 and 30 (i.e. prints 50)."""
    total = 20 + 30
    print(total)
# add_numbers()
def add_numbers_version_01(x, y):
    """Return the sum of the two values x and y."""
    return x + y
# print(result)
# x = add_numbers_version_01(20,30)
# print(x) # ?
# not returning anything to you
# add_numbers_version_01(50, 10)
# add_numbers_version_01(300, 29)
# add_numbers_version_01(20.78, 56.89)
# # Average of two numbers
# Add 2 nos and then divide the sum with nos. of value
# no_one = 50
# no_two = 60
# nos_sum = no_one + no_two
# nos_sum = add_numbers_version_01(50, 50) ### This particular line of code will throw error
# avg = nos_sum/2 # 50.0
# print(avg) ##
# nos_sum = add_numbers_version_01(40, 50) ### This particular line of code will throw error
# avg = nos_sum/2 # 50.0
# print(avg) ##
# nos_sum = add_numbers_version_01(140, 150) ### This particular line of code will throw error
# avg = nos_sum/2 # 50.0
# print(avg) ##
def calculate_avg_of_two_numbers():
    """Return the average of the fixed pair (50, 50), which is 50.0."""
    pair_sum = add_numbers_version_01(50, 50)
    return pair_sum / 2
output = calculate_avg_of_two_numbers()
print(output)
def calculate_avg_of_two_numbers_version_02(x, y):
    """Return the average of the two numbers x and y.

    Bug fix: the original body ignored both parameters and always averaged
    the hard-coded pair (50, 50).  It now forwards x and y to the adder,
    mirroring calculate_avg_of_two_numbers_version_01 below.
    """
    nos_sum = add_numbers_version_01(x, y)
    avg = nos_sum / 2
    return avg
def calculate_avg_of_two_numbers_version_01(x, y):
    """Return the average of the two numbers x and y."""
    return add_numbers_version_01(x, y) / 2
output = calculate_avg_of_two_numbers_version_01(30, 30)
print(output)
output = calculate_avg_of_two_numbers_version_01(40, 50)
print(output)
output = calculate_avg_of_two_numbers_version_01(140, 150)
print(output)
output = calculate_avg_of_two_numbers_version_01(230, 30)
print(output)
| 538 | 0 | 114 |
f4e25421a136d0f49225bf4f0ffc79ec0d8aff41 | 264 | py | Python | GTFS2OMNS/test.py | luyuliu/playground | 0e0382be27abf6714bda8ea1bc34249286ef53c1 | [
"MIT"
] | null | null | null | GTFS2OMNS/test.py | luyuliu/playground | 0e0382be27abf6714bda8ea1bc34249286ef53c1 | [
"MIT"
] | null | null | null | GTFS2OMNS/test.py | luyuliu/playground | 0e0382be27abf6714bda8ea1bc34249286ef53c1 | [
"MIT"
] | null | null | null | # Note:
# 1. Need dependency autoinstall
# 2. GDAL
import os
os.environ['PATH']
import gtfs2gmns as gg
gtfs_path = "H:\\ChromeDownload\\gtfscota"
gmns_path = "H:\\ChromeDownload\\gtfscota\\output"
node_transit,link_transit = gg.Convert_GTFS(gtfs_path,gmns_path) | 22 | 64 | 0.761364 | # Note:
# 1. Need dependency autoinstall
# 2. GDAL
import os
os.environ['PATH']
import gtfs2gmns as gg
gtfs_path = "H:\\ChromeDownload\\gtfscota"
gmns_path = "H:\\ChromeDownload\\gtfscota\\output"
node_transit,link_transit = gg.Convert_GTFS(gtfs_path,gmns_path) | 0 | 0 | 0 |
6399edf0c794754950ba2f79efc7d0c101bed496 | 9,518 | py | Python | scipy/sparse/linalg/eigen/_svds.py | jcharlong/scipy | 153467a9174b0c6f4b90ffeed5871e5018658108 | [
"BSD-3-Clause"
] | 1 | 2021-08-16T09:32:42.000Z | 2021-08-16T09:32:42.000Z | scipy/sparse/linalg/eigen/_svds.py | jcharlong/scipy | 153467a9174b0c6f4b90ffeed5871e5018658108 | [
"BSD-3-Clause"
] | 44 | 2019-06-27T15:56:14.000Z | 2022-03-15T22:21:10.000Z | scipy/sparse/linalg/eigen/_svds.py | jcharlong/scipy | 153467a9174b0c6f4b90ffeed5871e5018658108 | [
"BSD-3-Clause"
] | 4 | 2020-06-13T10:32:25.000Z | 2021-12-03T15:48:16.000Z | import numpy as np
from .arpack import _arpack # type: ignore[attr-defined]
from . import eigsh
from scipy.sparse.linalg.interface import LinearOperator
from scipy.sparse import isspmatrix
from scipy.sparse.sputils import is_pydata_spmatrix
from scipy.sparse.linalg.eigen.lobpcg import lobpcg # type: ignore[no-redef]
arpack_int = _arpack.timing.nbx.dtype
__all__ = ['svds']
def svds(A, k=6, ncv=None, tol=0, which='LM', v0=None,
maxiter=None, return_singular_vectors=True,
solver='arpack', options=None):
"""
Partial singular value decomposition of a sparse matrix.
Compute the largest or smallest `k` singular values and corresponding
singular vectors of a sparse matrix `A`. The order in which the singular
values are returned is not guaranteed.
In the descriptions below, let ``M, N = A.shape``.
Parameters
----------
A : sparse matrix or LinearOperator
Matrix to decompose.
k : int, default: 6
Number of singular values and singular vectors to compute.
Must satisfy ``1 <= k < min(M, N)``.
ncv : int, optional
When ``solver='arpack'``, this is the number of Lanczos vectors
generated. See :ref:`'arpack' <sparse.linalg.svds-arpack>` for details.
When ``solver='lobpcg'``, this parameter is ignored.
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
which : {'LM', 'SM'}
Which `k` singular values to find: either the largest magnitude ('LM')
or smallest magnitude ('SM') singular values.
v0 : ndarray, optional
The starting vector for iteration; see method-specific
documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>` or
:ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`) for details.
maxiter : int, optional
Maximum number of iterations; see method-specific
documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>` or
:ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`) for details.
return_singular_vectors : bool or str, optional
Singular values are always computed and returned; this parameter
controls the computation and return of singular vectors.
- ``True``: return singular vectors.
- ``False``: do not return singular vectors.
- ``"u"``: only return the left singular values, without computing the
right singular vectors (if ``N > M``).
- ``"vh"``: only return the right singular values, without computing
the left singular vectors (if ``N <= M``).
solver : str, optional
The solver used.
:ref:`'arpack' <sparse.linalg.svds-arpack>` and
:ref:`'lobpcg' <sparse.linalg.svds-lobpcg>` are supported.
Default: `'arpack'`.
options : dict, optional
A dictionary of solver-specific options. No solver-specific options
are currently supported; this parameter is reserved for future use.
Returns
-------
u : ndarray, shape=(M, k)
Unitary matrix having left singular vectors as columns.
If `return_singular_vectors` is ``"vh"``, this variable is not
computed, and ``None`` is returned instead.
s : ndarray, shape=(k,)
The singular values.
vh : ndarray, shape=(k, N)
Unitary matrix having right singular vectors as rows.
If `return_singular_vectors` is ``"u"``, this variable is not computed,
and ``None`` is returned instead.
Notes
-----
This is a naive implementation using ARPACK or LOBPCG as an eigensolver
on ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on which one is more
efficient.
Examples
--------
Construct a matrix ``A`` from singular values and vectors.
>>> from scipy.stats import ortho_group
>>> from scipy.sparse import csc_matrix, diags
>>> from scipy.sparse.linalg import svds
>>> rng = np.random.default_rng()
>>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
>>> s = [0.0001, 0.001, 3, 4, 5] # singular values
>>> u = orthogonal[:, :5] # left singular vectors
>>> vT = orthogonal[:, 5:].T # right singular vectors
>>> A = u @ diags(s) @ vT
With only three singular values/vectors, the SVD approximates the original
matrix.
>>> u2, s2, vT2 = svds(A, k=3)
>>> A2 = u2 @ np.diag(s2) @ vT2
>>> np.allclose(A2, A.todense(), atol=1e-3)
True
With all five singular values/vectors, we can reproduce the original
matrix.
>>> u3, s3, vT3 = svds(A, k=5)
>>> A3 = u3 @ np.diag(s3) @ vT3
>>> np.allclose(A3, A.todense())
True
The singular values match the expected singular values, and the singular
values are as expected up to a difference in sign. Consequently, the
returned arrays of singular vectors must also be orthogonal.
>>> (np.allclose(s3, s) and
... np.allclose(np.abs(u3), np.abs(u.todense())) and
... np.allclose(np.abs(vT3), np.abs(vT.todense())))
True
"""
if which == 'LM':
largest = True
elif which == 'SM':
largest = False
else:
raise ValueError("which must be either 'LM' or 'SM'.")
if (not (isinstance(A, LinearOperator) or isspmatrix(A)
or is_pydata_spmatrix(A))):
A = np.asarray(A)
n, m = A.shape
if k <= 0 or k >= min(n, m):
raise ValueError("k must be between 1 and min(A.shape), k=%d" % k)
if isinstance(A, LinearOperator):
if n > m:
X_dot = A.matvec
X_matmat = A.matmat
XH_dot = A.rmatvec
XH_mat = A.rmatmat
else:
X_dot = A.rmatvec
X_matmat = A.rmatmat
XH_dot = A.matvec
XH_mat = A.matmat
dtype = getattr(A, 'dtype', None)
if dtype is None:
dtype = A.dot(np.zeros([m, 1])).dtype
else:
if n > m:
X_dot = X_matmat = A.dot
XH_dot = XH_mat = _herm(A).dot
else:
XH_dot = XH_mat = A.dot
X_dot = X_matmat = _herm(A).dot
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=A.dtype,
matmat=matmat_XH_X,
shape=(min(A.shape), min(A.shape)))
# Get a low rank approximation of the implicitly defined gramian matrix.
# This is not a stable way to approach the problem.
if solver == 'lobpcg':
if k == 1 and v0 is not None:
X = np.reshape(v0, (-1, 1))
else:
X = np.random.RandomState(52).randn(min(A.shape), k)
eigvals, eigvec = lobpcg(XH_X, X, tol=tol ** 2, maxiter=maxiter,
largest=largest)
elif solver == 'arpack' or solver is None:
eigvals, eigvec = eigsh(XH_X, k=k, tol=tol ** 2, maxiter=maxiter,
ncv=ncv, which=which, v0=v0)
else:
raise ValueError("solver must be either 'arpack', or 'lobpcg'.")
# Gramian matrices have real non-negative eigenvalues.
eigvals = np.maximum(eigvals.real, 0)
# Use the sophisticated detection of small eigenvalues from pinvh.
t = eigvec.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
cutoff = cond * np.max(eigvals)
# Get a mask indicating which eigenpairs are not degenerately tiny,
# and create the re-ordered array of thresholded singular values.
above_cutoff = (eigvals > cutoff)
nlarge = above_cutoff.sum()
nsmall = k - nlarge
slarge = np.sqrt(eigvals[above_cutoff])
s = np.zeros_like(eigvals)
s[:nlarge] = slarge
if not return_singular_vectors:
return np.sort(s)
if n > m:
vlarge = eigvec[:, above_cutoff]
ularge = (X_matmat(vlarge) / slarge
if return_singular_vectors != 'vh' else None)
vhlarge = _herm(vlarge)
else:
ularge = eigvec[:, above_cutoff]
vhlarge = (_herm(X_matmat(ularge) / slarge)
if return_singular_vectors != 'u' else None)
u = (_augmented_orthonormal_cols(ularge, nsmall)
if ularge is not None else None)
vh = (_augmented_orthonormal_rows(vhlarge, nsmall)
if vhlarge is not None else None)
indexes_sorted = np.argsort(s)
s = s[indexes_sorted]
if u is not None:
u = u[:, indexes_sorted]
if vh is not None:
vh = vh[indexes_sorted]
return u, s, vh
| 34.737226 | 79 | 0.604854 | import numpy as np
from .arpack import _arpack # type: ignore[attr-defined]
from . import eigsh
from scipy.sparse.linalg.interface import LinearOperator
from scipy.sparse import isspmatrix
from scipy.sparse.sputils import is_pydata_spmatrix
from scipy.sparse.linalg.eigen.lobpcg import lobpcg # type: ignore[no-redef]
arpack_int = _arpack.timing.nbx.dtype
__all__ = ['svds']
def _augmented_orthonormal_cols(x, k):
    """Append `k` random orthonormal columns to `x`.

    The existing columns of `x` are copied unchanged; each new column is a
    random vector orthonormalized (modified Gram-Schmidt) against all
    columns placed before it.  Complex input gets complex random vectors.
    """
    n_rows, n_cols = x.shape
    # Allocate the widened output and copy the original columns in place.
    out = np.empty((n_rows, n_cols + k), dtype=x.dtype)
    out[:, :n_cols] = x
    complex_valued = np.iscomplexobj(x)
    for extra in range(k):
        # Draw a random starting vector (complex when x is complex).
        vec = np.random.randn(n_rows)
        if complex_valued:
            vec = vec + 1j*np.random.randn(n_rows)
        # Subtract the projection onto every column already present.
        for col in range(n_cols + extra):
            basis = out[:, col]
            vec -= (np.dot(vec, basis.conj()) / np.dot(basis, basis.conj())) * basis
        # Normalize and store the new column.
        vec /= np.sqrt(np.dot(vec, vec.conj()))
        out[:, n_cols + extra] = vec
    return out
def _augmented_orthonormal_rows(x, k):
    """Append `k` random orthonormal rows to `x` (row-wise counterpart)."""
    widened = _augmented_orthonormal_cols(x.T, k)
    return widened.T
def _herm(x):
    """Return the Hermitian adjoint (conjugate transpose) of `x`."""
    conjugated = x.conj()
    return conjugated.T
def svds(A, k=6, ncv=None, tol=0, which='LM', v0=None,
         maxiter=None, return_singular_vectors=True,
         solver='arpack', options=None):
    """
    Partial singular value decomposition of a sparse matrix.
    Compute the largest or smallest `k` singular values and corresponding
    singular vectors of a sparse matrix `A`. The order in which the singular
    values are returned is not guaranteed.
    In the descriptions below, let ``M, N = A.shape``.
    Parameters
    ----------
    A : sparse matrix or LinearOperator
        Matrix to decompose.
    k : int, default: 6
        Number of singular values and singular vectors to compute.
        Must satisfy ``1 <= k < min(M, N)``.
    ncv : int, optional
        When ``solver='arpack'``, this is the number of Lanczos vectors
        generated. See :ref:`'arpack' <sparse.linalg.svds-arpack>` for details.
        When ``solver='lobpcg'``, this parameter is ignored.
    tol : float, optional
        Tolerance for singular values. Zero (default) means machine precision.
    which : {'LM', 'SM'}
        Which `k` singular values to find: either the largest magnitude ('LM')
        or smallest magnitude ('SM') singular values.
    v0 : ndarray, optional
        The starting vector for iteration; see method-specific
        documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>` or
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`) for details.
    maxiter : int, optional
        Maximum number of iterations; see method-specific
        documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>` or
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`) for details.
    return_singular_vectors : bool or str, optional
        Singular values are always computed and returned; this parameter
        controls the computation and return of singular vectors.
        - ``True``: return singular vectors.
        - ``False``: do not return singular vectors.
        - ``"u"``: only return the left singular values, without computing the
          right singular vectors (if ``N > M``).
        - ``"vh"``: only return the right singular values, without computing
          the left singular vectors (if ``N <= M``).
    solver : str, optional
        The solver used.
        :ref:`'arpack' <sparse.linalg.svds-arpack>` and
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>` are supported.
        Default: `'arpack'`.
    options : dict, optional
        A dictionary of solver-specific options. No solver-specific options
        are currently supported; this parameter is reserved for future use.
    Returns
    -------
    u : ndarray, shape=(M, k)
        Unitary matrix having left singular vectors as columns.
        If `return_singular_vectors` is ``"vh"``, this variable is not
        computed, and ``None`` is returned instead.
    s : ndarray, shape=(k,)
        The singular values.
    vh : ndarray, shape=(k, N)
        Unitary matrix having right singular vectors as rows.
        If `return_singular_vectors` is ``"u"``, this variable is not computed,
        and ``None`` is returned instead.
    Notes
    -----
    This is a naive implementation using ARPACK or LOBPCG as an eigensolver
    on ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on which one is more
    efficient.
    Examples
    --------
    Construct a matrix ``A`` from singular values and vectors.
    >>> from scipy.stats import ortho_group
    >>> from scipy.sparse import csc_matrix, diags
    >>> from scipy.sparse.linalg import svds
    >>> rng = np.random.default_rng()
    >>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
    >>> s = [0.0001, 0.001, 3, 4, 5]  # singular values
    >>> u = orthogonal[:, :5]  # left singular vectors
    >>> vT = orthogonal[:, 5:].T  # right singular vectors
    >>> A = u @ diags(s) @ vT
    With only three singular values/vectors, the SVD approximates the original
    matrix.
    >>> u2, s2, vT2 = svds(A, k=3)
    >>> A2 = u2 @ np.diag(s2) @ vT2
    >>> np.allclose(A2, A.todense(), atol=1e-3)
    True
    With all five singular values/vectors, we can reproduce the original
    matrix.
    >>> u3, s3, vT3 = svds(A, k=5)
    >>> A3 = u3 @ np.diag(s3) @ vT3
    >>> np.allclose(A3, A.todense())
    True
    The singular values match the expected singular values, and the singular
    values are as expected up to a difference in sign. Consequently, the
    returned arrays of singular vectors must also be orthogonal.
    >>> (np.allclose(s3, s) and
    ...  np.allclose(np.abs(u3), np.abs(u.todense())) and
    ...  np.allclose(np.abs(vT3), np.abs(vT.todense())))
    True
    """
    if which == 'LM':
        largest = True
    elif which == 'SM':
        largest = False
    else:
        raise ValueError("which must be either 'LM' or 'SM'.")
    # Anything that is not sparse or a LinearOperator is densified up front.
    if (not (isinstance(A, LinearOperator) or isspmatrix(A)
             or is_pydata_spmatrix(A))):
        A = np.asarray(A)
    n, m = A.shape
    if k <= 0 or k >= min(n, m):
        raise ValueError("k must be between 1 and min(A.shape), k=%d" % k)
    # Choose the operator orientation so the Gram matrix built below is the
    # smaller of A^H A and A A^H (size min(n, m) x min(n, m)).
    if isinstance(A, LinearOperator):
        if n > m:
            X_dot = A.matvec
            X_matmat = A.matmat
            XH_dot = A.rmatvec
            XH_mat = A.rmatmat
        else:
            X_dot = A.rmatvec
            X_matmat = A.rmatmat
            XH_dot = A.matvec
            XH_mat = A.matmat
        # NOTE(review): `dtype` is computed here but never used below —
        # XH_X is constructed with A.dtype directly.
        dtype = getattr(A, 'dtype', None)
        if dtype is None:
            dtype = A.dot(np.zeros([m, 1])).dtype
    else:
        if n > m:
            X_dot = X_matmat = A.dot
            XH_dot = XH_mat = _herm(A).dot
        else:
            XH_dot = XH_mat = A.dot
            X_dot = X_matmat = _herm(A).dot
    # Implicit Gram operator: x -> A^H (A x) (or the transposed variant).
    def matvec_XH_X(x):
        return XH_dot(X_dot(x))
    def matmat_XH_X(x):
        return XH_mat(X_matmat(x))
    XH_X = LinearOperator(matvec=matvec_XH_X, dtype=A.dtype,
                          matmat=matmat_XH_X,
                          shape=(min(A.shape), min(A.shape)))
    # Get a low rank approximation of the implicitly defined gramian matrix.
    # This is not a stable way to approach the problem.
    if solver == 'lobpcg':
        if k == 1 and v0 is not None:
            X = np.reshape(v0, (-1, 1))
        else:
            # Fixed seed keeps the LOBPCG starting block reproducible.
            X = np.random.RandomState(52).randn(min(A.shape), k)
        eigvals, eigvec = lobpcg(XH_X, X, tol=tol ** 2, maxiter=maxiter,
                                 largest=largest)
    elif solver == 'arpack' or solver is None:
        eigvals, eigvec = eigsh(XH_X, k=k, tol=tol ** 2, maxiter=maxiter,
                                ncv=ncv, which=which, v0=v0)
    else:
        raise ValueError("solver must be either 'arpack', or 'lobpcg'.")
    # Gramian matrices have real non-negative eigenvalues.
    eigvals = np.maximum(eigvals.real, 0)
    # Use the sophisticated detection of small eigenvalues from pinvh.
    t = eigvec.dtype.char.lower()
    factor = {'f': 1E3, 'd': 1E6}
    cond = factor[t] * np.finfo(t).eps
    cutoff = cond * np.max(eigvals)
    # Get a mask indicating which eigenpairs are not degenerately tiny,
    # and create the re-ordered array of thresholded singular values.
    above_cutoff = (eigvals > cutoff)
    nlarge = above_cutoff.sum()
    nsmall = k - nlarge
    slarge = np.sqrt(eigvals[above_cutoff])
    s = np.zeros_like(eigvals)
    s[:nlarge] = slarge
    if not return_singular_vectors:
        return np.sort(s)
    # Recover the "other" set of singular vectors by applying A (or A^H) to
    # the eigenvectors and dividing by the singular values.
    if n > m:
        vlarge = eigvec[:, above_cutoff]
        ularge = (X_matmat(vlarge) / slarge
                  if return_singular_vectors != 'vh' else None)
        vhlarge = _herm(vlarge)
    else:
        ularge = eigvec[:, above_cutoff]
        vhlarge = (_herm(X_matmat(ularge) / slarge)
                   if return_singular_vectors != 'u' else None)
    # Pad the degenerately-small directions with random orthonormal vectors
    # so the returned factors still have k columns/rows.
    u = (_augmented_orthonormal_cols(ularge, nsmall)
         if ularge is not None else None)
    vh = (_augmented_orthonormal_rows(vhlarge, nsmall)
          if vhlarge is not None else None)
    indexes_sorted = np.argsort(s)
    s = s[indexes_sorted]
    if u is not None:
        u = u[:, indexes_sorted]
    if vh is not None:
        vh = vh[indexes_sorted]
    return u, s, vh
| 924 | 0 | 123 |
99b6bd941f90c609486b360ca9e450f760cfa0d4 | 691 | py | Python | monty/os/__init__.py | yanikou19/monty | 822ae841f7d29bd7464287fd99b51da6e5960088 | [
"MIT"
] | null | null | null | monty/os/__init__.py | yanikou19/monty | 822ae841f7d29bd7464287fd99b51da6e5960088 | [
"MIT"
] | null | null | null | monty/os/__init__.py | yanikou19/monty | 822ae841f7d29bd7464287fd99b51da6e5960088 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Project'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '1/24/14'
import os
from contextlib import contextmanager
@contextmanager
def cd(path):
    """
    A Fabric-inspired cd context that temporarily changes directory for
    performing some tasks, and returns to the original working directory
    afterwards. E.g.,
    with cd("/my/path/"):
        do_something()
    Args:
        path: Path to cd to.
    """
    # Remember the caller's working directory so it can be restored.
    cwd = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        # Always change back, even if the managed block raised.
        os.chdir(cwd)
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Project'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '1/24/14'
import os
from contextlib import contextmanager
@contextmanager
def cd(path):
    """Temporarily change the working directory.

    A Fabric-inspired context manager: enters ``path`` for the duration of
    the ``with`` block and always returns to the previous working directory
    afterwards, even if the block raises.  E.g.::

        with cd("/my/path/"):
            do_something()

    Args:
        path: Directory to change into.
    """
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)
0aa7b9480412288c537a477f9a614f1b0959bd31 | 5,683 | py | Python | eval.py | khchow-gt/mnist_challenge | 89e9e05b538e887de3e2e5ba45809b7b7505f5c1 | [
"MIT"
] | null | null | null | eval.py | khchow-gt/mnist_challenge | 89e9e05b538e887de3e2e5ba45809b7b7505f5c1 | [
"MIT"
] | null | null | null | eval.py | khchow-gt/mnist_challenge | 89e9e05b538e887de3e2e5ba45809b7b7505f5c1 | [
"MIT"
] | null | null | null | """
Infinite evaluation loop going through the checkpoints in the model directory
as they appear and evaluating them. Accuracy and average loss are printed and
added as tensorboard summaries.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import json
import math
import os
import sys
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from model import Model
from pgd_attack import LinfPGDAttack
# Global constants
with open('config.json') as config_file:
config = json.load(config_file)
num_eval_examples = config['num_eval_examples']
eval_batch_size = config['eval_batch_size']
eval_on_cpu = config['eval_on_cpu']
model_dir = config['model_dir']
# Set upd the data, hyperparameters, and the model
mnist = input_data.read_data_sets('MNIST_data', one_hot=False)
if eval_on_cpu:
with tf.device("/cpu:0"):
model = Model()
attack = LinfPGDAttack(model,
config['epsilon'],
config['k'],
config['a'],
config['random_start'],
config['loss_func'])
else:
model = Model()
attack = LinfPGDAttack(model,
config['epsilon'],
config['k'],
config['a'],
config['random_start'],
config['loss_func'])
global_step = tf.contrib.framework.get_or_create_global_step()
# Setting up the Tensorboard and checkpoint outputs
if not os.path.exists(model_dir):
os.makedirs(model_dir)
eval_dir = os.path.join(model_dir, 'eval')
if not os.path.exists(eval_dir):
os.makedirs(eval_dir)
last_checkpoint_filename = ''
already_seen_state = False
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(eval_dir)
# A function for evaluating a single checkpoint
# Infinite eval loop
while True:
cur_checkpoint = tf.train.latest_checkpoint(model_dir)
# Case 1: No checkpoint yet
if cur_checkpoint is None:
if not already_seen_state:
print('No checkpoint yet, waiting ...', end='')
already_seen_state = True
else:
print('.', end='')
sys.stdout.flush()
time.sleep(10)
# Case 2: Previously unseen checkpoint
elif cur_checkpoint != last_checkpoint_filename:
print('\nCheckpoint {}, evaluating ... ({})'.format(cur_checkpoint,
datetime.now()))
sys.stdout.flush()
last_checkpoint_filename = cur_checkpoint
already_seen_state = False
evaluate_checkpoint(cur_checkpoint)
# Case 3: Previously evaluated checkpoint
else:
if not already_seen_state:
print('Waiting for the next checkpoint ... ({}) '.format(
datetime.now()),
end='')
already_seen_state = True
else:
print('.', end='')
sys.stdout.flush()
time.sleep(10)
| 35.298137 | 79 | 0.600211 | """
Infinite evaluation loop going through the checkpoints in the model directory
as they appear and evaluating them. Accuracy and average loss are printed and
added as tensorboard summaries.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import json
import math
import os
import sys
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from model import Model
from pgd_attack import LinfPGDAttack
# Global constants
# Evaluation hyperparameters are read from config.json at import time.
with open('config.json') as config_file:
  config = json.load(config_file)
num_eval_examples = config['num_eval_examples']
eval_batch_size = config['eval_batch_size']
eval_on_cpu = config['eval_on_cpu']
model_dir = config['model_dir']
# Set up the data, hyperparameters, and the model
mnist = input_data.read_data_sets('MNIST_data', one_hot=False)
# The two branches differ only in whether the model and the PGD attack
# graph are pinned to the CPU device.
if eval_on_cpu:
  with tf.device("/cpu:0"):
    model = Model()
    attack = LinfPGDAttack(model,
                           config['epsilon'],
                           config['k'],
                           config['a'],
                           config['random_start'],
                           config['loss_func'])
else:
  model = Model()
  attack = LinfPGDAttack(model,
                         config['epsilon'],
                         config['k'],
                         config['a'],
                         config['random_start'],
                         config['loss_func'])
global_step = tf.contrib.framework.get_or_create_global_step()
# Setting up the Tensorboard and checkpoint outputs
if not os.path.exists(model_dir):
  os.makedirs(model_dir)
eval_dir = os.path.join(model_dir, 'eval')
if not os.path.exists(eval_dir):
  os.makedirs(eval_dir)
# Poll-loop state: name of the last checkpoint evaluated, and whether the
# current "waiting" message has already been printed once.
last_checkpoint_filename = ''
already_seen_state = False
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(eval_dir)
# A function for evaluating a single checkpoint
def evaluate_checkpoint(filename):
  """Evaluate one saved checkpoint over the MNIST test set.

  Restores `filename` into a fresh session, measures natural and
  adversarial (PGD-perturbed) accuracy and cross-entropy, writes the
  results as tensorboard summaries via the module-level
  `summary_writer`, and prints a short report.

  Relies on the module-level globals: `saver`, `mnist`, `model`,
  `attack`, `num_eval_examples`, `eval_batch_size`, `global_step` and
  `summary_writer`.
  """
  with tf.Session() as sess:
    # Restore the checkpoint
    saver.restore(sess, filename)
    # Iterate over the samples batch-by-batch
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    # Running totals accumulated over all batches.
    total_xent_nat = 0.
    total_xent_adv = 0.
    total_corr_nat = 0
    total_corr_adv = 0
    for ibatch in range(num_batches):
      # The final batch may be short; clamp to num_eval_examples.
      bstart = ibatch * eval_batch_size
      bend = min(bstart + eval_batch_size, num_eval_examples)
      x_batch = mnist.test.images[bstart:bend, :]
      y_batch = mnist.test.labels[bstart:bend]
      dict_nat = {model.x_input: x_batch,
                  model.y_input: y_batch}
      # Craft adversarial examples for this batch with the PGD attack.
      x_batch_adv = attack.perturb(x_batch, y_batch, sess)
      dict_adv = {model.x_input: x_batch_adv,
                  model.y_input: y_batch}
      cur_corr_nat, cur_xent_nat = sess.run(
          [model.num_correct, model.xent],
          feed_dict=dict_nat)
      cur_corr_adv, cur_xent_adv = sess.run(
          [model.num_correct, model.xent],
          feed_dict=dict_adv)
      total_xent_nat += cur_xent_nat
      total_xent_adv += cur_xent_adv
      total_corr_nat += cur_corr_nat
      total_corr_adv += cur_corr_adv
    avg_xent_nat = total_xent_nat / num_eval_examples
    avg_xent_adv = total_xent_adv / num_eval_examples
    acc_nat = total_corr_nat / num_eval_examples
    acc_adv = total_corr_adv / num_eval_examples
    # NOTE(review): 'xent adv eval'/'xent adv' (and the two 'accuracy adv'
    # tags) record the same value — presumably one feeds a combined
    # train/eval dashboard; confirm before removing either.
    summary = tf.Summary(value=[
        tf.Summary.Value(tag='xent adv eval', simple_value=avg_xent_adv),
        tf.Summary.Value(tag='xent adv', simple_value=avg_xent_adv),
        tf.Summary.Value(tag='xent nat', simple_value=avg_xent_nat),
        tf.Summary.Value(tag='accuracy adv eval', simple_value=acc_adv),
        tf.Summary.Value(tag='accuracy adv', simple_value=acc_adv),
        tf.Summary.Value(tag='accuracy nat', simple_value=acc_nat)])
    summary_writer.add_summary(summary, global_step.eval(sess))
    print('natural: {:.2f}%'.format(100 * acc_nat))
    print('adversarial: {:.2f}%'.format(100 * acc_adv))
    print('avg nat loss: {:.4f}'.format(avg_xent_nat))
    print('avg adv loss: {:.4f}'.format(avg_xent_adv))
# Infinite eval loop: poll model_dir every 10 seconds and evaluate each
# new checkpoint exactly once. Dots are printed while waiting.
while True:
  cur_checkpoint = tf.train.latest_checkpoint(model_dir)
  # Case 1: No checkpoint yet
  if cur_checkpoint is None:
    if not already_seen_state:
      print('No checkpoint yet, waiting ...', end='')
      already_seen_state = True
    else:
      print('.', end='')
    sys.stdout.flush()
    time.sleep(10)
  # Case 2: Previously unseen checkpoint
  elif cur_checkpoint != last_checkpoint_filename:
    print('\nCheckpoint {}, evaluating ... ({})'.format(cur_checkpoint,
                                                        datetime.now()))
    sys.stdout.flush()
    # Record the checkpoint name first so a slow evaluation is not
    # re-triggered on the next poll.
    last_checkpoint_filename = cur_checkpoint
    already_seen_state = False
    evaluate_checkpoint(cur_checkpoint)
  # Case 3: Previously evaluated checkpoint
  else:
    if not already_seen_state:
      print('Waiting for the next checkpoint ... ({}) '.format(
            datetime.now()),
            end='')
      already_seen_state = True
    else:
      print('.', end='')
    sys.stdout.flush()
    time.sleep(10)
| 2,393 | 0 | 23 |
04b1d605171844fcc802147261d398861e284991 | 5,204 | py | Python | tests/test_xbse_calendar.py | GitDjTHU/exchange_calendars | 5c544f74d14fe01aa43a18a73667033124495d0c | [
"Apache-2.0"
] | null | null | null | tests/test_xbse_calendar.py | GitDjTHU/exchange_calendars | 5c544f74d14fe01aa43a18a73667033124495d0c | [
"Apache-2.0"
] | 7 | 2021-11-03T01:20:29.000Z | 2022-03-31T01:28:12.000Z | tests/test_xbse_calendar.py | GitDjTHU/exchange_calendars | 5c544f74d14fe01aa43a18a73667033124495d0c | [
"Apache-2.0"
] | null | null | null | from unittest import TestCase
import pandas as pd
from pytz import UTC
from exchange_calendars.exchange_calendar_xbse import XBSEExchangeCalendar
from .test_exchange_calendar import ExchangeCalendarTestBase
| 43.008264 | 88 | 0.590123 | from unittest import TestCase
import pandas as pd
from pytz import UTC
from exchange_calendars.exchange_calendar_xbse import XBSEExchangeCalendar
from .test_exchange_calendar import ExchangeCalendarTestBase
class XBSECalendarTestCase(ExchangeCalendarTestBase, TestCase):
answer_key_filename = "xbse"
calendar_class = XBSEExchangeCalendar
# The XBSE is open from 10:00 to 5:20PM on its longest trading day
MAX_SESSION_HOURS = 7 + (3 / 4)
HAVE_EARLY_CLOSES = False
DAYLIGHT_SAVINGS_DATES = ["2018-03-26", "2018-10-29"]
def test_regular_holidays(self):
all_sessions = self.calendar.all_sessions
expected_holidays = [
pd.Timestamp("2021-01-01", tz=UTC), # New Year's Day
pd.Timestamp("2021-04-30", tz=UTC), # Orthodox Good Friday
pd.Timestamp("2021-06-01", tz=UTC), # Children's day
pd.Timestamp("2021-06-21", tz=UTC), # Orthodox Pentecost
pd.Timestamp("2021-11-30", tz=UTC), # St. Adnrew's Day
pd.Timestamp("2021-12-01", tz=UTC), # National Day
pd.Timestamp("2020-01-01", tz=UTC), # New Year's Day
pd.Timestamp("2020-01-02", tz=UTC), # New Year's Day
pd.Timestamp(
"2020-01-24", tz=UTC
), # Romanian Principalities Unification Day
pd.Timestamp("2020-04-17", tz=UTC), # Good Friday
pd.Timestamp("2020-04-20", tz=UTC), # Orthodox Easter
pd.Timestamp("2020-05-01", tz=UTC), # Labour Day
pd.Timestamp("2020-06-01", tz=UTC), # Children's Day
pd.Timestamp("2020-06-08", tz=UTC), # Orthodox Pentecost
pd.Timestamp("2020-11-30", tz=UTC), # St. Adnrew's day
pd.Timestamp("2020-12-01", tz=UTC), # National Day
pd.Timestamp("2020-12-25", tz=UTC), # Christmans
pd.Timestamp("2019-01-01", tz=UTC), # New Year's Day
pd.Timestamp("2019-01-02", tz=UTC), # New Year's Day
pd.Timestamp(
"2019-01-24", tz=UTC
), # Romanian Principalities Unification Day
pd.Timestamp("2019-04-26", tz=UTC), # Good Friday
pd.Timestamp("2019-04-29", tz=UTC), # Orthodox Easter
pd.Timestamp("2019-05-01", tz=UTC), # Labour Day
pd.Timestamp("2019-06-17", tz=UTC), # Orthodox Pentecost
pd.Timestamp("2019-08-15", tz=UTC), # Assumption of Virgin Mary
pd.Timestamp("2019-12-25", tz=UTC), # Christmans
pd.Timestamp("2019-12-26", tz=UTC), # Christmans
]
for holiday_label in expected_holidays:
self.assertNotIn(holiday_label, all_sessions)
def test_holidays_fall_on_weekend(self):
all_sessions = self.calendar.all_sessions
# All holidays that fall on a weekend should not be made
# up, so ensure surrounding days are open market
expected_sessions = [
# Second New Years Day on Saturday, Jan 2st
pd.Timestamp("2021-01-04", tz=UTC),
# Christmas on a Saturday Sunday
# Note: 25th and 26th are holidays
pd.Timestamp("2021-12-24", tz=UTC),
pd.Timestamp("2021-12-27", tz=UTC),
# Labour Day on Saturday + Good Friday on Friday + Orthodox Easter on Monday
pd.Timestamp("2021-04-29", tz=UTC),
pd.Timestamp("2021-05-04", tz=UTC),
# Children's Day on Saturday
pd.Timestamp("2019-05-31", tz=UTC),
pd.Timestamp("2019-06-03", tz=UTC),
# Assumption of Virgin Mary on Sunday
pd.Timestamp("2021-08-13", tz=UTC),
pd.Timestamp("2021-08-16", tz=UTC),
# Assumption of Virgin Mary on Saturday
pd.Timestamp("2020-08-14", tz=UTC),
pd.Timestamp("2020-08-17", tz=UTC),
]
for session_label in expected_sessions:
self.assertIn(session_label, all_sessions)
def test_orthodox_easter(self):
"""
The Athens Stock Exchange observes Orthodox (or Eastern) Easter,
as well as Western Easter. All holidays that are tethered to
Easter (i.e. Whit Monday, Good Friday, etc.), are relative to
Orthodox Easter. This test checks that Orthodox Easter and all
related holidays are correct.
"""
all_sessions = self.calendar.all_sessions
expected_holidays = [
# Some Orthodox Good Friday dates
pd.Timestamp("2002-05-03", tz=UTC),
pd.Timestamp("2005-04-29", tz=UTC),
pd.Timestamp("2008-04-25", tz=UTC),
pd.Timestamp("2009-04-17", tz=UTC),
pd.Timestamp("2016-04-29", tz=UTC),
pd.Timestamp("2017-04-14", tz=UTC),
# Some Orthodox Pentecost dates
pd.Timestamp("2002-06-24", tz=UTC),
pd.Timestamp("2005-06-20", tz=UTC),
pd.Timestamp("2006-06-12", tz=UTC),
pd.Timestamp("2008-06-16", tz=UTC),
pd.Timestamp("2013-06-24", tz=UTC),
pd.Timestamp("2016-06-20", tz=UTC),
]
for holiday_label in expected_holidays:
self.assertNotIn(holiday_label, all_sessions)
| 3,352 | 1,618 | 23 |
50f41c52255abfd0c6e8b9f4a85116ed07d020a3 | 887 | py | Python | jp.atcoder/abc086/arc089_a/8309315.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc086/arc089_a/8309315.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc086/arc089_a/8309315.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | # author: kagemeka
# created: 2019-11-06 12:47:30(JST)
import sys
# import collections
# import math
# import string
# import bisect
# import re
# import itertools
# import statistics
if __name__ == "__main__":
# execute only if run as a script
main()
| 22.74359 | 74 | 0.445321 | # author: kagemeka
# created: 2019-11-06 12:47:30(JST)
import sys
# import collections
# import math
# import string
# import bisect
# import re
# import itertools
# import statistics
def main():
    """Read a travel plan from stdin and print whether it is feasible.

    Input: n, then n triples (t, x, y). Starting from (0, 0) at time 0,
    each step of one time unit moves exactly one cell in a cardinal
    direction (no staying put), so a target is reachable iff the time
    budget covers the Manhattan distance and the leftover time is even.
    Prints "Yes" if every target is reachable in order, else "No".
    """
    tokens = sys.stdin.read().split()
    n = int(tokens[0])
    prev_t = prev_x = prev_y = 0
    verdict = "Yes"
    for step in range(n):
        base = 1 + 3 * step
        next_t, next_x, next_y = (int(v) for v in tokens[base:base + 3])
        elapsed = next_t - prev_t
        distance = abs(next_x - prev_x) + abs(next_y - prev_y)
        slack = elapsed - distance
        # Need enough time to cover the distance, and an even remainder
        # (any spare time must be burned in back-and-forth pairs).
        if slack < 0 or slack % 2 != 0:
            verdict = "No"
            break
        prev_t, prev_x, prev_y = next_t, next_x, next_y
    print(verdict)


if __name__ == "__main__":
    # execute only if run as a script
    main()
9d8e89264f07302c0cb52316292081e4a527f1bb | 7,218 | py | Python | src/sima/workflow/plotnode.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | src/sima/workflow/plotnode.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | src/sima/workflow/plotnode.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | # This an autogenerated file
#
# Generated with PlotNode
from typing import Dict,Sequence,List
from dmt.entity import Entity
from dmt.blueprint import Blueprint
from .blueprints.plotnode import PlotNodeBlueprint
from typing import Dict
from sima.post.controlsignalinputslot import ControlSignalInputSlot
from sima.post.figuretemplate import FigureTemplate
from sima.post.inputslot import InputSlot
from sima.post.outputnode import OutputNode
from sima.post.outputslot import OutputSlot
from sima.post.traceconfiguration import TraceConfiguration
from sima.sima.scriptablevalue import ScriptableValue
class PlotNode(OutputNode):
"""
Keyword arguments
-----------------
name : str
(default "")
description : str
(default "")
_id : str
(default "")
scriptableValues : List[ScriptableValue]
x : int
(default 0)
y : int
(default 0)
h : int
(default 0)
w : int
(default 0)
controlSignalInputSlots : List[ControlSignalInputSlot]
inputSlot : InputSlot
figureTemplate : FigureTemplate
traces : List[TraceConfiguration]
fixed : bool
(default False)
title : str
(default "")
xLabel : str
(default "")
yLabel : str
(default "")
selectAll : bool
Will export all signals as plot(default False)
outputSlot : OutputSlot
createImages : bool
Create images and store these to disk. The output will then be the paths to the images(default True)
"""
@property
def blueprint(self) -> Blueprint:
"""Return blueprint that this entity represents"""
return PlotNodeBlueprint()
@property
def name(self) -> str:
""""""
return self.__name
@name.setter
def name(self, value: str):
"""Set name"""
self.__name = str(value)
@property
def description(self) -> str:
""""""
return self.__description
@description.setter
def description(self, value: str):
"""Set description"""
self.__description = str(value)
@property
def _id(self) -> str:
""""""
return self.___id
@_id.setter
def _id(self, value: str):
"""Set _id"""
self.___id = str(value)
@property
def scriptableValues(self) -> List[ScriptableValue]:
""""""
return self.__scriptableValues
@scriptableValues.setter
def scriptableValues(self, value: List[ScriptableValue]):
"""Set scriptableValues"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__scriptableValues = value
@property
def x(self) -> int:
""""""
return self.__x
@x.setter
def x(self, value: int):
"""Set x"""
self.__x = int(value)
@property
def y(self) -> int:
""""""
return self.__y
@y.setter
def y(self, value: int):
"""Set y"""
self.__y = int(value)
@property
def h(self) -> int:
""""""
return self.__h
@h.setter
def h(self, value: int):
"""Set h"""
self.__h = int(value)
@property
def w(self) -> int:
""""""
return self.__w
@w.setter
def w(self, value: int):
"""Set w"""
self.__w = int(value)
@property
def controlSignalInputSlots(self) -> List[ControlSignalInputSlot]:
""""""
return self.__controlSignalInputSlots
@controlSignalInputSlots.setter
def controlSignalInputSlots(self, value: List[ControlSignalInputSlot]):
"""Set controlSignalInputSlots"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__controlSignalInputSlots = value
@property
def inputSlot(self) -> InputSlot:
""""""
return self.__inputSlot
@inputSlot.setter
def inputSlot(self, value: InputSlot):
"""Set inputSlot"""
self.__inputSlot = value
@property
def figureTemplate(self) -> FigureTemplate:
""""""
return self.__figureTemplate
@figureTemplate.setter
def figureTemplate(self, value: FigureTemplate):
"""Set figureTemplate"""
self.__figureTemplate = value
@property
def traces(self) -> List[TraceConfiguration]:
""""""
return self.__traces
@traces.setter
def traces(self, value: List[TraceConfiguration]):
"""Set traces"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__traces = value
@property
def fixed(self) -> bool:
""""""
return self.__fixed
@fixed.setter
def fixed(self, value: bool):
"""Set fixed"""
self.__fixed = bool(value)
@property
def title(self) -> str:
""""""
return self.__title
@title.setter
def title(self, value: str):
"""Set title"""
self.__title = str(value)
@property
def xLabel(self) -> str:
""""""
return self.__xLabel
@xLabel.setter
def xLabel(self, value: str):
"""Set xLabel"""
self.__xLabel = str(value)
@property
def yLabel(self) -> str:
""""""
return self.__yLabel
@yLabel.setter
def yLabel(self, value: str):
"""Set yLabel"""
self.__yLabel = str(value)
@property
def selectAll(self) -> bool:
"""Will export all signals as plot"""
return self.__selectAll
@selectAll.setter
def selectAll(self, value: bool):
"""Set selectAll"""
self.__selectAll = bool(value)
@property
def outputSlot(self) -> OutputSlot:
""""""
return self.__outputSlot
@outputSlot.setter
def outputSlot(self, value: OutputSlot):
"""Set outputSlot"""
self.__outputSlot = value
@property
def createImages(self) -> bool:
"""Create images and store these to disk. The output will then be the paths to the images"""
return self.__createImages
@createImages.setter
def createImages(self, value: bool):
"""Set createImages"""
self.__createImages = bool(value)
| 25.595745 | 168 | 0.590053 | # This an autogenerated file
#
# Generated with PlotNode
from typing import Dict,Sequence,List
from dmt.entity import Entity
from dmt.blueprint import Blueprint
from .blueprints.plotnode import PlotNodeBlueprint
from typing import Dict
from sima.post.controlsignalinputslot import ControlSignalInputSlot
from sima.post.figuretemplate import FigureTemplate
from sima.post.inputslot import InputSlot
from sima.post.outputnode import OutputNode
from sima.post.outputslot import OutputSlot
from sima.post.traceconfiguration import TraceConfiguration
from sima.sima.scriptablevalue import ScriptableValue
class PlotNode(OutputNode):
"""
Keyword arguments
-----------------
name : str
(default "")
description : str
(default "")
_id : str
(default "")
scriptableValues : List[ScriptableValue]
x : int
(default 0)
y : int
(default 0)
h : int
(default 0)
w : int
(default 0)
controlSignalInputSlots : List[ControlSignalInputSlot]
inputSlot : InputSlot
figureTemplate : FigureTemplate
traces : List[TraceConfiguration]
fixed : bool
(default False)
title : str
(default "")
xLabel : str
(default "")
yLabel : str
(default "")
selectAll : bool
Will export all signals as plot(default False)
outputSlot : OutputSlot
createImages : bool
Create images and store these to disk. The output will then be the paths to the images(default True)
"""
def __init__(self , name="", description="", _id="", x=0, y=0, h=0, w=0, fixed=False, title="", xLabel="", yLabel="", selectAll=False, createImages=True, **kwargs):
super().__init__(**kwargs)
self.name = name
self.description = description
self._id = _id
self.scriptableValues = list()
self.x = x
self.y = y
self.h = h
self.w = w
self.controlSignalInputSlots = list()
self.inputSlot = None
self.figureTemplate = None
self.traces = list()
self.fixed = fixed
self.title = title
self.xLabel = xLabel
self.yLabel = yLabel
self.selectAll = selectAll
self.outputSlot = None
self.createImages = createImages
for key, value in kwargs.items():
if not isinstance(value, Dict):
setattr(self, key, value)
@property
def blueprint(self) -> Blueprint:
"""Return blueprint that this entity represents"""
return PlotNodeBlueprint()
@property
def name(self) -> str:
""""""
return self.__name
@name.setter
def name(self, value: str):
"""Set name"""
self.__name = str(value)
@property
def description(self) -> str:
""""""
return self.__description
@description.setter
def description(self, value: str):
"""Set description"""
self.__description = str(value)
@property
def _id(self) -> str:
""""""
return self.___id
@_id.setter
def _id(self, value: str):
"""Set _id"""
self.___id = str(value)
@property
def scriptableValues(self) -> List[ScriptableValue]:
""""""
return self.__scriptableValues
@scriptableValues.setter
def scriptableValues(self, value: List[ScriptableValue]):
"""Set scriptableValues"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__scriptableValues = value
@property
def x(self) -> int:
""""""
return self.__x
@x.setter
def x(self, value: int):
"""Set x"""
self.__x = int(value)
@property
def y(self) -> int:
""""""
return self.__y
@y.setter
def y(self, value: int):
"""Set y"""
self.__y = int(value)
@property
def h(self) -> int:
""""""
return self.__h
@h.setter
def h(self, value: int):
"""Set h"""
self.__h = int(value)
@property
def w(self) -> int:
""""""
return self.__w
@w.setter
def w(self, value: int):
"""Set w"""
self.__w = int(value)
@property
def controlSignalInputSlots(self) -> List[ControlSignalInputSlot]:
""""""
return self.__controlSignalInputSlots
@controlSignalInputSlots.setter
def controlSignalInputSlots(self, value: List[ControlSignalInputSlot]):
"""Set controlSignalInputSlots"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__controlSignalInputSlots = value
@property
def inputSlot(self) -> InputSlot:
""""""
return self.__inputSlot
@inputSlot.setter
def inputSlot(self, value: InputSlot):
"""Set inputSlot"""
self.__inputSlot = value
@property
def figureTemplate(self) -> FigureTemplate:
""""""
return self.__figureTemplate
@figureTemplate.setter
def figureTemplate(self, value: FigureTemplate):
"""Set figureTemplate"""
self.__figureTemplate = value
@property
def traces(self) -> List[TraceConfiguration]:
""""""
return self.__traces
@traces.setter
def traces(self, value: List[TraceConfiguration]):
"""Set traces"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__traces = value
@property
def fixed(self) -> bool:
""""""
return self.__fixed
@fixed.setter
def fixed(self, value: bool):
"""Set fixed"""
self.__fixed = bool(value)
@property
def title(self) -> str:
""""""
return self.__title
@title.setter
def title(self, value: str):
"""Set title"""
self.__title = str(value)
@property
def xLabel(self) -> str:
""""""
return self.__xLabel
@xLabel.setter
def xLabel(self, value: str):
"""Set xLabel"""
self.__xLabel = str(value)
@property
def yLabel(self) -> str:
""""""
return self.__yLabel
@yLabel.setter
def yLabel(self, value: str):
"""Set yLabel"""
self.__yLabel = str(value)
@property
def selectAll(self) -> bool:
"""Will export all signals as plot"""
return self.__selectAll
@selectAll.setter
def selectAll(self, value: bool):
"""Set selectAll"""
self.__selectAll = bool(value)
@property
def outputSlot(self) -> OutputSlot:
""""""
return self.__outputSlot
@outputSlot.setter
def outputSlot(self, value: OutputSlot):
"""Set outputSlot"""
self.__outputSlot = value
@property
def createImages(self) -> bool:
"""Create images and store these to disk. The output will then be the paths to the images"""
return self.__createImages
@createImages.setter
def createImages(self, value: bool):
"""Set createImages"""
self.__createImages = bool(value)
| 867 | 0 | 27 |
6c83e241e2fd1082aae160c9ab4a297c935583b4 | 10,275 | py | Python | src/photo/tests/test_utils.py | rjhelms/photo | fd9e3ff384554c5bc1e3b024d13ca40f50e049e5 | [
"MIT"
] | null | null | null | src/photo/tests/test_utils.py | rjhelms/photo | fd9e3ff384554c5bc1e3b024d13ca40f50e049e5 | [
"MIT"
] | null | null | null | src/photo/tests/test_utils.py | rjhelms/photo | fd9e3ff384554c5bc1e3b024d13ca40f50e049e5 | [
"MIT"
] | null | null | null | # pylint: disable=invalid-name
"""
Tests for photo.utils
"""
import uuid
from django.test import TestCase
from photo import utils
# pylint: disable=too-few-public-methods
class DummyInstance:
    """
    Dummy instance object for passing into UploadToPathAndRename.

    Stands in for a model instance: the callable under test only reads
    the ``pk`` attribute, so nothing else is modelled here.
    """
    # Default primary key; individual tests overwrite this with a UUID
    # or a non-UUID value to exercise the different renaming branches.
    pk = None
class UploadToPathAndRenameTestCase(TestCase):
    """
    Tests for utils.UploadToPathAndRename
    """

    def setUp(self):
        """Create a fresh renamer (rooted at 'test') and dummy instance."""
        # Restored: every test method below reads these two attributes,
        # so without this setUp the whole class failed with
        # AttributeError before any assertion ran.
        self.upload_to_path_and_rename = utils.UploadToPathAndRename('test')
        self.instance = DummyInstance()

    def test_extension_preserved(self):
        """
        Verify that UploadToPathAndRename preserves file extensions.
        """
        result = self.upload_to_path_and_rename(self.instance, "filename.jpg")
        ext = result.split('.')[-1]
        self.assertEqual(ext, 'jpg', "New filename has wrong extension")

    def test_path_appended(self):
        """
        Verify that UploadToPathAndRename appends specified path.
        """
        result = self.upload_to_path_and_rename(self.instance, "filename.jpg")
        path = result.split('/')[0]
        self.assertEqual(path, 'test', "New filename has wrong path")

    def test_instance_with_no_pk(self):
        """
        Verify handling when instance does not have a primary key.

        The generated name must be a valid random UUID (and thus cannot
        equal the instance's pk, which is None here).
        """
        result = self.upload_to_path_and_rename(self.instance, "filename.jpg")
        generated_uuid_string = result.split('/')[1].split('.')[0]
        generated_uuid = uuid.UUID(generated_uuid_string, version=4)
        self.assertNotEqual(generated_uuid, self.instance.pk,
                            "New filename did not get a random UUID")

    def test_instance_with_uuid_pk(self):
        """
        Verify handling when instance has a UUID primary key.
        """
        self.instance.pk = uuid.uuid4()
        result = self.upload_to_path_and_rename(self.instance, "filename.jpg")
        generated_uuid_string = result.split('/')[1].split('.')[0]
        generated_uuid = uuid.UUID(generated_uuid_string, version=4)
        self.assertEqual(generated_uuid, self.instance.pk,
                         "New filename does not match UUID of instance")

    def test_instance_with_non_uuid_pk(self):
        """
        Verify handling when instance has a non-UUID primary key.
        """
        # Method renamed from the original misspelling
        # 'test_insance_with_non_uuid_pk'.
        self.instance.pk = "test"
        with self.assertRaises(TypeError):
            self.upload_to_path_and_rename(self.instance, "filename.jpg")
class StopTimeConversionTestCase(TestCase):
    """Tests for utils.StopTimeConversion."""

    def _check_resize(self, old_size, new_size, expected_stops):
        # Shared driver for the resize_print_in_stops tests; sizes are
        # given as (x, y) tuples.
        self.assertEqual(
            utils.StopTimeConversion.resize_print_in_stops(
                {'x': old_size[0], 'y': old_size[1]},
                {'x': new_size[0], 'y': new_size[1]}),
            expected_stops)

    def test_exception_time_difference_in_stops(self):
        """time_difference_in_stops rejects non-positive times."""
        conv = utils.StopTimeConversion
        for old_time, new_time in ((0, 1), (1, 0), (0, 0), (-1, -1)):
            with self.assertRaises(ValueError):
                conv.time_difference_in_stops(old_time, new_time)

    def test_exception_adjust_time_by_points(self):
        """adjust_time_by_points rejects non-positive base times."""
        conv = utils.StopTimeConversion
        for base_time in (0, -1):
            with self.assertRaises(ValueError):
                conv.adjust_time_by_points(base_time, 1)

    def test_exception_adjust_time_by_stops(self):
        """adjust_time_by_stops rejects non-positive base times."""
        conv = utils.StopTimeConversion
        for base_time in (0, -1):
            with self.assertRaises(ValueError):
                conv.adjust_time_by_stops(base_time, 1)

    def test_time_difference_in_stops(self):
        """Doubling is +1 stop, halving is -1, equality is 0."""
        conv = utils.StopTimeConversion
        for args, expected in (((6, 12), 1), ((12, 12), 0), ((12, 6), -1)):
            self.assertEqual(conv.time_difference_in_stops(*args), expected)

    def test_time_difference_in_points(self):
        """Doubling is +12 points, halving is -12, equality is 0."""
        conv = utils.StopTimeConversion
        for args, expected in (((6, 12), 12), ((12, 12), 0), ((12, 6), -12)):
            self.assertEqual(conv.time_difference_in_points(*args), expected)

    def test_stop_difference_to_multiplier(self):
        """+1 stop doubles the time, -1 stop halves it, 0 keeps it."""
        conv = utils.StopTimeConversion
        for stops, expected in ((1, 2), (0, 1), (-1, 0.5)):
            self.assertEqual(
                conv.stop_difference_to_multiplier(stops), expected)

    def test_point_difference_to_multiplier(self):
        """+12 points double the time, -12 halve it, 0 keeps it."""
        conv = utils.StopTimeConversion
        for points, expected in ((12, 2), (0, 1), (-12, 0.5)):
            self.assertEqual(
                conv.point_difference_to_multiplier(points), expected)

    def test_adjust_time_by_stops(self):
        """Adjusting 12 by +1 / 0 / -1 stops yields 24 / 12 / 6."""
        conv = utils.StopTimeConversion
        for stops, expected in ((1, 24), (0, 12), (-1, 6)):
            self.assertEqual(conv.adjust_time_by_stops(12, stops), expected)

    def test_adjust_time_by_points(self):
        """Adjusting 12 by +12 / 0 / -12 points yields 24 / 12 / 6."""
        conv = utils.StopTimeConversion
        for points, expected in ((12, 24), (0, 12), (-12, 6)):
            self.assertEqual(conv.adjust_time_by_points(12, points), expected)

    def test_resize_print_enlarge(self):
        """Constant-aspect enlargement: 4x6 -> 8x12 is +2 stops."""
        self._check_resize((4, 6), (8, 12), 2)

    def test_resize_print_same(self):
        """Constant-aspect same size: 4x6 -> 4x6 is 0 stops."""
        self._check_resize((4, 6), (4, 6), 0)

    def test_resize_print_reduce(self):
        """Constant-aspect reduction: 8x12 -> 4x6 is -2 stops."""
        self._check_resize((8, 12), (4, 6), -2)

    def test_resize_print_enlarge_high_aspect(self):
        """Higher-aspect-ratio enlargement: 4x6 -> 8x10 is +2 stops."""
        self._check_resize((4, 6), (8, 10), 2)

    def test_resize_print_same_high_aspect(self):
        """Higher-aspect-ratio same size: 4x6 -> 4x5 is 0 stops."""
        self._check_resize((4, 6), (4, 5), 0)

    def test_resize_print_reduce_high_aspect(self):
        """Higher-aspect-ratio reduction: 8x12 -> 4x5 is -2 stops."""
        self._check_resize((8, 12), (4, 5), -2)

    def test_resize_print_enlarge_low_aspect(self):
        """Lower-aspect-ratio enlargement: 4x6 -> 7x12 is +2 stops."""
        self._check_resize((4, 6), (7, 12), 2)

    def test_resize_print_same_low_aspect(self):
        """Lower-aspect-ratio same size: 4x6 -> 3x6 is 0 stops."""
        self._check_resize((4, 6), (3, 6), 0)

    def test_resize_print_reduce_low_aspect(self):
        """Lower-aspect-ratio reduction: 8x12 -> 3x6 is -2 stops."""
        self._check_resize((8, 12), (3, 6), -2)
| 35.801394 | 78 | 0.631533 | # pylint: disable=invalid-name
"""
Tests for photo.utils
"""
import uuid
from django.test import TestCase
from photo import utils
# pylint: disable=too-few-public-methods
class DummyInstance:
    """Stand-in model instance handed to UploadToPathAndRename in tests."""
    pk = None  # primary key; individual tests overwrite this as needed
class UploadToPathAndRenameTestCase(TestCase):
    """
    Tests for utils.UploadToPathAndRename
    """
    def setUp(self):
        # Fresh callable (upload path 'test') and a clean model stand-in
        # before every test.
        self.upload_to_path_and_rename = utils.UploadToPathAndRename('test')
        self.instance = DummyInstance()
    def test_extension_preserved(self):
        """
        Verify that UploadToPathAndRename preserves file extensions.
        """
        result = self.upload_to_path_and_rename(self.instance, "filename.jpg")
        ext = result.split('.')[-1]
        self.assertEqual(ext, 'jpg', "New filename has wrong extension")
    def test_path_appended(self):
        """
        Verify that UploadToPathAndRename appends specified path.
        """
        result = self.upload_to_path_and_rename(self.instance, "filename.jpg")
        path = result.split('/')[0]
        self.assertEqual(path, 'test', "New filename has wrong path")
    def test_instance_with_no_pk(self):
        """
        Verify handling when instance does not have a primary key
        """
        result = self.upload_to_path_and_rename(self.instance, "filename.jpg")
        # The generated basename must parse as a version-4 UUID ...
        generated_uuid_string = result.split('/')[1].split('.')[0]
        generated_uuid = uuid.UUID(generated_uuid_string, version=4)
        # ... and must differ from the (None) pk, i.e. it was freshly drawn.
        self.assertNotEqual(generated_uuid, self.instance.pk,
                            "New filename did not get a random UUID")
    def test_instance_with_uuid_pk(self):
        """
        Verify handling when instance has a UUID primary key
        """
        self.instance.pk = uuid.uuid4()
        result = self.upload_to_path_and_rename(self.instance, "filename.jpg")
        generated_uuid_string = result.split('/')[1].split('.')[0]
        generated_uuid = uuid.UUID(generated_uuid_string, version=4)
        # An existing UUID pk is expected to be reused as the new basename.
        self.assertEqual(generated_uuid, self.instance.pk,
                         "New filename does not match UUID of instance")
    def test_insance_with_non_uuid_pk(self):
        """
        Verify handling when instance has a non-UUID primary key
        """
        # NOTE(review): "insance" typo kept -- renaming changes the test id.
        self.instance.pk = "test"
        with self.assertRaises(TypeError):
            self.upload_to_path_and_rename(self.instance, "filename.jpg")
class StopTimeConversionTestCase(TestCase):
    """Tests for utils.StopTimeConversion."""
    def _check_resize(self, old_size, new_size, expected_stops):
        """Assert that resizing old_size -> new_size costs expected_stops."""
        self.assertEqual(
            utils.StopTimeConversion.resize_print_in_stops(old_size, new_size),
            expected_stops)
    def test_exception_time_difference_in_stops(self):
        """time_difference_in_stops rejects non-positive times."""
        for old_time, new_time in ((0, 1), (1, 0), (0, 0), (-1, -1)):
            with self.assertRaises(ValueError):
                utils.StopTimeConversion.time_difference_in_stops(
                    old_time, new_time)
    def test_exception_adjust_time_by_points(self):
        """adjust_time_by_points rejects non-positive times."""
        for bad_time in (0, -1):
            with self.assertRaises(ValueError):
                utils.StopTimeConversion.adjust_time_by_points(bad_time, 1)
    def test_exception_adjust_time_by_stops(self):
        """adjust_time_by_stops rejects non-positive times."""
        for bad_time in (0, -1):
            with self.assertRaises(ValueError):
                utils.StopTimeConversion.adjust_time_by_stops(bad_time, 1)
    def test_time_difference_in_stops(self):
        """Doubling is +1 stop, equality is 0, halving is -1."""
        convert = utils.StopTimeConversion.time_difference_in_stops
        self.assertEqual(convert(6, 12), 1)
        self.assertEqual(convert(12, 12), 0)
        self.assertEqual(convert(12, 6), -1)
    def test_time_difference_in_points(self):
        """Doubling is +12 points, equality is 0, halving is -12."""
        convert = utils.StopTimeConversion.time_difference_in_points
        self.assertEqual(convert(6, 12), 12)
        self.assertEqual(convert(12, 12), 0)
        self.assertEqual(convert(12, 6), -12)
    def test_stop_difference_to_multiplier(self):
        """+1 stop doubles, 0 keeps, -1 halves the exposure time."""
        convert = utils.StopTimeConversion.stop_difference_to_multiplier
        self.assertEqual(convert(1), 2)
        self.assertEqual(convert(0), 1)
        self.assertEqual(convert(-1), 0.5)
    def test_point_difference_to_multiplier(self):
        """+12 points double, 0 keeps, -12 halve the exposure time."""
        convert = utils.StopTimeConversion.point_difference_to_multiplier
        self.assertEqual(convert(12), 2)
        self.assertEqual(convert(0), 1)
        self.assertEqual(convert(-12), 0.5)
    def test_adjust_time_by_stops(self):
        """Adjusting 12s by +1/0/-1 stops yields 24/12/6 seconds."""
        adjust = utils.StopTimeConversion.adjust_time_by_stops
        self.assertEqual(adjust(12, 1), 24)
        self.assertEqual(adjust(12, 0), 12)
        self.assertEqual(adjust(12, -1), 6)
    def test_adjust_time_by_points(self):
        """Adjusting 12s by +12/0/-12 points yields 24/12/6 seconds."""
        adjust = utils.StopTimeConversion.adjust_time_by_points
        self.assertEqual(adjust(12, 12), 24)
        self.assertEqual(adjust(12, 0), 12)
        self.assertEqual(adjust(12, -12), 6)
    def test_resize_print_enlarge(self):
        """Constant-aspect enlargement 4x6 -> 8x12 costs two stops."""
        self._check_resize({'x': 4, 'y': 6}, {'x': 8, 'y': 12}, 2)
    def test_resize_print_same(self):
        """Constant-aspect same-size print costs zero stops."""
        self._check_resize({'x': 4, 'y': 6}, {'x': 4, 'y': 6}, 0)
    def test_resize_print_reduce(self):
        """Constant-aspect reduction 8x12 -> 4x6 saves two stops."""
        self._check_resize({'x': 8, 'y': 12}, {'x': 4, 'y': 6}, -2)
    def test_resize_print_enlarge_high_aspect(self):
        """Higher-aspect enlargement 4x6 -> 8x10 costs two stops."""
        self._check_resize({'x': 4, 'y': 6}, {'x': 8, 'y': 10}, 2)
    def test_resize_print_same_high_aspect(self):
        """Higher-aspect same-size print costs zero stops."""
        self._check_resize({'x': 4, 'y': 6}, {'x': 4, 'y': 5}, 0)
    def test_resize_print_reduce_high_aspect(self):
        """Higher-aspect reduction 8x12 -> 4x5 saves two stops."""
        self._check_resize({'x': 8, 'y': 12}, {'x': 4, 'y': 5}, -2)
    def test_resize_print_enlarge_low_aspect(self):
        """Lower-aspect enlargement 4x6 -> 7x12 costs two stops."""
        self._check_resize({'x': 4, 'y': 6}, {'x': 7, 'y': 12}, 2)
    def test_resize_print_same_low_aspect(self):
        """Lower-aspect same-size print costs zero stops."""
        self._check_resize({'x': 4, 'y': 6}, {'x': 3, 'y': 6}, 0)
    def test_resize_print_reduce_low_aspect(self):
        """Lower-aspect reduction 8x12 -> 3x6 saves two stops."""
        self._check_resize({'x': 8, 'y': 12}, {'x': 3, 'y': 6}, -2)
| 112 | 0 | 26 |
e5b3c5630bb7e2c668fe7689326b01560779d8e5 | 2,970 | py | Python | python_code/medium/430_Flatten_Multilevel_Doubly_Linked_List_medium/solution.py | timshenkao/interview_coding_exercises | c531fa5e0c09faef976539275589e957fcb88393 | [
"Apache-2.0"
] | null | null | null | python_code/medium/430_Flatten_Multilevel_Doubly_Linked_List_medium/solution.py | timshenkao/interview_coding_exercises | c531fa5e0c09faef976539275589e957fcb88393 | [
"Apache-2.0"
] | null | null | null | python_code/medium/430_Flatten_Multilevel_Doubly_Linked_List_medium/solution.py | timshenkao/interview_coding_exercises | c531fa5e0c09faef976539275589e957fcb88393 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 - present, Timur Shenkao
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from typing import Optional
from python_code.helper.linked_lists import MultiDoubleListNode
# 430. Flatten a Multilevel Doubly Linked List https://leetcode.com/problems/flatten-a-multilevel-doubly-linked-list/
# You are given a doubly linked list, which contains nodes that have a next pointer, a previous pointer, and an
# additional child pointer. This child pointer may or may not point to a separate doubly linked list, also containing
# these special nodes. These child lists may have one or more children of their own, and so on, to produce a multilevel
# data structure.
# Given the head of the first level of the list, flatten the list so that all the nodes appear in a single-level, doubly
# linked list. Let curr be a node with a child list. The nodes in the child list should appear after curr and before
# curr.next in the flattened list.
# Return the head of the flattened list. The nodes in the list must have all of their child pointers set to null.
# The number of Nodes will not exceed 1000.
# 1 <= Node.val <= 105
| 47.903226 | 120 | 0.678451 | # Copyright (c) 2021 - present, Timur Shenkao
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from typing import Optional
from python_code.helper.linked_lists import MultiDoubleListNode
# 430. Flatten a Multilevel Doubly Linked List https://leetcode.com/problems/flatten-a-multilevel-doubly-linked-list/
# You are given a doubly linked list, which contains nodes that have a next pointer, a previous pointer, and an
# additional child pointer. This child pointer may or may not point to a separate doubly linked list, also containing
# these special nodes. These child lists may have one or more children of their own, and so on, to produce a multilevel
# data structure.
# Given the head of the first level of the list, flatten the list so that all the nodes appear in a single-level, doubly
# linked list. Let curr be a node with a child list. The nodes in the child list should appear after curr and before
# curr.next in the flattened list.
# Return the head of the flattened list. The nodes in the list must have all of their child pointers set to null.
# The number of Nodes will not exceed 1000.
# 1 <= Node.val <= 105
class Solution:
    """Flatten a multilevel doubly linked list (LeetCode 430) in place."""
    def _subflatten(self, head: "Optional[MultiDoubleListNode]") -> \
            "Tuple[Optional[MultiDoubleListNode], Optional[MultiDoubleListNode]]":
        """Flatten the sub-list starting at *head*, splicing every child list
        in after its parent node, and return (head, tail) of the result.

        Annotations are kept as strings so this module can be imported even
        when the helper type is unavailable at definition time.
        """
        curr_node = head
        tail = None
        while curr_node:
            if curr_node.child_node:
                temp = curr_node.next_node
                child_head, child_tail = self._subflatten(curr_node.child_node)
                curr_node.next_node = child_head
                child_tail.next_node = temp
                curr_node.child_node = None
                if temp is None:
                    # Bug fix: when the child list extends the overall tail,
                    # the old code returned tail=None and the recursive caller
                    # crashed on `child_tail.next_node`.
                    tail = child_tail
                curr_node = temp
            else:
                if not curr_node.next_node:
                    tail = curr_node
                curr_node = curr_node.next_node
        # NOTE(review): only next_node/child_node are rewritten here; if
        # MultiDoubleListNode also carries a previous-pointer it is left
        # stale -- confirm against the helper class.
        return head, tail
    def flatten(self, head: "Optional[MultiDoubleListNode]") -> "Optional[MultiDoubleListNode]":
        """ Time complexity: O(N). N - total number of elements
            Space complexity: O(N). In extreme case, nodes are chained with each other only with the child pointers.
            In this case, the recursive calls would pile up, and it would take N space in the function call stack.
        """
        if not head:
            return None
        head, _ = self._subflatten(head)
        return head
| 691 | 530 | 23 |
53fa4a3dfcdf40efa45d965ce57530c2fb2653cb | 5,266 | py | Python | file_forensics.py | nshadov/file_forensics | ff87d84876daf12614185acc8b31ace2aa21fe0a | [
"MIT"
] | 8 | 2017-06-09T21:18:52.000Z | 2022-02-18T00:55:28.000Z | file_forensics.py | nshadov/file_forensics | ff87d84876daf12614185acc8b31ace2aa21fe0a | [
"MIT"
] | null | null | null | file_forensics.py | nshadov/file_forensics | ff87d84876daf12614185acc8b31ace2aa21fe0a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Searches specified directory for miss named files."""
import os
class bcolors:
    """Color text in terminal."""
    # ANSI escape sequences: prepend one of these to a string and append
    # ENDC to reset the terminal back to its default attributes.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class FileForensics:
    """Identify miss named files."""
    def __init__(self):
        """Initialize object without processing any files."""
        # Each entry is a dict with keys "filename", "mime", "size_bytes",
        # "ext", filled in by scan_dir().
        self.filelist = list()
    def scan_dir(self, dir):
        """Scan dir looking for files and performs basic checks."""
        # Imported lazily so the class loads without the third-party
        # `python-magic` dependency installed.
        import pathlib
        import magic
        for filename in find_all_files(dir):
            self.filelist.append({
                "filename": filename,
                "mime": magic.from_file(filename, mime=True),
                "size_bytes": os.path.getsize(filename),
                "ext": pathlib.Path(filename).suffix
            })
    def get_lenght(self):
        """Return number of processed files."""
        # NOTE(review): keeps the historical "lenght" spelling; renaming
        # would break existing callers.
        return len(self.filelist)
    def get_big_files(self, size_threshold=10):
        """Return list of file bigger than X MB (size in MB)."""
        # Yields (size in MB, mime type, filename) tuples.
        for f in self.filelist:
            if f["size_bytes"] > size_threshold*(1024*1024):
                yield f["size_bytes"]/(1024*1024), f["mime"], f["filename"]
    def get_keyword_files(
            self,
            filename_keywords="keywords",
            read_size=1024*1024,
            offset=50):
        """Return list of files matching keywords with matched information."""
        # Builds an automaton from one keyword per line of `filename_keywords`,
        # scans the first `read_size` bytes of every recorded file, and yields
        # (file record, [(context snippet, keyword), ...]) per matching file.
        import ahocorasick
        A = ahocorasick.Automaton()
        with open(filename_keywords, "r") as f:
            while True:
                word = f.readline()
                if not word:
                    break
                A.add_word(word.strip(), word.strip())
        A.make_automaton()
        for file in self.filelist:
            with open(file["filename"], "r") as f:
                matches = list()
                buff = f.read(read_size)
                for match in A.iter(buff):
                    # match[0] is the match position, match[1] the keyword
                    # (the keyword start is recovered via len(match[1]) below).
                    pos_cur = match[0]
                    # Context window of up to `offset` characters per side.
                    pos_start = max(match[0]-offset, 0)
                    pos_end = min(match[0]+offset, read_size)
                    # Trim the window at surrounding newlines so the snippet
                    # stays on one logical line of the scanned file.
                    offset_start = buff[
                        pos_start:pos_cur-len(match[1])+1
                        ].find("\n")
                    offset_end = buff[pos_cur+1:pos_end].rfind("\n")
                    if offset_start >= offset:
                        offset_start = 0
                    if offset_end <= 0:
                        offset_end = offset
                    offset_end = offset - offset_end
                    # Wrap the keyword itself in terminal red for display.
                    matched_text = buff[
                        pos_start+offset_start:pos_cur-len(match[1])+1
                        ] + \
                        bcolors.FAIL + \
                        buff[pos_cur-len(match[1])+1:pos_cur+1] + \
                        bcolors.ENDC + \
                        buff[pos_cur+1:pos_end-offset_end]
                    matches.append((matched_text.replace("\n", " "), match[1]))
                if len(matches) > 0:
                    yield (file, matches)
    def get_highentropy_files(self, ent_threshold=0.7):
        """Return list of files with higher entropy (encrypted, compressed)."""
        import entropy
        # Formats that are compressed by design and would always exceed the
        # entropy threshold, so they are skipped.
        ignored_mimetypes = [
            "application/x-shockwave-flash",
            "application/x-font-",
            "application/pdf",
            "image/"
        ]
        for file in self.filelist:
            with open(file["filename"], "r") as f:
                buff = f.read(1024*1024)
                skip = False
                for mime in ignored_mimetypes:
                    if file["mime"].startswith(mime):
                        skip = True
                        break
                if not skip:
                    ent = entropy.shannon_entropy(buff)
                    if ent >= ent_threshold:
                        yield (file, ent)
def find_all_files(path):
    """Find all files in specified directory and yields them."""
    for dirpath, _dirnames, filenames in os.walk(os.path.join(path)):
        for basename in filenames:
            yield os.path.join(dirpath, basename)
def main():
    """Analyze directory from command line looking for suspicious files."""
    # NOTE(review): the scan root is hard-coded below; an argparse argument
    # would make the script reusable. Python 2 print statements throughout.
    ff = FileForensics()
    # ff.scan_dir("/Users/ns/notes") # FIXME
    ff.scan_dir("/Users/ns/work/termination_data")
    # Report files above the default size threshold, size highlighted in red.
    print "\n--- BIG FILES ---"
    for (size, mime, filename) in ff.get_big_files():
        print (bcolors.FAIL+"{:>10} MB"+bcolors.ENDC+" {:<20} {:<10}").\
            format(size, mime, filename)
    # Report keyword hits with their highlighted context snippets.
    print "\n--- FOUND KEYWORDS ---"
    for (file, matches) in ff.get_keyword_files():
        print "{:<5} {:<20} ({:<10})".format(
            len(matches), file["mime"], file["filename"])
        for position, match in matches:
            print "\t- {:<10} {:<10}".format(position, match)
        print
    # Report likely-encrypted/compressed files by Shannon entropy.
    print "\n--- HIGH ENTROPY FILES ---"
    for (file, ent) in ff.get_highentropy_files():
        print (bcolors.FAIL+"\t {:.2f}"+bcolors.ENDC+" ({:<10}) {:<10}").\
            format(ent, file["mime"], file["filename"])
if __name__ == "__main__":
main()
| 32.9125 | 79 | 0.507026 | #!/usr/bin/env python
"""Searches specified directory for miss named files."""
import os
class bcolors:
    """Color text in terminal."""
    # ANSI escape sequences: prepend one of these to a string and append
    # ENDC to reset the terminal back to its default attributes.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class FileForensics:
    """Identify miss named files."""
    def __init__(self):
        """Initialize object without processing any files."""
        # Each entry is a dict with keys "filename", "mime", "size_bytes",
        # "ext", filled in by scan_dir().
        self.filelist = list()
    def scan_dir(self, dir):
        """Scan dir looking for files and performs basic checks."""
        # Imported lazily so the class loads without the third-party
        # `python-magic` dependency installed.
        import pathlib
        import magic
        for filename in find_all_files(dir):
            self.filelist.append({
                "filename": filename,
                "mime": magic.from_file(filename, mime=True),
                "size_bytes": os.path.getsize(filename),
                "ext": pathlib.Path(filename).suffix
            })
    def get_lenght(self):
        """Return number of processed files."""
        # NOTE(review): keeps the historical "lenght" spelling; renaming
        # would break existing callers.
        return len(self.filelist)
    def get_big_files(self, size_threshold=10):
        """Return list of file bigger than X MB (size in MB)."""
        # Yields (size in MB, mime type, filename) tuples.
        for f in self.filelist:
            if f["size_bytes"] > size_threshold*(1024*1024):
                yield f["size_bytes"]/(1024*1024), f["mime"], f["filename"]
    def get_keyword_files(
            self,
            filename_keywords="keywords",
            read_size=1024*1024,
            offset=50):
        """Return list of files matching keywords with matched information."""
        # Builds an automaton from one keyword per line of `filename_keywords`,
        # scans the first `read_size` bytes of every recorded file, and yields
        # (file record, [(context snippet, keyword), ...]) per matching file.
        import ahocorasick
        A = ahocorasick.Automaton()
        with open(filename_keywords, "r") as f:
            while True:
                word = f.readline()
                if not word:
                    break
                A.add_word(word.strip(), word.strip())
        A.make_automaton()
        for file in self.filelist:
            with open(file["filename"], "r") as f:
                matches = list()
                buff = f.read(read_size)
                for match in A.iter(buff):
                    # match[0] is the match position, match[1] the keyword
                    # (the keyword start is recovered via len(match[1]) below).
                    pos_cur = match[0]
                    # Context window of up to `offset` characters per side.
                    pos_start = max(match[0]-offset, 0)
                    pos_end = min(match[0]+offset, read_size)
                    # Trim the window at surrounding newlines so the snippet
                    # stays on one logical line of the scanned file.
                    offset_start = buff[
                        pos_start:pos_cur-len(match[1])+1
                        ].find("\n")
                    offset_end = buff[pos_cur+1:pos_end].rfind("\n")
                    if offset_start >= offset:
                        offset_start = 0
                    if offset_end <= 0:
                        offset_end = offset
                    offset_end = offset - offset_end
                    # Wrap the keyword itself in terminal red for display.
                    matched_text = buff[
                        pos_start+offset_start:pos_cur-len(match[1])+1
                        ] + \
                        bcolors.FAIL + \
                        buff[pos_cur-len(match[1])+1:pos_cur+1] + \
                        bcolors.ENDC + \
                        buff[pos_cur+1:pos_end-offset_end]
                    matches.append((matched_text.replace("\n", " "), match[1]))
                if len(matches) > 0:
                    yield (file, matches)
    def get_highentropy_files(self, ent_threshold=0.7):
        """Return list of files with higher entropy (encrypted, compressed)."""
        import entropy
        # Formats that are compressed by design and would always exceed the
        # entropy threshold, so they are skipped.
        ignored_mimetypes = [
            "application/x-shockwave-flash",
            "application/x-font-",
            "application/pdf",
            "image/"
        ]
        for file in self.filelist:
            with open(file["filename"], "r") as f:
                buff = f.read(1024*1024)
                skip = False
                for mime in ignored_mimetypes:
                    if file["mime"].startswith(mime):
                        skip = True
                        break
                if not skip:
                    ent = entropy.shannon_entropy(buff)
                    if ent >= ent_threshold:
                        yield (file, ent)
def find_all_files(path):
    """Find all files in specified directory and yields them."""
    for dirpath, _dirnames, filenames in os.walk(os.path.join(path)):
        for basename in filenames:
            yield os.path.join(dirpath, basename)
def main():
    """Analyze directory from command line looking for suspicious files."""
    # NOTE(review): the scan root is hard-coded below; an argparse argument
    # would make the script reusable. Python 2 print statements throughout.
    ff = FileForensics()
    # ff.scan_dir("/Users/ns/notes") # FIXME
    ff.scan_dir("/Users/ns/work/termination_data")
    # Report files above the default size threshold, size highlighted in red.
    print "\n--- BIG FILES ---"
    for (size, mime, filename) in ff.get_big_files():
        print (bcolors.FAIL+"{:>10} MB"+bcolors.ENDC+" {:<20} {:<10}").\
            format(size, mime, filename)
    # Report keyword hits with their highlighted context snippets.
    print "\n--- FOUND KEYWORDS ---"
    for (file, matches) in ff.get_keyword_files():
        print "{:<5} {:<20} ({:<10})".format(
            len(matches), file["mime"], file["filename"])
        for position, match in matches:
            print "\t- {:<10} {:<10}".format(position, match)
        print
    # Report likely-encrypted/compressed files by Shannon entropy.
    print "\n--- HIGH ENTROPY FILES ---"
    for (file, ent) in ff.get_highentropy_files():
        print (bcolors.FAIL+"\t {:.2f}"+bcolors.ENDC+" ({:<10}) {:<10}").\
            format(ent, file["mime"], file["filename"])
if __name__ == "__main__":
main()
| 0 | 0 | 0 |
605b26d6d3532dde3890281f964748b6b7ca2774 | 4,620 | py | Python | test_equipment/openSMILE_dir_to_csv.py | ChildMindInstitute/test_recording_equipment | ce062be5c278f4c32c72eadd90b68e4898e9f1fe | [
"Apache-2.0"
] | 1 | 2018-07-20T23:45:22.000Z | 2018-07-20T23:45:22.000Z | test_equipment/openSMILE_dir_to_csv.py | ChildMindInstitute/test-recording-equipment | ce062be5c278f4c32c72eadd90b68e4898e9f1fe | [
"Apache-2.0"
] | null | null | null | test_equipment/openSMILE_dir_to_csv.py | ChildMindInstitute/test-recording-equipment | ce062be5c278f4c32c72eadd90b68e4898e9f1fe | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
openSMILE_dir_to_csv.py
Script to format openSMILE emobase *.csv output for a given set of files into a
single csv. Also returns the data as a pandas dataframe.
Created on Mon Jan 23 10:43:34 2017
Author:
– Jon Clucas, 2017 (jon.clucas@childmind.org)
© 2017, Child Mind Institute, Apache v2.0 License
"""
import argparse, csv, os, pandas as pd, subprocess
def oS_csv_reformat(oS_csv, first):
    """
    Get features from an openSMILE emobase configuration csv output.

    Parameters
    ----------
    oS_csv : string
        absolute path to a *.csv openSMILE output file

    first : boolean
        `True` for the first csv of the set (feature names are harvested),
        `False` otherwise

    Returns
    -------
    data : pandas dataframe or list
        if `first`, a one-column dataframe of values indexed by feature name;
        otherwise, the list of raw csv values
    """
    print(oS_csv)
    at_at = "@attribute "
    if first:
        header = []
    temp_list = []
    data_flag = False
    with open(oS_csv, 'r') as f:
        reader = csv.reader(f)
        for row in reader:
            joined = ''.join(row)
            if first and joined.startswith(at_at):
                # "@attribute <name> <type>": keep only the feature name.
                name, _type = str(joined.split(at_at)[1]).split(' ')
                header.append(str(name))
            if data_flag:
                temp_list.append(row)
            if joined.startswith("@data"):
                data_flag = True
    # openSMILE's ARFF-style output appears to leave a blank line after
    # "@data", so the value row sits at index 1 -- TODO confirm for other
    # openSMILE configurations.
    if first:
        # Bug fix: os.path.splitext removes the extension exactly; the old
        # rstrip('.csv') also stripped trailing 'c'/'s'/'v' characters from
        # the stem (e.g. "music.csv" became "musi").
        column = os.path.splitext(os.path.basename(oS_csv))[0].casefold()
        return pd.DataFrame(data=temp_list[1], index=header, columns=[column])
    return temp_list[1]
def oS_dir_to_csv(top_dir):
    """
    Collect all openSMILE output csv files in a given top-level directory
    into a single csv with per-feature summary columns.

    Parameters
    ----------
    top_dir : string
        absolute path to a directory of *.csv openSMILE output files

    Outputs
    -------
    (top_dir + `/collected/all_collected.csv`) : csv file
        all input data plus mean/median/std/mad summary columns

    Returns
    -------
    collected_data : pandas dataframe
        the exported *.csv as a pandas dataframe
    """
    col_dir = os.path.join(top_dir, "collected")
    if not os.path.exists(col_dir):
        os.makedirs(col_dir)
    collected_data = None
    for file in os.listdir(top_dir):
        if not file.casefold().endswith('.csv'.casefold()):
            continue
        # Column label: file stem without extension (the old rstrip('.csv')
        # also ate trailing 'c'/'s'/'v' characters of the stem).
        label = os.path.splitext(os.path.basename(file))[0].casefold()
        if collected_data is None:
            # Bug fix: the old `i == 0` test broke whenever the first
            # directory entry was not a csv (e.g. the freshly created
            # "collected" subdirectory above).
            collected_data = oS_csv_reformat(os.path.join(top_dir, file), True)
        else:
            collected_data[label] = oS_csv_reformat(
                os.path.join(top_dir, file), False)
    # Raises AttributeError when the directory holds no csvs, as before.
    collected_data = collected_data.apply(pd.to_numeric, errors='coerce')
    # NOTE(review): column 0 (the first csv) is excluded from the summary
    # statistics, matching the original behaviour -- confirm intentional.
    cols = [column for index, column in enumerate(list(collected_data))
            if index > 0]
    summary = collected_data[cols]
    collected_data['mean'] = summary.mean(axis=1)
    collected_data['median'] = summary.median(axis=1)
    collected_data['std'] = summary.std(axis=1)
    # DataFrame.mad was removed in pandas 2.0; compute the mean absolute
    # deviation around the row mean explicitly (same value).
    collected_data['mad'] = summary.sub(summary.mean(axis=1), axis=0) \
        .abs().mean(axis=1)
    collected_data.sort_values(by='mad', axis=0, ascending=False, inplace=True)
    collected_data.to_csv(os.path.join(col_dir, "all_collected.csv"))
    return collected_data
# ============================================================================
if __name__ == '__main__':
main() | 34.222222 | 79 | 0.585714 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
openSMILE_dir_to_csv.py
Script to format openSMILE emobase *.csv output for a given set of files into a
single csv. Also returns the data as a pandas dataframe.
Created on Mon Jan 23 10:43:34 2017
Author:
– Jon Clucas, 2017 (jon.clucas@childmind.org)
© 2017, Child Mind Institute, Apache v2.0 License
"""
import argparse, csv, os, pandas as pd, subprocess
def oS_csv_reformat(oS_csv, first):
    """
    Get features from an openSMILE emobase configuration csv output.

    Parameters
    ----------
    oS_csv : string
        absolute path to a *.csv openSMILE output file

    first : boolean
        `True` for the first csv of the set (feature names are harvested),
        `False` otherwise

    Returns
    -------
    data : pandas dataframe or list
        if `first`, a one-column dataframe of values indexed by feature name;
        otherwise, the list of raw csv values
    """
    print(oS_csv)
    at_at = "@attribute "
    if first:
        header = []
    temp_list = []
    data_flag = False
    with open(oS_csv, 'r') as f:
        reader = csv.reader(f)
        for row in reader:
            joined = ''.join(row)
            if first and joined.startswith(at_at):
                # "@attribute <name> <type>": keep only the feature name.
                name, _type = str(joined.split(at_at)[1]).split(' ')
                header.append(str(name))
            if data_flag:
                temp_list.append(row)
            if joined.startswith("@data"):
                data_flag = True
    # openSMILE's ARFF-style output appears to leave a blank line after
    # "@data", so the value row sits at index 1 -- TODO confirm for other
    # openSMILE configurations.
    if first:
        # Bug fix: os.path.splitext removes the extension exactly; the old
        # rstrip('.csv') also stripped trailing 'c'/'s'/'v' characters from
        # the stem (e.g. "music.csv" became "musi").
        column = os.path.splitext(os.path.basename(oS_csv))[0].casefold()
        return pd.DataFrame(data=temp_list[1], index=header, columns=[column])
    return temp_list[1]
def oS_dir_to_csv(top_dir):
    """
    Collect all openSMILE output csv files in a given top-level directory
    into a single csv with per-feature summary columns.

    Parameters
    ----------
    top_dir : string
        absolute path to a directory of *.csv openSMILE output files

    Outputs
    -------
    (top_dir + `/collected/all_collected.csv`) : csv file
        all input data plus mean/median/std/mad summary columns

    Returns
    -------
    collected_data : pandas dataframe
        the exported *.csv as a pandas dataframe
    """
    col_dir = os.path.join(top_dir, "collected")
    if not os.path.exists(col_dir):
        os.makedirs(col_dir)
    collected_data = None
    for file in os.listdir(top_dir):
        if not file.casefold().endswith('.csv'.casefold()):
            continue
        # Column label: file stem without extension (the old rstrip('.csv')
        # also ate trailing 'c'/'s'/'v' characters of the stem).
        label = os.path.splitext(os.path.basename(file))[0].casefold()
        if collected_data is None:
            # Bug fix: the old `i == 0` test broke whenever the first
            # directory entry was not a csv (e.g. the freshly created
            # "collected" subdirectory above).
            collected_data = oS_csv_reformat(os.path.join(top_dir, file), True)
        else:
            collected_data[label] = oS_csv_reformat(
                os.path.join(top_dir, file), False)
    # Raises AttributeError when the directory holds no csvs, as before.
    collected_data = collected_data.apply(pd.to_numeric, errors='coerce')
    # NOTE(review): column 0 (the first csv) is excluded from the summary
    # statistics, matching the original behaviour -- confirm intentional.
    cols = [column for index, column in enumerate(list(collected_data))
            if index > 0]
    summary = collected_data[cols]
    collected_data['mean'] = summary.mean(axis=1)
    collected_data['median'] = summary.median(axis=1)
    collected_data['std'] = summary.std(axis=1)
    # DataFrame.mad was removed in pandas 2.0; compute the mean absolute
    # deviation around the row mean explicitly (same value).
    collected_data['mad'] = summary.sub(summary.mean(axis=1), axis=0) \
        .abs().mean(axis=1)
    collected_data.sort_values(by='mad', axis=0, ascending=False, inplace=True)
    collected_data.to_csv(os.path.join(col_dir, "all_collected.csv"))
    return collected_data
def main():
    """Parse the target directory from the command line and collect its
    openSMILE csv outputs, removing a stray macOS .DS_Store file first."""
    parser = argparse.ArgumentParser(description='get directory')
    parser.add_argument('in_dir', metavar='in_dir', type=str)
    arg = parser.parse_args()
    ds_store = os.path.join(arg.in_dir, ".DS_Store")
    if os.path.exists(ds_store):
        print(''.join(['rm ', ds_store]))
        # Security fix: os.remove replaces `subprocess.run("rm ...",
        # shell=True)`, which allowed shell injection through a crafted
        # directory name.
        os.remove(ds_store)
    oS_dir_to_csv(arg.in_dir)
# ============================================================================
if __name__ == '__main__':
main() | 444 | 0 | 23 |
8973e35176c8a1a48a4120591aa32770860149cc | 2,186 | py | Python | examples/frameworks/catboost/catboost_example.py | thepycoder/clearml | 717edba8c2b39fb7486bd2aba9ca0294f309b4c3 | [
"Apache-2.0"
] | 2,097 | 2019-06-11T14:36:25.000Z | 2020-12-21T03:52:59.000Z | examples/frameworks/catboost/catboost_example.py | thepycoder/clearml | 717edba8c2b39fb7486bd2aba9ca0294f309b4c3 | [
"Apache-2.0"
] | 247 | 2019-06-11T15:10:26.000Z | 2020-12-21T17:34:32.000Z | examples/frameworks/catboost/catboost_example.py | thepycoder/clearml | 717edba8c2b39fb7486bd2aba9ca0294f309b4c3 | [
"Apache-2.0"
] | 256 | 2019-06-11T14:36:28.000Z | 2020-12-18T08:32:47.000Z | # ClearML - Example of CatBoost training, saving model and loading model
#
import argparse
from catboost import CatBoostRegressor, Pool
from catboost.datasets import msrank
from clearml import Task
import numpy as np
from sklearn.model_selection import train_test_split
if __name__ == "__main__":
    # Register this run with ClearML before any work happens.
    Task.init(project_name="examples", task_name="CatBoost simple example")
    parser = argparse.ArgumentParser()
    # type=int so a command-line override arrives as an integer like the
    # default; without it CatBoostRegressor would receive a string.
    parser.add_argument("--iterations", type=int, default=200)
    args = parser.parse_args()
    main(args.iterations)
| 35.836066 | 100 | 0.741537 | # ClearML - Example of CatBoost training, saving model and loading model
#
import argparse
from catboost import CatBoostRegressor, Pool
from catboost.datasets import msrank
from clearml import Task
import numpy as np
from sklearn.model_selection import train_test_split
def main(iterations):
    """Train a CatBoost regressor with per-row baselines on the MSRank
    dataset, round-trip the model through a ``.cbm`` file, and check that
    Pool-baseline predictions equal manually baseline-shifted predictions.
    """
    # Download train and validation datasets.
    train_df, test_df = msrank()
    # Column 0 contains label values, column 1 contains group ids; drop both
    # from the feature matrices.
    features_train = train_df.drop([0, 1], axis=1).values
    labels_train = train_df[0].values
    features_test = test_df.drop([0, 1], axis=1).values
    labels_test = test_df[0].values
    # First half of the train data feeds the constant baseline,
    # second half trains the main model.
    feats_first, feats_second, labels_first, labels_second = train_test_split(
        features_train, labels_train, test_size=0.5)
    model = CatBoostRegressor(iterations=iterations, verbose=False)
    # Simple baseline: the mean target over the first split.
    mean_target = labels_first.mean()
    baseline_train = np.full(labels_second.shape[0], mean_target)
    baseline_test = np.full(labels_test.shape[0], mean_target)
    # Pools carry the baseline alongside features and labels.
    train_pool = Pool(feats_second, labels_second, baseline=baseline_train)
    test_pool = Pool(features_test, labels_test, baseline=baseline_test)
    # Train, then persist and reload the model.
    model.fit(train_pool, eval_set=test_pool, verbose=True, plot=False, save_snapshot=True)
    model.save_model("example.cbm")
    model = CatBoostRegressor()
    model.load_model("example.cbm")
    # Predictions made on the Pool (baseline applied by CatBoost) ...
    preds_from_pool = model.predict(test_pool)
    # ... must match raw-feature predictions plus the baseline.
    preds_manual = baseline_test + model.predict(features_test)
    assert (np.abs(preds_from_pool - preds_manual) < 1e-6).all()
if __name__ == "__main__":
    # Register this run with the ClearML server before training starts.
    Task.init(project_name="examples", task_name="CatBoost simple example")
    parser = argparse.ArgumentParser()
    # NOTE(review): a CLI-supplied --iterations arrives as a string (no
    # type=int); only the default (200) is an int — consider adding type=int.
    parser.add_argument("--iterations", default=200)
    args = parser.parse_args()
    main(args.iterations)
| 1,634 | 0 | 23 |
1f3ee22025ccb85d0fe14837daa5f25f69472cd0 | 411 | py | Python | src/product/signals.py | asanka94/OMS-backend | 0b0637b40e71b9b71156d28fdc0ff1fb7a3d12ac | [
"MIT"
] | null | null | null | src/product/signals.py | asanka94/OMS-backend | 0b0637b40e71b9b71156d28fdc0ff1fb7a3d12ac | [
"MIT"
] | 9 | 2021-03-19T01:50:04.000Z | 2022-03-12T00:23:18.000Z | src/product/signals.py | asanka94/OMS-backend | 0b0637b40e71b9b71156d28fdc0ff1fb7a3d12ac | [
"MIT"
] | null | null | null | from django.template.defaultfilters import slugify
def slug_generator(sender, instance, *args, **kwargs):
    """Title-case *instance.name* and store a slug derived from it,
    suffixing the primary key when the plain slug is already taken."""
    instance.name = instance.name.title()
    base_slug = slugify(instance.name)
    already_taken = sender.objects.filter(slug=base_slug).exists()
    if already_taken:
        instance.slug = "%s-%s" % (base_slug, instance.id)
    else:
        instance.slug = base_slug
| 27.4 | 66 | 0.688564 | from django.template.defaultfilters import slugify
def slug_generator(sender, instance, *args, **kwargs):
    ''' capitalize first letter of each word and generates slug '''
    # str.title() upper-cases the first letter of every word in the name.
    instance.name = instance.name.title()
    slug = slugify(instance.name)
    # Does another row of this model already use the plain slug?
    exists = sender.objects.filter(slug=slug).exists()
    if not exists:
        instance.slug = slug
    else:
        # NOTE(review): for unsaved instances instance.id may be None,
        # producing a "<slug>-None" slug — confirm this signal runs on
        # saved objects only.
        instance.slug = "%s-%s" % (slug, instance.id)
| 0 | 0 | 0 |
5d777d6f520440d73be3b7e20022b539bde151fb | 14,330 | py | Python | tests/utils/html.py | boltnev/iktomi | bc92006c026f9b42e56f1af5ced2fe577673a486 | [
"MIT"
] | 14 | 2015-02-15T05:24:22.000Z | 2020-03-19T10:07:28.000Z | tests/utils/html.py | boltnev/iktomi | bc92006c026f9b42e56f1af5ced2fe577673a486 | [
"MIT"
] | 10 | 2015-04-04T10:10:41.000Z | 2016-06-01T13:17:58.000Z | tests/utils/html.py | boltnev/iktomi | bc92006c026f9b42e56f1af5ced2fe577673a486 | [
"MIT"
] | 5 | 2015-02-20T11:18:58.000Z | 2016-10-18T15:30:13.000Z | # -*- coding: utf-8 -*-
import unittest
import os
import re
from iktomi.utils import html
from lxml.html import Element
from lxml import etree
import lxml.html as h
class TestSanitizer(unittest.TestCase):
'''Tests for sanitizer based on lxml'''
@unittest.skip('lxml does not provide css filtration')
def test_safe_css(self):
u'''Ensure that sanitizer does not remove safe css'''
self.attrs['allowed_attributes'].append('style')
res = self.sanitize('<p style="color: #000; background-color: red; font-size: 1.2em">p</p>')
assert 'color: #000; background-color: red; font-size: 1.2em' in res
@unittest.skip('not supported')
@unittest.skip('lxml does not provide css filtration')
def test_unsafe_css(self):
u'''Special test for html5: html5lib has very ultimate css cleanup with gauntlets'''
self.attrs['allowed_attributes'].append('style')
res = self.sanitize('<p style="background: url(javascript:void); '
'color: #000; width: e/**/xpression(alert());">p</p>')
self.assertEqual(res, '<p>p</p>')
def test_on_real_data(self):
'''
Compare with logged genshi output to ensure that there are no
new errors
'''
return None
skips = 10
if os.path.isdir('clean_html'):
self.attrs['string_callbacks'] = [html.remove_TinyMCE_trash,
html.strip_empty_tags_nested,
spaceless]
for dir, dirs, files in os.walk('clean_html'):
for file in filter(lambda x: x.endswith('.in'), files):
path = os.path.join(dir, file)
in_ = open(path, 'r').read().decode('utf-8')
out = open(path[:-3] + '.out', 'r').read().decode('utf-8')
out = html.remove_TinyMCE_trash(out) # Old sanitizer can't do this
#out = self.sanitize(out).strip()
res = self.sanitize(in_).strip()
if res != out:
if skips < 10:
print(in_, '\n----------\n', res + '---\n!=\n' + out + '---\n\n\n')
skips -= 1
if not skips:
return
#print "asserted"
@unittest.skip('lxml does not support this option')
# cannot create Cleaner with wrong parameters
| 42.776119 | 105 | 0.52589 | # -*- coding: utf-8 -*-
import unittest
import os
import re
from iktomi.utils import html
from lxml.html import Element
from lxml import etree
import lxml.html as h
class TestSanitizer(unittest.TestCase):
    '''Tests for sanitizer based on lxml'''
    # Each test mutates self.attrs before sanitizing, so setUp rebuilds the
    # default keyword arguments for html.sanitize from scratch every time.
    def setUp(self):
        self.attrs = {
            'allow_tags': ['a', 'p', 'br', 'li', 'ul', 'ol', 'hr', 'u', 'i', 'b',
                           'blockquote', 'sub', 'sup', 'span', 'img'],
            'safe_attrs': ['href', 'src', 'alt', 'title', 'class', 'rel'],
            'drop_empty_tags': ['p', 'a', 'u', 'i', 'b', 'sub', 'sup'],
            'allow_classes': {},
            'tags_to_wrap': [],
            #'strip_whitespace': True,
        }
    # Run the sanitizer with the current per-test attrs.
    def sanitize(self, text):
        return html.sanitize(text, **self.attrs)
    # Assertion helper: sanitized input must equal the expected markup.
    def assertSanitize(self, text, right):
        res = self.sanitize(text)
        self.assertEqual(res, right)
    def test_safe_attrs(self):
        self.assertSanitize('<p notsafeattr="s" abbr="1" alt="Alt">Safe p</p>',
                            '<p alt="Alt">Safe p</p>')
    def test_allowed_protocols(self):
        self.attrs['allowed_protocols'] = set(['http'])
        self.attrs['allow_external_src'] = True
        self.attrs['safe_attrs'].append('cite')
        self.assertSanitize('<a href="http://iktomi.com">sample text</a>',
                            '<a href="http://iktomi.com">sample text</a>')
        self.assertSanitize('<a href="iktomi://http.com">sample text</a>',
                            'sample text')
        self.assertSanitize('<img src="http://iktomi.com">',
                            '<img src="http://iktomi.com">')
        self.assertSanitize('<img src="iktomi://http.com">', '')
        self.assertSanitize('<blockquote cite="http://iktomi.com">sample text</blockquote>',
                            '<blockquote cite="http://iktomi.com">sample text</blockquote>')
        self.assertSanitize('<blockquote cite="iktomi://http.com">sample text</blockquote>',
                            '<blockquote>sample text</blockquote>')
    def test_safe_tags(self):
        self.assertSanitize('<p alt="Alt">Safe p <script>bad_script()</script></p> <accept>acc</accept>',
                            '<p alt="Alt">Safe p </p> acc')
    def test_empty_tags(self):
        self.assertSanitize('<p alt="Alt">p</p><p alt="Alt"> </p><p style="color:red"></p><p></p>',
                            '<p alt="Alt">p</p><p alt="Alt"> </p>')
        self.assertSanitize('<b>some<span> </span>text</b>',
                            '<b>some<span> </span>text</b>')
        self.assertSanitize('<p>head</p><p><br></p><p>tail</p>',
                            '<p>head</p><p>tail</p>')
        self.assertSanitize('<p>head</p><p><b><i> <br /> </i></b></p><p>tail</p>',
                            '<p>head</p><p>tail</p>')
        self.assertSanitize('<p>head</p><p><b>mid<i></i></b></p><p>tail</p>',
                            '<p>head</p><p><b>mid</b></p><p>tail</p>')
        self.attrs['allow_tags'].append('div')
        self.assertSanitize('<div>text<br>text</div>',
                            '<div>text<br>text</div>')
        self.assertSanitize('<i><br>text</i>',
                            '<i><br>text</i>')
    @unittest.skip('lxml does not provide css filtration')
    def test_safe_css(self):
        u'''Ensure that sanitizer does not remove safe css'''
        self.attrs['allowed_attributes'].append('style')
        res = self.sanitize('<p style="color: #000; background-color: red; font-size: 1.2em">p</p>')
        assert 'color: #000; background-color: red; font-size: 1.2em' in res
    def test_allowed_classes(self):
        # allow_classes values may be a whitelist or a predicate callable.
        self.attrs['allow_classes']['p'] = ['yellow']
        self.attrs['allow_classes']['b'] = lambda x: 'b' in x
        self.assertSanitize('<p class="yellow green">',
                            '<p class="yellow"></p>')
        self.assertSanitize('<sup class="yellow green" title="Alt">a</sup>',
                            '<sup title="Alt">a</sup>')
        self.assertSanitize('<b class="has_b has_c">a</b>',
                            '<b class="has_b">a</b>')
    def test_tags_sticking(self):
        self.attrs['allow_tags'].remove('span')
        res = self.sanitize('<p>a</p> <p>b</p>')
        self.assertEqual(res, '<p>a</p> <p>b</p>')
        res = self.sanitize('<b>a</b> <b>b</b>')
        self.assertEqual(res, '<b>a</b> <b>b</b>')
        res = self.sanitize('<span>a</span> <p>b</p>')
        self.assertEqual(res, 'a <p>b</p>')
        res = self.sanitize('<p><span>a</span> <span>b</span></p>')
        self.assertEqual(res, '<p>a b</p>')
        # lxml parser eats the space on some environments
        #res = self.sanitize('<brbr>a</brbr> <p>b</p>')
        #self.assertEqual(res, 'a <p>b</p>')
        #res = self.sanitize('<p><brbr>a</brbr> <brbr>b</brbr></p>')
        #self.assertEqual(res, '<p>a b</p>')
    @unittest.skip('not supported')
    def test_autoclosing_attrs_xhtml(self):
        self.attrs['method'] = 'xhtml'
        res = self.sanitize('<br><hr>b ')
        self.assertEqual(res, '<br /><hr />b')
    def test_autoclosing_attrs_html(self):
        self.attrs['drop_empty_tags'] = []
        res = self.sanitize('<br><hr>b <p>')
        self.assertEqual(res, '<br><hr>b <p></p>')
    def test_remove_empty_a(self):
        self.assertSanitize('<a href="moo">BLABLA</a> <a>txt <span>foo</span></a>',
                            '<a href="moo">BLABLA</a> txt <span>foo</span>')
        self.assertSanitize('<p><a>run</a><b><a>bar</a></b></p>',
                            '<p>run<b>bar</b></p>')
    @unittest.skip('lxml does not provide css filtration')
    def test_unsafe_css(self):
        u'''Special test for html5: html5lib has very ultimate css cleanup with gauntlets'''
        self.attrs['allowed_attributes'].append('style')
        res = self.sanitize('<p style="background: url(javascript:void); '
                            'color: #000; width: e/**/xpression(alert());">p</p>')
        self.assertEqual(res, '<p>p</p>')
    def test_plain_text(self):
        res = self.sanitize('Some plain text')
        self.assertEqual(res, 'Some plain text')
    def test_empty_strings(self):
        res = self.sanitize('')
        self.assertEqual(res, '')
        res = self.sanitize('\t \n')
        self.assertEqual(res, '')
    # NOTE(review): the early "return None" below disables this comparison
    # against logged genshi output; the remainder is intentionally dead code.
    def test_on_real_data(self):
        '''
        Compare with logged genshi output to ensure that there are no
        new errors
        '''
        return None
        skips = 10
        if os.path.isdir('clean_html'):
            self.attrs['string_callbacks'] = [html.remove_TinyMCE_trash,
                                              html.strip_empty_tags_nested,
                                              spaceless]
            for dir, dirs, files in os.walk('clean_html'):
                for file in filter(lambda x: x.endswith('.in'), files):
                    path = os.path.join(dir, file)
                    in_ = open(path, 'r').read().decode('utf-8')
                    out = open(path[:-3] + '.out', 'r').read().decode('utf-8')
                    out = html.remove_TinyMCE_trash(out) # Old sanitizer can't do this
                    #out = self.sanitize(out).strip()
                    res = self.sanitize(in_).strip()
                    if res != out:
                        if skips < 10:
                            print(in_, '\n----------\n', res + '---\n!=\n' + out + '---\n\n\n')
                        skips -= 1
                        if not skips:
                            return
                    #print "asserted"
    def test_no_initial_data(self):
        self.attrs = {}
        res = self.sanitize('a<p color: #000" class="2">p</p><script></script>')
        self.assertEqual(res, 'a<p>p</p>')
    @unittest.skip('lxml does not support this option')
    def test_escaping(self):
        self.attrs['escape_invalid_tags'] = True
        res = self.sanitize('a<p>p</p><script>alert()</script>')
        self.assertEqual(res, 'a<p>p</p><script>alert()</script>')
    def test_get_wrapper_tag(self):
        c = html.Cleaner(allow_tags=None, wrap_inline_tags='div')
        self.assertEqual(c.get_wrapper_tag(), None)
        c = html.Cleaner(allow_tags=['p', 'div'], wrap_inline_tags=False)
        self.assertEqual(c.get_wrapper_tag(), None)
        c = html.Cleaner(allow_tags=['p', 'div'], wrap_inline_tags=None)
        self.assertEqual(c.get_wrapper_tag().tag, 'p')
        c = html.Cleaner(allow_tags=['div'], wrap_inline_tags=None)
        self.assertEqual(c.get_wrapper_tag().tag, 'div')
        c = html.Cleaner(allow_tags=['b'], wrap_inline_tags=None)
        self.assertEqual(c.get_wrapper_tag(), None)
        c = html.Cleaner(allow_tags=['p', 'div'], wrap_inline_tags='div')
        self.assertEqual(c.get_wrapper_tag().tag, 'div')
        c = html.Cleaner(allow_tags=['p', 'div', 'span'],
                         wrap_inline_tags=(lambda:Element('span')))
        self.assertEqual(c.get_wrapper_tag().tag, 'span')
        c = html.Cleaner(allow_tags=['p', 'div'],
                         wrap_inline_tags=(lambda:Element('span')))
        self.assertEqual(c.get_wrapper_tag(), None)
    def test_is_element_empty(self):
        c = html.Cleaner(allow_tags=['p', 'div', 'span', 'br', 'pre'],
                         drop_empty_tags=['p', 'span'])
        doc = h.fragment_fromstring('<p></p><span>asd</span><br><pre></pre>',
                                    create_parent=True)
        p = doc.xpath('.//p')[0]
        self.assertTrue(c.is_element_empty(p))
        span = doc.xpath('.//span')[0]
        self.assertFalse(c.is_element_empty(span))
        br = doc.xpath('.//br')[0]
        self.assertTrue(c.is_element_empty(br))
        pre = doc.xpath('.//pre')[0]
        self.assertFalse(c.is_element_empty(pre))
    def test_tags_to_wrap(self):
        self.attrs['tags_to_wrap'] = ['b', 'i', 'br']
        self.attrs['wrap_inline_tags'] = True
        self.assertSanitize("head<b>bold</b>tail",
                            "<p>head<b>bold</b>tail</p>")
        self.assertSanitize("head<b>bold</b>boldtail<i>italic</i><p>par</p>tail",
                            "<p>head<b>bold</b>boldtail<i>italic</i></p><p>par</p><p>tail</p>")
        self.assertSanitize("<p>par</p><b>bla</b>text<p>blabla</p>",
                            "<p>par</p><p><b>bla</b>text</p><p>blabla</p>")
        self.assertSanitize("<p>par</p>text<b>bla</b>text<p>blabla</p>",
                            "<p>par</p><p>text<b>bla</b>text</p><p>blabla</p>")
        self.assertSanitize('first<br>second<br>third',
                            '<p>first</p><p>second</p><p>third</p>')
        self.assertSanitize('first<br>second<p>third</p>',
                            '<p>first</p><p>second</p><p>third</p>')
        self.assertSanitize('<p>first</p>tail<br>second<p>third</p>',
                            '<p>first</p><p>tail</p><p>second</p><p>third</p>')
    def test_dom_callback(self):
        def fix_link_domain(dom):
            # sample callback
            for el in dom.xpath('.//a'):
                if el.attrib['href']:
                    el.attrib['href'] = el.attrib['href'].replace('example', 'iktomi')
        self.attrs['dom_callbacks'] = [fix_link_domain]
        self.assertSanitize('<a href="http://example.com">sample text</a>',
                            '<a href="http://iktomi.com">sample text</a>')
    def test_tags_to_wrap_trailing_br(self):
        self.attrs['tags_to_wrap'] = ['b', 'i', 'br']
        self.attrs['wrap_inline_tags'] = True
        self.assertSanitize("<p>head</p><br> ",
                            "<p>head</p>")
    def test_tags_to_wrap_double_br(self):
        self.attrs['tags_to_wrap'] = ['b', 'i', 'br']
        self.attrs['wrap_inline_tags'] = True
        self.assertSanitize("head<br><br>tail",
                            "<p>head</p><p>tail</p>")
        self.assertSanitize("head<br> <br>tail",
                            "<p>head</p><p>tail</p>")
        self.assertSanitize("<br><br><br><br>", "")
    def test_split_paragraphs_by_br(self):
        self.attrs['tags_to_wrap'] = ['b', 'i', 'br']
        self.attrs['wrap_inline_tags'] = True
        self.attrs['drop_empty_tags'] = []
        self.assertSanitize("<p>head<br><br><br></p>",
                            "<p>head</p><p></p><p></p><p></p>")
        self.assertSanitize("<p>head<br>body<br>tail</p>",
                            "<p>head</p><p>body</p><p>tail</p>")
        self.assertSanitize("<p>head<br><b>body<sup>letters</sup></b><br><i>ta</i>il</p>",
                            "<p>head</p><p><b>body<sup>letters</sup></b></p><p><i>ta</i>il</p>")
    def test_wrap_inline_tags(self):
        self.attrs['tags_to_wrap'] = ['b', 'i', 'br']
        self.attrs['wrap_inline_tags'] = False
        self.assertSanitize('first<br>second<br>third',
                            'first<br>second<br>third')
    def test_p_not_allowed(self):
        self.attrs['tags_to_wrap'] = ['b', 'i', 'br']
        self.attrs['wrap_inline_tags'] = 'div'
        # replacing p with div in allow_tags
        self.attrs['allow_tags'].remove('p')
        self.attrs['allow_tags'].append('div')
        self.assertSanitize("head<br><br>tail",
                            "<div>head</div><div>tail</div>")
    def test_lambda_wrap_tag(self):
        self.attrs['tags_to_wrap'] = ['b', 'i', 'br']
        self.attrs['wrap_inline_tags'] = lambda:Element('span')
        self.assertSanitize("head<br><br>tail",
                            "<span>head</span><span>tail</span>")
        self.attrs['allow_tags'].remove('p')
    def test_no_wrap_tags(self):
        self.attrs['tags_to_wrap'] = ['b', 'i', 'br']
        self.attrs['wrap_inline_tags'] = True
        self.attrs['allow_tags'].remove('p')
        self.assertRaises(ValueError, self.sanitize, 'head<br><br>tail')
    # cannot create Cleaner with wrong parameters
    def test_create_cleaner_with_wrong_parameters(self):
        self.attrs['wrap_inline_tags'] = True
        self.attrs['allow_tags'].remove('p')
        self.assertRaises(ValueError, html.Cleaner, **self.attrs)
def spaceless(clean, **kwargs):
    """Collapse every run of whitespace in *clean* to a single space and
    strip leading/trailing whitespace. Extra keyword arguments are accepted
    (callback signature) and ignored."""
    # r'\s+' (raw string) avoids the invalid-escape-sequence warning that the
    # original '\s+' literal triggers; re.sub with a string pattern reuses
    # re's internal compiled-pattern cache, so no manual compile is needed.
    return re.sub(r'\s+', ' ', clean).strip()
| 11,055 | 0 | 776 |
38b2d2763ba8fad7eb60aaae1908527d08cc5568 | 1,216 | py | Python | Algorithms/Strings/remove_vowels.py | Praggya17/HacktoberFestContribute | 098cb1012f1f2ed6ca6b3544a7b962b6c49e2643 | [
"MIT"
] | 98 | 2018-10-09T15:42:41.000Z | 2021-10-04T15:25:44.000Z | Algorithms/Strings/remove_vowels.py | Praggya17/HacktoberFestContribute | 098cb1012f1f2ed6ca6b3544a7b962b6c49e2643 | [
"MIT"
] | 141 | 2018-10-06T16:55:20.000Z | 2021-10-31T18:25:35.000Z | Algorithms/Strings/remove_vowels.py | Praggya17/HacktoberFestContribute | 098cb1012f1f2ed6ca6b3544a7b962b6c49e2643 | [
"MIT"
] | 885 | 2018-10-06T17:14:44.000Z | 2022-01-29T03:16:21.000Z | #!/usr/bin/env python
# from any given string, remove all vowels
# NON REGEX VERSION
sample_string='Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?'
en_vowels='aeiouAEIOU'
# Delete all vowels in a single pass with a translation table instead of
# one str.replace call per vowel.
target_string = sample_string.translate(str.maketrans('', '', en_vowels))
print(target_string)
# REGEX VERSION
import re
vowels = re.compile(r'[aeiouAEIOU]')
print(vowels.sub('',sample_string)) | 64 | 881 | 0.813322 | #!/usr/bin/env python
# from any given string, remove all vowels
# NON REGEX VERSION
sample_string='Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?'
en_vowels='aeiouAEIOU'
# Strip each vowel (both cases) one at a time via str.replace.
target_string = sample_string
for char in en_vowels:
    target_string = target_string.replace(char,'')
print(target_string)
# REGEX VERSION
import re
vowels = re.compile(r'[aeiouAEIOU]')
print(vowels.sub('',sample_string)) | 0 | 0 | 0 |
16be4ccde99385c97c7b33aca56b05715bfed523 | 5,490 | py | Python | src/sdgen/svg/cairosvg/parser.py | PP-TSD/sdgen | 58a3a46f7f612c8d7774dd43a4ab55df4f33ab20 | [
"MIT"
] | 1 | 2015-02-18T17:59:05.000Z | 2015-02-18T17:59:05.000Z | src/sdgen/svg/cairosvg/parser.py | PP-TSD/sdgen | 58a3a46f7f612c8d7774dd43a4ab55df4f33ab20 | [
"MIT"
] | null | null | null | src/sdgen/svg/cairosvg/parser.py | PP-TSD/sdgen | 58a3a46f7f612c8d7774dd43a4ab55df4f33ab20 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# This file is part of CairoSVG
# Copyright © 2010-2012 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with CairoSVG. If not, see <http://www.gnu.org/licenses/>.
"""
SVG Parser.
"""
# Fallbacks for Python 2/3 and lxml/ElementTree
# pylint: disable=E0611,F0401,W0611
try:
import lxml.etree as ElementTree
from lxml.etree import XMLSyntaxError as ParseError
HAS_LXML = True
except ImportError:
from xml.etree import ElementTree
from xml.parsers import expat
# ElementTree's API changed between 2.6 and 2.7
# pylint: disable=C0103
ParseError = getattr(ElementTree, 'ParseError', expat.ExpatError)
# pylint: enable=C0103
HAS_LXML = False
try:
from urllib import urlopen
import urlparse
except ImportError:
from urllib.request import urlopen
from urllib import parse as urlparse # Python 3
# pylint: enable=E0611,F0401,W0611
from .css import apply_stylesheets
# Python 2/3 compat
try:
basestring
except NameError:
basestring = str
class Node(dict):
    """SVG node with dict-like properties and children."""
    def __init__(self, node, parent=None):
        """Create the Node from ElementTree ``node``, with ``parent`` Node."""
        super(Node, self).__init__()
        self.children = ()
        self.root = False
        # Strip the XML namespace prefix: "{uri}tag" -> "tag".
        self.tag = node.tag.split("}", 1)[-1]
        self.text = node.text
        # Handle the CSS
        # Explode the inline "style" attribute into individual attributes so
        # its declarations inherit like any other property.
        style = node.attrib.get("style")
        if style:
            for attribute in style.split(";"):
                if ":" in attribute:
                    name, value = attribute.split(":", 1)
                    node.attrib[name.strip()] = value.strip()
            del node.attrib["style"]
        # Inherits from parent properties
        if parent is not None:
            items = parent.copy()
            not_inherited = ("transform", "opacity")
            if self.tag == "tspan":
                not_inherited += ("x", "y")
            for attribute in not_inherited:
                if attribute in items:
                    del items[attribute]
            # TODO: drop other attributes that should not be inherited
            self.update(items)
            self.url = parent.url
            self.xml_tree = parent.xml_tree
            self.parent = parent
        # The node's own attributes override anything inherited above.
        self.update(dict(node.attrib.items()))
        # Manage text by creating children
        if self.tag == "text" or self.tag == "textPath":
            self.children = self.text_children(node)
        if not self.children:
            # Only element children (tag is a string) become Nodes; comments
            # and processing instructions are skipped.
            self.children = tuple(
                Node(child, self) for child in node
                if isinstance(child.tag, basestring))
    def text_children(self, node):
        """Create children and return them."""
        children = []
        for child in node:
            children.append(Node(child, parent=self))
            if child.tail:
                # Wrap trailing text in an anonymous <tspan> so it keeps its
                # position in document order.
                anonymous = ElementTree.Element('tspan')
                anonymous.text = child.tail
                children.append(Node(anonymous, parent=self))
        return list(children)
class Tree(Node):
    """SVG tree."""
    def __init__(self, **kwargs):
        """Create the Tree from SVG ``text``."""
        # Make the parameters keyword-only:
        bytestring = kwargs.pop('bytestring', None)
        file_obj = kwargs.pop('file_obj', None)
        url = kwargs.pop('url', None)
        parent = kwargs.pop('parent', None)
        # The three input sources are tried in priority order:
        # bytestring, then file object, then URL.
        if bytestring is not None:
            tree = ElementTree.fromstring(bytestring)
            self.url = url
        elif file_obj is not None:
            tree = ElementTree.parse(file_obj).getroot()
            self.url = getattr(file_obj, 'name', url)
        elif url is not None:
            # Split off a "#fragment" element id, if any.
            if "#" in url:
                url, element_id = url.split("#", 1)
            else:
                element_id = None
            # Resolve relative references against the parent document's URL.
            if parent and parent.url:
                if url:
                    url = urlparse.urljoin(parent.url, url)
                elif element_id:
                    url = parent.url
            self.url = url
            if url:
                if urlparse.urlparse(url).scheme:
                    input_ = urlopen(url)
                else:
                    input_ = url # filename
                tree = ElementTree.parse(input_).getroot()
            else:
                # Fragment-only reference: reuse the parent's parsed tree.
                tree = parent.xml_tree
            if element_id:
                # getiterator() is the pre-2.7 ElementTree spelling of iter().
                iterator = (
                    tree.iter() if hasattr(tree, 'iter')
                    else tree.getiterator())
                for element in iterator:
                    if element.get("id") == element_id:
                        tree = element
                        break
                # NOTE(review): if element_id is not found, the whole
                # document is used silently — confirm that is intended.
        else:
            raise TypeError(
                'No input. Use one of bytestring, file_obj or url.')
        apply_stylesheets(tree)
        self.xml_tree = tree
        super(Tree, self).__init__(tree, parent)
        self.root = True
| 32.485207 | 79 | 0.573953 | # -*- coding: utf-8 -*-
# This file is part of CairoSVG
# Copyright © 2010-2012 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with CairoSVG. If not, see <http://www.gnu.org/licenses/>.
"""
SVG Parser.
"""
# Fallbacks for Python 2/3 and lxml/ElementTree
# pylint: disable=E0611,F0401,W0611
try:
import lxml.etree as ElementTree
from lxml.etree import XMLSyntaxError as ParseError
HAS_LXML = True
except ImportError:
from xml.etree import ElementTree
from xml.parsers import expat
# ElementTree's API changed between 2.6 and 2.7
# pylint: disable=C0103
ParseError = getattr(ElementTree, 'ParseError', expat.ExpatError)
# pylint: enable=C0103
HAS_LXML = False
try:
from urllib import urlopen
import urlparse
except ImportError:
from urllib.request import urlopen
from urllib import parse as urlparse # Python 3
# pylint: enable=E0611,F0401,W0611
from .css import apply_stylesheets
# Python 2/3 compat
try:
basestring
except NameError:
basestring = str
class Node(dict):
    """SVG node with dict-like properties and children."""
    def __init__(self, node, parent=None):
        """Create the Node from ElementTree ``node``, with ``parent`` Node."""
        super(Node, self).__init__()
        self.children = ()
        self.root = False
        # Strip the XML namespace prefix: "{uri}tag" -> "tag".
        self.tag = node.tag.split("}", 1)[-1]
        self.text = node.text
        # Handle the CSS
        # Explode the inline "style" attribute into individual attributes so
        # its declarations inherit like any other property.
        style = node.attrib.get("style")
        if style:
            for attribute in style.split(";"):
                if ":" in attribute:
                    name, value = attribute.split(":", 1)
                    node.attrib[name.strip()] = value.strip()
            del node.attrib["style"]
        # Inherits from parent properties
        if parent is not None:
            items = parent.copy()
            not_inherited = ("transform", "opacity")
            if self.tag == "tspan":
                not_inherited += ("x", "y")
            for attribute in not_inherited:
                if attribute in items:
                    del items[attribute]
            # TODO: drop other attributes that should not be inherited
            self.update(items)
            self.url = parent.url
            self.xml_tree = parent.xml_tree
            self.parent = parent
        # The node's own attributes override anything inherited above.
        self.update(dict(node.attrib.items()))
        # Manage text by creating children
        if self.tag == "text" or self.tag == "textPath":
            self.children = self.text_children(node)
        if not self.children:
            # Only element children (tag is a string) become Nodes; comments
            # and processing instructions are skipped.
            self.children = tuple(
                Node(child, self) for child in node
                if isinstance(child.tag, basestring))
    def text_children(self, node):
        """Create children and return them."""
        children = []
        for child in node:
            children.append(Node(child, parent=self))
            if child.tail:
                # Wrap trailing text in an anonymous <tspan> so it keeps its
                # position in document order.
                anonymous = ElementTree.Element('tspan')
                anonymous.text = child.tail
                children.append(Node(anonymous, parent=self))
        return list(children)
class Tree(Node):
    """SVG tree."""
    def __init__(self, **kwargs):
        """Create the Tree from SVG ``text``."""
        # Make the parameters keyword-only:
        bytestring = kwargs.pop('bytestring', None)
        file_obj = kwargs.pop('file_obj', None)
        url = kwargs.pop('url', None)
        parent = kwargs.pop('parent', None)
        # The three input sources are tried in priority order:
        # bytestring, then file object, then URL.
        if bytestring is not None:
            tree = ElementTree.fromstring(bytestring)
            self.url = url
        elif file_obj is not None:
            tree = ElementTree.parse(file_obj).getroot()
            self.url = getattr(file_obj, 'name', url)
        elif url is not None:
            # Split off a "#fragment" element id, if any.
            if "#" in url:
                url, element_id = url.split("#", 1)
            else:
                element_id = None
            # Resolve relative references against the parent document's URL.
            if parent and parent.url:
                if url:
                    url = urlparse.urljoin(parent.url, url)
                elif element_id:
                    url = parent.url
            self.url = url
            if url:
                if urlparse.urlparse(url).scheme:
                    input_ = urlopen(url)
                else:
                    input_ = url # filename
                tree = ElementTree.parse(input_).getroot()
            else:
                # Fragment-only reference: reuse the parent's parsed tree.
                tree = parent.xml_tree
            if element_id:
                # getiterator() is the pre-2.7 ElementTree spelling of iter().
                iterator = (
                    tree.iter() if hasattr(tree, 'iter')
                    else tree.getiterator())
                for element in iterator:
                    if element.get("id") == element_id:
                        tree = element
                        break
                # NOTE(review): if element_id is not found, the whole
                # document is used silently — confirm that is intended.
        else:
            raise TypeError(
                'No input. Use one of bytestring, file_obj or url.')
        apply_stylesheets(tree)
        self.xml_tree = tree
        super(Tree, self).__init__(tree, parent)
        self.root = True
| 0 | 0 | 0 |
b349d1e936564787580505606709c070ef2e106f | 1,567 | py | Python | src/onegov/town6/views/form_definition.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/town6/views/form_definition.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/town6/views/form_definition.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | from onegov.core.security import Private, Public
from onegov.form import FormCollection, FormDefinition
from onegov.org.forms.form_definition import FormDefinitionUrlForm
from onegov.org.views.form_definition import get_form_class, \
handle_new_definition, handle_edit_definition, handle_defined_form, \
handle_change_form_name
from onegov.town6 import TownApp
from onegov.town6.layout import FormEditorLayout, FormSubmissionLayout
@TownApp.form(model=FormDefinition, template='form.pt', permission=Public,
form=lambda self, request: self.form_class)
@TownApp.form(model=FormCollection, name='new', template='form.pt',
permission=Private, form=get_form_class)
@TownApp.form(model=FormDefinition, template='form.pt', permission=Private,
form=get_form_class, name='edit')
@TownApp.form(
model=FormDefinition, form=FormDefinitionUrlForm,
template='form.pt', permission=Private,
name='change-url'
)
| 39.175 | 75 | 0.767709 | from onegov.core.security import Private, Public
from onegov.form import FormCollection, FormDefinition
from onegov.org.forms.form_definition import FormDefinitionUrlForm
from onegov.org.views.form_definition import get_form_class, \
handle_new_definition, handle_edit_definition, handle_defined_form, \
handle_change_form_name
from onegov.town6 import TownApp
from onegov.town6.layout import FormEditorLayout, FormSubmissionLayout
@TownApp.form(model=FormDefinition, template='form.pt', permission=Public,
              form=lambda self, request: self.form_class)
def town_handle_defined_form(self, request, form):
    """Delegate to the shared org view, supplying the town submission layout."""
    layout = FormSubmissionLayout(self, request)
    return handle_defined_form(self, request, form, layout)
@TownApp.form(model=FormCollection, name='new', template='form.pt',
              permission=Private, form=get_form_class)
def town_handle_new_definition(self, request, form):
    """Delegate to the shared org view, supplying the town form editor layout."""
    layout = FormEditorLayout(self, request)
    return handle_new_definition(self, request, form, layout)
@TownApp.form(model=FormDefinition, template='form.pt', permission=Private,
              form=get_form_class, name='edit')
def town_handle_edit_definition(self, request, form):
    """Delegate to the shared org view, supplying the town form editor layout."""
    layout = FormEditorLayout(self, request)
    return handle_edit_definition(self, request, form, layout)
@TownApp.form(
    model=FormDefinition, form=FormDefinitionUrlForm,
    template='form.pt', permission=Private,
    name='change-url'
)
def town_handle_change_form_name(self, request, form):
    """Delegate to the shared org view, supplying the town form editor layout."""
    layout = FormEditorLayout(self, request)
    return handle_change_form_name(self, request, form, layout)
| 514 | 0 | 88 |
19adb3a4de59d8b8ef5b77d31fbc5e8cc66282df | 7,498 | py | Python | src/gameplay.py | Gravens/AirDimples | 5531b0e43afb1349b121a54bb8ab1dd29bba0a84 | [
"Apache-2.0"
] | null | null | null | src/gameplay.py | Gravens/AirDimples | 5531b0e43afb1349b121a54bb8ab1dd29bba0a84 | [
"Apache-2.0"
] | 2 | 2021-07-14T09:32:01.000Z | 2021-07-19T16:25:15.000Z | src/gameplay.py | Gravens/AirDimples | 5531b0e43afb1349b121a54bb8ab1dd29bba0a84 | [
"Apache-2.0"
] | 1 | 2021-07-14T15:31:02.000Z | 2021-07-14T15:31:02.000Z | import cv2
from time import time
from random import randint
from object_manager import DefaultCircleManager, PackmanManager, MoovingCircleManager
from utils import log, Joint
from drawing import draw_objects
from config import config
| 36.935961 | 106 | 0.609629 | import cv2
from time import time
from random import randint
from object_manager import DefaultCircleManager, PackmanManager, MoovingCircleManager
from utils import log, Joint
from drawing import draw_objects
from config import config
class Game:
    """Base class for the mini-games.

    Owns the three object managers (plain circles, packmans, moving
    circles), the running score, and the timestamp of the last spawn.
    """

    def __init__(self, w_size):
        self.w_size = w_size
        self.body_part_indexes = config.app.model.BODY_PART_INDEXES
        # when foot circles are disabled, targets are restricted to hands
        self.hands_only = not config.gameplay.foot_circles_enabled
        self.circle_radius = config.gameplay.circle_radius
        self.last_draw_timestamp = time()
        self.DCM = DefaultCircleManager(w_size)
        self.PM = PackmanManager(w_size)
        self.MCM = MoovingCircleManager(w_size)
        self.score = 0

    def _mark_spawn(self):
        # remember when the most recent object was produced
        self.last_draw_timestamp = time()

    def add_new_circle(self):
        """Spawn a plain target circle."""
        self.DCM.add(self.circle_radius, hands_only=self.hands_only)
        self._mark_spawn()

    def add_packman(self):
        """Spawn a packman object."""
        self.PM.add(self.circle_radius)
        self._mark_spawn()

    def add_new_ellipse_curve(self):
        """Spawn a circle that moves along an ellipse."""
        self.MCM.add(self.circle_radius)
        self._mark_spawn()

    def draw_score(self, frame):
        """Overlay the current score onto *frame*."""
        cv2.putText(frame, "Score " + str(self.score), (10, 50), cv2.FONT_ITALIC, 2, (255, 0, 0), 3)
class SoloIntensiveFastAim(Game):
    """Single-player mode where objects keep spawning at a fixed interval.

    The round is lost once ``max_items`` objects are on screen at once.
    """
    def __init__(self, w_size):
        super().__init__(w_size)
        # spawn limit and cadence come from the application config
        self.max_items = config.gameplay.intensive_max_circles_on_screen
        self.interval = config.gameplay.intensive_interval
    def process(self, frame, landmarks=None):
        """Advance one frame; return False when the game is over."""
        if landmarks:
            self.pop_out_circles(landmarks)
            self.pop_out_packmans(landmarks)
            self.pop_out_ellipse_curves(landmarks)
        cur_time = time()
        if cur_time - self.last_draw_timestamp > self.interval:
            # spawn mix: 8/10 plain circle, 1/10 packman, 1/10 moving circle
            chance = randint(1, 10)
            if chance > 2:
                self.add_new_circle()
            else:
                if chance == 1:
                    self.add_packman()
                else:
                    self.add_new_ellipse_curve()
        if len(self.DCM.circles) + len(self.PM.packmans) + len(self.MCM.ellipse_curves) == self.max_items:
            log.info("Max items on the screen! You lost!")
            return False
        draw_objects(
            frame, self.DCM.circles, self.PM.packmans, self.MCM.ellipse_curves, self.circle_radius,
            self.PM.vectors, self.body_part_indexes, landmarks
        )
        self.draw_score(frame)
        return True
    def pop_out_ellipse_curves(self, landmarks):
        """Remove hit moving circles and add the bonus to the score."""
        score_bonus = self.MCM.pop_out(landmarks, self.body_part_indexes, self.circle_radius)
        self.score += score_bonus
    def pop_out_packmans(self, landmarks):
        """Remove hit packmans and add the bonus to the score."""
        score_bonus = self.PM.pop_out(landmarks, self.body_part_indexes, self.circle_radius)
        self.score += score_bonus
    def pop_out_circles(self, landmarks):
        """Remove hit plain circles and add the bonus to the score."""
        score_bonus = self.DCM.pop_out(landmarks, self.body_part_indexes, self.circle_radius)
        self.score += score_bonus
class SoloClassic(Game):
    """Single-player mode where exactly one object is alive at a time.

    An object is cleared when hit or after ``obj_life_time`` seconds; the
    game ends after ``max_items`` objects have been spawned.
    """
    def __init__(self, w_size):
        super().__init__(w_size)
        self.max_items = config.gameplay.classic_max_circles_destroyed
        self.obj_life_time = config.gameplay.classic_circle_life_time
        # starts at -1 so the first spawn brings the counter to 0
        self.death_count = -1
        # which kind of object is currently alive on screen
        self.obj_live_status = {
            "circle": False,
            "packman": False,
            "mooving_circle": False
        }
    def process(self, frame, landmarks=None):
        """Advance one frame; return False when the game is over."""
        if landmarks:
            cur_time = time()
            self.pop_out_circles(landmarks, cur_time)
            self.pop_out_packmans(landmarks, cur_time)
            self.pop_out_ellipse_curves(landmarks, cur_time)
        if not any(self.obj_live_status.values()):
            # spawn mix: 8/10 plain circle, 1/10 packman, 1/10 moving circle
            chance = randint(1, 10)
            self.death_count += 1
            if chance > 2:
                self.add_new_circle()
                self.obj_live_status["circle"] = True
            else:
                if chance == 1:
                    self.add_packman()
                    self.obj_live_status["packman"] = True
                else:
                    self.add_new_ellipse_curve()
                    self.obj_live_status["mooving_circle"] = True
        if self.death_count == self.max_items:
            log.info(f"Game over, your score: {self.score}")
            return False
        draw_objects(
            frame, self.DCM.circles, self.PM.packmans, self.MCM.ellipse_curves, self.circle_radius,
            self.PM.vectors, self.body_part_indexes, landmarks
        )
        self.draw_score(frame)
        return True
    def pop_out_ellipse_curves(self, landmarks, cur_time):
        """Clear the moving circle when hit or expired; one mid-path is kept alive."""
        score_bonus = self.MCM.pop_out(landmarks, self.body_part_indexes, self.circle_radius)
        if score_bonus or cur_time - self.last_draw_timestamp >= self.obj_life_time:
            # do not clear while the circle is still travelling its path
            if len(self.MCM.ellipse_curves) \
                    and 0 < self.MCM.ellipse_curves[0].progress < self.MCM.ellipse_curves[0].a * 4:
                return
            self.obj_live_status["mooving_circle"] = False
            self.MCM.ellipse_curves = []
        self.score += score_bonus
    def pop_out_packmans(self, landmarks, cur_time):
        """Clear the packman when hit or expired; one mid-path is kept alive."""
        score_bonus = self.PM.pop_out(landmarks, self.body_part_indexes, self.circle_radius)
        if score_bonus or cur_time - self.last_draw_timestamp >= self.obj_life_time:
            # do not clear while the packman is still travelling its path
            if len(self.PM.packmans) and 0 < self.PM.packmans[0].progress < self.PM.max_packman_progress:
                return
            self.obj_live_status["packman"] = False
            self.PM.packmans = []
        self.score += score_bonus
    def pop_out_circles(self, landmarks, cur_time):
        """Clear the plain circle when hit or expired."""
        score_bonus = self.DCM.pop_out(landmarks, self.body_part_indexes, self.circle_radius)
        if score_bonus or cur_time - self.last_draw_timestamp >= self.obj_life_time:
            self.obj_live_status["circle"] = False
            self.DCM.circles = []
        self.score += score_bonus
class GameWithFriendOpenVINO:
    """Split-screen two-player wrapper.

    Routes each detected skeleton to the game instance of the screen half
    it occupies and runs both games until each reports game over.
    """

    def __init__(self, w_size, mode1, mode2):
        self.w_size = w_size
        # per-player game instances and their "still running" flags
        self.p1 = mode1
        self.p2 = mode2
        self.p1_game_status = True
        self.p2_game_status = True

    def get_side(self, joints):
        """Return True when the majority of *joints* lies in the left half."""
        left_count = sum(1 for joint in joints if joint.x <= 1/2)
        right_count = len(joints) - left_count
        print(left_count, right_count)
        return left_count > right_count

    def validate_joints(self, joints, side):
        """Null out joints outside the player's half; rescale the rest to [0, 1]."""
        for idx, joint in enumerate(joints):
            if side == 1:
                joints[idx] = (None if joint.x >= 1/2
                               else Joint(joint.x * 2, joint.y, joint.score))
            elif side == 0:
                joints[idx] = (None if joint.x <= 1/2
                               else Joint((joint.x - 0.5) * 2, joint.y, joint.score))

    def process(self, image, results):
        """Feed each detected skeleton to its half-screen game; True while any runs."""
        half = self.w_size[1] // 2
        for item in results:
            if self.get_side(item):
                self.validate_joints(item, 1)
                if self.p1_game_status:
                    self.p1_game_status = self.p1.process(image[:, :half], item)
            else:
                self.validate_joints(item, 0)
                if self.p2_game_status:
                    self.p2_game_status = self.p2.process(image[:, half:], item)
        return self.p1_game_status or self.p2_game_status
| 6,646 | 13 | 601 |
13406aa5f5d3a775426e7a74d7015441611738ac | 8,888 | py | Python | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/tests/npyufunc/test_caching.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 8 | 2019-10-07T16:33:47.000Z | 2020-12-07T03:59:58.000Z | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/tests/npyufunc/test_caching.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 1 | 2018-04-03T22:37:40.000Z | 2018-04-03T23:53:43.000Z | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/tests/npyufunc/test_caching.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 5 | 2020-08-27T20:44:18.000Z | 2021-08-21T22:54:11.000Z | from __future__ import print_function, absolute_import, division
import sys
import os.path
import re
from contextlib import contextmanager
import subprocess
import numpy as np
from numba import unittest_support as unittest
from numba import config
from ..support import captured_stdout
from ..test_dispatcher import BaseCacheTest
class UfuncCacheTest(BaseCacheTest):
    """
    Since the cache stats is not exposed by ufunc, we test by looking at the
    cache debug log.
    """
    here = os.path.dirname(__file__)
    usecases_file = os.path.join(here, "cache_usecases.py")
    modname = "ufunc_caching_test_fodder"
    # debug-log lines emitted by the caching machinery on save / load
    regex_data_saved = re.compile(r'\[cache\] data saved to')
    regex_index_saved = re.compile(r'\[cache\] index saved to')
    regex_data_loaded = re.compile(r'\[cache\] data loaded from')
    regex_index_loaded = re.compile(r'\[cache\] index loaded from')
    @contextmanager
    def capture_cache_log(self):
        """Capture stdout with cache debug logging temporarily enabled.

        Restored: the ``@contextmanager`` decorator was left dangling on
        ``check_cache_saved`` (this method's body was missing), which turned
        that assertion helper into a broken context manager and made every
        ``with self.capture_cache_log()`` block fail.
        """
        with captured_stdout() as out:
            old, config.DEBUG_CACHE = config.DEBUG_CACHE, True
            yield out
            config.DEBUG_CACHE = old
    def check_cache_saved(self, cachelog, count):
        """
        Check number of cache-save were issued
        """
        data_saved = self.regex_data_saved.findall(cachelog)
        index_saved = self.regex_index_saved.findall(cachelog)
        self.assertEqual(len(data_saved), count)
        self.assertEqual(len(index_saved), count)
    def check_cache_loaded(self, cachelog, count):
        """
        Check number of cache-load were issued
        """
        data_loaded = self.regex_data_loaded.findall(cachelog)
        index_loaded = self.regex_index_loaded.findall(cachelog)
        self.assertEqual(len(data_loaded), count)
        self.assertEqual(len(index_loaded), count)
    def check_ufunc_cache(self, usecase_name, n_overloads, **kwargs):
        """
        Check number of cache load/save.
        There should be one per overloaded version.
        """
        mod = self.import_module()
        usecase = getattr(mod, usecase_name)
        # New cache entry saved
        with self.capture_cache_log() as out:
            new_ufunc = usecase(**kwargs)
        cachelog = out.getvalue()
        self.check_cache_saved(cachelog, count=n_overloads)
        # Use cached version
        with self.capture_cache_log() as out:
            cached_ufunc = usecase(**kwargs)
        cachelog = out.getvalue()
        self.check_cache_loaded(cachelog, count=n_overloads)
        return new_ufunc, cached_ufunc
# Note: DUFunc doesn't support parallel target yet
#
# The following test issue #2198 that loading cached (g)ufunc first
# bypasses some target context initialization.
#
# allow running this test file directly
if __name__ == '__main__':
    unittest.main()
| 37.344538 | 84 | 0.670117 | from __future__ import print_function, absolute_import, division
import sys
import os.path
import re
from contextlib import contextmanager
import subprocess
import numpy as np
from numba import unittest_support as unittest
from numba import config
from ..support import captured_stdout
from ..test_dispatcher import BaseCacheTest
class UfuncCacheTest(BaseCacheTest):
    """
    Since the cache stats is not exposed by ufunc, we test by looking at the
    cache debug log.
    """
    here = os.path.dirname(__file__)
    usecases_file = os.path.join(here, "cache_usecases.py")
    modname = "ufunc_caching_test_fodder"
    # debug-log lines emitted by the caching machinery on save / load
    regex_data_saved = re.compile(r'\[cache\] data saved to')
    regex_index_saved = re.compile(r'\[cache\] index saved to')
    regex_data_loaded = re.compile(r'\[cache\] data loaded from')
    regex_index_loaded = re.compile(r'\[cache\] index loaded from')
    @contextmanager
    def capture_cache_log(self):
        """Capture stdout with cache debug logging temporarily enabled."""
        with captured_stdout() as out:
            old, config.DEBUG_CACHE = config.DEBUG_CACHE, True
            yield out
            config.DEBUG_CACHE = old
    def check_cache_saved(self, cachelog, count):
        """
        Check number of cache-save were issued
        """
        data_saved = self.regex_data_saved.findall(cachelog)
        index_saved = self.regex_index_saved.findall(cachelog)
        self.assertEqual(len(data_saved), count)
        self.assertEqual(len(index_saved), count)
    def check_cache_loaded(self, cachelog, count):
        """
        Check number of cache-load were issued
        """
        data_loaded = self.regex_data_loaded.findall(cachelog)
        index_loaded = self.regex_index_loaded.findall(cachelog)
        self.assertEqual(len(data_loaded), count)
        self.assertEqual(len(index_loaded), count)
    def check_ufunc_cache(self, usecase_name, n_overloads, **kwargs):
        """
        Check number of cache load/save.
        There should be one per overloaded version.
        """
        mod = self.import_module()
        usecase = getattr(mod, usecase_name)
        # New cache entry saved
        with self.capture_cache_log() as out:
            new_ufunc = usecase(**kwargs)
        cachelog = out.getvalue()
        self.check_cache_saved(cachelog, count=n_overloads)
        # Use cached version
        with self.capture_cache_log() as out:
            cached_ufunc = usecase(**kwargs)
        cachelog = out.getvalue()
        self.check_cache_loaded(cachelog, count=n_overloads)
        return new_ufunc, cached_ufunc
class TestUfuncCacheTest(UfuncCacheTest):
    """Caching tests for @vectorize-created ufuncs."""
    def test_direct_ufunc_cache(self, **kwargs):
        # one cache entry per overload (the two dtypes exercised below)
        new_ufunc, cached_ufunc = self.check_ufunc_cache(
            "direct_ufunc_cache_usecase", n_overloads=2, **kwargs)
        # Test the cached and original versions
        inp = np.random.random(10).astype(np.float64)
        np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp))
        inp = np.arange(10, dtype=np.intp)
        np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp))
    def test_direct_ufunc_cache_objmode(self):
        self.test_direct_ufunc_cache(forceobj=True)
    def test_direct_ufunc_cache_parallel(self):
        self.test_direct_ufunc_cache(target='parallel')
    def test_indirect_ufunc_cache(self, **kwargs):
        new_ufunc, cached_ufunc = self.check_ufunc_cache(
            "indirect_ufunc_cache_usecase", n_overloads=3, **kwargs)
        # Test the cached and original versions
        inp = np.random.random(10).astype(np.float64)
        np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp))
        inp = np.arange(10, dtype=np.intp)
        np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp))
    def test_indirect_ufunc_cache_parallel(self):
        self.test_indirect_ufunc_cache(target='parallel')
class TestDUfuncCacheTest(UfuncCacheTest):
    """Caching tests for dynamically-compiled ufuncs (DUFunc)."""
    # Note: DUFunc doesn't support parallel target yet
    def check_dufunc_usecase(self, usecase_name):
        """Creating a DUFunc saves nothing; the first call compiles and caches."""
        mod = self.import_module()
        usecase = getattr(mod, usecase_name)
        # Create dufunc
        with self.capture_cache_log() as out:
            ufunc = usecase()
        self.check_cache_saved(out.getvalue(), count=0)
        # Compile & cache
        with self.capture_cache_log() as out:
            ufunc(np.arange(10))
        self.check_cache_saved(out.getvalue(), count=1)
        self.check_cache_loaded(out.getvalue(), count=0)
        # Use cached
        with self.capture_cache_log() as out:
            ufunc = usecase()
            ufunc(np.arange(10))
        self.check_cache_loaded(out.getvalue(), count=1)
    def test_direct_dufunc_cache(self):
        # We don't test for objmode because DUfunc don't support it.
        self.check_dufunc_usecase('direct_dufunc_cache_usecase')
    def test_indirect_dufunc_cache(self):
        self.check_dufunc_usecase('indirect_dufunc_cache_usecase')
def _fix_raw_path(rstr):
    """Adapt a raw regex path fragment for Windows.

    On win32 each ``/`` is replaced with a regex-escaped backslash
    sequence so the pattern matches Windows path separators; elsewhere
    the string is returned unchanged.
    """
    if not config.IS_WIN32:
        return rstr
    return rstr.replace(r'/', r'\\\\')
class TestGUfuncCacheTest(UfuncCacheTest):
    """Caching tests for @guvectorize-created gufuncs."""
    def test_filename_prefix(self):
        """Gufunc wrapper cache files carry a "guf-" filename prefix."""
        mod = self.import_module()
        usecase = getattr(mod, "direct_gufunc_cache_usecase")
        with self.capture_cache_log() as out:
            usecase()
        cachelog = out.getvalue()
        # find number filename with "guf-" prefix
        fmt1 = _fix_raw_path(r'/__pycache__/guf-{}')
        prefixed = re.findall(fmt1.format(self.modname), cachelog)
        fmt2 = _fix_raw_path(r'/__pycache__/{}')
        normal = re.findall(fmt2.format(self.modname), cachelog)
        # expecting 2 overloads
        self.assertGreater(len(normal), 2)
        # expecting equal number of wrappers and overloads cache entries
        self.assertEqual(len(normal), len(prefixed))
    def test_direct_gufunc_cache(self, **kwargs):
        # 2 cache entry for the 2 overloads
        # and 2 cache entry for the gufunc wrapper
        new_ufunc, cached_ufunc = self.check_ufunc_cache(
            "direct_gufunc_cache_usecase", n_overloads=2 + 2, **kwargs)
        # Test the cached and original versions
        inp = np.random.random(10).astype(np.float64)
        np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp))
        inp = np.arange(10, dtype=np.intp)
        np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp))
    def test_direct_gufunc_cache_objmode(self):
        self.test_direct_gufunc_cache(forceobj=True)
    def test_direct_gufunc_cache_parallel(self):
        self.test_direct_gufunc_cache(target='parallel')
    def test_indirect_gufunc_cache(self, **kwargs):
        # 3 cache entry for the 3 overloads
        # and no cache entry for the gufunc wrapper
        new_ufunc, cached_ufunc = self.check_ufunc_cache(
            "indirect_gufunc_cache_usecase", n_overloads=3, **kwargs)
        # Test the cached and original versions
        inp = np.random.random(10).astype(np.float64)
        np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp))
        inp = np.arange(10, dtype=np.intp)
        np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp))
    def test_indirect_gufunc_cache_parallel(self, **kwargs):
        self.test_indirect_gufunc_cache(target='parallel')
class TestCacheSpecificIssue(UfuncCacheTest):
    """Regression tests: loading a cached (g)ufunc in a fresh process must
    not bypass target context initialization (issue #2198)."""
    def run_in_separate_process(self, runcode):
        """Import the generated test module in a child interpreter and run *runcode*."""
        # Based on the same name util function in test_dispatcher but modified
        # to allow user to define what to run.
        code = """if 1:
            import sys
            sys.path.insert(0, %(tempdir)r)
            mod = __import__(%(modname)r)
            mod.%(runcode)s
            """ % dict(tempdir=self.tempdir, modname=self.modname,
                       runcode=runcode)
        popen = subprocess.Popen([sys.executable, "-c", code],
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = popen.communicate()
        if popen.returncode != 0:
            raise AssertionError("process failed with code %s: stderr follows\n%s\n"
                                 % (popen.returncode, err.decode()))
    #
    # The following test issue #2198 that loading cached (g)ufunc first
    # bypasses some target context initialization.
    #
    def test_first_load_cached_ufunc(self):
        # ensure function is cached
        self.run_in_separate_process('direct_ufunc_cache_usecase()')
        # use the cached function
        # this will fail if the target context is not init'ed
        self.run_in_separate_process('direct_ufunc_cache_usecase()')
    def test_first_load_cached_gufunc(self):
        # ensure function is cached
        self.run_in_separate_process('direct_gufunc_cache_usecase()')
        # use the cached function
        # this will fail out if the target context is not init'ed
        self.run_in_separate_process('direct_gufunc_cache_usecase()')
# allow running this test file directly
if __name__ == '__main__':
    unittest.main()
| 5,613 | 86 | 600 |
395d8eca7d1e0f912a781947a6d97be3bde8a106 | 35,045 | py | Python | generator.py | s-fifteen-instruments/Digital_Pattern_Generator_DPG1 | e8113154ba150a5d19e26171c9204cf22d55760d | [
"MIT"
] | null | null | null | generator.py | s-fifteen-instruments/Digital_Pattern_Generator_DPG1 | e8113154ba150a5d19e26171c9204cf22d55760d | [
"MIT"
] | null | null | null | generator.py | s-fifteen-instruments/Digital_Pattern_Generator_DPG1 | e8113154ba150a5d19e26171c9204cf22d55760d | [
"MIT"
] | null | null | null | """
this is an attempt to convert a readable patt format (similar to the ones compatible with arbitrarypatterngenerator) into the 4-word format that can be cat into pattern generator v2
it reads pattfile written in the following format and generate the corresponding dpatt:
#triggered input_line use_table threshold_counts_per_second if_success_table if_failure_table time_to_trigger [bits to turn on(0-25)]
#sequential [repeat_table(at least 0)] use_table end_table time [bits to turn on(0-25)]
#conditional input_line use_table if_success_table trigger_width [bits to turn on(0-25)]
in this version,
1) internal counters 0 and 1 are always loaded with 10 and 100 to deal with '>3ms' and '>30ms' scale signal
counters 2 and 3 is used if sequential repeats (which means only two sequential can repeat currently)
2) toupper func is not implemented, so the comparison is case-sensitive
3) triggered accepts only 1-line pattern
4) hooks are not implemented yet
5) fixed directory to save dpatt
7) always start from row 0 and table 0
8) at sequential loops, the loading of counter will introduce a 10ns shift; if next-table existed, the additional line to jump to next-table will also introduce 10ns shift
9) conditional is implemented
10) check number of lines before the file is being cat'ed
updated on 10/7/2019
Update on 2/1/2020 by Chin Chean:
1) table_dic, table_lst, and rep_count are cleared every time generator is called from the GUI.
2) When uploading scripts multiple times, adding a config 0; at the start of the .word file might prevent crashing of device. This command is now appended on the generated file.
@author: Chang Hoong QO LAB, NUS
"""
# token lists: a token's index doubles as its mode / unit id
# (index 0 is reserved for "not found"; mode 1 ('.') terminates the pattern)
mode_list = ['','.','triggered','sequential','conditional']
unit_list = ['','ns','us','ms']
# human-readable messages indexed by error code
error_list = ['no error (0)','invalid token (1)','too many sequential loop (2)', 'invalid output (3)', 'invalid number (4)', 'no unit found (5)', 'shorter than clock cycle (6)', 'repeated output warning (7)', 'invalid termination (8)','null error (9)','repeated table number (10)','multiple thresholds for same input (11)', 'pattern too long (12)']
table_dic = {} #contains information about address of each table for branching
table_lst = [] #contains all patterns to be applied
rep_count = [] #contains additional internal counters being used
# format of table_lst
# (if triggered) [table_no, success_table_no, fail_table_no, input_line, threshold_counts, num_clock_cycle, output] (7 components)
# (if sequential) [table_no, next_table, repeat, num_clock_cycle, output] (5 components)
# (if conditional) [table_no success_table_no, NULL, input_line, num_clock_cycle, output] (6 components)
# number of components can decide mode type
# 'output' consists of a two-element array (left right word)
# find token and return token_nb, ptr_in_str
# return the readout number + remaining string
# if number not found, output -1 and ''
# need old token so that the 2nd line of a sequential block can be interpreted as sequential
# return the old token and the remaining argument
Max_cyclenumber_per_line = 65536
# output the remaining string
# should read duration and output bits
# output the remaining string
# should read input_line use_table threshold_counts_per_second if_success_table if_failure_table time_to_trigger [bits to turn on(0-25)]
# output the remaining string
# should read input_line use_table if_success_table if_failure_table time_to_trigger [bits to turn on(0-25)]
# return chain of str for this table, and the new addr_ptr
# if __name__ == '__main__':
# import argparse
# parser = argparse.ArgumentParser(description='Generate dpatt from patt')
# parser.add_argument('-i','--inputstr',type=str,default='load_atom_redu.patt')
# parser.add_argument('-o','--outputstr',type=str,default='isto.dat')
# args = parser.parse_args()
# pattfile = open(args.inputstr,'r')
# outputfile = open(args.outputstr,'w+')
#
# output = main(pattfile)
# outputfile.write(output)
#
# pattfile.close()
# outputfile.close()
#
# num_lines = sum(1 for line in open(args.outputstr,'r'))
# #print(num_lines-len(table_dic)-4) #for debugging
# if (num_lines-len(table_dic)-4) > 256:
# raise Exception(error_list[12])
| 44.081761 | 349 | 0.706549 | """
this is an attempt to convert a readable patt format (similar to the ones compatible with arbitrarypatterngenerator) into the 4-word format that can be cat into pattern generator v2
it reads pattfile written in the following format and generate the corresponding dpatt:
#triggered input_line use_table threshold_counts_per_second if_success_table if_failure_table time_to_trigger [bits to turn on(0-25)]
#sequential [repeat_table(at least 0)] use_table end_table time [bits to turn on(0-25)]
#conditional input_line use_table if_success_table trigger_width [bits to turn on(0-25)]
in this version,
1) internal counters 0 and 1 are always loaded with 10 and 100 to deal with '>3ms' and '>30ms' scale signal
counters 2 and 3 is used if sequential repeats (which means only two sequential can repeat currently)
2) toupper func is not implemented, so the comparison is case-sensitive
3) triggered accepts only 1-line pattern
4) hooks are not implemented yet
5) fixed directory to save dpatt
7) always start from row 0 and table 0
8) at sequential loops, the loading of counter will introduce a 10ns shift; if next-table existed, the additional line to jump to next-table will also introduce 10ns shift
9) conditional is implemented
10) check number of lines before the file is being cat'ed
updated on 10/7/2019
Update on 2/1/2020 by Chin Chean:
1) table_dic, table_lst, and rep_count are cleared every time generator is called from the GUI.
2) When uploading scripts multiple times, adding a config 0; at the start of the .word file might prevent crashing of device. This command is now appended on the generated file.
@author: Chang Hoong QO LAB, NUS
"""
# token lists: a token's index doubles as its mode / unit id
# (index 0 is reserved for "not found"; mode 1 ('.') terminates the pattern)
mode_list = ['','.','triggered','sequential','conditional']
unit_list = ['','ns','us','ms']
# human-readable messages indexed by error code
error_list = ['no error (0)','invalid token (1)','too many sequential loop (2)', 'invalid output (3)', 'invalid number (4)', 'no unit found (5)', 'shorter than clock cycle (6)', 'repeated output warning (7)', 'invalid termination (8)','null error (9)','repeated table number (10)','multiple thresholds for same input (11)', 'pattern too long (12)']
table_dic = {} #contains information about address of each table for branching
table_lst = [] #contains all patterns to be applied
rep_count = [] #contains additional internal counters being used
# format of table_lst
# (if triggered) [table_no, success_table_no, fail_table_no, input_line, threshold_counts, num_clock_cycle, output] (7 components)
# (if sequential) [table_no, next_table, repeat, num_clock_cycle, output] (5 components)
# (if conditional) [table_no success_table_no, NULL, input_line, num_clock_cycle, output] (6 components)
# number of components can decide mode type
# 'output' consists of a two-element array (left right word)
# find token and return token_nb, ptr_in_str
def find_token(argument, token_list):
    """Search *argument* for the highest-indexed token of *token_list*.

    Entries are tried from the last index down to index 1 (index 0 is the
    placeholder for "no token").  Returns ``(token_index, position)`` for
    the first match, or ``(0, -1)`` when none of the tokens occurs.
    """
    for idx in reversed(range(1, len(token_list))):
        pos = argument.find(token_list[idx])
        if pos != -1:
            return idx, pos
    return 0, -1  # invalid token
# return the readout number + remaining string
# if number not found, output -1 and ''
def parse_number(argument):
    """Read a non-negative decimal number from *argument*.

    Leading spaces, tabs, ':' and ',' are skipped.  Returns
    ``(value, remainder)`` on success or ``(-1, remainder)`` when the next
    character is not a digit.  A leading newline — or an exhausted string,
    which previously raised IndexError — yields ``(-1, '')``.
    """
    newpos = 0
    length = len(argument)
    # take out separator characters; stop at end of string instead of
    # indexing past it (the original code raised IndexError here)
    while newpos < length and argument[newpos] in ' \t:,':
        newpos = newpos + 1
    if newpos >= length:
        return -1, ''
    if argument[newpos] == '\n':
        print(error_list[4])
        return -1, ''
    num_str = ''
    # accumulate consecutive ASCII digits
    while newpos < length:
        c = argument[newpos]
        if c < '0' or c > '9':
            break  # end of number
        num_str = num_str + c
        newpos = newpos + 1
    if num_str == '':
        return -1, argument[newpos:]
    return int(num_str), argument[newpos:]
# need old token so that the 2nd line of a sequential block can be interpreted as sequential
# return the old token and the remaining argument
def parse_command(argument, old_token):
    """Parse one .patt line and dispatch it to the matching interpreter.

    *argument* is the raw line (newline-terminated); *old_token* is the mode
    of the previous line so a bare pattern line following a ``sequential``
    header is treated as another sequential entry.  Returns
    ``(mode_token, remaining_string)`` where mode_token indexes ``mode_list``
    (1=termination, 2=triggered, 3=sequential, 4=conditional).
    """
    # argument is a long string
    newpos = 0
    # take out empty spaces
    while(argument[newpos]==' ' or argument[newpos]=='\t' or argument[newpos]==':' or argument[newpos]==','):
        newpos = newpos + 1
    if (argument[newpos]=='\n'):
        return old_token, argument[newpos:] #end of line
    token, ptr=find_token(argument[newpos:],mode_list)
    # ptr is relative to the slice searched above, so every slice below must
    # also account for the `newpos` separators skipped (the original code
    # omitted this and mis-parsed lines with leading whitespace)
    ptr = ptr + newpos
    # 0: invalid token, 1: termination, 2: triggered, 3: sequential, 4: conditional
    if (token==0 and old_token==3):
        # use the previous input table number
        new_argmt = interpret_seq([-1], argument[newpos:])
        return 3, new_argmt
    elif token==0: #no token found
        raise Exception(error_list[1]) #error
    elif token==1: #termination
        return 1, ''
    elif token==2: #trigger
        new_argmt = interpret_tri(argument[(ptr+9):])
        return 2, new_argmt
    elif token==4: #conditional
        new_argmt = interpret_con(argument[(ptr+11):])
        return 4, new_argmt
    else: # sequential
        # extract information for repeat, table_no, and next_table
        new_argmt = argument[(ptr+10):]
        repeat, new_argmt = parse_number(new_argmt) # first argument is repeat
        if (repeat < 0 or repeat>65535):
            raise Exception('Invalid repeat number at sequential.')
        table_no, new_argmt = parse_number(new_argmt) # 2nd argument is table_no
        if (table_no < 0):
            raise Exception('Invalid table number at sequential.')
        next_table, new_argmt = parse_number(new_argmt) # 3rd argument is next_table
        if (next_table < 0):
            raise Exception('Invalid next-table number at sequential.')
        # standard interpretation for sequential, read TIME and output bit
        new_argmt = interpret_seq([table_no,next_table,repeat],new_argmt) # rearrange the information to facilitate construction
        return 3, new_argmt
Max_cyclenumber_per_line = 65536
def time_balancer(left_output, right_output, Nclockcycle, table_str, addr_ptr, inputline=-1):
    """Append `writew` lines that hold the given 16+16-bit output for Nclockcycle cycles.

    Emits rows starting at *addr_ptr* and returns ``(table_str, next_addr_ptr)``.
    Short durations (< 5 * 65536 cycles, i.e. roughly < 3.3 ms at the 10 ns
    clock) are unrolled line by line; longer ones loop via the internal
    counters (counter 0 loaded with 10, counter 1 with 100 — see the module
    docstring).  When *inputline* >= 0 the first emitted row additionally
    arms the external trigger counter for that input.
    """
    Ncounter_temp = int(Nclockcycle/Max_cyclenumber_per_line)
    if Ncounter_temp < 5: # <3.28ms, don't need loop
        num_step_loop = Nclockcycle
        if inputline!=-1: # trigger
            table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output, right_output, 0, 4096+2**(inputline),addr_ptr) #load counters, 4096->special command
            addr_ptr = addr_ptr + 1 # next line
        # unroll: one full-length line per 65536 cycles, then the remainder
        while(num_step_loop>Max_cyclenumber_per_line):
            table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output, right_output, Max_cyclenumber_per_line-1, addr_ptr+1,addr_ptr)
            num_step_loop = num_step_loop - Max_cyclenumber_per_line
            addr_ptr = addr_ptr + 1
        if num_step_loop == 0:
            num_step_loop = 1
            print('Duration remainder = 0!')
        table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output, right_output, num_step_loop-1, addr_ptr+1,addr_ptr)
        addr_ptr = addr_ptr + 1
    elif Ncounter_temp <= 50: # 3.28ms<t<32.8ms, we use loop with counter 10
        Nclockcycle = Nclockcycle - 1 # correction to take into account the first step used for counter loading
        num_step_loop = int(Nclockcycle/10) # number of clock cycle per loop
        # the if-else condition here ensures we can allocate time into "decrement" and "conditional check"
        if num_step_loop <= Max_cyclenumber_per_line:
            loop_step_size = int((num_step_loop-1)/2)
        else:
            loop_step_size = Max_cyclenumber_per_line # maximum step size
        remainder = Nclockcycle%10
        if inputline!=-1: # trigger
            table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output, right_output, remainder, 4096+16+2**(inputline),addr_ptr) #load counters, 4096->special command, 16->intcounter for 10
        else:
            table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output, right_output, remainder, 4096+16,addr_ptr) #load counters, 4096->special command, 16->intcounter for 10
        addr_ptr = addr_ptr + 1 # next line
        loop_start = addr_ptr
        while(num_step_loop!=0):
            if num_step_loop > loop_step_size:
                if (loop_start==addr_ptr):
                    table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output,right_output,loop_step_size-1,4352,addr_ptr) # decrement on int counter 0 (4096+256)
                else:
                    table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output,right_output,loop_step_size-1,addr_ptr+1,addr_ptr) # decrement on int counter 0 (4096+256)
                addr_ptr = addr_ptr + 1
                num_step_loop = num_step_loop - loop_step_size
            else:
                # last step of the loop
                table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output,right_output,num_step_loop-1,49152+loop_start,addr_ptr) # jump to loop_start on nonzero int counter 0 (49152 + loop_start)
                addr_ptr = addr_ptr + 1
                break;
    else: # longer than 32.8ms operation
        Nclockcycle = Nclockcycle - 1 # correction to take into account the first step used for counter loading
        num_step_loop = int(Nclockcycle/100) # number of clock cycle per loop
        if num_step_loop <= Max_cyclenumber_per_line:
            loop_step_size = int((num_step_loop-1)/2)
        else:
            loop_step_size = Max_cyclenumber_per_line # maximum step size
        remainder = Nclockcycle%100
        if inputline!=-1: # trigger
            table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output, right_output, remainder, 4096+32+2**(inputline),addr_ptr) #load counters, 4096->special command, 32->intcounter for 100
        else:
            table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output, right_output, remainder, 4096+32,addr_ptr) #load counters, 4096->special command, 32->intcounter for 100
        addr_ptr = addr_ptr + 1 # next line
        loop_start = addr_ptr
        while(num_step_loop!=0):
            if num_step_loop > loop_step_size:
                if (loop_start==addr_ptr):
                    table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output,right_output,loop_step_size-1,4608,addr_ptr) # decrement on int counter 1 (4096+512)
                else:
                    table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output,right_output,loop_step_size-1,addr_ptr+1,addr_ptr)
                addr_ptr = addr_ptr + 1
                num_step_loop = num_step_loop - loop_step_size
            else:
                # last step of the loop
                table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output,right_output,num_step_loop-1,53248+loop_start,addr_ptr) # jump to loop_start on nonzero int counter 1 (49152 + loop_start)
                addr_ptr = addr_ptr + 1
                break;
    return table_str, addr_ptr
# output the remaining string
# should read duration and output bits
def interpret_seq(table_no_arr, argument):
    """Parse the duration and output bits of a sequential line.

    *table_no_arr* is ``[table_no, next_table, repeat]``, or ``[-1]`` to
    inherit those fields from the previous entry (continuation lines).
    Appends ``[table_no, next_table, repeat, n_cycle, [left, right]]`` to
    the global ``table_lst`` and returns the unconsumed remainder of the
    line (which must start with ';' after optional separators).
    """
    global table_lst
    temp_action_table = []
    if table_no_arr[0]==-1:
        # use the previous table settings
        temp_action_table.append(table_lst[-1][0]) # table_no
        temp_action_table.append(table_lst[-1][1]) # next_table
        temp_action_table.append(table_lst[-1][2]) # repeat
    else:
        temp_action_table.append(table_no_arr[0]) # table_no
        temp_action_table.append(table_no_arr[1]) # next table
        temp_action_table.append(table_no_arr[2]) # repeat
    # read time
    dura, new_argmt = parse_number(argument)
    # identify unit
    token, ptr = find_token(new_argmt, unit_list)
    if token == 0:
        raise Exception(error_list[5]+' :'+argument)
    elif token == 1: # ns
        if dura < 10:
            print(error_list[6]+', '+str(dura)+'ns is round up to 10ns')
            n_cycle = 1
        else:
            n_cycle = int(dura/10)
    elif token == 2: # us
        n_cycle = int(1000*dura/10)
    elif token == 3: # ms
        n_cycle = int(1000000*dura/10)
    else:
        raise Exception('interpret_seq error')
    temp_action_table.append(n_cycle)
    # READ OUTPUT BITS
    # skip past the 2-character unit token before reading the bit list
    new_argmt = new_argmt[ptr+2:]
    outputbitarray = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] # to store on off
    while (True):
        outbit, new_argmt = parse_number(new_argmt)
        if outbit==-1:
            break
        elif outbit > 31:
            raise Exception(error_list[3])
        elif outputbitarray[outbit] == 1:
            print(error_list[7])
        else:
            outputbitarray[outbit] = 1
    # pack bits 0-15 into the left word and bits 16-31 into the right word
    decsum_l,decsum_r = 0,0
    for i in range(16):
        decsum_l = decsum_l + outputbitarray[i]*2**i
        decsum_r = decsum_r + outputbitarray[i+16]*2**i
    temp_action_table.append([int(decsum_l),int(decsum_r)])
    newpos = 0
    # take out empty spaces
    while(new_argmt[newpos]==' ' or new_argmt[newpos]=='\t' or new_argmt[newpos]==':' or new_argmt[newpos]==','):
        newpos = newpos + 1
    if new_argmt[newpos]!=';':
        raise Exception(error_list[8]+': ";"')
    # upload to main action table list
    table_lst.append(temp_action_table)
    #print(temp_action_table) #debug
    return new_argmt
# output the remaining string
# should read input_line use_table threshold_counts_per_second if_success_table if_failure_table time_to_trigger [bits to turn on(0-25)]
def interpret_tri(argument):
    """Parse one triggered ("tri") statement and append its action table.

    Field order: input_line use_table threshold_counts_per_second
    success_table fail_table duration[ns|us|ms] [output bits 0-31].
    Appends [table_no, success_table_no, fail_table_no, input_line,
    threshold_counts, num_clock_cycle, [left_word, right_word]] to the
    global table_lst.

    Args:
        argument: remainder of the source line to parse.

    Returns:
        The unparsed tail of the line (starting at the ';' terminator).

    Raises:
        Exception: on a malformed field, unknown time unit, output bit > 31
            or a missing ';' terminator.
    """
    global table_lst
    temp_action_table = []
    input_line, new_argmt = parse_number(argument) # input_line (must be 0-3)
    if (input_line < 0 or input_line > 3):
        raise Exception('Invalid input line at trigger.')
    use_table, new_argmt = parse_number(new_argmt) # use_table
    if (use_table < 0):
        raise Exception('Invalid table number at trigger.')
    threshold, new_argmt = parse_number(new_argmt) # threshold (counts per second)
    if (threshold <= 0):
        raise Exception('Invalid trigger threshold')
    goto_table, new_argmt = parse_number(new_argmt) # goto_table on success
    if (goto_table < 0):
        raise Exception('Invalid success_table number at trigger.')
    fail_table, new_argmt = parse_number(new_argmt) # fail_table on failure
    if (fail_table < 0):
        raise Exception('Invalid fail_table number at trigger.')
    #[table_no, success_table_no, fail_table_no, input_line, threshold_counts, num_clock_cycle, output]
    temp_action_table.append(use_table)
    temp_action_table.append(goto_table)
    temp_action_table.append(fail_table)
    temp_action_table.append(input_line)
    temp_action_table.append(threshold)
    # read time
    dura, new_argmt = parse_number(new_argmt)
    # identify unit (token: 1=ns, 2=us, 3=ms); one clock cycle is 10 ns
    token, ptr = find_token(new_argmt, unit_list)
    if token == 0:
        raise Exception(error_list[5]+' :'+argument)
    elif token == 1: # ns
        if dura < 10:
            print(error_list[6]+', '+str(dura)+'ns is round up to 10ns')
            n_cycle = 1
        else:
            n_cycle = int(dura/10)
    elif token == 2: # us
        n_cycle = int(1000*dura/10)
    elif token == 3: # ms
        n_cycle = int(1000000*dura/10)
    else:
        raise Exception('interpret_tri error')
    # replace threshold per second with threshold counts over the trigger
    # window (n_cycle cycles of 10 ns each; 10e-9 * n_cycle seconds total)
    temp_action_table[-1] = int(temp_action_table[-1]*n_cycle*10e-9)
    temp_action_table.append(n_cycle)
    # READ OUTPUT BITS
    new_argmt = new_argmt[ptr+2:]
    outputbitarray = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] # to store on off
    while (True):
        outbit, new_argmt = parse_number(new_argmt)
        if outbit==-1:
            break
        elif outbit > 31:
            raise Exception(error_list[3])
        elif outputbitarray[outbit] == 1:
            print(error_list[7]) # duplicated bit: warn, keep it set
        else:
            outputbitarray[outbit] = 1
    # pack bits 0-15 / 16-31 into the two 16-bit output words
    decsum_l,decsum_r = 0,0
    for i in range(16):
        decsum_l = decsum_l + outputbitarray[i]*2**i
        decsum_r = decsum_r + outputbitarray[i+16]*2**i
    temp_action_table.append([int(decsum_l),int(decsum_r)])
    newpos = 0
    # take out empty spaces (and ':'/',' separators) before the terminator
    while(new_argmt[newpos]==' ' or new_argmt[newpos]=='\t' or new_argmt[newpos]==':' or new_argmt[newpos]==','):
        newpos = newpos + 1
    if new_argmt[newpos]!=';':
        raise Exception(error_list[8]+': ";"')
    # upload to main action table list
    table_lst.append(temp_action_table)
    #print(temp_action_table) #debug
    return new_argmt
# output the remaining string
# should read input_line use_table if_success_table if_failure_table time_to_trigger [bits to turn on(0-25)]
def interpret_con(argument):
    """Parse one conditional ("con") statement and append its action table.

    Field order: input_line use_table success_table duration[ns|us|ms]
    [output bits 0-31].  Appends [table_no, success_table_no, 'NULL',
    input_line, num_clock_cycle, [left_word, right_word]] to the global
    table_lst ('NULL' fills the unused fail-table slot; the 6-element
    length distinguishes conditionals from 7-element triggered tables).

    Args:
        argument: remainder of the source line to parse.

    Returns:
        The unparsed tail of the line (starting at the ';' terminator).

    Raises:
        Exception: on a malformed field, unknown time unit, output bit > 31
            or a missing ';' terminator.
    """
    global table_lst
    temp_action_table = []
    input_line, new_argmt = parse_number(argument) # input_line (must be 0-3)
    if (input_line < 0 or input_line > 3):
        raise Exception('Invalid input line at conditional.')
    use_table, new_argmt = parse_number(new_argmt) # use_table
    if (use_table < 0):
        raise Exception('Invalid table number at conditional.')
    goto_table, new_argmt = parse_number(new_argmt) # goto_table on success
    if (goto_table < 0):
        raise Exception('Invalid success_table number at conditional.')
    #[table_no, success_table_no, fail_table_no, input_line, num_clock_cycle, output]
    temp_action_table.append(use_table)
    temp_action_table.append(goto_table)
    temp_action_table.append('NULL')
    temp_action_table.append(input_line)
    # read time
    dura, new_argmt = parse_number(new_argmt)
    # identify unit (token: 1=ns, 2=us, 3=ms); one clock cycle is 10 ns
    token, ptr = find_token(new_argmt, unit_list)
    if token == 0:
        raise Exception(error_list[5]+' :'+argument)
    elif token == 1: # ns
        if dura < 10:
            print(error_list[6]+', '+str(dura)+'ns is round up to 10ns')
            n_cycle = 1
        else:
            n_cycle = int(dura/10)
    elif token == 2: # us
        n_cycle = int(1000*dura/10)
    elif token == 3: # ms
        n_cycle = int(1000000*dura/10)
    else:
        # Bug fix: this branch used to raise 'interpret_tri error' (copy-paste
        # from interpret_tri), misattributing the failure location.
        raise Exception('interpret_con error')
    temp_action_table.append(n_cycle)
    # READ OUTPUT BITS
    new_argmt = new_argmt[ptr+2:]
    outputbitarray = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] # to store on off
    while (True):
        outbit, new_argmt = parse_number(new_argmt)
        if outbit==-1:
            break
        elif outbit > 31:
            raise Exception(error_list[3])
        elif outputbitarray[outbit] == 1:
            print(error_list[7]) # duplicated bit: warn, keep it set
        else:
            outputbitarray[outbit] = 1
    # pack bits 0-15 / 16-31 into the two 16-bit output words
    decsum_l,decsum_r = 0,0
    for i in range(16):
        decsum_l = decsum_l + outputbitarray[i]*2**i
        decsum_r = decsum_r + outputbitarray[i+16]*2**i
    temp_action_table.append([int(decsum_l),int(decsum_r)])
    newpos = 0
    # take out empty spaces (and ':'/',' separators) before the terminator
    while(new_argmt[newpos]==' ' or new_argmt[newpos]=='\t' or new_argmt[newpos]==':' or new_argmt[newpos]==','):
        newpos = newpos + 1
    if new_argmt[newpos]!=';':
        raise Exception(error_list[8]+': ";"')
    # upload to main action table list
    table_lst.append(temp_action_table)
    #print(temp_action_table) #debug
    return new_argmt
def flush(): #reading termination
    """Final pass after parsing: validate the tables and emit the dpatt text.

    Builds table_dic (table_no -> RAM address, -1 until programmed), collects
    the loop repeat counts into rep_count, emits the parameter-register
    preamble, then recursively encodes every reachable table starting from
    table 0.

    Returns:
        The complete dpatt program as a string.

    Raises:
        Exception: on duplicated table numbers or more than two loop
            sequentials.
    """
    global table_lst
    global table_dic
    global rep_count
    #print(table_lst) #for debugging
    # construct a dictionary for each table
    table_dic = {}
    rep_count = [] # to record number of internal counters dedicated to loop seq, keeps the table_no
    ext_counter = [0,0,0,0] # to prevent funny situations where same counter has multiple threshold
    # key (table_no), val (counter_no)
    for i in range(len(table_lst)):
        if len(table_lst[i])==7: #triggered
            if table_lst[i][0] in table_dic: # repeated table number
                raise Exception(error_list[10])
            table_dic[table_lst[i][0]]=0
            if ext_counter[table_lst[i][3]]==0: #if the external counter hasnt been used previously
                ext_counter[table_lst[i][3]] = table_lst[i][4]
            elif ext_counter[table_lst[i][3]] != table_lst[i][4]: #recorded threshold != current threshold
                print(error_list[11])
                # in current setting, it will pick the smallest threshold value
                ext_counter[table_lst[i][3]] = min(ext_counter[table_lst[i][3]], table_lst[i][4])
        elif len(table_lst[i])==5: # sequential
            if table_lst[i][0] in table_dic:
                # repeated table number is only legal for consecutive
                # segments of the same sequential table
                if i==0:
                    raise Exception(error_list[10])
                elif table_lst[i-1][0]!=table_lst[i][0]:
                    raise Exception(error_list[10])
            else:
                table_dic[table_lst[i][0]]=table_lst[i][2]
                if table_lst[i][2] > 0:
                    rep_count.append(table_lst[i][2])
        else: # conditional
            if table_lst[i][0] in table_dic: # repeated table number
                raise Exception(error_list[10])
            table_dic[table_lst[i][0]]=0
    #print(table_dic) # for debugging
    num_lep_count = len(rep_count)
    if num_lep_count > 2: # too many loop sequentials
        raise Exception(error_list[2])
    elif num_lep_count == 1:
        rep_count.append(0)
    elif num_lep_count == 0:
        rep_count.append(0)
        rep_count.append(0)
    # will only encode in parameter if rep_count > 0
    param_rep_count = [0,0]
    if rep_count[0] > 0: # loop 2 times when rep_count = 1
        param_rep_count[0] = rep_count[0] + 1
    if rep_count[1] > 0:
        param_rep_count[1] = rep_count[1] + 1
    # start encoding
    dpatt_str = ''
    # parameter registers
    dpatt_str = dpatt_str + '#parameter registers\n'
    # Add config 0 to clear memory before programming to prevent crash at device.
    dpatt_str = dpatt_str + 'config 0;\n'
    # internal counters 0 and 1 are always kept as 10 and 100 for long duration actions
    dpatt_str = dpatt_str + ('param 0 %d %d %d %d 10 100 %d %d;\n' %(ext_counter[0],ext_counter[1],ext_counter[2],ext_counter[3],param_rep_count[0],param_rep_count[1]))
    dpatt_str = dpatt_str + 'holdadr; ramprog;\n'
    # use the previous dictionary to store address of each table, initiation to -1
    for table_no_temp in table_dic:
        table_dic[table_no_temp] = -1
    # start ram programming here, always from table 0
    addr_ptr = 0
    table_no_temp = 0
    table_pos = program_table(table_no_temp)
    if len(table_lst[table_pos])==7: #triggered
        table_dic[table_no_temp]=addr_ptr # assign address for this soon-to-be-programmed table
        action_str, new_addr = trigger_encode(table_lst[table_pos],addr_ptr)
    elif len(table_lst[table_pos])==5: # sequential
        # Bug fix: this branch used to test len(table_lst[i]) with the stale
        # loop index i (last entry of table_lst), misclassifying table 0
        # whenever the last parsed entry had a different type.
        table_dic[table_no_temp]=addr_ptr # assign address for this soon-to-be-programmed table
        action_str, new_addr = sequential_encode(table_pos,addr_ptr)
    else: # conditional
        table_dic[table_no_temp]=addr_ptr
        action_str, new_addr = conditional_encode(table_lst[table_pos],addr_ptr)
    dpatt_str = dpatt_str + action_str # combined
    dpatt_str = dpatt_str + 'run; #start sequences\n'
    #print(dpatt_str) # debug
    # check if there is unprogrammed tables
    for table_no_temp in table_dic:
        if(table_dic[table_no_temp]==-1):
            print('warning: %d is not programmed.'%table_no_temp)
    return dpatt_str
def program_table(table_no):
    """Return the index in table_lst of the first entry numbered table_no.

    For sequential tables (which span several entries) this is always the
    first segment.

    Raises:
        Exception: if no entry carries the requested table number.
    """
    global table_lst
    for idx, entry in enumerate(table_lst):
        if entry[0] == table_no:
            return idx
    raise Exception('Table %d not found.' %table_no)
# return chain of str for this table, and the new addr_ptr
def trigger_encode(action_table,addr_ptr):
    """Encode one triggered table into dpatt 'writew' rows.

    Emits the wait window, then the two branch rows (fail on a nonzero
    external counter, success otherwise), and recursively encodes any
    not-yet-programmed fail/success tables directly after this one.

    Args:
        action_table: [table_no, success_table_no, fail_table_no,
            input_line, threshold_counts, num_clock_cycle,
            [left_word, right_word]].
        addr_ptr: RAM address at which this table starts.

    Returns:
        (table_str, addr_ptr): the emitted dpatt text for this table plus
        any tables programmed recursively, and the next free RAM address.
    """
    global table_dic
    global table_lst
    table_str = '#table %d\n' %action_table[0] #table_no
    # branches between two tables, check ext counter
    left_output = action_table[6][0]
    right_output = action_table[6][1]
    # internal counter to implement, num_of_step per loop, duration_per_step
    if action_table[5] < 100000: # less than 1 ms
        print('Warning: trigger duration is shorter than 1ms')
    table_str, addr_ptr = time_balancer(left_output, right_output, action_table[5], table_str, addr_ptr, action_table[3])
    # check external counter
    # if the fail_table is unprogrammed then put fail_table after this trigger_table
    fail_table_no = action_table[2]
    if table_dic[fail_table_no]==-1: # unprogrammed
        table_dic[fail_table_no]=addr_ptr+2 # addr_ptr is failed condition, +1 is success condition, +2 is start of new table
        # fail
        table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output,right_output,0,((8+action_table[3])<<12)+table_dic[fail_table_no],addr_ptr) # jump to fail_table on nonzero ext cnt
        addr_ptr = addr_ptr + 1 # now addr_ptr is success condition, +1 is failed table
        # program the fail_table immediately after the two branch rows
        table_pos = program_table(fail_table_no)
        if len(table_lst[table_pos])==7: #triggered
            next_str, next_ptr = trigger_encode(table_lst[table_pos],addr_ptr+1)
        elif len(table_lst[table_pos])==5: # sequential
            next_str, next_ptr = sequential_encode(table_pos,addr_ptr+1)
        else: # conditional
            next_str, next_ptr = conditional_encode(table_lst[table_pos],addr_ptr+1)
        # next_ptr is after fail_table, new table
    else: # failed table already exists
        table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output,right_output,0,((8+action_table[3])<<12)+table_dic[fail_table_no],addr_ptr) # jump to fail_table on nonzero ext cnt
        addr_ptr = addr_ptr + 1 # now addr_ptr is success condition, +1 is failed table
        next_str = ''
        next_ptr = addr_ptr+1 # here addr is the success condtion, +1 is the new table, next_adr is the new table
    # success, start the sucess table
    success_table_no = action_table[1]
    if table_dic[success_table_no]==-1: # unprogrammed
        table_dic[success_table_no]=next_ptr # addr_ptr is success condition, +1 is start of new table
        table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output,right_output,0,table_dic[success_table_no],addr_ptr) # jump to success table
        addr_ptr = addr_ptr + 1 # here addr is new table after success condition
        table_pos = program_table(success_table_no)
        if len(table_lst[table_pos])==7: #triggered
            next2_str, next2_ptr = trigger_encode(table_lst[table_pos],next_ptr)
        elif len(table_lst[table_pos])==5: # sequential
            next2_str, next2_ptr = sequential_encode(table_pos,next_ptr)
        else: # conditional
            next2_str, next2_ptr = conditional_encode(table_lst[table_pos],next_ptr)
    else: # success table already exists
        table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output,right_output,0,table_dic[success_table_no],addr_ptr) # jump to success table
        addr_ptr = addr_ptr + 1
        next2_str = ''
    table_str = table_str + next_str + next2_str
    return table_str, addr_ptr
def sequential_encode(table_pos,addr_ptr,first_occurance=True):
    """Encode one sequential-table segment into dpatt 'writew' rows.

    Recursively emits every following segment that shares this table number,
    the loop-counter bookkeeping for loop sequentials, and finally the jump
    into the next table (encoding that table too if it is unprogrammed).

    Args:
        table_pos: index of this segment in table_lst.
        addr_ptr: RAM address at which this segment starts.
        first_occurance: True for the first segment of the table (emits the
            '#table' header and the loop-counter load rows).

    Returns:
        (table_str, addr_ptr): the emitted dpatt text and the next free RAM
        address.
    """
    global table_dic
    global table_lst
    global rep_count
    action_table = table_lst[table_pos]
    [left_output,right_output]=action_table[4]
    table_str = ''
    if first_occurance:
        table_str = '#table %d\n' %action_table[0] #table_no
        # check if it is a loop sequential
        if action_table[2] > 0:
            #find counter, counter 0 and 1 are for durations, so always start from 2
            if rep_count[0]==action_table[2]:
                loop_counter = 2
            else:
                loop_counter = 3
            table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output, right_output, 0, 4096+2**(loop_counter+4),addr_ptr) #load counters, 4096->special command, 2**(loop_counter+4)
            addr_ptr = addr_ptr + 1
            table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output, right_output, 0, 4096+2**(loop_counter+8),addr_ptr) #load counters, 4096->special command, decrement on counter
            addr_ptr = addr_ptr + 1
            action_table[3] = action_table[3]-1 # correction due to decrement step
    # correction for the last action before check, ERRATUM
    # if (table_pos+1<len(table_lst)): # still within table_lst
    #     if (table_lst[table_pos+1][0]!=action_table[0]): #new table
    #         next_table_no = action_table[1]
    #         if table_dic[next_table_no]==-1: # unprogrammed
    #             if action_table[2] > 0: # loop sequential
    #                 action_table[3] = action_table[3]-1
    #         else: # existed, need additional step to redirect
    #             if action_table[2] > 0: # loop sequential
    #                 action_table[3] = action_table[3]-2
    #             else:
    #                 action_table[3] = action_table[3]-1
    # else: #end of the list, there will be a redirect to new table action, conditional statement if loop sequential
    #     if action_table[2] > 0: # loop sequential
    #         action_table[3] = action_table[3]-2
    #     else:
    #         action_table[3] = action_table[3]-1
    # last action of the table
    if (table_pos+1<len(table_lst)): # still within table_lst
        if (table_lst[table_pos+1][0]!=action_table[0]): #new table
            # correction for the conditional check
            if action_table[2] > 0: # loop sequential
                action_table[3] = action_table[3]-1 #correction for conditional check
    else:
        if action_table[2] > 0: # loop sequential
            action_table[3] = action_table[3]-1 #correction for conditional check
    # timing loop
    table_str, addr_ptr = time_balancer(left_output, right_output, action_table[3], table_str, addr_ptr)
    # check if this is the end of this sequential table
    next_str = ''
    if (table_pos+1<len(table_lst)):
        if (table_lst[table_pos+1][0]==action_table[0]): #same table
            next_str, addr_ptr = sequential_encode(table_pos+1,addr_ptr,False)
            table_str = table_str + next_str
        # NEXT IS ALREADY A NEW TABLE
        else:
            # end of loop
            if action_table[2] > 0:
                #find counter, counter 0 and 1 are for durations, so always start from 2
                if rep_count[0]==action_table[2]:
                    loop_counter = 2
                else:
                    loop_counter = 3
                table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output, right_output, 0,((loop_counter+12)<<12)+table_dic[action_table[0]]+1,addr_ptr) #jump to start of sequential with nonzero counter value
                addr_ptr = addr_ptr + 1
            # go into new table, check what they are again
            next_table_no = action_table[1]
            if table_dic[next_table_no]==-1: # unprogrammed
                table_dic[next_table_no]=addr_ptr
                table_pos = program_table(next_table_no)
                if len(table_lst[table_pos])==7: #triggered
                    next_str, addr_ptr = trigger_encode(table_lst[table_pos],addr_ptr)
                elif len(table_lst[table_pos])==5: # sequential
                    next_str, addr_ptr = sequential_encode(table_pos,addr_ptr)
                else: # conditional
                    next_str, addr_ptr = conditional_encode(table_lst[table_pos],addr_ptr)
            else: #table already exist, bring it back to branch
                next_str = 'writew %d,%d,%d,%d;#row %d\n' %(left_output, right_output, 0, table_dic[next_table_no],addr_ptr)
                addr_ptr = addr_ptr + 1
            table_str = table_str + next_str
    else:
        # end of table_lst: close the loop (if any) and jump to next table
        if action_table[2] > 0:
            #find counter, counter 0 and 1 are for durations, so always start from 2
            if rep_count[0]==action_table[2]:
                loop_counter = 2
            else:
                loop_counter = 3
            table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output, right_output, 0,((loop_counter+12)<<12)+table_dic[action_table[0]]+1,addr_ptr) #jump to start of sequential with nonzero counter value
            addr_ptr = addr_ptr + 1
        # go into new table, check what they are again
        next_table_no = action_table[1]
        if table_dic[next_table_no]==-1: # unprogrammed
            table_dic[next_table_no]=addr_ptr
            table_pos = program_table(next_table_no)
            if len(table_lst[table_pos])==7: #triggered
                next_str, addr_ptr = trigger_encode(table_lst[table_pos],addr_ptr)
            elif len(table_lst[table_pos])==5: # sequential
                next_str, addr_ptr = sequential_encode(table_pos,addr_ptr)
            else: # conditional
                next_str, addr_ptr = conditional_encode(table_lst[table_pos],addr_ptr)
        else: #table already exist, bring it back to branch
            next_str = 'writew %d,%d,%d,%d;#row %d\n' %(left_output, right_output, 0, table_dic[next_table_no],addr_ptr)
            addr_ptr = addr_ptr + 1
        table_str = table_str + next_str
    return table_str, addr_ptr
def conditional_encode(action_table,addr_ptr):
    """Encode one conditional table into dpatt 'writew' rows.

    Polls the input line: on a detected event jumps to the success table,
    otherwise loops back to the top of this table.  Recursively encodes the
    success table if it has not been programmed yet.

    Args:
        action_table: [table_no, success_table_no, 'NULL', input_line,
            num_clock_cycle, [left_word, right_word]].
        addr_ptr: RAM address at which this table starts.

    Returns:
        (table_str, addr_ptr): the emitted dpatt text (plus any tables
        programmed recursively) and the next free RAM address.
    """
    global table_dic
    global table_lst
    table_str = '#table %d\n' %action_table[0] #table_no
    # branches between two tables, check ext counter
    left_output = action_table[5][0]
    right_output = action_table[5][1]
    if action_table[4] > Max_cyclenumber_per_line:
        tri_width = Max_cyclenumber_per_line
        # introduce a wait time first
        table_str, addr_ptr = time_balancer(left_output, right_output, action_table[4]-Max_cyclenumber_per_line, table_str, addr_ptr)
        print('Warning: conditional wait time is longer than 655.36us.')
    else:
        tri_width = action_table[4]
    # conditional statement, first detect if success table is created
    success_table_no = action_table[1]
    if table_dic[success_table_no]==-1: # unprogrammed
        next_addr = addr_ptr + 2 #addr_ptr = the conditional check, +1 is the go back statement, +2 is new table
    else:
        next_addr = table_dic[success_table_no]
    table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output,right_output,tri_width-1,((4+action_table[3])<<12)+next_addr,addr_ptr)
    addr_ptr = addr_ptr + 1
    # go back up statement
    if tri_width < Max_cyclenumber_per_line:
        table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output,right_output,tri_width-1,table_dic[action_table[0]],addr_ptr)
        addr_ptr = addr_ptr + 1
    else:
        # only spend a maximum of 655.36us here
        table_str = table_str+'writew %d,%d,%d,%d;#row %d\n' %(left_output,right_output,Max_cyclenumber_per_line-1,table_dic[action_table[0]],addr_ptr)
        addr_ptr = addr_ptr + 1
    if table_dic[success_table_no]==-1: # unprogrammed
        table_dic[success_table_no]=addr_ptr
        table_pos = program_table(success_table_no)
        if len(table_lst[table_pos])==7: #triggered
            next_str, addr_ptr = trigger_encode(table_lst[table_pos],addr_ptr)
        elif len(table_lst[table_pos])==5: # sequential
            next_str, addr_ptr = sequential_encode(table_pos,addr_ptr)
        else: # conditional
            next_str, addr_ptr = conditional_encode(table_lst[table_pos],addr_ptr)
    else: #table already exist, bring it back to branch
        next_str = 'writew %d,%d,%d,%d;#row %d\n' %(left_output, right_output, 0, table_dic[success_table_no],addr_ptr)
        addr_ptr = addr_ptr + 1
    table_str = table_str + next_str
    # go to next table
    return table_str, addr_ptr
def generator(pattfile):
    """Translate an open .patt source file into a dpatt program string.

    Reads the file line by line, strips '#' comments, dispatches each
    command through parse_command, and on the termination command calls
    flush() to produce the final dpatt text.  The module-level table_lst /
    table_dic / rep_count state is reset on entry.

    Args:
        pattfile: an open, readable file object containing the .patt source.

    Returns:
        The generated dpatt program as a string.

    Raises:
        Exception: on an unknown command or an invalid statement terminator.
    """
    global table_lst
    global table_dic
    global rep_count
    table_lst.clear()
    table_dic.clear()
    rep_count.clear()
    output = ''
    #take out commented regions
    #and also cases where we have skip lines \n
    file_line = pattfile.readline()
    # take care of the comments
    ptr = file_line.find('#')
    if ptr >= 0: #if commented, replace the commented part with skip line
        file_line = file_line[:ptr]+str('\n')
    # initial token value
    token = 0
    while(True):
        while (file_line[0] == '\n' or file_line[0]=='\r'): # requires to skip a line
            file_line = pattfile.readline()
            # take care of the comments
            ptr = file_line.find('#')
            if ptr >= 0: #if commented, replace the commented part with skip line
                file_line = file_line[:ptr]+str('\n')
        #print(file_line) #debug
        token, file_line = parse_command(file_line,token)
        if token==1: # termination
            output = flush()
            break
        if file_line == '':
            raise Exception(error_list[8]+"''") #invalid termination
        else:
            if token==0:
                raise Exception(error_list[1])
            newpos = 0
            # take out empty spaces (and ':'/',' separators)
            while(file_line[newpos]==' ' or file_line[newpos]=='\t' or file_line[newpos]==':' or file_line[newpos]==','):
                newpos = newpos + 1
            if file_line[newpos:]=='':
                raise Exception(error_list[8]+': '+file_line) #invalid termination
            file_line = file_line[newpos:]
            if (file_line[0] == '\n' or file_line[0]=='\r'): # requires to skip a line
                file_line = pattfile.readline()
                # take care of the comments
                ptr = file_line.find('#')
                if ptr >= 0: #if commented, replace the commented part with skip line
                    file_line = file_line[:ptr]+str('\n')
            elif (file_line[0] == ';'): # consume the statement terminator
                file_line = file_line[1:]
            else:
                raise Exception(error_list[8]+': '+file_line) #invalid termination
    return output
# if __name__ == '__main__':
# import argparse
# parser = argparse.ArgumentParser(description='Generate dpatt from patt')
# parser.add_argument('-i','--inputstr',type=str,default='load_atom_redu.patt')
# parser.add_argument('-o','--outputstr',type=str,default='isto.dat')
# args = parser.parse_args()
# pattfile = open(args.inputstr,'r')
# outputfile = open(args.outputstr,'w+')
#
# output = main(pattfile)
# outputfile.write(output)
#
# pattfile.close()
# outputfile.close()
#
# num_lines = sum(1 for line in open(args.outputstr,'r'))
# #print(num_lines-len(table_dic)-4) #for debugging
# if (num_lines-len(table_dic)-4) > 256:
# raise Exception(error_list[12])
| 30,471 | 0 | 310 |
8d68291ec8146ddef556012f26a8ece3f9f8aed0 | 237 | py | Python | api/tasks.py | mkeller3/django_celery_redis | 193e550644ef79b5f12d51aa8866c20f1a7a2a3a | [
"Apache-2.0"
] | null | null | null | api/tasks.py | mkeller3/django_celery_redis | 193e550644ef79b5f12d51aa8866c20f1a7a2a3a | [
"Apache-2.0"
] | null | null | null | api/tasks.py | mkeller3/django_celery_redis | 193e550644ef79b5f12d51aa8866c20f1a7a2a3a | [
"Apache-2.0"
] | null | null | null |
# Create your tasks here
from __future__ import absolute_import, unicode_literals
from celery import shared_task
import time
@shared_task
@shared_task | 15.8 | 56 | 0.725738 |
# Create your tasks here
from __future__ import absolute_import, unicode_literals
from celery import shared_task
import time
@shared_task
def add(x, y):
    """Celery task: return x + y after a 30-second sleep (demo of a slow job)."""
    time.sleep(30)
    return x + y
@shared_task
def mul(x, y):
    """Celery task: return the product of x and y."""
    return x * y
3b627431f1cb8b0cc63515012844c169b8b05a75 | 321 | py | Python | djangoapi/courses/serializers.py | peter0083/djangoapi | fe4e9e28d9c8df6625b401592ed4f91c7791a902 | [
"MIT"
] | null | null | null | djangoapi/courses/serializers.py | peter0083/djangoapi | fe4e9e28d9c8df6625b401592ed4f91c7791a902 | [
"MIT"
] | null | null | null | djangoapi/courses/serializers.py | peter0083/djangoapi | fe4e9e28d9c8df6625b401592ed4f91c7791a902 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from .models import Course | 45.857143 | 122 | 0.750779 | from rest_framework import serializers
from .models import Course
class CourseSerializer(serializers.HyperlinkedModelSerializer): # serializers.HyperlinkedModelSerializer is the superclass
    """Hyperlinked serializer exposing the public fields of the Course model."""
    class Meta: # a class container for metadata
        model = Course
        fields = ('id', 'url', 'name', 'language', 'price')
78d0c18558fc3459c334f45b248e8979c544b47b | 6,927 | py | Python | modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores.py | Aexyn/webrtc2 | daea5bf2deb843567a792f22ea2047a037e09d78 | [
"DOC",
"BSD-3-Clause"
] | 2 | 2018-01-16T13:29:45.000Z | 2018-08-10T09:15:23.000Z | modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores.py | Aexyn/webrtc2 | daea5bf2deb843567a792f22ea2047a037e09d78 | [
"DOC",
"BSD-3-Clause"
] | null | null | null | modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores.py | Aexyn/webrtc2 | daea5bf2deb843567a792f22ea2047a037e09d78 | [
"DOC",
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Evaluation score abstract class and implementations.
"""
from __future__ import division
import logging
import os
import re
import subprocess
from . import data_access
from . import exceptions
from . import signal_processing
@EvaluationScore.RegisterClass
class AudioLevelPeakScore(EvaluationScore):
  """Peak audio level score.

  Defined as the difference between the peak audio level of the tested and
  the reference signals.

  Unit: dB
  Ideal: 0 dB
  Worst case: +/-inf dB
  """
  # Key under which this score type is registered in
  # EvaluationScore.REGISTERED_CLASSES.
  NAME = 'audio_level_peak'
@EvaluationScore.RegisterClass
class MeanAudioLevelScore(EvaluationScore):
  """Mean audio level score.

  Defined as the difference between the mean audio level of the tested and
  the reference signals.

  Unit: dB
  Ideal: 0 dB
  Worst case: +/-inf dB
  """
  # Key under which this score type is registered in
  # EvaluationScore.REGISTERED_CLASSES.
  NAME = 'audio_level_mean'
@EvaluationScore.RegisterClass
class PolqaScore(EvaluationScore):
  """POLQA score.

  See http://www.polqa.info/.

  Unit: MOS
  Ideal: 4.5
  Worst case: 1.0
  """
  # Key under which this score type is registered in
  # EvaluationScore.REGISTERED_CLASSES.
  NAME = 'polqa'
  @classmethod
  def _ParseOutputFile(cls, polqa_out_filepath):
    """
    Parses the POLQA tool output formatted as a table ('-t' option).

    Args:
      polqa_out_filepath: path to the POLQA tool output file.

    Returns:
      A dict mapping the header field names to their values (as strings).
    """
    data = []
    with open(polqa_out_filepath) as f:
      for line in f:
        line = line.strip()
        if len(line) == 0 or line.startswith('*'):
          # Ignore comments.
          continue
        # Read fields.
        data.append(re.split(r'\t+', line))
    # Two rows expected (header and values).
    assert len(data) == 2, 'Cannot parse POLQA output'
    number_of_fields = len(data[0])
    assert number_of_fields == len(data[1])
    # Build and return a dictionary with field names (header) as keys and the
    # corresponding field values as values.
    return {data[0][index]: data[1][index] for index in range(number_of_fields)}
| 28.273469 | 80 | 0.711708 | # Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Evaluation score abstract class and implementations.
"""
from __future__ import division
import logging
import os
import re
import subprocess
from . import data_access
from . import exceptions
from . import signal_processing
class EvaluationScore(object):
  """Base class for evaluation scores.

  Subclasses set NAME, implement _Run() and register themselves through the
  RegisterClass decorator so they can be looked up in REGISTERED_CLASSES.
  """

  NAME = None
  REGISTERED_CLASSES = {}

  def __init__(self, score_filename_prefix):
    self._score_filename_prefix = score_filename_prefix
    self._reference_signal = None
    self._reference_signal_filepath = None
    self._tested_signal = None
    self._tested_signal_filepath = None
    self._output_filepath = None
    self._score = None

  @classmethod
  def RegisterClass(cls, class_to_register):
    """Registers an EvaluationScore implementation.

    Decorator to automatically register the classes that extend
    EvaluationScore.
    Example usage:

    @EvaluationScore.RegisterClass
    class AudioLevelScore(EvaluationScore):
      pass
    """
    cls.REGISTERED_CLASSES[class_to_register.NAME] = class_to_register
    return class_to_register

  @property
  def output_filepath(self):
    """Path of the file the score is cached in (set by Run())."""
    return self._output_filepath

  @property
  def score(self):
    """The computed (or cached) score; None until Run() has completed."""
    return self._score

  def SetReferenceSignalFilepath(self, filepath):
    """ Sets the path to the audio track used as reference signal.

    Args:
      filepath: path to the reference audio track.
    """
    self._reference_signal_filepath = filepath

  def SetTestedSignalFilepath(self, filepath):
    """ Sets the path to the audio track used as test signal.

    Args:
      filepath: path to the test audio track.
    """
    self._tested_signal_filepath = filepath

  def Run(self, output_path):
    """Extracts the score for the set test data pair.

    Args:
      output_path: path to the directory where the output is written.
    """
    self._output_filepath = os.path.join(
        output_path, self._score_filename_prefix + self.NAME + '.txt')
    try:
      # If the score has already been computed, load.
      self._LoadScore()
      logging.debug('score found and loaded')
    except IOError:
      # Compute the score.
      logging.debug('score not found, compute')
      self._Run(output_path)

  def _Run(self, output_path):
    # Abstract method; subclasses compute self._score and call _SaveScore().
    raise NotImplementedError()

  def _LoadReferenceSignal(self):
    assert self._reference_signal_filepath is not None
    self._reference_signal = signal_processing.SignalProcessingUtils.LoadWav(
        self._reference_signal_filepath)

  def _LoadTestedSignal(self):
    assert self._tested_signal_filepath is not None
    self._tested_signal = signal_processing.SignalProcessingUtils.LoadWav(
        self._tested_signal_filepath)

  def _LoadScore(self):
    # Bug fix: the loaded value must be assigned to self._score; it used to
    # be only returned, and Run() discarded the return value, so self.score
    # stayed None whenever the score was served from the cache file.
    self._score = data_access.ScoreFile.Load(self._output_filepath)
    return self._score

  def _SaveScore(self):
    return data_access.ScoreFile.Save(self._output_filepath, self._score)
@EvaluationScore.RegisterClass
class AudioLevelPeakScore(EvaluationScore):
  """Peak audio level score.

  Defined as the difference between the peak audio level of the tested and
  the reference signals.

  Unit: dB
  Ideal: 0 dB
  Worst case: +/-inf dB
  """

  NAME = 'audio_level_peak'

  def __init__(self, score_filename_prefix):
    EvaluationScore.__init__(self, score_filename_prefix)

  def _Run(self, output_path):
    # Load both tracks, then score the level difference (tested - reference).
    self._LoadTestedSignal()
    self._LoadReferenceSignal()
    level_delta = self._tested_signal.dBFS - self._reference_signal.dBFS
    self._score = level_delta
    self._SaveScore()
@EvaluationScore.RegisterClass
class MeanAudioLevelScore(EvaluationScore):
  """Mean audio level score.

  Defined as the difference between the mean audio level of the tested and
  the reference signals, averaged over 1-second windows.

  Unit: dB
  Ideal: 0 dB
  Worst case: +/-inf dB
  """

  NAME = 'audio_level_mean'

  def __init__(self, score_filename_prefix):
    EvaluationScore.__init__(self, score_filename_prefix)

  def _Run(self, output_path):
    """Averages the per-second dBFS level differences of the two signals."""
    self._LoadReferenceSignal()
    self._LoadTestedSignal()

    dbfs_diffs_sum = 0.0
    # len() of the loaded signals is in milliseconds (assumes pydub-style
    # AudioSegment semantics, as does the slicing below).
    seconds = min(len(self._tested_signal), len(self._reference_signal)) // 1000
    for t in range(seconds):
      # Bug fix: slice consecutive 1-second (1000 ms) windows. The previous
      # code used `t0 = t * seconds; t1 = t0 + seconds`, which only examined
      # the first seconds^2 milliseconds of the signals.
      t0 = t * 1000
      t1 = t0 + 1000
      dbfs_diffs_sum += (
          self._tested_signal[t0:t1].dBFS - self._reference_signal[t0:t1].dBFS)
    # NOTE(review): signals shorter than 1 second still raise
    # ZeroDivisionError here, as in the original code.
    self._score = dbfs_diffs_sum / float(seconds)
    self._SaveScore()
@EvaluationScore.RegisterClass
class PolqaScore(EvaluationScore):
  """POLQA score.

  See http://www.polqa.info/.

  Unit: MOS
  Ideal: 4.5
  Worst case: 1.0
  """
  NAME = 'polqa'
  def __init__(self, score_filename_prefix, polqa_bin_filepath):
    """
    Args:
      score_filename_prefix: prefix for the cached score file name.
      polqa_bin_filepath: path to the POLQA tool binary.

    Raises:
      exceptions.FileNotFoundError: if the binary does not exist.
    """
    EvaluationScore.__init__(self, score_filename_prefix)
    # POLQA binary file path.
    self._polqa_bin_filepath = polqa_bin_filepath
    if not os.path.exists(self._polqa_bin_filepath):
      logging.error('cannot find POLQA tool binary file')
      raise exceptions.FileNotFoundError()
    # Path to the POLQA directory with binary and license files.
    self._polqa_tool_path, _ = os.path.split(self._polqa_bin_filepath)
  def _Run(self, output_path):
    # Remove any stale output so a failed run cannot serve old results.
    polqa_out_filepath = os.path.join(output_path, 'polqa.out')
    if os.path.exists(polqa_out_filepath):
      os.unlink(polqa_out_filepath)
    # -t: table-formatted output, -q: quiet, -LC NB: narrow-band condition.
    args = [
        self._polqa_bin_filepath, '-t', '-q', '-Overwrite',
        '-Ref', self._reference_signal_filepath,
        '-Test', self._tested_signal_filepath,
        '-LC', 'NB',
        '-Out', polqa_out_filepath,
    ]
    logging.debug(' '.join(args))
    subprocess.call(args, cwd=self._polqa_tool_path)
    # Parse POLQA tool output and extract the score.
    polqa_output = self._ParseOutputFile(polqa_out_filepath)
    self._score = float(polqa_output['PolqaScore'])
    self._SaveScore()
  @classmethod
  def _ParseOutputFile(cls, polqa_out_filepath):
    """
    Parses the POLQA tool output formatted as a table ('-t' option).

    Args:
      polqa_out_filepath: path to the POLQA tool output file.

    Returns:
      A dict mapping the header field names to their values (as strings).
    """
    data = []
    with open(polqa_out_filepath) as f:
      for line in f:
        line = line.strip()
        if len(line) == 0 or line.startswith('*'):
          # Ignore comments.
          continue
        # Read fields.
        data.append(re.split(r'\t+', line))
    # Two rows expected (header and values).
    assert len(data) == 2, 'Cannot parse POLQA output'
    number_of_fields = len(data[0])
    assert number_of_fields == len(data[1])
    # Build and return a dictionary with field names (header) as keys and the
    # corresponding field values as values.
    return {data[0][index]: data[1][index] for index in range(number_of_fields)}
| 2,769 | 1,686 | 173 |
fde991a1ade12f993fa6cd31e8d7a1c98760897e | 407 | py | Python | airloft/utils/icao.py | Allaye/airloft | deb6274b9eb1de2de79d0b152d67411d1f747afe | [
"FTL"
] | 2 | 2022-03-06T10:34:12.000Z | 2022-03-27T15:41:52.000Z | airloft/utils/icao.py | Allaye/airloft | deb6274b9eb1de2de79d0b152d67411d1f747afe | [
"FTL"
] | null | null | null | airloft/utils/icao.py | Allaye/airloft | deb6274b9eb1de2de79d0b152d67411d1f747afe | [
"FTL"
] | null | null | null | ICAO = {
"Anaa": "NTGA",
"Apalachicola Regional": "KAAF",
"Malamala": "FAMD",
"Al Ain International": "OMAL",
"Atlantic City": "KACY",
"Albany International": "KBAL",
"Baise Youjiang": "ZGBS",
"Albuquerque International Sunport": "KABQ",
"RAF Abisko": "EAAK",
"RAF Leuchars": "EGQL",
"Santiago de Compostela": "LEST",
"Seve Ballesteros-Santander": "LEXJ",
}
| 25.4375 | 48 | 0.592138 | ICAO = {
"Anaa": "NTGA",
"Apalachicola Regional": "KAAF",
"Malamala": "FAMD",
"Al Ain International": "OMAL",
"Atlantic City": "KACY",
"Albany International": "KBAL",
"Baise Youjiang": "ZGBS",
"Albuquerque International Sunport": "KABQ",
"RAF Abisko": "EAAK",
"RAF Leuchars": "EGQL",
"Santiago de Compostela": "LEST",
"Seve Ballesteros-Santander": "LEXJ",
}
| 0 | 0 | 0 |
ac0348ba5b1b7a69a6c3f778f9a0b37ea7be7200 | 2,145 | py | Python | rightarrow/lexer.py | wuzzeb/python-rightarrow | bc26059d272e4a903fa2a18db9ebb484e7f74aed | [
"Apache-2.0"
] | 1 | 2020-04-30T22:24:41.000Z | 2020-04-30T22:24:41.000Z | rightarrow/lexer.py | wuzzeb/python-rightarrow | bc26059d272e4a903fa2a18db9ebb484e7f74aed | [
"Apache-2.0"
] | null | null | null | rightarrow/lexer.py | wuzzeb/python-rightarrow | bc26059d272e4a903fa2a18db9ebb484e7f74aed | [
"Apache-2.0"
] | null | null | null | import sys
import logging
import ply.lex
logger = logging.getLogger(__name__)
class Lexer(object):
    '''
    A Lexical analyzer for Python Typelanguage.
    '''
    # NOTE(review): this copy of the class has no __init__, so `self.debug`
    # (read in tokenize) is never assigned, and it defines no t_error rule.
    # Presumably both exist in the complete version of this module; confirm.

    def tokenize(self, string):
        '''
        Maps a string to an iterator over tokens. In other words: [char] -> [token]
        '''
        # A fresh PLY lexer is built from this instance's t_* rules per call.
        new_lexer = ply.lex.lex(module=self, debug=self.debug, errorlog=logger)
        new_lexer.latest_newline = 0
        new_lexer.input(string)
        while True:
            t = new_lexer.token()
            if t is None: break
            # Column = offset from the most recently seen newline.
            t.col = t.lexpos - new_lexer.latest_newline
            yield t

    # ============== PLY Lexer specification ==================
    #
    # This probably should be private but:
    #   - the parser requires access to `tokens` (perhaps they should be defined in a third, shared dependency)
    #   - things like `literals` might be a legitimate part of the public interface.
    #
    # Anyhow, it is pythonic to give some rope to hang oneself with :-)

    literals = ['|', '(', ')', '{', '}', '[', ']', ':', '*', ',', ';']

    reserved_words = { 'object': 'OBJECT' }

    # Python 2: reserved_words.values() yields a list here.
    tokens = ['ID', 'TYVAR', 'ARROW', 'KWARG', 'ANY'] + reserved_words.values()

    t_ARROW = r'->'
    t_KWARG = r'\*\*'
    t_ANY = r'\?\?'
    t_ignore = ' \t'

    # PLY convention: the docstring below is the token's regex -- do not edit.
    # A leading '~' marks a type variable (TYVAR); reserved words get their
    # dedicated token type.
    def t_ID(self, t):
        r'~?[a-zA-Z_][a-zA-Z0-9_]*'
        if t.value[0] == '~':
            t.type = 'TYVAR'
            t.value = t.value[1:]
        elif t.value in self.reserved_words:
            t.type = self.reserved_words[t.value]
        else:
            t.type = 'ID'
        return t

    # Track newlines so tokenize() can compute column numbers.
    def t_newline(self, t):
        r'\n'
        t.lexer.lineno += 1
        t.lexer.latest_newline = t.lexpos
if __name__ == '__main__':
logging.basicConfig()
lexer = Lexer(debug=True)
for token in lexer.tokenize(sys.stdin.read()):
print '%-20s%s' % (token.value, token.type)
| 28.223684 | 138 | 0.549184 | import sys
import logging
import ply.lex
logger = logging.getLogger(__name__)
class Lexer(object):
    '''
    A Lexical analyzer for Python Typelanguage.
    '''

    def __init__(self, debug=False):
        # When True, PLY emits verbose diagnostics while building the lexer.
        self.debug = debug

    def tokenize(self, string):
        '''
        Maps a string to an iterator over tokens. In other words: [char] -> [token]
        '''
        # A fresh PLY lexer is built from this instance's t_* rules per call.
        new_lexer = ply.lex.lex(module=self, debug=self.debug, errorlog=logger)
        new_lexer.latest_newline = 0
        new_lexer.input(string)
        while True:
            t = new_lexer.token()
            if t is None: break
            # Column = offset from the most recently seen newline.
            t.col = t.lexpos - new_lexer.latest_newline
            yield t

    # ============== PLY Lexer specification ==================
    #
    # This probably should be private but:
    #   - the parser requires access to `tokens` (perhaps they should be defined in a third, shared dependency)
    #   - things like `literals` might be a legitimate part of the public interface.
    #
    # Anyhow, it is pythonic to give some rope to hang oneself with :-)

    literals = ['|', '(', ')', '{', '}', '[', ']', ':', '*', ',', ';']

    reserved_words = { 'object': 'OBJECT' }

    # Python 2: reserved_words.values() yields a list here.
    tokens = ['ID', 'TYVAR', 'ARROW', 'KWARG', 'ANY'] + reserved_words.values()

    t_ARROW = r'->'
    t_KWARG = r'\*\*'
    t_ANY = r'\?\?'
    t_ignore = ' \t'

    # PLY convention: the docstring below is the token's regex -- do not edit.
    # A leading '~' marks a type variable (TYVAR); reserved words get their
    # dedicated token type.
    def t_ID(self, t):
        r'~?[a-zA-Z_][a-zA-Z0-9_]*'
        if t.value[0] == '~':
            t.type = 'TYVAR'
            t.value = t.value[1:]
        elif t.value in self.reserved_words:
            t.type = self.reserved_words[t.value]
        else:
            t.type = 'ID'
        return t

    # Track newlines so tokenize() can compute column numbers.
    def t_newline(self, t):
        r'\n'
        t.lexer.lineno += 1
        t.lexer.latest_newline = t.lexpos

    # PLY error hook: abort with location information on any unknown character.
    def t_error(self, t):
        raise Exception('Error on line %s, col %s: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.latest_newline, t.value[0]))
if __name__ == '__main__':
logging.basicConfig()
lexer = Lexer(debug=True)
for token in lexer.tokenize(sys.stdin.read()):
print '%-20s%s' % (token.value, token.type)
| 177 | 0 | 58 |
f7ba2a9383bd30b230072da3c6b714a3283b8ada | 287 | py | Python | output/models/nist_data/list_pkg/normalized_string/schema_instance/nistschema_sv_iv_list_normalized_string_length_5_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/list_pkg/normalized_string/schema_instance/nistschema_sv_iv_list_normalized_string_length_5_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/list_pkg/normalized_string/schema_instance/nistschema_sv_iv_list_normalized_string_length_5_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.nist_data.list_pkg.normalized_string.schema_instance.nistschema_sv_iv_list_normalized_string_length_5_xsd.nistschema_sv_iv_list_normalized_string_length_5 import NistschemaSvIvListNormalizedStringLength5
__all__ = [
"NistschemaSvIvListNormalizedStringLength5",
]
| 47.833333 | 222 | 0.905923 | from output.models.nist_data.list_pkg.normalized_string.schema_instance.nistschema_sv_iv_list_normalized_string_length_5_xsd.nistschema_sv_iv_list_normalized_string_length_5 import NistschemaSvIvListNormalizedStringLength5
__all__ = [
"NistschemaSvIvListNormalizedStringLength5",
]
| 0 | 0 | 0 |
b4cb5cadefacca2ad51093281c7e045a146a2c6c | 423 | py | Python | paper/convert_matt.py | ACTCollaboration/tilec | 11ed8d027ad6ffac09b3e291a047f33e97673f14 | [
"BSD-3-Clause"
] | 1 | 2021-01-04T14:51:44.000Z | 2021-01-04T14:51:44.000Z | paper/convert_matt.py | ACTCollaboration/tilec | 11ed8d027ad6ffac09b3e291a047f33e97673f14 | [
"BSD-3-Clause"
] | 4 | 2019-09-03T22:19:16.000Z | 2020-07-13T12:38:08.000Z | paper/convert_matt.py | ACTCollaboration/tilec | 11ed8d027ad6ffac09b3e291a047f33e97673f14 | [
"BSD-3-Clause"
] | 1 | 2020-08-10T14:51:11.000Z | 2020-08-10T14:51:11.000Z | from __future__ import print_function
from orphics import maps,io,cosmology,catalogs
from pixell import enmap
import numpy as np
import os,sys
# Destination enplot annotation file built from the cluster catalog.
ifile = "paper/E-D56Clusters.fits"
#catalogs.convert_hilton_catalog_to_enplot_annotate_file('public_clusters.csv',ifile,radius=15,width=3,color='red')
# Convert the Hilton catalog CSV into enplot annotations (red circles,
# radius 15, line width 3).
catalogs.convert_hilton_catalog_to_enplot_annotate_file('paper/test_public_clusters.csv',ifile,radius=15,width=3,color='red')
| 35.25 | 125 | 0.839243 | from __future__ import print_function
from orphics import maps,io,cosmology,catalogs
from pixell import enmap
import numpy as np
import os,sys
ifile = "paper/E-D56Clusters.fits"
#catalogs.convert_hilton_catalog_to_enplot_annotate_file('public_clusters.csv',ifile,radius=15,width=3,color='red')
catalogs.convert_hilton_catalog_to_enplot_annotate_file('paper/test_public_clusters.csv',ifile,radius=15,width=3,color='red')
| 0 | 0 | 0 |
3dbb54ea861a369163c812d4d62ac2373f9253b7 | 1,374 | py | Python | oogli/utils.py | brianbruggeman/oogli | 6a6f681468d609035924ede27d895afcc9d432b6 | [
"Apache-2.0"
] | 3 | 2016-01-18T22:10:51.000Z | 2016-06-10T16:02:55.000Z | oogli/utils.py | brianbruggeman/oogli | 6a6f681468d609035924ede27d895afcc9d432b6 | [
"Apache-2.0"
] | null | null | null | oogli/utils.py | brianbruggeman/oogli | 6a6f681468d609035924ede27d895afcc9d432b6 | [
"Apache-2.0"
] | null | null | null | import glfw
from glfw import gl
import numpy as np
def opengl_supported(major, minor):
'''Determines if opengl is supported for the version provided'''
assert glfw.core.init() != 0
version = (major, minor)
glfw.core.window_hint(glfw.CONTEXT_VERSION_MAJOR, major)
glfw.core.window_hint(glfw.CONTEXT_VERSION_MINOR, minor)
profile = glfw.OPENGL_ANY_PROFILE if version < (3, 2) else glfw.OPENGL_CORE_PROFILE
glfw.core.window_hint(glfw.OPENGL_PROFILE, profile)
# Setup forward compatibility if able
forward_compat = gl.FALSE if version < (3, 0) else gl.TRUE
glfw.core.window_hint(glfw.OPENGL_FORWARD_COMPAT, forward_compat)
# Keep the window invisible
glfw.core.window_hint(glfw.VISIBLE, gl.FALSE)
glfw.core.window_hint(glfw.FOCUSED, gl.FALSE)
win = glfw.create_window(title='test', width=1, height=1)
return win is not None
# TODO: Fill this out or automate it.
uniform_mapping = {
'vec1': gl.uniform_1f,
'vec2': gl.uniform_2f,
'vec3': gl.uniform_3f,
'vec4': gl.uniform_4f,
'mat4': gl.uniform_matrix_4fv,
}
| 35.230769 | 87 | 0.706696 | import glfw
from glfw import gl
import numpy as np
def screenshot(win, pixels=None):
width, height = win.width, win.height
if not isinstance(pixels, np.ndarray):
shape = (width, height, 3)
pixels = np.zeros(shape, dtype=np.uint8)
return gl.read_pixels(0, 0, width, height, gl.RGB, gl.UNSIGNED_BYTE, pixels)
def opengl_supported(major, minor):
'''Determines if opengl is supported for the version provided'''
assert glfw.core.init() != 0
version = (major, minor)
glfw.core.window_hint(glfw.CONTEXT_VERSION_MAJOR, major)
glfw.core.window_hint(glfw.CONTEXT_VERSION_MINOR, minor)
profile = glfw.OPENGL_ANY_PROFILE if version < (3, 2) else glfw.OPENGL_CORE_PROFILE
glfw.core.window_hint(glfw.OPENGL_PROFILE, profile)
# Setup forward compatibility if able
forward_compat = gl.FALSE if version < (3, 0) else gl.TRUE
glfw.core.window_hint(glfw.OPENGL_FORWARD_COMPAT, forward_compat)
# Keep the window invisible
glfw.core.window_hint(glfw.VISIBLE, gl.FALSE)
glfw.core.window_hint(glfw.FOCUSED, gl.FALSE)
win = glfw.create_window(title='test', width=1, height=1)
return win is not None
# TODO: Fill this out or automate it.
uniform_mapping = {
'vec1': gl.uniform_1f,
'vec2': gl.uniform_2f,
'vec3': gl.uniform_3f,
'vec4': gl.uniform_4f,
'mat4': gl.uniform_matrix_4fv,
}
| 262 | 0 | 23 |
e6add1302c8c0b6938d09deef9aee73e224f4832 | 2,362 | py | Python | tools/vim/ninja_output.py | Wzzzx/chromium-crosswalk | 768dde8efa71169f1c1113ca6ef322f1e8c9e7de | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2019-01-28T08:09:58.000Z | 2021-11-15T15:32:10.000Z | tools/vim/ninja_output.py | maidiHaitai/haitaibrowser | a232a56bcfb177913a14210e7733e0ea83a6b18d | [
"BSD-3-Clause"
] | null | null | null | tools/vim/ninja_output.py | maidiHaitai/haitaibrowser | a232a56bcfb177913a14210e7733e0ea83a6b18d | [
"BSD-3-Clause"
] | 6 | 2020-09-23T08:56:12.000Z | 2021-11-18T03:40:49.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
import exceptions
import itertools
import re
def GetNinjaOutputDirectory(chrome_root):
  """Returns <chrome_root>/<output_dir>/(Release|Debug|<other>).

  If either of the following environment variables are set, their
  value is used to determine the output directory:
    1. CHROMIUM_OUT_DIR environment variable.
    2. GYP_GENERATOR_FLAGS environment variable output_dir property.

  Otherwise, all directories starting with the word out are examined.

  The configuration chosen is the one most recently generated/built.
  """
  # NOTE(review): this copy references generate_paths() and
  # approx_directory_mtime() which are not defined here -- presumably they
  # are nested helpers present in the complete version of this file; as-is
  # this raises NameError. Confirm against the full source.

  # Candidate output dirs, relative to chrome_root.
  output_dirs = []
  # 1. Explicit override via CHROMIUM_OUT_DIR (must exist on disk).
  if ('CHROMIUM_OUT_DIR' in os.environ and
      os.path.isdir(os.path.join(chrome_root, os.environ['CHROMIUM_OUT_DIR']))):
    output_dirs = [os.environ['CHROMIUM_OUT_DIR']]
  # 2. output_dir=... inside GYP_GENERATOR_FLAGS.
  if not output_dirs:
    generator_flags = os.getenv('GYP_GENERATOR_FLAGS', '').split(' ')
    for flag in generator_flags:
      name_value = flag.split('=', 1)
      if (len(name_value) == 2 and name_value[0] == 'output_dir' and
          os.path.isdir(os.path.join(chrome_root, name_value[1]))):
        output_dirs = [name_value[1]]
  # 3. Fallback: every directory named "out" or "out_*" under chrome_root.
  if not output_dirs:
    for f in os.listdir(chrome_root):
      if re.match(r'out(\b|_)', f):
        out = os.path.realpath(os.path.join(chrome_root, f))
        if os.path.isdir(out):
          output_dirs.append(os.path.relpath(out, start = chrome_root))

  try:
    return max(generate_paths(), key=approx_directory_mtime)
  except ValueError:
    # max() on an empty iterable: no config with a build.ninja was found.
    raise exceptions.RuntimeError(
        'Unable to find a valid ninja output directory.')
if __name__ == '__main__':
if len(sys.argv) != 2:
raise exceptions.RuntimeError('Expected a single path argument.')
print GetNinjaOutputDirectory(sys.argv[1])
| 35.253731 | 80 | 0.701948 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
import exceptions
import itertools
import re
def GetNinjaOutputDirectory(chrome_root):
  """Returns <chrome_root>/<output_dir>/(Release|Debug|<other>).

  If either of the following environment variables are set, their
  value is used to determine the output directory:
    1. CHROMIUM_OUT_DIR environment variable.
    2. GYP_GENERATOR_FLAGS environment variable output_dir property.

  Otherwise, all directories starting with the word out are examined.

  The configuration chosen is the one most recently generated/built.
  """
  # Candidate output dirs, relative to chrome_root.
  output_dirs = []
  # 1. Explicit override via CHROMIUM_OUT_DIR (must exist on disk).
  if ('CHROMIUM_OUT_DIR' in os.environ and
      os.path.isdir(os.path.join(chrome_root, os.environ['CHROMIUM_OUT_DIR']))):
    output_dirs = [os.environ['CHROMIUM_OUT_DIR']]
  # 2. output_dir=... inside GYP_GENERATOR_FLAGS.
  if not output_dirs:
    generator_flags = os.getenv('GYP_GENERATOR_FLAGS', '').split(' ')
    for flag in generator_flags:
      name_value = flag.split('=', 1)
      if (len(name_value) == 2 and name_value[0] == 'output_dir' and
          os.path.isdir(os.path.join(chrome_root, name_value[1]))):
        output_dirs = [name_value[1]]
  # 3. Fallback: every directory named "out" or "out_*" under chrome_root.
  if not output_dirs:
    for f in os.listdir(chrome_root):
      if re.match(r'out(\b|_)', f):
        out = os.path.realpath(os.path.join(chrome_root, f))
        if os.path.isdir(out):
          output_dirs.append(os.path.relpath(out, start = chrome_root))

  def generate_paths():
    # Yields every candidate config dir that contains a build.ninja.
    for out_dir in output_dirs:
      out_path = os.path.join(chrome_root, out_dir)
      for config in os.listdir(out_path):
        path = os.path.join(out_path, config)
        if os.path.exists(os.path.join(path, 'build.ninja')):
          yield path

  def approx_directory_mtime(path):
    # This is a heuristic; don't recurse into subdirectories.
    paths = [path] + [os.path.join(path, f) for f in os.listdir(path)]
    return max(os.path.getmtime(p) for p in paths)

  try:
    # Pick the most recently touched configuration.
    return max(generate_paths(), key=approx_directory_mtime)
  except ValueError:
    # max() on an empty iterable: no config with a build.ninja was found.
    raise exceptions.RuntimeError(
        'Unable to find a valid ninja output directory.')
if __name__ == '__main__':
if len(sys.argv) != 2:
raise exceptions.RuntimeError('Expected a single path argument.')
print GetNinjaOutputDirectory(sys.argv[1])
| 451 | 0 | 50 |
1036b6605f5cc8baea52e6bdd3a4d604cd22c71e | 4,604 | py | Python | Kill Confirm Calculator.py | camando-73/Kill-Confirm-Calculator | d5c08f24092e99ceb3a66d8a5135c78c41879e7a | [
"MIT"
] | null | null | null | Kill Confirm Calculator.py | camando-73/Kill-Confirm-Calculator | d5c08f24092e99ceb3a66d8a5135c78c41879e7a | [
"MIT"
] | null | null | null | Kill Confirm Calculator.py | camando-73/Kill-Confirm-Calculator | d5c08f24092e99ceb3a66d8a5135c78c41879e7a | [
"MIT"
] | null | null | null | from tkinter import *
import PySimpleGUI as sg
window = Tk()
window.title("Kill Confirm Calculator")
global oppKilPer
promptLabel = Label(window, text="Select your Opponents Rival").grid(row=1, column=4)
promptLabel1 = Label(window, text="by clicking a Button").grid(row=2, column=4)
#Functions containing each characters different kill percent values
#calculation of final percent
#individual character buttons for users to choose
zetButton = Button(window, text="Zetterburn", command=zet)
zetButton.grid(row=2, column=3)
forsButton = Button(window, text="Forsburn", command=fors)
forsButton.grid(row=2, column=2)
claButton = Button(window, text="Clairen", command=cla)
claButton.grid(row=2, column=1)
orcButton = Button(window, text="Orcane", command=orc)
orcButton.grid(row=3, column=5)
etaButton = Button(window, text="Etalus", command=eta)
etaButton.grid(row=3, column=6)
ranButton = Button(window, text="Ranno", command=ran)
ranButton.grid(row=3, column=7)
wraButton = Button(window, text="Wrastor", command=wra)
wraButton.grid(row=2, column=5)
absaButton = Button(window, text="Absa", command=absa)
absaButton.grid(row=2, column=6)
ellButton = Button(window, text="Ellianna", command=ell)
ellButton.grid(row=2, column=7)
kraButton = Button(window, text="Kragg", command=kra)
kraButton.grid(row=3, column=3)
mayButton = Button(window, text="Maypul", command=may)
mayButton.grid(row=3, column=2)
sylButton = Button(window, text="Sylvanos", command=syl)
sylButton.grid(row=3, column=1)
oriButton = Button(window, text="Ori and Sein", command=ori)
oriButton.grid(row=4, column=3)
shoButton = Button(window, text="Shovel Knight", command=sho)
shoButton.grid(row=4, column=5)
#Entry widget for opponents percent
oppPercentLabel = Label(window, text="Enter your Opponents Percent").grid(row=6,column=4)
conOfPer = StringVar()
entPercent = Entry(window, width=20, textvariable=conOfPer).grid(row=7, column=4)
#result button that will display if the kill confirm was succesful
submitButton = Button(window, text="Result", command=calOppPer).grid(row=9, column=4)
window.mainloop()
| 37.430894 | 91 | 0.68397 | from tkinter import *
import PySimpleGUI as sg
window = Tk()
window.title("Kill Confirm Calculator")
global oppKilPer
promptLabel = Label(window, text="Select your Opponents Rival").grid(row=1, column=4)
promptLabel1 = Label(window, text="by clicking a Button").grid(row=2, column=4)
#Functions containing each characters different kill percent values
# Character-selection button callbacks.
# Each handler shows which rival was chosen (row 5, column 4) and stores
# that rival's kill-confirm percent threshold in the module-level
# `oppKilPer`, later read by calOppPer().
# NOTE(review): Label(...).grid(...) returns None, so `characterLabel` is
# always None; kept as-is since only the widget side effect matters.
def zet():
    characterLabel = Label(window, text="You Chose: Zetterburn").grid(row=5, column=4)
    global oppKilPer
    oppKilPer = 118
def orc():
    characterLabel = Label(window, text="You Chose: Orcane").grid(row=5, column=4)
    global oppKilPer
    oppKilPer = 118
def fors():
    characterLabel = Label(window, text="You Chose: Forsburn").grid(row=5, column=4)
    global oppKilPer
    oppKilPer = 118
def eta():
    characterLabel = Label(window, text="You Chose: Etalus").grid(row=5, column=4)
    global oppKilPer
    oppKilPer = 133
def cla():
    characterLabel = Label(window, text="You Chose: Clairen").grid(row=5, column=4)
    global oppKilPer
    oppKilPer = 118
def ran():
    characterLabel = Label(window, text="You Chose: Ranno").grid(row=5, column=4)
    global oppKilPer
    oppKilPer = 112
def wra():
    characterLabel = Label(window, text="You Chose: Wrastor").grid(row=5, column=4)
    global oppKilPer
    oppKilPer = 88
def kra():
    characterLabel = Label(window, text="You Chose: Kragg").grid(row=5, column=4)
    global oppKilPer
    oppKilPer = 138
def absa():
    characterLabel = Label(window, text="You Chose: Absa").grid(row=5, column=4)
    global oppKilPer
    oppKilPer = 97
def may():
    characterLabel = Label(window, text="You Chose: Maypul").grid(row=5, column=4)
    global oppKilPer
    oppKilPer = 106
def ell():
    characterLabel = Label(window, text="You Chose: Ellianna").grid(row=5, column=4)
    global oppKilPer
    oppKilPer = 122
def syl():
    characterLabel = Label(window, text="You Chose: Sylvanos").grid(row=5, column=4)
    global oppKilPer
    oppKilPer = 127
def ori():
    characterLabel = Label(window, text="You Chose: Ori and Sein").grid(row=5, column=4)
    global oppKilPer
    oppKilPer = 101
def sho():
    characterLabel = Label(window, text="You Chose: Shovel Knight").grid(row=5, column=4)
    global oppKilPer
    oppKilPer = 125
#calculation of final percent
def calOppPer():
    """Check whether the selected rival's kill-confirm threshold is met.

    Reads the percent typed by the user (conOfPer) and compares it against
    the threshold stored in `oppKilPer` by the character buttons; shows the
    result as a label on row 10.
    """
    try:
        percent = int(conOfPer.get())
    except ValueError:
        sg.Popup('Opps!', 'Must enter a whole number!')
        # Bug fix: previously execution fell through and used the undefined
        # `percent` below, raising NameError after the popup.
        return
    # NOTE(review): raises NameError if no character button was clicked
    # first (oppKilPer unset) -- presumably acceptable for this app; confirm.
    finOppPer = oppKilPer - percent
    if finOppPer <= 0:
        succLabel = Label(window, text="Kill Confirm Successful")
        succLabel.grid(row=10, column=4)
    else:
        succLabel = Label(window, text="Kill Confirm Unsucessful")
        succLabel.grid(row=10, column=4)
#individual character buttons for users to choose
zetButton = Button(window, text="Zetterburn", command=zet)
zetButton.grid(row=2, column=3)
forsButton = Button(window, text="Forsburn", command=fors)
forsButton.grid(row=2, column=2)
claButton = Button(window, text="Clairen", command=cla)
claButton.grid(row=2, column=1)
orcButton = Button(window, text="Orcane", command=orc)
orcButton.grid(row=3, column=5)
etaButton = Button(window, text="Etalus", command=eta)
etaButton.grid(row=3, column=6)
ranButton = Button(window, text="Ranno", command=ran)
ranButton.grid(row=3, column=7)
wraButton = Button(window, text="Wrastor", command=wra)
wraButton.grid(row=2, column=5)
absaButton = Button(window, text="Absa", command=absa)
absaButton.grid(row=2, column=6)
ellButton = Button(window, text="Ellianna", command=ell)
ellButton.grid(row=2, column=7)
kraButton = Button(window, text="Kragg", command=kra)
kraButton.grid(row=3, column=3)
mayButton = Button(window, text="Maypul", command=may)
mayButton.grid(row=3, column=2)
sylButton = Button(window, text="Sylvanos", command=syl)
sylButton.grid(row=3, column=1)
oriButton = Button(window, text="Ori and Sein", command=ori)
oriButton.grid(row=4, column=3)
shoButton = Button(window, text="Shovel Knight", command=sho)
shoButton.grid(row=4, column=5)
#Entry widget for opponents percent
oppPercentLabel = Label(window, text="Enter your Opponents Percent").grid(row=6,column=4)
conOfPer = StringVar()
entPercent = Entry(window, width=20, textvariable=conOfPer).grid(row=7, column=4)
#result button that will display if the kill confirm was succesful
submitButton = Button(window, text="Result", command=calOppPer).grid(row=9, column=4)
window.mainloop()
| 2,114 | 0 | 345 |
c81f3107437fcc895e116096a76e159155a09374 | 5,791 | py | Python | FindAllRNA/Archieved/encoders.py | Koushul/FindAllRNA | a2c41831f67be2cd52629ebc2ef24cfca3e172f5 | [
"MIT"
] | null | null | null | FindAllRNA/Archieved/encoders.py | Koushul/FindAllRNA | a2c41831f67be2cd52629ebc2ef24cfca3e172f5 | [
"MIT"
] | null | null | null | FindAllRNA/Archieved/encoders.py | Koushul/FindAllRNA | a2c41831f67be2cd52629ebc2ef24cfca3e172f5 | [
"MIT"
] | null | null | null |
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from Bio import SeqIO
from Bio.Seq import Seq
import numpy as np
import random
import itertools
from textwrap import wrap
from collections import defaultdict
if __name__ == '__main__':
# s = Seq('TTATGACCC')
# encoder = KMerEncoder(2, 50, 'constant')
# e = encoder.encode(s)
# print(s)
# print(encoder.char_to_int)
# print(e)
# encoder = KMerEncoder(3, 50, 'random')
# e = encoder.encode(s)
# print(s)
# print(encoder.char_to_int)
# print(e)
# encoder = OneHotEncoder(2, 50, 'constant')
# e = encoder.encode(s)
# print(s)
# print(encoder.char_to_int)
# print(e)
# from pprint import pprint
# e = RandomEncoder('../datasets/fixture.fasta')
# s = e.encode(2, 2)
# print(len(s))
# print(s)
# print()
# pprint(e.archieve)
from pprint import pprint
e = NoisyEncoder('../datasets/fixture.fasta')
s = e.encode(10)
pprint(s)
| 29.697436 | 120 | 0.572267 |
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from Bio import SeqIO
from Bio.Seq import Seq
import numpy as np
import random
import itertools
from textwrap import wrap
from collections import defaultdict
class AbstractSequenceEncoder:
    """Base interface for objects that encode a single sequence."""

    # Nucleotide alphabet shared by all encoders.
    ALLOWED_CHARACTERS = 'ATGC'

    def encode(self, seq: Seq):
        raise NotImplementedError

    def decode(self):
        raise NotImplementedError


class AbstractFastaEncoder(AbstractSequenceEncoder):
    """Marker base class for encoders that operate on a whole FASTA file."""
    pass


class DummySeqEncoder(AbstractSequenceEncoder):
    """Identity encoder: returns the sequence unchanged."""

    def encode(self, seq: Seq):
        return seq


class DummyFastaEncoder(AbstractSequenceEncoder):
    """Identity FASTA encoder: returns the parsed SeqRecord list as-is."""

    def __init__(self, fasta_file: str):
        self.fasta_file = fasta_file

    def encode(self):
        seq_rec = list(SeqIO.parse(self.fasta_file, "fasta"))
        return seq_rec
class NoisyEncoder(AbstractFastaEncoder):
    """Pads each FASTA sequence with random di-nucleotide noise."""

    def __init__(self, fasta_file: str):
        self.fasta_file = fasta_file

    def encode(self, nperc: float):
        """
        Pads sequences with random di-nucleotide preserving random segments at the head and tail.
        Returns a list of padded sequences.
        """
        seq_rec = list(SeqIO.parse(self.fasta_file, "fasta"))
        samples = []
        for i, r in enumerate(seq_rec):
            head = ''
            tail = ''
            if nperc > 0:
                # Pool of di-nucleotides from both reading frames of the
                # original sequence.
                sw = wrap(str(r.seq), 2) + wrap(str(r.seq)[1:], 2)
                # Head and tail each draw ~0.25 * len * nperc% di-nucleotides
                # (returned as numpy arrays, joined to strings below).
                head = np.random.choice(sw, int(0.25*len(r.seq)*nperc/100))
                tail = np.random.choice(sw, int(0.25*len(r.seq)*nperc/100))
                head = ''.join(head)
                tail = ''.join(tail)
            samples.append(head+str(r.seq)+tail)
        # numpy array of padded sequence strings, FASTA order preserved.
        return np.array(samples)
class RandomEncoder(AbstractFastaEncoder):
    """Shuffles FASTA sequences while preserving k-nucleotide frequency."""

    def __init__(self, fasta_file: str):
        self.fasta_file = fasta_file
        # Maps each original Seq to the set of (shuffled Seq, record name)
        # pairs produced from it (see decode()).
        self.archieve = defaultdict(set) #used for getting the original sequence and ID back

    def encode(self, k: int, x: int) -> list:
        """
        Returns a list of randomly shuffled sequences preserving the `k`-nucleotide frequency
        Suggested values for k are 2, 3, 6, 12
        Each sequence in the fasta file is shuffled `x` times
        """
        seq_rec = list(SeqIO.parse(self.fasta_file, "fasta"))
        samples=[]
        for _ in range(x):
            for i, r in enumerate(seq_rec):
                rseq = str(r.seq)
                # All overlapping k-mers (sliding window), then shuffled and
                # re-joined; preserves k-mer content, not sequence length.
                sw = [rseq[i:i+k] for i in range(len(rseq)-k+1)]
                np.random.shuffle(sw)
                ss = Seq(''.join(sw))
                self.archieve[r.seq].add((ss, r.name))
                samples.append(str(ss))
        return samples

    def decode(self, seq):
        # Returns the set of (shuffled Seq, name) pairs recorded for `seq`.
        return self.archieve[seq]
class KMerEncoder(AbstractSequenceEncoder):
    """Integer-encodes a sequence as non-overlapping k-mers.

    The sequence is cut into consecutive width-k windows (the window skips
    instead of sliding) and each k-mer is mapped to its index among all
    len(ALLOWED_CHARACTERS)**k possible k-mers.
    """

    def __init__(self, k: int, n: int, padding=None):
        # k: k-mer length; n: fixed output length used when padding.
        self.k = k
        self.n = n
        assert padding in ['random', 'constant', None]
        self.padding = padding
        # All possible k-mers over the allowed alphabet, in product order.
        self.kmers = [''.join(x) for x in itertools.product(self.ALLOWED_CHARACTERS, repeat=self.k)] #all possible kmers
        # k-mer -> integer code.
        self.char_to_int = dict((c, i) for i, c in enumerate(self.kmers)) #encodings

    def encode(self, seq):
        """
        Returns an integer encoding of the sequence (length n when padded,
        possibly shorter when padding is None).
        Window skips instead of sliding.
        """
        seq = seq[:self.n]
        if self.padding == 'random':
            # NOTE(review): random pad values come from
            # range(len(ALLOWED_CHARACTERS)), i.e. 0..3, which overlaps the
            # codes of real k-mers -- confirm this collision is intended.
            seqr = np.random.randint(len(self.ALLOWED_CHARACTERS), size=self.n)
        elif self.padding == 'constant':
            seqr = np.zeros(self.n)
        # Non-overlapping k-wide windows; a trailing partial window is dropped.
        data = wrap(str(seq), self.k)
        data = [d for d in data if len(d) == self.k]
        integer_encoded = [self.char_to_int[c] for c in data]
        if self.padding is None:  # fixed: identity check instead of `== None`
            return integer_encoded
        else:
            seqr[0:len(integer_encoded)] = integer_encoded
            return seqr
class OneHotEncoder(AbstractSequenceEncoder):
    """Fixed-length integer k-mer encoder using a sliding (overlapping) window.

    NOTE(review): despite the name, this returns integer codes, not a
    one-hot matrix -- confirm the intent / consider renaming.
    """

    def __init__(self, k: int, n: int, padding: str):
        # k: k-mer window length; n: fixed output length.
        self.k = k
        self.n = n
        assert padding in ['random', 'constant']
        self.padding = padding
        # All possible k-mers over the allowed alphabet, in product order.
        self.kmers = [''.join(x) for x in itertools.product(self.ALLOWED_CHARACTERS, repeat=self.k)] #all possible kmers
        # k-mer -> integer code.
        self.char_to_int = dict((c, i) for i, c in enumerate(self.kmers)) #encodings

    def encode(self, seq: Seq):
        # Truncate to at most n characters, then slide a width-k window one
        # position at a time (overlapping k-mers), padding the remainder.
        seq = seq[:self.n]
        if self.padding == 'random':
            # NOTE(review): pad values come from range(len(ALLOWED_CHARACTERS)),
            # which overlaps real k-mer codes -- confirm intended.
            seqr = np.random.randint(len(self.ALLOWED_CHARACTERS), size=self.n)
        elif self.padding == 'constant':
            seqr = np.zeros(self.n)
        data = [str(seq)[i:i+self.k] for i in range(len(seq)-self.k+1)]
        integer_encoded = [self.char_to_int[c] for c in data]
        seqr[0:len(integer_encoded)] = integer_encoded
        return seqr
if __name__ == '__main__':
# s = Seq('TTATGACCC')
# encoder = KMerEncoder(2, 50, 'constant')
# e = encoder.encode(s)
# print(s)
# print(encoder.char_to_int)
# print(e)
# encoder = KMerEncoder(3, 50, 'random')
# e = encoder.encode(s)
# print(s)
# print(encoder.char_to_int)
# print(e)
# encoder = OneHotEncoder(2, 50, 'constant')
# e = encoder.encode(s)
# print(s)
# print(encoder.char_to_int)
# print(e)
# from pprint import pprint
# e = RandomEncoder('../datasets/fixture.fasta')
# s = e.encode(2, 2)
# print(len(s))
# print(s)
# print()
# pprint(e.archieve)
from pprint import pprint
e = NoisyEncoder('../datasets/fixture.fasta')
s = e.encode(10)
pprint(s)
| 1,646 | 2,700 | 359 |
77b6756cbbe27bf7e8e52e03f492721847e9e2cc | 2,531 | py | Python | utils/reviews_utils.py | woctezuma/steam-reviews-to-sales | 51ca279d07a2a1cf26d099a5cfe51566760298cd | [
"MIT"
] | 3 | 2021-08-08T21:06:19.000Z | 2021-12-27T05:29:50.000Z | utils/reviews_utils.py | woctezuma/steam-reviews-to-sales | 51ca279d07a2a1cf26d099a5cfe51566760298cd | [
"MIT"
] | null | null | null | utils/reviews_utils.py | woctezuma/steam-reviews-to-sales | 51ca279d07a2a1cf26d099a5cfe51566760298cd | [
"MIT"
] | null | null | null | import requests
from utils.time_utils import get_target_date_as_timestamp
if __name__ == "__main__":
main()
| 28.122222 | 118 | 0.66772 | import requests
from utils.time_utils import get_target_date_as_timestamp
def get_steam_api_url(app_id):
    """Build the Steam store 'appreviews' endpoint URL for a given app id."""
    base_url = "https://store.steampowered.com/appreviews"
    return "{}/{}".format(base_url, app_id)
def get_request_params(target_timestamp=None, verbose=True):
    """Assemble query parameters for the Steam 'appreviews' endpoint.

    References:
    - https://partner.steamgames.com/doc/store/getreviews
    - browser dev tools on store pages, e.g. https://store.steampowered.com/app/570/#app_reviews_hash
    """
    if target_timestamp is None:
        target_timestamp = get_target_date_as_timestamp(verbose=verbose)

    return {
        "json": "1",
        # Text content of reviews is not needed.
        "num_per_page": "0",
        # Caveat: default seems to be "english", so reviews would be missing if unchanged!
        "language": "all",
        # Caveat: default is "steam", so reviews would be missing if unchanged!
        "purchase_type": "all",
        # Un-filter review-bombs, e.g. https://store.steampowered.com/app/481510/
        "filter_offtopic_activity": "0",
        # Minimal value which allows to filter by date.
        "start_date": "1",
        "end_date": str(target_timestamp),
        # Needed to actually filter by date.
        "date_range_type": "include",
    }
def download_review_stats(app_id, target_timestamp=None, verbose=True):
    """Fetch review statistics for one app from the Steam store API.

    Returns the parsed JSON dict on HTTP success, otherwise None.
    """
    url = get_steam_api_url(app_id)
    params = get_request_params(target_timestamp, verbose=verbose)

    response = requests.get(url, params=params)

    if response.ok:
        result = response.json()
    else:
        result = None

    if verbose:
        print(result)

    return result
def get_review_score_descriptions():
    """Map each Steam review_score value (0-9) to its textual description."""
    labels = [
        "(No|[1-9]) user reviews",
        "Overwhelmingly Negative",
        "Very Negative",
        "Negative",
        "Mostly Negative",
        "Mixed",
        "Mostly Positive",
        "Positive",
        "Very Positive",
        "Overwhelmingly Positive",
    ]
    return dict(enumerate(labels))
def unify_descriptions(df):
    """Normalize the 'review_score_desc' column of a DataFrame.

    With regex=True, every value matching the pattern
    "(No|[1-9]) user reviews" (e.g. "3 user reviews") is replaced by that
    same literal string, collapsing all low-review-count variants into one
    canonical label.
    """
    review_score_descriptions = get_review_score_descriptions()

    # Pattern and replacement are both entry 0; the pattern is interpreted
    # as a regex, the replacement as a literal string.
    df = df.replace(
        {"review_score_desc": review_score_descriptions[0]},
        {"review_score_desc": review_score_descriptions[0]},
        regex=True,
    )

    return df
def main():
    """Smoke-test: download review stats for a couple of known app ids."""
    app_ids = [329070, 573170]
    target_timestamp = get_target_date_as_timestamp()
    for app_id in app_ids:
        # Network call; verbose=True prints each JSON response.
        result = download_review_stats(app_id, target_timestamp, verbose=True)

    return True
if __name__ == "__main__":
main()
| 2,272 | 0 | 138 |
76675b41d2da4ee91a92fb16e3bd655bdb21e69c | 17,737 | py | Python | adapters/pox/ext/debugger/component_launcher/component_launcher.py | ARCCN/elt | 3bf4e6cc0c7abbe442d6513ed294e956143c3bea | [
"BSD-3-Clause"
] | 1 | 2016-07-14T14:45:56.000Z | 2016-07-14T14:45:56.000Z | adapters/pox/ext/debugger/component_launcher/component_launcher.py | ARCCN/elt | 3bf4e6cc0c7abbe442d6513ed294e956143c3bea | [
"BSD-3-Clause"
] | null | null | null | adapters/pox/ext/debugger/component_launcher/component_launcher.py | ARCCN/elt | 3bf4e6cc0c7abbe442d6513ed294e956143c3bea | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
import sys
import traceback
import inspect
import types
from ConfigParser import ConfigParser
import os
from functools import partial
from pox.boot import _do_imports
from pox.core import core
from pox.lib.revent.revent import EventHalt
log = core.getLogger("ComponentLauncher")
CONFIG = ["debugger/component_launcher/component_config/",
"ext/debugger/component_launcher/component_config/",
"pox/ext/debugger/component_launcher/component_config/",
"adapters/pox/ext/debugger/component_launcher/component_config/"]
HIGHEST_PRIORITY = 1000000
# This function is stolen from pox/boot.py
| 37.106695 | 79 | 0.537408 | from __future__ import print_function
import sys
import traceback
import inspect
import types
from ConfigParser import ConfigParser
import os
from functools import partial
from pox.boot import _do_imports
from pox.core import core
from pox.lib.revent.revent import EventHalt
log = core.getLogger("ComponentLauncher")
CONFIG = ["debugger/component_launcher/component_config/",
"ext/debugger/component_launcher/component_config/",
"pox/ext/debugger/component_launcher/component_config/",
"adapters/pox/ext/debugger/component_launcher/component_config/"]
HIGHEST_PRIORITY = 1000000
class CaseConfigParser(ConfigParser):
    """ConfigParser that preserves option-name case.

    The stock parser lower-cases option names via ``optionxform``; section
    names here are Python expressions (see ComponentLauncher), so case must
    be kept intact.
    """
    def __init__(self, allow_no_value=False):
        ConfigParser.__init__(self, allow_no_value=allow_no_value)
        # Identity transform: keep option names exactly as written.
        self.optionxform = str
class ComponentLauncher(object):
    """Launch POX components at runtime and replay missed events to them.

    Per-component ``*.cfg`` files declare which events (of which event
    sources) a component cares about.  Those events are recorded into
    queues from the moment this launcher starts; when the component is
    launched later, its freshly registered handlers are fed the recorded
    events so it can catch up with the controller's state.
    """
    def __init__(self):
        # Build the config map and start recording events immediately.
        self.config = {} # {component_name: CaseConfigParser()}
        self.event_queue = {} # {target: {event_name: [events]}}
        self.tmp_queue = {}
        self.my_handlers = {} # {target: {event_name: [(4-tuple)]}}
        self.halt_events = False # True -> tmp_queue & halt.
                                 # False -> event_queue
        self.launched = [] # We do not support multiload.
                           # Multiple launch is error.
        self._read_config()
        self._set_listeners()

    def launch_single(self, argv):
        """
        Launch 1 module. Example: argv = ["openflow.of_01", "--port=3366"].
        The instantiated module will get the previous events from its config.
        """
        argv = self._preprocess(argv)
        components = [arg for arg in argv if not arg.startswith("-")]
        self._check_components(components)
        old_handlers = self._grab_handlers(components[0])
        result = launch_all(argv)
        self.launched.append(components[0])
        # Subscribe on events for launched module.
        self._set_listeners_to_source(components[0])
        if result is False:
            return result
        new_handlers = self._grab_handlers(components[0])
        # We must continue listen to events while handlers are reset.
        # After our hack we must raise all events
        # that were missed by removed handlers.
        target_handlers = self._subtract_handlers(new_handlers, old_handlers)
        # Copy queue structure.
        self.tmp_queue = {a: {b: [] for b in self.event_queue[a]}
                          for a in self.event_queue}
        # Now we halt all events using our highest priority.
        # We store them to tmp_queue.
        self.halt_events = True
        # We feed the previous messages to newly created components.
        log.info(str(target_handlers))
        self._set_handlers(target_handlers)
        self._raise_events(target_handlers, self.event_queue)
        # We restore handlers and stop halting events.
        self._set_handlers(new_handlers)
        self.halt_events = False
        # We copy missed events to main queue.
        for section in self.tmp_queue:
            for event_name, events in self.tmp_queue[section].items():
                self.event_queue[section][event_name].extend(events)
        # Now we feed missed events to everyone.
        self._raise_events(new_handlers, self.tmp_queue)

    def launch_hierarchical(self, argv):
        """
        Launch component's dependency hierarchy using default params.
        If a dependency is already launched, skip it.
        After everything is loaded, load target component (strictly 1).
        """
        # TODO: Process default args.
        argv = self._preprocess(argv)
        components = [arg for arg in argv if not arg.startswith("-")]
        self._check_components(components)
        try:
            cp = self.config[components[0]]
            # Launch dependencies.
            for section in cp.sections():
                for component, cfg in self.config.items():
                    try:
                        if (cfg.get("self", "name") == section and
                                component not in self.launched):
                            # We found dependency.
                            arg = [component]
                            # NOTE(review): cfg.defaults() returns a dict;
                            # iterating it yields keys, so this 2-tuple unpack
                            # likely needs .items() — any resulting exception
                            # is swallowed and logged by the except below.
                            for k, v in cfg.defaults():
                                if v is None:
                                    arg.append(k)
                                else:
                                    arg.append("%s=%s" % (k, v))
                            self.launch_hierarchical(arg)
                            self.launched.append(component)
                    except Exception as e:
                        log.info(str(e))
                        continue
        except:
            pass
        # Now all known dependencies must be loaded.
        # We can load our target.
        log.info("Launching %s" % components[0])
        self.launch_single(argv)

    def _check_components(self, components):
        """Validate that exactly one, not-yet-launched component was given."""
        if len(components) != 1:
            raise ValueError("The number of component must be 1, not %d" %
                             len(components))
        if components[0] in self.launched:
            raise ValueError("%s is already launched. Cancel." % components[0])

    def _read_config(self):
        """
        Read config files from CONFIG. Files must be
        CONFIG[i]/*.cfg.
        """
        def _raise(x):
            raise x
        for directory in CONFIG:
            try:
                for dirname, dirnames, filenames in os.walk(
                        directory, onerror=_raise):
                    # Do not descend into subdirectories.
                    del dirnames[:]
                    for filename in filenames:
                        if not filename.endswith(".cfg"):
                            continue
                        cp = CaseConfigParser(allow_no_value=True)
                        log.info("Read config: %s" %
                                 cp.read(os.path.join(dirname, filename)))
                        self.config[filename.replace(".cfg", "")] = cp
            except Exception as e:
                pass

    def _set_listeners(self):
        """
        Subscribe to events from all config files.
        """
        for cp in self.config.values():
            for section in cp.sections():
                if section == "self":
                    continue
                try:
                    # Section names are Python expressions naming an event
                    # source (e.g. "core.openflow").
                    event_source = eval(section)
                    self.event_queue[section] = {}
                    self.my_handlers[section] = {}
                    for event_name, module in cp.items(section):
                        self._import_and_listen(section, event_source,
                                                event_name, module)
                except Exception as e:
                    log.debug(str(e))

    def _set_listeners_to_source(self, component):
        """
        Given a module, listen to instances from this module.
        Module -> name from config.
        """
        # We must take the name of component instance.
        # Then look through configs to find out
        # who wants its messages.
        cp = self.config.get(component)
        section = None
        try:
            section = cp.get("self", "name")
        except:
            log.info("Unable to find instance of %s" % component)
            # We don't know how to find component instance.
            return
        for cp in self.config.values():
            try:
                event_source = eval(section)
                if not cp.has_section(section):
                    continue
                # Can these two be already set?
                if section not in self.event_queue:
                    self.event_queue[section] = {}
                if section not in self.my_handlers:
                    self.my_handlers[section] = {}
                for event_name, module in cp.items(section):
                    self._import_and_listen(section, event_source,
                                            event_name, module)
            except Exception as e:
                log.info(str(e))

    def _import_and_listen(self, section, event_source, event_name, module):
        """
        Import event class named "event_name" from "module".
        Subscribe to this event of event_source object.
        """
        try:
            # Maybe we are already listening?
            h = self.my_handlers[section][event_name]
            q = self.event_queue[section][event_name]
            if h is not None and q is not None:
                return
        except:
            pass
        _temp = __import__(module, fromlist=[event_name])
        # Inject the event class into module globals so eval(event_name)
        # resolves in the other helpers.
        globals()[event_name] = _temp.__dict__[event_name]
        h = partial(self._enqueue_event, section, event_name)
        self.my_handlers[section][event_name] = h
        self.event_queue[section][event_name] = []
        event_source.addListener(eval(event_name), h,
                                 priority=HIGHEST_PRIORITY)

    def _enqueue_event(self, section, event_name, event):
        """
        If halt -> tmp_queue and return EventHalt.
        Otherwise -> event_queue.
        """
        # TODO: Maximum queue size.
        if event not in self.event_queue[section][event_name]:
            if self.halt_events:
                self.tmp_queue[section][event_name].append(event)
                # Stop propagation to lower-priority handlers.
                return EventHalt
            self.event_queue[section][event_name].append(event)

    def _grab_handlers(self, component):
        """
        Copy and return the handlers from events wanted be component.
        """
        handlers = {}
        cp = self.config.get(component)
        if cp is None:
            return handlers
        for section in cp.sections():
            if section == "self":
                continue
            try:
                event_source = eval(section)
                handlers[section] = {}
                for event_name, module in cp.items(section):
                    # Shallow-copy the handler list so later mutation of the
                    # live list does not change our snapshot.
                    handlers[section][event_name] = event_source.\
                        _eventMixin_handlers.get(eval(event_name), [])[:]
            except:
                pass
        return handlers

    def _subtract_handlers(self, new, old):
        """
        return new[i][j] \ old[i][j]. (unique handlers of new)
        If the subtraction result is not empty (handlers changed)
        we explicitly add our handlers that were removed.
        """
        try:
            result = {}
            for section in new:
                res = {}
                for event_name in new[section]:
                    # Keep handlers not present before launch, always
                    # retaining our own recording handler (h[1]).
                    handlers = [h for h in new[section][event_name]
                                if h not in old[section][event_name]
                                or h[1] != self.my_handlers[section][
                                    event_name]]
                    # We are the only listener. Nothing changed -> ignore.
                    if (len(handlers) == 0 or (len(handlers) == 1 and
                            handlers[0][1] == self.my_handlers[
                                section][event_name])):
                        continue
                    res[event_name] = handlers
                if len(res) > 0:
                    result[section] = res
            return result
        except Exception as e:
            log.error(str(e))
            return {}

    def _set_handlers(self, handlers):
        """
        Change the handlers to given structure.
        handlers = {object_name: {event_name: [handler]}}
        """
        for section in handlers:
            try:
                event_source = eval(section)
                for event_name, hlist in handlers[section].items():
                    event_source._eventMixin_handlers[eval(event_name)] = hlist
            except:
                pass
        return True

    def _raise_events(self, handlers, event_queue):
        """
        Raise events for handlers using event_queue.
        """
        for section in handlers:
            try:
                event_source = eval(section)
                for event_name in handlers[section]:
                    for event in event_queue[section][event_name]:
                        event_source.raiseEventNoErrors(event)
            except:
                pass
        return True

    def _preprocess(self, argv):
        """
        We allow strings w/o list.
        """
        if isinstance(argv, basestring):
            return [argv]
        return argv
# This function is stolen from pox/boot.py
def launch_all (argv):
    """Parse *argv* into components/arguments, import each component module
    and invoke its launch function.

    Returns False on any error, True otherwise.  This is Python 2 code
    (iteritems, func_defaults) lifted from pox/boot.py.
    """
    component_order = []
    components = {}
    # Looks like we don't need pox args here.
    curargs = {}
    for arg in argv:
        if not arg.startswith("-"):
            if arg not in components:
                components[arg] = []
            curargs = {}
            components[arg].append(curargs)
            component_order.append(arg)
        else:
            # "--some-flag=value" -> params["some_flag"] = "value"
            arg = arg.lstrip("-").split("=", 1)
            arg[0] = arg[0].replace("-", "_")
            if len(arg) == 1: arg.append(True)
            curargs[arg[0]] = arg[1]
    modules = _do_imports(n.split(':')[0] for n in component_order)
    if modules is False:
        return False
    inst = {}
    for name in component_order:
        cname = name
        inst[name] = inst.get(name, -1) + 1
        params = components[name][inst[name]]
        # "module:function" selects an alternative entry point.
        name = name.split(":", 1)
        launch = name[1] if len(name) == 2 else "launch"
        name = name[0]
        name,module,members = modules[name]
        if launch in members:
            f = members[launch]
            # We explicitly test for a function and not an arbitrary callable
            if type(f) is not types.FunctionType:
                print(launch, "in", name, "isn't a function!")
                return False
            if getattr(f, '_pox_eval_args', False):
                import ast
                for k,v in params.items():
                    if isinstance(v, str):
                        try:
                            params[k] = ast.literal_eval(v)
                        except:
                            # Leave it as a string
                            pass
            multi = False
            if f.__code__.co_argcount > 0:
                #FIXME: This code doesn't look quite right to me and may be broken
                #       in some cases. We should refactor to use inspect anyway,
                #       which should hopefully just fix it.
                if (f.__code__.co_varnames[f.__code__.co_argcount-1]
                        == '__INSTANCE__'):
                    # It's a multi-instance-aware component.
                    multi = True
                    # Special __INSTANCE__ paramter gets passed a tuple with:
                    # 1. The number of this instance (0...n-1)
                    # 2. The total number of instances for this module
                    # 3. True if this is the last instance, False otherwise
                    # The last is just a comparison between #1 and #2, but it's
                    # convenient.
                    params['__INSTANCE__'] = (inst[cname], len(components[cname]),
                        inst[cname] + 1 == len(components[cname]))
            if multi == False and len(components[cname]) != 1:
                print(name, "does not accept multiple instances")
                return False
            try:
                if f(**params) is False:
                    # Abort startup
                    return False
            except TypeError as exc:
                instText = ''
                if inst[cname] > 0:
                    instText = "instance {0} of ".format(inst[cname] + 1)
                print("Error executing {2}{0}.{1}:".format(name,launch,instText))
                if inspect.currentframe() is sys.exc_info()[2].tb_frame:
                    # Error is with calling the function
                    # Try to give some useful feedback
                    # if _options.verbose:
                    #    traceback.print_exc()
                    # else:
                    exc = sys.exc_info()[0:2]
                    print(''.join(traceback.format_exception_only(*exc)), end='')
                    print()
                    EMPTY = "<Unspecified>"
                    code = f.__code__
                    argcount = code.co_argcount
                    argnames = code.co_varnames[:argcount]
                    defaults = list((f.func_defaults) or [])
                    defaults = [EMPTY] * (argcount - len(defaults)) + defaults
                    args = {}
                    for n, a in enumerate(argnames):
                        args[a] = [EMPTY,EMPTY]
                        if n < len(defaults):
                            args[a][0] = defaults[n]
                        if a in params:
                            args[a][1] = params[a]
                            del params[a]
                    if '__INSTANCE__' in args:
                        del args['__INSTANCE__']
                    if f.__doc__ is not None:
                        print("Documentation for {0}:".format(name))
                        doc = f.__doc__.split("\n")
                        #TODO: only strip the same leading space as was on the first
                        #      line
                        doc = map(str.strip, doc)
                        print('',("\n ".join(doc)).strip())
                    #print(params)
                    #print(args)
                    print("Parameters for {0}:".format(name))
                    if len(args) == 0:
                        print(" None.")
                    else:
                        print(" {0:25} {1:25} {2:25}".format("Name", "Default",
                                                             "Active"))
                        print(" {0:25} {0:25} {0:25}".format("-" * 15))
                        for k,v in args.iteritems():
                            print(" {0:25} {1:25} {2:25}".format(k,str(v[0]),
                                  str(v[1] if v[1] is not EMPTY else v[0])))
                    if len(params):
                        print("This component does not have a parameter named "
                              + "'{0}'.".format(params.keys()[0]))
                        return False
                    missing = [k for k,x in args.iteritems()
                               if x[1] is EMPTY and x[0] is EMPTY]
                    if len(missing):
                        print("You must specify a value for the '{0}' "
                              "parameter.".format(missing[0]))
                        return False
                    return False
                else:
                    # Error is inside the function
                    raise
        # NOTE(review): 'is not "launch"' compares identity and only works
        # because CPython interns short string literals; '!=' would be safe.
        elif len(params) > 0 or launch is not "launch":
            print("Module %s has no %s(), but it was specified or passed " \
                  "arguments" % (name, launch))
            return False
    return True
| 6,225 | 10,757 | 94 |
b9917b0364f6a2e7a4445c0bc9b7221c3b487eb7 | 2,771 | py | Python | wagtailnest/views.py | ionata/wagtailnest | da903db0967e6f3b87db7213c9d94c0ec98048f5 | [
"BSD-3-Clause"
] | 1 | 2018-04-11T23:47:33.000Z | 2018-04-11T23:47:33.000Z | wagtailnest/views.py | ionata/wagtailnest | da903db0967e6f3b87db7213c9d94c0ec98048f5 | [
"BSD-3-Clause"
] | 12 | 2017-07-18T01:52:06.000Z | 2021-09-08T00:15:37.000Z | wagtailnest/views.py | ionata/wagtailnest | da903db0967e6f3b87db7213c9d94c0ec98048f5 | [
"BSD-3-Clause"
] | 1 | 2017-05-05T06:18:44.000Z | 2017-05-05T06:18:44.000Z | from django.views.generic.base import RedirectView
from rest_framework.generics import RetrieveAPIView
from rest_framework.settings import api_settings
from wagtail.core.models import Page
from wagtail.core.views import serve as serve_page
from wagtail.documents.views.serve import serve as serve_doc
from wagtail.images.views.serve import ServeView, generate_signature
from wagtailnest.utils import (get_image_filter_spec, get_root_relative_url,
import_setting)
_permissions = {
name: import_setting(
'{}_PERMISSION_CLASSES'.format(name),
api_settings.DEFAULT_PERMISSION_CLASSES)
for name in ['PAGE', 'DOCUMENT', 'IMAGE']
}
class DraftRedirectView(RedirectView):
"""View that redirects to the correct URL for a draft."""
# pylint: disable=unused-argument
class RevisionRedirectView(RedirectView):
"""View that redirects to the correct URL for a revision."""
# pylint: disable=unused-argument
class PageServeView(RetrieveAPIView):
"""View which serves a rendered page."""
permission_classes = _permissions['PAGE']
# pylint: disable=no-self-use,arguments-differ
class DocumentServeView(RetrieveAPIView):
"""View which serves a document."""
permission_classes = _permissions['DOCUMENT']
# pylint: disable=no-self-use,arguments-differ
class ImageServeView(RetrieveAPIView):
"""View which serves an image."""
permission_classes = _permissions['IMAGE']
| 36.460526 | 76 | 0.693973 | from django.views.generic.base import RedirectView
from rest_framework.generics import RetrieveAPIView
from rest_framework.settings import api_settings
from wagtail.core.models import Page
from wagtail.core.views import serve as serve_page
from wagtail.documents.views.serve import serve as serve_doc
from wagtail.images.views.serve import ServeView, generate_signature
from wagtailnest.utils import (get_image_filter_spec, get_root_relative_url,
import_setting)
# Per-resource DRF permission classes, resolved from the
# '<NAME>_PERMISSION_CLASSES' settings; each falls back to DRF's
# DEFAULT_PERMISSION_CLASSES when the setting is absent.
_permissions = {
    name: import_setting(
        '{}_PERMISSION_CLASSES'.format(name),
        api_settings.DEFAULT_PERMISSION_CLASSES)
    for name in ['PAGE', 'DOCUMENT', 'IMAGE']
}
class DraftRedirectView(RedirectView):
    """View that redirects to the correct URL for a draft."""
    # pylint: disable=unused-argument
    def get_redirect_url(self, *args, **kwargs):
        # Fall back to the site root when the page pk does not resolve.
        matched = Page.objects.filter(pk=self.kwargs.get('pk', None)).first()
        if matched is None:
            url_path = '/'
        else:
            url_path = matched.specific.url_path
        return '{}?preview=True'.format(get_root_relative_url(url_path))
class RevisionRedirectView(RedirectView):
    """View that redirects to the correct URL for a revision."""
    # pylint: disable=unused-argument
    def get_redirect_url(self, *args, **kwargs):
        revision_pk = self.kwargs.get('rpk', '')
        # Fall back to the site root when the page pk does not resolve.
        matched = Page.objects.filter(pk=self.kwargs.get('pk', None)).first()
        if matched is None:
            url_path = '/'
        else:
            url_path = matched.specific.url_path
        return '{}?revision={}'.format(
            get_root_relative_url(url_path), revision_pk)
class PageServeView(RetrieveAPIView):
    """View which serves a rendered page."""
    permission_classes = _permissions['PAGE']

    # pylint: disable=no-self-use,arguments-differ
    def get(self, request, path):
        # Delegate rendering to Wagtail's page-serving machinery.
        return serve_page(request, path)
class DocumentServeView(RetrieveAPIView):
    """View which serves a document."""
    permission_classes = _permissions['DOCUMENT']

    # pylint: disable=no-self-use,arguments-differ
    def get(self, request, document_id, document_filename=None):
        # Delegate to Wagtail's document-serving view.
        return serve_doc(request, document_id, document_filename)
class ImageServeView(RetrieveAPIView):
    """View which serves an image."""
    permission_classes = _permissions['IMAGE']

    def get(self, request, *args, **kwargs): # pylint: disable=no-self-use
        # When a pk is given, build the (signature, pk, filter_spec) triple
        # that Wagtail's dynamic-image ServeView expects; otherwise forward
        # the positional args untouched.
        pk = kwargs.pop('pk', None)
        if pk is not None:
            request.GET = request.GET.copy() # QueryDict is immutable
            filter_spec = get_image_filter_spec(
                request.GET.get('filter_spec', None))
            # Remove filter_spec so it is not double-processed downstream.
            request.GET.pop('filter_spec', None)
            signature = generate_signature(pk, filter_spec)
            args = (signature, pk, filter_spec)
        return ServeView.as_view()(request, *args)
| 1,170 | 0 | 131 |
ed915cdbf0499255e73830ce7fa7cc05fd04e05b | 6,126 | py | Python | cs15211/CheapestFlightsWithinKStops.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | 1 | 2021-07-05T01:53:30.000Z | 2021-07-05T01:53:30.000Z | cs15211/CheapestFlightsWithinKStops.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | null | null | null | cs15211/CheapestFlightsWithinKStops.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | 1 | 2018-01-08T07:14:08.000Z | 2018-01-08T07:14:08.000Z | __source__ = 'https://leetcode.com/problems/cheapest-flights-within-k-stops/'
# Time: O()
# Space: O()
#
# Description: Leetcode # 787. Cheapest Flights Within K Stops
#
# There are n cities connected by m flights.
# Each fight starts from city u and arrives at v with a price w.
#
# Now given all the cities and flights,
# together with starting city src and the destination dst,
# your task is to find the cheapest price from src to dst with up to k stops.
# If there is no such route, output -1.
#
# Example 1:
# Input:
# n = 3, edges = [[0,1,100],[1,2,100],[0,2,500]]
# src = 0, dst = 2, k = 1
# Output: 200
# Explanation:
# The graph looks like this:
#
#
# The cheapest price from city 0 to city 2 with at most 1 stop costs 200, as marked red in the picture.
# Example 2:
# Input:
# n = 3, edges = [[0,1,100],[1,2,100],[0,2,500]]
# src = 0, dst = 2, k = 0
# Output: 500
# Explanation:
# The graph looks like this:
#
#
# The cheapest price from city 0 to city 2 with at most 0 stop costs 500, as marked blue in the picture.
# Note:
#
# The number of nodes n will be in range [1, 100], with nodes labeled from 0 to n - 1.
# The size of flights will be in range [0, n * (n - 1) / 2].
# The format of each flight will be (src, dst, price).
# The price of each flight will be in the range [1, 10000].
# k is in the range of [0, n - 1].
# There will not be any duplicated flights or self cycles.
#
import unittest
import collections
# 73,45% 44ms
from heapq import *
#96ms 22.03%
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/cheapest-flights-within-k-stops/solution/
# Approach #1: Maintain Cheapest To Target [Accepted]
# Complexity Analysis
# Time Complexity: O(E * K), where E is the length of flights.
# Space Complexity: O(n), the space used to store dis and pre.
# 6ms 100%
class Solution {
public int findCheapestPrice(int n, int[][] flights, int src, int dst, int K) {
int[][] dist = new int[2][n];
int INF = Integer.MAX_VALUE / 2;
Arrays.fill(dist[0], INF);
Arrays.fill(dist[1], INF);
dist[0][src] = dist[1][src] = 0;
for (int i = 0; i <= K; ++i)
for (int[] edge: flights)
dist[i&1][edge[1]] = Math.min(dist[i&1][edge[1]], dist[~i&1][edge[0]] + edge[2]);
return dist[K&1][dst] < INF ? dist[K&1][dst] : -1;
}
}
#
# Approach #2: Dijkstra's [Accepted]
# Complexity Analysis
# Time Complexity: O(E+nlogn), where E is the total number of flights.
# Space Complexity: O(n), the size of the heap.
#
# 4ms 100%
class Solution {
private class City implements Comparable<City>{
int id;
int costFromSrc;
int stopFromSrc;
public City(int id, int costFromSrc, int stopFromSrc){
this.id = id;
this.costFromSrc = costFromSrc;
this.stopFromSrc = stopFromSrc;
}
public boolean equals(City c){
if (c instanceof City) return this.id == c.id;
return false;
}
public int compareTo(City c){
return this.costFromSrc - c.costFromSrc;
}
}
public int findCheapestPrice(int n, int[][] flights, int src, int dst, int K) {
int[][] srcToDst = new int[n][n];
for (int i = 0; i < flights.length; i++) {
srcToDst[flights[i][0]][flights[i][1]] = flights[i][2];
}
PriorityQueue<City> minHeap = new PriorityQueue();
minHeap.offer(new City(src,0,0));
int[] cost = new int[n];
Arrays.fill(cost, Integer.MAX_VALUE);
cost[src] = 0;
int[] stop = new int[n];
Arrays.fill(stop, Integer.MAX_VALUE);
stop[src] = 0;
while(!minHeap.isEmpty()){
City curCity = minHeap.poll();
if (curCity.id == dst) return curCity.costFromSrc;
if (curCity.stopFromSrc == K + 1) continue;
int[] nexts = srcToDst[curCity.id];
for (int i = 0; i < n; i++) {
if (nexts[i] != 0) {
int newCost = curCity.costFromSrc + nexts[i];
int newStop = curCity.stopFromSrc + 1;
if (newCost < cost[i]) {
minHeap.offer(new City(i, newCost, newStop));
cost[i] = newCost;
} else if (newStop < stop[i]){
minHeap.offer(new City(i, newCost, newStop));
stop[i] = newStop;
}
}
}
}
return cost[dst] == Integer.MAX_VALUE? -1:cost[dst];
}
}
''' | 31.096447 | 104 | 0.550278 | __source__ = 'https://leetcode.com/problems/cheapest-flights-within-k-stops/'
# Time: O()
# Space: O()
#
# Description: Leetcode # 787. Cheapest Flights Within K Stops
#
# There are n cities connected by m flights.
# Each fight starts from city u and arrives at v with a price w.
#
# Now given all the cities and flights,
# together with starting city src and the destination dst,
# your task is to find the cheapest price from src to dst with up to k stops.
# If there is no such route, output -1.
#
# Example 1:
# Input:
# n = 3, edges = [[0,1,100],[1,2,100],[0,2,500]]
# src = 0, dst = 2, k = 1
# Output: 200
# Explanation:
# The graph looks like this:
#
#
# The cheapest price from city 0 to city 2 with at most 1 stop costs 200, as marked red in the picture.
# Example 2:
# Input:
# n = 3, edges = [[0,1,100],[1,2,100],[0,2,500]]
# src = 0, dst = 2, k = 0
# Output: 500
# Explanation:
# The graph looks like this:
#
#
# The cheapest price from city 0 to city 2 with at most 0 stop costs 500, as marked blue in the picture.
# Note:
#
# The number of nodes n will be in range [1, 100], with nodes labeled from 0 to n - 1.
# The size of flights will be in range [0, n * (n - 1) / 2].
# The format of each flight will be (src, dst, price).
# The price of each flight will be in the range [1, 10000].
# k is in the range of [0, n - 1].
# There will not be any duplicated flights or self cycles.
#
import unittest
import collections
# 73,45% 44ms
from heapq import *
class SolutionDijkstra(object):
    def findCheapestPrice(self, n, flights, src, dst, K):
        """
        Dijkstra over (cost, city, edges-used) states.

        :type n: int
        :type flights: List[List[int]]
        :type src: int
        :type dst: int
        :type K: int
        :rtype: int

        Bug fix: the previous version kept a global ``visited`` set, which
        could block the cheaper-path-with-fewer-stops needed to reach dst
        within the stop limit.  We instead prune per city on the number of
        edges used: a popped state is skipped only if this city was already
        settled with the same or fewer edges (and, by heap order, at no
        greater cost), which dominates the current state.
        """
        graph = collections.defaultdict(list)
        for u, v, w in flights:
            graph[u].append((v, w))
        # Heap entries: (total price so far, city, edges taken so far).
        heap = [(0, src, 0)]
        fewest_edges = {}  # city -> fewest edges seen when settled
        while heap:
            price, city, edges = heappop(heap)
            if city == dst:
                # Heap order guarantees this is the cheapest feasible price.
                return price
            # K stops allow at most K+1 edges, so expansion is only legal
            # while edges <= K; also skip dominated states.
            if edges > K or fewest_edges.get(city, K + 2) <= edges:
                continue
            fewest_edges[city] = edges
            for nxt, w in graph[city]:
                heappush(heap, (price + w, nxt, edges + 1))
        return -1
#96ms 22.03%
class Solution2(object):
    def findCheapestPrice(self, n, flights, src, dst, K):
        """
        Bellman-Ford with two rolling rows, relaxed K+1 times.

        :type n: int
        :type flights: List[List[int]]
        :type src: int
        :type dst: int
        :type K: int
        :rtype: int
        """
        # Compatibility fix: range() instead of the Python-2-only xrange();
        # range works on both Python 2 and 3 (the lists here are tiny).
        dist = [[float('inf')] * n for _ in range(2)]
        dist[0][src] = dist[1][src] = 0
        for i in range(K + 1):
            for u, v, w in flights:
                # Relax into the current row from the previous row.
                dist[i & 1][v] = min(dist[i & 1][v], dist[~i & 1][u] + w)
        return dist[K & 1][dst] if dist[K & 1][dst] < float('inf') else -1
class TestMethods(unittest.TestCase):
    """Placeholder test case; no real assertions yet."""
    def test_Local(self):
        # Trivial sanity check only.
        self.assertEqual(1, 1)
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/cheapest-flights-within-k-stops/solution/
# Approach #1: Maintain Cheapest To Target [Accepted]
# Complexity Analysis
# Time Complexity: O(E * K), where E is the length of flights.
# Space Complexity: O(n), the space used to store dis and pre.
# 6ms 100%
class Solution {
public int findCheapestPrice(int n, int[][] flights, int src, int dst, int K) {
int[][] dist = new int[2][n];
int INF = Integer.MAX_VALUE / 2;
Arrays.fill(dist[0], INF);
Arrays.fill(dist[1], INF);
dist[0][src] = dist[1][src] = 0;
for (int i = 0; i <= K; ++i)
for (int[] edge: flights)
dist[i&1][edge[1]] = Math.min(dist[i&1][edge[1]], dist[~i&1][edge[0]] + edge[2]);
return dist[K&1][dst] < INF ? dist[K&1][dst] : -1;
}
}
#
# Approach #2: Dijkstra's [Accepted]
# Complexity Analysis
# Time Complexity: O(E+nlogn), where E is the total number of flights.
# Space Complexity: O(n), the size of the heap.
#
# 4ms 100%
class Solution {
private class City implements Comparable<City>{
int id;
int costFromSrc;
int stopFromSrc;
public City(int id, int costFromSrc, int stopFromSrc){
this.id = id;
this.costFromSrc = costFromSrc;
this.stopFromSrc = stopFromSrc;
}
public boolean equals(City c){
if (c instanceof City) return this.id == c.id;
return false;
}
public int compareTo(City c){
return this.costFromSrc - c.costFromSrc;
}
}
public int findCheapestPrice(int n, int[][] flights, int src, int dst, int K) {
int[][] srcToDst = new int[n][n];
for (int i = 0; i < flights.length; i++) {
srcToDst[flights[i][0]][flights[i][1]] = flights[i][2];
}
PriorityQueue<City> minHeap = new PriorityQueue();
minHeap.offer(new City(src,0,0));
int[] cost = new int[n];
Arrays.fill(cost, Integer.MAX_VALUE);
cost[src] = 0;
int[] stop = new int[n];
Arrays.fill(stop, Integer.MAX_VALUE);
stop[src] = 0;
while(!minHeap.isEmpty()){
City curCity = minHeap.poll();
if (curCity.id == dst) return curCity.costFromSrc;
if (curCity.stopFromSrc == K + 1) continue;
int[] nexts = srcToDst[curCity.id];
for (int i = 0; i < n; i++) {
if (nexts[i] != 0) {
int newCost = curCity.costFromSrc + nexts[i];
int newStop = curCity.stopFromSrc + 1;
if (newCost < cost[i]) {
minHeap.offer(new City(i, newCost, newStop));
cost[i] = newCost;
} else if (newStop < stop[i]){
minHeap.offer(new City(i, newCost, newStop));
stop[i] = newStop;
}
}
}
}
return cost[dst] == Integer.MAX_VALUE? -1:cost[dst];
}
}
''' | 31 | 1,398 | 93 |
c8bd77d23986fec7b1ef68b2bccc25888a90ed92 | 1,093 | py | Python | examples/examples-by-ml-library/libraries/raw_file_example.py | cdknorow/modelstore | f08839478432b89e828a8dcb41adf27b0e3aa66b | [
"Apache-2.0"
] | 151 | 2020-09-20T14:53:06.000Z | 2022-03-22T20:49:06.000Z | examples/examples-by-ml-library/libraries/raw_file_example.py | cdknorow/modelstore | f08839478432b89e828a8dcb41adf27b0e3aa66b | [
"Apache-2.0"
] | 29 | 2020-12-07T16:27:39.000Z | 2022-03-30T22:11:17.000Z | examples/examples-by-ml-library/libraries/raw_file_example.py | cdknorow/modelstore | f08839478432b89e828a8dcb41adf27b0e3aa66b | [
"Apache-2.0"
] | 4 | 2022-01-10T17:42:02.000Z | 2022-03-07T14:14:43.000Z | import json
import os
import tempfile
from modelstore.model_store import ModelStore
_DOMAIN_NAME = "example-model-file"
| 32.147059 | 79 | 0.709973 | import json
import os
import tempfile
from modelstore.model_store import ModelStore
_DOMAIN_NAME = "example-model-file"
def _train_and_save_example_model(tmp_dir: str) -> str:
# Create a file with a "model" -- in this case it is a json file,
# but modelstore can handle any file type
model_path = os.path.join(tmp_dir, "model.json")
with open(model_path, "w") as out:
out.write(json.dumps({"weights": [0.1, 0.2, 0.3]}))
return model_path
def train_and_upload(modelstore: ModelStore) -> dict:
    """Save an example "model" into a temp dir and upload it to the store."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        saved_model = _train_and_save_example_model(tmp_dir)
        # Upload the model to the model store
        print(f'⤴️ Uploading the saved model to the "{_DOMAIN_NAME}" domain.')
        return modelstore.upload(_DOMAIN_NAME, model=saved_model)
def load_and_test(modelstore: ModelStore, model_id: str):
    """Intentional no-op for this example."""
    # Loading the model back into memory is not supported
    # for models that have been saved to disk manually
    pass
| 903 | 0 | 69 |
64a70685034132430735b61b79a01c637c745705 | 5,472 | py | Python | client/read_data.py | aitmlouk/FEDn-client-FedQAS-tf | 278a1c402b313d22a8a060a40e434a8c59dbe93b | [
"Apache-2.0"
] | 1 | 2022-02-16T05:56:43.000Z | 2022-02-16T05:56:43.000Z | client/read_data.py | aitmlouk/FEDn-client-FedQAS-tf | 278a1c402b313d22a8a060a40e434a8c59dbe93b | [
"Apache-2.0"
] | null | null | null | client/read_data.py | aitmlouk/FEDn-client-FedQAS-tf | 278a1c402b313d22a8a060a40e434a8c59dbe93b | [
"Apache-2.0"
] | null | null | null | import json
import os
import numpy as np
from tokenizers import BertWordPieceTokenizer
from transformers import BertTokenizer
class SquadExample:
"""
Process SQUAD dataset
"""
def read_data(filename, settings):
    """
    Helper function to read and preprocess SQUAD data for training and validation with Keras.
    :return: test, training data or validation data and nbr of examples
    """
    # Download the slow HuggingFace tokenizer and persist its vocab locally.
    slow_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    save_path = "bert_base_uncased/"
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    slow_tokenizer.save_pretrained(save_path)
    # Load the fast tokenizer from saved file
    tokenizer = BertWordPieceTokenizer("bert_base_uncased/vocab.txt", lowercase=True)
    with open(filename) as f:
        raw_train_data = json.load(f)
    examples = create_squad_examples(raw_train_data, tokenizer, settings)
    x_train, y_train = create_inputs_targets(examples)
    return x_train, y_train, examples
| 36.724832 | 119 | 0.623904 | import json
import os
import numpy as np
from tokenizers import BertWordPieceTokenizer
from transformers import BertTokenizer
class SquadExample:
    """
    One SQuAD question/context/answer record, plus the model inputs
    derived from it for BERT-style span prediction.
    """
    def __init__(self, question, context, start_char_idx, answer_text, all_answers):
        self.question = question
        self.context = context
        self.start_char_idx = start_char_idx  # answer start offset, in characters
        self.answer_text = answer_text
        self.all_answers = all_answers
        self.skip = False  # set True by preprocess() when the example is unusable
        self.start_token_idx = -1
        self.end_token_idx = -1

    def preprocess(self, tokenizer, settings):
        """Tokenize the example and build the model inputs.

        Sets ``input_ids``, ``token_type_ids``, ``attention_mask`` and the
        answer's start/end token indices.  Sets ``self.skip`` instead when
        the answer falls outside the context or the combined sequence
        exceeds ``settings['max_seq_length']``.
        """
        context = self.context
        question = self.question
        answer_text = self.answer_text
        start_char_idx = self.start_char_idx
        # Clean context, answer and question (collapse all whitespace runs)
        context = " ".join(str(context).split())
        question = " ".join(str(question).split())
        answer = " ".join(str(answer_text).split())
        # Tokenize context
        tokenized_context = tokenizer.encode(context)
        # Tokenize question
        tokenized_question = tokenizer.encode(question)
        if self.answer_text is not None:
            # Find end character index of answer in context
            end_char_idx = start_char_idx + len(answer)
            if end_char_idx >= len(context):
                self.skip = True
                return
            # Mark the character indexes in context that are in answer
            is_char_in_ans = [0] * len(context)
            for idx in range(start_char_idx, end_char_idx):
                is_char_in_ans[idx] = 1
            # Find tokens that were created from answer characters
            ans_token_idx = []
            for idx, (start, end) in enumerate(tokenized_context.offsets):
                if sum(is_char_in_ans[start:end]) > 0:
                    ans_token_idx.append(idx)
            if len(ans_token_idx) == 0:
                self.skip = True
                return
            # Find start and end token index for tokens from answer
            self.start_token_idx = ans_token_idx[0]
            self.end_token_idx = ans_token_idx[-1]
        # Create inputs: context tokens followed by the question tokens with
        # the question's first special token dropped via the [1:] slice.
        input_ids = tokenized_context.ids + tokenized_question.ids[1:]
        # Segment ids: 0 for context tokens, 1 for question tokens.
        token_type_ids = [0] * len(tokenized_context.ids) + [1] * len(
            tokenized_question.ids[1:]
        )
        attention_mask = [1] * len(input_ids)
        # Pad and create attention masks.
        # Skip if truncation is needed
        padding_length = settings['max_seq_length'] - len(input_ids)
        if padding_length > 0:  # pad
            input_ids = input_ids + ([0] * padding_length)
            attention_mask = attention_mask + ([0] * padding_length)
            token_type_ids = token_type_ids + ([0] * padding_length)
        elif padding_length < 0:  # skip
            self.skip = True
            return
        self.input_ids = input_ids
        self.token_type_ids = token_type_ids
        self.attention_mask = attention_mask
        # Keep the context token -> character-offset map for answer decoding.
        self.context_token_to_char = tokenized_context.offsets
def create_squad_examples(raw_data, tokenizer, settings):
    """Build and preprocess a SquadExample for every QA pair in raw SQuAD JSON.

    :param raw_data: parsed SQuAD JSON (top-level "data" list)
    :param tokenizer: fast tokenizer passed through to SquadExample.preprocess
    :param settings: preprocessing options passed through to preprocess
    :return: list of SquadExample objects (including ones flagged as skip)
    """
    examples = []
    for article in raw_data["data"]:
        for paragraph in article["paragraphs"]:
            passage = paragraph["context"]
            for qa in paragraph["qas"]:
                query = qa["question"]
                if "answers" in qa:
                    first_answer = qa["answers"][0]
                    example = SquadExample(
                        query,
                        passage,
                        first_answer["answer_start"],
                        first_answer["text"],
                        [ans["text"] for ans in qa["answers"]],
                    )
                else:
                    # Unlabeled example (e.g. test data): no answer span known.
                    example = SquadExample(
                        query, passage,
                        start_char_idx=None, answer_text=None, all_answers=None,
                    )
                example.preprocess(tokenizer, settings)
                examples.append(example)
    return examples
def create_inputs_targets(squad_examples):
    """
    Pack preprocessed SquadExample objects into model input/target arrays.

    Examples flagged with ``skip`` are dropped.

    :param squad_examples: iterable of preprocessed SquadExample objects
    :return: (x, y) where x = [input_ids, token_type_ids, attention_mask]
             and y = [start_token_idx, end_token_idx], all numpy arrays
    """
    dataset_dict = {
        "input_ids": [],
        "token_type_ids": [],
        "attention_mask": [],
        "start_token_idx": [],
        "end_token_idx": [],
    }
    for item in squad_examples:
        if not item.skip:  # idiomatic truthiness test (was `== False`)
            for key in dataset_dict:
                dataset_dict[key].append(getattr(item, key))
    # Convert each accumulated list to a numpy array in one pass.
    dataset_dict = {key: np.array(values) for key, values in dataset_dict.items()}
    x = [
        dataset_dict["input_ids"],
        dataset_dict["token_type_ids"],
        dataset_dict["attention_mask"],
    ]
    y = [dataset_dict["start_token_idx"], dataset_dict["end_token_idx"]]
    return x, y
def read_data(filename, settings):
    """
    Helper function to read and preprocess SQUAD data for training and validation with Keras.

    :param filename: path to a SQuAD-format JSON file
    :param settings: preprocessing options dict (uses 'max_seq_length' downstream)
    :return: test, training data or validation data and nbr of examples
    """
    # Fetch the slow HF tokenizer once, only to export its vocab file locally.
    slow_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    save_path = "bert_base_uncased/"
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    slow_tokenizer.save_pretrained(save_path)
    # Load the fast tokenizer from saved file
    tokenizer = BertWordPieceTokenizer("bert_base_uncased/vocab.txt", lowercase=True)
    with open(filename) as f: raw_train_data = json.load(f)
    train_squad_examples = create_squad_examples(raw_train_data, tokenizer, settings)
    x_train, y_train = create_inputs_targets(train_squad_examples)
    return x_train, y_train, train_squad_examples
| 4,340 | 0 | 100 |
00a1b50f3dd846088071791349bcd247d295c762 | 9,000 | py | Python | data/common.py | Khanhnn00/blind-image-sr | 42bba4894ac9ee6595d2fff9b25a7678f323ad87 | [
"Apache-2.0"
] | null | null | null | data/common.py | Khanhnn00/blind-image-sr | 42bba4894ac9ee6595d2fff9b25a7678f323ad87 | [
"Apache-2.0"
] | null | null | null | data/common.py | Khanhnn00/blind-image-sr | 42bba4894ac9ee6595d2fff9b25a7678f323ad87 | [
"Apache-2.0"
] | null | null | null | import os
import random
import numpy as np
import scipy.misc as misc
import imageio
from tqdm import tqdm
import cv2
from PIL import Image
import torch
import torch.nn.functional as F
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP']
BINARY_EXTENSIONS = ['.npy']
BENCHMARK = ['Set5', 'Set14', 'B100', 'Urban100', 'Manga109', 'DIV2K', 'DF2K']
####################
# Files & IO
####################
####################
#for BD degradation#
####################
# image processing
# process on numpy image
####################
| 31.690141 | 113 | 0.585778 | import os
import random
import numpy as np
import scipy.misc as misc
import imageio
from tqdm import tqdm
import cv2
from PIL import Image
import torch
import torch.nn.functional as F
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP']
BINARY_EXTENSIONS = ['.npy']
BENCHMARK = ['Set5', 'Set14', 'B100', 'Urban100', 'Manga109', 'DIV2K', 'DF2K']
####################
# Files & IO
####################
def is_image_file(filename):
    """Return True if *filename* ends with a recognized image extension."""
    # str.endswith accepts a tuple of suffixes; no explicit any() loop needed.
    return filename.endswith(tuple(IMG_EXTENSIONS))
def is_binary_file(filename):
    """Return True if *filename* ends with a recognized binary (.npy) extension."""
    # str.endswith accepts a tuple of suffixes; no explicit any() loop needed.
    return filename.endswith(tuple(BINARY_EXTENSIONS))
def _get_paths_from_images(path):
    """Recursively collect image-file paths under *path*; asserts dir exists and is non-empty."""
    assert os.path.isdir(path), '[Error] [%s] is not a valid directory' % path
    images = [
        os.path.join(dirpath, fname)
        for dirpath, _, fnames in sorted(os.walk(path))
        for fname in sorted(fnames)
        if is_image_file(fname)
    ]
    assert images, '[%s] has no valid image file' % path
    return images
def _get_paths_from_binary(path):
    """Recursively collect binary (.npy) file paths under *path*; asserts dir exists and is non-empty."""
    assert os.path.isdir(path), '[Error] [%s] is not a valid directory' % path
    files = [
        os.path.join(dirpath, fname)
        for dirpath, _, fnames in sorted(os.walk(path))
        for fname in sorted(fnames)
        if is_binary_file(fname)
    ]
    assert files, '[%s] has no valid binary file' % path
    return files
def get_image_paths(data_type, dataroot):
    """Return sorted data file paths for a dataset root.

    data_type 'img' lists image files directly; 'npy' lists cached binary
    files, converting images into a sibling '<dataroot>_npy' cache on first
    use. Returns None when dataroot is None; raises NotImplementedError for
    unknown data_type values.
    """
    paths = None
    if dataroot is not None:
        if data_type == 'img':
            paths = sorted(_get_paths_from_images(dataroot))
        elif data_type == 'npy':
            # Redirect to the '_npy' cache directory, building it if missing.
            if dataroot.find('_npy') < 0 :
                old_dir = dataroot
                dataroot = dataroot + '_npy'
                if not os.path.exists(dataroot):
                    print('===> Creating binary files in [%s]' % dataroot)
                    os.makedirs(dataroot)
                    img_paths = sorted(_get_paths_from_images(old_dir))
                    path_bar = tqdm(img_paths)
                    for v in path_bar:
                        # Decode each image once and persist it as .npy.
                        img = imageio.imread(v, pilmode='RGB')
                        ext = os.path.splitext(os.path.basename(v))[-1]
                        name_sep = os.path.basename(v.replace(ext, '.npy'))
                        np.save(os.path.join(dataroot, name_sep), img)
                else:
                    print('===> Binary files already exists in [%s]. Skip binary files generation.' % dataroot)
            paths = sorted(_get_paths_from_binary(dataroot))
        else:
            raise NotImplementedError("[Error] Data_type [%s] is not recognized." % data_type)
    return paths
def find_benchmark(dataroot):
    """Return the first BENCHMARK name occurring in *dataroot*, or 'MyImage' if none match."""
    # next() with a default replaces the boolean list + index(True) dance.
    return next((bm for bm in BENCHMARK if bm in dataroot), 'MyImage')
def read_img(path, data_type):
    """Load an image from disk ('img') or from a cached .npy file.

    Grayscale inputs get a trailing channel axis. The result is cropped by
    modcrop(..., 4) so both spatial dims are multiples of 4.
    """
    # read image by misc or from .npy
    # return: Numpy float32, HWC, RGB, [0,255]
    if data_type == 'img':
        img = imageio.imread(path, pilmode='RGB')
    elif data_type.find('npy') >= 0:
        img = np.load(path)
    else:
        raise NotImplementedError
    if img.ndim == 2:
        # Promote HxW grayscale to HxWx1 so downstream code can assume HWC.
        img = np.expand_dims(img, axis=2)
    return modcrop(img, 4)
####################
#for BD degradation#
def get_imgs(path):
    """Build an (LR, blurred-HR, HR) uint8 triple from a cached .npy HR image.

    BD degradation: 7x7 Gaussian blur (sigma 1.6) on the HR image, then
    bicubic 1/4 downsampling to produce the LR image.

    NOTE(review): scipy.misc.imresize was removed in SciPy 1.3; this only runs
    on old SciPy. Consider migrating to cv2.resize or PIL.
    """
    # print(path)
    hr = np.load(path)
    # print(hr.shape)
    hr = modcrop(hr, scale=4)
    hr_x = cv2.GaussianBlur(hr,(7,7),1.6).clip(0, 255)
    lr = misc.imresize(hr_x, 1 / 4, interp='bicubic')
    # print(type(lr))
    return lr.clip(0, 255).astype(np.uint8), hr_x.clip(0, 255).astype(np.uint8), hr.clip(0, 255).astype(np.uint8)
def get_patch_hrx(img_in, img_x, img_tar, patch_size, scale):
    """Crop spatially-aligned random patches from input, intermediate and target images.

    If input and target have the same height the target patch matches the
    input patch; otherwise the target patch is `scale`x larger with scaled
    offsets. img_x is cropped with the target-sized window.
    """
    in_h, in_w = img_in.shape[:2]
    tar_h = img_tar.shape[0]
    lr_size = patch_size
    # Random top-left corner in input coordinates.
    left = random.randrange(0, in_w - lr_size + 1)
    top = random.randrange(0, in_h - lr_size + 1)
    if in_h == tar_h:
        hr_size, hr_left, hr_top = lr_size, left, top
    else:
        hr_size = lr_size * scale
        hr_left, hr_top = left * scale, top * scale
    patch_in = img_in[top:top + lr_size, left:left + lr_size, :]
    patch_x = img_x[hr_top:hr_top + hr_size, hr_left:hr_left + hr_size, :]
    patch_tar = img_tar[hr_top:hr_top + hr_size, hr_left:hr_left + hr_size, :]
    return patch_in, patch_x, patch_tar
####################
# image processing
# process on numpy image
####################
def np2Tensor(l, rgb_range):
    """Convert a list of HWC numpy images to CHW float tensors scaled by rgb_range/255."""
    scale = rgb_range / 255.
    tensors = []
    for img in l:
        chw = np.ascontiguousarray(img.transpose((2, 0, 1)))
        t = torch.from_numpy(chw.copy()).float()
        t.mul_(scale)
        tensors.append(t)
    return tensors
def k2tensor(k):
    """Convert an HWC numpy kernel to a CHW float32 torch tensor."""
    chw = np.ascontiguousarray(k.transpose((2, 0, 1)))
    return torch.from_numpy(chw.copy()).float()
def conv(inp, k, padding):
    """Thin wrapper around F.conv2d.

    inp: B x C x H x W input batch; k: convolution weight tensor.
    """
    out = F.conv2d(inp, k, padding=padding)
    return out
def downsample(img):
    """Bicubically downsample a B x C x H x W tensor by a factor of 4 per spatial dim."""
    return F.interpolate(img, scale_factor=(0.25, 0.25), mode='bicubic')
def get_patch(img_tar, patch_size, scale):
    """Crop a random square patch of side patch_size*scale from an HWC image."""
    h, w = img_tar.shape[:2]
    crop = patch_size * scale
    x0 = random.randrange(0, w - crop + 1)
    y0 = random.randrange(0, h - crop + 1)
    return img_tar[y0:y0 + crop, x0:x0 + crop, :]
def get_patch_lrx(img_in, img_inx, img_tar, patch_size, scale):
    """Crop aligned random patches from two same-sized inputs and the target.

    Both img_in and img_inx are cropped with the input-sized window; the
    target window is `scale`x larger unless input and target heights match.
    """
    in_h, in_w = img_in.shape[:2]
    tar_h = img_tar.shape[0]
    lr_size = patch_size
    # Random top-left corner in input coordinates.
    left = random.randrange(0, in_w - lr_size + 1)
    top = random.randrange(0, in_h - lr_size + 1)
    if in_h == tar_h:
        hr_size, hr_left, hr_top = lr_size, left, top
    else:
        hr_size = lr_size * scale
        hr_left, hr_top = left * scale, top * scale
    patch_in = img_in[top:top + lr_size, left:left + lr_size, :]
    patch_inx = img_inx[top:top + lr_size, left:left + lr_size, :]
    patch_tar = img_tar[hr_top:hr_top + hr_size, hr_left:hr_left + hr_size, :]
    return patch_in, patch_inx, patch_tar
def quantize_to_1(img, rgb_range):
    """Map a tensor from [0, rgb_range] to binary {0, 1} by clamping and rounding.

    rgb_range == -1 is a sentinel meaning "no quantization": the input is
    returned unchanged.
    """
    if rgb_range == -1:
        return img
    return img.mul(1. / rgb_range).clamp(0, 1).round()
def augment(img_list, hflip=True, rot=True):
    """Apply one random flip/rotate combination to every image in img_list.

    The random decisions are drawn once, so all images in the list receive
    the identical transform. hflip/rot gate which augmentations may fire.
    """
    do_hflip = hflip and random.random() < 0.5
    do_vflip = rot and random.random() < 0.5
    do_rot90 = rot and random.random() < 0.5
    augmented = []
    for img in img_list:
        if do_hflip:
            img = img[:, ::-1, :]
        if do_vflip:
            img = img[::-1, :, :]
        if do_rot90:
            img = img.transpose(1, 0, 2)
        augmented.append(img)
    return augmented
def inv_covariance_matrix(sig_x, sig_y, theta):
    """Inverse covariance matrix of a rotated anisotropic 2-D Gaussian.

    sig_x / sig_y: per-axis standard deviations; theta: rotation angle (rad).
    Built as U @ D^-1 @ U.T with rotation (eigenvector) matrix U and
    D = diag(sig_x^2, sig_y^2).
    """
    c, s = np.cos(theta), np.sin(theta)
    rot = np.array([[c, -s], [s, c]])
    d_inv = np.diag([1 / (sig_x ** 2), 1 / (sig_y ** 2)])
    return np.dot(rot, np.dot(d_inv, rot.T))
def anisotropic_gaussian_kernel(width, inv_cov):
    """Normalized width x width Gaussian kernel from an inverse covariance matrix."""
    # Symmetric sample axis with exactly `width` points.
    ax = np.arange(-width // 2 + 1., width // 2 + 1.)
    if width % 2 == 0:
        # No center pixel for even sizes; shift half a pixel to stay centered.
        ax = ax - 0.5
    grid_x, grid_y = np.meshgrid(ax, ax)
    coords = np.stack([grid_x, grid_y], axis=2)
    # Unnormalized bivariate Gaussian: exp(-0.5 * v^T Sigma^-1 v) per pixel.
    quad_form = np.sum(np.dot(coords, inv_cov) * coords, 2)
    kernel = np.exp(-0.5 * quad_form)
    return kernel / np.sum(kernel)
def random_anisotropic_gaussian_kernel(width=15, sig_min=0.2, sig_max=4.0):
    """Sample a random rotated anisotropic Gaussian kernel as a 1 x width x width float32 tensor.

    width: kernel size; sig_min / sig_max: bounds for both standard deviations.
    """
    span = sig_max - sig_min
    sig_x = np.random.random() * span + sig_min
    sig_y = np.random.random() * span + sig_min
    theta = np.random.random() * 3.141 / 2.  # rotation angle in [0, ~pi/2)
    inv_cov = inv_covariance_matrix(sig_x, sig_y, theta)
    kernel = anisotropic_gaussian_kernel(width, inv_cov).astype(np.float32)
    # Add a leading channel axis before converting to torch.
    return torch.from_numpy(kernel[None, ...])
def modcrop(img_in, scale):
    """Crop an image so both spatial dims become multiples of `scale`.

    Accepts HxW or HxWxC arrays; raises ValueError for any other rank.
    Works on a copy; the input array is never modified.
    """
    img = np.copy(img_in)
    if img.ndim not in (2, 3):
        raise ValueError('Wrong img ndim: [%d].' % img.ndim)
    h, w = img.shape[0], img.shape[1]
    new_h, new_w = h - h % scale, w - w % scale
    if img.ndim == 2:
        return img[:new_h, :new_w]
    return img[:new_h, :new_w, :]
| 7,930 | 0 | 481 |
a0355f919cc4728a58640cd8403ab48bfc29e577 | 460 | py | Python | Exe15_triangulo_isoseles_equilatero_escaleno.py | lucaslk122/Exercicios_Python_estutura_decisao | 51a9699c5d85aa6cfb163d891c56e804a7255634 | [
"MIT"
] | null | null | null | Exe15_triangulo_isoseles_equilatero_escaleno.py | lucaslk122/Exercicios_Python_estutura_decisao | 51a9699c5d85aa6cfb163d891c56e804a7255634 | [
"MIT"
] | null | null | null | Exe15_triangulo_isoseles_equilatero_escaleno.py | lucaslk122/Exercicios_Python_estutura_decisao | 51a9699c5d85aa6cfb163d891c56e804a7255634 | [
"MIT"
] | null | null | null | print("Entre com os dados de um trangulo")
# Read the three candidate side lengths from the user.
lado1 = int(input("Lado 1: "))
lado2 = int(input("Lado 2: "))
lado3 = int(input("Lado 3: "))
# Triangle inequality: each side must be shorter than the sum of the other two.
if lado1 < lado2 + lado3 and lado2 < lado1 + lado3 and lado3 < lado1 + lado2:
    # All three sides equal -> equilateral.
    if lado1 == lado2 == lado3:
        print("Triângulo equilatero")
    # All three sides pairwise different -> scalene.
    elif lado1 != lado2 != lado3 != lado1:
        print("Triângulo escaleno")
    # Otherwise exactly two sides are equal -> isosceles.
    else:
        print("Triângulo isósceles")
# Sides violating the triangle inequality do not form a triangle.
else:
print("Os lados não foram um triangulo") | 35.384615 | 77 | 0.626087 | print("Entre com os dados de um trangulo")
# Read the three candidate side lengths from the user.
lado1 = int(input("Lado 1: "))
lado2 = int(input("Lado 2: "))
lado3 = int(input("Lado 3: "))
# Triangle inequality: each side must be shorter than the sum of the other two.
if lado1 < lado2 + lado3 and lado2 < lado1 + lado3 and lado3 < lado1 + lado2:
    # All three sides equal -> equilateral.
    if lado1 == lado2 == lado3:
        print("Triângulo equilatero")
    # All three sides pairwise different -> scalene.
    elif lado1 != lado2 != lado3 != lado1:
        print("Triângulo escaleno")
    # Otherwise exactly two sides are equal -> isosceles.
    else:
        print("Triângulo isósceles")
# Sides violating the triangle inequality do not form a triangle.
else:
print("Os lados não foram um triangulo") | 0 | 0 | 0 |
67698325e8a8db41808cc663d5073ddee6cdf27f | 6,346 | py | Python | game.py | matthewspangler/quillengine | 80bce8f4d17a15766c6e43b768d3bf028c74ba73 | [
"MIT-0"
] | 1 | 2020-10-27T12:26:25.000Z | 2020-10-27T12:26:25.000Z | game.py | matthewspangler/quillengine | 80bce8f4d17a15766c6e43b768d3bf028c74ba73 | [
"MIT-0"
] | null | null | null | game.py | matthewspangler/quillengine | 80bce8f4d17a15766c6e43b768d3bf028c74ba73 | [
"MIT-0"
] | null | null | null | # -------------------------------------------------------------------- #
# game.py
# contains main game loop,
# including events, drawing, and update
# -------------------------------------------------------------------- #
# General imports:
import sys
# Game related imports:
import pygame
import pyscroll
import pytmx
from pygame.locals import *
# Local imports:
from constants import *
from player import Player
from scene import Scene
from solid_platform import Platform
# Events: processing input from user via keyboard, mouse, etc
# game logic/mechanics here. process user input
# Code for what is drawn on screen each frame here
# All this function's code could just be put into the draw() function,
# but I put it here because I'm tired of scrolling over it.
# TODO: rewrite debug drawing code so all text in in a list that is displayed within a for loop.
# TODO: that way we can add more debug outputs easily by appending them to the list
| 39.17284 | 109 | 0.608887 | # -------------------------------------------------------------------- #
# game.py
# contains main game loop,
# including events, drawing, and update
# -------------------------------------------------------------------- #
# General imports:
import sys
# Game related imports:
import pygame
import pyscroll
import pytmx
from pygame.locals import *
# Local imports:
from constants import *
from player import Player
from scene import Scene
from solid_platform import Platform
class GameScene(Scene):
    """Main gameplay scene: loads the TMX level, owns the player sprite, and
    handles per-frame input, update and rendering (plus an F10 debug overlay)."""
    def __init__(self):
        """Set up the window, rendering layers, player, TMX map and pyscroll camera."""
        Scene.__init__(self)
        # Set window caption
        pygame.display.set_caption(WINDOW_CAPTION)
        # TODO: rewrite scenes class to allow resizing of screen globally.
        # See end paragraph of this article:https://nerdparadise.com/programming/pygame/part7
        screen_size = [SCREEN_WIDTH, SCREEN_HEIGHT]
        # List variable for layers - 0 = background color; 1 = scenery; 2 = level; 3 = player; 4 = foreground
        # Each layer is a seperate surface.
        self.layers = [pygame.Surface(screen_size) for i in range(4)]
        # Create a sprite group of active sprites, which are all rendered in draw() function
        self.active_sprite_list = pygame.sprite.Group()
        # Create instance of player
        self.player_one = Player(150, 50, self)
        # Add player to list of active sprites, so it gets rendered in draw() function
        self.active_sprite_list.add(self.player_one)
        # Time to load our TMX level map.
        self.lvl1_tmx_data = pytmx.load_pygame(LEVEL_01_TMX)
        # Create new data source for pyscroll
        self.map_data = pyscroll.data.TiledMapData(self.lvl1_tmx_data)
        # Create new renderer (camera)
        # Clamp_camera is used to prevent the map from scrolling past the edge
        # TODO: remove screen width/height constants once we get dynamic screen sizes figured out
        self.map_layer = pyscroll.BufferedRenderer(self.map_data,
                                                   screen_size,
                                                   clamp_camera=True)
        self.group = pyscroll.PyscrollGroup(map_layer=self.map_layer)
        # TODO: Figure out how to center player in map.
        # TODO: uncomment the following lines of code, and remove/rewrite active_sprite_list
        # For that, see https://github.com/bitcraft/pyscroll/wiki/Tutorial-(WIP)
        # Add our player to the group
        self.group.add(self.player_one)
        # Can be switched on with F10 key, for that see events()
        self.debug_mode = False
        self.jump_key_pressed = False
        self.platforms = self.get_solid_platforms(self.lvl1_tmx_data.get_layer_by_name("Collision Mask"))
    def get_solid_platforms(self, tmx_layer):
        """Build a Platform object for every tile in the given TMX collision layer."""
        solid_platforms = []
        # Least effort involved getting all tile images.
        # TODO: If we could only check tiles near player's sensors, that might be faster.
        for x, y, image in tmx_layer.tiles():
            solid_platforms.append(Platform(x, y, image))
        return solid_platforms
    # Events: processing input from user via keyboard, mouse, etc
    def events(self, events, pressed_keys):
        """Dispatch pygame events: quit, player key press/release, and debug keys (N, Esc, F10)."""
        for event in events:
            if event.type == QUIT:
                # exit button or quit command issued
                pygame.quit()
                sys.exit()
            # Key down events
            elif event.type == pygame.KEYDOWN:
                # Player keypress events
                self.player_one.key_press(event, pressed_keys)
                # Advances player animation to next frame in list
                if event.key == pygame.K_n:
                    self.player_one.advance_animation()
                # Quit game key
                elif event.key == pygame.K_ESCAPE:
                    pygame.quit()
                    sys.exit()
                # Turn on debug mode
                elif event.key == pygame.K_F10:
                    self.debug_mode = not self.debug_mode
                    print("Debug mode switched")
            # Key up events
            elif event.type == pygame.KEYUP:
                # Player key release events
                self.player_one.key_release(event, pressed_keys)
    # game logic/mechanics here. process user input
    def update(self, dt):
        """Advance game state by dt via the active sprite group."""
        # Update active sprite group
        self.active_sprite_list.update(dt)
    # Code for what is drawn on screen each frame here
    def draw(self, screen, surface):
        """Render one frame: background fill, map/sprite group, optional debug overlay, blit to screen."""
        # Clear screen/fill with background color
        surface.fill(GAME_BG_COLOR)
        # Draw sprite / level data group to surface
        self.group.draw(surface)
        # Debug mode rendering logic
        if self.debug_mode:
            self.draw_debug(screen, surface)
        # Draw/render surface onto screen
        screen.blit(surface, (0, 0))
    # All this function's code could just be put into the draw() function,
    # but I put it here because I'm tired of scrolling over it.
    # TODO: rewrite debug drawing code so all text in in a list that is displayed within a for loop.
    # TODO: that way we can add more debug outputs easily by appending them to the list
    def draw_debug(self, screen, surface):
        """Render the debug overlay: player position/speed/state text and sensor rectangles."""
        screen.fill(TITLE_BG_COLOR)
        # Make small font
        debugFont = pygame.font.Font(HUD_FONT, 20)
        # Create instances of text
        debugText = debugFont.render("Debug mode", False, WHITE)
        positionText = debugFont.render("Player X,Y: %s,%s" %
                                        (self.player_one.rect.x, self.player_one.rect.y), False, WHITE)
        speedText = debugFont.render("XSP, YSP: %s,%s" %
                                     (self.player_one.x_speed, self.player_one.y_speed), False, WHITE)
        playerstateText = debugFont.render("State: %s" % self.player_one._state, False, WHITE)
        # Render the debug text
        surface.blit(debugText, (5, 5))
        surface.blit(positionText, (20, 35))
        surface.blit(speedText, (20, 65))
        surface.blit(playerstateText, (20, 95))
        # Render the sensors
        for sensor in self.player_one.sensors:
            if sensor.activated:
                pygame.draw.rect(surface, sensor.active_color, sensor)
            else:
                pygame.draw.rect(surface, sensor.inactive_color, sensor)
| 5,179 | 2 | 180 |
d9f00e14ddde6217639bd67754bf5cb2607b36a4 | 7,592 | py | Python | notebooks/SignDetectorAndClassifier/src/utils/autoanchor.py | lsd-maddrive/adas_system | 0352d59a500aebbd68fbf45f416fb98d1b850e13 | [
"MIT"
] | null | null | null | notebooks/SignDetectorAndClassifier/src/utils/autoanchor.py | lsd-maddrive/adas_system | 0352d59a500aebbd68fbf45f416fb98d1b850e13 | [
"MIT"
] | 14 | 2021-10-02T10:10:45.000Z | 2022-03-26T08:32:48.000Z | notebooks/SignDetectorAndClassifier/src/utils/autoanchor.py | lsd-maddrive/adas_system | 0352d59a500aebbd68fbf45f416fb98d1b850e13 | [
"MIT"
] | null | null | null | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Auto-anchor utils
"""
import random
import numpy as np
import torch
import yaml
from tqdm import tqdm
from utils.general import LOGGER, colorstr, emojis
PREFIX = colorstr("AutoAnchor: ")
def kmean_anchors(
    dataset="./data/coco128.yaml", n=9, img_size=640, thr=4.0, gen=1000, verbose=True
):
    """ Creates kmeans-evolved anchors from training dataset
    Arguments:
        dataset: path to data.yaml, or a loaded dataset
        n: number of anchors
        img_size: image size used for training
        thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
        gen: generations to evolve anchors using genetic algorithm
        verbose: print all results
    Return:
        k: kmeans evolved anchors
    Usage:
        from utils.autoanchor import *; _ = kmean_anchors()
    """
    from scipy.cluster.vq import kmeans
    thr = 1 / thr
    # BUGFIX: this copy of the function called metric(), anchor_fitness() and
    # print_results() without defining them (NameError at runtime). The inner
    # helper functions are restored below, matching the sibling definition of
    # kmean_anchors elsewhere in this file.
    def metric(k, wh):  # compute metrics
        # Per-label / per-anchor width-height ratio metric in (0, 1].
        r = wh[:, None] / k[None]
        x = torch.min(r, 1 / r).min(2)[0]  # ratio metric
        # x = wh_iou(wh, torch.tensor(k))  # iou metric
        return x, x.max(1)[0]  # x, best_x
    def anchor_fitness(k):  # mutation fitness
        _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
        return (best * (best > thr).float()).mean()  # fitness
    def print_results(k, verbose=True):
        # Sort anchors by area, report recall stats, and return the sorted set.
        k = k[np.argsort(k.prod(1))]  # sort small to large
        x, best = metric(k, wh0)
        bpr, aat = (
            (best > thr).float().mean(),
            (x > thr).float().mean() * n,
        )  # best possible recall, anch > thr
        s = (
            f"{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n"
            f"{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, "
            f"past_thr={x[x > thr].mean():.3f}-mean: "
        )
        for i, x in enumerate(k):
            s += "%i,%i, " % (round(x[0]), round(x[1]))
        if verbose:
            LOGGER.info(s[:-2])
        return k
    if isinstance(dataset, str):  # *.yaml file
        with open(dataset, errors="ignore") as f:
            data_dict = yaml.safe_load(f)  # model dict
        from utils.datasets import LoadImagesAndLabels
        dataset = LoadImagesAndLabels(data_dict["train"], augment=True, rect=True)
    # Get label wh
    shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)])  # wh
    # Filter
    i = (wh0 < 3.0).any(1).sum()
    if i:
        LOGGER.info(
            f"{PREFIX}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size."
        )
    wh = wh0[(wh0 >= 2.0).any(1)]  # filter > 2 pixels
    # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1)  # multiply by random scale 0-1
    # Kmeans calculation
    LOGGER.info(f"{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...")
    s = wh.std(0)  # sigmas for whitening
    k, dist = kmeans(wh / s, n, iter=30)  # points, mean distance
    assert (
        len(k) == n
    ), f"{PREFIX}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}"
    k *= s
    wh = torch.tensor(wh, dtype=torch.float32)  # filtered
    wh0 = torch.tensor(wh0, dtype=torch.float32)  # unfiltered
    k = print_results(k, verbose=False)
    # Evolve: random multiplicative mutations, keeping improvements only.
    npr = np.random
    f, sh, mp, s = (
        anchor_fitness(k),
        k.shape,
        0.9,
        0.1,
    )  # fitness, generations, mutation prob, sigma
    pbar = tqdm(
        range(gen), desc=f"{PREFIX}Evolving anchors with Genetic Algorithm:"
    )  # progress bar
    for _ in pbar:
        v = np.ones(sh)
        while (v == 1).all():  # mutate until a change occurs (prevent duplicates)
            v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(
                0.3, 3.0
            )
        kg = (k.copy() * v).clip(min=2.0)
        fg = anchor_fitness(kg)
        if fg > f:
            f, k = fg, kg.copy()
            pbar.desc = (
                f"{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}"
            )
            if verbose:
                print_results(k, verbose)
    return print_results(k)
| 36.854369 | 111 | 0.559405 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Auto-anchor utils
"""
import random
import numpy as np
import torch
import yaml
from tqdm import tqdm
from utils.general import LOGGER, colorstr, emojis
PREFIX = colorstr("AutoAnchor: ")
def check_anchor_order(m):
    """Ensure Detect() anchors are ordered in the same direction as the strides.

    m: module exposing `anchors` (nl x na x 2 tensor) and `stride` (1-D tensor).
    When the anchor-area ordering disagrees with the stride ordering, the
    anchor tensor is reversed in place along its first axis.
    """
    areas = m.anchors.prod(-1).view(-1)
    area_delta = areas[-1] - areas[0]
    stride_delta = m.stride[-1] - m.stride[0]
    # Opposite signs mean anchors and strides run in opposite directions.
    if area_delta.sign() != stride_delta.sign():
        LOGGER.info(f"{PREFIX}Reversing anchor order")
        m.anchors[:] = m.anchors.flip(0)
def check_anchors(dataset, model, thr=4.0, imgsz=640):
    """Evaluate anchor fit (best possible recall) on the dataset labels and,
    if BPR <= 0.98, evolve new anchors with kmean_anchors() and install them
    into the model's Detect() head in place (divided back by stride)."""
    # Check anchor fit to data, recompute if necessary
    m = (
        model.module.model[-1] if hasattr(model, "module") else model.model[-1]
    ) # Detect()
    # Label wh in pixels at training resolution, with a random 0.9-1.1 scale jitter.
    shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale
    wh = torch.tensor(
        np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])
    ).float() # wh
    def metric(k): # compute metric
        r = wh[:, None] / k[None]
        x = torch.min(r, 1 / r).min(2)[0] # ratio metric
        best = x.max(1)[0] # best_x
        aat = (x > 1 / thr).float().sum(1).mean() # anchors above threshold
        bpr = (best > 1 / thr).float().mean() # best possible recall
        return bpr, aat
    anchors = m.anchors.clone() * m.stride.to(m.anchors.device).view(
        -1, 1, 1
    ) # current anchors
    bpr, aat = metric(anchors.cpu().view(-1, 2))
    s = f"\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). "
    if bpr > 0.98: # threshold to recompute
        LOGGER.info(emojis(f"{s}Current anchors are a good fit to dataset ✅"))
    else:
        LOGGER.info(
            emojis(f"{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...")
        )
        na = m.anchors.numel() // 2 # number of anchors
        try:
            anchors = kmean_anchors(
                dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False
            )
        except Exception as e:
            # Best-effort: fall back to comparing the original anchors below.
            LOGGER.info(f"{PREFIX}ERROR: {e}")
        new_bpr = metric(anchors)[0]
        if new_bpr > bpr: # replace anchors
            anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)
            m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(
                m.anchors.device
            ).view(
                -1, 1, 1
            ) # loss
            check_anchor_order(m)
            LOGGER.info(
                f"{PREFIX}New anchors saved to model. Update model *.yaml to use these anchors in the future."
            )
        else:
            LOGGER.info(
                f"{PREFIX}Original anchors better than new anchors. Proceeding with original anchors."
            )
def kmean_anchors(
    dataset="./data/coco128.yaml", n=9, img_size=640, thr=4.0, gen=1000, verbose=True
):
    """ Creates kmeans-evolved anchors from training dataset
    Arguments:
        dataset: path to data.yaml, or a loaded dataset
        n: number of anchors
        img_size: image size used for training
        thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
        gen: generations to evolve anchors using genetic algorithm
        verbose: print all results
    Return:
        k: kmeans evolved anchors
    Usage:
        from utils.autoanchor import *; _ = kmean_anchors()
    """
    from scipy.cluster.vq import kmeans
    thr = 1 / thr
    # Per-label / per-anchor width-height ratio metric in (0, 1].
    def metric(k, wh): # compute metrics
        r = wh[:, None] / k[None]
        x = torch.min(r, 1 / r).min(2)[0] # ratio metric
        # x = wh_iou(wh, torch.tensor(k)) # iou metric
        return x, x.max(1)[0] # x, best_x
    def anchor_fitness(k): # mutation fitness
        _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
        return (best * (best > thr).float()).mean() # fitness
    # Sort anchors by area, report recall stats, and return the sorted set.
    def print_results(k, verbose=True):
        k = k[np.argsort(k.prod(1))] # sort small to large
        x, best = metric(k, wh0)
        bpr, aat = (
            (best > thr).float().mean(),
            (x > thr).float().mean() * n,
        ) # best possible recall, anch > thr
        s = (
            f"{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n"
            f"{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, "
            f"past_thr={x[x > thr].mean():.3f}-mean: "
        )
        for i, x in enumerate(k):
            s += "%i,%i, " % (round(x[0]), round(x[1]))
        if verbose:
            LOGGER.info(s[:-2])
        return k
    if isinstance(dataset, str): # *.yaml file
        with open(dataset, errors="ignore") as f:
            data_dict = yaml.safe_load(f) # model dict
        from utils.datasets import LoadImagesAndLabels
        dataset = LoadImagesAndLabels(data_dict["train"], augment=True, rect=True)
    # Get label wh
    shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh
    # Filter
    i = (wh0 < 3.0).any(1).sum()
    if i:
        LOGGER.info(
            f"{PREFIX}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size."
        )
    wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels
    # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1
    # Kmeans calculation
    LOGGER.info(f"{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...")
    s = wh.std(0) # sigmas for whitening
    k, dist = kmeans(wh / s, n, iter=30) # points, mean distance
    assert (
        len(k) == n
    ), f"{PREFIX}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}"
    k *= s
    wh = torch.tensor(wh, dtype=torch.float32) # filtered
    wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered
    k = print_results(k, verbose=False)
    # Plot
    # k, d = [None] * 20, [None] * 20
    # for i in tqdm(range(1, 21)):
    #     k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)
    # ax = ax.ravel()
    # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh
    # ax[0].hist(wh[wh[:, 0]<100, 0],400)
    # ax[1].hist(wh[wh[:, 1]<100, 1],400)
    # fig.savefig('wh.png', dpi=200)
    # Evolve: random multiplicative mutations, keeping improvements only.
    npr = np.random
    f, sh, mp, s = (
        anchor_fitness(k),
        k.shape,
        0.9,
        0.1,
    ) # fitness, generations, mutation prob, sigma
    pbar = tqdm(
        range(gen), desc=f"{PREFIX}Evolving anchors with Genetic Algorithm:"
    ) # progress bar
    for _ in pbar:
        v = np.ones(sh)
        while (v == 1).all(): # mutate until a change occurs (prevent duplicates)
            v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(
                0.3, 3.0
            )
        kg = (k.copy() * v).clip(min=2.0)
        fg = anchor_fitness(kg)
        if fg > f:
            f, k = fg, kg.copy()
            pbar.desc = (
                f"{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}"
            )
            if verbose:
                print_results(k, verbose)
    return print_results(k)
| 3,735 | 0 | 127 |
453c5d60dedd3235c1f46cd9497155d8b3a70989 | 12,610 | py | Python | tests/test_sendmail.py | thijstriemstra/aiosmtplib | 39bab303bf15cc8e14aa0b7cffe800f1e7f83e58 | [
"MIT"
] | null | null | null | tests/test_sendmail.py | thijstriemstra/aiosmtplib | 39bab303bf15cc8e14aa0b7cffe800f1e7f83e58 | [
"MIT"
] | null | null | null | tests/test_sendmail.py | thijstriemstra/aiosmtplib | 39bab303bf15cc8e14aa0b7cffe800f1e7f83e58 | [
"MIT"
] | null | null | null | """
SMTP.sendmail and SMTP.send_message method testing.
"""
import copy
import email.generator
import email.header
import pytest
from aiosmtplib import (
SMTPNotSupported,
SMTPRecipientsRefused,
SMTPResponseException,
SMTPStatus,
)
pytestmark = pytest.mark.asyncio()
async def test_rset_after_sendmail_error_response_to_mail(
    smtp_client, smtpd_server, received_commands
):
    """
    If an error response is given to the MAIL command in the sendmail method,
    test that we reset the server session.
    """
    async with smtp_client:
        response = await smtp_client.ehlo()
        assert response.code == SMTPStatus.completed
        # The malformed sender address makes the server reject the MAIL command.
        with pytest.raises(SMTPResponseException) as excinfo:
            await smtp_client.sendmail(">foobar<", ["test@example.com"], "Hello World")
        assert excinfo.value.code == SMTPStatus.unrecognized_parameters
        # The client must have issued RSET as the final command to clean up.
        assert received_commands[-1][0] == "RSET"
async def test_rset_after_sendmail_error_response_to_rcpt(
    smtp_client, smtpd_server, received_commands
):
    """Verify the client sends RSET after the server refuses RCPT.

    A refused recipient inside sendmail() must leave the server
    session in a clean state for subsequent sends.
    """
    async with smtp_client:
        ehlo_response = await smtp_client.ehlo()
        assert ehlo_response.code == SMTPStatus.completed

        with pytest.raises(SMTPRecipientsRefused) as exc_info:
            await smtp_client.sendmail(
                "test@example.com", [">not an addr<"], "Hello World"
            )

        assert exc_info.value.recipients[0].code == SMTPStatus.unrecognized_parameters
    assert received_commands[-1][0] == "RSET"
async def test_rset_after_sendmail_error_response_to_data(
    smtp_client,
    smtpd_server,
    smtpd_class,
    smtpd_response_handler_factory,
    monkeypatch,
    error_code,
    sender_str,
    recipient_str,
    message_str,
    received_commands,
):
    """Verify the client sends RSET after the server rejects DATA.

    The server's DATA handler is monkeypatched to reply with the
    parametrized ``error_code`` so sendmail() fails; the session must
    then be reset.
    """
    failing_data_handler = smtpd_response_handler_factory(
        "{} error".format(error_code)
    )
    monkeypatch.setattr(smtpd_class, "smtp_DATA", failing_data_handler)

    async with smtp_client:
        ehlo_response = await smtp_client.ehlo()
        assert ehlo_response.code == SMTPStatus.completed

        with pytest.raises(SMTPResponseException) as exc_info:
            await smtp_client.sendmail(sender_str, [recipient_str], message_str)

        assert exc_info.value.code == error_code
    assert received_commands[-1][0] == "RSET"
| 29.670588 | 87 | 0.691356 | """
SMTP.sendmail and SMTP.send_message method testing.
"""
import copy
import email.generator
import email.header
import pytest
from aiosmtplib import (
SMTPNotSupported,
SMTPRecipientsRefused,
SMTPResponseException,
SMTPStatus,
)
pytestmark = pytest.mark.asyncio()
async def test_sendmail_simple_success(
    smtp_client, smtpd_server, sender_str, recipient_str, message_str
):
    """A plain ASCII message to one recipient sends with no per-recipient errors."""
    async with smtp_client:
        errors, response = await smtp_client.sendmail(
            sender_str, [recipient_str], message_str
        )
        assert not errors
        assert isinstance(errors, dict)
        assert response != ""
async def test_sendmail_binary_content(
    smtp_client, smtpd_server, sender_str, recipient_str, message_str
):
    """A message body passed as bytes (not str) is accepted and sent."""
    async with smtp_client:
        errors, response = await smtp_client.sendmail(
            sender_str, [recipient_str], bytes(message_str, "ascii")
        )
        assert not errors
        assert isinstance(errors, dict)
        assert response != ""
async def test_sendmail_with_recipients_string(
    smtp_client, smtpd_server, sender_str, recipient_str, message_str
):
    """A single recipient passed as a bare string (instead of a list) works."""
    async with smtp_client:
        errors, response = await smtp_client.sendmail(
            sender_str, recipient_str, message_str
        )
        assert not errors
        assert response != ""
async def test_sendmail_with_mail_option(
    smtp_client, smtpd_server, sender_str, recipient_str, message_str
):
    """A supported ESMTP MAIL parameter (BODY=8BITMIME) is passed through."""
    async with smtp_client:
        errors, response = await smtp_client.sendmail(
            sender_str, [recipient_str], message_str, mail_options=["BODY=8BITMIME"]
        )
        assert not errors
        assert response != ""
async def test_sendmail_without_size_option(
    smtp_client,
    smtpd_server,
    smtpd_class,
    smtpd_response_handler_factory,
    monkeypatch,
    sender_str,
    recipient_str,
    message_str,
    received_commands,
):
    """sendmail still succeeds when EHLO advertises no extensions.

    The EHLO handler is monkeypatched to return a bare 250 reply, so the
    client cannot add a SIZE= parameter to MAIL FROM.
    """
    response_handler = smtpd_response_handler_factory(
        "{} done".format(SMTPStatus.completed)
    )
    monkeypatch.setattr(smtpd_class, "smtp_EHLO", response_handler)
    async with smtp_client:
        errors, response = await smtp_client.sendmail(
            sender_str, [recipient_str], message_str
        )
        assert not errors
        assert response != ""
async def test_sendmail_with_invalid_mail_option(
    smtp_client, smtpd_server, sender_str, recipient_str, message_str
):
    """An unrecognized MAIL parameter is rejected with a syntax error."""
    async with smtp_client:
        with pytest.raises(SMTPResponseException) as excinfo:
            await smtp_client.sendmail(
                sender_str,
                [recipient_str],
                message_str,
                mail_options=["BADDATA=0x00000000"],
            )
        assert excinfo.value.code == SMTPStatus.syntax_error
async def test_sendmail_with_rcpt_option(
    smtp_client, smtpd_server, sender_str, recipient_str, message_str
):
    """RCPT options (NOTIFY=...) are refused by the test server with a syntax error."""
    async with smtp_client:
        with pytest.raises(SMTPRecipientsRefused) as excinfo:
            await smtp_client.sendmail(
                sender_str,
                [recipient_str],
                message_str,
                rcpt_options=["NOTIFY=FAILURE,DELAY"],
            )
        recipient_exc = excinfo.value.recipients[0]
        assert recipient_exc.code == SMTPStatus.syntax_error
        assert (
            recipient_exc.message
            == "RCPT TO parameters not recognized or not implemented"
        )
async def test_sendmail_simple_failure(smtp_client, smtpd_server):
    """An unparseable recipient address raises SMTPRecipientsRefused."""
    async with smtp_client:
        with pytest.raises(SMTPRecipientsRefused):
            # @@ is an invalid recipient.
            await smtp_client.sendmail("test@example.com", ["@@"], "blah")
async def test_sendmail_error_silent_rset_handles_disconnect(
    smtp_client,
    smtpd_server,
    smtpd_class,
    smtpd_response_handler_factory,
    monkeypatch,
    sender_str,
    recipient_str,
    message_str,
):
    """The implicit RSET after a DATA error copes with a server disconnect.

    The DATA handler errors and then closes the connection; the original
    response error (not a connection error from the cleanup RSET) must be
    what propagates to the caller.
    """
    response_handler = smtpd_response_handler_factory(
        "{} error".format(SMTPStatus.unrecognized_parameters), close_after=True
    )
    monkeypatch.setattr(smtpd_class, "smtp_DATA", response_handler)
    async with smtp_client:
        with pytest.raises(SMTPResponseException):
            await smtp_client.sendmail(sender_str, [recipient_str], message_str)
async def test_rset_after_sendmail_error_response_to_mail(
    smtp_client, smtpd_server, received_commands
):
    """
    If an error response is given to the MAIL command in the sendmail method,
    test that we reset the server session.
    """
    async with smtp_client:
        response = await smtp_client.ehlo()
        assert response.code == SMTPStatus.completed
        with pytest.raises(SMTPResponseException) as excinfo:
            # ">foobar<" is not a valid address, so the server errors on MAIL.
            await smtp_client.sendmail(">foobar<", ["test@example.com"], "Hello World")
        assert excinfo.value.code == SMTPStatus.unrecognized_parameters
    # The client's cleanup RSET should be the last command the server saw.
    assert received_commands[-1][0] == "RSET"
async def test_rset_after_sendmail_error_response_to_rcpt(
    smtp_client, smtpd_server, received_commands
):
    """
    If an error response is given to the RCPT command in the sendmail method,
    test that we reset the server session.
    """
    async with smtp_client:
        response = await smtp_client.ehlo()
        assert response.code == SMTPStatus.completed
        with pytest.raises(SMTPRecipientsRefused) as excinfo:
            # The recipient address is invalid, so the server errors on RCPT.
            await smtp_client.sendmail(
                "test@example.com", [">not an addr<"], "Hello World"
            )
        assert excinfo.value.recipients[0].code == SMTPStatus.unrecognized_parameters
    # The client's cleanup RSET should be the last command the server saw.
    assert received_commands[-1][0] == "RSET"
async def test_rset_after_sendmail_error_response_to_data(
    smtp_client,
    smtpd_server,
    smtpd_class,
    smtpd_response_handler_factory,
    monkeypatch,
    error_code,
    sender_str,
    recipient_str,
    message_str,
    received_commands,
):
    """
    If an error response is given to the DATA command in the sendmail method,
    test that we reset the server session.
    """
    # Patch the server's DATA handler to always fail with the parametrized code.
    response_handler = smtpd_response_handler_factory("{} error".format(error_code))
    monkeypatch.setattr(smtpd_class, "smtp_DATA", response_handler)
    async with smtp_client:
        response = await smtp_client.ehlo()
        assert response.code == SMTPStatus.completed
        with pytest.raises(SMTPResponseException) as excinfo:
            await smtp_client.sendmail(sender_str, [recipient_str], message_str)
        assert excinfo.value.code == error_code
    # The client's cleanup RSET should be the last command the server saw.
    assert received_commands[-1][0] == "RSET"
async def test_send_message(smtp_client, smtpd_server, message):
    """send_message delivers an email.message object without errors."""
    async with smtp_client:
        errors, response = await smtp_client.send_message(message)
    assert not errors
    assert isinstance(errors, dict)
    assert response != ""
async def test_send_message_with_sender_and_recipient_args(
    smtp_client, smtpd_server, message, received_messages
):
    """Explicit sender/recipients arguments override the message headers."""
    sender = "sender2@example.com"
    recipients = ["recipient1@example.com", "recipient2@example.com"]
    async with smtp_client:
        errors, response = await smtp_client.send_message(
            message, sender=sender, recipients=recipients
        )
    assert not errors
    assert isinstance(errors, dict)
    assert response != ""
    assert len(received_messages) == 1
    # The envelope must reflect the overriding args, not the From/To headers.
    assert received_messages[0]["X-MailFrom"] == sender
    assert received_messages[0]["X-RcptTo"] == ", ".join(recipients)
async def test_send_multiple_messages_in_sequence(smtp_client, smtpd_server, message):
    """Two messages can be sent back to back on a single connection."""
    message1 = copy.copy(message)
    message2 = copy.copy(message)
    # Replace the recipient on the second copy so the two sends differ.
    del message2["To"]
    message2["To"] = "recipient2@example.com"
    async with smtp_client:
        errors1, response1 = await smtp_client.send_message(message1)
        assert not errors1
        assert isinstance(errors1, dict)
        assert response1 != ""
        errors2, response2 = await smtp_client.send_message(message2)
        assert not errors2
        assert isinstance(errors2, dict)
        assert response2 != ""
async def test_send_message_without_recipients(smtp_client, smtpd_server, message):
    """A message with no To header and no recipients argument is rejected."""
    del message["To"]
    async with smtp_client:
        with pytest.raises(ValueError):
            await smtp_client.send_message(message)
async def test_send_message_without_sender(smtp_client, smtpd_server, message):
    """A message with no From header and no sender argument is rejected."""
    del message["From"]
    async with smtp_client:
        with pytest.raises(ValueError):
            await smtp_client.send_message(message)
async def test_send_message_smtputf8_sender(
    smtp_client_smtputf8,
    smtpd_server_smtputf8,
    message,
    received_commands,
    received_messages,
):
    """A non-ASCII sender address triggers the SMTPUTF8 MAIL parameters."""
    del message["From"]
    message["From"] = "séndër@exåmple.com"
    async with smtp_client_smtputf8:
        errors, response = await smtp_client_smtputf8.send_message(message)
    assert not errors
    assert response != ""
    assert received_commands[1][0] == "MAIL"
    assert received_commands[1][1] == message["From"]
    # Size varies depending on the message type
    assert received_commands[1][2][0].startswith("SIZE=")
    assert received_commands[1][2][1:] == ["SMTPUTF8", "BODY=8BITMIME"]
    assert len(received_messages) == 1
    assert received_messages[0]["X-MailFrom"] == message["From"]
async def test_send_mime_message_smtputf8_recipient(
    smtp_client_smtputf8,
    smtpd_server_smtputf8,
    mime_message,
    received_commands,
    received_messages,
):
    """A non-ASCII recipient on a MIME message is sent raw via SMTPUTF8."""
    mime_message["To"] = "reçipïént@exåmple.com"
    async with smtp_client_smtputf8:
        errors, response = await smtp_client_smtputf8.send_message(mime_message)
    assert not errors
    assert response != ""
    assert received_commands[2][0] == "RCPT"
    # The address should reach the server unencoded (no punycode/MIME words).
    assert received_commands[2][1] == mime_message["To"]
    assert len(received_messages) == 1
    assert received_messages[0]["X-RcptTo"] == ", ".join(mime_message.get_all("To"))
async def test_send_compat32_message_smtputf8_recipient(
    smtp_client_smtputf8,
    smtpd_server_smtputf8,
    compat32_message,
    received_commands,
    received_messages,
):
    """A non-ASCII Header recipient on a compat32 message uses SMTPUTF8."""
    recipient_bytes = bytes("reçipïént@exåmple.com", "utf-8")
    compat32_message["To"] = email.header.Header(recipient_bytes, "utf-8")
    async with smtp_client_smtputf8:
        errors, response = await smtp_client_smtputf8.send_message(compat32_message)
    assert not errors
    assert response != ""
    assert received_commands[2][0] == "RCPT"
    # The address should reach the server unencoded.
    assert received_commands[2][1] == compat32_message["To"]
    assert len(received_messages) == 1
    assert (
        received_messages[0]["X-RcptTo"]
        == "recipient@example.com, reçipïént@exåmple.com"
    )
async def test_send_message_smtputf8_not_supported(smtp_client, smtpd_server, message):
    """A non-ASCII recipient fails when the server lacks SMTPUTF8 support."""
    message["To"] = "reçipïént2@exåmple.com"
    async with smtp_client:
        with pytest.raises(SMTPNotSupported):
            await smtp_client.send_message(message)
async def test_send_compat32_message_utf8_text_without_smtputf8(
    smtp_client, smtpd_server, compat32_message, received_commands, received_messages
):
    """UTF-8 display names in compat32 headers get MIME-encoded without SMTPUTF8."""
    compat32_message["To"] = email.header.Header(
        "reçipïént <recipient2@example.com>", "utf-8"
    )
    async with smtp_client:
        errors, response = await smtp_client.send_message(compat32_message)
    assert not errors
    assert response != ""
    assert received_commands[2][0] == "RCPT"
    assert received_commands[2][1] == compat32_message["To"].encode()
    assert len(received_messages) == 1
    assert (
        received_messages[0]["X-RcptTo"]
        == "recipient@example.com, recipient2@example.com"
    )
    # Name should be encoded
    assert received_messages[0].get_all("To") == [
        "recipient@example.com",
        "=?utf-8?b?cmXDp2lww6/DqW50IDxyZWNpcGllbnQyQGV4YW1wbGUuY29tPg==?=",
    ]
async def test_send_mime_message_utf8_text_without_smtputf8(
    smtp_client, smtpd_server, mime_message, received_commands, received_messages
):
    """UTF-8 display names in MIME headers get MIME-encoded without SMTPUTF8."""
    mime_message["To"] = "reçipïént <recipient2@example.com>"
    async with smtp_client:
        errors, response = await smtp_client.send_message(mime_message)
    assert not errors
    assert response != ""
    assert received_commands[2][0] == "RCPT"
    assert received_commands[2][1] == mime_message["To"]
    assert len(received_messages) == 1
    assert (
        received_messages[0]["X-RcptTo"]
        == "recipient@example.com, recipient2@example.com"
    )
    # Name should be encoded
    assert received_messages[0].get_all("To") == [
        "recipient@example.com",
        "=?utf-8?b?cmXDp2lww6/DqW50IDxyZWNpcGllbnQyQGV4YW1wbGUuY29tPg==?=",
    ]
| 9,602 | 0 | 460 |
8763891fb88cc1cd0e9f6ac035c59a16230135e5 | 129 | py | Python | route/main_error_404.py | susemeee/openNAMU | 0f3d8acb5f0fe179cc8dbbfea1846da3ca0cd4e1 | [
"BSD-3-Clause"
] | null | null | null | route/main_error_404.py | susemeee/openNAMU | 0f3d8acb5f0fe179cc8dbbfea1846da3ca0cd4e1 | [
"BSD-3-Clause"
] | null | null | null | route/main_error_404.py | susemeee/openNAMU | 0f3d8acb5f0fe179cc8dbbfea1846da3ca0cd4e1 | [
"BSD-3-Clause"
] | 1 | 2020-01-04T09:43:35.000Z | 2020-01-04T09:43:35.000Z | from .tool.func import * | 21.5 | 49 | 0.666667 | from .tool.func import *
def main_error_404_2(conn):
    """Handle a 404 by redirecting to the wiki's configured front page.

    ``conn`` is the database connection (kept for route-signature
    compatibility); ``wiki_set(2)`` yields the front-page name and
    ``url_pas`` URL-encodes it for the /w/ route.
    """
    # NOTE(review): the previous `curs = conn.cursor()` local was never
    # used, so it has been removed — confirm wiki_set() does not depend
    # on a cursor being opened here.
    return redirect('/w/' + url_pas(wiki_set(2)))
b2ddef6aa2fb689d119c86fd4710810575904ef7 | 18,985 | py | Python | Matrix_Portal_Moon_Clock/code.py | jposada202020/Adafruit_Learning_System_Guides | d9656b8ba59532926240ddee50b81160e2e3fd11 | [
"MIT"
] | 1 | 2021-01-05T02:08:27.000Z | 2021-01-05T02:08:27.000Z | Matrix_Portal_Moon_Clock/code.py | jposada202020/Adafruit_Learning_System_Guides | d9656b8ba59532926240ddee50b81160e2e3fd11 | [
"MIT"
] | null | null | null | Matrix_Portal_Moon_Clock/code.py | jposada202020/Adafruit_Learning_System_Guides | d9656b8ba59532926240ddee50b81160e2e3fd11 | [
"MIT"
] | null | null | null | """
MOON PHASE CLOCK for Adafruit Matrix Portal: displays current time, lunar
phase and time of next moonrise or moonset. Requires WiFi internet access.
Written by Phil 'PaintYourDragon' Burgess for Adafruit Industries.
MIT license, all text above must be included in any redistribution.
BDF fonts from the X.Org project. Startup 'splash' images should not be
included in derivative projects, thanks. Tall splash images licensed from
123RF.com, wide splash images used with permission of artist Lew Lashmit
(viergacht@gmail.com). Rawr!
"""
# pylint: disable=import-error
import gc
import time
import math
import json
import board
import busio
import displayio
from rtc import RTC
from adafruit_matrixportal.network import Network
from adafruit_matrixportal.matrix import Matrix
from adafruit_bitmap_font import bitmap_font
import adafruit_display_text.label
import adafruit_lis3dh
try:
from secrets import secrets
except ImportError:
print('WiFi secrets are kept in secrets.py, please add them there!')
raise
# CONFIGURABLE SETTINGS ----------------------------------------------------
TWELVE_HOUR = True # If set, use 12-hour time vs 24-hour (e.g. 3:00 vs 15:00)
COUNTDOWN = False # If set, show time to (vs time of) next rise/set event
MONTH_DAY = True # If set, use MM/DD vs DD/MM (e.g. 31/12 vs 12/31)
BITPLANES = 6 # Ideally 6, but can set lower if RAM is tight
# SOME UTILITY FUNCTIONS AND CLASSES ---------------------------------------
def parse_time(timestring, is_dst=-1):
    """ Given a string of the format YYYY-MM-DDTHH:MM:SS.SS-HH:MM (and
        optionally a DST flag), convert to and return an equivalent
        time.struct_time (strptime() isn't available here). Calling function
        can use time.mktime() on result if epoch seconds is needed instead.
        Time string is assumed local time; UTC offset is ignored. If seconds
        value includes a decimal fraction it's ignored.
    """
    date_time = timestring.split('T')        # Separate into date and time
    year_month_day = date_time[0].split('-') # Separate date into Y/M/D
    # Strip any trailing +HH:MM / -HH:MM offset, then split into H/M/S
    hour_minute_second = date_time[1].split('+')[0].split('-')[0].split(':')
    # Pass the nine fields as a single tuple: CPython's struct_time
    # constructor only accepts a sequence, and CircuitPython accepts a
    # sequence as well, so this form is portable to both.
    return time.struct_time((int(year_month_day[0]),
                             int(year_month_day[1]),
                             int(year_month_day[2]),
                             int(hour_minute_second[0]),
                             int(hour_minute_second[1]),
                             int(hour_minute_second[2].split('.')[0]),
                             -1, -1, is_dst))
def update_time(timezone=None):
    """ Set the RTC from the public WorldTimeAPI server (no account needed).
        timezone is a zone string such as 'America/New_York'
        (http://worldtimeapi.org/api/timezone lists them all), or None to
        let the server geolocate by IP. Returns the current local time as
        a time.struct_time plus the UTC offset as a string.
        NOTE: exceptions from fetch_data() are deliberately NOT caught
        here; callers handle them, since the right reaction (retry,
        reschedule, fall back) differs by situation.
    """
    if not timezone:  # No zone given -> server geolocates from our IP
        time_url = 'http://worldtimeapi.org/api/ip'
    else:             # Explicit time zone requested
        time_url = 'http://worldtimeapi.org/api/timezone/' + timezone
    datetime_str, dst_flag, utc_offset = NETWORK.fetch_data(
        time_url,
        json_path=[['datetime'], ['dst'], ['utc_offset']])
    time_struct = parse_time(datetime_str, dst_flag)
    RTC().datetime = time_struct
    return time_struct, utc_offset
def hh_mm(time_struct, twelve_hour=None):
    """ Given a time.struct_time, return a string as H:MM or HH:MM, either
        12- or 24-hour style. twelve_hour=True forces 12-hour, False forces
        24-hour; the default (None) follows the module-wide TWELVE_HOUR
        setting, preserving the original behavior for existing callers.
        This is ONLY for 'clock time,' NOT for countdown time, which is
        handled separately in the one spot where it's needed.
    """
    if twelve_hour is None:
        twelve_hour = TWELVE_HOUR  # Fall back to the global configuration
    if twelve_hour:
        if time_struct.tm_hour > 12:
            hour_string = str(time_struct.tm_hour - 12) # 13-23 -> 1-11 (pm)
        elif time_struct.tm_hour > 0:
            hour_string = str(time_struct.tm_hour)      # 1-12
        else:
            hour_string = '12'                          # 0 -> 12 (am)
    else:
        hour_string = '{0:0>2}'.format(time_struct.tm_hour)
    return hour_string + ':' + '{0:0>2}'.format(time_struct.tm_min)
# pylint: disable=too-few-public-methods
class MoonData():
    """ Class holding lunar data for a given day (00:00:00 to 23:59:59).
        App uses two of these -- one for the current day, and one for the
        following day -- then some interpolations and such can be made.
        Elements include:
        age      : Moon phase 'age' at midnight (start of period)
                   expressed from 0.0 (new moon) through 0.5 (full moon)
                   to 1.0 (next new moon).
        midnight : Epoch time in seconds @ midnight (start of period).
        rise     : Epoch time of moon rise within this 24-hour period.
        set      : Epoch time of moon set within this 24-hour period.
    """
    def __init__(self, datetime, hours_ahead, utc_offset):
        """ Initialize MoonData object elements (see above) from a
            time.struct_time, hours to skip ahead (typically 0 or 24),
            and a UTC offset (as a string) and a query to the MET Norway
            Sunrise API (also provides lunar data), documented at:
            https://api.met.no/weatherapi/sunrise/2.0/documentation
            Relies on module globals LATITUDE, LONGITUDE and NETWORK.
        """
        if hours_ahead:
            # Can't change attribute in datetime struct, need to create
            # a new one which will roll the date ahead as needed. Convert
            # to epoch seconds and back for the offset to work
            # NOTE(review): struct_time() with nine positional args is a
            # CircuitPython extension; CPython requires a single sequence.
            datetime = time.localtime(time.mktime(time.struct_time(
                datetime.tm_year,
                datetime.tm_mon,
                datetime.tm_mday,
                datetime.tm_hour + hours_ahead,
                datetime.tm_min,
                datetime.tm_sec,
                -1, -1, -1)))
        # strftime() not available here
        url = ('https://api.met.no/weatherapi/sunrise/2.0/.json?lat=' +
               str(LATITUDE) + '&lon=' + str(LONGITUDE) +
               '&date=' + str(datetime.tm_year) + '-' +
               '{0:0>2}'.format(datetime.tm_mon) + '-' +
               '{0:0>2}'.format(datetime.tm_mday) +
               '&offset=' + utc_offset)
        print('Fetching moon data via', url)
        # pylint: disable=bare-except
        for _ in range(5): # Retries
            try:
                full_data = json.loads(NETWORK.fetch_data(url))
                moon_data = full_data['location']['time'][0]
                #print(moon_data)
                # Reconstitute JSON data into the elements we need
                self.age = float(moon_data['moonphase']['value']) / 100
                self.midnight = time.mktime(parse_time(
                    moon_data['moonphase']['time']))
                # Rise/set may be absent in a given 24-hour window (moon
                # rises ~50 min later each day), hence the None fallbacks.
                if 'moonrise' in moon_data:
                    self.rise = time.mktime(
                        parse_time(moon_data['moonrise']['time']))
                else:
                    self.rise = None
                if 'moonset' in moon_data:
                    self.set = time.mktime(
                        parse_time(moon_data['moonset']['time']))
                else:
                    self.set = None
                return # Success!
            except:
                # Moon server error (maybe), try again after 15 seconds.
                # (Might be a memory error, that should be handled different)
                # NOTE(review): if all 5 attempts fail, __init__ returns
                # with age/midnight/rise/set never assigned -- callers
                # would hit AttributeError later; consider raising instead.
                time.sleep(15)
# ONE-TIME INITIALIZATION --------------------------------------------------
MATRIX = Matrix(bit_depth=BITPLANES)
DISPLAY = MATRIX.display
ACCEL = adafruit_lis3dh.LIS3DH_I2C(busio.I2C(board.SCL, board.SDA),
address=0x19)
_ = ACCEL.acceleration # Dummy reading to blow out any startup residue
time.sleep(0.1)
DISPLAY.rotation = (int(((math.atan2(-ACCEL.acceleration.y,
-ACCEL.acceleration.x) + math.pi) /
(math.pi * 2) + 0.875) * 4) % 4) * 90
LARGE_FONT = bitmap_font.load_font('/fonts/helvB12.bdf')
SMALL_FONT = bitmap_font.load_font('/fonts/helvR10.bdf')
SYMBOL_FONT = bitmap_font.load_font('/fonts/6x10.bdf')
LARGE_FONT.load_glyphs('0123456789:')
SMALL_FONT.load_glyphs('0123456789:/.%')
SYMBOL_FONT.load_glyphs('\u21A5\u21A7')
# Display group is set up once, then we just shuffle items around later.
# Order of creation here determines their stacking order.
GROUP = displayio.Group(max_size=10)
# Element 0 is a stand-in item, later replaced with the moon phase bitmap
# pylint: disable=bare-except
try:
FILENAME = 'moon/splash-' + str(DISPLAY.rotation) + '.bmp'
BITMAP = displayio.OnDiskBitmap(open(FILENAME, 'rb'))
TILE_GRID = displayio.TileGrid(BITMAP,
pixel_shader=displayio.ColorConverter(),)
GROUP.append(TILE_GRID)
except:
GROUP.append(adafruit_display_text.label.Label(SMALL_FONT, color=0xFF0000,
text='AWOO'))
GROUP[0].x = (DISPLAY.width - GROUP[0].bounding_box[2] + 1) // 2
GROUP[0].y = DISPLAY.height // 2 - 1
# Elements 1-4 are an outline around the moon percentage -- text labels
# offset by 1 pixel up/down/left/right. Initial position is off the matrix,
# updated on first refresh. Initial text value must be long enough for
# longest anticipated string later.
for i in range(4):
GROUP.append(adafruit_display_text.label.Label(SMALL_FONT, color=0,
text='99.9%', y=-99))
# Element 5 is the moon percentage (on top of the outline labels)
GROUP.append(adafruit_display_text.label.Label(SMALL_FONT, color=0xFFFF00,
text='99.9%', y=-99))
# Element 6 is the current time
GROUP.append(adafruit_display_text.label.Label(LARGE_FONT, color=0x808080,
text='12:00', y=-99))
# Element 7 is the current date
GROUP.append(adafruit_display_text.label.Label(SMALL_FONT, color=0x808080,
text='12/31', y=-99))
# Element 8 is a symbol indicating next rise or set
GROUP.append(adafruit_display_text.label.Label(SYMBOL_FONT, color=0x00FF00,
text='x', y=-99))
# Element 9 is the time of (or time to) next rise/set event
GROUP.append(adafruit_display_text.label.Label(SMALL_FONT, color=0x00FF00,
text='12:00', y=-99))
DISPLAY.show(GROUP)
NETWORK = Network(status_neopixel=board.NEOPIXEL, debug=False)
NETWORK.connect()
# LATITUDE, LONGITUDE, TIMEZONE are set up once, constant over app lifetime
# Fetch latitude/longitude from secrets.py. If not present, use
# IP geolocation. This only needs to be done once, at startup!
try:
LATITUDE = secrets['latitude']
LONGITUDE = secrets['longitude']
print('Using stored geolocation: ', LATITUDE, LONGITUDE)
except KeyError:
LATITUDE, LONGITUDE = (
NETWORK.fetch_data('http://www.geoplugin.net/json.gp',
json_path=[['geoplugin_latitude'],
['geoplugin_longitude']]))
print('Using IP geolocation: ', LATITUDE, LONGITUDE)
# Load time zone string from secrets.py, else IP geolocation for this too
# (http://worldtimeapi.org/api/timezone for list).
try:
TIMEZONE = secrets['timezone'] # e.g. 'America/New_York'
except:
TIMEZONE = None # IP geolocation
# Set initial clock time, also fetch initial UTC offset while
# here (NOT stored in secrets.py as it may change with DST).
# pylint: disable=bare-except
try:
DATETIME, UTC_OFFSET = update_time(TIMEZONE)
except:
DATETIME, UTC_OFFSET = time.localtime(), '+00:00'
LAST_SYNC = time.mktime(DATETIME)
# Poll server for moon data for current 24-hour period and +24 ahead
PERIOD = []
for DAY in range(2):
PERIOD.append(MoonData(DATETIME, DAY * 24, UTC_OFFSET))
# PERIOD[0] is the current 24-hour time period we're in. PERIOD[1] is the
# following 24 hours. Data is shifted down and new data fetched as days
# expire. Thought we might need a PERIOD[2] for certain circumstances but
# it appears not, that's changed easily enough if needed.
# MAIN LOOP ----------------------------------------------------------------
while True:
gc.collect()
NOW = time.time() # Current epoch time in seconds
# Sync with time server every ~12 hours
if NOW - LAST_SYNC > 12 * 60 * 60:
try:
DATETIME, UTC_OFFSET = update_time(TIMEZONE)
LAST_SYNC = time.mktime(DATETIME)
continue # Time may have changed; refresh NOW value
except:
# update_time() can throw an exception if time server doesn't
# respond. That's OK, keep running with our current time, and
# push sync time ahead to retry in 30 minutes (don't overwhelm
# the server with repeated queries).
LAST_SYNC += 30 * 60 # 30 minutes -> seconds
# If PERIOD has expired, move data down and fetch new +24-hour data
if NOW >= PERIOD[1].midnight:
PERIOD[0] = PERIOD[1]
PERIOD[1] = MoonData(time.localtime(), 24, UTC_OFFSET)
# Determine weighting of tomorrow's phase vs today's, using current time
RATIO = ((NOW - PERIOD[0].midnight) /
(PERIOD[1].midnight - PERIOD[0].midnight))
# Determine moon phase 'age'
# 0.0 = new moon
# 0.25 = first quarter
# 0.5 = full moon
# 0.75 = last quarter
# 1.0 = new moon
if PERIOD[0].age < PERIOD[1].age:
AGE = (PERIOD[0].age +
(PERIOD[1].age - PERIOD[0].age) * RATIO) % 1.0
else: # Handle age wraparound (1.0 -> 0.0)
# If tomorrow's age is less than today's, it indicates a new moon
# crossover. Add 1 to tomorrow's age when computing age delta.
AGE = (PERIOD[0].age +
(PERIOD[1].age + 1 - PERIOD[0].age) * RATIO) % 1.0
# AGE can be used for direct lookup to moon bitmap (0 to 99) -- these
# images are pre-rendered for a linear timescale (solar terminator moves
# nonlinearly across sphere).
FRAME = int(AGE * 100) % 100 # Bitmap 0 to 99
# Then use some trig to get percentage lit
if AGE <= 0.5: # New -> first quarter -> full
PERCENT = (1 - math.cos(AGE * 2 * math.pi)) * 50
else: # Full -> last quarter -> new
PERCENT = (1 + math.cos((AGE - 0.5) * 2 * math.pi)) * 50
# Find next rise/set event, complicated by the fact that some 24-hour
# periods might not have one or the other (but usually do) due to the
# Moon rising ~50 mins later each day. This uses a brute force approach,
# working backwards through the time periods to locate rise/set events
# that A) exist in that 24-hour period (are not None), B) are still in
# the future, and C) are closer than the last guess. What's left at the
# end is the next rise or set (and the inverse of the event type tells
# us whether Moon's currently risen or not).
NEXT_EVENT = PERIOD[1].midnight + 100000 # Force first match
for DAY in reversed(PERIOD):
if DAY.rise and NEXT_EVENT >= DAY.rise >= NOW:
NEXT_EVENT = DAY.rise
RISEN = False
if DAY.set and NEXT_EVENT >= DAY.set >= NOW:
NEXT_EVENT = DAY.set
RISEN = True
if DISPLAY.rotation in (0, 180): # Horizontal 'landscape' orientation
CENTER_X = 48 # Text along right
MOON_Y = 0 # Moon at left
TIME_Y = 6 # Time at top right
EVENT_Y = 26 # Rise/set at bottom right
else: # Vertical 'portrait' orientation
CENTER_X = 16 # Text down center
if RISEN:
MOON_Y = 0 # Moon at top
EVENT_Y = 38 # Rise/set in middle
TIME_Y = 49 # Time/date at bottom
else:
TIME_Y = 6 # Time/date at top
EVENT_Y = 26 # Rise/set in middle
MOON_Y = 32 # Moon at bottom
print()
# Update moon image (GROUP[0])
FILENAME = 'moon/moon' + '{0:0>2}'.format(FRAME) + '.bmp'
BITMAP = displayio.OnDiskBitmap(open(FILENAME, 'rb'))
TILE_GRID = displayio.TileGrid(BITMAP,
pixel_shader=displayio.ColorConverter(),)
TILE_GRID.x = 0
TILE_GRID.y = MOON_Y
GROUP[0] = TILE_GRID
# Update percent value (5 labels: GROUP[1-4] for outline, [5] for text)
if PERCENT >= 99.95:
STRING = '100%'
else:
STRING = '{:.1f}'.format(PERCENT + 0.05) + '%'
print(NOW, STRING, 'full')
# Set element 5 first, use its size and position for setting others
GROUP[5].text = STRING
GROUP[5].x = 16 - GROUP[5].bounding_box[2] // 2
GROUP[5].y = MOON_Y + 16
for _ in range(1, 5):
GROUP[_].text = GROUP[5].text
GROUP[1].x, GROUP[1].y = GROUP[5].x, GROUP[5].y - 1 # Up 1 pixel
GROUP[2].x, GROUP[2].y = GROUP[5].x - 1, GROUP[5].y # Left
GROUP[3].x, GROUP[3].y = GROUP[5].x + 1, GROUP[5].y # Right
GROUP[4].x, GROUP[4].y = GROUP[5].x, GROUP[5].y + 1 # Down
# Update next-event time (GROUP[8] and [9])
# Do this before time because we need uncorrupted NOW value
EVENT_TIME = time.localtime(NEXT_EVENT) # Convert to struct for later
if COUNTDOWN: # Show NEXT_EVENT as countdown to event
NEXT_EVENT -= NOW # Time until (vs time of) next rise/set
MINUTES = NEXT_EVENT // 60
STRING = str(MINUTES // 60) + ':' + '{0:0>2}'.format(MINUTES % 60)
else: # Show NEXT_EVENT in clock time
STRING = hh_mm(EVENT_TIME)
GROUP[9].text = STRING
XPOS = CENTER_X - (GROUP[9].bounding_box[2] + 6) // 2
GROUP[8].x = XPOS
if RISEN: # Next event is SET
GROUP[8].text = '\u21A7' # Downwards arrow from bar
GROUP[8].y = EVENT_Y - 2
print('Sets:', STRING)
else: # Next event is RISE
GROUP[8].text = '\u21A5' # Upwards arrow from bar
GROUP[8].y = EVENT_Y - 1
print('Rises:', STRING)
GROUP[9].x = XPOS + 6
GROUP[9].y = EVENT_Y
# Show event time in green if a.m., amber if p.m.
GROUP[8].color = GROUP[9].color = (0x00FF00 if EVENT_TIME.tm_hour < 12
else 0xC04000)
# Update time (GROUP[6]) and date (GROUP[7])
NOW = time.localtime()
STRING = hh_mm(NOW)
GROUP[6].text = STRING
GROUP[6].x = CENTER_X - GROUP[6].bounding_box[2] // 2
GROUP[6].y = TIME_Y
if MONTH_DAY:
STRING = str(NOW.tm_mon) + '/' + str(NOW.tm_mday)
else:
STRING = str(NOW.tm_mday) + '/' + str(NOW.tm_mon)
GROUP[7].text = STRING
GROUP[7].x = CENTER_X - GROUP[7].bounding_box[2] // 2
GROUP[7].y = TIME_Y + 10
DISPLAY.refresh() # Force full repaint (splash screen sometimes sticks)
time.sleep(5)
| 43.946759 | 78 | 0.603318 | """
MOON PHASE CLOCK for Adafruit Matrix Portal: displays current time, lunar
phase and time of next moonrise or moonset. Requires WiFi internet access.
Written by Phil 'PaintYourDragon' Burgess for Adafruit Industries.
MIT license, all text above must be included in any redistribution.
BDF fonts from the X.Org project. Startup 'splash' images should not be
included in derivative projects, thanks. Tall splash images licensed from
123RF.com, wide splash images used with permission of artist Lew Lashmit
(viergacht@gmail.com). Rawr!
"""
# pylint: disable=import-error
import gc
import time
import math
import json
import board
import busio
import displayio
from rtc import RTC
from adafruit_matrixportal.network import Network
from adafruit_matrixportal.matrix import Matrix
from adafruit_bitmap_font import bitmap_font
import adafruit_display_text.label
import adafruit_lis3dh
try:
from secrets import secrets
except ImportError:
print('WiFi secrets are kept in secrets.py, please add them there!')
raise
# CONFIGURABLE SETTINGS ----------------------------------------------------
TWELVE_HOUR = True # If set, use 12-hour time vs 24-hour (e.g. 3:00 vs 15:00)
COUNTDOWN = False # If set, show time to (vs time of) next rise/set event
MONTH_DAY = True # If set, use MM/DD vs DD/MM (e.g. 31/12 vs 12/31)
BITPLANES = 6 # Ideally 6, but can set lower if RAM is tight
# SOME UTILITY FUNCTIONS AND CLASSES ---------------------------------------
def parse_time(timestring, is_dst=-1):
    """ Given a string of the format YYYY-MM-DDTHH:MM:SS.SS-HH:MM (and
        optionally a DST flag), convert to and return an equivalent
        time.struct_time (strptime() isn't available here). Calling function
        can use time.mktime() on result if epoch seconds is needed instead.
        Time string is assumed local time; UTC offset is ignored. If seconds
        value includes a decimal fraction it's ignored.
    """
    date_time = timestring.split('T')        # Separate into date and time
    year_month_day = date_time[0].split('-') # Separate date into Y/M/D
    # Strip any trailing +HH:MM / -HH:MM offset, then split into H/M/S
    hour_minute_second = date_time[1].split('+')[0].split('-')[0].split(':')
    # Pass the nine fields as a single tuple: CPython's struct_time
    # constructor only accepts a sequence, and CircuitPython accepts a
    # sequence as well, so this form is portable to both.
    return time.struct_time((int(year_month_day[0]),
                             int(year_month_day[1]),
                             int(year_month_day[2]),
                             int(hour_minute_second[0]),
                             int(hour_minute_second[1]),
                             int(hour_minute_second[2].split('.')[0]),
                             -1, -1, is_dst))
def update_time(timezone=None):
    """ Update system date/time from WorldTimeAPI public server;
        no account required. Pass in time zone string
        (http://worldtimeapi.org/api/timezone for list)
        or None to use IP geolocation. Returns current local time as a
        time.struct_time and UTC offset as string. This may throw an
        exception on fetch_data() - it is NOT CAUGHT HERE, should be
        handled in the calling code because different behaviors may be
        needed in different situations (e.g. reschedule for later).
    """
    # Explicit zone uses the timezone endpoint; otherwise geolocate by IP
    time_url = ('http://worldtimeapi.org/api/timezone/' + timezone
                if timezone else 'http://worldtimeapi.org/api/ip')
    datetime_str, dst_flag, utc_offset = NETWORK.fetch_data(
        time_url, json_path=[['datetime'], ['dst'], ['utc_offset']])
    time_struct = parse_time(datetime_str, dst_flag)
    RTC().datetime = time_struct  # Set the on-board real-time clock
    return time_struct, utc_offset
def hh_mm(time_struct, twelve_hour=None):
    """ Given a time.struct_time, return a string as H:MM or HH:MM, either
        12- or 24-hour style. ``twelve_hour`` overrides the global
        TWELVE_HOUR setting when not None; existing callers that pass only
        ``time_struct`` keep the old global-driven behavior.
        This is ONLY for 'clock time,' NOT for countdown time, which is
        handled separately in the one spot where it's needed.
    """
    if twelve_hour is None:
        twelve_hour = TWELVE_HOUR  # Fall back to module-level setting
    if twelve_hour:
        if time_struct.tm_hour > 12:
            hour_string = str(time_struct.tm_hour - 12) # 13-23 -> 1-11 (pm)
        elif time_struct.tm_hour > 0:
            hour_string = str(time_struct.tm_hour)      # 1-12
        else:
            hour_string = '12'                          # 0 -> 12 (am)
    else:
        hour_string = '{0:0>2}'.format(time_struct.tm_hour)
    return hour_string + ':' + '{0:0>2}'.format(time_struct.tm_min)
# pylint: disable=too-few-public-methods
class MoonData():
    """ Class holding lunar data for a given day (00:00:00 to 23:59:59).
    App uses two of these -- one for the current day, and one for the
    following day -- then some interpolations and such can be made.
    Elements include:
    age      : Moon phase 'age' at midnight (start of period)
               expressed from 0.0 (new moon) through 0.5 (full moon)
               to 1.0 (next new moon).
    midnight : Epoch time in seconds @ midnight (start of period).
    rise     : Epoch time of moon rise within this 24-hour period,
               or None if no rise occurs in the period.
    set      : Epoch time of moon set within this 24-hour period,
               or None if no set occurs in the period.
    """
    def __init__(self, datetime, hours_ahead, utc_offset):
        """ Initialize MoonData object elements (see above) from a
            time.struct_time, hours to skip ahead (typically 0 or 24),
            and a UTC offset (as a string), via a query to the MET Norway
            Sunrise API (also provides lunar data), documented at:
            https://api.met.no/weatherapi/sunrise/2.0/documentation
        """
        if hours_ahead:
            # Can't change attribute in datetime struct, need to create
            # a new one which will roll the date ahead as needed. Convert
            # to epoch seconds and back for the offset to work
            datetime = time.localtime(time.mktime(time.struct_time(
                datetime.tm_year,
                datetime.tm_mon,
                datetime.tm_mday,
                datetime.tm_hour + hours_ahead,
                datetime.tm_min,
                datetime.tm_sec,
                -1, -1, -1)))
        # strftime() not available here; build the query URL by hand
        url = ('https://api.met.no/weatherapi/sunrise/2.0/.json?lat=' +
               str(LATITUDE) + '&lon=' + str(LONGITUDE) +
               '&date=' + str(datetime.tm_year) + '-' +
               '{0:0>2}'.format(datetime.tm_mon) + '-' +
               '{0:0>2}'.format(datetime.tm_mday) +
               '&offset=' + utc_offset)
        print('Fetching moon data via', url)
        # pylint: disable=bare-except
        for _ in range(5): # Retries
            try:
                full_data = json.loads(NETWORK.fetch_data(url))
                moon_data = full_data['location']['time'][0]
                #print(moon_data)
                # Reconstitute JSON data into the elements we need
                self.age = float(moon_data['moonphase']['value']) / 100
                self.midnight = time.mktime(parse_time(
                    moon_data['moonphase']['time']))
                if 'moonrise' in moon_data:
                    self.rise = time.mktime(
                        parse_time(moon_data['moonrise']['time']))
                else:
                    self.rise = None
                if 'moonset' in moon_data:
                    self.set = time.mktime(
                        parse_time(moon_data['moonset']['time']))
                else:
                    self.set = None
                return # Success!
            except:
                # Moon server error (maybe), try again after 15 seconds.
                # (Might be a memory error, that should be handled different)
                # NOTE(review): if all 5 attempts fail, no attributes are
                # ever set and later access raises AttributeError -- confirm
                # this failure mode is acceptable.
                time.sleep(15)
# ONE-TIME INITIALIZATION --------------------------------------------------
MATRIX = Matrix(bit_depth=BITPLANES)
DISPLAY = MATRIX.display
ACCEL = adafruit_lis3dh.LIS3DH_I2C(busio.I2C(board.SCL, board.SDA),
                                   address=0x19)
_ = ACCEL.acceleration # Dummy reading to blow out any startup residue
time.sleep(0.1)
# Snap display rotation to the nearest multiple of 90 degrees based on the
# accelerometer's gravity vector, so the clock is upright however mounted.
DISPLAY.rotation = (int(((math.atan2(-ACCEL.acceleration.y,
                                     -ACCEL.acceleration.x) + math.pi) /
                         (math.pi * 2) + 0.875) * 4) % 4) * 90
LARGE_FONT = bitmap_font.load_font('/fonts/helvB12.bdf')
SMALL_FONT = bitmap_font.load_font('/fonts/helvR10.bdf')
SYMBOL_FONT = bitmap_font.load_font('/fonts/6x10.bdf')
# Pre-load only the glyphs actually used, to save RAM
LARGE_FONT.load_glyphs('0123456789:')
SMALL_FONT.load_glyphs('0123456789:/.%')
SYMBOL_FONT.load_glyphs('\u21A5\u21A7')
# Display group is set up once, then we just shuffle items around later.
# Order of creation here determines their stacking order.
GROUP = displayio.Group(max_size=10)
# Element 0 is a stand-in item, later replaced with the moon phase bitmap
# pylint: disable=bare-except
try:
    FILENAME = 'moon/splash-' + str(DISPLAY.rotation) + '.bmp'
    BITMAP = displayio.OnDiskBitmap(open(FILENAME, 'rb'))
    TILE_GRID = displayio.TileGrid(BITMAP,
                                   pixel_shader=displayio.ColorConverter(),)
    GROUP.append(TILE_GRID)
except:
    # No splash bitmap available for this rotation -- text placeholder
    GROUP.append(adafruit_display_text.label.Label(SMALL_FONT, color=0xFF0000,
                                                   text='AWOO'))
GROUP[0].x = (DISPLAY.width - GROUP[0].bounding_box[2] + 1) // 2
GROUP[0].y = DISPLAY.height // 2 - 1
# Elements 1-4 are an outline around the moon percentage -- text labels
# offset by 1 pixel up/down/left/right. Initial position is off the matrix,
# updated on first refresh. Initial text value must be long enough for
# longest anticipated string later.
for i in range(4):
    GROUP.append(adafruit_display_text.label.Label(SMALL_FONT, color=0,
                                                   text='99.9%', y=-99))
# Element 5 is the moon percentage (on top of the outline labels)
GROUP.append(adafruit_display_text.label.Label(SMALL_FONT, color=0xFFFF00,
                                               text='99.9%', y=-99))
# Element 6 is the current time
GROUP.append(adafruit_display_text.label.Label(LARGE_FONT, color=0x808080,
                                               text='12:00', y=-99))
# Element 7 is the current date
GROUP.append(adafruit_display_text.label.Label(SMALL_FONT, color=0x808080,
                                               text='12/31', y=-99))
# Element 8 is a symbol indicating next rise or set
GROUP.append(adafruit_display_text.label.Label(SYMBOL_FONT, color=0x00FF00,
                                               text='x', y=-99))
# Element 9 is the time of (or time to) next rise/set event
GROUP.append(adafruit_display_text.label.Label(SMALL_FONT, color=0x00FF00,
                                               text='12:00', y=-99))
DISPLAY.show(GROUP)
# Bring up WiFi; the status NeoPixel shows connection progress
NETWORK = Network(status_neopixel=board.NEOPIXEL, debug=False)
NETWORK.connect()
# LATITUDE, LONGITUDE, TIMEZONE are set up once, constant over app lifetime
# Fetch latitude/longitude from secrets.py. If not present, use
# IP geolocation. This only needs to be done once, at startup!
try:
    LATITUDE = secrets['latitude']
    LONGITUDE = secrets['longitude']
    print('Using stored geolocation: ', LATITUDE, LONGITUDE)
except KeyError:
    LATITUDE, LONGITUDE = (
        NETWORK.fetch_data('http://www.geoplugin.net/json.gp',
                           json_path=[['geoplugin_latitude'],
                                      ['geoplugin_longitude']]))
    print('Using IP geolocation: ', LATITUDE, LONGITUDE)
# Load time zone string from secrets.py, else IP geolocation for this too
# (http://worldtimeapi.org/api/timezone for list).
try:
    TIMEZONE = secrets['timezone'] # e.g. 'America/New_York'
except:
    TIMEZONE = None # IP geolocation
# Set initial clock time, also fetch initial UTC offset while
# here (NOT stored in secrets.py as it may change with DST).
# pylint: disable=bare-except
try:
    DATETIME, UTC_OFFSET = update_time(TIMEZONE)
except:
    # Server unreachable: carry on with whatever the RTC currently holds
    DATETIME, UTC_OFFSET = time.localtime(), '+00:00'
LAST_SYNC = time.mktime(DATETIME)
# Poll server for moon data for current 24-hour period and +24 ahead
PERIOD = []
for DAY in range(2):
    PERIOD.append(MoonData(DATETIME, DAY * 24, UTC_OFFSET))
# PERIOD[0] is the current 24-hour time period we're in. PERIOD[1] is the
# following 24 hours. Data is shifted down and new data fetched as days
# expire. Thought we might need a PERIOD[2] for certain circumstances but
# it appears not, that's changed easily enough if needed.
# MAIN LOOP ----------------------------------------------------------------
# One pass roughly every 5 seconds (see the sleep at the bottom).
while True:
    gc.collect()
    NOW = time.time() # Current epoch time in seconds
    # Sync with time server every ~12 hours
    if NOW - LAST_SYNC > 12 * 60 * 60:
        try:
            DATETIME, UTC_OFFSET = update_time(TIMEZONE)
            LAST_SYNC = time.mktime(DATETIME)
            continue # Time may have changed; refresh NOW value
        except:
            # update_time() can throw an exception if time server doesn't
            # respond. That's OK, keep running with our current time, and
            # push sync time ahead to retry in 30 minutes (don't overwhelm
            # the server with repeated queries).
            LAST_SYNC += 30 * 60 # 30 minutes -> seconds
    # If PERIOD has expired, move data down and fetch new +24-hour data
    if NOW >= PERIOD[1].midnight:
        PERIOD[0] = PERIOD[1]
        PERIOD[1] = MoonData(time.localtime(), 24, UTC_OFFSET)
    # Determine weighting of tomorrow's phase vs today's, using current time
    RATIO = ((NOW - PERIOD[0].midnight) /
             (PERIOD[1].midnight - PERIOD[0].midnight))
    # Determine moon phase 'age'
    # 0.0  = new moon
    # 0.25 = first quarter
    # 0.5  = full moon
    # 0.75 = last quarter
    # 1.0  = new moon
    if PERIOD[0].age < PERIOD[1].age:
        AGE = (PERIOD[0].age +
               (PERIOD[1].age - PERIOD[0].age) * RATIO) % 1.0
    else: # Handle age wraparound (1.0 -> 0.0)
        # If tomorrow's age is less than today's, it indicates a new moon
        # crossover. Add 1 to tomorrow's age when computing age delta.
        AGE = (PERIOD[0].age +
               (PERIOD[1].age + 1 - PERIOD[0].age) * RATIO) % 1.0
    # AGE can be used for direct lookup to moon bitmap (0 to 99) -- these
    # images are pre-rendered for a linear timescale (solar terminator moves
    # nonlinearly across sphere).
    FRAME = int(AGE * 100) % 100 # Bitmap 0 to 99
    # Then use some trig to get percentage lit
    if AGE <= 0.5: # New -> first quarter -> full
        PERCENT = (1 - math.cos(AGE * 2 * math.pi)) * 50
    else:          # Full -> last quarter -> new
        PERCENT = (1 + math.cos((AGE - 0.5) * 2 * math.pi)) * 50
    # Find next rise/set event, complicated by the fact that some 24-hour
    # periods might not have one or the other (but usually do) due to the
    # Moon rising ~50 mins later each day. This uses a brute force approach,
    # working backwards through the time periods to locate rise/set events
    # that A) exist in that 24-hour period (are not None), B) are still in
    # the future, and C) are closer than the last guess. What's left at the
    # end is the next rise or set (and the inverse of the event type tells
    # us whether Moon's currently risen or not).
    NEXT_EVENT = PERIOD[1].midnight + 100000 # Force first match
    for DAY in reversed(PERIOD):
        if DAY.rise and NEXT_EVENT >= DAY.rise >= NOW:
            NEXT_EVENT = DAY.rise
            RISEN = False
        if DAY.set and NEXT_EVENT >= DAY.set >= NOW:
            NEXT_EVENT = DAY.set
            RISEN = True
    # NOTE(review): if neither period contains any rise or set event, RISEN
    # is never assigned and its use below raises NameError -- presumably the
    # API always reports at least one event across 48 hours; confirm.
    if DISPLAY.rotation in (0, 180): # Horizontal 'landscape' orientation
        CENTER_X = 48      # Text along right
        MOON_Y = 0         # Moon at left
        TIME_Y = 6         # Time at top right
        EVENT_Y = 26       # Rise/set at bottom right
    else:                  # Vertical 'portrait' orientation
        CENTER_X = 16      # Text down center
        if RISEN:
            MOON_Y = 0     # Moon at top
            EVENT_Y = 38   # Rise/set in middle
            TIME_Y = 49    # Time/date at bottom
        else:
            TIME_Y = 6     # Time/date at top
            EVENT_Y = 26   # Rise/set in middle
            MOON_Y = 32    # Moon at bottom
    print()
    # Update moon image (GROUP[0])
    FILENAME = 'moon/moon' + '{0:0>2}'.format(FRAME) + '.bmp'
    BITMAP = displayio.OnDiskBitmap(open(FILENAME, 'rb'))
    TILE_GRID = displayio.TileGrid(BITMAP,
                                   pixel_shader=displayio.ColorConverter(),)
    TILE_GRID.x = 0
    TILE_GRID.y = MOON_Y
    GROUP[0] = TILE_GRID
    # Update percent value (5 labels: GROUP[1-4] for outline, [5] for text)
    if PERCENT >= 99.95:
        STRING = '100%'
    else:
        STRING = '{:.1f}'.format(PERCENT + 0.05) + '%'
    print(NOW, STRING, 'full')
    # Set element 5 first, use its size and position for setting others
    GROUP[5].text = STRING
    GROUP[5].x = 16 - GROUP[5].bounding_box[2] // 2
    GROUP[5].y = MOON_Y + 16
    for _ in range(1, 5):
        GROUP[_].text = GROUP[5].text
    GROUP[1].x, GROUP[1].y = GROUP[5].x, GROUP[5].y - 1 # Up 1 pixel
    GROUP[2].x, GROUP[2].y = GROUP[5].x - 1, GROUP[5].y # Left
    GROUP[3].x, GROUP[3].y = GROUP[5].x + 1, GROUP[5].y # Right
    GROUP[4].x, GROUP[4].y = GROUP[5].x, GROUP[5].y + 1 # Down
    # Update next-event time (GROUP[8] and [9])
    # Do this before time because we need uncorrupted NOW value
    EVENT_TIME = time.localtime(NEXT_EVENT) # Convert to struct for later
    if COUNTDOWN: # Show NEXT_EVENT as countdown to event
        NEXT_EVENT -= NOW # Time until (vs time of) next rise/set
        MINUTES = NEXT_EVENT // 60
        STRING = str(MINUTES // 60) + ':' + '{0:0>2}'.format(MINUTES % 60)
    else: # Show NEXT_EVENT in clock time
        STRING = hh_mm(EVENT_TIME)
    GROUP[9].text = STRING
    XPOS = CENTER_X - (GROUP[9].bounding_box[2] + 6) // 2
    GROUP[8].x = XPOS
    if RISEN:                         # Next event is SET
        GROUP[8].text = '\u21A7'      # Downwards arrow from bar
        GROUP[8].y = EVENT_Y - 2
        print('Sets:', STRING)
    else:                             # Next event is RISE
        GROUP[8].text = '\u21A5'      # Upwards arrow from bar
        GROUP[8].y = EVENT_Y - 1
        print('Rises:', STRING)
    GROUP[9].x = XPOS + 6
    GROUP[9].y = EVENT_Y
    # Show event time in green if a.m., amber if p.m.
    GROUP[8].color = GROUP[9].color = (0x00FF00 if EVENT_TIME.tm_hour < 12
                                       else 0xC04000)
    # Update time (GROUP[6]) and date (GROUP[7])
    NOW = time.localtime()
    STRING = hh_mm(NOW)
    GROUP[6].text = STRING
    GROUP[6].x = CENTER_X - GROUP[6].bounding_box[2] // 2
    GROUP[6].y = TIME_Y
    if MONTH_DAY:
        STRING = str(NOW.tm_mon) + '/' + str(NOW.tm_mday)
    else:
        STRING = str(NOW.tm_mday) + '/' + str(NOW.tm_mon)
    GROUP[7].text = STRING
    GROUP[7].x = CENTER_X - GROUP[7].bounding_box[2] // 2
    GROUP[7].y = TIME_Y + 10
    DISPLAY.refresh() # Force full repaint (splash screen sometimes sticks)
    time.sleep(5)
| 0 | 0 | 0 |
228275216d91b7a87408c952779feef91a8be9f0 | 2,466 | py | Python | ignite/metrics/metrics_lambda.py | ndronen/ignite | 5d7298017ceb0e5457551263d3f4090c34dbf636 | [
"BSD-3-Clause"
] | null | null | null | ignite/metrics/metrics_lambda.py | ndronen/ignite | 5d7298017ceb0e5457551263d3f4090c34dbf636 | [
"BSD-3-Clause"
] | null | null | null | ignite/metrics/metrics_lambda.py | ndronen/ignite | 5d7298017ceb0e5457551263d3f4090c34dbf636 | [
"BSD-3-Clause"
] | null | null | null | from ignite.metrics.metric import Metric
from ignite.engine import Events
class MetricsLambda(Metric):
"""
Apply a function to other metrics to obtain a new metric.
The result of the new metric is defined to be the result
of applying the function to the result of argument metrics.
When update, this metric does not recursively update the metrics
it depends on. When reset, all its dependency metrics would be
resetted. When attach, all its dependencies would be automatically
attached.
Args:
f (callable): the function that defines the computation
args (sequence): Sequence of other metrics or something
else that will be fed to ``f`` as arguments.
Example:
.. code-block:: python
precision = Precision(average=False)
recall = Recall(average=False)
def Fbeta(r, p, beta):
return torch.mean((1 + beta ** 2) * p * r / (beta ** 2 * p + r + 1e-20)).item()
F1 = MetricsLambda(Fbeta, recall, precision, 1)
F2 = MetricsLambda(Fbeta, recall, precision, 2)
F3 = MetricsLambda(Fbeta, recall, precision, 3)
F4 = MetricsLambda(Fbeta, recall, precision, 4)
"""
| 37.938462 | 104 | 0.652068 | from ignite.metrics.metric import Metric
from ignite.engine import Events
class MetricsLambda(Metric):
    """
    Apply a function to other metrics to obtain a new metric.
    The result of the new metric is defined to be the result
    of applying the function to the result of argument metrics.
    When update, this metric does not recursively update the metrics
    it depends on. When reset, all its dependency metrics would be
    resetted. When attach, all its dependencies would be automatically
    attached.
    Args:
        f (callable): the function that defines the computation
        args (sequence): Sequence of other metrics or something
            else that will be fed to ``f`` as arguments.
    Example:
    .. code-block:: python
        precision = Precision(average=False)
        recall = Recall(average=False)
        def Fbeta(r, p, beta):
            return torch.mean((1 + beta ** 2) * p * r / (beta ** 2 * p + r + 1e-20)).item()
        F1 = MetricsLambda(Fbeta, recall, precision, 1)
        F2 = MetricsLambda(Fbeta, recall, precision, 2)
        F3 = MetricsLambda(Fbeta, recall, precision, 3)
        F4 = MetricsLambda(Fbeta, recall, precision, 4)
    """
    def __init__(self, f, *args):
        self.function = f
        self.args = args
        super(MetricsLambda, self).__init__()
    def reset(self):
        # Reset every dependency that is itself a Metric; plain values
        # (e.g. the ``beta`` constants above) are left untouched.
        for arg in self.args:
            if isinstance(arg, Metric):
                arg.reset()
    def update(self, output):
        # NB: this method does not recursively update dependency metrics,
        # which might cause duplicate update issue. To update this metric,
        # users should manually update its dependencies.
        pass
    def compute(self):
        # Materialize dependency metric results; pass other args through.
        materialized = [i.compute() if isinstance(i, Metric) else i for i in self.args]
        return self.function(*materialized)
    def attach(self, engine, name):
        # Recursively attach all dependencies, guarding against adding the
        # same handler twice when a metric is shared by several lambdas.
        # (The original iterated with enumerate() but never used the index.)
        for metric in self.args:
            if isinstance(metric, Metric):
                if not engine.has_event_handler(metric.started, Events.EPOCH_STARTED):
                    engine.add_event_handler(Events.EPOCH_STARTED, metric.started)
                if not engine.has_event_handler(metric.iteration_completed, Events.ITERATION_COMPLETED):
                    engine.add_event_handler(Events.ITERATION_COMPLETED, metric.iteration_completed)
        super(MetricsLambda, self).attach(engine, name)
| 1,130 | 0 | 134 |
074a4b4261490d73d5f971d4834dfbbf2f1de8c7 | 1,688 | py | Python | loginBuptGw/loginBuptGw.py | JamesLinus/Scripts | 28aa33b60154ad11dd60c5d74642343aa5059a7f | [
"MIT"
] | 1 | 2019-04-22T09:10:46.000Z | 2019-04-22T09:10:46.000Z | loginBuptGw/loginBuptGw.py | JamesLinus/Scripts | 28aa33b60154ad11dd60c5d74642343aa5059a7f | [
"MIT"
] | null | null | null | loginBuptGw/loginBuptGw.py | JamesLinus/Scripts | 28aa33b60154ad11dd60c5d74642343aa5059a7f | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding:utf-8 -*-
"""
北邮人网关登录脚本:
使用方法:
登录:python loginBuptGw.py i
退出:python loginBuptGw.py o
"""
import urllib2
import urllib
import cookielib
import hashlib
import os
import re
import sys
reload(sys)
sys.setdefaultencoding('utf8')
uname = XXXXXX #请正确填写学号
upass = 'XXXXXX' #请正确填写密码
if __name__ == '__main__':
if len(sys.argv) < 2 or len(sys.argv) >= 3:
usage()
else:
if sys.argv[1] == "i":
u_pass = safe_md5(upass)
u_data = login(u_pass).decode('gbk','ignore').encode('utf-8')
#print u_data
check_success(u_data)
elif sys.argv[1] == "o":
quit()
else:
usage()
| 21.1 | 174 | 0.683649 | #!/usr/bin/python
# -*- coding:utf-8 -*-
"""
北邮人网关登录脚本:
使用方法:
登录:python loginBuptGw.py i
退出:python loginBuptGw.py o
"""
import urllib2
import urllib
import cookielib
import hashlib
import os
import re
import sys
reload(sys)
sys.setdefaultencoding('utf8')
uname = XXXXXX #请正确填写学号
upass = 'XXXXXX' #请正确填写密码
def safe_md5(temp_pass):
    """Derive the gateway's obfuscated password.

    Scheme (as required by the login form):
    md5('1' + password + '12345678').hexdigest() + '123456781'

    Encoding the salted string explicitly keeps this working on both
    Python 2 (str) and Python 3, where hashlib requires bytes.
    """
    salted = '1' + temp_pass + '12345678'
    #print salted
    digest = hashlib.md5(salted.encode('utf-8')).hexdigest()
    # Local renamed from 'upass' to avoid shadowing the module-level global
    return digest + '123456781'
def login(u_pass):
    """POST the login form to the campus gateway (10.3.8.211) and return
    the raw response body (GBK-encoded HTML).

    ``u_pass`` is the obfuscated password produced by safe_md5().
    """
    postdata = {
        'DDDDD': uname,
        'upass': u_pass,
        'R1': 0,
        'R2': 1,
        'para': 00,
        '0MKKey': 123456
    }
    url_login = 'http://10.3.8.211'
    cj = cookielib.CookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    urllib2.install_opener(opener)
    # Bug fix: the original assigned to 'opener.add_handler' (which is a
    # method of OpenerDirector), silently doing nothing. 'addheaders' is
    # the attribute that actually sets default request headers.
    opener.addheaders = [('User-agent','Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/32.0.1700.107 Chrome/32.0.1700.107 Safari/537.36')]
    en_url = urllib.urlencode(postdata)
    r_login = urllib2.urlopen(url_login,en_url)
    return r_login.read()
def check_success(u_data):
    # Report the login result by scanning the response HTML (already
    # re-encoded to UTF-8 by the caller) for the success-page <title>,
    # then print a Chinese status message ('login succeeded'/'failed').
    re_check = re.compile(r'<title>登录成功窗</title>')
    if re_check.search(u_data):
        print '登录成功'
    else:
        print '登录失败'
def quit():
    # Log the current session out of the gateway by requesting F.htm.
    # NOTE: shadows the builtin quit(); name kept for compatibility with
    # the __main__ dispatcher below.
    quit_url = 'http://10.3.8.211/F.htm'
    urllib2.urlopen(quit_url)
    print '退出成功'
def usage():
    # Print command-line help (in Chinese): 'i' = log in, 'o' = log out.
    print "请正确选择登录、退出方式:"
    print "登录:python loginBuptGw.py i "
    print "退出:python loginBuptGw.py o "
if __name__ == '__main__':
    # Exactly one argument is required: 'i' to log in, 'o' to log out.
    if len(sys.argv) < 2 or len(sys.argv) >= 3:
        usage()
    else:
        if sys.argv[1] == "i":
            u_pass = safe_md5(upass)
            # Gateway replies in GBK; re-encode to UTF-8 before matching
            u_data = login(u_pass).decode('gbk','ignore').encode('utf-8')
            #print u_data
            check_success(u_data)
        elif sys.argv[1] == "o":
            quit()
        else:
            usage()
| 1,024 | 0 | 115 |
8d9d97da1eeec5795d33bf640fc9d80248931137 | 1,639 | py | Python | trendfilter/extrapolate.py | dave31415/trendfilter | 793714359ec243857d0557ede9abcae61981f119 | [
"MIT"
] | null | null | null | trendfilter/extrapolate.py | dave31415/trendfilter | 793714359ec243857d0557ede9abcae61981f119 | [
"MIT"
] | null | null | null | trendfilter/extrapolate.py | dave31415/trendfilter | 793714359ec243857d0557ede9abcae61981f119 | [
"MIT"
] | null | null | null | from scipy.interpolate import interp1d
import numpy as np
def vectorize(func_orig):
"""
A function that takes a function and
returns another that can fun on lists and arrays
:param func_orig: any functions
:return: vectorized function
"""
return func
def get_interp_extrapolate_functions(x, base_model, linear_deviations):
"""
Get the three interp/extrapolation model functions:
base function, deviates function, total model function
:param x: the x data
:param base_model: model model cvxpy expression
:param linear_deviations: list of completed linear_deviations objects
:return: base function, deviates function, total model function
"""
# TODO: this requires mapping to be given, make it work with matrix only
interp_base_model_func = interp1d(x, base_model.value, fill_value="extrapolate")
return vectorize(func_base), vectorize(func_deviates), vectorize(func)
| 30.924528 | 84 | 0.681513 | from scipy.interpolate import interp1d
import numpy as np
def vectorize(func_orig):
    """
    Wrap a scalar function so it also accepts lists and numpy arrays.
    A list input yields a list, an ndarray yields an ndarray, and any
    other input is passed straight through to the original function.
    :param func_orig: any function of one argument
    :return: vectorized function
    """
    def wrapped(x_new):
        if isinstance(x_new, (list, np.ndarray)):
            mapped = [func_orig(item) for item in x_new]
            # Preserve the container type of the input
            return np.array(mapped) if isinstance(x_new, np.ndarray) else mapped
        return func_orig(x_new)
    return wrapped
def get_interp_extrapolate_functions(x, base_model, linear_deviations):
    """
    Get the three interp/extrapolation model functions:
    base function, deviates function, total model function
    :param x: the x data
    :param base_model: fitted base-model cvxpy expression (its .value is used)
    :param linear_deviations: list of completed linear deviation entries
    :return: base function, deviates function, total model function
    """
    # TODO: this requires mapping to be given, make it work with matrix only
    interp_base_model_func = interp1d(x, base_model.value, fill_value="extrapolate")
    def func_base(x_new):
        # Linear interpolation of the fitted base model, extrapolating
        # beyond the range of x.
        return interp_base_model_func(x_new)
    def func_deviates(x_new):
        # Sum the fitted deviation values mapped to x_new.
        # NOTE(review): entries are accessed as dicts with 'mapping' and
        # 'variable' keys, though the docstring says 'objects' -- confirm
        # the expected element type.
        linear_dev_value = 0.0
        for lin_dev in linear_deviations:
            index = lin_dev['mapping'](x_new)
            var = lin_dev['variable'].value
            value = var[index]
            linear_dev_value += value
        return linear_dev_value
    def func(x_new):
        # Total model = base + deviations
        return func_base(x_new) + func_deviates(x_new)
    return vectorize(func_base), vectorize(func_deviates), vectorize(func)
| 583 | 0 | 107 |
8a1e23316ee7f7cad0e7bf9eafe1c857d3b52749 | 328 | py | Python | Scripts/simulation/objects/decorative/__init__.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/objects/decorative/__init__.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/objects/decorative/__init__.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\objects\decorative\__init__.py
# Compiled at: 2009-11-20 02:49:20
# Size of source mod 2**32: 106 bytes
pass | 46.857143 | 107 | 0.728659 | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\objects\decorative\__init__.py
# Compiled at: 2009-11-20 02:49:20
# Size of source mod 2**32: 106 bytes
pass | 0 | 0 | 0 |
9d55fcd21fd668728d576d29f89f13e00a3cb35f | 412 | py | Python | tests/utils_tests.py | joyongjin/peb | 078faf1a83cc4c161e9b4779bb9af6068b25c5b9 | [
"MIT"
] | null | null | null | tests/utils_tests.py | joyongjin/peb | 078faf1a83cc4c161e9b4779bb9af6068b25c5b9 | [
"MIT"
] | null | null | null | tests/utils_tests.py | joyongjin/peb | 078faf1a83cc4c161e9b4779bb9af6068b25c5b9 | [
"MIT"
] | null | null | null | import random
from unittest import TestCase
import peb
| 22.888889 | 72 | 0.667476 | import random
from unittest import TestCase
import peb
class UtilTest(TestCase):
    """Unit tests for peb.raise_or."""
    def test_raise_or(self):
        err = ValueError('Test error')
        sentinel = 'TEST_VALUE'
        # throw=False: the value is returned, the error is not raised
        self.assertEqual(peb.raise_or(err, sentinel, throw=False), sentinel)
        # throw=True: exactly the supplied error object is raised
        with self.assertRaises(ValueError) as ctx:
            peb.raise_or(err, sentinel, throw=True)
        self.assertEqual(ctx.exception, err)
| 301 | 4 | 49 |
15dd6aac8e204d47fef9c8e7da2d4dd65a45dcc0 | 1,555 | py | Python | patients/migrations/0012_auto_20201104_1553.py | joshgoshbgosh/ccs-final-project | 566bf8bfaa82986010a2dfbdc3b32592c796d4a9 | [
"MIT"
] | 1 | 2020-11-05T14:21:45.000Z | 2020-11-05T14:21:45.000Z | patients/migrations/0012_auto_20201104_1553.py | joshgoshbgosh/ccs-final-project | 566bf8bfaa82986010a2dfbdc3b32592c796d4a9 | [
"MIT"
] | null | null | null | patients/migrations/0012_auto_20201104_1553.py | joshgoshbgosh/ccs-final-project | 566bf8bfaa82986010a2dfbdc3b32592c796d4a9 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-11-04 15:53
from django.db import migrations, models
| 42.027027 | 170 | 0.583923 | # Generated by Django 3.1.2 on 2020-11-04 15:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('patients', '0011_patient_image'),
]
operations = [
migrations.CreateModel(
name='Prescription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=225)),
('last_name', models.CharField(max_length=225)),
('patient_address', models.CharField(max_length=225)),
('brand_name', models.CharField(max_length=225)),
('medication_name', models.CharField(max_length=225)),
('directions', models.CharField(max_length=225)),
('quantity', models.CharField(max_length=225)),
('refills', models.CharField(max_length=225)),
('pharmacy_number', models.CharField(max_length=225)),
('rx', models.CharField(max_length=225)),
('prescriber', models.CharField(max_length=225)),
('label_image', models.ImageField(null=True, upload_to='precription/')),
],
),
migrations.AlterField(
model_name='patient',
name='walking_devices',
field=models.CharField(blank=True, choices=[('Wheel_Chair', 'Wheel_Chair'), ('Walker', 'Walker'), ('Cane', 'Cane')], default=None, max_length=225, null=True),
),
]
| 0 | 1,441 | 23 |
fe2ccb0b7614d1cc9f61af3c2a9134c91f178963 | 49,940 | py | Python | bin/sofa_analyze.py | pingsutw/sofa | 66b960859dde4be7143ed06ed98396c743a6478a | [
"Apache-2.0"
] | 1 | 2020-04-21T09:05:29.000Z | 2020-04-21T09:05:29.000Z | bin/sofa_analyze.py | y78078/sofa | 2f8967df18abc03cedf7a97f8bbf77164ca69bf9 | [
"Apache-2.0"
] | null | null | null | bin/sofa_analyze.py | y78078/sofa | 2f8967df18abc03cedf7a97f8bbf77164ca69bf9 | [
"Apache-2.0"
] | null | null | null | import argparse
import matplotlib
matplotlib.use('agg')
import csv
import json
import multiprocessing as mp
import os
import random
import re
import sys
from functools import partial
from operator import attrgetter, itemgetter
import networkx as nx
import numpy as np
import pandas as pd
import time
from sofa_aisi import *
from sofa_common import *
from sofa_config import *
from sofa_print import *
from matplotlib import pyplot as plt
import grpc
import potato_pb2
import potato_pb2_grpc
import socket
import random
import subprocess
from sofa_ml import hsg_v2
# input: pfv(performance feature vector), Pandas.DataFrame
# output: hint, docker_image
| 44.470169 | 259 | 0.569704 | import argparse
import matplotlib
matplotlib.use('agg')
import csv
import json
import multiprocessing as mp
import os
import random
import re
import sys
from functools import partial
from operator import attrgetter, itemgetter
import networkx as nx
import numpy as np
import pandas as pd
import time
from sofa_aisi import *
from sofa_common import *
from sofa_config import *
from sofa_print import *
from matplotlib import pyplot as plt
import grpc
import potato_pb2
import potato_pb2_grpc
import socket
import random
import subprocess
from sofa_ml import hsg_v2
def random_generate_color():
    """Return a random '#RRGGBB' color string.

    The red channel is fixed at 0x40 (as in the original) so generated
    colors stay in a consistent dark-red band; green and blue are drawn
    uniformly from 0-255. The lambda assignment (PEP 8 E731) was replaced
    with direct randint calls, drawn in the same green-then-blue order.
    """
    return '#%02X%02X%02X' % (64, random.randint(0, 255), random.randint(0, 255))
def get_top_k_events(cfg, df, topk):
    """Return the names of the ``topk`` events ranked by total duration.

    Rows of ``df`` are grouped by 'name', durations are summed per group,
    and the group names with the largest totals are returned (descending).
    """
    totals = df.groupby(['name']).aggregate(np.sum)
    ranked = totals.sort_values(by=['duration'], ascending=False)
    #memcpy = ['copyKind_1_','copyKind_2_','copyKind_8_']
    if cfg.verbose:
        print("Top %d Events: " % topk)
        print(ranked[['duration']][0:topk])
    return list(ranked.head(topk).index)
# input: pfv(performance feature vector), Pandas.DataFrame
# output: hint, docker_image
def get_hint(potato_server, features):
    """Query the POTATO gRPC server with a performance feature vector.

    ``features`` is a DataFrame with 'name'/'value' columns; if empty, a
    placeholder message is returned instead of contacting the server.
    Returns a (hint, docker_image) pair.
    """
    if len(features) == 0:
        # Nothing to send -- same fallback strings as before
        return 'There is no pfv to get hints.', 'NA'
    pfv = potato_pb2.PerformanceFeatureVector()
    for i in range(len(features)):
        pfv.name.append(features.iloc[i]['name'])
        pfv.value.append(features.iloc[i]['value'])
    channel = grpc.insecure_channel(potato_server)
    stub = potato_pb2_grpc.HintStub(channel)
    request = potato_pb2.HintRequest(hostname=socket.gethostname(),
                                     pfv=pfv)
    response = stub.Hint(request)
    return response.hint, response.docker_image
def concurrency_breakdown(logdir, cfg, df_mpstat, df_cpu, df_gpu, df_nvsmi, df_bandwidth, features):
if cfg.verbose:
print_title('Concurrency Breakdown Analysis')
total_elapsed_time = {'usr':0, 'sys':0, 'gpu':0, 'iow':0}
elapsed_time_ratio = {'usr':0, 'sys':0, 'gpu':0, 'iow':0}
total_interval_vector = []
total_performace_vector = []
if len(df_mpstat) == 0:
print_warning(cfg, 'no mpstat and perf traces!')
return features
t_begin = df_mpstat.iloc[0]['timestamp']
t_end = df_mpstat.iloc[-1]['timestamp']
t = t_begin
while t < t_end:
t = t + 0.1
if cfg.roi_end > 0 and (t < cfg.roi_begin or t > cfg.roi_end):
continue
window_begin = t - 0.1
window_end = t
if len(df_cpu) > 0:
if df_cpu.iloc[0].timestamp > window_end:
continue
cond1 = (df_cpu['timestamp'] > window_begin)
cond2 = (df_cpu['timestamp'] <= window_end)
df_cpu_interval = df_cpu[ cond1 & cond2 ]
num_gpus = len(list(set(df_nvsmi['deviceId'])))
cond1 = (df_nvsmi['timestamp'] > window_begin)
cond2 = (df_nvsmi['timestamp'] <= window_end)
sm = df_nvsmi['event'] == int(0)
df_nvsmi_interval = df_nvsmi[ cond1 & cond2 & sm ]
cond1 = (df_mpstat['timestamp'] > window_begin)
cond2 = (df_mpstat['timestamp'] <= window_end)
df_mpstat_interval = df_mpstat[ cond1 & cond2 ]
cond1 = (df_bandwidth['timestamp'] > window_begin)
cond2 = (df_bandwidth['timestamp'] <= window_end)
tx = df_bandwidth['event'] == float(0)
rx = df_bandwidth['event'] == float(1)
df_tx_interval = df_bandwidth[ cond1 & cond2 & tx ]
df_rx_interval = df_bandwidth[ cond1 & cond2 & rx ]
mp_usr = []
mp_sys = []
mp_iow = []
usr = []
sys = []
irq = []
cpu_max = 0
cpu_min = 100
for i in range(len(df_mpstat_interval)):
ratios = df_mpstat_interval.iloc[i]['name'].split(':')[1].split('|')
#print(ratios)
mp_usr.append(0.1*int(ratios[1])/100.0)
mp_sys.append(0.1*int(ratios[2])/100.0)
mp_iow.append(0.1*int(ratios[4])/100.0)
usr.append(int(ratios[1]))
sys.append(int(ratios[2]))
irq.append(int(ratios[5]))
cpu_tmp = int(ratios[1]) + int(ratios[2]) + int(ratios[5])
if cpu_tmp > cpu_max:
cpu_max = cpu_tmp
if cpu_tmp < cpu_min:
cpu_min = cpu_tmp
mp_usr = np.asarray(mp_usr)
mp_sys = np.asarray(mp_sys)
mp_iow = np.asarray(mp_iow)
usr = np.asarray(usr)
sys = np.asarray(sys)
irq = np.asarray(irq)
elapsed_time = {'usr':0, 'sys':0, 'gpu':0, 'iow':0}
if len(df_mpstat_interval) > 0:
elapsed_time['usr'] = mp_usr.max()
elapsed_time['sys'] = mp_sys.max()
elapsed_time['gpu'] = df_nvsmi_interval['duration'].sum() * 0.01 * 0.1
elapsed_time['iow'] = mp_iow.max()
#print('gput,usrt = ', elapsed_time['gpu'], elapsed_time['usr'])
dominator = max(elapsed_time, key=elapsed_time.get)
#if elapsed_time['gpu'] > 0.1 :
# dominator = 'gpu'
total_elapsed_time[dominator] = total_elapsed_time[dominator] + 0.1
if num_gpus > 0:
time_gpu_avg = df_nvsmi_interval['duration'].sum() * 0.01 * 0.1 / num_gpus
else:
time_gpu_avg = 0
interval_vector = [mp_usr.max(),
mp_sys.max(),
mp_iow.max(),
time_gpu_avg,
df_tx_interval['bandwidth'].sum(),
df_rx_interval['bandwidth'].sum()]
total_interval_vector.append(tuple(interval_vector))
if num_gpus > 0:
sm_avg = df_nvsmi_interval['duration'].sum() / int(len(list(set(df_nvsmi_interval['deviceId']))))
else:
sm_avg = 0
performace_vector = [window_end,
df_nvsmi_interval['duration'].max(),
sm_avg,
df_nvsmi_interval['duration'].min(),
round((usr.mean() + sys.mean() + irq.mean()), 0),
cpu_max,
cpu_min]
total_performace_vector.append(tuple(performace_vector))
total_all_elapsed_time = sum(total_elapsed_time.values())
if total_all_elapsed_time > 0 :
elapsed_time_ratio['usr'] = 100 * total_elapsed_time['usr'] / total_all_elapsed_time
elapsed_time_ratio['sys'] = 100 * total_elapsed_time['sys'] / total_all_elapsed_time
elapsed_time_ratio['gpu'] = 100 * total_elapsed_time['gpu'] / total_all_elapsed_time
elapsed_time_ratio['iow'] = 100 * total_elapsed_time['iow'] / total_all_elapsed_time
if cfg.verbose:
print('Elapsed Time = %.1lf ' % total_all_elapsed_time)
print('USR = %.1lf %%' % elapsed_time_ratio['usr'])
print('SYS = %.1lf %%' % elapsed_time_ratio['sys'])
print('GPU = %.1lf %%' % elapsed_time_ratio['gpu'])
print('IOW = %.1lf %%' % elapsed_time_ratio['iow'])
if cfg.spotlight_gpu:
elapsed_hotspot_time = cfg.roi_end - cfg.roi_begin
else:
elapsed_hotspot_time = 0
df = pd.DataFrame({ 'name':['elapsed_usr_time_ratio', 'elapsed_sys_time_ratio', 'elapsed_gpu_time_ratio',
'elapsed_iow_time_ratio', 'elapsed_hotspot_time'],
'value':[elapsed_time_ratio['usr'], elapsed_time_ratio['sys'], elapsed_time_ratio['gpu'],
elapsed_time_ratio['iow'], elapsed_hotspot_time ] },
columns=['name','value'])
features = pd.concat([features, df])
if len(total_performace_vector) > 0:
performance_table = pd.DataFrame(total_performace_vector, columns = ['time', 'max_gpu_util', 'avg_gpu_util', 'min_gpu_util', 'cpu_util', 'cpu_max', 'cpu_min'])
performance_table.to_csv('%s/performance.csv' % logdir)
vector_table = pd.DataFrame(total_interval_vector, columns = ['usr' , 'sys', 'iow', 'gpu', 'net_tx', 'net_rx'])
pearson = vector_table.corr(method ='pearson').round(2)
if cfg.verbose:
print('Correlation Table :')
print(pearson)
df = pd.DataFrame({ 'name':['corr_gpu_usr', 'corr_gpu_sys', 'corr_gpu_iow', 'corr_gpu_ntx', 'corr_gpu_nrx'], 'value':[pearson['gpu'].usr, pearson['gpu'].sys, pearson['gpu'].iow, pearson['gpu'].net_tx, pearson['gpu'].net_rx]}, columns=['name','value'])
features = pd.concat([features, df])
return features
def payload_sum(df):
    """Print the number of records contained in *df*."""
    print(len(df))
class Event:
    """A single trace event with a name, a type flag, a timestamp and a duration.

    ttype is 0 for a begin event and 1 for an end event.
    """

    def __init__(self, name, ttype, timestamp, duration):
        self.name = name
        self.ttype = ttype  # 0 for begin, 1 for end
        self.timestamp = timestamp
        self.duration = duration

    def __repr__(self):
        fields = (self.name, self.ttype, self.timestamp, self.duration)
        return repr(fields)
def nvsmi_profile(logdir, cfg, df_nvsmi, features):
    """Summarize nvidia-smi utilization samples and append summary features.

    Computes per-GPU mean utilization for SM/MEM/ENC/DEC events, plus SM and
    MEM quartiles per GPU, optionally restricted to the spotlight region of
    interest. Returns *features* with gpu_sm_util*/gpu_mem_util* rows appended.

    NOTE(review): the feature rows appended at the end reuse ``gpuid`` left
    over from the last loop iteration, so the quantile features describe only
    the last GPU in ``gpunum`` — confirm this is intended.
    """
    if not cfg.cluster_ip and cfg.verbose:
        print_title('SM & MEM & ENCODE/DECODE Profiling')
    # Clip samples to the region of interest when spotlight mode located one.
    if cfg.spotlight_gpu:
        if cfg.roi_end == 0 :
            print_warning(cfg, 'spotlight_gpu has no effects.')
        else:
            cond1 = (df_nvsmi['timestamp'] > cfg.roi_begin)
            cond2 = (df_nvsmi['timestamp'] <= cfg.roi_end)
            df_nvsmi = df_nvsmi[ cond1 & cond2 ]
    # Wall-clock span covered by the (possibly clipped) samples.
    sm_start = df_nvsmi.iloc[0].timestamp
    sm_end = df_nvsmi.iloc[-1].timestamp
    SM_time = sm_end - sm_start
    result = df_nvsmi.groupby(['deviceId','event'])['duration'].mean()
    result = result.astype(int)
    # Event ids used below: 0 = SM, 1 = MEM, 2 = ENC, 3 = DEC utilization.
    gpu_sm_util = df_nvsmi.groupby(['event'])['duration'].mean()[0]
    gpu_mem_util = df_nvsmi.groupby(['event'])['duration'].mean()[1]
    if cfg.nvsmi_data:
        gpu_enc_util = df_nvsmi.groupby(['event'])['duration'].mean()[2]
        gpu_dec_util = df_nvsmi.groupby(['event'])['duration'].mean()[3]
    else:
        # ENC/DEC samples are absent without full nvsmi data collection.
        gpu_enc_util = 0
        gpu_dec_util = 0
    sm = df_nvsmi['event'] == int(0)
    mem = df_nvsmi['event'] == int(1)
    enc = df_nvsmi['event'] == int(2)
    dec = df_nvsmi['event'] == int(3)
    gpunum = list(set(df_nvsmi['deviceId']))
    res = pd.DataFrame([], columns=['sm', 'mem', 'enc', 'dec'])
    sm_q = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg'])
    mem_q = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg'])
    # Build one row per GPU: mean utilization plus SM/MEM quartiles.
    for i in gpunum:
        gpuid = df_nvsmi['deviceId'] == int(i)
        gpudata = [round(df_nvsmi[sm & gpuid]['duration'].mean(), 2),
                   round(df_nvsmi[mem & gpuid]['duration'].mean(), 2),
                   round(df_nvsmi[enc & gpuid]['duration'].mean(), 2),
                   round(df_nvsmi[dec & gpuid]['duration'].mean(), 2)]
        smdata = [round(df_nvsmi[sm & gpuid]['duration'].quantile(0.25), 2),
                  round(df_nvsmi[sm & gpuid]['duration'].quantile(0.5), 2),
                  round(df_nvsmi[sm & gpuid]['duration'].quantile(0.75), 2),
                  round(df_nvsmi[sm & gpuid]['duration'].mean(), 2)]
        memdata = [round(df_nvsmi[mem & gpuid]['duration'].quantile(0.25), 2),
                   round(df_nvsmi[mem & gpuid]['duration'].quantile(0.5), 2),
                   round(df_nvsmi[mem & gpuid]['duration'].quantile(0.75), 2),
                   round(df_nvsmi[mem & gpuid]['duration'].mean(), 2)]
        gpu_tmp = pd.DataFrame([gpudata], columns=['sm', 'mem', 'enc', 'dec'], index=[i])
        sm_tmp = pd.DataFrame([smdata], columns=['Q1', 'Q2', 'Q3', 'Avg'], index=[i])
        mem_tmp = pd.DataFrame([memdata], columns=['Q1', 'Q2', 'Q3', 'Avg'], index=[i])
        res = pd.concat([res, gpu_tmp])
        sm_q = pd.concat([sm_q, sm_tmp])
        mem_q = pd.concat([mem_q, mem_tmp])
    res.index.name = 'gpu_id'
    sm_q.index.name = 'gpu_id'
    mem_q.index.name = 'gpu_id'
    if not cfg.cluster_ip and cfg.verbose:
        print('GPU Utilization (%):')
        print(res)
        print('\nGPU SM Quartile (%):')
        print(sm_q)
        print('\nGPU MEM Quartile (%):')
        print(mem_q)
        print('Overall Average SM Utilization (%): ', int(gpu_sm_util))
        print('Overall Average MEM Utilization (%): ', int(gpu_mem_util))
        print('Overall Average ENC Utilization (%): ', int(gpu_enc_util))
        print('Overall Average DEC Utilization (%): ', int(gpu_dec_util))
        print('Overall Active GPU Time (s): %.3lf' % (SM_time * gpu_sm_util/100.0))
    # NOTE(review): `gpuid` below is the mask from the LAST loop iteration.
    df = pd.DataFrame({'name':['gpu_sm_util_q2', 'gpu_sm_util_q3', 'gpu_sm_util', 'gpu_mem_util_q2', 'gpu_mem_util_q3', 'gpu_mem_util'],
                        'value':[df_nvsmi[sm & gpuid]['duration'].quantile(0.5),
                                df_nvsmi[sm & gpuid]['duration'].quantile(0.75),
                                int(gpu_sm_util),
                                df_nvsmi[mem & gpuid]['duration'].quantile(0.5),
                                df_nvsmi[mem & gpuid]['duration'].quantile(0.75),
                                int(gpu_mem_util),
                                ]},
                        columns=['name','value'])
    features = pd.concat([features, df])
    return features
def gpu_profile(logdir, cfg, df_gpu, features):
    """Profile the GPU trace: per-GPU time, kernel time and NCCL time.

    Appends gpu_time/num_gpus/kernel_time/nccl_time rows to *features* and
    returns the extended frame; also runs the communication profile and
    prints the top-10 events.
    """
    if cfg.verbose:
        print_title('GPU Profiling')
        print('Per-GPU time (s):')
    per_device = df_gpu.groupby("deviceId")["duration"]
    gpu_time = 0
    for device_key, durations in per_device:
        device_total = durations.sum()
        if cfg.verbose:
            print("[%d]: %lf" % (int(float(device_key)), device_total))
        gpu_time = gpu_time + device_total
    num_gpus = len(per_device)

    # copyKind 0 holds the kernel executions (non-memcpy records).
    kernel_time = 0
    for copy_kind, durations in df_gpu.groupby("copyKind")["duration"]:
        if copy_kind == 0:
            kernel_time = durations.sum()

    # Sum durations of every kernel whose name mentions "nccl".
    nccl_time = 0
    for kernel_name, durations in df_gpu.groupby("name")["duration"]:
        if kernel_name.find("nccl") != -1:
            nccl_time = nccl_time + durations.sum()

    features = comm_profile(logdir, cfg, df_gpu, features)
    get_top_k_events(cfg, df_gpu, 10)

    df = pd.DataFrame({'name':['gpu_time', 'num_gpus', 'kernel_time', 'nccl_time'],
                       'value':[gpu_time, num_gpus, kernel_time, nccl_time] },
                      columns=['name','value'])
    features = pd.concat([features, df])
    return features
def strace_profile(logdir, cfg, df, features):
    """Placeholder strace profiling stage: prints a banner and returns *features* unchanged."""
    print_title('STRACE Profiling:')
    return features
def net_profile(logdir, cfg, df, features):
    """Profile the packet trace: total TCP time, per src/dst traffic matrices.

    Builds packet-count and byte-sum matrices grouped by (pkt_src, pkt_dst),
    rewrites the integer-encoded addresses back into dotted form, writes a
    ranked netrank.csv, and appends a net_time feature.
    """
    if not cfg.cluster_ip:
        print_title("Network Profiling:")
    grouped_df = df.groupby("name")["duration"]
    # Accumulate time and packet count over all tcp trace entries.
    net_time = 0
    n_packets = 0
    for key, item in grouped_df:
        #print("[%s]: %lf" % (key, grouped_df.get_group(key).sum()))
        if key.find("network:tcp:") != -1:
            net_time = net_time + grouped_df.get_group(key).sum()
            n_packets = n_packets + 1
    #print(("total network time (s) = %.3lf" % net_time))
    #print(("total amount of network packets = %d" % n_packets))
    # total network packet
    packet_num_matrix = df.groupby(['pkt_src','pkt_dst','payload']).size().unstack(level=1, fill_value=0)
    # total network traffic
    packet_sum_matrix = df.groupby(['pkt_src','pkt_dst'])["payload"].sum().unstack(level=1, fill_value=0)
    # ================ change pandas table columns and index name ====
    rename_index = packet_sum_matrix.index.tolist()
    rename_index2 = packet_num_matrix.index.tolist()
    rename_columns = packet_sum_matrix.columns.tolist()
    rename_columns2 = packet_num_matrix.columns.tolist()
    def zero(s):
        # Strip leading zeros from a 3-character octet string ('012' -> '12').
        if s[0:2] == '00':
            s = s[2]
        elif (s[0] == '0') and (s[1] != '0'):
            s = s[1:3]
        return(s)
    def check_str(rename_list):
        # Convert integer-encoded addresses (3 digits per octet) back to a
        # dotted string, e.g. 10001002003 -> '10.1.2.3'.
        rename_list_new = []
        for j in rename_list:
            j = str(int(j))
            a = j[-9:-6]
            b = j[-6:-3]
            c = j[-3:]
            j = j[:-9] + '.' + zero(a) + '.' + zero(b) + '.' + zero(c)
            rename_list_new.append(j)
        return(rename_list_new)
    def check_str2(rename_list):
        # Same conversion, but for MultiIndex tuples: uses element 0 of each.
        rename_columns_2 = []
        for i in rename_list:
            i = str(int(i[0]))
            a = i[-9:-6]
            b = i[-6:-3]
            c = i[-3:]
            i = i[:-9] + '.' + zero(a) + '.' + zero(b) + '.' + zero(c)
            rename_columns_2.append(i)
        return(rename_columns_2)
    rename_index_new = check_str(rename_index)
    rename_index_new = dict(zip(rename_index, rename_index_new))
    rename_index2_new = check_str2(rename_index2)
    # De-duplicate while preserving first-seen order.
    rename_index2_final = list(set(rename_index2_new))
    rename_index2_final.sort(key=rename_index2_new.index)
    rename_columns_new = check_str(rename_columns)
    rename_columns_new = dict(zip(rename_columns, rename_columns_new))
    rename_columns2_new = check_str(rename_columns2)
    rename_columns2_new = dict(zip(rename_columns2, rename_columns2_new))
    # rename here
    packet_sum_matrix = packet_sum_matrix.rename(columns=rename_columns_new)
    packet_num_matrix = packet_num_matrix.rename(columns=rename_columns2_new)
    packet_sum_matrix = packet_sum_matrix.rename(index=rename_index_new)
    # NOTE(review): set_levels(..., inplace=True) is deprecated/removed in
    # recent pandas — confirm the installed pandas version supports it.
    packet_num_matrix.index.set_levels(rename_index2_final , level = 0, inplace = True)
    if cfg.verbose:
        print("total amount of network traffic : ", convertbyte(df['payload'].sum()), '\n', packet_sum_matrix.to_string(), "\n")
        print("total amount of network packets = %d\n" % packet_num_matrix.sum().sum() ,packet_num_matrix.to_string(), "\n")
    network_value = []
    src = []
    dst = []
    final = []
    # Flatten the byte-sum matrix into (src, dst, bytes) records.
    for index in packet_sum_matrix.index:
        for column in packet_sum_matrix.columns:
            src.append(index)
            dst.append(column)
            network_value.append(packet_sum_matrix[column][index])
    record = list(zip(src, dst, network_value))
    record.sort(key=lambda tup:tup[2], reverse=True)
    # Keep only non-zero flows, formatted for the CSV report.
    for src, dst, value in record:
        if value == 0:
            pass
        else:
            item = [src, dst, convertbyte(value), round(value / df['payload'].sum(), 2)]
            final.append(item)
    summary = pd.DataFrame(final, columns=['Source', 'Destination', 'Amount', 'Percentage of a Node'])
    summary.to_csv(logdir + 'netrank.csv',
                mode='w',
                header=True,
                index=False)
    df = pd.DataFrame({'name':['net_time'],
                    'value':[net_time] },
                    columns=['name','value'])
    features = pd.concat([features, df])
    return features
def convertbyte(B):
    """Format a byte count as a human-readable string (Bytes/KB/MB/GB/TB)."""
    size = int(B)
    # Largest unit first; fall through to plain bytes below 1 KB.
    for suffix, factor in (('TB', 1024 ** 4), ('GB', 1024 ** 3),
                           ('MB', 1024 ** 2), ('KB', 1024)):
        if size >= factor:
            return '{0:.2f} {1}'.format(size / factor, suffix)
    return '{} Bytes'.format(size)
def convertbytes(B):
    """Format a byte rate as a human-readable string (B/s .. TB/s)."""
    rate = float(B)
    # Largest unit first; fall through to plain bytes/second below 1 KB/s.
    for suffix, factor in (('TB/s', 1024.0 ** 4), ('GB/s', 1024.0 ** 3),
                           ('MB/s', 1024.0 ** 2), ('KB/s', 1024.0)):
        if rate >= factor:
            return '{0:.2f} {1}'.format(rate / factor, suffix)
    return '{0:.2f} B/s'.format(rate)
def netbandwidth_profile(logdir, cfg, df, features):
    """Profile tx/rx bandwidth samples, plot a chart, and append features.

    Reads <logdir>/netstat.txt for cumulative tx/rx byte counters, computes
    bandwidth quartiles from *df* (event 0 = tx, event 1 = rx), saves
    network_report.pdf, and appends bw_tx_q2/q3 and bw_rx_q2/q3 features.
    """
    if not cfg.cluster_ip and cfg.verbose:
        print_title('Network Bandwidth Profiling:')
    tx = df['event'] == float(0)
    rx = df['event'] == float(1)
    # Quartiles computed up-front for the feature rows appended at the end.
    bw_tx_q1 = df[tx]['bandwidth'].quantile(0.25)
    bw_tx_q2 = df[tx]['bandwidth'].quantile(0.5)
    bw_tx_q3 = df[tx]['bandwidth'].quantile(0.75)
    bw_tx_mean = int(df[tx]['bandwidth'].mean())
    bw_rx_q1 = df[rx]['bandwidth'].quantile(0.25)
    bw_rx_q2 = df[rx]['bandwidth'].quantile(0.5)
    bw_rx_q3 = df[rx]['bandwidth'].quantile(0.75)
    bw_rx_mean = int(df[rx]['bandwidth'].mean())
    with open('%s/netstat.txt' % logdir) as f:
        lines = f.readlines()
        first_line = lines[0]
        last_line = lines[-1]
        # NOTE(review): tx_begin/rx_begin/tx_end/rx_end are computed but unused.
        tx_begin = first_line.split(',')[1]
        rx_begin = first_line.split(',')[2]
        tx_end = last_line.split(',')[1]
        rx_end = last_line.split(',')[2]
        # Cumulative counters: traffic = last sample minus first sample.
        tx_amount = int(last_line.split(',')[1]) - int(first_line.split(',')[1])
        rx_amount = int(last_line.split(',')[2]) - int(first_line.split(',')[2])
    if not cfg.cluster_ip:
        # NOTE(review): recomputes the same quantiles as above — redundant
        # but harmless.
        bw_tx_q1 = df[tx]['bandwidth'].quantile(0.25)
        bw_tx_q2 = df[tx]['bandwidth'].quantile(0.5)
        bw_tx_q3 = df[tx]['bandwidth'].quantile(0.75)
        bw_tx_mean = int(df[tx]['bandwidth'].mean())
        bw_rx_q1 = df[rx]['bandwidth'].quantile(0.25)
        bw_rx_q2 = df[rx]['bandwidth'].quantile(0.5)
        bw_rx_q3 = df[rx]['bandwidth'].quantile(0.75)
        bw_rx_mean = int(df[rx]['bandwidth'].mean())
        if cfg.verbose:
            print('Amount of Network Traffic : %s' % (convertbyte(tx_amount + rx_amount)))
            print('Amount of tx : %s' % convertbyte(tx_amount))
            print('Amount of rx : %s' % convertbyte(rx_amount))
            print('Bandwidth Quartile :')
            print('Q1 tx : %s, rx : %s' % ( convertbytes(bw_tx_q1), convertbytes(bw_rx_q1)))
            print('Q2 tx : %s, rx : %s' % ( convertbytes(bw_tx_q2), convertbytes(bw_rx_q2)))
            print('Q3 tx : %s, rx : %s' % ( convertbytes(bw_tx_q3), convertbytes(bw_rx_q3)))
            print('Avg tx : %s, rx : %s'% ( convertbytes(bw_tx_mean), convertbytes(bw_rx_mean)))
    #network chart part
    all_time = df[tx]['timestamp'].tolist()
    all_tx = df[tx]['bandwidth'].tolist()
    all_rx = df[rx]['bandwidth'].tolist()
    fig = plt.figure(dpi=128, figsize=(16, 14))
    plt.plot(all_time, all_tx, c='red', alpha=0.5, label='tx')
    plt.plot(all_time, all_rx, c='blue', alpha=0.5, label='rx')
    plt.legend(loc='upper right')
    plt.title("Network Report", fontsize=18)
    plt.xlabel('Timestamp (s)', fontsize=16)
    plt.ylabel("Bandwidth (bytes)", fontsize=16)
    fig.savefig("%s/network_report.pdf" % logdir, bbox_inches='tight')
    if not cfg.cluster_ip and cfg.verbose:
        print('Network Bandwidth Chart is saved at %s/network_report.pdf' %logdir)
    df_feature = pd.DataFrame({ 'name':['bw_tx_q2', 'bw_tx_q3', 'bw_rx_q2', 'bw_rx_q3'],
                        'value':[bw_tx_q2, bw_tx_q3, bw_rx_q2, bw_rx_q3] },
                        columns=['name','value'])
    features = pd.concat([features, df_feature])
    return features
def blktrace_latency_profile(logdir, cfg, df, features):
    """Profile block-device latency from blktrace/btt output.

    Extracts the "All Devices" section of <logdir>/btt.txt, plots the block
    offsets over time, computes latency quartiles over completion ('C')
    events in *df*, and appends blktrace_latency_q1..q3 features.
    """
    with open('%s/btt.txt' % logdir) as f:
        lines = f.readlines()
        # Locate the "All Devices" section boundaries in the btt report.
        # NOTE(review): if either marker line is missing, `start`/`end` stay
        # unbound and the slice below raises NameError — confirm btt output
        # always contains both markers.
        for i, line in enumerate(lines):
            if '==================== All Devices ====================' in line:
                start = i
            if '==================== Device Merge Information ====================' in line:
                end = i
                break
        bttoutput_result = lines[start:end]
    # offset_all.txt: whitespace-separated (time, start block, end block).
    df_offset = pd.read_table('%s/offset_all.txt' % logdir, delim_whitespace=True, names=('time', 'start', 'end'))
    time = df_offset['time'].tolist()
    start_b = df_offset['start'].tolist()
    end_b = df_offset['end'].tolist()
    fig = plt.figure(dpi=128, figsize=(16, 14))
    plt.plot(time, start_b, c='red', marker='o', alpha=0.3, label='Start block')
    plt.legend(loc='upper right')
    plt.title("Block Offset Report", fontsize=18)
    plt.xlabel('Timestamp (s)', fontsize=16)
    plt.ylabel("Block Number", fontsize=16)
    fig.savefig("%s/offset_of_device_report.pdf" % logdir, bbox_inches='tight')
    print('Offset of Device Report is saved at %s/offset_of_device_report.pdf' %logdir)
    if cfg.verbose:
        print_title('Storage Profiling:')
        print('Blktracae Latency (s):')
        for btt in bttoutput_result:
            print(btt[:-1])
    # 'C' events are request completions; their duration is the I/O latency.
    blktrace_latency = df['event'] == 'C'
    blktrace_latency_q1 = df[blktrace_latency]['duration'].quantile(0.25)
    blktrace_latency_q2 = df[blktrace_latency]['duration'].quantile(0.5)
    blktrace_latency_q3 = df[blktrace_latency]['duration'].quantile(0.75)
    blktrace_latency_mean = df[blktrace_latency]['duration'].mean()
    df_feature = pd.DataFrame({ 'name':['blktrace_latency_q1','blktrace_latency_q2','blktrace_latency_q3'],
                        'value': [blktrace_latency_q1, blktrace_latency_q2, blktrace_latency_q3] },
                        columns=['name','value'])
    features = pd.concat([features, df_feature])
    return features
def diskstat_profile(logdir, cfg, df, features):
    """Profile per-device disk throughput/IOPS/await-time statistics.

    Groups the diskstat vector by device, builds a quartile/mean summary
    table per device, optionally prints it, and appends diskstat_q1..q3
    features (mean across devices).
    """
    diskstat_dev = list(set(df['dev']))
    # Per-device quartiles and means for read, write, and combined throughput.
    diskstat_r_q1 = df.groupby('dev')['d_read'].quantile(0.25)
    diskstat_w_q1 = df.groupby('dev')['d_write'].quantile(0.25)
    diskstat_q1 = df.groupby('dev')['d_disk_total'].quantile(0.25)
    diskstat_r_q2 = df.groupby('dev')['d_read'].quantile(0.5)
    diskstat_w_q2 = df.groupby('dev')['d_write'].quantile(0.5)
    diskstat_q2 = df.groupby('dev')['d_disk_total'].quantile(0.5)
    diskstat_r_q3 = df.groupby('dev')['d_read'].quantile(0.75)
    diskstat_w_q3 = df.groupby('dev')['d_write'].quantile(0.75)
    diskstat_q3 = df.groupby('dev')['d_disk_total'].quantile(0.75)
    diskstat_r_avg = df.groupby('dev')['d_read'].mean()
    diskstat_w_avg = df.groupby('dev')['d_write'].mean()
    diskstat_avg = df.groupby('dev')['d_disk_total'].mean()
    diskstat_r_iops = df.groupby('dev')['r_iops'].mean()
    diskstat_w_iops = df.groupby('dev')['w_iops'].mean()
    diskstat_iops = df.groupby('dev')['iops'].mean()
    diskstat_wait = df.groupby('dev')['await_time'].mean()
    # One wide table: 12 throughput columns, 3 IOPS columns, 1 await column.
    diskstat_table = pd.concat([diskstat_r_q1, diskstat_r_q2, diskstat_r_q3, diskstat_r_avg,
                                diskstat_w_q1, diskstat_w_q2, diskstat_w_q3, diskstat_w_avg,
                                diskstat_q1, diskstat_q2, diskstat_q3, diskstat_avg,
                                diskstat_r_iops, diskstat_w_iops, diskstat_iops,
                                diskstat_wait], axis=1, sort=False)
    diskstat_columns = ['Q1 throughput(Read)', 'Q2 throughput(Read)', 'Q3 throughput(Read)', 'Avg throughput(Read)',
                        'Q1 throughput(Write)', 'Q2 throughput(Write)', 'Q3 throughput(Write)', 'Avg throughput(Write)',
                        'Q1 throughput(R+W)', 'Q2 throughput(R+W)', 'Q3 throughput(R+W)', 'Avg throughput(R+W)',
                        'Avg IOPS(Read)', 'Avg IOPS(Write)', 'Avg IOPS(R+W)', 'Avg Await time(ms)']
    diskstat_table.columns = diskstat_columns
    final_table = pd.DataFrame(columns=diskstat_columns)
    # Render throughput columns via convertbytes, IOPS as ints, await in ms.
    for j, dev in enumerate(diskstat_dev):
        tmp_list = []
        for i in diskstat_columns[:-4]:
            tmp_list.append(convertbytes(diskstat_table.iloc[j][i]))
        for i in diskstat_columns[-4:-1]:
            tmp_list.append('%d' % int(diskstat_table.iloc[j][i]))
        tmp_list.append('%.2lf ms' % diskstat_table.iloc[j][-1])
        tmp_table = pd.DataFrame([tuple(tmp_list)],
                                columns=diskstat_columns,
                                index=[dev])
        final_table = pd.concat([final_table, tmp_table])
    if cfg.verbose:
        print_title('DISKSTAT Profiling:')
        print('Disk Throughput Quartile :')
        print(final_table.T)
    df_feature = pd.DataFrame({ 'name':['diskstat_q1','diskstat_q2','diskstat_q3'],
                        'value': [diskstat_q1.mean(), diskstat_q2.mean(), diskstat_q3.mean()] },
                        columns=['name','value'])
    features = pd.concat([features, df_feature])
    return features
def cpu_profile(logdir, cfg, df):
    """Summarize CPU trace durations per device and print the top-20 events."""
    if cfg.verbose:
        print_title('CPU Profiling:')
        print('elapsed_time (s) = %.6lf' % cfg.elapsed_time)
    per_device = df.groupby("deviceId")["duration"]
    total_exec_time = 0
    for device_id, durations in per_device:
        device_total = durations.sum()
        print(("[%d]: %lf" % (device_id, device_total)))
        total_exec_time = total_exec_time + device_total
    print("total execution time (s) = %.3lf" % total_exec_time)
    # Rank every event by duration and show its share of the total.
    detail = df[['timestamp', 'duration', 'name']]
    detail = detail.sort_values(by=['duration'], ascending=False)
    detail['ratio(%)'] = detail['duration'] / total_exec_time * 100
    detail = detail[['timestamp', 'ratio(%)', 'duration', 'name']]
    print(detail[:20].to_string(index=False))
def vmstat_profile(logdir, cfg, df, features):
    """Profile vmstat samples and append mean swap/block/interrupt features.

    Parses the pipe-delimited ``name`` column (17 fields; si, so, bi, bo,
    in, cs sit at indices 6-11, each value carrying a 3-character label
    prefix such as ``si=``), then appends the means of bi/bo/cs/in to
    *features* as vm_bi/vm_bo/vm_cs/vm_in and returns the extended frame.

    Fixes:
    - The verbose output previously printed mismatched label/value pairs
      (bi with vm_cs, bo with vm_in, cs with vm_bi, in with vm_bo).
    - Uses ``str.split(expand=True)`` instead of tuple-unpacking the
      ``.str`` accessor, which is unsupported in modern pandas.
    """
    # Fields 6..11 of the '|'-separated record are si, so, bi, bo, in, cs.
    fields = df['name'].str.split('|', expand=True)
    for offset, col_name in enumerate(('si', 'so', 'bi', 'bo', 'in', 'cs')):
        # Drop the 3-character label prefix (e.g. 'si=') ahead of each value.
        df[col_name] = fields[6 + offset].str[3:]
    vmstat_traces = df[['si', 'so', 'bi', 'bo', 'in', 'cs']].astype(float)
    vm_bi = vmstat_traces['bi'].mean()
    vm_bo = vmstat_traces['bo'].mean()
    vm_cs = vmstat_traces['cs'].mean()
    vm_in = vmstat_traces['in'].mean()
    if cfg.verbose:
        print_title('VMSTAT Profiling:')
        print('average bi/s: %d' % int(vm_bi))
        print('average bo/s: %d' % int(vm_bo))
        print('average cs/s: %d' % int(vm_cs))
        print('average in/s: %d' % int(vm_in))
    df_feature = pd.DataFrame({ 'name':['vm_bi', 'vm_bo', 'vm_cs', 'vm_in' ],
                        'value':[vm_bi, vm_bo, vm_cs, vm_in] },
                        columns=['name','value'])
    features = pd.concat([features, df_feature])
    return features
def mpstat_profile(logdir, cfg, df, features):
    """Profile per-core CPU utilization from mpstat samples.

    Parses the pipe-delimited ``name`` field into USR/SYS/IDL/IOW/IRQ
    percentages, accumulates per-core active times, prints utilization and
    time tables, and appends num_cores and cpu_util features.

    NOTE(review): tuple-unpacking ``.str`` of a split Series (below) is
    unsupported in modern pandas — confirm the pinned pandas version.
    """
    if not cfg.cluster_ip and cfg.verbose:
        print_title('MPSTAT Profiling:')
    num_cores = int(df['deviceId'].max() + 1)
    df_summary = pd.DataFrame( np.zeros((num_cores,5)), columns=['USR','SYS','IDL','IOW','IRQ'])
    # Fields 5..9 of the '|'-separated record are USR, SYS, IDL, IOW, IRQ (%).
    _,_,_,_,_,df['USR'],df['SYS'],df['IDL'],df['IOW'],df['IRQ'],_ = df["name"].str.split('|').str
    df[['USR','SYS','IDL','IOW','IRQ']] = df[['USR','SYS','IDL','IOW','IRQ']].astype(float)
    # Estimate total wall time per sample: when fully idle, assume the 0.1 s
    # sampling period; otherwise scale the measured duration by busy share.
    df["dt_all"] = np.where(df["IDL"]==100, 0.1, df["duration"]/((100-df["IDL"])/100.0))
    df["t_USR"] = df['dt_all'] * df['USR']/100.0
    df["t_SYS"] = df['dt_all'] * df['SYS']/100.0
    df["t_IDL"] = df['dt_all'] * df['IDL']/100.0
    df["t_IOW"] = df['dt_all'] * df['IOW']/100.0
    df["t_IRQ"] = df['dt_all'] * df['IRQ']/100.0
    # Split samples per core and sum each time component.
    dfs=[]
    for i in range(num_cores):
        dfs.append(df.loc[df['deviceId'] == float(i)])
    for index,dff in enumerate(dfs):
        df_summary.iloc[index]['USR'] = dff['t_USR'].sum()
        df_summary.iloc[index]['SYS'] = dff['t_SYS'].sum()
        df_summary.iloc[index]['IDL'] = dff['t_IDL'].sum()
        df_summary.iloc[index]['IRQ'] = dff['t_IRQ'].sum()
        df_summary.iloc[index]['IOW'] = dff['t_IOW'].sum()
    if not cfg.cluster_ip and cfg.verbose:
        print('CPU Utilization (%):')
        print('core\tUSR\tSYS\tIDL\tIOW\tIRQ')
    for i in range(len(df_summary)):
        t_sum = df_summary.iloc[i].sum()
        if not cfg.cluster_ip and cfg.verbose:
            print('%3d\t%3d\t%3d\t%3d\t%3d\t%3d'%(i,int(100.0*df_summary.iloc[i]['USR']/t_sum),
                                                    int(100.0*df_summary.iloc[i]['SYS']/t_sum),
                                                    int(100.0*df_summary.iloc[i]['IDL']/t_sum),
                                                    int(100.0*df_summary.iloc[i]['IOW']/t_sum),
                                                    int(100.0*df_summary.iloc[i]['IRQ']/t_sum) ))
    if not cfg.cluster_ip and cfg.verbose:
        print('CPU Time (s):')
        print('core\tUSR\tSYS\tIDL\tIOW\tIRQ')
    for i in range(len(df_summary)):
        t_sum = df_summary.iloc[i].sum()
        if not cfg.cluster_ip and cfg.verbose:
            print('%3d\t%.2lf\t%.2lf\t%.2lf\t%.2lf\t%.2lf'%(i,
                                                            df_summary.iloc[i]['USR'],
                                                            df_summary.iloc[i]['SYS'],
                                                            df_summary.iloc[i]['IDL'],
                                                            df_summary.iloc[i]['IOW'],
                                                            df_summary.iloc[i]['IRQ'] ))
    # Active time excludes idle and iowait; utilization is normalized by the
    # run's elapsed time across all cores.
    total_cpu_time = df_summary[['USR','SYS','IRQ']].sum().sum()
    cpu_util = int(100*total_cpu_time / (num_cores*cfg.elapsed_time))
    if not cfg.cluster_ip and cfg.verbose:
        print('Active CPU Time (s): %.3lf' % total_cpu_time)
        print('Active CPU ratio (%%): %3d' % cpu_util)
    df_feature = pd.DataFrame({ 'name':['num_cores', 'cpu_util'],
                        'value':[num_cores, cpu_util] },
                        columns=['name','value'])
    features = pd.concat([features, df_feature])
    return features
def sofa_analyze(cfg):
    """Top-level analysis driver: load every trace CSV and run all profilers.

    Reads the trace files from cfg.logdir, optionally detects a spotlight GPU
    region of interest, runs each *_profile stage to accumulate a feature
    table, prints the final features, optionally queries the POTATO server
    for tuning hints, and copies the sofaboard assets into the log directory.
    """
    print_main_progress('SOFA analyzing...')
    filein = []
    df_cpu = pd.DataFrame([], columns=cfg.columns)
    df_gpu = pd.DataFrame([], columns=cfg.columns)
    df_net = pd.DataFrame([], columns=cfg.columns)
    df_mpstat = pd.DataFrame([], columns=cfg.columns)
    df_vmstat = pd.DataFrame([], columns=cfg.columns)
    df_bandwidth = pd.DataFrame([], columns=cfg.columns)
    df_blktrace = pd.DataFrame([], columns=cfg.columns)
    df_diskstat = pd.DataFrame([], columns=cfg.columns)
    df_nvsmi = pd.DataFrame([], columns=cfg.columns)
    iter_summary = None
    logdir = cfg.logdir

    # misc.txt: line 0 holds elapsed time, line 2 holds the vcore count.
    with open(logdir+'/misc.txt') as f:
        lines = f.readlines()
        elapsed_time = float(lines[0].split()[1])
        vcores = int(lines[2].split()[1])
        cfg.elapsed_time = float(lines[0].split()[1])

    filein_gpu = logdir + "gputrace.csv"
    filein_cpu = logdir + "cputrace.csv"
    filein_net = logdir + "nettrace.csv"
    filein_vmstat = logdir + "vmstat.csv"
    filein_mpstat = logdir + "mpstat.csv"
    filein_strace = logdir + "strace.csv"
    filein_nvsmi = logdir + "nvsmi_trace.csv"
    filein_bandwidth = logdir + "netstat.csv"
    filein_blktrace = logdir + "blktrace.csv"
    filein_diskstat = logdir + "diskstat_vector.csv"

    # Parse nvidia-smi topology to recommend an NVLink ring ordering, which
    # is written out as a CUDA_VISIBLE_DEVICES hint.
    if os.path.isfile('%s/nvlink_topo.txt' % logdir):
        with open(logdir + 'nvlink_topo.txt') as f:
            lines = f.readlines()
            if len(lines) > 0:
                title = lines[0]
                num_gpus = 1
                for word in title.split():
                    if re.match(r'GPU', word) != None :
                        num_gpus = num_gpus + 1
                print_info(cfg,'# of GPUs: ' + str(num_gpus) )
                edges = []
                if len(lines) >= num_gpus+1:
                    # NV1/NV2 entries indicate a direct NVLink connection.
                    for i in range(num_gpus):
                        connections = lines[1+i].split()
                        for j in range(len(connections)):
                            if connections[j] == 'NV1' or connections[j] == 'NV2':
                                edges.append((i,j-1))
                                #print('%d connects to %d' % (i, j-1))

                    ring_found = False
                    G = nx.DiGraph(edges)
                    # Try to find ring with its length of num_gpus
                    for cycle in nx.simple_cycles(G):
                        if len(cycle) == num_gpus:
                            if cfg.verbose:
                                print('One of the recommended ring having length of %d' % len(cycle))
                            ring_found = True
                            os.system("mkdir -p sofalog/sofa_hints/")
                            xring_order = ','.join(map(str, cycle))
                            with open("sofalog/sofa_hints/xring_order.txt", "w") as f:
                                f.write('export CUDA_VISIBLE_DEVICES=' + xring_order)
                            break

                    # Try to find ring with its length of num_gpus/2
                    if not ring_found:
                        for cycle in nx.simple_cycles(G):
                            if len(cycle) == num_gpus/2:
                                print(("One of the recommended ring having length of %d" % len(cycle) ))
                                ring_found = True
                                os.system("mkdir -p sofalog/sofa_hints/")
                                xring_order = ','.join(map(str, cycle))
                                with open("sofalog/sofa_hints/xring_order.txt", "w") as f:
                                    f.write('export CUDA_VISIBLE_DEVICES=' + xring_order)
                                break

    # Construct Performance Features
    features = pd.DataFrame({'name':['elapsed_time'], 'value':[cfg.elapsed_time]}, columns=['name','value'])

    # Spotlight detection: a hysteresis counter over SM-utilization samples of
    # GPU 0 marks the region of interest (roi_begin/roi_end) on cfg.
    try:
        df_nvsmi = pd.read_csv(filein_nvsmi)
        if not df_nvsmi.empty and cfg.spotlight_gpu:
            state = 0
            sm_high = 0
            trigger = 10
            for i in range(len(df_nvsmi)):
                if df_nvsmi.iloc[i].event == 0 and df_nvsmi.iloc[i].deviceId == 0 :
                    if df_nvsmi.iloc[i].duration >= 50:
                        sm_high = min(trigger, sm_high + 1)
                    if df_nvsmi.iloc[i].duration < 10:
                        sm_high = max(0, sm_high - 1)
                    if state == 0 and sm_high == trigger:
                        state = 1
                        cfg.roi_begin = df_nvsmi.iloc[i].timestamp
                    elif state == 1 and sm_high == 0:
                        state = 0
                        cfg.roi_end = df_nvsmi.iloc[i].timestamp
                #print('sm_high=%d state=%d' % (sm_high, state))
        if cfg.roi_end - cfg.roi_begin < 0:
            cfg.roi_end = 0
            cfg.roi_begin = 0
    except IOError:
        print_warning(cfg, "nvsmi_trace.csv is not found")

    # Each stage below is best-effort: a missing CSV only emits a warning.
    try:
        df_cpu = pd.read_csv(filein_cpu)
        if not df_cpu.empty:
            if cfg.verbose:
                cpu_profile(logdir, cfg, df_cpu)
            if cfg.enable_swarms and len(df_cpu) > cfg.num_swarms:
                df_cpu, swarms = hsg_v2(cfg, df_cpu)
    except IOError as e:
        df_cpu = pd.DataFrame([], columns=cfg.columns)
        print_warning(cfg, "%s is not found" % filein_cpu)

    try:
        df_strace = pd.read_csv(filein_strace)
        if not df_strace.empty:
            features = strace_profile(logdir, cfg, df_strace, features)
    except IOError as e:
        df_strace = pd.DataFrame([], columns=cfg.columns)
        print_warning(cfg, "%s is not found" % filein_strace)

    try:
        df_net = pd.read_csv(filein_net)
        if not df_net.empty:
            features = net_profile(logdir, cfg, df_net, features)
    except IOError as e:
        df_net = pd.DataFrame([], columns=cfg.columns)
        print_warning(cfg, "%s is not found" % filein_net)

    try:
        df_bandwidth = pd.read_csv(filein_bandwidth)
        if not df_bandwidth.empty:
            features = netbandwidth_profile(logdir, cfg, df_bandwidth, features)
    except IOError as e:
        df_bandwidth = pd.DataFrame([], columns=cfg.columns)
        print_warning(cfg, "%s is not found" % filein_bandwidth)

    try:
        df_blktrace = pd.read_csv(filein_blktrace)
        if not df_blktrace.empty:
            features = blktrace_latency_profile(logdir, cfg, df_blktrace, features)
    except IOError as e:
        df_blktrace = pd.DataFrame([], columns=cfg.columns)
        print_warning(cfg, "%s is not found" % filein_blktrace)

    try:
        df_diskstat = pd.read_csv(filein_diskstat)
        if not df_diskstat.empty:
            features = diskstat_profile(logdir, cfg, df_diskstat, features)
    except IOError as e:
        df_diskstat = pd.DataFrame([], columns=cfg.columns)
        print_warning(cfg, "%s is not found" % filein_diskstat)

    try:
        df_vmstat = pd.read_csv(filein_vmstat)
        if not df_vmstat.empty:
            features = vmstat_profile(logdir, cfg, df_vmstat, features)
    except IOError as e:
        df_vmstat = pd.DataFrame([], columns=cfg.columns)
        print_warning(cfg, "%s is not found" % filein_vmstat)

    try:
        df_mpstat = pd.read_csv(filein_mpstat)
        if not df_mpstat.empty:
            features = mpstat_profile(logdir, cfg, df_mpstat, features)
    except IOError as e:
        df_mpstat = pd.DataFrame([], columns=cfg.columns)
        print_warning(cfg, "%s is not found" % filein_mpstat)

    try:
        df_nvsmi = pd.read_csv(filein_nvsmi)
        features = nvsmi_profile(logdir, cfg, df_nvsmi, features)
    except IOError:
        print_warning(cfg, "nvsmi_trace.csv is not found")

    try:
        df_gpu = pd.read_csv(filein_gpu)
        if not df_gpu.empty:
            features = gpu_profile(logdir, cfg, df_gpu, features)
    except IOError:
        df_gpu = pd.DataFrame([], columns=cfg.columns)
        print_warning(cfg, "%s is not found. If there is no need to profile GPU, just ignore it." % filein_gpu)

    try:
        if len(df_nvsmi)>0 and len(df_mpstat)>0:
            # NOTE(review): DataFrame.append returns a NEW frame (and is
            # removed in pandas 2.x); the result is discarded here, so this
            # line is a no-op — confirm whether it was meant to mutate.
            df_nvsmi.append(df_mpstat.iloc[0])
            features = concurrency_breakdown(logdir, cfg, df_mpstat, df_cpu, df_gpu, df_nvsmi, df_bandwidth, features)
    except IOError as e:
        print_warning(cfg, "Some files are not found, which are needed for concurrency_breakdown analysis")

    if cfg.enable_aisi:
        selected_pattern, iter_summary, features = sofa_aisi(logdir, cfg, df_cpu, df_gpu, df_strace, df_mpstat, features)

    if 'IS_SOFA_ON_HAIHUB' not in os.environ or os.environ['IS_SOFA_ON_HAIHUB'] == 'no':
        print_title('Final Performance Features')
        print('%s%s%s%s' % ('ID'.ljust(10),'Feature'.ljust(30),'Value'.ljust(20),'Unit'.ljust(20)) )
        for i in range(len(features)):
            name = features.iloc[i]['name']
            value = features.iloc[i]['value']
            print('%s%s%s' % (str(i).ljust(10), name.ljust(30), ('%.3lf'%value).ljust(20)))

        if cfg.spotlight_gpu:
            try:
                print('Elapsed hotspot time: %.3lf' % features[features.name=='elapsed_hotspot_time'].value)
            except:
                print_warning(cfg, 'elpased_hostspot_time is not defined.')

        # Query the POTATO tuning server (gRPC, default port 50051) for a
        # feedback report and render it as HTML plus console suggestions.
        if cfg.potato_server:
            if cfg.potato_server.find(':') == -1:
                cfg.potato_server = cfg.potato_server + ':50051'
            hint, docker_image = get_hint(cfg.potato_server, features)
            df_report = pd.read_json(hint, orient='table')
            file_potato_report = cfg.logdir + 'potato_report.html'

            # Export report to HTML file.
            df_report.to_html(file_potato_report )
            with open(file_potato_report, 'a') as f:
                f.write('<head><link rel=stylesheet type="text/css" href="potato_report.css"></head>')

            print_title('POTATO Feedback')
            print('%s%s%s%s' % ('ID'.ljust(5), 'Metric'.ljust(20), 'Value'.ljust(10), 'Reference-Value'.ljust(30) ) )
            for i in range(len(df_report)):
                metric = df_report.iloc[i]['Metric']
                if metric != 'hybrid_suggestion':
                    value = df_report.iloc[i]['Value']
                    ref_value = df_report.iloc[i]['ReferenceValue']
                    print('%s%s%s%s' % (str(i).ljust(5), metric.ljust(20), ('%.3lf'%value).ljust(20), str(ref_value).ljust(30)))

            print('\n')
            print_hint('General Suggestions:')
            for i in range(len(df_report)):
                metric = df_report.iloc[i]['Metric']
                if metric != 'hybrid_suggestion':
                    suggestion = df_report.iloc[i]['Suggestion']
                    print('%d. %s' % (i, suggestion))

            print('\n')
            print_hint('Framework-specific Optimization Suggestions:')
            for i in range(len(df_report)):
                metric = df_report.iloc[i]['Metric']
                if metric == 'hybrid_suggestion':
                    suggestion = df_report.iloc[i]['Suggestion']
                    print('%d. %s' % (i, suggestion))

            #print(df_report[['Metric', 'Value', 'Reference Value']])
            #print(df_report[['Suggestion']])
            #print('Tag of optimal image recommended from POTATO: ' + highlight(docker_image))
            print('\n')
            print_hint('Please re-launch KubeFlow Jupyter-notebook to have suggested images or resources if necessary.')

        # Copy the sofaboard front-end assets next to the generated reports.
        sofa_home = os.path.dirname(os.path.realpath(__file__))
        subprocess.Popen(
            ['bash', '-c', 'cp %s/../sofaboard/* %s;' % (sofa_home, cfg.logdir)])
        subprocess.Popen(['sleep', '2'])
        print('\n\n')
def cluster_analyze(cfg):
    """Aggregate per-node SOFA profiles into cluster-wide summaries.

    For every IP in the comma-separated ``cfg.cluster_ip`` list, reads that
    node's ``./sofalog-<ip>/`` directory (nettrace/mpstat/nvsmi/netstat CSVs
    plus misc.txt) and builds three pandas summaries: ranked network traffic,
    per-node compute utilization, and tx/rx bandwidth quartiles. Results are
    printed when ``cfg.verbose`` is set; nothing is returned.
    """
    if cfg.verbose:
        print_title('Cluster Network Profiling :')
    cluster = cfg.cluster_ip.split(',')
    summary_net = pd.DataFrame([], columns=['Source', 'Destination', 'Amount', 'Percentage of a Node'])
    summary_compute = pd.DataFrame([], columns=['gpu_sm_util','gpu_mem_util','cpu_util'])
    summary_band = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg'])
    # NOTE(review): `all` is never used below and shadows the builtin all().
    all = []
    for i, ip in enumerate(cluster):
        # Seed the per-node feature table with the elapsed time; the
        # *_profile() helpers below append their own rows to it.
        features = pd.DataFrame({'name':['elapsed_time'],
                            'value':[cfg.elapsed_time]},
                            columns=['name','value'])
        node = 'node ' + str(i)
        if cfg.verbose:
            print('node ' + str(i) + ' is ' + ip)
        logdir = './sofalog-' + ip +'/'
        filein_net = logdir + "nettrace.csv"
        filein_mpstat = logdir + "mpstat.csv"
        filein_nvsmi = logdir + "nvsmi_trace.csv"
        filein_bandwidth = logdir + "netstat.csv"
        # misc.txt layout (by the indices read here): line 0 holds the
        # elapsed time, line 2 the vcore count -- TODO confirm format.
        with open(logdir+'/misc.txt') as f:
            lines = f.readlines()
            elapsed_time = float(lines[0].split()[1])
            vcores = int(lines[2].split()[1])
        # The last node's elapsed time wins; presumably nodes agree on it.
        cfg.elapsed_time = float(lines[0].split()[1])
        # Each trace file is optional: a missing CSV only produces a warning
        # and an empty frame so the remaining profiles still run.
        try:
            df_net = pd.read_csv(filein_net)
            features = net_profile(logdir, cfg, df_net, features)
        except IOError as e:
            df_net = pd.DataFrame([], columns=cfg.columns)
            print_warning(cfg, "%s is not found" % filein_net)
        try:
            df_mpstat = pd.read_csv(filein_mpstat)
            features = mpstat_profile(logdir, cfg, df_mpstat, features)
        except IOError as e:
            df_mpstat = pd.DataFrame([], columns=cfg.columns)
            print_warning(cfg, "%s is not found" % filein_mpstat)
        try:
            df_nvsmi = pd.read_csv(filein_nvsmi)
            features = nvsmi_profile(logdir, cfg, df_nvsmi, features)
        except IOError:
            print_warning(cfg, "nvsmi_trace.csv is not found")
        try:
            df_bandwidth = pd.read_csv(filein_bandwidth)
            features = netbandwidth_profile(logdir, cfg, df_bandwidth, features)
        except IOError as e:
            df_bandwidth = pd.DataFrame([], columns=cfg.columns)
            print_warning(cfg, "%s is not found" % filein_bandwidth)
        # Pull the scalar utilization values produced by the profilers above;
        # assumes each name appears exactly once in `features`.
        sm = int(features[features['name'] == 'gpu_sm_util']['value'])
        mem = int(features[features['name'] == 'gpu_mem_util']['value'])
        cpu = int(features[features['name'] == 'cpu_util']['value'])
        sm_mem_cpu = [sm, mem, cpu]
        compute_tmp = pd.DataFrame([sm_mem_cpu], columns = ['gpu_sm_util', 'gpu_mem_util', 'cpu_util'])
        # pd.concat([...], keys=[node]) prefixes the rows with a per-node
        # MultiIndex level so the cluster summary stays grouped by node.
        summary_compute = pd.concat([summary_compute, pd.concat([compute_tmp], keys=[node])])
        net_tmp = pd.read_csv(logdir + "netrank.csv")
        summary_net = pd.concat([summary_net, pd.concat([net_tmp], keys=[node])])
        # for bandwidth report
        # event 0 = transmit samples, event 1 = receive samples -- TODO confirm
        # against the netstat.csv producer.
        tx = df_bandwidth['event'] == float(0)
        rx = df_bandwidth['event'] == float(1)
        tx_tmp = [convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.25)),
                  convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.5)),
                  convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.75)),
                  convertbytes(df_bandwidth[tx]['bandwidth'].mean())]
        rx_tmp = [convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.25)),
                  convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.5)),
                  convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.75)),
                  convertbytes(df_bandwidth[rx]['bandwidth'].mean())]
        band_tmp = pd.DataFrame([tx_tmp], columns = ['Q1', 'Q2', 'Q3', 'Avg'], index = ['tx'])
        rx_pd = pd.DataFrame([rx_tmp], columns = ['Q1', 'Q2', 'Q3', 'Avg'], index = ['rx'])
        band_tmp = pd.concat([band_tmp, rx_pd])
        summary_band = pd.concat([summary_band, pd.concat([band_tmp], keys=[node])])
    if cfg.verbose:
        with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also
            print('Ranked Network Traffic : \n', summary_net, '\n')
            print('Cluster Bandwidth Quartile: \n', summary_band)
            print_title('Cluster Computation Profiling:')
            print(summary_compute)
| 48,776 | -9 | 515 |
75cc48f5e1deff324b747de89196a4848b8c3eec | 1,439 | py | Python | backend/database.py | metabsd/GLaDOS-Potato | 40d42e3d1471cdf6f24ddd36a06c5efd5fb3726b | [
"Apache-2.0"
] | 1 | 2016-09-22T00:05:38.000Z | 2016-09-22T00:05:38.000Z | backend/database.py | metabsd/GLaDOS-Potato | 40d42e3d1471cdf6f24ddd36a06c5efd5fb3726b | [
"Apache-2.0"
] | null | null | null | backend/database.py | metabsd/GLaDOS-Potato | 40d42e3d1471cdf6f24ddd36a06c5efd5fb3726b | [
"Apache-2.0"
] | null | null | null | import MySQLdb
db = MySQLdb.connect("db", "root", "my-secret-pw", "bd_notes")
cursor = db.cursor()
global resultsExportEtudiants
resultsExportEtudiants = []
| 28.215686 | 146 | 0.512856 | import MySQLdb
db = MySQLdb.connect("db", "root", "my-secret-pw", "bd_notes")
cursor = db.cursor()
global resultsExportEtudiants
resultsExportEtudiants = []
def getetudiants():
    """Load all rows of t_etudiant into the module-level resultsExportEtudiants list.

    Each row becomes a dict with keys id_etudiant, matricule, prenom, nom.
    On a driver error, prints the error and returns None.
    """
    # Clear in place so other references to the shared list see the refresh.
    del resultsExportEtudiants[:]
    sql = "SELECT * FROM t_etudiant"
    try:
        cursor.execute(sql)
        results = cursor.fetchall()
        for row in results:
            # Column order assumed: id, matricule, prenom, nom -- TODO confirm
            # against the t_etudiant schema.
            item = {
                "id_etudiant": row[0],
                "matricule": row[1],
                "prenom": row[2],
                "nom": row[3]
            }
            resultsExportEtudiants.append(item)
    except MySQLdb.Error as e:
        # The inner try guards against error objects without (code, message) args.
        try:
            print ("MySQL Error [%d]: %s" % (e.args[0], e.args[1]))
            return None
        except IndexError:
            print ("MySQL Error: %s" % str(e))
            return None
    finally:
        # NOTE(review): this closes the module-level cursor and connection, so
        # any later call into this module will fail -- confirm intended lifecycle.
        cursor.close()
        db.close()
def createetudiant(etudiant):
    """Insert one student row into t_etudiant and commit.

    ``etudiant`` is a mapping with keys 'matricule', 'nom' and 'prenom'.
    On a driver error, prints the error and returns None.
    """
    # Parameterized query: the driver quotes the values, preventing SQL
    # injection and breakage on inputs containing quotes (the original built
    # the statement with %-string formatting).
    sql = "Insert into t_etudiant(matricule, nom, prenom) values(%s, %s, %s)"
    try:
        cursor.execute(sql, (etudiant['matricule'], etudiant['nom'], etudiant['prenom']))
        db.commit()
    except MySQLdb.Error as e:
        # The inner try guards against error objects without (code, message) args.
        try:
            print ("MySQL Error [%d]: %s" % (e.args[0], e.args[1]))
            return None
        except IndexError:
            db.rollback()
            print ("MySQL Error: %s" % str(e))
            return None
    finally:
        # NOTE(review): closing the shared cursor/connection here means a second
        # call into this module will fail -- confirm intended lifecycle.
        cursor.close()
        db.close()
| 1,233 | 0 | 46 |
409cef07b90435c6fa62817fde97b01e461264b5 | 3,210 | py | Python | async_blp/base_request.py | rockscie/async_blp | acb8777ccf2499681bde87d76ca780b61219699c | [
"MIT"
] | 12 | 2019-08-05T16:56:54.000Z | 2021-02-02T11:09:37.000Z | async_blp/base_request.py | lightning-like/async_blp | acb8777ccf2499681bde87d76ca780b61219699c | [
"MIT"
] | null | null | null | async_blp/base_request.py | lightning-like/async_blp | acb8777ccf2499681bde87d76ca780b61219699c | [
"MIT"
] | 5 | 2019-12-08T15:43:13.000Z | 2021-11-14T08:38:07.000Z | import abc
import asyncio
from typing import Any
from typing import Dict
from typing import Optional
from async_blp.enums import ErrorBehaviour
from async_blp.utils import log
# pylint: disable=ungrouped-imports
try:
import blpapi
except ImportError:
from async_blp.utils import env_test as blpapi
LOGGER = log.get_logger()
| 31.165049 | 80 | 0.62648 | import abc
import asyncio
from typing import Any
from typing import Dict
from typing import Optional
from async_blp.enums import ErrorBehaviour
from async_blp.utils import log
# pylint: disable=ungrouped-imports
try:
import blpapi
except ImportError:
from async_blp.utils import env_test as blpapi
LOGGER = log.get_logger()
class RequestBase(metaclass=abc.ABCMeta):
    """Base class for Bloomberg requests.

    Subclasses set `service_name`/`request_name` and implement `process`
    and `weight`. Messages arrive from the (threaded) Bloomberg session via
    `send_queue_message` and are consumed asynchronously; a `None` message
    is the end-of-stream sentinel.
    """
    # Bloomberg service/request identifiers; filled in by subclasses.
    service_name = None
    request_name = None
    def __init__(self,
                 request_options: Dict[str, Any],
                 error_behavior: ErrorBehaviour = ErrorBehaviour.RETURN,
                 loop: asyncio.AbstractEventLoop = None,
                 ):
        """
        If no loop is given and none is running, defer loop/queue creation
        until `set_running_loop_as_default` is called.
        """
        try:
            self._loop = loop or asyncio.get_running_loop()
            # NOTE: asyncio.Queue(loop=...) was removed in Python 3.10; this
            # code targets older Python versions.
            self._msg_queue = asyncio.Queue(loop=self._loop)
        except RuntimeError:
            # Not inside a running event loop: created later on demand.
            self._loop = None
            self._msg_queue: Optional[asyncio.Queue] = None
        self._error_behaviour = error_behavior
        self._request_options = request_options or {}
    def send_queue_message(self, msg):
        """
        Thread-safe method that put the given msg into async queue
        """
        if self._loop is None or self._msg_queue is None:
            raise RuntimeError('Please create request inside async loop or set '
                               'loop explicitly if you want to use async')
        self._loop.call_soon_threadsafe(self._msg_queue.put_nowait, msg)
        LOGGER.debug('%s: message sent', self.__class__.__name__)
    async def _get_message_from_queue(self):
        """Await and return the next message; None signals end of stream."""
        LOGGER.debug('%s: waiting for messages', self.__class__.__name__)
        msg: blpapi.Message = await self._msg_queue.get()
        if msg is None:
            LOGGER.debug('%s: last message received, processing is '
                         'finished',
                         self.__class__.__name__)
        # NOTE(review): this debug line also fires for the None sentinel
        # (after the "last message" log above) -- confirm that is intended.
        LOGGER.debug('%s: message received', self.__class__.__name__)
        return msg
    def set_running_loop_as_default(self):
        """
        Set currently active loop as default for this request and create
        new message queue
        """
        self._loop = asyncio.get_running_loop()
        if self._msg_queue is not None and not self._msg_queue.empty():
            raise RuntimeError('Current message queue is not empty')
        self._msg_queue = asyncio.Queue()
        LOGGER.debug('%s: loop has been changed', self.__class__.__name__)
    def create(self, service: blpapi.Service) -> blpapi.Request:
        """
        Create Bloomberg request. Given `service` must be opened beforehand.
        """
        request = service.createRequest(self.request_name)
        for name, value in self._request_options.items():
            # List-valued options are appended element by element; scalars set.
            if isinstance(value, list):
                for item in value:
                    request.append(name, item)
            else:
                request.set(name, value)
        return request
    @abc.abstractmethod
    async def process(self):
        # Subclasses consume queued messages and produce their result here.
        pass
    @property
    @abc.abstractmethod
    def weight(self) -> int:
        """
        Approximate request complexity; used to balance load
        between handlers. More complex requests receive higher value.
        """
        pass
6059b582a58a0a62cbb1d858d37bdc9bba85ab35 | 3,108 | py | Python | qcengine/programs/madness/keywords.py | ahurta92/QCEngine | 7e7482886eb320696904a5ee35c0d37f98341728 | [
"BSD-3-Clause"
] | null | null | null | qcengine/programs/madness/keywords.py | ahurta92/QCEngine | 7e7482886eb320696904a5ee35c0d37f98341728 | [
"BSD-3-Clause"
] | null | null | null | qcengine/programs/madness/keywords.py | ahurta92/QCEngine | 7e7482886eb320696904a5ee35c0d37f98341728 | [
"BSD-3-Clause"
] | null | null | null | import collections
from typing import Any, Dict, Tuple
def format_keyword(keyword: str, val: Any, lop_off: bool = True) -> Tuple[str, str]:
"""Function to reformat value `val` for `keyword` from python into nwchem-speak."""
# Transform string booleans into " "
if val is True:
return keyword.lower(), "true"
elif val is False:
return keyword.lower(), "false"
# complete hack
# if keyword.upper() == "MEMORY":
# return keyword.lower(), f"{val} byte"
elif isinstance(val, list): # if it is a list... join the list into a string ??? when is this in play
text = " ".join([str(v) for v in val])
elif isinstance(val, dict): # val is a dict... text is list
text = []
for k, v in val.items():
merge = [k]
merge.extend(str(v) if isinstance(v, (int, float)) else list(map(str, v)))
text.append(" ".join(merge))
text = " ".join(text)
else:
text = str(val)
if lop_off:
return keyword[7:].lower(), text
else:
return keyword.lower(), text
def format_keywords(keywords: Dict[str, Any]) -> str:
    """From NWCHEM-directed, non-default `keywords` dictionary, write a NWCHEM deck.

    Keyword names use ``__`` to encode nesting (up to three levels, e.g.
    ``scf__thresh``); top-level options are collected into the synthetic
    "aaaglobal" group, which sorts first and is emitted without a group header.
    """
    def rec_dd():
        # Bug fix: rec_dd was referenced below but never defined, so every
        # call raised NameError. An arbitrarily nested defaultdict lets
        # grouped_options[g1][g2][key] = val work without pre-creating groups.
        return collections.defaultdict(rec_dd)
    grouped_options = rec_dd()
    for group_key, val in keywords.items():
        nesting = group_key.split("__")
        if len(nesting) == 1:
            key = nesting[0]
            grouped_options["aaaglobal"][key] = val
        elif len(nesting) == 2:
            g1, key = nesting
            grouped_options[g1][key] = val
        elif len(nesting) == 3:
            g1, g2, key = nesting
            grouped_options[g1][g2][key] = val
        else:
            print(nesting)
            raise ValueError("Nesting N!")
    grouped_lines = {}
    for group, opts in sorted(grouped_options.items()):
        lines = []
        group_level_lines = []
        for key, val in grouped_options[group].items():
            if isinstance(val, dict):
                # Third nesting level: render "subgroup k1 v1 k2 v2 ..." on one line.
                g2_level_lines = []
                g2_level_lines.append(key.lower())
                for k2, v2 in val.items():
                    line2 = " ".join(format_keyword(k2, v2, lop_off=False))
                    g2_level_lines.append(line2)
                g2_level_lines = " ".join(g2_level_lines)
                lines.append(g2_level_lines)
            else:
                line = " ".join(format_keyword(key, val, lop_off=False))
                # Certain basis directives belong on the group header line
                # rather than in the indented body.
                if group.lower() == "basis" and any(
                    [word in line for word in ["spherical", "cartesian", "print", "noprint", "rel"]]
                ):
                    group_level_lines.append(line)
                else:
                    lines.append(line)
        if group == "aaaglobal":
            grouped_lines[group] = "\n".join(lines) + "\n"
        else:
            grouped_lines[group] = (
                f"{group.lower()} " + " ".join(group_level_lines) + "\n " + "\n ".join(lines) + "\nend\n"
            )
    return "\n".join(grouped_lines.values()) + "\n"
| 35.318182 | 107 | 0.536358 | import collections
from typing import Any, Dict, Tuple
def format_keyword(keyword: str, val: Any, lop_off: bool = True) -> Tuple[str, str]:
    """Reformat ``val`` for ``keyword`` from Python into nwchem-speak.

    Returns ``(keyword, text)``; with ``lop_off`` the first seven characters
    of the keyword (a "module_"-style prefix) are dropped.
    """
    # Booleans render as literal "true"/"false"; the keyword is lowered but
    # never prefix-stripped in this branch.
    if val is True:
        return keyword.lower(), "true"
    if val is False:
        return keyword.lower(), "false"
    if isinstance(val, list):
        text = " ".join(str(item) for item in val)
    elif isinstance(val, dict):
        pieces = []
        for sub_key, sub_val in val.items():
            chunk = [sub_key]
            if isinstance(sub_val, (int, float)):
                # NOTE: extending with the *string* splits it into characters,
                # e.g. 12 -> "1 2"; kept to match existing behaviour.
                chunk.extend(str(sub_val))
            else:
                chunk.extend(str(part) for part in sub_val)
            pieces.append(" ".join(chunk))
        text = " ".join(pieces)
    else:
        text = str(val)
    trimmed = keyword[7:] if lop_off else keyword
    return trimmed.lower(), text
def format_keywords(keywords: Dict[str, Any]) -> str:
    """From NWCHEM-directed, non-default `keywords` dictionary, write a NWCHEM deck."""
    def rec_dd():
        # Arbitrarily nested defaultdict: grouped_options[g1][g2][key] = val
        # works without pre-creating intermediate groups.
        return collections.defaultdict(rec_dd)
    grouped_options = rec_dd()
    # Keyword names use "__" to encode nesting, up to three levels deep;
    # top-level options collect into the synthetic "aaaglobal" group,
    # which sorts first and is emitted without a group header.
    for group_key, val in keywords.items():
        nesting = group_key.split("__")
        if len(nesting) == 1:
            key = nesting[0]
            grouped_options["aaaglobal"][key] = val
        elif len(nesting) == 2:
            g1, key = nesting
            grouped_options[g1][key] = val
        elif len(nesting) == 3:
            g1, g2, key = nesting
            grouped_options[g1][g2][key] = val
        else:
            print(nesting)
            raise ValueError("Nesting N!")
    grouped_lines = {}
    for group, opts in sorted(grouped_options.items()):
        lines = []
        group_level_lines = []
        for key, val in grouped_options[group].items():
            if isinstance(val, dict):
                # Third nesting level: render "subgroup k1 v1 k2 v2 ..." on one line.
                g2_level_lines = []
                g2_level_lines.append(key.lower())
                for k2, v2 in val.items():
                    line2 = " ".join(format_keyword(k2, v2, lop_off=False))
                    g2_level_lines.append(line2)
                g2_level_lines = " ".join(g2_level_lines)
                lines.append(g2_level_lines)
            else:
                line = " ".join(format_keyword(key, val, lop_off=False))
                # Certain basis directives belong on the group header line
                # rather than in the indented body.
                if group.lower() == "basis" and any(
                    [word in line for word in ["spherical", "cartesian", "print", "noprint", "rel"]]
                ):
                    group_level_lines.append(line)
                else:
                    lines.append(line)
        if group == "aaaglobal":
            grouped_lines[group] = "\n".join(lines) + "\n"
        else:
            grouped_lines[group] = (
                f"{group.lower()} " + " ".join(group_level_lines) + "\n " + "\n ".join(lines) + "\nend\n"
            )
    return "\n".join(grouped_lines.values()) + "\n"
07a5e20ea4239cfeedbc6865859a61d72d396fa3 | 785 | py | Python | common/DirCleaner.py | hep-cce/hpc-edge-service | 57f2b9252d21d478eabe06cbdced5b623f08c75f | [
"BSD-3-Clause"
] | null | null | null | common/DirCleaner.py | hep-cce/hpc-edge-service | 57f2b9252d21d478eabe06cbdced5b623f08c75f | [
"BSD-3-Clause"
] | null | null | null | common/DirCleaner.py | hep-cce/hpc-edge-service | 57f2b9252d21d478eabe06cbdced5b623f08c75f | [
"BSD-3-Clause"
] | null | null | null | from common.file_tools import delete_old_files_directories
import time
| 37.380952 | 106 | 0.634395 | from common.file_tools import delete_old_files_directories
import time
class DirCleaner:
    """Rate-limited cleaner for old files/directories under a path.

    ``clean()`` may be called as often as desired; it performs an actual
    cleanup pass only when more than ``period_in_sec`` seconds have elapsed
    since the previous pass, removing entries older than ``cutoff_in_seconds``.
    """
    def __init__(self,path,period_in_sec,cutoff_in_seconds,remove_files = True, remove_directories = True):
        self.start_time = time.time()
        self.path = path
        self.period = period_in_sec
        self.last_time = self.start_time
        self.cutoff = cutoff_in_seconds
        self.remove_files = remove_files
        self.remove_directories = remove_directories
    def clean(self):
        """Delete old entries if a full period has elapsed since the last pass."""
        current_time = time.time()
        if current_time - self.last_time > self.period:
            # Bug fix: advance last_time so the throttle actually works.
            # Previously it was never updated, so once the first period
            # elapsed *every* subsequent call performed a cleanup pass.
            self.last_time = current_time
            delete_old_files_directories(self.path,
                                         self.cutoff,
                                         self.remove_files,
                                         self.remove_directories)
b2af71edc5b61342cca24d22cee1ab418ea58547 | 1,314 | py | Python | Client/inputs.py | Cescollino/FederatedPi | e7261c04c40cc7721e77287348c557c932d58543 | [
"MIT"
] | null | null | null | Client/inputs.py | Cescollino/FederatedPi | e7261c04c40cc7721e77287348c557c932d58543 | [
"MIT"
] | null | null | null | Client/inputs.py | Cescollino/FederatedPi | e7261c04c40cc7721e77287348c557c932d58543 | [
"MIT"
] | null | null | null |
from smbus2 import SMBus
import time
# RPi Channel 1
channel = 1
bus = SMBus(channel)
# ADS1115 address and registers
address = 0x48
reg_config = 0x01
# Config value:
# - Single conversion
# - A0 input
# - 4.096V reference
config = [0xC2, 0xB3]
while True:
reg_conversion = 0x00
# Start conversion
bus.write_i2c_block_data(address, reg_config, config)
# Wait for conversion
time.sleep(0.01)
# Read 16-bit result
result = bus.read_i2c_block_data(address, reg_conversion, 2)
# Convert from 2-complement
value = ((result[0] & 0xFF) << 8) | (result[1] & 0xFF)
if value & 0x8000 != 0:
value -= 1 << 16
# Convert value to voltage
v = value * 4.096 / 32768
print("A0:", v)
# Wait a second to start again
time.sleep(1)
# ADS1115 address and registers
address = 0x48
reg_config = 0x01
reg_conversion = 0x01
# Start conversion
bus.write_i2c_block_data(address, reg_config, config)
# Wait for conversion
time.sleep(0.01)
# Read 16-bit result
result = bus.read_i2c_block_data(address, reg_conversion, 2)
# Convert from 2-complement
value = ((result[0] & 0xFF) << 8) | (result[1] & 0xFF)
if value & 0x8000 != 0:
value -= 1 << 16
# Convert value to voltage
v = value * 4.096 / 32768
print("A1:", v)
# Wait a second to start again
time.sleep(1)
| 22.271186 | 62 | 0.671233 |
from smbus2 import SMBus
import time
# RPi Channel 1
channel = 1
bus = SMBus(channel)
# ADS1115 address and registers
address = 0x48
reg_config = 0x01
# Config value:
# - Single conversion
# - A0 input
# - 4.096V reference
config = [0xC2, 0xB3]
while True:
reg_conversion = 0x00
# Start conversion
bus.write_i2c_block_data(address, reg_config, config)
# Wait for conversion
time.sleep(0.01)
# Read 16-bit result
result = bus.read_i2c_block_data(address, reg_conversion, 2)
# Convert from 2-complement
value = ((result[0] & 0xFF) << 8) | (result[1] & 0xFF)
if value & 0x8000 != 0:
value -= 1 << 16
# Convert value to voltage
v = value * 4.096 / 32768
print("A0:", v)
# Wait a second to start again
time.sleep(1)
# ADS1115 address and registers
address = 0x48
reg_config = 0x01
reg_conversion = 0x01
# Start conversion
bus.write_i2c_block_data(address, reg_config, config)
# Wait for conversion
time.sleep(0.01)
# Read 16-bit result
result = bus.read_i2c_block_data(address, reg_conversion, 2)
# Convert from 2-complement
value = ((result[0] & 0xFF) << 8) | (result[1] & 0xFF)
if value & 0x8000 != 0:
value -= 1 << 16
# Convert value to voltage
v = value * 4.096 / 32768
print("A1:", v)
# Wait a second to start again
time.sleep(1)
| 0 | 0 | 0 |
6e22ddda76a72a3664a045ce067b390f4547bec7 | 8,076 | py | Python | utils/SamplingUtils.py | nanohedra/nanohedra | 3921b7f5ce10e0e3393c3b675bb97ccbecb96663 | [
"MIT"
] | 2 | 2020-12-07T00:38:32.000Z | 2021-05-13T19:36:17.000Z | utils/SamplingUtils.py | nanohedra/nanohedra | 3921b7f5ce10e0e3393c3b675bb97ccbecb96663 | [
"MIT"
] | null | null | null | utils/SamplingUtils.py | nanohedra/nanohedra | 3921b7f5ce10e0e3393c3b675bb97ccbecb96663 | [
"MIT"
] | 1 | 2021-05-13T19:36:18.000Z | 2021-05-13T19:36:18.000Z | import numpy as np
import math
# Copyright 2020 Joshua Laniado and Todd O. Yeates.
__author__ = "Joshua Laniado and Todd O. Yeates"
__copyright__ = "Copyright 2020, Nanohedra"
__version__ = "1.0"
# ROTATION RANGE DEG
C2 = 180
C3 = 120
C4 = 90
C5 = 72
C6 = 60
RotRangeDict = {"C2": C2, "C3": C3, "C4": C4, "C5": C5, "C6": C6}
| 40.38 | 138 | 0.59943 | import numpy as np
import math
# Copyright 2020 Joshua Laniado and Todd O. Yeates.
__author__ = "Joshua Laniado and Todd O. Yeates"
__copyright__ = "Copyright 2020, Nanohedra"
__version__ = "1.0"
# ROTATION RANGE DEG
C2 = 180
C3 = 120
C4 = 90
C5 = 72
C6 = 60
RotRangeDict = {"C2": C2, "C3": C3, "C4": C4, "C5": C5, "C6": C6}
def get_degeneracy_matrices(oligomer_symmetry_1, oligomer_symmetry_2, design_dimension, design_symmetry):
    """Return the degeneracy rotation matrices for the two oligomers.

    Returns a 2-element list [degeneracies_oligomer1, degeneracies_oligomer2];
    each entry is either None (no degeneracy) or a list of 3x3 rotation
    matrices (row-major nested lists). Raises ValueError for a symmetry not
    in the supported point groups or a dimension outside {0, 2, 3}.
    """
    valid_pt_gp_symm_list = ["C2", "C3", "C4", "C5", "C6", "D2", "D3", "D4", "D6", "T", "O", "I"]
    if oligomer_symmetry_1 not in valid_pt_gp_symm_list:
        raise ValueError("Invalid Point Group Symmetry")
    if oligomer_symmetry_2 not in valid_pt_gp_symm_list:
        raise ValueError("Invalid Point Group Symmetry")
    if design_symmetry not in valid_pt_gp_symm_list:
        raise ValueError("Invalid Point Group Symmetry")
    if design_dimension not in [0, 2, 3]:
        raise ValueError("Invalid Design Dimension")
    degeneracies = [None, None]
    # i == 0 handles oligomer 1, i == 1 handles oligomer 2.
    for i in range(2):
        degeneracy_matrices = None
        oligomer_symmetry = oligomer_symmetry_1 if i == 0 else oligomer_symmetry_2
        # For cages, only one of the two oligomers need to be flipped. By convention we flip oligomer 2.
        if design_dimension == 0 and i == 1:
            degeneracy_matrices = [[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]]]  # ROT180y
        # For layers that obey a cyclic point group symmetry
        # and that are constructed from two oligomers that both obey cyclic symmetry
        # only one of the two oligomers need to be flipped. By convention we flip oligomer 2.
        elif design_dimension == 2 and i == 1 and (oligomer_symmetry_1[0], oligomer_symmetry_2[0], design_symmetry[0]) == ("C", "C", "C"):
            degeneracy_matrices = [[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]]]  # ROT180y
        elif oligomer_symmetry in ["D3", "D4", "D6"] and design_symmetry in ["D3", "D4", "D6", "T", "O"]:
            # Rotation by half the Dn top-rotation angle maps the dihedral
            # frame onto a degenerate but distinct docking frame.
            if oligomer_symmetry == "D3":
                degeneracy_matrices = [[[0.5, -0.86603, 0.0], [0.86603, 0.5, 0.0], [0.0, 0.0, 1.0]]]  # ROT60z
            elif oligomer_symmetry == "D4":
                # 45 degrees about z; z unaffected; x goes to [1,-1,0] direction
                degeneracy_matrices = [[[0.707107, 0.707107, 0.0], [-0.707107, 0.707107, 0.0], [0.0, 0.0, 1.0]]]
            elif oligomer_symmetry == "D6":
                degeneracy_matrices = [[[0.86603, -0.5, 0.0], [0.5, 0.86603, 0.0], [0.0, 0.0, 1.0]]]  # ROT30z
        elif oligomer_symmetry == "D2" and design_symmetry != "O":
            if design_symmetry == "T":
                degeneracy_matrices = [[[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]]  # ROT90z
            elif design_symmetry == "D4":
                degeneracy_matrices = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
                                       [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]]]  # z,x,y and y,z,x
            elif design_symmetry == "D2" or design_symmetry == "D6":
                # Axis permutations (and one axis flip set) of the D2 frame.
                degeneracy_matrices = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
                                       [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]],
                                       [[-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]],
                                       [[0.0, 0.0, 1.0], [0.0, -1.0, 0.0], [1.0, 0.0, 0.0]],
                                       [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]]]
        elif oligomer_symmetry == "T" and design_symmetry == "T":
            degeneracy_matrices = [[[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]]  # ROT90z
        degeneracies[i] = degeneracy_matrices
    return degeneracies
def parse_ref_tx_dof_str_to_list(ref_frame_tx_dof_string):
    """Split a '<a, b, c>' reference-frame DOF string into ['a', 'b', 'c'].

    Angle brackets and all spaces are discarded; components are separated
    by commas.
    """
    stripped = ref_frame_tx_dof_string.replace('<', '').replace('>', '')
    return [token.replace(' ', '') for token in stripped.split(',')]
def get_tx_dof_ref_frame_var_vec(string_vec, var):
    """Extract the coefficient of *var* from each of the three DOF tokens.

    A token 'c*var' yields c, '-var' yields -1.0, a bare 'var' yields 1.0,
    anything else 0.0. Returns a 3-element float list.
    """
    def coefficient(token):
        if var in token and '*' in token:
            # Numeric multiplier written as '<number>*<var>'.
            return float(token.split('*')[0])
        if '-' + var in token:
            return -1.0
        if token == var:
            return 1.0
        return 0.0
    return [coefficient(string_vec[i]) for i in range(3)]
def get_ext_dof(ref_frame_tx_dof1, ref_frame_tx_dof2):
    """Return the external translational degrees of freedom between two frames.

    Each argument is a '<...>' reference-frame DOF string over the variables
    e, f, g. For each variable, the difference of its coefficient vectors
    (frame 2 minus frame 1) is appended to the result when it is non-zero.
    Returns a list of 3-element float lists.
    """
    ext_dof = []
    parsed_1 = parse_ref_tx_dof_str_to_list(ref_frame_tx_dof1)
    parsed_2 = parse_ref_tx_dof_str_to_list(ref_frame_tx_dof2)
    e1_var_vec = get_tx_dof_ref_frame_var_vec(parsed_1, 'e')
    f1_var_vec = get_tx_dof_ref_frame_var_vec(parsed_1, 'f')
    g1_var_vec = get_tx_dof_ref_frame_var_vec(parsed_1, 'g')
    e2_var_vec = get_tx_dof_ref_frame_var_vec(parsed_2, 'e')
    f2_var_vec = get_tx_dof_ref_frame_var_vec(parsed_2, 'f')
    g2_var_vec = get_tx_dof_ref_frame_var_vec(parsed_2, 'g')
    e2e1_diff = (np.array(e2_var_vec) - np.array(e1_var_vec)).tolist()
    f2f1_diff = (np.array(f2_var_vec) - np.array(f1_var_vec)).tolist()
    g2g1_diff = (np.array(g2_var_vec) - np.array(g1_var_vec)).tolist()
    # Only non-zero differences count as actual external degrees of freedom.
    if e2e1_diff != [0, 0, 0]:
        ext_dof.append(e2e1_diff)
    if f2f1_diff != [0, 0, 0]:
        ext_dof.append(f2f1_diff)
    if g2g1_diff != [0, 0, 0]:
        ext_dof.append(g2g1_diff)
    return ext_dof
def get_optimal_external_tx_vector(ref_frame_tx_dof, optimal_ext_dof_shifts):
    """Combine per-DOF shifts into one external translation vector.

    `ref_frame_tx_dof` is a '<...>' DOF string over e, f, g;
    `optimal_ext_dof_shifts` holds one scalar shift per DOF, in e, f, g order
    (only as many entries as there are shifts). Returns the summed 3-vector
    as a plain list of floats.
    """
    ext_dof_variables = ['e', 'f', 'g']
    parsed_ref_tx_vec = parse_ref_tx_dof_str_to_list(ref_frame_tx_dof)
    optimal_external_tx_vector = np.array([0.0, 0.0, 0.0])
    for dof_shift_index in range(len(optimal_ext_dof_shifts)):
        dof_shift = optimal_ext_dof_shifts[dof_shift_index]
        # Scale the variable's coefficient vector by its optimal shift and accumulate.
        var_vec = get_tx_dof_ref_frame_var_vec(parsed_ref_tx_vec, ext_dof_variables[dof_shift_index])
        shifted_var_vec = np.array(var_vec) * dof_shift
        optimal_external_tx_vector += shifted_var_vec
    return optimal_external_tx_vector.tolist()
def get_rot_matrices(step_deg, axis, rot_range_deg):
    """Return 3x3 rotation matrices about *axis* ('x', 'y' or 'z'), sampled
    every *step_deg* degrees over [0, rot_range_deg).

    Prints a message and returns None for an unsupported axis.
    """
    if axis not in ('x', 'y', 'z'):
        # print() with a single argument is valid on both Python 2 and 3; the
        # original `print "..."` statement form is a SyntaxError under Python 3.
        print("AXIS SELECTED FOR SAMPLING IS NOT SUPPORTED")
        return None
    rot_matrices = []
    # One shared loop instead of three copy-pasted per-axis loops.
    for angle_deg in range(0, rot_range_deg, step_deg):
        rad = math.radians(float(angle_deg))
        cos, sin = math.cos(rad), math.sin(rad)
        if axis == 'x':
            rot_matrices.append([[1, 0, 0], [0, cos, -1 * sin], [0, sin, cos]])
        elif axis == 'y':
            rot_matrices.append([[cos, 0, sin], [0, 1, 0], [-1 * sin, 0, cos]])
        else:  # 'z'
            rot_matrices.append([[cos, -1 * sin, 0], [sin, cos, 0], [0, 0, 1]])
    return rot_matrices
def get_degen_rotmatrices(degeneracy_matrices, rotation_matrices):
    """Combine degeneracy matrices with sampling rotation matrices.

    Returns a list of matrix lists: the first entry is the (possibly
    identity-only) rotation set, followed by one rotated copy per degeneracy
    matrix. With neither input, a single identity set is returned.
    """
    identity = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    have_rotations = rotation_matrices != list()
    have_degeneracies = degeneracy_matrices is not None
    if not have_rotations and have_degeneracies:
        # No rotation sampling: identity set plus one singleton set per degeneracy.
        return [[identity]] + [[degen] for degen in degeneracy_matrices]
    if have_rotations and not have_degeneracies:
        return [rotation_matrices]
    if have_rotations and have_degeneracies:
        combined_sets = [rotation_matrices]
        for degen in degeneracy_matrices:
            # Apply the degeneracy after each sampled rotation.
            combined_sets.append(
                [np.matmul(rot, degen).tolist() for rot in rotation_matrices])
        return combined_sets
    return [[identity]]
| 7,579 | 0 | 161 |
5b1d9807ad96abc37e81be4b9f133dcc9788a6ff | 466 | py | Python | tests/helpers/test_utils.py | biern/precious | 4b533bfaa850641a410ef05a453422a09f2d9603 | [
"MIT"
] | 2 | 2016-11-06T13:24:53.000Z | 2018-07-15T11:19:15.000Z | tests/helpers/test_utils.py | biern/precious | 4b533bfaa850641a410ef05a453422a09f2d9603 | [
"MIT"
] | null | null | null | tests/helpers/test_utils.py | biern/precious | 4b533bfaa850641a410ef05a453422a09f2d9603 | [
"MIT"
] | null | null | null | from precious import Value, assign_attributes, copy
| 20.26087 | 54 | 0.60515 | from precious import Value, assign_attributes, copy
class Point(Value):
    """2-D value-object fixture for the copy() tests below."""
    # assign_attributes presumably binds x and y onto the instance -- it comes
    # from the `precious` package under test.
    @assign_attributes
    def __init__(self, x, y): pass
class TestCopyObject:
    """Tests for precious.copy on a Value subclass."""
    def test_copy_object_with_same_attributes(self):
        """copy() without overrides preserves all attributes."""
        p1 = Point(1, 2)
        p2 = copy(p1)
        assert p2.x == 1
        assert p2.y == 2
    def test_copy_object_with_changed_attribute(self):
        """copy() with a keyword override replaces only that attribute."""
        p1 = Point(1, 2)
        p2 = copy(p1, x=3)
        assert p2.x == 3
        assert p2.y == 2
7ac0bfbd29d462f9bf8bf305205720af17e182d2 | 550 | py | Python | tests/handlers/test_maven_releases.py | sawood14012/fabric8-analytics-jobs | a7d850dfef5785144676b9a3b4e29942161e5347 | [
"Apache-2.0"
] | 5 | 2017-05-04T11:22:31.000Z | 2018-08-24T16:12:30.000Z | tests/handlers/test_maven_releases.py | sawood14012/fabric8-analytics-jobs | a7d850dfef5785144676b9a3b4e29942161e5347 | [
"Apache-2.0"
] | 325 | 2017-05-03T08:44:03.000Z | 2021-12-13T21:03:49.000Z | tests/handlers/test_maven_releases.py | sawood14012/fabric8-analytics-jobs | a7d850dfef5785144676b9a3b4e29942161e5347 | [
"Apache-2.0"
] | 28 | 2017-05-02T05:09:32.000Z | 2021-03-11T09:42:34.000Z | """Tests for maven_releases.py."""
# import pytest
# TODO enable when new test(s) will be added
# from f8a_jobs.handlers.maven_releases import MavenReleasesAnalyses
class TestMavenReleasesAnalyses(object):
"""Tests for MavenReleasesAnalyses class."""
def setup_method(self, method):
"""Set up any state tied to the execution of the given method in a class."""
assert method
def teardown_method(self, method):
"""Teardown any state that was previously setup with a setup_method call."""
assert method
| 28.947368 | 84 | 0.710909 | """Tests for maven_releases.py."""
# import pytest
# TODO enable when new test(s) will be added
# from f8a_jobs.handlers.maven_releases import MavenReleasesAnalyses
class TestMavenReleasesAnalyses(object):
    """Tests for MavenReleasesAnalyses class."""
    def setup_method(self, method):
        """Set up any state tied to the execution of the given method in a class."""
        # Sanity check that pytest passed the test method object (always truthy).
        assert method
    def teardown_method(self, method):
        """Teardown any state that was previously setup with a setup_method call."""
        assert method
a9553230e74a1b791dc28477f6cff5a0798d610e | 901 | py | Python | python/design_patterns/template_method.py | pgularski/snippets | 0b34ac837dd768f3dfa72dbf3e3d271c51e71da9 | [
"MIT"
] | null | null | null | python/design_patterns/template_method.py | pgularski/snippets | 0b34ac837dd768f3dfa72dbf3e3d271c51e71da9 | [
"MIT"
] | null | null | null | python/design_patterns/template_method.py | pgularski/snippets | 0b34ac837dd768f3dfa72dbf3e3d271c51e71da9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# −*− coding: UTF−8 −*−
import abc
if __name__ == '__main__':
test()
| 18.02 | 44 | 0.588235 | #!/usr/bin/env python
# −*− coding: UTF−8 −*−
import abc
class HotBeverage(object):
    """Template-method base class: `prepare` fixes the brewing algorithm,
    subclasses supply `brew` and `add_condiments` (and may override the
    `customer_wants_condiments` hook)."""
    # NOTE: __metaclass__ only takes effect on Python 2; on Python 3 the
    # abstract methods are not enforced unless the class derives from abc.ABC.
    __metaclass__ = abc.ABCMeta

    def prepare(self):
        """Template method: run the fixed preparation steps in order."""
        self.boil_water()
        self.brew()
        self.pour_in_cup()
        if self.customer_wants_condiments():
            self.add_condiments()

    @abc.abstractmethod
    def brew(self):
        """ Implement this """

    @abc.abstractmethod
    def add_condiments(self):
        """ Implement this """

    def boil_water(self):
        # print() with a single argument is valid on both Python 2 and 3;
        # the original `print "..."` statements were Python-3 syntax errors.
        print("Boiling water...")

    def pour_in_cup(self):
        print("Pouring...")

    def customer_wants_condiments(self):
        """ Reimplement this if needed. """
        return True
class Coffee(HotBeverage):
    """Concrete beverage: brews coffee and adds milk."""
    def brew(self):
        # print() call form works on Python 2 and 3 (the original
        # `print "..."` statement was a Python-3 syntax error); also fixes
        # the "cofee" typo in the user-facing message.
        print("Preparing coffee...")

    def add_condiments(self):
        print("Adding milk...")
def test():
    """Smoke test: prepare a Coffee, printing each template-method step."""
    Coffee().prepare()
if __name__ == '__main__':
test()
| 287 | 391 | 122 |
45d6e9edd987d3f729c82e2725f851919559b12b | 1,177 | py | Python | pages/themes/ParallelProgramming-Lecture4/examples/download_images_demos/download_images_with_threas.py | WWWCourses/PythonCourseNetIT-Slides | 78dbb5eb7695cc64042b71a1911d4ef3feddb074 | [
"MIT"
] | null | null | null | pages/themes/ParallelProgramming-Lecture4/examples/download_images_demos/download_images_with_threas.py | WWWCourses/PythonCourseNetIT-Slides | 78dbb5eb7695cc64042b71a1911d4ef3feddb074 | [
"MIT"
] | null | null | null | pages/themes/ParallelProgramming-Lecture4/examples/download_images_demos/download_images_with_threas.py | WWWCourses/PythonCourseNetIT-Slides | 78dbb5eb7695cc64042b71a1911d4ef3feddb074 | [
"MIT"
] | null | null | null | import threading
import requests
import os
import time
urls = [
"https://unsplash.com/photos/CTflmHHVrBM/download?force=true",
"https://unsplash.com/photos/pWV8HjvHzk8/download?force=true",
# "https://unsplash.com/photos/1jn_3WBp60I/download?force=true",
# "https://unsplash.com/photos/8E5HawfqCMM/download?force=true",
# "https://unsplash.com/photos/yTOkMc2q01o/download?force=true"
]
download_path = os.path.join(os.getcwd(),"downloaded_images")
if __name__ == "__main__":
start= time.time()
threads = []
# create and start a thread per each url
for url in urls:
tr = threading.Thread(target=download_file,args=(url,))
threads.append(tr)
tr.start()
# result= download_file(url)
# join all threads
for tr in threads:
tr.join()
end = time.time()
print(f"Procesing time: {end-start}") | 24.020408 | 65 | 0.723025 | import threading
import requests
import os
import time
def download_file(url):
    """Fetch the image at *url* and save it under the module-level download_path.

    The local file name is taken from the 5th path segment of the URL with a
    ".jpg" suffix appended.  Progress is reported on stdout.
    """
    full_file_name = os.path.join(download_path, url.split('/')[4] + '.jpg')
    print(f"Start downloading {url}")
    response = requests.get(url, allow_redirects=True)
    # Persist the raw response body as the image file.
    with open(full_file_name, 'wb') as fh:
        fh.write(response.content)
    print(f"File saved to {full_file_name}")
urls = [
"https://unsplash.com/photos/CTflmHHVrBM/download?force=true",
"https://unsplash.com/photos/pWV8HjvHzk8/download?force=true",
# "https://unsplash.com/photos/1jn_3WBp60I/download?force=true",
# "https://unsplash.com/photos/8E5HawfqCMM/download?force=true",
# "https://unsplash.com/photos/yTOkMc2q01o/download?force=true"
]
download_path = os.path.join(os.getcwd(),"downloaded_images")
if __name__ == "__main__":
start= time.time()
threads = []
# create and start a thread per each url
for url in urls:
tr = threading.Thread(target=download_file,args=(url,))
threads.append(tr)
tr.start()
# result= download_file(url)
# join all threads
for tr in threads:
tr.join()
end = time.time()
print(f"Procesing time: {end-start}") | 338 | 0 | 23 |
7e6fb24c9fdf24078eca20ccd5e9b951ede5ba72 | 350 | py | Python | lhotse/recipes/__init__.py | freewym/lhotse | 66e9bbaf25b75011388ab00189baa162c3c1d435 | [
"Apache-2.0"
] | null | null | null | lhotse/recipes/__init__.py | freewym/lhotse | 66e9bbaf25b75011388ab00189baa162c3c1d435 | [
"Apache-2.0"
] | null | null | null | lhotse/recipes/__init__.py | freewym/lhotse | 66e9bbaf25b75011388ab00189baa162c3c1d435 | [
"Apache-2.0"
] | null | null | null | from .ami import prepare_ami
from .broadcast_news import prepare_broadcast_news
from .librimix import prepare_librimix
from .librispeech import prepare_librispeech
from .switchboard import prepare_switchboard
__all__ = [
'prepare_ami',
'prepare_broadcast_news',
'prepare_librimix',
'prepare_librispeech',
'prepare_switchboard'
]
| 25 | 50 | 0.794286 | from .ami import prepare_ami
from .broadcast_news import prepare_broadcast_news
from .librimix import prepare_librimix
from .librispeech import prepare_librispeech
from .switchboard import prepare_switchboard
__all__ = [
'prepare_ami',
'prepare_broadcast_news',
'prepare_librimix',
'prepare_librispeech',
'prepare_switchboard'
]
| 0 | 0 | 0 |
34c4a49b1a56810d1ba1396800b3826151f2cfe8 | 1,109 | py | Python | linux/local_article.py | shienka07/no_corona_zone | 3b015d3bf7bff38eadfad722da4222bf5ed92c8e | [
"Apache-2.0"
] | null | null | null | linux/local_article.py | shienka07/no_corona_zone | 3b015d3bf7bff38eadfad722da4222bf5ed92c8e | [
"Apache-2.0"
] | null | null | null | linux/local_article.py | shienka07/no_corona_zone | 3b015d3bf7bff38eadfad722da4222bf5ed92c8e | [
"Apache-2.0"
] | 4 | 2021-07-23T07:35:44.000Z | 2021-09-10T11:51:32.000Z | import re
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import argparse
import ncz
import chromedriver_binary
parser = argparse.ArgumentParser()
parser.add_argument('keyword1', help="main_city_name")
parser.add_argument('keyword2', help="sub_city_name")
parser.add_argument('keyword3', help="file_path")
mainCityName = parser.parse_args().keyword1
subCityName = parser.parse_args().keyword2
filePath = re.sub(r"[^./_a-z]","",parser.parse_args().keyword3)
if re.search(subCityName, "전체"):
subCityName = ""
chrome_options = Options()
# chrome_options.add_argument('--headless') # 화면 안띄움
#chrome_options.add_argument('--start-maximized') # F11 전체 화면 설정
driver = webdriver.Chrome(options=chrome_options)
url = "https://search.naver.com/search.naver?query={}+{}+코로나&where=news&ie=utf8&sm=nws_hty".format(
mainCityName, subCityName)
driver.get(url)
elem = driver.find_element_by_xpath('//*[@id="main_pack"]/section/div/div[2]/ul')
articles = elem.find_elements_by_class_name("news_wrap.api_ani_send") # 기사들
ncz.naverArticlePattern(filePath, articles)
| 27.04878 | 99 | 0.762849 | import re
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import argparse
import ncz
import chromedriver_binary
parser = argparse.ArgumentParser()
parser.add_argument('keyword1', help="main_city_name")
parser.add_argument('keyword2', help="sub_city_name")
parser.add_argument('keyword3', help="file_path")
mainCityName = parser.parse_args().keyword1
subCityName = parser.parse_args().keyword2
filePath = re.sub(r"[^./_a-z]","",parser.parse_args().keyword3)
if re.search(subCityName, "전체"):
subCityName = ""
chrome_options = Options()
# chrome_options.add_argument('--headless') # 화면 안띄움
#chrome_options.add_argument('--start-maximized') # F11 전체 화면 설정
driver = webdriver.Chrome(options=chrome_options)
url = "https://search.naver.com/search.naver?query={}+{}+코로나&where=news&ie=utf8&sm=nws_hty".format(
mainCityName, subCityName)
driver.get(url)
elem = driver.find_element_by_xpath('//*[@id="main_pack"]/section/div/div[2]/ul')
articles = elem.find_elements_by_class_name("news_wrap.api_ani_send") # 기사들
ncz.naverArticlePattern(filePath, articles)
| 0 | 0 | 0 |
3be02ab6c72b00d31e2b2218e2fec095e4a55665 | 3,712 | py | Python | CalibMuon/DTCalibration/python/Workflow/DTTTrigTimeBoxesWriter.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | CalibMuon/DTCalibration/python/Workflow/DTTTrigTimeBoxesWriter.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | CalibMuon/DTCalibration/python/Workflow/DTTTrigTimeBoxesWriter.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | from tools import loadCmsProcess,writeCfg
from addPoolDBESSource import addPoolDBESSource
from CmsswTask import CmsswTask
import os
| 52.28169 | 129 | 0.655172 | from tools import loadCmsProcess,writeCfg
from addPoolDBESSource import addPoolDBESSource
from CmsswTask import CmsswTask
import os
class DTTTrigTimeBoxesWriter:
    """Builds and runs the cmsRun configuration chain that turns DT time-box
    histograms into tTrig calibration constants.

    Three steps are chained through files in result_dir:
      1. dtTTrigWriter_cfg.py      - fit the time boxes, write uncorrected tTrig DB
      2. dtTTrigCorrection_cfg.py  - apply corrections, write the final tTrig DB
      3. dumpDBToFile_ttrig_cfg.py - dump the final DB to a plain-text file
    """
    def __init__(self, run, dir, result_dir, config):
        # run: run number, used to name the produced DB/root/txt files.
        # dir: directory the cfg files are written to.
        # result_dir: directory holding the produced artifacts.
        # config: workflow config; only .globaltag is read here.
        self.runnumber = int(run)
        self.config = config
        self.dir = dir
        self.result_dir = result_dir
        self.configs = ['dtTTrigWriter_cfg.py',
                        'dtTTrigCorrection_cfg.py',
                        'dumpDBToFile_ttrig_cfg.py']
        self.pset_templates = {
            'dtTTrigWriter_cfg.py': 'CalibMuon.DTCalibration.dtTTrigWriter_cfg',
            'dtTTrigCorrection_cfg.py': 'CalibMuon.DTCalibration.dtTTrigCorrection_cfg',
            'dumpDBToFile_ttrig_cfg.py': 'CalibMuon.DTCalibration.dumpDBToFile_ttrig_cfg',
        }
        self.initProcess()
        self.task = CmsswTask(self.dir, self.configs)
    def initProcess(self):
        """Load the three cfg templates and wire them to run-specific files."""
        run_tag = str(self.runnumber)
        time_boxes = os.path.abspath(self.result_dir + '/' + 'DTTimeBoxes_' + run_tag + '.root')
        ttrig_uncorrected_db = os.path.abspath(self.result_dir + '/' + 'ttrig_uncorrected_' + run_tag + '.db')
        ttrig_timeboxes_db = os.path.abspath(self.result_dir + '/' + 'ttrig_timeboxes_' + run_tag + '.db')
        ttrig_timeboxes_txt = os.path.abspath(self.result_dir + '/' + 'ttrig_timeboxes_' + run_tag + '.txt')
        self.process = {}
        # Step 1: fit the time boxes and write the uncorrected tTrig payload.
        writer = loadCmsProcess(self.pset_templates['dtTTrigWriter_cfg.py'])
        writer.dtTTrigWriter.rootFileName = time_boxes
        writer.PoolDBOutputService.connect = 'sqlite_file:%s' % ttrig_uncorrected_db
        self.process['dtTTrigWriter_cfg.py'] = writer
        # Step 2: read the uncorrected DB back and apply the correction.
        correction = loadCmsProcess(self.pset_templates['dtTTrigCorrection_cfg.py'])
        correction.GlobalTag.globaltag = self.config.globaltag
        correction.source.firstRun = self.runnumber
        addPoolDBESSource(process=correction,
                          moduleName='calibDB', record='DTTtrigRcd', tag='ttrig',
                          connect='sqlite_file:%s' % ttrig_uncorrected_db)
        correction.PoolDBOutputService.connect = 'sqlite_file:%s' % ttrig_timeboxes_db
        self.process['dtTTrigCorrection_cfg.py'] = correction
        # Step 3: dump the corrected DB to a text file.
        dump = loadCmsProcess(self.pset_templates['dumpDBToFile_ttrig_cfg.py'])
        dump.calibDB.connect = 'sqlite_file:%s' % ttrig_timeboxes_db
        dump.dumpToFile.outputFileName = ttrig_timeboxes_txt
        self.process['dumpDBToFile_ttrig_cfg.py'] = dump
    def writeCfg(self):
        """Serialize every configured process into self.dir."""
        for cfg in self.configs:
            writeCfg(self.process[cfg], self.dir, cfg)
    def run(self):
        """Execute the prepared task chain."""
        self.task.run()
        return
| 3,429 | 8 | 143 |
4e0b4f1736db9347d30a2d18993a1c64f19dbfc6 | 504 | py | Python | node2vec/src/testNetworkX.py | CEfanmin/DataMiningProjects | b6375f542c68c0001ae2971dd7e8046a0b4afc7a | [
"MIT"
] | 3 | 2018-04-26T06:44:27.000Z | 2018-09-01T13:58:21.000Z | node2vec/src/testNetworkX.py | CEfanmin/DataMiningProjects | b6375f542c68c0001ae2971dd7e8046a0b4afc7a | [
"MIT"
] | null | null | null | node2vec/src/testNetworkX.py | CEfanmin/DataMiningProjects | b6375f542c68c0001ae2971dd7e8046a0b4afc7a | [
"MIT"
] | 1 | 2018-09-01T13:58:27.000Z | 2018-09-01T13:58:27.000Z | import networkx as nx
import matplotlib.pyplot as plt
## add a node
G = nx.Graph()
G.add_node(1)
G.add_nodes_from([2, 3])
H = nx.path_graph(10)
G.add_nodes_from(H)
G.add_node(H)
G.add_node('shopping')
## edges
G.add_edge(1, 2, {'weight': 3.1415})
e = (2, 3)
G.add_edge(*e)
G.add_edges_from([(1,2), (1,3)])
# G.add_edges_from(H.edges)
nx.draw(G, with_labels=True)
plt.show()
G.clear()
edgelist = [('n1','n2'), ('n1','n3'), ('n2','n3')]
H = nx.Graph(edgelist)
nx.draw(H, with_labels= True)
plt.show()
| 18 | 50 | 0.646825 | import networkx as nx
import matplotlib.pyplot as plt
## add a node
G = nx.Graph()
G.add_node(1)
G.add_nodes_from([2, 3])
H = nx.path_graph(10)
G.add_nodes_from(H)
G.add_node(H)
G.add_node('shopping')
## edges
G.add_edge(1, 2, {'weight': 3.1415})
e = (2, 3)
G.add_edge(*e)
G.add_edges_from([(1,2), (1,3)])
# G.add_edges_from(H.edges)
nx.draw(G, with_labels=True)
plt.show()
G.clear()
edgelist = [('n1','n2'), ('n1','n3'), ('n2','n3')]
H = nx.Graph(edgelist)
nx.draw(H, with_labels= True)
plt.show()
| 0 | 0 | 0 |
7a5a12a2b65d10df25cd9f836ac609b209cce23a | 918 | py | Python | adv/mona.py | dl-stuff/dl | 185cc8a16339c47ed873768ff30804f8d06090a2 | [
"Apache-2.0"
] | 22 | 2020-04-04T17:34:16.000Z | 2021-09-25T00:22:23.000Z | adv/mona.py | dl-stuff/dl | 185cc8a16339c47ed873768ff30804f8d06090a2 | [
"Apache-2.0"
] | 92 | 2020-04-04T15:30:34.000Z | 2022-03-24T01:43:11.000Z | adv/mona.py | dl-stuff/dl | 185cc8a16339c47ed873768ff30804f8d06090a2 | [
"Apache-2.0"
] | 37 | 2020-04-16T02:47:07.000Z | 2021-03-28T23:18:50.000Z | from core.advbase import *
from module.template import Adv_INFUTP
variants = {None: Mona, "RNG": Mona_RNG, "INFUTP": Mona_INFUTP}
| 26.228571 | 85 | 0.595861 | from core.advbase import *
from module.template import Adv_INFUTP
class Mona(Adv):
    """Adventurer implementation for Mona.

    Maintains the "beast_eye" self-buff: (re)applied on force-strike procs
    and switched off when the "dragon" event fires.
    """
    def prerun(self):
        # Selfbuff args assumed to be (name, value, duration, slot, type)
        # -- verify against the Selfbuff definition; ex_bufftime() applies
        # the buff-time extension.
        self.beast_eye = Selfbuff("beast_eye", 0.2, 30, "utph", "buff").ex_bufftime()
        # Turn the buff off whenever the "dragon" event is emitted.
        Event("dragon").listener(self.ddrive_buff_off)
    def fs_proc(self, e):
        # Force-strike proc: refresh beast_eye unless nihilism suppresses it.
        if self.nihilism:
            return
        self.beast_eye.on()
    def ddrive_buff_off(self, e):
        # "dragon" event handler: drop the beast_eye buff.
        self.beast_eye.off()
class Mona_RNG(Mona):
    """Mona variant that models a random s1 enhancement in the "ddrive" group."""
    def s1_before(self, e):
        # For the "ddrive" skill group, roll a 25% chance to use the PLUS
        # attribute set instead of BASE; the rolled variant is logged.
        if e.group == "ddrive":
            if random.random() <= 0.25:
                log("s1_ddrive", "variant", "plus")
                self.conf.s1_ddrive.attr = self.conf.s1_ddrive.attr_PLUS
            else:
                log("s1_ddrive", "variant", "base")
                self.conf.s1_ddrive.attr = self.conf.s1_ddrive.attr_BASE
class Mona_INFUTP(Mona, Adv_INFUTP):
    """Mona combined with the Adv_INFUTP template; no extra behaviour."""
    pass
variants = {None: Mona, "RNG": Mona_RNG, "INFUTP": Mona_INFUTP}
| 589 | 19 | 175 |
8948fe9ebd947bd18a879bad266e94c8f09b14d4 | 89 | py | Python | Code/facebook_credentials.py | PritomDas/Automated-Tinder-AI-Bot | 085a41c586671767b976f18627d8f72ed3614dcd | [
"MIT"
] | 1 | 2020-02-11T06:44:33.000Z | 2020-02-11T06:44:33.000Z | Code/facebook_credentials.py | PritomDas/Automated-Tinder-AI-Bot | 085a41c586671767b976f18627d8f72ed3614dcd | [
"MIT"
] | 1 | 2020-02-11T06:56:48.000Z | 2020-02-11T06:56:48.000Z | Code/facebook_credentials.py | PritomDas/Automated-Tinder-AI-Bot | 085a41c586671767b976f18627d8f72ed3614dcd | [
"MIT"
] | null | null | null | username = '' #enter your facebook user name
password = '' #enter your facebook password
| 29.666667 | 44 | 0.741573 | username = '' #enter your facebook user name
password = '' #enter your facebook password
| 0 | 0 | 0 |
de336d770cfcbea47a843316d1fc22134021b6c5 | 908 | py | Python | main.py | arieroos/crypto-trader | c419f712fd7c501507c99a48a142e909f92a949c | [
"MIT"
] | 2 | 2021-09-08T07:23:51.000Z | 2021-11-08T14:59:32.000Z | main.py | arieroos/crypto-trader | c419f712fd7c501507c99a48a142e909f92a949c | [
"MIT"
] | null | null | null | main.py | arieroos/crypto-trader | c419f712fd7c501507c99a48a142e909f92a949c | [
"MIT"
] | null | null | null | import sys
from datetime import datetime
import error_handler
import valr
UNKNOWN_TREND = "unknown"
DOWN_TREND = "down"
UP_TREND = "up"
if __name__ == "__main__":
sys.excepthook = error_handler.excepthook
orders = [x for x in valr.get_open_orders() if x["side"].upper() == "BUY"]
if len(orders) > 0:
log("open orders found: closing")
valr.close_open_buys()
market_summary = valr.market_summary()
base_price = float(market_summary["lastTradedPrice"])
else:
sell_price = valr.sell_at_market()
log(f"Sold at {sell_price}")
base_price = sell_price
percentage = 0.33 / 100.0
buy_adjustment = 1 - percentage
buy_price = base_price * buy_adjustment
log(f"Placing buy order at {buy_price}")
valr.buy_order(buy_price)
log("Buy order placed")
| 24.540541 | 78 | 0.65859 | import sys
from datetime import datetime
import error_handler
import valr
UNKNOWN_TREND = "unknown"
DOWN_TREND = "down"
UP_TREND = "up"
def log(msg: str) -> None:
    """Print *msg* prefixed with the current timestamp, flushing immediately."""
    timestamp = datetime.now()
    print(f"[{timestamp}] {msg}", flush=True)
if __name__ == "__main__":
sys.excepthook = error_handler.excepthook
orders = [x for x in valr.get_open_orders() if x["side"].upper() == "BUY"]
if len(orders) > 0:
log("open orders found: closing")
valr.close_open_buys()
market_summary = valr.market_summary()
base_price = float(market_summary["lastTradedPrice"])
else:
sell_price = valr.sell_at_market()
log(f"Sold at {sell_price}")
base_price = sell_price
percentage = 0.33 / 100.0
buy_adjustment = 1 - percentage
buy_price = base_price * buy_adjustment
log(f"Placing buy order at {buy_price}")
valr.buy_order(buy_price)
log("Buy order placed")
| 48 | 0 | 23 |
b7bab76e2c1606435bce4d050c6f35fd5a5ee94d | 2,106 | py | Python | bucket_manifest/utils.py | uc-cdis/cdis-dataflow-templates | 6a3d28d27094db34e2df6ef10d61a700bfc6c94d | [
"Apache-2.0"
] | null | null | null | bucket_manifest/utils.py | uc-cdis/cdis-dataflow-templates | 6a3d28d27094db34e2df6ef10d61a700bfc6c94d | [
"Apache-2.0"
] | 1 | 2020-06-17T21:54:37.000Z | 2020-06-17T21:54:37.000Z | bucket_manifest/utils.py | uc-cdis/cdis-dataflow-templates | 6a3d28d27094db34e2df6ef10d61a700bfc6c94d | [
"Apache-2.0"
] | null | null | null | import csv
from google.cloud import storage
import logging
def write_tsv(filename, files, fieldnames=None):
"""
write to tsv file
Args:
filename(str): file name
files(list(dict)): list of file info
[
{
"GUID": "guid_example",
"filename": "example",
"size": 100,
"acl": "['open']",
"md5": "md5_hash",
},
]
fieldnames(list(str)): list of column names
Returns:
filename(str): file name
"""
if not files:
return None
# Get column names
fieldnames = fieldnames or files[0].keys()
# Open tsv file
with open(filename, mode="w") as outfile:
writer = csv.DictWriter(outfile, delimiter="\t", fieldnames=fieldnames)
# write header
writer.writeheader()
# Write data
for f in files:
for field in fieldnames:
if field not in f:
f[field] = None
writer.writerow(f)
return filename
def upload_file(bucket_name, source_file_name, destination_blob_name):
"""
Upload a file to an gs bucket
Args:
file_name: File to upload
bucket: Bucket to upload to
object_name: gs object name. If not specified then file_name is used
Returns:
Bool: True if file was uploaded, else False
"""
# Initialize a storage client.
storage_client = storage.Client()
try:
# Initialize a bucket client.
bucket = storage_client.bucket(bucket_name)
# Create a dest blob.
blob = bucket.blob(destination_blob_name)
# Upload file to the bucket
blob.upload_from_filename(source_file_name)
except Exception as e:
logging.error(
"Fail to upload {} to {}. Detail {}".format(
source_file_name, bucket_name, e
)
)
return False
logging.info(
"File {} uploaded to {}/{}.".format(source_file_name, bucket_name, destination_blob_name)
)
return True
| 26.325 | 97 | 0.565052 | import csv
from google.cloud import storage
import logging
def write_tsv(filename, files, fieldnames=None):
    """Write a list of row dicts to *filename* as tab-separated values.

    Args:
        filename (str): output file path.
        files (list[dict]): rows to write, e.g.
            [{"GUID": "...", "filename": "...", "size": 100,
              "acl": "['open']", "md5": "..."}]
        fieldnames (list[str], optional): column order; defaults to the keys
            of the first row.

    Returns:
        str | None: *filename* on success, or None when *files* is empty.
    """
    if not files:
        return None
    # Default the column set to the keys of the first row.
    fieldnames = fieldnames or list(files[0].keys())
    # newline="" is required by the csv module to avoid newline translation.
    with open(filename, mode="w", newline="") as outfile:
        # restval=None fills columns missing from a row (written as an empty
        # cell) without mutating the caller's dicts, which the previous
        # implementation did as a side effect.
        writer = csv.DictWriter(
            outfile, delimiter="\t", fieldnames=fieldnames, restval=None
        )
        writer.writeheader()
        writer.writerows(files)
    return filename
def upload_file(bucket_name, source_file_name, destination_blob_name):
    """
    Upload a local file to a Google Cloud Storage bucket
    Args:
        bucket_name: name of the destination GCS bucket
        source_file_name: local path of the file to upload
        destination_blob_name: object name to create in the bucket
    Returns:
        Bool: True if file was uploaded, else False
    """
    # Initialize a storage client.
    storage_client = storage.Client()
    try:
        # Initialize a bucket client.
        bucket = storage_client.bucket(bucket_name)
        # Create a dest blob.
        blob = bucket.blob(destination_blob_name)
        # Upload file to the bucket
        blob.upload_from_filename(source_file_name)
    except Exception as e:
        # Any client/network failure is logged and reported as False.
        logging.error(
            "Fail to upload {} to {}. Detail {}".format(
                source_file_name, bucket_name, e
            )
        )
        return False
    logging.info(
        "File {} uploaded to {}/{}.".format(source_file_name, bucket_name, destination_blob_name)
    )
    return True
| 0 | 0 | 0 |
631cfccd8f55be80c2b322c2b729d8a58d117353 | 5,153 | py | Python | main/commands/breakpoint_menus.py | sexybiggetje/sublime_db | 0d1cac7b87b32f843e65d8bee6583cadd5f9ea6a | [
"MIT"
] | null | null | null | main/commands/breakpoint_menus.py | sexybiggetje/sublime_db | 0d1cac7b87b32f843e65d8bee6583cadd5f9ea6a | [
"MIT"
] | null | null | null | main/commands/breakpoint_menus.py | sexybiggetje/sublime_db | 0d1cac7b87b32f843e65d8bee6583cadd5f9ea6a | [
"MIT"
] | null | null | null | from sublime_db.core.typecheck import (
Any,
Callable,
Optional
)
import sublime
import sublime_plugin
from sublime_db import core
from sublime_db import ui
from sublime_db.main.breakpoints import Breakpoints, Breakpoint, FunctionBreakpoint
from .commands import AutoCompleteTextInputHandler
@core.async
@core.async
| 37.613139 | 194 | 0.761692 | from sublime_db.core.typecheck import (
Any,
Callable,
Optional
)
import sublime
import sublime_plugin
from sublime_db import core
from sublime_db import ui
from sublime_db.main.breakpoints import Breakpoints, Breakpoint, FunctionBreakpoint
from .commands import AutoCompleteTextInputHandler
def edit_breakpoint(breakpoints: Breakpoints, breakpoint: Breakpoint, selected_index = 0):
    """Open the edit menu matching the concrete breakpoint type."""
    # Check Breakpoint first to mirror the original dispatch order.
    if isinstance(breakpoint, Breakpoint):
        edit_line_breakpoint(breakpoints, breakpoint, selected_index)
        return
    if isinstance(breakpoint, FunctionBreakpoint):
        edit_function_breakpoint(breakpoints, breakpoint, selected_index)
        return
    assert False, "expected Breakpoint or FunctionBreakpoint"
def edit_line_breakpoint(breakpoints: Breakpoints, breakpoint: Breakpoint, selected_index = 0):
    """Show the quick-panel menu for editing a line breakpoint.

    The breakpoint's line is highlighted while the menu is open; the chosen
    change (condition/log/count/enabled/remove) is applied on confirm and the
    menu is reopened so several fields can be edited in a row.
    """
    window = sublime.active_window()
    core.run(open_file_and_hightlight(window, breakpoint.file, breakpoint.line - 1))
    cancel_select_async = open_file_and_cancel_highlight(window, breakpoint.file, breakpoint.line - 1)

    menu_items = [
        ListInputItemChecked("Expr", breakpoint.condition, "Breaks when expression is true"),
        ListInputItemChecked("Log", breakpoint.log, "Message to log, expressions within {} are interpolated"),
        ListInputItemChecked("Count", breakpoint.count, "Break when hit count condition is met"),
        ui.ListInputItem(["○ Disabled", "● Disabled"][not breakpoint.enabled]),
        ui.ListInputItem(" Remove"),
    ]
    list_input = ui.ListInput(menu_items, placeholder="Edit breakpoint @ line {}".format(breakpoint.line), index=selected_index)

    def run_main(**args):
        core.run(cancel_select_async)
        choice = args['list']
        if choice == 4:
            breakpoints.remove_breakpoint(breakpoint)
            return
        if choice == 0:
            breakpoints.set_breakpoint_condition(breakpoint, args['text'])
        if choice == 1:
            breakpoints.set_breakpoint_log(breakpoint, args['text'])
        if choice == 2:
            breakpoints.set_breakpoint_count(breakpoint, args['text'])
        if choice == 3:
            breakpoints.set_breakpoint_enabled(breakpoint, not breakpoint.enabled)
        # Reopen the menu at the same entry for further edits.
        edit_line_breakpoint(breakpoints, breakpoint, choice)

    def on_cancel():
        core.run(cancel_select_async)

    ui.run_input_command(list_input, run_main, on_cancel=on_cancel)
def edit_function_breakpoint(breakpoints: Breakpoints, breakpoint: FunctionBreakpoint, selected_index = 0):
    """Show the quick-panel menu for editing a function breakpoint.

    Applies the chosen change (condition/count/enabled/remove) and reopens the
    menu so several fields can be edited in a row.
    """
    menu_items = [
        ListInputItemChecked("Expr", breakpoint.condition, "Breaks when expression is true"),
        ListInputItemChecked("Count", breakpoint.hitCondition, "Break when hit count condition is met"),
        ui.ListInputItem(["○ Disabled", "● Disabled"][not breakpoint.enabled]),
        ui.ListInputItem(" Remove"),
    ]
    list_input = ui.ListInput(menu_items, placeholder="Edit breakpoint @ function {}".format(breakpoint.name), index=selected_index)

    def run_main(**args):
        choice = args['list']
        if choice == 3:
            breakpoints.remove_breakpoint(breakpoint)
            return
        if choice == 0:
            breakpoints.set_breakpoint_condition(breakpoint, args['text'])
        if choice == 1:
            breakpoints.set_breakpoint_count(breakpoint, args['text'])
        if choice == 2:
            breakpoints.set_breakpoint_enabled(breakpoint, not breakpoint.enabled)
        # Reopen the menu at the same entry for further edits.
        edit_function_breakpoint(breakpoints, breakpoint, choice)

    ui.run_input_command(list_input, run_main)
class ListInputItemChecked (ui.ListInputItem):
    """List item rendered with a filled (●) or empty (○) circle showing
    whether a value is currently set; selecting it opens a text input
    pre-filled with that value.
    """
    def __init__(self, name, initial, placeholder):
        marker = '● ' if initial else '○ '
        # Show the current value when set, otherwise the placeholder hint.
        detail = initial or placeholder
        text = marker + name
        if detail:
            text += ": " + detail
        next_input = ui.TextInput(
            initial=initial,
            placeholder=placeholder
        )
        super().__init__(text, name, next_input)
@core.async
def open_file_and_hightlight(window, file, line):
view = yield from core.sublime_open_file_async(window, file, line-1)
print('sel')
view.sel().clear()
rl = view.line(view.text_point(line, 0))
view.add_regions("debug.add_breakpoint", [sublime.Region(rl.a, rl.a)], scope="markup.deleted",flags=sublime.DRAW_SOLID_UNDERLINE|sublime.DRAW_NO_FILL|sublime.DRAW_NO_OUTLINE|sublime.DRAW_EMPTY)
@core.async
def open_file_and_cancel_highlight(window, file, line):
view = yield from core.sublime_open_file_async(window, file, line-1)
view.erase_regions("debug.add_breakpoint")
def add_breakpoint(breakpoints: Breakpoints, file: str, line: int, selected_index = 0):
    """Show the menu for adding a breakpoint (line or function) at *line* of *file*.

    The target line is highlighted while the menu is open; the highlight is
    cleared on both confirm and cancel.
    """
    window = sublime.active_window()
    core.run(open_file_and_hightlight(window, file, line - 1))
    cancel_select_async = open_file_and_cancel_highlight(window, file, line - 1)

    menu_items = [ui.ListInputItem("Add breakpoint")]
    name_input = AutoCompleteTextInputHandler("name of function to break on")
    menu_items.append(ui.ListInputItem("Add function breakpoint", name="function name", next_input=name_input))
    list_input = ui.ListInput(menu_items, placeholder="Add breakpoint @ line {}".format(line), index=selected_index)

    def run_main(**args):
        # (removed stray debug print of the raw args)
        choice = args['list']
        if choice == 0:
            breakpoints.add_breakpoint(file, line)
        if choice == 1:
            breakpoints.add_function_breakpoint(args['text'])
        core.run(cancel_select_async)

    def on_cancel():
        core.run(cancel_select_async)

    ui.run_input_command(list_input, run_main, on_cancel=on_cancel)
| 4,633 | 25 | 181 |
72412d50c7dc555be4686d28e6d8ca8e63ec7561 | 539 | py | Python | main/templatetags/custom_tags.py | CrazyOrr/gamemob | defdf748d8860ac9a1af317781c4b88421326762 | [
"Apache-2.0"
] | null | null | null | main/templatetags/custom_tags.py | CrazyOrr/gamemob | defdf748d8860ac9a1af317781c4b88421326762 | [
"Apache-2.0"
] | null | null | null | main/templatetags/custom_tags.py | CrazyOrr/gamemob | defdf748d8860ac9a1af317781c4b88421326762 | [
"Apache-2.0"
] | null | null | null | __author__ = 'wanglei02'
from django import template
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter(name='join_link', needs_autoescape=True)
| 23.434783 | 57 | 0.658627 | __author__ = 'wanglei02'
from django import template
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter(name='join_link', needs_autoescape=True)
def join_link(value, arg, autoescape=True):
    """Django filter: render each item as an <a> tag and join with *arg*.

    Each item must provide get_absolute_url(); the link text is escaped when
    autoescaping is active, and the joined markup is marked safe.
    """
    esc = conditional_escape if autoescape else (lambda x: x)
    anchors = [
        '<a href="%s">%s</a>' % (item.get_absolute_url(), esc(item))
        for item in value
    ]
    return mark_safe(arg.join(anchors))
| 277 | 0 | 22 |
94bc3a6ce98787e7cedbb192ad7f8bf17596c45c | 32,960 | py | Python | rayserve/inference.py | drunkcoding/model-inference | 02d2240bc7052fa32223a80fa63625fe681db102 | [
"MIT"
] | 1 | 2021-11-15T19:07:13.000Z | 2021-11-15T19:07:13.000Z | rayserve/inference.py | drunkcoding/model-inference | 02d2240bc7052fa32223a80fa63625fe681db102 | [
"MIT"
] | null | null | null | rayserve/inference.py | drunkcoding/model-inference | 02d2240bc7052fa32223a80fa63625fe681db102 | [
"MIT"
] | null | null | null | import copy
from dataclasses import dataclass
import dataclasses
import functools
import io
import logging
from multiprocessing.connection import wait
import uuid
from typing import Any, Dict, List, Optional, Tuple, Union
from attr import field
from concurrent.futures import ProcessPoolExecutor
import pandas as pd
import pynvml
from transformers import (
AutoConfig,
HfArgumentParser,
T5ForConditionalGeneration,
AutoModelForQuestionAnswering,
DistilBertForQuestionAnswering,
ViTForImageClassification,
AutoModelForCausalLM,
)
import requests
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,5,6,7"
import torch
import time
import threading
import multiprocessing as mp
from multiprocessing import Process, Manager
from multiprocessing.managers import BaseManager
from fastapi import FastAPI
import json
from requests import Request
import numpy as np
from scipy.special import softmax
from transformers import AutoTokenizer
from tqdm import tqdm
import gc
import dill
# ====== ray serve
import ray
from ray import data, serve
from ray.serve import pipeline
from ray.util.metrics import Counter, Gauge, Histogram, Metric
# ====== hfutils
from hfutils.arg_parser import RayArguments
from hfutils.logger import Logger
from hfutils.calibration import agg_logits, temperature_scale
from hfutils.constants import MODEL_KEYS
from hfutils.pipe.t5 import (
T5_ENCODER_INPUTS,
T5_ENCODER_OUTPUTS,
T5_DECODER_INPUTS,
T5_DECODER_OUTPUTS,
T5PyTorchPipe,
T5PytorchPipeRandom,
)
from hfutils.pipe.bert import (
BERT_INPUTS,
BERT_OUTPUTS,
BertPyTorchPipeForQuestionAnswering,
BertPytorchPipeRandom,
)
from hfutils.pipe.vit import (
VIT_INPUTS,
VIT_OUTPUTS,
ViTPyTorchPipeForImageClassification,
ViTPytorchPipeRandom,
)
from hfutils.pipe.gpt import GPTPytorchPipeRandom
from hfutils.pipe.distilbert import (
DISTILBERT_INPUTS,
DISTILBERT_OUTPUTS,
DistilBertPyTorchPipeForQuestionAnswering,
)
from hfutils.pipe.gpt import GPT_INPUTS, GPT_OUTPUTS, GPTLMHeadModelPipe
from hfutils.calibration import temperature_scale
from hfutils.constants import np_to_torch_dtype
from hfutils.options import (
ReplicationOptions,
SystemOptions,
EnsembleOptions,
ParallelOptions,
ModelConfig,
HostOptions,
)
# ======= DEFINE CONSTANTS =========
T5_TASK_LABELS = [1176, 6136, 59] # HACK with GLUE labels
m = functools.partial(softmax, axis=1)
VISIBLE_GPUS = [str(i) for i in range(torch.cuda.device_count())]
m = torch.nn.Softmax(dim=1)
@dataclass
parser = HfArgumentParser(Arguments)
args = parser.parse_args_into_dataclasses()[0]
# ======= PARSE CONFIGURATION =========
# with open(args.ensemble_cfg, "r") as fp:
# ensemble_config = json.load(fp)
with open(args.model_cfg, "r") as fp:
model_config = json.load(fp)
ensembles = model_config["ensembles"]
base_dir = model_config["base_dir"]
alpha = model_config["alpha"]
type = model_config["type"]
instance = model_config["instance"]
host_options = {
ins["host"]: HostOptions(
host=ins["host"],
# alpha=alpha,
# ens=len(ensembles),
type=type,
placement={
gid: [
ModelConfig(
name=model["name"],
path=os.path.join(base_dir, model_config[model["name"]]["path"]),
type=model_config[model["name"]]["type"],
stages=model_config[model["name"]]["parallel_stages"],
ppos=model["stage"],
epos=ensembles.index(model["name"]),
temp=model_config[model["name"]]["temperature"],
util_params=model_config[model["name"]]["util_params"],
ray_actor_options={
"num_cpus": 1,
"num_gpus": 1 / len(models),
"resources": {ins["host"]: 1},
},
key="_".join([ins["host"], model["name"], gid, str(i)]),
)
for i, model in enumerate(models)
]
for gid, models in ins["placement"].items()
},
)
for ins in instance
}
# host_resource = {
# ins["host"]: sum([len(models) for gid, models in ins["placement"].items()])
# for ins in instance
# }
# model_replicas = {
# name: sum(
# [
# 1
# for ins in instance
# for gid, models in ins["placement"].items()
# for model in models
# if model["name"] == name
# ]
# )
# for name in ensembles
# }
system_options = SystemOptions(
alpha=alpha,
ens=len(ensembles),
type=type,
ensemble_options=[
EnsembleOptions(
epos=i,
th=model_config[name]["threshold"],
name=name,
parallel_options=[
ParallelOptions(
stages=model_config[name]["parallel_stages"],
ppos=p,
replications=[
model.key
for host in host_options.values()
for models in host.placement.values()
for model in models
if model.epos == i and model.ppos == p
],
)
for p in range(model_config[name]["parallel_stages"])
],
)
for i, name in enumerate(ensembles)
],
)
# for idx, name in enumerate(ensembles):
# meta = model_config[name]
# path = os.path.join(base_dir, meta["path"])
# threshold = meta["threshold"]
# temperature = meta["temperature"]
# stages = meta["parallel_stages"]
# util_params = meta["util_params"]
# instance = meta["instance"]
# parallel_options = [
# ParallelOptions(
# stages=stages,
# ppos=p,
# replication_options=[
# ReplicationOptions(
# k,
# "_".join([name, idx, ins["stage"], k]),
# torch.device(ins["device"]),
# )
# for k in range(ins["count"])
# for ins in instance
# if ins["stage"] == p
# ],
# )
# for p in range(stages)
# ]
# for i, ins in enumerate(instance):
# for k in range(ins["count"]):
# key = "_".join([name, idx, ins["stage"], k])
# replication_options = ReplicationOptions(
# k, key, torch.device(ins["device"])
# )
# config = ModelConfig(
# name,
# path,
# type,
# stages,
# ins["stage"],
# idx,
# len(ensembles),
# alpha,
# temperature,
# threshold,
# util_params,
# ins["device"],
# k,
# )
# deploy_config.append(config)
# ====== MODEL DEFINATION ==============
@serve.deployment(max_concurrent_queries=100)
@serve.deployment(max_concurrent_queries=1000)
# ray.init(address="ray://129.215.164.41:10001")
import socket
# ====== START SERVER ==============
# ray.init(namespace=args.namespace, num_cpus=80, num_gpus=torch.cuda.device_count())
# Attach to an already-running Ray cluster via the Ray client and start a
# detached Serve instance listening on port 8888.
host_ip = get_host_ip()
ray.init(address=f"ray://{host_ip}:10001", namespace=args.namespace)
serve.start(detached=True, http_options=serve.HTTPOptions(port=8888))
# print("ray initialized", args)
# Deploy one HServeModel replica per placed model partition.  The deployment
# name encodes host / model / GPU id / slot so schedulers can look it up.
for host, h_op in host_options.items():
    for gid, models in h_op.placement.items():
        for i, model in enumerate(models):
            key = "_".join([host, model.name, gid, str(i)])
            HServeModel.options(
                name=key, ray_actor_options=model.ray_actor_options
            ).deploy(options=host_options, model_id=i, key=key)
# for e_op in system_options.ensemble_options:
#     for p_op in e_op.parallel_options:
#         for r_op in p_op.replication_options:
#             HServeModel.options(
#                 name=r_op.key, ray_actor_options={"num_cpus": 4, "num_gpus": 2},
#             ).deploy(
#                 options=system_options,
#                 epos=e_op.epos,
#                 ppos=p_op.ppos,
#                 replica=r_op.replica,
#             )
# Deploy one HybridScheduler per host, pinned to that host through the
# host-named custom resource declared in ray_actor_options.
for host, _ in host_options.items():
    for r in range(1):  # single scheduler replica per host
        HybridScheduler.options(
            name=f"hybrid-scheduler_{host}_{r}",
            num_replicas=1,
            ray_actor_options={"num_cpus": 0.1, "resources": {f"{host}": 1}},
        ).deploy(system_options, r)
| 35.251337 | 136 | 0.543811 | import copy
from dataclasses import dataclass
import dataclasses
import functools
import io
import logging
from multiprocessing.connection import wait
import uuid
from typing import Any, Dict, List, Optional, Tuple, Union
from attr import field
from concurrent.futures import ProcessPoolExecutor
import pandas as pd
import pynvml
from transformers import (
AutoConfig,
HfArgumentParser,
T5ForConditionalGeneration,
AutoModelForQuestionAnswering,
DistilBertForQuestionAnswering,
ViTForImageClassification,
AutoModelForCausalLM,
)
import requests
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,5,6,7"
import torch
import time
import threading
import multiprocessing as mp
from multiprocessing import Process, Manager
from multiprocessing.managers import BaseManager
from fastapi import FastAPI
import json
from requests import Request
import numpy as np
from scipy.special import softmax
from transformers import AutoTokenizer
from tqdm import tqdm
import gc
import dill
# ====== ray serve
import ray
from ray import data, serve
from ray.serve import pipeline
from ray.util.metrics import Counter, Gauge, Histogram, Metric
# ====== hfutils
from hfutils.arg_parser import RayArguments
from hfutils.logger import Logger
from hfutils.calibration import agg_logits, temperature_scale
from hfutils.constants import MODEL_KEYS
from hfutils.pipe.t5 import (
T5_ENCODER_INPUTS,
T5_ENCODER_OUTPUTS,
T5_DECODER_INPUTS,
T5_DECODER_OUTPUTS,
T5PyTorchPipe,
T5PytorchPipeRandom,
)
from hfutils.pipe.bert import (
BERT_INPUTS,
BERT_OUTPUTS,
BertPyTorchPipeForQuestionAnswering,
BertPytorchPipeRandom,
)
from hfutils.pipe.vit import (
VIT_INPUTS,
VIT_OUTPUTS,
ViTPyTorchPipeForImageClassification,
ViTPytorchPipeRandom,
)
from hfutils.pipe.gpt import GPTPytorchPipeRandom
from hfutils.pipe.distilbert import (
DISTILBERT_INPUTS,
DISTILBERT_OUTPUTS,
DistilBertPyTorchPipeForQuestionAnswering,
)
from hfutils.pipe.gpt import GPT_INPUTS, GPT_OUTPUTS, GPTLMHeadModelPipe
from hfutils.calibration import temperature_scale
from hfutils.constants import np_to_torch_dtype
from hfutils.options import (
ReplicationOptions,
SystemOptions,
EnsembleOptions,
ParallelOptions,
ModelConfig,
HostOptions,
)
# ======= DEFINE CONSTANTS =========
T5_TASK_LABELS = [1176, 6136, 59] # HACK with GLUE labels
m = functools.partial(softmax, axis=1)
VISIBLE_GPUS = [str(i) for i in range(torch.cuda.device_count())]
m = torch.nn.Softmax(dim=1)
@dataclass
class Arguments:
    """Command-line arguments for the hybrid-serving launcher.

    NOTE(review): `field` here comes from `attr` (see the file's imports),
    not `dataclasses.field`, yet the class is a stdlib `@dataclass` parsed
    by HfArgumentParser — verify this is intended and parses correctly.
    """

    # ensemble_cfg: str = field(metadata={"help": "Path to configuration meta file for ensemble, including partition and replications"})
    # Path to the JSON config holding per-model thresholds, temperatures,
    # parallel stages and placement.
    model_cfg: str = field(
        metadata={
            "help": "Path to configuration meta file, including thresholds and temperatures"
        }
    )
    # Ray namespace the deployments are created in.
    namespace: str = field(metadata={"help": "Namespace for ray serve"})
parser = HfArgumentParser(Arguments)
args = parser.parse_args_into_dataclasses()[0]
# ======= PARSE CONFIGURATION =========
# with open(args.ensemble_cfg, "r") as fp:
# ensemble_config = json.load(fp)
with open(args.model_cfg, "r") as fp:
model_config = json.load(fp)
ensembles = model_config["ensembles"]
base_dir = model_config["base_dir"]
alpha = model_config["alpha"]
type = model_config["type"]
instance = model_config["instance"]
# Per-host placement map: host name -> HostOptions describing which model
# partitions run on which GPU of that host.  Each placed partition becomes a
# ModelConfig whose `key` ("<host>_<model>_<gpu-id>_<slot>") is reused later
# as the Serve deployment name.
host_options = {
    ins["host"]: HostOptions(
        host=ins["host"],
        # alpha=alpha,
        # ens=len(ensembles),
        type=type,
        placement={
            gid: [
                ModelConfig(
                    name=model["name"],
                    path=os.path.join(base_dir, model_config[model["name"]]["path"]),
                    type=model_config[model["name"]]["type"],
                    stages=model_config[model["name"]]["parallel_stages"],
                    ppos=model["stage"],
                    epos=ensembles.index(model["name"]),
                    temp=model_config[model["name"]]["temperature"],
                    util_params=model_config[model["name"]]["util_params"],
                    # The GPU is shared evenly among all models placed on it;
                    # the host-named resource pins the actor to this host.
                    ray_actor_options={
                        "num_cpus": 1,
                        "num_gpus": 1 / len(models),
                        "resources": {ins["host"]: 1},
                    },
                    key="_".join([ins["host"], model["name"], gid, str(i)]),
                )
                for i, model in enumerate(models)
            ]
            for gid, models in ins["placement"].items()
        },
    )
    for ins in instance
}
# host_resource = {
# ins["host"]: sum([len(models) for gid, models in ins["placement"].items()])
# for ins in instance
# }
# model_replicas = {
# name: sum(
# [
# 1
# for ins in instance
# for gid, models in ins["placement"].items()
# for model in models
# if model["name"] == name
# ]
# )
# for name in ensembles
# }
system_options = SystemOptions(
alpha=alpha,
ens=len(ensembles),
type=type,
ensemble_options=[
EnsembleOptions(
epos=i,
th=model_config[name]["threshold"],
name=name,
parallel_options=[
ParallelOptions(
stages=model_config[name]["parallel_stages"],
ppos=p,
replications=[
model.key
for host in host_options.values()
for models in host.placement.values()
for model in models
if model.epos == i and model.ppos == p
],
)
for p in range(model_config[name]["parallel_stages"])
],
)
for i, name in enumerate(ensembles)
],
)
# for idx, name in enumerate(ensembles):
# meta = model_config[name]
# path = os.path.join(base_dir, meta["path"])
# threshold = meta["threshold"]
# temperature = meta["temperature"]
# stages = meta["parallel_stages"]
# util_params = meta["util_params"]
# instance = meta["instance"]
# parallel_options = [
# ParallelOptions(
# stages=stages,
# ppos=p,
# replication_options=[
# ReplicationOptions(
# k,
# "_".join([name, idx, ins["stage"], k]),
# torch.device(ins["device"]),
# )
# for k in range(ins["count"])
# for ins in instance
# if ins["stage"] == p
# ],
# )
# for p in range(stages)
# ]
# for i, ins in enumerate(instance):
# for k in range(ins["count"]):
# key = "_".join([name, idx, ins["stage"], k])
# replication_options = ReplicationOptions(
# k, key, torch.device(ins["device"])
# )
# config = ModelConfig(
# name,
# path,
# type,
# stages,
# ins["stage"],
# idx,
# len(ensembles),
# alpha,
# temperature,
# threshold,
# util_params,
# ins["device"],
# k,
# )
# deploy_config.append(config)
# ====== MODEL DEFINATION ==============
@serve.deployment(max_concurrent_queries=100)
class HServeModel:
def __init__(self, options: Dict, model_id: int, key: str) -> None:
# os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(VISIBLE_GPUS)
gid = str(ray.get_gpu_ids()[0])
host = ray._private.services.get_node_ip_address()
pynvml.nvmlInit()
self.handle = pynvml.nvmlDeviceGetHandleByIndex(0)
self.namespace = ray.get_runtime_context().namespace
self.config = options[host].placement[gid][model_id]
print(self.config, flush=True)
filename = os.path.join(
os.path.dirname(__file__),
self.namespace,
f"{host}_e{self.config.epos}p{self.config.ppos}_{gid}_{model_id}",
)
print(filename, flush=True)
try:
os.mkdir(os.path.dirname(filename))
except OSError as error:
print(error)
self.logger = Logger(filename, logging.INFO, 50000000, 5, mode="w")
self.device = torch.cuda.current_device() # torch.device(f"cuda:{gid}")
print("device", "cuda:" + str(gid), flush=True)
print(gid, host, self.device, flush=True)
# self.logger.info("options %s", options)
# self.logger.info("%s", ["x"] * 1000)
self.key = key
self.cuda_stream = torch.cuda.Stream(device=self.device, priority=-1)
print("stream", self.key, self.cuda_stream, flush=True)
# self._get_gpu_uuid()
self._load_model()
self.is_last_stage = self.config.ppos == self.config.stages - 1
# def _get_gpu_uuid(self):
# command = "nvidia-smi --query-gpu=index,uuid,gpu_bus_id --format=csv"
# result = subprocess.run(command.split(), stdout=subprocess.PIPE)
# df = pd.read_csv(io.StringIO(result.stdout.decode("utf-8")), index_col="index")
# df = df.sort_index()
# print(df)
# df.iloc[:, 0] = df.iloc[:, 0].str.strip()
# self.gpu_uuid = df.iloc[self.device.index][" uuid"]
# print(self.gpu_uuid, flush=True)
def _load_model(self):
print("load model", self.config.type, self.config.name, flush=True)
if "t5" == self.config.type:
model = T5ForConditionalGeneration.from_pretrained(self.config.path)
self.model = T5PyTorchPipe(model)
elif "bert" == self.config.type:
model = AutoModelForQuestionAnswering.from_pretrained(self.config.path)
self.model = BertPyTorchPipeForQuestionAnswering(model)
elif "distilbert" == self.config.type:
model = DistilBertForQuestionAnswering.from_pretrained(self.config.path)
self.model = DistilBertPyTorchPipeForQuestionAnswering(model)
elif "vit" == self.config.type:
model = ViTForImageClassification.from_pretrained(self.config.path)
self.model = ViTPyTorchPipeForImageClassification(model)
elif "gpt" == self.config.type:
model = AutoModelForCausalLM.from_pretrained(self.config.path)
self.model = GPTLMHeadModelPipe(model)
elif "random" == self.config.type:
config = AutoConfig.from_pretrained(self.config.path)
if "bert" in self.config.name:
self.model = BertPytorchPipeRandom(config)
elif "vit" in self.config.name:
self.model = ViTPytorchPipeRandom(config)
elif "gpt" in self.config.name:
self.model = GPTPytorchPipeRandom(config)
self.model.layer_param = np.array([
321361920,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453064704,
453076992,
308779008,
])
self.model.total_params = sum(self.model.layer_param)
elif "t5" in self.config.name:
self.model = T5PytorchPipeRandom(config)
self.model.layer_param = np.array([
197394432,
289421312,
289419264,
289419264,
289419264,
289419264,
289419264,
289419264,
289419264,
289419264,
289419264,
289419264,
289419264,
289419264,
289419264,
289419264,
289419264,
289419264,
289419264,
289419264,
289419264,
289419264,
289419264,
289419264,
289419264,
6144,
197394432,
390090752,
390088704,
390088704,
390088704,
390088704,
390088704,
390088704,
390088704,
390088704,
390088704,
390088704,
390088704,
390088704,
390088704,
390088704,
390088704,
390088704,
390088704,
390088704,
390088704,
390088704,
390088704,
390088704,
390088704,
6144,
197394432,
])
self.model.total_params = sum(self.model.layer_param)
else:
raise ValueError(
"%s undefined random model name %s" % (self.key, self.config.name)
)
# print("asdgfadsgfad", flush=True)
else:
raise ValueError("%s unknown model type %s" % (self.key, self.config.type))
# print("load model", type(self.model), flush=True)
print(
"partition_by_parameter", self.config.ppos, self.config.stages, flush=True
)
self.model.partition_by_parameter(
self.config.ppos, self.config.stages, "random" == self.config.type
)
# print("load model aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", flush=True)
if "random" != self.config.type:
print("convert", flush=True)
self.model.convert(self.device)
del model
else:
print("convert_layer_specs", flush=True)
self.model.convert_layer_specs(self.device)
# self.model.partition_by_parameter(self.config.ppos, 4) # TEST MULTIPLEX
self.model.eval()
gc.collect()
torch.cuda.empty_cache()
    @torch.no_grad()
    def model_inference(self, args, mask, uid):
        """Run one forward pass of this pipeline partition.

        Args:
            args: tuple of inputs — numpy arrays on the first stage
                (``ppos == 0``), torch tensors forwarded from the previous
                stage otherwise.
            mask: boolean row mask; applied only on the first stage to pick
                the sub-batch routed to this ensemble member.
            uid: request id, used for log correlation only.

        Returns:
            Intermediate activations (torch tensors) for non-final stages, or
            final logits as a CPU numpy array on the last stage.
        """
        start_time = time.perf_counter()
        start_power = pynvml.nvmlDeviceGetPowerUsage(self.handle)
        # self.logger.debug("%s args %s", self.key, args)
        if self.config.ppos == 0:
            # First stage: select the masked rows and move them to this GPU.
            args = tuple(torch.from_numpy(arg[mask]).to(self.device) for arg in args)
        else:
            args = tuple(arg.to(self.device) for arg in args)
        batch_size = args[0].shape[0]
        with torch.cuda.stream(self.cuda_stream):
            outputs = self.model(args)
            self.cuda_stream.synchronize()  # MUST sync otherwise outputs are zeros
        end_time = time.perf_counter()
        if self.is_last_stage:
            # self.logger.debug(
            #     "[%s] %s outputs %s",
            #     uid,
            #     self.key,
            #     outputs.shape,
            # )
            outputs = outputs.squeeze(1)
            if "t5" in self.config.name:
                # Keep only the logits of the GLUE task labels.
                outputs = outputs[:, T5_TASK_LABELS]
            if "gpt" in self.config.name:
                # Last-token logits, clipped to the GPT-2 vocabulary size.
                outputs = outputs[:, -1, :50257]
            outputs = outputs.detach().cpu().numpy()
            # outputs = temperature_scale(outputs, self.config.temp)
        end_power = pynvml.nvmlDeviceGetPowerUsage(self.handle)
        # Timing and NVML power readings bracket the whole call for profiling.
        self.logger.info(
            "[%s,%s] %s (%s) inference %s %s",
            uid, self.namespace, self.key, batch_size,
            (start_time, end_time, start_power, end_power),
            end_time - start_time,
        )
        return outputs
async def __call__(self, *args: Any, **kwds: Any) -> Any:
return self.model_inference(*args, **kwds)
@serve.deployment(max_concurrent_queries=1000)
class HybridScheduler:
def __init__(self, options: SystemOptions, id: int):
self.config = options
self.namespace = ray.get_runtime_context().namespace
filename = os.path.join(
os.path.dirname(__file__),
self.namespace,
f"{ray._private.services.get_node_ip_address()}_{id}",
)
print(filename, flush=True)
try:
os.mkdir(os.path.dirname(filename))
except OSError as error:
print(error)
self.logger = Logger(filename, logging.INFO, 50000000, 5, mode="w")
# self.logger.info("HybridScheduler logger initialized")
# os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(VISIBLE_GPUS)
# self.logger.info("CUDA_VISIBLE_DEVICES: %s", os.environ["CUDA_VISIBLE_DEVICES"])
# self.logger.info("HybridScheduler cfg %s", options)
self.calibrators = {}
self.ensembles = options.ensemble_options
self.num_ensembles = len(options.ensemble_options)
# self.logger.info("Current ensembles %s", self.ensembles)
# self.logger.info("Worker IP %s", ray._private.services.get_node_ip_address())
    def schedule_handle(self, parallel_options):
        """Pick a replica for one pipeline stage and return its Serve handle.

        Combines a shared round-robin counter with a random offset so that
        concurrent schedulers do not all advance over replicas in lock-step.
        Returns an async handle for the chosen deployment.
        """
        # keys = list(parallel_options.keys())
        # values = list(parallel_options.values())
        # key = keys[values.index(min(values))]
        # idx = np.random.choice(list(range(len(keys))), 1)[0]
        # key = keys[idx]
        # parallel_options[key] += 1
        # self.logger.debug("parallel_options %s", parallel_options)
        num_replicas = len(parallel_options.replications)
        parallel_options.rr_counter += 1
        r = (parallel_options.rr_counter + np.random.randint(0, num_replicas)) % num_replicas
        key = parallel_options.replications[r]
        # self.logger.debug("parallel_options %s", parallel_options)
        return serve.get_deployment(key).get_handle(sync=False)
async def __call__(self, request) -> Any:
data = await request.json()
if self.config.type == "gpt" or self.config.type == "t5":
# args = (
# np.load(io.BytesIO(data['input_ids']), allow_pickle=False),
# np.load(io.BytesIO(data['attention_mask']), allow_pickle=False),
# )
args = (
np.asarray(data["input_ids"], dtype=np.int64),
np.asarray(data["attention_mask"], dtype=np.int64),
)
if self.config.type == "vit":
# args = (
# np.load(io.BytesIO(data['pixel_values']), allow_pickle=False),
# )
args = (np.asarray(data["pixel_values"], dtype=np.float32),)
logits = await self.ensemble_inference(args)
# self.logger.debug("logits %s", logits)
return {"logits": logits.tolist()}
# self.logger.info("data %s", data, ray.ObjectRef(bytes.fromhex(data["args"])))
# args = ray.get(ray.ObjectRef(bytes.fromhex(data["args"])))
# self.logger.info("args %s", args)
# ref = await self.ensemble_inference(args)
# self.logger.info("ref %s", ref)
# return {"logits": ref.hex()}
async def post_processing(self, ensemble_outputs, outputs, batch_mask, idx, uid):
local_mask = batch_mask[idx]
outputs = ray.get(outputs)
ensemble_outputs = self.model_ensemble(
ensemble_outputs, outputs, local_mask, idx
)
# name = self.config.ensemble_options[idx].name
# if ensemble_outputs is None:
# ensemble_outputs = ray.get(outputs)
# else:
# ensemble_outputs[local_mask] =
extended_mask, max_prob = self.offload_mask(
ensemble_outputs, local_mask, idx, uid
)
num_next_models = self.num_ensembles - idx - 1
if np.any(extended_mask) and num_next_models > 0:
# batch_mask[idx] &= ~extended_mask
# batch_mask[idx + 1] |= extended_mask
batch_mask = self.update_batch_mask(
max_prob, batch_mask.copy(), extended_mask, idx, uid
)
# self.logger.debug("%s batch_mask updated %s", name, batch_mask)
return ensemble_outputs, batch_mask
@serve.batch(max_batch_size=4)
async def handle_batch(self, args_list):
outputs_list = []
for args in args_list:
outputs = await self.ensemble_inference(args)
outputs_list.append(outputs)
return outputs_list
# This method can be called concurrently!
async def ensemble_inference(self, args):
# self.logger.info("received %s", uid)
req_start_time = time.perf_counter()
# start_time = time.perf_counter()
uid = uuid.uuid4().hex
# end_time = time.perf_counter()
# self.logger.info(
# "uuid gen %s",
# end_time - start_time
# )
# start_time = time.perf_counter()
batch_size = len(args[0])
ensemble_outputs = None
batch_mask = np.zeros((self.num_ensembles, batch_size))
# batch_mask = np.ones((self.num_ensembles, batch_size))
batch_mask[0, :] = 1 # WHERE TO ENTER
batch_mask = batch_mask.astype(bool)
# end_time = time.perf_counter()
# self.logger.info(
# "batch_mask gen %s",
# end_time - start_time
# )
for idx, options in enumerate(self.ensembles):
# name = self.config.ensemble_options[idx].name
outputs = args
local_mask = batch_mask[idx]
# self.logger.debug("%s local_mask %s", options.name, local_mask)
if np.any(local_mask):
for parallel_options in options.parallel_options:
handle = self.schedule_handle(parallel_options)
# start_time = time.perf_counter()
outputs = await handle.model_inference.remote(
outputs, local_mask, uid
)
# end_time = time.perf_counter()
# self.logger.info(
# "[%s] %s (%s) inference (%s, %s) %s (ms)",
# uid,
# name,
# parallel_options.ppos,
# start_time,
# end_time,
# (end_time - start_time) * 1000,
# )
ensemble_outputs, batch_mask = await self.post_processing(
ensemble_outputs, outputs, batch_mask, idx, uid
)
if (idx + 1) < self.num_ensembles and np.sum(
batch_mask[(idx + 1) :]
) == 0:
# self.logger.debug("%s early exit %s", name, batch_mask)
break
# outputs = ray.get(outputs)
# ensemble_outputs = self.model_ensemble(
# ensemble_outputs, outputs, local_mask, idx
# )
# extended_mask, _ = self.offload_mask(
# ensemble_outputs, local_mask, idx
# )
# if np.all(~extended_mask): break
# self.logger.debug(
# "%s local_mask updated %s", options.name, extended_mask
# )
# num_next_models = self.num_ensembles - idx - 1
# if np.any(extended_mask) and num_next_models > 0:
# batch_mask[idx] &= ~extended_mask
# batch_mask[idx + 1] |= extended_mask
# # batch_mask = self.update_batch_mask(
# # max_prob, batch_mask.copy(), extended_mask, idx
# # )
# # self.logger.debug(
# # "%s batch_mask updated %s", options.name, batch_mask
# # )
assert np.sum(batch_mask) == batch_size
req_end_time = time.perf_counter()
self.logger.info(
"[%s,%s] request(%s) %s %s",
uid, self.namespace, batch_size,
(req_start_time, req_end_time),
req_end_time - req_start_time
)
gc.collect()
torch.cuda.empty_cache()
return ensemble_outputs
    def offload_mask(self, logits, mask, idx, uid):
        """Flag samples too uncertain for model ``idx`` that must be offloaded.

        Calibrates squared-softmax confidences with the per-model calibrator
        (lazily loaded from disk and cached), then marks every active row
        whose calibrated confidence falls below this member's threshold.

        Returns:
            (combined_mask, max_prob): boolean mask of rows to forward to a
            later model (restricted to rows active in ``mask``), plus the
            calibrated confidence scores.
        """
        start_time = time.perf_counter()
        # Squared softmax sharpens the distribution before calibration.
        probabilities = np.power(softmax(logits, axis=1), 2)
        name = self.config.ensemble_options[idx].name
        if name not in self.calibrators:
            # Lazily load and cache the dill-pickled calibrator for this model.
            user = os.path.expanduser("~")
            with open(
                os.path.join(user, f"model-inference/inference_dump/{name}_calibrator"),
                "rb",
            ) as f:
                self.calibrators[name] = dill.load(f)
        max_prob = self.calibrators[name].calibrate(probabilities)
        if "bert" == self.config.type:
            # assumes QA calibrators emit per-span confidence columns and the
            # weaker column should gate offloading — TODO confirm output shape.
            if max_prob.shape[1] == 1:
                max_prob = max_prob.squeeze(1)
            max_prob = np.min(max_prob, axis=1)
        prob_mask = max_prob < self.config.ensemble_options[idx].th
        self.logger.debug(
            "(offload_mask) prob_mask %s %s",
            prob_mask,
            mask,
        )
        combined_mask = mask & prob_mask
        self.logger.debug("max_prob %s, combined_mask %s", max_prob, combined_mask)
        end_time = time.perf_counter()
        self.logger.info(
            "[%s] %s offload_mask (%s, %s) %s",
            uid,
            name,
            start_time,
            end_time,
            end_time - start_time,
        )
        return combined_mask, max_prob
# def offload_mask(self, logits, mask, idx):
# probabilities = np.power(m(logits), 2)
# max_prob = np.max(probabilities, axis=-1)
# prob_mask = max_prob < self.ensembles[idx].threshold
# self.logger.debug(
# "%s (offload_mask) prob_mask %s %s",
# self.ensembles[idx].name,
# prob_mask,
# mask,
# )
# extended_mask = mask & prob_mask
# # combined_mask[mask] &= prob_mask[mask]
# self.logger.debug("max_prob %s, extended_mask %s", max_prob, extended_mask)
# return extended_mask, max_prob
    def model_ensemble(self, hist_outputs, outputs, mask, idx):
        """Blend the new model's outputs into the running ensemble outputs.

        Exponential blend on the masked rows with weight ``self.config.alpha``
        for the new outputs.  Mutates ``hist_outputs`` in place when it is not
        None; on the first call (None) returns a copy of ``outputs`` instead.
        """
        start_time = time.perf_counter()
        if hist_outputs is not None:
            hist_outputs[mask] = (
                hist_outputs[mask] * (1 - self.config.alpha)
                + outputs * self.config.alpha
            )
        else:
            hist_outputs = outputs.copy()
        end_time = time.perf_counter()
        self.logger.info(
            "%s model_ensemble time elapsed (%s, %s) %s (ms)",
            self.config.ensemble_options[idx].name,
            start_time,
            end_time,
            (end_time - start_time) * 1000,
        )
        return hist_outputs  # MEMCOPY MUTABLE
# def model_ensemble(self, ensemble_outputs, local_outputs, mask, idx):
# # start_time = time.perf_counter()
# if ensemble_outputs is not None:
# ensemble_weight = self.ensembles[idx].ensemble_weight
# ensemble_outputs[mask] = (
# ensemble_outputs[mask] * (1 - ensemble_weight)
# + local_outputs * ensemble_weight
# )
# self.logger.debug(
# "%s ensemble_outputs %s", self.ensembles[idx].name, ensemble_outputs,
# )
# return (
# ensemble_outputs if ensemble_outputs is not None else local_outputs.copy()
# ) # MEMCOPY
# def update_batch_mask(self, max_prob, mask, local_mask, idx):
# num_next_models = self.num_ensembles - idx - 1
# if num_next_models <= 0:
# return mask
# if self.ensembles[idx].skip_connection:
# base_step = (self.ensembles[idx].threshold - 0.25) / num_next_models
# for skip in range(num_next_models):
# skip_th_lower = base_step * (num_next_models - 1 - skip) + 0.25
# skip_th_upper = base_step * (num_next_models - skip) + 0.25
# skip_mask = (
# (max_prob >= skip_th_lower)
# & (max_prob < skip_th_upper)
# & local_mask
# )
# self.logger.debug(
# "%s skip_th_lower %s, skip_th_upper %s, skip_mask %s",
# self.ensembles[idx].name,
# skip_th_lower,
# skip_th_upper,
# skip_mask,
# )
# mask[skip + 1 + idx] |= skip_mask
# else:
# mask[1 + idx] |= (max_prob < self.ensembles[idx].threshold) & local_mask
# mask[idx] &= ~local_mask
# return mask
    def update_batch_mask(self, max_prob, mask, local_mask, idx, uid):
        """Re-route offloaded samples to later ensemble members by confidence.

        Splits the [0, threshold) confidence range of member ``idx`` into
        equal bands, one per downstream model: the lower a sample's
        calibrated confidence, the further down the cascade it skips.
        Clears the rows in member ``idx``'s own mask and returns the updated
        per-member mask matrix.
        """
        start_time = time.perf_counter()
        num_next_models = len(mask) - self.config.ensemble_options[idx].epos - 1
        base_step = (self.config.ensemble_options[idx].th) / num_next_models
        for skip in range(num_next_models):
            # Band [lower, upper) of calibrated confidence handled by the
            # model `skip` positions further down the cascade.
            skip_th_lower = base_step * (num_next_models - 1 - skip)
            skip_th_upper = base_step * (num_next_models - skip)
            skip_mask = (
                (max_prob >= skip_th_lower) & (max_prob < skip_th_upper) & local_mask
            )
            self.logger.debug(
                "skip_th_lower %s, skip_th_upper %s, skip_mask %s",
                skip_th_lower,
                skip_th_upper,
                skip_mask,
            )
            mask[skip + 1 + self.config.ensemble_options[idx].epos] |= skip_mask
        # This member is done with the rows it just processed.
        mask[self.config.ensemble_options[idx].epos] &= ~local_mask
        end_time = time.perf_counter()
        self.logger.info(
            "[%s] %s update_batch_mask time elapsed (%s,%s) %s (ms)",
            uid,
            self.config.ensemble_options[idx].name,
            start_time,
            end_time,
            (end_time - start_time) * 1000,
        )
        return mask
# ray.init(address="ray://129.215.164.41:10001")
import socket
def get_host_ip():
    """Return the IPv4 address of this machine's default outbound interface.

    Connecting a UDP socket transmits no packets; it only asks the OS to
    choose a route (and therefore a local source address) toward the peer.

    Returns:
        str: the local IPv4 address, e.g. "192.168.1.10".
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 1))
        return s.getsockname()[0]
    finally:
        # Fix: the original leaked the socket — it was never closed.
        s.close()
# ====== START SERVER ==============
# ray.init(namespace=args.namespace, num_cpus=80, num_gpus=torch.cuda.device_count())
host_ip = get_host_ip()
ray.init(address=f"ray://{host_ip}:10001", namespace=args.namespace)
serve.start(detached=True, http_options=serve.HTTPOptions(port=8888))
# print("ray initialized", args)
for host, h_op in host_options.items():
for gid, models in h_op.placement.items():
for i, model in enumerate(models):
key = "_".join([host, model.name, gid, str(i)])
HServeModel.options(
name=key, ray_actor_options=model.ray_actor_options
).deploy(options=host_options, model_id=i, key=key)
# for e_op in system_options.ensemble_options:
# for p_op in e_op.parallel_options:
# for r_op in p_op.replication_options:
# HServeModel.options(
# name=r_op.key, ray_actor_options={"num_cpus": 4, "num_gpus": 2},
# ).deploy(
# options=system_options,
# epos=e_op.epos,
# ppos=p_op.ppos,
# replica=r_op.replica,
# )
for host, _ in host_options.items():
for r in range(1):
HybridScheduler.options(
name=f"hybrid-scheduler_{host}_{r}",
num_replicas=1,
ray_actor_options={"num_cpus": 0.1, "resources": {f"{host}": 1}},
).deploy(system_options, r)
| 20,418 | 3,792 | 89 |
2946e941bb095163cf2a9d20b54c7195e621b50f | 8,050 | py | Python | test.py | byaka/CapybaraMail | c6bad864c56e8a7f4e93397b095d5013f6ba0c7f | [
"Apache-2.0"
] | null | null | null | test.py | byaka/CapybaraMail | c6bad864c56e8a7f4e93397b095d5013f6ba0c7f | [
"Apache-2.0"
] | 5 | 2019-06-18T14:03:44.000Z | 2019-09-01T12:44:40.000Z | test.py | byaka/CapybaraMail | c6bad864c56e8a7f4e93397b095d5013f6ba0c7f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from functionsex import *
from VombatiDB import VombatiDB, showDB, showStats, Workspace
from VombatiDB import errors as dbError
from importMail import ImportMail_MBox
import errors as storeError
import api
from utils import RepairDialogLinking
from libs.plainText import plaintext
import textwrap
if __name__ == '__main__':
# importer=ImportMail_MBox('/home/byaka/Загрузки/gmail_exported/all.mbox')
# tMap=set()
# i1=i2=i3=i4=0
# print
# for _, headers, (body_plain, body_html), attachments in importer:
# if headers.get('message-id'):
# if headers['message-id'] in tMap: i4+=1
# tMap.add(headers['message-id'])
# else:
# i2+=1
# i1+=1
# if headers.get('in-reply-to') in tMap: i3+=1
# print console.color.clearLast, i1, i2, i3, i4
# if not headers.get('message-id'):
# print _.raw
# print '='*30
# print
# continue
# for k in importer._headers:
# print k+':', strUniDecode('%r'%(headers[k],))
# print
# # print body_plain or body_html
# print
# for o in attachments:
# o=o.copy()
# o['payload']='...'
# print o
# print '='*40
# print _.defects, raw_input()
# print console.color.clearLast, i1, i2, i3, i4, sys.exit()
o=MyEnv()
o.repairDialogs('John Smith')
# o.test_filter({'or':[
# {'key':'from', 'value':'mail@ajon.ru', 'match':'=='},
# # {'key':'label', 'value':u'черновики', 'match':'=='},
# ]}, asDialogs=True, returnFull=False, limitDates=30, limitResults=100)
o()
| 37.268519 | 181 | 0.600124 | # -*- coding: utf-8 -*-
from functionsex import *
from VombatiDB import VombatiDB, showDB, showStats, Workspace
from VombatiDB import errors as dbError
from importMail import ImportMail_MBox
import errors as storeError
import api
from utils import RepairDialogLinking
from libs.plainText import plaintext
import textwrap
class MyEnv(object):
def __init__(self):
self._istty=console.inTerm()
self._autoLabel_inbox='Inbox'
self.workspace=Workspace()
self.store=api.makeStoreClass()(self.workspace)
self.api=ClassFactory(api.ApiBase, (
api.ApiAccount,
api.ApiLabel,
api.ApiFilter,
))(self.workspace, store=self.store)
self.store.start()
self.api.start()
    def listUsers(self):
        """Print every stored user; colorized and framed when on a TTY."""
        p = console.color.copy()
        # TTY output prepends a state color and bolds the name; plain output
        # keeps only "name (descr)".
        tpl = '%(enabled)s%(bold)s%(name)s%(end)s (%(descr)s)' if self._istty else '%(name)s (%(descr)s)'
        if self._istty: print '-'*40
        for n, o in self.store.userList():
            # NOTE(review): (green, red)[o.isActive] renders *active* users in
            # red and inactive in green — confirm this ordering is intended.
            print tpl % dict(
                p.items() + o.items(),
                name=n,
                enabled=(console.color.green, console.color.red)[o.isActive]
            )
        if self._istty: print '-'*40
def addUser(self, user, password, email, descr=None, avatar=None):
if avatar:
raise NotImplementedError
self.store.userAdd(user, password, descr=descr, avatar=avatar, strictMode=True)
for s in ((email,) if isinstance(email, (str, unicode)) else email):
self.store.userSelfEmailAdd(user, s, name=user, strictMode=False)
def importData(self, user, path, skip=0):
if not os.path.isfile(path):
raise ValueError('File not exists')
self.store.userIsExist(user,needException=True)
importer=ImportMail_MBox(path, skip)
msg=None
if self._istty:
print
msg='%(clearLast)sImporting from %(bold)s'+path+'%(end)s: %%i'
msg=msg%console.color
for i, (msgObj, headers, body, attachments) in enumerate(importer):
try:
isIncoming=self.store._msgProc_isIncoming(user, headers, msgObj.raw, True)
except storeError.IncorrectInputError: continue
if isIncoming:
self.addMsgIncoming(user, msgObj, body, headers, attachments)
else:
self.addMsgOutgoing(user, msgObj, body, headers, attachments)
if self._istty:
print msg%(i+1)
# raw_input('Success! Continue?')
# print console.color.clearLast
def addMsgIncoming(self, user, msgObj, body, headers, attachments):
labels=(self._autoLabel_inbox,)
self._addMsg(user, msgObj, body, headers, attachments, labels, extractMoreLabels=True)
def addMsgOutgoing(self, user, msgObj, body, headers, attachments):
self._addMsg(user, msgObj, body, headers, attachments, None, extractMoreLabels=True)
def _addMsg(self, user, msgObj, body, headers, attachments, labels, extractMoreLabels=True):
if msgObj.defects:
self.workspace.log(2, 'Some defects founded in msg: \t\n%s\n'%
'\t\n'.join(
'%s:\n%s'%(k, '\t\t\n'.join(o)) for k, o in msgObj.defects.iteritems()
)
)
labels=labels or ()
if extractMoreLabels:
if headers.get('x-gmail-labels'):
labels+=headers['x-gmail-labels']
try:
self.store.msgAdd(user, body, headers, msgObj.raw, attachments=attachments, labels=labels, strictMode=True, allowCompress=True)
except storeError.NoMessageIdError:
print '='*30
print 'ERROR: no msg-id founded'
print msgObj.raw
print '='*30
print
def repairDialogs(self, user):
RepairDialogLinking(self.store).run(user)
def show(self, branch=None, limit=None):
showDB(self.store.db, branch=branch, limit=limit)
def stats(self):
showStats(self.store.db)
def __call__(self):
assert self._istty
scope=globals().copy()
scope.update((k, getattr(self, k)) for k in dir(self) if not k.startswith('_'))
console.interact(scope)
def test_filter(self, q, dates=None, limitDates=2, limitResults=10, asDialogs=True, returnFull=True):
data, targets, nextDates=o.api.filterMessages('John Smith', dates=dates, query=q, limitDates=limitDates, limitResults=limitResults, asDialogs=asDialogs, returnFull=returnFull)
for date, data in data:
print date
# body=data['bodyPlain'] or plaintext(data['bodyHtml'], linebreaks=1, indentation=False)
# del data['bodyHtml']
# del data['bodyPlain']
print_r(data)
print '-'*30
print 'TARGETS =', targets
print 'NEXT_DATES =', nextDates
print '='*30
def test_dialogs(self, min_msgs=2):
for idsDialog, _ in self.store.db.iterBranch((self.store.userId('John Smith'), 'node_dialog'), recursive=False):
for idsDialogLinked, _ in self.store.db.iterBacklinks(idsDialog, recursive=False):
lines=['DIALOG (%s)'%idsDialog[-1]]
n=len(idsDialogLinked)
for idsMsg,_ in self.store.db.iterBranch(idsDialogLinked):
data=self.store.db.get(idsMsg)
lines.append('%s%s [%s] `%s`'%(
' '*(len(idsMsg)-n),
'>>' if data.isIncoming else '<<',
data.timestamp,
data.subject
))
body=data.bodyPlain or plaintext(data.bodyHtml, linebreaks=1, indentation=False)
body=textwrap.wrap(body, 100)
s=' '*(1+len(idsMsg)-n)
body='\n'.join(s+line for line in body)
lines.append(body)
lines.append('%s%s'%(
' '*(len(idsMsg)-n+1),
'='*40
))
if len(lines)>=1+min_msgs*3:
print '\n'.join(lines)
   def find_broken_msgs_without_dialogs(self, user):
      """Due to a bug in `utils.RepairDialogLinking`, some messages lost their link to a dialog.

      This check walks every message node of `user` and collects the ids of
      messages for which no dialog can be resolved, printing a running
      ok/broken counter and the broken ids at the end.
      """
      i1=i2=0
      tArr=[]
      g=self.store.db.iterBranch((self.store.userId(user), 'node_date'), strictMode=True, recursive=True, treeMode=True, safeMode=False, calcProperties=False, skipLinkChecking=True)
      for ids, (props, l) in g:
         if len(ids)<4: continue
         # `g.send(False)` tells the tree iterator not to descend into this branch.
         if ids[3]!='node_msg': g.send(False) # skip not-msgs nodes
         if len(ids)>5: g.send(False) # skip branch inside msgs
         if len(ids)==5:
            try:
               self.store.dialogFind_byMsgIds(ids, strictMode=True, asThread=True)
               i1+=1
            except Exception:
               # Resolution failed -> message is orphaned from its dialog.
               tArr.append(ids)
               i2+=1
            print console.color.clearLast+'%i %i'%(i1, i2)
      print tArr
if __name__ == '__main__':
   # Commented-out scratch below: ad-hoc MBox import/inspection experiments
   # (message-id duplicate counting, header dumps, attachment previews).
   # importer=ImportMail_MBox('/home/byaka/Загрузки/gmail_exported/all.mbox')
   # tMap=set()
   # i1=i2=i3=i4=0
   # print
   # for _, headers, (body_plain, body_html), attachments in importer:
   #    if headers.get('message-id'):
   #       if headers['message-id'] in tMap: i4+=1
   #       tMap.add(headers['message-id'])
   #    else:
   #       i2+=1
   #    i1+=1
   #    if headers.get('in-reply-to') in tMap: i3+=1
   #    print console.color.clearLast, i1, i2, i3, i4
   #    if not headers.get('message-id'):
   #       print _.raw
   #       print '='*30
   #       print
   #       continue
   #    for k in importer._headers:
   #       print k+':', strUniDecode('%r'%(headers[k],))
   #    print
   #    # print body_plain or body_html
   #    print
   #    for o in attachments:
   #       o=o.copy()
   #       o['payload']='...'
   #       print o
   #    print '='*40
   #    print _.defects, raw_input()
   # print console.color.clearLast, i1, i2, i3, i4, sys.exit()
   # Live entry point: build the interactive env, run the dialog repair, drop to a console.
   o=MyEnv()
   o.repairDialogs('John Smith')
   # o.test_filter({'or':[
   #    {'key':'from', 'value':'mail@ajon.ru', 'match':'=='},
   #    # {'key':'label', 'value':u'черновики', 'match':'=='},
   #    ]}, asDialogs=True, returnFull=False, limitDates=30, limitResults=100)
   o()
| 5,147 | 1,346 | 23 |
b4bec296bcdb6c4b7fe42711cd71ddae86964068 | 15,585 | py | Python | cytopy/tests/test_gating_strategy.py | JANHMS/CytoPy | 8537d707fa25645b55b4ec1e25fff9f19847fb1b | [
"MIT"
] | 41 | 2020-04-08T11:01:28.000Z | 2022-03-11T17:17:18.000Z | cytopy/tests/test_gating_strategy.py | JANHMS/CytoPy | 8537d707fa25645b55b4ec1e25fff9f19847fb1b | [
"MIT"
] | 27 | 2020-04-07T14:59:24.000Z | 2022-03-01T20:43:34.000Z | cytopy/tests/test_gating_strategy.py | JANHMS/CytoPy | 8537d707fa25645b55b4ec1e25fff9f19847fb1b | [
"MIT"
] | 8 | 2020-04-28T15:16:24.000Z | 2022-03-02T19:02:14.000Z | from cytopy.data.gating_strategy import GatingStrategy, DuplicatePopulationError
from cytopy.data.gate import ThresholdGate, PolygonGate, EllipseGate
from cytopy.data.project import Project
import matplotlib.pyplot as plt
import pandas as pd
import pytest
@pytest.mark.parametrize("gate,child_n",
[(create_threshold_gate, 4),
(create_poly_gate, 1),
(create_ellipse_gate, 2)])
@pytest.mark.parametrize("gate,populations",
[(create_threshold_gate, ["root", "Top right", "Top left", "Bottom populations"]),
(create_poly_gate, ["root", "Big pop"]),
(create_ellipse_gate, ["root", "Big pop", "Little pop"])])
@pytest.mark.parametrize("remove_associations", [True, False])
| 42.933884 | 112 | 0.637857 | from cytopy.data.gating_strategy import GatingStrategy, DuplicatePopulationError
from cytopy.data.gate import ThresholdGate, PolygonGate, EllipseGate
from cytopy.data.project import Project
import matplotlib.pyplot as plt
import pandas as pd
import pytest
def create_gatingstrategy_and_load(example_populated_experiment):
    """Create a fresh GatingStrategy named 'test' and load the 'test sample' FileGroup into it."""
    gs = GatingStrategy(name="test")
    gs.load_data(experiment=example_populated_experiment,
                 sample_id="test sample")
    return gs
def reload_gatingstrategy(example_populated_experiment):
    """Fetch the previously saved 'test' GatingStrategy from the DB and re-load the sample data."""
    gs = GatingStrategy.objects(name="test").get()
    gs.load_data(experiment=example_populated_experiment, sample_id="test sample")
    return gs
def create_poly_gate():
    """Build a manual PolygonGate on FS Lin vs IgG1-FITC, parented at 'root'."""
    # Rectangle vertices as a closed path (last vertex repeats the first).
    vertices_x = [600, 1000, 1000, 600, 600]
    vertices_y = [0.2, 0.2, 0.35, 0.35, 0.2]
    return PolygonGate(gate_name="test poly",
                       parent="root",
                       x="FS Lin",
                       y="IgG1-FITC",
                       transform_x=None,
                       transform_y="logicle",
                       method="manual",
                       method_kwargs={"x_values": vertices_x,
                                      "y_values": vertices_y})
def create_threshold_gate():
    """Build a density-method ThresholdGate on FS Lin vs IgG1-FITC, parented at 'root'."""
    gate_config = dict(gate_name="test threshold",
                       parent="root",
                       x="FS Lin",
                       y="IgG1-FITC",
                       transform_x=None,
                       transform_y="logicle",
                       method="density")
    return ThresholdGate(**gate_config)
def create_ellipse_gate():
    """Build a single-component GaussianMixture EllipseGate on FS Lin vs IgG1-FITC."""
    # Fixed seed keeps the mixture fit deterministic across test runs.
    mixture_params = {"n_components": 1,
                      "random_state": 42,
                      "conf": 0.999}
    return EllipseGate(gate_name="test ellipse",
                       parent="root",
                       x="FS Lin",
                       y="IgG1-FITC",
                       transform_x=None,
                       transform_y="logicle",
                       method="GaussianMixture",
                       method_kwargs=mixture_params)
def apply_some_gates(gs: GatingStrategy):
    """Build a small population tree by chaining three gates onto `gs`.

    Resulting tree:
        root -> pop1 (2D threshold, FS Lin / IgG1-FITC)
        pop1 -> pop2 (ellipse, FS Lin / CD45-ECD)
        pop2 -> pop3, pop4 (1D threshold on IgG1-PC5)

    Returns the same GatingStrategy for chaining.
    """
    # Apply threshold gate
    gate = create_threshold_gate()
    gs.preview_gate(gate=gate)
    gate.label_children(labels={"++": "pop1",
                                "--": "other",
                                "+-": "other",
                                "-+": "other"})
    gs.apply_gate(gate)
    # Apply ellipse gate
    gate = create_ellipse_gate()
    gate.parent = "pop1"
    gate.y = "CD45-ECD"
    gs.preview_gate(gate=gate)
    gate.label_children({"A": "pop2"})
    gs.apply_gate(gate)
    # Apply another threshold gate
    gate = create_threshold_gate()
    gate.gate_name = "test threshold 2"
    gate.parent = "pop2"
    gate.x, gate.y = "IgG1-PC5", None
    gate.transform_x, gate.transform_y = "logicle", None
    gs.preview_gate(gate=gate)
    gate.label_children({"+": "pop3", "-": "pop4"})
    gs.apply_gate(gate=gate)
    return gs
def test_load_data(example_populated_experiment):
    """Loading a sample should attach a FileGroup with primary + control data and only the root population."""
    gs = create_gatingstrategy_and_load(example_populated_experiment)
    assert gs.filegroup is not None
    assert isinstance(gs.filegroup.data("primary"), pd.DataFrame)
    assert isinstance(gs.filegroup.data("test_ctrl"), pd.DataFrame)
    assert list(gs.filegroup.list_populations()) == ["root"]
@pytest.mark.parametrize("gate,child_n",
[(create_threshold_gate, 4),
(create_poly_gate, 1),
(create_ellipse_gate, 2)])
def test_preview_gate(example_populated_experiment, gate, child_n):
gs = create_gatingstrategy_and_load(example_populated_experiment)
gate = gate()
gs.preview_gate(gate)
assert len(gate.children) == child_n
plt.show()
@pytest.mark.parametrize("gate,populations",
[(create_threshold_gate, ["root", "Top right", "Top left", "Bottom populations"]),
(create_poly_gate, ["root", "Big pop"]),
(create_ellipse_gate, ["root", "Big pop", "Little pop"])])
def test_apply_gate(example_populated_experiment, gate, populations):
gs = create_gatingstrategy_and_load(example_populated_experiment)
gate = gate()
gate.fit(data=gs.filegroup.load_population_df(population=gate.parent,
transform=None,
label_downstream_affiliations=False))
if isinstance(gate, ThresholdGate):
gate.label_children(labels={"++": "Top right",
"-+": "Top left",
"--": "Bottom populations",
"+-": "Bottom populations"})
elif isinstance(gate, EllipseGate):
pops = sorted([(c.name, c.geom.x_values) for c in gate.children], key=lambda x: x[1])
gate.label_children({pops[0][0]: "Little pop",
pops[1][0]: "Big pop"})
else:
gate.label_children({"A": "Big pop"})
gs.apply_gate(gate=gate,
plot=True)
plt.show()
assert set(gs.list_populations()) == set(populations)
not_root = [p for p in gs.filegroup.populations if p.population_name != "root"]
root = gs.filegroup.get_population("root")
assert all([len(p.index) < len(root.index) for p in not_root])
biggest_pop = [p for p in not_root
if p.population_name == "Top right" or p.population_name == "Big pop"][0]
assert all([len(p.index) <= len(biggest_pop.index) for p in not_root])
def test_add_hyperparameter_grid_invalid_gate(example_populated_experiment):
gs = create_gatingstrategy_and_load(example_populated_experiment)
with pytest.raises(AssertionError) as err:
gs.add_hyperparameter_grid("invalid",
params={})
assert str(err.value) == "invalid is not a valid gate"
def test_add_hyperparameter_grid_threshold(example_populated_experiment):
gs = create_gatingstrategy_and_load(example_populated_experiment)
gate = create_threshold_gate()
gate.fit(data=gs.filegroup.load_population_df(population=gate.parent,
transform=None,
label_downstream_affiliations=False))
gate.label_children(labels={"++": "Top right",
"-+": "Top left",
"--": "Bottom populations",
"+-": "Bottom populations"})
gs.apply_gate(gate)
gs.add_hyperparameter_grid(gate_name=gate.gate_name,
params={"min_peak_threshold": [0.01, 0.1]})
assert gs.hyperparameter_search.get(gate.gate_name).get("grid").get("min_peak_threshold") == [0.01, 0.1]
def test_add_hyperparameter_grid_ellipse(example_populated_experiment):
gs = create_gatingstrategy_and_load(example_populated_experiment)
gate = create_ellipse_gate()
gate.fit(data=gs.filegroup.load_population_df(population=gate.parent,
transform=None,
label_downstream_affiliations=False))
pops = sorted([(c.name, c.geom.x_values) for c in gate.children], key=lambda x: x[1])
gate.label_children({pops[0][0]: "Little pop",
pops[1][0]: "Big pop"})
gs.apply_gate(gate)
gs.add_hyperparameter_grid(gate_name=gate.gate_name,
params={"n_components": [2, 3, 4]})
assert gs.hyperparameter_search.get(gate.gate_name).get("grid").get("n_components") == [2, 3, 4]
def assert_expected_gated_pops(gs: GatingStrategy):
    """Assert `gs` holds the pop1..pop4 tree built by apply_some_gates, with shrinking population sizes."""
    # Test expected populations present
    expected_pops = {"root", "pop1", "pop2", "pop3", "pop4"}
    assert set(gs.list_populations()) == expected_pops
    assert all([x in gs.filegroup.tree.keys() for x in expected_pops])
    # Test population tree
    gs.filegroup.print_population_tree()
    assert gs.filegroup.get_population("pop1").parent == "root"
    assert gs.filegroup.get_population("pop2").parent == "pop1"
    assert gs.filegroup.get_population("pop3").parent == "pop2"
    assert gs.filegroup.get_population("pop4").parent == "pop2"
    # Test population indexes: every child strictly smaller than root,
    # and each level smaller than its parent (checked via index length and .n).
    root_n = len(gs.filegroup.get_population("root").index)
    assert all([len(gs.filegroup.get_population(x).index) < root_n
                for x in ["pop1", "pop2", "pop3", "pop4"]])
    assert len(gs.filegroup.get_population("pop1").index) > len(gs.filegroup.get_population("pop2").index)
    assert gs.filegroup.get_population("pop1").n > gs.filegroup.get_population("pop2").n
    assert len(gs.filegroup.get_population("pop2").index) > len(gs.filegroup.get_population("pop3").index)
    assert gs.filegroup.get_population("pop2").n > gs.filegroup.get_population("pop3").n
    assert len(gs.filegroup.get_population("pop2").index) > len(gs.filegroup.get_population("pop4").index)
    assert gs.filegroup.get_population("pop2").n > gs.filegroup.get_population("pop4").n
def test_apply_downstream(example_populated_experiment):
gs = create_gatingstrategy_and_load(example_populated_experiment)
gs = apply_some_gates(gs)
assert_expected_gated_pops(gs)
def test_apply_all(example_populated_experiment):
gs = create_gatingstrategy_and_load(example_populated_experiment)
with pytest.raises(AssertionError) as err:
gs.apply_all()
assert str(err.value) == "No gates to apply"
gs = apply_some_gates(gs)
exp = Project.objects(project_id="test").get().get_experiment("test experiment")
gs.load_data(experiment=exp,
sample_id="test sample")
gs.apply_all()
assert_expected_gated_pops(gs)
with pytest.raises(DuplicatePopulationError) as err:
gs.apply_all()
assert str(err.value) == "One or more of the populations generated from this gating strategy are already " \
"presented in the population tree"
def test_delete_gate(example_populated_experiment):
    """Deleting a gate by name should remove it from the strategy's gate list."""
    gs = create_gatingstrategy_and_load(example_populated_experiment)
    gs = apply_some_gates(gs)
    gs.delete_gate("test ellipse")
    assert "test ellipse" not in [g.gate_name for g in gs.gates]
def test_plot_gate(example_populated_experiment):
gs = create_gatingstrategy_and_load(example_populated_experiment)
gs = apply_some_gates(gs)
plt.close("all")
gs.plot_gate(gate=gs.gates[0].gate_name)
plt.show()
def test_plot_gate_by_name(example_populated_experiment):
gs = create_gatingstrategy_and_load(example_populated_experiment)
gs = apply_some_gates(gs)
plt.close("all")
gs.plot_gate(gate="test threshold", create_plot_kwargs={"title": "test threshold"})
plt.show()
def test_plot_gate_invalid(example_populated_experiment):
gs = create_gatingstrategy_and_load(example_populated_experiment)
gs = apply_some_gates(gs)
with pytest.raises(ValueError) as err:
gs.plot_gate(gate="test ellipse", y="FS Lin")
assert str(err.value) == "Can only override y-axis variable for Threshold geometries"
def test_plot_backgate(example_populated_experiment):
gs = create_gatingstrategy_and_load(example_populated_experiment)
gs = apply_some_gates(gs)
plt.close("all")
gs.plot_backgate(parent="root",
overlay=["pop3", "pop4"],
x="FS Lin",
y="IgG1-FITC",
create_plot_kwargs={"transform_x": None,
"transform_y": "logicle"})
plt.show()
def test_plot_population(example_populated_experiment):
gs = create_gatingstrategy_and_load(example_populated_experiment)
gs = apply_some_gates(gs)
plt.close("all")
gs.plot_population(population="pop1",
x="FS Lin",
y="IgG1-FITC")
plt.show()
def test_population_stats(example_populated_experiment):
gs = create_gatingstrategy_and_load(example_populated_experiment)
gs = apply_some_gates(gs)
stats = gs.filegroup.population_stats(population="root")
assert isinstance(stats, dict)
assert stats.get("population_name") == "root"
assert stats.get("n") == 30000
assert stats.get("prop_of_parent") is None
assert stats.get("prop_of_root") is None
def test_edit_threshold_gate(example_populated_experiment):
gs = create_gatingstrategy_and_load(example_populated_experiment)
gs = apply_some_gates(gs)
gs.save()
gs = reload_gatingstrategy(example_populated_experiment)
before_pop1 = gs.filegroup.get_population("pop1").n
before_pop2 = gs.filegroup.get_population("pop2").n
before_pop3 = gs.filegroup.get_population("pop3").n
before_pop4 = gs.filegroup.get_population("pop4").n
gs.edit_gate("test threshold", x_threshold=0, y_threshold=0)
assert gs.filegroup.get_population("pop1").n == gs.filegroup.data(source="primary").shape[0]
assert gs.filegroup.get_population("pop2").n > before_pop2
assert gs.filegroup.get_population("pop3").n > before_pop3
assert gs.filegroup.get_population("pop4").n > before_pop4
def test_edit_polygon_gate(example_populated_experiment):
gs = create_gatingstrategy_and_load(example_populated_experiment)
gs = apply_some_gates(gs)
gs.save()
gs = reload_gatingstrategy(example_populated_experiment)
before_pop1 = gs.filegroup.get_population("pop1").n
before_pop2 = gs.filegroup.get_population("pop2").n
before_pop3 = gs.filegroup.get_population("pop3").n
before_pop4 = gs.filegroup.get_population("pop4").n
gs.edit_gate("test ellipse", coords={"pop2": [[0, 1200, 1200, 0, 0],
[-100, -100, 10000, 10000, -100]]})
assert gs.filegroup.get_population("pop1").n == before_pop1
assert gs.filegroup.get_population("pop2").n > before_pop2
assert gs.filegroup.get_population("pop2").n == before_pop1
assert gs.filegroup.get_population("pop3").n > before_pop3
assert gs.filegroup.get_population("pop4").n > before_pop4
def test_save(example_populated_experiment):
    """Saving should persist exactly one strategy document holding all three applied gates."""
    gs = create_gatingstrategy_and_load(example_populated_experiment)
    gs = apply_some_gates(gs)
    gs.save()
    gs = GatingStrategy.objects(name="test")
    assert len(gs) == 1
    gs = gs.get()
    assert len(gs.gates) == 3
@pytest.mark.parametrize("remove_associations", [True, False])
def test_delete(example_populated_experiment, remove_associations):
gs = create_gatingstrategy_and_load(example_populated_experiment)
gs = apply_some_gates(gs)
gs.save()
gs = GatingStrategy.objects(name="test").get()
populations = [[c.name for c in g.children] for g in gs.gates]
populations = list(set([x for sl in populations for x in sl]))
gs.delete(remove_associations=remove_associations,
delete_gates=remove_associations)
assert len(GatingStrategy.objects(name="test")) == 0
n = [0, 0]
if not remove_associations:
n = [2, 1]
for n_, gate in zip(n, [ThresholdGate, EllipseGate]):
assert len(gate.objects()) == n_
fg = (Project.objects(project_id="test")
.get()
.get_experiment("test experiment")
.get_sample("test sample"))
if remove_associations:
assert len(fg.gating_strategy) == 0
assert all([p not in fg.list_populations() for p in populations])
else:
assert len(fg.gating_strategy) == 1
assert all([p in fg.list_populations() for p in populations])
| 14,140 | 0 | 595 |
5f9010516eb2e0571e28ec33c2d1d8fc9a1902b4 | 2,070 | py | Python | scheduler_task/process_verification_every_minute.py | jepret/jepret-backend | 2a892e1a7141da09d286b14f47769c3e6f4c0979 | [
"MIT"
] | null | null | null | scheduler_task/process_verification_every_minute.py | jepret/jepret-backend | 2a892e1a7141da09d286b14f47769c3e6f4c0979 | [
"MIT"
] | 6 | 2021-03-19T01:58:42.000Z | 2022-03-11T23:52:43.000Z | scheduler_task/process_verification_every_minute.py | jepret/jepret-backend | 2a892e1a7141da09d286b14f47769c3e6f4c0979 | [
"MIT"
] | null | null | null | import dotenv
dotenv.load_dotenv()
from time import sleep
import schedule
import requests
import json
from service.image_recognition import check_image_similarity
from model import UMKM, UMKMValidator, Verification, Campaign
schedule.every().minute.do(process_verifications)
while True:
try:
schedule.run_pending()
except:
pass
finally:
sleep(1)
| 26.538462 | 120 | 0.666184 | import dotenv
dotenv.load_dotenv()
from time import sleep
import schedule
import requests
import json
from service.image_recognition import check_image_similarity
from model import UMKM, UMKMValidator, Verification, Campaign
def get_base_images(umkm):
    """Return the seed-image paths registered for *umkm*, or an empty list when no validator exists."""
    validator = UMKMValidator.get_or_none(UMKMValidator.umkm == umkm)
    return json.loads(validator.seed_images) if validator else []
def get_verifications(umkm):
    """Return all Verification rows for *umkm* that are still pending processing."""
    verifications = Verification.select().where((Verification.umkm == umkm) & (Verification.pending == True))
    return verifications
def is_verification_valid(verification, seed_images):
    """Download the verification photo and compare it to the seed images.

    Returns True once at least three seed images are within similarity
    distance 30 of the submitted photo; otherwise False.
    """
    r = requests.get(verification.photo)
    photo = r.content  # NOTE(review): no status check -- an HTTP error body would be compared as image bytes
    count = 0
    for s in seed_images:
        with open(s, 'rb') as f:
            distance = check_image_similarity(f, photo)
            if distance < 30:  # assumed similarity threshold -- TODO confirm metric scale
                count += 1
            if count == 3:  # three matching seeds are enough to accept
                return True
    return False
def update_user_campaign(verification, umkm):
    """Credit the verifying user and debit the campaign budget for *umkm*.

    NOTE(review): `percentage_done` multiplies budget by (budget + price * n),
    which yields a large product rather than a fraction -- a division looks
    intended; confirm with the author before trusting payout amounts.
    """
    verification_count = len(Verification.select().where((Verification.umkm == umkm) & (Verification.pending == False)))
    campaign = Campaign.get_or_none(Campaign.umkm == umkm)  # NOTE(review): may be None -> AttributeError below
    user = verification.user
    if campaign.budget - campaign.price >= 0:  # only pay out while budget still covers one price unit
        percentage_done = campaign.budget * (campaign.budget + campaign.price * verification_count)
        user.balance += campaign.price * percentage_done
        campaign.budget -= campaign.price * percentage_done
        user.save()
        campaign.save()
def process_verifications():
    """Process every UMKM's pending verifications: validate each photo, mark it
    resolved, and update the user's balance / campaign budget.

    UMKMs without registered seed images are skipped (nothing to compare against).
    """
    umkms = UMKM.select()
    for u in umkms:
        verifications = get_verifications(u)
        seed_images = get_base_images(u)
        if not seed_images:
            continue
        for v in verifications:
            v.success = is_verification_valid(v, seed_images)
            v.pending = False
            v.save()
            update_user_campaign(v, u)
# Run the verification job once a minute, forever.
schedule.every().minute.do(process_verifications)
while True:
    try:
        schedule.run_pending()
    except Exception:
        # Was a bare `except:`, which also swallows KeyboardInterrupt/SystemExit
        # and made the worker impossible to stop with Ctrl+C. Catch only
        # Exception so job failures are tolerated but shutdown signals propagate.
        pass
    finally:
        sleep(1)
| 1,562 | 0 | 115 |
9c1092d1f7b7cde8a89cbf00955b5db64557a26d | 21,342 | py | Python | modules/teacher_dashboard/teacher_dashboard.py | ehiller/mobilecsp-v18 | a59801c44c616d30f5e916d6771e479c8a9e88f7 | [
"Apache-2.0"
] | null | null | null | modules/teacher_dashboard/teacher_dashboard.py | ehiller/mobilecsp-v18 | a59801c44c616d30f5e916d6771e479c8a9e88f7 | [
"Apache-2.0"
] | null | null | null | modules/teacher_dashboard/teacher_dashboard.py | ehiller/mobilecsp-v18 | a59801c44c616d30f5e916d6771e479c8a9e88f7 | [
"Apache-2.0"
] | null | null | null | __author__ = 'ehiller@css.edu'
# Module to support custom teacher views in CourseBuilder dashboard
# Views include:
# Section Roster - list of students in section
# Sections - list of sections for current user
# Student Dashboard - view of a single student's performance in the course
# Teacher Workspace - teacher registration and list of all registered teachers
import jinja2
import os
import appengine_config
from common import tags
from common import crypto
from models import custom_modules
from models import roles
from models import transforms
from models.models import Student
#since we are extending the dashboard, probably want to include dashboard stuff
from modules.dashboard import dashboard
from modules.dashboard import tabs
#import our own modules
import teacher_entity
import teacher_rest_handlers
import teacher_parsers
#Setup paths and directories for templates and resources
RESOURCES_PATH = '/modules/teacher_dashboard/resources'
TEMPLATES_DIR = os.path.join(
appengine_config.BUNDLE_ROOT, 'modules', 'teacher_dashboard', 'templates')
#setup permissions that will be registered with the dashboard
ACCESS_ASSETS_PERMISSION = 'can_access_assets'
ACCESS_ASSETS_PERMISSION_DESCRIPTION = 'Can access the Assets Dashboard'
ACCESS_SETTINGS_PERMISSION = 'can_access_settings'
ACCESS_SETTINGS_PERMISSION_DESCRIPTION = 'Can access the Settings Dashboard'
ACCESS_ROLES_PERMISSION = 'can_access_roles'
ACCESS_ROLES_PERMISSION_DESCRIPTION = 'Can access the Roles Dashboard'
ACCESS_ANALYTICS_PERMISSION = 'can_access_analytics'
ACCESS_ANALYTICS_PERMISSION_DESCRIPTION = 'Can access the Analytics Dashboard'
ACCESS_SEARCH_PERMISSION = 'can_access_search'
ACCESS_SEARCH_PERMISSION_DESCRIPTION = 'Can access the Search Dashboard'
ACCESS_PEERREVIEW_PERMISSION = 'can_access_peer_review'
ACCESS_PEERREVIEW_PERMISSION_DESCRIPTION = 'Can access the Peer Review Dashboard'
ACCESS_SKILLMAP_PERMISSION = 'can_access_skill_map'
ACCESS_SKILLMAP_PERMISSION_DESCRIPTION = 'Can access the Skill Map Dashboard'
ACCESS_TEACHER_DASHBOARD_PERMISSION = 'can_access_teacher_dashboard'
ACCESS_TEACHER_DASHBOARD_PERMISSION_DESCRIPTION = 'Can access the Teacher Dashboard'
#setup custom module for, needs to be referenced later
custom_module = None
class TeacherHandler(dashboard.DashboardHandler):
"""Handler for everything under the Teacher tab in the CourseBuilder dashboard.
Note:
Inherits from the DashboardHandler, makes use of many of those functions to
integrate with existing dashboard.
Attributes:
ACTION (str): Value used to handler navigation in the dashboard, top level label.
DEFAULT_TAB (str): Default sub-navigation value.
URL (str): Path to module from working directory.
XSRF_TOKEN_NAME (str): Token used for xsrf security functions.
"""
ACTION = 'teacher_dashboard'
DEFAULT_TAB = 'sections'
URL = '/modules/teacher_dashboard'
XSRF_TOKEN_NAME = ''
    @classmethod
    def register_tabs(cls):
        """Register the Teacher tab's sub-navigation entries with the tab registry."""
        def register_tab(key, label, handler, href=None):
            """Register one tab; an explicit href opens in a new window, otherwise route through the dashboard."""
            if href:
                target = '_blank'
            else:
                href = 'dashboard?action=teacher_dashboard&tab=%s' % key
                target = None
            tabs.Registry.register(
                cls.ACTION, key, label, contents=handler, href=href, target=target
            )
        register_tab('sections', 'Sections', TeacherHandler)
        register_tab('student_detail', 'Student Dashboard', TeacherHandler)
        register_tab('teacher_reg', 'Teacher Workspace', TeacherHandler)
    def get_teacher_dashboard(self):
        """Process navigation requests sent to the teacher handler and route to the matching view."""
        in_tab = self.request.get('tab') or self.DEFAULT_TAB
        tab_action = self.request.get('tab_action') or None #defined a secondary tab property so I can go load a
        # separate view in the same tab
        if in_tab == 'sections':
            if tab_action == 'roster':
                return self.get_roster()
            else:
                return self.get_sections()
        elif in_tab == 'teacher_reg':
            return self.get_teacher_reg()
        elif in_tab == 'student_detail':
            return self.get_student_dashboard()
    def get_sections(self):
        """Render the Sections view; client-side JavaScript fetches the course sections and builds the list."""
        template_values = {}
        # Strip the 'ns_' prefix so the template gets the bare course namespace.
        template_values['namespace'] = self.get_course()._namespace.replace('ns_', '')
        main_content = self.get_template(
            'teacher_sections.html', [TEMPLATES_DIR]).render(template_values)
        self.render_page({
            'page_title': self.format_title('Sections'),
            'main_content': jinja2.utils.Markup(main_content)})
def get_student_dashboard(self):
"""Renders Student Dashboard view.
Also gets ALL students in ALL course sections for the registered user to
build a jQuery autocomplete dropdown on the view.
"""
student_email = self.request.get('student') or None #email will be in the request if opened from student list
# view, otherwise it will be None
#need to go through every course section for the current user and get all unique students
students = []
course_sections = teacher_entity.CourseSectionEntity.get_course_sections_for_user()
if course_sections and len(course_sections) > 0:
for course_section in course_sections.values():
if course_section.students and len(course_section.students) > 0:
for student_in_section in course_section.students.values():
if not any(x['user_id'] == student_in_section['user_id'] for x in students):
students.append(student_in_section)
#check to see if we have a student and if we need to get detailed progress
student = None
if student_email:
student = Student.get_by_email(student_email)
if (student):
course = self.get_course()
units = teacher_parsers.StudentProgressTracker.get_detailed_progress(student, course)
scores = teacher_parsers.ActivityScoreParser.get_activity_scores([student.user_id], course)
else:
units = None
scores = None
#render the template for the student dashboard view
main_content = self.get_template(
'student_detailed_progress.html', [TEMPLATES_DIR]).render(
{
'units': units, #unit completion
'student': student, #course defined student object, need email and name
'students': students, #list of students, names and emails, from a course section student list
'scores': scores
})
#call DashboardHandler function to render the page
self.render_page({
'page_title': self.format_title('Student Dashboard'),
'main_content': jinja2.utils.Markup(main_content)
})
def get_roster(self):
"""Renders the Roster view. Displays all students in a single course section
Also allows user to add students to a course section
"""
template_values = {}
template_values['add_student_xsrf_token'] = crypto.XsrfTokenManager.create_xsrf_token(
teacher_rest_handlers.CourseSectionRestHandler.XSRF_TOKEN)
#need list of units and lessons for select elements that determine which progress value to display
#need a list of units, need the titles, unit ids, types
units = self.get_course().get_units()
units_filtered = filter(lambda x: x.type == 'U', units) #filter out assessments
template_values['units'] = units_filtered
#need to get lessons, but only for units that aren't assessments
lessons = {}
for unit in units_filtered:
unit_lessons = self.get_course().get_lessons(unit.unit_id)
unit_lessons_filtered = []
for lesson in unit_lessons:
unit_lessons_filtered.append({
'title': lesson.title,
'unit_id': lesson.unit_id,
'lesson_id': lesson.lesson_id
})
lessons[unit.unit_id] = unit_lessons_filtered
template_values['lessons'] = transforms.dumps(lessons, {}) #passing in JSON to template so it can be used
# in JavaScript
course_section_id = self.request.get('section')
course_section = teacher_entity.CourseSectionEntity.get_course_for_user(course_section_id)
students = {}
#need to get progress values for ALL students since we show completion for every student
if course_section.students and len(course_section.students) > 0:
#course_section.students = sorted(course_section.students.values(), key=lambda k: (k['name']))
for student in course_section.students.values():
temp_student = {}
temp_student['unit_completion'] = teacher_parsers.StudentProgressTracker.get_unit_completion(
Student.get_by_email(
student[
'email']), self.get_course())
temp_student['course_completion'] = teacher_parsers.StudentProgressTracker.get_overall_progress(Student.get_by_email(student[
'email']), self.get_course())
temp_student['detailed_course_completion'] = teacher_parsers.StudentProgressTracker.get_detailed_progress(
Student.get_by_email(student['email']), self.get_course())
temp_student['email'] = student['email']
temp_student['name'] = student['name']
students[student['email']] = temp_student
course_section.students = students
#passing in students as JSON so JavaScript can handle updating completion values easier
template_values['students_json'] = transforms.dumps(course_section.students, {})
template_values['namespace'] = self.get_course()._namespace.replace('ns_', '')
if course_section:
template_values['section'] = course_section
#render student_list.html for Roster view
main_content = self.get_template(
'student_list.html', [TEMPLATES_DIR]).render(template_values)
#DashboardHandler renders the page
self.render_page({
'page_title': self.format_title('Student List'),
'main_content': jinja2.utils.Markup(main_content)})
    def get_teacher_reg(self):
        """Render the Teacher Workspace view: the add/update-teacher form plus the
        list of all registered teachers.

        Non course-admins get the form disabled and an access-denied alert.
        """
        alerts = []
        disable_form = False
        if not roles.Roles.is_course_admin(self.app_context):
            alerts.append('Access denied. Please contact a course admin.')
            disable_form = True
        template_values = {}
        template_values['teacher_reg_xsrf_token'] = self.create_xsrf_token('teacher_reg')
        template_values['teachers'] = teacher_entity.Teacher.get_all_teachers_for_course()
        template_values['alert_messages'] = alerts
        template_values['disable'] = disable_form
        template_values['action'] = self.get_action_url('teacher_reg')
        main_content = self.get_template(
            'teacher_registration.html', [TEMPLATES_DIR]).render(template_values)
        self.render_page({
            'page_title': self.format_title('Teacher Registration'),
            'main_content': jinja2.utils.Markup(main_content)})
@classmethod
def post_teacher_reg(cls, handler):
"""Handles form submit for teacher registration"""
#get values entered on form
email = handler.request.get('email').strip()
school = handler.request.get('school')
#getting checkbox value is a little weird, might look different depending on browser
active = handler.request.get('active-teacher')
if active == 'on' or len(active) > 0:
active = True
else:
active = False
teacher = teacher_entity.Teacher.get_by_email(email)
#keep track of any errors we might want to pass back to the UI
alerts = []
#check to see if a teacher already exists
if teacher:
template_values = {}
template_values['teacher_reg_xsrf_token'] = handler.create_xsrf_token('teacher_reg')
sections = {}
#don't let the teacher be deactivated if they have active courses
can_inactivate = True
if active == False:
if teacher.sections:
course_sections_decoded = transforms.loads(teacher.sections)
for course_section_key in course_sections_decoded:
course_section = teacher_entity.CourseSectionEntity(course_sections_decoded[course_section_key])
sections[course_section.section_id] = course_section
for section in sections.values():
if section.is_active:
can_inactivate = False
#let user know if they can't deactivate, but only if they are trying to deactivate the teacher
if not can_inactivate and not active:
alerts.append('Cannot deactivate teacher. Teacher still has active courses')
#go for the update if all is good
if can_inactivate:
teacher_entity.Teacher.update_teacher_for_user(email, school, active, '', alerts)
#let user know all is well if save was successful
if len(alerts) == 0:
alerts.append('Teacher was successfully updated')
#render teacher_registration.html for view, pass alerts in
template_values['alert_messages'] = '\n'.join(alerts)
main_content = handler.get_template(
'teacher_registration.html', [TEMPLATES_DIR]).render(template_values)
#DashboardHandler renders the page
handler.render_page({
'page_title': handler.format_title('Teacher Dashboard'),
'main_content': jinja2.utils.Markup(main_content)
},
'teacher_dashboard'
)
else:
#go for it if teacher doesn't already exist
teacher_entity.Teacher.add_new_teacher_for_user(email, school, '', alerts)
template_values = {}
template_values['alert_messages'] = '\n'.join(alerts)
template_values['teacher_reg_xsrf_token'] = handler.create_xsrf_token('teacher_reg')
main_content = handler.get_template(
'teacher_registration.html', [TEMPLATES_DIR]).render(template_values)
#DashboardHandler renders the page
handler.render_page({
'page_title': handler.format_title('Teacher Dashboard'),
'main_content': jinja2.utils.Markup(main_content)
},
'teacher_dashboard'
)
def notify_module_enabled():
    """Handles things after module has been enabled.

    Wires the teacher dashboard into the CourseBuilder dashboard: navigation
    mapping, GET/POST actions, dashboard-section permissions, and the module's
    JS/CSS assets.
    """
    def get_action(handler):
        """Redirects to teacher_dashboard."""
        # Parenthesize the fallback: `%` binds tighter than `or`, so without
        # the parens the formatted string (always truthy) won, and
        # DEFAULT_TAB was never applied when 'tab' was missing.
        handler.redirect('/modules/teacher_dashboard?action=teacher_dashboard&tab=%s' %
                         (handler.request.get('tab') or TeacherHandler.DEFAULT_TAB))

    def post_action(handler):
        """Delegates the teacher-registration form POST to TeacherHandler.

        Restored: this helper is referenced below by add_custom_post_action
        but was missing, which raised NameError when the module was enabled.
        """
        TeacherHandler.post_teacher_reg(handler)

    dashboard.DashboardHandler.add_nav_mapping(
        TeacherHandler.ACTION, 'Teacher')
    dashboard.DashboardHandler.get_actions.append('teacher_dashboard')
    setattr(dashboard.DashboardHandler, 'get_teacher_dashboard', get_action)
    # add post actions
    dashboard.DashboardHandler.add_custom_post_action('teacher_reg', post_action)
    setattr(dashboard.DashboardHandler, 'post_teacher_reg', post_action)
    # add permissions for the dashboard sections
    dashboard.DashboardHandler.add_external_permission(
        ACCESS_ASSETS_PERMISSION, ACCESS_ASSETS_PERMISSION_DESCRIPTION)
    dashboard.DashboardHandler.add_external_permission(
        ACCESS_SETTINGS_PERMISSION, ACCESS_SETTINGS_PERMISSION_DESCRIPTION)
    dashboard.DashboardHandler.add_external_permission(
        ACCESS_ROLES_PERMISSION, ACCESS_ROLES_PERMISSION_DESCRIPTION)
    dashboard.DashboardHandler.add_external_permission(
        ACCESS_ANALYTICS_PERMISSION, ACCESS_ANALYTICS_PERMISSION_DESCRIPTION)
    dashboard.DashboardHandler.add_external_permission(
        ACCESS_SEARCH_PERMISSION, ACCESS_SEARCH_PERMISSION_DESCRIPTION)
    dashboard.DashboardHandler.add_external_permission(
        ACCESS_PEERREVIEW_PERMISSION, ACCESS_PEERREVIEW_PERMISSION_DESCRIPTION)
    dashboard.DashboardHandler.add_external_permission(
        ACCESS_SKILLMAP_PERMISSION, ACCESS_SKILLMAP_PERMISSION_DESCRIPTION)
    dashboard.DashboardHandler.add_external_permission(
        ACCESS_TEACHER_DASHBOARD_PERMISSION, ACCESS_TEACHER_DASHBOARD_PERMISSION_DESCRIPTION)
    # map permissions to actions
    dashboard.DashboardHandler.map_action_to_permission('get_' + str(TeacherHandler.ACTION),
                                                        ACCESS_TEACHER_DASHBOARD_PERMISSION)
    # NOTE(review): positional indexing into the nav mappings assumes a fixed
    # registration order of the other dashboard modules -- fragile; verify.
    nav_mappings = dashboard.DashboardHandler.get_nav_mappings()
    dashboard.DashboardHandler.map_action_to_permission('get_' + str(nav_mappings[1][0]), ACCESS_ASSETS_PERMISSION)
    dashboard.DashboardHandler.map_action_to_permission('get_' + str(nav_mappings[2][0]), ACCESS_SETTINGS_PERMISSION)
    dashboard.DashboardHandler.map_action_to_permission('get_' + str(nav_mappings[3][0]), ACCESS_ROLES_PERMISSION)
    dashboard.DashboardHandler.map_action_to_permission('get_' + str(nav_mappings[4][0]), ACCESS_ANALYTICS_PERMISSION)
    dashboard.DashboardHandler.map_action_to_permission('get_' + str(nav_mappings[5][0]), ACCESS_SEARCH_PERMISSION)
    dashboard.DashboardHandler.map_action_to_permission('get_' + str(nav_mappings[6][0]), ACCESS_PEERREVIEW_PERMISSION)
    dashboard.DashboardHandler.map_action_to_permission('get_' + str(nav_mappings[7][0]), ACCESS_SKILLMAP_PERMISSION)
    dashboard.DashboardHandler.EXTRA_JS_HREF_LIST.append(
        '/modules/teacher_dashboard/resources/js/popup.js')
    dashboard.DashboardHandler.EXTRA_JS_HREF_LIST.append(
        '/modules/teacher_dashboard/resources/js/course_section_analytics.js')
    dashboard.DashboardHandler.EXTRA_JS_HREF_LIST.append(
        '/modules/teacher_dashboard/resources/js/activity_score_manager.js')
    dashboard.DashboardHandler.EXTRA_JS_HREF_LIST.append(
        '/modules/teacher_dashboard/resources/js/student_list_table_manager')
    dashboard.DashboardHandler.EXTRA_JS_HREF_LIST.append(
        '/modules/teacher_dashboard/resources/js/student_list_table_rebuild_manager.js')
    dashboard.DashboardHandler.EXTRA_JS_HREF_LIST.append(
        '/modules/teacher_dashboard/resources/js/activity_score_table_manager.js')
    dashboard.DashboardHandler.EXTRA_JS_HREF_LIST.append(
        '/modules/teacher_dashboard/resources/js/student_score_manager.js')
    dashboard.DashboardHandler.EXTRA_CSS_HREF_LIST.append(
        '/modules/teacher_dashboard/resources/css/student_list.css')
    transforms.CUSTOM_JSON_ENCODERS.append(teacher_entity.CourseSectionEntity.json_encoder)
    # register tabs
    TeacherHandler.register_tabs()
def register_module():
    """Registers this module in the registry."""
    # Every script below is served through the IIFE-wrapping handler.
    iife_scripts = (
        'popup.js',
        'course_section_analytics.js',
        'activity_score_manager.js',
        'student_list_table_manager',
        'student_list_table_rebuild_manager.js',
        'activity_score_table_manager.js',
        'student_score_manager.js',
    )
    # Generic resource handlers first, then one IIFE route per script.
    global_routes = [
        (os.path.join(RESOURCES_PATH, 'js', '.*'), tags.JQueryHandler),
        (os.path.join(RESOURCES_PATH, '.*'), tags.ResourcesHandler),
    ]
    global_routes.extend(
        (RESOURCES_PATH + '/js/' + script, tags.IifeHandler)
        for script in iife_scripts)

    # Dashboard page plus the REST endpoints, registered per course namespace.
    rest_handler_classes = (
        teacher_rest_handlers.CourseSectionRestHandler,
        teacher_rest_handlers.StudentProgressRestHandler,
        teacher_rest_handlers.ActivityScoreRestHandler,
    )
    namespaced_routes = [(TeacherHandler.URL, TeacherHandler)]
    namespaced_routes.extend(
        (handler_cls.URL, handler_cls) for handler_cls in rest_handler_classes)

    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Teacher Dashboard Module',
        'A module provide teacher workflow.',
        global_routes, namespaced_routes,
        notify_module_enabled=notify_module_enabled)
    return custom_module
| 44.370062 | 141 | 0.685222 | __author__ = 'ehiller@css.edu'
# Module to support custom teacher views in CourseBuilder dashboard
# Views include:
# Section Roster - list of students in section
# Sections - list of sections for current user
# Student Dashboard - view of a single student's performance in the course
# Teacher Workspace - teacher registration and list of all registered teachers
import jinja2
import os
import appengine_config
from common import tags
from common import crypto
from models import custom_modules
from models import roles
from models import transforms
from models.models import Student
#since we are extending the dashboard, probably want to include dashboard stuff
from modules.dashboard import dashboard
from modules.dashboard import tabs
#import our own modules
import teacher_entity
import teacher_rest_handlers
import teacher_parsers
#Setup paths and directories for templates and resources
RESOURCES_PATH = '/modules/teacher_dashboard/resources'
TEMPLATES_DIR = os.path.join(
appengine_config.BUNDLE_ROOT, 'modules', 'teacher_dashboard', 'templates')
#setup permissions that will be registered with the dashboard
ACCESS_ASSETS_PERMISSION = 'can_access_assets'
ACCESS_ASSETS_PERMISSION_DESCRIPTION = 'Can access the Assets Dashboard'
ACCESS_SETTINGS_PERMISSION = 'can_access_settings'
ACCESS_SETTINGS_PERMISSION_DESCRIPTION = 'Can access the Settings Dashboard'
ACCESS_ROLES_PERMISSION = 'can_access_roles'
ACCESS_ROLES_PERMISSION_DESCRIPTION = 'Can access the Roles Dashboard'
ACCESS_ANALYTICS_PERMISSION = 'can_access_analytics'
ACCESS_ANALYTICS_PERMISSION_DESCRIPTION = 'Can access the Analytics Dashboard'
ACCESS_SEARCH_PERMISSION = 'can_access_search'
ACCESS_SEARCH_PERMISSION_DESCRIPTION = 'Can access the Search Dashboard'
ACCESS_PEERREVIEW_PERMISSION = 'can_access_peer_review'
ACCESS_PEERREVIEW_PERMISSION_DESCRIPTION = 'Can access the Peer Review Dashboard'
ACCESS_SKILLMAP_PERMISSION = 'can_access_skill_map'
ACCESS_SKILLMAP_PERMISSION_DESCRIPTION = 'Can access the Skill Map Dashboard'
ACCESS_TEACHER_DASHBOARD_PERMISSION = 'can_access_teacher_dashboard'
ACCESS_TEACHER_DASHBOARD_PERMISSION_DESCRIPTION = 'Can access the Teacher Dashboard'
#setup custom module for, needs to be referenced later
custom_module = None
class TeacherHandler(dashboard.DashboardHandler):
    """Handler for everything under the Teacher tab in the CourseBuilder dashboard.
    Note:
        Inherits from the DashboardHandler, makes use of many of those functions to
        integrate with existing dashboard.
    Attributes:
        ACTION (str): Value used to handle navigation in the dashboard, top level label.
        DEFAULT_TAB (str): Default sub-navigation value.
        URL (str): Path to module from working directory.
        XSRF_TOKEN_NAME (str): Token used for xsrf security functions.
    """
    ACTION = 'teacher_dashboard'
    DEFAULT_TAB = 'sections'
    URL = '/modules/teacher_dashboard'
    XSRF_TOKEN_NAME = ''
    @classmethod
    def register_tabs(cls):
        """Handles registering all sub-navigation tabs"""
        def register_tab(key, label, handler, href=None):
            """Registers tab using the tab registry"""
            # An explicit href opens in a new window; otherwise build the
            # in-dashboard tab URL from the key.
            if href:
                target = '_blank'
            else:
                href = 'dashboard?action=teacher_dashboard&tab=%s' % key
                target = None
            tabs.Registry.register(
                cls.ACTION, key, label, contents=handler, href=href, target=target
            )
        register_tab('sections', 'Sections', TeacherHandler)
        register_tab('student_detail', 'Student Dashboard', TeacherHandler)
        register_tab('teacher_reg', 'Teacher Workspace', TeacherHandler)
    def get_teacher_dashboard(self):
        """Process navigation requests sent to teacher handler. Routes to appropriate function."""
        in_tab = self.request.get('tab') or self.DEFAULT_TAB
        tab_action = self.request.get('tab_action') or None #defined a secondary tab property so I can go load a
        # separate view in the same tab
        if in_tab == 'sections':
            if tab_action == 'roster':
                return self.get_roster()
            else:
                return self.get_sections()
        elif in_tab == 'teacher_reg':
            return self.get_teacher_reg()
        elif in_tab == 'student_detail':
            return self.get_student_dashboard()
    def get_sections(self):
        """Renders Sections view. Javascript handles getting course sections and building the view"""
        template_values = {}
        # Strip the datastore namespace prefix for display/JS use.
        template_values['namespace'] = self.get_course()._namespace.replace('ns_', '')
        main_content = self.get_template(
            'teacher_sections.html', [TEMPLATES_DIR]).render(template_values)
        self.render_page({
            'page_title': self.format_title('Sections'),
            'main_content': jinja2.utils.Markup(main_content)})
    def get_student_dashboard(self):
        """Renders Student Dashboard view.
        Also gets ALL students in ALL course sections for the registered user to
        build a jQuery autocomplete dropdown on the view.
        """
        student_email = self.request.get('student') or None #email will be in the request if opened from student list
        # view, otherwise it will be None
        #need to go through every course section for the current user and get all unique students
        students = []
        course_sections = teacher_entity.CourseSectionEntity.get_course_sections_for_user()
        if course_sections and len(course_sections) > 0:
            for course_section in course_sections.values():
                if course_section.students and len(course_section.students) > 0:
                    for student_in_section in course_section.students.values():
                        # de-duplicate by user_id across sections
                        if not any(x['user_id'] == student_in_section['user_id'] for x in students):
                            students.append(student_in_section)
        #check to see if we have a student and if we need to get detailed progress
        student = None
        if student_email:
            student = Student.get_by_email(student_email)
        if (student):
            course = self.get_course()
            units = teacher_parsers.StudentProgressTracker.get_detailed_progress(student, course)
            scores = teacher_parsers.ActivityScoreParser.get_activity_scores([student.user_id], course)
        else:
            units = None
            scores = None
        #render the template for the student dashboard view
        main_content = self.get_template(
            'student_detailed_progress.html', [TEMPLATES_DIR]).render(
            {
                'units': units, #unit completion
                'student': student, #course defined student object, need email and name
                'students': students, #list of students, names and emails, from a course section student list
                'scores': scores
            })
        #call DashboardHandler function to render the page
        self.render_page({
            'page_title': self.format_title('Student Dashboard'),
            'main_content': jinja2.utils.Markup(main_content)
        })
    def get_roster(self):
        """Renders the Roster view. Displays all students in a single course section
        Also allows user to add students to a course section
        """
        template_values = {}
        template_values['add_student_xsrf_token'] = crypto.XsrfTokenManager.create_xsrf_token(
            teacher_rest_handlers.CourseSectionRestHandler.XSRF_TOKEN)
        #need list of units and lessons for select elements that determine which progress value to display
        #need a list of units, need the titles, unit ids, types
        units = self.get_course().get_units()
        # NOTE(review): filter() returns a lazy iterator on Python 3; this
        # code appears to target Python 2 (it is re-iterated below) -- verify.
        units_filtered = filter(lambda x: x.type == 'U', units) #filter out assessments
        template_values['units'] = units_filtered
        #need to get lessons, but only for units that aren't assessments
        lessons = {}
        for unit in units_filtered:
            unit_lessons = self.get_course().get_lessons(unit.unit_id)
            unit_lessons_filtered = []
            for lesson in unit_lessons:
                unit_lessons_filtered.append({
                    'title': lesson.title,
                    'unit_id': lesson.unit_id,
                    'lesson_id': lesson.lesson_id
                })
            lessons[unit.unit_id] = unit_lessons_filtered
        template_values['lessons'] = transforms.dumps(lessons, {}) #passing in JSON to template so it can be used
        # in JavaScript
        course_section_id = self.request.get('section')
        course_section = teacher_entity.CourseSectionEntity.get_course_for_user(course_section_id)
        students = {}
        #need to get progress values for ALL students since we show completion for every student
        if course_section.students and len(course_section.students) > 0:
            #course_section.students = sorted(course_section.students.values(), key=lambda k: (k['name']))
            for student in course_section.students.values():
                temp_student = {}
                temp_student['unit_completion'] = teacher_parsers.StudentProgressTracker.get_unit_completion(
                    Student.get_by_email(
                        student[
                            'email']), self.get_course())
                temp_student['course_completion'] = teacher_parsers.StudentProgressTracker.get_overall_progress(Student.get_by_email(student[
                    'email']), self.get_course())
                temp_student['detailed_course_completion'] = teacher_parsers.StudentProgressTracker.get_detailed_progress(
                    Student.get_by_email(student['email']), self.get_course())
                temp_student['email'] = student['email']
                temp_student['name'] = student['name']
                students[student['email']] = temp_student
            course_section.students = students
        #passing in students as JSON so JavaScript can handle updating completion values easier
        template_values['students_json'] = transforms.dumps(course_section.students, {})
        template_values['namespace'] = self.get_course()._namespace.replace('ns_', '')
        if course_section:
            template_values['section'] = course_section
        #render student_list.html for Roster view
        main_content = self.get_template(
            'student_list.html', [TEMPLATES_DIR]).render(template_values)
        #DashboardHandler renders the page
        self.render_page({
            'page_title': self.format_title('Student List'),
            'main_content': jinja2.utils.Markup(main_content)})
    def get_teacher_reg(self):
        """Renders Teacher Workspace view. Displays form to add or update a teacher
        Also displays all registered teachers.
        """
        alerts = []
        disable_form = False
        if not roles.Roles.is_course_admin(self.app_context):
            alerts.append('Access denied. Please contact a course admin.')
            disable_form = True
        template_values = {}
        template_values['teacher_reg_xsrf_token'] = self.create_xsrf_token('teacher_reg')
        template_values['teachers'] = teacher_entity.Teacher.get_all_teachers_for_course()
        template_values['alert_messages'] = alerts
        template_values['disable'] = disable_form
        template_values['action'] = self.get_action_url('teacher_reg')
        main_content = self.get_template(
            'teacher_registration.html', [TEMPLATES_DIR]).render(template_values)
        self.render_page({
            'page_title': self.format_title('Teacher Registration'),
            'main_content': jinja2.utils.Markup(main_content)})
    @classmethod
    def post_teacher_reg(cls, handler):
        """Handles form submit for teacher registration"""
        #get values entered on form
        email = handler.request.get('email').strip()
        school = handler.request.get('school')
        #getting checkbox value is a little weird, might look different depending on browser
        active = handler.request.get('active-teacher')
        if active == 'on' or len(active) > 0:
            active = True
        else:
            active = False
        teacher = teacher_entity.Teacher.get_by_email(email)
        #keep track of any errors we might want to pass back to the UI
        alerts = []
        #check to see if a teacher already exists
        if teacher:
            template_values = {}
            template_values['teacher_reg_xsrf_token'] = handler.create_xsrf_token('teacher_reg')
            sections = {}
            #don't let the teacher be deactivated if they have active courses
            can_inactivate = True
            if active == False:
                if teacher.sections:
                    course_sections_decoded = transforms.loads(teacher.sections)
                    for course_section_key in course_sections_decoded:
                        course_section = teacher_entity.CourseSectionEntity(course_sections_decoded[course_section_key])
                        sections[course_section.section_id] = course_section
                    for section in sections.values():
                        if section.is_active:
                            can_inactivate = False
            #let user know if they can't deactivate, but only if they are trying to deactivate the teacher
            if not can_inactivate and not active:
                alerts.append('Cannot deactivate teacher. Teacher still has active courses')
            #go for the update if all is good
            if can_inactivate:
                teacher_entity.Teacher.update_teacher_for_user(email, school, active, '', alerts)
            #let user know all is well if save was successful
            if len(alerts) == 0:
                alerts.append('Teacher was successfully updated')
            #render teacher_registration.html for view, pass alerts in
            template_values['alert_messages'] = '\n'.join(alerts)
            main_content = handler.get_template(
                'teacher_registration.html', [TEMPLATES_DIR]).render(template_values)
            #DashboardHandler renders the page
            handler.render_page({
                'page_title': handler.format_title('Teacher Dashboard'),
                'main_content': jinja2.utils.Markup(main_content)
            },
                'teacher_dashboard'
            )
        else:
            #go for it if teacher doesn't already exist
            teacher_entity.Teacher.add_new_teacher_for_user(email, school, '', alerts)
            template_values = {}
            template_values['alert_messages'] = '\n'.join(alerts)
            template_values['teacher_reg_xsrf_token'] = handler.create_xsrf_token('teacher_reg')
            main_content = handler.get_template(
                'teacher_registration.html', [TEMPLATES_DIR]).render(template_values)
            #DashboardHandler renders the page
            handler.render_page({
                'page_title': handler.format_title('Teacher Dashboard'),
                'main_content': jinja2.utils.Markup(main_content)
            },
                'teacher_dashboard'
            )
def notify_module_enabled():
    """Handles things after module has been enabled."""
    def get_action(handler):
        """Redirects to teacher_dashboard."""
        # NOTE(review): `%` binds tighter than `or`, so the formatted string
        # (always truthy) is evaluated first and DEFAULT_TAB is effectively
        # never used here -- confirm intent.
        handler.redirect('/modules/teacher_dashboard?action=teacher_dashboard&tab=%s' % handler.request.get('tab') or
                         TeacherHandler.DEFAULT_TAB)
    def post_action(handler):
        """Forwards the teacher-registration form POST to TeacherHandler."""
        TeacherHandler.post_teacher_reg(handler)
    dashboard.DashboardHandler.add_nav_mapping(
        TeacherHandler.ACTION, 'Teacher')
    dashboard.DashboardHandler.get_actions.append('teacher_dashboard')
    setattr(dashboard.DashboardHandler, 'get_teacher_dashboard', get_action)
    #add post actions
    dashboard.DashboardHandler.add_custom_post_action('teacher_reg', post_action)
    setattr(dashboard.DashboardHandler, 'post_teacher_reg', post_action)
    #add permissions for the dashboard sections
    dashboard.DashboardHandler.add_external_permission(
        ACCESS_ASSETS_PERMISSION, ACCESS_ASSETS_PERMISSION_DESCRIPTION)
    dashboard.DashboardHandler.add_external_permission(
        ACCESS_SETTINGS_PERMISSION, ACCESS_SETTINGS_PERMISSION_DESCRIPTION)
    dashboard.DashboardHandler.add_external_permission(
        ACCESS_ROLES_PERMISSION, ACCESS_ROLES_PERMISSION_DESCRIPTION)
    dashboard.DashboardHandler.add_external_permission(
        ACCESS_ANALYTICS_PERMISSION, ACCESS_ANALYTICS_PERMISSION_DESCRIPTION)
    dashboard.DashboardHandler.add_external_permission(
        ACCESS_SEARCH_PERMISSION, ACCESS_SEARCH_PERMISSION_DESCRIPTION)
    dashboard.DashboardHandler.add_external_permission(
        ACCESS_PEERREVIEW_PERMISSION, ACCESS_PEERREVIEW_PERMISSION_DESCRIPTION)
    dashboard.DashboardHandler.add_external_permission(
        ACCESS_SKILLMAP_PERMISSION, ACCESS_SKILLMAP_PERMISSION_DESCRIPTION)
    dashboard.DashboardHandler.add_external_permission(
        ACCESS_TEACHER_DASHBOARD_PERMISSION, ACCESS_TEACHER_DASHBOARD_PERMISSION_DESCRIPTION)
    #map permissions to actions
    dashboard.DashboardHandler.map_action_to_permission('get_' + str(TeacherHandler.ACTION),
                                                        ACCESS_TEACHER_DASHBOARD_PERMISSION)
    # NOTE(review): positional indexing below assumes a fixed nav-mapping
    # registration order of the other dashboard modules -- verify.
    nav_mappings = dashboard.DashboardHandler.get_nav_mappings()
    dashboard.DashboardHandler.map_action_to_permission('get_' + str(nav_mappings[1][0]), ACCESS_ASSETS_PERMISSION)
    dashboard.DashboardHandler.map_action_to_permission('get_' + str(nav_mappings[2][0]), ACCESS_SETTINGS_PERMISSION)
    dashboard.DashboardHandler.map_action_to_permission('get_' + str(nav_mappings[3][0]), ACCESS_ROLES_PERMISSION)
    dashboard.DashboardHandler.map_action_to_permission('get_' + str(nav_mappings[4][0]), ACCESS_ANALYTICS_PERMISSION)
    dashboard.DashboardHandler.map_action_to_permission('get_' + str(nav_mappings[5][0]), ACCESS_SEARCH_PERMISSION)
    dashboard.DashboardHandler.map_action_to_permission('get_' + str(nav_mappings[6][0]), ACCESS_PEERREVIEW_PERMISSION)
    dashboard.DashboardHandler.map_action_to_permission('get_' + str(nav_mappings[7][0]), ACCESS_SKILLMAP_PERMISSION)
    dashboard.DashboardHandler.EXTRA_JS_HREF_LIST.append(
        '/modules/teacher_dashboard/resources/js/popup.js')
    dashboard.DashboardHandler.EXTRA_JS_HREF_LIST.append(
        '/modules/teacher_dashboard/resources/js/course_section_analytics.js')
    dashboard.DashboardHandler.EXTRA_JS_HREF_LIST.append(
        '/modules/teacher_dashboard/resources/js/activity_score_manager.js')
    dashboard.DashboardHandler.EXTRA_JS_HREF_LIST.append(
        '/modules/teacher_dashboard/resources/js/student_list_table_manager')
    dashboard.DashboardHandler.EXTRA_JS_HREF_LIST.append(
        '/modules/teacher_dashboard/resources/js/student_list_table_rebuild_manager.js')
    dashboard.DashboardHandler.EXTRA_JS_HREF_LIST.append(
        '/modules/teacher_dashboard/resources/js/activity_score_table_manager.js')
    dashboard.DashboardHandler.EXTRA_JS_HREF_LIST.append(
        '/modules/teacher_dashboard/resources/js/student_score_manager.js')
    dashboard.DashboardHandler.EXTRA_CSS_HREF_LIST.append(
        '/modules/teacher_dashboard/resources/css/student_list.css')
    transforms.CUSTOM_JSON_ENCODERS.append(teacher_entity.CourseSectionEntity.json_encoder)
    #register tabs
    TeacherHandler.register_tabs()
def register_module():
    """Registers this module in the registry."""
    # Routes served outside any course namespace (static JS/CSS assets).
    global_routes = [
        (os.path.join(RESOURCES_PATH, 'js', '.*'), tags.JQueryHandler),
        (os.path.join(RESOURCES_PATH, '.*'), tags.ResourcesHandler),
        (RESOURCES_PATH + '/js/popup.js', tags.IifeHandler),
        (RESOURCES_PATH + '/js/course_section_analytics.js', tags.IifeHandler),
        (RESOURCES_PATH + '/js/activity_score_manager.js', tags.IifeHandler),
        (RESOURCES_PATH + '/js/student_list_table_manager', tags.IifeHandler),
        (RESOURCES_PATH + '/js/student_list_table_rebuild_manager.js', tags.IifeHandler),
        (RESOURCES_PATH + '/js/activity_score_table_manager.js', tags.IifeHandler),
        (RESOURCES_PATH + '/js/student_score_manager.js', tags.IifeHandler)
    ]
    # Routes registered per course namespace: the dashboard page plus the
    # REST endpoints used by its JavaScript.
    namespaced_routes = [
        (TeacherHandler.URL, TeacherHandler),
        (teacher_rest_handlers.CourseSectionRestHandler.URL, teacher_rest_handlers.CourseSectionRestHandler),
        (teacher_rest_handlers.StudentProgressRestHandler.URL, teacher_rest_handlers.StudentProgressRestHandler),
        (teacher_rest_handlers.ActivityScoreRestHandler.URL, teacher_rest_handlers.ActivityScoreRestHandler)
    ]
    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Teacher Dashboard Module',
        'A module provide teacher workflow.',
        global_routes, namespaced_routes,
        notify_module_enabled=notify_module_enabled)
    return custom_module
55c6ebbc315a030d0f8f7cc93fc5471567d72e3b | 36,442 | py | Python | python/bigipconfigdriver.py | ymaxgit/f5-bigip-ctrl | 51af36727025b18e6b4d54b2fbaefa77d73125e6 | [
"Apache-2.0"
] | null | null | null | python/bigipconfigdriver.py | ymaxgit/f5-bigip-ctrl | 51af36727025b18e6b4d54b2fbaefa77d73125e6 | [
"Apache-2.0"
] | null | null | null | python/bigipconfigdriver.py | ymaxgit/f5-bigip-ctrl | 51af36727025b18e6b4d54b2fbaefa77d73125e6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2016, 2017 F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import argparse
import base64
import fcntl
import hashlib
import ipaddress
import json
import logging
import os
import os.path
import sys
import time
import threading
import signal
import urllib
import pyinotify
from urlparse import urlparse
from f5_cccl._f5 import CloudBigIP, get_protocol, has_partition, log_sequence
from f5_cccl.common import extract_partition_and_name, ipv4_to_mac,\
list_diff_exclusive, IPV4FormatError, PartitionNameError
from f5.bigip import ManagementRoot
log = logging.getLogger(__name__)
console = logging.StreamHandler()
console.setFormatter(
    logging.Formatter("[%(asctime)s %(name)s %(levelname)s] %(message)s"))
root_logger = logging.getLogger()
root_logger.addHandler(console)


class ResponseStatusFilter(logging.Filter):
    """Suppresses noisy 'RESPONSE::STATUS...' records emitted by the f5 SDK."""
    def filter(self, record):
        return not record.getMessage().startswith("RESPONSE::STATUS")


class CertFilter(logging.Filter):
    """Keeps SSL certificate bodies out of the logs."""
    def filter(self, record):
        return "CERTIFICATE" not in record.getMessage()


class KeyFilter(logging.Filter):
    """Keeps private-key material out of the logs."""
    def filter(self, record):
        return "PRIVATE KEY" not in record.getMessage()


# The filter classes were referenced below but missing, which raised a
# NameError at import time; they are restored above. Attach them to the
# root logger so every handler inherits the redaction.
root_logger.addFilter(ResponseStatusFilter())
root_logger.addFilter(CertFilter())
root_logger.addFilter(KeyFilter())

DEFAULT_LOG_LEVEL = logging.INFO
DEFAULT_VERIFY_INTERVAL = 30.0
class K8sCloudBigIP(CloudBigIP):
    """K8sCloudBigIP class.
    Generates a configuration for a BigIP based upon the apps/tasks managed
    by services/pods/nodes in Kubernetes.
    - Matches apps/services by BigIP partition
    - Creates a Virtual Server and pool for each service type that matches a
      BigIP partition
    - For each backend (task, node, or pod), it creates a pool member and adds
      the member to the pool
    - If the app has a Marathon Health Monitor configured, create a
      corresponding health monitor for the BigIP pool member
    - Token-based authentication is used by specifying a token named 'tmos'.
      This will allow non-admin users to use the API (BIG-IP must configure
      the accounts with proper permissions, for either local or remote auth).
    Args:
        hostname: IP address of BIG-IP
        username: BIG-IP username
        password: BIG-IP password
        partitions: List of BIG-IP partitions to manage
    """
    def __init__(self, hostname, port, username, password, partitions,
                 manage_types):
        """Initialize the K8sCloudBigIP object."""
        super(K8sCloudBigIP, self).__init__(hostname, port, username,
                                            password, partitions,
                                            token="tmos",
                                            manage_types=manage_types)
    def _apply_config(self, config):
        """Apply the configuration to the BIG-IP.
        Dispatches the 'ltm' portion to the CCCL base class and the
        'network' portion to the local network handler.
        Args:
            config: BIG-IP config dict
        """
        if 'ltm' in config:
            CloudBigIP._apply_config(self, config['ltm'])
        if 'network' in config:
            self._apply_network_config(config['network'])
    def _apply_network_config(self, config):
        """Apply the network configuration to the BIG-IP.
        Args:
            config: BIG-IP network config dict (only the 'fdb' key is used)
        """
        if 'fdb' in config:
            self._apply_network_fdb_config(config['fdb'])
    def _apply_network_fdb_config(self, fdb_config):
        """Apply the network fdb configuration to the BIG-IP.
        Compares the requested FDB endpoints with those currently on the
        BIG-IP and rewrites the record list only when they differ.
        Args:
            fdb_config: dict with 'vxlan-name' and 'vxlan-node-ips' keys
        """
        req_vxlan_name = fdb_config['vxlan-name']
        req_fdb_record_endpoint_list = fdb_config['vxlan-node-ips']
        try:
            f5_fdb_record_endpoint_list = self.get_fdb_records(req_vxlan_name)
            log_sequence('req_fdb_record_list', req_fdb_record_endpoint_list)
            log_sequence('f5_fdb_record_list', f5_fdb_record_endpoint_list)
            # See if the list of records is different.
            # If so, update with new list.
            if list_diff_exclusive(f5_fdb_record_endpoint_list,
                                   req_fdb_record_endpoint_list):
                self.fdb_records_update(req_vxlan_name,
                                        req_fdb_record_endpoint_list)
        except (PartitionNameError, IPV4FormatError) as e:
            # Bad input data; log and leave the BIG-IP untouched.
            log.error(e)
            return
        except Exception as e:
            log.error('Failed to configure the FDB for VxLAN tunnel '
                      '{}: {}'.format(req_vxlan_name, e))
    def get_vxlan_tunnel(self, vxlan_name):
        """Get a vxlan tunnel object.
        Args:
            vxlan_name: Name of the vxlan tunnel (may be '/partition/name')
        Returns:
            The f5-sdk tunnel resource loaded from the BIG-IP.
        """
        partition, name = extract_partition_and_name(vxlan_name)
        vxlan_tunnel = self.net.fdb.tunnels.tunnel.load(
            partition=partition, name=urllib.quote(name))
        return vxlan_tunnel
    def get_fdb_records(self, vxlan_name):
        """Get a list of FDB records (just the endpoint list) for the vxlan.
        Args:
            vxlan_name: Name of the vxlan tunnel
        Returns:
            List of endpoint IP strings (empty if the tunnel has no records).
        """
        endpoint_list = []
        vxlan_tunnel = self.get_vxlan_tunnel(vxlan_name)
        if hasattr(vxlan_tunnel, 'records'):
            for record in vxlan_tunnel.records:
                endpoint_list.append(record['endpoint'])
        return endpoint_list
    def fdb_records_update(self, vxlan_name, endpoint_list):
        """Update the fdb records for a vxlan tunnel.
        Replaces the tunnel's whole record list; each record's MAC name is
        derived deterministically from its IPv4 endpoint.
        Args:
            vxlan_name: Name of the vxlan tunnel
            endpoint_list: IP addresses to write as fdb records
        """
        vxlan_tunnel = self.get_vxlan_tunnel(vxlan_name)
        data = {'records': []}
        records = data['records']
        for endpoint in endpoint_list:
            record = {'name': ipv4_to_mac(endpoint), 'endpoint': endpoint}
            records.append(record)
        log.debug("Updating records for vxlan tunnel {}: {}".format(
            vxlan_name, data['records']))
        vxlan_tunnel.update(**data)
def create_config_kubernetes(bigip, config):
    """Create a BIG-IP configuration from the Kubernetes configuration.
    Args:
        bigip: BIG-IP wrapper consulted when generating the LTM section
        config: Kubernetes BigIP config
    """
    log.debug("Generating config for BIG-IP from Kubernetes state")
    network_section = {}
    ltm_section = {}
    if 'openshift-sdn' in config:
        network_section = create_network_config_kubernetes(config)
    if 'resources' in config and 'virtualServers' in config['resources']:
        ltm_section = create_ltm_config_kubernetes(bigip, config['resources'])
    return {'ltm': ltm_section, 'network': network_section}
def create_network_config_kubernetes(config):
    """Create a BIG-IP Network configuration from the Kubernetes config.
    Args:
        config: Kubernetes BigIP config which contains openshift-sdn defs
    """
    network_cfg = {}
    if 'openshift-sdn' in config:
        # The openshift-sdn section maps directly onto the FDB config.
        network_cfg['fdb'] = config['openshift-sdn']
    return network_cfg
def create_ltm_config_kubernetes(bigip, config):
    """Create a BIG-IP LTM configuration from the Kubernetes configuration.
    Builds the virtual-server and pool sections consumed by CCCL, keeping
    only services whose partition is managed by this controller.
    Args:
        bigip: BIG-IP wrapper; only get_partitions() is used here
        config: Kubernetes BigIP config which contains a svc list
    """
    configuration = {}
    configuration['l7Policies'] = config.get('l7Policies', [])
    configuration['monitors'] = config.get('monitors', [])
    configuration['pools'] = []
    f5_pools = config.get('pools', [])
    f5_services = {}
    # partitions this script is responsible for:
    partitions = frozenset(bigip.get_partitions())
    svcs = config['virtualServers']
    for svc in svcs:
        vs_partition = svc['partition']
        # Only handle application if it's partition is one that this script
        # is responsible for
        if not has_partition(partitions, vs_partition):
            continue
        f5_service = {}
        vs_name = svc['name']
        f5_service['balance'] = svc.get('balance', '')
        policies = svc.get('policies', [])
        profiles = svc.get('profiles', [])
        pool = {}
        # No address for this port
        if (('virtualAddress' not in svc or
                'bindAddr' not in svc['virtualAddress']) and
                'iapp' not in svc):
            log.debug("Creating pool only for %s", vs_name)
        elif ('iapp' not in svc and 'bindAddr' not in
              svc['virtualAddress']):
            continue
        f5_service['name'] = vs_name
        f5_service['partition'] = vs_partition
        if 'iapp' in svc:
            # iApp-driven services carry their whole deployment description.
            f5_service['iapp'] = {'template': svc['iapp'],
                                  'poolMemberTable':
                                  svc['iappPoolMemberTable'],
                                  'variables': svc['iappVariables'],
                                  'options': svc['iappOptions']}
            f5_service['iapp']['tables'] = svc.get('iappTables', {})
        else:
            f5_service['virtual'] = {}
            f5_service['pool'] = {}
            f5_service['health'] = []
            # Parse the SSL profile into partition and name
            if 'sslProfile' in svc:
                # The sslProfile item can be empty or have either
                # 'f5ProfileName' or 'f5ProfileNames', not both.
                if 'f5ProfileName' in svc['sslProfile']:
                    append_ssl_profile(
                        profiles, svc['sslProfile']['f5ProfileName'])
                elif 'f5ProfileNames' in svc['sslProfile']:
                    for profName in svc['sslProfile']['f5ProfileNames']:
                        append_ssl_profile(profiles, profName)
            # Add appropriate profiles
            profile_http = {'partition': 'Common', 'name': 'http'}
            profile_tcp = {'partition': 'Common', 'name': 'tcp'}
            if str(svc['mode']).lower() == 'http':
                if profile_http not in profiles:
                    profiles.append(profile_http)
            elif get_protocol(svc['mode']) == 'tcp':
                if profile_tcp not in profiles:
                    profiles.append(profile_tcp)
            if ('virtualAddress' in svc and
                    'bindAddr' in svc['virtualAddress']):
                f5_service['virtual_address'] = \
                    svc['virtualAddress']['bindAddr']
                addr = svc['virtualAddress']['bindAddr']
                port = svc['virtualAddress']['port']
                destination = None
                # BIG-IP uses '.' as the address/port separator for IPv6
                # destinations and ':' for IPv4.
                if isinstance(ipaddress.ip_address(addr),
                              ipaddress.IPv6Address):
                    destination = ("/%s/%s.%d" %
                                   (vs_partition, addr, port))
                else:
                    destination = ("/%s/%s:%d" %
                                   (vs_partition, addr, port))
                f5_service['virtual'].update({
                    'enabled': True,
                    'disabled': False,
                    'ipProtocol': get_protocol(svc['mode']),
                    'destination': destination,
                    'pool': "%s" % (svc['pool']),
                    'sourceAddressTranslation': {'type': 'automap'},
                    'profiles': profiles,
                    'policies': policies
                })
        f5_services.update({vs_name: f5_service})
    configuration['virtualServers'] = f5_services
    # FIXME(garyr): CCCL presently expects pools slightly differently than
    # we get from the controller, so convert to the expected format here.
    for pool in f5_pools:
        found_svc = False
        new_pool = {}
        members = {}
        pname = pool['name']
        new_pool['name'] = pname
        monitors = None
        if 'monitor' in pool and pool['monitor']:
            monitors = ' and '.join(pool['monitor'])
        new_pool['monitor'] = monitors
        balance = None
        # Pool names may carry a '_<port>' suffix; also try the base name.
        vname = pname.rsplit('_', 1)[0]
        if pname in f5_services:
            if 'balance' in f5_services[pname]:
                balance = f5_services[pname]['balance']
        elif vname in f5_services:
            if 'balance' in f5_services[vname]:
                balance = f5_services[vname]['balance']
        new_pool['loadBalancingMode'] = balance
        new_pool['partition'] = pool['partition']
        if pool['name'] in f5_services or vname in f5_services:
            if pool['poolMemberAddrs'] is not None:
                found_svc = True
                for member in pool['poolMemberAddrs']:
                    members.update({member: {
                        'state': 'user-up',
                        'session': 'user-enabled'
                    }})
            new_pool['members'] = members
        configuration['pools'].append(new_pool)
        if not found_svc:
            log.info(
                'Pool "{}" has service "{}", which is empty - '
                'configuring 0 pool members.'.format(
                    pname, pool['serviceName']))
    return configuration
if __name__ == "__main__":
    main()  # NOTE(review): `main` is not visible in this chunk; presumably defined elsewhere in the file -- verify
| 36.661972 | 79 | 0.562455 | #!/usr/bin/env python
# Copyright 2016, 2017 F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import argparse
import base64
import fcntl
import hashlib
import ipaddress
import json
import logging
import os
import os.path
import sys
import time
import threading
import signal
import urllib
import pyinotify
from urlparse import urlparse
from f5_cccl._f5 import CloudBigIP, get_protocol, has_partition, log_sequence
from f5_cccl.common import extract_partition_and_name, ipv4_to_mac,\
list_diff_exclusive, IPV4FormatError, PartitionNameError
from f5.bigip import ManagementRoot
log = logging.getLogger(__name__)
console = logging.StreamHandler()
console.setFormatter(
logging.Formatter("[%(asctime)s %(name)s %(levelname)s] %(message)s"))
root_logger = logging.getLogger()
root_logger.addHandler(console)
class ResponseStatusFilter(logging.Filter):
    """Drop noisy log records whose message starts with "RESPONSE::STATUS"."""
    def filter(self, record):
        """Return False (suppress) for RESPONSE::STATUS records."""
        message = record.getMessage()
        return not message.startswith("RESPONSE::STATUS")
class CertFilter(logging.Filter):
    """Suppress log records that would leak certificate material."""
    def filter(self, record):
        """Return True only when the message contains no "CERTIFICATE" text."""
        if "CERTIFICATE" in record.getMessage():
            return False
        return True
class KeyFilter(logging.Filter):
    """Suppress log records that would leak private-key material."""
    def filter(self, record):
        """Return True only when the message contains no "PRIVATE KEY" text."""
        message = record.getMessage()
        return "PRIVATE KEY" not in message
root_logger.addFilter(ResponseStatusFilter())
root_logger.addFilter(CertFilter())
root_logger.addFilter(KeyFilter())
DEFAULT_LOG_LEVEL = logging.INFO
DEFAULT_VERIFY_INTERVAL = 30.0
class K8sCloudBigIP(CloudBigIP):
    """K8sCloudBigIP class.
    Generates a configuration for a BigIP based upon the apps/tasks managed
    by services/pods/nodes in Kubernetes.
    - Matches apps/sevices by BigIP partition
    - Creates a Virtual Server and pool for each service type that matches a
      BigIP partition
    - For each backend (task, node, or pod), it creates a pool member and adds
      the member to the pool
    - If the app has a Marathon Health Monitor configured, create a
      corresponding health monitor for the BigIP pool member
    - Token-based authentication is used by specifying a token named 'tmos'.
      This will allow non-admin users to use the API (BIG-IP must configure
      the accounts with proper permissions, for either local or remote auth).
    Args:
        hostname: IP address of BIG-IP
        port: BIG-IP management port
        username: BIG-IP username
        password: BIG-IP password
        partitions: List of BIG-IP partitions to manage
        manage_types: List of BIG-IP resource types this driver may modify
    """
    def __init__(self, hostname, port, username, password, partitions,
                 manage_types):
        """Initialize the K8sCloudBigIP object."""
        # 'tmos' token auth lets non-admin accounts drive the REST API
        # (see class docstring above).
        super(K8sCloudBigIP, self).__init__(hostname, port, username,
                                            password, partitions,
                                            token="tmos",
                                            manage_types=manage_types)
    def _apply_config(self, config):
        """Apply the configuration to the BIG-IP.
        Args:
            config: BIG-IP config dict with optional 'ltm'/'network' halves
        """
        # LTM resources go through the CCCL base class; the network (FDB)
        # half is handled locally below.
        if 'ltm' in config:
            CloudBigIP._apply_config(self, config['ltm'])
        if 'network' in config:
            self._apply_network_config(config['network'])
    def _apply_network_config(self, config):
        """Apply the network configuration to the BIG-IP.
        Args:
            config: BIG-IP network config dict
        """
        if 'fdb' in config:
            self._apply_network_fdb_config(config['fdb'])
    def _apply_network_fdb_config(self, fdb_config):
        """Apply the network fdb configuration to the BIG-IP.
        Args:
            fdb_config: dict with 'vxlan-name' and 'vxlan-node-ips' entries
        """
        req_vxlan_name = fdb_config['vxlan-name']
        req_fdb_record_endpoint_list = fdb_config['vxlan-node-ips']
        try:
            f5_fdb_record_endpoint_list = self.get_fdb_records(req_vxlan_name)
            log_sequence('req_fdb_record_list', req_fdb_record_endpoint_list)
            log_sequence('f5_fdb_record_list', f5_fdb_record_endpoint_list)
            # See if the list of records is different.
            # If so, update with new list.
            if list_diff_exclusive(f5_fdb_record_endpoint_list,
                                   req_fdb_record_endpoint_list):
                self.fdb_records_update(req_vxlan_name,
                                        req_fdb_record_endpoint_list)
        except (PartitionNameError, IPV4FormatError) as e:
            # Malformed tunnel name or endpoint address in the request;
            # nothing to retry, just report and give up.
            log.error(e)
            return
        except Exception as e:
            log.error('Failed to configure the FDB for VxLAN tunnel '
                      '{}: {}'.format(req_vxlan_name, e))
    def get_vxlan_tunnel(self, vxlan_name):
        """Get a vxlan tunnel object.
        Args:
            vxlan_name: Name of the vxlan tunnel (partition-qualified)
        """
        partition, name = extract_partition_and_name(vxlan_name)
        # Quote the name: tunnel names may contain URL-reserved characters.
        vxlan_tunnel = self.net.fdb.tunnels.tunnel.load(
            partition=partition, name=urllib.quote(name))
        return vxlan_tunnel
    def get_fdb_records(self, vxlan_name):
        """Get a list of FDB records (just the endpoint list) for the vxlan.
        Args:
            vxlan_name: Name of the vxlan tunnel
        """
        endpoint_list = []
        vxlan_tunnel = self.get_vxlan_tunnel(vxlan_name)
        # A tunnel with no FDB entries has no 'records' attribute at all.
        if hasattr(vxlan_tunnel, 'records'):
            for record in vxlan_tunnel.records:
                endpoint_list.append(record['endpoint'])
        return endpoint_list
    def fdb_records_update(self, vxlan_name, endpoint_list):
        """Update the fdb records for a vxlan tunnel.
        Args:
            vxlan_name: Name of the vxlan tunnel
            endpoint_list: endpoint IP addresses to install as fdb records
        """
        vxlan_tunnel = self.get_vxlan_tunnel(vxlan_name)
        data = {'records': []}
        records = data['records']
        for endpoint in endpoint_list:
            # Each record needs a MAC-style name; derive it from the IPv4
            # endpoint address.
            record = {'name': ipv4_to_mac(endpoint), 'endpoint': endpoint}
            records.append(record)
        log.debug("Updating records for vxlan tunnel {}: {}".format(
            vxlan_name, data['records']))
        vxlan_tunnel.update(**data)
class IntervalTimerError(Exception):
    """Raised when an IntervalTimer is constructed with invalid arguments."""
    def __init__(self, msg):
        """Store *msg* as the exception message."""
        super(IntervalTimerError, self).__init__(msg)
class IntervalTimer(object):
    # Repeating timer: invokes `cb` roughly every `interval` seconds on a
    # daemon threading.Timer, subtracting the callback's own run time from
    # the next wait so ticks stay approximately on schedule.
    def __init__(self, interval, cb):
        # Validate arguments up front; float() raises for non-numerics.
        float(interval)
        if 0 >= interval:
            raise IntervalTimerError("interval must be greater than 0")
        if not cb or not callable(cb):
            raise IntervalTimerError("cb must be callable object")
        self._cb = cb                   # callback invoked on each tick
        self._interval = interval       # requested period, seconds
        self._execution_time = 0.0      # duration of the last callback run
        self._running = False
        self._timer = None              # current threading.Timer, if armed
        self._lock = threading.RLock()  # guards all mutable state above
    def _set_execution_time(self, start_time, stop_time):
        # Record how long the callback took; guard against a clock that
        # appears to run backwards.
        if stop_time >= start_time:
            self._execution_time = stop_time - start_time
        else:
            self._execution_time = 0.0
    def _adjust_interval(self):
        # Shorten the next wait by the last callback's duration (floored at
        # zero) and consume the recorded execution time.
        adjusted_interval = self._interval - self._execution_time
        if adjusted_interval < 0.0:
            adjusted_interval = 0.0
        self._execution_time = 0.0
        return adjusted_interval
    def _run(self):
        # Timer-thread body: time the callback, then re-arm if still running.
        # NOTE(review): time.clock() is the Python 2 clock (wall time on
        # Windows, CPU time on POSIX) and was removed in Python 3.8 --
        # confirm the target interpreter before porting.
        start_time = time.clock()
        try:
            self._cb()
        except Exception:
            log.exception('Unexpected error')
        finally:
            with self._lock:
                stop_time = time.clock()
                self._set_execution_time(start_time, stop_time)
                if self._running:
                    self.start()
    def is_running(self):
        # True between start() and stop().
        return self._running
    def start(self):
        # Arm (or re-arm) the timer for the next tick.
        with self._lock:
            if self._running:
                # restart timer, possibly with a new interval
                self.stop()
            self._timer = threading.Timer(self._adjust_interval(), self._run)
            # timers can't be stopped, cancel just prevents the callback from
            # occuring when the timer finally expires. Make it a daemon allows
            # cancelled timers to exit eventually without a need for join.
            self._timer.daemon = True
            self._timer.start()
            self._running = True
    def stop(self):
        # Cancel any pending tick and mark the timer stopped.
        with self._lock:
            if self._running:
                self._timer.cancel()
                self._timer = None
                self._running = False
class ConfigError(Exception):
    """Raised when the driver's configuration file is missing or invalid."""
    def __init__(self, msg):
        """Store *msg* as the exception message."""
        super(ConfigError, self).__init__(msg)
def create_config_kubernetes(bigip, config):
    """Create a BIG-IP configuration from the Kubernetes configuration.
    Args:
        bigip: BIG-IP object used when translating LTM resources
        config: Kubernetes BigIP config
    Returns:
        dict holding the 'ltm' and 'network' halves of the config
    """
    log.debug("Generating config for BIG-IP from Kubernetes state")
    cfg = {'ltm': {}, 'network': {}}
    # OpenShift SDN data (when present) drives the VxLAN/FDB network config.
    if 'openshift-sdn' in config:
        cfg['network'] = create_network_config_kubernetes(config)
    # Virtual-server resources (when present) drive the LTM config.
    resources = config.get('resources', {})
    if 'virtualServers' in resources:
        cfg['ltm'] = create_ltm_config_kubernetes(bigip, resources)
    return cfg
def create_network_config_kubernetes(config):
    """Create a BIG-IP Network configuration from the Kubernetes config.
    Args:
        config: Kubernetes BigIP config which contains openshift-sdn defs
    Returns:
        dict with an 'fdb' entry when openshift-sdn data is present
    """
    network_cfg = {}
    if 'openshift-sdn' in config:
        # The SDN section is passed through verbatim as the FDB config.
        network_cfg['fdb'] = config['openshift-sdn']
    return network_cfg
def append_ssl_profile(profiles, profName):
    """Parse a "partition/name" SSL profile reference and append it.
    Malformed references are logged and skipped rather than appended.
    Args:
        profiles: list of {'partition', 'name'} dicts, mutated in place
        profName: profile reference in "partition/name" form
    """
    parts = profName.split('/')
    if len(parts) == 2:
        partition, name = parts
        profiles.append({'partition': partition,
                         'name': name})
    else:
        log.error("Could not parse partition and name "
                  "from SSL profile: %s", profName)
def create_ltm_config_kubernetes(bigip, config):
    """Create a BIG-IP LTM configuration from the Kubernetes configuration.
    Args:
        bigip: BIG-IP object; supplies the set of managed partitions
        config: Kubernetes BigIP config which contains a svc list
    Returns:
        dict with 'l7Policies', 'monitors', 'pools' and 'virtualServers'
        in the shape CCCL expects
    """
    configuration = {}
    configuration['l7Policies'] = config.get('l7Policies', [])
    configuration['monitors'] = config.get('monitors', [])
    configuration['pools'] = []
    f5_pools = config.get('pools', [])
    f5_services = {}
    # partitions this script is responsible for:
    partitions = frozenset(bigip.get_partitions())
    svcs = config['virtualServers']
    for svc in svcs:
        vs_partition = svc['partition']
        # Only handle application if it's partition is one that this script
        # is responsible for
        if not has_partition(partitions, vs_partition):
            continue
        f5_service = {}
        vs_name = svc['name']
        f5_service['balance'] = svc.get('balance', '')
        policies = svc.get('policies', [])
        profiles = svc.get('profiles', [])
        pool = {}
        # No address for this port
        if (('virtualAddress' not in svc or
                'bindAddr' not in svc['virtualAddress']) and
                'iapp' not in svc):
            log.debug("Creating pool only for %s", vs_name)
        elif ('iapp' not in svc and 'bindAddr' not in
                svc['virtualAddress']):
            continue
        f5_service['name'] = vs_name
        f5_service['partition'] = vs_partition
        if 'iapp' in svc:
            # iApp-based service: pass the template and its inputs through.
            f5_service['iapp'] = {'template': svc['iapp'],
                                  'poolMemberTable':
                                  svc['iappPoolMemberTable'],
                                  'variables': svc['iappVariables'],
                                  'options': svc['iappOptions']}
            f5_service['iapp']['tables'] = svc.get('iappTables', {})
        else:
            f5_service['virtual'] = {}
            f5_service['pool'] = {}
            f5_service['health'] = []
            # Parse the SSL profile into partition and name
            if 'sslProfile' in svc:
                # The sslProfile item can be empty or have either
                # 'f5ProfileName' or 'f5ProfileNames', not both.
                if 'f5ProfileName' in svc['sslProfile']:
                    append_ssl_profile(
                        profiles, svc['sslProfile']['f5ProfileName'])
                elif 'f5ProfileNames' in svc['sslProfile']:
                    for profName in svc['sslProfile']['f5ProfileNames']:
                        append_ssl_profile(profiles, profName)
            # Add appropriate profiles
            profile_http = {'partition': 'Common', 'name': 'http'}
            profile_tcp = {'partition': 'Common', 'name': 'tcp'}
            if str(svc['mode']).lower() == 'http':
                if profile_http not in profiles:
                    profiles.append(profile_http)
            elif get_protocol(svc['mode']) == 'tcp':
                if profile_tcp not in profiles:
                    profiles.append(profile_tcp)
            if ('virtualAddress' in svc and
                    'bindAddr' in svc['virtualAddress']):
                f5_service['virtual_address'] = \
                    svc['virtualAddress']['bindAddr']
                addr = svc['virtualAddress']['bindAddr']
                port = svc['virtualAddress']['port']
                destination = None
                # BIG-IP destination syntax: IPv6 separates address and
                # port with '.', IPv4 with ':'.
                if isinstance(ipaddress.ip_address(addr),
                              ipaddress.IPv6Address):
                    destination = ("/%s/%s.%d" %
                                   (vs_partition, addr, port))
                else:
                    destination = ("/%s/%s:%d" %
                                   (vs_partition, addr, port))
                f5_service['virtual'].update({
                    'enabled': True,
                    'disabled': False,
                    'ipProtocol': get_protocol(svc['mode']),
                    'destination': destination,
                    'pool': "%s" % (svc['pool']),
                    'sourceAddressTranslation': {'type': 'automap'},
                    'profiles': profiles,
                    'policies': policies
                })
        f5_services.update({vs_name: f5_service})
    configuration['virtualServers'] = f5_services
    # FIXME(garyr): CCCL presently expects pools slightly differently than
    # we get from the controller, so convert to the expected format here.
    for pool in f5_pools:
        found_svc = False
        new_pool = {}
        members = {}
        pname = pool['name']
        new_pool['name'] = pname
        monitors = None
        if 'monitor' in pool and pool['monitor']:
            monitors = ' and '.join(pool['monitor'])
        new_pool['monitor'] = monitors
        balance = None
        # The owning virtual-server name is the pool name minus its last
        # '_'-separated suffix.
        vname = pname.rsplit('_', 1)[0]
        if pname in f5_services:
            if 'balance' in f5_services[pname]:
                balance = f5_services[pname]['balance']
        elif vname in f5_services:
            if 'balance' in f5_services[vname]:
                balance = f5_services[vname]['balance']
        new_pool['loadBalancingMode'] = balance
        new_pool['partition'] = pool['partition']
        if pool['name'] in f5_services or vname in f5_services:
            if pool['poolMemberAddrs'] is not None:
                found_svc = True
                for member in pool['poolMemberAddrs']:
                    members.update({member: {
                        'state': 'user-up',
                        'session': 'user-enabled'
                    }})
        new_pool['members'] = members
        configuration['pools'].append(new_pool)
        if not found_svc:
            log.info(
                'Pool "{}" has service "{}", which is empty - '
                'configuring 0 pool members.'.format(
                    pname, pool['serviceName']))
    return configuration
def _create_client_ssl_profile(bigip, profile):
    """Create a client-ssl profile (cert + key pair) on the BIG-IP.
    The cert and key are base64-encoded in *profile*; they are uploaded
    in-memory, installed into the BIG-IP crypto store, and combined into
    a client-ssl profile.  Existing profiles are left untouched.
    """
    # bigip object is of type f5.bigip.tm;
    # we need f5.bigip.shared for the uploader
    mgmt = ManagementRoot(bigip.hostname, bigip._username, bigip._password)
    uploader = mgmt.shared.file_transfer.uploads
    cert_registrar = bigip.sys.crypto.certs
    key_registrar = bigip.sys.crypto.keys
    ssl_client_profile = bigip.ltm.profile.client_ssls.client_ssl
    name = profile['name']
    partition = profile['partition']
    cert = base64.b64decode(profile['cert'])
    key = base64.b64decode(profile['key'])
    # No need to create if it exists
    if ssl_client_profile.exists(name=name, partition=partition):
        return
    certfilename = name + '.crt'
    keyfilename = name + '.key'
    try:
        # In-memory upload -- data not written to local file system but
        # is saved as a file on the BIG-IP
        uploader.upload_bytes(cert, certfilename)
        uploader.upload_bytes(key, keyfilename)
        # import certificate
        param_set = {}
        param_set['name'] = certfilename
        param_set['from-local-file'] = os.path.join(
            '/var/config/rest/downloads', certfilename)
        cert_registrar.exec_cmd('install', **param_set)
        # import key
        param_set['name'] = keyfilename
        param_set['from-local-file'] = os.path.join(
            '/var/config/rest/downloads', keyfilename)
        key_registrar.exec_cmd('install', **param_set)
        # create ssl-client profile from cert/key pair
        chain = [{'name': name,
                  'cert': '/Common/' + certfilename,
                  'key': '/Common/' + keyfilename}]
        ssl_client_profile.create(name=name,
                                  partition=partition,
                                  certKeyChain=chain,
                                  sniDefault=False,
                                  defaultsFrom=None)
    except Exception as err:
        # NOTE(review): err.message is Python 2 only; this would need
        # str(err) if ever ported to Python 3.
        log.error("Error creating SSL profile: %s" % err.message)
def _delete_client_ssl_profiles(bigip, config):
    """Delete client-ssl profiles from managed partitions.
    With no 'customProfiles' in *config*, every profile in every managed
    partition is deleted; otherwise only profiles absent from the desired
    'customProfiles' list are removed.
    """
    if 'customProfiles' not in config:
        # delete any profiles in managed partitions
        for partition in bigip._partitions:
            for prof in bigip.ltm.profile.client_ssls.get_collection(
                    requests_params={'params': '$filter=partition+eq+%s'
                                     % partition}):
                prof.delete()
    else:
        # delete profiles no longer in our config
        for partition in bigip._partitions:
            for prof in bigip.ltm.profile.client_ssls.get_collection(
                    requests_params={'params': '$filter=partition+eq+%s'
                                     % partition}):
                if not any(d['name'] == prof.name and
                           d['partition'] == partition
                           for d in config['customProfiles']):
                    prof.delete()
class ConfigHandler():
    # Background worker that applies config-file changes to the BIG-IP.
    # A dedicated thread waits on a condition variable; notify_reset()
    # wakes it to re-read the config file and push the result.  A periodic
    # IntervalTimer re-verifies, and failed pushes are retried with
    # exponential backoff (1s doubling up to 128s).
    def __init__(self, config_file, bigip, verify_interval):
        self._config_file = config_file
        self._bigip = bigip
        self._condition = threading.Condition()
        self._thread = threading.Thread(target=self._do_reset)
        self._pending_reset = False
        self._stop = False
        # Exponential-backoff state for failed BIG-IP updates.
        self._backoff_time = 1
        self._backoff_timer = None
        self._max_backoff_time = 128
        self._interval = None
        self._verify_interval = 0
        self.set_interval_timer(verify_interval)
        self._thread.start()
    def set_interval_timer(self, verify_interval):
        # (Re)create the periodic verify timer when the interval changes.
        # A non-positive interval disables periodic verification.
        if verify_interval != self._verify_interval:
            if self._interval is not None:
                self._interval.stop()
                self._interval = None
            self._verify_interval = verify_interval
            if self._verify_interval > 0:
                self._interval = IntervalTimer(self._verify_interval,
                                               self.notify_reset)
    def stop(self):
        # Ask the worker thread to exit and cancel any pending backoff.
        self._condition.acquire()
        self._stop = True
        self._condition.notify()
        self._condition.release()
        if self._backoff_timer is not None:
            self.cleanup_backoff()
    def notify_reset(self):
        # Wake the worker thread to re-read and re-apply the config.
        self._condition.acquire()
        self._pending_reset = True
        self._condition.notify()
        self._condition.release()
    def _do_reset(self):
        # Worker-thread main loop: sleep until notified (or stopped), then
        # parse the config file and regenerate the BIG-IP configuration.
        log.debug('config handler thread start')
        with self._condition:
            # customProfiles is true when we've written out a custom profile.
            # Once we know we've written out a profile, we can call delete
            # if needed.
            customProfiles = False
        while True:
            self._condition.acquire()
            if not self._pending_reset and not self._stop:
                self._condition.wait()
            log.debug('config handler woken for reset')
            self._pending_reset = False
            self._condition.release()
            if self._stop:
                log.info('stopping config handler')
                if self._backoff_timer is not None:
                    self.cleanup_backoff()
                break
            try:
                start_time = time.time()
                config = _parse_config(self._config_file)
                # Skip configs with no 'resources' section.
                if 'resources' not in config:
                    continue
                verify_interval, _ = _handle_global_config(config)
                _handle_openshift_sdn_config(config)
                self.set_interval_timer(verify_interval)
                # Manually create custom profiles;CCCL doesn't yet do this
                if 'customProfiles' in config['resources']:
                    for profile in config['resources']['customProfiles']:
                        _create_client_ssl_profile(self._bigip, profile)
                        customProfiles = True
                cfg = create_config_kubernetes(self._bigip, config)
                if self._bigip.regenerate_config_f5(cfg):
                    # Error occurred, perform retries
                    log.warning(
                        'regenerate operation failed, restarting')
                    self.handle_backoff()
                else:
                    # Success: restart periodic verification and reset the
                    # backoff schedule.
                    if (self._interval and self._interval.is_running() is
                            False):
                        self._interval.start()
                    self._backoff_time = 1
                    if self._backoff_timer is not None:
                        self.cleanup_backoff()
                    # Manually delete custom profiles (if needed)
                    if customProfiles:
                        _delete_client_ssl_profiles(self._bigip,
                                                    config['resources'])
                # Optional scale-test instrumentation, enabled via env var.
                perf_enable = os.environ.get('SCALE_PERF_ENABLE')
                if perf_enable:  # pragma: no cover
                    test_data = {}
                    app_count = 0
                    backend_count = 0
                    for service in config['resources'][
                            'virtualServers']:
                        app_count += 1
                        backends = 0
                        for pool in config['resources']['pools']:
                            if pool['name'] == service['name']:
                                backends = len(pool['poolMemberAddrs'])
                                break
                        test_data[service['name']] = backends
                        backend_count += backends
                    test_data['Total_Services'] = app_count
                    test_data['Total_Backends'] = backend_count
                    test_data['Time'] = time.time()
                    json_data = json.dumps(test_data)
                    log.info('SCALE_PERF: Test data: %s', json_data)
                log.debug('updating tasks finished, took %s seconds',
                          time.time() - start_time)
            except Exception:
                log.exception('Unexpected error')
                self.handle_backoff()
        if self._interval:
            self._interval.stop()
    def cleanup_backoff(self):
        """Cleans up canceled backoff timers."""
        self._backoff_timer.cancel()
        self._backoff_timer.join()
        self._backoff_timer = None
    def handle_backoff(self):
        """Wrapper for calls to retry_backoff."""
        # Suspend periodic verification while a retry is pending.
        if (self._interval and self._interval.is_running() is
                True):
            self._interval.stop()
        if self._backoff_timer is None:
            self.retry_backoff()
    def retry_backoff(self):
        """Add a backoff timer to retry in case of failure."""
        def timer_cb():
            self._backoff_timer = None
            self.notify_reset()
        self._backoff_timer = threading.Timer(
            self._backoff_time, timer_cb
        )
        log.error("Error applying config, will try again in %s seconds",
                  self._backoff_time)
        self._backoff_timer.start()
        # Double the delay for the next failure, capped at the maximum.
        if self._backoff_time < self._max_backoff_time:
            self._backoff_time *= 2
class ConfigWatcher(pyinotify.ProcessEvent):
    # Watches the config file with inotify and calls on_change() when its
    # content actually changes (an md5 of the file suppresses spurious
    # events).  Falls back to polling when the parent directory does not
    # exist or disappears.
    def __init__(self, config_file, bigip, on_change):
        basename = os.path.basename(config_file)
        if not basename or 0 == len(basename):
            raise ConfigError('config_file must be a file path')
        self._config_file = config_file
        self._bigip = bigip
        self._on_change = on_change
        self._config_dir = os.path.dirname(self._config_file)
        self._config_stats = None    # md5 digest of the last-seen content
        if os.path.exists(self._config_file):
            try:
                self._config_stats = self._md5()
            except IOError as ioe:
                log.warning('ioerror during md5 sum calculation: {}'.
                            format(ioe))
        self._running = False
        self._polling = False
        self._user_abort = False
        # Terminate the watch loop cleanly on Ctrl-C / SIGTERM.
        signal.signal(signal.SIGINT, self._exit_gracefully)
        signal.signal(signal.SIGTERM, self._exit_gracefully)
    def _exit_gracefully(self, signum, frame):
        # Signal handler: flag the loop to stop.
        self._user_abort = True
        self._running = False
    def _loop_check(self, notifier):
        # pyinotify loop callback; returning True ends the inotify loop so
        # we can go back to polling for the directory.
        if self._polling:
            log.debug('inotify loop ended - returning to polling mode')
            return True
        else:
            return False
    def loop(self):
        # Run until stopped: poll for the config directory to exist, then
        # watch it with inotify; repeat if the watchpoint is lost.
        self._running = True
        if not os.path.exists(self._config_dir):
            log.info(
                'configured directory doesn\'t exist {}, entering poll loop'.
                format(self._config_dir))
            self._polling = True
        while self._running:
            try:
                while self._polling:
                    if self._polling:
                        if os.path.exists(self._config_dir):
                            log.debug('found watchable directory - {}'.format(
                                self._config_dir))
                            self._polling = False
                            break
                        else:
                            log.debug('waiting for watchable directory - {}'.
                                      format(self._config_dir))
                            time.sleep(1)
                _wm = pyinotify.WatchManager()
                _notifier = pyinotify.Notifier(_wm, default_proc_fun=self)
                _notifier.coalesce_events(True)
                mask = (pyinotify.IN_CREATE | pyinotify.IN_DELETE |
                        pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO |
                        pyinotify.IN_CLOSE_WRITE | pyinotify.IN_MOVE_SELF |
                        pyinotify.IN_DELETE_SELF)
                _wm.add_watch(
                    path=self._config_dir,
                    mask=mask,
                    quiet=False,
                    exclude_filter=lambda path: False)
                log.info('entering inotify loop to watch {}'.format(
                    self._config_file))
                _notifier.loop(callback=self._loop_check)
                if (not self._polling and _notifier._fd is None):
                    log.info('terminating')
                    self._running = False
            except Exception as e:
                log.warning(e)
        if self._user_abort:
            log.info('Received user kill signal, terminating.')
    def _md5(self):
        # Hash the config file under a shared advisory lock so a concurrent
        # writer (holding an exclusive lock) cannot give us a torn read.
        md5 = hashlib.md5()
        with open(self._config_file, 'rb') as f:
            fcntl.lockf(f.fileno(), fcntl.LOCK_SH, 0, 0, 0)
            while True:
                buf = f.read(4096)
                if not buf:
                    break
                md5.update(buf)
            fcntl.lockf(f.fileno(), fcntl.LOCK_UN, 0, 0, 0)
        return md5.digest()
    def _should_watch(self, pathname):
        # Only events for the config file itself are interesting.
        if pathname == self._config_file:
            return True
        return False
    def _is_changed(self):
        # Return (changed, cur_hash): whether the file content differs from
        # the last recorded md5.  A now-missing file counts as changed when
        # we previously had content.
        changed = False
        cur_hash = None
        if not os.path.exists(self._config_file):
            if cur_hash != self._config_stats:
                changed = True
            else:
                changed = False
        else:
            try:
                cur_hash = self._md5()
                if cur_hash != self._config_stats:
                    changed = True
                else:
                    changed = False
            except IOError as ioe:
                log.warning('ioerror during md5 sum calculation: {}'.
                            format(ioe))
        return (changed, cur_hash)
    def process_default(self, event):
        # pyinotify callback for every event: handle loss of the watchpoint
        # (fall back to polling) and real content changes (fire on_change).
        if (pyinotify.IN_DELETE_SELF == event.mask or
                pyinotify.IN_MOVE_SELF == event.mask):
            log.warn(
                'watchpoint {} has been moved or destroyed, using poll loop'.
                format(self._config_dir))
            self._polling = True
            if self._config_stats is not None:
                log.debug('config file {} changed, parent gone'.format(
                    self._config_file))
                self._config_stats = None
                self._on_change()
        if self._should_watch(event.pathname):
            (changed, md5) = self._is_changed()
            if changed:
                log.debug('config file {0} changed - signalling bigip'.format(
                    self._config_file, self._config_stats, md5))
                self._config_stats = md5
                self._on_change()
def _parse_config(config_file):
if os.path.exists(config_file):
with open(config_file, 'r') as config:
fcntl.lockf(config.fileno(), fcntl.LOCK_SH, 0, 0, 0)
config_json = json.load(config)
fcntl.lockf(config.fileno(), fcntl.LOCK_UN, 0, 0, 0)
log.debug('loaded configuration file successfully')
return config_json
else:
return None
def _handle_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--config-file',
type=str,
required=True,
help='BigIp configuration file')
args = parser.parse_args()
basename = os.path.basename(args.config_file)
if not basename or 0 == len(basename):
raise ConfigError('must provide a file path')
args.config_file = os.path.realpath(args.config_file)
return args
def _handle_global_config(config):
    """Apply the optional 'global' config section (log level, verify interval).
    Args:
        config: parsed config dict (may be None or lack a 'global' section)
    Returns:
        (verify_interval, level) tuple; level is only needed for unit tests
    """
    level = DEFAULT_LOG_LEVEL
    verify_interval = DEFAULT_VERIFY_INTERVAL
    if config and 'global' in config:
        global_cfg = config['global']
        if 'log-level' in global_cfg:
            log_level = global_cfg['log-level']
            try:
                # Non-string values have no .upper() -> AttributeError.
                level = logging.getLevelName(log_level.upper())
            except (AttributeError):
                log.warn('The "global:log-level" field in the configuration '
                         'file should be a string')
        if 'verify-interval' in global_cfg:
            try:
                verify_interval = float(global_cfg['verify-interval'])
                if verify_interval < 0:
                    verify_interval = DEFAULT_VERIFY_INTERVAL
                    log.warn('The "global:verify-interval" field in the '
                             'configuration file should be a non-negative '
                             'number')
            except (ValueError):
                log.warn('The "global:verify-interval" field in the '
                         'configuration file should be a number')
    try:
        root_logger.setLevel(level)
        if level > logging.DEBUG:
            logging.getLogger('requests.packages.urllib3.'
                              'connectionpool').setLevel(logging.WARNING)
    except Exception:
        # getLevelName() returns a string like "Level FOO" for unknown
        # names, which makes setLevel()/the comparison above blow up; fall
        # back to the default level.  (Was a bare "except:", which also
        # swallowed SystemExit and KeyboardInterrupt.)
        level = DEFAULT_LOG_LEVEL
        root_logger.setLevel(level)
        if level > logging.DEBUG:
            logging.getLogger('requests.packages.urllib3.'
                              'connectionpool').setLevel(logging.WARNING)
        log.warn('Undefined value specified for the '
                 '"global:log-level" field in the configuration file')
    # level only is needed for unit tests
    return verify_interval, level
def _handle_bigip_config(config):
if (not config) or ('bigip' not in config):
raise ConfigError('Configuration file missing "bigip" section')
bigip = config['bigip']
if 'username' not in bigip:
raise ConfigError('Configuration file missing '
'"bigip:username" section')
if 'password' not in bigip:
raise ConfigError('Configuration file missing '
'"bigip:password" section')
if 'url' not in bigip:
raise ConfigError('Configuration file missing "bigip:url" section')
if ('partitions' not in bigip) or (len(bigip['partitions']) == 0):
raise ConfigError('Configuration file must specify at least one '
'partition in the "bigip:partitions" section')
url = urlparse(bigip['url'])
host = url.hostname
port = url.port
if not port:
port = 443
return host, port
def _handle_openshift_sdn_config(config):
if config and 'openshift-sdn' in config:
sdn = config['openshift-sdn']
if 'vxlan-name' not in sdn:
raise ConfigError('Configuration file missing '
'"openshift-sdn:vxlan-name" section')
if 'vxlan-node-ips' not in sdn:
raise ConfigError('Configuration file missing '
'"openshift-sdn:vxlan-node-ips" section')
def main():
    """Entry point: validate config, build the BIG-IP driver, watch the file.
    Exits with status 1 on configuration or unexpected errors.
    """
    try:
        args = _handle_args()
        config = _parse_config(args.config_file)
        verify_interval, _ = _handle_global_config(config)
        host, port = _handle_bigip_config(config)
        # FIXME (kenr): Big-IP settings are currently static (we ignore any
        #               changes to these fields in subsequent updates). We
        #               may want to make the changes dynamic in the future.
        bigip = K8sCloudBigIP(host, port,
                              config['bigip']['username'],
                              config['bigip']['password'],
                              config['bigip']['partitions'],
                              manage_types=[
                                  '/tm/ltm/virtual',
                                  '/tm/ltm/pool',
                                  '/tm/ltm/monitor',
                                  '/tm/sys/application/service',
                                  '/tm/ltm/policy'])
        handler = ConfigHandler(args.config_file, bigip, verify_interval)
        # Apply the initial configuration (if present) before watching.
        if os.path.exists(args.config_file):
            handler.notify_reset()
        watcher = ConfigWatcher(args.config_file, bigip, handler.notify_reset)
        watcher.loop()
        handler.stop()
    except (IOError, ValueError, ConfigError) as e:
        log.error(e)
        sys.exit(1)
    except Exception:
        log.exception('Unexpected error')
        sys.exit(1)
    return 0
if __name__ == "__main__":
main()
| 21,195 | 1,155 | 924 |
bc89cfbc8e32ea0a9edc5ecb60c9f0aad0a8306a | 7,098 | py | Python | Scripts/SeaIce/NSIDCseaice_quartiles.py | dargueso/IceVarFigs | 6c76cdde6f51cecf73beaaca83c7a5db51864bfd | [
"MIT"
] | 1 | 2019-02-02T06:12:01.000Z | 2019-02-02T06:12:01.000Z | Scripts/SeaIce/NSIDCseaice_quartiles.py | jq222/IceVarFigs | d9b1bb3ac09a9dfd097e72b0dba78276b7e251e4 | [
"MIT"
] | null | null | null | Scripts/SeaIce/NSIDCseaice_quartiles.py | jq222/IceVarFigs | d9b1bb3ac09a9dfd097e72b0dba78276b7e251e4 | [
"MIT"
] | null | null | null | """
Reads in current year's Arctic sea ice extent from Sea Ice Index 3 (NSIDC)
Website : ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/
Author : Zachary M. Labe
Date : 5 September 2016
"""
### Import modules
import numpy as np
import urllib as UL
import datetime
import matplotlib.pyplot as plt
### Directory and time
# Output directory for the finished figure.
directoryfigure = '/home/zlabe/Documents/Projects/IceVarFigs/Figures/'
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
# Day of year (1-366), used below to index the daily climatology.
currentdoy = now.timetuple().tm_yday
### Load url
url = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/' \
    'N_seaice_extent_daily_v3.0.csv'
### Read file
# NOTE(review): UL.request assumes urllib.request is reachable as an
# attribute of urllib -- confirm the intended interpreter/import style.
raw_data = UL.request.urlopen(url)
# Columns: year, month, day, extent (1e6 km^2), missing flag.
dataset = np.genfromtxt(raw_data, skip_header=2,delimiter=',',
                        usecols=[0,1,2,3,4])
print('\nCompleted: Read sea ice data!')
### Set missing data to nan
dataset[np.where(dataset==-9999)] = np.nan
### Variables
year = dataset[:,0]
month = dataset[:,1]
day = dataset[:,2]
ice = dataset[:,3]
missing = dataset[:,4]
### Call present year
yr2018 = np.where(year == 2018)[0]
ice18 = ice[yr2018]
### Ice Conversion
# Convert extent from 1e6 km^2 to km^2 for printing.
iceval = ice18 * 1e6
### Printing info
print('\n----- NSIDC Arctic Sea Ice -----')
print('Current Date =', now.strftime("%Y-%m-%d %H:%M"), '\n')
print('SIE Date = %s/%s/%s' % (int(month[-1]),int(day[-1]),int(year[-1])))
print('Current SIE = %s km^2 \n' % (iceval[-1]))
print('1-day change SIE = %s km^2' % (iceval[-1]-iceval[-2]))
print('7-day change SIE = %s km^2 \n' % (iceval[-1]-iceval[-8]))
###########################################################################
###########################################################################
###########################################################################
### Reads in 1981-2010 means
### Load url
url2 = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/' \
    'N_seaice_extent_climatology_1981-2010_v3.0.csv'
### Read file
raw_data2 = UL.request.urlopen(url2)
# Columns: doy, mean, std, then 10/25/50/75/90th percentile extents.
dataset2 = np.genfromtxt(raw_data2, skip_header=2,delimiter=',',
                         usecols=[0,1,2,3,4,5,6,7])
### Create variables
doy = dataset2[:,0]
meanice = dataset2[:,1] * 1e6
std = dataset2[:,2]
### Quartiles
quartile10 = dataset2[:,3]
quartile25 = dataset2[:,4]
quartile50 = dataset2[:,5]
quartile75 = dataset2[:,6]
quartile90 = dataset2[:,7]
### Anomalies
# Latest extent minus the 1981-2010 mean for (approximately) today.
currentanom = iceval[-1]-meanice[currentdoy-2]
### Printing info
print('Current anomaly = %s km^2 \n' % currentanom)
### Selected other years for comparisons
yr2007 = np.where(year == 2007)[0]
yr2012 = np.where(year == 2012)[0]
yr2016 = np.where(year == 2016)[0]
sie7 = ice[yr2007]
sie12 = ice[yr2012]
sie16 = ice[yr2016]
###########################################################################
###########################################################################
###########################################################################
### Create plot
# Dark-background figure styled via rc parameters (LaTeX text rendering).
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
plt.rc('savefig',facecolor='black')
plt.rc('axes',edgecolor='white')
plt.rc('xtick',color='white')
plt.rc('ytick',color='white')
plt.rc('axes',labelcolor='white')
plt.rc('axes',facecolor='black')
fig = plt.figure()
ax = plt.subplot(111)
xlabels = [r'Jan',r'Feb',r'Mar',r'Apr',r'May',r'Jun',r'Jul',
           r'Aug',r'Sep',r'Oct',r'Nov',r'Dec',r'Jan']
# 30.4-day ticks approximate month boundaries on a day-of-year axis.
plt.xticks(np.arange(0,361,30.4),xlabels,rotation=0)
ylabels = map(str,np.arange(2,19,2))
plt.yticks(np.arange(2,19,2),ylabels)
plt.ylim([2,18])
plt.xlim([0,360])
strmonth = xlabels[int(currentmn)-1]
asof = strmonth + ' ' + currentdy + ', ' + currentyr
### Adjust axes in time series plots
ax.tick_params('both',length=5.5,width=2,which='major')
# NOTE(review): adjust_spines is not defined in this file -- presumably a
# helper dropped during extraction; confirm against the original source.
adjust_spines(ax, ['left','bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
upper2std = (meanice/1e6)+(std*2)
lower2std = (meanice/1e6)-(std*2)
ax.grid(zorder=1,color='w',alpha=0.2)
# Current-year line plus climatology envelope (2-sigma and percentiles).
plt.plot(ice18,linewidth=1.8,color='aqua',zorder=9,label=r'Current Year (2018)')
plt.plot(doy,upper2std,color='white',alpha=0.7,zorder=3,linewidth=0.1)
plt.plot(doy,lower2std,color='white',alpha=0.7,zorder=4,linewidth=0.1)
plt.plot(doy,quartile10,color='m',alpha=0.7,zorder=3,linewidth=0.4)
plt.plot(doy,quartile25,color='cornflowerblue',alpha=0.7,zorder=4,linewidth=0.4)
plt.plot(doy,quartile75,color='cornflowerblue',alpha=0.7,zorder=4,linewidth=0.4)
plt.plot(doy,quartile90,color='m',alpha=0.7,zorder=3,linewidth=0.4)
ax.fill_between(doy, lower2std, upper2std, facecolor='white', alpha=0.35,
                label=r'$\pm$2 standard deviations',zorder=2)
plt.plot(doy,quartile50,color='gold',alpha=1,zorder=3,linewidth=2,
         label=r'Median (1981-2010)')
ax.fill_between(doy, quartile90, quartile75, facecolor='m', alpha=0.55,
                label=r'10-90th percentiles',zorder=2)
ax.fill_between(doy, quartile10, quartile25, facecolor='m', alpha=0.55,
                zorder=2)
ax.fill_between(doy, quartile25, quartile50, facecolor='cornflowerblue', alpha=0.6,
                zorder=2)
ax.fill_between(doy, quartile50, quartile75, facecolor='cornflowerblue', alpha=0.6,
                label=r'25-75th percentiles',zorder=2)
# Mark the most recent observation on the current-year curve.
plt.scatter(doy[currentdoy-3],ice[-1],s=10,color='aqua',zorder=9)
plt.ylabel(r'\textbf{Extent} [$\times$10$^{6}$ km$^2$]',fontsize=15,
           color='darkgrey')
le = plt.legend(shadow=False,fontsize=6,loc='upper left',
                bbox_to_anchor=(0.473, 1.011),fancybox=True,ncol=2)
for text in le.get_texts():
    text.set_color('w')
plt.title(r'\textbf{ARCTIC SEA ICE}',
          fontsize=21,color='darkgrey')
plt.text(doy[currentdoy]-5,ice[-1]-1.35,r'\textbf{2018}',
         fontsize=13.5,rotation='horizontal',ha='left',color='aqua')
plt.text(0.5,3.1,r'\textbf{DATA:} National Snow \& Ice Data Center, Boulder CO',
         fontsize=5.5,rotation='horizontal',ha='left',color='darkgrey')
plt.text(0.5,2.6,r'\textbf{SOURCE:} ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/',
         fontsize=5.5,rotation='horizontal',ha='left',color='darkgrey')
plt.text(0.5,2.1,r'\textbf{GRAPHIC:} Zachary Labe (@ZLabe)',
         fontsize=5.5,rotation='horizontal',ha='left',color='darkgrey')
fig.subplots_adjust(top=0.91)
### Save figure
plt.savefig(directoryfigure + 'nsidc_sie_quartiles_currentyear.png',dpi=300) | 34.965517 | 85 | 0.606509 | """
Reads in current year's Arctic sea ice extent from Sea Ice Index 3 (NSIDC)
Website : ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/
Author : Zachary M. Labe
Date : 5 September 2016
"""
### Import modules
import numpy as np
import urllib as UL
import datetime
import matplotlib.pyplot as plt
### Directory and time
directoryfigure = '/home/zlabe/Documents/Projects/IceVarFigs/Figures/'
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
currentdoy = now.timetuple().tm_yday
### Load url
url = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/' \
    'N_seaice_extent_daily_v3.0.csv'
### Read file
# Daily Sea Ice Index v3 CSV; columns used: year, month, day, extent, missing.
raw_data = UL.request.urlopen(url)
dataset = np.genfromtxt(raw_data, skip_header=2,delimiter=',',
                        usecols=[0,1,2,3,4])
print('\nCompleted: Read sea ice data!')
### Set missing data to nan
dataset[np.where(dataset==-9999)] = np.nan
### Variables
year = dataset[:,0]
month = dataset[:,1]
day = dataset[:,2]
ice = dataset[:,3]
missing = dataset[:,4]
### Call present year
yr2018 = np.where(year == 2018)[0]
ice18 = ice[yr2018]
### Ice Conversion
# Extent column is in 1e6 km^2 (see ylabel below); convert to km^2 for printing.
iceval = ice18 * 1e6
### Printing info
print('\n----- NSIDC Arctic Sea Ice -----')
print('Current Date =', now.strftime("%Y-%m-%d %H:%M"), '\n')
print('SIE Date = %s/%s/%s' % (int(month[-1]),int(day[-1]),int(year[-1])))
print('Current SIE = %s km^2 \n' % (iceval[-1]))
print('1-day change SIE = %s km^2' % (iceval[-1]-iceval[-2]))
print('7-day change SIE = %s km^2 \n' % (iceval[-1]-iceval[-8]))
###########################################################################
###########################################################################
###########################################################################
### Reads in 1981-2010 means
### Load url
url2 = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/' \
    'N_seaice_extent_climatology_1981-2010_v3.0.csv'
### Read file
# Climatology CSV columns: day-of-year, mean, std, 10/25/50/75/90th percentiles.
raw_data2 = UL.request.urlopen(url2)
dataset2 = np.genfromtxt(raw_data2, skip_header=2,delimiter=',',
                        usecols=[0,1,2,3,4,5,6,7])
### Create variables
doy = dataset2[:,0]
meanice = dataset2[:,1] * 1e6
std = dataset2[:,2]
### Quartiles
quartile10 = dataset2[:,3]
quartile25 = dataset2[:,4]
quartile50 = dataset2[:,5]
quartile75 = dataset2[:,6]
quartile90 = dataset2[:,7]
### Anomalies
currentanom = iceval[-1]-meanice[currentdoy-2]
### Printing info
print('Current anomaly = %s km^2 \n' % currentanom)
### Selected other years for comparisons
yr2007 = np.where(year == 2007)[0]
yr2012 = np.where(year == 2012)[0]
yr2016 = np.where(year == 2016)[0]
sie7 = ice[yr2007]
sie12 = ice[yr2012]
sie16 = ice[yr2016]
###########################################################################
###########################################################################
###########################################################################
### Create plot
# Dark-background styling: white ticks/labels on a black figure.
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
plt.rc('savefig',facecolor='black')
plt.rc('axes',edgecolor='white')
plt.rc('xtick',color='white')
plt.rc('ytick',color='white')
plt.rc('axes',labelcolor='white')
plt.rc('axes',facecolor='black')
fig = plt.figure()
ax = plt.subplot(111)
xlabels = [r'Jan',r'Feb',r'Mar',r'Apr',r'May',r'Jun',r'Jul',
          r'Aug',r'Sep',r'Oct',r'Nov',r'Dec',r'Jan']
plt.xticks(np.arange(0,361,30.4),xlabels,rotation=0)
# NOTE(review): map() returns an iterator in Python 3 — confirm plt.yticks
# accepts it here; list(...) would be unambiguous.
ylabels = map(str,np.arange(2,19,2))
plt.yticks(np.arange(2,19,2),ylabels)
plt.ylim([2,18])
plt.xlim([0,360])
strmonth = xlabels[int(currentmn)-1]
asof = strmonth + ' ' + currentdy + ', ' + currentyr
### Adjust axes in time series plots
def adjust_spines(ax, spines):
    """Offset the listed spines outward and hide ticks on the other axes."""
    for loc, spine in ax.spines.items():
        if loc in spines:
            spine.set_position(('outward', 5))
        else:
            spine.set_color('none')
    if 'left' in spines:
        ax.yaxis.set_ticks_position('left')
    else:
        ax.yaxis.set_ticks([])
    if 'bottom' in spines:
        ax.xaxis.set_ticks_position('bottom')
    else:
        ax.xaxis.set_ticks([])
    ax.tick_params('both',length=5.5,width=2,which='major')
adjust_spines(ax, ['left','bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
# +/- 2 standard deviation envelope around the 1981-2010 mean (in 1e6 km^2).
upper2std = (meanice/1e6)+(std*2)
lower2std = (meanice/1e6)-(std*2)
ax.grid(zorder=1,color='w',alpha=0.2)
plt.plot(ice18,linewidth=1.8,color='aqua',zorder=9,label=r'Current Year (2018)')
plt.plot(doy,upper2std,color='white',alpha=0.7,zorder=3,linewidth=0.1)
plt.plot(doy,lower2std,color='white',alpha=0.7,zorder=4,linewidth=0.1)
plt.plot(doy,quartile10,color='m',alpha=0.7,zorder=3,linewidth=0.4)
plt.plot(doy,quartile25,color='cornflowerblue',alpha=0.7,zorder=4,linewidth=0.4)
plt.plot(doy,quartile75,color='cornflowerblue',alpha=0.7,zorder=4,linewidth=0.4)
plt.plot(doy,quartile90,color='m',alpha=0.7,zorder=3,linewidth=0.4)
ax.fill_between(doy, lower2std, upper2std, facecolor='white', alpha=0.35,
                label=r'$\pm$2 standard deviations',zorder=2)
plt.plot(doy,quartile50,color='gold',alpha=1,zorder=3,linewidth=2,
         label=r'Median (1981-2010)')
ax.fill_between(doy, quartile90, quartile75, facecolor='m', alpha=0.55,
                label=r'10-90th percentiles',zorder=2)
ax.fill_between(doy, quartile10, quartile25, facecolor='m', alpha=0.55,
                zorder=2)
ax.fill_between(doy, quartile25, quartile50, facecolor='cornflowerblue', alpha=0.6,
                zorder=2)
ax.fill_between(doy, quartile50, quartile75, facecolor='cornflowerblue', alpha=0.6,
                label=r'25-75th percentiles',zorder=2)
# Mark the most recent observation on the climatology axis.
plt.scatter(doy[currentdoy-3],ice[-1],s=10,color='aqua',zorder=9)
plt.ylabel(r'\textbf{Extent} [$\times$10$^{6}$ km$^2$]',fontsize=15,
           color='darkgrey')
le = plt.legend(shadow=False,fontsize=6,loc='upper left',
                bbox_to_anchor=(0.473, 1.011),fancybox=True,ncol=2)
for text in le.get_texts():
    text.set_color('w')
plt.title(r'\textbf{ARCTIC SEA ICE}',
          fontsize=21,color='darkgrey')
plt.text(doy[currentdoy]-5,ice[-1]-1.35,r'\textbf{2018}',
         fontsize=13.5,rotation='horizontal',ha='left',color='aqua')
plt.text(0.5,3.1,r'\textbf{DATA:} National Snow \& Ice Data Center, Boulder CO',
         fontsize=5.5,rotation='horizontal',ha='left',color='darkgrey')
plt.text(0.5,2.6,r'\textbf{SOURCE:} ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/',
         fontsize=5.5,rotation='horizontal',ha='left',color='darkgrey')
plt.text(0.5,2.1,r'\textbf{GRAPHIC:} Zachary Labe (@ZLabe)',
         fontsize=5.5,rotation='horizontal',ha='left',color='darkgrey')
fig.subplots_adjust(top=0.91)
### Save figure
plt.savefig(directoryfigure + 'nsidc_sie_quartiles_currentyear.png',dpi=300)
7d899950b4d9b46ac5424339c27486ecac02f536 | 4,486 | py | Python | Predict-next-word/train.py | PandoraLS/python_toys | d3f78815ada5c20863656c643342d48bcc3aef53 | [
"MIT"
] | null | null | null | Predict-next-word/train.py | PandoraLS/python_toys | d3f78815ada5c20863656c643342d48bcc3aef53 | [
"MIT"
] | null | null | null | Predict-next-word/train.py | PandoraLS/python_toys | d3f78815ada5c20863656c643342d48bcc3aef53 | [
"MIT"
] | null | null | null | import collections
import nltk
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
from utils import chunks
import json
import os
import shutil
def word_indexing(words):
    """Map each distinct word to an integer index, most frequent word first.

    :param words: an iterable of word tokens
    :return: (vocab_dict, rev_vocab_dict) where vocab_dict maps
        word -> index (0 for the most common word) and rev_vocab_dict
        is the inverse mapping index -> word.
    """
    ranked = collections.Counter(words).most_common()
    # Enumerate in frequency order so index 0 is the most common word.
    vocab_dict = {word: index for index, (word, _count) in enumerate(ranked)}
    rev_vocab_dict = {index: word for word, index in vocab_dict.items()}
    return vocab_dict, rev_vocab_dict
def data_sampling(content, window):
    """
    Build (input, label) training pairs by sliding a window over the text.

    :param content: Text vocab as string
    :param window: Window size for sampling, the window moves on the text vocab to build the samples
    :return: Training vocab includes (input, label) pair and number of classes
    If the window includes "cats like to chase mice" X is "cats like to chase" and y is "mice"

    Side effects: writes vocab/vocab.json and vocab/rev_vocab.json so that
    inference code can decode predicted indices back into words.

    NOTE(review): the second return value is len(words) — the total token
    count, not the vocabulary size. Callers use it as the number of output
    classes; every label index is smaller than it, so training works, but
    one-hot vectors are wider than necessary. Confirm this is intentional.
    """
    words = nltk.tokenize.word_tokenize(content)
    vocab_dict, rev_vocab_dict = word_indexing(words)
    # Persist both mappings for later decoding of model outputs.
    with open('vocab/rev_vocab.json', 'w') as fp:
        json.dump(rev_vocab_dict, fp)
    with open('vocab/vocab.json', 'w') as fp:
        json.dump(vocab_dict, fp)
    training_data = []
    # assumes utils.chunks yields successive `window`-sized token groups and
    # truncate=True drops a short trailing group — TODO confirm against utils.
    samples = chunks(words, window, truncate=True)
    for sample in samples:
        # First window-1 token indices are the input; the last is the label.
        training_data.append(([vocab_dict[z] for z in sample[:-1]], vocab_dict[sample[-1:][0]]))
    return training_data, len(words)
with open("data.txt") as f:
    content = f.read()

# Hyperparameters: each training sample is window-1 tokens predicting the next.
window = 6
time_steps = window - 1
num_hidden = 512
num_input = 1
batch_size = 100
iteration = 250

training_data, num_classes = data_sampling(content, window=window)

# Build the Batches:
batches = chunks(training_data, batch_size)

# RNN output node weights and biases
weights = {
    'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([num_classes]))
}

# tf graph input
X = tf.placeholder("float", [None, time_steps, num_input], name='X')
Y = tf.placeholder("float", [None, num_classes])


def RNN(x, weights, biases):
    """Run a single-layer LSTM over `x` and return output-layer logits.

    FIX: this definition was missing from the file even though it is called
    below, so the script failed with NameError at `RNN(X, weights, biases)`.
    """
    # static_rnn expects a list of `time_steps` tensors of shape
    # (batch, num_input) instead of one (batch, time_steps, num_input) tensor.
    x = tf.unstack(x, time_steps, 1)
    lstm_cell = rnn.BasicLSTMCell(num_hidden)
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    # Linear readout of the last timestep's output only.
    return tf.matmul(outputs[-1], weights['out']) + biases['out']


logits = RNN(X, weights, biases)
y_pred = tf.argmax(tf.nn.softmax(logits), 1, name='y_pred')
y_true = tf.argmax(Y, 1)

# Loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))
train_op = tf.train.RMSPropOptimizer(learning_rate=0.0001).minimize(loss_op)
correct_pred = tf.equal(y_pred, y_true)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initialize the variables with default values
init = tf.global_variables_initializer()
saver = tf.train.Saver()

with tf.Session() as sess:
    # Run the initializer
    sess.run(init)
    for i in range(0, iteration):
        loss_list = []
        acc_list = []
        for batch in batches:
            X_batch = [x[0] for x in batch]
            Y_batch = [x[1] for x in batch]
            # One-hot encode the integer labels.
            Y_batch_encoded = []
            for x in Y_batch:
                one_hot_vector = np.zeros([num_classes], dtype=float)
                one_hot_vector[x] = 1.0
                Y_batch_encoded.append(one_hot_vector)
            Y_batch_encoded = np.vstack(Y_batch_encoded)
            X_batch = np.vstack(X_batch)
            X_batch = X_batch.reshape(len(batch), time_steps, num_input)
            Y_batch_encoded = Y_batch_encoded.reshape(len(batch), num_classes)
            _, acc, loss, onehot_pred = sess.run(
                [train_op, accuracy, loss_op, logits], feed_dict={X: X_batch, Y: Y_batch_encoded})
            loss_list.append(loss)
            acc_list.append(acc)
        # Epoch-mean loss and accuracy over all batches.
        loss = sum(loss_list)/len(loss_list)
        acc = sum(acc_list)/len(acc_list)
        print("Iteration " + str(i) + ", Loss= " + "{:.4f}".format(loss)
              + ", Training Accuracy= " + "{:.2f}".format(acc * 100))
    inputs = {
        "X": X,
    }
    outputs = {"y_pred": y_pred}
    # Replace any previously exported SavedModel.
    if os.path.isdir("model"):
        shutil.rmtree('model')
    tf.saved_model.simple_save(
        sess, 'model/', inputs, outputs
    )
import nltk
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
from utils import chunks
import json
import os
import shutil
def word_indexing(words):
    """
    Map each distinct word to an integer index, most frequent word first.

    :param words: an iterable of word tokens
    :return: a vocabulary dictionary {most_common_word: 0, next_word: 1, ...}
        and its reverse {0: most_common_word, 1: next_word, ...}
        (indices start at 0, in descending frequency order)
    """
    vocab = collections.Counter(words).most_common()
    vocab_dict = dict()
    for word, _ in vocab:
        # len(vocab_dict) grows by one per insertion -> indices 0, 1, 2, ...
        vocab_dict[word] = len(vocab_dict)
    rev_vocab_dict = dict(zip(vocab_dict.values(), vocab_dict.keys()))
    return vocab_dict, rev_vocab_dict
def data_sampling(content, window):
    """
    Build (input, label) training pairs by sliding a window over the text.

    :param content: Text vocab as string
    :param window: Window size for sampling, the window moves on the text vocab to build the samples
    :return: Training vocab includes (input, label) pair and number of classes
    If the window includes "cats like to chase mice" X is "cats like to chase" and y is "mice"

    Side effects: writes vocab/vocab.json and vocab/rev_vocab.json.

    NOTE(review): the second return value is len(words) — the total token
    count, not the vocabulary size; every label index is still smaller than
    it, so downstream one-hot encoding works but is wider than necessary.
    """
    words = nltk.tokenize.word_tokenize(content)
    vocab_dict, rev_vocab_dict = word_indexing(words)
    # Persist both mappings for later decoding of model outputs.
    with open('vocab/rev_vocab.json', 'w') as fp:
        json.dump(rev_vocab_dict, fp)
    with open('vocab/vocab.json', 'w') as fp:
        json.dump(vocab_dict, fp)
    training_data = []
    # assumes utils.chunks yields successive `window`-sized token groups and
    # truncate=True drops a short trailing group — TODO confirm against utils.
    samples = chunks(words, window, truncate=True)
    for sample in samples:
        # First window-1 token indices are the input; the last is the label.
        training_data.append(([vocab_dict[z] for z in sample[:-1]], vocab_dict[sample[-1:][0]]))
    return training_data, len(words)
with open("data.txt") as f:
    content = f.read()
# Hyperparameters: each sample is window-1 input tokens predicting the next.
window = 6
time_steps = window - 1
num_hidden = 512
num_input = 1
batch_size = 100
iteration = 250
training_data, num_classes = data_sampling(content, window=window)
# Build the Batches:
batches = chunks(training_data, batch_size)
# RNN output node weights and biases
weights = {
    'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([num_classes]))
}
# tf graph input
# X: (batch, time_steps, num_input) token-index sequences; Y: one-hot labels.
X = tf.placeholder("float", [None, time_steps, num_input], name='X')
Y = tf.placeholder("float", [None, num_classes])
def RNN(x, weights, biases):
    """Run a single-layer LSTM over `x` and project the final output to logits."""
    # static_rnn wants a Python list of `time_steps` tensors, each of shape
    # (batch, num_input), rather than one (batch, time_steps, num_input) tensor.
    step_inputs = tf.unstack(x, time_steps, 1)
    cell = rnn.BasicLSTMCell(num_hidden)
    outputs, _states = rnn.static_rnn(cell, step_inputs, dtype=tf.float32)
    # Linear readout applied to the last timestep's output only.
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
logits = RNN(X, weights, biases)
y_pred = tf.argmax(tf.nn.softmax(logits), 1, name='y_pred')
y_true = tf.argmax(Y, 1)
# Loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))
train_op = tf.train.RMSPropOptimizer(learning_rate=0.0001).minimize(loss_op)
correct_pred = tf.equal(y_pred, y_true)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initialize the variables with default values
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
    # Run the initializer
    sess.run(init)
    for i in range(0, iteration):
        loss_list = []
        acc_list = []
        for batch in batches:
            X_batch = [x[0] for x in batch]
            Y_batch = [x[1] for x in batch]
            # One-hot encode the integer labels.
            Y_batch_encoded = []
            for x in Y_batch:
                one_hot_vector = np.zeros([num_classes], dtype=float)
                one_hot_vector[x] = 1.0
                Y_batch_encoded.append(one_hot_vector)
            Y_batch_encoded = np.vstack(Y_batch_encoded)
            X_batch = np.vstack(X_batch)
            X_batch = X_batch.reshape(len(batch), time_steps, num_input)
            Y_batch_encoded = Y_batch_encoded.reshape(len(batch), num_classes)
            _, acc, loss, onehot_pred = sess.run(
                [train_op, accuracy, loss_op, logits], feed_dict={X: X_batch, Y: Y_batch_encoded})
            loss_list.append(loss)
            acc_list.append(acc)
        # Epoch-mean loss and accuracy over all batches.
        loss = sum(loss_list)/len(loss_list)
        acc = sum(acc_list)/len(acc_list)
        print("Iteration " + str(i) + ", Loss= " + "{:.4f}".format(loss)
              + ", Training Accuracy= " + "{:.2f}".format(acc * 100))
    inputs = {
        "X": X,
    }
    outputs = {"y_pred": y_pred}
    # Replace any previously exported SavedModel.
    if os.path.isdir("model"):
        shutil.rmtree('model')
    tf.saved_model.simple_save(
        sess, 'model/', inputs, outputs
    )
a68d277ee2f5e00f491c500efd8e74e9cf78e73f | 2,719 | py | Python | paper_analyses/compute_panel_d.py | benmaier/vaccontrib | 1be75e049d3069ba465e7779e850c2e2504daae9 | [
"MIT"
] | 1 | 2021-12-14T15:51:19.000Z | 2021-12-14T15:51:19.000Z | paper_analyses/compute_panel_d.py | benmaier/vaccontrib | 1be75e049d3069ba465e7779e850c2e2504daae9 | [
"MIT"
] | null | null | null | paper_analyses/compute_panel_d.py | benmaier/vaccontrib | 1be75e049d3069ba465e7779e850c2e2504daae9 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as pl
from vaccontrib.covid import (
get_covid_matrices
)
from vaccontrib.main import (
get_reduced_vaccinated_susceptible_contribution_matrix,
get_reduced_vaccinated_susceptible_eigenvector,
get_eigenvector,
get_next_generation_matrix_from_matrices,
get_contribution_matrix,
)
from tqdm import tqdm
import matplotlib.ticker as mtick
import bfmplot as bp
# Color pairs: [dark (unvaccinated, vaccinated), light (unvaccinated, vaccinated)].
colors = [
    ['#E75740', '#58BDB2'],
    ['#F2957D', '#268D7C'],
]
uv_colors = [ colors[0][0], colors[1][1] ]
# Scan 41 values of the efficacy "reduction" factor from 1 down to 0.
reduction = np.linspace(1,0,41)
n = len(reduction)
# Baseline matrices for the delta scenario with statuses ('no', 'vacc'):
# s (susceptibility), r, b entries; column 1 is the vaccinated status.
matrices = get_covid_matrices('delta','01_upper',('no','vacc'))
s0 = np.array(matrices['s'])
r0 = np.array(matrices['r'])
b0 = np.array(matrices['b'])
# Cs[mode, scan index] stores each normalized 2x2 contribution matrix.
Cs = np.zeros((2,n,2,2))
for imode, reduce_susc_only in enumerate([True,False]):
    _v = np.array([1.,1.,1.,1])
    for ired, red in enumerate(reduction):
        s = s0.copy()
        r = r0.copy()
        b = b0.copy()
        # Rescale susceptibility of the vaccinated column by (1-red).
        s[:,1] = 1 - (1-s0[:,0] ) * (1-(1-red)*_v)
        if reduce_susc_only:
            r = r0
            b = b0
        else:
            # Additionally interpolate breakthrough transmissibility
            # toward the unvaccinated column as red grows.
            r[:,1] = (1-red)*r0[:,1]
            b[:,1] = (1-red)*b0[:,1] + red * (b0[:,0])
        matrices['s'] = s
        matrices['r'] = r
        matrices['b'] = b
        K = get_next_generation_matrix_from_matrices(1,**matrices)
        C = get_reduced_vaccinated_susceptible_contribution_matrix(K)
        # Normalize so entries are fractions of all new infections.
        C /= C.sum()
        Cs[imode,ired,:,:] = C
fig, ax = pl.subplots(1,1,figsize=(5,3.5))
x = 1 - reduction
linestyles = ['-','--']
labels = ['const. breakthrough\ntransmissibility reduction',
          'decreasing breakthrough\ntransmissibility reduction',
          ]
# Guide lines: the 50% level and two reference efficacy values.
ax.plot(x,0.5*np.ones_like(x),c='#aaaaaa',ls='-')
ax.plot([0.22,0.22],[0,.5],c='#aaaaaa',ls='-')
ax.plot([0.41,0.41],[0,.5],c='#aaaaaa',ls='-')
for imode in range(2):
    # Sum over the infected axis; column 0/1 = unvaccinated/vaccinated sources.
    unvacc = Cs[imode,:,:,:].sum(axis=1)[:,0]
    vacc = Cs[imode,:,:,:].sum(axis=1)[:,1]
    ax.plot(x,unvacc,color=uv_colors[0],label=labels[imode],ls=linestyles[imode])
    ax.plot(x,vacc,color=uv_colors[1],ls=linestyles[imode])
ax.set_ylabel('fraction of new infections caused by ...')
ax.legend()
ax.yaxis.set_major_formatter(mtick.PercentFormatter(1))
ax.xaxis.set_major_formatter(mtick.PercentFormatter(1))
ax.set_yticks([0,.25,.5,.75,1])
ax.set_xlim(0,1)
ax.set_ylim(0,1)
ax.text(0.85,0.65,'unvaccinated',ha='right',va='top',color=uv_colors[0])
ax.text(0.8,0.1,'vaccinated',ha='right',va='bottom',color=uv_colors[1])
fig.tight_layout()
ax.set_xlabel('age-independent vaccine efficacy s')
bp.strip_axis(ax)
fig.tight_layout()
fig.savefig('efficacy_scan.pdf')
pl.show()
| 24.944954 | 81 | 0.614932 | import numpy as np
import matplotlib.pyplot as pl
from vaccontrib.covid import (
get_covid_matrices
)
from vaccontrib.main import (
get_reduced_vaccinated_susceptible_contribution_matrix,
get_reduced_vaccinated_susceptible_eigenvector,
get_eigenvector,
get_next_generation_matrix_from_matrices,
get_contribution_matrix,
)
from tqdm import tqdm
import matplotlib.ticker as mtick
import bfmplot as bp
# Color pairs: [dark (unvaccinated, vaccinated), light (unvaccinated, vaccinated)].
colors = [
    ['#E75740', '#58BDB2'],
    ['#F2957D', '#268D7C'],
]
uv_colors = [ colors[0][0], colors[1][1] ]
# Scan 41 values of the efficacy "reduction" factor from 1 down to 0.
reduction = np.linspace(1,0,41)
n = len(reduction)
# Baseline matrices for the delta scenario with statuses ('no', 'vacc'):
# s (susceptibility), r, b entries; column 1 is the vaccinated status.
matrices = get_covid_matrices('delta','01_upper',('no','vacc'))
s0 = np.array(matrices['s'])
r0 = np.array(matrices['r'])
b0 = np.array(matrices['b'])
# Cs[mode, scan index] stores each normalized 2x2 contribution matrix.
Cs = np.zeros((2,n,2,2))
for imode, reduce_susc_only in enumerate([True,False]):
    _v = np.array([1.,1.,1.,1])
    for ired, red in enumerate(reduction):
        s = s0.copy()
        r = r0.copy()
        b = b0.copy()
        # Rescale susceptibility of the vaccinated column by (1-red).
        s[:,1] = 1 - (1-s0[:,0] ) * (1-(1-red)*_v)
        if reduce_susc_only:
            r = r0
            b = b0
        else:
            # Additionally interpolate breakthrough transmissibility
            # toward the unvaccinated column as red grows.
            r[:,1] = (1-red)*r0[:,1]
            b[:,1] = (1-red)*b0[:,1] + red * (b0[:,0])
        matrices['s'] = s
        matrices['r'] = r
        matrices['b'] = b
        K = get_next_generation_matrix_from_matrices(1,**matrices)
        C = get_reduced_vaccinated_susceptible_contribution_matrix(K)
        # Normalize so entries are fractions of all new infections.
        C /= C.sum()
        Cs[imode,ired,:,:] = C
fig, ax = pl.subplots(1,1,figsize=(5,3.5))
x = 1 - reduction
linestyles = ['-','--']
labels = ['const. breakthrough\ntransmissibility reduction',
          'decreasing breakthrough\ntransmissibility reduction',
          ]
# Guide lines: the 50% level and two reference efficacy values.
ax.plot(x,0.5*np.ones_like(x),c='#aaaaaa',ls='-')
ax.plot([0.22,0.22],[0,.5],c='#aaaaaa',ls='-')
ax.plot([0.41,0.41],[0,.5],c='#aaaaaa',ls='-')
for imode in range(2):
    # Sum over the infected axis; column 0/1 = unvaccinated/vaccinated sources.
    unvacc = Cs[imode,:,:,:].sum(axis=1)[:,0]
    vacc = Cs[imode,:,:,:].sum(axis=1)[:,1]
    ax.plot(x,unvacc,color=uv_colors[0],label=labels[imode],ls=linestyles[imode])
    ax.plot(x,vacc,color=uv_colors[1],ls=linestyles[imode])
ax.set_ylabel('fraction of new infections caused by ...')
ax.legend()
ax.yaxis.set_major_formatter(mtick.PercentFormatter(1))
ax.xaxis.set_major_formatter(mtick.PercentFormatter(1))
ax.set_yticks([0,.25,.5,.75,1])
ax.set_xlim(0,1)
ax.set_ylim(0,1)
ax.text(0.85,0.65,'unvaccinated',ha='right',va='top',color=uv_colors[0])
ax.text(0.8,0.1,'vaccinated',ha='right',va='bottom',color=uv_colors[1])
fig.tight_layout()
ax.set_xlabel('age-independent vaccine efficacy s')
bp.strip_axis(ax)
fig.tight_layout()
fig.savefig('efficacy_scan.pdf')
pl.show()
| 0 | 0 | 0 |
28b21e86c02b08e5ab2bdee34146d5d7f7ba3d30 | 1,358 | py | Python | baekjoon/15483.py | GihwanKim/Baekjoon | 52eb2bf80bb1243697858445e5b5e2d50d78be4e | [
"MIT"
] | null | null | null | baekjoon/15483.py | GihwanKim/Baekjoon | 52eb2bf80bb1243697858445e5b5e2d50d78be4e | [
"MIT"
] | null | null | null | baekjoon/15483.py | GihwanKim/Baekjoon | 52eb2bf80bb1243697858445e5b5e2d50d78be4e | [
"MIT"
] | null | null | null | """
15483 : 최소 편집
URL : https://www.acmicpc.net/problem/15483
Input #1 :
abc
ab
Output #1 :
1
Input #2 :
ca
abc
Output #2 :
3
Input #3 :
abc
cba
Output #3 :
2
Input #4 :
abcd
bcde
Output #4 :
2
Input #5 :
abababababa
aaaaaaaaaaa
Output #5 :
5
Input #6 :
for
whileforif
Output #6 :
7
Input #7 :
whilewhile
whalewhale
Output #7 :
2
Input #8 :
aaabaaa
acacaca
Output #8 :
3
Input #9 :
qwerty
dvorak
Output #9 :
5
Input #10 :
asdf
asdf
Output #10 :
0
"""
import sys

# Recursion depth can reach len(a) + len(b); lift CPython's default limit.
sys.setrecursionlimit(987654321)

MAX_N = 1001  # inputs are at most 1000 characters

a = input()
b = input()
# cache[ia][ib] memoizes the edit distance between a[ia:] and b[ib:].
cache = [[None for _ in range(MAX_N)] for _ in range(MAX_N)]


def lds(ia, ib):
    """Return the minimum edit distance between a[ia:] and b[ib:].

    FIX: this function was missing from the file although it is called
    below, so print(lds(0, 0)) raised NameError; restored the memoized
    Levenshtein recursion.
    """
    # Base cases: one string exhausted — remaining edits equal the length
    # of the other string's suffix.
    if (ia >= len(a)) and (ib < len(b)):
        return len(b[ib:])
    elif ib >= len(b):
        return len(a[ia:])
    if cache[ia][ib] is not None:
        return cache[ia][ib]
    if a[ia] == b[ib]:
        # Characters match: no edit needed at this position.
        cache[ia][ib] = lds(ia + 1, ib + 1)
    else:
        # Delete from a / insert into a / substitute — take the cheapest.
        cache[ia][ib] = 1 + min(
            lds(ia + 1, ib), lds(ia, ib + 1), lds(ia + 1, ib + 1)
        )
    return cache[ia][ib]


print(lds(0, 0))
| 15.609195 | 65 | 0.431517 | """
15483 : 최소 편집
URL : https://www.acmicpc.net/problem/15483
Input #1 :
abc
ab
Output #1 :
1
Input #2 :
ca
abc
Output #2 :
3
Input #3 :
abc
cba
Output #3 :
2
Input #4 :
abcd
bcde
Output #4 :
2
Input #5 :
abababababa
aaaaaaaaaaa
Output #5 :
5
Input #6 :
for
whileforif
Output #6 :
7
Input #7 :
whilewhile
whalewhale
Output #7 :
2
Input #8 :
aaabaaa
acacaca
Output #8 :
3
Input #9 :
qwerty
dvorak
Output #9 :
5
Input #10 :
asdf
asdf
Output #10 :
0
"""
import sys
# Recursion depth can reach len(a) + len(b); lift CPython's default limit.
sys.setrecursionlimit(987654321)
MAX_N = 1001
a = input()
b = input()
# cache[ia][ib] memoizes the edit distance between a[ia:] and b[ib:].
cache = [[None for _ in range(MAX_N)] for _ in range(MAX_N)]
def lds(ia, ib):
    """Minimum number of single-character edits turning a[ia:] into b[ib:].

    Results are memoized in the module-level `cache` table.
    """
    # Base cases: when one suffix is empty the answer is the length of the
    # other suffix (0 if both are exhausted).
    if ia >= len(a):
        return len(b) - ib if ib < len(b) else 0
    if ib >= len(b):
        return len(a) - ia
    memo = cache[ia][ib]
    if memo is not None:
        return memo
    if a[ia] == b[ib]:
        # Matching characters cost nothing at this position.
        best = lds(ia + 1, ib + 1)
    else:
        # Cheapest of delete / insert / substitute.
        best = 1 + min(lds(ia + 1, ib),
                       lds(ia, ib + 1),
                       lds(ia + 1, ib + 1))
    cache[ia][ib] = best
    return best
print(lds(0, 0))
| 390 | 0 | 23 |
2ef90b04f7e868f7f0bca13278b34e23837d7c75 | 2,247 | py | Python | BioClients/jensenlab/Utils.py | jeremyjyang/BioClients | b78ab2b948c79616fed080112e31d383346bec58 | [
"CC0-1.0"
] | 10 | 2020-05-26T07:29:14.000Z | 2021-12-06T21:33:40.000Z | BioClients/jensenlab/Utils.py | jeremyjyang/BioClients | b78ab2b948c79616fed080112e31d383346bec58 | [
"CC0-1.0"
] | 1 | 2021-10-05T12:25:30.000Z | 2021-10-05T17:05:56.000Z | BioClients/jensenlab/Utils.py | jeremyjyang/BioClients | b78ab2b948c79616fed080112e31d383346bec58 | [
"CC0-1.0"
] | 2 | 2021-03-16T03:20:24.000Z | 2021-08-08T20:17:10.000Z | #!/usr/bin/env python3
"""
Utility functions for JensenLab REST APIs.
https://api.jensenlab.org/Textmining?type1=-26&id1=DOID:10652&type2=9606&limit=10&format=json
https://api.jensenlab.org/Textmining?query=jetlag[tiab]%20OR%20jet-lag[tiab]&type2=9606&limit=10&format=json
https://api.jensenlab.org/Knowledge?type1=-26&id1=DOID:10652&type2=9606&limit=10&format=json
https://api.jensenlab.org/Experiments?type1=-26&id1=DOID:10652&type2=9606&limit=10&format=json
"""
import sys,os,re,json,time,logging
import pandas as pd
from ..util import rest
#
# JensenLab REST API endpoint components.
API_HOST='api.jensenlab.org'
API_BASE_PATH=''
# Base URL shared by the query helpers below.
BASE_URL='https://'+API_HOST+API_BASE_PATH
#
##############################################################################
##############################################################################
def GetPubmedComentionGenes(ids, base_url=BASE_URL, fout=None):
  """Search genes co-mentioned with the query terms in PubMed titles/abstracts.

  :param ids: query terms; each is searched as `<term>[tiab]`
  :param base_url: JensenLab API base URL
  :param fout: optional writable target; results are written as TSV when given
  :return: pandas DataFrame with one row per gene in the API responses
  """
  tags=[]; df=pd.DataFrame();
  for id_this in ids:
    rval = rest.Utils.GetURL(base_url+f'/Textmining?query={id_this}[tiab]&type2=9606&limit=10&format=json', parse_json=True)
    genes = rval[0] # dict keyed by Ensembl gene id
    ensgs = list(genes.keys())
    flag = rval[1] # second element of API response; meaning unconfirmed
    for ensg in ensgs:
      gene = genes[ensg]
      logging.debug(json.dumps(gene, indent=2))
      # Column names come from the first gene record seen.
      if not tags: tags = list(gene.keys())
      df = pd.concat([df, pd.DataFrame({tags[j]:[gene[tags[j]]] for j in range(len(tags))})])
  # FIX: pass the separator by keyword — positional `sep` in to_csv was
  # deprecated in pandas 1.0 and removed in 2.0 (raises TypeError there).
  if fout: df.to_csv(fout, sep="\t", index=False)
  logging.info("n_out: {}".format(df.shape[0]))
  return df
##############################################################################
| 41.611111 | 129 | 0.611927 | #!/usr/bin/env python3
"""
Utility functions for JensenLab REST APIs.
https://api.jensenlab.org/Textmining?type1=-26&id1=DOID:10652&type2=9606&limit=10&format=json
https://api.jensenlab.org/Textmining?query=jetlag[tiab]%20OR%20jet-lag[tiab]&type2=9606&limit=10&format=json
https://api.jensenlab.org/Knowledge?type1=-26&id1=DOID:10652&type2=9606&limit=10&format=json
https://api.jensenlab.org/Experiments?type1=-26&id1=DOID:10652&type2=9606&limit=10&format=json
"""
import sys,os,re,json,time,logging
import pandas as pd
from ..util import rest
#
# JensenLab REST API endpoint components.
API_HOST='api.jensenlab.org'
API_BASE_PATH=''
# Base URL shared by the query helpers below.
BASE_URL='https://'+API_HOST+API_BASE_PATH
#
##############################################################################
def GetDiseaseGenes(channel, ids, nmax, base_url=BASE_URL, fout=None):
  """Fetch genes associated with disease IDs from the given evidence channel.

  :param channel: API channel, e.g. 'Textmining', 'Knowledge' or 'Experiments'
  :param ids: disease identifiers (e.g. DOID terms), queried as type1=-26
  :param nmax: maximum number of genes to return per query
  :param base_url: JensenLab API base URL
  :param fout: optional writable target; results are written as TSV when given
  :return: pandas DataFrame with one row per gene in the API responses
  """
  tags=[]; df=pd.DataFrame();
  for id_this in ids:
    rval = rest.Utils.GetURL(base_url+f'/{channel}?type1=-26&id1={id_this}&type2=9606&limit={nmax}&format=json', parse_json=True)
    genes = rval[0] # dict keyed by Ensembl gene id
    ensgs = list(genes.keys())
    flag = rval[1] # second element of API response; meaning unconfirmed
    for ensg in ensgs:
      gene = genes[ensg]
      logging.debug(json.dumps(gene, indent=2))
      # Column names come from the first gene record seen.
      if not tags: tags = list(gene.keys())
      df = pd.concat([df, pd.DataFrame({tags[j]:[gene[tags[j]]] for j in range(len(tags))})])
  # FIX: pass the separator by keyword — positional `sep` in to_csv was
  # deprecated in pandas 1.0 and removed in 2.0 (raises TypeError there).
  if fout: df.to_csv(fout, sep="\t", index=False)
  logging.info("n_out: {}".format(df.shape[0]))
  return df
##############################################################################
def GetPubmedComentionGenes(ids, base_url=BASE_URL, fout=None):
  """Search genes co-mentioned with the query terms in PubMed titles/abstracts.

  :param ids: query terms; each is searched as `<term>[tiab]`
  :param base_url: JensenLab API base URL
  :param fout: optional writable target; results are written as TSV when given
  :return: pandas DataFrame with one row per gene in the API responses
  """
  tags=[]; df=pd.DataFrame();
  for id_this in ids:
    rval = rest.Utils.GetURL(base_url+f'/Textmining?query={id_this}[tiab]&type2=9606&limit=10&format=json', parse_json=True)
    genes = rval[0] # dict keyed by Ensembl gene id
    ensgs = list(genes.keys())
    flag = rval[1] # second element of API response; meaning unconfirmed
    for ensg in ensgs:
      gene = genes[ensg]
      logging.debug(json.dumps(gene, indent=2))
      # Column names come from the first gene record seen.
      if not tags: tags = list(gene.keys())
      df = pd.concat([df, pd.DataFrame({tags[j]:[gene[tags[j]]] for j in range(len(tags))})])
  # FIX: pass the separator by keyword — positional `sep` in to_csv was
  # deprecated in pandas 1.0 and removed in 2.0 (raises TypeError there).
  if fout: df.to_csv(fout, sep="\t", index=False)
  logging.info("n_out: {}".format(df.shape[0]))
  return df
##############################################################################
| 650 | 0 | 22 |
88a3d975b40f4ab3e7c50b58fa6b52cf82ff0344 | 8,252 | py | Python | sandbox/finetuning/runs/test_env_change.py | andrewli77/rllab-finetuning | 2dae9141d0fdc284d04f18931907131d66b43023 | [
"MIT"
] | 23 | 2020-04-27T23:53:44.000Z | 2022-03-10T03:13:16.000Z | sandbox/finetuning/runs/test_env_change.py | WeiChengTseng/rllab-finetuning | 2dae9141d0fdc284d04f18931907131d66b43023 | [
"MIT"
] | 1 | 2021-11-14T13:30:22.000Z | 2021-11-14T13:30:22.000Z | sandbox/finetuning/runs/test_env_change.py | WeiChengTseng/rllab-finetuning | 2dae9141d0fdc284d04f18931907131d66b43023 | [
"MIT"
] | 8 | 2020-06-17T03:28:34.000Z | 2022-03-09T03:13:03.000Z | import numpy as np
import joblib
from rllab.sampler.utils import rollout
import os
from rllab import config
from rllab.misc import ext
from tqdm import trange, tqdm
import IPython
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import itertools
from sandbox.finetuning.envs.mujoco.modified.modified_ant_env import ModifiedAntEnv
from sandbox.finetuning.envs.mujoco.modified.modified_ant_gather_env import ModifiedAntLowGearGatherEnv
from rllab.envs.normalized_env import normalize
import math
# mutates the policy, but not in a way that matters
if __name__ == "__main__":
    # NOTE(review): main() is not defined anywhere in this file as shown;
    # running this script raises NameError — confirm the definition was not
    # accidentally removed.
    main()
| 42.536082 | 272 | 0.628817 | import numpy as np
import joblib
from rllab.sampler.utils import rollout
import os
from rllab import config
from rllab.misc import ext
from tqdm import trange, tqdm
import IPython
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import itertools
from sandbox.finetuning.envs.mujoco.modified.modified_ant_env import ModifiedAntEnv
from sandbox.finetuning.envs.mujoco.modified.modified_ant_gather_env import ModifiedAntLowGearGatherEnv
from rllab.envs.normalized_env import normalize
import math
# mutates the policy, but not in a way that matters
def eval_performance(policy, env, max_path_length, num_rollouts):
    """Run `num_rollouts` deterministic episodes and return their total rewards."""
    # Fixed seed so evaluations are comparable across calls.
    ext.set_seed(0)
    episode_returns = []
    # Evaluate with the manager's action noise switched off.
    with policy.manager.set_std_to_0():
        for _ in trange(num_rollouts):
            path = rollout(env, policy, max_path_length=max_path_length)
            episode_returns.append(np.sum(path['rewards']))
    return episode_returns
def get_latent_info(policy, env, period, max_path_length, num_rollouts):
    """Run rollouts with the given sub-policy period and collect latent samples.

    Mutates `policy` by setting its `period` attribute. Returns a list with
    one array per rollout, containing the latent at every 10th timestep.
    """
    # change the policy period
    policy.period = period
    #do the rollouts and aggregate the performances
    ext.set_seed(0)
    latents = []
    for i in trange(num_rollouts):
        latent_infos = rollout(env, policy, max_path_length=max_path_length)['agent_infos']['latents']
        # Subsample: keep only the latent at every 10th step of the rollout.
        latents.append(latent_infos[np.array(range(0, len(latent_infos), 10), dtype=np.uint32)])
    return latents
def save_return_info(policy, env, env_name):
    """Evaluate `policy` at several temporal-aggregation periods and save returns.

    Saves an array of shape (len(periods), num_rollouts) to
    "<env_name>_timestepagg_returns.npy" and drops into an IPython shell.

    FIX: eval_performance(policy, env, max_path_length, num_rollouts) takes
    four arguments, but the old code passed `period` as a fifth positional
    argument and raised TypeError. The period is now set on the policy
    directly, matching how get_latent_info configures it.
    """
    periods = [1, 2, 5, 10, 25, 50, 100, 200]
    returns = []
    for period in tqdm(periods, desc="Period"):
        policy.period = period  # mutate the policy's aggregation period
        returns.append(eval_performance(policy, env, 5000, 1000))
    returns = np.array(returns)
    print(np.mean(returns, axis=1))
    print(np.std(returns, axis=1))
    np.save("{}_timestepagg_returns.npy".format(env_name), returns)
    IPython.embed()
def main():
    """Load a trained policy checkpoint and several (perturbed) ant-gather
    environments, then drop into an IPython shell for interactive evaluation.

    Nothing is evaluated automatically: call eval_performance(),
    get_latent_info() or save_return_info() from the embedded shell, where
    ``policy``, ``env`` (perturbed inertia), ``env2`` (perturbed friction) and
    ``env3`` (unmodified) are in scope.
    """
    import argparse
    parser = argparse.ArgumentParser()
    # No options are defined yet; parse_args() is kept so stray CLI arguments
    # still produce the usual argparse error.
    args = parser.parse_args()
    # Trained HiPPO ant-low-gear-gather checkpoint (synced s3 run directory).
    pkl_path = 'data/s3/antlowgeargather-hippo-random-p-randominit-trainablelat-fixedvec-latdim6-period10-lr0.003-tpi10-epsilon0.1-bs100000/antlowgeargather_hippo_random_p_randominit_trainablelat_fixedvec_latdim6_period10_lr0.003_tpi10_epsilon0.1_bs100000_10/itr_1600.pkl'
    data = joblib.load(os.path.join(config.PROJECT_PATH, pkl_path))
    policy = data['policy']
    # env: "body_inertia" perturbed -- multiplier array stacks a 1.3 block on
    # top of a ones block (exact semantics live in ModifiedAntEnv).
    env = normalize(
        ModifiedAntLowGearGatherEnv(param_name="body_inertia", multiplier=np.concatenate([1.3 * np.ones((7, 3)), np.ones((7, 3))]), activity_range=6.0, sensor_range=6.0, sensor_span=math.pi * 2, ego_obs=True))
    keyword_args = dict(activity_range=6.0, sensor_range=6.0, sensor_span=math.pi * 2, ego_obs=True)
    # env3: unmodified environment (baseline).
    env3 = normalize(ModifiedAntLowGearGatherEnv(**keyword_args))
    # env2: "geom_friction" perturbed with a 1.5 block in the multiplier.
    keyword_args["param_name"] = "geom_friction"
    keyword_args["multiplier"] = np.concatenate([1.5 * np.ones((9, 3)), np.ones((9, 3))])
    env2 = normalize(ModifiedAntLowGearGatherEnv(**keyword_args))
    # NOTE: ~115 lines of dead, commented-out experiment configuration that
    # previously lived here were removed; recover them from VCS if needed.
    IPython.embed()
# Script entry point: build envs, load the checkpoint and open IPython.
if __name__ == "__main__":
    main()
| 7,548 | 0 | 91 |
e9dfe2e63bb667d6bffa21f11821945741f40f73 | 398 | py | Python | pwlocker/views.py | boosh/pwlocker | 60fce09e5d81fdf055e7448ca3e179d70f98c8a7 | [
"MIT"
] | 14 | 2015-03-15T23:58:29.000Z | 2021-11-16T11:01:43.000Z | pwlocker/views.py | boosh/pwlocker | 60fce09e5d81fdf055e7448ca3e179d70f98c8a7 | [
"MIT"
] | null | null | null | pwlocker/views.py | boosh/pwlocker | 60fce09e5d81fdf055e7448ca3e179d70f98c8a7 | [
"MIT"
] | 6 | 2015-01-31T18:40:00.000Z | 2019-03-10T14:42:37.000Z | from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt
from django.template import RequestContext
## The home page. | 36.181818 | 52 | 0.829146 | from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt
from django.template import RequestContext
## The home page.
def home(request):
    """Render the application's home page (``base.html``)."""
    return render_to_response('base.html', RequestContext(request))
b2e129ea7d56ff16cc1bce77021a476210227221 | 456 | py | Python | api/checklist/migrations/0004_auto_20180507_0047.py | Samuel-L/checklistshare | 3953f4c1175f2c0641a6683a7667e9809a6efc8d | [
"MIT"
] | null | null | null | api/checklist/migrations/0004_auto_20180507_0047.py | Samuel-L/checklistshare | 3953f4c1175f2c0641a6683a7667e9809a6efc8d | [
"MIT"
] | 7 | 2018-05-06T20:25:17.000Z | 2018-05-07T01:33:37.000Z | api/checklist/migrations/0004_auto_20180507_0047.py | Samuel-L/checklistshare | 3953f4c1175f2c0641a6683a7667e9809a6efc8d | [
"MIT"
] | null | null | null | # Generated by Django 2.0.5 on 2018-05-07 00:47
from django.db import migrations
| 19.826087 | 47 | 0.546053 | # Generated by Django 2.0.5 on 2018-05-07 00:47
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``Checklist`` model to ``List`` and rename ``Item``'s
    ``checklist`` field to ``List``.

    NOTE(review): the new *field* name ``'List'`` is capitalized, which is
    unusual for a Django field -- confirm this capitalization is intended.
    """
    dependencies = [
        ('checklist', '0003_item'),
    ]
    operations = [
        # The model rename is applied first; the field rename on Item follows.
        migrations.RenameModel(
            old_name='Checklist',
            new_name='List',
        ),
        migrations.RenameField(
            model_name='item',
            old_name='checklist',
            new_name='List',
        ),
    ]
| 0 | 350 | 23 |
a1fe930fe84ff9aae4969cbbe95da039a19f3d78 | 1,291 | py | Python | main.py | rhdimas/basik-calc | 7c77869bdd80d8a01922eccd6836d02b69c6b980 | [
"MIT"
] | null | null | null | main.py | rhdimas/basik-calc | 7c77869bdd80d8a01922eccd6836d02b69c6b980 | [
"MIT"
] | null | null | null | main.py | rhdimas/basik-calc | 7c77869bdd80d8a01922eccd6836d02b69c6b980 | [
"MIT"
] | null | null | null | ##################################
# Basik-Calc
# Calculator for basic operations
# Author: Ricardo Dimas
##################################
import sys
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('\nExiting...')
sys.exit()
| 21.516667 | 67 | 0.516654 | ##################################
# Basik-Calc
# Calculator for basic operations
# Author: Ricardo Dimas
##################################
def main():
print('Welcome to Basik-Calc\n')
import sys
def sum(a, b):
    """Print and return ``int(a) + int(b)``.

    Accepts ints or numeric strings (CLI input arrives as strings).
    Returning the value as well as printing it keeps the CLI output
    identical while making the function testable/reusable.
    NOTE: the name shadows the builtin ``sum`` (kept for compatibility
    with main()'s dispatch).
    """
    result = int(a) + int(b)
    print(result)
    return result
def sub(a, b):
    """Print and return ``int(a) - int(b)``.

    Accepts ints or numeric strings. Returning the value (the original
    only printed it) is backward compatible and makes the function testable.
    """
    result = int(a) - int(b)
    print(result)
    return result
def mult(a, b):
    """Print and return ``int(a) * int(b)``.

    Accepts ints or numeric strings. Returning the value (the original
    only printed it) is backward compatible and makes the function testable.
    """
    result = int(a) * int(b)
    print(result)
    return result
def div(a, b):
    """Print and return ``int(a) / int(b)`` (true division -> float).

    Accepts ints or numeric strings. Raises ZeroDivisionError when b == 0,
    same as before. Returning the value is backward compatible.
    """
    result = int(a) / int(b)
    print(result)
    return result
def main():
    """Interactive entry point: ask for an operation and two operands, then
    dispatch to the matching arithmetic function.

    Unknown menu numbers fall through silently (as before); non-numeric
    input still raises ValueError from int().
    """
    print('Welcome to Basik-Calc\n')
    operation = input("""Which operation would you like to perform?
    (1) Addition;
    (2) Subtraction;
    (3) Multiplication;
    (4) Division;\n""")
    # Dispatch table replaces four copy-pasted if-blocks that each repeated
    # the same two input() prompts.
    handlers = {1: sum, 2: sub, 3: mult, 4: div}
    handler = handlers.get(int(operation))
    if handler is not None:
        a = input('Type in the first number: ')
        b = input('Type in the second number: ')
        handler(a, b)
# Entry point: run the interactive calculator; Ctrl-C exits cleanly
# instead of printing a traceback.
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        print('\nExiting...')
        sys.exit()
| 868 | 0 | 138 |
6058bfd3df4d9c845cb5afb45d2ec2c5750cc310 | 7,248 | py | Python | corehq/apps/export/management/commands/process_skipped_pages.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/apps/export/management/commands/process_skipped_pages.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/apps/export/management/commands/process_skipped_pages.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | import multiprocessing
import os
import re
import shutil
import tempfile
import zipfile
from datetime import datetime
from django.core.management.base import BaseCommand, CommandError
import sh
from corehq.apps.export.dbaccessors import get_properly_wrapped_export_instance
from corehq.apps.export.multiprocess import (
UNPROCESSED_PAGES_DIR,
MultiprocessExporter,
RetryResult,
_add_compressed_page_to_zip,
)
from corehq.util.files import safe_filename
| 40.49162 | 115 | 0.650248 | import multiprocessing
import os
import re
import shutil
import tempfile
import zipfile
from datetime import datetime
from django.core.management.base import BaseCommand, CommandError
import sh
from corehq.apps.export.dbaccessors import get_properly_wrapped_export_instance
from corehq.apps.export.multiprocess import (
UNPROCESSED_PAGES_DIR,
MultiprocessExporter,
RetryResult,
_add_compressed_page_to_zip,
)
from corehq.util.files import safe_filename
class Command(BaseCommand):
    # BUGFIX: the help text previously read "Remove sensitive columns from an
    # export", which describes a different management command. This command
    # re-processes the unprocessed pages left inside a partial export archive
    # (see handle() below and the module name).
    help = "Process the unprocessed pages of a partial export archive"
    def add_arguments(self, parser):
        """Register CLI arguments: the export id, an optional path to an
        already-downloaded archive, worker count, and a force-upload flag."""
        parser.add_argument('export_id')
        parser.add_argument(
            '--export_path',
            help='Path to export ZIP',
        )
        parser.add_argument(
            '--processes',
            type=int,
            dest='processes',
            # Leave one CPU free for the parent process.
            default=multiprocessing.cpu_count() - 1,
            help='Number of parallel processes to run.'
        )
        parser.add_argument(
            '--force-upload',
            action='store_true',
            help='Upload the final archive even if there are still unprocessed pages'
        )
    def handle(self, **options):
        """Re-run export processing for the unprocessed pages in an archive.

        Steps: optionally download the latest archive, extract its
        unprocessed pages, process them in parallel, recompile a final ZIP,
        and upload it unless errors remain (or --force-upload is given).
        """
        # Assertions must be disabled; the export pipeline relies on -O.
        if __debug__:
            raise CommandError("You should run this with 'python -O'")
        export_id = options.pop('export_id')
        export_archive_path = options.pop('export_path')
        processes = options.pop('processes')
        force_upload = options.pop('force_upload')
        export_instance = get_properly_wrapped_export_instance(export_id)
        if not export_archive_path or not os.path.exists(export_archive_path):
            confirm = input(
                """
            No export archive provided. Do you want to download the latest one? [y/N]
            """
            )
            if not confirm == "y":
                raise CommandError("Export path missing: {}".format(export_archive_path))
            export_archive_path = self._download_export(export_instance)
        extract_to = tempfile.mkdtemp()
        total_docs, unprocessed_pages = self._get_unprocessed_pages(export_archive_path, extract_to)
        print('{} pages still to process'.format(len(unprocessed_pages)))
        exporter = MultiprocessExporter(export_instance, total_docs, processes)
        error_pages, successful_pages = self._process_pages(
            exporter, unprocessed_pages
        )
        final_path = self.compile_final_zip(
            error_pages, export_archive_path, export_instance, successful_pages
        )
        if force_upload or not error_pages:
            print('Uploading final archive', '(forced)' if force_upload and error_pages else '')
            # clean=True only when every page succeeded.
            exporter.upload(final_path, clean=not error_pages)
        else:
            print(self.style.ERROR(
                'Not all pages processed successfully.\n'
                'You can re-run the command on the final archive to try again: {}\n'
                'NOTE: final archive not uploaded. '
                'Use --force-upload to upload even with errors'.format(final_path))
            )
        shutil.rmtree(extract_to)
        # NOTE(review): this success message prints even when the archive was
        # NOT uploaded (error pages present without --force-upload).
        self.stdout.write(self.style.SUCCESS('Rebuild Complete and payload uploaded'))
def _download_export(self, export_instance):
export_archive_path = '{}_{}.zip'.format(
safe_filename(export_instance.name or 'Export'),
datetime.utcnow().isoformat()
)
payload = export_instance.get_payload(stream=True)
with open(export_archive_path, 'wb') as download:
shutil.copyfileobj(payload, download)
return export_archive_path
    def compile_final_zip(self, error_pages, export_archive_path, export_instance, successful_pages):
        """Build the output archive: freshly processed pages plus, from the
        original archive, the export pages and any still-unprocessed raw pages.

        Clean runs go to a temp file (for upload); runs with errors are saved
        next to the input as INCOMPLETE_* so they can be re-processed later.
        """
        final_dir, orig_name = os.path.split(export_archive_path)
        if not error_pages:
            fd, final_path = tempfile.mkstemp()
        else:
            final_name = 'INCOMPLETE_{}_{}.zip'.format(orig_name, datetime.utcnow().isoformat())
            final_path = os.path.join(final_dir, final_name)
        print('Recompiling export')
        export_name = safe_filename(export_instance.name or 'Export')
        with zipfile.ZipFile(final_path, mode='w', compression=zipfile.ZIP_DEFLATED, allowZip64=True) as final_zip:
            for result in successful_pages:
                print('  Adding page {} to final file'.format(result.page))
                _add_compressed_page_to_zip(final_zip, result.page, result.path)
            print('  Adding original export pages and unprocessed pages final file')
            def _include_member(member):
                # add original export pages and any raw data that we weren't able to process
                add = member.startswith(export_name) or member in error_pages
                if add:
                    print('    {}'.format(member))
                return add
            _copy_files_from_zip_to_zip(final_zip, export_archive_path, _include_member)
        return final_path
    def _process_pages(self, exporter, unprocessed_pages):
        """Feed every unprocessed page to the exporter (no retries) and split
        the results into successes and the archive paths of failed pages."""
        exporter.start()
        for page_path, page_number, doc_count in unprocessed_pages:
            exporter.process_page(RetryResult(page_number, page_path, doc_count, 0))
        export_results = exporter.get_results(retries_per_page=0)
        successful_pages = [res for res in export_results if res.success]
        # Failed pages keyed by their member path inside the archive, so
        # compile_final_zip() can carry the raw data forward.
        error_pages = {
            '{}/page_{}.json.gz'.format(UNPROCESSED_PAGES_DIR, res.page)
            for res in export_results if not res.success
        }
        return error_pages, successful_pages
    def _get_unprocessed_pages(self, export_archive_path, extract_to_path):
        """Extract the archive's unprocessed pages and return
        ``(total_docs, [(page_path, page_number, doc_count), ...])``.

        Raises CommandError when the archive has no unprocessed pages or a
        page file does not match the expected ``page_<n>.json.gz`` name.
        """
        print('Extracting unprocessed pages')
        with zipfile.ZipFile(export_archive_path, 'r') as zipref:
            for member in zipref.namelist():
                if member.startswith(UNPROCESSED_PAGES_DIR):
                    zipref.extract(member, extract_to_path)
        unprocessed_path = os.path.join(extract_to_path, UNPROCESSED_PAGES_DIR)
        if not os.path.exists(unprocessed_path):
            shutil.rmtree(extract_to_path)
            raise CommandError('Export has no unprocessed pages.')
        unprocessed_pages = []
        total_docs = 0
        for page_filename in os.listdir(unprocessed_path):
            page_path = os.path.join(unprocessed_path, page_filename)
            page_search = re.search(r'page_(\d+).json.gz', page_filename)
            if page_search:
                page_number = int(page_search.group(1))
            else:
                raise CommandError('Unexpected page filename: {}'.format(page_filename))
            # One JSON doc per line, so `wc -l` gives the doc count.
            doc_count = int(sh.wc('-l', page_path).split(' ')[0])
            total_docs += doc_count
            unprocessed_pages.append((page_path, page_number, doc_count))
        if not unprocessed_pages:
            raise CommandError('No pages left to process')
        return total_docs, unprocessed_pages
def _copy_files_from_zip_to_zip(to_zip, from_zip_path, include_filter=None):
    """Copy members from the archive at *from_zip_path* into the open
    *to_zip*, keeping only names accepted by *include_filter* (all names
    when the filter is None)."""
    with zipfile.ZipFile(from_zip_path, 'r') as source:
        for name in source.namelist():
            if include_filter is not None and not include_filter(name):
                continue
            to_zip.writestr(name, source.read(name))
| 6,507 | 221 | 46 |
d2ac279ca965701a840e4ddb8b46f6fec912175d | 14,317 | py | Python | openpifpaf/plugins/apollocar3d/apollo_kp.py | anhvth/openpifpaf | a88edd744b50f76dfdae9dbb180d4a403cbfd060 | [
"CC-BY-2.0",
"CC-BY-4.0"
] | null | null | null | openpifpaf/plugins/apollocar3d/apollo_kp.py | anhvth/openpifpaf | a88edd744b50f76dfdae9dbb180d4a403cbfd060 | [
"CC-BY-2.0",
"CC-BY-4.0"
] | null | null | null | openpifpaf/plugins/apollocar3d/apollo_kp.py | anhvth/openpifpaf | a88edd744b50f76dfdae9dbb180d4a403cbfd060 | [
"CC-BY-2.0",
"CC-BY-4.0"
] | null | null | null | """
Interface for custom data.
This module handles datasets and is the class that you need to inherit from for your custom dataset.
This class gives you all the handles so that you can train with a new –dataset=mydataset.
The particular configuration of keypoints and skeleton is specified in the headmeta instances
"""
import argparse
import torch
import numpy as np
try:
from pycocotools.coco import COCO
except ImportError:
COCO = None
from openpifpaf.datasets import DataModule
from openpifpaf import encoder, headmeta, metric, transforms
from openpifpaf.datasets import collate_images_anns_meta, collate_images_targets_meta
from openpifpaf.plugins.coco import CocoDataset as CocoLoader
from .constants import get_constants, training_weights_local_centrality
from .metrics import MeanPixelError
class ApolloKp(DataModule):
"""
DataModule for the Apollocar3d Dataset.
"""
train_annotations = 'data-apollocar3d/annotations/apollo_keypoints_66_train.json'
val_annotations = 'data-apollocar3d/annotations/apollo_keypoints_66_val.json'
eval_annotations = val_annotations
train_image_dir = 'data-apollocar3d/images/train/'
val_image_dir = 'data-apollocar3d/images/val/'
eval_image_dir = val_image_dir
n_images = None
square_edge = 513
extended_scale = False
orientation_invariant = 0.0
blur = 0.0
augmentation = True
rescale_images = 1.0
upsample_stride = 1
min_kp_anns = 1
b_min = 1 # 1 pixel
eval_annotation_filter = True
eval_long_edge = 0 # set to zero to deactivate rescaling
eval_orientation_invariant = 0.0
eval_extended_scale = False
@classmethod
@classmethod
@classmethod
# TODO: make sure that 24kp flag is activated when evaluating a 24kp model
| 42.993994 | 100 | 0.607809 | """
Interface for custom data.
This module handles datasets and is the class that you need to inherit from for your custom dataset.
This class gives you all the handles so that you can train with a new –dataset=mydataset.
The particular configuration of keypoints and skeleton is specified in the headmeta instances
"""
import argparse
import torch
import numpy as np
try:
from pycocotools.coco import COCO
except ImportError:
COCO = None
from openpifpaf.datasets import DataModule
from openpifpaf import encoder, headmeta, metric, transforms
from openpifpaf.datasets import collate_images_anns_meta, collate_images_targets_meta
from openpifpaf.plugins.coco import CocoDataset as CocoLoader
from .constants import get_constants, training_weights_local_centrality
from .metrics import MeanPixelError
class ApolloKp(DataModule):
    """
    DataModule for the Apollocar3d Dataset.

    The class attributes below are defaults: ``cli()`` registers matching
    command-line flags and ``configure()`` overwrites them from the parsed
    arguments before instances are created.
    """
    # Annotation files and image directories (eval reuses the val split).
    train_annotations = 'data-apollocar3d/annotations/apollo_keypoints_66_train.json'
    val_annotations = 'data-apollocar3d/annotations/apollo_keypoints_66_val.json'
    eval_annotations = val_annotations
    train_image_dir = 'data-apollocar3d/images/train/'
    val_image_dir = 'data-apollocar3d/images/val/'
    eval_image_dir = val_image_dir
    # Training-time preprocessing / augmentation defaults.
    n_images = None
    square_edge = 513
    extended_scale = False
    orientation_invariant = 0.0
    blur = 0.0
    augmentation = True
    rescale_images = 1.0
    upsample_stride = 1
    min_kp_anns = 1
    b_min = 1 # 1 pixel
    # Evaluation defaults.
    eval_annotation_filter = True
    eval_long_edge = 0 # set to zero to deactivate rescaling
    eval_orientation_invariant = 0.0
    eval_extended_scale = False
    def __init__(self):
        """Build the CIF/CAF head metas from the (already configured) class attributes."""
        super().__init__()
        if self.weights is not None:
            # Per-bone (CAF) weight = max of its two endpoint keypoint weights,
            # renormalized so the weights average to 1.
            caf_weights = []
            for bone in self.CAR_SKELETON:
                caf_weights.append(max(self.weights[bone[0] - 1],
                                       self.weights[bone[1] - 1]))
            w_np = np.array(caf_weights)
            caf_weights = list(w_np / np.sum(w_np) * len(caf_weights))
        else:
            caf_weights = None
        cif = headmeta.Cif('cif', 'apollo',
                           keypoints=self.CAR_KEYPOINTS,
                           sigmas=self.CAR_SIGMAS,
                           pose=self.CAR_POSE,
                           draw_skeleton=self.CAR_SKELETON,
                           score_weights=self.CAR_SCORE_WEIGHTS,
                           training_weights=self.weights)
        caf = headmeta.Caf('caf', 'apollo',
                           keypoints=self.CAR_KEYPOINTS,
                           sigmas=self.CAR_SIGMAS,
                           pose=self.CAR_POSE,
                           skeleton=self.CAR_SKELETON,
                           training_weights=caf_weights)
        cif.upsample_stride = self.upsample_stride
        caf.upsample_stride = self.upsample_stride
        self.head_metas = [cif, caf]
    @classmethod
    def cli(cls, parser: argparse.ArgumentParser):
        """Register all ``--apollo-*`` command-line flags on *parser*.

        Defaults come from the class attributes; configure() later copies
        the parsed values back onto the class.
        """
        group = parser.add_argument_group('data module Apollo')
        group.add_argument('--apollo-train-annotations',
                           default=cls.train_annotations)
        group.add_argument('--apollo-val-annotations',
                           default=cls.val_annotations)
        group.add_argument('--apollo-train-image-dir',
                           default=cls.train_image_dir)
        group.add_argument('--apollo-val-image-dir',
                           default=cls.val_image_dir)
        group.add_argument('--apollo-square-edge',
                           default=cls.square_edge, type=int,
                           help='square edge of input images')
        assert not cls.extended_scale
        group.add_argument('--apollo-extended-scale',
                           default=False, action='store_true',
                           help='augment with an extended scale range')
        group.add_argument('--apollo-orientation-invariant',
                           default=cls.orientation_invariant, type=float,
                           help='augment with random orientations')
        group.add_argument('--apollo-blur',
                           default=cls.blur, type=float,
                           help='augment with blur')
        assert cls.augmentation
        group.add_argument('--apollo-no-augmentation',
                           dest='apollo_augmentation',
                           default=True, action='store_false',
                           help='do not apply data augmentation')
        group.add_argument('--apollo-rescale-images',
                           default=cls.rescale_images, type=float,
                           help='overall rescale factor for images')
        group.add_argument('--apollo-upsample',
                           default=cls.upsample_stride, type=int,
                           help='head upsample stride')
        group.add_argument('--apollo-min-kp-anns',
                           default=cls.min_kp_anns, type=int,
                           help='filter images with fewer keypoint annotations')
        group.add_argument('--apollo-bmin',
                           default=cls.b_min, type=int,
                           help='b minimum in pixels')
        group.add_argument('--apollo-apply-local-centrality-weights',
                           dest='apollo_apply_local_centrality',
                           default=False, action='store_true',
                           help='Weigh the CIF and CAF head during training.')
        # evaluation
        assert cls.eval_annotation_filter
        group.add_argument('--apollo-no-eval-annotation-filter',
                           dest='apollo_eval_annotation_filter',
                           default=True, action='store_false')
        group.add_argument('--apollo-eval-long-edge', default=cls.eval_long_edge, type=int,
                           help='set to zero to deactivate rescaling')
        assert not cls.eval_extended_scale
        group.add_argument('--apollo-eval-extended-scale', default=False, action='store_true')
        group.add_argument('--apollo-eval-orientation-invariant',
                           default=cls.eval_orientation_invariant, type=float)
        group.add_argument('--apollo-use-24-kps', default=False, action='store_true',
                           help=('The ApolloCar3D dataset can '
                                 'be trained with 24 or 66 kps. If you want to train a model '
                                 'with 24 kps activate this flag. Change the annotations '
                                 'path to the json files with 24 kps.'))
    @classmethod
    def configure(cls, args: argparse.Namespace):
        """Copy parsed CLI values onto the class and load the keypoint
        constants (24- or 66-keypoint variant)."""
        # extract global information
        cls.debug = args.debug
        cls.pin_memory = args.pin_memory
        # Apollo specific
        cls.train_annotations = args.apollo_train_annotations
        cls.val_annotations = args.apollo_val_annotations
        cls.eval_annotations = cls.val_annotations
        cls.train_image_dir = args.apollo_train_image_dir
        cls.val_image_dir = args.apollo_val_image_dir
        cls.eval_image_dir = cls.val_image_dir
        cls.square_edge = args.apollo_square_edge
        cls.extended_scale = args.apollo_extended_scale
        cls.orientation_invariant = args.apollo_orientation_invariant
        cls.blur = args.apollo_blur
        cls.augmentation = args.apollo_augmentation # loaded by the dest name
        cls.rescale_images = args.apollo_rescale_images
        cls.upsample_stride = args.apollo_upsample
        cls.min_kp_anns = args.apollo_min_kp_anns
        cls.b_min = args.apollo_bmin
        # Keypoint layout: skeleton, flip map, sigmas, poses, categories and
        # score weights all come from the chosen constants set.
        if args.apollo_use_24_kps:
            (cls.CAR_KEYPOINTS, cls.CAR_SKELETON, cls.HFLIP, cls.CAR_SIGMAS, cls.CAR_POSE,
             cls.CAR_CATEGORIES, cls.CAR_SCORE_WEIGHTS) = get_constants(24)
        else:
            (cls.CAR_KEYPOINTS, cls.CAR_SKELETON, cls.HFLIP, cls.CAR_SIGMAS, cls.CAR_POSE,
             cls.CAR_CATEGORIES, cls.CAR_SCORE_WEIGHTS) = get_constants(66)
        # evaluation
        cls.eval_annotation_filter = args.apollo_eval_annotation_filter
        cls.eval_long_edge = args.apollo_eval_long_edge
        cls.eval_orientation_invariant = args.apollo_eval_orientation_invariant
        cls.eval_extended_scale = args.apollo_eval_extended_scale
        # Local-centrality weights are only defined for the 66-kp layout.
        if args.apollo_apply_local_centrality:
            if args.apollo_use_24_kps:
                raise Exception("Applying local centrality weights only works with 66 kps.")
            cls.weights = training_weights_local_centrality
        else:
            cls.weights = None
    def _preprocess(self):
        """Build the training preprocessing pipeline (ending in the CIF/CAF
        encoders); a plain rescale/pad pipeline when augmentation is off."""
        encoders = (encoder.Cif(self.head_metas[0], bmin=self.b_min),
                    encoder.Caf(self.head_metas[1], bmin=self.b_min))
        if not self.augmentation:
            return transforms.Compose([
                transforms.NormalizeAnnotations(),
                transforms.RescaleAbsolute(self.square_edge),
                transforms.CenterPad(self.square_edge),
                transforms.EVAL_TRANSFORM,
                transforms.Encoders(encoders),
            ])
        # Extended scale widens the random rescale range (0.2-2.0 vs 0.33-1.33).
        if self.extended_scale:
            rescale_t = transforms.RescaleRelative(
                scale_range=(0.2 * self.rescale_images,
                             2.0 * self.rescale_images),
                power_law=True, stretch_range=(0.75, 1.33))
        else:
            rescale_t = transforms.RescaleRelative(
                scale_range=(0.33 * self.rescale_images,
                             1.33 * self.rescale_images),
                power_law=True, stretch_range=(0.75, 1.33))
        return transforms.Compose([
            transforms.NormalizeAnnotations(),
            # transforms.AnnotationJitter(),
            transforms.RandomApply(transforms.HFlip(self.CAR_KEYPOINTS, self.HFLIP), 0.5),
            rescale_t,
            transforms.RandomApply(transforms.Blur(), self.blur),
            transforms.RandomChoice(
                [transforms.RotateBy90(),
                 transforms.RotateUniform(30.0)],
                [self.orientation_invariant, 0.2],
            ),
            transforms.Crop(self.square_edge, use_area_of_interest=True),
            transforms.CenterPad(self.square_edge),
            transforms.MinSize(min_side=32.0),
            transforms.TRAIN_TRANSFORM,
            transforms.Encoders(encoders),
        ])
def train_loader(self):
train_data = CocoLoader(
image_dir=self.train_image_dir,
ann_file=self.train_annotations,
preprocess=self._preprocess(),
annotation_filter=True,
min_kp_anns=self.min_kp_anns,
category_ids=[1],
)
return torch.utils.data.DataLoader(
train_data, batch_size=self.batch_size, shuffle=not self.debug,
pin_memory=self.pin_memory, num_workers=self.loader_workers, drop_last=True,
collate_fn=collate_images_targets_meta)
def val_loader(self):
val_data = CocoLoader(
image_dir=self.val_image_dir,
ann_file=self.val_annotations,
preprocess=self._preprocess(),
annotation_filter=True,
min_kp_anns=self.min_kp_anns,
category_ids=[1],
)
return torch.utils.data.DataLoader(
val_data, batch_size=self.batch_size, shuffle=False,
pin_memory=self.pin_memory, num_workers=self.loader_workers, drop_last=True,
collate_fn=collate_images_targets_meta)
    @classmethod
    def common_eval_preprocess(cls):
        """Assemble the shared evaluation transforms: normalize, optional
        rescale, padding, and optional deterministic rotation."""
        rescale_t = None
        if cls.eval_extended_scale:
            assert cls.eval_long_edge
            # Deterministically alternate between full and half long edge.
            rescale_t = [
                transforms.DeterministicEqualChoice([
                    transforms.RescaleAbsolute(cls.eval_long_edge),
                    transforms.RescaleAbsolute((cls.eval_long_edge - 1) // 2 + 1),
                ], salt=1)
            ]
        elif cls.eval_long_edge:
            rescale_t = transforms.RescaleAbsolute(cls.eval_long_edge)
        # Tight padding only works for batch size 1; larger batches need a
        # fixed edge to pad to.
        if cls.batch_size == 1:
            padding_t = transforms.CenterPadTight(16)
        else:
            assert cls.eval_long_edge
            padding_t = transforms.CenterPad(cls.eval_long_edge)
        orientation_t = None
        if cls.eval_orientation_invariant:
            orientation_t = transforms.DeterministicEqualChoice([
                None,
                transforms.RotateBy90(fixed_angle=90),
                transforms.RotateBy90(fixed_angle=180),
                transforms.RotateBy90(fixed_angle=270),
            ], salt=3)
        return [
            transforms.NormalizeAnnotations(),
            rescale_t,
            padding_t,
            orientation_t,
        ]
    def _eval_preprocess(self):
        """Evaluation pipeline: shared transforms plus conversion of ground
        truth to keypoint/crowd annotations."""
        return transforms.Compose([
            *self.common_eval_preprocess(),
            transforms.ToAnnotations([
                transforms.ToKpAnnotations(
                    self.CAR_CATEGORIES,
                    keypoints_by_category={1: self.head_metas[0].keypoints},
                    skeleton_by_category={1: self.head_metas[1].skeleton},
                ),
                transforms.ToCrowdAnnotations(self.CAR_CATEGORIES),
            ]),
            transforms.EVAL_TRANSFORM,
        ])
def eval_loader(self):
eval_data = CocoLoader(
image_dir=self.eval_image_dir,
ann_file=self.eval_annotations,
preprocess=self._eval_preprocess(),
annotation_filter=self.eval_annotation_filter,
min_kp_anns=self.min_kp_anns if self.eval_annotation_filter else 0,
category_ids=[1] if self.eval_annotation_filter else [],
)
return torch.utils.data.DataLoader(
eval_data, batch_size=self.batch_size, shuffle=False,
pin_memory=self.pin_memory, num_workers=self.loader_workers, drop_last=False,
collate_fn=collate_images_anns_meta)
    # TODO: make sure that 24kp flag is activated when evaluating a 24kp model
    def metrics(self):
        """Return the evaluation metrics: COCO keypoint OKS plus mean pixel error."""
        return [metric.Coco(
            COCO(self.eval_annotations),
            max_per_image=20,
            category_ids=[1],
            iou_type='keypoints',
            keypoint_oks_sigmas=self.CAR_SIGMAS
        ), MeanPixelError()]
| 12,269 | 0 | 266 |
d5e96b96dbd80b64de082352f330662845ad51bf | 2,194 | py | Python | win32comext/axscript/client/pydumper.py | zhanqxun/cv_fish | f78f4f5bdafb070c179efee8b9276719dfaef1d7 | [
"Apache-2.0"
] | 1 | 2017-08-07T14:52:02.000Z | 2017-08-07T14:52:02.000Z | Lib/site-packages/win32comext/axscript/client/pydumper.py | adzhou/Python27 | a7113b69d54a04cc780143241c2f1fe81939ad3a | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/win32comext/axscript/client/pydumper.py | adzhou/Python27 | a7113b69d54a04cc780143241c2f1fe81939ad3a | [
"bzip2-1.0.6"
] | 4 | 2021-02-11T03:51:39.000Z | 2021-02-12T05:10:43.000Z | # pydumper.py
#
# This is being worked on - it does not yet work at all, in any way
# shape or form :-)
#
# A new script engine, derived from the standard scripting engine,
# which dumps information.
# This generally can be used to grab all sorts of useful details about
# an engine - expose bugs in it or Python, dump the object model, etc.
# As it is derived from the standard engine, it fully supports Python
# as a scripting language - meaning the dumps produced can be quite dynamic,
# and based on the script code you execute.
import pyscript
from win32com.axscript import axscript
from pyscript import RaiseAssert, trace, Exception, SCRIPTTEXT_FORCEEXECUTION
PyDump_CLSID = '{ac527e60-c693-11d0-9c25-00aa00125a98}'
if __name__=='__main__':
Register()
| 30.901408 | 97 | 0.709663 | # pydumper.py
#
# This is being worked on - it does not yet work at all, in any way
# shape or form :-)
#
# A new script engine, derived from the standard scripting engine,
# which dumps information.
# This generally can be used to grab all sorts of useful details about
# an engine - expose bugs in it or Python, dump the object model, etc.
# As it is derived from the standard engine, it fully supports Python
# as a scripting language - meaning the dumps produced can be quite dynamic,
# and based on the script code you execute.
import pyscript
from win32com.axscript import axscript
from pyscript import RaiseAssert, trace, Exception, SCRIPTTEXT_FORCEEXECUTION
PyDump_CLSID = '{ac527e60-c693-11d0-9c25-00aa00125a98}'
class AXScriptAttribute(pyscript.AXScriptAttribute):
    # Placeholder subclass: currently identical to pyscript.AXScriptAttribute.
    pass
class NamedScriptAttribute(pyscript.NamedScriptAttribute):
    # Placeholder subclass: currently identical to pyscript.NamedScriptAttribute.
    pass
class PyScript(pyscript.PyScript):
    # Placeholder engine class: currently identical to pyscript.PyScript.
    pass
def Register():
import sys
if '-d' in sys.argv:
dispatcher = "DispatcherWin32trace"
debug_desc = " ("+dispatcher+")"
debug_option = "Yes"
else:
dispatcher = None
debug_desc = ""
debug_option = ""
categories = [axscript.CATID_ActiveScript,axscript.CATID_ActiveScriptParse]
clsid = PyDump_CLSID
lcid = 0x0409 # // english
policy = None # "win32com.axscript.client.axspolicy.AXScriptPolicy"
print "Registering COM server%s..." % debug_desc
from win32com.server.register import RegisterServer
languageName = "PyDump"
verProgId = "Python.Dumper.1"
RegisterServer(clsid = clsid, pythonInstString = "win32com.axscript.client.pyscript.PyDumper",
className = "Python Debugging/Dumping ActiveX Scripting Engine",
progID = languageName, verProgID = verProgId,
catids = categories,
policy=policy, dispatcher = dispatcher)
CreateRegKey(languageName + "\\OLEScript")
# Basic Registration for wsh.
win32com.server.register._set_string(".pysDump", "pysDumpFile")
win32com.server.register._set_string("pysDumpFile\\ScriptEngine", languageName)
print "Dumping Server registered."
if __name__=='__main__':
Register()
| 1,195 | 102 | 100 |
ee77d4c66934df1360d5087f5f5f89a2e5c47844 | 521 | py | Python | kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/PythonScripts/obtainList.py | sai6kiran/TwitterBotFarms | cf6bfddda9fac1e27477186fd4f4b086ac711781 | [
"MIT"
] | null | null | null | kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/PythonScripts/obtainList.py | sai6kiran/TwitterBotFarms | cf6bfddda9fac1e27477186fd4f4b086ac711781 | [
"MIT"
] | null | null | null | kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/PythonScripts/obtainList.py | sai6kiran/TwitterBotFarms | cf6bfddda9fac1e27477186fd4f4b086ac711781 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import csv
#################Obtain the List of All bpts that sent Political Tweets [i.e. Political Bots]:#######################
dfn = pd.read_csv("/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/datasets/CoreBotTweetsCombinedEN.csv", sep=",", skiprows=[0], header=None, usecols=[1], names=["userid"])
column_values = dfn[["userid"]].values.ravel()
unique_values = pd.unique(column_values)
pd.DataFrame(unique_values).to_csv("ListIDS.csv", index=False)
| 52.1 | 199 | 0.710173 | import pandas as pd
import numpy as np
import csv
#################Obtain the List of All bpts that sent Political Tweets [i.e. Political Bots]:#######################
dfn = pd.read_csv("/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/datasets/CoreBotTweetsCombinedEN.csv", sep=",", skiprows=[0], header=None, usecols=[1], names=["userid"])
column_values = dfn[["userid"]].values.ravel()
unique_values = pd.unique(column_values)
pd.DataFrame(unique_values).to_csv("ListIDS.csv", index=False)
| 0 | 0 | 0 |