| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
"""
The Purpose of this Program is to automate the task of Ventilation Surveying in Underground Mining Engineering.
This program can accept input of tabulated values (in csv file) for Leapfrogging and Roving-Base Altimeter
Indirect Method Surveys and provide data analysis and calculation output to a csv file.
Also provides tools (Psychrometric Properties of Air, Head Loss around a Circuit, Specific Weight/Humidity Calculations)
for Mine Ventilation Engineers.
pg 206, Mine Ventilation and Air Conditioning
Measured Values:
-Air Velocities (Vane Anemometer: v)
-Absolute and Differential Pressures or Heads (Manometer/Barometer/Altimeter: pb)
-Dry- and Wet-Bulb Temperatures (Thermometers: Tw, Td)
-Airway Dimensions (A)
Determine:
-Air Quantities
-Pressure Losses
-Air Specific Weights/Humidities
-Airway Resistance
Copyright Joaquin Roibal, August 2016, Latest Revision: 10/3/2016
All Rights Reserved
"""
def ManometerDirectMethod(TopList, BottomList, Manometer_Reading):
"""
A Manometer is used to directly measure pressure difference (Direct Method).
This function will perform the data analysis for Manometer Surveying given measured input values.
Hl12 = (Hs1-Hs2)+(Hz1-Hz2) #Head Loss (1->2) components
Equations and Values from Example 6.3, page 208, Mine Ventilation and Air Conditioning Ramani and Wang
"""
p3 = (BottomList[1] + (Manometer_Reading/27.69)) #Page 210, Pressure 3 = p2 + manometer reading (converted to psi)
Ws = (TopList[-1]+BottomList[-1])/2 #Mean Specific Weight of Air in Hose
Wh = (TopList[-1]+0.0705)/2 #Mean Specific Weight of Air in Shaft
#Hl13 = (144*(Top[1]-p3)/5.2)+(Wh*(Top[0]-Bottom[0])/5.2) #Page 209, Head Loss 1->3
#Hl12 = (144*(Top[1]-Bottom[1])/5.2)+(Ws*(Top[0]-Bottom[0])/5.2) #Page 209, Head Loss 1 -> 2
Hl12 = round((144/5.2)*(p3-BottomList[1])+(1/5.2)*(TopList[0]-BottomList[0])*(Ws-Wh), 3)
return "Manometer Survey Results:\nWs: %s, Wh: %s, Head Loss in. Water: %s" %(Ws, Wh, Hl12)
def CalculateFrictionFactor(head_loss_f, length, diameter, quantity, spec_weight_air=0.075):
"""
The Purpose of this function is to calculate the friction factor of an airway/ducting given parameters
Utilizes Darcy-Weisbach equation and Atkinson Equation, 5.20 page 153 Mine Ventilation and Air Conditioning 3rd Edition
"""
duct_perimeter = 2 * math.pi * (diameter / 2)
area_opening = math.pi * (diameter / 2)**2
rubbing_surface = length * duct_perimeter
friction_factor_k = (spec_weight_air/0.075) * (head_loss_f*5.2*area_opening**3) / (rubbing_surface * quantity**2)
return friction_factor_k
def NaturalVentilation(ShaftATop, ShaftABottom, ShaftBTop, ShaftBBottom):
"""
The purpose of this function is to calculate the Natural Ventilation Head in Inches Water Gage.
Inputs required: Lists in the following format: [DryBulbTemp, WetBulbTemp, Elevation, Pressure (in Hg)]
Method 2, page 297 in Ramani "Mine Ventilation And Air Conditioning" is used in commented example
Equation used is from ME 440: Mine Ventilation with Dr. Bhattacharyya, ignoring vapor pressure
:param ShaftATop:
:param ShaftABottom:
:param ShaftBTop:
:param ShaftBBottom:
:return:
"""
"""
This Section is Commented Out Because NOT WORKING: Alternative Method Below
spec_weight_air_shaft_a_top = psychrometricPropAir(ShaftATop[0], ShaftATop[1], ShaftATop[3])
spec_weight_air_shaft_a_bottom = psychrometricPropAir(ShaftABottom[0], ShaftABottom[1], ShaftABottom[3])
spec_weight_air_avg_upcast = (spec_weight_air_shaft_a_top[8] + spec_weight_air_shaft_a_bottom[8])/2
spec_weight_air_shaft_b_top = psychrometricPropAir(ShaftBTop[0], ShaftBTop[1], ShaftBTop[3])
spec_weight_air_shaft_b_bottom = psychrometricPropAir(ShaftBBottom[0], ShaftBBottom[1], ShaftBBottom[3])
spec_weight_air_avg_downcast = (spec_weight_air_shaft_b_top[8] + spec_weight_air_shaft_b_bottom[8])/2
L = ShaftBTop[2]-ShaftATop[2]
print(L)
print("Specific Weight Air Top A: ", spec_weight_air_shaft_a_top[9])
print("Specific Weight Air Bottom A: ", spec_weight_air_shaft_a_bottom[9])
print("Avg Spec Weight Upcast: ", spec_weight_air_avg_upcast)
print("Avg Spec Weight Downcast: ", spec_weight_air_avg_downcast)
inches_water_gage = (L/5.2)*(spec_weight_air_avg_downcast-spec_weight_air_avg_upcast)
return inches_water_gage
"""
#The Following Method Utilizes the equation from ME 440: Mine Ventilation by Dr. Bhattacharyya
#NOTE: IGNORES VAPOR PRESSURE
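#The 1.327 constant follows from the perfect gas law, w = 144*p/(R*T): with p in in. Hg
#(1 in. Hg ~ 0.491 psi) and R = 53.35 ft*lb/(lb*degR), 144*0.491/53.35 ~ 1.325,
#which the course formula takes as 1.327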
density_air_shaft_a_top = round((1.327/(460+ShaftATop[0]))*ShaftATop[-1], 6)
print("Density Air Shaft A Top: ", density_air_shaft_a_top)
density_air_shaft_a_bottom = round((1.327/(460+ShaftABottom[0])*ShaftABottom[-1]), 6)
print("Density Air Shaft A Bottom: ", density_air_shaft_a_bottom)
density_air_shaft_b_top = round((1.327/(460+ShaftBTop[0])*ShaftBTop[-1]), 6)
print("Density Air Shaft B Top: ", density_air_shaft_b_top)
density_air_shaft_b_bottom = round((1.327/(460+ShaftBBottom[0])*ShaftBBottom[-1]), 6)
print("Density Air Shaft B Bottom: ", density_air_shaft_b_bottom)
density_avg_shaft_a = (density_air_shaft_a_bottom + density_air_shaft_a_top)/2
density_avg_shaft_b = (density_air_shaft_b_bottom + density_air_shaft_b_top)/2
density_diff = round(abs(density_avg_shaft_a - density_avg_shaft_b), 6) #Difference in average air density between shafts (lb/ft^3)
elevation_diff = (ShaftBTop[-2]-ShaftABottom[-2])
print("Density Difference: ", density_diff)
print("Elevation Difference: ", elevation_diff)
inches_water_gage = round((density_diff*elevation_diff)/5.2, 4)
return inches_water_gage
def psychrometricPropAir(td, tw, pb):
"""
The purpose of this function is to accept input of measured values (wet-bulb, dry-bulb temp, barometric pressure)
to calculate the Psychrometric properties of Air (Spec Weight) and return a list of calculated values:
[td, Td, tw, pb, ps, ps_prime, pv, phi, W, Ws, mu, pa, v, w, h]. Will be used in other functions to calculate head loss, etc.
Example Values and Equations from Page 13, Mine Ventilation And Air Conditioning Textbook by Ramani and Wang
:param td: Dry Bulb Temperature
:param tw: Wet Bulb Temperature
:param pb: Pressure (in Hg)
:return:
"""
Td = (td + 459.67) #Convert Temperature from Fahrenheit to Rankine
val_list = [td, Td, tw, pb] #List of Values to be returned by function
#Final Format for List: [td, Td, tw, pb, ps, ps_prime, pv, phi, W, Ws, mu, pa, v, w, h]
m = 28.97 #Molecular Weight
s = 1 #Specific Gravity
R = 53.35 #ft*lb/lb mass*Degree Rankine, Gas Constant
w = 0.0750 #lb/ft^3, Specific Weight at Standard Conditions
standard_pb = 29.92 #in. Hg, Standard Barometric Pressure at Sea Level
cp = 0.2403 #Btu/lb*degreeF, Specific Heat at Constant Pressure
cv = 0.1714 #Btu/lb*degreeF, Specific Heat at Constant Volume
gamma = 1.402 #Ratio of Spec heats at constant pressure and volume for diatomic gas
#Calculate Saturation Vapor Pressures: (Page 15, Mine Ventilation and Air Conditioning)
ps = 0.18079*math.exp((17.27*td-552.64)/(td+395.14)) #in. Hg, Saturation Vapor Pressure, Dry Bulb Temp, eq 2.2
val_list.append(ps)
ps_prime = 0.18079*math.exp((17.27*tw-552.64)/(tw+395.14)) #in. Hg, Saturation Vapor Pressure, Wet Bulb Temp
val_list.append(ps_prime)
pv = ps_prime - ((pb-ps_prime)*(td-tw))/(2800-1.3*tw) #in. Hg, Partial Pressure of Water Vapor in Air, eq. 2.3
val_list.append(pv)
phi = pv/ps*100 #Relative Humidity, eq. 2.4
val_list.append(phi)
W = 0.622*pv/(pb-pv) #lb/lb dry air, Specific Humidity, Eq. 2.5
val_list.append(W)
W_grain = W*7000 #grains/lb dry air
Ws = 0.622*ps/(pb-ps) #lb/lb dry air, Saturation Specific Humidity at Dry-Bulb Temp, Eq. 2.5
val_list.append(Ws)
Ws_grain = Ws*7000 #grains/lb dry air
mu = W/Ws*100 #Degree of Saturation, eq 2.6
val_list.append(mu)
pa = pb-pv #in Hg, Pressure of Dry Air
val_list.append(pa)
v = (R*Td)/(pa*0.491*144) #ft**3/lb dry air, Specific Volume (volume per unit weight of dry air), eq. 2.7
val_list.append(v)
w = (1/v)*(W+1) #lb/ft**3, Specific Weight of Moist air or Mixture, eq. 2.8
val_list.append(w)
#w1 = (1.325/Td)*(pb-0.378*pv_prime) #Alt Method for Calculating Spec. Weight. pv_prime unknown (?), eq. 2.9
#h =ha+hv = cp*td+W*(hfg+hf) #Enthalpy, total heat content of Air
h = cp*td+W*(1060+0.45*td) #Btu/lb dry air, Enthalpy, eq. 2.10
val_list.append(h)
return val_list
def PressureSurveyCalc(pa2, pa1, pb2, pb1, pb, td, pv_prime, Z2, Z1, V2, V1):
"""
The Pressure Survey Calc function will perform the calculations required for Indirect Method of
Ventilation Survey (Leapfrogging Method, Roving-Base Altimeter), including:
-Head Loss
:return:
"""
w1, w2 = 0.0750, 0.0750 #Assumed Values for specific weight, Page 217
CF = 69 #Conversion Factor Assumed to be 69 ft of air column = 1 inch Water (Example 6.5)
DR = 1 #((1.325/(460+50))*pb) / ((1.325/(460+td))*(pb-0.378*pv_prime)) #Density Ratio, Eq 6.13 page 216
#HL21 = (H2 - H1) + (Ha2-Ha1) + (Hv2-Hv1) + (Hz2-Hz1) Head Loss Equation, Eq 6.11
HL21 = -((pa2-pa1)-(pb2-pb1)-(Z2-Z1)/DR)/CF + (V2**2-V1**2)/(4009**2) #Calculate Head Loss Based on Altimeter
#Units, Elevation and Temperature, Equation 6.12 Page 216
Hv21 = ((V2**2-V1**2)/(4009**2))*(((w1+w2)/2)/0.0750) #Velocity Head, Eq 6.14
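#The 4009 constant converts velocity in fpm to velocity head in in. water for standard air:
#Hv = (V/4009)**2, since 60*sqrt(2*g*5.2/0.0750) = 60*sqrt(2*32.17*5.2/0.075) ~ 4009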
return [HL21, Hv21, DR, CF]
def RovingBaseAltimeter(measurement_list):
"""
Roving Base Altimeter Function will accept inputted list of measured values and output a formatted table of
calculated results. Formatting Based on Example 6.6 page 222-223 in Mine Ventilation and Air Conditioning.
Input Format: Stat - Location - I/R - Elev (ft) - Time - RAR, ft - WetBulb T - DryBulb T - Velocity (fpm) - BAR, ft
Output Format: Stat - Phi - Hv in Water - Diff Elev - DR - Alt Diff - Base Corr. - Elev. Corr. - Head ft Air - (cont)
- Avg Alt Reading - Feet of Air per in Water - Delta Hs - Delta Hv - Delta Ht - Ht
:param measurement_list:
:return:
"""
Altimeter_Vent_Survey_Table = []
for measurement in measurement_list:
results_table = [] #Create Empty List which will be used to append calculated values in table format
air_prop_list = psychrometricPropAir(measurement[7], measurement[6], measurement[9]) #Calculate Psychrometric Prop (td, tw, pb per Input Format above)
results_table.append(measurement[0]) #Append Station Number
results_table.append(air_prop_list[7]) #Append Relative Humidity % (Phi) from Psychrometric Prop List
#[Hl, Hv, DR, CF] = PressureSurveyCalc() #Retrieve Velocity Head Values from PressureSurveyCalc
#results_table.append(Hv) #Append Velocity Head Values to Results Table
#results_table.append(Elev_Diff) #Append Elevation Difference to Results Table
#results_table.append(DR) #Append DR from Pressure Survey Calc function
#Altimeter_Diff = measurement[5]-Prev_Altimeter #Calculate Altimeter Difference from Previous Altimeter Value
#results_table.append(Altimeter_Diff) #Append Calculated Altimeter Difference Value to Results Table
#results_table.append(Base_Correct) #Append Base Correction
#results_table.append(Elev_Correct) #Append Elevation Correction
#results_table.append(HeadFtOfAir) #Append Head Feet of Air
#results_table.append(AvgAltReading)
#results_table.append(CF)
#results_table.append(DeltaHs) #All Head in in H20
#results_table.append(DeltaHv)
#results_table.append(DeltaHt)
#results_table.append(Ht)
Altimeter_Vent_Survey_Table.append(results_table) #Append Results Table as One Line in Altimeter Vent Survey Table
return Altimeter_Vent_Survey_Table
def LeapfroggingAltimeter(User_List ="Table63_AltimeterLeapfrogging.csv" ):
"""
Leap Frog Altimeter is a Function To Determine Values for a Leapfrogging Altimeter Ventilation Survey.
Accepts Input in csv format and returns a list of calculated values in format:
- Hl (Head Loss) - Hv (Head Loss due to Velocity) - DR (Density Ratio) - CF (Conversion Factor, ft air per in water)
Uses Example 6.5 page 220 as example to verify process
:param User_List:
:return:
"""
Vent_list_leapfrog = LoadVentDataLeapfrog(User_List, [])
print(Vent_list_leapfrog)
Results_List = [] #Create Empty List to return Results Table of Calculated Values for Leapfrog Altimeter Surv
for vent in Vent_list_leapfrog:
line_list = [] #Create Empty List for each vent point
line_list.append(str(vent[0]) + "-" + str(vent[1])) #Display Stations
line_list.append(int(vent[4])-int(vent[5])) #Calculate Altimeter Difference
line_list.append(int(vent[2])-int(vent[3])) #Calculate and Append Elevation Difference
[Hl, Hv, DR, CF] = PressureSurveyCalc(int(vent[4]), int(vent[5]), 0, 0, 0, 0, 0, int(vent[2]),
int(vent[3]), int(vent[-2]), int(vent[-1]))
line_list.append(Hl) #Calculate Head Loss
air_flow = ((int(vent[-1])+int(vent[-2]))/2)*((float(vent[-4])+float(vent[-3]))/2) #Calculate Air Quantity Q = (avg velocity)*(avg area)
line_list.append(air_flow)
Results_List.append(line_list)
print(Results_List)
return Results_List
def LoadVentDataLeapfrog(vent_data_csv, vent_data_list):
#This Function Will Load Vent Data from a CSV file and send to AddVentData Function to create a list of dicts
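#Assumed 14-column row layout, inferred from how LeapfroggingAltimeter indexes each row:
#[Station1, Station2, Elev1, Elev2, Altimeter1, Altimeter2, ..., Area1, Area2, Velocity1, Velocity2]
#(middle columns are read in but not used by the calculations)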
with open(vent_data_csv, 'r') as vent_file:
i = 0
for line in vent_file:
new_line = line.split(',')
if i<3: #Skip the first three lines of the CSV file (headings)
i += 1
else:
vent_data_list.append([new_line[0], new_line[1], new_line[2], new_line[3], new_line[4], new_line[5],
new_line[6], new_line[7], new_line[8], new_line[9], new_line[10], new_line[11],
new_line[12], new_line[13].strip("\n")]) #Create List of Formatted CSV Values
return vent_data_list
def HeadLossCircuit(List_Head):
"""
Head Loss Circuit is a function which calculates the head loss around a closed ventilation circuit.
Accepts input of a list of Head Losses (Junctions From-To), in in. Water
The closed-circuit closure error is calculated and returned as a percentage (%)
Returns a Tuple of (Head Loss Error, Error Percentage)
"""
HeadLossVal = 0 #Set Initial Head Loss to 0
TotalHeadLoss = min(List_Head) #Total Head Loss taken as the lowest (most negative) pressure measurement, used for Error Percentage (%)
for HeadLoss in List_Head:
HeadLossVal += HeadLoss #All Values are summed to determine closure error of circuit
#print(TotalHeadLoss)
percentage_error = round(abs(HeadLossVal)/abs(TotalHeadLoss)*100, 2)
print("Error Percentage of Head Loss Circuit:", percentage_error)
return (round(HeadLossVal, 3), percentage_error)
def main():
"""
Examples and Solutions based on Mine Ventilation and Air Conditioning Textbook to
demonstrate the proper usage of functions and verify process and Data Analysis.
:return:
"""
#An example of direct method of pressure measurement with Manometer
#Key = [Elevation (ft), Pressure (psi), Temp (Dry Bulb F), Temp (Wet Bulb F), Spec Humid, Spec Weight]
Top = [-1748.7, 12.594, 59.4, 50, 0.0072, 0.0655]
Bottom = [-4368.70, 13.773, 67.3, 57.0, 0.0082, 0.0702]
Manometer_Reading = 1.51 #Inches of Water
print(ManometerDirectMethod(Top, Bottom, Manometer_Reading)) #Page 209/210 Example Mine Ventilation Textbook
print(psychrometricPropAir(70, 50, 29.921)) #Example 2.1 from Mine Ventilation and Air Conditioning, Page 17
list_of_head = [.445, 1.075, -8.6, 0.245, 2.8, 0.19, 0.084, 0.455, 1.50, 1.71] #Example 6.4 pg 211 Mine Ventilation
print("Head Loss in in H20: ", HeadLossCurcuit(list_of_head))
LeapfroggingAltimeter()
#An Example of Natural Ventilation Head in Inches Water, Example from Dr. Bhattacharyya ME 440 HW #1
ShaftATop = [63, 63, 1000, 28.95]
ShaftABottom = [65, 65, 300, 29.80]
ShaftBTop = [67, 59, 1200, 28.75]
ShaftBBottom = [59, 53, 500, 29.60]
NaturalVent = NaturalVentilation(ShaftATop, ShaftABottom, ShaftBTop, ShaftBBottom)
print(NaturalVent)
#Example 5.6, page 159 Ramani, Wang, Mutmansky and Hartman, to calculate friction factor
frict_factor_k = CalculateFrictionFactor(21.04, 3000, 4, 48000, 0.075)
print("Example 5.6, Friction Factor K: ", frict_factor_k)
if __name__ == "__main__":
main()
| Roibal/Geotechnical_Engineering_Python_Code | Ventilation-Mining-Engineering/Ventilation_Mining_Python_Toolbox.py | Python | mit | 17,670 |
"""
the flask extension
"""
import warnings
from functools import wraps
import logging
from flask import request, current_app, g, Blueprint
from werkzeug.http import http_date
from limits.errors import ConfigurationError
from limits.storage import storage_from_string, MemoryStorage
from limits.strategies import STRATEGIES
from limits.util import parse_many
import six
import sys
import time
from .errors import RateLimitExceeded
from .util import get_ipaddr
class C:
ENABLED = "RATELIMIT_ENABLED"
HEADERS_ENABLED = "RATELIMIT_HEADERS_ENABLED"
STORAGE_URL = "RATELIMIT_STORAGE_URL"
STORAGE_OPTIONS = "RATELIMIT_STORAGE_OPTIONS"
STRATEGY = "RATELIMIT_STRATEGY"
GLOBAL_LIMITS = "RATELIMIT_GLOBAL"
HEADER_LIMIT = "RATELIMIT_HEADER_LIMIT"
HEADER_REMAINING = "RATELIMIT_HEADER_REMAINING"
HEADER_RESET = "RATELIMIT_HEADER_RESET"
SWALLOW_ERRORS = "RATELIMIT_SWALLOW_ERRORS"
IN_MEMORY_FALLBACK = "RATELIMIT_IN_MEMORY_FALLBACK"
HEADER_RETRY_AFTER = "RATELIMIT_HEADER_RETRY_AFTER"
HEADER_RETRY_AFTER_VALUE = "RATELIMIT_HEADER_RETRY_AFTER_VALUE"
class HEADERS:
RESET = 1
REMAINING = 2
LIMIT = 3
RETRY_AFTER = 4
MAX_BACKEND_CHECKS = 5
class ExtLimit(object):
"""
simple wrapper to encapsulate limits and their context
"""
def __init__(self, limit, key_func, scope, per_method, methods, error_message,
exempt_when):
self._limit = limit
self.key_func = key_func
self._scope = scope
self.per_method = per_method
self.methods = methods and [m.lower() for m in methods] or methods
self.error_message = error_message
self.exempt_when = exempt_when
@property
def limit(self):
return self._limit() if callable(self._limit) else self._limit
@property
def scope(self):
return self._scope(request.endpoint) if callable(self._scope) else self._scope
@property
def is_exempt(self):
"""Check if the limit is exempt."""
return self.exempt_when and self.exempt_when()
class Limiter(object):
"""
:param app: :class:`flask.Flask` instance to initialize the extension
with.
:param list global_limits: a variable list of strings denoting global
limits to apply to all routes. :ref:`ratelimit-string` for more details.
:param function key_func: a callable that returns the domain to rate limit by.
:param bool headers_enabled: whether ``X-RateLimit`` response headers are written.
:param str strategy: the strategy to use. refer to :ref:`ratelimit-strategy`
:param str storage_uri: the storage location. refer to :ref:`ratelimit-conf`
:param dict storage_options: kwargs to pass to the storage implementation upon
instantiation.
:param bool auto_check: whether to automatically check the rate limit in the before_request
chain of the application. default ``True``
:param bool swallow_errors: whether to swallow errors when hitting a rate limit.
An exception will still be logged. default ``False``
:param list in_memory_fallback: a variable list of strings denoting fallback
limits to apply when the storage is down.
"""
def __init__(self, app=None
, key_func=None
, global_limits=[]
, headers_enabled=False
, strategy=None
, storage_uri=None
, storage_options={}
, auto_check=True
, swallow_errors=False
, in_memory_fallback=[]
, retry_after=None
):
self.app = app
self.logger = logging.getLogger("flask-limiter")
self.enabled = True
self._global_limits = []
self._in_memory_fallback = []
self._exempt_routes = set()
self._request_filters = []
self._headers_enabled = headers_enabled
self._header_mapping = {}
self._retry_after = retry_after
self._strategy = strategy
self._storage_uri = storage_uri
self._storage_options = storage_options
self._auto_check = auto_check
self._swallow_errors = swallow_errors
if not key_func:
warnings.warn(
"Use of the default `get_ipaddr` function is discouraged."
" Please refer to https://flask-limiter.readthedocs.org/#rate-limit-domain"
" for the recommended configuration",
UserWarning
)
self._key_func = key_func or get_ipaddr
for limit in global_limits:
self._global_limits.extend(
[
ExtLimit(
limit, self._key_func, None, False, None, None, None
) for limit in parse_many(limit)
]
)
for limit in in_memory_fallback:
self._in_memory_fallback.extend(
[
ExtLimit(
limit, self._key_func, None, False, None, None, None
) for limit in parse_many(limit)
]
)
self._route_limits = {}
self._dynamic_route_limits = {}
self._blueprint_limits = {}
self._blueprint_dynamic_limits = {}
self._blueprint_exempt = set()
self._storage = self._limiter = None
self._storage_dead = False
self._fallback_limiter = None
self.__check_backend_count = 0
self.__last_check_backend = time.time()
class BlackHoleHandler(logging.StreamHandler):
def emit(*_):
return
self.logger.addHandler(BlackHoleHandler())
if app:
self.init_app(app)
def init_app(self, app):
"""
:param app: :class:`flask.Flask` instance to rate limit.
"""
self.enabled = app.config.setdefault(C.ENABLED, True)
self._swallow_errors = app.config.setdefault(
C.SWALLOW_ERRORS, self._swallow_errors
)
self._headers_enabled = (
self._headers_enabled
or app.config.setdefault(C.HEADERS_ENABLED, False)
)
self._storage_options.update(
app.config.get(C.STORAGE_OPTIONS, {})
)
self._storage = storage_from_string(
self._storage_uri
or app.config.setdefault(C.STORAGE_URL, 'memory://'),
** self._storage_options
)
strategy = (
self._strategy
or app.config.setdefault(C.STRATEGY, 'fixed-window')
)
if strategy not in STRATEGIES:
raise ConfigurationError("Invalid rate limiting strategy %s" % strategy)
self._limiter = STRATEGIES[strategy](self._storage)
self._header_mapping.update({
HEADERS.RESET : self._header_mapping.get(HEADERS.RESET,None) or app.config.setdefault(C.HEADER_RESET, "X-RateLimit-Reset"),
HEADERS.REMAINING : self._header_mapping.get(HEADERS.REMAINING,None) or app.config.setdefault(C.HEADER_REMAINING, "X-RateLimit-Remaining"),
HEADERS.LIMIT : self._header_mapping.get(HEADERS.LIMIT,None) or app.config.setdefault(C.HEADER_LIMIT, "X-RateLimit-Limit"),
HEADERS.RETRY_AFTER : self._header_mapping.get(HEADERS.RETRY_AFTER,None) or app.config.setdefault(C.HEADER_RETRY_AFTER, "Retry-After"),
})
self._retry_after = (
self._retry_after
or app.config.get(C.HEADER_RETRY_AFTER_VALUE)
)
conf_limits = app.config.get(C.GLOBAL_LIMITS, None)
if not self._global_limits and conf_limits:
self._global_limits = [
ExtLimit(
limit, self._key_func, None, False, None, None, None
) for limit in parse_many(conf_limits)
]
fallback_limits = app.config.get(C.IN_MEMORY_FALLBACK, None)
if not self._in_memory_fallback and fallback_limits:
self._in_memory_fallback = [
ExtLimit(
limit, self._key_func, None, False, None, None, None
) for limit in parse_many(fallback_limits)
]
if self._auto_check:
app.before_request(self.__check_request_limit)
app.after_request(self.__inject_headers)
if self._in_memory_fallback:
self._fallback_storage = MemoryStorage()
self._fallback_limiter = STRATEGIES[strategy](self._fallback_storage)
# purely for backward compatibility as stated in flask documentation
if not hasattr(app, 'extensions'):
app.extensions = {} # pragma: no cover
app.extensions['limiter'] = self
def __should_check_backend(self):
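# Exponential backoff on storage health probes: the dead backend is re-tested
# only after 2**n seconds have passed since the last probe, where n grows with
# each check and wraps around once it exceeds MAX_BACKEND_CHECKS.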
if self.__check_backend_count > MAX_BACKEND_CHECKS:
self.__check_backend_count = 0
if time.time() - self.__last_check_backend > pow(2, self.__check_backend_count):
self.__last_check_backend = time.time()
self.__check_backend_count += 1
return True
return False
def check(self):
"""
check the limits for the current request
:raises: RateLimitExceeded
"""
self.__check_request_limit()
def reset(self):
"""
resets the storage if it supports being reset
"""
try:
self._storage.reset()
self.logger.info("Storage has be reset and all limits cleared")
except NotImplementedError:
self.logger.warning("This storage type does not support being reset")
@property
def limiter(self):
if self._storage_dead and self._in_memory_fallback:
return self._fallback_limiter
else:
return self._limiter
def __inject_headers(self, response):
current_limit = getattr(g, 'view_rate_limit', None)
if self.enabled and self._headers_enabled and current_limit:
window_stats = self.limiter.get_window_stats(*current_limit)
response.headers.add(
self._header_mapping[HEADERS.LIMIT],
str(current_limit[0].amount)
)
response.headers.add(
self._header_mapping[HEADERS.REMAINING],
window_stats[1]
)
response.headers.add(
self._header_mapping[HEADERS.RESET],
window_stats[0]
)
response.headers.add(
self._header_mapping[HEADERS.RETRY_AFTER],
self._retry_after == 'http-date' and http_date(window_stats[0])
or int(window_stats[0] - time.time())
)
return response
def __check_request_limit(self):
endpoint = request.endpoint or ""
view_func = current_app.view_functions.get(endpoint, None)
name = ("%s.%s" % (
view_func.__module__, view_func.__name__
) if view_func else ""
)
if (not request.endpoint
or not self.enabled
or view_func == current_app.send_static_file
or name in self._exempt_routes
or request.blueprint in self._blueprint_exempt
or any(fn() for fn in self._request_filters)
):
return
limits = (
name in self._route_limits and self._route_limits[name]
or []
)
dynamic_limits = []
if name in self._dynamic_route_limits:
for lim in self._dynamic_route_limits[name]:
try:
dynamic_limits.extend(
ExtLimit(
limit, lim.key_func, lim.scope, lim.per_method,
lim.methods, lim.error_message, lim.exempt_when
) for limit in parse_many(lim.limit)
)
except ValueError as e:
self.logger.error(
"failed to load ratelimit for view function %s (%s)"
, name, e
)
if request.blueprint:
if (request.blueprint in self._blueprint_dynamic_limits
and not dynamic_limits
):
for lim in self._blueprint_dynamic_limits[request.blueprint]:
try:
dynamic_limits.extend(
ExtLimit(
limit, lim.key_func, lim.scope, lim.per_method,
lim.methods, lim.error_message, lim.exempt_when
) for limit in parse_many(lim.limit)
)
except ValueError as e:
self.logger.error(
"failed to load ratelimit for blueprint %s (%s)"
, request.blueprint, e
)
if (request.blueprint in self._blueprint_limits
and not limits
):
limits.extend(self._blueprint_limits[request.blueprint])
failed_limit = None
limit_for_header = None
try:
all_limits = []
if self._storage_dead and self._fallback_limiter:
if self.__should_check_backend() and self._storage.check():
self.logger.info(
"Rate limit storage recovered"
)
self._storage_dead = False
self.__check_backend_count = 0
else:
all_limits = self._in_memory_fallback
if not all_limits:
all_limits = (limits + dynamic_limits or self._global_limits)
for lim in all_limits:
limit_scope = lim.scope or endpoint
if lim.is_exempt:
return
if lim.methods is not None and request.method.lower() not in lim.methods:
return
if lim.per_method:
limit_scope += ":%s" % request.method
if not limit_for_header or lim.limit < limit_for_header[0]:
limit_for_header = (lim.limit, lim.key_func(), limit_scope)
if not self.limiter.hit(lim.limit, lim.key_func(), limit_scope):
self.logger.warning(
"ratelimit %s (%s) exceeded at endpoint: %s"
, lim.limit, lim.key_func(), limit_scope
)
failed_limit = lim
limit_for_header = (lim.limit, lim.key_func(), limit_scope)
break
g.view_rate_limit = limit_for_header
if failed_limit:
if failed_limit.error_message:
exc_description = failed_limit.error_message if not callable(
failed_limit.error_message
) else failed_limit.error_message()
else:
exc_description = six.text_type(failed_limit.limit)
raise RateLimitExceeded(exc_description)
except Exception as e: # noqa
if isinstance(e, RateLimitExceeded):
six.reraise(*sys.exc_info())
if self._in_memory_fallback and not self._storage_dead:
self.logger.warning(
"Rate limit storage unreachable - falling back to"
" in-memory storage"
)
self._storage_dead = True
self.__check_request_limit()
else:
if self._swallow_errors:
self.logger.exception(
"Failed to rate limit. Swallowing error"
)
else:
six.reraise(*sys.exc_info())
def __limit_decorator(self, limit_value,
key_func=None, shared=False,
scope=None,
per_method=False,
methods=None,
error_message=None,
exempt_when=None):
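# Only shared limits carry an explicit scope; per-route limits leave it None
# and fall back to the endpoint-derived scope in __check_request_limit.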
_scope = scope if shared else None
def _inner(obj):
func = key_func or self._key_func
is_route = not isinstance(obj, Blueprint)
name = "%s.%s" % (obj.__module__, obj.__name__) if is_route else obj.name
dynamic_limit, static_limits = None, []
if callable(limit_value):
dynamic_limit = ExtLimit(limit_value, func, _scope, per_method,
methods, error_message, exempt_when)
else:
try:
static_limits = [ExtLimit(
limit, func, _scope, per_method,
methods, error_message, exempt_when
) for limit in parse_many(limit_value)]
except ValueError as e:
self.logger.error(
"failed to configure %s %s (%s)",
"view function" if is_route else "blueprint", name, e
)
if isinstance(obj, Blueprint):
if dynamic_limit:
self._blueprint_dynamic_limits.setdefault(name, []).append(
dynamic_limit
)
else:
self._blueprint_limits.setdefault(name, []).extend(
static_limits
)
else:
@wraps(obj)
def __inner(*a, **k):
return obj(*a, **k)
if dynamic_limit:
self._dynamic_route_limits.setdefault(name, []).append(
dynamic_limit
)
else:
self._route_limits.setdefault(name, []).extend(
static_limits
)
return __inner
return _inner
def limit(self, limit_value, key_func=None, per_method=False,
methods=None, error_message=None, exempt_when=None):
"""
decorator to be used for rate limiting individual routes or blueprints.
:param limit_value: rate limit string or a callable that returns a string.
:ref:`ratelimit-string` for more details.
:param function key_func: function/lambda to extract the unique identifier for
the rate limit. defaults to remote address of the request.
:param bool per_method: whether the limit is sub categorized into the http
method of the request.
:param list methods: if specified, only the methods in this list will be rate
limited (default: None).
:param error_message: string (or callable that returns one) to override the
error message used in the response.
:return:
"""
return self.__limit_decorator(limit_value, key_func, per_method=per_method,
methods=methods, error_message=error_message,
exempt_when=exempt_when)
def shared_limit(self, limit_value, scope, key_func=None,
error_message=None, exempt_when=None):
"""
decorator to be applied to multiple routes sharing the same rate limit.
:param limit_value: rate limit string or a callable that returns a string.
:ref:`ratelimit-string` for more details.
:param scope: a string or callable that returns a string
for defining the rate limiting scope.
:param function key_func: function/lambda to extract the unique identifier for
the rate limit. defaults to remote address of the request.
:param error_message: string (or callable that returns one) to override the
error message used in the response.
"""
return self.__limit_decorator(
limit_value, key_func, True, scope, error_message=error_message,
exempt_when=exempt_when
)
def exempt(self, obj):
"""
decorator to mark a view or all views in a blueprint as exempt from rate limits.
"""
if not isinstance(obj, Blueprint):
name = "%s.%s" % (obj.__module__, obj.__name__)
@wraps(obj)
def __inner(*a, **k):
return obj(*a, **k)
self._exempt_routes.add(name)
return __inner
else:
self._blueprint_exempt.add(obj.name)
def request_filter(self, fn):
"""
decorator to mark a function as a filter to be executed
to check if the request is exempt from rate limiting.
"""
self._request_filters.append(fn)
return fn
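# Illustrative usage sketch (not part of this module; the app wiring, route name
# and limit strings below are assumptions, so it is left commented out):
#
#   from flask import Flask
#   app = Flask(__name__)
#   limiter = Limiter(app, key_func=get_ipaddr, global_limits=["100 per hour"])
#
#   @app.route("/ping")
#   @limiter.limit("5 per minute")
#   def ping():
#       return "pong"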
| duyet-website/api.duyet.net | lib/flask_limiter/extension.py | Python | mit | 20,800 |
import os
import matplotlib.pyplot as plt
import numpy as np
from plotting_styles import onecolumn_figure, default_figure
from paths import paper1_figures_path
'''
Make a UV plot of the 1000th HI channel.
'''
uvw = np.load("/mnt/MyRAID/M33/VLA/14B-088/HI/"
"14B-088_HI_LSRK.ms.contsub_channel_1000.uvw.npy")
onecolumn_figure()
fig = plt.figure()
ax = fig.add_subplot(111) # , rasterized=True)
# plt.hexbin(uvw[0], uvw[1], bins='log', cmap='afmhot_r')
ax.scatter(uvw[0], uvw[1], s=0.1, color='k', rasterized=True)
plt.xlabel("U (m)")
plt.ylabel("V (m)")
plt.xlim([-3200, 3500])
plt.ylim([-3200, 3200])
plt.grid()
plt.tight_layout()
plt.savefig(paper1_figures_path("m33_hi_uv_plane_chan1000.pdf"))
plt.savefig(paper1_figures_path("m33_hi_uv_plane_chan1000.png"))
plt.close()
default_figure()
| e-koch/VLA_Lband | 14B-088/HI/analysis/uv_plots/channel_1000_uvplot.py | Python | mit | 813 |
#! /usr/bin/python
#encoding=UTF-8
'''
Created on 2014-5-15
@author: XIAO Zhen
'''
'''Haha'''
import Tkinter as tk
import time
import random
class Application(tk.Frame):
def __init__(self, master=None):
tk.Frame.__init__(self, master)
self.winfo_toplevel().rowconfigure(0,minsize = 1)
self.winfo_toplevel().columnconfigure(0,minsize = 1)
self.grid()
self.createWidgets()
self.random()
self.random()
self.focus_set()
self.bind("<Up>", self.callback)
self.bind("<Down>", self.callback)
self.bind("<Left>", self.callback)
self.bind("<Right>", self.callback)
self.pack()
def createWidgets(self):
#direction buttons, up down left and right
self.direction = {}
self.direction['up'] = tk.Button(self, text = '⇩', height = 2)
self.direction['up'].configure(command = (lambda dir = 'to_down': self.todirection(dir)))
self.direction['up'].grid(row = 0,column = 1, columnspan = 4, sticky = tk.W + tk.E)
self.direction['down'] = tk.Button(self, text = '⇧', height = 2)
self.direction['down'].configure(command = (lambda dir = 'to_up': self.todirection(dir)))
self.direction['down'].grid(row = 5,column = 1, columnspan = 4, sticky = tk.W + tk.E)
self.direction['left'] = tk.Button(self, text = '⇨', width = 3)
self.direction['left'].configure(command = (lambda dir = 'to_right': self.todirection(dir)))
self.direction['left'].grid(row = 1,column = 0, rowspan = 4, sticky = tk.N + tk.S)
self.direction['right'] = tk.Button(self, text = '⇦', width = 3)
self.direction['right'].configure(command = (lambda dir = 'to_left': self.todirection(dir)))
self.direction['right'].grid(row = 1,column = 5, rowspan = 4, sticky = tk.N + tk.S)
self.buttons = []
for i in range(0,16):
self.buttons.append(tk.Button(self, text = '0', height = 2, width = 5, background = "#FFFFFF", fg = '#FFFFFF'))
self.buttons[i].configure(command = (lambda b = self.buttons[i]: self.setNumber(b)))
self.buttons[i].grid(row = i/4 + 1,column=i%4 + 1)
#self.triggerButton = tk.Button(self, text = 'Print')
#self.triggerButton.grid(row = 0, column=1,ipadx = 100)
#control buttons, including mainly start and mode selections
self.controls = {}
self.controls['startgame'] = tk.Button(self, text = 'Start', height = 2, width = 5, command=self.startgame)
self.controls['startgame'].grid(row = 6, column = 4)
self.controls['test1'] = tk.Button(self, text = 'Test1', height = 2, width = 5, command=self.random)
self.controls['test1'].grid(row = 6,column = 1)
self.controls['test2'] = tk.Button(self, text = 'Test2', height = 2, width = 5, command=self.test2)
self.controls['test2'].grid(row = 6,column = 2)
self.controls['test3'] = tk.Button(self, text = 'Test3', height = 2, width = 5, command=self.test3)
self.controls['test3'].grid(row = 6,column = 3)
def setNumber(self,button):
pass
def startgame(self):
print('start game!')
def random(self):
empty = []
rand = -1
for i in range(0,16):
if self.buttons[i]['text'] == '0':
empty.append(i)
if len(empty) != 0:
rand = random.randrange(0,len(empty))
self.buttons[empty[rand]]['text'] = str(random.randrange(1,3) * 2)
self.setColors()
else:
print("no more fields")
if rand != -1:
self.buttons[empty[rand]].configure(background = '#0404B4', fg = '#000000')
def test2(self):
print('test2')
self.buttons[0]['text'] = '2'
self.buttons[1]['text'] = '2'
self.buttons[2]['text'] = '4'
self.buttons[3]['text'] = '8'
self.buttons[4]['text'] = '4'
self.buttons[5]['text'] = '2'
self.buttons[6]['text'] = '2'
self.buttons[7]['text'] = '8'
self.buttons[8]['text'] = '4'
self.buttons[9]['text'] = '2'
self.buttons[10]['text'] = '2'
self.buttons[11]['text'] = '8'
self.buttons[12]['text'] = '8'
self.buttons[13]['text'] = '8'
self.buttons[14]['text'] = '8'
self.buttons[15]['text'] = '8'
self.setColors()
def test3(self):
print('test3')
def callback(self,event):
if event.keysym == 'Up':
self.todirection('to_up')
elif event.keysym == 'Down':
self.todirection('to_down')
elif event.keysym == 'Left':
self.todirection('to_left')
elif event.keysym == 'Right':
self.todirection('to_right')
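#sum() merges one row/column 2048-style: pad the extracted tile values, combine
#equal adjacent tiles once, then re-pack the non-zero results toward the front
#(padded again so callers can safely index four entries). Note that the name
#shadows the built-in sum().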
def sum(self,list):
for i in range (len(list),5):
list.append(0)
for i in range(0,3):
if list[i] == list[i+1] and list[i] != 0:
list[i] += list[i+1]
list[i+1] = 0
re = []
for i in range(0,4):
if list[i] != 0:
re.append(list[i])
for i in range (len(re),5):
re.append(0)
return re
def todirection(self, direction):
flag = 0
if direction == 'to_right':
#rows
for i in range(0, 4):
#columns:
list = []
for j in range(3, -1, -1):
if self.buttons[i*4 + j]['text'] != '0':
list.append(int(self.buttons[i*4 + j]['text']))
re = self.sum(list)
k = 0
for j in range(3, -1, -1):
if self.buttons[i*4 + j]['text'] != str(re[k]):
flag = 1
self.buttons[i*4 + j]['text'] = str(re[k])
k += 1
elif direction == 'to_left':
#rows
for i in range(0, 4):
#columns:
list = []
for j in range(0, 4):
if self.buttons[i*4 + j]['text'] != '0':
list.append(int(self.buttons[i*4 + j]['text']))
re = self.sum(list)
k = 0
for j in range(0, 4):
if self.buttons[i*4 + j]['text'] != str(re[k]):
flag = 1
self.buttons[i*4 + j]['text'] = str(re[k])
k += 1
elif direction == 'to_up':
#column
for i in range(0, 4):
#row:
list = []
for j in range(0, 4):
if self.buttons[i + j*4]['text'] != '0':
list.append(int(self.buttons[i + j*4]['text']))
re = self.sum(list)
k = 0
for j in range(0, 4):
if self.buttons[i + j*4]['text'] != str(re[k]):
flag = 1
self.buttons[i + j*4]['text'] = str(re[k])
k += 1
elif direction == 'to_down':
#column
for i in range(0, 4):
#rows:
list = []
for j in range(3, -1, -1):
if self.buttons[i + j*4]['text'] != '0':
list.append(int(self.buttons[i + j*4]['text']))
re = self.sum(list)
k = 0
for j in range(3, -1, -1):
if self.buttons[i + j*4]['text'] != str(re[k]):
flag = 1
self.buttons[i + j*4]['text'] = str(re[k])
k += 1
if flag != 0:
self.random()
def setColors(self):
for i in range(0,16):
self.setColor(self.buttons[i])
def setColor(self,button):
tmp = button['text']
if tmp == '0':
button.configure(background = '#FFFFFF', fg = '#FFFFFF')
elif tmp == '2':
button.configure(background = '#F7F2E0', fg = '#000000')
elif tmp == '4':
button.configure(background = '#F3E2A9', fg = '#000000')
elif tmp == '8':
button.configure(background = '#F7BE81', fg = '#000000')
elif tmp == '16':
button.configure(background = '#FF8000', fg = '#000000')
elif tmp == '32':
button.configure(background = '#FF4000', fg = '#000000')
elif tmp == '64':
button.configure(background = '#FF0000', fg = '#000000')
elif tmp == '128':
button.configure(background = '#B18904', fg = '#000000')
elif tmp == '256':
button.configure(background = '#8A4B08', fg = '#000000')
elif tmp == '512':
button.configure(background = '#8A0808', fg = '#000000')
elif tmp == '1024':
button.configure(background = '#00FFFF', fg = '#000000')
elif tmp == '2048':
button.configure(background = '#00FFFF', fg = '#000000')
elif tmp == '4096':
button.configure(background = '#01DFD7', fg = '#000000')
else:
button.configure(background = '#0404B4', fg = '#000000')
if __name__ == '__main__':
print("Hello World!")
app = Application()
app.master.title('Sample application')
app.mainloop()
pass
| eriwoon/2048 | main.py | Python | mit | 9,629 |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
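# Approach: a depth-first traversal buckets node values into one list per depth,
# then the list of levels is reversed to yield bottom-up level order.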
class Solution(object):
def levelOrderBottom(self, root):
list = []
self.helper(list, root, 0)
return list[::-1]
def helper(self, list, root, level):
if root is None:
return
if level >= len(list):
list.append([])
list[level].append(root.val)
self.helper(list, root.left, level + 1)
self.helper(list, root.right, level + 1)
from TestObjects import *
b = BinaryTree()
s = Solution()
print s.levelOrderBottom(b.root)
| Jspsun/LEETCodePractice | Python/BinaryTreeLevelOrderTraversal2.py | Python | mit | 687 |
import pytest
import cv2
from plantcv.plantcv import auto_crop
@pytest.mark.parametrize('padx,pady,expected', [[20, 20, (98, 56, 4)], [(400, 400), (400, 400), (58, 16, 4)]])
def test_auto_crop(padx, pady, expected, test_data):
"""Test for PlantCV."""
# Read in test data
img = cv2.imread(test_data.small_rgb_img, -1)
contour = test_data.load_composed_contours(test_data.small_composed_contours_file)
cropped = auto_crop(img=img, obj=contour, padding_x=padx, padding_y=pady, color='image')
assert cropped.shape == expected
@pytest.mark.parametrize("color", ["black", "white", "image"])
def test_auto_crop_grayscale(color, test_data):
"""Test for PlantCV."""
# Read in test data
gray_img = cv2.imread(test_data.small_gray_img, -1)
contour = test_data.load_composed_contours(test_data.small_composed_contours_file)
cropped = auto_crop(img=gray_img, obj=contour, padding_x=20, padding_y=20, color=color)
assert cropped.shape == (98, 56)
def test_auto_crop_bad_color_input(test_data):
"""Test for PlantCV."""
# Read in test data
gray_img = cv2.imread(test_data.small_gray_img, -1)
contour = test_data.load_composed_contours(test_data.small_composed_contours_file)
with pytest.raises(RuntimeError):
_ = auto_crop(img=gray_img, obj=contour, padding_x=20, padding_y=20, color='wite')
def test_auto_crop_bad_padding_input(test_data):
"""Test for PlantCV."""
# Read in test data
gray_img = cv2.imread(test_data.small_gray_img, -1)
contour = test_data.load_composed_contours(test_data.small_composed_contours_file)
with pytest.raises(RuntimeError):
_ = auto_crop(img=gray_img, obj=contour, padding_x="one", padding_y=20, color='white')
| danforthcenter/plantcv | tests/plantcv/test_auto_crop.py | Python | mit | 1,737 |
"""Support for Netgear LTE notifications."""
import logging
import attr
import eternalegypt
from homeassistant.components.notify import ATTR_TARGET, BaseNotificationService
from . import CONF_NOTIFY, CONF_RECIPIENT, DATA_KEY
_LOGGER = logging.getLogger(__name__)
async def async_get_service(hass, config, discovery_info=None):
"""Get the notification service."""
if discovery_info is None:
return
return NetgearNotifyService(hass, discovery_info)
@attr.s
class NetgearNotifyService(BaseNotificationService):
"""Implementation of a notification service."""
hass = attr.ib()
config = attr.ib()
async def async_send_message(self, message="", **kwargs):
"""Send a message to a user."""
modem_data = self.hass.data[DATA_KEY].get_modem_data(self.config)
if not modem_data:
_LOGGER.error("Modem not ready")
return
targets = kwargs.get(ATTR_TARGET, self.config[CONF_NOTIFY][CONF_RECIPIENT])
if not targets:
_LOGGER.warning("No recipients")
return
if not message:
return
for target in targets:
try:
await modem_data.modem.sms(target, message)
except eternalegypt.Error:
_LOGGER.error("Unable to send to %s", target)
| rohitranjan1991/home-assistant | homeassistant/components/netgear_lte/notify.py | Python | mit | 1,330 |
"""
UNIT 2: Logic Puzzle
You will write code to solve the following logic puzzle:
1. The person who arrived on Wednesday bought the laptop.
2. The programmer is not Wilkes.
3. Of the programmer and the person who bought the droid,
one is Wilkes and the other is Hamming.
4. The writer is not Minsky.
5. Neither Knuth nor the person who bought the tablet is the manager.
6. Knuth arrived the day after Simon.
7. The person who arrived on Thursday is not the designer.
8. The person who arrived on Friday didn't buy the tablet.
9. The designer didn't buy the droid.
10. Knuth arrived the day after the manager.
11. Of the person who bought the laptop and Wilkes,
one arrived on Monday and the other is the writer.
12. Either the person who bought the iphone or the person who bought the tablet
arrived on Tuesday.
You will write the function logic_puzzle(), which should return a list of the
names of the people in the order in which they arrive. For example, if they
happen to arrive in alphabetical order, Hamming on Monday, Knuth on Tuesday, etc.,
then you would return:
['Hamming', 'Knuth', 'Minsky', 'Simon', 'Wilkes']
(You can assume that the days mentioned are all in the same week.)
"""
import itertools
def day_after(first, second):
return first - 1 == second
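# Encoding: every attribute (name, job, purchase) is assigned a day index 0..4
# (Monday..Friday), so each clue becomes an (in)equality between day indices and
# the generator expression brute-forces permutations until all clues hold.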
def logic_puzzle():
days = [ monday, tuesday, wednesday, thursday, friday ] = [ 0, 1, 2, 3, 4 ]
orderings = list(itertools.permutations(days))
people = ('Wilkes', 'Minsky', 'Hamming', 'Knuth', 'Simon')
order = next((Wilkes, Minsky, Hamming, Knuth, Simon)
for (laptop, droid, tablet, iphone, _) in orderings
for (Wilkes, Minsky, Hamming, Knuth, Simon) in orderings
for (programmer, writer, manager, designer, _) in orderings
if wednesday == laptop
if programmer != Wilkes
if (programmer == Wilkes and droid == Hamming) or (programmer == Hamming and Wilkes == droid)
if writer != Minsky
if tablet != manager and Knuth != manager
if thursday != designer
if designer != droid
if friday != tablet
if day_after(Knuth, manager)
if day_after(Knuth, Simon)
if (Wilkes == monday and laptop == writer) or (laptop == monday and Wilkes == writer)
if iphone == tuesday or tablet == tuesday
)
result = []
print order
for pers in range(5):
result.append(people[order[pers]])
return result
print logic_puzzle()
| feredean/cs313 | notes/7_puzzle.py | Python | mit | 2,458 |
# run some tests
#
# author: sganis
# date: 05/16/2015
import unittest
class TestVersions(unittest.TestCase):
def test_python(self):
import platform
self.assertEqual(platform.python_version(), "2.7.10rc1")
def test_numpy(self):
import numpy
self.assertEqual(numpy.version.version, "1.9.2")
if __name__ == '__main__':
unittest.main()
| sganis/pyportable | test.py | Python | mit | 352 |
import theano
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
from lasagne.layers import InputLayer, DenseLayer, ReshapeLayer
import lasagne.layers
import lasagne.nonlinearities
import lasagne.updates
import lasagne.objectives
import lasagne.init
from ntm.layers import NTMLayer
from ntm.memory import Memory
from ntm.controllers import DenseController
from ntm.heads import WriteHead, ReadHead
from ntm.updates import graves_rmsprop
from utils.generators import AssociativeRecallTask
from utils.visualization import Dashboard
def model(input_var, batch_size=1, size=8, num_units=100, memory_shape=(128, 20)):
# Input Layer
l_input = InputLayer((batch_size, None, size + 2), input_var=input_var)
_, seqlen, _ = l_input.input_var.shape
# Neural Turing Machine Layer
memory = Memory(memory_shape, name='memory', memory_init=lasagne.init.Constant(1e-6), learn_init=False)
controller = DenseController(l_input, memory_shape=memory_shape,
num_units=num_units, num_reads=1,
nonlinearity=lasagne.nonlinearities.rectify,
name='controller')
heads = [
WriteHead(controller, num_shifts=3, memory_shape=memory_shape, name='write', learn_init=False,
nonlinearity_key=lasagne.nonlinearities.rectify,
nonlinearity_add=lasagne.nonlinearities.rectify),
ReadHead(controller, num_shifts=3, memory_shape=memory_shape, name='read', learn_init=False,
nonlinearity_key=lasagne.nonlinearities.rectify)
]
l_ntm = NTMLayer(l_input, memory=memory, controller=controller, heads=heads)
# Output Layer
l_output_reshape = ReshapeLayer(l_ntm, (-1, num_units))
l_output_dense = DenseLayer(l_output_reshape, num_units=size + 2, nonlinearity=lasagne.nonlinearities.sigmoid, \
name='dense')
l_output = ReshapeLayer(l_output_dense, (batch_size, seqlen, size + 2))
return l_output, l_ntm
if __name__ == '__main__':
# Define the input and expected output variable
input_var, target_var = T.tensor3s('input', 'target')
# The generator to sample examples from
generator = AssociativeRecallTask(batch_size=1, max_iter=1000000, size=8, max_num_items=6, \
min_item_length=1, max_item_length=3)
# The model (1-layer Neural Turing Machine)
l_output, l_ntm = model(input_var, batch_size=generator.batch_size,
size=generator.size, num_units=100, memory_shape=(128, 20))
# The generated output variable and the loss function
pred_var = T.clip(lasagne.layers.get_output(l_output), 1e-6, 1. - 1e-6)
loss = T.mean(lasagne.objectives.binary_crossentropy(pred_var, target_var))
# Create the update expressions
params = lasagne.layers.get_all_params(l_output, trainable=True)
learning_rate = theano.shared(1e-4)
updates = lasagne.updates.adam(loss, params, learning_rate=learning_rate)
# Compile the function for a training step, as well as the prediction function and
# a utility function to get the inner details of the NTM
train_fn = theano.function([input_var, target_var], loss, updates=updates)
ntm_fn = theano.function([input_var], pred_var)
ntm_layer_fn = theano.function([input_var], lasagne.layers.get_output(l_ntm, get_details=True))
# Training
try:
scores, all_scores = [], []
for i, (example_input, example_output) in generator:
score = train_fn(example_input, example_output)
scores.append(score)
all_scores.append(score)
if i % 500 == 0:
mean_scores = np.mean(scores)
if mean_scores < 0.01:
learning_rate.set_value(1e-5)
print 'Batch #%d: %.6f' % (i, mean_scores)
scores = []
except KeyboardInterrupt:
pass
# Visualization
def marker1(params):
return params['num_items'] * (params['item_length'] + 1)
def marker2(params):
return (params['num_items'] + 1) * (params['item_length'] + 1)
markers = [
{
'location': marker1,
'style': {'color': 'red', 'ls': '-'}
},
{
'location': marker2,
'style': {'color': 'green', 'ls': '-'}
}
]
dashboard = Dashboard(generator=generator, ntm_fn=ntm_fn, ntm_layer_fn=ntm_layer_fn, \
memory_shape=(128, 20), markers=markers, cmap='bone')
# Example
params = generator.sample_params()
dashboard.sample(**params)
| snipsco/ntm-lasagne | examples/associative-recall-task.py | Python | mit | 4,480 |
# Generated by YCM Generator at 2019-06-21 11:57:11.711058
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
flags = [
'-x',
'c++',
'-I../../utils/',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.C', '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.H', '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| PysKa-Ratzinger/personal_project_euler_solutions | solutions/076-100/77/.ycm_extra_conf.py | Python | mit | 4,697 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-25 11:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('consent', '0013_auto_20170217_1606'),
]
operations = [
migrations.AlterField(
model_name='educationdetail',
name='college_passout_year',
field=models.CharField(default=2017, max_length=4),
preserve_default=False,
),
]
| aakashrana1995/svnit-tnp | tnp/consent/migrations/0014_auto_20170325_1723.py | Python | mit | 523 |
import os, sys
import json
import copy
import numpy as np
import random
from multiprocessing import Pool
################################################################################################
utils_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'nlp scripts')
source_vh_dir = '/home/ronotex/Downloads/vector_hash/ingenierias_mineria'
#source_vh_dir = '/home/ronotex/Downloads/vector_hash/mantenimiento_en_minernia'
#treemap_name = 'carreras_rubro_mina'
#adj_name = 'ing_total_adjmatrix'
treemap_name = 'carreras_mantto_mina'
adj_name = 'mantto_mina_adjmatrix'
class LabelDict(dict):
def __init__(self, label_names=[]):
self.names = []
for name in label_names:
self.add(name)
def add(self, name):
label_id = len(self.names)
if name in self:
#warnings.warn('Ignoring duplicated label ' + name)
return self[name]
self[name] = label_id
self.names.append(name)
return label_id
def get_label_name(self, label_id):
return self.names[label_id]
def get_label_id(self, name):
if name not in self:
return -1
return self[name]
def size(self):
return len(self.names)
################################################################################################
hierarchy = json.loads(open('carreras_ing2.json').read())
# docname : {docname : true name}
nameByFile = json.loads(open('ident_names2.json').read())
fileByName = {}
temp={}
for (file,name) in nameByFile.items():
temp[file.strip(' ')] = name.strip(' ')
fileByName[name.strip(' ')] = file.strip(' ')
nameByFile = dict(temp)
################################################################################################
def sorter(T,sizeById, file_dict):
if "children" not in T:
_id = file_dict.get_label_id(fileByName[T["name"]])
try:
T["size"] = int(sizeById[_id])
except:
T["size"] = sizeById[_id]
return float(T["size"])
children = T["children"]
temp = []
_total = 0
for child in children:
subt_sum = sorter(child,sizeById, file_dict)
_total += subt_sum
temp.append(subt_sum)
temp = list(zip(temp,range(len(children))))
temp.sort(reverse=True)
T["children"] = [children[k[1]] for k in temp]
return _total
def getSortedLeaves(T, V,file_dict):
if "children" not in T:
fn = fileByName[ T["name"] ]
V.append(file_dict.get_label_id(fn))
return
for child in T["children"]:
getSortedLeaves(child,V,file_dict)
################################################################################################
################################################################################################
if __name__=='__main__':
vh_dict = LabelDict()
file_dict = LabelDict()
graph = np.zeros([30,30])
vhsByFile = [set() for i in range(30)]
freq_major = np.zeros([30])
for root,dirs,filenames in os.walk(source_vh_dir):
for f in filenames:
if f[-1]!='~':
#file_name = f[3:] # vh_name
#if file_name=='all' or file_name=='ing':
# continue
p = f.find('_mineria')
#p = f.find('_mantto_mineria')
file_name = f[3:p] # vh_name_mineria
#file_name = f[14:] # mantto_min_vh_name
id_file = file_dict.add(file_name)
for line in open(os.path.join(source_vh_dir,f)):
line = line.strip('\n')
if line!='':
id_vh = vh_dict.add(line)
freq_major[id_file]+=1
vhsByFile[id_file].add(id_vh)
count_id_vh = vh_dict.size()
count_id_file = file_dict.size()
print(count_id_vh)
print(count_id_file)
    # ipdb.set_trace()  # breakpoint disabled; re-enable for interactive debugging
# node
for k in range(count_id_file):
# posible edges
outgoing = set()
for i in range(count_id_file):
if k!=i:
temp = vhsByFile[k] & vhsByFile[i]
graph[k,i] = len(temp)
outgoing |= temp
graph[k,k] = freq_major[k] - len(outgoing)
# GENERATE CARRERAS.JSON
tot = sorter(hierarchy,freq_major,file_dict)
    with open(treemap_name + '.json', 'w') as out:
        out.write(json.dumps(hierarchy, ensure_ascii=False, indent=2))
per_hierarchy = dict(hierarchy)
temp = [format(x,'.2f') for x in 100.0*freq_major/count_id_vh]
tot = sorter(per_hierarchy,temp,file_dict)
    with open(treemap_name + '_perc.json', 'w') as out:
        out.write(json.dumps(per_hierarchy, ensure_ascii=False, indent=2))
# GENERATE ADJMATRIX.JSON
sorted_ids = []
getSortedLeaves(hierarchy,sorted_ids,file_dict)
adjmatrix = []
for k in sorted_ids:
if freq_major[k]==0:
continue
u = file_dict.get_label_name(k)
item = dict()
item["name"] = nameByFile[u]
item["size"] = int(freq_major[k])
item["imports"] = []
for i in sorted_ids:
if graph[k,i]>0:
v = file_dict.get_label_name(i)
imp = dict({'name':nameByFile[v],'weight':int(graph[k,i])})
item["imports"].append(imp)
adjmatrix.append(item)
    with open(adj_name + '.json', 'w') as out:
        out.write(json.dumps(adjmatrix, ensure_ascii=False, indent=2))
|
ronaldahmed/labor-market-demand-analysis
|
rule based major_extractor/count_custom_vhs.py
|
Python
|
mit
| 4,738
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "vgid.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
Guest007/vgid
|
manage.py
|
Python
|
mit
| 247
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The sax module contains a collection of classes that provide a
(D)ocument (O)bject (M)odel representation of an XML document.
The goal is to provide an easy, intuitive interface for managing XML
documents. Although the term DOM is used above, this model is
B{far} better.
XML namespaces in suds are represented using a (2) element tuple
containing the prefix and the URI. Eg: I{('tns', 'http://myns')}
"""
from logging import getLogger
import suds.metrics
from suds import *
from suds.sax import *
from suds.sax.document import Document
from suds.sax.element import Element
from suds.sax.text import Text
from suds.sax.attribute import Attribute
from xml.sax import make_parser, InputSource, ContentHandler
from xml.sax.handler import feature_external_ges
from cStringIO import StringIO
log = getLogger(__name__)
class Handler(ContentHandler):
""" sax hanlder """
def __init__(self):
self.nodes = [Document()]
def startElement(self, name, attrs):
top = self.top()
node = Element(unicode(name), parent=top)
for a in attrs.getNames():
n = unicode(a)
v = unicode(attrs.getValue(a))
attribute = Attribute(n,v)
if self.mapPrefix(node, attribute):
continue
node.append(attribute)
node.charbuffer = []
top.append(node)
self.push(node)
def mapPrefix(self, node, attribute):
skip = False
if attribute.name == 'xmlns':
if len(attribute.value):
node.expns = unicode(attribute.value)
skip = True
elif attribute.prefix == 'xmlns':
prefix = attribute.name
node.nsprefixes[prefix] = unicode(attribute.value)
skip = True
return skip
def endElement(self, name):
name = unicode(name)
current = self.top()
if len(current.charbuffer):
current.text = Text(u''.join(current.charbuffer))
del current.charbuffer
if len(current):
current.trim()
currentqname = current.qname()
if name == currentqname:
self.pop()
else:
raise Exception('malformed document')
def characters(self, content):
text = unicode(content)
node = self.top()
node.charbuffer.append(text)
def push(self, node):
self.nodes.append(node)
return node
def pop(self):
return self.nodes.pop()
def top(self):
return self.nodes[len(self.nodes)-1]
class Parser:
""" SAX Parser """
@classmethod
def saxparser(cls):
p = make_parser()
p.setFeature(feature_external_ges, 0)
h = Handler()
p.setContentHandler(h)
return (p, h)
def parse(self, file=None, string=None):
"""
SAX parse XML text.
@param file: Parse a python I{file-like} object.
@type file: I{file-like} object.
@param string: Parse string XML.
@type string: str
"""
timer = metrics.Timer()
timer.start()
sax, handler = self.saxparser()
if file is not None:
sax.parse(file)
timer.stop()
#metrics.log.debug('sax (%s) duration: %s', file, timer)
return handler.nodes[0]
if string is not None:
source = InputSource(None)
source.setByteStream(StringIO(string))
sax.parse(source)
timer.stop()
#metrics.log.debug('%s\nsax duration: %s', string, timer)
return handler.nodes[0]
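# Hypothetical usage sketch (Python 2, matching this module; not part of the
# original file):
#   from suds.sax.parser import Parser
#   document = Parser().parse(string='<a><b>hi</b></a>')
# parse() returns the suds.sax.document.Document built by Handler, and falls
# through to None when neither file= nor string= is supplied.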
|
jumoconnect/openjumo
|
jumodjango/lib/suds/sax/parser.py
|
Python
|
mit
| 4,435
|
"""The www.cartoonmad.com analyzer.
[Entry examples]
- http://www.cartoonmad.com/comic/5640.html
- https://www.cartoonmad.com/comic/5640.html
"""
import re
from urllib.parse import parse_qsl
from cmdlr.analyzer import BaseAnalyzer
from cmdlr.autil import fetch
class Analyzer(BaseAnalyzer):
"""The www.cartoonmad.com analyzer.
[Entry examples]
- http://www.cartoonmad.com/comic/5640.html
- https://www.cartoonmad.com/comic/5640.html
"""
entry_patterns = [
re.compile(
            r'^https?://(?:www\.)?cartoonmad\.com/comic/(\d+)(?:\.html)?$'
),
]
def entry_normalizer(self, url):
"""Normalize all possible entry url to single one form."""
match = self.entry_patterns[0].search(url)
id = match.group(1)
return 'https://www.cartoonmad.com/comic/{}.html'.format(id)
@staticmethod
def __extract_name(fetch_result):
return fetch_result.soup.title.string.split(' - ')[0]
@staticmethod
def __extract_volumes(fetch_result):
a_tags = (fetch_result.soup
.find('legend', string=re.compile('漫畫線上觀看'))
.parent
.find_all(href=re.compile(r'^/comic/')))
return {a.string: fetch_result.absurl(a.get('href'))
for a in a_tags}
@staticmethod
def __extract_finished(fetch_result):
return (True
if fetch_result.soup.find('img', src='/image/chap9.gif')
else False)
@staticmethod
def __extract_description(fetch_result):
return (fetch_result.soup
.find('fieldset', id='info').td.get_text().strip())
@staticmethod
def __extract_authors(fetch_result):
return [fetch_result.soup
.find(string=re.compile('作者:'))
.string.split(':')[1].strip()]
async def get_comic_info(self, url, request, **unused):
"""Get comic info."""
fetch_result = await fetch(url, request, encoding='big5')
return {
'name': self.__extract_name(fetch_result),
'volumes': self.__extract_volumes(fetch_result),
'description': self.__extract_description(fetch_result),
'authors': self.__extract_authors(fetch_result),
'finished': self.__extract_finished(fetch_result),
}
@staticmethod
def __get_imgurl_func(soup, absurl):
# print(soup.find('img', src=re.compile('comicpic.asp')))
src = soup.find('img', src=re.compile(r'comicpic.asp'))['src']
abspath, qs_string = absurl(src).split('?', maxsplit=1)
qs = dict(parse_qsl(qs_string))
file_parts = qs['file'].split('/')
file_parts[-1] = '{:0>3}'
qs['file'] = '/'.join(file_parts)
qs_tpl = '&'.join(['{}={}'.format(key, value)
for key, value in qs.items()])
abspath_tpl = '{}?{}'.format(abspath, qs_tpl)
def get_imgurl(page_number):
return abspath_tpl.format(page_number)
return get_imgurl
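    # Illustrative note with hypothetical values (not taken from the site):
    # if src is 'comicpic.asp?file=comic/123/001', the 'file' query value is
    # rewritten to 'comic/123/{:0>3}', so get_imgurl(7) fills in a zero-padded
    # page number and yields '...comicpic.asp?file=comic/123/007'.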
async def save_volume_images(self, url, request, save_image, **unused):
"""Get all images in one volume."""
soup, absurl = await fetch(url, request, encoding='big5')
get_img_url = self.__get_imgurl_func(soup, absurl)
page_count = len(soup.find_all('option', value=True))
for page_num in range(1, page_count + 1):
save_image(
page_num,
url=get_img_url(page_num),
headers={'Referer': url},
)
|
civalin/cmdlr
|
src/cmdlr/analyzers/cartoonmad.py
|
Python
|
mit
| 3,587
|
from __future__ import print_function
import sys, time
import requests, urllib
import demjson, shelve
import os.path
class Archiver:
def __init__(self):
"""
A class for archiving URLS into the wayback machine
"""
self._machine = "http://archive.org/wayback/available?url="
self._arch = "https://web.archive.org/save/"
self.archived_urls = []
# load data
if os.path.isfile("archived_urls.dat"):
self.archived_urls = self.load_data()
def available(self, url, silent=False):
"""
:param: url
:param: silent=False
Checks if the given URL exists in the wayback machine.
The silent argument if set True does not print anything to the console
"""
print("[Checking]: %s\n" % url) if silent == False else 0
data = demjson.decode(requests.get(self._machine+url).text)["archived_snapshots"]
if "closest" in data:
            if not silent:
                print(self.print_item(data))
return (data["closest"])["available"]
return False
def load_data(self):
"""
Loads the archived URLS from a file called archived_urls.dat
"""
return shelve.open("archived_urls.dat")["main"]
def out_text(self, filename):
"""
:param: filename
Outputs a list of archived urls into text format
"""
        with open(filename, 'w') as f:
            f.writelines(url + "\n" for url in self.archived_urls)
print("Done.")
def save_data(self):
"""
Saves the archived urls into archived_urls.dat
"""
shelve.open("archived_urls.dat")["main"] = self.archived_urls
def archive(self, url):
"""
:param: url
Archves a url into the wayback machine.
"""
        requests.get(self._arch + url)  # a GET on the save endpoint performs the archiving
print("Archiving...")
self.archived_urls.append(url)
self.save_data()
def print_item(self, data):
"""
:param: data
Print function for json data for archive data
"""
dat = data["closest"]
stamp = "Archived:%s\nAvailable:%s\nURL:%s\nStatus:%s" % (dat["timestamp"], dat['available'], dat['url'], dat['status'])
return stamp
def save_webpage(self, url, filename):
"""
:param: url
:param: filename
Saves a webpage
"""
print("[OK]: Saving webpage..")
if not os.path.isdir(os.getcwd()+"\\saved_webpages"): os.mkdir("saved_webpages")
open(os.getcwd()+"\\saved_webpages\\"+filename, 'w').write((requests.get(url).text).encode("utf-8"))
if os.path.isfile(os.getcwd()+"\\saved_webpages\\"+filename): print("Done.")
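# Hypothetical usage sketch (requires network access; not part of the
# original script):
#   a = Archiver()
#   if not a.available("example.com", silent=True):
#       a.archive("example.com")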
Help = \
" \
Usage: archive.py [option] [option2]\n \
\
Options:\n \
-CH/ch [url] - Check if a URL already exists in the wayback machine and return it's information if it does\n \
-ARCH/arch [url] - Archive a URL\n \
-CHARCH/charch [url] - Archive a url if it doesn't already exists\n \
-OUTT/outt [filename] - Output a list of archived urls in text format\n \
-H/h - Print this help message\n \
-LARCH/larch - print out a list of urls you archived\n \
-SAVE/save [url] [filename] - Save a url into a file"
def main():
global Help
A = Archiver()
    args = [x.lower() for x in sys.argv[1:]]
print(args)
if len(args) == 2:
print(args[0])
if args[0] == "-ch":
if A.available(args[1]) is True:
print("URL found.")
else:
print("URL not found in wayback machine.")
sys.exit(0)
elif args[0] == "-arch":
A.archive(args[1])
if A.available(args[1], True) is True:
print("[Success]: Archiving is successful")
else:
print("[Error]: Archiving failed!")
                failed = A.archived_urls.pop()
                A.archived_urls.append("FAILED TO ARCHIVE: " + failed)
                A.save_data()  # persist the corrected entry
sys.exit(0)
elif args[0] == "-charch":
main = A.available(args[1])
if main is True or main == "True":
print("URL exists.")
elif main is False:
print("URL does not exist.")
A.archive(args[1])
sys.exit(0)
elif args[0] == "-outt":
A.out_text(args[1])
sys.exit(0)
elif len(args) == 3:
if args[0] == "-save":
A.save_webpage(args[1], args[2])
sys.exit(0)
elif len(args) == 1:
if args[0] == "-h":
print("-h")
print(Help)
sys.exit(0)
elif args[0] == "-larch":
print("-larch")
            for archived in A.archived_urls:
                print(archived)
sys.exit(0)
else:
print("[Error]: Unknown argument \'%s\'" % args[0])
sys.exit(0)
else:
print("Archiver: No arguments found.\n Type '-h' for help")
sys.exit(0)
if __name__ == "__main__":
main()
|
saberman888/Archiver
|
archive.py
|
Python
|
mit
| 5,894
|
# script.py
import argparse
import pyexcel as pe
import configparser
import os
import sys
import sqlite3
import my_connection_grp_desg as connection
from insert import sanction_grp as insert_sanc
DB_URL = None
conn = None  # module-level handle; assigned in __main__ via connection.get_connection()
sheet = None
desg_ls=None
unit_ls=None
sect_ls=None
def load_tables():
# conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute("select dscd from desg")
global desg_ls
desg_ls = [ x[0] for x in c.fetchall()]
c.execute("select code from unit")
global unit_ls
unit_ls = [ x[0] for x in c.fetchall()]
c.execute("select code from section")
global sect_ls
sect_ls = [ x[0] for x in c.fetchall()]
c.execute("select eis from employee")
global eis_ls
eis_ls = [ x[0] for x in c.fetchall()]
eis_list = []
working_unit = []
not_first_row = False
def validate_row(row, ignore_multi_unit):
# hold all error list
err = []
# check wrong codes
if row['DSCD'] not in desg_ls:
err.append(" dscd("+row['DSCD']+")")
if row['SECTION_CD'] not in sect_ls:
err.append(" sect("+row['SECTION_CD']+")")
if row['WORKING UNIT'] not in unit_ls:
err.append(" unit("+row['WORKING UNIT']+")")
if row['ONROLL_UNIT'] not in unit_ls:
err.append(" roll_unit("+row['ONROLL_UNIT']+")")
# check duplicate eis in file
global eis_list
if str(row['EIS']) in eis_list:
err.append(" eis_repeat("+str(row['EIS'])+")")
else:
eis_list.append(str(row['EIS']))
# check duplicate eis in db
try:
if int(row['EIS']) in eis_ls:
err.append(" eis_dup_db("+str(row['EIS'])+")")
except ValueError as e:
err.append(" eis_err("+str(row['EIS'])+")")
# check if multiple working_unit present in file
global working_unit
global not_first_row
if not ignore_multi_unit:
if not_first_row:
if str(row['WORKING UNIT']) not in working_unit:
err.append(" multiple_work_unit("+str(row['WORKING UNIT'])+")")
else:
working_unit.append(str(row['WORKING UNIT']))
not_first_row = True
# return err list
if not err:
return None
else:
return err
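# Illustrative note: validate_row returns None for a clean row, otherwise a
# list of error fragments such as [' dscd(XX)', ' eis_repeat(1234)'], which
# read_file prints next to the 1-based spreadsheet row number (idx + 2).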
def read_file(xls_path, sheet_name, upload, ignore_multi_unit):
# book = pe.get_book(file_name=os.path.normpath(xls_path))
# sheets = book.to_dict()
# for name in sheets.keys():
# print(name)
try:
sheet = pe.get_sheet(file_name=os.path.normpath(xls_path), sheet_name=sheet_name, name_columns_by_row=0)
except ValueError as e:
print("Sheet name not in excel file: {0}".format(e))
sys.exit()
except AttributeError as e:
print("Sheet name not in excel file: {0}".format(e))
sys.exit()
#raise e
except NotImplementedError as e:
print("File not found or File not in proper format: {0}".format(e))
sys.exit()
#raise e
records = sheet.get_records()
error_ls = []
for idx, record in enumerate(records):
err_row = validate_row(record, ignore_multi_unit)
if err_row:
error_ls.append(err_row)
print('ERR @ ROW {} => {}'.format(idx+2, err_row))
if error_ls:
print('correct the above errors and upload')
else:
print('{0} rows will be inserted. add "-u" to upload'.format(len(records)))
if upload:
ls=[]
for idx, r in enumerate(records):
#sno AREA UNIT MINE_TYPE ONROLL_UNIT WORKING UNIT SECTION_TYPE CADRE SECTION SECTION_CD DESIG DSCD EIS NAME GENDER DOB Comments
ls.append(('N','W',None,r['SECTION_CD'],r['WORKING UNIT'],r['ONROLL_UNIT'],r['DSCD'],r['GENDER'],r['DOB'],r['NAME'],r['EIS'],r['Comments']))
c = conn.cursor()
c.executemany('''insert into employee (emp_type,working,o_dcd,sect,ucde,roll_ucde,desg,gend,dob,name,eis,comments)
values(?,?,?,?,?, ?,?,?,?,?, ?,?)''',ls)
conn.commit()
        print('--->{0} records inserted successfully'.format(len(ls)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# parser.add_argument(
# 'filename', metavar='int', type=int, choices=range(10),
# nargs='+', help='give a file name')
parser.add_argument('filename', help='give file name')
parser.add_argument('table', help='e for inserting employee; s for inserting sanction')
parser.add_argument("-sh",'--sheetname', help='give sheet name; type * to include all sheets')
parser.add_argument("-u", "--upload", action="store_true", help="to update/commit changes into database")
parser.add_argument("-im", "--ignore_multi_unit", action="store_true", help="to upload file with multiple units and suppress its errors")#default=max,
args = parser.parse_args()
print(args)
#read_config()
conn = connection.get_connection()
    if args.table == 's':
        insert_sanc.load_tables()
        insert_sanc.read_file(args.filename, args.sheetname, args.upload)
    elif args.table == 'e':
        # the employee path is documented in --help but was never wired up
        load_tables()
        read_file(args.filename, args.sheetname, args.upload, args.ignore_multi_unit)
    else:
        print('supplied argument or order of argument is wrong')
        sys.exit()
|
venkat299/mpBudget
|
insert_grp_desg.py
|
Python
|
mit
| 4,685
|
#!/usr/bin/python -u
"""
This script replaces underscores with spaces (which appear as %20 in URLs)
Project: https://github.com/LagrangianPoint/Apache-Rewrite-Maps-Python/
http://httpd.apache.org/docs/current/rewrite/rewritemap.html
http://fragmentsofcode.wordpress.com/2009/02/04/python-script-for-apache-rewritemap/
http://codeblow.com/questions/apache2-rewritemap-python-when-coming-back-null-apache-dangles/
HINTS FOR DEBUGGING:
RewriteEngine On
RewriteLogLevel 9
RewriteLog /var/log/apache2/rewrite.log
"""
import sys
while True:
    strLine = sys.stdin.readline()
    if not strLine:  # EOF: Apache closed the pipe, so stop instead of spinning
        break
    try:
        strLine = strLine.strip()  ## It is very important to use strip!
        strLine = strLine.replace('_', ' ')
        print strLine
        sys.stdout.flush()
    except:  # always answer so Apache is never left waiting for a reply
        print 'NULL'
        sys.stdout.flush()
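# Example httpd.conf wiring for this map (illustrative; adjust the path):
#   RewriteMap under2space prg:/usr/local/bin/under2space.py
#   RewriteRule ^/(.*)$ /${under2space:$1} [L]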
|
LagrangianPoint/Apache-Rewrite-Maps-Python
|
under2space.py
|
Python
|
mit
| 754
|
# -*- coding: utf-8 -*-
"""
Script that bulk-updates the data on kintone after taking a backup.
No options         -> dry run against the local cache
-r (--real)        -> fetch the latest data, back it up, then update
-f (--from-backup) -> recovery for when -r went wrong: point at a backup and update from that data
"""
from cache import get_all, get_app
import time
import argparse
from render import pretty
def concat_lines(x, y):
if isinstance(y, str): y = y.decode('utf-8')
return x.rstrip('\n') + '\n' + y
def add_a_tag(tags, a_tag, length=None):
assert not '\n' in a_tag
if length:
assert len(a_tag[1:].split(a_tag[0])) == length
tags.append(a_tag)
def rows_to_csv(rows):
import cStringIO
import unicodecsv as csv
f = cStringIO.StringIO()
csv.writer(f, encoding='utf-8').writerows(rows)
return f.getvalue()
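# Illustrative example: rows_to_csv serialises rows into one UTF-8 CSV string,
# e.g. rows_to_csv([[u"a", u"b"], [u"c", u"d"]]) -> 'a,b\r\nc,d\r\n'
# (the csv writer's default line terminator is CRLF).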
def convert(xs, args):
"add new creators from 2015_creators_170113.csv"
import unicodecsv as csv
name2x = dict((x.name, x) for x in xs)
to_update = []
to_add = []
rd = csv.reader(file('2015_creators_170113.csv'), encoding='utf-8')
for row in rd:
year = row[2]
kubun = row[3]
sc = row[4]
theme = row[5]
name = row[6]
pm = row[9]
affil1 = row[7]
affil2 = row[8]
if name in name2x:
x = name2x[name]
to_update.append(x)
else:
from mymodel import Person
x = Person()
x.name = name
to_add.append(x)
tags = [
["未踏採択", year, kubun, sc, theme, pm],
["所属", affil1, "{}年時点".format(year), affil2]]
tags = rows_to_csv(tags)
x.tags = concat_lines(x.tags, tags)
print name
print tags
return to_add, to_update
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--real', '-r',
action='store_true', help='read from kintone and write to kintone')
parser.add_argument(
'--from-backup', '-f',
action='store', help='read from backup and write to kintone')
parser.add_argument(
'--converter', '-c',
action='store', help='use specific converter')
parser.add_argument(
'--infile', '-i',
action='store', help='input file')
args = parser.parse_args()
if args.real:
dumpdir = time.strftime('backup_%m%d_%H%M')
xs = get_all(cache=False, name=dumpdir)
elif args.from_backup:
xs = get_all(cache=True, name=args.from_backup)
else:
xs = get_all(cache=True)
if not args.converter:
to_add, to_update = convert(xs, args)
else:
import imp
info = imp.find_module('converter/' + args.converter)
m = imp.load_module('m', *info)
to_add, to_update = m.convert(xs, args)
print "{} items to update, {} items to add".format(len(to_update), len(to_add))
# when recover from backup we need to ignore revision
if args.from_backup:
for x in xs:
x.revision = -1 # ignore revision
if args.real or args.from_backup:
app = get_app()
result = app.batch_create(to_add)
assert result.ok
for i in range(0, len(to_update), 100):
print i, to_update[i].name
result = app.batch_update(to_update[i:i + 100])
assert result.ok
else:
# for debug: Run this script with `ipython -i`
globals()['xs'] = xs
if __name__ == '__main__':
main()
|
mitou/meikan
|
updater.py
|
Python
|
mit
| 3,661
|
import xml.etree.ElementTree as ET
class Definition(object):
def __init__(self, definition_root):
self._definition_root = definition_root
def type_name(self):
type_id = self._definition_root.find('Id').find('TypeId')
subtype_id = self._definition_root.find('Id').find('SubtypeId')
if type_id is None or not type_id.text:
return subtype_id.text
elif subtype_id is None or not subtype_id.text:
return type_id.text
else:
return type_id.text + ':' + subtype_id.text
@property
def build_time(self):
time = self._definition_root.find('BuildTimeSeconds')
if time is not None:
return float(time.text)
@build_time.setter
def build_time(self, value):
time = self._definition_root.find('BuildTimeSeconds')
if time is None:
time = ET.SubElement(self._definition_root, 'BuildTimeSeconds')
time.text = str(value)
@property
def disassembly_ratio(self):
ratio = self._definition_root.find('DisassembleRatio')
if ratio is not None:
return float(ratio.text)
return 1
@disassembly_ratio.setter
def disassembly_ratio(self, value):
ratio = self._definition_root.find('DisassembleRatio')
if ratio is None:
ratio = ET.SubElement(self._definition_root, 'DisassembleRatio')
ratio.text = str(value)
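# Hypothetical usage sketch (assumes a CubeBlocks-style definition element;
# not part of the original file):
#   root = ET.fromstring(
#       '<Definition><Id><TypeId>Block</TypeId>'
#       '<SubtypeId>Large</SubtypeId></Id></Definition>')
#   d = Definition(root)
#   d.build_time = 12.5    # creates <BuildTimeSeconds> on demand
#   d.type_name()          # -> 'Block:Large'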
|
vmrob/sbsmanip
|
sbsmanip/cubeblocks.py
|
Python
|
mit
| 1,449
|
"""
The MIT License (MIT)
Copyright (c) 2015-2021 Kim Blomqvist
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
from .yasha import ENCODING
def parse_json(file):
import json
assert file.name.endswith('.json')
variables = json.loads(file.read().decode(ENCODING))
return variables if variables else dict()
def parse_yaml(file):
import yaml
assert file.name.endswith(('.yaml', '.yml'))
variables = yaml.safe_load(file)
return variables if variables else dict()
def parse_toml(file):
import pytoml as toml
assert file.name.endswith('.toml')
variables = toml.load(file)
return variables if variables else dict()
def parse_xml(file):
import xmltodict
assert file.name.endswith('.xml')
variables = xmltodict.parse(file.read().decode(ENCODING))
return variables if variables else dict()
def parse_svd(file):
# TODO: To be moved into its own repo
from .cmsis import SVDFile
svd = SVDFile(file)
svd.parse()
return {
"cpu": svd.cpu,
"device": svd.device,
"peripherals": svd.peripherals,
}
def parse_ini(file):
from configparser import ConfigParser
cfg = ConfigParser()
# yasha opens files in binary mode, configparser expects files in text mode
content = file.read().decode(ENCODING)
cfg.read_string(content)
result = dict(cfg)
for section, data in result.items():
result[section] = dict(data)
return result
def parse_csv(file):
from csv import reader, DictReader, Sniffer
from io import TextIOWrapper
from os.path import basename, splitext
assert file.name.endswith('.csv')
name = splitext(basename(file.name))[0] # get the filename without the extension
content = TextIOWrapper(file, encoding='utf-8', errors='replace')
sample = content.read(1024)
content.seek(0)
csv = list()
if Sniffer().has_header(sample):
for row in DictReader(content):
csv.append(dict(row))
else:
for row in reader(content):
csv.append(row)
return {name: csv}
PARSERS = {
'.json': parse_json,
'.yaml': parse_yaml,
'.yml': parse_yaml,
'.toml': parse_toml,
'.xml': parse_xml,
'.svd': parse_svd,
'.ini': parse_ini,
'.csv': parse_csv
}
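# Illustrative dispatch sketch (hypothetical helper, not part of yasha's API):
#   from os.path import splitext
#   def parse_variables(file):
#       """Pick a parser from PARSERS by file extension."""
#       return PARSERS[splitext(file.name)[1]](file)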
|
kblomqvist/yasha
|
yasha/parsers.py
|
Python
|
mit
| 3,274
|
# coding: utf-8
from django.http import HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from django.shortcuts import get_object_or_404, render_to_response
from collections import defaultdict
from django.contrib.auth.decorators import login_required
from django.core.context_processors import csrf
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.db.models import Q
from django.contrib.auth.models import User
from django.forms import ModelForm
from .models import Image, Album, Tag
def main(request):
"""Main listing."""
context = RequestContext(request)
albums = Album.objects.all()
if not request.user.is_authenticated():
albums = albums.filter(public=True)
paginator = Paginator(albums, 4)
try:
page = int(request.GET.get("page", '1'))
except ValueError:
page = 1
try:
albums = paginator.page(page)
except (InvalidPage, EmptyPage):
albums = paginator.page(paginator.num_pages)
for album in albums.object_list:
album.images = album.image_set.all()[:4]
#album.images = album.image_set.all()
context_dict = {'albums':albums}
return render_to_response("photo/list.html", context_dict, context)
def album(request, pk, view="thumbnails"):
"""Album listing."""
# Code without Slideshow
"""album = Album.objects.get(pk=pk)
if not album.public and not request.user.is_authenticated():
return HttpResponse("Error: you need to be logged in to view this album.")
images = album.image_set.all()
paginator = Paginator(images, 30)
try: page = int(request.GET.get("page", '1'))
except ValueError: page = 1
try:
images = paginator.page(page)
except (InvalidPage, EmptyPage):
images = paginator.page(paginator.num_pages)"""
    # Alternative implementation that also supports the slideshow view
num_images = 30
if view == "full": num_images = 10
album = Album.objects.get(pk=pk)
images = album.image_set.all()
paginator = Paginator(images, num_images)
try: page = int(request.GET.get("page", '1'))
except ValueError: page = 1
try:
images = paginator.page(page)
except (InvalidPage, EmptyPage):
images = paginator.page(paginator.num_pages)
# add list of tags as string and list of album objects to each image object
for img in images.object_list:
tags = [x[1] for x in img.tags.values_list()]
img.tag_lst = ", ".join(tags)
img.album_lst = [x[1] for x in img.albums.values_list()]
context = RequestContext(request)
context_dict = dict(album=album, images=images, view=view, albums=Album.objects.all())
#context_dict.update(csrf(request))
return render_to_response("photo/album.html", context_dict, context )
def image(request, pk):
"""Image page."""
img = Image.objects.get(pk=pk)
context = RequestContext(request)
context_dict = dict(image=img, backurl=request.META["HTTP_REFERER"])
return render_to_response("photo/image.html", context_dict, context)
def update(request):
"""Update image title, rating, tags, albums."""
p = request.POST
images = defaultdict(dict)
# create dictionary of properties for each image
for k, v in p.items():
if k.startswith("title") or k.startswith("rating") or k.startswith("tags"):
k, pk = k.split('-')
images[pk][k] = v
elif k.startswith("album"):
pk = k.split('-')[1]
images[pk]["albums"] = p.getlist(k)
# process properties, assign to image objects and save
for k, d in images.items():
image = Image.objects.get(pk=k)
image.title = d["title"]
image.rating = int(d["rating"])
# tags - assign or create if a new tag!
tags = d["tags"].split(',')
lst = []
for t in tags:
if t:
t = t.strip()
lst.append(Tag.objects.get_or_create(tag=t)[0])
image.tags = lst
if "albums" in d:
image.albums = d["albums"]
image.save()
return HttpResponseRedirect(request.META["HTTP_REFERER"])
#@login_required
def search(request):
"""Search, filter, sort images."""
context = RequestContext(request)
context_dict = dict( albums=Album.objects.all(), authors=User.objects.all())
    # If this is the first visit via the Search link, just render the page without doing any processing
if request.method == 'GET' and not request.GET.get("page"):
return render_to_response("photo/search.html", context_dict, context)
    # From here on we are handling a POST or GET(?page) request
try:
page = int(request.GET.get("page", '1'))
except ValueError:
page = 1
p = request.POST
images = defaultdict(dict)
# init parameters
parameters = {}
keys = ['title', 'filename', 'rating_from', 'rating_to', 'width_from',
'width_to', 'height_from', 'height_to', 'tags', 'view', 'user', 'sort', 'asc_desc']
for k in keys:
parameters[k] = ''
parameters["album"] = []
# create dictionary of properties for each image and a dict of search/filter parameters
for k, v in p.items():
if k == "album":
parameters[k] = [int(x) for x in p.getlist(k)]
elif k in parameters:
parameters[k] = v
elif k.startswith("title") or k.startswith("rating") or k.startswith("tags"):
k, pk = k.split('-')
images[pk][k] = v
elif k.startswith("album"):
pk = k.split('-')[1]
images[pk]["albums"] = p.getlist(k)
# save or restore parameters from session
if page != 1 and "parameters" in request.session:
parameters = request.session["parameters"]
else:
request.session["parameters"] = parameters
results = update_and_filter(images, parameters)
# make paginator
paginator = Paginator(results, 20)
try:
results = paginator.page(page)
except (InvalidPage, EmptyPage):
results = paginator.page(paginator.num_pages)
# add list of tags as string and list of album names to each image object
for img in results.object_list:
tags = [x[1] for x in img.tags.values_list()]
img.tag_lst = ", ".join(tags)
img.album_lst = [x[1] for x in img.albums.values_list()]
context_dict['results'] = results
context_dict['prm'] = parameters
return render_to_response("photo/search.html", context_dict, context)
def update_and_filter(images, p):
"""Update image data if changed, filter results through parameters and return results list."""
# process properties, assign to image objects and save
for k, d in images.items():
image = Image.objects.get(pk=k)
image.title = d["title"]
image.rating = int(d["rating"])
# tags - assign or create if a new tag!
tags = d["tags"].split(',')
lst = []
for t in tags:
if t:
t = t.strip()
lst.append(Tag.objects.get_or_create(tag=t)[0])
image.tags = lst
if "albums" in d:
image.albums = d["albums"]
image.save()
# filter results by parameters
results = Image.objects.all()
if p["title"] : results = results.filter(title__icontains=p["title"])
if p["filename"] : results = results.filter(image__icontains=p["filename"])
if p["rating_from"] : results = results.filter(rating__gte=int(p["rating_from"]))
if p["rating_to"] : results = results.filter(rating__lte=int(p["rating_to"]))
if p["width_from"] : results = results.filter(width__gte=int(p["width_from"]))
if p["width_to"] : results = results.filter(width__lte=int(p["width_to"]))
if p["height_from"] : results = results.filter(height__gte=int(p["height_from"]))
if p["height_to"] : results = results.filter(height__lte=int(p["height_to"]))
if p["tags"]:
tags = p["tags"].split(',')
lst = []
for t in tags:
if t:
t = t.strip()
results = results.filter(tags=Tag.objects.get(tag=t))
if p["album"]:
lst = p["album"]
or_query = Q(albums=lst[0])
for album in lst[1:]:
or_query = or_query | Q(albums=album)
results = results.filter(or_query).distinct()
return results
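# Illustrative note: the album filter above ORs Q objects together, so for
# album ids [1, 2, 3] it is equivalent to
#   Image.objects.filter(Q(albums=1) | Q(albums=2) | Q(albums=3)).distinct()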
|
vadosl/photorganizer
|
photorganizer/photo/views__.py
|
Python
|
mit
| 8,479
|
import logging
import boto3
import io
from slovar import slovar
from prf import fs
log = logging.getLogger(__name__)
def includeme(config):
Settings = slovar(config.registry.settings)
S3.setup(Settings)
class S3(fs.FS):
def __init__(self, ds, create=False):
path = ds.ns.split('/')
bucket_name = path[0]
self.path = '/'.join(path[1:]+[ds.name])
s3 = boto3.resource('s3')
self.bucket = s3.Bucket(bucket_name)
self.file_or_buff = None
self._total = None
self.reader = fs.FileReader(
self.get_file_or_buff(),
format = fs.FileReader.get_format_from_file(self.path)
)
def drop_collection(self):
for it in self.bucket.objects.filter(Prefix=self.path):
it.delete()
def get_file_or_buff(self):
obj = boto3.resource('s3').Object(self.bucket.name, self.path)
return io.BytesIO(obj.get()['Body'].read())
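# Hypothetical usage sketch (assumes AWS credentials and prf's FS/FileReader
# conventions; the names below are made up):
#   ds = slovar(ns='my-bucket/exports', name='users.csv')
#   s3 = S3(ds)            # reads s3://my-bucket/exports/users.csv
#   s3.drop_collection()   # deletes every object under that key prefix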
|
vahana/prf
|
prf/s3.py
|
Python
|
mit
| 954
|
import unittest
from ai_graph_color import line, problem_generator
class TestProblemGenerator(unittest.TestCase):
def test_generate_file_path(self):
"""
        Tests that generate_file_path builds the expected 'problems/<name>' path
"""
file_names = ['test.json', '']
for file_name in file_names:
file_path = problem_generator.generate_file_path(file_name)
self.assertEqual(type(''), type(file_path))
self.assertEqual('problems/{}'.format(file_name), file_path)
def test_read_and_write_graph_to_file(self):
"""
Tests write graph to file
"""
num_verts = [0, 5, 100]
for index, num_vert in enumerate(num_verts):
graph = problem_generator.generate_graph(num_vert)
problem_generator.write_graph_to_file(
'test{}.json'.format(index),
graph
)
self.assertEqual(
graph,
problem_generator.read_graph_from_file(
'test{}.json'.format(index)
)
)
def test_generate_graph(self):
"""
Tests generate graph
"""
num_verts = [0, 5, 100]
for num_vert in num_verts:
graph = problem_generator.generate_graph(num_vert)
            self.assertEqual(
num_vert,
len(graph)
)
for connections in graph:
self.assertGreater(
len(connections),
0
)
def test_build_graph(self):
"""
Tests build graph
"""
points = [
[(0, 0), (0, 0)],
[],
[(100, 100), (1000, 1000)]
]
# This will need to change once build_graph is implemented
for point in points:
graph = problem_generator.build_graph(point)
            self.assertEqual(
len(point),
len(graph)
)
for connections in graph:
self.assertGreater(
len(connections),
0
)
def test_scatter_points(self):
"""
Tests scatter points
"""
num_points = [0, 1, 100]
for num_point in num_points:
self.assertEqual(
num_point,
len(problem_generator.scatter_points(num_point))
)
def test_create_lines(self):
"""
Tests certain properties hold for the lines-map on sample points:
- The points indexed by a line's key are the same as the points
listed in the line
- The distance calculated in a mapped line matches the distance
between the points indexed by that line's key
- The line can be freed without exception
"""
points = [(0.0, 0.0), (0.0, 3.0), (1.0, 1.0), (1.0, 5.0)]
lines = problem_generator.create_lines(points)
for pair, connecting_line in lines.items():
distance = line.point_distance(
*map(lambda i: points[i], pair)
)
self.assertAlmostEqual(distance, connecting_line.distance)
self.assertEqual(
frozenset(map(lambda i: points[i], pair)),
frozenset([connecting_line.left_point,
connecting_line.right_point])
)
connecting_line.free() # should not raise any errors
def test_create_distance_list(self):
"""
Tests certain properties hold for the distance list:
- Lines in a linked-list are ordered by distance
- Each line described in a list uses the correct indexes
- Freeing a line removes it from the line-map and both of
the linked-lists in the distance-list
"""
points = [(0.0, 0.0), (0.0, 3.0), (1.0, 1.0), (1.0, 5.0)]
lines = problem_generator.create_lines(points)
distance_list = problem_generator.create_distance_list(
lines, len(points)
)
for src_index, connections in enumerate(distance_list):
distances = map(lambda i: i[1].distance, connections)
self.assertTrue(all(
distances[i] <= distances[i + 1]
for i in xrange(len(distances) - 1)
))
for dst_index, connecting_line in connections:
self.assertEqual(
frozenset(map(lambda i: points[i],
[src_index, dst_index])),
frozenset([connecting_line.left_point,
connecting_line.right_point])
)
for connections in distance_list:
for other_index, connecting_line in connections:
first_list_size = len(connections)
other_list_size = len(distance_list[other_index])
lines_size = len(lines)
connecting_line.free()
self.assertEqual(first_list_size - 1, len(connections))
self.assertEqual(other_list_size - 1,
len(distance_list[other_index]))
self.assertEqual(lines_size - 1, len(lines))
            self.assertEqual(0, len(connections))
        self.assertEqual(0, len(lines))
|
sagersmith8/ai_graph_coloring
|
tests/test_problem_generator.py
|
Python
|
mit
| 5,335
|
import struct
import time
from dataclasses import dataclass
from typing import Iterator, Literal, Optional, Tuple
from OpenSSL import SSL
from mitmproxy import certs, connection
from mitmproxy.net import tls as net_tls
from mitmproxy.proxy import commands, events, layer, tunnel
from mitmproxy.proxy import context
from mitmproxy.proxy.commands import StartHook
from mitmproxy.utils import human
def is_tls_handshake_record(d: bytes) -> bool:
"""
Returns:
True, if the passed bytes start with the TLS record magic bytes
False, otherwise.
"""
# TLS ClientHello magic, works for SSLv3, TLSv1.0, TLSv1.1, TLSv1.2.
# TLS 1.3 mandates legacy_record_version to be 0x0301.
# http://www.moserware.com/2009/06/first-few-milliseconds-of-https.html#client-hello
return (
len(d) >= 3 and
d[0] == 0x16 and
d[1] == 0x03 and
0x0 <= d[2] <= 0x03
)
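# Illustrative check (not part of mitmproxy): a TLS handshake record starts
# with content type 0x16 followed by a 0x03xx version pair.
#   is_tls_handshake_record(bytes.fromhex("160301"))  # -> True
#   is_tls_handshake_record(b"GET / HTTP/1.1")        # -> False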
def handshake_record_contents(data: bytes) -> Iterator[bytes]:
"""
Returns a generator that yields the bytes contained in each handshake record.
This will raise an error on the first non-handshake record, so fully exhausting this
generator is a bad idea.
"""
offset = 0
while True:
if len(data) < offset + 5:
return
record_header = data[offset:offset + 5]
if not is_tls_handshake_record(record_header):
raise ValueError(f"Expected TLS record, got {record_header!r} instead.")
record_size = struct.unpack("!H", record_header[3:])[0]
if record_size == 0:
raise ValueError("Record must not be empty.")
offset += 5
if len(data) < offset + record_size:
return
record_body = data[offset:offset + record_size]
yield record_body
offset += record_size
def get_client_hello(data: bytes) -> Optional[bytes]:
"""
Read all TLS records that contain the initial ClientHello.
Returns the raw handshake packet bytes, without TLS record headers.
"""
client_hello = b""
for d in handshake_record_contents(data):
client_hello += d
if len(client_hello) >= 4:
client_hello_size = struct.unpack("!I", b'\x00' + client_hello[1:4])[0] + 4
if len(client_hello) >= client_hello_size:
return client_hello[:client_hello_size]
return None
def parse_client_hello(data: bytes) -> Optional[net_tls.ClientHello]:
"""
Check if the supplied bytes contain a full ClientHello message,
and if so, parse it.
Returns:
- A ClientHello object on success
- None, if the TLS record is not complete
Raises:
- A ValueError, if the passed ClientHello is invalid
"""
# Check if ClientHello is complete
client_hello = get_client_hello(data)
if client_hello:
try:
return net_tls.ClientHello(client_hello[4:])
except EOFError as e:
raise ValueError("Invalid ClientHello") from e
return None
HTTP1_ALPNS = (b"http/1.1", b"http/1.0", b"http/0.9")
HTTP_ALPNS = (b"h2",) + HTTP1_ALPNS
# We need these classes as hooks can only have one argument at the moment.
@dataclass
class ClientHelloData:
context: context.Context
"""The context object for this connection."""
client_hello: net_tls.ClientHello
"""The entire parsed TLS ClientHello."""
establish_server_tls_first: bool = False
"""
If set to `True`, pause this handshake and establish TLS with an upstream server first.
This makes it possible to process the server certificate when generating an interception certificate.
"""
@dataclass
class TlsClienthelloHook(StartHook):
"""
Mitmproxy has received a TLS ClientHello message.
This hook decides whether a server connection is needed
to negotiate TLS with the client (data.establish_server_tls_first)
"""
data: ClientHelloData
@dataclass
class TlsStartData:
conn: connection.Connection
context: context.Context
ssl_conn: Optional[SSL.Connection] = None
@dataclass
class TlsStartClientHook(StartHook):
"""
    TLS negotiation between mitmproxy and a client is about to start.
An addon is expected to initialize data.ssl_conn.
(by default, this is done by mitmproxy.addons.TlsConfig)
"""
data: TlsStartData
@dataclass
class TlsStartServerHook(StartHook):
"""
    TLS negotiation between mitmproxy and a server is about to start.
An addon is expected to initialize data.ssl_conn.
(by default, this is done by mitmproxy.addons.TlsConfig)
"""
data: TlsStartData
class _TLSLayer(tunnel.TunnelLayer):
tls: SSL.Connection = None # type: ignore
"""The OpenSSL connection object"""
def __init__(self, context: context.Context, conn: connection.Connection):
super().__init__(
context,
tunnel_connection=conn,
conn=conn,
)
conn.tls = True
def __repr__(self):
return super().__repr__().replace(")", f" {self.conn.sni} {self.conn.alpn})")
def start_tls(self) -> layer.CommandGenerator[None]:
assert not self.tls
tls_start = TlsStartData(self.conn, self.context)
if tls_start.conn == tls_start.context.client:
yield TlsStartClientHook(tls_start)
else:
yield TlsStartServerHook(tls_start)
if not tls_start.ssl_conn:
yield commands.Log("No TLS context was provided, failing connection.", "error")
yield commands.CloseConnection(self.conn)
assert tls_start.ssl_conn
self.tls = tls_start.ssl_conn
def tls_interact(self) -> layer.CommandGenerator[None]:
while True:
try:
data = self.tls.bio_read(65535)
except SSL.WantReadError:
return # Okay, nothing more waiting to be sent.
else:
yield commands.SendData(self.conn, data)
def receive_handshake_data(self, data: bytes) -> layer.CommandGenerator[Tuple[bool, Optional[str]]]:
# bio_write errors for b"", so we need to check first if we actually received something.
if data:
self.tls.bio_write(data)
try:
self.tls.do_handshake()
except SSL.WantReadError:
yield from self.tls_interact()
return False, None
except SSL.Error as e:
# provide more detailed information for some errors.
last_err = e.args and isinstance(e.args[0], list) and e.args[0] and e.args[0][-1]
if last_err == ('SSL routines', 'tls_process_server_certificate', 'certificate verify failed'):
verify_result = SSL._lib.SSL_get_verify_result(self.tls._ssl) # type: ignore
error = SSL._ffi.string(SSL._lib.X509_verify_cert_error_string(verify_result)).decode() # type: ignore
err = f"Certificate verify failed: {error}"
elif last_err in [
('SSL routines', 'ssl3_read_bytes', 'tlsv1 alert unknown ca'),
('SSL routines', 'ssl3_read_bytes', 'sslv3 alert bad certificate')
]:
assert isinstance(last_err, tuple)
err = last_err[2]
elif last_err == ('SSL routines', 'ssl3_get_record', 'wrong version number') and data[:4].isascii():
err = f"The remote server does not speak TLS."
else: # pragma: no cover
# TODO: Add test case once we find one.
err = f"OpenSSL {e!r}"
self.conn.error = err
return False, err
else:
# Here we set all attributes that are only known *after* the handshake.
# Get all peer certificates.
# https://www.openssl.org/docs/man1.1.1/man3/SSL_get_peer_cert_chain.html
# If called on the client side, the stack also contains the peer's certificate; if called on the server
# side, the peer's certificate must be obtained separately using SSL_get_peer_certificate(3).
all_certs = self.tls.get_peer_cert_chain() or []
if self.conn == self.context.client:
cert = self.tls.get_peer_certificate()
if cert:
all_certs.insert(0, cert)
self.conn.timestamp_tls_setup = time.time()
self.conn.alpn = self.tls.get_alpn_proto_negotiated()
self.conn.certificate_list = [certs.Cert.from_pyopenssl(x) for x in all_certs]
self.conn.cipher = self.tls.get_cipher_name()
self.conn.tls_version = self.tls.get_protocol_version_name()
if self.debug:
yield commands.Log(f"{self.debug}[tls] tls established: {self.conn}", "debug")
yield from self.receive_data(b"")
return True, None
def receive_data(self, data: bytes) -> layer.CommandGenerator[None]:
if data:
self.tls.bio_write(data)
yield from self.tls_interact()
plaintext = bytearray()
close = False
while True:
try:
plaintext.extend(self.tls.recv(65535))
except SSL.WantReadError:
break
except SSL.ZeroReturnError:
close = True
break
if plaintext:
yield from self.event_to_child(
events.DataReceived(self.conn, bytes(plaintext))
)
if close:
self.conn.state &= ~connection.ConnectionState.CAN_READ
if self.debug:
yield commands.Log(f"{self.debug}[tls] close_notify {self.conn}", level="debug")
yield from self.event_to_child(
events.ConnectionClosed(self.conn)
)
def receive_close(self) -> layer.CommandGenerator[None]:
if self.tls.get_shutdown() & SSL.RECEIVED_SHUTDOWN:
pass # We have already dispatched a ConnectionClosed to the child layer.
else:
yield from super().receive_close()
def send_data(self, data: bytes) -> layer.CommandGenerator[None]:
try:
self.tls.sendall(data)
except SSL.ZeroReturnError:
# The other peer may still be trying to send data over, which we discard here.
pass
yield from self.tls_interact()
def send_close(self, half_close: bool) -> layer.CommandGenerator[None]:
# We should probably shutdown the TLS connection properly here.
yield from super().send_close(half_close)
class ServerTLSLayer(_TLSLayer):
"""
This layer establishes TLS for a single server connection.
"""
wait_for_clienthello: bool = False
def __init__(self, context: context.Context, conn: Optional[connection.Server] = None):
super().__init__(context, conn or context.server)
def start_handshake(self) -> layer.CommandGenerator[None]:
wait_for_clienthello = (
# if command_to_reply_to is set, we've been instructed to open the connection from the child layer.
# in that case any potential ClientHello is already parsed (by the ClientTLS child layer).
not self.command_to_reply_to
# if command_to_reply_to is not set, the connection was already open when this layer received its Start
# event (eager connection strategy). We now want to establish TLS right away, _unless_ we already know
# that there's TLS on the client side as well (we check if our immediate child layer is set to be ClientTLS)
# In this case want to wait for ClientHello to be parsed, so that we can incorporate SNI/ALPN from there.
and isinstance(self.child_layer, ClientTLSLayer)
)
if wait_for_clienthello:
self.wait_for_clienthello = True
self.tunnel_state = tunnel.TunnelState.CLOSED
else:
yield from self.start_tls()
yield from self.receive_handshake_data(b"")
def event_to_child(self, event: events.Event) -> layer.CommandGenerator[None]:
if self.wait_for_clienthello:
for command in super().event_to_child(event):
if isinstance(command, commands.OpenConnection) and command.connection == self.conn:
self.wait_for_clienthello = False
# swallow OpenConnection here by not re-yielding it.
else:
yield command
else:
yield from super().event_to_child(event)
def on_handshake_error(self, err: str) -> layer.CommandGenerator[None]:
yield commands.Log(f"Server TLS handshake failed. {err}", level="warn")
yield from super().on_handshake_error(err)
class ClientTLSLayer(_TLSLayer):
"""
This layer establishes TLS on a single client connection.
┌─────┐
│Start│
└┬────┘
↓
┌────────────────────┐
│Wait for ClientHello│
└┬───────────────────┘
↓
┌────────────────┐
│Process messages│
└────────────────┘
"""
recv_buffer: bytearray
server_tls_available: bool
client_hello_parsed: bool = False
def __init__(self, context: context.Context):
if context.client.tls:
# In the case of TLS-over-TLS, we already have client TLS. As the outer TLS connection between client
# and proxy isn't that interesting to us, we just unset the attributes here and keep the inner TLS
# session's attributes.
# Alternatively we could create a new Client instance,
# but for now we keep it simple. There is a proof-of-concept at
# https://github.com/mitmproxy/mitmproxy/commit/9b6e2a716888b7787514733b76a5936afa485352.
context.client.alpn = None
context.client.cipher = None
context.client.sni = None
context.client.timestamp_tls_setup = None
context.client.tls_version = None
context.client.certificate_list = []
context.client.mitmcert = None
context.client.alpn_offers = []
context.client.cipher_list = []
super().__init__(context, context.client)
self.server_tls_available = isinstance(self.context.layers[-2], ServerTLSLayer)
self.recv_buffer = bytearray()
def start_handshake(self) -> layer.CommandGenerator[None]:
yield from ()
def receive_handshake_data(self, data: bytes) -> layer.CommandGenerator[Tuple[bool, Optional[str]]]:
if self.client_hello_parsed:
return (yield from super().receive_handshake_data(data))
self.recv_buffer.extend(data)
try:
client_hello = parse_client_hello(self.recv_buffer)
except ValueError:
return False, f"Cannot parse ClientHello: {self.recv_buffer.hex()}"
if client_hello:
self.client_hello_parsed = True
else:
return False, None
self.conn.sni = client_hello.sni
self.conn.alpn_offers = client_hello.alpn_protocols
tls_clienthello = ClientHelloData(self.context, client_hello)
yield TlsClienthelloHook(tls_clienthello)
if tls_clienthello.establish_server_tls_first and not self.context.server.tls_established:
err = yield from self.start_server_tls()
if err:
yield commands.Log(f"Unable to establish TLS connection with server ({err}). "
f"Trying to establish TLS with client anyway.")
yield from self.start_tls()
if not self.conn.connected:
return False, "connection closed early"
ret = yield from super().receive_handshake_data(bytes(self.recv_buffer))
self.recv_buffer.clear()
return ret
def start_server_tls(self) -> layer.CommandGenerator[Optional[str]]:
"""
We often need information from the upstream connection to establish TLS with the client.
For example, we need to check if the client does ALPN or not.
"""
if not self.server_tls_available:
return "No server TLS available."
err = yield commands.OpenConnection(self.context.server)
return err
def on_handshake_error(self, err: str) -> layer.CommandGenerator[None]:
if self.conn.sni:
dest = self.conn.sni
else:
dest = human.format_address(self.context.server.address)
level: Literal["warn", "info"] = "warn"
if err.startswith("Cannot parse ClientHello"):
pass
elif "unknown ca" in err or "bad certificate" in err:
err = f"The client does not trust the proxy's certificate for {dest} ({err})"
elif err == "connection closed":
err = (
f"The client disconnected during the handshake. If this happens consistently for {dest}, "
f"this may indicate that the client does not trust the proxy's certificate."
)
level = "info"
elif err == "connection closed early":
pass
else:
err = f"The client may not trust the proxy's certificate for {dest} ({err})"
if err != "connection closed early":
yield commands.Log(f"Client TLS handshake failed. {err}", level=level)
yield from super().on_handshake_error(err)
self.event_to_child = self.errored # type: ignore
def errored(self, event: events.Event) -> layer.CommandGenerator[None]:
if self.debug is not None:
yield commands.Log(f"Swallowing {event} as handshake failed.", "debug")
class MockTLSLayer(_TLSLayer):
"""Mock layer to disable actual TLS and use cleartext in tests.
Use like so:
monkeypatch.setattr(tls, "ServerTLSLayer", tls.MockTLSLayer)
"""
def __init__(self, ctx: context.Context):
super().__init__(ctx, connection.Server(None))
|
mhils/mitmproxy
|
mitmproxy/proxy/layers/tls.py
|
Python
|
mit
| 18,080
|
"""
Illustration of the heat equation
Solve the heat equation using finite differences and Forward Euler.
Based on: https://commons.wikimedia.org/wiki/File:Heat_eqn.gif
"""
from __future__ import division, print_function
import numpy as np
from mayavi import mlab
def step_function(N, scale, X, Y, shape="crescent"):
"""Function that is 1 on a set and 0 outside of it"""
shapes = ["crescent", "cylinder", "hexagon", "superquadric", "smiley"]
if shape not in shapes:
shape = "crescent"
if shape == "cylinder":
Z = np.ones_like(X)
Z[X**2 + Y**2 < 0.5] = 0
Z[X**2 + Y**2 > 2] = 0
if shape == "superquadric":
Z = np.ones_like(X)
Z[np.abs(X)**0.5 + np.abs(Y)**0.5 > 1.5] = 0
if shape == "hexagon":
Z = np.ones_like(X)
hexa = 2*np.abs(X) + np.abs(X - Y*np.sqrt(3)) +\
np.abs(X + Y*np.sqrt(3))
Z[hexa > 6] = 0
if shape == "crescent":
c = 2
d = -1
e = 1
f = 0.5
k = 1.2
shift = 10
Z = (c**2 - (X/e - d)**2 - (Y/f)**2)**2 + k*(c + d - X/e)**3 - shift
Z = 1 - np.maximum(np.sign(Z), 0)
if shape == "smiley":
Z = np.ones_like(X)
fac = 1.2
x_eye = 0.5
y_eye = 0.4
bicorn = fac**2*(Y + 0.3)**2*(1 - fac**2*X**2) -\
(fac**2*X**2 - 2*fac*(Y + 0.3) - 1)**2
left_eye = (X + x_eye)**2/0.1 + (Y - y_eye)**2/0.4 - 1
right_eye = (X - x_eye)**2/0.1 + (Y - y_eye)**2/0.4 - 1
Z[X**2 + Y**2 > 2] = 0
Z[bicorn > 0] = 0
Z[left_eye < 0] = 0
Z[right_eye < 0] = 0
Z = scale * Z
return Z
def data_gen(num):
# Solve the heat equation with zero boundary conditions
for cont in range(ntime_anim):
Z[1:N-1, 1:N-1] = Z[1:N-1, 1:N-1] + dt*(Z[2:N, 1:N-1] +
Z[0:N-2, 1:N-1] + Z[1:N-1, 0:N-2] +
Z[1:N-1, 2:N] - 4*Z[1:N-1, 1:N-1])/dx**2
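        # Forward-Euler step for u_t = u_xx + u_yy with the 5-point Laplacian;
        # stability requires dt <= dx**2/4 in 2-D, which dt = CFL*dx**2 with
        # CFL = 0.125 satisfies.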
surf = mlab.surf(X, Y, Z, colormap='autumn', warp_scale=1)
# Change the visualization parameters.
surf.actor.property.interpolation = 'phong'
surf.actor.property.specular = 0.3
surf.actor.property.specular_power = 20
surf.module_manager.scalar_lut_manager.reverse_lut = True
surf.module_manager.scalar_lut_manager.data_range = np.array([ 0., scale])
return surf
N = 500 # Grid points
L = 2.5 # Box size
X, Y = np.mgrid[-L:L:N*1j, -L:L:N*1j]
scale = 2
Z = step_function(N, scale, X, Y, shape="smiley")
CFL = 0.125
dx = X[1, 0] - X[0, 0]
dy = dx
dt = CFL*dx**2
end_time = 0.05
time = np.arange(0, end_time, dt)
nframes = 50
ntime = time.shape[0]
ntime_anim = int(ntime/nframes)
#%% Plot frames
fname = "heat_smiley"
bgcolor = (1, 1, 1)
fig = mlab.figure(size=(1200, 1000), bgcolor=bgcolor)
fig.scene.camera.azimuth(180)
mlab.get_engine()
engine = mlab.get_engine()
scene = engine.scenes[0]
for cont in range(nframes):
mlab.clf()
surf = data_gen(cont)
scene.scene.camera.position = [-8, -8, 7]
scene.scene.camera.clipping_range = [7, 22]
scene.scene.camera.focal_point = [0, 0, 1]
print(cont)
mlab.savefig("{}_{n:02d}.png".format(fname, n=cont))
|
nicoguaro/AdvancedMath
|
examples/heat_iterations.py
|
Python
|
mit
| 3,191
|
'''
Created on Mar 8, 2013
@author: Gary
'''
import unittest
from housemonitor.outputs.zigbee.zigbeecontrol import ZigBeeControl
from housemonitor.outputs.zigbee.zigbeeoutputstep import ZigBeeOutputStep
from housemonitor.outputs.zigbee.zigbeeoutputthread import ZigBeeOutputThread
from housemonitor.lib.hmqueue import HMQueue
from housemonitor.lib.constants import Constants
from mock import Mock, MagicMock, patch
from housemonitor.lib.common import Common
import logging.config
class Test( unittest.TestCase ):
logger = logging.getLogger( 'UnitTest' )
def setUp( self ):
logging.config.fileConfig( "unittest_logging.conf" )
def tearDown( self ):
pass
def test_logger_name( self ):
queue = HMQueue()
zig = ZigBeeOutputStep( queue )
self.assertEqual( Constants.LogKeys.outputsZigBee, zig.logger_name )
def test_topic_name( self ):
queue = HMQueue()
zig = ZigBeeOutputStep( queue )
self.assertEqual( Constants.TopicNames.ZigBeeOutput, zig.topic_name )
def test_step( self ):
value = 5
data = {Constants.DataPacket.device: 'device',
Constants.DataPacket.port: 'port',
Constants.DataPacket.arrival_time: 'arrival_time'}
listeners = ['a', 'b', 'c']
package = {'data': data, 'value': value}
queue = MagicMock( spec=HMQueue )
zig = ZigBeeOutputStep( queue )
v, d, l = zig.step( value, data, listeners )
queue.transmit.assert_called_once()
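        # Note: assert_called_once() needs Python 3.6+'s mock; on older
        # versions an unknown assert_* attribute is auto-created by MagicMock
        # and the check passes vacuously.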
self.assertEqual( value, v )
self.assertEqual( data, d )
self.assertEqual( listeners, l )
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
gary-pickens/HouseMonitor
|
housemonitor/outputs/zigbee/test/zigbeeoutputstep_test.py
|
Python
|
mit
| 1,738
|
#! /usr/bin/env python3
# Collect benchmark files into one directory under uniform, prefixed names
import sys
import argparse
import shutil
from pathlib import Path
import yaml
class Bencher(object):
def __init__(self):
self._name = 'bencher'
        self._help = 'Make benchmark directory'
def mk_arg_parser(self, ap):
ap.add_argument('--suffix',
'-s',
metavar='EXT',
type=str,
default='smt2',
help='File extension')
ap.add_argument('--prefix',
'-p',
metavar='PREF',
required='True',
help='Prefix to assign')
ap.add_argument('--out',
'-o',
type=str,
metavar="DIR",
help='Output directory',
required=True)
ap.add_argument('files', nargs='+')
ap.add_argument(
'--mv',
action='store_true',
help='Move (instead of copy) benchmarks into new location')
ap.add_argument('--verbose', '-v', action='store_true')
ap.add_argument('--dry-run', action='store_true')
return ap
def run(self, args=None):
num_files = len(args.files)
num_fmt = '{idx:0' + str(len(str(num_files))) + '}'
out_dir = Path(args.out)
out_dir.mkdir(parents=True, exist_ok=True)
prefix = args.prefix
suffix = args.suffix
# pick an action to apply to each file
if args.dry_run:
def _dry_run_action(src, dst):
pass
file_action = _dry_run_action
elif args.mv:
file_action = shutil.move
else:
file_action = shutil.copy2
inverse = dict()
        for idx, src in enumerate(args.files):
            idx_str = num_fmt.format(idx=idx)
dst_name = f'{prefix}-{idx_str}.{suffix}'
dst = out_dir / dst_name
if (args.verbose):
print(f'{src} --> {dst}')
file_action(src, dst)
inverse[dst_name] = src
with open(out_dir / 'inverse.yaml', 'w') as inverse_file:
yaml.dump(inverse, inverse_file)
return 0
def main(self, argv):
ap = argparse.ArgumentParser(prog=self._name, description=self._help)
ap = self.mk_arg_parser(ap)
args = ap.parse_args(argv)
return self.run(args)
def main():
cmd = Bencher()
return cmd.main(sys.argv[1:])
if __name__ == '__main__':
sys.exit(main())
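# Example invocation (hypothetical file names):
#
#   python bencher.py --prefix query --out bench a.smt2 b.smt2
#
# copies the inputs to bench/query-0.smt2 and bench/query-1.smt2 and records
# the mapping back to the original names in bench/inverse.yaml.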
|
agurfinkel/brunch
|
exp/bencher.py
|
Python
|
mit
| 2,706
|
# Create your views here.
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext, loader
from django.core.urlresolvers import reverse
from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from django.views import generic
from polls.models import Choice,Poll
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_poll_list'
def get_queryset(self):
"""
Return the last five published polls (not including those set to be
published in the future).
"""
return Poll.objects.filter(
pub_date__lte=timezone.now()
).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Poll
template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
model = Poll
template_name = 'polls/results.html'
def vote(request, poll_id):
p = get_object_or_404(Poll, pk=poll_id)
try:
selected_choice = p.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# display voting form
return render(request, 'polls/detail.html', {
'poll':p,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('polls:results',args=(p.id,)))
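# For reference, the URLconf these views assume (via reverse('polls:results', ...))
# might look like the sketch below; the exact patterns are an assumption based on
# this tutorial-era Django, and the 'polls' namespace must be set in the project urls:
#
#   from django.conf.urls import patterns, url
#   from polls import views
#
#   urlpatterns = patterns('',
#       url(r'^$', views.IndexView.as_view(), name='index'),
#       url(r'^(?P<pk>\d+)/$', views.DetailView.as_view(), name='detail'),
#       url(r'^(?P<pk>\d+)/results/$', views.ResultsView.as_view(), name='results'),
#       url(r'^(?P<poll_id>\d+)/vote/$', views.vote, name='vote'),
#   )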
|
Drhealsgood/learning_django
|
polls/views.py
|
Python
|
mit
| 1,526
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2018-03-05 05:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0012_sponsor_level_smallint'),
]
operations = [
migrations.AlterField(
model_name='sponsor',
name='conference',
field=models.SlugField(choices=[('pycontw-2016', 'PyCon Taiwan 2016'), ('pycontw-2017', 'PyCon Taiwan 2017'), ('pycontw-2018', 'PyCon Taiwan 2018')], default='pycontw-2018', verbose_name='conference'),
),
]
|
pycontw/pycontw2016
|
src/sponsors/migrations/0013_auto_20180305_1339.py
|
Python
|
mit
| 624
|
######## Script to convert IRS spectra into pseudophotometric
######## datapoints for modeling the TDs
import asciitable
import numpy as np
import matplotlib.pyplot as plt
import pyfits
from scipy import interpolate
def remove_duplicates_func(seq):
""" This function takes a list and returns
the same without duplicate elements."""
seen = set()
seen_add = seen.add
return [ x for x in seq if x not in seen and not seen_add(x)]
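# e.g. remove_duplicates_func([3, 1, 3, 2, 1]) returns [3, 1, 2], keeping the
# order of first appearance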
#####
avs_dictionary = {}
avs_dictionary['CS_Cha'] = 0.25
avs_dictionary['SZ_Cha'] = 1.90
avs_dictionary['T25'] = 0.78
avs_dictionary['T35'] = 3.5
avs_dictionary['T56'] = 0.23
avs_dictionary['ISO_52'] = 1.3
obj = 'T25'
av = avs_dictionary[obj]
# path informations
path_main = '../objects/'
path_object = path_main + obj + '/' + obj +'_data/'
# Read the information of the filters
filters_info=asciitable.read(path_main+'filters_info.txt')
total_filters_names=filters_info['Filter']
total_filters_lmb=filters_info['Lmb'] # Angstroms
total_filters_av_almb=filters_info['Av/Alambda']
total_filters_zp=filters_info['ZP']
# read the phot info and get it in right units
phot_file = asciitable.read(path_object + obj +'_phot.txt')
filters = phot_file['Filter']
lmb = phot_file['Lmb']
fluxes = phot_file['Value']
errors = phot_file['Error']
units = phot_file['Units']
detections = phot_file['Detection']
zp = phot_file['ZP']
filters_av_almb = list()
filters_zp = list()
for element in phot_file:
index = np.where(total_filters_names == element['Filter'])[0][0]
filters_av_almb.append(total_filters_av_almb[index])
filters_zp.append(total_filters_zp[index])
filters_av_almb = np.array(filters_av_almb)
filters_zp = np.array(filters_zp)
# convert to jy
indexes_conversion = np.where(phot_file['Units'] == 'mag')[0]
error_fractions = errors[indexes_conversion]
fluxes[indexes_conversion] = zp[indexes_conversion] * 10 ** (-0.4 * fluxes[indexes_conversion])
errors[indexes_conversion] = error_fractions * fluxes[indexes_conversion]
# deredden
fluxes = fluxes / 10**(-0.4 * av * filters_av_almb)
errors = errors / 10**(-0.4 * av * filters_av_almb)
# convert to flmb, fluxes in Jy, wavelength in microns
fluxes = (3e-8 * fluxes*1e3 / (lmb*1e4)**2)
errors = (3e-8 * errors*1e3 / (lmb*1e4)**2)
# Convert now to lmbflmb, fluxes in erg/cm2/s/A, wavelength in microns
lmb_flmb = fluxes * lmb * 1e4 #set lmb to A
lmb_flmb_e = errors * lmb * 1e4 #set lmb to A
indexes_upper = np.where(detections == 0)[0]
lmb_flmb_e[indexes_upper] = 0.
file_to_write = open(path_object + obj +'_processed.txt','w')
file_to_write.write('#Filter\tLmb[um]\tLmb_flmb[erg/cm2/s]\tLmb_flmb_err[erg/cm2/s]\tDetection\n')
for index in xrange(len(fluxes)):
file_to_write.write(filters[index]+'\t')
file_to_write.write('{:.3e}\t'.format(lmb[index]))
file_to_write.write('{:.3e}\t'.format(lmb_flmb[index]))
file_to_write.write('{:.3e}\t'.format(lmb_flmb_e[index]))
file_to_write.write('{:}\n'.format(detections[index]))
file_to_write.close()
file_lmb = open(path_object + obj + '.lambda','w')
lmb_unique = remove_duplicates_func(lmb)
lmb_unique = np.sort(lmb_unique)
for element in lmb_unique:
file_lmb.write('{:.4e}\n'.format(element))
file_lmb.close()
##########################
## IRS Spectrum
# Dereddening data
# Mathis1990 extinction law for spitzer (Rv=5)
mathis_lmb=[2.2,3.4,5.,7.,9.,9.7,10.,12.,15.,18.,20.,25.,35.]
mathis_alambda_aj=[0.382,0.182,0.095,0.07,0.157,0.2,0.192,0.098,0.053,0.083,0.075,0.0048,0.013]
mathis_interpol=interpolate.interp1d(mathis_lmb,mathis_alambda_aj,kind='linear')
#McClure2009 extinction law (lmb in microns)
mcclure=pyfits.open(path_main + 'McClure2009.fits')
mcclure_lmb=mcclure[1].data['lambda']
mcclure_alambda_ak1=mcclure[1].data['Al/AK1']
mcclure_alambda_ak2=mcclure[1].data['Al/AK2']
indexes=[(mcclure_lmb < 36) & (mcclure_lmb > 4)]
mcclure_lmb=mcclure_lmb[indexes]
mcclure_alambda_ak1=mcclure_alambda_ak1[indexes]
mcclure_alambda_ak2=mcclure_alambda_ak2[indexes]
mcclure_interpol1=interpolate.interp1d(mcclure_lmb,mcclure_alambda_ak1,kind='linear')
mcclure_interpol2=interpolate.interp1d(mcclure_lmb,mcclure_alambda_ak2,kind='linear')
irs=pyfits.open(path_object + obj +'_IRS.fits')
spectrum=irs[0].data
irs_lmb=spectrum[:,0] ## In microns
irs_fnu=spectrum[:,1]
irs_fnu_err = np.sqrt(spectrum[:,2]**2 + spectrum[:,3]**2 + spectrum[:,4]**2)
# get the errors in relative error
irs_fnu_rel_err = irs_fnu_err / irs_fnu
# cut the order1 between 7-14 and 20.5 - 35 microns
order1=[(spectrum[:,8] == 1) & (((irs_lmb > 7.6) &(irs_lmb < 14.)) | ((irs_lmb > 20.5) & (irs_lmb < 35.)))]
# cut the order2 up to 20.5 microns
order2=[(spectrum[:,8] == 2) & (irs_lmb < 20.5)]
# get to corresponding values and sort them
lmb1=irs_lmb[order1]
lmb2=irs_lmb[order2]
irs_fnu1=irs_fnu[order1]
irs_fnu2=irs_fnu[order2]
irs_lmb=np.concatenate((lmb1,lmb2),axis=0)
irs_fnu=np.concatenate((irs_fnu1,irs_fnu2),axis=0)
irs_fnu=irs_fnu[np.argsort(irs_lmb)]
irs_lmb=irs_lmb[np.argsort(irs_lmb)]
#
aj=av*0.31
ak=av*0.13
print 'Aj:'+str(aj)
print 'Ak:'+str(ak)
# deredden: pick the extinction law according to the amount of extinction
# (Mathis 1990 for low A_J, the McClure 2009 curves otherwise)
if aj < 0.8:
    coeffs=mathis_interpol(irs_lmb)
    almbs=coeffs*aj
elif ak < 1:
    coeffs=mcclure_interpol1(irs_lmb)
    almbs=coeffs*ak
else:
    coeffs=mcclure_interpol2(irs_lmb)
    almbs=coeffs*ak
# removing extinction multiplies the observed flux by 10**(0.4*A_lambda)
irs_fnu_der=irs_fnu*10.**(-0.4*(-almbs))
print 'Min:'+str(almbs.min())+'/Max:'+str(almbs.max())
# convert from Jy to erg/cm2/s/Hz
irs_fnu_der=irs_fnu_der*1e-23
# convert to erg/cm2/s/A
irs_flmb=irs_fnu_der*3.e8*1.e2/irs_lmb**2
# sort everything and get lmb_flmb
indexes_real = np.isfinite(irs_flmb)
irs_lmb = irs_lmb[indexes_real]
irs_flmb = irs_flmb[indexes_real]
irs_fnu_rel_err = irs_fnu_rel_err[indexes_real]
irs_lmbflmb = irs_lmb *1e4 * irs_flmb # irs_lmb in microns
# bring back the errors, only when the errors are real too
indexes_real = np.isfinite(irs_fnu_rel_err)
irs_lmb = irs_lmb[indexes_real]
irs_lmbflmb = irs_lmbflmb[indexes_real]
irs_fnu_rel_err = irs_fnu_rel_err[indexes_real]
irs_lmbflmb_err = irs_lmbflmb * irs_fnu_rel_err
# now bin everything
n_bins = 10
len_bins = np.int(np.floor(len(irs_lmb) / n_bins))
lmb_binned = list()
lmbflmb_binned = list()
lmbflmb_err_binned = list()
for n_bin in xrange(n_bins):
# compute indexes for binning
indexes_bin = np.arange(n_bin * len_bins, (n_bin+1) * len_bins)
# in the last case, take the remaining datapoints in the last bin
if n_bin == n_bins-1:
indexes_bin = np.arange(n_bin * len_bins,len(irs_lmb))
lmb_value = np.mean(irs_lmb[indexes_bin])
lmbflmb_value = np.mean(irs_lmbflmb[indexes_bin])
lmbflmb_err_value = np.std(irs_lmbflmb[indexes_bin]) / np.sqrt(len(indexes_bin))
# append the results
lmb_binned.append(lmb_value)
lmbflmb_binned.append(lmbflmb_value)
lmbflmb_err_binned.append(lmbflmb_err_value)
# finally, convert it to np arrays
lmb_binned = np.array(lmb_binned)
lmbflmb_binned = np.array(lmbflmb_binned)
lmbflmb_err_binned = np.array(lmbflmb_err_binned)
# append the results to the files
file_to_write = open(path_object + obj +'_processed.txt','a')
file_lmb = open(path_object + obj + '.lambda','a')
for index in xrange(len(lmb_binned)):
file_lmb.write('{:.4e}\n'.format(lmb_binned[index]))
file_to_write.write('IRS_binned\t')
file_to_write.write('{:.3e}\t'.format(lmb_binned[index]))
file_to_write.write('{:.3e}\t'.format(lmbflmb_binned[index]))
file_to_write.write('{:.3e}\t'.format(lmbflmb_err_binned[index]))
file_to_write.write('1\n')
file_to_write.close()
file_lmb.close()
# OPTIONAL: plot to check
plot_to_check = True
if plot_to_check:
plt.errorbar(irs_lmb,irs_lmbflmb,yerr=irs_lmbflmb_err,fmt='o',mec=None, ms=1, mfc='blue')
plt.errorbar(lmb_binned,lmbflmb_binned,yerr=lmbflmb_err_binned,fmt='o',mfc='red',mec=None,ms=8,color='red')
plt.xscale('log')
plt.yscale('log')
plt.xlim(4,40)
plt.show()
|
alvaroribas/modeling_TDs
|
data_converter.py
|
Python
|
mit
| 7,982
|
"""
=====================================
Include On-leveling into Cape Cod
=====================================
This example demonstrates how to incorporate on-leveling into the `CapeCod`
estimator. The on-leveling emulates the approach taken by Friedland in
"Estimating Unpaid Claims Using Basic Techniques" Chapter 10. The `ParallelogramOLF`
estimator is new in chainladder 0.7.9 as is the ``xyz`` triangle.
"""
import chainladder as cl
import pandas as pd
# Grab a triangle
xyz = cl.load_sample('xyz')
# Premium on-leveling factors
rate_history = pd.DataFrame({
'date': ['1/1/1999', '1/1/2000', '1/1/2001', '1/1/2002', '1/1/2003',
'1/1/2004', '1/1/2005', '1/1/2006', '1/1/2007', '1/1/2008'],
'rate_change': [.02, .02, .02, .02, .05, .075, .15, .1, -.2, -.2]
})
# Loss on-leveling factors
tort_reform = pd.DataFrame({
'date': ['1/1/2006', '1/1/2007'],
'rate_change': [-0.1067, -.25]
})
# In addition to development, include onlevel estimator in pipeline for loss
pipe = cl.Pipeline(steps=[
('olf', cl.ParallelogramOLF(tort_reform, change_col='rate_change', date_col='date', vertical_line=True)),
('dev', cl.Development(n_periods=2)),
('model', cl.CapeCod(trend=0.034))
])
# Define X
X = cl.load_sample('xyz')['Incurred']
# Separately apply on-level factors for premium
sample_weight = cl.ParallelogramOLF(
rate_history, change_col='rate_change', date_col='date',
vertical_line=True).fit_transform(xyz['Premium'].latest_diagonal)
# Fit the Cape Cod estimator
pipe.fit(X, sample_weight=sample_weight).named_steps.model.ultimate_
# Create a Cape Cod pipeline without onleveling
pipe2 = cl.Pipeline(steps=[
('dev', cl.Development(n_periods=2)),
('model', cl.CapeCod(trend=0.034))
])
# Finally, fit the Cape Cod estimator without on-leveling
pipe2.fit(X, sample_weight=xyz['Premium'].latest_diagonal).named_steps.model.ultimate_
# Plot results
cl.concat((
pipe.named_steps.model.ultimate_.rename('columns', ['With On-level']),
pipe2.named_steps.model.ultimate_.rename('columns', ['Without On-level'])), 1).T.plot(
kind='bar', title='Cape Cod sensitivity to on-leveling', grid=True, subplots=True, legend=False);
|
jbogaardt/chainladder-python
|
examples/plot_capecod_onlevel.py
|
Python
|
mit
| 2,182
|
from __future__ import absolute_import, division, print_function
from builtins import * # @UnusedWildImport
from mcculw import ul
from mcculw.ul import ULError
from mcculw.enums import (BoardInfo, InfoType, ErrorCode, EventType,
ExpansionInfo)
from .ai_info import AiInfo
from .ao_info import AoInfo
from .ctr_info import CtrInfo
from .daqi_info import DaqiInfo
from .daqo_info import DaqoInfo
from .dio_info import DioInfo
class DaqDeviceInfo:
"""Provides hardware information for the DAQ device configured with the
specified board number.
NOTE: This class is primarily used to provide hardware information for the
library examples and may change some hardware configuration values. It is
recommended that values provided by this class be hard-coded in production
code.
Parameters
----------
board_num : int
The board number associated with the device when created with
:func:`.create_daq_device` or configured with Instacal.
"""
def __init__(self, board_num):
self._board_num = board_num
self._board_type = ul.get_config(InfoType.BOARDINFO, board_num, 0,
BoardInfo.BOARDTYPE)
if self._board_type == 0:
raise ULError(ErrorCode.BADBOARD)
self._ai_info = AiInfo(self._board_num)
self._ao_info = AoInfo(self._board_num)
self._ctr_info = CtrInfo(self._board_num)
self._daqi_info = DaqiInfo(self._board_num)
self._daqo_info = DaqoInfo(self._board_num)
self._dio_info = DioInfo(self._board_num)
@property
def board_num(self): # -> int
return self._board_num
@property
def product_name(self): # -> str
return ul.get_board_name(self._board_num)
@property
def unique_id(self): # -> str
return ul.get_config_string(InfoType.BOARDINFO, self._board_num, 0,
BoardInfo.DEVUNIQUEID, 32)
@property
def supports_analog_input(self): # -> boolean
return self._ai_info.is_supported
@property
def supports_temp_input(self): # -> boolean
return self._ai_info.temp_supported
def get_ai_info(self): # -> AiInfo
return self._ai_info
@property
def supports_analog_output(self): # -> boolean
return self._ao_info.is_supported
def get_ao_info(self): # -> AoInfo
return self._ao_info
@property
def supports_counters(self): # -> boolean
return self._ctr_info.is_supported
def get_ctr_info(self): # -> CtrInfo
return self._ctr_info
@property
def supports_daq_input(self): # -> boolean
return self._daqi_info.is_supported
def get_daqi_info(self): # -> DaqiInfo
return self._daqi_info
@property
def supports_daq_output(self): # -> boolean
return self._daqo_info.is_supported
def get_daqo_info(self): # -> DaqoInfo
return self._daqo_info
@property
def supports_digital_io(self): # -> boolean
return self._dio_info.is_supported
def get_dio_info(self): # -> DioInfo
return self._dio_info
@property
def supported_event_types(self): # -> list[EventType]
event_types = []
for event_type in EventType:
try:
ul.disable_event(self._board_num, event_type)
event_types.append(event_type)
except ULError:
pass
return event_types
@property
def num_expansions(self): # -> int
return ul.get_config(InfoType.BOARDINFO, self.board_num, 0,
BoardInfo.NUMEXPS)
@property
def exp_info(self): # -> list[ExpInfo]
exp_info = []
for expansion_num in range(self.num_expansions):
exp_info.append(ExpInfo(self._board_num, expansion_num))
return exp_info
class ExpInfo:
def __init__(self, board_num, expansion_num):
self._board_num = board_num
self._expansion_num = expansion_num
@property
def board_type(self):
return ul.get_config(InfoType.EXPANSIONINFO, self._board_num,
self._expansion_num, ExpansionInfo.BOARDTYPE)
@property
def mux_ad_chan(self):
return ul.get_config(InfoType.EXPANSIONINFO, self._board_num,
self._expansion_num, ExpansionInfo.MUX_AD_CHAN1)
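# A minimal usage sketch (assumes a device has already been configured as
# board 0, e.g. with Instacal; the module path is taken from this file's location):
#
#   from mcculw.device_info.daq_device_info import DaqDeviceInfo
#
#   dev_info = DaqDeviceInfo(0)
#   print(dev_info.product_name)
#   if dev_info.supports_analog_input:
#       ai_info = dev_info.get_ai_info()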
|
mccdaq/mcculw
|
mcculw/device_info/daq_device_info.py
|
Python
|
mit
| 4,437
|
import inspect
import re
import sys
import traceback
from inspect import CO_VARARGS
from inspect import CO_VARKEYWORDS
from traceback import format_exception_only
from types import TracebackType
from typing import Generic
from typing import Optional
from typing import Pattern
from typing import Tuple
from typing import TypeVar
from typing import Union
from weakref import ref
import attr
import pluggy
import py
import _pytest
from _pytest._io.saferepr import safeformat
from _pytest._io.saferepr import saferepr
if False: # TYPE_CHECKING
from typing import Type
class Code:
""" wrapper around Python code objects """
def __init__(self, rawcode):
if not hasattr(rawcode, "co_filename"):
rawcode = getrawcode(rawcode)
try:
self.filename = rawcode.co_filename
self.firstlineno = rawcode.co_firstlineno - 1
self.name = rawcode.co_name
except AttributeError:
raise TypeError("not a code object: {!r}".format(rawcode))
self.raw = rawcode
def __eq__(self, other):
return self.raw == other.raw
# Ignore type because of https://github.com/python/mypy/issues/4266.
__hash__ = None # type: ignore
def __ne__(self, other):
return not self == other
@property
def path(self):
""" return a path object pointing to source code (note that it
might not point to an actually existing file). """
try:
p = py.path.local(self.raw.co_filename)
# maybe don't try this checking
if not p.check():
raise OSError("py.path check failed.")
except OSError:
# XXX maybe try harder like the weird logic
# in the standard lib [linecache.updatecache] does?
p = self.raw.co_filename
return p
@property
def fullsource(self):
""" return a _pytest._code.Source object for the full source file of the code
"""
from _pytest._code import source
full, _ = source.findsource(self.raw)
return full
def source(self):
""" return a _pytest._code.Source object for the code object's source only
"""
# return source only for that part of code
import _pytest._code
return _pytest._code.Source(self.raw)
def getargs(self, var=False):
""" return a tuple with the argument names for the code object
if 'var' is set True also return the names of the variable and
keyword arguments when present
"""
        # handy shortcut for getting args
raw = self.raw
argcount = raw.co_argcount
if var:
argcount += raw.co_flags & CO_VARARGS
argcount += raw.co_flags & CO_VARKEYWORDS
return raw.co_varnames[:argcount]
class Frame:
"""Wrapper around a Python frame holding f_locals and f_globals
in which expressions can be evaluated."""
def __init__(self, frame):
self.lineno = frame.f_lineno - 1
self.f_globals = frame.f_globals
self.f_locals = frame.f_locals
self.raw = frame
self.code = Code(frame.f_code)
@property
def statement(self):
""" statement this frame is at """
import _pytest._code
if self.code.fullsource is None:
return _pytest._code.Source("")
return self.code.fullsource.getstatement(self.lineno)
def eval(self, code, **vars):
""" evaluate 'code' in the frame
'vars' are optional additional local variables
returns the result of the evaluation
"""
f_locals = self.f_locals.copy()
f_locals.update(vars)
return eval(code, self.f_globals, f_locals)
def exec_(self, code, **vars):
""" exec 'code' in the frame
        'vars' are optional additional local variables
"""
f_locals = self.f_locals.copy()
f_locals.update(vars)
exec(code, self.f_globals, f_locals)
def repr(self, object):
""" return a 'safe' (non-recursive, one-line) string repr for 'object'
"""
return saferepr(object)
def is_true(self, object):
return object
def getargs(self, var=False):
""" return a list of tuples (name, value) for all arguments
if 'var' is set True also include the variable and keyword
arguments when present
"""
retval = []
for arg in self.code.getargs(var):
try:
retval.append((arg, self.f_locals[arg]))
except KeyError:
pass # this can occur when using Psyco
return retval
class TracebackEntry:
""" a single entry in a traceback """
_repr_style = None
exprinfo = None
def __init__(self, rawentry, excinfo=None):
self._excinfo = excinfo
self._rawentry = rawentry
self.lineno = rawentry.tb_lineno - 1
def set_repr_style(self, mode):
assert mode in ("short", "long")
self._repr_style = mode
@property
def frame(self):
import _pytest._code
return _pytest._code.Frame(self._rawentry.tb_frame)
@property
def relline(self):
return self.lineno - self.frame.code.firstlineno
def __repr__(self):
return "<TracebackEntry %s:%d>" % (self.frame.code.path, self.lineno + 1)
@property
def statement(self):
""" _pytest._code.Source object for the current statement """
source = self.frame.code.fullsource
return source.getstatement(self.lineno)
@property
def path(self):
""" path to the source code """
return self.frame.code.path
@property
def locals(self):
""" locals of underlaying frame """
return self.frame.f_locals
def getfirstlinesource(self):
return self.frame.code.firstlineno
def getsource(self, astcache=None):
""" return failing source code. """
# we use the passed in astcache to not reparse asttrees
# within exception info printing
from _pytest._code.source import getstatementrange_ast
source = self.frame.code.fullsource
if source is None:
return None
key = astnode = None
if astcache is not None:
key = self.frame.code.path
if key is not None:
astnode = astcache.get(key, None)
start = self.getfirstlinesource()
try:
astnode, _, end = getstatementrange_ast(
self.lineno, source, astnode=astnode
)
except SyntaxError:
end = self.lineno + 1
else:
if key is not None:
astcache[key] = astnode
return source[start:end]
source = property(getsource)
def ishidden(self):
""" return True if the current frame has a var __tracebackhide__
resolving to True.
If __tracebackhide__ is a callable, it gets called with the
ExceptionInfo instance and can decide whether to hide the traceback.
mostly for internal use
"""
f = self.frame
tbh = f.f_locals.get(
"__tracebackhide__", f.f_globals.get("__tracebackhide__", False)
)
if tbh and callable(tbh):
return tbh(None if self._excinfo is None else self._excinfo())
return tbh
def __str__(self):
try:
fn = str(self.path)
except py.error.Error:
fn = "???"
name = self.frame.code.name
try:
line = str(self.statement).lstrip()
except KeyboardInterrupt:
raise
except: # noqa
line = "???"
return " File %r:%d in %s\n %s\n" % (fn, self.lineno + 1, name, line)
@property
def name(self):
""" co_name of underlaying code """
return self.frame.code.raw.co_name
class Traceback(list):
""" Traceback objects encapsulate and offer higher level
access to Traceback entries.
"""
Entry = TracebackEntry
def __init__(self, tb, excinfo=None):
""" initialize from given python traceback object and ExceptionInfo """
self._excinfo = excinfo
if hasattr(tb, "tb_next"):
def f(cur):
while cur is not None:
yield self.Entry(cur, excinfo=excinfo)
cur = cur.tb_next
list.__init__(self, f(tb))
else:
list.__init__(self, tb)
def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
""" return a Traceback instance wrapping part of this Traceback
        by providing any combination of path, lineno and firstlineno, the
        first frame to start the to-be-returned traceback is determined.
        This allows cutting the first part of a Traceback instance e.g.
for formatting reasons (removing some uninteresting bits that deal
with handling of the exception/traceback)
"""
for x in self:
code = x.frame.code
codepath = code.path
if (
(path is None or codepath == path)
and (
excludepath is None
or not hasattr(codepath, "relto")
or not codepath.relto(excludepath)
)
and (lineno is None or x.lineno == lineno)
and (firstlineno is None or x.frame.code.firstlineno == firstlineno)
):
return Traceback(x._rawentry, self._excinfo)
return self
def __getitem__(self, key):
val = super().__getitem__(key)
if isinstance(key, type(slice(0))):
val = self.__class__(val)
return val
def filter(self, fn=lambda x: not x.ishidden()):
""" return a Traceback instance with certain items removed
fn is a function that gets a single argument, a TracebackEntry
instance, and should return True when the item should be added
to the Traceback, False when not
by default this removes all the TracebackEntries which are hidden
(see ishidden() above)
"""
return Traceback(filter(fn, self), self._excinfo)
def getcrashentry(self):
""" return last non-hidden traceback entry that lead
to the exception of a traceback.
"""
for i in range(-1, -len(self) - 1, -1):
entry = self[i]
if not entry.ishidden():
return entry
return self[-1]
def recursionindex(self):
""" return the index of the frame/TracebackEntry where recursion
originates if appropriate, None if no recursion occurred
"""
cache = {}
for i, entry in enumerate(self):
# id for the code.raw is needed to work around
# the strange metaprogramming in the decorator lib from pypi
# which generates code objects that have hash/value equality
# XXX needs a test
key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
# print "checking for recursion at", key
values = cache.setdefault(key, [])
if values:
f = entry.frame
loc = f.f_locals
for otherloc in values:
if f.is_true(
f.eval(
co_equal,
__recursioncache_locals_1=loc,
__recursioncache_locals_2=otherloc,
)
):
return i
values.append(entry.frame.f_locals)
return None
co_equal = compile(
"__recursioncache_locals_1 == __recursioncache_locals_2", "?", "eval"
)
_E = TypeVar("_E", bound=BaseException)
@attr.s(repr=False)
class ExceptionInfo(Generic[_E]):
""" wraps sys.exc_info() objects and offers
help for navigating the traceback.
"""
_assert_start_repr = "AssertionError('assert "
_excinfo = attr.ib(type=Optional[Tuple["Type[_E]", "_E", TracebackType]])
_striptext = attr.ib(type=str, default="")
_traceback = attr.ib(type=Optional[Traceback], default=None)
@classmethod
def from_exc_info(
cls,
exc_info: Tuple["Type[_E]", "_E", TracebackType],
exprinfo: Optional[str] = None,
) -> "ExceptionInfo[_E]":
"""returns an ExceptionInfo for an existing exc_info tuple.
.. warning::
Experimental API
:param exprinfo: a text string helping to determine if we should
strip ``AssertionError`` from the output, defaults
to the exception message/``__str__()``
"""
_striptext = ""
if exprinfo is None and isinstance(exc_info[1], AssertionError):
exprinfo = getattr(exc_info[1], "msg", None)
if exprinfo is None:
exprinfo = saferepr(exc_info[1])
if exprinfo and exprinfo.startswith(cls._assert_start_repr):
_striptext = "AssertionError: "
return cls(exc_info, _striptext)
@classmethod
def from_current(
cls, exprinfo: Optional[str] = None
) -> "ExceptionInfo[BaseException]":
"""returns an ExceptionInfo matching the current traceback
.. warning::
Experimental API
:param exprinfo: a text string helping to determine if we should
strip ``AssertionError`` from the output, defaults
to the exception message/``__str__()``
"""
tup = sys.exc_info()
assert tup[0] is not None, "no current exception"
assert tup[1] is not None, "no current exception"
assert tup[2] is not None, "no current exception"
exc_info = (tup[0], tup[1], tup[2])
return cls.from_exc_info(exc_info)
@classmethod
def for_later(cls) -> "ExceptionInfo[_E]":
"""return an unfilled ExceptionInfo
"""
return cls(None)
def fill_unfilled(self, exc_info: Tuple["Type[_E]", _E, TracebackType]) -> None:
"""fill an unfilled ExceptionInfo created with for_later()"""
assert self._excinfo is None, "ExceptionInfo was already filled"
self._excinfo = exc_info
@property
def type(self) -> "Type[_E]":
"""the exception class"""
assert (
self._excinfo is not None
), ".type can only be used after the context manager exits"
return self._excinfo[0]
@property
def value(self) -> _E:
"""the exception value"""
assert (
self._excinfo is not None
), ".value can only be used after the context manager exits"
return self._excinfo[1]
@property
def tb(self) -> TracebackType:
"""the exception raw traceback"""
assert (
self._excinfo is not None
), ".tb can only be used after the context manager exits"
return self._excinfo[2]
@property
def typename(self) -> str:
"""the type name of the exception"""
assert (
self._excinfo is not None
), ".typename can only be used after the context manager exits"
return self.type.__name__
@property
def traceback(self) -> Traceback:
"""the traceback"""
if self._traceback is None:
self._traceback = Traceback(self.tb, excinfo=ref(self))
return self._traceback
@traceback.setter
def traceback(self, value: Traceback) -> None:
self._traceback = value
def __repr__(self) -> str:
if self._excinfo is None:
return "<ExceptionInfo for raises contextmanager>"
return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
def exconly(self, tryshort: bool = False) -> str:
""" return the exception as a string
when 'tryshort' resolves to True, and the exception is a
_pytest._code._AssertionError, only the actual exception part of
the exception representation is returned (so 'AssertionError: ' is
removed from the beginning)
"""
lines = format_exception_only(self.type, self.value)
text = "".join(lines)
text = text.rstrip()
if tryshort:
if text.startswith(self._striptext):
text = text[len(self._striptext) :]
return text
def errisinstance(
self, exc: Union["Type[BaseException]", Tuple["Type[BaseException]", ...]]
) -> bool:
""" return True if the exception is an instance of exc """
return isinstance(self.value, exc)
def _getreprcrash(self) -> "ReprFileLocation":
exconly = self.exconly(tryshort=True)
entry = self.traceback.getcrashentry()
path, lineno = entry.frame.code.raw.co_filename, entry.lineno
return ReprFileLocation(path, lineno + 1, exconly)
def getrepr(
self,
showlocals: bool = False,
style: str = "long",
abspath: bool = False,
tbfilter: bool = True,
funcargs: bool = False,
truncate_locals: bool = True,
chain: bool = True,
):
"""
Return str()able representation of this exception info.
:param bool showlocals:
Show locals per traceback entry.
Ignored if ``style=="native"``.
:param str style: long|short|no|native traceback style
:param bool abspath:
If paths should be changed to absolute or left unchanged.
:param bool tbfilter:
Hide entries that contain a local variable ``__tracebackhide__==True``.
Ignored if ``style=="native"``.
:param bool funcargs:
Show fixtures ("funcargs" for legacy purposes) per traceback entry.
:param bool truncate_locals:
With ``showlocals==True``, make sure locals can be safely represented as strings.
:param bool chain: if chained exceptions in Python 3 should be shown.
.. versionchanged:: 3.9
Added the ``chain`` parameter.
"""
if style == "native":
return ReprExceptionInfo(
ReprTracebackNative(
traceback.format_exception(
self.type, self.value, self.traceback[0]._rawentry
)
),
self._getreprcrash(),
)
fmt = FormattedExcinfo(
showlocals=showlocals,
style=style,
abspath=abspath,
tbfilter=tbfilter,
funcargs=funcargs,
truncate_locals=truncate_locals,
chain=chain,
)
return fmt.repr_excinfo(self)
def match(self, regexp: "Union[str, Pattern]") -> bool:
"""
Check whether the regular expression 'regexp' is found in the string
representation of the exception using ``re.search``. If it matches
then True is returned (so that it is possible to write
``assert excinfo.match()``). If it doesn't match an AssertionError is
raised.
"""
__tracebackhide__ = True
if not re.search(regexp, str(self.value)):
assert 0, "Pattern {!r} not found in {!r}".format(regexp, str(self.value))
return True
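# A typical use of ``match`` is through ``pytest.raises`` (a sketch):
#
#   with pytest.raises(ValueError) as excinfo:
#       int("not a number")
#   excinfo.match(r"invalid literal")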
@attr.s
class FormattedExcinfo:
""" presenting information about failing Functions and Generators. """
# for traceback entries
flow_marker = ">"
fail_marker = "E"
showlocals = attr.ib(default=False)
style = attr.ib(default="long")
abspath = attr.ib(default=True)
tbfilter = attr.ib(default=True)
funcargs = attr.ib(default=False)
truncate_locals = attr.ib(default=True)
chain = attr.ib(default=True)
astcache = attr.ib(default=attr.Factory(dict), init=False, repr=False)
def _getindent(self, source):
# figure out indent for given source
try:
s = str(source.getstatement(len(source) - 1))
except KeyboardInterrupt:
raise
except: # noqa
try:
s = str(source[-1])
except KeyboardInterrupt:
raise
except: # noqa
return 0
return 4 + (len(s) - len(s.lstrip()))
def _getentrysource(self, entry):
source = entry.getsource(self.astcache)
if source is not None:
source = source.deindent()
return source
def repr_args(self, entry):
if self.funcargs:
args = []
for argname, argvalue in entry.frame.getargs(var=True):
args.append((argname, saferepr(argvalue)))
return ReprFuncArgs(args)
def get_source(self, source, line_index=-1, excinfo=None, short=False):
""" return formatted and marked up source lines. """
import _pytest._code
lines = []
if source is None or line_index >= len(source.lines):
source = _pytest._code.Source("???")
line_index = 0
if line_index < 0:
line_index += len(source)
space_prefix = " "
if short:
lines.append(space_prefix + source.lines[line_index].strip())
else:
for line in source.lines[:line_index]:
lines.append(space_prefix + line)
lines.append(self.flow_marker + " " + source.lines[line_index])
for line in source.lines[line_index + 1 :]:
lines.append(space_prefix + line)
if excinfo is not None:
indent = 4 if short else self._getindent(source)
lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
return lines
def get_exconly(self, excinfo, indent=4, markall=False):
lines = []
indent = " " * indent
# get the real exception information out
exlines = excinfo.exconly(tryshort=True).split("\n")
failindent = self.fail_marker + indent[1:]
for line in exlines:
lines.append(failindent + line)
if not markall:
failindent = indent
return lines
def repr_locals(self, locals):
if self.showlocals:
lines = []
keys = [loc for loc in locals if loc[0] != "@"]
keys.sort()
for name in keys:
value = locals[name]
if name == "__builtins__":
lines.append("__builtins__ = <builtins>")
else:
# This formatting could all be handled by the
# _repr() function, which is only reprlib.Repr in
# disguise, so is very configurable.
if self.truncate_locals:
str_repr = saferepr(value)
else:
str_repr = safeformat(value)
# if len(str_repr) < 70 or not isinstance(value,
# (list, tuple, dict)):
lines.append("{:<10} = {}".format(name, str_repr))
# else:
# self._line("%-10s =\\" % (name,))
# # XXX
# pprint.pprint(value, stream=self.excinfowriter)
return ReprLocals(lines)
def repr_traceback_entry(self, entry, excinfo=None):
import _pytest._code
source = self._getentrysource(entry)
if source is None:
source = _pytest._code.Source("???")
line_index = 0
else:
line_index = entry.lineno - entry.getfirstlinesource()
lines = []
style = entry._repr_style
if style is None:
style = self.style
if style in ("short", "long"):
short = style == "short"
reprargs = self.repr_args(entry) if not short else None
s = self.get_source(source, line_index, excinfo, short=short)
lines.extend(s)
if short:
message = "in %s" % (entry.name)
else:
message = excinfo and excinfo.typename or ""
path = self._makepath(entry.path)
filelocrepr = ReprFileLocation(path, entry.lineno + 1, message)
localsrepr = None
if not short:
localsrepr = self.repr_locals(entry.locals)
return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
if excinfo:
lines.extend(self.get_exconly(excinfo, indent=4))
return ReprEntry(lines, None, None, None, style)
def _makepath(self, path):
if not self.abspath:
try:
np = py.path.local().bestrelpath(path)
except OSError:
return path
if len(np) < len(str(path)):
path = np
return path
def repr_traceback(self, excinfo):
traceback = excinfo.traceback
if self.tbfilter:
traceback = traceback.filter()
if excinfo.errisinstance(RecursionError):
traceback, extraline = self._truncate_recursive_traceback(traceback)
else:
extraline = None
last = traceback[-1]
entries = []
for index, entry in enumerate(traceback):
einfo = (last == entry) and excinfo or None
reprentry = self.repr_traceback_entry(entry, einfo)
entries.append(reprentry)
return ReprTraceback(entries, extraline, style=self.style)
def _truncate_recursive_traceback(self, traceback):
"""
Truncate the given recursive traceback trying to find the starting point
of the recursion.
The detection is done by going through each traceback entry and finding the
        point at which the locals of the frame are equal to the locals of a previous frame (see ``recursionindex()``).
Handle the situation where the recursion process might raise an exception (for example
comparing numpy arrays using equality raises a TypeError), in which case we do our best to
warn the user of the error and show a limited traceback.
"""
try:
recursionindex = traceback.recursionindex()
except Exception as e:
max_frames = 10
extraline = (
"!!! Recursion error detected, but an error occurred locating the origin of recursion.\n"
" The following exception happened when comparing locals in the stack frame:\n"
" {exc_type}: {exc_msg}\n"
" Displaying first and last {max_frames} stack frames out of {total}."
).format(
exc_type=type(e).__name__,
exc_msg=str(e),
max_frames=max_frames,
total=len(traceback),
)
traceback = traceback[:max_frames] + traceback[-max_frames:]
else:
if recursionindex is not None:
extraline = "!!! Recursion detected (same locals & position)"
traceback = traceback[: recursionindex + 1]
else:
extraline = None
return traceback, extraline
def repr_excinfo(self, excinfo):
repr_chain = []
e = excinfo.value
descr = None
seen = set()
while e is not None and id(e) not in seen:
seen.add(id(e))
if excinfo:
reprtraceback = self.repr_traceback(excinfo)
reprcrash = excinfo._getreprcrash()
else:
# fallback to native repr if the exception doesn't have a traceback:
# ExceptionInfo objects require a full traceback to work
reprtraceback = ReprTracebackNative(
traceback.format_exception(type(e), e, None)
)
reprcrash = None
repr_chain += [(reprtraceback, reprcrash, descr)]
if e.__cause__ is not None and self.chain:
e = e.__cause__
excinfo = (
ExceptionInfo((type(e), e, e.__traceback__))
if e.__traceback__
else None
)
descr = "The above exception was the direct cause of the following exception:"
elif (
e.__context__ is not None and not e.__suppress_context__ and self.chain
):
e = e.__context__
excinfo = (
ExceptionInfo((type(e), e, e.__traceback__))
if e.__traceback__
else None
)
descr = "During handling of the above exception, another exception occurred:"
else:
e = None
repr_chain.reverse()
return ExceptionChainRepr(repr_chain)
class TerminalRepr:
def __str__(self):
# FYI this is called from pytest-xdist's serialization of exception
# information.
io = py.io.TextIO()
tw = py.io.TerminalWriter(file=io)
self.toterminal(tw)
return io.getvalue().strip()
def __repr__(self):
return "<{} instance at {:0x}>".format(self.__class__, id(self))
class ExceptionRepr(TerminalRepr):
def __init__(self):
self.sections = []
def addsection(self, name, content, sep="-"):
self.sections.append((name, content, sep))
def toterminal(self, tw):
for name, content, sep in self.sections:
tw.sep(sep, name)
tw.line(content)
class ExceptionChainRepr(ExceptionRepr):
def __init__(self, chain):
super().__init__()
self.chain = chain
# reprcrash and reprtraceback of the outermost (the newest) exception
# in the chain
self.reprtraceback = chain[-1][0]
self.reprcrash = chain[-1][1]
def toterminal(self, tw):
for element in self.chain:
element[0].toterminal(tw)
if element[2] is not None:
tw.line("")
tw.line(element[2], yellow=True)
super().toterminal(tw)
class ReprExceptionInfo(ExceptionRepr):
def __init__(self, reprtraceback, reprcrash):
super().__init__()
self.reprtraceback = reprtraceback
self.reprcrash = reprcrash
def toterminal(self, tw):
self.reprtraceback.toterminal(tw)
super().toterminal(tw)
class ReprTraceback(TerminalRepr):
entrysep = "_ "
def __init__(self, reprentries, extraline, style):
self.reprentries = reprentries
self.extraline = extraline
self.style = style
def toterminal(self, tw):
# the entries might have different styles
for i, entry in enumerate(self.reprentries):
if entry.style == "long":
tw.line("")
entry.toterminal(tw)
if i < len(self.reprentries) - 1:
next_entry = self.reprentries[i + 1]
if (
entry.style == "long"
or entry.style == "short"
and next_entry.style == "long"
):
tw.sep(self.entrysep)
if self.extraline:
tw.line(self.extraline)
class ReprTracebackNative(ReprTraceback):
def __init__(self, tblines):
self.style = "native"
self.reprentries = [ReprEntryNative(tblines)]
self.extraline = None
class ReprEntryNative(TerminalRepr):
style = "native"
def __init__(self, tblines):
self.lines = tblines
def toterminal(self, tw):
tw.write("".join(self.lines))
class ReprEntry(TerminalRepr):
def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
self.lines = lines
self.reprfuncargs = reprfuncargs
self.reprlocals = reprlocals
self.reprfileloc = filelocrepr
self.style = style
def toterminal(self, tw):
if self.style == "short":
self.reprfileloc.toterminal(tw)
for line in self.lines:
red = line.startswith("E ")
tw.line(line, bold=True, red=red)
return
if self.reprfuncargs:
self.reprfuncargs.toterminal(tw)
for line in self.lines:
red = line.startswith("E ")
tw.line(line, bold=True, red=red)
if self.reprlocals:
tw.line("")
self.reprlocals.toterminal(tw)
if self.reprfileloc:
if self.lines:
tw.line("")
self.reprfileloc.toterminal(tw)
def __str__(self):
return "{}\n{}\n{}".format(
"\n".join(self.lines), self.reprlocals, self.reprfileloc
)
class ReprFileLocation(TerminalRepr):
def __init__(self, path, lineno, message):
self.path = str(path)
self.lineno = lineno
self.message = message
def toterminal(self, tw):
# filename and lineno output for each entry,
        # using an output format that most editors understand
msg = self.message
i = msg.find("\n")
if i != -1:
msg = msg[:i]
tw.write(self.path, bold=True, red=True)
tw.line(":{}: {}".format(self.lineno, msg))
class ReprLocals(TerminalRepr):
def __init__(self, lines):
self.lines = lines
def toterminal(self, tw):
for line in self.lines:
tw.line(line)
class ReprFuncArgs(TerminalRepr):
def __init__(self, args):
self.args = args
def toterminal(self, tw):
if self.args:
linesofar = ""
for name, value in self.args:
ns = "{} = {}".format(name, value)
if len(ns) + len(linesofar) + 2 > tw.fullwidth:
if linesofar:
tw.line(linesofar)
linesofar = ns
else:
if linesofar:
linesofar += ", " + ns
else:
linesofar = ns
if linesofar:
tw.line(linesofar)
tw.line("")
def getrawcode(obj, trycall=True):
""" return code object for given function. """
try:
return obj.__code__
except AttributeError:
obj = getattr(obj, "im_func", obj)
obj = getattr(obj, "func_code", obj)
obj = getattr(obj, "f_code", obj)
obj = getattr(obj, "__code__", obj)
if trycall and not hasattr(obj, "co_firstlineno"):
if hasattr(obj, "__call__") and not inspect.isclass(obj):
x = getrawcode(obj.__call__, trycall=False)
if hasattr(x, "co_firstlineno"):
return x
return obj
# relative paths that we use to filter traceback entries from appearing to the user;
# see filter_traceback
# note: if we need to add more paths than what we have now we should probably use a list
# for better maintenance
_PLUGGY_DIR = py.path.local(pluggy.__file__.rstrip("oc"))
# pluggy is either a package or a single module depending on the version
if _PLUGGY_DIR.basename == "__init__.py":
_PLUGGY_DIR = _PLUGGY_DIR.dirpath()
_PYTEST_DIR = py.path.local(_pytest.__file__).dirpath()
_PY_DIR = py.path.local(py.__file__).dirpath()
def filter_traceback(entry):
"""Return True if a TracebackEntry instance should be removed from tracebacks:
* dynamically generated code (no code to show up for it);
* internal traceback from pytest or its internal libraries, py and pluggy.
"""
# entry.path might sometimes return a str object when the entry
# points to dynamically generated code
# see https://bitbucket.org/pytest-dev/py/issues/71
raw_filename = entry.frame.code.raw.co_filename
is_generated = "<" in raw_filename and ">" in raw_filename
if is_generated:
return False
# entry.path might point to a non-existing file, in which case it will
# also return a str object. see #1133
p = py.path.local(entry.path)
return (
not p.relto(_PLUGGY_DIR) and not p.relto(_PYTEST_DIR) and not p.relto(_PY_DIR)
)
|
tomviner/pytest
|
src/_pytest/_code/code.py
|
Python
|
mit
| 36,328
|
# remove duplicates of linked list
class Node(object): # define constructor
def __init__(self, data):
self.data = data
self.next = None
class LinkedList(object):
def __init__(self, head=None):
self.head = head
def add(self, new_node):
current_node = self.head
if self.head:
while current_node.next:
current_node = current_node.next
current_node.next = new_node # add to end of linked list
else:
self.head = new_node
def __repr__(self):
current_node = self.head
output_arr = []
while current_node:
output_arr.append(str(current_node.data))
current_node = current_node.next
return "->".join(output_arr)
def search(self, position):
counter = 1
current_node = self.head
if position < 1:
return None
while current_node and counter <= position: # go through linked list until you reach input position
if counter == position:
return current_node
current_node = current_node.next
counter += 1
return None # if position is larger than length of linked list
    def remove(self, data):
        current_node = self.head
        previous = None
        if not current_node: # nothing to remove from an empty list
            return
        while current_node.data != data and current_node.next: # move through linked list until you hit node you want to delete
previous = current_node
current_node = current_node.next
if current_node.data == data:
if previous:
previous.next = current_node.next
else:
self.head = current_node.next
    def remove_dups(self):
        if not self.head: # an empty list has no duplicates
            return
        current_node = self.head
        next_node = current_node.next
        dups = {}
        dups[current_node.data] = True
while next_node:
if dups.has_key(next_node.data):
current_node.next = next_node.next
else:
dups[next_node.data] = True
current_node = next_node
next_node = next_node.next
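    # Note: the dict of seen values makes remove_dups O(n) time at the cost of
    # O(n) extra space; a buffer-free two-pointer version would be O(n**2) time
    # but O(1) space.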
# test cases
node_one = Node(1)
node_two = Node(2)
node_three = Node(3)
node_four = Node(4)
node_five = Node(1)
node_six = Node(2)
test_one = LinkedList(node_one)
test_one.add(node_two)
test_one.add(node_three)
test_one.add(node_four)
test_one.remove_dups()
print test_one # prints "1->2->3->4"
test_one.add(node_five)
test_one.add(node_six)
print test_one # prints "1->2->3->4->1->2"
test_one.remove_dups()
print test_one # prints "1->2->3->4"
|
derekmpham/interview-prep
|
linked-list/remove-dups.py
|
Python
|
mit
| 2,214
|
#Ensure there is an exceptional edge from the following case
def f2():
b, d = Base, Derived
try:
class MyNewClass(b, d):
pass
except:
e2
def f3():
sequence_of_four = a_global
try:
a, b, c = sequence_of_four
except:
e3
#Always treat locals as non-raising to keep DB size down.
def f4():
if cond:
local = 1
try:
local
except:
e4
def f5():
try:
a_global
except:
e5
def f6():
local = a_global
try:
local()
except:
e6
#Literals can't raise
def f7():
try:
4
except:
e7
def f8():
try:
a + b
except:
e8
#OK assignments
def f9():
try:
a, b = 1, 2
except:
e9
def fa():
seq = a_global
try:
a = seq
except:
ea
def fb():
a, b, c = a_global
try:
seq = a, b, c
except:
eb
#Ensure that a.b and c[d] can raise
def fc():
a, b = a_global
try:
return a[b]
except:
ec
def fd():
a = a_global
try:
return a.b
except:
ed
def fe():
try:
call()
except:
ee
else:
ef
|
github/codeql
|
python/ql/test/library-tests/ControlFlow/except/test.py
|
Python
|
mit
| 1,251
|
# Python3
from solution1 import urlSimilarity as f
qa = [
    ('https://codesignal.com/home/test?param1=42&param3=testing&login=admin',
     'https://codesignal.com/home/secret/test?param3=fish&param1=42&password=admin',
19),
    ('https://codesignal.com/home/test?param1=42&param3=testing&login=admin',
'http://codesignal.org/about?42=param1&tesing=param3&admin=login',
0),
('https://www.google.com/search?q=codesignal',
'http://www.google.com/search?q=codesignal',
13),
('ftp://www.example.com/query?varName=value',
'http://example.com/query?varName=value',
3),
('ftp://www',
'http://anotherexample.com/www?ftp=http',
0),
    ('https://codesignal.com/home/test?param1=42&param3=testing&login=admin&param4=abc&param5=codesignal',
     'https://codesignal.com/home/secret/test?param3=fish&param1=42&codesignal=admin&param5=test',
20)
]
for *q, a in qa:
for i, e in enumerate(q):
print('input{0}: {1}'.format(i + 1, e))
ans = f(*q)
if ans != a:
print(' [failed]')
print(' output:', ans)
print(' expected:', a)
else:
print(' [ok]')
print(' output:', ans)
print()
|
RevansChen/online-judge
|
Codefights/arcade/python-arcade/level-13/90.Url-Similarity/Python/test.py
|
Python
|
mit
| 1,205
|
"""Main module."""
import os
import pygame
import signal
import logging
import argparse
from threading import Event
from multiprocessing import Value
from injector import Injector, singleton
from ballercfg import ConfigurationManager
from .locals import * # noqa
from .events import EventManager, TickEvent
from .modules import ModuleLoader
from .logger import configure_logging
from .states import StateManager
from .assets import AssetManager
from .entities import EntityManager
from .session import SessionManager
from .utils import get_data_path
os.chdir(os.path.dirname(os.path.dirname(__file__)))
logger = logging.getLogger(__name__)
container = None
def build_container(binder):
"""Build a service container by binding dependencies to an injector."""
# General flags and shared objects
binder.bind(ShutdownFlag, to=Event())
binder.bind(DisplayClock, to=pygame.time.Clock())
# Core components
binder.bind(EventManager, scope=singleton)
binder.bind(EntityManager, scope=singleton)
binder.bind(AssetManager, scope=singleton)
binder.bind(SessionManager, scope=singleton)
binder.bind(StateManager, scope=singleton)
class Akurra:
"""Base game class."""
def __init__(self, game, log_level='INFO', debug=False):
"""Constructor."""
# Set up container
global container
self.container = container = Injector(build_container)
self.game = game
self.log_level = log_level
self.debug = debug
# Load configuration
cfg_files = [
os.path.expanduser('~/.config/akurra/*.yml'),
os.path.expanduser('~/.config/akurra/games/%s/*.yml' % self.game)
]
cfg = ConfigurationManager.load([get_data_path('*.yml')] + cfg_files)
self.container.binder.bind(Configuration, to=cfg)
self.container.binder.bind(DebugFlag, to=Value('b', self.debug))
self.container.binder.bind(Akurra, to=self)
# Start pygame (+ audio frequency, size, channels, buffersize)
pygame.mixer.pre_init(44100, 16, 2, 4096)
pygame.init()
configure_logging(log_level=self.log_level)
logger.info('Initializing..')
self.configuration = self.container.get(Configuration)
self.shutdown = self.container.get(ShutdownFlag)
self.clock = self.container.get(DisplayClock)
self.modules = ModuleLoader(
group=self.configuration.get('akurra.modules.entry_point_group', 'akurra.modules'),
whitelist=self.configuration.get('akurra.modules.whitelist', None),
blacklist=self.configuration.get('akurra.modules.blacklist', None),
)
self.games = ModuleLoader(
group=self.configuration.get('akurra.games.entry_point_group', 'akurra.games'),
)
self.events = self.container.get(EventManager)
self.entities = self.container.get(EntityManager)
self.states = self.container.get(StateManager)
self.assets = self.container.get(AssetManager)
self.session = self.container.get(SessionManager)
self.loop_wait_millis = self.configuration.get('akurra.core.loop_wait_millis', 5)
self.max_fps = self.configuration.get('akurra.display.max_fps', 60)
# Handle shutdown signals properly
signal.signal(signal.SIGINT, self.handle_signal)
signal.signal(signal.SIGTERM, self.handle_signal)
def start(self):
"""Start."""
logger.debug('Starting..')
# Reset shutdown flag
self.shutdown.clear()
self.modules.load()
self.entities.start()
self.modules.start()
try:
# Attempt to fetch and launch game
self.games.load_single(self.game)
game = self.games.modules.get(self.game)
game.play()
except AttributeError:
raise ValueError('No game module named "%s" exists!' % self.game)
while not self.shutdown.is_set():
# Pump/handle events (both pygame and akurra)
self.events.poll()
# Calculate time (in seconds) that has passed since last tick
delta_time = self.clock.tick(self.max_fps) / 1000
# Dispatch tick
self.events.dispatch(TickEvent(delta_time=delta_time))
# Wait a bit to lower CPU usage
pygame.time.wait(self.loop_wait_millis)
self.stop()
def stop(self):
"""Stop."""
logger.info('Stopping..')
self.shutdown.set()
self.modules.stop()
self.entities.stop()
self.modules.unload()
self.states.close()
def handle_signal(self, signum, frame):
"""Handle a shutdown signal."""
logger.debug('Received signal, setting shutdown flag [signal=%s]', signum)
self.shutdown.set()
def main():
"""Main entry point."""
# Parse command-line arguments and set required variables
parser = argparse.ArgumentParser(description='Run the Akurra game engine.')
parser.add_argument('--log-level', type=str, default='INFO', help='set the log level',
choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'INSANE'])
parser.add_argument('-d', '--debug', action='store_true', help='toggle debugging')
parser.add_argument('-g', '--game', required=True, type=str, help='game to run')
args = parser.parse_args()
akurra = Akurra(game=args.game, log_level=args.log_level, debug=args.debug)
akurra.start()
if __name__ == '__main__':
main()
|
multatronic/akurra
|
akurra/__init__.py
|
Python
|
mit
| 5,555
|
from __future__ import absolute_import
class ConfigNotFound(LookupError):
"""Raise this exception to signal that a requested config item
was not found in the config."""
pass
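# Typical use (a sketch; the loader and key names are hypothetical):
#
#   if key not in loaded_config:
#       raise ConfigNotFound('config item {0!r} not found'.format(key))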
|
inklesspen/montague
|
src/montague/exceptions.py
|
Python
|
mit
| 191
|
from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
    def has_object_permission(self, request, view, obj):
        # Read permissions are allowed to any request,
        # so we'll always allow GET, HEAD, or OPTIONS requests.
        return (request.method in permissions.SAFE_METHODS) or (obj.owner == request.user)
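    # Usage sketch (the view name is hypothetical): enable the permission on a
    # view by listing it in ``permission_classes``, e.g.
    #
    #   class SnippetDetail(generics.RetrieveUpdateDestroyAPIView):
    #       permission_classes = (permissions.IsAuthenticatedOrReadOnly,
    #                             IsOwnerOrReadOnly)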
|
dfurtado/generator-djangospa
|
generators/app/templates/root/main/permissions.py
|
Python
|
mit
| 438
|
import storage
from representation import Representation
from patch import Patch
|
ChrisTimperley/EvoAnalyser.py
|
src/representation/__init__.py
|
Python
|
mit
| 81
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: yuchou
@time: 2017/8/7 10:28
"""
|
yuchou/xblog
|
blog/templatetags/__init__.py
|
Python
|
mit
| 91
|
#!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : Libs/IOST_WAboutDialog/IOST_AboutDialog.py
# Date : Sep 21, 2016
# Author : HuuHoang Nguyen
# Contact : hhnguyen@apm.com
# : hoangnh.hpp@gmail.com
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of which may be found in LICENSE
#
#======================================================================
import io
import os
import re
import operator
import sys
import base64
import time
from IOST_Basic import *
from IOST_Config import *
import gtk
import gtk.glade
import gobject
#======================================================================
class IOST_AboutDialog():
def __init__(self, glade_filename, window_name, object_name ,main_builder=None):
"This is a function get of Diaglog Help -> About Window"
self.IOST_AboutDialog_WindowName = window_name
self.IOST_AboutDialog_ObjectName = object_name
if not main_builder:
self.IOST_AboutDialog_Builder = gtk.Builder()
self.IOST_AboutDialog_Builder.add_from_file(glade_filename)
self.IOST_AboutDialog_Builder.connect_signals(self)
else:
self.IOST_AboutDialog_Builder = main_builder
# self.IOST_Objs[window_name][window_name+ object_name] = self.IOST_AboutDialog_Builder.get_object(window_name+object_name)
# self.IOST_Objs[window_name][window_name+ object_name].set_version(self.IOST_Data["ProjectVersion"])
self.CreateObjsDictFromDict(self.IOST_AboutDialog_WindowName,
self.IOST_Objs[self.IOST_AboutDialog_WindowName],
self.IOST_AboutDialog_Builder,
0)
self.IOST_Objs[self.IOST_AboutDialog_WindowName][self.IOST_AboutDialog_ObjectName].set_version(self.IOST_Data["ProjectVersion"])
def Run(self, window_name, object_name):
self.IOST_Objs[window_name][object_name].run()
self.IOST_Objs[window_name][object_name].hide()
    def ActiveLink(self, object_name):
        "Hide the About dialog when a link is activated"
        self.IOST_Objs[self.IOST_AboutDialog_WindowName][self.IOST_AboutDialog_ObjectName].hide()
    def on_IOST_WHelpAbout_destroy(self, object, data=None):
        "Hide the About dialog on destroy"
        self.IOST_Objs[self.IOST_AboutDialog_WindowName][self.IOST_AboutDialog_ObjectName].hide()
    def on_IOST_WHelpAbout_DialogActionArea_destroy(self, object, data=None):
        "Hide the About dialog when its action area is destroyed"
        self.IOST_Objs[self.IOST_AboutDialog_WindowName][self.IOST_AboutDialog_ObjectName].hide()
    def on_IOST_WHelpAbout_button_press_event(self, widget, event, data=None):
        "Hide the About dialog on a button press"
        self.IOST_Objs[self.IOST_AboutDialog_WindowName][self.IOST_AboutDialog_ObjectName].hide()
    def on_IOST_WHelpAbout_DialogVB_button_press_event(self, widget, event, data=None):
        "Hide the About dialog on a button press in its vertical box"
        self.IOST_Objs[self.IOST_AboutDialog_WindowName][self.IOST_AboutDialog_ObjectName].hide()
|
HPPTECH/hpp_IOSTressTest
|
IOST_0.23/Libs/IOST_AboutDialog/IOST_AboutDialog.py
|
Python
|
mit
| 3,089
|
"""add follow table
Revision ID: f045592adab0
Revises: 56a3d184ac27
Create Date: 2017-10-06 00:38:24.001488
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f045592adab0'
down_revision = '56a3d184ac27'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('follows',
sa.Column('follower_id', sa.Integer(), nullable=False),
sa.Column('followed_id', sa.Integer(), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['followed_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['follower_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('follower_id', 'followed_id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('follows')
# ### end Alembic commands ###
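# Typical invocation (assumption: a configured alembic.ini in the project root):
#   alembic upgrade f045592adab0     # apply this revision
#   alembic downgrade 56a3d184ac27   # revert to the previous revision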
|
mikkylok/mikky.lu
|
migrations/versions/f045592adab0_add_follow_table.py
|
Python
|
mit
| 964
|
import datetime
import uuid
from flask import current_app as app
from flask import url_for
from database import db
from sqlalchemy import Column, DateTime, String, Text
from sqlalchemy.dialects.postgresql import UUID
class Poster(db.Model):
__tablename__ = 'posters'
id = Column(UUID(as_uuid=True), primary_key=True)
title = Column(String(400), nullable=False, default='Untitled')
authors = Column(Text)
abstract = Column(Text)
source_url = Column(String(400), nullable=False)
download_url = Column(String(400), nullable=False)
presented_at = Column(String(200))
    # pass the callable (not its result) so the timestamp is computed per insert,
    # not once at import time
    created_at = Column('create_date', DateTime, default=datetime.datetime.now)
id_admin = Column(UUID(as_uuid=True), unique=True, nullable=False)
email = Column(String(50))
def __init__(self, title, source_url, download_url, authors=None,
abstract=None, presented_at=None):
self.id = uuid.uuid4()
self.title = title
self.authors = authors
self.abstract = abstract
self.source_url = source_url
self.download_url = download_url
self.presented_at = presented_at
self.id_admin = uuid.uuid4()
def __repr__(self):
        return '<Poster {}>'.format(str(self.id))
def serialize(self):
return {
'id': self.id,
'title': self.title,
'authors': self.authors,
'abstract': self.abstract,
'source_url': self.source_url,
'download_url': self.download_url,
'presented_at': self.presented_at,
'created_at': self.created_at.isoformat(),
'thumbnail_url': self.thumbnail_url(),
}
def public_url(self, absolute=False):
return url_for('get_poster', id=self.id, _external=absolute)
def admin_url(self, absolute=False):
return url_for('edit_poster', id_admin=self.id_admin, _external=absolute)
def qrcode_svg_url(self, absolute=False):
return url_for('get_qrcode_svg', id=self.id, _external=absolute)
def qrcode_png_url(self, absolute=False):
return url_for('get_qrcode_png', id=self.id, _external=absolute)
def is_image(self):
return self.download_url.endswith('.png') or self.download_url.endswith('.jpg')
def viewable_download_url(self):
cloudinary = app.config['CLOUDINARY_BASE_URL']
if self.is_image() or self.download_url.startswith(cloudinary):
return self.download_url
return '{}/image/fetch/{}'.format(cloudinary, self.download_url)
def thumbnail_url(self):
cloudinary = app.config['CLOUDINARY_BASE_URL']
transformations = 'c_thumb,w_370,h_200,f_png'
if self.download_url.startswith(cloudinary):
return self.download_url.replace('/upload/', '/upload/{}/'.format(transformations))
return '{}/image/fetch/{}/{}'.format(cloudinary, transformations, self.download_url)
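# Illustrative sketch (assumption: CLOUDINARY_BASE_URL is something like
# 'https://res.cloudinary.com/demo'): a poster whose download_url is the
# external file 'https://example.org/poster.pdf' gets the fetch-style
# thumbnail URL
#   'https://res.cloudinary.com/demo/image/fetch/c_thumb,w_370,h_200,f_png/https://example.org/poster.pdf'
# while files already hosted on Cloudinary only have the transformation
# segment spliced into their '/upload/' path.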
|
TailorDev/pauling
|
api/models.py
|
Python
|
mit
| 2,935
|
import sublime, sublime_plugin
import webbrowser
class OpenWikiOnString(sublime_plugin.TextCommand):
def run(self, edit):
query = self.view.substr(self.view.sel()[0])
webbrowser.open_new("http://wiki.sa-mp.com/wiki/" + query)
|
ziggi/pawn-sublime-language
|
OpenWikiOnString.py
|
Python
|
mit
| 233
|
import etcd
import logging
import os
import signal
import time
import unittest
import patroni.config as config
from mock import Mock, PropertyMock, patch
from patroni.api import RestApiServer
from patroni.async_executor import AsyncExecutor
from patroni.dcs.etcd import AbstractEtcdClientWithFailover
from patroni.exceptions import DCSError
from patroni.postgresql import Postgresql
from patroni.postgresql.config import ConfigHandler
from patroni import check_psycopg
from patroni.__main__ import Patroni, main as _main, patroni_main
from six.moves import BaseHTTPServer, builtins
from threading import Thread
from . import psycopg_connect, SleepException
from .test_etcd import etcd_read, etcd_write
from .test_postgresql import MockPostmaster
def mock_import(*args, **kwargs):
if args[0] == 'psycopg':
raise ImportError
ret = Mock()
ret.__version__ = '2.5.3.dev1 a b c'
return ret
class MockFrozenImporter(object):
toc = set(['patroni.dcs.etcd'])
@patch('time.sleep', Mock())
@patch('subprocess.call', Mock(return_value=0))
@patch('patroni.psycopg.connect', psycopg_connect)
@patch.object(ConfigHandler, 'append_pg_hba', Mock())
@patch.object(ConfigHandler, 'write_postgresql_conf', Mock())
@patch.object(ConfigHandler, 'write_recovery_conf', Mock())
@patch.object(Postgresql, 'is_running', Mock(return_value=MockPostmaster()))
@patch.object(Postgresql, 'call_nowait', Mock())
@patch.object(BaseHTTPServer.HTTPServer, '__init__', Mock())
@patch.object(AsyncExecutor, 'run', Mock())
@patch.object(etcd.Client, 'write', etcd_write)
@patch.object(etcd.Client, 'read', etcd_read)
class TestPatroni(unittest.TestCase):
def test_no_config(self):
self.assertRaises(SystemExit, patroni_main)
@patch('sys.argv', ['patroni.py', '--validate-config', 'postgres0.yml'])
def test_validate_config(self):
self.assertRaises(SystemExit, patroni_main)
@patch('pkgutil.iter_importers', Mock(return_value=[MockFrozenImporter()]))
@patch('sys.frozen', Mock(return_value=True), create=True)
@patch.object(BaseHTTPServer.HTTPServer, '__init__', Mock())
@patch.object(etcd.Client, 'read', etcd_read)
@patch.object(Thread, 'start', Mock())
@patch.object(AbstractEtcdClientWithFailover, 'machines', PropertyMock(return_value=['http://remotehost:2379']))
def setUp(self):
self._handlers = logging.getLogger().handlers[:]
RestApiServer._BaseServer__is_shut_down = Mock()
RestApiServer._BaseServer__shutdown_request = True
RestApiServer.socket = 0
os.environ['PATRONI_POSTGRESQL_DATA_DIR'] = 'data/test0'
conf = config.Config('postgres0.yml')
self.p = Patroni(conf)
def tearDown(self):
logging.getLogger().handlers[:] = self._handlers
@patch('patroni.dcs.AbstractDCS.get_cluster', Mock(side_effect=[None, DCSError('foo'), None]))
def test_load_dynamic_configuration(self):
self.p.config._dynamic_configuration = {}
self.p.load_dynamic_configuration()
self.p.load_dynamic_configuration()
@patch('sys.argv', ['patroni.py', 'postgres0.yml'])
@patch('time.sleep', Mock(side_effect=SleepException))
@patch.object(etcd.Client, 'delete', Mock())
@patch.object(AbstractEtcdClientWithFailover, 'machines', PropertyMock(return_value=['http://remotehost:2379']))
@patch.object(Thread, 'join', Mock())
def test_patroni_patroni_main(self):
with patch('subprocess.call', Mock(return_value=1)):
with patch.object(Patroni, 'run', Mock(side_effect=SleepException)):
os.environ['PATRONI_POSTGRESQL_DATA_DIR'] = 'data/test0'
self.assertRaises(SleepException, patroni_main)
with patch.object(Patroni, 'run', Mock(side_effect=KeyboardInterrupt())):
with patch('patroni.ha.Ha.is_paused', Mock(return_value=True)):
os.environ['PATRONI_POSTGRESQL_DATA_DIR'] = 'data/test0'
patroni_main()
@patch('os.getpid')
@patch('multiprocessing.Process')
@patch('patroni.__main__.patroni_main', Mock())
def test_patroni_main(self, mock_process, mock_getpid):
mock_getpid.return_value = 2
_main()
mock_getpid.return_value = 1
def mock_signal(signo, handler):
handler(signo, None)
with patch('signal.signal', mock_signal):
with patch('os.waitpid', Mock(side_effect=[(1, 0), (0, 0)])):
_main()
with patch('os.waitpid', Mock(side_effect=OSError)):
_main()
ref = {'passtochild': lambda signo, stack_frame: 0}
def mock_sighup(signo, handler):
if hasattr(signal, 'SIGHUP') and signo == signal.SIGHUP:
ref['passtochild'] = handler
def mock_join():
ref['passtochild'](0, None)
mock_process.return_value.join = mock_join
with patch('signal.signal', mock_sighup), patch('os.kill', Mock()):
self.assertIsNone(_main())
@patch('patroni.config.Config.save_cache', Mock())
@patch('patroni.config.Config.reload_local_configuration', Mock(return_value=True))
@patch('patroni.ha.Ha.is_leader', Mock(return_value=True))
@patch.object(Postgresql, 'state', PropertyMock(return_value='running'))
@patch.object(Postgresql, 'data_directory_empty', Mock(return_value=False))
def test_run(self):
self.p.postgresql.set_role('replica')
self.p.sighup_handler()
self.p.ha.dcs.watch = Mock(side_effect=SleepException)
self.p.api.start = Mock()
self.p.logger.start = Mock()
self.p.config._dynamic_configuration = {}
self.assertRaises(SleepException, self.p.run)
with patch('patroni.config.Config.reload_local_configuration', Mock(return_value=False)):
self.p.sighup_handler()
self.assertRaises(SleepException, self.p.run)
with patch('patroni.config.Config.set_dynamic_configuration', Mock(return_value=True)):
self.assertRaises(SleepException, self.p.run)
with patch('patroni.postgresql.Postgresql.data_directory_empty', Mock(return_value=False)):
self.assertRaises(SleepException, self.p.run)
def test_sigterm_handler(self):
self.assertRaises(SystemExit, self.p.sigterm_handler)
def test_schedule_next_run(self):
self.p.ha.cluster = Mock()
self.p.ha.dcs.watch = Mock(return_value=True)
self.p.schedule_next_run()
self.p.next_run = time.time() - self.p.dcs.loop_wait - 1
self.p.schedule_next_run()
def test_noloadbalance(self):
self.p.tags['noloadbalance'] = True
self.assertTrue(self.p.noloadbalance)
def test_nofailover(self):
self.p.tags['nofailover'] = True
self.assertTrue(self.p.nofailover)
self.p.tags['nofailover'] = None
self.assertFalse(self.p.nofailover)
def test_replicatefrom(self):
self.assertIsNone(self.p.replicatefrom)
self.p.tags['replicatefrom'] = 'foo'
self.assertEqual(self.p.replicatefrom, 'foo')
def test_reload_config(self):
self.p.reload_config()
self.p.get_tags = Mock(side_effect=Exception)
self.p.reload_config(local=True)
def test_nosync(self):
self.p.tags['nosync'] = True
self.assertTrue(self.p.nosync)
self.p.tags['nosync'] = None
self.assertFalse(self.p.nosync)
@patch.object(Thread, 'join', Mock())
def test_shutdown(self):
self.p.api.shutdown = Mock(side_effect=Exception)
self.p.ha.shutdown = Mock(side_effect=Exception)
self.p.shutdown()
def test_check_psycopg(self):
with patch.object(builtins, '__import__', Mock(side_effect=ImportError)):
self.assertRaises(SystemExit, check_psycopg)
with patch.object(builtins, '__import__', mock_import):
self.assertRaises(SystemExit, check_psycopg)
|
zalando/patroni
|
tests/test_patroni.py
|
Python
|
mit
| 7,937
|
# -*- coding: utf-8 -*-
"""
This is the Windows backend for keyboard events, and is implemented by
invoking the Win32 API through the ctypes module. This is error prone
and can introduce very unpythonic failure modes, such as segfaults and
low level memory leaks. But it is also dependency-free, very performant,
and well documented on Microsoft's website and in scattered examples.
# TODO:
- Keypad numbers still print as numbers even when numlock is off.
- No way to specify if the user wants a keypad key or not in `map_char`.
"""
from __future__ import unicode_literals
import re
import atexit
import traceback
from threading import Lock
from collections import defaultdict
from ._keyboard_event import KeyboardEvent, KEY_DOWN, KEY_UP
from ._canonical_names import normalize_name
try:
# Force Python2 to convert to unicode and not to str.
chr = unichr
except NameError:
pass
# This part is just declaring Win32 API structures using ctypes. In C
# this would be simply #include "windows.h".
import ctypes
from ctypes import c_short, c_char, c_uint8, c_int32, c_int, c_uint, c_uint32, c_long, Structure, CFUNCTYPE, POINTER
from ctypes.wintypes import WORD, DWORD, BOOL, HHOOK, MSG, LPWSTR, WCHAR, WPARAM, LPARAM, LONG, HMODULE, LPCWSTR, HINSTANCE, HWND
LPMSG = POINTER(MSG)
ULONG_PTR = POINTER(DWORD)
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
GetModuleHandleW = kernel32.GetModuleHandleW
GetModuleHandleW.restype = HMODULE
GetModuleHandleW.argtypes = [LPCWSTR]
#https://github.com/boppreh/mouse/issues/1
#user32 = ctypes.windll.user32
user32 = ctypes.WinDLL('user32', use_last_error = True)
VK_PACKET = 0xE7
INPUT_MOUSE = 0
INPUT_KEYBOARD = 1
INPUT_HARDWARE = 2
KEYEVENTF_KEYUP = 0x02
KEYEVENTF_UNICODE = 0x04
class KBDLLHOOKSTRUCT(Structure):
_fields_ = [("vk_code", DWORD),
("scan_code", DWORD),
("flags", DWORD),
("time", c_int),
("dwExtraInfo", ULONG_PTR)]
# Included for completeness.
class MOUSEINPUT(ctypes.Structure):
_fields_ = (('dx', LONG),
('dy', LONG),
('mouseData', DWORD),
('dwFlags', DWORD),
('time', DWORD),
('dwExtraInfo', ULONG_PTR))
class KEYBDINPUT(ctypes.Structure):
_fields_ = (('wVk', WORD),
('wScan', WORD),
('dwFlags', DWORD),
('time', DWORD),
('dwExtraInfo', ULONG_PTR))
class HARDWAREINPUT(ctypes.Structure):
_fields_ = (('uMsg', DWORD),
('wParamL', WORD),
('wParamH', WORD))
class _INPUTunion(ctypes.Union):
_fields_ = (('mi', MOUSEINPUT),
('ki', KEYBDINPUT),
('hi', HARDWAREINPUT))
class INPUT(ctypes.Structure):
_fields_ = (('type', DWORD),
('union', _INPUTunion))
LowLevelKeyboardProc = CFUNCTYPE(c_int, WPARAM, LPARAM, POINTER(KBDLLHOOKSTRUCT))
SetWindowsHookEx = user32.SetWindowsHookExW
SetWindowsHookEx.argtypes = [c_int, LowLevelKeyboardProc, HINSTANCE, DWORD]
SetWindowsHookEx.restype = HHOOK
CallNextHookEx = user32.CallNextHookEx
#CallNextHookEx.argtypes = [c_int , c_int, c_int, POINTER(KBDLLHOOKSTRUCT)]
CallNextHookEx.restype = c_int
UnhookWindowsHookEx = user32.UnhookWindowsHookEx
UnhookWindowsHookEx.argtypes = [HHOOK]
UnhookWindowsHookEx.restype = BOOL
GetMessage = user32.GetMessageW
GetMessage.argtypes = [LPMSG, HWND, c_uint, c_uint]
GetMessage.restype = BOOL
TranslateMessage = user32.TranslateMessage
TranslateMessage.argtypes = [LPMSG]
TranslateMessage.restype = BOOL
DispatchMessage = user32.DispatchMessageA
DispatchMessage.argtypes = [LPMSG]
keyboard_state_type = c_uint8 * 256
GetKeyboardState = user32.GetKeyboardState
GetKeyboardState.argtypes = [keyboard_state_type]
GetKeyboardState.restype = BOOL
GetKeyNameText = user32.GetKeyNameTextW
GetKeyNameText.argtypes = [c_long, LPWSTR, c_int]
GetKeyNameText.restype = c_int
MapVirtualKey = user32.MapVirtualKeyW
MapVirtualKey.argtypes = [c_uint, c_uint]
MapVirtualKey.restype = c_uint
ToUnicode = user32.ToUnicode
ToUnicode.argtypes = [c_uint, c_uint, keyboard_state_type, LPWSTR, c_int, c_uint]
ToUnicode.restype = c_int
SendInput = user32.SendInput
SendInput.argtypes = [c_uint, POINTER(INPUT), c_int]
SendInput.restype = c_uint
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms646307(v=vs.85).aspx
MAPVK_VK_TO_CHAR = 2
MAPVK_VK_TO_VSC = 0
MAPVK_VSC_TO_VK = 1
MAPVK_VK_TO_VSC_EX = 4
MAPVK_VSC_TO_VK_EX = 3
VkKeyScan = user32.VkKeyScanW
VkKeyScan.argtypes = [WCHAR]
VkKeyScan.restype = c_short
LLKHF_INJECTED = 0x00000010
WM_KEYDOWN = 0x0100
WM_KEYUP = 0x0101
WM_SYSKEYDOWN = 0x104 # Used for ALT key
WM_SYSKEYUP = 0x105
# This marks the end of Win32 API declarations. The rest is ours.
keyboard_event_types = {
WM_KEYDOWN: KEY_DOWN,
WM_KEYUP: KEY_UP,
WM_SYSKEYDOWN: KEY_DOWN,
WM_SYSKEYUP: KEY_UP,
}
# List taken from the official documentation, but stripped of the OEM-specific keys.
# Keys are virtual key codes, values are pairs (name, is_keypad).
official_virtual_keys = {
0x03: ('control-break processing', False),
0x08: ('backspace', False),
0x09: ('tab', False),
0x0c: ('clear', False),
0x0d: ('enter', False),
0x10: ('shift', False),
0x11: ('ctrl', False),
0x12: ('alt', False),
0x13: ('pause', False),
0x14: ('caps lock', False),
    # 0x15 is shared by the kana/hanguel/hangul IME modes and 0x19 by hanja/kanji;
    # duplicate dict keys would silently overwrite each other, so only the last
    # documented name of each group is kept.
    0x15: ('ime hangul mode', False),
0x17: ('ime junja mode', False),
0x18: ('ime final mode', False),
    0x19: ('ime kanji mode', False),
0x1b: ('esc', False),
0x1c: ('ime convert', False),
0x1d: ('ime nonconvert', False),
0x1e: ('ime accept', False),
0x1f: ('ime mode change request', False),
0x20: ('spacebar', False),
0x21: ('page up', False),
0x22: ('page down', False),
0x23: ('end', False),
0x24: ('home', False),
0x25: ('left', False),
0x26: ('up', False),
0x27: ('right', False),
0x28: ('down', False),
0x29: ('select', False),
0x2a: ('print', False),
0x2b: ('execute', False),
0x2c: ('print screen', False),
0x2d: ('insert', False),
0x2e: ('delete', False),
0x2f: ('help', False),
0x30: ('0', False),
0x31: ('1', False),
0x32: ('2', False),
0x33: ('3', False),
0x34: ('4', False),
0x35: ('5', False),
0x36: ('6', False),
0x37: ('7', False),
0x38: ('8', False),
0x39: ('9', False),
0x41: ('a', False),
0x42: ('b', False),
0x43: ('c', False),
0x44: ('d', False),
0x45: ('e', False),
0x46: ('f', False),
0x47: ('g', False),
0x48: ('h', False),
0x49: ('i', False),
0x4a: ('j', False),
0x4b: ('k', False),
0x4c: ('l', False),
0x4d: ('m', False),
0x4e: ('n', False),
0x4f: ('o', False),
0x50: ('p', False),
0x51: ('q', False),
0x52: ('r', False),
0x53: ('s', False),
0x54: ('t', False),
0x55: ('u', False),
0x56: ('v', False),
0x57: ('w', False),
0x58: ('x', False),
0x59: ('y', False),
0x5a: ('z', False),
0x5b: ('left windows', False),
0x5c: ('right windows', False),
0x5d: ('applications', False),
0x5f: ('sleep', False),
0x60: ('0', True),
0x61: ('1', True),
0x62: ('2', True),
0x63: ('3', True),
0x64: ('4', True),
0x65: ('5', True),
0x66: ('6', True),
0x67: ('7', True),
0x68: ('8', True),
0x69: ('9', True),
0x6a: ('*', True),
0x6b: ('+', True),
0x6c: ('separator', True),
0x6d: ('-', True),
0x6e: ('decimal', True),
0x6f: ('/', True),
0x70: ('f1', False),
0x71: ('f2', False),
0x72: ('f3', False),
0x73: ('f4', False),
0x74: ('f5', False),
0x75: ('f6', False),
0x76: ('f7', False),
0x77: ('f8', False),
0x78: ('f9', False),
0x79: ('f10', False),
0x7a: ('f11', False),
0x7b: ('f12', False),
0x7c: ('f13', False),
0x7d: ('f14', False),
0x7e: ('f15', False),
0x7f: ('f16', False),
0x80: ('f17', False),
0x81: ('f18', False),
0x82: ('f19', False),
0x83: ('f20', False),
0x84: ('f21', False),
0x85: ('f22', False),
0x86: ('f23', False),
0x87: ('f24', False),
0x90: ('num lock', False),
0x91: ('scroll lock', False),
0xa0: ('left shift', False),
0xa1: ('right shift', False),
0xa2: ('left ctrl', False),
0xa3: ('right ctrl', False),
0xa4: ('left menu', False),
0xa5: ('right menu', False),
0xa6: ('browser back', False),
0xa7: ('browser forward', False),
0xa8: ('browser refresh', False),
0xa9: ('browser stop', False),
0xaa: ('browser search key', False),
0xab: ('browser favorites', False),
0xac: ('browser start and home', False),
0xad: ('volume mute', False),
0xae: ('volume down', False),
0xaf: ('volume up', False),
0xb0: ('next track', False),
0xb1: ('previous track', False),
0xb2: ('stop media', False),
0xb3: ('play/pause media', False),
0xb4: ('start mail', False),
0xb5: ('select media', False),
0xb6: ('start application 1', False),
0xb7: ('start application 2', False),
0xbb: ('+', False),
0xbc: (',', False),
0xbd: ('-', False),
0xbe: ('.', False),
    #0xbf: ('/', False), # Used for miscellaneous characters; it can vary by keyboard. For the US standard keyboard, the '/?' key.
0xe5: ('ime process', False),
0xf6: ('attn', False),
0xf7: ('crsel', False),
0xf8: ('exsel', False),
0xf9: ('erase eof', False),
0xfa: ('play', False),
0xfb: ('zoom', False),
    0xfc: ('reserved', False),
0xfd: ('pa1', False),
0xfe: ('clear', False),
}
tables_lock = Lock()
to_name = defaultdict(list)
from_name = defaultdict(list)
scan_code_to_vk = {}
distinct_modifiers = [
(),
('shift',),
('alt gr',),
('num lock',),
('shift', 'num lock'),
('caps lock',),
('shift', 'caps lock'),
('alt gr', 'num lock'),
]
name_buffer = ctypes.create_unicode_buffer(32)
unicode_buffer = ctypes.create_unicode_buffer(32)
keyboard_state = keyboard_state_type()
def get_event_names(scan_code, vk, is_extended, modifiers):
is_keypad = (scan_code, vk, is_extended) in keypad_keys
is_official = vk in official_virtual_keys
if is_keypad and is_official:
yield official_virtual_keys[vk][0]
keyboard_state[0x10] = 0x80 * ('shift' in modifiers)
keyboard_state[0x11] = 0x80 * ('alt gr' in modifiers)
keyboard_state[0x12] = 0x80 * ('alt gr' in modifiers)
keyboard_state[0x14] = 0x01 * ('caps lock' in modifiers)
keyboard_state[0x90] = 0x01 * ('num lock' in modifiers)
keyboard_state[0x91] = 0x01 * ('scroll lock' in modifiers)
unicode_ret = ToUnicode(vk, scan_code, keyboard_state, unicode_buffer, len(unicode_buffer), 0)
if unicode_ret and unicode_buffer.value:
yield unicode_buffer.value
# unicode_ret == -1 -> is dead key
# ToUnicode has the side effect of setting global flags for dead keys.
# Therefore we need to call it twice to clear those flags.
# If your 6 and 7 keys are named "^6" and "^7", this is the reason.
ToUnicode(vk, scan_code, keyboard_state, unicode_buffer, len(unicode_buffer), 0)
    # pass the real buffer length; name_buffer is only 32 characters long
    name_ret = GetKeyNameText(scan_code << 16 | is_extended << 24, name_buffer, len(name_buffer))
if name_ret and name_buffer.value:
yield name_buffer.value
char = user32.MapVirtualKeyW(vk, MAPVK_VK_TO_CHAR) & 0xFF
if char != 0:
yield chr(char)
if not is_keypad and is_official:
yield official_virtual_keys[vk][0]
def _setup_name_tables():
"""
Ensures the scan code/virtual key code/name translation tables are
filled.
"""
with tables_lock:
if to_name: return
# Go through every possible scan code, and map them to virtual key codes.
# Then vice-versa.
all_scan_codes = [(sc, user32.MapVirtualKeyExW(sc, MAPVK_VSC_TO_VK_EX, 0)) for sc in range(0x100)]
all_vks = [(user32.MapVirtualKeyExW(vk, MAPVK_VK_TO_VSC_EX, 0), vk) for vk in range(0x100)]
for scan_code, vk in all_scan_codes + all_vks:
# `to_name` and `from_name` entries will be a tuple (scan_code, vk, extended, shift_state).
if (scan_code, vk, 0, 0, 0) in to_name:
continue
if scan_code not in scan_code_to_vk:
scan_code_to_vk[scan_code] = vk
# Brute force all combinations to find all possible names.
for extended in [0, 1]:
for modifiers in distinct_modifiers:
entry = (scan_code, vk, extended, modifiers)
# Get key names from ToUnicode, GetKeyNameText, MapVirtualKeyW and official virtual keys.
names = list(get_event_names(*entry))
if names:
# Also map lowercased key names, but only after the properly cased ones.
lowercase_names = [name.lower() for name in names]
to_name[entry] = names + lowercase_names
# Remember the "id" of the name, as the first techniques
# have better results and therefore priority.
for i, name in enumerate(map(normalize_name, names + lowercase_names)):
from_name[name].append((i, entry))
# TODO: single quotes on US INTL is returning the dead key (?), and therefore
# not typing properly.
# Alt gr is way outside the usual range of keys (0..127) and on my
# computer is named as 'ctrl'. Therefore we add it manually and hope
# Windows is consistent in its inconsistency.
for extended in [0, 1]:
for modifiers in distinct_modifiers:
to_name[(541, 162, extended, modifiers)] = ['alt gr']
from_name['alt gr'].append((1, (541, 162, extended, modifiers)))
modifiers_preference = defaultdict(lambda: 10)
modifiers_preference.update({(): 0, ('shift',): 1, ('alt gr',): 2, ('ctrl',): 3, ('alt',): 4})
def order_key(line):
i, entry = line
scan_code, vk, extended, modifiers = entry
return modifiers_preference[modifiers], i, extended, vk, scan_code
for name, entries in list(from_name.items()):
from_name[name] = sorted(set(entries), key=order_key)
# Called by keyboard/__init__.py
init = _setup_name_tables
# List created manually.
keypad_keys = [
# (scan_code, virtual_key_code, is_extended)
(126, 194, 0),
(126, 194, 0),
(28, 13, 1),
(28, 13, 1),
(53, 111, 1),
(53, 111, 1),
(55, 106, 0),
(55, 106, 0),
(69, 144, 1),
(69, 144, 1),
(71, 103, 0),
(71, 36, 0),
(72, 104, 0),
(72, 38, 0),
(73, 105, 0),
(73, 33, 0),
(74, 109, 0),
(74, 109, 0),
(75, 100, 0),
(75, 37, 0),
(76, 101, 0),
(76, 12, 0),
(77, 102, 0),
(77, 39, 0),
(78, 107, 0),
(78, 107, 0),
(79, 35, 0),
(79, 97, 0),
(80, 40, 0),
(80, 98, 0),
(81, 34, 0),
(81, 99, 0),
(82, 45, 0),
(82, 96, 0),
(83, 110, 0),
(83, 46, 0),
]
shift_is_pressed = False
altgr_is_pressed = False
ignore_next_right_alt = False
shift_vks = set([0x10, 0xa0, 0xa1])
def prepare_intercept(callback):
"""
Registers a Windows low level keyboard hook. The provided callback will
be invoked for each high-level keyboard event, and is expected to return
True if the key event should be passed to the next program, or False if
the event is to be blocked.
No event is processed until the Windows messages are pumped (see
start_intercept).
"""
_setup_name_tables()
def process_key(event_type, vk, scan_code, is_extended):
global shift_is_pressed, altgr_is_pressed, ignore_next_right_alt
#print(event_type, vk, scan_code, is_extended)
# Pressing alt-gr also generates an extra "right alt" event
if vk == 0xA5 and ignore_next_right_alt:
ignore_next_right_alt = False
return True
modifiers = (
('shift',) * shift_is_pressed +
('alt gr',) * altgr_is_pressed +
('num lock',) * (user32.GetKeyState(0x90) & 1) +
('caps lock',) * (user32.GetKeyState(0x14) & 1) +
('scroll lock',) * (user32.GetKeyState(0x91) & 1)
)
entry = (scan_code, vk, is_extended, modifiers)
if entry not in to_name:
to_name[entry] = list(get_event_names(*entry))
names = to_name[entry]
name = names[0] if names else None
# TODO: inaccurate when holding multiple different shifts.
if vk in shift_vks:
shift_is_pressed = event_type == KEY_DOWN
if scan_code == 541 and vk == 162:
ignore_next_right_alt = True
altgr_is_pressed = event_type == KEY_DOWN
is_keypad = (scan_code, vk, is_extended) in keypad_keys
return callback(KeyboardEvent(event_type=event_type, scan_code=scan_code or -vk, name=name, is_keypad=is_keypad))
def low_level_keyboard_handler(nCode, wParam, lParam):
try:
vk = lParam.contents.vk_code
# Ignore the second `alt` DOWN observed in some cases.
fake_alt = (LLKHF_INJECTED | 0x20)
# Ignore events generated by SendInput with Unicode.
if vk != VK_PACKET and lParam.contents.flags & fake_alt != fake_alt:
event_type = keyboard_event_types[wParam]
is_extended = lParam.contents.flags & 1
scan_code = lParam.contents.scan_code
should_continue = process_key(event_type, vk, scan_code, is_extended)
if not should_continue:
return -1
except Exception as e:
print('Error in keyboard hook:')
traceback.print_exc()
return CallNextHookEx(None, nCode, wParam, lParam)
WH_KEYBOARD_LL = c_int(13)
keyboard_callback = LowLevelKeyboardProc(low_level_keyboard_handler)
handle = GetModuleHandleW(None)
thread_id = DWORD(0)
keyboard_hook = SetWindowsHookEx(WH_KEYBOARD_LL, keyboard_callback, handle, thread_id)
# Register to remove the hook when the interpreter exits. Unfortunately a
# try/finally block doesn't seem to work here.
atexit.register(UnhookWindowsHookEx, keyboard_callback)
def listen(callback):
prepare_intercept(callback)
msg = LPMSG()
while not GetMessage(msg, 0, 0, 0):
TranslateMessage(msg)
DispatchMessage(msg)
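# Minimal usage sketch (an assumption for illustration, not part of this
# module): log every event and swallow presses of the 'a' key. `listen`
# pumps Windows messages forever, so real callers usually run it on a
# dedicated thread.
#
#   def demo_callback(event):
#       print(event.event_type, event.name)
#       return event.name != 'a'   # returning False blocks the keystroke
#
#   listen(demo_callback)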
def map_name(name):
_setup_name_tables()
entries = from_name.get(name)
if not entries:
raise ValueError('Key name {} is not mapped to any known key.'.format(repr(name)))
for i, entry in entries:
scan_code, vk, is_extended, modifiers = entry
yield scan_code or -vk, modifiers
def _send_event(code, event_type):
    if code == 541:
        # Alt-gr is made of ctrl+alt. Just sending event 541 doesn't do anything.
user32.keybd_event(0x11, code, event_type, 0)
user32.keybd_event(0x12, code, event_type, 0)
elif code > 0:
vk = scan_code_to_vk.get(code, 0)
user32.keybd_event(vk, code, event_type, 0)
else:
# Negative scan code is a way to indicate we don't have a scan code,
# and the value actually contains the Virtual key code.
user32.keybd_event(-code, 0, event_type, 0)
def press(code):
_send_event(code, 0)
def release(code):
_send_event(code, 2)
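# Minimal usage sketch (assumption): resolve a key name to a scan code with
# map_name() and send a press/release pair for the first candidate.
#
#   code, modifiers = next(map_name('a'))
#   press(code)
#   release(code)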
def type_unicode(character):
# This code and related structures are based on
# http://stackoverflow.com/a/11910555/252218
surrogates = bytearray(character.encode('utf-16le'))
presses = []
releases = []
for i in range(0, len(surrogates), 2):
higher, lower = surrogates[i:i+2]
structure = KEYBDINPUT(0, (lower << 8) + higher, KEYEVENTF_UNICODE, 0, None)
presses.append(INPUT(INPUT_KEYBOARD, _INPUTunion(ki=structure)))
structure = KEYBDINPUT(0, (lower << 8) + higher, KEYEVENTF_UNICODE | KEYEVENTF_KEYUP, 0, None)
releases.append(INPUT(INPUT_KEYBOARD, _INPUTunion(ki=structure)))
inputs = presses + releases
nInputs = len(inputs)
LPINPUT = INPUT * nInputs
pInputs = LPINPUT(*inputs)
cbSize = c_int(ctypes.sizeof(INPUT))
SendInput(nInputs, pInputs, cbSize)
if __name__ == '__main__':
_setup_name_tables()
import pprint
pprint.pprint(to_name)
pprint.pprint(from_name)
#listen(lambda e: print(e.to_json()) or True)
|
glitchassassin/keyboard
|
keyboard/_winkeyboard.py
|
Python
|
mit
| 20,607
|
# Module: docs
# Date: 03rd April 2013
# Author: James Mills, j dot mills at griffith dot edu dot au
"""Documentation Tasks"""
from fabric.api import lcd, local, task
from .utils import pip, requires
PACKAGE = "mio"
@task()
@requires("make", "sphinx-apidoc")
def clean():
"""Delete Generated Documentation"""
with lcd("docs"):
local("make clean")
@task(default=True)
@requires("make")
def build(**options):
"""Build the Documentation"""
pip(requirements="docs/requirements.txt")
if PACKAGE is not None:
local("sphinx-apidoc -f -T -o docs/source/api {0:s}".format(PACKAGE))
with lcd("docs"):
local("make html")
@task()
@requires("open")
def view(**options):
"""View the Documentation"""
with lcd("docs"):
local("open build/html/index.html")
|
prologic/mio
|
fabfile/docs.py
|
Python
|
mit
| 828
|
from unittest import TestCase
from pydigmips import instructions, loaders
class HexaLoaderTestCase(TestCase):
def testAdd(self):
i = ['1510', # 000 101 010 001 0000
'1C60',
'2C60'] # 001 011 000 110 0000
o = [instructions.Add(5, 2, 1), instructions.Add(7, 0, 6),
instructions.Sub(3, 0, 6)]
prog = loaders.load_hexa(i)
self.assertEqual(prog, o)
def testLd(self):
i = ['4EAA', # 010 011 101 0101010
'6EAA'] # 011 011 101 0101010
o = [instructions.Ld(3, (5, 42)),
instructions.St(3, (5, 42))]
prog = loaders.load_hexa(i)
self.assertEqual(prog, o)
def testBle(self):
i = ['8EAA'] # 100 011 101 0101010
o = [instructions.Ble(3, 5, 42)]
prog = loaders.load_hexa(i)
self.assertEqual(prog, o)
def testLdi(self):
i = ['B0AA'] # 101 100 00 10101010
o = [instructions.Ldi(4, 170)]
prog = loaders.load_hexa(i)
self.assertEqual(prog, o)
def testJa(self):
i = ['CE80'] # 110 011 101 0000000
o = [instructions.Ja(3, 5)]
prog = loaders.load_hexa(i)
self.assertEqual(prog, o)
def testJ(self):
i = ['EAAA'] # 111 0101010101010
o = [instructions.J(2730)]
prog = loaders.load_hexa(i)
self.assertEqual(prog, o)
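# Decoding sketch for the first testAdd word (an informal walk-through, not
# part of the test suite): 0x1510 = 0b0001010100010000; splitting the bits as
# 000|101|010|001|0000 gives opcode 000 (Add) and the 3-bit register fields
# 5, 2, 1 -- i.e. instructions.Add(5, 2, 1).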
|
ProgVal/pydigmips
|
tests/test_loaders.py
|
Python
|
mit
| 1,378
|
import json
from pprint import pprint
# use a context manager so the file is closed even if json.load() raises
with open('jsonFormated') as json_data:  # alternative input file: 'jsonFile'
    data = json.load(json_data)
pprint(data)
|
krthkj/learningPython
|
readjson.py
|
Python
|
mit
| 157
|
#!/usr/bin/env python3
# Copyright (c) 2019-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC misc output."""
import xml.etree.ElementTree as ET
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_raises_rpc_error,
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
)
from test_framework.authproxy import JSONRPCException
class RpcMiscTest(SyscoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
node = self.nodes[0]
self.log.info("test CHECK_NONFATAL")
assert_raises_rpc_error(
-1,
'Internal bug detected: \'request.params[9].get_str() != "trigger_internal_bug"\'',
lambda: node.echo(arg9='trigger_internal_bug'),
)
self.log.info("test getmemoryinfo")
memory = node.getmemoryinfo()['locked']
assert_greater_than(memory['used'], 0)
assert_greater_than(memory['free'], 0)
assert_greater_than(memory['total'], 0)
# assert_greater_than_or_equal() for locked in case locking pages failed at some point
assert_greater_than_or_equal(memory['locked'], 0)
assert_greater_than(memory['chunks_used'], 0)
assert_greater_than(memory['chunks_free'], 0)
assert_equal(memory['used'] + memory['free'], memory['total'])
self.log.info("test mallocinfo")
try:
mallocinfo = node.getmemoryinfo(mode="mallocinfo")
self.log.info('getmemoryinfo(mode="mallocinfo") call succeeded')
tree = ET.fromstring(mallocinfo)
assert_equal(tree.tag, 'malloc')
except JSONRPCException:
self.log.info('getmemoryinfo(mode="mallocinfo") not available')
assert_raises_rpc_error(-8, 'mallocinfo is only available when compiled with glibc 2.10+', node.getmemoryinfo, mode="mallocinfo")
assert_raises_rpc_error(-8, "unknown mode foobar", node.getmemoryinfo, mode="foobar")
self.log.info("test logging rpc and help")
# SYSCOIN Test logging RPC returns the expected number of logging categories.
assert_equal(len(node.logging()), 36)
# Test toggling a logging category on/off/on with the logging RPC.
assert_equal(node.logging()['qt'], True)
node.logging(exclude=['qt'])
assert_equal(node.logging()['qt'], False)
node.logging(include=['qt'])
assert_equal(node.logging()['qt'], True)
# Test logging RPC returns the logging categories in alphabetical order.
sorted_logging_categories = sorted(node.logging())
assert_equal(list(node.logging()), sorted_logging_categories)
# Test logging help returns the logging categories string in alphabetical order.
categories = ', '.join(sorted_logging_categories)
logging_help = self.nodes[0].help('logging')
assert f"valid logging categories are: {categories}" in logging_help
self.log.info("test echoipc (testing spawned process in multiprocess build)")
assert_equal(node.echoipc("hello"), "hello")
self.log.info("test getindexinfo")
# Without any indices running the RPC returns an empty object
assert_equal(node.getindexinfo(), {})
# Restart the node with indices and wait for them to sync
self.restart_node(0, ["-txindex", "-blockfilterindex", "-coinstatsindex"])
self.wait_until(lambda: all(i["synced"] for i in node.getindexinfo().values()))
# Returns a list of all running indices by default
values = {"synced": True, "best_block_height": 200}
assert_equal(
node.getindexinfo(),
{
"txindex": values,
"basic block filter index": values,
"coinstatsindex": values,
}
)
# Specifying an index by name returns only the status of that index
for i in {"txindex", "basic block filter index", "coinstatsindex"}:
assert_equal(node.getindexinfo(i), {i: values})
# Specifying an unknown index name returns an empty result
assert_equal(node.getindexinfo("foo"), {})
if __name__ == '__main__':
RpcMiscTest().main()
|
syscoin/syscoin
|
test/functional/rpc_misc.py
|
Python
|
mit
| 4,423
|
from io import BytesIO
import base64
import json
import os
import urllib.parse
import six
import sys
from ftptool import FTPHost
import xlsxwriter
# from https://docs.djangoproject.com/en/1.10/_modules/django/utils/encoding/
def smart_text(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Returns a text object representing 's' -- unicode on Python 2 and str on
Python 3. Treats bytestrings using the 'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
return force_text(s, encoding, strings_only, errors)
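# Illustrative examples under Python 3 (an informal sketch, not from the
# original Django source):
#   smart_text(b'caf\xc3\xa9')  # -> 'café'
#   smart_text(42)              # -> '42'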
# from https://docs.djangoproject.com/en/1.10/_modules/django/utils/encoding/
def force_text(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Similar to smart_text, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if issubclass(type(s), six.text_type):
return s
try:
if not issubclass(type(s), six.string_types):
if six.PY3:
if isinstance(s, bytes):
s = six.text_type(s, encoding, errors)
else:
s = six.text_type(s)
elif hasattr(s, "__unicode__"):
s = six.text_type(s)
else:
s = six.text_type(bytes(s), encoding, errors)
else:
# Note: We use .decode() here, instead of six.text_type(s, encoding,
# errors), so that if s is a SafeBytes, it ends up being a
# SafeText at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError as e:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
s = " ".join(force_text(arg, encoding, strings_only, errors) for arg in s)
return s
def _get_ftp_data(data):
outputs = []
conn = FTPHost.connect(
data["address"], user=data["user"], password=data["password"]
)
for (dirname, subdirs, files) in conn.walk(data.get("path", "/")):
outputs.append((dirname, files))
conn.try_quit()
return outputs
def _get_root_url(data):
return f'ftp://{data["user"]}:{data["password"]}@{data["address"]}'
def _populate_workbook(wb, root_url, data):
ws = wb.add_worksheet()
# write header rows
ws.write(0, 0, "Folder")
ws.write(0, 1, "Filename")
ws.write(0, 2, "URL")
parser = urllib.parse.quote
# write data rows
row = 0
for path, files in data:
for fn in files:
row += 1
path_url = parser(
os.path.join(path.decode("utf8"), fn.decode("utf8")).encode("utf8")
)
url = root_url + path_url
ws.write(row, 0, smart_text(path))
ws.write(row, 1, smart_text(fn))
ws.write(row, 2, smart_text(url))
# setup header and autofilter
bold = wb.add_format({"bold": True})
ws.set_row(0, None, bold)
ws.autofilter(f"A1:C{row + 1}")
# set widths
ws.set_column("A:A", 30)
ws.set_column("B:B", 65)
ws.set_column("C:C", 100)
def _generate_xlsx(data):
# create workbook
output = BytesIO()
wb = xlsxwriter.Workbook(output, {"constant_memory": True})
# add stuff to workbook
ftp_data = _get_ftp_data(data)
root_url = _get_root_url(data)
_populate_workbook(wb, root_url, ftp_data)
# return base64 encoded workbook
wb.close()
output.seek(0)
return output.read().encode("base64")
if __name__ == "__main__":
for data in sys.stdin:
b64 = _generate_xlsx(json.loads(data))
print(json.dumps({"xlsx": b64}))
|
shapiromatron/tblBuilder
|
src/private/scripts/ftpScraper.py
|
Python
|
mit
| 3,966
|
from holmium.core import (
Page, Element, Locators, Elements, ElementMap, Section, Sections
)
from holmium.core.cucumber import init_steps
init_steps()
class TestSection(Section):
el = Element(Locators.NAME, "el")
els = Elements(Locators.NAME, "els")
elmap = ElementMap(Locators.NAME, "elmap")
class TestSections(Sections):
el = Element(Locators.NAME, "el")
class TestPage(Page):
el = Element(Locators.NAME, "el")
els = Elements(Locators.NAME, "els")
elmap = ElementMap(Locators.NAME, "elmap")
sections = TestSections(Locators.NAME, "sections")
section = TestSection(Locators.NAME, "section")
def do_stuff(self, a, b):
return a + b
def do_stuff_no_args(self):
return True
def do_stuff_var_args(self, *args, **kwargs):
return args, kwargs
|
alisaifee/holmium.core
|
tests/support/cucumber/steps.py
|
Python
|
mit
| 823
|
#!/usr/bin/env python2.6
'''
Test docx module
'''
import os
import lxml
from docx import *
TEST_FILE = 'ShortTest.docx'
IMAGE1_FILE = 'image1.png'
# --- Setup & Support Functions ---
def setup_module():
'''Set up test fixtures'''
import shutil
if IMAGE1_FILE not in os.listdir('.'):
shutil.copyfile(os.path.join(os.path.pardir,IMAGE1_FILE), IMAGE1_FILE)
testsavedocument()
def teardown_module():
'''Tear down test fixtures'''
if TEST_FILE in os.listdir('.'):
os.remove(TEST_FILE)
def simpledoc():
'''Make a docx (document, relationships) for use in other docx tests'''
doc = newdocx('Python docx testnewdocument','A short example of making docx from Python','Alan Brooks',['python','Office Open XML','Word'])
document = getdocument(doc)
relationships = getrelationshiplist(doc)
docbody = document.xpath('/w:document/w:body', namespaces=nsprefixes)[0]
docbody.append(heading('Heading 1',1) )
docbody.append(heading('Heading 2',2))
docbody.append(paragraph('Paragraph 1'))
for point in ['List Item 1','List Item 2','List Item 3']:
docbody.append(paragraph(point,style='ListNumber'))
docbody.append(pagebreak(type='page'))
docbody.append(paragraph('Paragraph 2'))
docbody.append(table([['A1','A2','A3'],['B1','B2','B3'],['C1','C2','C3']]))
docbody.append(pagebreak(type='section', orient='portrait'))
relationships,picpara = picture(relationships,IMAGE1_FILE,'This is a test description')
docbody.append(picpara)
docbody.append(pagebreak(type='section', orient='landscape'))
docbody.append(paragraph('Paragraph 3'))
doc['word/document.xml'] = document
doc['word/_rels/document.xml.rels'] = wordrelationships(relationships)
return doc
# --- Test Functions ---
def testsearchandreplace():
'''Ensure search and replace functions work'''
doc = simpledoc()
document = getdocument(doc)
docbody = getdocbody(document)
assert search(docbody, 'ing 1')
assert search(docbody, 'ing 2')
assert search(docbody, 'graph 3')
assert search(docbody, 'ist Item')
assert search(docbody, 'A1')
if search(docbody, 'Paragraph 2'):
docbody = replace(docbody,'Paragraph 2','Whacko 55')
assert search(docbody, 'Whacko 55')
def testtextextraction():
'''Ensure text can be pulled out of a document'''
document = opendocx(TEST_FILE)
paratextlist = getdocumenttext(document)
assert len(paratextlist) > 0
def testunsupportedpagebreak():
'''Ensure unsupported page break types are trapped'''
document = newdocument()
docbody = document.xpath('/w:document/w:body', namespaces=nsprefixes)[0]
try:
docbody.append(pagebreak(type='unsup'))
except ValueError:
return # passed
assert False # failed
def testsavedocument():
'''Tests a new document can be saved'''
document = simpledoc()
savedocx(document, TEST_FILE)
def testgetdocument():
'''Ensure an etree element is returned'''
doc = opendocx(TEST_FILE)
document = getdocument(doc)
if isinstance(document,lxml.etree._Element):
pass
else:
assert False
def testmakeelement():
'''Ensure custom elements get created'''
testelement = makeelement('testname',attributes={'testattribute':'testvalue'},tagtext='testtagtext')
assert testelement.tag == '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}testname'
assert testelement.attrib == {'{http://schemas.openxmlformats.org/wordprocessingml/2006/main}testattribute': 'testvalue'}
assert testelement.text == 'testtagtext'
def testparagraph():
'''Ensure paragraph creates p elements'''
testpara = paragraph('paratext',style='BodyText')
assert testpara.tag == '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}p'
def testtable():
'''Ensure tables make sense'''
testtable = table([['A1','A2'],['B1','B2'],['C1','C2']])
ns = '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}'
assert testtable.xpath('/ns0:tbl/ns0:tr[2]/ns0:tc[2]/ns0:p/ns0:r/ns0:t',namespaces={'ns0':'http://schemas.openxmlformats.org/wordprocessingml/2006/main'})[0].text == 'B2'
if __name__=='__main__':
import nose
nose.main()
|
tomchipchase/python-docx
|
tests/test_docx.py
|
Python
|
mit
| 4,253
|
# Generated by Django 2.2.19 on 2021-04-28 09:12
from django.db import migrations, models
import image_cropping.fields
class Migration(migrations.Migration):
dependencies = [
('markets', '0065_auto_20190426_1255'),
]
operations = [
migrations.AlterField(
model_name="logo",
name="cropping",
field=image_cropping.fields.ImageRatioField(
"image",
"400x302",
adapt_rotation=False,
allow_fullsize=False,
free_crop=False,
help_text="Use cropping tool to cut the image to the right format. Always leave enough white space around the edges and try to keep the largest possible size for good image quality.", # noqa
hide_image_field=False,
size_warning=False,
verbose_name="cropping",
),
),
migrations.AlterField(
model_name="logo",
name="image",
field=models.ImageField(
help_text="After choosing an image to upload click 'Save' to access the 'Cropping' tool and edit the image", # noqa
null=True,
upload_to="",
),
),
]
|
uktrade/navigator
|
app/markets/migrations/0066_auto_20210428_0912.py
|
Python
|
mit
| 1,397
|
#encoding:utf-8
subreddit = 'technology'
t_channel = '@r_technology'
def send_post(submission, r2t):
return r2t.send_simple(submission)
|
Fillll/reddit2telegram
|
reddit2telegram/channels/~inactive/r_technology/app.py
|
Python
|
mit
| 143
|
#
# Copyright (C) 2021 Satoru SATOH <satoru.satoh@gmail.com>
# License: MIT
#
# pylint: disable=missing-docstring, relative-beyond-top-level
from ..multi_load import test_multi_types as multi
from ..single_load import test_multi_types as single
from . import common
class SingleTestCase(common.SingleBase, single.TestCase):
pass
class MultiTestCase(common.MultiBase, multi.TestCase):
pass
# vim:sw=4:ts=4:et:
|
ssato/python-anyconfig
|
tests/api/load/test_multi_types.py
|
Python
|
mit
| 423
|
# -*- coding: utf-8 -*-
import os
import oss2
from oss2.models import (ConditionInlcudeHeader,
Condition,
Redirect,
RedirectMirrorHeaders,
MirrorHeadersSet,
RoutingRule,
BucketWebsite,
REDIRECT_TYPE_MIRROR,
REDIRECT_TYPE_EXTERNAL,
REDIRECT_TYPE_ALICDN,
REDIRECT_TYPE_INTERNAL)
# The following code demonstrates how to configure static website hosting.
# First initialize the AccessKeyId, AccessKeySecret, Endpoint and other settings.
# They are read from environment variables, or you can replace placeholders such
# as '<your AccessKeyId>' with the real values.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<your AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<your AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<your bucket name>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<your endpoint>')
# Make sure every parameter above has been filled in correctly
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set the parameter: ' + param
# Create a Bucket object; all object-related operations go through it
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
index_file = 'index.html'
error_file = 'error.html'
# The following sets up static website hosting with only an index page and a 404 error page
bucket.put_bucket_website(BucketWebsite(index_file, error_file))
# Fetch the website configuration
result = bucket.get_bucket_website()
print('get_bucket_website without redirect:')
print('result index_file:', result.index_file)
print('result error_file:', result.error_file)
bucket.delete_bucket_website()
# The following demonstrates a mirror-based (back-to-origin) hosting configuration,
# using either primary/standby mode or multi-site mode.
# Set up the matching conditions
include_header1= ConditionInlcudeHeader('host', 'test.oss-cn-beijing-internal.aliyuncs.com')
include_header2 = ConditionInlcudeHeader('host', 'test.oss-cn-shenzhen-internal.aliyuncs.com')
condition1 = Condition(key_prefix_equals='key1',
http_err_code_return_equals=404, include_header_list=[include_header1, include_header2])
condition2 = Condition(key_prefix_equals='key2',
http_err_code_return_equals=404, include_header_list=[include_header1, include_header2])
# Set up the redirect rules
mirror_headers_set_1 = MirrorHeadersSet("myheader-key5","myheader-value5")
mirror_headers_set_2 = MirrorHeadersSet("myheader-key6","myheader-value6")
set_list = [mirror_headers_set_1, mirror_headers_set_2]
pass_list = ['myheader-key1', 'myheader-key2']
remove_list = ['myheader-key3', 'myheader-key4']
mirror_header = RedirectMirrorHeaders(pass_all=True, pass_list=pass_list, remove_list=remove_list, set_list=set_list)
# Primary/standby origin mode, using the mirror_url_slave and mirror_url_probe parameters
redirect1 = Redirect(redirect_type=REDIRECT_TYPE_MIRROR, pass_query_string=False, mirror_url='http://www.test.com/',
mirror_url_slave='http://www.slave.com/', mirror_url_probe='http://www.test.com/index.html', mirror_pass_query_string=False,
mirror_follow_redirect=True, mirror_check_md5=True, mirror_headers=mirror_header)
# No standby origin specified
redirect2 = Redirect(redirect_type=REDIRECT_TYPE_MIRROR, mirror_url='http://www.test.com/',
mirror_pass_query_string=True, mirror_follow_redirect=True, mirror_check_md5=False)
# One or more rules may be set; this example sets several
rule1 = RoutingRule(rule_num=1, condition=condition1, redirect=redirect1)
rule2 = RoutingRule(rule_num=2, condition=condition2, redirect=redirect2)
website_set = BucketWebsite(index_file, error_file, [rule1, rule2])
bucket.put_bucket_website(website_set)
# Fetch the website configuration
website_get = bucket.get_bucket_website()
print('get_bucket_website mirror type:')
print('index_file:', website_get.index_file)
print('error_file:', website_get.error_file)
print('rule sum:', len(website_get.rules))
bucket.delete_bucket_website()
# The following demonstrates Alibaba Cloud CDN redirects as well as external and internal redirects
include_header1= ConditionInlcudeHeader('host', 'test.oss-cn-beijing-internal.aliyuncs.com')
include_header2 = ConditionInlcudeHeader('host', 'test.oss-cn-shenzhen-internal.aliyuncs.com')
condition1 = Condition(key_prefix_equals='key3',
http_err_code_return_equals=404, include_header_list=[include_header1, include_header2])
condition2 = Condition(key_prefix_equals='key4',
http_err_code_return_equals=404, include_header_list=[include_header1, include_header2])
condition3 = Condition(key_prefix_equals='key5',
http_err_code_return_equals=404, include_header_list=[include_header1, include_header2])
# AliCDN
redirect1 = Redirect(redirect_type=REDIRECT_TYPE_ALICDN, pass_query_string=True,
replace_key_with='${key}.suffix', proto='http', http_redirect_code=302)
# External
redirect2 = Redirect(redirect_type=REDIRECT_TYPE_EXTERNAL, pass_query_string=False, replace_key_prefix_with='abc',
proto='https', host_name='oss.aliyuncs.com', http_redirect_code=302)
# Internal
redirect3 = Redirect(redirect_type=REDIRECT_TYPE_INTERNAL, pass_query_string=False, replace_key_with='${key}.suffix')
# One or more rules may be set; this example sets several
rule1 = RoutingRule(rule_num=1, condition=condition1, redirect=redirect1)
rule2 = RoutingRule(rule_num=2, condition=condition2, redirect=redirect2)
rule3 = RoutingRule(rule_num=3, condition=condition3, redirect=redirect3)
website_set = BucketWebsite(index_file, error_file, [rule1, rule2, rule3])
bucket.put_bucket_website(website_set)
# Fetch the website configuration
website_get = bucket.get_bucket_website()
print('get_bucket_website other type:')
print('index_file:', website_get.index_file)
print('error_file:', website_get.error_file)
print('rule sum:', len(website_get.rules))
for rule in website_get.rules:
print('rule_num:{}, redirect_type:{}'.format(rule.rule_num, rule.redirect.redirect_type))
bucket.delete_bucket_website()
|
aliyun/aliyun-oss-python-sdk
|
examples/bucket_website.py
|
Python
|
mit
| 6,202
|
# flake8: noqa
from .config import INFER_HOST
from .main import run_app, runserver, serve_static
|
samuelcolvin/aiohttp-devtools
|
aiohttp_devtools/runserver/__init__.py
|
Python
|
mit
| 97
|
from textwrap import dedent
import inspect
from collections import OrderedDict
from clusterjob import JobScript
import pytest
import logging
try:
from ConfigParser import Error as ConfigParserError
except ImportError:
from configparser import Error as ConfigParserError
# built-in fixtures: tmpdir
# pytest-capturelog fixtures: caplog
def get_methods(obj):
"""Get list of methods of object or class"""
return sorted([k for (k, v) in inspect.getmembers(obj, inspect.isroutine)])
# isroutine works in Python 2 and Python 3, while ismethod does not work in
# Python 3 if obj is a class (since the methods are not *bound*)
def get_attributes(obj, hidden=False):
"""Get list of attributes of object"""
methods = get_methods(obj)
attribs = sorted([k for k in obj.__dict__ if k not in methods])
if hidden:
return attribs
else:
return [attr for attr in attribs if not attr.startswith('_')]
def default_class_attr_val(attr):
"""Return the default value for the given class attribute"""
defaults = JobScript._attributes.copy()
defaults.update(JobScript._protected_attributes)
try:
return defaults[attr]
except KeyError:
if attr == 'resources':
return OrderedDict()
else:
return None
def check_attributes(obj, expected):
for key in expected:
assert getattr(obj, key) == expected[key]
def check_resources(obj, expected):
for key in expected:
assert obj.resources[key] == expected[key]
def example_inidata():
inidata = dedent(r'''
[Attributes]
remote = login.cluster.edu
backend = pbs
shell = /bin/sh
cache_folder = cache
prologue =
ssh {remote} 'mkdir -p {rootdir}/{workdir}'
rsync -av {workdir}/ {remote}:{rootdir}/{workdir}
epilogue = rsync -av {remote}:{rootdir}/{workdir}/ {workdir}
rootdir = ~/jobs/
workdir = run001
max_sleep_interval = 60
# the following is a new attribute
text = Hello World
[Resources]
queue = exec
nodes = 1
threads = 12
mem = 10000
''')
expected_attribs = {
'remote': 'login.cluster.edu',
'backend': 'pbs',
'shell': '/bin/sh',
'prologue' : "ssh {remote} 'mkdir -p {rootdir}/{workdir}'\n"
"rsync -av {workdir}/ {remote}:{rootdir}/{workdir}",
'epilogue': "rsync -av {remote}:{rootdir}/{workdir}/ {workdir}",
'rootdir': '~/jobs',
'workdir': 'run001',
'max_sleep_interval': 60,
'text': "Hello World"
}
expected_resources = {
'queue': 'exec',
'nodes': 1,
'threads': 12,
'mem': 10000,
}
return inidata, expected_attribs, expected_resources
def test_read_inifile(tmpdir):
p = tmpdir.join("default.ini")
ini_filename = str(p)
attribs = {}
resources = {}
def attr_setter(k,v):
attribs[k] = v
def rsrc_setter(k,v):
resources[k] = v
inidata = ''
p.write(inidata)
with pytest.raises(ConfigParserError) as exc_info:
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert "must contain at least one of the sections" in str(exc_info.value)
inidata = dedent(r'''
max_sleep_interval = 60
''')
p.write(inidata)
with pytest.raises(ConfigParserError) as exc_info:
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert "File contains no section headers" in str(exc_info.value)
inidata = dedent(r'''
[Attributes]
max_sleep_interval = 60
''')
p.write(inidata)
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert attribs['max_sleep_interval'] == 60
inidata = dedent(r'''
[Resources]
threads = 2
''')
p.write(inidata)
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert attribs['max_sleep_interval'] == 60
assert resources['threads'] == 2
inidata = dedent(r'''
[Attributes]
shell = /bin/bash
[Resources]
nodes = 1
''')
p.write(inidata)
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert attribs['max_sleep_interval'] == 60
assert attribs['shell'] == '/bin/bash'
assert resources['threads'] == 2
assert resources['nodes'] == 1
# both section headers and keys are case sensitive
inidata = dedent(r'''
[Attributes]
Max_Sleep_Interval = 120
Shell = /bin/bash
[Resources]
Nodes = 1
''')
p.write(inidata)
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert attribs['max_sleep_interval'] == 60
assert attribs['Max_Sleep_Interval'] == '120' # no conversion to int!
inidata = dedent(r'''
[Attributes]
shell = /bin/bash
[Resources]
nodes = 1
[Schedulers]
cluster = login.cluster.com
''')
p.write(inidata)
with pytest.raises(ConfigParserError) as exc_info:
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert "Invalid section 'Schedulers'" in str(exc_info.value)
inidata = dedent(r'''
[Attributes]
resources = {1:2}
''')
p.write(inidata)
with pytest.raises(ConfigParserError) as exc_info:
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert "not allowed" in str(exc_info.value)
# quotes are not stripped out!
inidata = dedent(r'''
[Attributes]
text = "This is a text"
''')
p.write(inidata)
JobScript._read_inifile(ini_filename, attr_setter, rsrc_setter)
assert attribs['text'] == '"This is a text"'
def test_read_defaults(caplog, tmpdir):
JobScript.read_defaults() # reset
caplog.setLevel(logging.DEBUG, logger='clusterjob')
jobscript = JobScript(body="echo 'Hello'", jobname="test")
assert get_attributes(jobscript) == ['aux_scripts', 'body', 'resources']
assert get_attributes(jobscript.__class__) == ['backend', 'backends',
'cache_folder', 'cache_prefix', 'epilogue', 'filename',
'max_sleep_interval', 'prologue', 'remote', 'resources', 'rootdir',
'scp', 'shell', 'ssh', 'workdir']
for attr in get_attributes(jobscript.__class__):
if attr not in ['resources', 'backends']:
assert getattr(jobscript, attr) == default_class_attr_val(attr)
inidata, expected_attribs, expected_resources = example_inidata()
p = tmpdir.join("default.ini")
p.write(inidata)
ini_filename = str(p)
# Setting class defaults before instantiation sets both the attributes and
# the resources
JobScript.read_defaults(ini_filename)
jobscript = JobScript(body="echo '{text}'", jobname="test")
assert get_attributes(jobscript) == ['aux_scripts', 'body', 'resources']
check_attributes(jobscript, expected_attribs)
check_resources(jobscript, expected_resources)
assert str(jobscript) == dedent(r'''
#!/bin/sh
#PBS -l nodes=1:ppn=12
#PBS -q exec
#PBS -l mem=10000m
#PBS -N test
echo 'Hello World'
''').strip()
# calling read_defaults without filename argument resets the class, and
# thus also changes the attributes of an existing instance
JobScript.read_defaults()
check_resources(jobscript, expected_resources)
for attr in get_attributes(jobscript.__class__):
if attr not in ['resources', 'backends']:
assert getattr(jobscript, attr) == default_class_attr_val(attr)
with pytest.raises(KeyError) as exc_info:
str(jobscript)
assert "no matching attribute or resource entry" in str(exc_info.value)
jobscript.text = 'Hello World' # instance attribute
assert str(jobscript) == dedent(r'''
#!/bin/bash
#SBATCH --partition=exec
#SBATCH --nodes=1
#SBATCH --cpus-per-task=12
#SBATCH --mem=10000
#SBATCH --job-name=test
echo 'Hello World'
''').strip()
# Setting class defaults after instantiation sets the attributes, but not
# the resources
jobscript = JobScript(body="echo '{text}'", jobname="test")
JobScript.read_defaults(ini_filename)
assert str(jobscript) == dedent(r'''
#!/bin/sh
#PBS -N test
echo 'Hello World'
''').strip()
def test_read_settings(caplog, tmpdir):
JobScript.read_defaults() # reset
caplog.setLevel(logging.DEBUG, logger='clusterjob')
jobscript = JobScript(body="echo '{text}'", jobname="test")
assert get_attributes(jobscript) == ['aux_scripts', 'body', 'resources']
jobscript2 = JobScript(body="echo 'Hello'", jobname="test2")
inidata, expected_attribs, expected_resources = example_inidata()
p = tmpdir.join("job.ini")
p.write(inidata)
ini_filename = str(p)
with pytest.raises(AttributeError) as excinfo:
jobscript.read_settings(ini_filename)
assert "'cache_folder' can only be set as a class attribute" \
in str(excinfo.value)
inidata = inidata.replace("cache_folder = cache\n", "")
p.write(inidata)
jobscript.read_settings(ini_filename)
assert get_attributes(jobscript) == ['aux_scripts', 'backend', 'body',
'epilogue', 'max_sleep_interval', 'prologue', 'remote',
'resources', 'rootdir', 'shell', 'text', 'workdir']
# class attributes remain unaffected
for attr in get_attributes(JobScript):
if attr not in ['resources', 'backends']:
assert getattr(JobScript, attr) == default_class_attr_val(attr)
assert str(jobscript) == dedent(r'''
#!/bin/sh
#PBS -l nodes=1:ppn=12
#PBS -N test
#PBS -q exec
#PBS -l mem=10000m
echo 'Hello World'
''').strip()
# the second jobscript is unaffected
assert str(jobscript2) == dedent(r'''
#!/bin/bash
#SBATCH --job-name=test2
echo 'Hello'
''').strip()
def test_read_invalid_attribute(caplog, tmpdir):
JobScript.read_defaults() # reset
caplog.setLevel(logging.DEBUG, logger='clusterjob')
jobscript = JobScript(body="echo '{text}'", jobname="test")
inidata = dedent(r'''
[Attributes]
_remote = login.cluster.edu
''')
p = tmpdir.join("job.ini")
p.write(inidata)
ini_filename = str(p)
with pytest.raises(ConfigParserError) as exc_info:
jobscript.read_settings(ini_filename)
assert "Key '_remote' is invalid" in str(exc_info.value)
inidata = dedent(r'''
[Attributes]
key with spaces = bla
''')
p = tmpdir.join("job.ini")
p.write(inidata)
ini_filename = str(p)
with pytest.raises(ConfigParserError) as exc_info:
jobscript.read_settings(ini_filename)
assert "Key 'key with spaces' is invalid" in str(exc_info.value)
|
goerz/clusterjob
|
tests/test_inifile.py
|
Python
|
mit
| 10,660
|
# MIT license
#
# Copyright (C) 2015 by XESS Corp.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import division
import pcbnew
import FootprintWizardBase
import PadArray as PA
from math import ceil, floor, sqrt
def calc_solderpaste_margin(w,h,fill):
'''Calculate how far in to pull the paste mask to get a certain fill percentage.'''
if fill > 0.99:
return 0
a = (h+w)/2.0
b = w*h*(fill-1.0)
c = sqrt(a**2+b)
return int((a-c)/2.0)
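# Sanity check of the closed form above (illustrative numbers in KiCad's
# nanometer units): solving (w - 2m)*(h - 2m) = fill*w*h for the per-side
# pull-in m gives m = ((w + h) - sqrt((w + h)**2 - 4*w*h*(1 - fill)))/4,
# which is exactly what the a/b/c terms compute. For w=1000000, h=2000000,
# fill=0.5 this returns m ~= 190983, and (w - 2m)*(h - 2m) is indeed ~w*h/2.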
class XessFpWizardDrawingAids(FootprintWizardBase.FootprintWizardDrawingAids):
def Circle(self, x, y, r, filled=False):
"""
Draw a circle at (x,y) of radius r
If filled is true, the width and radius of the line will be set
such that the circle appears filled
"""
circle = pcbnew.EDGE_MODULE(self.module)
start = self.TransformPoint(x, y)
if filled:
circle.SetWidth(int(r))
end = self.TransformPoint(x, y + r/2)
else:
circle.SetWidth(self.dc['width'])
end = self.TransformPoint(x, y + r)
circle.SetLayer(self.dc['layer'])
circle.SetShape(pcbnew.S_CIRCLE)
circle.SetStartEnd(start, end)
self.module.Add(circle)
class XessFpWizard(FootprintWizardBase.FootprintWizard):
def GetValue(self):
return "{}".format(self.parameters["Misc"][self.fp_name_key])
def GetReferencePrefix(self):
return "{}".format(self.parameters["Misc"][self.fp_ref_key])
class XessPeriphPckgWizard(XessFpWizard):
def GetName(self):
return "Edge-Pin Chips"
def GetDescription(self):
return "SOICs, TSSOPs, QFPs, etc."
n_pads_per_col_key = '#Pads (Vertical)'
n_pads_per_row_key = '#Pads (Horizontal)'
total_width_key = 'Total Width (D)'
total_height_key = 'Total Height (E)'
body_width_key = 'Body Width (D1)'
body_height_key = 'Body Height (E1)'
col_to_col_pitch_key = 'Left-to-Right Column Pitch'
row_to_row_pitch_key = 'Top-to-Bottom Row Pitch'
pad_pitch_key = 'Pitch (e)'
pad_width_key = 'Width (b)'
pad_length_key = 'Length (L)'
pad_extension_key = 'Extension'
pad_oval_key = 'Oval (Y) / Rectangular (N)'
pad_smd_key = 'SMD (Y) / Through-Hole (N)'
pad_drill_key = 'Drill Size'
pad_soldermask_margin_key = 'Soldermask Margin'
pad_paste_fill_key = 'Paste Fill (%)'
fp_name_key = 'Footprint Name'
fp_ref_key = 'Reference Prefix'
land_dim_key = 'Land Pattern (Y) / Mechanical (N)'
outline_key = 'Silkscreen Outline (%)'
bevel_key = 'Bevel (%)'
add_index_key = 'Add index (Y/N)'
paddle_enable_key = 'Thermal Pad (Y/N)'
paddle_width_key = 'Width'
paddle_height_key = 'Height'
paddle_orgx_key = 'Center (X)'
paddle_orgy_key = 'Center (Y)'
paddle_soldermask_margin_key = 'Soldermask Margin'
paddle_paste_fill_key = 'Paste Fill (%)'
def GenerateParameterList(self):
self.AddParam("Package", self.n_pads_per_row_key, self.uInteger, 11)
self.AddParam("Package", self.n_pads_per_col_key, self.uInteger, 11)
self.AddParam("Package", self.total_width_key, self.uMM, 12)
self.AddParam("Package", self.total_height_key, self.uMM, 12)
self.AddParam("Package", self.body_width_key, self.uMM, 10)
self.AddParam("Package", self.body_height_key, self.uMM, 10)
self.AddParam("Package", self.col_to_col_pitch_key, self.uMM, 11.25)
self.AddParam("Package", self.row_to_row_pitch_key, self.uMM, 11.25)
self.AddParam("Pad", self.pad_smd_key, self.uBool, True)
self.AddParam("Pad", self.pad_oval_key, self.uBool, False)
self.AddParam("Pad", self.pad_pitch_key, self.uMM, 0.8)
self.AddParam("Pad", self.pad_width_key, self.uMM, 0.45)
self.AddParam("Pad", self.pad_length_key, self.uMM, 0.75)
self.AddParam("Pad", self.pad_extension_key, self.uMM, 0.5)
self.AddParam("Pad", self.pad_soldermask_margin_key, self.uMM, 0)
self.AddParam("Pad", self.pad_paste_fill_key, self.uInteger, 100)
self.AddParam("Pad", self.pad_drill_key, self.uMM, 1)
self.AddParam("Paddle", self.paddle_enable_key, self.uBool, False)
self.AddParam("Paddle", self.paddle_width_key, self.uMM, 0.0)
self.AddParam("Paddle", self.paddle_height_key, self.uMM, 0.0)
self.AddParam("Paddle", self.paddle_orgx_key, self.uMM, 0.0)
self.AddParam("Paddle", self.paddle_orgy_key, self.uMM, 0.0)
self.AddParam("Paddle", self.paddle_soldermask_margin_key, self.uMM, 0)
self.AddParam("Paddle", self.paddle_paste_fill_key, self.uInteger, 70)
self.AddParam("Misc", self.fp_name_key, self.uString, 'Footprint Name')
self.AddParam("Misc", self.fp_ref_key, self.uString, 'U')
self.AddParam("Misc", self.land_dim_key, self.uBool, False)
self.AddParam("Misc", self.outline_key, self.uInteger, 0)
self.AddParam("Misc", self.bevel_key, self.uInteger, 20)
self.AddParam("Misc", self.add_index_key, self.uBool, False)
def CheckParameters(self):
# self.CheckParamInt("Pad", '*'+self.n_pads_per_row_key)
# self.CheckParamInt("Pad", '*'+self.n_pads_per_col_key)
self.CheckParam("Pad", self.pad_oval_key)
self.CheckParam("Pad", self.pad_smd_key)
self.CheckParam("Paddle", self.paddle_enable_key)
self.CheckParam("Misc", self.land_dim_key)
self.CheckParam("Misc", self.add_index_key)
def BuildThisFootprint(self):
self.draw = XessFpWizardDrawingAids(self.module)
misc = self.parameters["Misc"]
pads = self.parameters["Pad"]
pckg = self.parameters["Package"]
paddle = self.parameters["Paddle"]
# Footprints can be specified using land patterns or the IC mechanical dimensions.
land_dim = misc[self.land_dim_key]
outline = misc[self.outline_key] / 100.0
bevel = misc[self.bevel_key] / 100.0
add_index = misc[self.add_index_key]
pad_pitch = pads[self.pad_pitch_key]
pad_width = pads[self.pad_width_key]
pad_length = pads[self.pad_length_key]
pad_extension = pads[self.pad_extension_key]
pad_soldermask_margin = pads[self.pad_soldermask_margin_key]
pad_paste_fill = pads[self.pad_paste_fill_key] / 100.0
pad_shape = pcbnew.PAD_SHAPE_OVAL if pads[self.pad_oval_key] else pcbnew.PAD_SHAPE_RECT
pad_smd = pads[self.pad_smd_key]
pad_drill = pads[self.pad_drill_key]
n_pads_per_row = int(pckg[self.n_pads_per_row_key])
n_pads_per_col = int(pckg[self.n_pads_per_col_key])
# IC epoxy package dimensions.
body_width = pckg[self.body_width_key]
body_height = pckg[self.body_height_key]
# Mechanical dimensions from side-to-side pin-tip to pin-tip.
total_width = pckg[self.total_width_key]
total_height = pckg[self.total_height_key]
if pad_smd is False:
# For through-hole pins, the pins go through the center of the pad.
# So add the pad length to the pin tip-to-tip distance to get the
# pad tip-to-tip distance.
total_width += pad_length
total_height += pad_length
# Land pattern dimensions.
col_to_col_pitch = pckg[self.col_to_col_pitch_key]
row_to_row_pitch = pckg[self.row_to_row_pitch_key]
paddle_enable = paddle[self.paddle_enable_key]
paddle_width = paddle[self.paddle_width_key]
paddle_height = paddle[self.paddle_height_key]
paddle_orgx = paddle[self.paddle_orgx_key]
paddle_orgy = paddle[self.paddle_orgy_key]
paddle_soldermask_margin = paddle[self.paddle_soldermask_margin_key]
paddle_paste_fill = paddle[self.paddle_paste_fill_key] / 100.0
if land_dim: # For footprint land dimensions.
pitch_adjustment = 0
row_to_row_pitch += pitch_adjustment
col_to_col_pitch += pitch_adjustment
else: # For footprint mechanical dimensions.
            pitch_adjustment = -pad_length
row_to_row_pitch = total_height + pitch_adjustment
col_to_col_pitch = total_width + pitch_adjustment
if pad_smd is True:
h_pad = PA.PadMaker(self.module).SMDPad(pad_width, pad_length + pad_extension, shape=pad_shape)
v_pad = PA.PadMaker(self.module).SMDPad(pad_length + pad_extension, pad_width, shape=pad_shape)
else:
h_pad = PA.PadMaker(self.module).THPad(pad_width, pad_length + pad_extension, pad_drill, shape=pad_shape)
v_pad = PA.PadMaker(self.module).THPad(pad_length + pad_extension, pad_width, pad_drill, shape=pad_shape)
h_pad.SetLocalSolderMaskMargin(pad_soldermask_margin)
v_pad.SetLocalSolderMaskMargin(pad_soldermask_margin)
m = calc_solderpaste_margin(pad_width, pad_length + pad_extension, pad_paste_fill)
h_pad.SetLocalSolderPasteMargin(m)
v_pad.SetLocalSolderPasteMargin(m)
# left column
if n_pads_per_col != 0:
pin1Pos = pcbnew.wxPoint(-col_to_col_pitch / 2.0, 0)
offset = pcbnew.wxPoint(-pad_extension/2.0, 0)
h_pad.SetOffset(offset)
array = PA.PadLineArray(h_pad, n_pads_per_col, pad_pitch, True, pin1Pos)
array.SetFirstPadInArray(1)
array.AddPadsToModule(self.draw)
# bottom row
if n_pads_per_row != 0:
pin1Pos = pcbnew.wxPoint(0, row_to_row_pitch / 2.0)
offset = pcbnew.wxPoint(0, pad_extension/2.0)
v_pad.SetOffset(offset)
array = PA.PadLineArray(v_pad, n_pads_per_row, pad_pitch, False, pin1Pos)
array.SetFirstPadInArray(n_pads_per_col + 1)
array.AddPadsToModule(self.draw)
# right column
if n_pads_per_col != 0:
pin1Pos = pcbnew.wxPoint(col_to_col_pitch / 2.0, 0)
offset = pcbnew.wxPoint(pad_extension/2.0, 0)
h_pad.SetOffset(offset)
array = PA.PadLineArray(h_pad, n_pads_per_col, -pad_pitch, True, pin1Pos)
array.SetFirstPadInArray(n_pads_per_col + n_pads_per_row + 1)
array.AddPadsToModule(self.draw)
# top row
if n_pads_per_row != 0:
pin1Pos = pcbnew.wxPoint(0, -row_to_row_pitch / 2.0)
offset = pcbnew.wxPoint(0, -pad_extension/2.0)
v_pad.SetOffset(offset)
array = PA.PadLineArray(v_pad, n_pads_per_row, -pad_pitch, False, pin1Pos)
array.SetFirstPadInArray(2 * n_pads_per_col + n_pads_per_row + 1)
array.AddPadsToModule(self.draw)
# Thermal paddle.
if paddle_enable is True:
t_pad = PA.PadMaker(self.module).SMDPad(paddle_width, paddle_height, shape=pcbnew.PAD_SHAPE_RECT)
t_pad_pos = pcbnew.wxPoint(paddle_orgx, paddle_orgy)
t_pad.SetLocalSolderMaskMargin(paddle_soldermask_margin)
m = calc_solderpaste_margin(paddle_width, paddle_height, paddle_paste_fill)
t_pad.SetLocalSolderPasteMargin(m)
array = PA.PadLineArray(t_pad, 1, 0, False, t_pad_pos)
array.SetFirstPadInArray(2*(n_pads_per_col+n_pads_per_row)+1)
array.AddPadsToModule(self.draw)
if n_pads_per_row == 0:
row_to_row_pitch = body_height - pad_length - pad_extension
outline_height = body_height
else:
outline_height = row_to_row_pitch - pad_length + 2 * (pad_length + pad_extension) * outline
if n_pads_per_col == 0:
outline_width = body_width
else:
outline_width = col_to_col_pitch - pad_length + 2 * (pad_length + pad_extension) * outline
# Silkscreen outline
h = outline_height / 2.0
w = outline_width / 2.0
b = min(outline_height * bevel, outline_width * bevel)
self.draw.Polyline([(-w, -h + b), (-w, h), (w, h), (w, -h),
(-w + b, -h), (-w, -h + b)])
# Add corner index.
if add_index is True:
offset = pad_pitch
self.draw.Circle(-w-offset, -h-offset, pad_pitch/2.0, filled=True)
# reference and value
h1 = (row_to_row_pitch + pad_length + pad_extension) / 2.0
h = max(h, h1)
text_size = pcbnew.FromMM(1.2) # IPC nominal
text_offset = h + text_size + pad_pitch/2.0
self.draw.Value(0, -text_offset, text_size)
self.draw.Reference(0, text_offset, text_size)
class XessBgaPckgWizard(XessFpWizard):
def GetName(self):
return "Area-Pin Chips"
def GetDescription(self):
return "Ball Grid Arrays"
n_pads_per_col_key = '#Rows (Vertical)'
n_pads_per_row_key = '#Cols (Horizontal)'
total_width_key = 'Width (D)'
total_height_key = 'Height (E)'
pad_row_pitch_key = 'Row Pitch (e)'
pad_col_pitch_key = 'Column Pitch (e)'
pad_width_key = 'Size (b)'
pad_soldermask_margin_key = 'Soldermask Margin'
pad_paste_fill_key = 'Paste Fill (%)'
fp_name_key = 'Footprint Name'
fp_ref_key = 'Reference Prefix'
outline_key = 'Silkscreen Outline (%)'
bevel_key = 'Bevel (%)'
add_index_key = 'Add index (Y/N)'
def GenerateParameterList(self):
self.AddParam("Package", self.n_pads_per_row_key, self.uInteger, 16)
self.AddParam("Package", self.n_pads_per_col_key, self.uInteger, 16)
self.AddParam("Package", self.total_width_key, self.uMM, 14)
self.AddParam("Package", self.total_height_key, self.uMM, 14)
self.AddParam("Pad", self.pad_row_pitch_key, self.uMM, 0.8)
self.AddParam("Pad", self.pad_col_pitch_key, self.uMM, 0.8)
self.AddParam("Pad", self.pad_width_key, self.uMM, 0.45)
self.AddParam("Pad", self.pad_soldermask_margin_key, self.uMM, 0)
self.AddParam("Pad", self.pad_paste_fill_key, self.uInteger, 100)
self.AddParam("Misc", self.fp_name_key, self.uString, 'Footprint Name')
self.AddParam("Misc", self.fp_ref_key, self.uString, 'U')
self.AddParam("Misc", self.outline_key, self.uInteger, 100)
self.AddParam("Misc", self.bevel_key, self.uInteger, 7)
self.AddParam("Misc", self.add_index_key, self.uBool, False)
def CheckParameters(self):
# self.CheckParamInt("Pad", '*'+self.n_pads_per_row_key)
# self.CheckParamInt("Pad", '*'+self.n_pads_per_col_key)
self.CheckParam("Misc", self.add_index_key)
def BuildThisFootprint(self):
self.draw = XessFpWizardDrawingAids(self.module)
pads = self.parameters["Pad"]
pckg = self.parameters["Package"]
misc = self.parameters["Misc"]
n_pads_per_row = int(pckg[self.n_pads_per_row_key])
n_pads_per_col = int(pckg[self.n_pads_per_col_key])
total_width = pckg[self.total_width_key]
total_height = pckg[self.total_height_key]
pad_row_pitch = pads[self.pad_row_pitch_key]
pad_col_pitch = pads[self.pad_col_pitch_key]
pad_width = pads[self.pad_width_key]
pad_soldermask_margin = pads[self.pad_soldermask_margin_key]
pad_paste_fill = pads[self.pad_paste_fill_key] / 100.0
pad_length = pad_width
pad_shape = pcbnew.PAD_SHAPE_CIRCLE
outline = misc[self.outline_key] / 100.0
bevel = misc[self.bevel_key] / 100.0
add_index = misc[self.add_index_key]
pad = PA.PadMaker(self.module).SMDPad(pad_width, pad_length, shape=pad_shape)
pad.SetLayerSet(pad.SMDMask())
pad.SetLocalSolderMaskMargin(pad_soldermask_margin)
m = int(floor(pad_width * (1.0 - sqrt(pad_paste_fill))))
pad.SetLocalSolderPasteMargin(m)
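        # A circle whose diameter is scaled by sqrt(fill) has its area scaled
        # by fill, so the margin above is the diameter reduction
        # pad_width * (1 - sqrt(fill)); e.g. a 0.81 paste fill pulls the
        # aperture in by 0.1 * pad_width (sign/per-side handling is KiCad's).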
class BGAPadGridArray(PA.PadGridArray):
def NamingFunction(self, n_x, n_y):
return "%s%d" % (
self.AlphaNameFromNumber(n_y + 1, alphabet="ABCDEFGHJKLMNPRTUVWY"),
n_x + 1)
# Draw pads.
array = BGAPadGridArray(pad, n_pads_per_col, n_pads_per_row, pad_col_pitch, pad_row_pitch)
array.AddPadsToModule(self.draw)
# Draw outline.
h = total_height / 2.0 * outline
w = total_width / 2.0 * outline
b = min(total_height * bevel, total_width * bevel)
self.draw.Polyline([(-w, -h + b), (-w, h), (w, h), (w, -h),
(-w + b, -h), (-w, -h + b)])
# Add corner index.
if add_index is True:
self.draw.Circle(-w-pad_col_pitch, -h-pad_row_pitch, (pad_row_pitch+pad_col_pitch)/4.0, filled=True)
# Add reference and value.
text_size = pcbnew.FromMM(1.2) # IPC nominal
text_offset = h + text_size + pad_row_pitch / 2.0
self.draw.Value(0, -text_offset, text_size)
self.draw.Reference(0, text_offset, text_size)
XessPeriphPckgWizard().register()
XessBgaPckgWizard().register()
|
xesscorp/xess_fp_wizard
|
xess_fp_wizard.py
|
Python
|
mit
| 18,484
|
# ghostAgents.py
# --------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from game import Agent
from game import Actions
from game import Directions
import random
from util import manhattanDistance
import util
class GhostAgent( Agent ):
def __init__( self, index ):
self.index = index
def getAction( self, state ):
dist = self.getDistribution(state)
if len(dist) == 0:
return Directions.STOP
else:
return util.chooseFromDistribution( dist )
def getDistribution(self, state):
"Returns a Counter encoding a distribution over actions from the provided state."
util.raiseNotDefined()
class WumpusGhost( GhostAgent ):
"A ghost that does not move, but smells!"
def __init__ ( self, index ):
self.index = index
def getAction( self, state ):
return Directions.STOP
def getDistribution(self, state):
util.raiseNotDefined()
class RandomGhost( GhostAgent ):
"A ghost that chooses a legal action uniformly at random."
def getDistribution( self, state ):
dist = util.Counter()
for a in state.getLegalActions( self.index ): dist[a] = 1.0
dist.normalize()
return dist
class DirectionalGhost( GhostAgent ):
"A ghost that prefers to rush Pacman, or flee when scared."
def __init__( self, index, prob_attack=0.8, prob_scaredFlee=0.8 ):
self.index = index
self.prob_attack = prob_attack
self.prob_scaredFlee = prob_scaredFlee
def getDistribution( self, state ):
# Read variables from state
ghostState = state.getGhostState( self.index )
legalActions = state.getLegalActions( self.index )
pos = state.getGhostPosition( self.index )
isScared = ghostState.scaredTimer > 0
speed = 1
if isScared: speed = 0.5
actionVectors = [Actions.directionToVector( a, speed ) for a in legalActions]
newPositions = [( pos[0]+a[0], pos[1]+a[1] ) for a in actionVectors]
pacmanPosition = state.getPacmanPosition()
# Select best actions given the state
distancesToPacman = [manhattanDistance( pos, pacmanPosition ) for pos in newPositions]
if isScared:
bestScore = max( distancesToPacman )
bestProb = self.prob_scaredFlee
else:
bestScore = min( distancesToPacman )
bestProb = self.prob_attack
bestActions = [action for action, distance in zip( legalActions, distancesToPacman ) if distance == bestScore]
# Construct distribution
dist = util.Counter()
for a in bestActions: dist[a] = bestProb / len(bestActions)
for a in legalActions: dist[a] += ( 1-bestProb ) / len(legalActions)
dist.normalize()
return dist
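# A worked example of the mixture above (hypothetical numbers): with three
# legal actions, one best action, and prob_attack = 0.8, the best action gets
# 0.8 + 0.2/3 ~= 0.867 and each other action gets 0.2/3 ~= 0.067 before
# normalization (a no-op here, since the masses already sum to 1).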
|
DominikDitoIvosevic/Uni
|
AI/lab2/ghostAgents.py
|
Python
|
mit
| 3,390
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import errno
import json
import logging
import threading
import time
import websocket
import parlai.chat_service.utils.logging as log_utils
SOCKET_TIMEOUT = 6
# Socket handler
class ChatServiceMessageSocket:
"""
ChatServiceMessageSocket is a wrapper around websocket to forward messages from the
remote server to the ChatServiceManager.
"""
def __init__(self, server_url, port, message_callback):
"""
server_url: url at which the server is to be run
port: port for the socket to operate on
message_callback: function to be called on incoming message objects (format: message_callback(self, data))
"""
self.server_url = server_url
self.port = port
self.message_callback = message_callback
self.ws = None
self.last_pong = None
self.alive = False
# initialize the state
self.listen_thread = None
# setup the socket
self.keep_running = True
self._setup_socket()
def _safe_send(self, data, force=False):
if not self.alive and not force:
# Try to wait a second to send a packet
timeout = 1
while timeout > 0 and not self.alive:
time.sleep(0.1)
timeout -= 0.1
if not self.alive:
# don't try to send a packet if we're still dead
return False
try:
self.ws.send(data)
except websocket.WebSocketConnectionClosedException:
# The channel died mid-send, wait for it to come back up
return False
return True
def _ensure_closed(self):
try:
self.ws.close()
except websocket.WebSocketConnectionClosedException:
pass
def _send_world_alive(self):
"""
Registers world with the passthrough server.
"""
self._safe_send(
json.dumps(
{
'type': 'world_alive',
'content': {'id': 'WORLD_ALIVE', 'sender_id': 'world'},
}
),
force=True,
)
def _setup_socket(self):
"""
        Create socket handlers and register the socket.
"""
def on_socket_open(*args):
log_utils.print_and_log(logging.DEBUG, 'Socket open: {}'.format(args))
self._send_world_alive()
def on_error(ws, error):
try:
if error.errno == errno.ECONNREFUSED:
self._ensure_closed()
self.use_socket = False
raise Exception("Socket refused connection, cancelling")
else:
log_utils.print_and_log(
logging.WARN, 'Socket logged error: {}'.format(repr(error))
)
except BaseException:
if type(error) is websocket.WebSocketConnectionClosedException:
return # Connection closed is noop
log_utils.print_and_log(
logging.WARN,
'Socket logged error: {} Restarting'.format(repr(error)),
)
self._ensure_closed()
def on_disconnect(*args):
"""
Disconnect event is a no-op for us, as the server reconnects automatically
on a retry.
"""
log_utils.print_and_log(
logging.INFO, 'World server disconnected: {}'.format(args)
)
self.alive = False
self._ensure_closed()
def on_message(*args):
"""
Incoming message handler for messages from the FB user.
"""
packet_dict = json.loads(args[1])
if packet_dict['type'] == 'conn_success':
self.alive = True
return # No action for successful connection
if packet_dict['type'] == 'pong':
self.last_pong = time.time()
return # No further action for pongs
message_data = packet_dict['content']
log_utils.print_and_log(
logging.DEBUG, 'Message data received: {}'.format(message_data)
)
for message_packet in message_data['entry']:
for message in message_packet['messaging']:
self.message_callback(message)
def run_socket(*args):
url_base_name = self.server_url.split('https://')[1]
while self.keep_running:
try:
sock_addr = "wss://{}/".format(url_base_name)
self.ws = websocket.WebSocketApp(
sock_addr,
on_message=on_message,
on_error=on_error,
on_close=on_disconnect,
)
self.ws.on_open = on_socket_open
self.ws.run_forever(ping_interval=1, ping_timeout=0.9)
except Exception as e:
log_utils.print_and_log(
logging.WARN,
'Socket error {}, attempting restart'.format(repr(e)),
)
time.sleep(0.2)
# Start listening thread
self.listen_thread = threading.Thread(
target=run_socket, name='Main-Socket-Thread'
)
self.listen_thread.daemon = True
self.listen_thread.start()
time.sleep(1.2)
while not self.alive:
try:
self._send_world_alive()
except Exception:
pass
time.sleep(0.8)
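# A minimal usage sketch (hypothetical URL and port; in practice the
# ChatServiceManager constructs this object and supplies the callback):
#
#     def on_message(message):
#         print('received:', message)
#
#     socket = ChatServiceMessageSocket('https://example.com', 443, on_message)
#
# Note that the constructor blocks until the socket reports itself alive.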
|
facebookresearch/ParlAI
|
parlai/chat_service/core/socket.py
|
Python
|
mit
| 5,922
|
from os import path
from .taskqueue import TaskQueueClient
__all__ = ['TaskQueueClient']
with open(path.join(path.dirname(__file__), 'version.txt')) as fp:
__version__ = fp.read().strip()
|
guokr/asynx
|
asynx/asynx/__init__.py
|
Python
|
mit
| 194
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('finance', '0003_auto_20140929_0130'),
]
operations = [
migrations.RemoveField(
model_name='extract',
name='provider',
),
]
|
junqueira/balance
|
finance/migrations/0004_remove_extract_provider.py
|
Python
|
mit
| 355
|
import re
from django.core.urlresolvers import reverse
def test_view_with_scss_file(client, precompiled):
"""
Test view that renders *SCSS file* that *imports SCSS file from another Django app*.
:param client: ``pytest-django`` fixture: Django test client
:param precompiled: custom fixture that asserts pre-compiled content
"""
response = client.get(reverse('scss-file'))
assert response.status_code == 200
assert precompiled('app/layout.scss', 'css').strip() == \
'.title {\n font: bold 30px Arial, sans-serif;\n}'
def test_view_with_inline_scss(client):
"""
Test view that renders *inline SCSS* that *imports SCSS file from another Django app*.
:param client: ``pytest-django`` fixture: Django test client
"""
response = client.get(reverse('scss-inline'))
assert response.status_code == 200
assert re.search(
r'<style type="text/css">.title \{\n\s*font: bold 30px Arial, sans-serif;\n\}\s*</style>',
response.content.decode('utf8')
)
def test_view_with_es6_file(client, precompiled):
"""
Test view that renders *ES6 file* into *ES5 file*.
:param client: ``pytest-django`` fixture: Django test client
:param precompiled: custom fixture that asserts pre-compiled content
"""
response = client.get(reverse('es6-file'))
assert response.status_code == 200
assert precompiled('app/scripts.js', 'js') == (
'(function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=='
'"function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f='
'new Error("Cannot find module \'"+o+"\'");throw f.code="MODULE_NOT_FOUND",f}'
'var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];'
'return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=='
'"function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:['
'function(require,module,exports){\n'
'\'use strict\';\n'
'\n'
'var _framework = require(\'base/framework\');\n'
'\n'
'var _framework2 = _interopRequireDefault(_framework);\n'
'\n'
'function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : '
'{ default: obj }; }\n'
'\n'
'new _framework2.default();\n'
'new _framework2.default(\'1.0.1\');\n'
'\n'
'},{"base/framework":2}],2:[function(require,module,exports){\n'
'\'use strict\';\n'
'\n'
'Object.defineProperty(exports, "__esModule", {\n'
' value: true\n'
'});\n'
'\n'
'function _classCallCheck(instance, Constructor) {'
' if (!(instance instanceof Constructor)) {'
' throw new TypeError("Cannot call a class as a function"); } }\n'
'\n'
'var version = exports.version = \'1.0\';\n'
'\n'
'var _class = function _class(customVersion) {\n'
' _classCallCheck(this, _class);\n'
'\n'
' console.log(\'Framework v\' + (customVersion || version) + \' initialized\');\n'
'};\n'
'\n'
'exports.default = _class;\n'
'\n'
'},{}]},{},[1]);\n'
)
def test_view_with_inline_es6(client):
"""
Test view that renders *inline ES6* into *inline ES5*.
:param client: ``pytest-django`` fixture: Django test client
"""
response = client.get(reverse('es6-inline'))
assert response.status_code == 200
assert b'"use strict";\n' \
b'\n' \
b'var square = function square(x) {\n' \
b' return x * x;\n' \
b'};\n'\
b'console.log("Square of 2:", square(2));' in response.content
|
kottenator/django-compressor-toolkit
|
tests/integration_tests/test_views.py
|
Python
|
mit
| 3,738
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('studygroups', '0004_studygroupsignup_mobile'),
]
operations = [
migrations.AddField(
model_name='course',
name='image',
field=models.CharField(default=' ', max_length=32),
preserve_default=False,
),
]
|
p2pu/learning-circles
|
studygroups/migrations/0005_course_image.py
|
Python
|
mit
| 418
|
from django.core.management.base import BaseCommand
from django_brfied.models import UnidadeFederativa, Municipio
from ...migrations import UNIDADE_FEDERATIVA_ROWS, MUNICIPIO_ROWS
class Command(BaseCommand):
    help = "Imports the UFs (federative units) and municipalities into the database"
# requires_system_checks = False
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# self.ignore_patterns = []
#
# def add_arguments(self, parser):
# parser.add_argument(
# '--no-default-ignore', action='store_false', dest='use_default_ignore_patterns',
# help="Don't ignore the common private glob-style patterns (defaults to 'CVS', '.*' and '*~').",
# )
#
# def set_options(self, **options):
# """
# Set instance variables based on an options dict
# """
# if options['use_default_ignore_patterns']:
# ignore_patterns += apps.get_app_config('staticfiles').ignore_patterns
def handle(self, **options):
        print('Importing UFs')
for uf in UNIDADE_FEDERATIVA_ROWS:
UnidadeFederativa.objects.\
update_or_create(sigla=uf[0], defaults={'nome': uf[1], 'codigo': uf[2], 'regiao': uf[3]})
        print('UFs imported\n')
        print('Importing municipalities')
i = 1
q = len(MUNICIPIO_ROWS)
for m in MUNICIPIO_ROWS:
            if i % 500 == 0:
                print('\tImported %3.2f%%' % ((i / q) * 100))
Municipio.objects.update_or_create(codigo=m[0], defaults={'nome': m[1], 'uf_id': m[2]})
i += 1
        print('Municipalities imported')
|
kelsoncm/django_brfied
|
django_brfied/management/commands/importar_uf_municipio.py
|
Python
|
mit
| 1,647
|
"""
Author: Sam Ginzburg
Description: This script reads in a blast2go sequence table output of GO Term mappings and calculates the frequencies of GO Terms at a specific GO Level.
Example run:
python generate_pie_charts.py [blast2go_file.txt] [GO Level]
"""
import sys
from GeneOntologyLibrary import obo_parser
from GeneOntologyLibrary import go_term as gt
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
def parse_go_terms_by_go(go_counts, go, go_type, term_name):
    if go_type == "molecular function":
        if go_counts[0].get(go) is None:
            go_counts[0][go] = 1
        else:
            go_counts[0][go] += 1
    elif go_type == "biological process":
        if go_counts[1].get(go) is None:
            go_counts[1][go] = 1
        else:
            go_counts[1][go] += 1
    elif go_type == "cellular component":
        if go_counts[2].get(go) is None:
            go_counts[2][go] = 1
        else:
            go_counts[2][go] += 1
def parse_go_mappped_file(go_counts, string):
#print (string)
if ";" in string:
string = string.split(";") # splits the column by ;
else:
string = [string]
#print("splitstring: " + str(split_string))
return_list = list()
for go_term in string:
go_term = go_term.strip()
if go_term == "-":
continue
        if go_term.startswith("P:") or go_term.startswith("Biological Process:"):
            go_term = go_term.split(":", 1)[1]  # strip the whole prefix, not just 2 chars
            if go_counts[0].get(go_term) is None:
                go_counts[0][go_term] = 1
            else:
                go_counts[0][go_term] += 1
        elif go_term.startswith("F:") or go_term.startswith("Molecular Function:"):
            go_term = go_term.split(":", 1)[1]
            if go_counts[1].get(go_term) is None:
                go_counts[1][go_term] = 1
            else:
                go_counts[1][go_term] += 1
        elif go_term.startswith("C:") or go_term.startswith("Cellular Component:"):
            go_term = go_term.split(":", 1)[1]
            if go_counts[2].get(go_term) is None:
                go_counts[2][go_term] = 1
            else:
                go_counts[2][go_term] += 1
#print (go_term)
return_list.append(go_term)
return return_list
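# Illustrative input (hypothetical annotation column): the string
#   "P:metabolic process; F:catalytic activity; C:membrane"
# returns ['metabolic process', 'catalytic activity', 'membrane'] and
# increments the matching per-category counters in go_counts.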
"""
def filter_by_level(go_dict, level, parser):
for key in dict(go_dict):
go_term_object = parser.go_term_by_name_dict.get(key[2:])
if go_term_object is None:
print ("None -- error has occured:\t" + key[2:])
exit()
else:
print (key)
print ("level:\t" + str(go_term_object[0].calculate_level()))
if go_term_object[0].calculate_level() != int(level):
del go_dict[key]
"""
def filter_by_level(go_dict, level, parser, go_dict_type):
if go_dict_type == "biological_process":
filtered = [x for x in set(go_dict.keys()) & set([gterm.name for gterm in set(parser.get_biological_process_go_terms_by_level(int(level)))])]
if go_dict_type == "molecular_function":
filtered = [x for x in set(go_dict.keys()) & set([gterm.name for gterm in set(parser.get_molecular_function_go_terms_by_level(int(level)))])]
if go_dict_type == "cellular_component":
filtered = [x for x in set(go_dict.keys()) & set([gterm.name for gterm in set(parser.get_cellular_component_go_terms_by_level(int(level)))])]
#print ("filtered:\t" + str(filtered))
ret_dict = dict()
for key in filtered:
ret_dict[key] = go_dict[key]
return ret_dict
def generate_counts(go_dict, parser):
#print (sum(go_dict.values()))
#print (len(go_dict))
for key in dict(go_dict):
go_term_object = parser.go_term_by_name_dict.get(key)
if go_term_object is None:
print ("None -- error has occured:\t" + key)
exit()
else:
for x in range(0, go_dict[key]):
gt.propogate_go_term(go_term_object[0])
#exit()
def save_graph(go_dict, chart_type, level, parser):
fontP = FontProperties()
fontP.set_size('small')
# The slices will be ordered and plotted counter-clockwise.
figure = plt.figure(figsize=(10,10))
labels = go_dict.keys()
sizes = [parser.go_term_by_name_dict.get(x)[0].encountered_count for x in go_dict]
#sizes = go_dict.values()
#print (chart_type)
#print (zip(labels, sizes))
#print (sum(sizes))
plt.title('Graph Level %s Pie Chart [%s]' % (level, chart_type))
total = sum(sizes)
    labels = [l + " " + str(float(s)/total * 100)[0:4] + "% (" + str(s) + ")" for l, s in zip(labels, sizes)]
patches, texts = plt.pie(sizes, startangle=90)
plt.legend(patches, labels, prop = fontP, loc="best")
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
#plt.tight_layout()
#plt.show()
print (chart_type)
out = [str(x) + "\t" + str(parser.go_term_by_name_dict.get(x)[0].encountered_count) for x in go_dict]
for x in out:
print (x)
print ("\n")
figure.savefig(chart_type+"_level_"+level+'.png',aspect='auto',dpi=100)
if __name__ == '__main__':
args = sys.argv
args = args[1:]
# these dicts store the name of the GO term and the number of times it occurs
combined = dict()
biological_process = dict()
molecular_function = dict()
cellular_component = dict()
go_counts = [biological_process, molecular_function, cellular_component]
gene_go_term_dict = dict() # key = SeqName description, value = list of gene ontology terms corresponding to the gene
with open(args[0], "r") as f:
for line in f:
line = line.split("\t")
gene_go_term_dict[line[0]] = parse_go_mappped_file(go_counts, line[7])
"""
# remove all genes with no go terms at all
for key in dict(gene_go_term_dict):
if len(gene_go_term_dict[key]) < 1:
del gene_go_term_dict[key]
"""
#print (gene_go_term_dict)
#print (len(gene_go_term_dict))
print ("Number of unique biological processes go terms:\t" + str(len(biological_process)))
print ("Number of unique molecular function go terms:\t" + str(len(molecular_function)))
    print ("Number of unique cellular component go terms:\t" + str(len(cellular_component)))
print ("Number of unique overall go terms:\t" + str(len(biological_process) + len(molecular_function) + len(cellular_component)))
print ("Number of molecular function go terms:\t" + str(sum(molecular_function.values())))
print ("Number of biological process go terms:\t" + str(sum(biological_process.values())))
print ("Number of cellular component go terms:\t" + str(sum(cellular_component.values())))
parser = obo_parser("go.obo")
parser.build_obo_file()
generate_counts(biological_process, parser)
generate_counts(molecular_function, parser)
generate_counts(cellular_component, parser)
#print (sum(biological_process.values()))
biological_process = filter_by_level(biological_process,args[1], parser, "biological_process")
molecular_function = filter_by_level(molecular_function,args[1], parser, "molecular_function")
cellular_component = filter_by_level(cellular_component,args[1], parser, "cellular_component")
"""
print (biological_process.keys())
print(parser.go_term_by_name_dict.get("biological_process")[0].encountered_count)
print (molecular_function.keys())
print(parser.go_term_by_name_dict.get("molecular_function")[0].encountered_count)
"""
#save_graph(molecular_function, "Molecular Function", str(2), parser)
combined = dict(biological_process)
combined.update(molecular_function)
combined.update(cellular_component)
print ("Number of unique biological processes go terms after filtering by level:\t" + str(len(biological_process)))
print ("Number of unique molecular function go terms after filtering by level:\t" + str(len(molecular_function)))
    print ("Number of unique cellular component go terms after filtering by level:\t" + str(len(cellular_component)))
print ("Number of unique overall go terms after filtering by level:\t" + str(len(combined)))
print ("Number of molecular function go terms after filtering by level:\t" + str(sum(molecular_function.values())))
print ("Number of biological process go terms after filtering by level:\t" + str(sum(biological_process.values())))
print ("Number of cellular component go terms after filtering by level:\t" + str(sum(cellular_component.values())))
"""
out = [str(x) + "\t" + str(parser.go_term_by_name_dict.get(x)[0].encountered_count) for x in cellular_component]
for x in out:
print (x)
"""
save_graph(biological_process, "Biological Process", args[1], parser)
save_graph(molecular_function, "Molecular Function", args[1], parser)
save_graph(cellular_component, "Cellular Component", args[1], parser)
save_graph(combined, "All", args[1], parser)
|
SamGinzburg/GeneOntologyTools
|
generate_pie_charts.py
|
Python
|
mit
| 8,151
|
from DemoFramework import DemoFramework
from LUIVerticalLayout import LUIVerticalLayout
from LUIFrame import LUIFrame
from LUILabel import LUILabel
from LUIButton import LUIButton
from LUIObject import LUIObject
import random
f = DemoFramework()
f.prepare_demo("LUIFrame")
# Constructor
f.add_constructor_parameter("width", "200")
f.add_constructor_parameter("height", "200")
f.add_constructor_parameter("innerPadding", "5")
f.add_constructor_parameter("scrollable", "False")
f.add_constructor_parameter("style", "UIFrame.Raised")
# Functions
# Events
f.construct_sourcecode("LUIFrame")
# Construct a new frame
frame = LUIFrame(parent=f.get_widget_node())
layout = LUIVerticalLayout(parent=frame, spacing=5)
layout.add(LUILabel(text="This is some frame ..", color=(0.2, 0.6, 1.0, 1.0), font_size=20))
layout.add(LUILabel(text="It can contain arbitrary elements."))
layout.add(LUILabel(text="For example this button:"))
layout.add(LUIButton(text="Fancy button"))
# frame.fit_to_children()
f.set_actions({
"Resize to 300x160": lambda: frame.set_size(300, 160),
"Fit to children": lambda: frame.clear_size(),
})
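# run() is Panda3D's global main-loop entry point; it is available here
# presumably because DemoFramework creates a ShowBase instance, which
# installs run() into builtins.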
run()
|
tobspr/LUI
|
Demos/B_Frame.py
|
Python
|
mit
| 1,147
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
meza.io
~~~~~~~
Provides methods for reading/writing/processing tabular formatted files
Examples:
basic usage::
>>> from meza.io import read_csv
>>>
>>> path = p.join(DATA_DIR, 'test.csv')
>>> csv_records = read_csv(path)
>>> csv_header = next(csv_records).keys()
>>> next(csv_records)['Some Value'] == '100'
True
"""
import itertools as it
import sys
import hashlib
import sqlite3
import json
import os
from os import path as p
from datetime import time
from mmap import mmap
from collections import deque
from subprocess import check_output, check_call, Popen, PIPE, CalledProcessError
from http import client
from csv import Error as csvError
from functools import partial
from codecs import iterdecode, iterencode, StreamReader
from itertools import zip_longest
import yaml
import xlrd
import pygogo as gogo
from bs4 import BeautifulSoup, FeatureNotFound
from ijson import items
from chardet.universaldetector import UniversalDetector
from xlrd import (
XL_CELL_DATE, XL_CELL_EMPTY, XL_CELL_NUMBER, XL_CELL_BOOLEAN,
XL_CELL_ERROR)
from xlrd.xldate import xldate_as_datetime as xl2dt
from io import StringIO, TextIOBase, BytesIO, open
from . import (
fntools as ft, process as pr, unicsv as csv, dbf, ENCODING, BOM, DATA_DIR)
# pylint: disable=C0103
logger = gogo.Gogo(__name__, monolog=True, verbose=True).logger
# pylint: disable=C0103
encode = lambda iterable: (s.encode(ENCODING) for s in iterable)
chain = lambda iterable: it.chain.from_iterable(iterable or [])
NEWLINES = {b'\n', b'\r', b'\r\n', '\n', '\r', '\r\n'}
def groupby_line(iterable):
return it.groupby(iterable, lambda s: s not in NEWLINES)
class IterStringIO(TextIOBase):
"""A lazy StringIO that reads a generator of strings.
https://stackoverflow.com/a/32020108/408556
https://stackoverflow.com/a/20260030/408556
"""
# pylint: disable=super-init-not-called
def __init__(self, iterable=None, bufsize=4096, decode=False, **kwargs):
""" IterStringIO constructor
Args:
iterable (Seq[str]): Iterable of strings or bytes
bufsize (Int): Buffer size for seeking
decode (bool): Decode the text into a string (default: False)
Examples:
>>> StringIO(iter('Hello World')).read(5) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError:...
>>> IterStringIO(iter('Hello World')).read(5)
b'Hello'
>>> i = IterStringIO(iter('one\\ntwo\\n'))
>>> list(next(i.lines)) == [b'o', b'n', b'e']
True
>>> decoded = IterStringIO(iter('Hello World'), decode=True)
>>> decoded.read(5) == 'Hello'
True
"""
iterable = iterable if iterable else []
chained = chain(iterable)
self.iter = encode(chained)
self.decode = decode
self.bufsize = bufsize
self.last = deque(bytearray(), self.bufsize)
self.pos = 0
def __next__(self):
return self._read(next(self.lines))
def __iter__(self):
return self
@property
def lines(self):
"""Read all the lines of content"""
# TODO: what about a csv with embedded newlines?
groups = groupby_line(self.iter)
return (g for k, g in groups if k)
def _read(self, iterable, num=None, newline=True):
"""Helper method used to read content"""
content = it.islice(iterable, num) if num else iterable
byte = ft.byte(content)
self.last.extend(byte)
self.pos += num or len(byte)
if newline:
self.last.append('\n')
return byte.decode(ENCODING) if self.decode else bytes(byte)
def write(self, iterable):
"""Write the content"""
chained = chain(iterable)
self.iter = it.chain(self.iter, encode(chained))
def read(self, num=None):
"""Read the content"""
return self._read(self.iter, num, False)
def readline(self, num=None):
"""Read a line of content"""
return self._read(next(self.lines), num)
def readlines(self):
"""Read all the lines of content"""
return map(self._read, self.lines)
    def seek(self, num):
        """Go to a specific position within a file"""
        next_pos = self.pos + 1
        beg_buf = max([0, self.pos - self.bufsize])
        if num <= beg_buf:
            # Target is at or before the start of the look-back buffer:
            # push the whole buffer back onto the stream and clear it.
            self.iter = it.chain(self.last, self.iter)
            self.last = deque([], self.bufsize)
        elif self.pos > num > beg_buf:
            # Seeking backwards within the buffer: pop the overshot bytes
            # and push them back onto the stream in their original order.
            extend = [self.last.pop() for _ in range(self.pos - num)]
            self.iter = it.chain(reversed(extend), self.iter)
        elif num == self.pos:
            pass  # already at the requested position
        elif num == next_pos:
            # One byte forward: consume it into the buffer.
            self.last.append(next(self.iter))
        elif num > next_pos:
            # Several bytes forward: consume and buffer the skipped bytes.
            pos = num - self.pos
            self.last.extend(it.islice(self.iter, 0, pos))
        self.pos = beg_buf if num < beg_buf else num
def tell(self):
"""Get the current position within a file"""
return self.pos
class Reencoder(StreamReader):
"""Recodes a file like object from one encoding to another.
"""
def __init__(self, f, fromenc=ENCODING, toenc=ENCODING, **kwargs):
""" Reencoder constructor
Args:
f (obj): File-like object
fromenc (str): The input encoding.
toenc (str): The output encoding.
Kwargs:
remove_BOM (bool): Remove Byte Order Marker (default: True)
decode (bool): Decode the text into a string (default: False)
Examples:
>>> encoding = 'utf-16-be'
>>> eff = p.join(DATA_DIR, 'utf16_big.csv')
>>>
>>> with open(eff, 'rb') as f:
... reenc = Reencoder(f, encoding)
... first = reenc.readline(keepends=False)
... first.decode('utf-8') == '\ufeffa,b,c'
... reenc.readlines()[1].decode('utf-8') == '4,5,ʤ'
True
True
>>> with open(eff, 'rb') as f:
... reenc = Reencoder(f, encoding, decode=True)
... reenc.readline(keepends=False) == '\ufeffa,b,c'
True
>>> with open(eff, 'rU', encoding=encoding) as f:
... reenc = Reencoder(f, remove_BOM=True)
... reenc.readline(keepends=False) == b'a,b,c'
... reenc.readline() == b'1,2,3\\n'
... reenc.readline().decode('utf-8') == '4,5,ʤ'
True
True
True
"""
self.fileno = f.fileno
first_line = next(f)
bytes_mode = isinstance(first_line, bytes)
decode = kwargs.get('decode')
rencode = not decode
if kwargs.get('remove_BOM'):
strip = BOM.encode(fromenc) if bytes_mode else BOM
first_line = first_line.lstrip(strip)
chained = it.chain([first_line], f)
if bytes_mode:
decoded = iterdecode(chained, fromenc)
self.binary = rencode
proper_newline = first_line.endswith(os.linesep.encode(fromenc))
else:
decoded = chained
self.binary = bytes_mode or rencode
proper_newline = first_line.endswith(os.linesep)
stream = iterencode(decoded, toenc) if rencode else decoded
if proper_newline:
self.stream = stream
else:
# TODO: make sure the read methods are consistent with
# proper_newline, e.g., `keepends`.
#
            # TODO: since the newline isn't recognized, `stream` contains
            # just one (very long) line. We pass this line in to iterate
            # over its chars
groups = groupby_line(next(stream))
if self.binary:
self.stream = (b''.join(g) for k, g in groups if k)
else:
self.stream = (''.join(g) for k, g in groups if k)
def __next__(self):
return next(self.stream)
def __iter__(self):
return self
def read(self, n=None):
stream = it.islice(self.stream, n) if n else self.stream
return b''.join(stream) if self.binary else ''.join(stream)
def readline(self, n=None, keepends=True):
line = next(self.stream)
return line if keepends else line.rstrip()
def readlines(self, sizehint=None):
return list(self.stream)
def tell(self):
pass
def reset(self):
pass
class BytesError(ValueError):
pass
def patch_http_response_read(func):
"""Patches httplib to read poorly encoded chunked data.
https://stackoverflow.com/a/14206036/408556
"""
def inner(*args):
"""inner"""
try:
return func(*args)
except client.IncompleteRead as err:
return err.partial
return inner
client.HTTPResponse.read = patch_http_response_read(client.HTTPResponse.read)
def _remove_bom_from_dict(row, bom):
"""Remove a byte order marker (BOM) from a dict"""
for k, v in row.items():
try:
if all([k, v, bom in k, bom in v]):
yield (k.lstrip(bom), v.lstrip(bom))
elif v and bom in v:
yield (k, v.lstrip(bom))
elif k and bom in k:
yield (k.lstrip(bom), v)
else:
yield (k, v)
except TypeError:
yield (k, v)
def _remove_bom_from_list(row, bom):
"""Remove a byte order marker (BOM) from a list"""
for pos, col in enumerate(row):
try:
if not pos and bom in col:
yield col.lstrip(bom)
else:
yield col
except TypeError:
yield col
def _remove_bom_from_scalar(row, bom):
"""Remove a byte order marker (BOM) from a scalar"""
try:
return row.lstrip(bom)
except AttributeError:
return row
def is_listlike(item):
"""Determine if a scalar is listlike"""
if hasattr(item, 'keys'):
listlike = False
else:
listlike = {'append', 'next', '__reversed__'}.intersection(dir(item))
return listlike
def remove_bom(row, bom):
"""Remove a byte order marker (BOM)"""
if is_listlike(row):
bomless = list(_remove_bom_from_list(row, bom))
else:
try:
# pylint: disable=R0204
bomless = dict(_remove_bom_from_dict(row, bom))
except AttributeError:
bomless = _remove_bom_from_scalar(row, bom)
return bomless
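# Illustrative examples (assuming bom == '\ufeff'):
#   remove_bom(['\ufeffa', 'b'], '\ufeff') -> ['a', 'b']
#   remove_bom({'\ufeffk': '\ufeffv'}, '\ufeff') -> {'k': 'v'}
#   remove_bom('\ufeffscalar', '\ufeff') -> 'scalar'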
def get_encoding(filepath):
"""
Examples:
>>> get_encoding(p.join(DATA_DIR, 'utf16_big.csv')) == 'UTF-16'
True
"""
with open(filepath, 'rb') as f:
encoding = detect_encoding(f)['encoding']
return encoding
def get_file_encoding(f, encoding=None, bytes_error=False):
"""Detects a file's encoding"""
if not encoding and hasattr(f, 'encoding'):
encoding = f.encoding
if not bytes_error:
# Set the encoding to None so that we can detect the correct one.
extra = (' ({})'.format(encoding)) if encoding else ''
logger.warning('%s was opened with the wrong encoding%s', f, extra)
encoding = None
if not encoding:
try:
f.seek(0)
except AttributeError:
pass
else:
try:
# See if we have bytes to avoid reopening the file
encoding = detect_encoding(f)['encoding']
except UnicodeDecodeError:
msg = 'Incorrectly encoded file, reopening with bytes to detect'
msg += ' encoding'
logger.warning(msg)
f.close()
encoding = get_encoding(f.name)
finally:
if hasattr(f, 'name'): # otherwise we can't reopen it
f.close()
if encoding:
logger.debug('detected encoding: %s', encoding)
return encoding
def sanitize_file_encoding(encoding):
if encoding == 'Windows-1252' and os.name == 'posix':
# based on my testing, when excel for mac saves a csv file as
# 'Windows-1252', you have to open with 'mac-roman' in order
# to properly read it
new_encoding = 'mac-roman'
msg = 'Detected a `Windows-1252` encoded file on a %s machine.'
msg += ' Setting encoding to `%s` instead.'
logger.warning(msg, sys.platform, new_encoding)
else:
new_encoding = encoding
return new_encoding
def is_binary(f):
try:
result = 'b' in f.mode
except AttributeError:
result = isinstance(f, BytesIO)
return result
def reopen(f, encoding):
sanitized_encoding = sanitize_file_encoding(encoding)
logger.debug('Reopening %s with encoding: %s', f, sanitized_encoding)
try:
decoded_f = open(f.name, encoding=sanitized_encoding)
except AttributeError:
f.seek(0)
decoded_f = iterdecode(f, sanitized_encoding)
return decoded_f
def _read_any(f, reader, args, pos=0, recursed=False, **kwargs):
"""Helper func to read a file or filepath"""
try:
if is_binary(f) and reader.__name__ != 'writer':
# only allow binary mode for writing files, not reading
message = "%s was opened in bytes mode but isn't being written to"
raise BytesError(message % f)
for num, line in enumerate(reader(f, *args, **kwargs)):
if num >= pos:
yield line
pos += 1
except (UnicodeDecodeError, csvError, BytesError) as err:
logger.warning(err)
encoding = kwargs.pop('encoding', None)
bytes_error = type(err).__name__ == 'BytesError'
if not recursed:
ekwargs = {'encoding': encoding, 'bytes_error': bytes_error}
encoding = get_file_encoding(f, **ekwargs)
if recursed or not encoding:
logger.error('Unable to detect proper file encoding')
return
decoded_f = reopen(f, encoding)
try:
rkwargs = pr.merge([kwargs, {'pos': pos, 'recursed': True}])
for line in _read_any(decoded_f, reader, args, **rkwargs):
yield line
finally:
decoded_f.close()
def read_any(filepath, reader, mode='r', *args, **kwargs):
"""Reads a file or filepath
Args:
filepath (str): The file path or file like object.
reader (func): The processing function.
mode (Optional[str]): The file open mode (default: 'r').
kwargs (dict): Keyword arguments that are passed to the reader.
Kwargs:
encoding (str): File encoding.
See also:
`meza.io.read_csv`
`meza.io.read_fixed_fmt`
`meza.io.read_json`
`meza.io.read_geojson`
`meza.io.write`
`meza.io.hash_file`
Yields:
scalar: Result of applying the reader func to the file.
Examples:
>>> filepath = p.join(DATA_DIR, 'test.csv')
>>> reader = lambda f, **kw: (l.strip().split(',') for l in f)
>>> result = read_any(filepath, reader, 'r')
>>> next(result) == [
... 'Some Date', 'Sparse Data', 'Some Value', 'Unicode Test', '']
True
"""
if hasattr(filepath, 'read'):
if is_binary(filepath):
kwargs.setdefault('encoding', ENCODING)
else:
kwargs.pop('encoding', None)
for line in _read_any(filepath, reader, args, **kwargs):
yield remove_bom(line, BOM)
else:
encoding = None if 'b' in mode else kwargs.pop('encoding', ENCODING)
with open(filepath, mode, encoding=encoding) as f:
for line in _read_any(f, reader, args, **kwargs):
yield remove_bom(line, BOM)
def _read_csv(f, header=None, has_header=True, **kwargs):
"""Helps read a csv file.
Args:
f (obj): The csv file like object.
header (Seq[str]): Sequence of column names.
has_header (bool): Whether or not file has a header.
Kwargs:
first_col (int): The first column (default: 0).
Yields:
dict: A csv record.
See also:
`meza.io.read_csv`
Examples:
>>> filepath = p.join(DATA_DIR, 'test.csv')
>>> with open(filepath, 'r', encoding='utf-8') as f:
... sorted(next(_read_csv(f)).items()) == [
... ('Some Date', '05/04/82'),
... ('Some Value', '234'),
... ('Sparse Data', 'Iñtërnâtiônàližætiøn'),
... ('Unicode Test', 'Ādam')]
True
"""
first_col = kwargs.pop('first_col', 0)
if header and has_header:
next(f)
elif not (header or has_header):
raise ValueError('Either `header` or `has_header` must be specified.')
header = (list(it.repeat('', first_col)) + header) if first_col else header
reader = csv.DictReader(f, header, **kwargs)
# Remove empty keys
records = (dict(x for x in r.items() if x[0]) for r in reader)
# Remove empty rows
for row in records:
if any(v.strip() for v in row.values() if v):
yield row
def read_mdb(filepath, table=None, **kwargs):
"""Reads an MS Access file
Args:
filepath (str): The mdb file path.
table (str): The table to load (default: None, the first found table).
kwargs (dict): Keyword arguments that are passed to the csv reader.
Kwargs:
sanitize (bool): Underscorify and lowercase field names
(default: False).
dedupe (bool): Deduplicate field names (default: False).
ignorecase (bool): Treat file name as case insensitive (default: true).
Yields:
dict: A row of data whose keys are the field names.
Raises:
TypeError: If unable to read the db file.
Examples:
>>> filepath = p.join(DATA_DIR, 'test.mdb')
>>> records = read_mdb(filepath, sanitize=True)
>>> expected = {
... 'surname': 'Aaron',
... 'forenames': 'William',
... 'freedom': '07/03/60 00:00:00',
... 'notes': 'Order of Court',
... 'surname_master_or_father': '',
... 'how_admitted': 'Redn.',
... 'id_no': '1',
... 'forenames_master_or_father': '',
... 'remarks': '',
... 'livery': '',
... 'date_of_order_of_court': '06/05/60 00:00:00',
... 'source_ref': 'MF 324'}
>>> first_row = next(records)
>>> (expected == first_row) if first_row else True
True
"""
args = ['mdb-tables', '-1', filepath]
# Check if 'mdb-tools' is installed on system
try:
check_output(args)
except OSError:
logger.error(
'You must install [mdbtools]'
'(http://sourceforge.net/projects/mdbtools/) in order to use '
'this function')
yield
return
except CalledProcessError:
raise TypeError('{} is not readable by mdbtools'.format(filepath))
sanitize = kwargs.pop('sanitize', None)
dedupe = kwargs.pop('dedupe', False)
table = table or check_output(args).splitlines()[0]
pkwargs = {'stdout': PIPE, 'bufsize': 1, 'universal_newlines': True}
# https://stackoverflow.com/a/2813530/408556
# https://stackoverflow.com/a/17698359/408556
with Popen(['mdb-export', filepath, table], **pkwargs).stdout as pipe:
first_line = StringIO(str(pipe.readline()))
names = next(csv.reader(first_line, **kwargs))
uscored = ft.underscorify(names) if sanitize else names
header = list(ft.dedupe(uscored) if dedupe else uscored)
for line in iter(pipe.readline, b''):
next_line = StringIO(str(line))
values = next(csv.reader(next_line, **kwargs))
yield dict(zip(header, values))
def read_dbf(filepath, **kwargs):
"""Reads a dBase, Visual FoxPro, or FoxBase+ file
Args:
filepath (str): The dbf file path or file like object.
kwargs (dict): Keyword arguments that are passed to the DBF reader.
Kwargs:
load (bool): Load all records into memory (default: false).
encoding (str): Character encoding (default: None, parsed from
the `language_driver`).
sanitize (bool): Underscorify and lowercase field names
(default: False).
ignorecase (bool): Treat file name as case insensitive (default: true).
ignore_missing_memofile (bool): Suppress `MissingMemoFile` exceptions
(default: False).
Yields:
OrderedDict: A row of data whose keys are the field names.
Raises:
MissingMemoFile: If unable to find the memo file.
DBFNotFound: If unable to find the db file.
Examples:
>>> filepath = p.join(DATA_DIR, 'test.dbf')
>>> records = read_dbf(filepath, sanitize=True)
>>> next(records) == {
... 'awater10': 12416573076,
... 'aland10': 71546663636,
... 'intptlat10': '+47.2400052',
... 'lsad10': 'C2',
... 'cd111fp': '08',
... 'namelsad10': 'Congressional District 8',
... 'funcstat10': 'N',
... 'statefp10': '27',
... 'cdsessn': '111',
... 'mtfcc10': 'G5200',
... 'geoid10': '2708',
... 'intptlon10': '-092.9323194'}
True
"""
kwargs['lowernames'] = kwargs.pop('sanitize', None)
return iter(dbf.DBF2(filepath, **kwargs))
def read_sqlite(filepath, table=None):
"""Reads a sqlite file.
Args:
filepath (str): The sqlite file path
table (str): The table to load (default: None, the first found table).
Yields:
dict: A row of data whose keys are the field names.
Raises:
NotFound: If unable to find the resource.
See also:
`meza.io.read_any`
Examples:
>>> filepath = p.join(DATA_DIR, 'test.sqlite')
>>> records = read_sqlite(filepath)
>>> next(records) == {
... 'sparse_data': 'Iñtërnâtiônàližætiøn',
... 'some_date': '05/04/82',
... 'some_value': 234,
... 'unicode_test': 'Ādam'}
True
"""
con = sqlite3.connect(filepath)
con.row_factory = sqlite3.Row
cursor = con.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type = 'table'")
    tables = [row[0] for row in cursor.fetchall()]
    if not table or table not in tables:
        table = tables[0]
cursor.execute('SELECT * FROM {}'.format(table))
return map(dict, cursor)
def read_csv(filepath, mode='r', **kwargs):
"""Reads a csv file.
Args:
filepath (str): The csv file path or file like object.
mode (Optional[str]): The file open mode (default: 'r').
kwargs (dict): Keyword arguments that are passed to the csv reader.
Kwargs:
delimiter (str): Field delimiter (default: ',').
quotechar (str): Quote character (default: '"').
encoding (str): File encoding.
has_header (bool): Has header row (default: True).
custom_header (List[str]): Custom header names (default: None).
first_row (int): First row (zero based, default: 0).
first_col (int): First column (zero based, default: 0).
sanitize (bool): Underscorify and lowercase field names
(default: False).
dedupe (bool): Deduplicate field names (default: False).
Yields:
dict: A row of data whose keys are the field names.
Raises:
NotFound: If unable to find the resource.
See also:
`meza.io.read_any`
`meza.io._read_csv`
Examples:
>>> filepath = p.join(DATA_DIR, 'test.csv')
>>> records = read_csv(filepath, sanitize=True)
>>> next(records) == {
... 'sparse_data': 'Iñtërnâtiônàližætiøn',
... 'some_date': '05/04/82',
... 'some_value': '234',
... 'unicode_test': 'Ādam'}
True
"""
def reader(f, **kwargs):
"""File reader"""
first_row = kwargs.pop('first_row', 0)
first_col = kwargs.pop('first_col', 0)
sanitize = kwargs.pop('sanitize', False)
dedupe = kwargs.pop('dedupe', False)
has_header = kwargs.pop('has_header', True)
custom_header = kwargs.pop('custom_header', None)
# position file pointer at the first row
list(it.islice(f, first_row))
first_line = StringIO(str(next(f)))
names = next(csv.reader(first_line, **kwargs))
if has_header or custom_header:
names = custom_header if custom_header else names
stripped = (name for name in names if name.strip())
uscored = ft.underscorify(stripped) if sanitize else stripped
header = list(ft.dedupe(uscored) if dedupe else uscored)
if not has_header:
# reposition file pointer at the first row
try:
f.seek(0)
except AttributeError:
                msg = 'Non seekable files must have either a specified or '
                msg += 'custom header.'
logger.error(msg)
raise
list(it.islice(f, first_row))
if not (has_header or custom_header):
header = ['column_%i' % (n + 1) for n in range(len(names))]
return _read_csv(f, header, False, first_col=first_col, **kwargs)
return read_any(filepath, reader, mode, **kwargs)
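# A minimal usage sketch for the headerless case (the data below is
# hypothetical, mirroring the doctest style used in this module):
#
# >>> from io import StringIO
# >>> f = StringIO('1,apple\n2,banana\n')
# >>> records = read_csv(f, has_header=False, custom_header=['id', 'fruit'])
# >>> next(records) == {'id': '1', 'fruit': 'apple'}
# True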
def read_tsv(filepath, mode='r', **kwargs):
"""Reads a csv file.
Args:
filepath (str): The tsv file path or file like object.
mode (Optional[str]): The file open mode (default: 'r').
kwargs (dict): Keyword arguments that are passed to the csv reader.
Kwargs:
quotechar (str): Quote character (default: '"').
encoding (str): File encoding.
has_header (bool): Has header row (default: True).
first_row (int): First row (zero based, default: 0).
first_col (int): First column (zero based, default: 0).
sanitize (bool): Underscorify and lowercase field names
(default: False).
dedupe (bool): Deduplicate field names (default: False).
Yields:
dict: A row of data whose keys are the field names.
Raises:
NotFound: If unable to find the resource.
See also:
`meza.io.read_any`
Examples:
>>> filepath = p.join(DATA_DIR, 'test.tsv')
>>> records = read_tsv(filepath, sanitize=True)
>>> next(records) == {
... 'sparse_data': 'Iñtërnâtiônàližætiøn',
... 'some_date': '05/04/82',
... 'some_value': '234',
... 'unicode_test': 'Ādam'}
True
"""
return read_csv(filepath, mode, dialect='excel-tab', **kwargs)
def read_fixed_fmt(filepath, widths=None, mode='r', **kwargs):
"""Reads a fixed-width csv file.
Args:
filepath (str): The fixed width formatted file path or file like object.
widths (List[int]): The zero-based 'start' position of each column.
mode (Optional[str]): The file open mode (default: 'r').
kwargs (dict): Keyword arguments that are passed to the csv reader.
Kwargs:
has_header (bool): Has header row (default: False).
first_row (int): First row (zero based, default: 0).
first_col (int): First column (zero based, default: 0).
sanitize (bool): Underscorify and lowercase field names
(default: False).
dedupe (bool): Deduplicate field names (default: False).
Yields:
dict: A row of data whose keys are the field names.
Raises:
NotFound: If unable to find the resource.
See also:
`meza.io.read_any`
Examples:
>>> filepath = p.join(DATA_DIR, 'fixed.txt')
>>> widths = [0, 18, 29, 33, 38, 50]
>>> records = read_fixed_fmt(filepath, widths)
>>> next(records) == {
... 'column_1': 'Chicago Reader',
... 'column_2': '1971-01-01',
... 'column_3': '40',
... 'column_4': 'True',
... 'column_5': '1.0',
... 'column_6': '04:14:001971-01-01T04:14:00'}
True
"""
def reader(f, **kwargs):
"""File reader"""
sanitize = kwargs.get('sanitize')
dedupe = kwargs.pop('dedupe', False)
has_header = kwargs.get('has_header')
first_row = kwargs.get('first_row', 0)
schema = tuple(zip_longest(widths, widths[1:]))
[next(f) for _ in range(first_row)]
if has_header:
line = next(f)
names = (_f for _f in (line[s:e].strip() for s, e in schema) if _f)
uscored = ft.underscorify(names) if sanitize else names
header = list(ft.dedupe(uscored) if dedupe else uscored)
else:
header = ['column_%i' % (n + 1) for n in range(len(widths))]
        zipped = list(zip(header, schema))  # materialize so every row can reuse it
        get_row = lambda line: {k: line[v[0]:v[1]].strip() for k, v in zipped}
return map(get_row, f)
return read_any(filepath, reader, mode, **kwargs)
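# How `widths` is interpreted (a sketch with hypothetical data): each entry
# is the zero-based *start* of a column, so [0, 4, 9] produces the slices
# [0:4], [4:9], and [9:] -- the final column is open ended via `zip_longest`.
#
# >>> from io import StringIO
# >>> f = StringIO('abcd12345xyz\n')
# >>> next(read_fixed_fmt(f, [0, 4, 9])) == {
# ...     'column_1': 'abcd', 'column_2': '12345', 'column_3': 'xyz'}
# True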
def sanitize_sheet(sheet, mode, first_col=0, **kwargs):
"""Formats content from xls/xslx files as strings according to its cell
type.
Args:
sheet (obj): `xlrd` sheet object.
mode (str): `xlrd` workbook datemode property.
kwargs (dict): Keyword arguments
first_col (int): The first column (default: 0).
Kwargs:
date_format (str): `strftime()` date format.
dt_format (str): `strftime()` datetime format.
time_format (str): `strftime()` time format.
Yields:
Tuple[int, str]: A tuple of (row_number, value).
Examples:
>>> filepath = p.join(DATA_DIR, 'test.xls')
>>> book = xlrd.open_workbook(filepath)
>>> sheet = book.sheet_by_index(0)
>>> sheet.row_values(1) == [
... 30075.0, 'Iñtërnâtiônàližætiøn', 234.0, 'Ādam', ' ']
True
>>> sanitized = sanitize_sheet(sheet, book.datemode)
>>> [v for i, v in sanitized if i == 1] == [
... '1982-05-04', 'Iñtërnâtiônàližætiøn', '234.0', 'Ādam', ' ']
True
"""
date_format = kwargs.get('date_format', '%Y-%m-%d')
dt_format = kwargs.get('dt_format', '%Y-%m-%d %H:%M:%S')
time_format = kwargs.get('time_format', '%H:%M:%S')
def time_func(value):
"""Converts an excel time into python time"""
args = xlrd.xldate_as_tuple(value, mode)[3:]
return time(*args).strftime(time_format)
switch = {
XL_CELL_DATE: lambda v: xl2dt(v, mode).strftime(date_format),
'datetime': lambda v: xl2dt(v, mode).strftime(dt_format),
'time': time_func,
XL_CELL_EMPTY: lambda v: '',
XL_CELL_NUMBER: str,
XL_CELL_BOOLEAN: lambda v: str(bool(v)),
XL_CELL_ERROR: lambda v: xlrd.error_text_from_code[v],
}
for i in range(sheet.nrows):
types = sheet.row_types(i)[first_col:]
values = sheet.row_values(i)[first_col:]
for _type, value in zip(types, values):
if _type == XL_CELL_DATE and value < 1:
_type = 'time'
            elif _type == XL_CELL_DATE and not value.is_integer():
_type = 'datetime'
yield (i, switch.get(_type, lambda v: v)(value))
# pylint: disable=unused-argument
def get_header(names, dedupe=False, sanitize=False, **kwargs):
"""Generates a header row"""
stripped = (name for name in names if name.strip())
uscored = ft.underscorify(stripped) if sanitize else stripped
return list(ft.dedupe(uscored) if dedupe else uscored)
def read_xls(filepath, **kwargs):
"""Reads an xls/xlsx file.
Args:
filepath (str): The xls/xlsx file path, file, or SpooledTemporaryFile.
kwargs (dict): Keyword arguments that are passed to the xls reader.
Kwargs:
sheet (int): Zero indexed sheet to open (default: 0)
has_header (bool): Has header row (default: True).
first_row (int): First row (zero based, default: 0).
first_col (int): First column (zero based, default: 0).
date_format (str): Date format passed to `strftime()` (default:
'%Y-%m-%d', i.e, 'YYYY-MM-DD').
encoding (str): File encoding. By default, the encoding is derived from
the file's `CODEPAGE` number, e.g., 1252 translates to `cp1252`.
sanitize (bool): Underscorify and lowercase field names
(default: False).
dedupe (bool): Deduplicate field names (default: False).
on_demand (bool): open_workbook() loads global data and returns without
releasing resources. At this stage, the only information available
about sheets is Book.nsheets and Book.sheet_names() (default:
False).
pad_rows (bool): Add empty cells so that all rows have the number of
columns `Sheet.ncols` (default: False).
Yields:
dict: A row of data whose keys are the field names.
Raises:
NotFound: If unable to find the resource.
Examples:
>>> filepath = p.join(DATA_DIR, 'test.xls')
>>> records = read_xls(filepath, sanitize=True)
>>> next(records) == {
... 'some_value': '234.0',
... 'some_date': '1982-05-04',
... 'sparse_data': 'Iñtërnâtiônàližætiøn',
... 'unicode_test': 'Ādam'}
True
"""
has_header = kwargs.get('has_header', True)
first_row = kwargs.get('first_row', 0)
xlrd_kwargs = {
'on_demand': kwargs.get('on_demand'),
'ragged_rows': not kwargs.get('pad_rows'),
'encoding_override': kwargs.get('encoding', True)
}
try:
contents = mmap(filepath.fileno(), 0)
book = xlrd.open_workbook(file_contents=contents, **xlrd_kwargs)
except AttributeError:
book = xlrd.open_workbook(filepath, **xlrd_kwargs)
sheet = book.sheet_by_index(kwargs.pop('sheet', 0))
# Get header row and remove empty columns
names = sheet.row_values(first_row)[kwargs.get('first_col', 0):]
if has_header:
header = get_header(names, kwargs.pop('dedupe', False), **kwargs)
else:
header = ['column_%i' % (n + 1) for n in range(len(names))]
# Convert to strings
sanitized = sanitize_sheet(sheet, book.datemode, **kwargs)
for key, group in it.groupby(sanitized, lambda v: v[0]):
if has_header and key == first_row:
continue
values = [g[1] for g in group]
# Remove empty rows
if any(v and v.strip() for v in values):
yield dict(zip(header, values))
def read_json(filepath, mode='r', path='item', newline=False):
"""Reads a json file (both regular and newline-delimited)
Args:
filepath (str): The json file path or file like object.
mode (Optional[str]): The file open mode (default: 'r').
path (Optional[str]): Path to the content you wish to read
(default: 'item', i.e., the root list). Note: `path` must refer to
a list.
newline (Optional[bool]): Interpret file as newline-delimited
(default: False).
Kwargs:
encoding (str): File encoding.
Returns:
Iterable: The parsed records
See also:
`meza.io.read_any`
Examples:
>>> filepath = p.join(DATA_DIR, 'test.json')
>>> records = read_json(filepath)
>>> next(records) == {
... 'text': 'Chicago Reader',
... 'float': 1,
... 'datetime': '1971-01-01T04:14:00',
... 'boolean': True,
... 'time': '04:14:00',
... 'date': '1971-01-01',
... 'integer': 40}
True
"""
reader = lambda f, **kw: map(json.loads, f) if newline else items(f, path)
return read_any(filepath, reader, mode)
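# Newline-delimited usage (a sketch; the file contents are hypothetical).
# With `newline=True` each line is parsed independently by `json.loads`, so
# no enclosing JSON array is needed:
#
# >>> from io import StringIO
# >>> records = read_json(StringIO('{"a": 1}\n{"a": 2}\n'), newline=True)
# >>> next(records) == {'a': 1}
# True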
def get_point(coords, lat_first):
"""Converts GeoJSON coordinates into a point tuple"""
if lat_first:
point = (coords[1], coords[0])
else:
point = (coords[0], coords[1])
return point
def gen_records(_type, record, coords, properties, **kwargs):
"""GeoJSON record generator"""
lat_first = kwargs.get('lat_first')
if _type == 'Point':
record['lon'], record['lat'] = get_point(coords, lat_first)
yield pr.merge([record, properties])
elif _type == 'LineString':
for point in coords:
record['lon'], record['lat'] = get_point(point, lat_first)
yield pr.merge([record, properties])
elif _type == 'Polygon':
for pos, poly in enumerate(coords):
for point in poly:
record['lon'], record['lat'] = get_point(point, lat_first)
record['pos'] = pos
yield pr.merge([record, properties])
else:
raise TypeError('Invalid geometry type {}.'.format(_type))
def read_geojson(filepath, key='id', mode='r', **kwargs):
"""Reads a geojson file
Args:
filepath (str): The geojson file path or file like object.
key (str): GeoJSON Feature ID (default: 'id').
mode (Optional[str]): The file open mode (default: 'r').
Kwargs:
lat_first (bool): Latitude listed as first coordinate (default: False).
encoding (str): File encoding.
Returns:
Iterable: The parsed records
Raise:
TypeError if no features list or invalid geometry type.
See also:
`meza.io.read_any`
`meza.convert.records2geojson`
Examples:
>>> from decimal import Decimal
>>> filepath = p.join(DATA_DIR, 'test.geojson')
>>> records = read_geojson(filepath)
>>> next(records) == {
... 'id': 6635402,
... 'iso3': 'ABW',
... 'bed_prv_pr': Decimal('0.003'),
... 'ic_mhg_cr': Decimal('0.0246'),
... 'bed_prv_cr': 0,
... 'type': 'Point',
... 'lon': Decimal('-70.0624999987871'),
... 'lat': Decimal('12.637499976568533')}
True
"""
def reader(f, **kwargs):
"""File reader"""
try:
features = items(f, 'features.item')
except KeyError:
raise TypeError('Only GeoJSON with features are supported.')
else:
for feature in features:
_type = feature['geometry']['type']
properties = feature.get('properties') or {}
coords = feature['geometry']['coordinates']
record = {
'id': feature.get(key, properties.get(key)),
'type': feature['geometry']['type']}
args = (record, coords, properties)
for rec in gen_records(_type, *args, **kwargs):
yield rec
return read_any(filepath, reader, mode, **kwargs)
def read_yaml(filepath, mode='r', **kwargs):
"""Reads a YAML file
TODO: convert to a streaming parser
Args:
filepath (str): The yaml file path or file like object.
mode (Optional[str]): The file open mode (default: 'r').
Kwargs:
encoding (str): File encoding.
Returns:
Iterable: The parsed records
See also:
`meza.io.read_any`
Examples:
>>> from datetime import date, datetime as dt
>>> filepath = p.join(DATA_DIR, 'test.yml')
>>> records = read_yaml(filepath)
>>> next(records) == {
... 'text': 'Chicago Reader',
... 'float': 1.0,
... 'datetime': dt(1971, 1, 1, 4, 14),
... 'boolean': True,
... 'time': '04:14:00',
... 'date': date(1971, 1, 1),
... 'integer': 40}
True
"""
    return read_any(filepath, yaml.safe_load, mode, **kwargs)  # safe_load avoids arbitrary object construction
def get_text(element):
    """Extracts the text content (or link text/href) from a BeautifulSoup element."""
if element and element.text:
text = element.text.strip()
else:
text = ''
if not text and element and element.string:
text = element.string.strip()
if not text and element and element.a:
        text = element.a.text or element.a.get('href') or ''  # Tag.href would look up a child tag, not the attribute
text = text.strip()
return text
def _find_table(soup, pos=0):
if pos:
try:
table = soup.find_all('table')[pos]
except IndexError:
table = None
else:
table = soup.table
return table
def _gen_from_rows(rows, header, vertical=False):
if vertical:
# nested_tds = [('one', 'two'), ('uno', 'dos'), ('un', 'deux')]
nested_tds = (tr.find_all('td') for tr in rows)
# tds = ('one', 'uno', 'un')
for tds in zip(*nested_tds):
row = map(get_text, tds)
yield dict(zip(header, row))
else:
for tr in rows: # pylint: disable=C0103
row = map(get_text, tr.find_all('td'))
yield dict(zip(header, row))
def read_html(filepath, table=0, mode='r', **kwargs):
"""Reads tables from an html file
TODO: convert to lxml.etree.iterparse
http://lxml.de/parsing.html#iterparse-and-iterwalk
Args:
filepath (str): The html file path or file like object.
table (int): Zero indexed table to open (default: 0)
mode (Optional[str]): The file open mode (default: 'r').
kwargs (dict): Keyword arguments
Kwargs:
encoding (str): File encoding.
sanitize (bool): Underscorify and lowercase field names
(default: False).
dedupe (bool): Deduplicate field names (default: False).
        vertical (bool): The table has headers in the left column (default:
            False).
        first_row_as_header (bool): Use the first row as the header when no
            `th` cells are present (default: False).
Returns:
Iterable: The parsed records
See also:
`meza.io.read_any`
Examples:
>>> filepath = p.join(DATA_DIR, 'test.html')
>>> records = read_html(filepath, sanitize=True)
>>> next(records) == {
... '': 'Mediterranean',
... 'january': '82',
... 'february': '346',
... 'march': '61',
... 'april': '1,244',
... 'may': '95',
... 'june': '10',
... 'july': '230',
... 'august': '684',
... 'september': '268',
... 'october': '432',
... 'november': '105',
... 'december': '203',
... 'total_to_date': '3,760'}
True
"""
def reader(f, **kwargs):
"""File reader"""
try:
soup = BeautifulSoup(f, 'lxml-xml')
except FeatureNotFound:
soup = BeautifulSoup(f, 'html.parser')
sanitize = kwargs.get('sanitize')
dedupe = kwargs.get('dedupe')
vertical = kwargs.get('vertical')
first_row_as_header = kwargs.get('first_row_as_header')
tbl = _find_table(soup, table)
if tbl:
rows = tbl.find_all('tr')
for num, first_row in enumerate(rows):
if first_row.find('th'):
break
ths = first_row.find_all('th')
if first_row_as_header and not ths:
ths = rows[0].find_all('td')
if vertical or len(ths) == 1:
# the headers are vertical instead of horizontal
vertical = True
names = (get_text(row.th) for row in rows)
elif ths:
rows = rows[1:]
names = map(get_text, ths)
else:
col_nums = range(len(first_row))
names = ['column_{}'.format(i) for i in col_nums]
uscored = ft.underscorify(names) if sanitize else names
header = list(ft.dedupe(uscored) if dedupe else uscored)
records = _gen_from_rows(rows, header, vertical)
else:
records = iter([])
return records
return read_any(filepath, reader, mode, **kwargs)
def write(filepath, content, mode='wb+', **kwargs):
"""Writes content to a file path or file like object.
Args:
filepath (str): The file path or file like object to write to.
content (obj): File like object or `requests` iterable response.
mode (Optional[str]): The file open mode (default: 'wb+').
kwargs: Keyword arguments.
Kwargs:
encoding (str): The file encoding.
chunksize (Optional[int]): Number of bytes to write at a time (default:
None, i.e., all).
length (Optional[int]): Length of content (default: 0).
bar_len (Optional[int]): Length of progress bar (default: 50).
Returns:
int: bytes written
See also:
`meza.io.read_any`
Examples:
>>> from tempfile import TemporaryFile
>>>
>>> write(TemporaryFile(), StringIO('Hello World'))
11
>>> write(StringIO(), StringIO('Hello World'))
11
>>> content = IterStringIO(iter('Internationalization'))
>>> write(StringIO(), content)
20
>>> content = IterStringIO(iter('Iñtërnâtiônàližætiøn'))
>>> write(StringIO(), content)
28
"""
def writer(f, content, **kwargs):
"""File writer"""
chunksize = kwargs.get('chunksize')
length = int(kwargs.get('length') or 0)
bar_len = kwargs.get('bar_len', 50)
encoding = kwargs.get('encoding', ENCODING)
progress = 0
for chunk in ft.chunk(content, chunksize):
text = ft.byte(chunk) if hasattr(chunk, 'sort') else chunk
try:
f.write(text)
except UnicodeEncodeError:
f.write(text.encode(encoding))
except TypeError:
try:
f.write(text.decode(encoding))
except AttributeError:
f.write(bytes(text, encoding))
            written = chunksize or len(text)
            progress += written
            if length:
                bars = min(int(bar_len * progress / length), bar_len)
                logger.debug('\r[%s%s]', '=' * bars, ' ' * (bar_len - bars))
                sys.stdout.flush()
            # yield the per-chunk count so the sum() in `write` reports the
            # total bytes written instead of a sum of running totals
            yield written
return sum(read_any(filepath, writer, mode, content, **kwargs))
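# Chunked copy with a progress bar (a sketch; this assumes `ft.chunk`
# accepts file like objects with a chunksize, as the unchunked doctests
# above suggest). `length` drives the debug-level progress bar in `writer`:
#
# >>> write(StringIO(), StringIO('x' * 1000), chunksize=100, length=1000)
# 1000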
def hash_file(filepath, algo='sha1', chunksize=0, verbose=False):
"""Hashes a file path or file like object.
https://stackoverflow.com/a/1131255/408556
Args:
filepath (str): The file path or file like object to hash.
algo (str): The hashlib hashing algorithm to use (default: sha1).
        chunksize (Optional[int]): Number of bytes to read at a time
            (default: 0, i.e., all).
verbose (Optional[bool]): Print debug statements (default: False).
Returns:
str: File hash.
See also:
`meza.io.read_any`
`meza.process.hash`
Examples:
>>> from tempfile import TemporaryFile
>>> resp = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
>>> hash_file(TemporaryFile()) == resp
True
"""
def writer(f, hasher, **kwargs): # pylint: disable=W0613
"""File writer"""
if chunksize:
while True:
data = f.read(chunksize)
if not data:
break
hasher.update(data)
else:
hasher.update(f.read())
yield hasher.hexdigest()
args = [getattr(hashlib, algo)()]
file_hash = next(read_any(filepath, writer, 'rb', *args))
if verbose:
logger.debug('File %s hash is %s.', filepath, file_hash)
return file_hash
def reencode(f, fromenc=ENCODING, toenc=ENCODING, **kwargs):
"""Reencodes a file from one encoding to another
Args:
f (obj): The file like object to convert.
Kwargs:
fromenc (str): The input encoding.
toenc (str): The output encoding (default: ENCODING).
remove_BOM (bool): Remove Byte Order Marker (default: True)
Returns:
obj: file like object of decoded strings
Examples:
>>> eff = p.join(DATA_DIR, 'utf16_big.csv')
>>>
>>> with open(eff, 'rb') as f:
... encoded = reencode(f, 'utf-16-be', remove_BOM=True)
... encoded.readline(keepends=False) == b'a,b,c'
True
"""
return Reencoder(f, fromenc, toenc, **kwargs)
def detect_encoding(f, verbose=False):
"""Detects a file's encoding.
Args:
f (obj): The file like object to detect.
        verbose (Optional[bool]): Log the detection result (default: False).
Returns:
dict: The encoding result
Examples:
>>> filepath = p.join(DATA_DIR, 'test.csv')
>>>
>>> with open(filepath, 'rb') as f:
... result = detect_encoding(f)
... result == {
... 'confidence': 0.99, 'language': '', 'encoding': 'utf-8'}
True
"""
pos = f.tell()
detector = UniversalDetector()
for line in f:
detector.feed(line)
if detector.done:
break
detector.close()
f.seek(pos)
if verbose:
logger.debug('result %s', detector.result)
return detector.result
def get_reader(extension):
"""Gets the appropriate reader for a given file extension.
Args:
extension (str): The file extension.
Returns:
func: The file reading function
See also:
`meza.io.read`
Raises:
TypeError: If unable to find a suitable reader.
Examples:
>>> get_reader('xls') # doctest: +ELLIPSIS
<function read_xls at 0x...>
"""
switch = {
'csv': read_csv,
'xls': read_xls,
'xlsx': read_xls,
'mdb': read_mdb,
'json': read_json,
'geojson': read_geojson,
'geojson.json': read_geojson,
'sqlite': read_sqlite,
'dbf': read_dbf,
'tsv': read_tsv,
'yaml': read_yaml,
'yml': read_yaml,
'html': read_html,
'fixed': read_fixed_fmt,
}
try:
return switch[extension.lstrip('.').lower()]
    except KeyError:  # dict lookups raise KeyError, not IndexError
msg = 'Reader for extension `{}` not found!'
raise TypeError(msg.format(extension))
def read(filepath, ext=None, **kwargs):
"""Reads any supported file format.
Args:
filepath (str): The file path or file like object.
ext (str): The file extension.
Returns:
Iterable: The parsed records
See also:
`meza.io.get_reader`
`meza.io.join`
Examples:
>>> filepath = p.join(DATA_DIR, 'test.xls')
>>> next(read(filepath, sanitize=True)) == {
... 'some_value': '234.0',
... 'some_date': '1982-05-04',
... 'sparse_data': 'Iñtërnâtiônàližætiøn',
... 'unicode_test': 'Ādam'}
True
>>> filepath = p.join(DATA_DIR, 'test.csv')
>>> next(read(filepath, sanitize=True)) == {
... 'sparse_data': 'Iñtërnâtiônàližætiøn',
... 'some_date': '05/04/82',
... 'some_value': '234',
... 'unicode_test': 'Ādam'}
True
"""
ext = ext or p.splitext(filepath)[1]
return get_reader(ext)(filepath, **kwargs)
def join(*filepaths, **kwargs):
"""Reads multiple filepaths and yields all the resulting records.
Args:
filepaths (iter[str]): Iterator of filepaths or file like objects.
kwargs (dict): keyword args passed to the individual readers.
Kwargs:
ext (str): The file extension.
Yields:
dict: A parsed record
See also:
`meza.io.read`
Examples:
>>> fs = [p.join(DATA_DIR, 'test.xls'), p.join(DATA_DIR, 'test.csv')]
>>> next(join(*fs, sanitize=True)) == {
... 'some_value': '234.0',
... 'some_date': '1982-05-04',
... 'sparse_data': 'Iñtërnâtiônàližætiøn',
... 'unicode_test': 'Ādam'}
True
"""
reader = partial(read, **kwargs)
return it.chain.from_iterable(map(reader, filepaths))
|
reubano/tabutils
|
meza/io.py
|
Python
|
mit
| 52,280
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-29 22:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('almoxarifado', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='equipamento',
name='fabricante',
field=models.CharField(default=None, max_length=90),
),
migrations.AlterField(
model_name='equipamento',
name='ativo_imobilizado',
field=models.PositiveIntegerField(default=None),
),
migrations.AlterField(
model_name='equipamento',
name='data_entrega',
field=models.DateTimeField(default=None),
),
migrations.AlterField(
model_name='equipamento',
name='data_retirada',
field=models.DateTimeField(default=None),
),
migrations.AlterField(
model_name='equipamento',
name='localizacao',
field=models.CharField(default=None, max_length=150),
),
migrations.AlterField(
model_name='equipamento',
name='observacoes',
field=models.TextField(default=None),
),
migrations.AlterField(
model_name='equipamento',
name='serial_number',
field=models.CharField(default=None, max_length=30),
),
migrations.AlterField(
model_name='equipamento',
name='status',
field=models.CharField(default=None, max_length=12),
),
]
|
rvmoura96/projeto-almoxarifado
|
almoxarifado/migrations/0002_auto_20170929_1929.py
|
Python
|
mit
| 1,726
|
from numpy import *
from cmlib import showMatr
A = matrix([[1, 2, 0],
[0, 2, 2]])
B = matrix([[3, -1],
[-1, 3],
[1, 0]])
res = (A * B).T
showMatr(array(res))
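# Worked out by hand (rows of A dotted with columns of B):
#   A * B = [[1, 5],
#            [0, 6]]
# so the printed transpose is [[1, 0],
#                              [5, 6]]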
|
FeodorM/amm_code
|
cm/lab_3/2_.py
|
Python
|
mit
| 205
|
import os
from setuptools import setup, find_packages
# from pip.req import parse_requirements  # unused; pip.req was removed in pip 10
#REQUIREMENTS_FILE = os.path.join( os.path.dirname(__file__), 'requirements.openshift.txt')
PROJECT_NAME = '<your-project-name>'
AUTHOR_NAME = '<your-name>'
AUTHOR_EMAIL = '<your-email-address>'
PROJECT_URL = ''
DESCRIPTION = '<your-project-description>'
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
setup(name=PROJECT_NAME,
version='1.0',
author=AUTHOR_NAME,
author_email=AUTHOR_EMAIL,
url=PROJECT_URL,
packages=find_packages(),
include_package_data=True,
description=DESCRIPTION,
)
|
appsembler/symposion-openshift-quickstart
|
setup.py
|
Python
|
mit
| 618
|
from distutils.core import setup
setup(
name='stompy',
version='0.1',
packages=['stompy', 'stompy.grid', 'stompy.io', 'stompy.io.local',
'stompy.model', 'stompy.model.delft', 'stompy.model.fvcom',
'stompy.model.pypart', 'stompy.model.suntans',
'stompy.plot', 'stompy.plot.cmaps',
'stompy.spatial'],
package_data={'stompy':['tide_consts.txt']},
license='MIT',
url="https://github.com/rustychris/stompy",
author="Rusty Holleman",
author_email="rustychris@gmail.com",
long_description=open('README.md').read(),
)
|
rustychris/stompy
|
setup.py
|
Python
|
mit
| 603
|
"""
Copyright (C) 2014 Maruf Maniruzzaman
Website: http://cosmosframework.com
Author: Maruf Maniruzzaman
License :: OSI Approved :: MIT License
"""
|
kuasha/cosmos
|
cosmos/schema/object.py
|
Python
|
mit
| 154
|
from django.contrib import admin
from holidays.models import (Holiday, StaticHoliday,
NthXDayHoliday, NthXDayAfterHoliday, CustomHoliday)
class HolidayAdmin(admin.ModelAdmin):
pass
class StaticHolidayAdmin(admin.ModelAdmin):
pass
class NthXDayHolidayAdmin(admin.ModelAdmin):
pass
class NthXDayAfterHolidayAdmin(admin.ModelAdmin):
pass
class CustomHolidayAdmin(admin.ModelAdmin):
pass
admin.site.register(Holiday, HolidayAdmin)
admin.site.register(StaticHoliday, StaticHolidayAdmin)
admin.site.register(NthXDayHoliday, NthXDayHolidayAdmin)
admin.site.register(NthXDayAfterHoliday, NthXDayAfterHolidayAdmin)
admin.site.register(CustomHoliday, CustomHolidayAdmin)
|
dannybrowne86/django-holidays
|
holidays/holidays/admin.py
|
Python
|
mit
| 720
|
"""
gof.py
gof stands for Graph Optimization Framework
The gof submodule of theano implements a framework
for manipulating programs described as graphs. The
gof module defines basic theano graph concepts:
-Apply nodes, which represent the application
of an Op to Variables. Together these make up a
graph.
-The Type, needed for Variables to make sense
-The FunctionGraph, which defines how a subgraph
should be interpreted to implement a function
-The Thunk, a callable object that becomes part
of the executable emitted by theano
-Linkers/VMs, the objects that call Thunks in
sequence in order to execute a theano program
Conceptually, gof is intended to be sufficiently abstract
that it could be used to implement a language other than
theano. I.e., theano is a domain-specific language for
numerical computation, created by implementing
tensor Variables and Ops that perform mathematical functions.
A different kind of domain-specific language could be
made by using gof with different Variables and Ops.
In practice, gof and the rest of theano are somewhat more
tightly intertwined.
Currently, gof also contains much of the C compilation
functionality. Ideally this should be refactored into
a different submodule.
For more details and discussion, see the theano-dev
e-mail thread "What is gof?"
"""
from theano.gof.cc import \
CLinker, OpWiseCLinker, DualLinker, HideC
# Also adds config vars
from theano.gof.compiledir import \
local_bitwidth, python_int_bitwidth
from theano.gof.fg import \
CachedConstantError, InconsistencyError, MissingInputError, FunctionGraph
from theano.gof.destroyhandler import \
DestroyHandler
from theano.gof.graph import \
Apply, Variable, Constant, view_roots
from theano.gof.link import \
Container, Linker, LocalLinker, PerformLinker, WrapLinker, WrapLinkerMany
from theano.gof.op import \
Op, OpenMPOp, PureOp, COp, ops_with_inner_function
from theano.gof.opt import (
Optimizer,
optimizer, inplace_optimizer,
SeqOptimizer,
MergeOptimizer,
LocalOptimizer, local_optimizer, LocalOptGroup,
OpSub, OpRemove, PatternSub,
NavigatorOptimizer, TopoOptimizer, EquilibriumOptimizer,
OpKeyOptimizer)
from theano.gof.optdb import \
DB, Query, \
EquilibriumDB, SequenceDB, ProxyDB
from theano.gof.toolbox import \
Feature, \
Bookkeeper, History, Validator, ReplaceValidate, NodeFinder,\
PrintListener, ReplacementDidntRemovedError, NoOutputFromInplace
from theano.gof.type import \
Type, Generic, generic
from theano.gof.utils import \
hashtype, object2, MethodNotDefined
import theano
if theano.config.cmodule.preload_cache:
cc.get_module_cache()
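# A small illustration of the graph concepts described in the module
# docstring (a sketch, not part of the original file): every computed
# Variable records the Apply node that produced it.
#
# >>> import theano.tensor as T
# >>> x = T.dscalar('x')
# >>> y = x ** 2
# >>> isinstance(y.owner, Apply)  # the Apply node linking the pow Op to x
# True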
|
nke001/attention-lvcsr
|
libs/Theano/theano/gof/__init__.py
|
Python
|
mit
| 2,704
|
def validate(hand):
if hand < 0 or hand > 2:
return False
return True
def print_hand(hand, name='ゲスト'):  # default name: 'guest'
    hands = ['グー', 'チョキ', 'パー']  # rock, scissors, paper
    print(name + 'は' + hands[hand] + 'を出しました')  # "<name> played <hand>"
def judge(player, computer):
    if player == computer:
        return '引き分け'  # draw
    elif player == 0 and computer == 1:
        return '勝ち'  # win: rock beats scissors
    elif player == 1 and computer == 2:
        return '勝ち'  # win: scissors beat paper
    elif player == 2 and computer == 0:
        return '勝ち'  # win: paper beats rock
    else:
        return '負け'  # loss
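# Usage sketch (hands are 0 = rock, 1 = scissors, 2 = paper):
#
# >>> validate(3)
# False
# >>> judge(0, 1)  # rock beats scissors
# '勝ち'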
|
hellomichiyabu/Practice
|
Python/じゃんけんゲーム/utils.py
|
Python
|
mit
| 567
|
# -*- coding: ISO-8859-1 -*-
"""
Form Widget classes specific to the geoSite admin site.
"""
# A class that corresponds to an HTML form widget,
# e.g. <input type="text"> or <textarea>.
# This handles rendering of the widget as HTML.
import json
from django.template.loader import render_to_string
from .conf import settings
from django.utils import six
from django import forms
from django.forms import widgets, MultiWidget, Media
from django.utils.html import conditional_escape, format_html, format_html_join
from django.forms.util import flatatt, to_current_timezone
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils.safestring import mark_safe
from django.templatetags.static import static
from . import LatLng
# classe widget utilizzata dal campo forms.geoFields LatLngField
class LatLngTextInputWidget(forms.MultiWidget):
def __init__(self, attrs=None):
widgets = (
forms.TextInput(),
forms.TextInput(),
)
super(LatLngTextInputWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if isinstance(value, six.text_type):
return value.rsplit(',')
if value:
return [value.lat, value.lng]
return [None,None]
def format_output(self, rendered_widgets):
return render_to_string('geopositionmap/widgets/geopositionmap.html', {
'latitude': {
'html': rendered_widgets[0],
'label': _("latitude"),
},
'longitude': {
'html': rendered_widgets[1],
'label': _("longitude"),
},
'config': {
'map_widget_height': settings.GEOPOSITIONMAP_MAP_WIDGET_HEIGHT,
'map_options': json.dumps(settings.GEOPOSITIONMAP_MAP_OPTIONS),
'marker_options': json.dumps(settings.GEOPOSITIONMAP_MARKER_OPTIONS),
'google_view': json.dumps(settings.GEOPOSITIONMAP_GOOGLE_VIEW),
'osm_view': json.dumps(settings.GEOPOSITIONMAP_OSM_VIEW),
}
})
class Media:
#extend = False
css = {
'all': (
'geopositionmap/geopositionmap.css',
'//cdn.leafletjs.com/leaflet-0.7.3/leaflet.css',
)
}
js = (
'//maps.google.com/maps/api/js?sensor=false',
'//cdn.leafletjs.com/leaflet-0.7.3/leaflet.js',
'geopositionmap/geopositionmap.js',
)
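# Decompression behaviour (a sketch): a stored "lat,lng" string is split
# into the two text inputs, while a LatLng-like value is unpacked:
#
# >>> LatLngTextInputWidget().decompress('12.5,-70.1')
# ['12.5', '-70.1']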
|
ivandm/django-geopositionmap
|
geopositionmap/geoWidgets.py
|
Python
|
mit
| 2,620
|
#!/usr/bin/env python3
#!/usr/bin/python
# https://en.wikipedia.org/wiki/Matplotlib
import numpy
import matplotlib.pyplot as plt
from numpy.random import rand
a = rand(100)
b = rand(100)
plt.scatter(a, b)
plt.show()
|
jtraver/dev
|
python3/matplotlib/plot1.py
|
Python
|
mit
| 220
|
#!/bin/python
import sys
import vlc
import os
import re
from tempfile import *
from gtts import gTTS
from remote2text import RGBRemote2Text
parser = RGBRemote2Text(verbose=True)
while True:
ir_out = input()
response = parser.process(ir_out)
if response:
tts = gTTS(text=response, lang='pt')
tmp = NamedTemporaryFile(delete=False)
tts.write_to_fp(tmp)
        tmp.close()  # flush the synthesized audio to disk (delete=False keeps the file)
        vlc.MediaPlayer(tmp.name).play()  # tmp.name is already an absolute path
|
Macmod/rgb-remote-tts
|
remote-gtts/remote2gtts.py
|
Python
|
mit
| 507
|
from .apps.page import views as page
assert page
|
andreif/heroku_django
|
project/views.py
|
Python
|
mit
| 49
|
#!/usr/bin/python
simulation = "L500_NR_tracers"
dirs = {"sim_root_dir" : "/home/fas/nagai/kln26/group_scratch/L500_NR_tracers",
"db_dir" : "..",
"halo_catalog_dir" : "HC.500",
"profiles_dir" : "profiles",
"logs_dir" : "logs" }
hc_radius = "500c"
halo_list_radii = ["200m", "500c", "200c", "vir"]
h_inverse = {"in_profiles": True,
"in_halo_catalog" : True,
"into_db" : True,
"h" : 0.7 }
initial_migration = True
set_main_halos = True
cluster_ids_file = "cluster_ids.dat"
do_write_halos = True
do_write_profiles = True
enabled_hydro = True
enabled_star_formation = False
enabled_epnoneq = False
|
cavestruz/L500analysis
|
caps/migration/config.py
|
Python
|
mit
| 663
|
# Django settings for flexy project.
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django_mongodb_engine',
'NAME': 'sheesh',
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = u'54c3c2ebf0d6142f25b84dce'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# END MEDIA CONFIGURATION
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '9j(dx#&1&_s5^a71r4%+ct64(22rv6sm@ly07%1fwu4ta##&q)'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.messages.context_processors.messages',
'django.contrib.auth.context_processors.auth',
)
ROOT_URLCONF = 'flexy.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'flexy.wsgi.application'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates')
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
'bootstrap3',
'app',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
sheeshmohsin/venturesity
|
flexy/flexy/settings.py
|
Python
|
mit
| 5,241
|
import gdsfactory as gf
import gdsfactory.simulation.gtidy3d as gt
from gdsfactory.config import CONFIG
from gdsfactory.simulation.gtidy3d.get_results import get_results
# def test_results_run(data_regression) -> None:
# """Run simulations and checks local results."""
# component = gf.components.straight(length=3)
# sim = gt.get_simulation(component=component, is_3d=False)
# dirpath = CONFIG["sparameters"]
# r = get_results(sim=sim, dirpath=dirpath, overwrite=True).result()
# if data_regression:
# data_regression.check(r.monitor_data)
if __name__ == "__main__":
# test_results_run(None)
component = gf.components.straight(length=3)
sim = gt.get_simulation(component=component, is_3d=False)
dirpath = CONFIG["sparameters"]
r = get_results(sim=sim, dirpath=dirpath, overwrite=True).result()
|
gdsfactory/gdsfactory
|
gdsfactory/simulation/gtidy3d/tests/test_results.py
|
Python
|
mit
| 855
|
#!/usr/bin/env python3
#
#
# copyright Kevin Walchko
#
# Basically a rostopic
from __future__ import print_function
import argparse
import time
# from pygecko import TopicSub
from pygecko.transport import zmqTCP, GeckoCore
from pygecko.multiprocessing import GeckoPy
from pygecko.test import GeckoSimpleProcess
# from pygecko.transport.zmqclass import
# def publisher(**kwargs):
# geckopy = GeckoPy()
#
# p = geckopy.Publisher()
#
# hertz = kwargs.get('rate', 10)
# rate = geckopy.Rate(hertz)
#
# topic = kwargs.get('topic')
# msg = kwargs.get('msg')
#
# cnt = 0
# start = time.time()
# while not geckopy.is_shutdown():
# p.pub(topic, msg) # topic msg
# if cnt % hertz == 0:
# print(">> {}[{:.1f}]: published {} msgs".format(topic, time.time()-start, hertz))
# cnt += 1
# rate.sleep()
def subscriber(**kwargs):
geckopy = GeckoPy(**kwargs)
def f(topic, msg):
print(">> {}: {}".format(topic, msg))
topic = kwargs.get('topic')
s = geckopy.Subscriber([topic], f)
geckopy.spin()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()  # `args` was undefined here; build it from the CLI
    parser.add_argument('topic', help='topic to subscribe to')
    args = vars(parser.parse_args())
    p = GeckoSimpleProcess()
    p.start(func=subscriber, name='subscriber', kwargs=args)
# while True:
# try:
# time.sleep(1)
# except KeyboardInterrupt:
# break
#
# # shutdown the processes
# p.join(0.1)
|
walchko/pygecko
|
dev/services/test.py
|
Python
|
mit
| 1,384
|
from shutil import copy
from tkinter import *
from tkinter import filedialog
from tkinter import ttk
from .GetStaticData import *
from .SetStaticData import *
# The following handles everything to do with the gui. (main)
# variables starting with var are default values; when the user changes the value of a ui element,
# these variables auto change with them. It is therefore very easy to get the current state of ui items.
def gen_set():
""" This function is called whenever a save-event is requested. The function retrieves all ui-vars and writes the
data in them to the .prison save file. It also manages the unlimited funds checkbox (it automatically disables the
balance Entry)"""
write_general_to_file(save_file_path, bool(var_misconduct.get()), bool(var_gangs.get()), bool(var_decay.get()),
bool(var_visibility.get()), bool(var_failure_conditions.get()), bool(var_events.get()),
bool(var_unlimited_funds.get()))
write_finance_to_file(save_file_path, var_balance.get(), var_bank_loan.get(), var_credit_rating.get(),
var_ownership.get())
res_set()
if var_unlimited_funds.get():
balance_entry.configure(state=DISABLED)
else:
balance_entry.configure(state=NORMAL)
def res_set(var=0):
global maintainance, security, legal, mental_health, finance, cctv, remote_access, health, cleaning, grounds_keeping, \
clone, deployment, patrols, dogs, prison_labour, education, land_expansion, contraband, policy, armoury, \
body_armour, tazers, tazers_for_everyone, bank_loans, lower_taxes_1, lower_taxes_2, extra_grant, \
advanced_management, death_row, permanent_punishment, remove_min_cell_size, reduce_execution_liability, \
legal_prep, legal_defense
selected = combo_var.get()
fl = var_research_scale.get()
if 'Maintainance' in selected:
maintainance = fl
elif 'Security' in selected:
security = fl
elif 'Legal' in selected and 'Prep' not in selected and 'Defense' not in selected:
legal = fl
elif 'Mental Health' in selected:
mental_health = fl
elif 'Finance' in selected:
finance = fl
elif 'Cctv' in selected:
cctv = fl
elif 'Remote Access' in selected:
remote_access = fl
elif 'Health' in selected:
health = fl
elif 'Cleaning' in selected:
cleaning = fl
elif 'Grounds Keeping' in selected:
grounds_keeping = fl
elif 'Clone' in selected:
clone = fl
elif 'Deployment' in selected:
deployment = fl
elif 'Patrols' in selected:
patrols = fl
elif 'Dogs' in selected:
dogs = fl
elif 'Prison Labour' in selected:
prison_labour = fl
elif 'Education' in selected:
education = fl
elif 'Land Expansion' in selected:
land_expansion = fl
elif 'Contraband' in selected:
contraband = fl
elif 'Policy' in selected:
policy = fl
elif 'Armoury' in selected:
armoury = fl
    elif 'Body Armour' in selected:
body_armour = fl
elif 'Tazers' in selected and 'For Everyone' not in selected:
tazers = fl
elif 'Tazers For Everyone' in selected:
tazers_for_everyone = fl
elif 'Bank Loans' in selected:
bank_loans = fl
    elif 'Lower Taxes 1' in selected:
        lower_taxes_1 = fl
    elif 'Lower Taxes 2' in selected:
lower_taxes_2 = fl
elif 'Extra Grant' in selected:
extra_grant = fl
elif 'Advanced Management' in selected:
advanced_management = fl
    elif 'Death Row' in selected:
death_row = fl
elif 'Permanent Punishment' in selected:
permanent_punishment = fl
elif 'Remove Min Cell Size' in selected:
remove_min_cell_size = fl
elif 'Reduce Execution Liability' in selected:
reduce_execution_liability = fl
elif 'Legal Prep' in selected:
legal_prep = fl
elif 'Legal Defense' in selected:
legal_defense = fl
write_research_to_file(save_file_path, maintainance, security, legal, mental_health, finance, cctv, remote_access,
health, cleaning, grounds_keeping, clone, deployment, patrols, dogs, prison_labour,
education, land_expansion, contraband, policy, armoury, body_armour, tazers,
tazers_for_everyone, bank_loans, lower_taxes_1, lower_taxes_2, extra_grant,
advanced_management, death_row, permanent_punishment, remove_min_cell_size,
reduce_execution_liability, legal_prep, legal_defense)
def validate_and_save():
""" This function takes the values in the Entry boxes and checks if it is a valid float. It also organises feedback
to the user."""
def isnum(y):
ch = '0123456789.-'
for x in y:
if x not in ch:
return False
return True
a = var_balance.get()
b = var_bank_loan.get()
c = var_credit_rating.get()
d = var_ownership.get()
if isnum(a) and isnum(b) and isnum(c) and isnum(d):
try:
float(a)
float(b)
float(c)
if 100 >= float(d) >= 0:
gen_set()
save_result.configure(background='#66ff66')
var_save_result.set('The data was valid')
else:
save_result.configure(background='#ff6666')
var_save_result.set('% <= 100% ... :|')
except ValueError:
save_result.configure(background='#ff6666')
var_save_result.set('Numbers only! (0123456789.-)')
else:
save_result.configure(background='#ff6666')
var_save_result.set('Numbers only! (0123456789.-)')
def set_default_data(file_path):
""" :param file_path: the file to write to\n
This function retrieves predetermined data from a given file, and then asigns the values to the global ui-vars"""
misconduct, gangs, decay, visibility, failure_conditions, events = get_general_settings(file_path)
var_misconduct.set(misconduct)
var_gangs.set(gangs)
var_decay.set(decay)
var_visibility.set(visibility)
var_failure_conditions.set(failure_conditions)
var_events.set(events)
balance, bank_loan, credit_rating, ownership, unlimited_funds = get_finance_settings(file_path)
var_balance.set(balance)
var_bank_loan.set(bank_loan)
var_credit_rating.set(credit_rating)
var_ownership.set(ownership)
var_unlimited_funds.set(unlimited_funds)
global maintainance, security, legal, mental_health, finance, cctv, remote_access, health, cleaning, grounds_keeping, \
clone, deployment, patrols, dogs, prison_labour, education, land_expansion, contraband, policy, armoury, \
body_armour, tazers, tazers_for_everyone, bank_loans, lower_taxes_1, lower_taxes_2, extra_grant, \
advanced_management, death_row, permanent_punishment, remove_min_cell_size, reduce_execution_liability, \
legal_prep, legal_defense
maintainance, security, legal, mental_health, finance, cctv, remote_access, health, cleaning, grounds_keeping, \
clone, deployment, patrols, dogs, prison_labour, education, land_expansion, contraband, policy, armoury, \
body_armour, tazers, tazers_for_everyone, bank_loans, lower_taxes_1, lower_taxes_2, extra_grant, \
advanced_management, death_row, permanent_punishment, remove_min_cell_size, reduce_execution_liability, \
legal_prep, legal_defense = get_research_settings(file_path)
def select_files():
""" Get file to read from and file to save to
This function first asks the user for two files, and then check's whether the file is valid. If the user cancels
the file-selection proces, () is returned: that is why the check asks for 3 or more characters.
The program then makes a copy of the given source-file, and keeps the copy as changable output dump
At set_default_data, the function fills in the blancs in the form using data from the given source-file."""
open_file_path = filedialog.askopenfilename()
global save_file_path
save_file_path = filedialog.asksaveasfilename()
if len(open_file_path) < 3 or len(save_file_path) < 3:
raise OSError(10, 'There was a problem while selecting files (FILENAME TOO SHORT)')
elif not open_file_path.endswith('.prison'):
raise OSError(11, 'There was a problem while selecting files (FILE 1 WAS NOT A .prison FILE!)')
elif not save_file_path.endswith('.prison'):
raise OSError(12, 'There was a problem while selecting files (FILE 2 WAS NOT A .prison FILE!)')
if open_file_path != save_file_path: # copy throws an error when this is the case
# copy
copy(open_file_path, save_file_path)
# read data from file:
set_default_data(save_file_path)
# show tabs:
mainframe.add(general_page, text='General/finance')
# Disable button:
select_files_button.configure(state=DISABLED)
def combo_update(var=0):
selected = combo_var.get()
if 'Maintainance' in selected:
var_research_scale.set(maintainance)
elif 'Security' in selected:
var_research_scale.set(security)
elif 'Legal' in selected and 'Prep' not in selected and 'Defense' not in selected:
var_research_scale.set(legal)
elif 'Mental Health' in selected:
var_research_scale.set(mental_health)
elif 'Finance' in selected:
var_research_scale.set(finance)
elif 'Cctv' in selected:
var_research_scale.set(cctv)
elif 'Remote Access' in selected:
var_research_scale.set(remote_access)
elif 'Health' in selected:
var_research_scale.set(health)
elif 'Cleaning' in selected:
var_research_scale.set(cleaning)
elif 'Grounds Keeping' in selected:
var_research_scale.set(grounds_keeping)
elif 'Clone' in selected:
var_research_scale.set(clone)
elif 'Deployment' in selected:
var_research_scale.set(deployment)
elif 'Patrols' in selected:
var_research_scale.set(patrols)
elif 'Dogs' in selected:
var_research_scale.set(dogs)
elif 'Prison Labour' in selected:
var_research_scale.set(prison_labour)
elif 'Education' in selected:
var_research_scale.set(education)
elif 'Land Expansion' in selected:
var_research_scale.set(land_expansion)
elif 'Contraband' in selected:
var_research_scale.set(contraband)
elif 'Policy' in selected:
var_research_scale.set(policy)
elif 'Armoury' in selected:
var_research_scale.set(armoury)
    elif 'Body Armour' in selected:
var_research_scale.set(body_armour)
elif 'Tazers' in selected and 'For Everyone' not in selected:
var_research_scale.set(tazers)
elif 'Tazers For Everyone' in selected:
var_research_scale.set(tazers_for_everyone)
elif 'Bank Loans' in selected:
var_research_scale.set(bank_loans)
    elif 'Lower Taxes 1' in selected:
        var_research_scale.set(lower_taxes_1)
    elif 'Lower Taxes 2' in selected:
var_research_scale.set(lower_taxes_2)
elif 'Extra Grant' in selected:
var_research_scale.set(extra_grant)
elif 'Advanced Management' in selected:
var_research_scale.set(advanced_management)
    elif 'Death Row' in selected:
var_research_scale.set(death_row)
elif 'Permanent Punishment' in selected:
var_research_scale.set(permanent_punishment)
elif 'Remove Min Cell Size' in selected:
var_research_scale.set(remove_min_cell_size)
elif 'Reduce Execution Liability' in selected:
var_research_scale.set(reduce_execution_liability)
elif 'Legal Prep' in selected:
var_research_scale.set(legal_prep)
elif 'Legal Defense' in selected:
var_research_scale.set(legal_defense)
#############
# Main Code #
#############
root = Tk()
root.title("Prison Architect -- sirNoolas -- Save editor")
mainframe = ttk.Notebook(root)
# adding Frames as pages for the ttk.Notebook
# first page: for explanation about program
doc_page = ttk.Frame(mainframe, padding="3 12")
ttk.Label(doc_page, text='This program can be used to edit prison architect save files.'
'\nWhen you edit a value (except when you type it), it is '
'\nautomatically saved to the save-folder which you chose when '
'\nstarting this program (when in doubt: press save anyway)\n'
'\nThis program was written by: sirNoolas'
'\nThe program was inspired by a similar program by fragmer, '
'\nwhich was originally written in C#. It can be found here: '
'\nhttp://forums.introversion.co.uk/viewtopic.php?f=42&t=50603&'
'\nsid=51d9d4228a7b0fae9b1f391c23dd054f\n'
'\nPlease always choose a new file as save-location; '
                         '\nbecause this program is not 100% safe, and might corrupt your '
'\nsave-file.\n'
'\nPress the button to select files to read from and write to.'
'\nIn the dialog that opens first select the PA .prison save file'
'\n you want to edit, then select a file to save your changes to.').grid(column=1, row=1,
columnspan=2,
sticky=(W, N, E))
select_files_button = Button(doc_page, text='Select Files', command=select_files)
select_files_button.grid(column=2, row=2, sticky=(W, E, S))
mainframe.add(doc_page, text='About')
# second page, which would get widgets gridded into it
general_page = ttk.Frame(mainframe, padding="3 12")
#####################################################
# these are the checkboxes with the prison settings #
#####################################################
prison_settings_box = ttk.LabelFrame(general_page, text='Prison Settings')
# misconduct
var_misconduct = IntVar()
ttk.Checkbutton(prison_settings_box, text='Misconduct', variable=var_misconduct, command=gen_set).grid(
column=1, row=1, sticky=(W, E))
# gangs
var_gangs = IntVar()
ttk.Checkbutton(prison_settings_box, text='Gangs', variable=var_gangs, command=gen_set).grid(
column=1, row=2, sticky=(W, E))
# decay
var_decay = IntVar()
ttk.Checkbutton(prison_settings_box, text='Decay', variable=var_decay, command=gen_set).grid(
column=1, row=3, sticky=(W, E))
# visibility
var_visibility = IntVar()
ttk.Checkbutton(prison_settings_box, text='Visibility', variable=var_visibility, command=gen_set).grid(
column=1, row=4, sticky=(W, E))
# failure conditions
var_failure_conditions = IntVar()
ttk.Checkbutton(prison_settings_box, text='Failure Conditions', variable=var_failure_conditions,
command=gen_set).grid(column=1, row=5, sticky=(W, E))
# events
var_events = IntVar()
ttk.Checkbutton(prison_settings_box, text='Events', variable=var_events, command=gen_set).grid(
column=1, row=7, sticky=(W, E))
prison_settings_box.grid(column=1, row=1, sticky=W)
######################################################
# these are the checkboxes with the finance settings #
######################################################
finance_box = ttk.LabelFrame(general_page, text='Finance')
# Unlimited Funds
var_unlimited_funds = IntVar()
ttk.Checkbutton(finance_box, text='Unlimited funds', variable=var_unlimited_funds, command=gen_set).grid(
column=1, row=1, columnspan=2, sticky=(W, E))
# Balance
var_balance = StringVar()
ttk.Label(finance_box, text='Balance ($)').grid(column=1, row=2, sticky=W)
balance_entry = Entry(finance_box, textvariable=var_balance)
balance_entry.grid(column=2, row=2, sticky=E)
# Bank Loan
var_bank_loan = StringVar()
ttk.Label(finance_box, text='Bank Loan ($)').grid(column=1, row=3, sticky=W)
Entry(finance_box, textvariable=var_bank_loan).grid(column=2, row=3, sticky=E)
# Credit Rating
var_credit_rating = StringVar()
ttk.Label(finance_box, text='Credit Rating').grid(column=1, row=4, sticky=W)
Entry(finance_box, textvariable=var_credit_rating).grid(column=2, row=4, sticky=E)
# Ownership
var_ownership = StringVar()
ttk.Label(finance_box, text='Ownership (%)').grid(column=1, row=5, sticky=W)
Spinbox(finance_box, from_=50, to=100.0, textvariable=var_ownership).grid(column=2, row=5, sticky=E)
finance_box.grid(column=2, row=1, sticky=E)
################
# Research Box #
################
research_box = ttk.LabelFrame(general_page, text='Research')
combo_var = StringVar()
combo_var.set('Select...')
research_selector = ttk.Combobox(research_box, textvariable=combo_var, state='readonly')
research_selector.configure(values=('Maintainance', 'Security', 'Legal', 'Mental Health', 'Finance', 'Cctv',
'Remote Access', 'Health', 'Cleaning', 'Grounds Keeping', 'Clone', 'Deployment',
'Patrols', 'Dogs', 'Prison Labour', 'Education', 'Land Expansion', 'Contraband',
'Policy', 'Armoury', 'Body Armour', 'Tazers', 'Tazers For Everyone', 'Bank Loans',
                                    'Lower Taxes 1', 'Lower Taxes 2', 'Extra Grant', 'Advanced Management', 'Death Row',
'Permanent Punishment', 'Remove Min Cell Size', 'Reduce Execution Liability',
'Legal Prep', 'Legal Defense'))
research_selector.grid(column=1, row=1, sticky=(W, E))
research_selector.bind('<<ComboboxSelected>>', combo_update)
var_research_scale = DoubleVar()
ttk.Label(research_box, text='%', textvariable=var_research_scale, width=4, state='readonly').grid(column=2, row=1, sticky=N)
research_scale = ttk.Scale(research_box, orient=HORIZONTAL, length=200, from_=0.0, to=100.0,
                           variable=var_research_scale, command=res_set)
research_scale.grid(column=3, row=1, sticky=E)
research_box.grid(column=1, row=2, columnspan=2, sticky=(W, E))
###############
# Save Button #
###############
var_save_result = StringVar()
save_result = ttk.Label(general_page, textvariable=var_save_result)
save_result.grid(column=1, row=3, columnspan=2, sticky=W)
Button(general_page, text='Save', command=validate_and_save).grid(column=2, row=3, sticky=E)
# END of finance
mainframe.pack(expand=1, fill="both")
###############
# final setup #
###############
# init global variables for later use by several functions
save_file_path = None
maintainance = security = legal = mental_health = finance = cctv = remote_access = health = cleaning = \
grounds_keeping = clone = deployment = patrols = dogs = prison_labour = education = land_expansion = contraband = \
policy = armoury = body_armour = tazers = tazers_for_everyone = bank_loans = lower_taxes_1 = lower_taxes_2 = \
extra_grant = advanced_management = death_row = permanent_punishment = remove_min_cell_size = \
reduce_execution_liability = legal_prep = legal_defense = 0
root.mainloop() # starts the gui loop
|
sirNoolas/PA-Save-editor
|
gui.py
|
Python
|
mit
| 19,373
|
import logging
import traceback
import sys
from celery import Celery
from .callbacks import STATUS_LOADING_DATA
from .config import get_engine, _set_connection_string
from .loader import FDPLoader
from .callbacks import do_request, STATUS_INITIALIZING, STATUS_FAIL, STATUS_DONE
app = Celery('fdp_loader')
app.config_from_object('babbage_fiscal.celeryconfig')
root = logging.getLogger()
root.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stderr)
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
class ProgressSender(object):
def __init__(self, callback, package):
self.count = 0
self.callback = callback
self.package = package
self.error = None
def __call__(self, status=STATUS_LOADING_DATA, count=None, data=None, error=None):
if error is not None:
self.error = error
if count is None:
count = self.count
else:
self.count = count
logging.info('CALLBACK: %s %s (%s / %s)',
'/'.join(self.package.split('/')[4:]),
status, count, error)
do_request(self.callback, self.package, status,
progress=count, error=error, data=data)
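# Illustrative usage: the loader below passes this sender as its progress
# callback, e.g. send_progress(count=100) to report progress, or
# send_progress(status=STATUS_FAIL, error='...') on failure; omitting
# `count` re-sends the last reported value.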
@app.task
def load_fdp_task(package, callback, connection_string=None):
send_progress = ProgressSender(callback, package)
if connection_string is not None:
_set_connection_string(connection_string)
try:
logging.info("Starting to load %s" % package)
send_progress(status=STATUS_INITIALIZING)
success = FDPLoader(get_engine()).load_fdp_to_db(package, send_progress)
logging.info("Finished to load %s" % package)
except:
exc = traceback.format_exc()
send_progress(status=STATUS_FAIL, error=str(exc))
success = False
print("Failed to load %s: %s" % (package, exc))
if not success:
raise RuntimeError(send_progress.error)
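# A minimal invocation sketch (hypothetical URLs), using Celery's standard
# delay() shortcut to enqueue the task on a worker:
#
#     load_fdp_task.delay('http://example.com/my/fiscal/datapackage.json',
#                         'http://example.com/progress-callback')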
|
openspending/babbage.fiscal-data-package
|
babbage_fiscal/tasks.py
|
Python
|
mit
| 2,046
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-05 13:59
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('marketplace', '0012_auto_20170604_1335'),
]
operations = [
migrations.AlterField(
model_name='package',
name='name',
field=models.CharField(max_length=255, unique=True, validators=[django.core.validators.RegexValidator('^[a-z]*$', 'Only lowercase letters are allowed.')]),
),
]
|
MOOCworkbench/MOOCworkbench
|
marketplace/migrations/0013_auto_20170605_1359.py
|
Python
|
mit
| 601
|
import _plotly_utils.basevalidators
class ThicknessmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="thicknessmode",
parent_name="densitymapbox.colorbar",
**kwargs
):
super(ThicknessmodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["fraction", "pixels"]),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/densitymapbox/colorbar/_thicknessmode.py
|
Python
|
mit
| 587
|
#Camera App
#copyright (c) 2015 Tyler Spadgenske
# MIT License
import sys
import pygame
import picamera
import io
import yuv2rgb
import os
import time
from subprocess import Popen
class Stream():
def __init__(self):
self.mode = 'capture'
self.deleted = False
self.uploading = False
self.no_files = False
#Get current photo name index
try:
index_file = open('/home/pi/index.dat', 'r')
        except IOError:
#Create new file if needed
index_file = open('/home/pi/index.dat', 'w+')
index_file.write('0')
index_file.close()
index_file = open('/home/pi/index.dat')
print 'NO INDEX FILE. CREATED /home/pi/index.dat'
self.index = int(index_file.readline())
index_file.close()
#Set screen to SPI
os.environ["SDL_FBDEV"] = "/dev/fb1"
os.environ["SDL_MOUSEDEV"] = "/dev/input/touchscreen" #Use touchscreen instead of event0
os.environ["SDL_MOUSEDRV"] = "TSLIB"
        #Configure camera
self.camera = picamera.PiCamera()
self.camera.resolution = (320, 480)
self.camera.rotation = 90
# Buffers for viewfinder data
self.rgb = bytearray(320 * 480 * 3)
self.yuv = bytearray(320 * 480 * 3 / 2)
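        #RGB is 3 bytes per pixel; YUV420 packs 12 bits (1.5 bytes) per
        #pixel, hence the 3/2 factor above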
#Setup window
self.screen = pygame.display.set_mode((320, 480), pygame.FULLSCREEN)
pygame.mouse.set_visible(False)
#Setup buttons
self.capture = pygame.image.load('/home/pi/tyos/apps/camera/camera.png')
self.gallery = pygame.image.load('/home/pi/tyos/apps/camera/images/gallery.png')
self.door = pygame.image.load('/home/pi/tyos/apps/camera/images/door.png')
self.right = pygame.image.load('/home/pi/tyos/apps/camera/images/right.png')
self.left = pygame.image.load('/home/pi/tyos/apps/camera/images/left.png')
self.home = pygame.image.load('/home/pi/tyos/apps/camera/images/home.png')
self.upload = pygame.image.load('/home/pi/tyos/apps/camera/images/upload.png')
self.delete = pygame.image.load('/home/pi/tyos/apps/camera/images/trash.png')
self.deleted_image = pygame.image.load('/home/pi/tyos/apps/camera/images/deleted.png')
self.uploading_image = pygame.image.load('/home/pi/tyos/apps/camera/images/uploading.png')
self.no_files_image = pygame.image.load('/home/pi/tyos/apps/camera/images/nofiles.png')
def display(self):
while True:
if self.mode == 'gallery':
self.screen.blit(self.image_in_view, (0,0))
self.screen.blit(self.left, (20, 410))
self.screen.blit(self.right, (240, 410))
self.screen.blit(self.home, (125, 400))
self.screen.blit(self.delete, (5, 5))
self.screen.blit(self.upload, (40, 5))
if self.deleted:
self.screen.blit(self.deleted_image, (79, 200))
if time.time() - self.delete_time > 3:
self.deleted = False
if self.uploading:
self.screen.blit(self.uploading_image, (79, 200))
if time.time() - self.uploading_time > 6:
self.uploading = False
if self.mode == 'capture':
#Get camera stream
self.stream = io.BytesIO() # Capture into in-memory stream
self.camera.capture(self.stream, use_video_port=True, format='raw')
self.stream.seek(0)
self.stream.readinto(self.yuv) # stream -> YUV buffer
self.stream.close()
yuv2rgb.convert(self.yuv, self.rgb, 320, 480)
#Create pygame image from screen and blit it
img = pygame.image.frombuffer(self.rgb[0:(320 * 480 * 3)], (320, 480), 'RGB')
self.screen.blit(img, (0,0))
#Blit buttons
self.screen.blit(self.capture, (125, 400))
self.screen.blit(self.gallery, (20, 415))
self.screen.blit(self.door, (240, 410))
if self.no_files:
self.screen.blit(self.no_files_image, (79, 200))
if time.time() - self.files_time > 3:
self.no_files = False
pygame.display.update()
#Handle events
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONUP:
if self.mode == 'gallery':
if event.pos[1] < 40 and event.pos[0] < 35:
self.deleted = True
self.delete_time = time.time()
os.remove('/home/pi/Photos/' + self.images[self.current_image])
self.current_image = 0
self.images = os.listdir('/home/pi/Photos/')
                            if len(self.images) == 0:
                                self.mode = 'capture'
                                self.no_files = True
                                self.files_time = time.time()
                            else:
                                #Refresh the displayed photo after the delete
                                self.image_in_view = pygame.image.load('/home/pi/Photos/' + self.images[self.current_image])
if event.pos[1] < 40 and event.pos[0] > 35 and event.pos[0] < 75:
self.uploading = True
self.uploading_time = time.time()
cam = Popen(['/home/pi/Dropbox-Uploader/./dropbox_uploader.sh', 'upload', '/home/pi/Photos/' +
self.images[self.current_image], self.images[self.current_image]])
if event.pos[1] > 400 and event.pos[1] < 470:
if event.pos[0] > 125 and event.pos[0] < 195:
if self.mode == 'capture':
self.camera.capture('/home/pi/Photos/' + str(self.index) + '.jpg')
self.index += 1
if self.mode == 'gallery':
self.mode = 'capture'
if event.pos[0] < 70:
if self.mode == 'capture':
self.mode = 'gallery'
self.current_image = 0
self.images = os.listdir('/home/pi/Photos/')
if len(self.images) == 0:
self.mode = 'capture'
self.no_files = True
self.files_time = time.time()
else:
self.image_in_view = pygame.image.load('/home/pi/Photos/' + self.images[self.current_image])
if self.mode == 'gallery':
self.current_image -= 1
if self.current_image == -1:
self.current_image = len(self.images) - 1
self.image_in_view = pygame.image.load('/home/pi/Photos/' + self.images[self.current_image])
if event.pos[0] > 255:
if self.mode == 'capture':
print 'exiting...'
os.remove('/home/pi/index.dat')
new = open('/home/pi/index.dat', 'w+')
new.write(str(self.index))
new.close()
cam = Popen(['sudo', 'python', '/home/pi/tyos/src/main.py'])
pygame.quit()
sys.exit()
if self.mode == 'gallery':
if self.current_image == len(self.images) - 1:
self.current_image = 0
else:
self.current_image += 1
self.image_in_view = pygame.image.load('/home/pi/Photos/' + self.images[self.current_image])
if __name__ == '__main__':
q = Stream()
q.display()
|
spadgenske/TYOS
|
apps/camera/app.py
|
Python
|
mit
| 8,419
|
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.postgres.search import SearchVector, SearchQuery, SearchRank
from django.core.urlresolvers import reverse
from django.db import models
from github import UnknownObjectException
from social.apps.django_app.default.models import UserSocialAuth
from documents.tasks.wiki_processor import process_wiki
from interface.utils import get_github
from interface.path_processor import PathProcessor
class UserProxy(User):
class Meta:
proxy = True
def get_auth(self):
try:
data = UserSocialAuth.objects.filter(user=self).values_list('extra_data')[0][0]
        except IndexError:
return None
username = data['login']
password = data['access_token']
return (username, password)
class Repo(models.Model):
user = models.ForeignKey(UserProxy, related_name='repos')
full_name = models.TextField(unique=True)
webhook_id = models.IntegerField(null=True, blank=True)
is_private = models.BooleanField(default=True)
wiki_branch = models.TextField(default='master')
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['full_name']
def __str__(self):
return self.full_name
def get_absolute_url(self):
return reverse('repo_detail', kwargs={'full_name': self.full_name})
@property
def clone_url(self):
return 'https://github.com/{}.git'.format(self.full_name)
def delete(self, *args, **kwargs):
self.remove_webhook()
return super(Repo, self).delete(*args, **kwargs)
def remove_webhook(self):
if not settings.DEBUG:
g = get_github(self.user)
grepo = g.get_repo(self.full_name)
try:
hook = grepo.get_hook(self.webhook_id)
hook.delete()
except UnknownObjectException:
pass
self.webhook_id = None
self.save()
def user_is_collaborator(self, user):
if not user.is_authenticated():
return False
if self.user == user or user.is_staff:
return True
g = get_github(user)
grepo = g.get_repo(self.full_name)
guser = g.get_user(user.username)
return grepo.has_in_collaborators(guser)
def add_webhook(self, request):
if settings.DEBUG:
self.webhook_id = 123
else:
g = get_github(self.user)
grepo = g.get_repo(self.full_name)
hook = grepo.create_hook(
'web',
{
'content_type': 'json',
'url': request.build_absolute_uri(reverse('hooksgithub')),
'secret': settings.WEBHOOK_SECRET
},
events=['push'],
active=True
)
self.webhook_id = hook.id
self.save()
@property
def directory(self):
path_processor = PathProcessor(self.full_name, is_directory=True)
return path_processor.repo_disk_path
def enqueue(self, file_change=None):
file_change = file_change or {}
process_wiki.delay(self.id, file_change)
def get_folder_contents(self, path, documents):
folders = []
docs = []
for document in documents:
doc_path = document.path
if path != '/':
doc_path = doc_path.replace(path, '')
if not doc_path.startswith('/'):
doc_path = '/{}'.format(doc_path)
if doc_path == '/':
docs.append(document.filename)
else:
first_seg = doc_path.split('/', maxsplit=2)[1]
if first_seg:
folder_name = '{}/'.format(first_seg)
if folder_name not in folders:
folders.append(folder_name)
folders = sorted(folders)
docs = sorted(docs)
folders.extend(docs)
return folders
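    # Worked example (hypothetical data): with path='/' and two documents
    # whose (path, filename) are ('/', 'Home.md') and ('/guides', 'Intro.md'),
    # get_folder_contents returns ['guides/', 'Home.md'] -- folders first,
    # then root-level files, each group sorted.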
|
ZeroCater/Eyrie
|
interface/models.py
|
Python
|
mit
| 4,056
|
# Explore some possibilities for optimizing the grid.
from __future__ import print_function
from . import (paver,trigrid,orthomaker)
from ..spatial import field
import sys
import numpy as np # from numpy import *
from scipy.linalg import norm
# from pylab import *
import matplotlib.pyplot as plt
class OptimizeGui(object):
def __init__(self,og):
self.og = og
self.p = og.p
def run(self):
self.usage()
self.fig = plt.figure()
self.p.plot(boundary=True)
plt.axis('equal')
self.cid = self.fig.canvas.mpl_connect('key_press_event', self.onpress)
self.cid_mouse = self.fig.canvas.mpl_connect('button_release_event',self.on_buttonrelease)
def usage(self):
print("t: flip edge")
print("y: relax neighborhood")
print("u: regrid at point")
print("i: relax node")
print("p: show bad cells")
def flip_edge_at_point(self,pnt):
"""assumes an og instance is visible """
e = self.p.closest_edge(pnt)
self.p.flip_edge(e)
self.p.plot()
def optimize_at_point(self,pnt):
c = self.p.closest_cell(pnt)
nbr = self.og.cell_neighborhood(c,2)
self.og.relax_neighborhood(nbr)
self.p.plot()
last_node_relaxed = None
def relax_node_at_point(self,pnt):
v = self.p.closest_point(pnt)
if v == self.last_node_relaxed:
print("Allowing beta")
self.p.safe_relax_one(v,use_beta=1)
else:
print("No beta")
self.p.safe_relax_one(v)
self.p.plot()
self.last_node_relaxed = v
def regrid_at_point(self,pnt):
c = self.p.closest_cell(pnt)
self.og.repave_neighborhood(c,scale_factor=0.8)
self.p.plot()
def show_bad_cells(self):
bad = np.nonzero( self.og.cell_scores() <0.1 )[0]
vc = self.p.vcenters()
ax = plt.axis()
plt.gca().texts = []
[plt.annotate(str(i),vc[i]) for i in bad]
plt.axis(ax)
def onpress(self,event):
if event.inaxes is not None:
print(event.xdata,event.ydata,event.key)
pnt = np.array( [event.xdata,event.ydata] )
if event.key == 't':
self.flip_edge_at_point(pnt)
print("Returned from calcs")
elif event.key == 'y':
self.optimize_at_point(pnt)
print("Returned from calcs")
elif event.key == 'u':
self.regrid_at_point(pnt)
elif event.key == 'i':
self.relax_node_at_point(pnt)
elif event.key == 'p':
self.show_bad_cells()
return True
last_axis = None
def on_buttonrelease(self,event):
if event.inaxes is not None:
if plt.axis() != self.last_axis:
print("Viewport has changed")
self.last_axis = plt.axis()
self.p.default_clip = self.last_axis
# simple resolution-dependent plotting:
if self.last_axis[1] - self.last_axis[0] > 3000:
self.p.plot(boundary=True)
else:
self.p.plot(boundary=False)
else:
print("button release but axis is the same")
class GridOptimizer(object):
def __init__(self,p):
self.p = p
self.original_density = p.density
# These are the values that, as needed, are used to construct a reduced scale
# apollonius field. since right now ApolloniusField doesn't support insertion
# and updates, we keep it as an array and recreate the field on demand.
valid = np.isfinite(p.points[:,0])
xy_min = p.points[valid].min(axis=0)
xy_max = p.points[valid].max(axis=0)
self.scale_reductions = np.array( [ [xy_min[0],xy_min[1],1e6],
[xy_min[0],xy_max[1],1e6],
[xy_max[0],xy_max[1],1e6],
[xy_max[0],xy_min[1],1e6]] )
apollo_rate = 1.1
def update_apollonius_field(self):
""" create an apollonius graph using the points/scales in self.scale_reductions,
and install it in self.p.
"""
if len(self.scale_reductions) == 0:
self.apollo = None
return
self.apollo = field.ApolloniusField(self.scale_reductions[:,:2],
self.scale_reductions[:,2],
r=self.apollo_rate)
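        # Compose the reduced field with the original density. Assuming
        # BinopField applies the binary op pointwise, np.minimum ensures the
        # Apollonius reductions can only ever shrink the local scale.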
        self.p.density = field.BinopField( self.original_density,
                                           np.minimum,
                                           self.apollo )
# Explore a cost function based on voronoi-edge distance
def cell_scores(self,cell_ids=None,use_original_density=True):
""" Return scores for each cell, based on the minimum distance from the
voronoi center to an edge, normalized by local scale
invalid cells (i.e. have been deleted) get inf score
use_original_density: defaults to evaluating local scale using the
original density field. If set to false, use the current density
field of the paver (which may be a reduced Apollonius Graph field)
"""
p=self.p
if cell_ids is None:
cell_ids = np.arange(p.Ncells())
valid = (p.cells[cell_ids,0]>=0)
vc = p.vcenters()[cell_ids]
local_scale = np.zeros( len(cell_ids), np.float64)
if use_original_density:
local_scale[valid] = self.original_density( vc[valid,:] )
else:
local_scale[valid] = p.density( vc[valid,:] )
local_scale[~valid] = 1.0 # dummy
#
cell_scores = np.inf*np.ones( len(cell_ids) )
# 3 edge centers for every cell
ec1 = 0.5*(p.points[p.cells[cell_ids,0]] + p.points[p.cells[cell_ids,1]])
ec2 = 0.5*(p.points[p.cells[cell_ids,1]] + p.points[p.cells[cell_ids,2]])
ec3 = 0.5*(p.points[p.cells[cell_ids,2]] + p.points[p.cells[cell_ids,0]])
d1 = ((vc - ec1)**2).sum(axis=1)
d2 = ((vc - ec2)**2).sum(axis=1)
d3 = ((vc - ec3)**2).sum(axis=1)
# could be smarter and ignore boundary edges.. later.
# this also has the downside that as we refine the scales, the scores
# get worse. Maybe it would be better to compare the minimum ec value to
# the mean or maximum, say (max(ec) - min(ec)) / med(ec)
        # np.minimum is a binary ufunc (a third positional argument would be
        # treated as the output array), so nest the calls for a 3-way minimum
        scores = np.sqrt(np.minimum(np.minimum(d1, d2), d3)) / local_scale
scores[~valid] = np.inf
return scores
def relax_neighborhood(self,nodes):
""" starting from the given set of nodes, relax in the area until the neighborhood score
stops going down.
"""
cells = set()
for n in nodes:
cells = cells.union( self.p.pnt2cells(n) )
cells = np.array(list(cells))
starting_worst = self.cell_scores(cells).min()
worst = starting_worst
while 1:
cp = self.p.checkpoint()
for n in nodes:
self.p.safe_relax_one(n)
new_worst = self.cell_scores(cells).min()
sys.stdout.write('.') ; sys.stdout.flush()
if new_worst < worst:
#print "That made it even worse."
self.p.revert(cp)
new_worst = worst
break
if new_worst < 1.01*worst:
#print "Not getting any better. ===> %g"%new_worst
break
worst = new_worst
print("Relax: %g => %g"%(starting_worst,new_worst))
self.p.commit()
def cell_neighborhood_apollo(self,c):
""" return the nodes near the given cell
the apollo version means the termination condition is based on the
expected radius of influence of a reduced scale at c
"""
vcs = self.p.vcenters()
c_vc = self.p.vcenters()[c]
orig_scale = self.original_density(c_vc)
apollo_scale = self.apollo(c_vc)
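        # Assuming the Apollonius field telescopes linearly away from an
        # insertion point, scale(d) ~= apollo_scale + (r - 1)*d, which climbs
        # back to the original background scale at
        # d = (orig_scale - apollo_scale) / (r - 1):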
r = (orig_scale - apollo_scale) / (self.apollo.r - 1)
print("Will clear out a radius of %f"%r)
c_set = set()
def dfs(cc):
if cc in c_set:
return
if np.linalg.norm(vcs[cc] - c_vc) > r:
return
c_set.add(cc)
for child in self.p.cell_neighbors(cc):
dfs(child)
dfs(c)
cell_list = np.array(list(c_set))
return np.unique( self.p.cells[cell_list,:] )
def cell_neighborhood(self,c,nbr_count=2):
""" return the nodes near the given cell
if use_apollo is true, the termination condition is based on where
the apollonius scale is larger than the original scale
"""
c_set = set()
def dfs(cc,i):
if cc in c_set:
return
c_set.add(cc)
if i > 0:
for child in self.p.cell_neighbors(cc):
dfs(child,i-1)
dfs(c,nbr_count)
cell_list = np.array(list(c_set))
return np.unique( self.p.cells[cell_list,:] )
def relax_neighborhoods(self,score_threshold=0.1,count_threshold=5000,
neighborhood_size=2):
""" find the worst scores and try just relaxing in the general vicinity
"""
all_scores = self.cell_scores()
ranking = np.argsort(all_scores)
count = 0
while 1:
c = ranking[count]
score = all_scores[c]
if score > score_threshold:
break
nbr = self.cell_neighborhood(c,neighborhood_size)
self.relax_neighborhood(nbr)
count += 1
if count >= count_threshold:
break
# if set, then scale reductions will be handled through an Apollonius Graph
# otherwise, the density field is temporarily scaled down everywhere.
use_apollo = False
def repave_neighborhoods(self,score_threshold=0.1,count_threshold=5000,
neighborhood_size=3,
scale_factor=None):
""" find the worst scores and try just repaving the general vicinity
see repave_neighborhood for use of scale_factor.
if use_apollo is true, neighborhood size is ignored and instead the
neighborhood is defined by the telescoping ratio
"""
all_scores = self.cell_scores()
ranking = np.argsort(all_scores)
## neighborhoods may overlap - and cells might get deleted. Keep a
# record of cells that get deleted, and skip them later on.
# this could get replaced by a priority queue, and we would just update
# metrics as we go.
expired_cells = {}
def expire_cell(dc):
expired_cells[dc] = 1
cb_id = self.p.listen('delete_cell',expire_cell)
if self.use_apollo and scale_factor is not None and scale_factor < 1.0:
# Add reduction points for all cells currently over the limit
to_reduce = np.nonzero(all_scores<score_threshold)[0]
centers = self.p.vcenters()[to_reduce]
orig_scales = self.original_density( centers )
new_scales = scale_factor * orig_scales
            xyz = np.concatenate( [centers,new_scales[:,np.newaxis]], axis=1)
self.scale_reductions = np.concatenate( [self.scale_reductions,xyz])
print( "Installing new Apollonius Field...")
self.update_apollonius_field()
print( "... Done")
count = 0
while 1:
c = ranking[count]
print("Considering cell %d"%c)
count += 1
# cell may have been deleted during other repaving
if self.p.cells[c,0] < 0:
print("It's been deleted")
continue
            if c in expired_cells:
print("It had been deleted, and some other cell has taken its place")
continue
# cell may have been updated during other repaving
# note that it's possible that this cell was made a bit better,
# but still needs to be repaved. For now, don't worry about that
# because we probably want to relax the neighborhood before a second
# round of repaving.
            if self.cell_scores(np.array([c]))[0] > all_scores[c]:
continue
score = all_scores[c]
if score > score_threshold:
break
# also, this cell may have gotten updated by another repaving -
# in which case we probably want
print( "Repaving a neighborhood")
self.repave_neighborhood(c,neighborhood_size=neighborhood_size,scale_factor=scale_factor)
print( "Done")
if count >= count_threshold:
break
self.p.unlisten(cb_id)
# a more heavy-handed approach -
# remove the neighborhood and repave
def repave_neighborhood(self,c,neighborhood_size=3,scale_factor=None,nbr_nodes=None):
"""
c: The cell around which to repave
n_s: how big the neighborhood is around the cell
scale_factor: if specified, a factor to be applied to the density field
during the repaving.
nbr_nodes: if specified, exactly these nodes will be removed (with their edges and
the cells belonging to those edges). otherwise, a neighborhood will be built up around
c.
"""
print("Top of repave_neighborhood - c = %d"%c)
        starting_score = self.cell_scores(np.array([c]))[0]
p = self.p
if nbr_nodes is None:
if scale_factor is not None and self.use_apollo and scale_factor < 1.0:
print( "dynamically defining neighborhood based on radius of Apollonius Graph influence")
nbr_nodes = self.cell_neighborhood_apollo(c)
else:
nbr_nodes = self.cell_neighborhood(c,neighborhood_size)
# delete all non boundary edges going to these nodes
edges_to_kill = np.unique( np.concatenate( [p.pnt2edges(n) for n in nbr_nodes] ) )
# but don't remove boundary edges:
# check both that it has cells on both sides, but also that it's not an
# internal guide edge
to_remove = (p.edges[edges_to_kill,4] >= 0) & (p.edge_data[edges_to_kill,1] < 0)
edges_to_kill = edges_to_kill[ to_remove]
for e in edges_to_kill:
# print "Deleting edge e=%d"%e
p.delete_edge(e,handle_unpaved=1)
# the nodes that are not on the boundary get deleted:
for n in nbr_nodes:
# node_on_boundary includes internal_guides, so this should be okay.
if p.node_on_boundary(n):
# SLIDE nodes are reset to HINT so that we're free to resample
# the boundary
if p.node_data[n,paver.STAT] == paver.SLIDE:
# print "Setting node n=%d to HINT"%n
p.node_data[n,paver.STAT] = paver.HINT
else:
# print "Deleting node n=%d"%n
p.delete_node(n)
old_ncells = p.Ncells()
saved_density = None
if scale_factor is not None:
if not ( self.use_apollo and scale_factor<1.0):
saved_density = p.density
p.density = p.density * scale_factor
print("Repaving...")
        p.pave_all(n_steps=np.inf) # n_steps will keep it from renumbering afterwards
if saved_density is not None:
p.density = saved_density
new_cells = np.arange(old_ncells,p.Ncells())
new_scores = self.cell_scores(new_cells)
print("Repave: %g => %g"%(starting_score,new_scores.min()))
def full(self):
self.p.verbose = 0
self.relax_neighborhoods()
self.repave_neighborhoods(neighborhood_size=2)
self.relax_neighborhoods()
self.repave_neighborhoods(neighborhood_size=2,scale_factor=0.9)
self.repave_neighborhoods(neighborhood_size=2,scale_factor=0.8)
self.repave_neighborhoods(neighborhood_size=3,scale_factor=0.8)
self.repave_neighborhoods(neighborhood_size=3,scale_factor=0.75)
self.relax_neighborhoods()
for i in range(10):
scores = self.cell_scores()
print("iteration %d, %d bad cells"%(i, (scores<0.1).sum() ))
self.p.write_complete('iter%02d.pav'%i)
self.repave_neighborhoods(neighborhood_size=2,scale_factor=0.7)
self.p.write_complete('iter%02d-repaved.pav'%i)
self.relax_neighborhoods(neighborhood_size=5)
self.stats()
def stats(self):
scores = self.cell_scores()
print("Total cells with score below 0.1: %d"%( (scores<0.1).sum() ))
def gui(self):
""" A very simple gui for hand-optimizing.
"""
g = OptimizeGui(self)
g.run()
return g
if __name__ == '__main__':
p = paver.Paving.load_complete('/home/rusty/classes/research/suntans/grids/fullbay-0610/final.pav')
p.clean_unpaved()
opter = GridOptimizer(p)
opter.stats()
opter.full()
opter.stats()
# down to 25.
## Note on using the Apollonius graph for modifying the scale field:
# The idea is that rather than temporarily scaling down the density
# field to repave a subset of the grid, insert new AG points that will
# blend the reduced scale back into the background scale.
# This field would be persistent throughout the optimization.
|
rustychris/stompy
|
stompy/grid/optimize_grid.py
|
Python
|
mit
| 18,033
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "quotoxic.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
benjsto/ronquixote
|
manage.py
|
Python
|
mit
| 252
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"quickly.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
wearespindle/quickly.press
|
manage.py
|
Python
|
mit
| 276
|
import pymysql
from flask_restful import Resource
from flask import abort
ALLOWED_SHOW = ('processlist', 'databases', 'plugins', 'privileges')
class Mysql(Resource):
def __init__(self):
self.connection = pymysql.connect(user='root')
self.cursor = self.connection.cursor()
def _execute(self, sql):
self.cursor.execute(sql)
desc_id = tuple(x[0] for x in self.cursor.description)
query_result = self.cursor.fetchall()
results = [dict(zip(desc_id, item)) for item in query_result]
return results
def get(self, cmd):
if cmd in ALLOWED_SHOW:
return self._execute('show ' + cmd)
else:
abort(404)
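# Note: MysqlDatabase below interpolates `dbname` directly into DDL (database
# names cannot be bound as query parameters), so these routes must only be
# exposed to trusted callers. A minimal guard sketch (hypothetical helper):
#
#     import re
#     def _safe_identifier(name):
#         if not re.match(r'^[A-Za-z0-9_]+$', name):
#             abort(400, 'invalid database name')
#         return name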
class MysqlDatabase(Mysql):
def get(self, dbname):
try:
self.connection.select_db(dbname)
except pymysql.InternalError as e:
abort(400, e.args)
return self._execute('show tables')
def post(self, dbname):
try:
self.cursor.execute('create database ' + dbname)
except pymysql.ProgrammingError as e:
abort(400, e.args)
def delete(self, dbname):
try:
self.cursor.execute('drop database if exists ' + dbname)
except pymysql.ProgrammingError as e:
abort(400, e.args)
|
natict/roomservice
|
roomservice/mysql.py
|
Python
|
mit
| 1,306
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationGatewayWebApplicationFirewallConfiguration(Model):
"""Application gateway web application firewall configuration.
All required parameters must be populated in order to send to Azure.
:param enabled: Required. Whether the web application firewall is enabled
or not.
:type enabled: bool
:param firewall_mode: Required. Web application firewall mode. Possible
values include: 'Detection', 'Prevention'
:type firewall_mode: str or
~azure.mgmt.network.v2017_11_01.models.ApplicationGatewayFirewallMode
:param rule_set_type: Required. The type of the web application firewall
rule set. Possible values are: 'OWASP'.
:type rule_set_type: str
:param rule_set_version: Required. The version of the rule set type.
:type rule_set_version: str
:param disabled_rule_groups: The disabled rule groups.
:type disabled_rule_groups:
list[~azure.mgmt.network.v2017_11_01.models.ApplicationGatewayFirewallDisabledRuleGroup]
"""
_validation = {
'enabled': {'required': True},
'firewall_mode': {'required': True},
'rule_set_type': {'required': True},
'rule_set_version': {'required': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'firewall_mode': {'key': 'firewallMode', 'type': 'str'},
'rule_set_type': {'key': 'ruleSetType', 'type': 'str'},
'rule_set_version': {'key': 'ruleSetVersion', 'type': 'str'},
'disabled_rule_groups': {'key': 'disabledRuleGroups', 'type': '[ApplicationGatewayFirewallDisabledRuleGroup]'},
}
def __init__(self, **kwargs):
super(ApplicationGatewayWebApplicationFirewallConfiguration, self).__init__(**kwargs)
self.enabled = kwargs.get('enabled', None)
self.firewall_mode = kwargs.get('firewall_mode', None)
self.rule_set_type = kwargs.get('rule_set_type', None)
self.rule_set_version = kwargs.get('rule_set_version', None)
self.disabled_rule_groups = kwargs.get('disabled_rule_groups', None)
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/application_gateway_web_application_firewall_configuration.py
|
Python
|
mit
| 2,579
|