| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 5-92 | stringlengths 4-221 | stringclasses 19 values | stringlengths 4-6 | stringlengths 766-896k | stringclasses 15 values | int64 -9,223,277,421,539,062,000 to 9,223,102,107B | float64 6.51-99.9 | int64 32-997 | float64 0.25-0.96 | bool 1 class | float64 1.5-13.6 | bool 2 classes | bool 2 classes | bool 1 class |
repo_name: gihankarunarathne/udp | path: EXTRACTFLO2DWATERLEVEL.py | copies: 1 | size: 22923 | content:
#!/usr/bin/python3
import csv
import getopt
import json
import os
import sys
import traceback
import copy
from datetime import datetime, timedelta
from os.path import join as pjoin
from curwmysqladapter import MySQLAdapter
import Constants
from LIBFLO2DWATERLEVELGRID import getWaterLevelOfChannels
from Util.LibForecastTimeseries import extractForecastTimeseries
from Util.LibForecastTimeseries import extractForecastTimeseriesInDays
from Util.Utils import getUTCOffset
def usage():
usageText = """
Usage: ./EXTRACTFLO2DWATERLEVEL.py [-d YYYY-MM-DD] [-t HH:MM:SS] [-p -o -h] [-S YYYY-MM-DD] [-T HH:MM:SS]
-h --help Show usage
-f --forceInsert Force Insert into the database. May override existing values.
-F --flo2d_config Configuration for FLO2D model run
-d --date Model State Date in YYYY-MM-DD. Default is current date.
-t --time Model State Time in HH:MM:SS. If -d passed, then default is 00:00:00. Otherwise Default is current time.
-S --start_date Base Date of FLO2D model output in YYYY-MM-DD format. Default is same as -d option value.
-T --start_time Base Time of FLO2D model output in HH:MM:SS format. Default is set to 00:00:00
-p --path FLO2D model path which includes HYCHAN.OUT
-o --out Suffix for 'water_level-<SUFFIX>' and 'water_level_grid-<SUFFIX>' output directories.
Default is 'water_level-<YYYY-MM-DD>' and 'water_level_grid-<YYYY-MM-DD>' same as -d option value.
-n --name Name field value of the Run table in Database. Use time format such as 'Cloud-1-<%H:%M:%S>' to replace with time(t).
-u --utc_offset UTC offset of current timestamps. "+05:30" or "-10:00". Default value is "+00:00".
"""
print(usageText)
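# Example invocation (hypothetical date, time and path values):
#   ./EXTRACTFLO2DWATERLEVEL.py -d 2017-05-01 -t 08:00:00 -p FLO2D_2017-05-01 -f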
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def save_forecast_timeseries(my_adapter, my_timeseries, my_model_date, my_model_time, my_opts):
print('EXTRACTFLO2DWATERLEVEL:: save_forecast_timeseries >>', my_opts)
# Convert date time with offset
date_time = datetime.strptime('%s %s' % (my_model_date, my_model_time), Constants.COMMON_DATE_TIME_FORMAT)
if 'utcOffset' in my_opts:
date_time = date_time + my_opts['utcOffset']
my_model_date = date_time.strftime('%Y-%m-%d')
my_model_time = date_time.strftime('%H:%M:%S')
# If there is an offset, shift by the offset before proceeding
forecast_timeseries = []
if 'utcOffset' in my_opts:
print('Shift by utcOffset:', my_opts['utcOffset'])
for item in my_timeseries:
forecast_timeseries.append(
[datetime.strptime(item[0], Constants.COMMON_DATE_TIME_FORMAT) + my_opts['utcOffset'], item[1]])
forecast_timeseries = extractForecastTimeseries(forecast_timeseries, my_model_date, my_model_time, by_day=True)
else:
forecast_timeseries = extractForecastTimeseries(my_timeseries, my_model_date, my_model_time, by_day=True)
# print(forecast_timeseries[:10])
extracted_timeseries = extractForecastTimeseriesInDays(forecast_timeseries)
# for ll in extractedTimeseries :
# print(ll)
# Check whether the station exists
force_insert = my_opts.get('forceInsert', False)
station = my_opts.get('station', '')
is_station_exists = my_adapter.get_station({'name': station})
if is_station_exists is None:
print('WARNING: Station %s does not exist. Continue with others.' % station)
return
# TODO: Create if station does not exist.
run_name = my_opts.get('run_name', 'Cloud-1')
less_char_index = run_name.find('<')
greater_char_index = run_name.find('>')
if -1 < less_char_index < greater_char_index:
start_str = run_name[:less_char_index]
date_format_str = run_name[less_char_index + 1:greater_char_index]
end_str = run_name[greater_char_index + 1:]
try:
date_str = date_time.strftime(date_format_str)
run_name = start_str + date_str + end_str
except ValueError:
raise ValueError("Incorrect data format " + date_format_str)
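# e.g. with run_name 'Cloud-1-<%H:%M:%S>' (the format suggested in the usage text)
# and date_time 2017-05-01 08:00:00, the stored Run name becomes 'Cloud-1-08:00:00'.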
types = [
'Forecast-0-d',
'Forecast-1-d-after',
'Forecast-2-d-after',
'Forecast-3-d-after',
'Forecast-4-d-after',
'Forecast-5-d-after',
'Forecast-6-d-after',
'Forecast-7-d-after',
'Forecast-8-d-after',
'Forecast-9-d-after',
'Forecast-10-d-after',
'Forecast-11-d-after',
'Forecast-12-d-after',
'Forecast-13-d-after',
'Forecast-14-d-after'
]
meta_data = {
'station': station,
'variable': 'WaterLevel',
'unit': 'm',
'type': types[0],
'source': 'FLO2D',
'name': run_name
}
for i in range(0, min(len(types), len(extracted_timeseries))):
meta_data_copy = copy.deepcopy(meta_data)
meta_data_copy['type'] = types[i]
event_id = my_adapter.get_event_id(meta_data_copy)
if event_id is None:
event_id = my_adapter.create_event_id(meta_data_copy)
print('HASH SHA256 created: ', event_id)
else:
print('HASH SHA256 exists: ', event_id)
if not force_insert:
print('Timeseries already exists. Use -f/--forceInsert to update the existing.\n')
continue
# for l in timeseries[:3] + timeseries[-2:] :
# print(l)
row_count = my_adapter.insert_timeseries(event_id, extracted_timeseries[i], force_insert)
print('%s rows inserted.\n' % row_count)
# -- END OF SAVE_FORECAST_TIMESERIES
try:
CONFIG = json.loads(open('CONFIG.json').read())
CWD = os.getcwd()
HYCHAN_OUT_FILE = 'HYCHAN.OUT'
BASE_OUT_FILE = 'BASE.OUT'
WATER_LEVEL_FILE = 'water_level.txt'
WATER_LEVEL_DIR = 'water_level'
OUTPUT_DIR = 'OUTPUT'
RUN_FLO2D_FILE = 'RUN_FLO2D.json'
UTC_OFFSET = '+00:00:00'
MYSQL_HOST = "localhost"
MYSQL_USER = "root"
MYSQL_DB = "curw"
MYSQL_PASSWORD = ""
if 'HYCHAN_OUT_FILE' in CONFIG:
HYCHAN_OUT_FILE = CONFIG['HYCHAN_OUT_FILE']
if 'BASE_OUT_FILE' in CONFIG:
BASE_OUT_FILE = CONFIG['BASE_OUT_FILE']
if 'WATER_LEVEL_FILE' in CONFIG:
WATER_LEVEL_FILE = CONFIG['WATER_LEVEL_FILE']
if 'OUTPUT_DIR' in CONFIG:
OUTPUT_DIR = CONFIG['OUTPUT_DIR']
if 'MYSQL_HOST' in CONFIG:
MYSQL_HOST = CONFIG['MYSQL_HOST']
if 'MYSQL_USER' in CONFIG:
MYSQL_USER = CONFIG['MYSQL_USER']
if 'MYSQL_DB' in CONFIG:
MYSQL_DB = CONFIG['MYSQL_DB']
if 'MYSQL_PASSWORD' in CONFIG:
MYSQL_PASSWORD = CONFIG['MYSQL_PASSWORD']
adapter = MySQLAdapter(host=MYSQL_HOST, user=MYSQL_USER, password=MYSQL_PASSWORD, db=MYSQL_DB)
# TODO: Pass source name as a parameter to script
flo2d_source = adapter.get_source(name='FLO2D')
try:
flo2d_source = json.loads(flo2d_source.get('parameters', "{}"))
except Exception as e:
print(e)
traceback.print_exc()
CHANNEL_CELL_MAP = {}
if 'CHANNEL_CELL_MAP' in flo2d_source:
CHANNEL_CELL_MAP = flo2d_source['CHANNEL_CELL_MAP']
FLOOD_PLAIN_CELL_MAP = {}
if 'FLOOD_PLAIN_CELL_MAP' in flo2d_source:
FLOOD_PLAIN_CELL_MAP = flo2d_source['FLOOD_PLAIN_CELL_MAP']
"""
{
"CHANNEL_CELL_MAP": {
"179": "Wellawatta",
"221": "Dehiwala",
"592": "Torington",
"616": "N'Street-Canal",
"618": "N'Street-River",
"684": "Dematagoda-Canal",
"814": "Heen Ela",
"1062": "Kolonnawa-Canal",
"991": "kittampahuwa-Out",
"1161": "Kittampahuwa-River",
"1515": "Parliament Lake Bridge-Kotte Canal",
"2158": "Parliament Lake-Out",
"2396": "Salalihini-River",
"2496": "Salalihini-Canal",
"3580": "Madiwela-Out",
"3673": "Ambathale"
},
"FLOOD_PLAIN_CELL_MAP": {
"2265": "Parliament Lake",
"3559": "Madiwela-US"
}
}
"""
ELEMENT_NUMBERS = CHANNEL_CELL_MAP.keys()
FLOOD_ELEMENT_NUMBERS = FLOOD_PLAIN_CELL_MAP.keys()
SERIES_LENGTH = 0
MISSING_VALUE = -999
date = ''
time = ''
path = ''
output_suffix = ''
start_date = ''
start_time = ''
flo2d_config = ''
run_name_default = 'Cloud-1'
runName = ''
utc_offset = ''
forceInsert = False
try:
opts, args = getopt.getopt(sys.argv[1:], "hF:d:t:p:o:S:T:fn:u:",
["help", "flo2d_config=", "date=", "time=", "path=", "out=", "start_date=",
"start_time=", "name=", "forceInsert", "utc_offset="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-F", "--flo2d_config"):
flo2d_config = arg
elif opt in ("-d", "--date"):
date = arg
elif opt in ("-t", "--time"):
time = arg
elif opt in ("-p", "--path"):
path = arg.strip()
elif opt in ("-o", "--out"):
output_suffix = arg.strip()
elif opt in ("-S", "--start_date"):
start_date = arg.strip()
elif opt in ("-T", "--start_time"):
start_time = arg.strip()
elif opt in ("-n", "--name"):
runName = arg.strip()
elif opt in ("-f", "--forceInsert"):
forceInsert = True
elif opt in ("-u", "--utc_offset"):
utc_offset = arg.strip()
appDir = pjoin(CWD, date + '_Kelani')
if path:
appDir = pjoin(CWD, path)
# Load FLO2D Configuration file for the Model run if available
FLO2D_CONFIG_FILE = pjoin(appDir, RUN_FLO2D_FILE)
if flo2d_config:
FLO2D_CONFIG_FILE = pjoin(CWD, flo2d_config)
FLO2D_CONFIG = json.loads('{}')
# Check FLO2D Config file exists
if os.path.exists(FLO2D_CONFIG_FILE):
FLO2D_CONFIG = json.loads(open(FLO2D_CONFIG_FILE).read())
# Default run for current day
now = datetime.now()
if 'MODEL_STATE_DATE' in FLO2D_CONFIG and len(
FLO2D_CONFIG['MODEL_STATE_DATE']): # Use FLO2D Config file data, if available
now = datetime.strptime(FLO2D_CONFIG['MODEL_STATE_DATE'], '%Y-%m-%d')
if date:
now = datetime.strptime(date, '%Y-%m-%d')
date = now.strftime("%Y-%m-%d")
if 'MODEL_STATE_TIME' in FLO2D_CONFIG and len(
FLO2D_CONFIG['MODEL_STATE_TIME']): # Use FLO2D Config file data, if available
now = datetime.strptime('%s %s' % (date, FLO2D_CONFIG['MODEL_STATE_TIME']), '%Y-%m-%d %H:%M:%S')
if time:
now = datetime.strptime('%s %s' % (date, time), '%Y-%m-%d %H:%M:%S')
time = now.strftime("%H:%M:%S")
if start_date:
start_date = datetime.strptime(start_date, '%Y-%m-%d')
start_date = start_date.strftime("%Y-%m-%d")
elif 'TIMESERIES_START_DATE' in FLO2D_CONFIG and len(
FLO2D_CONFIG['TIMESERIES_START_DATE']): # Use FLO2D Config file data, if available
start_date = datetime.strptime(FLO2D_CONFIG['TIMESERIES_START_DATE'], '%Y-%m-%d')
start_date = start_date.strftime("%Y-%m-%d")
else:
start_date = date
if start_time:
start_time = datetime.strptime('%s %s' % (start_date, start_time), '%Y-%m-%d %H:%M:%S')
start_time = start_time.strftime("%H:%M:%S")
elif 'TIMESERIES_START_TIME' in FLO2D_CONFIG and len(
FLO2D_CONFIG['TIMESERIES_START_TIME']): # Use FLO2D Config file data, if available
start_time = datetime.strptime('%s %s' % (start_date, FLO2D_CONFIG['TIMESERIES_START_TIME']),
'%Y-%m-%d %H:%M:%S')
start_time = start_time.strftime("%H:%M:%S")
else:
start_time = datetime.strptime(start_date, '%Y-%m-%d') # Time is set to 00:00:00
start_time = start_time.strftime("%H:%M:%S")
# Run Name of DB
if 'RUN_NAME' in FLO2D_CONFIG and len(FLO2D_CONFIG['RUN_NAME']): # Use FLO2D Config file data, if available
runName = FLO2D_CONFIG['RUN_NAME']
if not runName:
runName = run_name_default
# UTC Offset
if 'UTC_OFFSET' in FLO2D_CONFIG and len(FLO2D_CONFIG['UTC_OFFSET']): # Use FLO2D Config file data, if available
UTC_OFFSET = FLO2D_CONFIG['UTC_OFFSET']
if utc_offset:
UTC_OFFSET = utc_offset
utcOffset = getUTCOffset(UTC_OFFSET, default=True)
print('Extract Water Level Result of FLO2D on', date, '@', time, 'with Base time of', start_date, '@', start_time)
print('With UTC Offset of ', str(utcOffset), ' <= ', UTC_OFFSET)
OUTPUT_DIR_PATH = pjoin(CWD, OUTPUT_DIR)
HYCHAN_OUT_FILE_PATH = pjoin(appDir, HYCHAN_OUT_FILE)
WATER_LEVEL_DIR_PATH = pjoin(OUTPUT_DIR_PATH, "%s-%s" % (WATER_LEVEL_DIR, date))
if 'FLO2D_OUTPUT_SUFFIX' in FLO2D_CONFIG and len(
FLO2D_CONFIG['FLO2D_OUTPUT_SUFFIX']): # Use FLO2D Config file data, if available
WATER_LEVEL_DIR_PATH = pjoin(OUTPUT_DIR_PATH, "%s-%s" % (WATER_LEVEL_DIR, FLO2D_CONFIG['FLO2D_OUTPUT_SUFFIX']))
if output_suffix:
WATER_LEVEL_DIR_PATH = pjoin(OUTPUT_DIR_PATH, "%s-%s" % (WATER_LEVEL_DIR, output_suffix))
print('Processing FLO2D model on', appDir)
# Check BASE.OUT file exists
if not os.path.exists(HYCHAN_OUT_FILE_PATH):
print('Unable to find file : ', HYCHAN_OUT_FILE_PATH)
sys.exit()
# Create OUTPUT Directory
if not os.path.exists(OUTPUT_DIR_PATH):
os.makedirs(OUTPUT_DIR_PATH)
# Calculate the size of time series
bufsize = 65536
with open(HYCHAN_OUT_FILE_PATH) as infile:
isWaterLevelLines = False
isCounting = False
countSeriesSize = 0 # HACK: When it comes to the end of file, unable to detect end of time series
while True:
lines = infile.readlines(bufsize)
if not lines or SERIES_LENGTH:
break
for line in lines:
if line.startswith('CHANNEL HYDROGRAPH FOR ELEMENT NO:', 5):
isWaterLevelLines = True
elif isWaterLevelLines:
cols = line.split()
if len(cols) > 0 and cols[0].replace('.', '', 1).isdigit():
countSeriesSize += 1
isCounting = True
elif isWaterLevelLines and isCounting:
SERIES_LENGTH = countSeriesSize
break
print('Series Length is :', SERIES_LENGTH)
bufsize = 65536
#################################################################
# Extract Channel Water Level elevations from HYCHAN.OUT file #
#################################################################
print('Extract Channel Water Level Result of FLO2D HYCHAN.OUT on', date, '@', time, 'with Base time of', start_date,
'@', start_time)
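# The parser below assumes HYCHAN.OUT sections of roughly this (hypothetical) shape:
#   CHANNEL HYDROGRAPH FOR ELEMENT NO:    179
#   TIME    STAGE   ...
#   0.50    4.25    ...
# i.e. a header line followed by SERIES_LENGTH numeric rows, where column 0 is the
# time step in hours and column 1 the water level elevation.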
with open(HYCHAN_OUT_FILE_PATH) as infile:
isWaterLevelLines = False
isSeriesComplete = False
waterLevelLines = []
seriesSize = 0 # HACK: When it comes to the end of file, unable to detect end of time series
while True:
lines = infile.readlines(bufsize)
if not lines:
break
for line in lines:
if line.startswith('CHANNEL HYDROGRAPH FOR ELEMENT NO:', 5):
seriesSize = 0
elementNo = line.split()[5]
if elementNo in ELEMENT_NUMBERS:
isWaterLevelLines = True
waterLevelLines.append(line)
else:
isWaterLevelLines = False
elif isWaterLevelLines:
cols = line.split()
if len(cols) > 0 and isfloat(cols[0]):
seriesSize += 1
waterLevelLines.append(line)
if seriesSize == SERIES_LENGTH:
isSeriesComplete = True
if isSeriesComplete:
baseTime = datetime.strptime('%s %s' % (start_date, start_time), '%Y-%m-%d %H:%M:%S')
timeseries = []
elementNo = waterLevelLines[0].split()[5]
print('Extracted Cell No', elementNo, CHANNEL_CELL_MAP[elementNo])
for ts in waterLevelLines[1:]:
v = ts.split()
if len(v) < 1:
continue
# Get flood level (Elevation)
value = v[1]
# Get flood depth (Depth)
# value = v[2]
if not isfloat(value):
value = MISSING_VALUE
continue # If value is not present, skip
if value == 'NaN':
continue # If value is NaN, skip
timeStep = float(v[0])
currentStepTime = baseTime + timedelta(hours=timeStep)
dateAndTime = currentStepTime.strftime("%Y-%m-%d %H:%M:%S")
timeseries.append([dateAndTime, value])
# Create Directory
if not os.path.exists(WATER_LEVEL_DIR_PATH):
os.makedirs(WATER_LEVEL_DIR_PATH)
# Get Time stamp Ref:http://stackoverflow.com/a/13685221/1461060
ModelTime = float(waterLevelLines[1].split()[3])
fileModelTime = datetime.strptime(date, '%Y-%m-%d')
fileModelTime = fileModelTime + timedelta(hours=ModelTime)
dateAndTime = fileModelTime.strftime("%Y-%m-%d_%H-%M-%S")
# Create files
fileName = WATER_LEVEL_FILE.rsplit('.', 1)
stationName = CHANNEL_CELL_MAP[elementNo].replace(' ', '_')
fileTimestamp = "%s_%s" % (date, time.replace(':', '-'))
fileName = "%s-%s-%s.%s" % (fileName[0], stationName, fileTimestamp, fileName[1])
WATER_LEVEL_FILE_PATH = pjoin(WATER_LEVEL_DIR_PATH, fileName)
csvWriter = csv.writer(open(WATER_LEVEL_FILE_PATH, 'w'), delimiter=',', quotechar='|')
csvWriter.writerows(timeseries)
# Save Forecast values into Database
opts = {
'forceInsert': forceInsert,
'station': CHANNEL_CELL_MAP[elementNo],
'run_name': runName
}
print('>>>>>', opts)
if utcOffset != timedelta():
opts['utcOffset'] = utcOffset
save_forecast_timeseries(adapter, timeseries, date, time, opts)
isWaterLevelLines = False
isSeriesComplete = False
waterLevelLines = []
# -- END for loop
# -- END while loop
#################################################################
# Extract Flood Plain water elevations from BASE.OUT file #
#################################################################
BASE_OUT_FILE_PATH = pjoin(appDir, BASE_OUT_FILE)
print('Extract Flood Plain Water Level Result of FLO2D on', date, '@', time, 'with Base time of', start_date, '@',
start_time)
with open(BASE_OUT_FILE_PATH) as infile:
isWaterLevelLines = False
waterLevelLines = []
waterLevelSeriesDict = dict.fromkeys(FLOOD_ELEMENT_NUMBERS, [])
while True:
lines = infile.readlines(bufsize)
if not lines:
break
for line in lines:
if line.startswith('MODEL TIME =', 5):
isWaterLevelLines = True
elif isWaterLevelLines and line.startswith('***CHANNEL RESULTS***', 17):
waterLevels = getWaterLevelOfChannels(waterLevelLines, FLOOD_ELEMENT_NUMBERS)
# Create Directory
if not os.path.exists(WATER_LEVEL_DIR_PATH):
os.makedirs(WATER_LEVEL_DIR_PATH)
# Get Time stamp Ref:http://stackoverflow.com/a/13685221/1461060
ModelTime = float(waterLevelLines[0].split()[3])
baseTime = datetime.strptime('%s %s' % (start_date, start_time), '%Y-%m-%d %H:%M:%S')
currentStepTime = baseTime + timedelta(hours=ModelTime)
dateAndTime = currentStepTime.strftime("%Y-%m-%d %H:%M:%S")
for elementNo in FLOOD_ELEMENT_NUMBERS:
tmpTS = waterLevelSeriesDict[elementNo][:]
if elementNo in waterLevels:
tmpTS.append([dateAndTime, waterLevels[elementNo]])
else:
tmpTS.append([dateAndTime, MISSING_VALUE])
waterLevelSeriesDict[elementNo] = tmpTS
isWaterLevelLines = False
# for l in waterLevelLines :
# print(l)
waterLevelLines = []
if isWaterLevelLines:
waterLevelLines.append(line)
# -- END for loop
# -- END while loop
# Create files
for elementNo in FLOOD_ELEMENT_NUMBERS:
fileName = WATER_LEVEL_FILE.rsplit('.', 1)
stationName = FLOOD_PLAIN_CELL_MAP[elementNo].replace(' ', '_')
fileTimestamp = "%s_%s" % (date, time.replace(':', '-'))
fileName = "%s-%s-%s.%s" % \
(fileName[0], FLOOD_PLAIN_CELL_MAP[elementNo].replace(' ', '_'), fileTimestamp, fileName[1])
WATER_LEVEL_FILE_PATH = pjoin(WATER_LEVEL_DIR_PATH, fileName)
csvWriter = csv.writer(open(WATER_LEVEL_FILE_PATH, 'w'), delimiter=',', quotechar='|')
csvWriter.writerows(waterLevelSeriesDict[elementNo])
# Save Forecast values into Database
opts = {
'forceInsert': forceInsert,
'station': FLOOD_PLAIN_CELL_MAP[elementNo],
'run_name': runName
}
if utcOffset != timedelta():
opts['utcOffset'] = utcOffset
save_forecast_timeseries(adapter, waterLevelSeriesDict[elementNo], date, time, opts)
print('Extracted Cell No', elementNo, FLOOD_PLAIN_CELL_MAP[elementNo], 'into -> ', fileName)
except Exception as e:
traceback.print_exc()
print(e)
finally:
print('Completed processing', HYCHAN_OUT_FILE_PATH, ' to ', WATER_LEVEL_FILE_PATH)
license: apache-2.0 | hash: 7,930,331,980,908,002,000 | line_mean: 41.06055 | line_max: 136 | alpha_frac: 0.551019 | autogenerated: false | ratio: 3.621899 | config_test: true | has_no_keywords: false | few_assignments: false

repo_name: adalke/rdkit | path: rdkit/Chem/Subshape/SubshapeBuilder.py | copies: 1 | size: 4360 | content:
# $Id$
#
# Copyright (C) 2007 by Greg Landrum
# All rights reserved
#
from __future__ import print_function
from rdkit import Chem,Geometry
from rdkit.Chem import AllChem
from rdkit.Chem.Subshape import SubshapeObjects
from rdkit.Chem.Subshape import BuilderUtils
from rdkit.six.moves import cPickle
import time
#-----------------------------------------------------------------------------
class SubshapeCombineOperations(object):
UNION=0
SUM=1
INTERSECT=2
#-----------------------------------------------------------------------------
class SubshapeBuilder(object):
gridDims=(20,15,10)
gridSpacing=0.5
winRad=3.0
nbrCount=7
terminalPtRadScale=0.75
fraction=0.25
stepSize=1.0
featFactory=None
def SampleSubshape(self,subshape1,newSpacing):
ogrid=subshape1.grid
rgrid = Geometry.UniformGrid3D(self.gridDims[0],self.gridDims[1],self.gridDims[2],
newSpacing)
for idx in range(rgrid.GetSize()):
l = rgrid.GetGridPointLoc(idx)
v = ogrid.GetValPoint(l)
rgrid.SetVal(idx,v)
res = SubshapeObjects.ShapeWithSkeleton()
res.grid = rgrid
return res;
def GenerateSubshapeShape(self,cmpd,confId=-1,addSkeleton=True,**kwargs):
shape = SubshapeObjects.ShapeWithSkeleton()
shape.grid=Geometry.UniformGrid3D(self.gridDims[0],self.gridDims[1],self.gridDims[2],
self.gridSpacing)
AllChem.EncodeShape(cmpd,shape.grid,ignoreHs=False,confId=confId)
if addSkeleton:
conf = cmpd.GetConformer(confId)
self.GenerateSubshapeSkeleton(shape, conf, **kwargs)
return shape
def __call__(self,cmpd,**kwargs):
return self.GenerateSubshapeShape(cmpd,**kwargs)
def GenerateSubshapeSkeleton(self,shape,conf=None,terminalPtsOnly=False,skelFromConf=True):
if conf and skelFromConf:
pts = BuilderUtils.FindTerminalPtsFromConformer(conf,self.winRad,self.nbrCount)
else:
pts = BuilderUtils.FindTerminalPtsFromShape(shape,self.winRad,self.fraction)
pts = BuilderUtils.ClusterTerminalPts(pts,self.winRad,self.terminalPtRadScale)
BuilderUtils.ExpandTerminalPts(shape,pts,self.winRad)
if len(pts)<3:
raise ValueError('only found %d terminals, need at least 3'%len(pts))
if not terminalPtsOnly:
pts = BuilderUtils.AppendSkeletonPoints(shape.grid,pts,self.winRad,self.stepSize)
for i,pt in enumerate(pts):
BuilderUtils.CalculateDirectionsAtPoint(pt,shape.grid,self.winRad)
if conf and self.featFactory:
BuilderUtils.AssignMolFeatsToPoints(pts,conf.GetOwningMol(),self.featFactory,self.winRad)
shape.skelPts=pts
def CombineSubshapes(self,subshape1,subshape2,operation=SubshapeCombineOperations.UNION):
import copy
cs = copy.deepcopy(subshape1)
if operation==SubshapeCombineOperations.UNION:
cs.grid |= subshape2.grid
elif operation==SubshapeCombineOperations.SUM:
cs.grid += subshape2.grid
elif operation==SubshapeCombineOperations.INTERSECT:
cs.grid &= subshape2.grid
else:
raise ValueError('bad combination operation')
return cs
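# Usage sketch (assuming mol1/mol2 are embedded RDKit molecules):
#   builder = SubshapeBuilder()
#   shape1 = builder.GenerateSubshapeShape(mol1)
#   shape2 = builder.GenerateSubshapeShape(mol2)
#   combined = builder.CombineSubshapes(shape1, shape2,
#                                       operation=SubshapeCombineOperations.INTERSECT)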
if __name__=='__main__':
from rdkit.Chem import AllChem,ChemicalFeatures
from rdkit.Chem.PyMol import MolViewer
#cmpd = Chem.MolFromSmiles('CCCc1cc(C(=O)O)ccc1')
#cmpd = Chem.AddHs(cmpd)
if 1:
cmpd = Chem.MolFromSmiles('C1=CC=C1C#CC1=CC=C1')
cmpd = Chem.AddHs(cmpd)
AllChem.EmbedMolecule(cmpd)
AllChem.UFFOptimizeMolecule(cmpd)
AllChem.CanonicalizeMol(cmpd)
print(Chem.MolToMolBlock(cmpd), file=open('testmol.mol', 'w+'))
else:
cmpd = Chem.MolFromMolFile('testmol.mol')
builder=SubshapeBuilder()
if 1:
shape=builder.GenerateSubshapeShape(cmpd)
v = MolViewer()
if 1:
import tempfile
tmpFile = tempfile.mktemp('.grd')
v.server.deleteAll()
Geometry.WriteGridToFile(shape.grid,tmpFile)
time.sleep(1)
v.ShowMol(cmpd,name='testMol',showOnly=True)
v.server.loadSurface(tmpFile,'testGrid','',2.5)
v.server.resetCGO('*')
cPickle.dump(shape, open('subshape.pkl', 'wb+'))
for i,pt in enumerate(shape.skelPts):
v.server.sphere(tuple(pt.location),.5,(1,0,1),'Pt-%d'%i)
if not hasattr(pt,'shapeDirs'): continue
momBeg = pt.location-pt.shapeDirs[0]
momEnd = pt.location+pt.shapeDirs[0]
v.server.cylinder(tuple(momBeg),tuple(momEnd),.1,(1,0,1),'v-%d'%i)
license: bsd-3-clause | hash: -3,022,887,758,401,373,000 | line_mean: 34.447154 | line_max: 95 | alpha_frac: 0.68555 | autogenerated: false | ratio: 3.143475 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: bbc/ebu-tt-live-toolkit | path: ebu_tt_live/adapters/base.py | copies: 1 | size: 3499 | content:
import logging
import weakref
from abc import abstractmethod, abstractproperty
from ebu_tt_live.utils import AutoRegisteringABCMeta, AbstractStaticMember, validate_types_only
log = logging.getLogger(__name__)
# Interfaces
# ==========
class IDocumentDataAdapter(object, metaclass=AutoRegisteringABCMeta):
"""
This adapter is used to do various conversions on the payload between the carriage and the node
"""
__impl_registry = {}
_expects = AbstractStaticMember(validate_types_only)
_provides = AbstractStaticMember(validate_types_only)
@classmethod
def auto_register_impl(cls, impl_class):
impl_expects = impl_class.expects()
provides_map = cls.__impl_registry.setdefault(impl_expects, weakref.WeakValueDictionary())
impl_provides = impl_class.provides()
if impl_provides in list(provides_map.keys()):
log.warning(
'({} -> {}) adapter already registered: {}. Ignoring: {} '.format(
impl_expects,
impl_provides,
provides_map[impl_provides],
impl_class
)
)
else:
log.debug(
'Registering ({} -> {}) adapter: {}'.format(
impl_expects,
impl_provides,
impl_class
)
)
provides_map[impl_provides] = impl_class
@classmethod
def get_registered_impl(cls, expects, provides):
impl_class = cls.__impl_registry.get(expects, {}).get(provides, None)
if impl_class is None:
raise ValueError('No adapter found for: {} -> {}'.format(
expects, provides
))
return impl_class
@classmethod
def expects(cls):
"""
Data type expected
:return:
"""
if isinstance(cls._expects, AbstractStaticMember):
raise TypeError('Classmethod relies on abstract property: \'_expects\'')
return cls._expects
@classmethod
def provides(cls):
"""
Data type provided
:return:
"""
if isinstance(cls._provides, AbstractStaticMember):
raise TypeError('Classmethod relies on abstract property: \'_provides\'')
return cls._provides
@abstractmethod
def convert_data(self, data, **kwargs):
"""
Subclasses must implement this method
:param data:
:param kwargs: Extra parameters
:return:
"""
raise NotImplementedError()
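# Minimal sketch of a concrete adapter (hypothetical example; the class attributes
# _expects/_provides are plain types, validated by validate_types_only):
# class StrToBytesAdapter(IDocumentDataAdapter):
#     _expects = str
#     _provides = bytes
#     def convert_data(self, data, **kwargs):
#         return data.encode('utf-8')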
class INodeCarriageAdapter(object, metaclass=AutoRegisteringABCMeta):
"""
This adapter wraps the DocumentDataAdapter conversion logic and shows a dual interface. Its responsibility is
to facilitate direct communication between incompatible carriage mechanisms and processing nodes.
This is a tricky business because this class does not have a hardcoded expects-provides interface contract:
it works the contract out from the parameters as it goes.
"""
@abstractproperty
def data_adapters(self):
"""
Data conversion adapters
:return: list of DocumentDataAdapter instances
"""
@abstractmethod
def convert_data(self, data, **kwargs):
"""
This executes a conversion by looping through the data adapters.
:param data: Input data format
:param kwargs: Extra parameters
:return: Output data format
"""
license: bsd-3-clause | hash: 7,567,805,828,722,318,000 | line_mean: 31.700935 | line_max: 112 | alpha_frac: 0.606745 | autogenerated: false | ratio: 4.806319 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: qtproject/qt-creator | path: src/libs/3rdparty/syntax-highlighting/data/generators/generate-cmake-syntax.py | copies: 1 | size: 5026 | content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Generate Kate syntax file for CMake
#
# Copyright (c) 2017-2019 Alex Turbov <i.zaufi@gmail.com>
#
# To install prerequisites:
#
# $ pip install --user click jinja2 pyyaml
#
# To use:
#
# $ ./generate-cmake-syntax.py cmake.yaml > ../syntax/cmake.xml
#
import click
import jinja2
import pathlib
import re
import yaml
import pprint
_TEMPLATED_NAME = re.compile('<[^>]+>')
_PROPERTY_KEYS = [
'global-properties'
, 'directory-properties'
, 'target-properties'
, 'source-properties'
, 'test-properties'
, 'cache-properties'
, 'install-properties'
]
_KW_RE_LIST = ['kw', 're']
_VAR_KIND_LIST = ['variables', 'environment-variables']
def try_transform_placeholder_string_to_regex(name):
'''
NOTE Some placeholders are not IDs, but numbers...
`CMAKE_MATCH_<N>`, for example
'''
m = _TEMPLATED_NAME.split(name)
if 'CMAKE_MATCH_' in m:
return '\\bCMAKE_MATCH_[0-9]+\\b'
if 'CMAKE_ARGV' in m:
return '\\bCMAKE_ARGV[0-9]+\\b'
return '\\b{}\\b'.format('&id_re;'.join(list(m))) if 1 < len(m) else name
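# e.g. 'CMAKE_<LANG>_COMPILER' -> '\bCMAKE_&id_re;_COMPILER\b': the templated part
# is replaced by the &id_re; entity and the whole name is word-bounded.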
def partition_iterable(fn, iterable):
true, false = [], []
for i in iterable:
(false, true)[int(fn(i))].append(i)
return true, false
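# e.g. partition_iterable(lambda x: x > 1, [0, 1, 2, 3]) == ([2, 3], [0, 1])
# (items satisfying fn come first, the rest second)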
def _transform_command_set(cmd, list_name):
args, args_re = partition_iterable(lambda x: _TEMPLATED_NAME.search(x) is None, cmd[list_name])
del cmd[list_name]
list_name = list_name.replace('-', '_')
cmd[list_name] = {k: sorted(set(v)) for k, v in zip(_KW_RE_LIST, [args, args_re])}
cmd[list_name]['re'] = [*map(lambda x: try_transform_placeholder_string_to_regex(x), args_re)]
return cmd
def transform_command(cmd):
can_be_nulary = True
if 'name' not in cmd:
raise RuntimeError('Command has no name')
if 'named-args' in cmd:
new_cmd = _transform_command_set(cmd, 'named-args')
assert new_cmd == cmd
can_be_nulary = False
if 'special-args' in cmd:
new_cmd = _transform_command_set(cmd, 'special-args')
assert new_cmd == cmd
can_be_nulary = False
if 'property-args' in cmd:
new_cmd = _transform_command_set(cmd, 'property-args')
assert new_cmd == cmd
can_be_nulary = False
cmd['nested_parentheses'] = cmd['nested-parentheses?'] if 'nested-parentheses?' in cmd else False
if 'nulary?' in cmd and cmd['nulary?'] and not can_be_nulary:
raise RuntimeError('Command `{}` w/ args declared nulary!?'.format(cmd['name']))
return cmd
#BEGIN Jinja filters
def cmd_is_nulary(cmd):
assert not ('named-args' in cmd or 'special-args' in cmd or 'property-args' in cmd)
return 'nulary?' in cmd and cmd['nulary?']
#END Jinja filters
@click.command()
@click.argument('input_yaml', type=click.File('r'))
@click.argument('template', type=click.File('r'), default='./cmake.xml.tpl')
def cli(input_yaml, template):
data = yaml.load(input_yaml)
# Partition `variables` and `environment-variables` lists into "pure" (key)words and regexes to match
for var_key in _VAR_KIND_LIST:
data[var_key] = {
k: sorted(set(v)) for k, v in zip(
_KW_RE_LIST
, [*partition_iterable(lambda x: _TEMPLATED_NAME.search(x) is None, data[var_key])]
)
}
data[var_key]['re'] = [
*map(
lambda x: try_transform_placeholder_string_to_regex(x)
, data[var_key]['re']
)
]
# Transform properties and make all-properties list
data['properties'] = {}
for prop in _PROPERTY_KEYS:
python_prop_list_name = prop.replace('-', '_')
props, props_re = partition_iterable(lambda x: _TEMPLATED_NAME.search(x) is None, data[prop])
del data[prop]
data['properties'][python_prop_list_name] = {
k: sorted(set(v)) for k, v in zip(_KW_RE_LIST, [props, props_re])
}
data['properties'][python_prop_list_name]['re'] = [
*map(lambda x: try_transform_placeholder_string_to_regex(x), props_re)
]
data['properties']['kinds'] = [*map(lambda name: name.replace('-', '_'), _PROPERTY_KEYS)]
# Make all commands list
data['commands'] = [
*map(
lambda cmd: transform_command(cmd)
, data['scripting-commands'] + data['project-commands'] + data['ctest-commands'])
]
# Fix node names to be accessible from Jinja template
data['generator_expressions'] = data['generator-expressions']
data['environment_variables'] = data['environment-variables']
del data['generator-expressions']
del data['environment-variables']
env = jinja2.Environment(
keep_trailing_newline=True
)
# Register convenience filters
env.tests['nulary'] = cmd_is_nulary
tpl = env.from_string(template.read())
result = tpl.render(data)
print(result)
if __name__ == '__main__':
cli()
# TODO Handle exceptions and show errors
license: gpl-3.0 | hash: 3,560,213,243,156,102,700 | line_mean: 27.885057 | line_max: 105 | alpha_frac: 0.606446 | autogenerated: false | ratio: 3.377688 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: annoviko/pyclustering | path: pyclustering/nnet/examples/hysteresis_examples.py | copies: 1 | size: 2173 | content:
"""!
@brief Examples of usage and demonstration of abilities of Hysteresis Oscillatory Network.
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2020
@copyright BSD-3-Clause
"""
from pyclustering.nnet.hysteresis import hysteresis_network, hysteresis_visualizer;
from pyclustering.nnet import *;
def template_dynamic(num_osc, own_weight = -3, neigh_weight = -1, initial_states = None, initial_outputs = None, steps = 1000, time = 10):
network = hysteresis_network(num_osc, own_weight, neigh_weight);
if (initial_states is not None):
network.states = initial_states;
if (initial_outputs is not None):
network.outputs = initial_outputs;
output_dynamic = network.simulate(steps, time);
hysteresis_visualizer.show_output_dynamic(output_dynamic);
ensembles = output_dynamic.allocate_sync_ensembles(tolerance = 0.5, threshold_steps = 5);
print("Allocated synchronous ensembles ( amout:", len(ensembles), "):", ensembles);
def one_oscillator_weight_2():
template_dynamic(1, -2);
def one_oscillator_weight_4():
template_dynamic(1, -4);
def two_oscillators_sync():
"Comment: Different initial state - state of sync. will be reached."
template_dynamic(2, -4, 1, [1, 0], [1, 1]);
def two_oscillators_desync():
"Note: if initial state is the same for both oscillators then desync. will not be exist. It is very important to set different values if desync. is required."
template_dynamic(2, -4, -1, [1, 0], [1, 1]);
def five_oscillators_positive_conn():
"Note: Oscillations are dead in this case (sync. should be in ideal case)"
template_dynamic(5, -4, 1, [1, 0.5, 0, -0.5, -1], [1, 1, 1, 1, 1]);
template_dynamic(5, -4, 1, [1, 0.8, 0.6, 0.4, 0.2], [-1, -1, -1, -1, -1]);
def five_oscillators_negative_conn():
"Comment: Full desync."
template_dynamic(5, -4, -1, [1, 0.5, 0, -0.5, -1], [1, 1, 1, 1, 1]);
one_oscillator_weight_2();
one_oscillator_weight_4();
two_oscillators_sync();
two_oscillators_desync();
five_oscillators_positive_conn();
five_oscillators_negative_conn();
license: gpl-3.0 | hash: 8,405,484,669,327,025,000 | line_mean: 34.25 | line_max: 162 | alpha_frac: 0.650253 | autogenerated: false | ratio: 3.01387 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: rbaravalle/imfractal | path: imfractal/Algorithm/MFS_3D.py | copies: 1 | size: 11780 | content:
"""
Copyright (c) 2013 Rodrigo Baravalle
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from Algorithm import *
import numpy as np
from math import log10
import scipy.signal
import scipy.io as sio
from scipy.stats import norm
class MFS_3D (Algorithm):
"""
:3D implementation of MFS through holder exponents f(alpha)
:version: 1.0
:author: Rodrigo Baravalle
"""
def __init__(self):
pass
def setDef(self, ind, f, ite, filename, file_mask, params):
# parameters: ind -> determines how many levels are used when computing the density
# choose 1 for using directly the image measurement im or
# >= 6 for computing the density of im (quite stable for >=5)
# f ----> determines the dimension of MFS vector
# ite ---> determines how many levels are used when computing MFS for each
self.ind_num = ind # number of pixels for averaging
self.f_num = f # window
self.ite_num = ite
self.filename = filename
self.file_mask = file_mask
self.params = params
def gauss_kern(self,size_x, size_y, size_z):
""" Returns a normalized 3D gauss kernel array for convolutions """
m = np.float32(size_x)
n = np.float32(size_y)
o = np.float32(size_z)
sigma = 2; # ???
if(size_x <= 3): sigma = 1.5;
if(size_x == 5): sigma = 2.5;
z, y, x = np.mgrid[-(m-1)/2:(m-1)/2+1, -(n-1)/2:(n-1)/2+1, -(o-1)/2:(o-1)/2+1]
b = 2*(sigma**2)
square = lambda i : i**2
fm = lambda i: map(square, i)
x2 = map(fm, x)
y2 = map(fm, y)
z2 = map(fm, z)
g = np.sum([x2, y2, z2], axis=0).astype(np.float32)
g = np.exp(-g / b).astype(np.float32)  # Gaussian: exp(-(x^2+y^2+z^2) / (2*sigma^2))
return g / g.sum()
def determine_threshold(self, arr):
# compute histogram of values
bins = range(np.min(arr), np.max(arr) + 1)
h = np.histogram(arr, bins=bins)
threshold = np.min(arr)
# get 75% of the mass -> threshold (see the 0.75 cutoff below)
assert (len(arr.shape) == 3)
total_pixels = arr.shape[0] * arr.shape[1] * arr.shape[2]
for i in range(len(bins) + 1):
# compute sum of h(x) from x = 0 to x = i
partial_sum_vector = np.cumsum(h[0][: (i + 1)])
partial_sum = partial_sum_vector[len(partial_sum_vector) - 1]
percentage = (float)(partial_sum) / (float)(total_pixels)
if percentage > 0.75:
threshold = np.min(arr) + i
break
return threshold
def openMatlab(self, name, filename, greyscale):
import scipy.io as sio
arr = np.array(sio.loadmat(filename)[name]).astype(np.int32)
if greyscale:
return arr
if name == "S":
threshold = self.determine_threshold(arr)
arr = arr > threshold
a_v = arr.cumsum()
print "Amount of white pixels: ", a_v[len(a_v) - 1]
# debug - to see the spongious structure
# plt.imshow((arr[:,:,50]), cmap=plt.gray())
# plt.show()
return arr
def gradient(self, data):
Nx, Ny, Nz = data.shape
basic_fx = np.array([[-1, 0, 1], [0, 0, 0], [0, 0, 0]])
basic_fy = basic_fx.T
basic_fxy = [[-1, 0, 0], [0, 0, 0], [0, 0, 1]]
basic_fyx = [[0, 0, -1], [0, 0, 0], [1, 0, 0]]
fx = np.float32(0.5) * np.array([basic_fx, basic_fx, basic_fx])
fy = np.float32(0.5) * np.array([basic_fy, basic_fy, basic_fy])
fxy = np.float32(0.5) * np.array([basic_fxy, basic_fxy, basic_fxy])
fyx = np.float32(0.5) * np.array([basic_fyx, basic_fyx, basic_fyx])
a = scipy.signal.convolve(data, fx, mode="full")
Nx, Ny, Nz = a.shape
a = a[0:Nx - 2, 1:Ny - 1, 1:Nz - 1] # fix me, check z indices!
b = scipy.signal.convolve(data, fy, mode="full")
Nx, Ny, Nz = b.shape
b = b[1:Nx - 1, 0:Ny - 2, 1:Nz - 1]
c = scipy.signal.convolve(data, fxy, mode="full")
Nx, Ny, Nz = c.shape
c = c[1:Nx - 1, 1:Ny - 1, 1:Nz - 1]
d = scipy.signal.convolve(data, fyx, mode="full")
Nx, Ny, Nz = d.shape
d = d[1:Nx - 1, 1:Ny - 1, 1:Nz - 1]
data = a ** 2 + b ** 2 + c ** 2 + d ** 2
data = np.sqrt(data)
data = np.floor(data)
return data
def laplacian(self, data): # MFS of Laplacion
# 3d, octave:
# f1 = fspecial3('gaussian', 5, 1);
# f2 = -ones(3,3,3);
# f2(2,2,2) = 26;
# f = convn(f1, f2);
laplacian_kernel = np.load('exps/data/laplacian_kernel.npy')
print "SHAPES: !"
print laplacian_kernel.shape
print data.shape
a = scipy.signal.convolve(data, laplacian_kernel, mode="full")
Nx, Ny, Nz = a.shape
a = a[3:Nx - 3, 3:Ny - 3, 3:Nz - 3]
a = np.floor((a < 0).choose(a, 0))
return a
def getFDs(self, data = []):
"""
@param string filename : volume location
@param string file_mask : mask volume location
@return [float] : 3D multi fractal dimentions
@author: Rodrigo Baravalle. Code ported from Matlab and extended to 3D
"""
if len(data) == 0:
# data is a 3D grayscale volume
data = self.openMatlab('S', self.filename, True)
data_mask = self.openMatlab('M', self.file_mask, True)
# Masking
data = data * (data_mask > 0)
# Other multifractal measures
if self.params['gradient'] == True:
data = self.gradient(data)
else:
if self.params['laplacian'] == True:
print "laplacian!"
data = self.laplacian(data)
#Using [0..255] to denote the intensity profile of the image
grayscale_box = [0, 255]
#sigmoid function
#data = norm.cdf(data, loc=200.0, scale=100.0);
#Preprocessing: default intensity value of image ranges from 0 to 255
if abs(data).max()< 1:
data = data * grayscale_box[1]
else:
# put every value into [0, 255]
data = (data - data.min()) * 255 / (data.max() - data.min())
#######################
#DEBUG
print data.max(), data.min(), data.sum()
### Estimating density function of the volume
### by solving least squares for D in the equation
### log10(bw) = D*log10(c) + b
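# With N = ind_num scales, the least-squares slope below reduces to the closed form
#   D = (N*sum(x_i*y_i) - sum(x_i)*sum(y_i)) / (N*sum(x_i^2) - (sum(x_i))^2)
# evaluated per voxel with x = log10(c) and y = log10(bw).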
r = 1.0 / max(data.shape)
c = np.dot(range(1, self.ind_num+1), r)
c = map(lambda i: log10(i), c)
bw = np.zeros((self.ind_num, data.shape[0], data.shape[1], data.shape[2])).astype(np.float32)
bw[0] = data + 1
# DEBUG
#print "BW: ", bw.shape
k = 1
if(self.ind_num > 1):
bw[1] = scipy.signal.convolve(bw[0], self.gauss_kern(k+1, k+1, k+1), mode="full")[1:,1:]*((k+1)**2)
for k in range(2,self.ind_num):
temp = scipy.signal.convolve(bw[0], self.gauss_kern(k+1, k+1, k+1), mode="full")*((k+1)**2)
if(k==4):
bw[k] = temp[k - 1 - 1 : temp.shape[0] - (k / 2),
k - 1 - 1 : temp.shape[1] - (k / 2),
k - 1 - 1 : temp.shape[2] - (k / 2)]
else:
bw[k] = temp[k - 1 : temp.shape[0] - (1),
k - 1 : temp.shape[1] - (1),
k - 1 : temp.shape[2] - (1)]
#print bw.min(), bw.max()
bw = np.log10(bw)
n1 = c[0] * c[0]
n2 = bw[0] * c[0]
for k in range(1,self.ind_num):
n1 = n1 + c[k]*c[k]
n2 = n2 + bw[k]*c[k]
sum3 = bw[0]
for i in range(1,self.ind_num):
sum3 = sum3 + bw[i]
if(self.ind_num >1):
D = (n2*self.ind_num-sum(c)*sum3)/(n1*self.ind_num -sum(c)*sum(c));
if (self.ind_num > 1):
max_D = np.float32(4)
min_D = np.float32(1)
D = grayscale_box[1]*(D-min_D)/(max_D - min_D)+grayscale_box[0]
else:
D = data
#Partition the density
# throw away the boundary
D = D[self.ind_num - 1 : D.shape[0] - self.ind_num + 1,
self.ind_num - 1 : D.shape[1] - self.ind_num + 1,
self.ind_num - 1 : D.shape[2] - self.ind_num + 1]
IM = np.zeros(D.shape)
gap = np.ceil((grayscale_box[1] - grayscale_box[0])/np.float32(self.f_num));
center = np.zeros(self.f_num);
for k in range(1,self.f_num+1):
bin_min = (k-1) * gap;
bin_max = k * gap - 1;
center[k-1] = round((bin_min + bin_max) / 2);
D = ((D <= bin_max) & (D >= bin_min)).choose(D, center[k-1])
D = ((D >= bin_max)).choose(D,0)
D = ((D < 0)).choose(D,0)
IM = D
# Constructing the filter for approximating log fitting
r = max(IM.shape)
c = np.zeros(self.ite_num)
c[0] = 1;
for k in range(1,self.ite_num):
c[k] = c[k-1]/(k+1)
c = c / sum(c);
# Construct level sets
Idx_IM = np.zeros(IM.shape);
for k in range(0, self.f_num):
IM = (IM == center[k]).choose(IM,k+1)
Idx_IM = IM
IM = np.zeros(IM.shape)
#Estimate MFS by box-counting
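# For each level set, boxes of size j = 1..ite_num are counted via convolution with
# an all-ones mask; num[j-1] = log10(occupied boxes) / log10(r/j), and the MFS entry
# is the c-weighted average of those exponents.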
num = np.zeros(self.ite_num)
MFS = np.zeros(self.f_num)
for k in range(1, self.f_num+1):
#print k, self.f_num
IM = np.zeros(IM.shape)
IM = (Idx_IM == k).choose(Idx_IM, 255 + k)
IM = (IM<255 + k).choose(IM, 0)
IM = (IM > 0).choose(IM, 1)
temp = max(IM.sum(), 1)
num[0] = log10(temp)/log10(r);
for j in range(2, self.ite_num+1):
mask = np.ones((j, j, j))
bw = scipy.signal.convolve(IM, mask, mode = "full")[1:, 1:, 1:]
ind_x = np.arange(0, IM.shape[0], j)
ind_y = np.arange(0, IM.shape[1], j)
ind_z = np.arange(0, IM.shape[2], j)
bw = bw[np.ix_(ind_x, ind_y, ind_z)]
idx = (bw > 0 ).sum()
temp = max(idx, 1)
num[j-1] = log10( temp ) / log10( r / j )
MFS[k-1] = sum(c*num)
return MFS
license: bsd-3-clause | hash: 1,529,415,426,108,300,800 | line_mean: 32.276836 | line_max: 111 | alpha_frac: 0.519864 | autogenerated: false | ratio: 3.210684 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: karec/oct-browser | path: octbrowser/browser.py | copies: 1 | size: 14578 | content:
"""This file contain the main class for the octbrowser
It represent a simple browser object with all methods
"""
import re
import os
import lxml.html as lh
import requests
from lxml.cssselect import CSSSelector
from octbrowser.exceptions import FormNotFoundException, NoUrlOpen, LinkNotFound, NoFormWaiting, HistoryIsNone
from octbrowser.history.base import BaseHistory
from octbrowser.history.cached import CachedHistory
class Browser(object):
"""This class represent a minimal browser. Build on top of lxml awesome library it let you write script for accessing
or testing website with python scripts
:param session: The session object to use. If set to None will use requests.Session
:type session: requests.Session
:param base_url: The base url for the website, will append it for every link without a full url
:type base_url: str
:param history: The history object to use. If set to None no history will be stored.
:type history: octbrowser.history.base.BaseHistory
"""
def __init__(self, session=None, base_url='', **kwargs):
self._sess_bak = session
self._history = kwargs.get('history', CachedHistory())
# check history class
if self._history is not None:
assert isinstance(self._history, BaseHistory)
self._response = None
self._base_url = base_url
self.form = None
self.form_data = None
self.session = session or requests.Session()
def clean_browser(self):
"""Clears browser history, session, current page, and form state
self._base_url is unmodified
:return: None
"""
self.clean_session()
self._response = None
self.form = None
self.form_data = None
try:
self.clear_history()
except HistoryIsNone:
pass
def add_header(self, name, value):
"""Allow you to add custom header, one by one.
Specify existing name for update
Headers will be used by every request
:param name: the key of the header
:type name: str
:param value: the associated value
:type value: str
:return: None
"""
self.session.headers[name] = value
def del_header(self, key):
"""Try to delete the 'key' of headers property
:param key: the key to delete
:type key: mixed
:return: None
"""
self.session.headers.pop(key, None)
def set_headers(self, headers):
"""Setter for headers property
:param headers: a dict containing all headers
:type headers: dict
:return: None
"""
self.session.headers.clear()
self.session.headers.update(headers)
def clean_session(self):
"""This function is called by the core of multi-mechanize. It cleans the session for avoiding cache or cookies
errors, or giving false results based on cache
:return: None
"""
del self.session
self.session = self._sess_bak or requests.Session()
@property
def _url(self):
"""Url of the current page or None if there isn't one
:return: url of current page or None if there isn't one
"""
try:
return self._response.url
except AttributeError:
return None
@property
def _html(self):
"""Parsed html of the current page or None if there isn't any
:return: html of current page or None if there isn't any
"""
try:
return self._response.html
except AttributeError:
return None
@property
def _form_waiting(self):
"""Check if a form is actually on hold or not
:return: True or False
"""
if self.form is not None:
return True
return False
def _process_response(self, response):
"""Update the response object with parsed html and browser properties
Html property is a lxml.Html object, needed for parsing the content, getting elements like form, etc...
If you want the raw html, you can use both::
response.read() # or .content for urllib response objects
Or use lxml::
lxml.html.tostring(response.html)
:param response: requests.Response or urllib.Response object
:return: the updated Response object
"""
if not hasattr(response, 'html'):
try:
html = response.content
except AttributeError:
html = response.read()
response.content = html
tree = lh.fromstring(html)
tree.make_links_absolute(base_url=self._base_url)
response.html = tree
self._response = response
return response
def get_form(self, selector=None, nr=0, at_base=False):
"""Get the form selected by the selector and / or the nr param
Raise:
* oct.core.exceptions.FormNotFoundException
* oct.core.exceptions.NoUrlOpen
:param selector: A css-like selector for finding the form
:type selector: str
:param nr: the index of the form, if selector is set to None, it will search on the hole page
:type nr: int
:param at_base: must be set to true in case of form action is on the base_url page
:type at_base: bool
:return: None
"""
if self._html is None:
raise NoUrlOpen('No url open')
if selector is None:
self.form = self._html.forms[nr]
self.form_data = dict(self._html.forms[nr].fields)
else:
sel = CSSSelector(selector)
for el in sel(self._html):
if el.forms:
self.form = el.forms[nr]
self.form_data = dict(el.forms[nr].fields)
if self.form is None:
raise FormNotFoundException('Form not found with selector {0} and nr {1}'.format(selector, nr))
# common case where action was empty before make_link_absolute call
if (self.form.action == self._base_url and
self._url is not self._base_url and
not at_base):
self.form.action = self._url
def get_select_values(self):
"""Get the available values of all select and select multiple fields in form
:return: a dict containing all values for each fields
:raises: NoFormWaiting
"""
if not self._form_waiting:
raise NoFormWaiting('No form waiting')
data = {}
for i in self.form.inputs:
if isinstance(i, lh.SelectElement):
data[i.name] = i.value_options
return data
def submit_form(self):
"""Submit the form filled with form_data property dict
Raise:
oct.core.exceptions.NoFormWaiting
:return: Response object after the submit
"""
if not self._form_waiting:
raise NoFormWaiting('No form waiting to be send')
self.form.fields = self.form_data
r = lh.submit_form(self.form, open_http=self._open_session_http)
resp = self._process_response(r)
if self._history is not None:
self._history.append_item(resp)
self.form_data = None
self.form = None
return resp
def _open_session_http(self, method, url, values):
"""Custom method for form submission, send to lxml submit form method
:param method: the method of the form (POST, GET, PUT, DELETE)
:type method: str
:param url: the url of the action of the form
:type url: str
:param values: the values of the form
:type values: dict
:return: Response object from requests.request method
"""
return self.session.request(method, url, None, values)
def open_url(self, url, data=None, **kwargs):
"""Open the given url
:param url: The url to access
:type url: str
:param data: Data to send. If data is set, the browser will make a POST request
:type data: dict
:return: The Response object from requests call
"""
if data:
response = self.session.post(url, data, **kwargs)
else:
response = self.session.get(url, **kwargs)
response = self._process_response(response)
if self._history is not None:
self._history.append_item(response)
response.connection.close()
return response
def back(self):
"""Go to the previous url in the history
:return: the Response object
:rtype: requests.Response
:raises: NoPreviousPage, HistoryIsNone
"""
if self._history is None:
raise HistoryIsNone("You must set history if you need to use historic methods")
response = self._history.back()
return self._process_response(response)
def forward(self):
"""Go to the next url in the history
:return: the Response object
:rtype: requests.Response
:raises: EndOfHistory, HistoryIsNone
"""
if self._history is None:
raise HistoryIsNone("You must set history if you need to use historic methods")
response = self._history.forward()
return self._process_response(response)
def refresh(self):
"""Refresh the current page by resending the request
:return: the Response object
:rtype: requests.Response
:raises: NoUrlOpen
"""
if self._response is None:
raise NoUrlOpen("Can't perform refresh. No url open")
response = self.session.send(self._response.request)
return self._process_response(response)
def clear_history(self):
"""Re initialise the history
"""
if self._history is None:
raise HistoryIsNone("You must set history if you need to use historic methods")
self._history.clear_history()
@property
def history(self):
"""Return the actual history list
:return: the history list
:rtype: list
:raises: HistoryIsNone
"""
if self._history is None:
raise HistoryIsNone("You must set history if you need to use historic methods")
return self._history.history
@property
def history_object(self):
"""Return the actual history object
:return: the _history property
:rtype: History
"""
return self._history
def follow_link(self, selector, url_regex=None):
"""Will access the first link found with the selector
Raise:
oct.core.exceptions.LinkNotFound
:param selector: a string representing a css selector
:type selector: str
:param url_regex: regex for finding the url, can represent the href attribute or the link content
:type url_regex: str
:return: Response object
"""
sel = CSSSelector(selector)
resp = None
if self._html is None:
raise NoUrlOpen
for e in sel(self._html):
if url_regex:
r = re.compile(url_regex)
if r.match(e.get('href')) or r.match(e.xpath('string()')):
return self.open_url(e.get('href'))
else:
return self.open_url(e.get('href'))
if resp is None:
raise LinkNotFound('Link not found')
def get_html_element(self, selector):
"""Return a html element as string. The element will be find using the `selector` param
Use this method for get single html elements, if you want to get a list of elements,
please use `get_html_elements`
:param selector: a string representing a css selector
:type selector: str
:return: a string containing the element; if multiple elements are found, it will concat them
:rtype: str
"""
if self._html is None:
raise NoUrlOpen()
elements = self._html.cssselect(selector)
ret = ""
for elem in elements:
ret += lh.tostring(elem, encoding='unicode', pretty_print=True)
return ret
def get_html_elements(self, selector):
"""Return a list of lxml.html.HtmlElement matching the `selector` argument
:param selector: a string representing a css selector
:type selector: str
:return: a list of lxml.html.HtmlElement of found elements
:rtype: list
"""
if self._html is None:
raise NoUrlOpen()
return self._html.cssselect(selector)
def get_resource(self, selector, output_dir, source_attribute='src'):
"""Get a specified ressource and write it to the output dir
Raise:
OSError
:param selector: a string representing a css selector
:type selector: str
:param output_dir: the directory where the resources will be written
:type output_dir: str
:param source_attribute: the attribute to retrieve the url needed for downloading the resource
:type source_attribute: str
:return: number of resources successfully saved (zero for failure)
"""
if self._html is None:
raise NoUrlOpen()
elements = self._html.cssselect(selector)
cnt = 0
if not elements or len(elements) == 0:
return cnt
for elem in elements:
src = elem.get(source_attribute)
if not src:
continue
response = requests.get(src, stream=True)
if not response.ok:
continue
# Save resource to file
filename = os.path.basename(response.url)
path = os.path.join(output_dir, filename)
with open(path, 'wb') as f:
for block in response.iter_content(1024):
if not block:
break
f.write(block)
cnt += 1
return cnt
@staticmethod
def open_in_browser(response):
"""Provide a simple interface for `lxml.html.open_in_browser` function.
Be careful, use this function only for debug purpose
:param response: the response object to open in the browser
:type response: requests.Response
:return:
"""
lh.open_in_browser(response.html)
license: mit | hash: -6,376,803,414,716,263,000 | line_mean: 32.131818 | line_max: 121 | alpha_frac: 0.599465 | autogenerated: false | ratio: 4.480025 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: DarthMaulware/EquationGroupLeaks | path: Leak #5 - Lost In Translation/windows/Resources/Ops/PyScripts/lib/ops/pprint.py | copies: 1 | size: 4263 | content:
from __future__ import print_function
from __future__ import division
import dsz
import dsz.ui
import ops.data
ALIGN_LEFT = '<'
ALIGN_CENTER = '_'
ALIGN_RIGHT = '>'
def pprint(data, header=None, dictorder=None, echocodes=None, align=None, print_handler=print):
if ((data is None) or (len(data) == 0)):
return
if ((dict is type(data[0])) and (dictorder is None)):
dictorder = data[0].keys()
if ((dict is type(data[0])) and (header is None)):
header = dictorder
if isinstance(data[0], ops.data.OpsObject):
newdata = list()
for item in data:
newdata.append(item.__dict__)
data = newdata
if (dictorder is None):
raise Exception('You must specify a dictorder (set of keys) when pprinting an ops.data object')
if (header is None):
header = dictorder
(sdata, align) = makeStrings(data, dictorder, align)
(widths, percents) = calcSize(sdata, header)
output = ''
if header:
for i in range(len(header)):
output += ((('|' + (' ' * (((widths[i] - len(header[i])) // 2) + 1))) + header[i]) + (' ' * (((widths[i] - len(header[i])) // 2) + 1)))
if ((widths[i] - len(header[i])) % 2):
output += ' '
if percents[i]:
output += (' ' * (percents[i] - header[i].count('%')))
output += '|'
if echocodes:
dsz.ui.Echo(output)
output = ''
else:
output += '\n'
for i in range(len(widths)):
output += ('+-' + ('-' * ((widths[i] + 1) + percents[i])))
output += '+'
if echocodes:
dsz.ui.Echo(output)
output = ''
else:
output += '\n'
for j in range(len(sdata)):
d = sdata[j]
a = align[j]
for i in range(len(d)):
if (a[i] == ALIGN_RIGHT):
output += ((('|' + (' ' * ((widths[i] - len(d[i])) + 1))) + d[i]) + ' ')
elif (a[i] == ALIGN_CENTER):
output += ((('|' + (' ' * (((widths[i] - len(d[i])) // 2) + 1))) + d[i]) + (' ' * (((widths[i] - len(d[i])) // 2) + 1)))
if ((widths[i] - len(d[i])) % 2):
output += ' '
else:
output += (('| ' + d[i]) + (' ' * ((widths[i] - len(d[i])) + 1)))
if percents[i]:
output += (' ' * (percents[i] - d[i].count('%')))
output += '|'
if echocodes:
dsz.ui.Echo((output.encode('utf8') if isinstance(output, unicode) else output), echocodes[j])
output = ''
else:
output += '\n'
if (echocodes is None):
print_handler(output, end='')
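# Example (hypothetical data):
#   pprint([{'name': 'a', 'size': 1}, {'name': 'b', 'size': 22}],
#          header=['Name', 'Size'], dictorder=['name', 'size'])
# renders an ASCII table with '+-' ruled borders, centered headers and
# right-aligned numeric columns.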
def makeStrings(data, dictOrder, align):
r = []
a = ([] if (align is None) else None)
for i in data:
c = []
ac = []
if dictOrder:
for k in dictOrder:
c += ([i[k]] if (unicode is type(i[k])) else [(str(i[k]) if (i[k] is not None) else '')])
if (a is not None):
ac += ([ALIGN_RIGHT] if ((int is type(i[k])) or (float is type(i[k])) or (long is type(i[k]))) else [ALIGN_LEFT])
else:
for k in i:
c += ([k] if (unicode is type(k)) else [(str(k) if (k is not None) else '')])
if (a is not None):
ac += ([ALIGN_RIGHT] if ((int is type(k)) or (float is type(k)) or (long is type(k))) else [ALIGN_LEFT])
r += [c]
if (a is not None):
a += [ac]
return (r, (a if (a is not None) else align))
def calcSize(data, header):
widths = range(len(data[0]))
percents = range(len(data[0]))
for i in widths:
widths[i] = 0
percents[i] = 0
if header:
for i in range(len(header)):
r = len(header[i])
if (r > widths[i]):
widths[i] = r
r = header[i].count('%')
if (r > percents[i]):
percents[i] = r
for d in data:
for i in range(len(d)):
r = len(d[i])
if (r > widths[i]):
widths[i] = r
r = d[i].count('%')
if (r > percents[i]):
percents[i] = r
return (widths, percents)
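# A minimal usage sketch (hypothetical data); with the default print handler it
# runs under plain Python 2, while echocodes output needs a real DSZ environment:
if __name__ == '__main__':
    rows = [{'pid': 4, 'name': 'System'}, {'pid': 1234, 'name': 'explorer.exe'}]
    pprint(rows, header=['PID', 'Name'], dictorder=['pid', 'name'])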
|
unlicense
| -8,518,540,465,188,391,000
| 35.444444
| 147
| 0.451325
| false
| 3.36996
| false
| false
| false
|
lmarent/network_agents_ver2_python
|
simulation_site/simulation/forms.py
|
1
|
4022
|
from django import forms
from django.core.exceptions import ValidationError
from django.forms.models import inlineformset_factory
import inspect
import os
import re
import sys
from simulation.models import ProbabilityDistribution
from simulation.models import DiscreteProbabilityDistribution
from simulation.models import CostFunction
from simulation.models import ContinuousCostFunction
class ProbabilityDistributionForm(forms.ModelForm):
class Meta:
model = ProbabilityDistribution
fields = ('name', 'class_name', 'domain')
def formfield_for_choice_field(self, available_choices):
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.append(currentdir)
file_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
dir_path = file_path.split('/')
dir_path.pop() # remove ./simulation from the list
dir_path.pop() # remove ./simulation_site from the list
probability_directory = '/'.join(dir_path)
probability_directory += '/agents/probabilities'
black_list = ['__init__','ProbabilityDistribution',
'ProbabilityDistributionFactory',
'ProbabilityDistributionException']
for filename in os.listdir (probability_directory):
# Ignore subfolders
if os.path.isdir (os.path.join(probability_directory, filename)):
continue
else:
if re.match(r".*?\.py$", filename):
classname = re.sub(r".py", r"", filename)
if (classname not in black_list):
available_choices.append((classname, classname))
def __init__(self, *args, **kwargs):
available_choices = []
self.formfield_for_choice_field(available_choices)
print available_choices
#self.fields['class_name'].choices = available_choices
return super(ProbabilityDistributionForm, self).__init__(*args, **kwargs)
# inlineformset_factory creates a Class from a parent model (Contact)
# to a child model (Address)
DiscreteProbabilityFormSet = inlineformset_factory(
ProbabilityDistribution,
DiscreteProbabilityDistribution, fields=('value','label','probability')
)
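# A hedged usage sketch (the view function and its 'request' handling are
# hypothetical) of driving the parent form and the inline formset together:
#
#   def edit_distribution(request, pk):
#       distribution = ProbabilityDistribution.objects.get(pk=pk)
#       form = ProbabilityDistributionForm(request.POST or None, instance=distribution)
#       formset = DiscreteProbabilityFormSet(request.POST or None, instance=distribution)
#       if form.is_valid() and formset.is_valid():
#           form.save()
#           formset.save()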
class CostFunctionForm(forms.ModelForm):
class Meta:
model = CostFunction
fields = ('name', 'class_name', 'range_function')
def formfield_for_choice_field(self, available_choices):
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.append(currentdir)
file_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
dir_path = file_path.split('/')
dir_path.pop() # remove ./simulation from the list
dir_path.pop() # remove ./simulation_site from the list
costfunction_directory = '/'.join(dir_path)
costfunction_directory += '/agents/costfunction'
black_list = ['__init__','CostFunction', 'CostFunctionFactory']
for filename in os.listdir (costfunction_directory):
# Ignore subfolders
if os.path.isdir (os.path.join(costfunction_directory, filename)):
continue
else:
if re.match(r".*?\.py$", filename):
classname = re.sub(r".py", r"", filename)
if (classname not in black_list):
available_choices.append((classname, classname))
def __init__(self, *args, **kwargs):
available_choices = []
self.formfield_for_choice_field(available_choices)
print available_choices
#self.fields['class_name'].choices = available_choices
return super(CostFunctionForm, self).__init__(*args, **kwargs)
# inlineformset_factory creates a Class from a parent model (Contact)
# to a child model (Address)
ContinuousCostFunctionFormSet = inlineformset_factory(
CostFunction,
ContinuousCostFunction, fields=('parameter', 'value')
)
|
mit
| -7,739,133,508,648,501,000
| 39.22
| 94
| 0.663352
| false
| 4.242616
| false
| false
| false
|
USStateDept/geonode
|
geonode/documents/models.py
|
1
|
6056
|
import logging
import os
import uuid
from django.db import models
from django.db.models import signals
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.contrib.contenttypes import generic
from django.contrib.staticfiles import finders
from django.utils.translation import ugettext_lazy as _
from geonode.layers.models import Layer
from geonode.base.models import ResourceBase, resourcebase_post_save
from geonode.maps.signals import map_changed_signal
from geonode.maps.models import Map
IMGTYPES = ['jpg', 'jpeg', 'tif', 'tiff', 'png', 'gif']
logger = logging.getLogger(__name__)
class Document(ResourceBase):
"""
A document is any kind of information that can be attached to a map such as pdf, images, videos, xls...
"""
# Relation to the resource model
content_type = models.ForeignKey(ContentType, blank=True, null=True)
object_id = models.PositiveIntegerField(blank=True, null=True)
resource = generic.GenericForeignKey('content_type', 'object_id')
doc_file = models.FileField(upload_to='documents',
null=True,
blank=True,
verbose_name=_('File'))
extension = models.CharField(max_length=128, blank=True, null=True)
doc_type = models.CharField(max_length=128, blank=True, null=True)
doc_url = models.URLField(
blank=True,
null=True,
help_text=_('The URL of the document if it is external.'),
verbose_name=_('URL'))
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse('document_detail', args=(self.id,))
@property
def name_long(self):
if not self.title:
return str(self.id)
else:
return '%s (%s)' % (self.title, self.id)
def _render_thumbnail(self):
from cStringIO import StringIO
size = 200, 150
try:
from PIL import Image, ImageOps
except ImportError, e:
logger.error(
'%s: Pillow not installed, cannot generate thumbnails.' %
e)
return None
try:
            # if wand is installed, then use it for pdf thumbnailing
from wand import image
except:
wand_available = False
else:
wand_available = True
if wand_available and self.extension and self.extension.lower(
) == 'pdf' and self.doc_file:
logger.debug(
'Generating a thumbnail for document: {0}'.format(
self.title))
with image.Image(filename=self.doc_file.path) as img:
img.sample(*size)
return img.make_blob('png')
elif self.extension and self.extension.lower() in IMGTYPES and self.doc_file:
img = Image.open(self.doc_file.path)
img = ImageOps.fit(img, size, Image.ANTIALIAS)
else:
filename = finders.find('documents/{0}-placeholder.png'.format(self.extension), False) or \
finders.find('documents/generic-placeholder.png', False)
if not filename:
return None
img = Image.open(filename)
imgfile = StringIO()
img.save(imgfile, format='PNG')
return imgfile.getvalue()
@property
def class_name(self):
return self.__class__.__name__
class Meta(ResourceBase.Meta):
pass
def get_related_documents(resource):
if isinstance(resource, Layer) or isinstance(resource, Map):
ct = ContentType.objects.get_for_model(resource)
return Document.objects.filter(content_type=ct, object_id=resource.pk)
else:
return None
def pre_save_document(instance, sender, **kwargs):
base_name, extension, doc_type = None, None, None
if instance.doc_file:
base_name, extension = os.path.splitext(instance.doc_file.name)
instance.extension = extension[1:]
doc_type_map = settings.DOCUMENT_TYPE_MAP
if doc_type_map is None:
doc_type = 'other'
else:
if instance.extension in doc_type_map:
doc_type = doc_type_map[''+instance.extension]
else:
doc_type = 'other'
instance.doc_type = doc_type
elif instance.doc_url:
if len(instance.doc_url) > 4 and instance.doc_url[-4] == '.':
instance.extension = instance.doc_url[-3:]
if not instance.uuid:
instance.uuid = str(uuid.uuid1())
instance.csw_type = 'document'
if instance.abstract == '' or instance.abstract is None:
instance.abstract = 'No abstract provided'
if instance.title == '' or instance.title is None:
instance.title = instance.doc_file.name
if instance.resource:
instance.csw_wkt_geometry = instance.resource.geographic_bounding_box.split(
';')[-1]
instance.bbox_x0 = instance.resource.bbox_x0
instance.bbox_x1 = instance.resource.bbox_x1
instance.bbox_y0 = instance.resource.bbox_y0
instance.bbox_y1 = instance.resource.bbox_y1
else:
instance.bbox_x0 = -180
instance.bbox_x1 = 180
instance.bbox_y0 = -90
instance.bbox_y1 = 90
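# For reference, the DOCUMENT_TYPE_MAP consulted above is assumed to be a plain
# extension-to-category dict in settings, e.g. (hypothetical values):
#
#   DOCUMENT_TYPE_MAP = {
#       'pdf': 'text', 'doc': 'text',
#       'jpg': 'image', 'png': 'image',
#       'mp4': 'video',
#   }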
def create_thumbnail(sender, instance, created, **kwargs):
from geonode.tasks.update import create_document_thumbnail
create_document_thumbnail.delay(object_id=instance.id)
def update_documents_extent(sender, **kwargs):
model = 'map' if isinstance(sender, Map) else 'layer'
ctype = ContentType.objects.get(model=model)
for document in Document.objects.filter(content_type=ctype, object_id=sender.id):
document.save()
signals.pre_save.connect(pre_save_document, sender=Document)
signals.post_save.connect(create_thumbnail, sender=Document)
signals.post_save.connect(resourcebase_post_save, sender=Document)
map_changed_signal.connect(update_documents_extent)
|
gpl-3.0
| -3,836,716,759,410,032,600
| 31.735135
| 107
| 0.63177
| false
| 4
| false
| false
| false
|
rodrigozc/mockatron
|
mockatron_core/utils.py
|
1
|
6477
|
from django.http import HttpResponse
from django.template import Context
from .models import *
from .constants import *
from .classes import *
from xml.etree import ElementTree
import hashlib, json, xmltodict, re, urllib.request, logging
logger = logging.getLogger("django")
def extract_agent_data_from_request(request):
result = {}
if "HTTP_X_FORWARDED_PROTO" in request.META:
result['protocol'] = request.META["HTTP_X_FORWARDED_PROTO"]
else:
result['protocol'] = request.scheme
logger.info(request.META)
if 'HTTP_X_MOCKATRON_ORIGINAL_HOST' in request.META:
result['host'] = request.META["HTTP_X_MOCKATRON_ORIGINAL_HOST"].split(":")[0]
result['port'] = request.META["HTTP_X_MOCKATRON_ORIGINAL_HOST"].split(":")[1]
else:
result['host'] = request.META["HTTP_HOST"].split(":")[0]
if 'HTTP_X_FORWARDED_PORT' in request.META:
result['port'] = request.META["HTTP_X_FORWARDED_PORT"]
else:
result['port'] = request.META["SERVER_PORT"]
result['path'] = request.path
result['method'] = request.method
result['content_type'] = request.META["CONTENT_TYPE"]
    if result['content_type'] is not None:
result['content_type'] = result['content_type'].split(";")[0]
return result
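# For illustration, a request proxied with X-Forwarded-* headers would yield a
# dict shaped like (hypothetical values):
#
#   {'protocol': 'https', 'host': 'api.example.com', 'port': '443',
#    'path': '/orders', 'method': 'POST', 'content_type': 'application/json'}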
def create_and_return_agent(agent_data):
agent = Agent(protocol=agent_data['protocol'], host=agent_data['host'], port=agent_data['port'], path=agent_data['path'], method=agent_data['method'], content_type=agent_data['content_type'])
agent.save()
if agent.content_type == CONTENT_TYPE_XML:
try:
req = urllib.request.Request(agent.wsdl_url())
content = urllib.request.urlopen(req).read()
root = ElementTree.fromstring(content.decode(encoding='UTF-8'))
for operation_wsdl in root.findall('.//{http://schemas.xmlsoap.org/wsdl/}portType/{http://schemas.xmlsoap.org/wsdl/}operation'):
# Define input message
input_element = operation_wsdl.find('{http://schemas.xmlsoap.org/wsdl/}input')
input_element_str = input_element.attrib['message'][input_element.attrib['message'].find(':')+1:]
input_message_element = root.find('.//{http://schemas.xmlsoap.org/wsdl/}message[@name="' + input_element_str + '"]/{http://schemas.xmlsoap.org/wsdl/}part')
input_message_element_str = input_message_element.attrib['element'][input_message_element.attrib['element'].find(':')+1:]
# Define output message
output_element = operation_wsdl.find('{http://schemas.xmlsoap.org/wsdl/}output')
                if output_element is not None:
output_element_str = output_element.attrib['message'][output_element.attrib['message'].find(':')+1:]
output_message_element = root.find('.//{http://schemas.xmlsoap.org/wsdl/}message[@name="' + output_element_str + '"]/{http://schemas.xmlsoap.org/wsdl/}part')
output_message_element_str = output_message_element.attrib['element'][output_message_element.attrib['element'].find(':')+1:]
else:
output_message_element_str = None
operation = Operation(agent=agent, name=operation_wsdl.attrib['name'], input_message=input_message_element_str, output_message=output_message_element_str)
operation.save()
create_default_response(operation)
except Exception:
create_default_response(agent)
else:
create_default_response(agent)
return agent
def create_default_response(provider):
parent_key = re.sub(r'class (.+\.)+', '', re.sub('[\'<>]', '', str(type(provider)))).lower()
if provider.get_content_type() == CONTENT_TYPE_XML:
default_label = XML_DEFAULT_LABEL
default_response = XML_DEFAULT_RESPONSE
elif provider.get_content_type() == CONTENT_TYPE_JSON:
default_label = JSON_DEFAULT_LABEL
default_response = JSON_DEFAULT_RESPONSE
else:
default_label = UNKNOWN_DEFAULT_LABEL
default_response = UNKNOWN_DEFAULT_RESPONSE
response_args = {parent_key: provider, 'label': default_label, 'content': default_response}
response = Response(**response_args)
response.save()
def responder(agent, request):
logger.debug("Starting responder...")
response_method = None
    # Evaluate the request against the agent's operations, if any exist
logger.debug("Evaluate request against operations to get response method...")
if agent.operations.count() > 0:
for operation in agent.operations.all():
if operation.belongs_to(request):
response_method = MockResponderFactory.get_mock_responder(operation)
break
    # Get response_method based on the Agent if no Operation matched the request above
    logger.debug("Get response method based on agent, if no operation matched the request...")
    if response_method is None:
response_method = MockResponderFactory.get_mock_responder(agent)
logger.debug("Get response based on mock responder type...")
response = response_method.get() if isinstance(response_method, SimpleMockResponder) else response_method.get(request)
context = Context()
context['request'] = request
logger.debug("Build response based on agent content type...")
if request.body != b'':
body = request.body.decode(encoding='UTF-8')
if agent.content_type == CONTENT_TYPE_XML:
context['body'] = xmltodict.parse(body, process_namespaces=True)
elif agent.content_type == CONTENT_TYPE_JSON:
context['body'] = json.loads(body)
else:
context['body'] = body
logger.debug("Replies apllying django template...")
return HttpResponse(response.template().render(context), status=response.http_code, content_type=agent.content_type)
def json_agent_locator(agent_data):
url = '{}://{}:{}{}'.format(agent_data['protocol'], agent_data['host'], agent_data['port'], agent_data['path'])
agents = Agent.objects.filter(method=agent_data['method'], protocol=agent_data['protocol'], host=agent_data['host'], port=agent_data['port'])
path_list = agent_data['path'].split('/')
while len(path_list) > 1:
agents_list = agents.filter(path__startswith='/'.join(path_list))
for a in agents_list:
if a.match(url):
return a
path_list.pop()
return None
|
apache-2.0
| 4,762,224,527,407,931,000
| 48.068182
| 195
| 0.64428
| false
| 3.897112
| false
| false
| false
|
hradec/gaffer
|
python/GafferUI/PlugLayout.py
|
1
|
28367
|
##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2014, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import re
import sys
import functools
import collections
import Gaffer
import GafferUI
from Qt import QtWidgets
## A class for laying out widgets to represent all the plugs held on a particular parent.
#
# Per-plug metadata support :
#
# - "<layoutName>:index" controls ordering of plugs within the layout
# - "<layoutName>:section" places the plug in a named section of the layout
# - "<layoutName>:divider" specifies whether or not a plug should be followed by a divider
# - "<layoutName>:activator" the name of an activator to control editability
# - "<layoutName>:visibilityActivator" the name of an activator to control visibility
# - "<layoutName>:accessory" groups as an accessory to the previous widget
# - "<layoutName>:width" gives a specific width to the plug's widget
#
# Per-parent metadata support :
#
# - <layoutName>:section:sectionName:summary" dynamic metadata entry returning a
# string to be used as a summary for the section.
# - <layoutName>:section:sectionName:collapsed" boolean indicating whether or
# not a section should be collapsed initially.
# - "<layoutName>:activator:activatorName" a dynamic boolean metadata entry to control
# the activation of plugs within the layout
# - "<layoutName>:activators" a dynamic metadata entry returning a CompoundData of booleans
# for several named activators.
#
# ## Custom widgets
#
# Custom widgets unassociated with any specific plugs may also be added to plug layouts.
# This can be useful when customising user interfaces for a particular facility - for instance
# to display asset management information for each node.
#
# A custom widget is specified using parent metadata entries starting with
# "<layoutName>:customWidget:Name:" prefixes, where "Name" is a unique identifier for the
# custom widget :
#
# - "<layoutName>:customWidget:Name:widgetType" specifies a string containing the fully qualified
# name of a python callable which will be used to create the widget. This callable will be passed
# the same parent GraphComponent (node or plug) that the PlugLayout is being created for.
# - "<layoutName>:customWidget:Name:*" as for the standard per-plug "<layoutName>:*" metadata, so custom
# widgets may be assigned to a section, reordered, given activators etc.
#
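# As a hedged illustration (the node type, section name and widget path below
# are hypothetical), a custom widget would typically be declared with metadata
# entries along these lines :
#
#   Gaffer.Metadata.registerValue( MyNode, "layout:customWidget:assetInfo:widgetType", "myModule.AssetInfoWidget" )
#   Gaffer.Metadata.registerValue( MyNode, "layout:customWidget:assetInfo:section", "Asset" )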
class PlugLayout( GafferUI.Widget ) :
# We use this when we can't find a ScriptNode to provide the context.
__fallbackContext = Gaffer.Context()
def __init__( self, parent, orientation = GafferUI.ListContainer.Orientation.Vertical, layoutName = "layout", rootSection = "", embedded = False, **kw ) :
assert( isinstance( parent, ( Gaffer.Node, Gaffer.Plug ) ) )
# embedded indicates that the PlugLayout is embedded in another layout
# which affects how the widget is built
self.__embedded = embedded
self.__layout = _TabLayout( orientation, embedded = embedded ) if isinstance( parent, Gaffer.Node ) and not rootSection else _CollapsibleLayout( orientation )
GafferUI.Widget.__init__( self, self.__layout, **kw )
self.__parent = parent
self.__readOnly = False
self.__layoutName = layoutName
# not to be confused with __rootSection, which holds an actual _Section object
self.__rootSectionName = rootSection
# we need to connect to the childAdded/childRemoved signals on
# the parent so we can update the ui when plugs are added and removed.
parent.childAddedSignal().connect( Gaffer.WeakMethod( self.__childAddedOrRemoved ), scoped = False )
parent.childRemovedSignal().connect( Gaffer.WeakMethod( self.__childAddedOrRemoved ), scoped = False )
# since our layout is driven by metadata, we must respond dynamically
# to changes in that metadata.
Gaffer.Metadata.plugValueChangedSignal( self.__node() ).connect( Gaffer.WeakMethod( self.__plugMetadataChanged ), scoped = False )
# and since our activations are driven by plug values, we must respond
# when the plugs are dirtied.
self.__node().plugDirtiedSignal().connect( Gaffer.WeakMethod( self.__plugDirtied ), scoped = False )
# frequently events that trigger a ui update come in batches, so we
# perform the update lazily using a LazyMethod. the dirty variables
# keep track of the work we'll need to do in the update.
self.__layoutDirty = True
self.__activationsDirty = True
self.__summariesDirty = True
# mapping from layout item to widget, where the key is either a plug or
# the name of a custom widget (as returned by layoutOrder()).
self.__widgets = {}
self.__rootSection = _Section( self.__parent )
# set up an appropriate default context in which to view the plugs.
scriptNode = self.__node() if isinstance( self.__node(), Gaffer.ScriptNode ) else self.__node().scriptNode()
self.setContext( scriptNode.context() if scriptNode is not None else self.__fallbackContext )
# Build the layout
self.__update()
def getReadOnly( self ) :
return self.__readOnly
def setReadOnly( self, readOnly ) :
if readOnly == self.getReadOnly() :
return
self.__readOnly = readOnly
for widget in self.__widgets.values() :
self.__applyReadOnly( widget, self.__readOnly )
def getContext( self ) :
return self.__context
def setContext( self, context ) :
self.__context = context
self.__contextChangedConnection = self.__context.changedSignal().connect( Gaffer.WeakMethod( self.__contextChanged ) )
for widget in self.__widgets.values() :
self.__applyContext( widget, context )
## Returns a PlugValueWidget representing the specified child plug.
def plugValueWidget( self, childPlug ) :
self.__updateLazily.flush( self )
w = self.__widgets.get( childPlug, None )
if w is None :
return w
elif isinstance( w, GafferUI.PlugValueWidget ) :
return w
else :
return w.plugValueWidget()
## Returns the custom widget registered with the specified name.
def customWidget( self, name ) :
self.__updateLazily.flush( self )
return self.__widgets.get( name )
## Returns the list of section names that will be used when laying
# out the plugs of the specified parent. The sections are returned
# in the order in which they will be created.
@classmethod
def layoutSections( cls, parent, includeCustomWidgets = False, layoutName = "layout" ) :
d = collections.OrderedDict()
for item in cls.layoutOrder( parent, includeCustomWidgets, layoutName = layoutName ) :
sectionPath = cls.__staticSectionPath( item, parent, layoutName )
sectionName = ".".join( sectionPath )
d[sectionName] = 1
return list( d.keys() )
## Returns the child plugs of the parent in the order in which they
# will be laid out, based on "<layoutName>:index" Metadata entries. If
# includeCustomWidgets is True, then the positions of custom widgets
# are represented by the appearance of the names of the widgets as
# strings within the list. If a section name is specified, then the
# result will be filtered to include only items in that section.
@classmethod
def layoutOrder( cls, parent, includeCustomWidgets = False, section = None, layoutName = "layout", rootSection = "" ) :
items = parent.children( Gaffer.Plug )
items = [ plug for plug in items if not plug.getName().startswith( "__" ) ]
if includeCustomWidgets :
for name in Gaffer.Metadata.registeredValues( parent ) :
m = re.match( layoutName + ":customWidget:(.+):widgetType", name )
if m and cls.__metadataValue( parent, name ) :
items.append( m.group( 1 ) )
itemsAndIndices = [ list( x ) for x in enumerate( items ) ]
for itemAndIndex in itemsAndIndices :
index = cls.__staticItemMetadataValue( itemAndIndex[1], "index", parent, layoutName )
if index is not None :
index = index if index >= 0 else sys.maxsize + index
itemAndIndex[0] = index
itemsAndIndices.sort( key = lambda x : x[0] )
if section is not None :
sectionPath = section.split( "." ) if section else []
itemsAndIndices = [ x for x in itemsAndIndices if cls.__staticSectionPath( x[1], parent, layoutName ) == sectionPath ]
if rootSection :
			rootSectionPath = rootSection.split( "." ) if rootSection else []
itemsAndIndices = [ x for x in itemsAndIndices if cls.__staticSectionPath( x[1], parent, layoutName )[:len(rootSectionPath)] == rootSectionPath ]
return [ x[1] for x in itemsAndIndices ]
@GafferUI.LazyMethod()
def __updateLazily( self ) :
self.__update()
def __update( self ) :
if self.__layoutDirty :
self.__updateLayout()
self.__layoutDirty = False
if self.__activationsDirty :
self.__updateActivations()
self.__activationsDirty = False
if self.__summariesDirty :
self.__updateSummariesWalk( self.__rootSection )
self.__summariesDirty = False
# delegate to our layout class to create a concrete
# layout from the section definitions.
self.__layout.update( self.__rootSection )
def __updateLayout( self ) :
# get the items to lay out - these are a combination
# of plugs and strings representing custom widgets.
items = self.layoutOrder( self.__parent, includeCustomWidgets = True, layoutName = self.__layoutName, rootSection = self.__rootSectionName )
# ditch widgets we don't need any more
itemsSet = set( items )
self.__widgets = { k : v for k, v in self.__widgets.items() if k in itemsSet }
# ditch widgets whose metadata type has changed - we must recreate these.
self.__widgets = {
k : v for k, v in self.__widgets.items()
if isinstance( k, str ) or v is not None and Gaffer.Metadata.value( k, "plugValueWidget:type" ) == v.__plugValueWidgetType
}
# make (or reuse existing) widgets for each item, and sort them into
# sections.
rootSectionDepth = self.__rootSectionName.count( "." ) + 1 if self.__rootSectionName else 0
self.__rootSection.clear()
for item in items :
if item not in self.__widgets :
if isinstance( item, Gaffer.Plug ) :
widget = self.__createPlugWidget( item )
else :
widget = self.__createCustomWidget( item )
self.__widgets[item] = widget
else :
widget = self.__widgets[item]
if widget is None :
continue
section = self.__rootSection
for sectionName in self.__sectionPath( item )[rootSectionDepth:] :
section = section.subsection( sectionName )
if len( section.widgets ) and self.__itemMetadataValue( item, "accessory" ) :
if isinstance( section.widgets[-1], _AccessoryRow ) :
section.widgets[-1].append( widget )
else :
row = _AccessoryRow()
row.append( section.widgets[-1] )
row.append( widget )
section.widgets[-1] = row
else :
section.widgets.append( widget )
if self.__itemMetadataValue( item, "divider" ) :
section.widgets.append( GafferUI.Divider(
GafferUI.Divider.Orientation.Horizontal if self.__layout.orientation() == GafferUI.ListContainer.Orientation.Vertical else GafferUI.Divider.Orientation.Vertical
) )
def __updateActivations( self ) :
with self.getContext() :
# Must scope the context when getting activators, because they are typically
# computed from the plug values, and may therefore trigger a compute.
activators = self.__metadataValue( self.__parent, self.__layoutName + ":activators" ) or {}
activators = { k : v.value for k, v in activators.items() } # convert CompoundData of BoolData to dict of booleans
def active( activatorName ) :
result = True
if activatorName :
result = activators.get( activatorName )
if result is None :
with self.getContext() :
result = self.__metadataValue( self.__parent, self.__layoutName + ":activator:" + activatorName )
result = result if result is not None else False
activators[activatorName] = result
return result
for item, widget in self.__widgets.items() :
if widget is not None :
widget.setEnabled( active( self.__itemMetadataValue( item, "activator" ) ) )
widget.setVisible( active( self.__itemMetadataValue( item, "visibilityActivator" ) ) )
def __updateSummariesWalk( self, section ) :
with self.getContext() :
# Must scope the context because summaries are typically
# generated from plug values, and may therefore trigger
# a compute.
section.summary = self.__metadataValue( self.__parent, self.__layoutName + ":section:" + section.fullName + ":summary" ) or ""
section.valuesChanged = False
for subsection in section.subsections.values() :
self.__updateSummariesWalk( subsection )
# If one of our subsections has changed, we don't need to
# check any of our own plugs, we just propagate the flag.
if subsection.valuesChanged :
section.valuesChanged = True
if not section.valuesChanged :
# Check our own widgets, this is a little icky, the alternative
# would be to iterate our items, reverse engineer the section
# then update that, but this allows us to early-out much sooner.
for widget in section.widgets :
if self.__widgetPlugValuesChanged( widget ) :
section.valuesChanged = True
break
@staticmethod
def __widgetPlugValuesChanged( widget ) :
plugs = []
if isinstance( widget, GafferUI.PlugWidget ) :
widget = widget.plugValueWidget()
if hasattr( widget, 'getPlugs' ) :
plugs = widget.getPlugs()
for plug in plugs :
if PlugLayout.__plugValueChanged( plug ) :
return True
return False
@staticmethod
def __plugValueChanged( plug ) :
## \todo This mirrors LabelPlugValueWidget. This doesn't handle child plug defaults/connections
# properly. We need to improve NodeAlgo when we have the next API break.
valueChanged = plug.getInput() is not None
if not valueChanged and isinstance( plug, Gaffer.ValuePlug ) :
if Gaffer.NodeAlgo.hasUserDefault( plug ) :
valueChanged = not Gaffer.NodeAlgo.isSetToUserDefault( plug )
else :
valueChanged = not plug.isSetToDefault()
return valueChanged
def __import( self, path ) :
path = path.split( "." )
result = __import__( path[0] )
for n in path[1:] :
result = getattr( result, n )
return result
def __createPlugWidget( self, plug ) :
result = GafferUI.PlugValueWidget.create( plug )
if result is None :
return result
width = self.__itemMetadataValue( plug, "width" )
if width is not None :
result._qtWidget().setFixedWidth( width )
if result._qtWidget().layout() is not None :
result._qtWidget().layout().setSizeConstraint( QtWidgets.QLayout.SetDefaultConstraint )
if isinstance( result, GafferUI.PlugValueWidget ) and not result.hasLabel() and self.__itemMetadataValue( plug, "label" ) != "" :
result = GafferUI.PlugWidget( result )
if self.__layout.orientation() == GafferUI.ListContainer.Orientation.Horizontal :
# undo the annoying fixed size the PlugWidget has applied
# to the label.
## \todo Shift all the label size fixing out of PlugWidget and just fix the
# widget here if we're in a vertical orientation.
QWIDGETSIZE_MAX = 16777215 # qt #define not exposed by PyQt or PySide
result.labelPlugValueWidget().label()._qtWidget().setFixedWidth( QWIDGETSIZE_MAX )
self.__applyReadOnly( result, self.getReadOnly() )
self.__applyContext( result, self.getContext() )
# Store the metadata value that controlled the type created, so we can compare to it
# in the future to determine if we can reuse the widget.
result.__plugValueWidgetType = Gaffer.Metadata.value( plug, "plugValueWidget:type" )
return result
def __createCustomWidget( self, name ) :
widgetType = self.__itemMetadataValue( name, "widgetType" )
widgetClass = self.__import( widgetType )
result = widgetClass( self.__parent )
self.__applyContext( result, self.getContext() )
return result
def __node( self ) :
return self.__parent if isinstance( self.__parent, Gaffer.Node ) else self.__parent.node()
@classmethod
def __metadataValue( cls, plugOrNode, name ) :
return Gaffer.Metadata.value( plugOrNode, name )
@classmethod
def __staticItemMetadataValue( cls, item, name, parent, layoutName ) :
if isinstance( item, Gaffer.Plug ) :
v = Gaffer.Metadata.value( item, layoutName + ":" + name )
if v is None and name in ( "divider", "label" ) :
# Backwards compatibility with old unprefixed metadata names.
v = Gaffer.Metadata.value( item, name )
return v
else :
return cls.__metadataValue( parent, layoutName + ":customWidget:" + item + ":" + name )
def __itemMetadataValue( self, item, name ) :
return self.__staticItemMetadataValue( item, name, parent = self.__parent, layoutName = self.__layoutName )
@classmethod
def __staticSectionPath( cls, item, parent, layoutName ) :
m = None
if isinstance( parent, Gaffer.Node ) :
# Backwards compatibility with old metadata entry
## \todo Remove
m = cls.__staticItemMetadataValue( item, "nodeUI:section", parent, layoutName )
if m == "header" :
m = ""
if m is None :
m = cls.__staticItemMetadataValue( item, "section", parent, layoutName )
return m.split( "." ) if m else []
def __sectionPath( self, item ) :
return self.__staticSectionPath( item, parent = self.__parent, layoutName = self.__layoutName )
def __childAddedOrRemoved( self, *unusedArgs ) :
# typically many children are added and removed at once, so
		# we do a lazy update so we can batch up several changes into one
		# update once the upheaval is over.
self.__layoutDirty = True
self.__updateLazily()
def __applyReadOnly( self, widget, readOnly ) :
if widget is None :
return
if hasattr( widget, "setReadOnly" ) :
widget.setReadOnly( readOnly )
elif isinstance( widget, GafferUI.PlugWidget ) :
widget.labelPlugValueWidget().setReadOnly( readOnly )
widget.plugValueWidget().setReadOnly( readOnly )
elif hasattr( widget, "plugValueWidget" ) :
widget.plugValueWidget().setReadOnly( readOnly )
def __applyContext( self, widget, context ) :
if hasattr( widget, "setContext" ) :
widget.setContext( context )
elif isinstance( widget, GafferUI.PlugWidget ) :
widget.labelPlugValueWidget().setContext( context )
widget.plugValueWidget().setContext( context )
elif hasattr( widget, "plugValueWidget" ) :
widget.plugValueWidget().setContext( context )
def __plugMetadataChanged( self, plug, key, reason ) :
if plug != self.__parent and plug.parent() != self.__parent :
return
if key in (
"divider",
self.__layoutName + ":divider",
self.__layoutName + ":index",
self.__layoutName + ":section",
self.__layoutName + ":accessory",
"plugValueWidget:type"
) :
# we often see sequences of several metadata changes - so
# we schedule a lazy update to batch them into one ui update.
self.__layoutDirty = True
self.__updateLazily()
elif re.match( self.__layoutName + ":section:.*:summary", key ) :
self.__summariesDirty = True
self.__updateLazily()
def __plugDirtied( self, plug ) :
if not self.visible() or plug.direction() != plug.Direction.In :
return
self.__activationsDirty = True
self.__summariesDirty = True
self.__updateLazily()
def __contextChanged( self, context, name ) :
self.__activationsDirty = True
self.__summariesDirty = True
self.__updateLazily()
class _AccessoryRow( GafferUI.ListContainer ) :
def __init__( self, **kw ) :
GafferUI.ListContainer.__init__( self, GafferUI.ListContainer.Orientation.Horizontal, spacing = 4, **kw )
# The _Section class provides a simple abstract representation of a hierarchical
# layout. Each section contains a list of widgets to be displayed in that section,
# and an OrderedDict of named subsections.
class _Section( object ) :
def __init__( self, _parent, _fullName = "" ) :
self.__parent = _parent
self.fullName = _fullName
self.clear()
def subsection( self, name ) :
result = self.subsections.get( name )
if result is not None :
return result
result = _Section(
self.__parent,
self.fullName + "." + name if self.fullName else name
)
self.subsections[name] = result
return result
def clear( self ) :
self.widgets = []
self.subsections = collections.OrderedDict()
self.summary = ""
self.valuesChanged = False
def saveState( self, name, value ) :
Gaffer.Metadata.registerValue( self.__parent, self.__stateName( name ), value, persistent = False )
def restoreState( self, name ) :
return Gaffer.Metadata.value( self.__parent, self.__stateName( name ) )
def __stateName( self, name ) :
return "layout:section:" + self.fullName + ":" + name
# The PlugLayout class deals with all the details of plugs, metadata and
# signals to define an abstract layout in terms of _Sections. It then
# delegates to the _Layout classes to create an actual layout in terms
# of Widgets. This allows us to present different layouts based on whether
# the parent is a node (tabbed layout) or a plug (collapsible layout).
class _Layout( GafferUI.Widget ) :
def __init__( self, topLevelWidget, orientation, **kw ) :
GafferUI.Widget.__init__( self, topLevelWidget, **kw )
self.__orientation = orientation
def orientation( self ) :
return self.__orientation
def update( self, section ) :
raise NotImplementedError
class _TabLayout( _Layout ) :
def __init__( self, orientation, embedded = False, **kw ) :
self.__embedded = embedded
self.__mainColumn = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )
_Layout.__init__( self, self.__mainColumn, orientation, **kw )
with self.__mainColumn :
self.__widgetsColumn = GafferUI.ListContainer( self.orientation(), spacing = 4, borderWidth = 4 )
self.__tabbedContainer = GafferUI.TabbedContainer()
# if the TabLayout is embedded, we want to restrict the maximum width/height depending on the orientation
if self.__embedded :
if self.orientation() == GafferUI.ListContainer.Orientation.Vertical :
self.__tabbedContainer._qtWidget().setSizePolicy( QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum ) )
else :
self.__tabbedContainer._qtWidget().setSizePolicy( QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Expanding ) )
self.__currentTabChangedConnection = self.__tabbedContainer.currentChangedSignal().connect(
Gaffer.WeakMethod( self.__currentTabChanged )
)
def update( self, section ) :
self.__section = section
self.__widgetsColumn[:] = section.widgets
existingTabs = collections.OrderedDict()
for tab in self.__tabbedContainer[:] :
existingTabs[self.__tabbedContainer.getLabel( tab )] = tab
updatedTabs = collections.OrderedDict()
for name, subsection in section.subsections.items() :
tab = existingTabs.get( name )
if tab is None :
# Use scroll bars only when the TabLayout is not embedded
if self.__embedded :
tab = GafferUI.Frame( borderWidth = 0, borderStyle = GafferUI.Frame.BorderStyle.None_ )
else :
tab = GafferUI.ScrolledContainer( borderWidth = 8 )
if self.orientation() == GafferUI.ListContainer.Orientation.Vertical :
tab.setHorizontalMode( GafferUI.ScrollMode.Never )
else :
tab.setVerticalMode( GafferUI.ScrollMode.Never )
tab.setChild( _CollapsibleLayout( self.orientation() ) )
tab.getChild().update( subsection )
updatedTabs[name] = tab
if existingTabs.keys() != updatedTabs.keys() :
with Gaffer.BlockedConnection( self.__currentTabChangedConnection ) :
del self.__tabbedContainer[:]
for name, tab in updatedTabs.items() :
self.__tabbedContainer.append( tab, label = name )
for index, subsection in enumerate( section.subsections.values() ) :
## \todo Consider how/if we should add a public tooltip API to TabbedContainer.
self.__tabbedContainer._qtWidget().setTabToolTip( index, subsection.summary )
if not len( existingTabs ) :
currentTabIndex = self.__section.restoreState( "currentTab" ) or 0
if currentTabIndex < len( self.__tabbedContainer ) :
self.__tabbedContainer.setCurrent( self.__tabbedContainer[currentTabIndex] )
self.__widgetsColumn.setVisible( len( section.widgets ) )
self.__tabbedContainer.setVisible( len( self.__tabbedContainer ) )
def __currentTabChanged( self, tabbedContainer, currentTab ) :
self.__section.saveState( "currentTab", tabbedContainer.index( currentTab ) )
class _CollapsibleLayout( _Layout ) :
def __init__( self, orientation, **kw ) :
self.__column = GafferUI.ListContainer( orientation, spacing = 4 )
_Layout.__init__( self, self.__column, orientation, **kw )
self.__collapsibles = {} # Indexed by section name
def update( self, section ) :
widgets = list( section.widgets )
for name, subsection in section.subsections.items() :
collapsible = self.__collapsibles.get( name )
if collapsible is None :
collapsible = GafferUI.Collapsible( name, _CollapsibleLayout( self.orientation() ), collapsed = True )
# Hack to add margins at the top and bottom but not at the sides.
## \todo This is exposed in the public API via the borderWidth
# parameter to the Collapsible. That parameter sucks because a) it
# makes a margin rather than a border, and b) it doesn't allow per-edge
# control. Either make that make sense, or remove it and find a way
# of deferring all this to the style.
collapsible._qtWidget().layout().setContentsMargins( 0, 2, 0, 2 )
collapsible.setCornerWidget( GafferUI.Label(), True )
## \todo This is fighting the default sizing applied in the Label constructor. Really we need a standard
# way of controlling size behaviours for all widgets in the public API.
collapsible.getCornerWidget()._qtWidget().setSizePolicy( QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Fixed )
if subsection.restoreState( "collapsed" ) is False :
collapsible.setCollapsed( False )
collapsible.stateChangedSignal().connect(
functools.partial( Gaffer.WeakMethod( self.__collapsibleStateChanged ), subsection = subsection ),
scoped = False
)
self.__collapsibles[name] = collapsible
collapsible.getChild().update( subsection )
collapsible.getCornerWidget().setText(
"<small>" + " ( " + subsection.summary + " )</small>" if subsection.summary else ""
)
currentValueChanged = collapsible._qtWidget().property( "gafferValueChanged" )
if subsection.valuesChanged != currentValueChanged :
collapsible._qtWidget().setProperty( "gafferValueChanged", GafferUI._Variant.toVariant( subsection.valuesChanged ) )
collapsible._repolish()
widgets.append( collapsible )
self.__column[:] = widgets
def __collapsibleStateChanged( self, collapsible, subsection ) :
subsection.saveState( "collapsed", collapsible.getCollapsed() )
|
bsd-3-clause
| -3,389,370,072,250,530,000
| 35.936198
| 165
| 0.706913
| false
| 3.701814
| false
| false
| false
|
UCSBarchlab/PyRTL
|
tests/rtllib/test_barrel.py
|
1
|
2448
|
import unittest
import random
import pyrtl
from pyrtl.rtllib import barrel
class TestBarrel(unittest.TestCase):
# @classmethod
# def setUpClass(cls):
# # this is to ensure reproducibility
# random.seed(777906374)
def setUp(self):
pyrtl.reset_working_block()
self.inp_val = pyrtl.Input(8, 'inp_val')
self.inp_shift = pyrtl.Input(2, 'inp_shift')
self.out_zeros = pyrtl.Output(18, 'out_zeros')
self.out_ones = pyrtl.Output(18, 'out_ones')
def test_shift_left(self):
random.seed(777906373)
zero = pyrtl.Const(0, 1)
one = pyrtl.Const(1, 1)
self.out_zeros <<= barrel.barrel_shifter(self.inp_val, zero, one, self.inp_shift)
self.out_ones <<= barrel.barrel_shifter(self.inp_val, one, one, self.inp_shift)
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
vals = [random.randint(0, 20) for v in range(20)]
shifts = [random.randint(0, 3) for s in range(20)]
for i in range(len(vals)):
sim.step({
self.inp_val: vals[i],
self.inp_shift: shifts[i]
})
base_sum = vals[i] * pow(2, shifts[i])
self.assertEqual(sim.inspect(self.out_zeros), base_sum)
self.assertEqual(sim.inspect(self.out_ones), base_sum + pow(2, shifts[i]) - 1)
def test_shift_right(self):
random.seed(777906374)
zero = pyrtl.Const(0, 1)
one = pyrtl.Const(1, 1)
self.out_zeros <<= barrel.barrel_shifter(self.inp_val, zero, zero, self.inp_shift)
self.out_ones <<= barrel.barrel_shifter(self.inp_val, one, zero, self.inp_shift)
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
vals = [random.randint(0, 20) for v in range(20)]
shifts = [random.randint(0, 3) for s in range(20)]
for i in range(len(vals)):
sim.step({
self.inp_val: vals[i],
self.inp_shift: shifts[i]
})
base_sum = int(vals[i] / pow(2, shifts[i]))
self.assertEqual(sim.inspect(self.out_zeros), base_sum, "failed on value %d" % vals[i])
extra_sum = sum([pow(2, len(self.inp_val) - b - 1) for b in range(shifts[i])])
self.assertEqual(sim.inspect(self.out_ones), base_sum + extra_sum,
"failed on value %d" % vals[i])
|
bsd-3-clause
| -2,724,166,392,009,105,000
| 40.491525
| 99
| 0.574346
| false
| 3.14249
| true
| false
| false
|
leth/nose2
|
nose2/plugins/loader/discovery.py
|
1
|
8390
|
"""
Discovery-based test loader.
This plugin implements nose2's automatic test module discovery. It
looks for test modules in packages and directories whose names start
with 'test', then fires the :func:`loadTestsFromModule` hook for each
one to allow other plugins to load the actual tests.
It also fires :func:`handleFile` for every file that it sees, and
:func:`matchPath` for every python module, to allow other plugins to
load tests from other kinds of files and to influence which modules
are examined for tests.
"""
# Adapted from unittest2/loader.py from the unittest2 plugins branch.
# This module contains some code copied from unittest2/loader.py and other
# code developed in reference to that module and others within unittest2.
# unittest2 is Copyright (c) 2001-2010 Python Software Foundation; All
# Rights Reserved. See: http://docs.python.org/license.html
from fnmatch import fnmatch
import logging
import os
import sys
from nose2 import events, util
__unittest = True
log = logging.getLogger(__name__)
class DiscoveryLoader(events.Plugin):
"""Loader plugin that can discover tests"""
alwaysOn = True
configSection = 'discovery'
def registerInSubprocess(self, event):
event.pluginClasses.append(self.__class__)
def loadTestsFromName(self, event):
"""Load tests from module named by event.name"""
# turn name into path or module name
# fire appropriate hooks (handle file or load from module)
if event.module:
return
name = event.name
module = None
_, top_level_dir = self._getStartDirs()
try:
# try name as a dotted module name first
__import__(name)
module = sys.modules[name]
except ImportError:
# if that fails, try it as a file or directory
event.extraTests.extend(
self._find_tests(event, name, top_level_dir))
else:
event.extraTests.extend(
self._find_tests_in_module(event, module, top_level_dir))
def loadTestsFromNames(self, event):
"""Discover tests if no test names specified"""
log.debug("Received event %s", event)
if event.names or event.module:
return
event.handled = True # I will handle discovery
return self._discover(event)
def _getStartDirs(self):
start_dir = self.session.startDir
top_level_dir = self.session.topLevelDir
if start_dir is None:
start_dir = '.'
if top_level_dir is None:
top_level_dir = start_dir
if not os.path.isdir(os.path.abspath(start_dir)):
raise OSError("%s is not a directory" % os.path.abspath(start_dir))
is_not_importable = False
start_dir = os.path.abspath(start_dir)
top_level_dir = os.path.abspath(top_level_dir)
if start_dir != top_level_dir:
is_not_importable = not os.path.isfile(
os.path.join(start_dir, '__init__.py'))
if is_not_importable:
raise ImportError(
'Start directory is not importable: %r' % start_dir)
# this is redundant in some cases, but that's ok
self.session.prepareSysPath()
return start_dir, top_level_dir
def _discover(self, event):
loader = event.loader
try:
start_dir, top_level_dir = self._getStartDirs()
except (OSError, ImportError):
_, ev, _ = sys.exc_info()
return loader.suiteClass(
loader.failedLoadTests(self.session.startDir, ev))
log.debug("_discover in %s (%s)", start_dir, top_level_dir)
tests = list(self._find_tests(event, start_dir, top_level_dir))
return loader.suiteClass(tests)
def _find_tests(self, event, start, top_level):
"""Used by discovery. Yields test suites it loads."""
log.debug('_find_tests(%r, %r)', start, top_level)
if start == top_level:
full_path = start
else:
full_path = os.path.join(top_level, start)
if os.path.isdir(start):
for test in self._find_tests_in_dir(
event, full_path, top_level):
yield test
elif os.path.isfile(start):
for test in self._find_tests_in_file(
event, start, full_path, top_level):
yield test
def _find_tests_in_dir(self, event, full_path, top_level):
log.debug("find in dir %s (%s)", full_path, top_level)
dirname = os.path.basename(full_path)
pattern = self.session.testFilePattern
evt = events.HandleFileEvent(
event.loader, dirname, full_path, pattern, top_level)
result = self.session.hooks.handleDir(evt)
if evt.extraTests:
for test in evt.extraTests:
yield test
if evt.handled:
if result:
yield result
return
evt = events.MatchPathEvent(dirname, full_path, pattern)
result = self.session.hooks.matchDirPath(evt)
if evt.handled and not result:
return
for path in os.listdir(full_path):
entry_path = os.path.join(full_path, path)
if os.path.isfile(entry_path):
for test in self._find_tests_in_file(
event, path, entry_path, top_level):
yield test
elif os.path.isdir(entry_path):
if ('test' in path.lower()
or util.ispackage(entry_path)
or path in self.session.libDirs):
for test in self._find_tests(event, entry_path, top_level):
yield test
def _find_tests_in_file(self, event, filename, full_path, top_level):
log.debug("find in file %s (%s)", full_path, top_level)
pattern = self.session.testFilePattern
loader = event.loader
evt = events.HandleFileEvent(
loader, filename, full_path, pattern, top_level)
result = self.session.hooks.handleFile(evt)
if evt.extraTests:
yield loader.suiteClass(evt.extraTests)
if evt.handled:
if result:
yield result
return
if not util.valid_module_name(filename):
# valid Python identifiers only
return
evt = events.MatchPathEvent(filename, full_path, pattern)
result = self.session.hooks.matchPath(evt)
if evt.handled:
if not result:
return
elif not self._match_path(filename, full_path, pattern):
return
# if the test file matches, load it
name = util.name_from_path(full_path)
try:
module = util.module_from_name(name)
except:
yield loader.failedImport(name)
else:
mod_file = os.path.abspath(
getattr(module, '__file__', full_path))
realpath = os.path.splitext(mod_file)[0]
fullpath_noext = os.path.splitext(full_path)[0]
if realpath.lower() != fullpath_noext.lower():
module_dir = os.path.dirname(realpath)
mod_name = os.path.splitext(os.path.basename(full_path))[0]
expected_dir = os.path.dirname(full_path)
msg = ("%r module incorrectly imported from %r. "
"Expected %r. Is this module globally installed?"
)
raise ImportError(
msg % (mod_name, module_dir, expected_dir))
yield loader.loadTestsFromModule(module)
def _find_tests_in_module(self, event, module, top_level_dir):
# only called from loadTestsFromName
yield event.loader.loadTestsFromModule(module)
# may be a package; recurse into __path__ if so
pkgpath = getattr(module, '__path__', None)
if pkgpath:
for entry in pkgpath:
full_path = os.path.abspath(os.path.join(top_level_dir, entry))
for test in self._find_tests_in_dir(
event, full_path, top_level_dir):
yield test
def _match_path(self, path, full_path, pattern):
# override this method to use alternative matching strategy
return fnmatch(path, pattern)
|
bsd-2-clause
| -5,359,058,218,203,558,000
| 36.792793
| 79
| 0.590942
| false
| 4.106706
| true
| false
| false
|
edm1/error-aware-demultiplexer
|
extras/install_pypy3-2.4.0.py
|
1
|
4815
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Edward Mountjoy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
import argparse
import os
from sys import platform as _platform
import subprocess
from shutil import copyfile
def main():
""" Installs pypy3.
"""
# Parse the command line args
args = parse_arguments()
print("Installing...")
install_pypy(args)
return 0
def install_pypy(args):
""" Function get and install pypy3 binary.
"""
# Make input python 2.7 compatible
if sys.version_info[0] >= 3:
get_input = input
else:
get_input = raw_input
# Confirm paths
exit_msg = "\nExiting. Use --help to view install options."
for msg in ["> Install path: {0} [y/n] ".format(args.dir),
"> Bashrc path: {0} [y/n] ".format(args.bashrc)]:
ret = get_input(msg)
if not ret == "y":
sys.exit(exit_msg)
# Make output folder
make_folders(args.dir)
# Get and extract pypy3
temp_pypy = "pypy3_2.4.0_download.tar.bz2"
cmd = []
if _platform == "linux" or _platform == "linux2":
url = "https://bitbucket.org/pypy/pypy/downloads/pypy3-2.4.0-linux64.tar.bz2"
cmd.append('wget {0} -O {1}'.format(url, temp_pypy))
elif _platform == "darwin":
url = "https://bitbucket.org/pypy/pypy/downloads/pypy3-2.4.0-osx64.tar.bz2"
# OS X
cmd.append('curl -o {0} -L {1}'.format(temp_pypy, url))
# Unzip file
cmd.append('tar -jxvf {0} --strip 1 -C {1}'.format(temp_pypy, args.dir))
# Run command
ret = subprocess.call(";".join(cmd), shell=True)
if not ret == 0:
sys.exit("There was a problem downloading or extracting pypy. Exiting.")
# Remove download
os.remove(temp_pypy)
# Create backup of bashrc
bashrc_backup = "{0}_backup".format(args.bashrc)
if os.path.exists(args.bashrc):
copyfile(args.bashrc, bashrc_backup)
print("\nCreated backup for of {0} at {1}.".format(args.bashrc, bashrc_backup))
# Add pypy3 bin to PATH
pypy_bin = os.path.join(args.dir, "bin")
lines = ["\n# PyPy3 2.4.0 bin PATH - created by aware-demultiplexer",
"export PATH=$PATH:{0}\n".format(pypy_bin)]
with open(args.bashrc, 'a') as out_h:
for line in lines:
out_h.write(line + "\n")
print("Finished installing PyPy3")
def make_folders(outDir):
# Get list of folders that need checking
check_dirs = []
check_dir = outDir
    while True: # walk up the tree until we reach the home directory
# Check that its not home dir
try:
if os.path.samefile(check_dir, os.getenv("HOME")):
break
except FileNotFoundError:
pass
# Append file
check_dirs.append(check_dir)
check_dir = os.path.split(check_dir)[0]
# Check those folders
for check_dir in check_dirs[::-1]:
if not os.path.exists(check_dir):
os.makedirs(check_dir)
return 0
def parse_arguments():
""" Will parse the command line arguments arnd return the arg object.
"""
home_dir = os.getenv("HOME")
parser = argparse.ArgumentParser(
description="Installs PyPy3 2.4.0 in user's home directory")
parser.add_argument("--dir", metavar='<installDir>',
help="Directory to install PyPy3 to. (Default: ~/programs/pypy3-2.4.0)",
default=os.path.join(*[home_dir, "programs", "pypy3-2.4.0"]))
parser.add_argument("--bashrc", metavar='<bashrc>',
help=("Location of basrc file (or equivalent) to append pypy3 bin path "
"to. (Default: ~/.bashrc)"),
default=os.path.join(home_dir, ".bashrc"))
# Parse the arguments
return parser.parse_args()
if __name__ == '__main__':
main()
|
mit
| -2,608,913,167,537,939,000
| 33.640288
| 87
| 0.637799
| false
| 3.569311
| false
| false
| false
|
Liakoni/pgn2csv
|
pgn2csv.py
|
1
|
8232
|
#pgn2csv is free software: you can redistribute it
#and/or modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation, either version 3
#of the License, or (at your option) any later version.
#You should have received a copy of the GNU General Public License
#along with pgn2csv. If not, see <http://www.gnu.org/licenses/>.
#Copyleft 2012 - Author: chefarov@gmail.com
#version 1.1
import sys
import os
import argparse
from collections import OrderedDict
default_dir = os.getcwd()
'''Process a pgn tag line, separating the tag from its value and removing characters like quotes, commas etc.'''
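# For example (illustrative input), the line:  [Event "FIDE World Cup"]
# ends up stored as tags['Event'] = 'FIDE World Cup'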
def process_line(line, tags):
    tag, value = line.split(' ', 1) #split each line at its first whitespace (2nd arg means: 1 split only)
tag = tag[1:] #remove '[' (1st character)
    value = value.replace( ',' , '' ) #name fields may contain name and lastname separated by comma (remove it to keep csv fields intact)
value = value.rstrip( '\r\n' ) #remove newline chars
if tags.has_key(tag): #do not add arbitrary tags
tags[tag] = value[1:-2] #also remove last two chars : "] and the 1st one : "
def write_to_file(tags, fout):
global id
for v in tags.values():
fout.write(str(v)+', ')
def initDic(d): #reset every stored tag value (renamed arg avoids shadowing the dict builtin)
    for key in d.keys():
        d[key] = ' '
'''sourceFile: the path of source file (pgn) --- outputDir: output directory for (csv files)'''
def process_file(sourceFile, outputDir=default_dir):
print 'proc file .... ', sourceFile
global id
#Creating the output Directory
if os.path.exists(outputDir) == False: #if directory doesn't exist create it
os.makedirs(outputDir) #also creates intermediate directories
#Opening files
sourceFileDirs = sourceFile.split('/') #in case an absolute path is provided
sourceFileName = sourceFileDirs[-1] #take the last part of the path which is the file's name
foutName = os.path.join(outputDir,sourceFileName)
print foutName
try:
fin = open(sourceFile, 'r')
fout = open(foutName, 'w')
#Reading - Writing files
fout.write('Id, Event, Site, Date, Round, White, Black, Result, ECO, WhiteTitle, WhiteElo, WhiteFideId, '+
'BlackTitle, BlackElo, BlackFideId, EventDate, Opening, Variation, Title, Moves')
initItems = [('Id', ' '), ('Event',' '), ('Site',' '), ('Date',' '), ('Round',' '), ('White',' ') , ('Black',' '),
('Result',' '), ('ECO',' '), ('WhiteTitle', ' '), ('WhiteElo', ' ') , ('WhiteFideId',' '), ('BlackTitle', ' '),
('BlackElo',' '), ('BlackFideId',' ') , ('EventDate', ' '), ('Opening', ' '), ('Variation',' ')]
tags = OrderedDict(initItems) #ordered Dictionary creation
flag = True #helping flag to apply [pgn] , [/pgn] pgn4web flags only once for every game in moves section
firstLineFlag = True #helping flag to not apply /pgn tag in 1st line
for line in fin:
if line[0:7] == '[Event ': #previous line/row/entry/game is over go on (one pgn can contain multiple games)
#reaching here means line contains event info
if firstLineFlag == False: #every time we come up with a new game except the 1st time
fout.write(' [/pgn]') #close the pgn4web tag
firstLineFlag = False
flag = True
initDic(tags) #empty dictionary from previous game's values
tags['Id'] = id
id = id + 1
fout.write('\n')
process_line(line, tags) #now we are ready to write the tag's value like we do in every tag
elif line[0].isdigit(): #moves section
write_to_file(tags, fout) #move the tags' values from dictionary to file before writing moves
#before the moves append the white-black info (not in the tags) - feature helping drupal site :P
fout.write(tags['White']+' - '+tags['Black']+', ')
while line not in ['\n', '\r\n'] : #read all the lines containing moves
if flag: #apply tag only before 1st move in each game
fout.write('[pgn] ') #apply tags for pgn4web automation board presentation software
flag = False #do not apply tag after every newline(from pgn file) with moves
                    a = line.rstrip('\r\n') #remove newline and carriage-return characters
fout.write( a+' ' ) #write all the moves in one cell
line = fin.next() #read next line
                    if len(line) == 0: #EOF is never actually reached here; fin.next() raises StopIteration, handled below
break
elif len(line) > 2 : #not empty remember \r\n make len(line) == 2
process_line(line, tags) #ordinary tag, write its value to dictionary(tags)
#end of external for loop
fout.write('[/pgn]') #last tag outside the loop
#Closing the files
fin.close()
fout.close()
except StopIteration:
        fout.write('[/pgn]') #fin.next() raises StopIteration when the file does not end with an empty line
fout.close()
fin.close()
except IOError:
print "Sth wrong with Input file: ", sourceFile, " or output directory: ", outputDir
fout.close()
fin.close()
'''sourceDir: the path of the directory containing src files --- outputDir: output directory for (csv files)'''
def process_dir(sourceDir=default_dir, outputDir=default_dir):
for x in os.listdir(sourceDir):
if x == "csvFiles":
continue
path = os.path.join(sourceDir, x)
        if os.path.isdir(path): # directory - recursive call
            folderName = os.path.basename(path) # the last path component is the folder's name
if folderName == "csvFiles":
continue
outDir = os.path.join(outputDir, folderName)
process_dir(path, outDir ) #recursive call to the new path but output Directory is kept to outDir
elif path[-4:] == '.pgn': #if we find a pgn file then we call the process_file func
process_file(path, outputDir)
if __name__ == "__main__":
global id #counter for the 1st column of csv
    parser = argparse.ArgumentParser(description='convert pgn chess files to csv: python pgn2csv.py -f <file> or python pgn2csv.py -d <directory>')
parser.add_argument('-f', '--file', help='path of the pgn file')
    parser.add_argument('-d', '--directory', help='path of the pgn directory (multiple source files) (default: current directory)', default=default_dir)
parser.add_argument('-o', '--outputdir', help='path of output directory (default: current directory)', default=default_dir)
parser.add_argument('-i', '--id', help='starting id counter (default = 1)', default=1)
args = parser.parse_args()
id = int(args.id)
    if args.file is None: #no specific file specified
outDir = os.path.join(args.outputdir, 'csvFiles')
        if not os.path.exists(outDir): #if directory doesn't exist create it
os.mkdir(outDir)
process_dir(args.directory, outDir ) #work with directory
else:
process_file(args.file, args.outputdir) #work with file
print "Conversion completed successfully"
|
gpl-3.0
| 1,060,888,842,327,421,300
| 54.248322
| 151
| 0.565233
| false
| 4.136683
| false
| false
| false
|
L5hunter/TestCoin
|
qa/rpc-tests/mempool_coinbase_spends.py
|
1
|
3854
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Testcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
from test_framework import TestcoinTestFramework
from Testcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
import os
import shutil
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(TestcoinTestFramework):
alert_filename = None # Set by setup_network
def setup_network(self):
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.nodes.append(start_node(1, self.options.tmpdir, args))
connect_nodes(self.nodes[1], 0)
self.is_network_split = False
        self.sync_all()
def create_tx(self, from_txid, to_address, amount):
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signresult = self.nodes[0].signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
def run_test(self):
start_count = self.nodes[0].getblockcount()
        # Mine four blocks. After this, the coinbases of blocks
        # 102, 103, and 104 are spend-able.
new_blocks = self.nodes[1].setgenerate(True, 4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_101
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
# 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
        # Use invalidateblock to make all of the above coinbase spends invalid (immature coinbase),
# and make sure the mempool code behaves correctly.
b = [ self.nodes[0].getblockhash(n) for n in range(102, 105) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spend_101_raw = self.create_tx(coinbase_txids[0], node1_address, 50)
spend_102_raw = self.create_tx(coinbase_txids[1], node0_address, 50)
spend_103_raw = self.create_tx(coinbase_txids[2], node0_address, 50)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].setgenerate(True, 1)
# Create 102_1 and 103_1:
spend_102_1_raw = self.create_tx(spend_102_id, node1_address, 50)
spend_103_1_raw = self.create_tx(spend_103_id, node1_address, 50)
# Broadcast and mine 103_1:
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
self.nodes[0].setgenerate(True, 1)
# ... now put spend_101 and spend_102_1 in memory pools:
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
self.sync_all()
assert_equal(set(self.nodes[0].getrawmempool()), set([ spend_101_id, spend_102_1_id ]))
# Use invalidateblock to re-org back and make all those coinbase spends
# immature/invalid:
for node in self.nodes:
node.invalidateblock(new_blocks[0])
self.sync_all()
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
MempoolCoinbaseTest().main()
|
mit
| -2,353,289,480,723,677,000
| 40
| 98
| 0.656201
| false
| 3.484629
| true
| false
| false
|
jricardo27/holiday_planner
|
holiday_planner/holiday_place/models/place.py
|
1
|
4516
|
"""Place representing a geographical point on the map."""
from django.db import models
from model_utils import Choices
class PlaceTypeMixin:
"""Place type definitions."""
PLACE_TYPES = Choices(
('city', 'City'),
('town', 'Town'),
('beach', 'Beach'),
('cafe', 'Cafe'),
('bar', 'Bar'),
('zoo', 'Zoo'),
('market', 'Market'),
('restaurant', 'Restaurant'),
('island', 'Island'),
('museum', 'Museum'),
('shop', 'Shop'),
('winery', 'Winery'),
('natural_lookout', 'Natural Look Out'),
('man_made_lookout', 'Man Made Look Out'),
('national_park', 'National Park'),
('farmers_market', 'Farmer\'s Market'),
('art_gallery', 'Art Gallery'),
('accommodation_available', 'Accommodation Available'),
('accommodation_booked', 'Accommodation Booked'),
('amusement_park', 'Amusement Park'),
('interactive_park', 'Interactive Park'),
('thematic_park', 'Thematic Park'),
('big_thing', 'Australia\'s Big Thing'),
('botanic_garden', 'Botanic Garden'),
('chinese_garden', 'Chinese Garden'),
('coral_reef', 'Coral Reef'),
        ('indigenous_centre', 'Indigenous Centre'),
('neighborhood', 'City Neighborhood'),
('scenic_drive', 'Scenic Drive'),
)
GEOPOLITICAL_PLACES = [
PLACE_TYPES.city,
PLACE_TYPES.town,
]
NATURAL_PLACES = [
PLACE_TYPES.beach,
PLACE_TYPES.natural_lookout,
PLACE_TYPES.national_park,
PLACE_TYPES.coral_reef,
PLACE_TYPES.island,
PLACE_TYPES.scenic_drive,
]
CITY_ATTRACTIONS = [
PLACE_TYPES.restaurant,
PLACE_TYPES.bar,
PLACE_TYPES.cafe,
PLACE_TYPES.shop,
PLACE_TYPES.farmers_market,
PLACE_TYPES.market,
PLACE_TYPES.amusement_park,
PLACE_TYPES.interactive_park,
PLACE_TYPES.thematic_park,
PLACE_TYPES.botanic_garden,
PLACE_TYPES.chinese_garden,
PLACE_TYPES.art_gallery,
PLACE_TYPES.museum,
PLACE_TYPES.man_made_lookout,
PLACE_TYPES.neighborhood,
]
LOOK_OUTS = [
PLACE_TYPES.natural_lookout,
PLACE_TYPES.man_made_lookout,
]
ANIMAL_RELATED = [
PLACE_TYPES.zoo,
]
NATURE_RELATED = [
PLACE_TYPES.national_park,
PLACE_TYPES.botanic_garden,
PLACE_TYPES.chinese_garden,
PLACE_TYPES.coral_reef,
]
ACCOMMODATION_RELATED = [
PLACE_TYPES.accommodation_available,
PLACE_TYPES.accommodation_booked,
]
OTHER = [
PLACE_TYPES.big_thing,
PLACE_TYPES.indigenous_centre,
PLACE_TYPES.winery,
]
class Place(PlaceTypeMixin, models.Model):
"""A place could be a city, a town, an attraction..."""
class Meta:
unique_together = ("name", "type")
name = models.CharField(
verbose_name="Name",
help_text="Name of the place.",
max_length=255,
blank=False,
null=False,
)
longitude = models.FloatField(
verbose_name="Longitude",
)
latitude = models.FloatField(
verbose_name="Latitude",
)
type = models.CharField(
verbose_name="Main Type",
help_text="A type that describe this site.",
choices=PlaceTypeMixin.PLACE_TYPES,
max_length=60,
default=PlaceTypeMixin.PLACE_TYPES.city,
)
short_description = models.TextField(
verbose_name="Short Description",
max_length=500,
blank=True,
)
long_description = models.TextField(
verbose_name="Long Description",
blank=True,
)
located_in = models.ForeignKey(
"self",
verbose_name="City/Town",
help_text="City/Town this place is located",
related_name='children',
on_delete=models.SET_NULL,
blank=True,
null=True,
limit_choices_to={
'type__in': PlaceTypeMixin.GEOPOLITICAL_PLACES,
},
)
def __str__(self):
return '{}[{}]'.format(self.name, self.type_str)
@property
def type_str(self):
"""Display the human readable form for the type."""
return self.PLACE_TYPES[self.type]
    @property
    def located_in_str(self):
        """Display the human readable form of the containing place."""
        if self.located_in:
            return str(self.located_in)
        return ''
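# Example usage (a sketch; assumes a configured Django project with migrations applied):
#   sydney = Place.objects.create(name="Sydney", type=Place.PLACE_TYPES.city,
#                                 longitude=151.21, latitude=-33.87)
#   bondi = Place.objects.create(name="Bondi Beach", type=Place.PLACE_TYPES.beach,
#                                longitude=151.27, latitude=-33.89, located_in=sydney)
#   str(bondi)  # -> 'Bondi Beach[Beach]'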
|
bsd-3-clause
| 3,256,515,929,308,662,000
| 25.409357
| 66
| 0.568423
| false
| 3.522621
| false
| false
| false
|
paveu/api_mocker
|
apimocker/settings/components/logging.py
|
1
|
3197
|
from __future__ import absolute_import
import logging
from logstash.formatter import LogstashFormatterVersion1
class SuppressDeprecated(logging.Filter):
def filter(self, record):
WARNINGS_TO_SUPPRESS = [
'RemovedInDjango110Warning',
'RemovedInDjango20Warning',
]
# Return false to suppress message.
return not any([warn in record.getMessage() for warn in WARNINGS_TO_SUPPRESS])
class LogstashFormatter(LogstashFormatterVersion1):
def _stringify(self, s):
        if isinstance(s, unicode):
            s = s.encode('utf-8', 'ignore')  # encode, not decode: decoding unicode triggers an implicit ascii encode and can raise
return str(s)
def format(self, record):
# Create message dict
message = {
'@timestamp': self.format_timestamp(record.created),
'@version': '1',
'host': self.host,
'pathname': record.pathname,
'tags2': self.tags,
'message': record.getMessage(),
# Extra Fields
'level': record.levelname,
'logger_name': record.name,
'ex': {k: self._stringify(v) for k, v in self.get_extra_fields(record).iteritems()},
}
# If exception, add debug info
if record.exc_info:
message.update(self.get_debug_fields(record))
return self.serialize(message)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'level': 'INFO',
'handlers': ['main', 'sentry'],
},
'formatters': {
'logstash': {
'()': LogstashFormatter,
},
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(message)s'
},
},
'handlers': {
'main': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.handlers.SentryHandler',
},
},
'loggers': {
'apimocker.utils.middlewares': {
'handlers': ['main'],
'level': 'INFO',
'propagate': False,
},
'django.db.backends': {
'handlers': ['sentry'],
'level': 'ERROR',
'propagate': False,
},
'django.request': {
'handlers': ['sentry'],
'level': 'ERROR',
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['sentry'],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['sentry'],
'propagate': False,
},
'celery': {
'level': 'WARNING',
'handlers': ['sentry'],
'propagate': False,
},
},
'filters': {
'suppress_deprecated': {
'()': SuppressDeprecated,
}
},
}
if ENVIRONMENT == 'production': # noqa
LOGGING['handlers']['main'] = {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': '/var/log/app.log',
'formatter': 'logstash',
'filters': ['suppress_deprecated'],
}
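# Example usage (a sketch; assumes Django loads this settings component and that
# ENVIRONMENT is defined by another component):
#   import logging
#   logger = logging.getLogger('apimocker.utils.middlewares')
#   logger.info('request mocked')  # emitted through the 'main' handler configured above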
|
mit
| -1,602,555,381,508,861,700
| 26.09322
| 96
| 0.487645
| false
| 4.179085
| false
| false
| false
|
par2/lamana
|
lamana/output_.py
|
1
|
29238
|
# -----------------------------------------------------------------------------
'''Classes and functions for handling visualizations, plots and exporting data. BETA'''
# _distribplot(): independent plots of single and multiple geometries
# _multiplot(): aggregates several distribplots into a grid of subplots
# flake8 output_.py --ignore E265,E501,E701,F841,N802,N803,N806
'''Plot single and multiple LaminateModels.
Plots objects found within a list of LMs. Assumes Laminate objects are
in the namespace. Calls `_distribplot()` for single/multiple geometries.
Parameters
----------
title : str; default None
Suptitle; convenience keyword
subtitle : str; default None
    Subtitle; convenience keyword. Uses ax.text().
x, y : str; default None
DataFrame column names. Users can manually pass in other columns names.
normalized : bool; default None
If true, plots y = k_; else plots y = d_ unless specified otherwise.
halfplot : str; default None
Trim the DataFrame to read either |'tensile'|'compressive'|None|.
extrema : bool; default True
Plot minima and maxima only; equivalent to p=2.
separate : bool; default False
Plot each geometry in separate subplots.
legend_on : bool; default True
    Turn the plot legend on/off.
colorblind : bool; default False
Set line and marker colors as colorblind-safe.
grayscale : bool; default False
Set everything to grayscale; overrides colorblind.
annotate : bool; default False
Annotate names of layer types.
inset: bool; default None
Unnormalized plot of single geometry in upper right corner.
ax : matplotlib axes; default None
An axes containing the plots.
{subplots, suptitle}_kw : dict; default None
Default keywords are initialed to set up the distribution plots.
- subplots: |ncols=1|figsize=(12,8)|dpi=300|
- suptitle: |fontsize=15|fontweight='bold'|
Notes
-----
See `_distribplot()` for more kwargs. Here are some preferred idioms:
>>> case.LM.plot() # geometries in case
Case Plotted. Data Written. Image Saved.
>>> case.LM[4:-1].plot() # handle slicing
Case Plotted. Data Written. Image Saved.
Examples
--------
Plot Single Geometry
--------------------
Unnormalized stress distribution for single geometry (default):
.. plot::
:context: close-figs
>>> import lamana as la
    >>> from lamana.models import Wilson_LT as wlt
>>> dft = wlt.Defaults()
>>> case = la.distributions.Case(dft.load_params, dft.mat_props)
>>> case.apply('400-[200]-800')
>>> case.plot()
Normalized stress distribution for single geometry:
.. plot::
:context: close-figs
>>> case.plot(normalized=True)
Normalized stress distribution (base) with an unnormalized inset:
.. plot::
:context: close-figs
>>> case.plot(inset=True)
Stress distribution plot with layer annotations:
.. plot::
:context: close-figs
    >>> case.plot(annotate=True)
Custom markerstyles and kwarg passing.
.. plot::
:context: close-figs
    >>> case.plot(markerstyles=['D'])
Colorblind-safe color palette.
.. plot::
:context: close-figs
    >>> case.plot(colorblind=True)
Grayscale color palette.
.. plot::
:context: close-figs
    >>> case.plot(grayscale=True)
Plot Multiple Geometries
------------------------
Normalized stress distributions for multiple geometries (default):
.. plot::
:context: close-figs
>>> case.apply('400-200-800', '350-400-500', '200-100-1400')
>>> case.plot()
Tensile stress distribution:
.. plot::
:context: close-figs
>>> case.plot(halfplot='tensile')
Insets are not implemented for multiple geometries:
.. plot::
:context: close-figs
>>> case.plot(inset=True)
    NotImplementedError: 'Unable to superimpose multiple, unnormalized plots.'
See Also
--------
lamana.constructs.Laminate : builds the `LaminateModel` object.
lamana.output_._distribplot : generic handler for stress distribution plots.
lamana.output_._multiplot : plots multiple cases as subplots (caselets).
lamana.distributions.Case.plot : makes call to `_distribplot()`.
lamana.distributions.Cases.plot : makes call to `_multiplot()`.
'''
import math
import logging
import itertools as it
import matplotlib as mpl
import matplotlib.pyplot as plt
from lamana.lt_exceptions import InputError, PlottingError
# TODO: Replace with config.LAMANA_PALETTES
# colorblind palette from seaborn; grayscale is web-safe
LAMANA_PALETTES = dict(
#bold=['#FC0D00','#FC7700','#018C99','#00C318','#6A07A9','#009797','#CF0069'],
bold=['#EB0C00', '#FC7700', '#018C99', '#00C318', '#6A07A9', '#009797', '#CF0069'],
colorblind=['#0072B2', '#009E73', '#D55E00', '#CC79A7', '#F0E442', '#56B4E9'],
grayscale=['#FFFFFF', '#999999', '#666666', '#333333', '#000000'],
HAPSu=['#E7940E', '#F5A9A9', '#FCEB00', '#0B4EA5'],
)
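# Example (a sketch): the palettes are consumed by itertools cyclers further below, e.g.
#   import itertools as it
#   color_cycle = it.cycle(LAMANA_PALETTES['bold'])
#   next(color_cycle)  # -> '#EB0C00'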
# =============================================================================
# PLOTS -----------------------------------------------------------------------
# =============================================================================
# Process plotting figures of single and multiple subplots
#def _cycle_depth(iterable, n=None):
# '''Return a cycler that iterates n items into an iterable.'''
# if n is None:
# n = len(iterable)
# return it.cycle(it.islice(iterable, n))
def _cycle_depth(iterable, depth=None):
'''Return an itertools.cycle that slices the iterable by a given depth.
Parameters
----------
iterable : iterable
A container of infinite length.
depth : int
A index value; if None, cycle the entire iterable.
Examples
--------
>>> # Depth: 1 2 3 4 5 6
>>> iter_ = ['A', 'B', 'C', 'D', 'E', 'F']
>>> _cycle_depth(iter_, depth=2)
itertools.cycle # ['A', 'B', 'A', 'B', 'A' ...]
>>> # Depth: 1 2 3 4 5 6
>>> iter_ = ['A', 'B', 'C', 'D', 'E', 'F']
>>> _cycle_depth(iter_, depth=3)
itertools.cycle # ['A', 'B', 'C', 'A', 'B', 'C' ...]
Returns
-------
itertools.cycle
An infinite generator.
'''
if depth is None:
depth = len(iterable)
return it.cycle(it.islice(iterable, depth))
# TODO: Abstract to Distribplot and PanelPlot classes
def _distribplot(
LMs, x=None, y=None, normalized=True, halfplot=None, extrema=True,
legend_on=True, colorblind=False, grayscale=False, annotate=False, ax=None,
linestyles=None, linecolors=None, markerstyles=None, layercolors=None,
plot_kw=None, patch_kw=None, annotate_kw=None, legend_kw=None,
sublabel_kw=None, **kwargs
):
'''Return an axes plot of stress distributions.
Some characteristics
- multiplot: plot multiple geometries
- halfplot: plots only compressive or tensile side
- annotate: write layer type names
Users can override kwargs normal mpl style.
Parameters
----------
LMs : list of LaminateModel objects
Container for LaminateModels.
x, y : str
DataFrame column names. Users can pass in other columns names.
normalized : bool
If true, plots y = k_; else plots y = d_ unless specified otherwise.
halfplot : str
Trim the DataFrame to read either |'tensile'|'compressive'|None|.
extrema : bool
Plot minima and maxima only; equivalent to p=2.
legend_on : bool
        Turn the plot legend on/off. Default: True.
colorblind : bool
Set line and marker colors as colorblind-safe.
grayscale : bool
Set everything to grayscale. Overrides colorblind.
annotate : bool
Annotate names of layer types.
ax : matplotlib axes
An axes containing the plots.
These keywords control general plotting aesthetics.
{lines, marker, layer}_styles/colors : dict
Processes cycled iterables for matplotlib keywords.
- linestyles: ["-","--","-.",":"]
- linecolors: LAMANA_PALETTES['bold']
- markerstyles: mpl.lines.Line2D.filled_markers
- layercolors: LAMANA_PALETTES['HAPSu']
{plot, patch, annotate, legend, sublabel}_kw : dict
Default keywords are initialized to set up the distribution plots.
- plot: |linewidth=1.8|markersize=8|alpha=1.0|clip_on=False|
- patch: |linewidth=1.0|alpha=0.15|
- annotate: write layer types |fontsize=20|alpha=.7|ha='left'|va='center'|
- legend: |loc=1|fontsize='large'|
- sublabel: default is lower case alphabet
|x=0.12|y=0.94|s=''|fontsize=20|weight='bold'|ha='center'|va='center'|
Returns
-------
matplotlib axes
A plot of k or d (height) versus stress.
Raises
------
InputError
If no stress column is found.
PlottingError
If multiple geometries try an unnormalized plot; cannot superimpose.
Notes
-----
Since this function pulls from existing axes with `gca`, it is currently up
to the coder to manage axes cleanup, particularly when making consecutive plot
instances. The following example uses the clear axes f(x) to remedy this issue:
>>> # Plot consecutive instances
>>> case = ut.laminator(['400-200-800'])[0]
>>> LMs = case.LMs
>>> plot1 = la.output_._distribplot(LMs, normalized=True)
>>> plot1.cla() # clear last plot, otherwise prevents infinite loop of gca from old plot
>>> plot2 = la.output_._distribplot(LMs, normalized=False)
If you want to keep your old axes, consider passing in a new axes.
>>> fig, new_ax = plt.subplots()
>>> plot3 = la.output_._distribplot(LMs, normalized=False, ax=new_ax)
Examples
--------
>>> # Plot a single geometry
>>> import lamana as la
>>> from lamana.models import Wilson_LT as wlt
>>> dft = wlt.Defaults()
>>> case = la.distributions.Case(dft.load_params, dft.mat_props)
>>> case.apply(['400-200-800'])
>>> la.output_._distribplot(case.LMs)
<matplotlib.axes._subplots.AxesSubplot>
'''
# -------------------------------------------------------------------------
'''Make cyclers colorblind and grayscale friendly'''
if ax is None:
ax = plt.gca()
# Default axis labels and DataFrame columns for normalized plots
if x is None:
# 'stress_f (MPa/N)' is in Wilson_LT; so the following triggers handling
##x = 'stress_f (MPa/N)'
x = 'stress'
    if normalized and y is None:  # honor a user-specified y, per the docstring
y = 'k'
elif not normalized and y is None:
y = 'd(m)'
# NOTE: Will have trouble standardizing the name of the stress column.
# NOTE: Need to de-hard-code x label since changes with model
# TODO: Try looking for stress columns, and select last one, else look for strain.
# see loop on handling stress column
# Plot Defaults -----------------------------------------------------------
# Set defaults for plotting keywords with dicts
# If no kwd found, make an empty dict; only update keys not passed in
plot_kw = {} if plot_kw is None else plot_kw
plot_dft = dict(linewidth=1.8, markersize=8, alpha=1.0, clip_on=False,)
plot_kw.update({k: v for k, v in plot_dft.items() if k not in plot_kw})
#print('plot_kw (pre-loop): ', plot_kw)
patch_kw = {} if patch_kw is None else patch_kw
patch_dft = dict(linewidth=1.0, alpha=0.15,)
patch_kw.update({k: v for k, v in patch_dft.items() if k not in patch_kw})
#print('patch_kw: ', patch_kw)
annotate_kw = {} if annotate_kw is None else annotate_kw
annotate_dft = dict(fontsize=20, alpha=.7, ha='left', va='center',)
annotate_kw.update({k: v for k, v in annotate_dft.items() if k not in annotate_kw})
#print('annotate_kw: ', annotate_kw)
legend_kw = {} if legend_kw is None else legend_kw
legend_dft = dict(loc=1, fontsize='large',)
legend_kw.update({k: v for k, v in legend_dft.items()
if k not in legend_kw and legend_on})
#print('legend_kw: ', legend_kw)
sublabel_kw = {} if sublabel_kw is None else sublabel_kw
sublabel_dft = dict(
x=0.12, y=0.94, s='', fontsize=20, weight='bold', ha='center',
va='center', transform=ax.transAxes
)
sublabel_kw.update({k: v for k, v in sublabel_dft.items()
if k not in sublabel_kw})
#print('sublabel_kw: ', sublabel_kw)
# Style Cyclers -----------------------------------------------------------
# Set defaults for the line/marker styles, colors and layer patch colors
if linestyles is None:
linestyles = it.cycle(["-", "--", "-.", ":"])
if linecolors is None:
linecolors = LAMANA_PALETTES['bold']
if markerstyles is None:
markerstyles = [mrk for mrk in mpl.lines.Line2D.filled_markers
if mrk not in ('None', None)]
if layercolors is None:
layercolors = LAMANA_PALETTES['HAPSu']
##layercolors = ['#E7940E', '#F5A9A9', '#FCEB00', '#0B4EA5']
if colorblind:
linecolors = LAMANA_PALETTES['colorblind']
'''Add special color blind to layers'''
if grayscale:
linecolors = ['#000000']
layercolors = reversed(LAMANA_PALETTES['grayscale'][:-1]) # exclude black
patch_kw.update(dict(alpha=0.5))
if colorblind:
print('Grayscale has overriden the colorblind option.')
marker_cycle = it.cycle(markerstyles)
##marker_cycle = it.cycle(reversed(markerstyles))
line_cycle = it.cycle(linestyles)
color_cycle = it.cycle(linecolors)
# Plotting ----------------------------------------------------------------
minX, maxX = (0, 0)
for i, LM in enumerate(LMs):
if extrema:
df = LM.extrema # plots p=2
else:
df = LM.LMFrame
#nplies = LM.nplies # unused
materials = LM.materials
lbl = LM.Geometry.string
stack_order = LM.stack_order
# Handle arbitrary name of x column by
# selecting last 'stress' column; assumes 'stress_f (MPa)' for Wilson_LT
# if none found, exception is raised. user should input x value
#logging.debug('x: {}'.format(x))
x_col = x
y_col = y
try:
df[x_col]
except KeyError:
try:
# Try to discern if input wants a stress column.
stress_names = df.columns.str.startswith('stress')
stress_cols = df.loc[:, stress_names]
##stress_cols = df.loc[stress_names]
x_series = stress_cols.iloc[:, -1]
x_col = x_series.name
logging.info(
"Stress column '{}' not found."
" Using '{}' column.".format(x, x_col)
)
# TODO: unable to test without configuring model. Make mock model for test.
except KeyError:
raise InputError(
"Stress column '{}' not found."
                    ' Specify `x` column in plot() method.'.format(x_col)
)
x_series, y_series = df[x_col], df[y_col]
xs, ys = x_series.tolist(), y_series.tolist()
# Update plot boundaries
if min(xs) < minX:
minX = float(min(xs))
if max(xs) > maxX:
maxX = float(max(xs))
#print(minX, maxX)
# Keyword Updates;
# Use the cycler if plot_kw is empty, otherwise let the user manually change plot_kw
plot_kw.update({
'label': lbl,
#'marker': 'o',
#'color': 'b',
'marker': next(marker_cycle),
'color': next(color_cycle),
'linestyle': next(line_cycle)
})
'''Put following into info.'''
#print(LM.Geometry, LM.Geometry.string, LM.name, LM.nplies, LM.p)
# Label caselets with sublabels, e.g. a,b,c, i,ii,iii...
ax.tick_params(axis='x', pad=10)
ax.tick_params(axis='y', pad=10)
ax.plot(xs, ys, **plot_kw)
width = maxX - minX # sets rectangle width
minY = y_series.min()
maxY = y_series.max()
# Smart-cycle layer colors list; slice iterable the length of materials
# Draw layers only for # y = {k_ and d_(if nplies=1)}
layer_cycle = _cycle_depth(layercolors, depth=len(materials)) # assumes all Cases materials equiv.
# -------------------------------------------------------------------------
# Annotations anchored to layers instead of plot; iterates layers
incrementer = 0
for layer_, (type_, t_, matl_) in stack_order.items():
if normalized:
ypos, thick = layer_, 1 # thick is a unit thick (k-k_1)
elif (not normalized and len(LMs) == 1):
thick = t_ / 1e6
ypos = incrementer
else:
raise PlottingError(
'Unnormalized plots (i.e. y=d(m)) are visually cumbersome for'
' geometries > 1. Consider using the `normalized=True` keyword'
' for displaying simultaneous multi-geometry data.'
)
# NOTE: Replaced with raise in 0.4.11.dev0
#'''Add this to warning.'''
#print('CAUTION: Unnormalized plots (y=d(m)) are cumbersome for '
# 'geometries > 1. Consider normalized=True for multi-geometry '
# 'plots.')
#return None
patch_kw.update({'facecolor': next(layer_cycle)}) # adv. cyclers
rect = mpl.patches.Rectangle((minX, ypos), width, thick, **patch_kw)
ax.add_artist(rect)
'''add these to a kw dict somehow.. preferably to annotate_kw'''
xpad = 0.02
ypad_layer = 0.15
ypad_plot = 0.03
if normalized:
ypad = (rect.get_height() * ypad_layer) # relative to layers
elif not normalized:
#print(ax.get_ylim()[1])
ypad = ax.get_ylim()[1] * ypad_plot # relative to plot
#print(ypad)
rx, ry = rect.get_xy()
cx = rx + (rect.get_width() * xpad)
cy = ry + ypad
if annotate:
ax.annotate(type_, (cx, cy), **annotate_kw)
incrementer += thick
# -------------------------------------------------------------------------
# Set plot limits
#ax.axis([minX, maxX, minY, maxY])
if halfplot is None:
ax.axis([minX, maxX, minY, maxY])
elif halfplot is not None:
if halfplot.lower().startswith('comp'):
ax.set_xlim([minX, 0.0])
ax.set_ylim([minY, maxY])
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(5))
else: # default tensile
ax.set_xlim([0.0, maxX])
ax.set_ylim([minY, maxY])
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(5))
# '''Fix overlapping; no way to do automatically'''
# major_ticks = np.arange(0.0, maxX, 0.1)
# ax.set_xticks(major_ticks)
# Set legend parameters and axes labels
if legend_kw is not None and legend_on:
ax.legend(**legend_kw)
ax.text(**sublabel_kw) # figure sublabel
# TODO: Refactor for less limited parameter-setting of axes labels.
axtitle = kwargs.get('label', '')
xlabel = kwargs.get('xlabel', x)
ylabel = kwargs.get('ylabel', y)
ax.set_title(axtitle)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
##ax.xaxis.labelpad = 20
##ax.yaxis.labelpad = 20
return ax
# TODO: Needs to return an axes or figure plot
# TODO: caselets are defined as containers of str, lists of str or cases, in LPEP 003.
# Here caseslets are an LM, LMs or cases; list of cases(?) or cases object.
def _multiplot(
caselets, x=None, y=None, title=None, normalized=True, extrema=False,
halfplot='tensile', colorblind=False, grayscale=False, annotate=False,
labels_off=False, suptitle_kw=None, subplots_kw=None, patch_kw=None,
plot_kw=None, legend_kw=None, labels_kw=None, **kwargs
):
'''Return figure of axes containing several plots.
Characteristics:
- multiple plots
- kwarg/arg passing
- global labels and titles
- delete remaining subplots if less than remaining axes.
Parameters
----------
caselets : LM, LMs or cases
Should be a container of str, lists of str or cases; however, accepting
LM, LMs or cases. Refactoring required.
x, y : str
DataFrame column names. Users can pass in other columns names.
title : str
Figure title.
normalized : bool
If true, plots y = k_; else plots y = d_ unless specified otherwise.
extrema : bool, default: False
Plot minima and maxima only; equivalent to p=2.
Forced off for clarity in separate plots.
halfplot : str
Trim the DataFrame to read either |'tensile'|'compressive'|None|.
colorblind : bool
Set line and marker colors as colorblind-safe.
grayscale : bool
Set everything to grayscale. Overrides colorblind.
annotate : bool
Annotate names of layer types.
labels_off : bool
Toggle labels.
labels_kw : dict
One stop for custom labels and annotated text passed in from user.
axestitle, sublabels, legendtitles are lists of labels for each caselet.
These keywords control general plotting aesthetics.
{subplot, patch, plot, legend, suptitle}_kw : dict
Default keywords are initialized to set up the distribution plots.
- subplots: |ncols=4|
- patch: None
- plot: |clip_on=True|
- legend: |loc=1|fontsize='small'|
- suptitle: |t=''|fontsize=22|fontweight='bold'|
Returns
-------
matplotlib figure
A figure of subplots.
Examples
--------
>>> # Plot a set of caselets (subplots)
>>> import lamana as la
>>> from lamana.models import Wilson_LT as wlt
>>> dft = wlt.Defaults()
>>> const_total = ['350-400-500', '400-200-800']
>>> cases = la.distributions.Cases(
... const_total, load_params=dft.load_params, mat_props=dft.mat_props,
... model='Wilson_LT', ps=[2, 3]
... )
>>> la.output_._multiplot(cases)
'''
# DEFAULTS ----------------------------------------------------------------
title = '' if title is None else title
if labels_off:
kwargs['xlabel'], kwargs['ylabel'] = ('', '') # turn off axes labels
subplots_kw = {} if subplots_kw is None else subplots_kw
subplots_dft = dict(ncols=4)
subplots_kw.update({k: v for k, v in subplots_dft.items() if k not in subplots_kw})
#print('subplots_kw: ', subplots_kw)
patch_kw = {} if patch_kw is None else patch_kw
#print('patch_kw: ', patch_kw)
plot_kw = {} if plot_kw is None else plot_kw
plot_dft = dict(clip_on=True) # needed in halfplots; else BUG
plot_kw.update({k: v for k, v in plot_dft.items() if k not in plot_kw})
#print('plot_kw: ', plot_kw)
legend_kw = {} if legend_kw is None else legend_kw
legend_dft = dict(loc=1, fontsize='small')
legend_kw.update({k: v for k, v in legend_dft.items() if k not in legend_kw})
#print('legend_kw: ', legend_kw)
suptitle_kw = {} if suptitle_kw is None else suptitle_kw
suptitle_dft = dict(t='', fontsize=22, fontweight='bold')
if title:
suptitle_dft.update(dict(t=title))
suptitle_kw.update({k: v for k, v in suptitle_dft.items() if k not in suptitle_kw})
#print('suptitle_kw: ', suptitle_kw)
# Main dict to handle all text
# sublabels defaults to no labels after letter 'z'.
    # Will auto label subplots from a to z. After which, the user must supply labels.
labels_kw = {} if labels_kw is None else labels_kw
alphabet = map(chr, range(97, 123)) # to label subplots; REF 037
labels_dft = dict(suptitle=None, sublabels=list(alphabet),
axes_titles=None, legend_titles=None,)
if title:
labels_dft.update(suptitle=title) # compliment convenience kw arg
labels_kw.update({k: v for k, v in labels_dft.items() if k not in labels_kw})
if labels_kw['suptitle']:
suptitle_kw.update(t=labels_kw['suptitle'])
# if labels_kw['subtitle']: subtitle=labels_kw['subtitle']
# if labels_kw['xlabel']: kwargs['xlabel'] = '' # remove axlabels; use text()
# if labels_kw['ylabel']: kwargs['ylabel'] = '' # remove axlabels; use text()
#print('labels_kw: ', labels_kw)
'''Consider cycling linecolors for each single geo, multiplot.'''
# FIGURE ------------------------------------------------------------------
# Reset figure dimensions
ncaselets = len(caselets)
ncols_dft = subplots_kw['ncols']
nrows = int(math.ceil(ncaselets / ncols_dft)) # Fix "can't mult. seq. by non-int..." error; nrows should always be int
##nrows = math.ceil(ncaselets / ncols_dft)
subplots_kw['figsize'] = (24, 8 * nrows)
if ncaselets < ncols_dft:
ncols_dft = ncaselets
subplots_kw['ncols'] = ncaselets
# Set defaults for lists of titles/labels
for key in ['axes_titles', 'legend_titles', 'sublabels']:
if labels_kw[key] is None:
labels_kw[key] = [''] * ncaselets
if ncaselets > len(labels_kw['sublabels']):
labels_kw['sublabels'] = [' '] * ncaselets
        print('There are more cases than sublabels. Bypassing default... '
              "Consider adding custom labels to 'labels_kw'.")
fig, axes = plt.subplots(nrows=nrows, **subplots_kw)
#print('args: {}'.format(args))
#print('kwargs:{} '.format(kwargs))
#print('nrows: {}, ncols: {}'.format(nrows, ncols_dft))
# NOTE: does not return ax. Fix?
def plot_caselets(i, ax):
'''Iterate axes of the subplots; apply a small plot ("caselet").
Caselets could contain cases (iterable) or LaminateModels (not iterable).
'''
try:
caselet, axtitle, ltitle, sublabel = (
caselets[i],
labels_kw['axes_titles'][i],
labels_kw['legend_titles'][i],
labels_kw['sublabels'][i]
)
# Plot LMs on each axes per case (and legend notes if there)
#print(ltitle, axsub)
kwargs.update(label=axtitle)
legend_kw.update(title=ltitle)
sublabel_kw = dict(s=sublabel)
# TODO: Refactor
# Caselet could be a case or LM, but distribplot needs a list of LMs
try:
# Case
LMs = caselet.LMs
            except AttributeError:
# Single LaminateModel
LMs = [caselet]
#print('Exception was caught; not a case')
# NOTE: what about LMs?
_distribplot(
LMs, x=x, y=y, halfplot=halfplot, extrema=extrema, annotate=annotate,
normalized=normalized, ax=ax, colorblind=colorblind,
grayscale=grayscale, plot_kw=plot_kw, patch_kw=patch_kw,
legend_kw=legend_kw, sublabel_kw=sublabel_kw, **kwargs
)
        except (IndexError, KeyError):
# Cleanup; remove the remaining plots
fig.delaxes(ax)
def iter_vector():
'''Return axes for nrow=1; uses single loop.'''
for i, ax in enumerate(axes):
plot_caselets(i, ax)
def iter_matrix():
'''Return axes for nrow>1; uses nested loop.'''
i = 0
for ax_row in axes:
for ax in ax_row:
plot_caselets(i, ax)
i += 1
if nrows == 1:
iter_vector()
else:
iter_matrix()
# Common Figure Labels
fig.suptitle(**suptitle_kw)
plt.rcParams.update({'font.size': 18})
# NOTE: Add a figure return and show deprecation in 0.4.11.dev0
return fig
#plt.show()
# -----------------------------------------------------------------------------
# AXES-LEVEL ------------------------------------------------------------------
# -----------------------------------------------------------------------------
class AxesPlot():
'''Return a matplotblib axes.
See Also
--------
- _distribplot()
- singleplot()
- halfplot()
- quarterplot()
- predictplot()
'''
pass
# -----------------------------------------------------------------------------
# FIGURE-LEVEL ----------------------------------------------------------------
# -----------------------------------------------------------------------------
class FigurePlot():
'''Return a matplotlib figure.
This class sets up a figure to accept data for multiple plots.
    Attributes
    ----------
    nrows, ncols : int, int
        Figure rows and columns.
Notes
-----
Each subplot is a separate axes.
See Also
--------
- _multiplot()
- ratioplot()
'''
#figsize = (ncols * size * aspect, nrows * size)
pass
|
bsd-3-clause
| 8,377,580,359,115,972,000
| 34.743276
| 131
| 0.569157
| false
| 3.756649
| false
| false
| false
|
jaeilepp/eggie
|
mne/viz/topo.py
|
1
|
27382
|
"""Functions to plot M/EEG data on topo (one axes per channel)
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
import warnings
from itertools import cycle
from functools import partial
import numpy as np
from scipy import ndimage
# XXX : don't import pyplot here or you will break the doc
from ..baseline import rescale
from ..utils import deprecated
from ..io.pick import channel_type, pick_types
from ..fixes import normalize_colors
from ..utils import _clean_names
from .utils import _mutable_defaults, _check_delayed_ssp, COLORS
from .utils import _draw_proj_checkbox
def iter_topography(info, layout=None, on_pick=None, fig=None,
fig_facecolor='k', axis_facecolor='k',
axis_spinecolor='k', layout_scale=None,
colorbar=False):
""" Create iterator over channel positions
This function returns a generator that unpacks into
a series of matplotlib axis objects and data / channel
indices, both corresponding to the sensor positions
of the related layout passed or inferred from the channel info.
    `iter_topography`, hence, makes it convenient to realize custom
    topography plots.
Parameters
----------
info : instance of mne.io.meas_info.Info
The measurement info.
layout : instance of mne.layout.Layout | None
The layout to use. If None, layout will be guessed
on_pick : callable | None
The callback function to be invoked on clicking one
of the axes. Is supposed to instantiate the following
API: `function(axis, channel_index)`
fig : matplotlib.figure.Figure | None
The figure object to be considered. If None, a new
figure will be created.
fig_facecolor : str | obj
The figure face color. Defaults to black.
axis_facecolor : str | obj
The axis face color. Defaults to black.
axis_spinecolor : str | obj
The axis spine color. Defaults to black. In other words,
the color of the axis' edge lines.
layout_scale: float | None
Scaling factor for adjusting the relative size of the layout
on the canvas. If None, nothing will be scaled.
Returns
-------
A generator that can be unpacked into
ax : matplotlib.axis.Axis
The current axis of the topo plot.
ch_dx : int
The related channel index.
"""
import matplotlib.pyplot as plt
if fig is None:
fig = plt.figure()
fig.set_facecolor(fig_facecolor)
if layout is None:
from ..layouts import find_layout
layout = find_layout(info)
if on_pick is not None:
callback = partial(_plot_topo_onpick, show_func=on_pick)
fig.canvas.mpl_connect('button_press_event', callback)
pos = layout.pos.copy()
if layout_scale:
pos[:, :2] *= layout_scale
ch_names = _clean_names(info['ch_names'])
iter_ch = [(x, y) for x, y in enumerate(layout.names) if y in ch_names]
for idx, name in iter_ch:
ax = plt.axes(pos[idx])
ax.patch.set_facecolor(axis_facecolor)
plt.setp(list(ax.spines.values()), color=axis_spinecolor)
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.setp(ax.get_xticklines(), visible=False)
plt.setp(ax.get_yticklines(), visible=False)
ch_idx = ch_names.index(name)
vars(ax)['_mne_ch_name'] = name
vars(ax)['_mne_ch_idx'] = ch_idx
vars(ax)['_mne_ax_face_color'] = axis_facecolor
yield ax, ch_idx
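# Example usage (a sketch; assumes `info` and a per-channel `data` array exist):
#   import matplotlib.pyplot as plt
#   for ax, ch_idx in iter_topography(info, fig_facecolor='w',
#                                     axis_facecolor='w', axis_spinecolor='w'):
#       ax.plot(data[ch_idx], color='r')
#   plt.show()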
def _plot_topo(info=None, times=None, show_func=None, layout=None,
decim=None, vmin=None, vmax=None, ylim=None, colorbar=None,
border='none', cmap=None, layout_scale=None, title=None,
x_label=None, y_label=None, vline=None):
"""Helper function to plot on sensor layout"""
import matplotlib.pyplot as plt
# prepare callbacks
tmin, tmax = times[[0, -1]]
on_pick = partial(show_func, tmin=tmin, tmax=tmax, vmin=vmin,
vmax=vmax, ylim=ylim, x_label=x_label,
y_label=y_label, colorbar=colorbar)
fig = plt.figure()
if colorbar:
norm = normalize_colors(vmin=vmin, vmax=vmax)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array(np.linspace(vmin, vmax))
ax = plt.axes([0.015, 0.025, 1.05, .8], axisbg='k')
cb = fig.colorbar(sm, ax=ax)
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
plt.setp(cb_yticks, color='w')
my_topo_plot = iter_topography(info, layout=layout, on_pick=on_pick,
fig=fig, layout_scale=layout_scale,
axis_spinecolor=border,
colorbar=colorbar)
for ax, ch_idx in my_topo_plot:
if layout.kind == 'Vectorview-all' and ylim is not None:
this_type = {'mag': 0, 'grad': 1}[channel_type(info, ch_idx)]
ylim_ = [v[this_type] if _check_vlim(v) else v for v in ylim]
else:
ylim_ = ylim
show_func(ax, ch_idx, tmin=tmin, tmax=tmax, vmin=vmin,
vmax=vmax, ylim=ylim_)
if ylim_ and not any(v is None for v in ylim_):
plt.ylim(*ylim_)
if title is not None:
plt.figtext(0.03, 0.9, title, color='w', fontsize=19)
return fig
def _plot_topo_onpick(event, show_func=None, colorbar=False):
"""Onpick callback that shows a single channel in a new figure"""
# make sure that the swipe gesture in OS-X doesn't open many figures
orig_ax = event.inaxes
if event.inaxes is None:
return
import matplotlib.pyplot as plt
try:
ch_idx = orig_ax._mne_ch_idx
face_color = orig_ax._mne_ax_face_color
fig, ax = plt.subplots(1)
plt.title(orig_ax._mne_ch_name)
ax.set_axis_bgcolor(face_color)
# allow custom function to override parameters
show_func(plt, ch_idx)
except Exception as err:
# matplotlib silently ignores exceptions in event handlers,
# so we print
# it here to know what went wrong
print(err)
raise err
def _imshow_tfr(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None, tfr=None,
freq=None, vline=None, x_label=None, y_label=None,
colorbar=False, picker=True, cmap=None):
""" Aux function to show time-freq map on topo """
import matplotlib.pyplot as plt
if cmap is None:
cmap = plt.cm.jet
extent = (tmin, tmax, freq[0], freq[-1])
ax.imshow(tfr[ch_idx], extent=extent, aspect="auto", origin="lower",
vmin=vmin, vmax=vmax, picker=picker, cmap=cmap)
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
if colorbar:
plt.colorbar()
def _plot_timeseries(ax, ch_idx, tmin, tmax, vmin, vmax, ylim, data, color,
times, vline=None, x_label=None, y_label=None,
colorbar=False):
""" Aux function to show time series on topo """
import matplotlib.pyplot as plt
picker_flag = False
for data_, color_ in zip(data, color):
if not picker_flag:
# use large tol for picker so we can click anywhere in the axes
ax.plot(times, data_[ch_idx], color_, picker=1e9)
picker_flag = True
else:
ax.plot(times, data_[ch_idx], color_)
if vline:
        for x in vline:
            plt.axvline(x, color='w', linewidth=0.5)
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
if colorbar:
plt.colorbar()
def _check_vlim(vlim):
"""AUX function"""
    return not np.isscalar(vlim) and vlim is not None
def plot_topo(evoked, layout=None, layout_scale=0.945, color=None,
border='none', ylim=None, scalings=None, title=None, proj=False,
vline=[0.0]):
"""Plot 2D topography of evoked responses.
Clicking on the plot of an individual sensor opens a new figure showing
the evoked response for the selected sensor.
Parameters
----------
evoked : list of Evoked | Evoked
The evoked response to plot.
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
layout_scale: float
Scaling factor for adjusting the relative size of the layout
on the canvas
color : list of color objects | color object | None
Everything matplotlib accepts to specify colors. If not list-like,
the color specified will be repeated. If None, colors are
automatically drawn.
border : str
matplotlib borders style to be used for each sensor plot.
    scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
        defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
ylim : dict | None
ylim for plots. The value determines the upper and lower subplot
        limits. e.g. ylim = dict(eeg=[-200e-6, 200e-6]). Valid keys are eeg,
mag, grad, misc. If None, the ylim parameter for each channel is
determined by the maximum absolute peak.
proj : bool | 'interactive'
        If true, SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
title : str
Title of the figure.
vline : list of floats | None
The values at which to show a vertical line.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations
"""
    if type(evoked) not in (tuple, list):
evoked = [evoked]
if type(color) in (tuple, list):
if len(color) != len(evoked):
raise ValueError('Lists of evoked objects and colors'
' must have the same length')
elif color is None:
colors = ['w'] + COLORS
stop = (slice(len(evoked)) if len(evoked) < len(colors)
else slice(len(colors)))
color = cycle(colors[stop])
if len(evoked) > len(colors):
            warnings.warn('More evoked objects than colors available. '
                          'You should pass a list of unique colors.')
else:
color = cycle([color])
times = evoked[0].times
if not all([(e.times == times).all() for e in evoked]):
raise ValueError('All evoked.times must be the same')
info = evoked[0].info
ch_names = evoked[0].ch_names
if not all([e.ch_names == ch_names for e in evoked]):
raise ValueError('All evoked.picks must be the same')
ch_names = _clean_names(ch_names)
if layout is None:
from ..layouts.layout import find_layout
layout = find_layout(info)
# XXX. at the moment we are committed to 1- / 2-sensor-types layouts
chs_in_layout = set(layout.names) & set(ch_names)
types_used = set(channel_type(info, ch_names.index(ch))
for ch in chs_in_layout)
# one check for all vendors
meg_types = ['mag'], ['grad'], ['mag', 'grad'],
is_meg = any(types_used == set(k) for k in meg_types)
if is_meg:
types_used = list(types_used)[::-1] # -> restore kwarg order
picks = [pick_types(info, meg=kk, ref_meg=False, exclude=[])
for kk in types_used]
else:
types_used_kwargs = dict((t, True) for t in types_used)
picks = [pick_types(info, meg=False, **types_used_kwargs)]
assert isinstance(picks, list) and len(types_used) == len(picks)
scalings = _mutable_defaults(('scalings', scalings))[0]
evoked = [e.copy() for e in evoked]
for e in evoked:
for pick, t in zip(picks, types_used):
e.data[pick] = e.data[pick] * scalings[t]
if proj is True and all([e.proj is not True for e in evoked]):
evoked = [e.apply_proj() for e in evoked]
elif proj == 'interactive': # let it fail early.
for e in evoked:
_check_delayed_ssp(e)
if ylim is None:
set_ylim = lambda x: np.abs(x).max()
ylim_ = [set_ylim([e.data[t] for e in evoked]) for t in picks]
ymax = np.array(ylim_)
ylim_ = (-ymax, ymax)
elif isinstance(ylim, dict):
ylim_ = _mutable_defaults(('ylim', ylim))[0]
ylim_ = [ylim_[kk] for kk in types_used]
ylim_ = zip(*[np.array(yl) for yl in ylim_])
else:
        raise ValueError('ylim must be None or a dict')
plot_fun = partial(_plot_timeseries, data=[e.data for e in evoked],
color=color, times=times, vline=vline)
fig = _plot_topo(info=info, times=times, show_func=plot_fun, layout=layout,
decim=1, colorbar=False, ylim=ylim_, cmap=None,
layout_scale=layout_scale, border=border, title=title,
x_label='Time (s)', vline=vline)
if proj == 'interactive':
for e in evoked:
_check_delayed_ssp(e)
params = dict(evokeds=evoked, times=times,
plot_update_proj_callback=_plot_update_evoked_topo,
projs=evoked[0].info['projs'], fig=fig)
_draw_proj_checkbox(None, params)
return fig
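# Example usage (a sketch; assumes `evoked` is an mne Evoked instance):
#   fig = plot_topo(evoked, title='Evoked responses')
#   fig.savefig('topo.png', facecolor=fig.get_facecolor())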
def _plot_update_evoked_topo(params, bools):
"""Helper function to update topo sensor plots"""
evokeds, times, fig = [params[k] for k in ('evokeds', 'times', 'fig')]
projs = [proj for ii, proj in enumerate(params['projs'])
if ii in np.where(bools)[0]]
params['proj_bools'] = bools
evokeds = [e.copy() for e in evokeds]
for e in evokeds:
e.info['projs'] = []
e.add_proj(projs)
e.apply_proj()
# make sure to only modify the time courses, not the ticks
axes = fig.get_axes()
n_lines = len(axes[0].lines)
n_diff = len(evokeds) - n_lines
ax_slice = slice(abs(n_diff)) if n_diff < 0 else slice(n_lines)
for ax in axes:
lines = ax.lines[ax_slice]
for line, evoked in zip(lines, evokeds):
line.set_data(times, evoked.data[ax._mne_ch_idx])
fig.canvas.draw()
@deprecated('`plot_topo_tfr` is deprecated and will be removed in '
'MNE 0.9. Use `plot_topo` method on TFR objects.')
def plot_topo_tfr(epochs, tfr, freq, layout=None, colorbar=True, vmin=None,
vmax=None, cmap='RdBu_r', layout_scale=0.945, title=None):
"""Plot time-frequency data on sensor layout
Clicking on the time-frequency map of an individual sensor opens a
new figure showing the time-frequency map of the selected sensor.
Parameters
----------
epochs : instance of Epochs
The epochs used to generate the power
tfr : 3D-array shape=(n_sensors, n_freqs, n_times)
The time-frequency data. Must have the same channels as Epochs.
freq : array-like
Frequencies of interest as passed to induced_power
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
colorbar : bool
If true, colorbar will be added to the plot
vmin : float
Minimum value mapped to lowermost color
vmax : float
        Maximum value mapped to uppermost color
cmap : instance of matplotlib.pyplot.colormap | str
Colors to be mapped to the values. Default 'RdBu_r'.
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas
title : str
Title of the figure.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Images of time-frequency data at sensor locations
"""
if vmin is None:
vmin = tfr.min()
if vmax is None:
vmax = tfr.max()
if layout is None:
from ..layouts.layout import find_layout
layout = find_layout(epochs.info)
tfr_imshow = partial(_imshow_tfr, tfr=tfr.copy(), freq=freq, cmap=cmap)
fig = _plot_topo(info=epochs.info, times=epochs.times,
show_func=tfr_imshow, layout=layout, border='w',
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title,
x_label='Time (s)', y_label='Frequency (Hz)')
return fig
@deprecated('`plot_topo_power` is deprecated and will be removed in '
'MNE 0.9. Use `plot_topo` method on TFR objects.')
def plot_topo_power(epochs, power, freq, layout=None, baseline=None,
mode='mean', decim=1, colorbar=True, vmin=None, vmax=None,
cmap=None, layout_scale=0.945, dB=True, title=None):
"""Plot induced power on sensor layout
Clicking on the induced power map of an individual sensor opens a
new figure showing the induced power map of the selected sensor.
Parameters
----------
epochs : instance of Epochs
The epochs used to generate the power
power : 3D-array
First return value from mne.time_frequency.induced_power
freq : array-like
Frequencies of interest as passed to induced_power
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
    mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent' | None
        Do baseline correction with ratio (power is divided by mean
        power during baseline) or z-score (power is divided by standard
        deviation of power during baseline after subtracting the mean,
        power = [power - mean(power_baseline)] / std(power_baseline)).
        If None, no baseline correction will be performed.
decim : integer
Increment for selecting each nth time slice
colorbar : bool
If true, colorbar will be added to the plot
vmin : float
Minimum value mapped to lowermost color
vmax : float
        Maximum value mapped to uppermost color
cmap : instance of matplotlib.pyplot.colormap
Colors to be mapped to the values
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas
dB : bool
If True, log10 will be applied to the data.
title : str
Title of the figure.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Images of induced power at sensor locations
"""
times = epochs.times[::decim].copy()
if mode is not None:
if baseline is None:
baseline = epochs.baseline
power = rescale(power.copy(), times, baseline, mode)
times *= 1e3
if dB:
power = 20 * np.log10(power)
if vmin is None:
vmin = power.min()
if vmax is None:
vmax = power.max()
if layout is None:
from ..layouts.layout import find_layout
layout = find_layout(epochs.info)
power_imshow = partial(_imshow_tfr, tfr=power.copy(), freq=freq)
fig = _plot_topo(info=epochs.info, times=times,
show_func=power_imshow, layout=layout, decim=decim,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title, border='w',
x_label='Time (s)', y_label='Frequency (Hz)')
return fig
@deprecated('`plot_topo_phase_lock` is deprecated and will be removed in '
'MNE 0.9. Use `plot_topo` method on TFR objects.')
def plot_topo_phase_lock(epochs, phase, freq, layout=None, baseline=None,
mode='mean', decim=1, colorbar=True, vmin=None,
vmax=None, cmap=None, layout_scale=0.945,
title=None):
"""Plot phase locking values (PLV) on sensor layout
Clicking on the PLV map of an individual sensor opens a new figure
showing the PLV map of the selected sensor.
Parameters
----------
epochs : instance of Epochs
The epochs used to generate the phase locking value
    phase : 3D-array
Phase locking value, second return value from
mne.time_frequency.induced_power.
freq : array-like
Frequencies of interest as passed to induced_power
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent' | None
Do baseline correction with ratio (phase is divided by mean
phase during baseline) or z-score (phase is divided by standard
deviation of phase during baseline after subtracting the mean,
phase = [phase - mean(phase_baseline)] / std(phase_baseline)).
        If None, no baseline correction will be performed.
decim : integer
Increment for selecting each nth time slice
colorbar : bool
If true, colorbar will be added to the plot
vmin : float
Minimum value mapped to lowermost color
vmax : float
        Maximum value mapped to uppermost color
cmap : instance of matplotlib.pyplot.colormap
Colors to be mapped to the values
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
title : str
Title of the figure.
Returns
-------
    fig : Instance of matplotlib.figure.Figure
Phase lock images at sensor locations
"""
times = epochs.times[::decim] * 1e3
if mode is not None:
if baseline is None:
baseline = epochs.baseline
phase = rescale(phase.copy(), times, baseline, mode)
if vmin is None:
vmin = phase.min()
if vmax is None:
vmax = phase.max()
if layout is None:
from ..layouts.layout import find_layout
layout = find_layout(epochs.info)
phase_imshow = partial(_imshow_tfr, tfr=phase.copy(), freq=freq)
fig = _plot_topo(info=epochs.info, times=times,
show_func=phase_imshow, layout=layout, decim=decim,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title, border='w',
x_label='Time (s)', y_label='Frequency (Hz)')
return fig
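# A matching sketch for the phase-locking plot (hypothetical; `phase_lock` is
# the second return value of induced_power from the example above):
#
#     fig = plot_topo_phase_lock(epochs, phase_lock, freq, decim=3,
#                                baseline=(None, 0), mode='mean')
#     fig.show()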
def _erfimage_imshow(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None,
data=None, epochs=None, sigma=None,
order=None, scalings=None, vline=None,
x_label=None, y_label=None, colorbar=False):
"""Aux function to plot erfimage on sensor topography"""
import matplotlib.pyplot as plt
this_data = data[:, ch_idx, :].copy()
ch_type = channel_type(epochs.info, ch_idx)
    if ch_type not in scalings:
raise KeyError('%s channel type not in scalings' % ch_type)
this_data *= scalings[ch_type]
if callable(order):
order = order(epochs.times, this_data)
if order is not None:
this_data = this_data[order]
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
ax.imshow(this_data, extent=[tmin, tmax, 0, len(data)], aspect='auto',
origin='lower', vmin=vmin, vmax=vmax, picker=True)
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
if colorbar:
plt.colorbar()
def plot_topo_image_epochs(epochs, layout=None, sigma=0.3, vmin=None,
vmax=None, colorbar=True, order=None, cmap=None,
layout_scale=.95, title=None, scalings=None):
"""Plot Event Related Potential / Fields image on topographies
Parameters
----------
epochs : instance of Epochs
The epochs.
    layout : instance of Layout | None
        System specific sensor positions. If None, the layout is inferred
        from the data.
sigma : float
        The standard deviation of the Gaussian smoothing to apply along
        the epoch axis of the image.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
colorbar : bool
Display or not a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
(data.shape[1] == len(times)).
cmap : instance of matplotlib.pyplot.colormap
Colors to be mapped to the values.
    layout_scale : float
        Scaling factor for adjusting the relative size of the layout
        on the canvas.
title : str
Title of the figure.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If
None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
scalings = _mutable_defaults(('scalings', scalings))[0]
data = epochs.get_data()
if vmin is None:
vmin = data.min()
if vmax is None:
vmax = data.max()
if layout is None:
from ..layouts.layout import find_layout
layout = find_layout(epochs.info)
erf_imshow = partial(_erfimage_imshow, scalings=scalings, order=order,
data=data, epochs=epochs, sigma=sigma)
fig = _plot_topo(info=epochs.info, times=epochs.times,
show_func=erf_imshow, layout=layout, decim=1,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title,
border='w', x_label='Time (s)', y_label='Epoch')
return fig
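# A minimal usage sketch (hypothetical `epochs` object; vmin/vmax are example
# values in the units described in the docstring, and scalings are left at
# their defaults):
#
#     fig = plot_topo_image_epochs(epochs, sigma=0.5, vmin=-200, vmax=200,
#                                  colorbar=True, title='ERF images')
#     fig.show()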
|
bsd-2-clause
| -3,022,529,476,795,081,000
| 36.768276
| 79
| 0.616281
| false
| 3.769548
| false
| false
| false
|
jodogne/OrthancMirror
|
OrthancServer/Resources/Samples/Python/ArchiveStudiesInTimeRange.py
|
1
|
3416
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Orthanc - A Lightweight, RESTful DICOM Store
# Copyright (C) 2012-2016 Sebastien Jodogne, Medical Physics
# Department, University Hospital of Liege, Belgium
# Copyright (C) 2017-2021 Osimis S.A., Belgium
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import os.path
import sys
import RestToolbox
def PrintHelp():
print('Download ZIP archives for all the studies generated '
'during a given time range (according to the StudyDate tag)\n')
print('Usage: %s <URL> <StartDate> <EndDate> <TargetFolder>\n' % sys.argv[0])
print('Example: %s http://127.0.0.1:8042/ 20150101 20151231 /tmp/\n' % sys.argv[0])
exit(-1)
def CheckIsDate(date):
if len(date) != 8 or not date.isdigit():
print '"%s" is not a valid date!\n' % date
exit(-1)
if len(sys.argv) != 5:
PrintHelp()
URL = sys.argv[1]
START = sys.argv[2]
END = sys.argv[3]
TARGET = sys.argv[4]
CheckIsDate(START)
CheckIsDate(END)
def GetTag(tags, key):
if key in tags:
return tags[key]
else:
return 'No%s' % key
# Loop over the studies
for studyId in RestToolbox.DoGet('%s/studies' % URL):
# Retrieve the DICOM tags of the current study
study = RestToolbox.DoGet('%s/studies/%s' % (URL, studyId))['MainDicomTags']
# Retrieve the DICOM tags of the parent patient of this study
# Case 1: Baseline version
patient = RestToolbox.DoGet('%s/studies/%s/patient' % (URL, studyId))['MainDicomTags']
# Case 2: Tweaked version that can be used if several patients
# share the same "Patient ID", but have different "Patient Name"
# (which is invalid according to the DICOM standard).
# https://groups.google.com/d/msg/orthanc-users/58AxIkxFbZs/N6Knub8MAgAJ
# patient = RestToolbox.DoGet('%s/studies/%s' % (URL, studyId)) ['PatientMainDicomTags']
# Check that the StudyDate tag lies within the given range
studyDate = study['StudyDate'][:8]
    if START <= studyDate <= END:
# Create a filename
filename = '%s - %s %s - %s.zip' % (GetTag(study, 'StudyDate'),
GetTag(patient, 'PatientID'),
GetTag(patient, 'PatientName'),
GetTag(study, 'StudyDescription'))
# Remove any non-ASCII character in the filename
        filename = filename.encode('ascii', errors='replace').decode('ascii')
        filename = filename.translate({ord(c): None for c in r"'\/:*?\"<>|!="}).strip()
# Download the ZIP archive of the study
print('Downloading %s' % filename)
zipContent = RestToolbox.DoGet('%s/studies/%s/archive' % (URL, studyId))
# Write the ZIP archive at the proper location
with open(os.path.join(TARGET, filename), 'wb') as f:
f.write(zipContent)
|
gpl-3.0
| -516,811,386,712,827,460
| 35.731183
| 105
| 0.650761
| false
| 3.43662
| false
| false
| false
|
anuragpapineni/Hearthbreaker-evolved-agent
|
hearthbreaker/agents/trade_agent.py
|
1
|
4738
|
# from hearthbreaker.agents.basic_agents import RandomAgent
from hearthbreaker.agents.trade.possible_play import PlayMixin
from hearthbreaker.agents.trade.trade import TradeMixin, AttackMixin
from hearthbreaker.agents.trade.util import Util
import hearthbreaker.cards.battlecries
class BattlecryType:
@staticmethod
def buff_battlecries():
res = []
res.append(hearthbreaker.cards.battlecries.heal_two)
res.append(hearthbreaker.cards.battlecries.heal_three)
res.append(hearthbreaker.cards.battlecries.give_divine_shield)
res.append(hearthbreaker.cards.battlecries.give_stealth)
res.append(hearthbreaker.cards.battlecries.give_three_health)
res.append(hearthbreaker.cards.battlecries.two_temp_attack)
res.append(hearthbreaker.cards.battlecries.give_windfury)
return res
@staticmethod
def damage_battlecries():
res = []
res.append(hearthbreaker.cards.battlecries.silence)
res.append(hearthbreaker.cards.battlecries.deal_one_damage)
res.append(hearthbreaker.cards.battlecries.deal_two_damage)
res.append(hearthbreaker.cards.battlecries.deal_three_damage)
res.append(hearthbreaker.cards.battlecries.change_attack_to_one)
res.append(hearthbreaker.cards.battlecries.take_control_of_minion)
return res
@staticmethod
def target_type(cry):
if cry in BattlecryType.buff_battlecries():
return "Friendly"
elif cry in BattlecryType.damage_battlecries():
return "Enemy"
else:
return None
def target_type_for_card(card):
res = None
minion = card.create_minion(None)
if hasattr(minion, "battlecry"):
res = BattlecryType.target_type(minion.battlecry)
return res
class ChooseTargetMixin:
def choose_target_enemy(self, all_targets):
if len(all_targets) == 0:
raise Exception("No targets")
targets = self.prune_targets(all_targets, False)
if len(targets) == 0:
return Util.rand_el(all_targets)
if not self.current_trade:
return Util.rand_prefer_minion(targets)
# raise Exception("No current trade")
for target in targets:
if self.current_trade.opp_minion == target:
return target
# raise Exception("Could not find target {}".format(target))
return Util.rand_prefer_minion(targets)
def choose_target_friendly(self, targets):
pruned = self.prune_targets(targets, True)
if len(pruned) == 0:
return Util.rand_el(targets)
return Util.rand_el(pruned)
def prune_targets(self, targets, get_friendly):
res = []
for target in targets:
            is_friendly_minion = any(c == target for c in self.player.minions)
is_friendly_hero = target == self.player.hero
is_friendly = is_friendly_minion or is_friendly_hero
if is_friendly == get_friendly:
res.append(target)
return res
def has_friendly_targets(self, targets):
return len(self.prune_targets(targets, True)) > 0
def should_target_self(self, targets):
cry_type = BattlecryType.target_type_for_card(self.last_card_played)
if cry_type == "Friendly":
return True
elif cry_type == "Enemy":
return False
elif self.last_card_played.name == "Elven Archerzzz":
return False
elif self.has_friendly_targets(targets):
return True
else:
return False
def choose_target_inner(self, targets):
if len(targets) == 0:
return None
if self.should_target_self(targets):
return self.choose_target_friendly(targets)
else:
return self.choose_target_enemy(targets)
def choose_target(self, targets):
res = self.choose_target_inner(targets)
# print("Target {}".format(res))
return res
class NullCard:
def __init__(self):
self.name = "Null Card"
def create_minion(self, player):
return None
class TradeAgent(TradeMixin, AttackMixin, PlayMixin, ChooseTargetMixin):
def __init__(self):
super().__init__()
self.current_trade = None
self.last_card_played = NullCard()
def do_turn(self, player, game):
self.player = player
self.play_cards(player)
self.attack(player)
if not player.game.game_ended:
self.play_cards(player)
return
def do_card_check(self, cards):
return [True, True, True, True]
def choose_index(self, card, player):
return 0
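# A wiring sketch (hypothetical; assumes two decks built elsewhere, and the
# hearthbreaker.engine.Game constructor shown here is an assumption, not
# verified against this codebase):
#
#     from hearthbreaker.engine import Game
#     game = Game([deck1, deck2], [TradeAgent(), TradeAgent()])
#     game.start()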
|
mit
| -1,693,280,705,883,882,800
| 31.231293
| 85
| 0.632967
| false
| 3.716078
| false
| false
| false
|
fabiobalzano/LED
|
maingui.py
|
1
|
5125
|
"""
Copyright (c) 2012, fabiodive@gmail.com All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, self
list of conditions and the following disclaimer. Redistributions in binary
form must reproduce the above copyright notice, self list of conditions and
the following disclaimer in the documentation and/or other materials
provided with the distribution. Neither the name of fabiodive@gmail.com nor
the names of its contributors may be used to endorse or promote products
derived from self software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fabiodive@gmail.com
"""
import sys
import pygame
from pgu import gui
from dialogs import EditProgram
from dialogs import EditOptions
from dialogs import Confirm
########################################################################################
# STATIC VALUES
########################################################################################
# Main GUI Text color
FG_COLOR = (255,255,255)
########################################################################################
class LEDControl(gui.Table):
""" MAIN control GUI """
def __init__(self, datastore, timemanager, form, **params):
gui.Table.__init__(self,**params)
def accell_changed():
self.multiplier = form['accelerator'].value
accell_indicator.set_text('Accelleratore: %sX' % self.multiplier)
datastore.db['options']['accelerator'] = self.multiplier
datastore.save()
self.tr()
accell_indicator = gui.Label("Accelleratore: 0X",color=FG_COLOR)
self.td(accell_indicator, colspan=2)
self.tr()
e = gui.HSlider(0,-10,10,size=20,width=200,height=16,name='accelerator')
e.connect(gui.CHANGE, accell_changed)
e.value = datastore.db['options']['accelerator']
self.td(e, colspan=2)
self.tr()
self.td(gui.Label("PAUSA",color=FG_COLOR))
self.td(gui.Switch(value=False,name='pause'))
self.tr()
self.td(gui.Label("BlackOut!",color=FG_COLOR))
self.td(gui.Switch(value=False,name='blackout'))
self.tr()
self.td(gui.Label("Go Flash!",color=FG_COLOR))
self.td(gui.Switch(value=False,name='flash'))
dlg = EditProgram(datastore)
#Hook on closing dialog window
def dialog_close():
#refresh the datastore for changes
datastore.load()
form['pause'].value = False
def dialog_open(arg):
#pause the game
form['pause'].value = True
#reset of the dialog window
dlg.__init__(datastore, arg)
dlg.connect(gui.CLOSE, dialog_close)
dlg.open()
btn_conf = gui.Button("Programma Principale", width=200, height=40)
btn_conf.connect(gui.CLICK, dialog_open, 'contents')
self.tr()
self.td(btn_conf, colspan=2)
btn_fla = gui.Button("Programma Flash", width=200, height=40)
btn_fla.connect(gui.CLICK, dialog_open, 'flash')
self.tr()
self.td(btn_fla, colspan=2)
opt = EditOptions(datastore)
#Hook on closing options window
def options_close():
            #refresh the datastore for changes
timemanager.init_options(datastore)
self.send(gui.CHANGE)
form['pause'].value = False
def options_open():
#pause the game
form['pause'].value = True
#reset of the dialog window
opt.connect(gui.CLOSE, options_close)
opt.open()
btn_conf = gui.Button("Impostazione Orari", width=200, height=40)
btn_conf.connect(gui.CLICK, options_open)
self.tr()
self.td(btn_conf, colspan=2)
def openconfirmquit():
confirm = Confirm()
confirm.connect(gui.CHANGE, sendquit)
#confirm.connect(gui.CLOSE,closingme)
confirm.open()
def sendquit():
pygame.quit()
sys.exit()
btn_exit = gui.Button("ESCI", width=200, height=40)
btn_exit.connect(gui.CLICK,openconfirmquit)
self.tr()
self.td(btn_exit, colspan=2)
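# An embedding sketch (hypothetical; assumes concrete datastore and
# timemanager objects, and the pgu gui.App/gui.Form usage shown here is an
# assumption):
#
#     form = gui.Form()
#     app = gui.App()
#     ctrl = LEDControl(datastore, timemanager, form)
#     app.run(ctrl)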
|
bsd-3-clause
| -911,846,867,217,889,300
| 34.10274
| 88
| 0.618341
| false
| 4.221582
| false
| false
| false
|
acbraith/crossfit_scraper
|
data_analysis.py
|
1
|
23756
|
from crossfit_api import get_analysis_dataframe
import numpy as np
import pandas as pd
from memoize import persistent_memoize, memoize
from functools import partial
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectFromModel, RFECV
from sklearn.linear_model import Lasso, RANSACRegressor, LinearRegression
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, IsolationForest
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import RobustScaler, StandardScaler
from multiprocessing import Pool
import itertools, random, os, sys, time
from warnings import warn
import fancyimpute
from matplotlib import pyplot as plt
import matplotlib.ticker as ticker
from datetime import datetime
from fancyimpute import Solver
class ScaleKNeighborsRegressor(KNeighborsRegressor):
def predict(self, X):
# standardise X
X = self.scaler.transform(X)
return super().predict(X)
def fit(self, X, y):
# standardise X
self.scaler = RobustScaler().fit(X)
X = self.scaler.transform(X)
return super().fit(X,y)
class RecursiveKNN(Solver):
def __init__(self, k=5, verbose=0,
min_value=None,
max_value=None,
normalizer=None,
feature_selector=None,
regressor=partial(ScaleKNeighborsRegressor, weights='distance'),
n_jobs=1):
Solver.__init__(
self,
min_value=min_value,
max_value=max_value,
normalizer=normalizer)
self.k = k
self.verbose = verbose
self.feature_selector = feature_selector
self.regressor = regressor
self.n_jobs = n_jobs
def _transform(self, feature_selector, X):
# alternative feature selector transform to remove some NaN checks
mask = feature_selector.get_support()
if not mask.any():
warn("No features were selected: either the data is"
" too noisy or the selection test too strict.",
UserWarning)
return np.empty(0).reshape((X.shape[0], 0))
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return X[:, mask]
def _get_reg(self):
if self.feature_selector != None:
reg = Pipeline([
('feature_selection', SelectFromModel(self.feature_selector())),
('regression', ScaleKNeighborsRegressor(algorithm='brute'))
])
else:
reg = ScaleKNeighborsRegressor()
return reg
def _impute_row(self, i):
row = self.X[i,:]
known_idx = np.where(~np.isnan(row))[0]
unknown_idx = np.where(np.isnan(row))[0]
# todo make this do one col at a time
X_ = self.X[:,known_idx]
y_ = self.X[:,unknown_idx]
y_pred = np.zeros_like(unknown_idx)
if unknown_idx.size > 0:
reg = self.regressor()
full_rows = np.logical_and(~np.isnan(X_).any(axis=1), ~np.isnan(y_).any(axis=1))
X_ = X_[full_rows]
y_ = y_[full_rows]
reg.fit(X_, y_)
y_pred = reg.predict(row[known_idx].reshape(1,-1))
return (i, unknown_idx, y_pred)
    def _impute_unknown_idx(self, unknown_idx):
known_idx = [x for x in range(self.X.shape[1]) if x not in unknown_idx]
row_idxs = np.argwhere(np.logical_and(
np.isnan(self.X[:,unknown_idx]).all(axis=1),
~np.isnan(self.X[:,known_idx]).any(axis=1)))
y_pred = np.zeros((len(row_idxs),len(unknown_idx)))
if len(row_idxs) > 0:
reg = self.regressor()
selector = SelectFromModel(self.feature_selector())
# predict 1 feature at a time
for i,idx in enumerate(unknown_idx):
full_rows = np.argwhere(np.logical_and(
~np.isnan(self.X[:,known_idx]).any(axis=1),
~np.isnan(self.X[:,[idx]]).any(axis=1)))
# use these rows to perform feature selection
selector.fit(
self.X[full_rows,known_idx],
self.X[full_rows,[idx]])
# now recalculate full rows based on selected features
full_rows = np.argwhere(np.logical_and(
~np.isnan(self._transform(selector, self.X[:,known_idx])).any(axis=1),
~np.isnan(self.X[:,[idx]]).any(axis=1)))
# and fit regression model, then predict
reg.fit(
self._transform(selector, self.X[full_rows,known_idx]),
self.X[full_rows,[idx]])
# memory error for predicting too many at once
# so split into chunks
chunksize = 10000
for chunk_idx in range(0, len(row_idxs), chunksize):
y_pred[chunk_idx:chunk_idx+chunksize, [i]] = \
reg.predict(
self._transform(selector,
self.X[row_idxs[chunk_idx:chunk_idx+chunksize], known_idx]))
if self.verbose > 1:
print("Imputed",len(unknown_idx),"features in",len(row_idxs),"rows\n",
"\tUsing data from", len(full_rows),"rows")
#y_pred[:,[i]] = reg.predict(self.X[row_idxs,known_idx])
return (row_idxs, unknown_idx, y_pred)
def solve(self, X, missing_mask):
self.X = np.where(~missing_mask, X, np.nan)
imputed_X = np.where(~missing_mask, X, np.nan)
# do rows based on what's missing
pool = Pool(processes=self.n_jobs)
cols = np.argwhere(np.isnan(self.X).any(axis=0)).flatten()
num_combs = [j * len(list(itertools.combinations(cols,j))) for j in range(1,len(cols))]
cum_num_combs = np.cumsum(num_combs)
t0 = time.time()
for j in range(1,len(cols)):
np.savetxt(str(j)+'.csv', imputed_X, delimiter=',')
if self.verbose > 0:
if j > 1:
print("\tTime elapsed:", time.time()-t0)
print("\t", round(100*cum_num_combs[j-1]/cum_num_combs[-1],1),"% complete")
print("\tEstimated total time:", (time.time()-t0)/cum_num_combs[j-1] * \
cum_num_combs[-1])
print("Imputing",len(list(itertools.combinations(cols,j))),
"feature combinations of size",j,"/",len(cols)-1)
for i, unknown_idx, y_pred in \
                pool.imap(self._impute_unknown_idx, itertools.combinations(cols,j), chunksize=100):
imputed_X[i,unknown_idx] = y_pred
return imputed_X
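# A usage sketch for the imputer (hypothetical DataFrame `df`; fancyimpute's
# Solver base class exposes `complete`, which routes through `solve` above):
#
#     imputer = RecursiveKNN(verbose=1, n_jobs=2,
#                            feature_selector=DecisionTreeRegressor)
#     completed = imputer.complete(df.as_matrix())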
# check for extreme values (eg 666 pullups, 10sec 400m...)
def clear_outliers(data):
data = data.copy()
cols = [
'Age','Height','Weight',
'Back Squat','Clean and Jerk','Snatch',
'Deadlift','Fight Gone Bad','Max Pull-ups',
'Fran','Grace','Helen',
'Filthy 50','Sprint 400m','Run 5k']
ranges = [
(16,80),(100,250),(30,150),
(20,300),(20,250),(20,200),
(20,400),(20,750),(0,150),
(1.5,30),(1,60),(3,60),
(10,120),(0.72,3),(12.5,60)
]
'''ranges = [
(16,80),(100,250),(30,150),
(20,300),(20,250),(20,200),
(20,400),(20,600),(0,120),
(1.5,10),(1,15),(3,15),
(10,60),(0.72,3),(12.5,45)
]'''
for col,valid_range in zip(cols, ranges):
outliers = (valid_range[0] > data[col]) | (data[col] > valid_range[1])
i = 0
for idx in np.argwhere(outliers==True).flatten():
i += 1
print(i, "outliers in", col)
data[col] = data[col].where(~outliers, np.nan)
# check for other outliers
# this doesn't work so well
'''clf = IsolationForest(contamination=1/1000)
clf.fit(data.dropna())
outliers = clf.predict(data.fillna(data.mean()))
outliers = outliers == -1
for idx in np.argwhere(outliers==True).flatten():
print(pd.DataFrame(pd.DataFrame(data.loc[idx]).transpose()))
raise Exception'''
return data
@persistent_memoize('get_imputed_dataframe')
def _get_imputed_dataframe(*args, **kwargs):
def impute_rows(data, X_cols, y_cols):
rows_idx = np.argwhere(np.logical_and(
np.isnan(data[:,y_cols]).all(axis=1),
~np.isnan(data[:,X_cols]).any(axis=1)))
y_pred = np.zeros((len(rows_idx),len(y_cols)))
if len(rows_idx) > 0:
print("\tImputing",len(rows_idx),"rows")
full_rows = np.argwhere(np.logical_and(
~np.isnan(data[:,X_cols]).any(axis=1),
~np.isnan(data[:,y_cols]).any(axis=1)))
reg = RANSACRegressor()
reg.fit(
data[full_rows,X_cols],
data[full_rows,y_cols])
y_pred = reg.predict(data[rows_idx,X_cols]).clip(min=0)
return (rows_idx, y_cols, y_pred)
def impute_update_data(data, X_cols, y_cols):
print(X_cols,"predicting",y_cols)
cols = list(data)
X_cols = [cols.index(x) for x in X_cols]
y_cols = [cols.index(y) for y in y_cols]
matrix = data.as_matrix()
rows_idx, y_cols, y_pred = impute_rows(matrix, X_cols, y_cols)
matrix[rows_idx,y_cols] = y_pred
return pd.DataFrame(matrix, index=data.index, columns=data.columns)
data = get_analysis_dataframe(*args, **kwargs)
data = data.astype(float)
data = clear_outliers(data)
Xys = [
#(['Height'],['Weight']),
#(['Weight'],['Height']),
(['Snatch'],['Clean and Jerk']),
(['Clean and Jerk'],['Snatch']),
(['Snatch','Clean and Jerk'],['Back Squat']),
(['Snatch','Clean and Jerk','Back Squat'],['Deadlift']),
(['Back Squat'],['Deadlift']),
(['Deadlift'],['Back Squat']),
#(['Run 5k'],['Sprint 400m']),
#(['Sprint 400m'],['Run 5k']),
(['Weight','Snatch','Clean and Jerk','Back Squat','Deadlift'],['Max Pull-ups']),
(['Weight','Back Squat','Deadlift'],['Max Pull-ups']),
(['Weight','Snatch','Clean and Jerk'],['Max Pull-ups']),
#(['Filthy 50'],['Fight Gone Bad']),
#(['Fight Gone Bad'],['Filthy 50']),
(['Max Pull-ups', 'Clean and Jerk'],['Fran']),
(['Clean and Jerk', 'Fran'],['Grace']),
(['Max Pull-ups', 'Sprint 400m', 'Run 5k'],['Helen']),
#(['Max Pull-ups', 'Grace'],['Fran']),
]
for x,y in Xys:
data = impute_update_data(data, x, y)
data = clear_outliers(data)
imputer = RecursiveKNN(verbose=1,n_jobs=4,
feature_selector=DecisionTreeRegressor)
data = pd.DataFrame(imputer.complete(data), index=data.index, columns=data.columns)
return data
def get_imputed_dataframe(competition='open', year=2017, division='men',
sort='overall', fittest_in='region', region='worldwide'):
return _get_imputed_dataframe(competition, year, division, sort, fittest_in, region)
# ANALYSIS
def box_plots(data, title='Open'):
plt.suptitle(title + " Box Plots")
kwargs = {'showfliers':False}
stats = ['Age', 'Height', 'Weight']
weights = ['Deadlift','Back Squat', 'Clean and Jerk', 'Snatch']
reps = ['Fight Gone Bad', 'Max Pull-ups']
times = ['Fran', 'Grace', 'Helen', 'Filthy 50', 'Sprint 400m', 'Run 5k']
for i,x in enumerate(stats):
plt.subplot(4,3,i+1)
plt.boxplot(list(data[x].dropna()),labels=[x], **kwargs)
plt.subplot(4,1,2)
plt.boxplot([list(data[x].dropna()) for x in weights], labels=weights, vert=False, **kwargs)
for i,x in enumerate(reps):
plt.subplot(4,2,5+i)
plt.boxplot(list(data[x].dropna()),labels=[x], vert=False, **kwargs)
plt.subplot(4,1,4)
for i,x in enumerate(times):
plt.subplot(4,6,19+i)
plt.boxplot(list(data[x].dropna()),labels=[x], **kwargs)
plt.show()
def box_plots_all(open_data, regionals_data, games_data, title, my_data, metric):
def mouse_click(event):
ax = event.inaxes
stat = ''
if ax in ax_stats:
stat = stats[ax_stats.index(ax)]
val = event.ydata
elif ax in ax_weights:
stat = weights[ax_weights.index(ax)]
val = event.ydata
elif ax in ax_reps:
stat = reps[ax_reps.index(ax)]
val = event.xdata
elif ax in ax_times:
stat = times[ax_times.index(ax)]
val = event.ydata
if event.button == 1:
my_data[stat]=val
elif event.button == 2:
nonlocal box_plots_on, ax_stats, ax_weights, ax_reps, ax_times
box_plots_on = not(box_plots_on)
ax_stats, ax_weights, ax_reps, ax_times = draw_plots()
if event.button == 3:
if stat in my_data:
del my_data[stat]
plot_my_data()
def plot_my_data():
nonlocal lines
for l in lines:
try:
l.remove()
except:
l.pop(0).remove()
lines = []
for x,ax in zip(stats, ax_stats):
if x in my_data:
ax.set_prop_cycle(None)
l = ax.plot([0,4],[my_data[x],my_data[x]])
lines += [l]
for x,ax in zip(weights, ax_weights):
if x in my_data:
ax.set_prop_cycle(None)
l = ax.plot([0,4],[my_data[x],my_data[x]])
lines += [l]
for x,ax in zip(reps, ax_reps):
if x in my_data:
ax.set_prop_cycle(None)
l = ax.plot([my_data[x],my_data[x]], [0,4])
lines += [l]
for x,ax in zip(times, ax_times):
if x in my_data:
ax.set_prop_cycle(None)
l = ax.plot([0,4],[my_data[x],my_data[x]])
lines += [l]
rank,pcntile = predict_ranking(open_data, my_data)
filled_my_data = {}
for k,v in my_data.items(): filled_my_data[k] = v
for k in stats+weights+reps+times:
if k not in filled_my_data:
filled_my_data[k] = np.nan
table_text = []
table_text.append([
'Age',fmt_age(filled_my_data['Age'],0),
'Height',fmt_height(filled_my_data['Height'],0),
'Weight',fmt_weight(filled_my_data['Weight'],0),
'',''])
table_text.append([
'Back Squat',fmt_weight(filled_my_data['Back Squat'],0),
'Deadlift',fmt_weight(filled_my_data['Deadlift'],0),
'Fran',fmt_time(filled_my_data['Fran'],0),
'Filthy 50',fmt_time(filled_my_data['Filthy 50'],0)])
table_text.append([
'Clean and Jerk',fmt_weight(filled_my_data['Clean and Jerk'],0),
'Fight Gone Bad',fmt_reps(filled_my_data['Fight Gone Bad'],0),
'Grace',fmt_time(filled_my_data['Grace'],0),
'Sprint 400m',fmt_time(filled_my_data['Sprint 400m'],0)])
table_text.append([
'Snatch',fmt_weight(filled_my_data['Snatch'],0),
'Max Pull-ups',fmt_reps(filled_my_data['Max Pull-ups'],0),
'Helen',fmt_time(filled_my_data['Helen'],0),
'Run 5k',fmt_time(filled_my_data['Run 5k'],0)])
table_text.append([
'','',
'','',
'Estimated Ranking', str(round(rank,0)),
'Percentile', str(round(pcntile,2))])
font = {
'family': 'monospace',
'color': 'k',
'weight': 'heavy',
'size': 12,
}
ax = plt.subplot(5,1,5)
tab = ax.table(cellText=table_text, loc='center', bbox=[0, -.5, 1, 1.25], fontsize=12,
colWidths=[1.5,1] * 4)
cells = tab.properties()['celld']
for i in range(5):
for j in range(4):
cells[i,2*j]._loc = 'right'
cells[i,2*j+1]._loc = 'left'
cells[i,2*j].set_linewidth(0)
cells[i,2*j+1].set_linewidth(0)
ax.axis('tight')
ax.axis('off')
lines += [tab]
plt.gcf().canvas.draw_idle()
box_plots_on = True
lines = []
plt.figure().canvas.mpl_connect('button_press_event', mouse_click)
maintitle = dict(fontsize=18, fontweight='bold')
subtitle = dict(fontsize=12, fontweight='bold')
plt.suptitle(title + " Box Plots", **maintitle)
plt.rcParams['axes.facecolor'] = 'whitesmoke'
boxprops = dict(linewidth=1, alpha=0.8)
medianprops = dict(linewidth=2, color='k', alpha=0.8)
whiskerprops = dict(linewidth=1, color='k', linestyle='-')
kwargs = dict(sym='', whis=[1,99], patch_artist=True, widths=0.5, #notch=True, bootstrap=1000,
medianprops=medianprops, boxprops=boxprops, whiskerprops=whiskerprops)
stats = ['Age', 'Height', 'Weight']
weights = ['Deadlift','Back Squat', 'Clean and Jerk', 'Snatch']
reps = ['Fight Gone Bad', 'Max Pull-ups']
times = ['Fran', 'Grace', 'Helen', 'Filthy 50', 'Sprint 400m', 'Run 5k']
colors = ['steelblue', 'olivedrab', 'indianred']
def add_colors(bplot):
for patch,color in zip(bplot['boxes'],colors):
patch.set_facecolor(color)
def fmt_age(x, pos):
if np.isnan(x): return ''
x = round(x)
return str(x)
def fmt_height(x, pos):
if np.isnan(x): return ''
if metric:
return str(int(x))+" cm"
ft, inches = divmod(round(x), 12)
ft, inches = map(int, [ft, inches])
return ('{}\''.format(ft) if not inches
else '{}\'{}"'.format(ft, inches) if ft
else '{}"'.format(inches))
def fmt_weight(x, pos):
if np.isnan(x): return ''
x = int(x)
if metric:
return str(x)+" kg"
return str(x)+" lbs"
def fmt_reps(x, pos):
if np.isnan(x): return ''
x = int(x)
return str(x)+" reps"
def fmt_time(x, pos):
if np.isnan(x): return ''
m, s = divmod(round(x*60), 60)
m, s = map(int, [m, s])
return (str(m)+':'+str(s).zfill(2))
def draw_plots():
def get_cols(cols):
if metric:
return [
list(open_data[x].dropna()),
list(regionals_data[x].dropna()),
list(games_data[x].dropna())]
else:
if x == 'Height': scaler = 1/2.54
elif x in ['Weight']+weights: scaler = 2.2
else: scaler = 1
return [
list(open_data[x].dropna()*scaler),
list(regionals_data[x].dropna()*scaler),
list(games_data[x].dropna()*scaler)]
labels = ['Open','Regionals','Games']
ax_stats = []
for i,x in enumerate(stats):
ax = plt.subplot(5,3,i+1)
ax_stats += [ax]
plt.title(x, **subtitle)
plt.grid(axis='y',linestyle='dotted')
bplot = plt.boxplot(get_cols(x),labels=labels, **kwargs)
add_colors(bplot)
if x == 'Height':
plt.gca().yaxis.set_major_formatter(ticker.FuncFormatter(fmt_height))
elif x == 'Weight':
plt.gca().yaxis.set_major_formatter(ticker.FuncFormatter(fmt_weight))
plt.gca().yaxis.set_major_locator(plt.MaxNLocator(5))
ax_weights = []
for i,x in enumerate(weights):
ax = plt.subplot(5,4,5+i)
ax_weights += [ax]
plt.title(x, **subtitle)
plt.grid(axis='y',linestyle='dotted')
bplot = plt.boxplot(get_cols(x),labels=labels, **kwargs)
add_colors(bplot)
plt.gca().yaxis.set_major_formatter(ticker.FuncFormatter(fmt_weight))
plt.gca().yaxis.set_major_locator(plt.MaxNLocator(5))
ax_reps = []
for i,x in enumerate(reps):
ax = plt.subplot(5,2,5+i)
ax_reps += [ax]
plt.title(x, **subtitle)
plt.grid(axis='x',linestyle='dotted')
bplot = plt.boxplot(get_cols(x),labels=labels, vert=False, **kwargs)
add_colors(bplot)
plt.gca().xaxis.set_major_formatter(ticker.FuncFormatter(fmt_reps))
plt.gca().xaxis.set_major_locator(plt.MaxNLocator(5))
ax_times = []
for i,x in enumerate(times):
ax = plt.subplot(5,6,19+i)
ax_times += [ax]
plt.title(x, **subtitle)
plt.grid(axis='y',linestyle='dotted')
bplot = plt.boxplot(get_cols(x),labels=labels, **kwargs)
add_colors(bplot)
plt.gca().set_yscale('log')
plt.gca().yaxis.set_major_formatter(ticker.FuncFormatter(fmt_time))
plt.gca().yaxis.set_major_locator(plt.MaxNLocator(5))
plt.minorticks_off()
plt.subplots_adjust(left=0.125,right=0.9,
bottom=0.1,top=0.9,wspace=0.3,hspace=0.4)
return ax_stats, ax_weights, ax_reps, ax_times
ax_stats, ax_weights, ax_reps, ax_times = draw_plots()
plot_my_data()
plt.show()
def ANALYSIS_open(division='men'):
open_data = get_analysis_dataframe(competition='open', division=division)
open_data = clear_outliers(open_data)
box_plots(open_data, 'Open')
def ANALYSIS_regionals(division='men'):
regionals_data = get_analysis_dataframe(competition='regionals', division=division)
regionals_data = clear_outliers(regionals_data)
box_plots(regionals_data, 'Regionals')
def ANALYSIS_games(division='men'):
games_data = get_analysis_dataframe(competition='games', division=division)
games_data = clear_outliers(games_data)
box_plots(games_data, 'Games')
def ANALYSIS_all(division='men', my_data={}, metric=True):
open_data = get_analysis_dataframe(competition='open', division=division)
open_data = clear_outliers(open_data)
regionals_data = get_analysis_dataframe(competition='regionals', division=division)
regionals_data = clear_outliers(regionals_data)
games_data = get_analysis_dataframe(competition='games', division=division)
games_data = clear_outliers(games_data)
box_plots_all(open_data, regionals_data, games_data, division.title(), my_data, metric)
def ANALYSIS_all_imputed(division='men', my_data={}, metric=True):
open_data = get_imputed_dataframe(division = division, competition='open')
regionals_data = get_analysis_dataframe(division = division, competition='regionals')
games_data = get_analysis_dataframe(division = division, competition='games')
# use imputed values from open data to fill in athlete stats for regionals/games data
regionals_data = pd.merge(
open_data.drop(['overallrank','overallscore'],axis=1),
regionals_data[['userid','overallrank','overallscore']],
on='userid', how='inner')
games_data = pd.merge(
open_data.drop(['overallrank','overallscore'],axis=1),
games_data[['userid','overallrank','overallscore']],
on='userid', how='inner')
box_plots_all(open_data, regionals_data, games_data, "Imputed " + division.title(), my_data, metric)
alex = {'Age':23,'Height':165,'Weight':70,
'Back Squat':175, 'Clean and Jerk':133, 'Snatch':108, 'Deadlift':220,
'Max Pull-ups':25,
'Fran': 5}
pan = {'Age':22,'Height':158,'Weight':53,
'Back Squat':57, 'Clean and Jerk':35, 'Snatch':28, 'Deadlift':70,
'Max Pull-ups':0}
fraser = get_analysis_dataframe(division='men', competition='games').iloc[0].dropna().drop(['overallscore','userid','overallrank']).to_dict()
tct = get_analysis_dataframe(division='women', competition='games').iloc[0].dropna().drop(['overallscore','userid','overallrank']).to_dict()
sara = get_analysis_dataframe(division='women', competition='open').iloc[0].dropna().drop(['overallscore','userid','overallrank']).to_dict()
import xgboost as xgb
@memoize()
def get_fitted_model(data):
reg = xgb.XGBRegressor(missing=np.nan)
X = data.drop(['userid', 'overallrank', 'overallscore'], axis=1).as_matrix()
y = data['overallrank'].as_matrix()
reg.fit(X,np.log1p(y))
return reg
def predict_ranking(data, my_data):
cols = list(data.drop(['userid', 'overallrank', 'overallscore'], axis=1))
X_pred = []
for i,col in enumerate(cols):
if col in my_data:
X_pred += [my_data[col]]
else:
X_pred += [np.nan]
reg = get_fitted_model(data)
known_cols = list(my_data)
    y_pred = np.expm1(reg.predict(np.array(X_pred).reshape(1, -1)))
return y_pred[0], (y_pred / data['overallrank'].max()*100)[0]
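# A usage sketch (hypothetical athlete stats; keys must match the column names
# used above, with heights in cm, weights in kg and benchmark times in minutes):
#
#     stats = {'Age': 28, 'Height': 178, 'Weight': 84, 'Back Squat': 160}
#     rank, pcntile = predict_ranking(get_imputed_dataframe(), stats)
#     print("predicted rank %.0f (top %.1f%%)" % (rank, pcntile))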
ANALYSIS_all_imputed(division='men',metric=True, my_data=alex)
#ANALYSIS_all_imputed(division='men',metric=True, my_data=fraser)
raise Exception()
def to_imperial(stats):
stats['Height'] /= 2.54
stats['Weight'] *= 2.2
stats['Back Squat'] *= 2.2
stats['Clean and Jerk'] *= 2.2
stats['Snatch'] *= 2.2
stats['Deadlift'] *= 2.2
return stats
# lets test some models
data = get_analysis_dataframe()
data = clear_outliers(data)
#data = data[:1000]
X = data.drop(['userid', 'overallrank', 'overallscore'], axis=1)
y = pd.to_numeric(data['overallrank'])
y = np.log1p(y)
from sklearn.preprocessing import Imputer, StandardScaler, RobustScaler, FunctionTransformer
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import ElasticNetCV, RidgeCV
from sklearn.neural_network import MLPRegressor
from sklearn.svm import SVR
from scipy.stats import skew
def log_skewed_cols(X):
X = X.apply(lambda x: np.log1p(x) if abs(skew(x.dropna()))>1 else x, axis=0)
return X
get_score = lambda model: cross_val_score(model, X, y, n_jobs=1).mean()
for reg in [
LinearRegression(),
RidgeCV(),
#ElasticNetCV(),
#MLPRegressor(hidden_layer_sizes=(100,10,5,)),
#KNeighborsRegressor(),
#RandomForestRegressor(),
SVR(),
xgb.XGBRegressor(),
#KerasRegressor(build_fn=create_model, verbose=0),
]:
print(reg)
pipeline = Pipeline([
('logtransform', FunctionTransformer(log_skewed_cols, validate=False)),
('imputer', Imputer()),
('scaler', RobustScaler()),
('regressor', reg)
])
try:
t = time.time()
print("\tScore:",get_score(pipeline))
print("\t",time.time()-t,"seconds")
    except Exception as e:
        raise e
# xgb easily outperforms others
# now look at imputing methods
class SKLearnFancyImputer(Imputer):
def __init__(self,imputer):
self.imputer = imputer
def fit(self, X, y=None):
self.X = X
return self
def transform(self, X):
if np.array_equal(np.nan_to_num(self.X),np.nan_to_num(X)):
return self.imputer.complete(X)
else:
return self.imputer.complete(X.append(self.X))[:len(X)]
'''for imp in [
Imputer(), Imputer(strategy='median'),
SKLearnFancyImputer(fancyimpute.SoftImpute(verbose=0)),
SKLearnFancyImputer(fancyimpute.IterativeSVD(verbose=0)),
#SKLearnFancyImputer(fancyimpute.MICE(verbose=0)),
#SKLearnFancyImputer(fancyimpute.MatrixFactorization(verbose=False)),
#SKLearnFancyImputer(fancyimpute.NuclearNormMinimization(verbose=0)),
#SKLearnFancyImputer(fancyimpute.BiScaler(verbose=0)),
]:
print(imp)
pipeline = Pipeline([
('imputer', imp),
('regressor', xgb.XGBRegressor())
])
try:
t = time.time()
print("\tScore:",get_score(pipeline))
print("\t",time.time()-t,"seconds")
except Exception as e:
print(e)'''
|
mit
| 18,189,834,306,985,480
| 31.233379
| 141
| 0.658655
| false
| 2.672517
| false
| false
| false
|
browseinfo/odoo_saas3_nicolas
|
addons/project_mrp/project_procurement.py
|
1
|
5722
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class procurement_order(osv.osv):
_name = "procurement.order"
_inherit = "procurement.order"
_columns = {
'task_id': fields.many2one('project.task', 'Task'),
'sale_line_id': fields.many2one('sale.order.line', 'Sales order line')
}
def action_check_finished(self, cr, uid, ids):
res = super(procurement_order, self).action_check_finished(cr, uid, ids)
return res and self.check_task_done(cr, uid, ids)
def check_task_done(self, cr, uid, ids, context=None):
""" Checks if task is done or not.
@return: True or False.
"""
for p in self.browse(cr, uid, ids, context=context):
if (p.product_id.type == 'service') and (p.procure_method == 'make_to_order') and p.task_id and (p.task_id.stage_id and not p.task_id.stage_id.closed):
return False
return True
def check_produce_service(self, cr, uid, procurement, context=None):
return True
def _convert_qty_company_hours(self, cr, uid, procurement, context=None):
product_uom = self.pool.get('product.uom')
company_time_uom_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.project_time_mode_id
if procurement.product_uom.id != company_time_uom_id.id and procurement.product_uom.category_id.id == company_time_uom_id.category_id.id:
planned_hours = product_uom._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, company_time_uom_id.id)
else:
planned_hours = procurement.product_qty
return planned_hours
def _get_project(self, cr, uid, procurement, context=None):
project_project = self.pool.get('project.project')
project = procurement.product_id.project_id
if not project and procurement.sale_line_id:
# find the project corresponding to the analytic account of the sales order
account = procurement.sale_line_id.order_id.project_id
project_ids = project_project.search(cr, uid, [('analytic_account_id', '=', account.id)])
projects = project_project.browse(cr, uid, project_ids, context=context)
project = projects and projects[0] or False
return project
def action_produce_assign_service(self, cr, uid, ids, context=None):
if not context:
context = {}
project_task = self.pool.get('project.task')
for procurement in self.browse(cr, uid, ids, context=context):
project = self._get_project(cr, uid, procurement, context=context)
planned_hours = self._convert_qty_company_hours(cr, uid, procurement, context=context)
manager = procurement.product_id.product_manager
partner = procurement.sale_line_id and procurement.sale_line_id.order_id.partner_id or None
lang = (manager and manager.lang) or (partner and partner.lang) or False
if not lang:
lang = self.pool['res.users'].browse(cr, uid, uid, context=context).lang
product = self.pool['product.product'].browse(cr, uid, procurement.product_id.id, context=dict(context, lang=lang))
task_id = project_task.create(cr, uid, {
'name': '%s:%s' % (procurement.origin or '', product.name),
'date_deadline': procurement.date_planned,
'planned_hours': planned_hours,
'remaining_hours': planned_hours,
'partner_id': procurement.sale_line_id and procurement.sale_line_id.order_id.partner_id.id or False,
'user_id': procurement.product_id.product_manager.id,
'procurement_id': procurement.id,
'description': procurement.sale_line_id and procurement.sale_line_id.name or procurement.name,
'project_id': project and project.id or False,
'company_id': procurement.company_id.id,
},context=context)
self.write(cr, uid, [procurement.id], {'task_id': task_id, 'state': 'running', 'message':_('Task created.')}, context=context)
self.project_task_create_note(cr, uid, ids, context=context)
return task_id
def project_task_create_note(self, cr, uid, ids, context=None):
for procurement in self.browse(cr, uid, ids, context=context):
body = _("Task created")
self.message_post(cr, uid, [procurement.id], body=body, context=context)
if procurement.sale_line_id and procurement.sale_line_id.order_id:
procurement.sale_line_id.order_id.message_post(body=body)
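# A flow sketch (hypothetical; in a live OpenERP instance this method is
# normally reached by confirming a sale order line for a make-to-order service
# product rather than being called directly -- names below are illustrative):
#
#     proc_obj = self.pool.get('procurement.order')   # from within another model
#     task_id = proc_obj.action_produce_assign_service(cr, uid, [procurement_id])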
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 3,890,371,232,505,141,000
| 52.981132
| 163
| 0.627753
| false
| 3.86883
| false
| false
| false
|
jalaziz/django-cms-grappelli-old
|
cms/middleware/multilingual.py
|
1
|
6166
|
# -*- coding: utf-8 -*-
from cms.utils.i18n import get_default_language
from django.conf import settings
from django.core.urlresolvers import reverse
from django.middleware.locale import LocaleMiddleware
from django.utils import translation
import re
import urllib
SUPPORTED = dict(settings.CMS_LANGUAGES)
HAS_LANG_PREFIX_RE = re.compile(r"^/(%s)/.*" % "|".join(map(lambda l: re.escape(l[0]), settings.CMS_LANGUAGES)))
def has_lang_prefix(path):
check = HAS_LANG_PREFIX_RE.match(path)
if check is not None:
return check.group(1)
else:
return False
def patch_response(content, pages_root, language):
# Customarily user pages are served from http://the.server.com/~username/
# When a user uses django-cms for his pages, the '~' of the url appears quoted in href links.
# We have to quote pages_root for the regular expression to match.
#
# The used regex is quite complex. The exact pattern depends on the used settings.
# The regex extracts the path of the url without the leading page root, but only matches urls
# that don't already contain a language string or aren't considered multilingual.
#
# Here is an annotated example pattern (_r_ is a shorthand for the value of pages_root):
# pattern: <a([^>]+)href=("|\')(?=_r_)(?!(/fr/|/de/|/en/|/pt-br/|/media/|/media/admin/))(_r_(.*?))("|\')(.*?)>
# |-\1--| |-\2-| |---------------------\3---------------------| | |-\5--|||-\6-||-\7-|
# |---\4---|
# input (_r_=/): <a href="/admin/password_change/" class="foo">
# matched groups: (u' ', None, u'/admin/password_change/', u'admin/password_change/', u' class="foo"')
#
# Notice that (?=...) and (?!=...) do not consume input or produce a group in the match object.
# If the regex matches, the extracted path we want is stored in the fourth group (\4).
quoted_root = urllib.quote(pages_root)
HREF_URL_FIX_RE = re.compile(ur'<a([^>]+)href=("|\')(?=%s)(?!(%s|%s|%s))(%s(.*?))("|\')(.*?)>' % (
quoted_root,
"|".join(map(lambda l: quoted_root + l[0] + "/" , settings.CMS_LANGUAGES)),
settings.MEDIA_URL,
settings.ADMIN_MEDIA_PREFIX,
quoted_root
))
# Unlike in href links, the '~' (see above) the '~' in form actions appears unquoted.
#
# For understanding this regex, please read the documentation for HREF_URL_FIX_RE above.
FORM_URL_FIX_RE = re.compile(ur'<form([^>]+)action=("|\')(?=%s)(?!(%s|%s|%s))(%s(.*?))("|\')(.*?)>' % (
pages_root,
"|".join(map(lambda l: pages_root + l[0] + "/" , settings.CMS_LANGUAGES)),
settings.MEDIA_URL,
settings.ADMIN_MEDIA_PREFIX,
pages_root
))
content = HREF_URL_FIX_RE.sub(ur'<a\1href=\2/%s%s\5\6\7>' % (language, pages_root), content)
content = FORM_URL_FIX_RE.sub(ur'<form\1action=\2%s%s/\5\6\7>' % (pages_root, language), content).encode("utf8")
return content
class MultilingualURLMiddleware:
    def get_language_from_request(self, request):
changed = False
prefix = has_lang_prefix(request.path_info)
if prefix:
request.path = "/" + "/".join(request.path.split("/")[2:])
request.path_info = "/" + "/".join(request.path_info.split("/")[2:])
t = prefix
if t in SUPPORTED:
lang = t
if hasattr(request, "session") and \
request.session.get("django_language", None) != lang:
request.session["django_language"] = lang
changed = True
else:
lang = translation.get_language_from_request(request)
if not changed:
if hasattr(request, "session"):
lang = request.session.get("django_language", None)
if lang in SUPPORTED and lang is not None:
return lang
elif "django_language" in request.COOKIES.keys():
lang = request.COOKIES.get("django_language", None)
if lang in SUPPORTED and lang is not None:
return lang
if not lang:
lang = translation.get_language_from_request(request)
lang = get_default_language(lang)
return lang
def process_request(self, request):
language = self.get_language_from_request(request)
translation.activate(language)
request.LANGUAGE_CODE = language
def process_response(self, request, response):
language = getattr(request, 'LANGUAGE_CODE', self.get_language_from_request(request))
local_middleware = LocaleMiddleware()
        response = local_middleware.process_response(request, response)
path = unicode(request.path)
# note: pages_root is assumed to end in '/'.
# testing this and throwing an exception otherwise, would probably be a good idea
if not path.startswith(settings.MEDIA_URL) and \
not path.startswith(settings.ADMIN_MEDIA_PREFIX) and \
response.status_code == 200 and \
response._headers['content-type'][1].split(';')[0] == "text/html":
pages_root = urllib.unquote(reverse("pages-root"))
try:
decoded_response = response.content.decode('utf-8')
except UnicodeDecodeError:
decoded_response = response.content
response.content = patch_response(
decoded_response,
pages_root,
request.LANGUAGE_CODE
)
        if response.status_code in (301, 302):
location = response['Location']
if not has_lang_prefix(location) and location.startswith("/") and \
not location.startswith(settings.MEDIA_URL) and \
not location.startswith(settings.ADMIN_MEDIA_PREFIX):
response['Location'] = "/%s%s" % (language, location)
response.set_cookie("django_language", language)
return response
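# An activation sketch (hypothetical settings.py snippet; the exact position
# in the middleware stack is an assumption):
#
#     MIDDLEWARE_CLASSES = (
#         'django.contrib.sessions.middleware.SessionMiddleware',
#         'cms.middleware.multilingual.MultilingualURLMiddleware',
#         # ...
#     )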
|
bsd-3-clause
| -1,841,472,630,449,027,300
| 46.430769
| 123
| 0.57444
| false
| 3.944978
| false
| false
| false
|
nuobit/odoo-addons
|
connector_sage/models/payroll_sage_payslip.py
|
1
|
1319
|
# Copyright NuoBiT Solutions, S.L. (<https://www.nuobit.com>)
# Eric Antones <eantones@nuobit.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from odoo import api, fields, models, _
from odoo.exceptions import UserError, ValidationError
class Payslip(models.Model):
_inherit = 'payroll.sage.payslip'
@api.multi
def action_paysplip_import(self):
for rec in self:
backend = self.env['sage.backend'].search([('company_id', '=', rec.company_id.id)])
if len(backend) != 1:
raise UserError(_("Expected 1 backend for the current company, found %i" % len(backend)))
# import lines and checks
backend.import_payslip_line_id = rec
if rec.type == 'transfer':
self.env['sage.payroll.sage.payslip.line.transfer'].with_delay().import_payslip_lines(rec, backend)
backend.import_payslip_check_id = rec
self.env['sage.payroll.sage.payslip.check'].with_delay().import_payslip_checks(rec, backend)
elif rec.type == 'payroll':
self.env['sage.payroll.sage.payslip.line.payroll'].with_delay().import_payslip_lines(rec, backend)
else:
raise UserError(_("Unexpected payslip type %s!") % rec.type)
return True
|
agpl-3.0
| 8,481,821,566,376,713,000
| 42.966667
| 115
| 0.620925
| false
| 3.684358
| false
| false
| false
|
rueckstiess/jiratopic
|
onlineldavb/lookup_topic.py
|
1
|
1908
|
import sys, os, re, random, math, urllib2, time, cPickle
import numpy
import argparse
import onlineldavb
from operator import itemgetter
topics_30 = [
"NETWORKING / CONNECTIONS",
"HARDWARE / RESOURCES",
"DRIVERS",
"MMS",
"?1",
"JIRA",
"QUERY",
"REPLICATION",
"REPLICATION",
"STORAGE???",
"NETWORKING / SETUP / LIMITS",
"CHUNKS",
"NETWORKING / PROBLEMS",
"SHARDING / CONFIG SERVER",
"SHARDING / BALANCING",
"DIAGNOSIS",
"SHELL",
"AUTH/SECURITY",
"QUERY / DOCUMENTS",
"OPS / RESTART",
"STORAGE / OPS",
"STORAGE",
"CHUNKS",
"INDEXING",
"UPGRADING",
"INITIAL DIAGNOSIS",
"INDEXING / OPTIMIZATION",
"REPLICASET CONFIGURATION",
"BACKUPS",
"NETWORKING / DNS"
]
def main():
# The number of documents to analyze each iteration
batchsize = 64
# The total number of documents in the CS project
D = 14617
# argparse arguments
argparser = argparse.ArgumentParser()
argparser.add_argument('-v', '--vocabulary', action='store', default="../manual_vocab.txt", help='provide vocabulary file')
argparser.add_argument('-l', '--lambda', action='store', default="./lambda-79-30.dat", help='provide lambda parameter file')
argparser.add_argument('-s', '--string', action='store', nargs='*', help='string to evaluate')
args = vars(argparser.parse_args())
vocab = str.split(file(args['vocabulary']).read())
init_lambda = numpy.loadtxt(args['lambda'])
K = init_lambda.shape[0]
olda = onlineldavb.OnlineLDA(vocab, K, D, 1./K, 1./K, 1024., 0.7, init_lambda)
gamma, _ = olda.do_e_step( args['string'] )
gamma = gamma.flatten()
    sorted_ids = sorted([(i, g) for i, g in enumerate(gamma) if g > 1.0], key=itemgetter(1), reverse=True)
scores = map(itemgetter(1), sorted_ids)
topics = map(lambda x: topics_30[x[0]], sorted_ids)
print ", ".join( map(lambda x: "%s (%.2f)" % (x[0], x[1]), zip (topics, scores)) )
if __name__ == '__main__':
main()
|
apache-2.0
| 4,325,715,177,712,102,400
| 26.652174
| 128
| 0.649895
| false
| 3.072464
| false
| false
| false
|
battlemidget/shipit
|
shipit/git.py
|
1
|
1605
|
# -*- coding: utf-8 -*-
"""
shipit.git
~~~~~~~~~~
Operations on git repositories.
"""
import os
import tempfile
import subprocess
def get_remotes():
"""
Get a list of the git remote URLs for this repository.
Return a dictionary of remote names mapped to URL strings if remotes were
found.
Otherwise return ``None``.
"""
tmp_file = tempfile.NamedTemporaryFile(mode='w+', delete=False)
retcode = subprocess.call(['git', 'remote', '-v'], stdout=tmp_file.file)
if retcode != 0:
return
# Store the output of the command and delete temporary file
tmp_file.file.seek(0)
raw_remotes = tmp_file.read()
os.remove(tmp_file.name)
# Get the GitHub remote strings
nonempty_remotes = (r for r in raw_remotes.split('\n') if 'github' in r.lower())
return {remote_name(r): remote_url(r) for r in nonempty_remotes}
def remote_name(remotestring):
return remotestring.split(' ')[0].split('\t')[0]
def remote_url(remotestring):
return remotestring.split(' ')[0].split('\t')[1]
def extract_user_and_repo_from_remote(remote_url):
# TODO: name slices
if remote_url.startswith('git://'):
# Git remote
user_repo = remote_url.split('/')[3:]
user, repo = user_repo[0], user_repo[1][:-4]
elif remote_url.startswith('http'):
# HTTP[S] remote
user_repo = remote_url.split('/')[3:]
user, repo = user_repo[0], user_repo[1][:-4]
else:
# SSH remote
user_repo = remote_url.split(':')[1][:-4]
user, repo = tuple(user_repo.split('/'))
return user, repo
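# A quick sketch of the URL branches above (hypothetical remote strings):
#
#     extract_user_and_repo_from_remote('git@github.com:battlemidget/shipit.git')
#     # -> ('battlemidget', 'shipit')
#     extract_user_and_repo_from_remote('https://github.com/battlemidget/shipit.git')
#     # -> ('battlemidget', 'shipit')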
|
gpl-3.0
| 6,923,824,859,608,695,000
| 24.078125
| 84
| 0.611838
| false
| 3.459052
| false
| false
| false
|
NoviceLive/unish
|
py/gh.py
|
1
|
2560
|
#!/usr/bin/env python3
from os.path import basename, splitext
from logging import basicConfig, DEBUG
import click
from plumbum import local, FG
__author__ = 'Gu Zhengxiong'
__version__ = '0.1.0'
PROGRAM_NAME = 'GH'
PACKAGE_NAME = PROGRAM_NAME.lower()
VERSION_PROMPT = (
'{version}\n\nCopyright 2015-2016 {author} '
'<rectigu@gmail.com>\n\n'
'This is free software; see the source for '
'copying conditions.\nThere is NO warranty; '
'not even for MERCHANTABILITY nor \nFITNESS FOR '
'A PARTICULAR PURPOSE.'.format(
version=__version__, author=__author__)
)
@click.group(
context_settings=dict(help_option_names=['-h', '--help']))
@click.version_option(VERSION_PROMPT,
'-V', '--version', prog_name=PROGRAM_NAME)
def main():
"""Simplified & Unfied Interface Of Mercurial & Git."""
basicConfig(level=DEBUG)
@main.command()
@click.argument('url', required=False)
def cl(url):
"""Clone a repository in a simplified manner."""
from pyperclip import paste
url = url if url else paste().strip()
SCM(url).doer.clone()
class SCM(object):
def __init__(self, url):
suffix = self.get_suffix(url)
if suffix == Hg.suffix:
self.doer = Hg(url)
elif suffix == Git.suffix:
self.doer = Git(url)
else:
            raise RuntimeError('No handler for URL: %s' % url)
@staticmethod
def get_suffix(url):
"""Determine the suffix for the URL.
Example Git URL:
1. https://github.com/NoviceLive/unish.git
2. git@github.com:NoviceLive/unish.git
3. https://novicelive@bitbucket.org/novicelive/good.git
4. git@bitbucket.org:novicelive/good.git
Example Mercurial URL:
1. ssh://hg@bitbucket.org/novicelive/unish
"""
suffix = splitext(basename(url))[1]
return Git.suffix if suffix == Git.suffix else Hg.suffix
class Git(object):
suffix = '.git'
git = local['git']
def __init__(self, url):
self.url = url
self.base = basename(url)
def clone(self, dest=None):
if dest is None:
dest = self.base
self.git['clone', self.url, dest, '--recursive'] & FG
class Hg(object):
suffix = '.hg'
hg = local['hg']
def __init__(self, url):
self.url = url
self.base = basename(url)
def clone(self, dest=None):
if dest is None:
dest = self.base + self.suffix
self.hg['clone', self.url, dest] & FG
if __name__ == '__main__':
main()
|
gpl-3.0
| 6,983,805,230,766,400,000
| 24.346535
| 64
| 0.596484
| false
| 3.459459
| false
| false
| false
|
tonnrueter/pymca_devel
|
PyMca/SpsDataSource.py
|
1
|
12025
|
#/*##########################################################################
# Copyright (C) 2004-2012 European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF by the Software group.
#
# This toolkit is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# PyMca is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# PyMca; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# PyMca follows the dual licensing model of Riverbank's PyQt and cannot be
# used as a free plugin for a non-free program.
#
# Please contact the ESRF industrial unit (industry@esrf.fr) if this license
# is a problem for you.
#############################################################################*/
import types
from PyMca import DataObject
from PyMca import spswrap as sps
DEBUG = 0
SOURCE_TYPE = 'SPS'
class SpsDataSource(object):
def __init__(self, name):
if type(name) not in types.StringTypes:
raise TypeError("Constructor needs string as first argument")
self.name = name
self.sourceName = name
self.sourceType = SOURCE_TYPE
def refresh(self):
pass
def getSourceInfo(self):
"""
        Returns information about the Spec version in self.name
        so that the application can inspect it before loading.
        Returns a dictionary with the key "KeyList" (list of all available keys
        in this source). Each element in "KeyList" is a shared memory
        array name.
"""
return self.__getSourceInfo()
def getKeyInfo(self, key):
if key in self.getSourceInfo()['KeyList']:
return self.__getArrayInfo(key)
else:
return {}
def getDataObject(self, key_list, selection=None):
if type(key_list) != types.ListType:
nolist = True
key_list = [key_list]
else:
output = []
nolist = False
if self.name in sps.getspeclist():
sourcekeys = self.getSourceInfo()['KeyList']
for key in key_list:
#a key corresponds to an array name
if key not in sourcekeys:
raise KeyError("Key %s not in source keys" % key)
#array = key
#create data object
data = DataObject.DataObject()
data.info = self.__getArrayInfo(key)
data.info['selection'] = selection
data.data = sps.getdata(self.name, key)
if nolist:
if selection is not None:
scantest = (data.info['flag'] &
sps.TAG_SCAN) == sps.TAG_SCAN
if ((key in ["SCAN_D"]) or scantest) \
and 'cntlist' in selection:
data.x = None
data.y = None
data.m = None
if 'nopts' in data.info['envdict']:
nopts = int(data.info['envdict']['nopts']) + 1
else:
nopts = data.info['rows']
if not 'LabelNames' in data.info:
data.info['LabelNames'] =\
selection['cntlist'] * 1
if 'x' in selection:
for labelindex in selection['x']:
label = data.info['LabelNames'][labelindex]
if label not in data.info['LabelNames']:
raise ValueError("Label %s not in scan labels" % label)
index = data.info['LabelNames'].index(label)
if data.x is None: data.x = []
data.x.append(data.data[:nopts, index])
if 'y' in selection:
for labelindex in selection['y']:
label = data.info['LabelNames'][labelindex]
if label not in data.info['LabelNames']:
raise ValueError("Label %s not in scan labels" % label)
index = data.info['LabelNames'].index(label)
if data.y is None: data.y = []
data.y.append(data.data[:nopts, index])
if 'm' in selection:
for labelindex in selection['m']:
label = data.info['LabelNames'][labelindex]
if label not in data.info['LabelNames']:
raise ValueError("Label %s not in scan labels" % label)
index = data.info['LabelNames'].index(label)
if data.m is None: data.m = []
data.m.append(data.data[:nopts, index])
data.info['selectiontype'] = "1D"
data.info['scanselection'] = True
data.data = None
return data
if (key in ["XIA_DATA"]) and 'XIA' in selection:
if selection["XIA"]:
if 'Detectors' in data.info:
for i in range(len(selection['rows']['y'])):
selection['rows']['y'][i] = \
data.info['Detectors'].index(selection['rows']['y'][i]) + 1
del selection['XIA']
return data.select(selection)
else:
if data.data is not None:
data.info['selectiontype'] = "%dD" % len(data.data.shape)
if data.info['selectiontype'] == "2D":
data.info["imageselection"] = True
return data
else:
output.append(data.select(selection))
return output
else:
return None
def __getSourceInfo(self):
arraylist = []
sourcename = self.name
for array in sps.getarraylist(sourcename):
arrayinfo = sps.getarrayinfo(sourcename, array)
arraytype = arrayinfo[2]
arrayflag = arrayinfo[3]
if arraytype != sps.STRING:
if (arrayflag & sps.TAG_ARRAY) == sps.TAG_ARRAY:
arraylist.append(array)
continue
if DEBUG:
print("array not added %s" % array)
source_info = {}
source_info["Size"] = len(arraylist)
source_info["KeyList"] = arraylist
return source_info
def __getArrayInfo(self, array):
info = {}
info["SourceType"] = SOURCE_TYPE
info["SourceName"] = self.name
info["Key"] = array
arrayinfo = sps.getarrayinfo(self.name, array)
info["rows"] = arrayinfo[0]
info["cols"] = arrayinfo[1]
info["type"] = arrayinfo[2]
info["flag"] = arrayinfo[3]
counter = sps.updatecounter(self.name, array)
info["updatecounter"] = counter
envdict = {}
keylist = sps.getkeylist(self.name, array + "_ENV")
for i in keylist:
val = sps.getenv(self.name, array + "_ENV", i)
envdict[i] = val
info["envdict"] = envdict
scantest = (info['flag'] & sps.TAG_SCAN) == sps.TAG_SCAN
if (array in ["SCAN_D"]) or scantest:
if 'axistitles' in info["envdict"]:
info["LabelNames"] = self._buildLabelsList(info['envdict']['axistitles'])
if 'H' in info["envdict"]:
if 'K' in info["envdict"]:
if 'L' in info["envdict"]:
info['hkl'] = [envdict['H'],
envdict['K'],
envdict['L']]
calibarray = array + "_PARAM"
if calibarray in sps.getarraylist(self.name):
try:
data = sps.getdata(self.name, calibarray)
updc = sps.updatecounter(self.name, calibarray)
info["EnvKey"] = calibarray
# data is an array
info["McaCalib"] = data.tolist()[0]
info["env_updatecounter"] = updc
except:
# Some of our C modules return NULL without setting
# an exception ...
pass
if array in ["XIA_DATA", "XIA_BASELINE"]:
envarray = "XIA_DET"
if envarray in sps.getarraylist(self.name):
try:
data = sps.getdata(self.name, envarray)
updc = sps.updatecounter(self.name, envarray)
info["EnvKey"] = envarray
info["Detectors"] = data.tolist()[0]
info["env_updatecounter"] = updc
except:
pass
return info
def _buildLabelsList(self, instr):
if DEBUG:
print('SpsDataSource : building counter list')
state = 0
llist = ['']
for letter in instr:
if state == 0:
if letter == ' ':
state = 1
elif letter == '{':
state = 2
else:
llist[-1] = llist[-1] + letter
elif state == 1:
if letter == ' ':
pass
elif letter == '{':
state = 2
llist.append('')
else:
llist.append(letter)
state = 0
elif state == 2:
if letter == '}':
state = 0
else:
llist[-1] = llist[-1] + letter
try:
llist.remove('')
except ValueError:
pass
return llist
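    # Illustrative trace (hypothetical axistitles string, not from the source):
    #     _buildLabelsList('Time {Detector 1} Counts')
    # yields ['Time', 'Detector 1', 'Counts']: braces group labels that
    # contain spaces, plain whitespace separates the rest.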
def isUpdated(self, sourceName, key):
if sps.specrunning(sourceName):
if sps.isupdated(sourceName, key):
return True
#return True if its environment is updated
envkey = key + "_ENV"
if envkey in sps.getarraylist(sourceName):
if sps.isupdated(sourceName, envkey):
return True
return False
source_types = {SOURCE_TYPE: SpsDataSource}
# TODO object is a builtins
def DataSource(name="", object=None, copy=True, source_type=SOURCE_TYPE):
try:
sourceClass = source_types[source_type]
except KeyError:
# ERROR invalid source type
raise TypeError("Invalid Source Type, source type should be one of %s" % source_types.keys())
    # SpsDataSource.__init__ only takes a name; 'object' and 'copy' are kept
    # in the signature for compatibility with other DataSource factories.
    return sourceClass(name)
def main():
import sys
try:
specname = sys.argv[1]
arrayname = sys.argv[2]
obj = DataSource(specname)
        data = obj.getDataObject(arrayname)
print("info = ", data.info)
except:
# give usage instructions
print("Usage: SpsDataSource <specversion> <arrayname>")
sys.exit()
if __name__ == "__main__":
main()
|
gpl-2.0
| 4,840,702,036,057,973,000
| 39.762712
| 103
| 0.47526
| false
| 4.648241
| false
| false
| false
|
tapomayukh/projects_in_python
|
classification/Classification_with_kNN/Single_Contact_Classification/Feature_Comparison/multiple_features/results/test10_cross_validate_objects_1200ms_scaled_method_v_area_motion.py
|
1
|
4600
|
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 82:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
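# A hedged usage sketch (random data, not part of the original experiment):
#     vec, val, mu, M, C = pca(np.random.rand(82, 140))
#     perc = cumsum(val) / sum(val)   # fraction of variance per leading PC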
if __name__ == '__main__':
Fmat = np.row_stack([Fmat_original[41:82,:], Fmat_original[82:123,:]])
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:20]
m_W, n_W = np.shape(W)
print 'Reduced Dimension Eigenvector Shape:',m_W, n_W
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
print 'Z-Score Shape:', m_Z, n_Z
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
print 'Transposed Projected Data Shape:', m_Y, n_Y
#Using PYMVPA
PCA_data = np.array(Y.T)
PCA_label_2 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Plush-Toy-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=1)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
print ds1.samples.shape
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
print error
print cvterr.confusion.asstring(description=False)
figure(1)
cvterr.confusion.plot(numbers='True',numbers_alpha=2)
#show()
# Variances
figure(2)
title('Variances of PCs')
stem(range(len(perc_total)),perc_total,'--b')
axis([-0.3,130.3,0,1.2])
grid('True')
show()
|
mit
| 718,970,347,211,313,800
| 33.074074
| 666
| 0.646087
| false
| 2.90404
| false
| false
| false
|
John-Lin/RuleEngine
|
malware/core/engine.py
|
1
|
13571
|
import re
import sys
import os
import logging
import time
import hashlib
from urlparse import urlparse
from virus_total_apis import PrivateApi as VirusTotal
import pcap
import decoder
import apikey
from snort import SnortRule
from database import SQLiteTool
logger = logging.getLogger(__name__)
REQUEST_RATE = 300
APIKEY = apikey.APIKEY_0
def clean_spaces(s):
s = s.replace('\r', '')
return s
class RuleEngineBase(object):
def __init__(self, path='./PCAPLog/'):
self.rules = list()
self._db = SQLiteTool()
self._db.creat_url_report()
        self.tcp_payload_iter = PayloadIterator2(path, 'tcp')
        self.udp_payload_iter = PayloadIterator2(path, 'udp')
self.vd = Validator()
self.vt = VirusTotal(APIKEY)
def _make_rule(self, **kwargs):
rule = SnortRule()
rule.msg = '"Trojan.Gen"'
content = kwargs.get('content')
uricontent = kwargs.get('uricontent')
dst_port = kwargs.get('dst_port')
ref = kwargs.get('ref')
protocol = kwargs.get('protocol')
dst_port = kwargs.get('dst_port')
if protocol is not None:
rule.protocol = protocol
if dst_port is not None:
rule.dst_port = dst_port
if content is not None:
rule.content = content
if uricontent is not None and uricontent != '/':
rule.uricontent = uricontent
if ref is not None:
rule.ref = ref
self.rules.append(rule)
# self._log_rules(rule, ref[0].split(',')[-1])
def _get_url_positive(self, resource):
urlkey = hashlib.sha1(resource).hexdigest()
if self._db.is_key(urlkey):
# print "In Table!!"
return self._db.show_positive(urlkey)
def _log_rules(self, data, filename):
# print str(data)
if not os.path.exists('./rules'):
os.makedirs('./rules')
with open('./rules/{m}_rule.rules'.format(m=filename), 'a') as fp:
fp.write('{r}\n'.format(r=str(data)))
class RuleEngineOnline(RuleEngineBase):
def __init__(self, path='./PCAPLog/'):
self.vt_req_counter = 0
self.vt_req_timer = time.time()
super(RuleEngineOnline, self).__init__(path)
def _check_timer_counter(self):
if self.vt_req_counter == REQUEST_RATE:
self.vt_req_counter = 0
period = time.time() - self.vt_req_timer
waiting = 60 - period + 1
if waiting > 0:
logger.info("Waiting %s seconds", (str(waiting)))
time.sleep(waiting)
self.vt_req_timer = time.time()
def _make_rule(self, **kwargs):
super(RuleEngineOnline, self)._make_rule(**kwargs)
def _get_url_positive(self, resource):
urlkey = hashlib.sha1(resource).hexdigest()
if self._db.is_key(urlkey):
# print "In Table!!"
update_database = False
if update_database:
# ============== Updated the Database URL column ===============
self._check_timer_counter()
self.vt_req_counter += 1
response = self.vt.get_url_report(resource)
if response.get('error') is not None:
logger.info("Error: {e}".format(e=response.get('error')))
return None
# sys.exit(0)
results = response.get('results')
positives = results.get('positives')
url = results.get('url')
if positives >= 0:
self._db.insert2(urlkey, url, positives)
# ============== Updated the Database URL column ===============
return self._db.show_positive(urlkey)
else:
self._check_timer_counter()
self.vt_req_counter += 1
logger.info("Search on VirusTotal counter: %s",
str(self.vt_req_counter))
logger.info(resource)
response = self.vt.get_url_report(resource)
if response.get('error') is not None:
logger.info("Error: {e}".format(e=response.get('error')))
return None
# sys.exit(0)
results = response.get('results')
positives = results.get('positives')
url = results.get('url')
if positives >= 0:
self._db.insert2(urlkey, url, positives)
# self._db.insert2(url_id, url, positives)
return positives
elif positives is None:
self._check_timer_counter()
self.vt_req_counter += 1
                logger.info('''No report. Submit the URL to VirusTotal. Counter: %s''',
str(self.vt_req_counter))
self.vt.scan_url(resource)
return None
else:
logger.debug("Get reports failed.")
return None
def _get_domain_positive(self, resource):
domainkey = hashlib.sha1(resource).hexdigest()
if self._db.is_key(domainkey):
pass
# return self._db.show_positive(urlkey)
else:
pass
def http_rule_generate(self):
        for content, conn, filename in self.tcp_payload_iter:
try:
get_obj = self.vd.is_get_method(content)
                host_obj = self.vd.is_host(content)
if host_obj and get_obj:
uri = get_obj.group(1)
host_field = clean_spaces(host_obj.group(1))
o = urlparse('http://'+ host_field + uri)
# domian = o.netloc
# uri = o.path
if o.path == '/':
                        # Probably a malicious domain name
domain_obj = self.vd.is_valid_url(host_field)
if domain_obj is not None:
domain_pos = self._get_url_positive(domain_obj.group(0))
if domain_pos > 0:
self._make_rule(protocol='tcp',
content=['"{h}"'.format(h=clean_spaces(host_obj.group(0))), 'nocase'],
dst_port=conn[3])
# md5=filename.split('.')[0])
else:
# Is a invalid url
pass
else:
# o.path != '/'
# string = self.vd.is_valid_utf8(host_field + uri)
# if string is not None:
# Do search on VT
url_obj = self.vd.is_valid_url(host_field + uri)
if url_obj is not None:
url_pos = self._get_url_positive(url_obj.group(0))
if url_pos > 0:
self._make_rule(protocol='tcp',
content=['"{h}"'.format(h=clean_spaces(host_obj.group(0))), 'nocase'],
uricontent=['"{u}"'.format(u=o.path), 'nocase'],
dst_port=conn[3])
# md5=filename.split('.')[0])
else:
# Is a invalid url
pass
else:
pass
except KeyboardInterrupt:
logger.info("Quit")
sys.exit()
def dns_rule_generate(self):
        for content, conn, filename in self.udp_payload_iter:
try:
# print content, filename, conn[3]
if content[0] == 'UNKNOWN_DNS':
# Bad DNS query opcode != 0
# print "Bad DNS query opcode != 0, %r" % content[1]
self._make_rule(protocol='udp',
dst_port=conn[3],
content=['"|'+content[1]+'|"'])
else:
domain_obj = self.vd.is_valid_url(content[0])
if domain_obj is not None:
domain_pos = self._get_url_positive(content[0])
if domain_pos > 0:
self._make_rule(protocol='udp',
dst_port=conn[3],
content=['"|'+content[1]+'|"'])
else:
# Is a invalid domain name
with open('invalid_domain_name.log', 'a') as fp:
fp.write(filename+'\n')
fp.write(content[0]+'\n')
except KeyboardInterrupt:
logger.info("Quit")
sys.exit()
def _log_rules(self, data, filename):
super(RuleEngineOnline, self)._log_rules(data, filename)
class Validator(object):
def __init__(self):
pass
def is_valid_url(self, url):
regex = re.compile(
# r'^(?:[a-z0-9\.\-]*)://' # scheme is validated separately
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}(?<!-)\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return url is not None and regex.search(url)
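    # Hedged examples (illustrative inputs): the pattern accepts bare hosts,
    # IPv4 addresses and URLs with a path, and rejects free text.
    #     is_valid_url('example.com/a/b')  -> truthy match object
    #     is_valid_url('not a url')        -> None (falsy)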
def is_valid_domain_name(self, domain_name):
# TODO
# Valid domain names
# ex: syshell.exe is not domain
# regex = re.compile(r'[a-zA-Z\d-]{,63}(\.[a-zA-Z\d-]{,63})*',
# re.IGNORECASE)
# return domain_name is not None and regex.search(domain_name)
# return domain_name
pass
    def is_host(self, content):
regex = re.compile('Host: (.*)')
return content is not None and regex.search(content)
def is_get_method(self, content):
regex = re.compile('GET (.*) ')
return content is not None and regex.search(content)
def is_valid_utf8(self, data):
# valid_utf8 = True
try:
data.decode('utf-8')
# return data
except UnicodeDecodeError:
with open('invalid_utf8.log', 'a') as fp:
fp.write('{u}\n'.format(u=data))
data = None
# valid_utf8 = False
return data
class PayloadIterator2(object):
def __init__(self, path, protocol):
self.index = 0
self.path = path
self.protocol = protocol
self.content = list()
self.five_tuple = list()
self.file_pointer = list()
def __iter__(self):
pcap_list = list()
for dirPath, dirNames, fileNames in os.walk(self.path):
for f in fileNames:
if f.endswith('.pcap'):
pcap_list.append(os.path.join(dirPath, f))
else:
# Not a pcap file
pass
if self.protocol == 'tcp':
for p in pcap_list:
connection = pcap.follow_tcp_stream(p)
for five_tuple, frame in connection.iteritems():
for seq, content in frame.iteritems():
if content:
# Generate the content and 5-tuple
self.content.append(content)
self.five_tuple.append(five_tuple)
self.file_pointer.append(p.split('/')[-1])
else:
# Some packets have no payload
pass
logger.info("TCP Total Connections : %s",
str(len(set(self.five_tuple))))
elif self.protocol == 'udp':
for p in pcap_list:
connection = decoder.decode_dns_qd_name(p)
for five_tuple, qd_name_list in connection.iteritems():
self.content.append(qd_name_list)
self.five_tuple.append(five_tuple)
self.file_pointer.append(p.split('/')[-1])
logger.info("UDP Total Connections : %s",
str(len(set(self.five_tuple))))
else:
logger.info("Protocol %s are not implement", self.protocol)
logger.info("Total Pcap file: %s", str(len(set(pcap_list))))
return self
def next(self):
try:
five_tuple = self.five_tuple[self.index]
content = self.content[self.index]
file_pointer = self.file_pointer[self.index]
except IndexError:
raise StopIteration
self.index += 1
return content, five_tuple, file_pointer
def main():
logging.basicConfig(level=logging.INFO,
format='[%(levelname)s] %(message)s',)
rules = list()
rule_engine = RuleEngineOnline()
rule_engine.http_rule_generate()
# print dir(rule_engine)
for ruleobj in rule_engine.rules:
rules.append(str(ruleobj))
rules = list(set(rules))
for r in rules:
print r
with open('main_snort.rules', 'a') as fp:
fp.write(r + '\n')
if __name__ == "__main__":
main()
|
apache-2.0
| -4,869,667,672,339,214,000
| 35.877717
| 118
| 0.474247
| false
| 4.157782
| false
| false
| false
|
takaakiaoki/PyFoam
|
PyFoam/Basics/TemplateFile.py
|
1
|
16441
|
# ICE Revision: $Id$
import re
from math import *
import sys
from PyFoam.Error import error,warning
from PyFoam.ThirdParty.pyratemp import Template as PyratempTemplate
from PyFoam.ThirdParty.pyratemp import EvalPseudoSandbox,TemplateRenderError
from PyFoam.ThirdParty.pyratemp import Renderer as PyratempRenderer
from PyFoam.ThirdParty.six import iteritems,exec_,print_
class RendererWithFilename(PyratempRenderer):
"""Usual renderer but report a filename"""
def __init__(self, evalfunc, escapefunc,filename=None):
PyratempRenderer.__init__(self, evalfunc, escapefunc)
self.fileName = filename
def reportString(self,expr, err):
result="Cannot eval expression '%s'. (%s: %s)" %(expr, err.__class__.__name__, err)
if self.fileName:
result+=" in file "+self.fileName
return result
def _eval(self, expr, data):
"""evalfunc with error-messages"""
try:
return self.evalfunc(expr, data)
except (TypeError,NameError,IndexError,KeyError,AttributeError, SyntaxError):
err = sys.exc_info()[1] # Needed because python 2.5 does not support 'as e'
raise TemplateRenderError(self.reportString(expr,err))
class TolerantRenderer(RendererWithFilename):
"""Variant of the renderer that doesn't choke on problems with evaluations"""
def __init__(self, evalfunc, escapefunc,filename=None):
RendererWithFilename.__init__(self, evalfunc, escapefunc,filename=filename)
def _eval(self, expr, data):
"""evalfunc with error-messages"""
try:
return self.evalfunc(expr, data)
except (TypeError,NameError,IndexError,KeyError,AttributeError, SyntaxError):
err = sys.exc_info()[1] # Needed because python 2.5 does not support 'as e'
warning(self.reportString(expr,err))
return "Template evaluation ERROR: "+self.reportString(expr,err)
execIdString="this is meant to be executed:"
substituteIdString="substitute current values into this string:"
class PyratempPreprocessor(object):
"""This class preprocesses the input that is give to it in such a
way that the old format (using $$ at the line beginnings and $
.. $ for expressions) is reworked into something that pyratemp understands
"""
def __init__(self,
dovarline=True,
doexpr=True,
expressionDelimiter="$",
assignmentLineStart="$$",
allowExec=False,
assignmentDebug=None,
specials=[]):
"""Create the regexp once for performance reasons
@param dovarline: look for variable lines that start with $$
@param doexpr: substitute expressions that are between $
@param expressionDelimiter: character/string that is used before and after an
expression. After the expression the reverse of the string is used
@param assignmentLineStart: character sequence that signals an assignment line
@param assignmentDebug: Add a commented line to debug assignments. Prefix used is this parameter
@param allowExec: allows execution of code. This is potentially unsafe
@param specials: a list. If any expression starts with one of these values then
the full expression (including delimiters) is left verbatim in the template"""
self.clip=len(expressionDelimiter)
self.specials=specials
tmp=list(expressionDelimiter)
tmp.reverse()
self.expressionDelimiter=re.escape(expressionDelimiter)
self.expressionDelimiterEnd=re.escape("".join(tmp))
self.expressionDelimiterRaw=expressionDelimiter
self.expressionDelimiterEndRaw="".join(tmp)
# print self.expressionDelimiter,self.expressionDelimiterEnd
self.assignmentLineStart=assignmentLineStart
self.assignmentDebug=assignmentDebug
self.expr=re.compile("%s[^$!\n]+?%s" % (self.expressionDelimiter,self.expressionDelimiterEnd))
self.dovarline=dovarline
self.doexpr=doexpr
self.allowExec=allowExec
def __call__(self,original):
"""This does the actual work"""
if len(original)==0:
return original
lines=original.split("\n")
if lines[-1]=="":
lines=lines[:-1]
result=""
for l in lines:
if l[:len(self.assignmentLineStart)]==self.assignmentLineStart and self.dovarline:
tmp=l[len(self.assignmentLineStart):].split("=")
if len(tmp)!=2:
if self.allowExec:
execString=l[len(self.assignmentLineStart):].replace("\\","\\\\").replace("\"","\\\"")
result+='$!setvar("%s", "%s")!$#!' % (
"dummyVarForExecution",
execIdString+execString.strip()
)
result+="\n"
else:
error("Each definition must be of the form: <name>=<value>",
"The string",l,"is not")
else:
# if tmp[1].find('"')>=0:
# error("There is a \" in",tmp[1],"\npyratemp can't cope with that'")
exprStr=tmp[1].replace("\\","\\\\").replace("\"","\\\"")
result+='$!setvar("%s", "%s")!$#!' % (tmp[0].strip(),exprStr.strip())
result+="\n"
if self.assignmentDebug and self.doexpr:
l=self.assignmentDebug+" "+tmp[0].strip()+" "+self.expressionDelimiterRaw+tmp[0].strip()+self.expressionDelimiterEndRaw
else:
continue
if self.doexpr:
nl=""
iStart=0
for m in self.expr.finditer(l):
inner=l[m.start()+self.clip:m.end()-self.clip]
hasSpecial=False
nl+=l[iStart:m.start()]
for k in self.specials:
if len(k)<=len(inner):
if inner[:len(k)]==k:
hasSpecial=True
substVarName="dummyVarForSubstitution"
# nl+=l[m.start():m.end()]
nl+='$!setvar("%s", "%s")!$#!\n' % (
substVarName,
substituteIdString+l[m.start():m.end()]
)
nl+='$!'+substVarName+'!$'
if not hasSpecial:
nl+="$!"+inner+"!$"
iStart=m.end()
result+=nl+l[iStart:]+"\n"
else:
result+=l+"\n"
# remove trailing newline if the original had none
if original[-1]!='\n' and result[-1]=='\n':
result=result[:-1]
return result
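    # Hedged before/after sketch with default settings (illustrative input):
    #     PyratempPreprocessor()('$$ a=2\nvalue $a+1$;\n')
    # returns
    #     '$!setvar("a", "2")!$#!\nvalue $!a+1!$;\n'
    # i.e. '$$' assignment lines become setvar() calls and '$...$'
    # expressions become pyratemp '$!...!$' substitutions.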
class TemplateFileOldFormat(object):
"""Works on template files. Does calculations between $$.
Lines that start with $$ contain definitions"""
def __init__(self,name=None,content=None):
"""Exactly one of the parameters must be specified
@param name: name of the template file.
@param content: Content of the template"""
if name==None and content==None:
error("Either a file name or the content of the template must be specified")
if name!=None and content!=None:
error("Both: a file name and the content of the template were specified")
if content!=None:
template=content
else:
template=open(name).read()
self.buildTemplate(template)
def buildTemplate(self,template):
lines=template.split("\n")
self.expressions={}
self.template=""
for l in lines:
if l[:2]!="$$":
self.template+=l+"\n"
else:
tmp=l[2:].split("=")
if len(tmp)!=2:
error("Each definition must be of the form: <name>=<value>",
"The string",l,"is not")
self.expressions[tmp[0].strip()]=tmp[1]
def writeToFile(self,outfile,vals):
"""In the template, replaces all the strings between $$
with the evaluation of the expressions and writes the results to a file
@param outfile: the resulting output file
@param vals: dictionary with the values"""
output=self.getString(vals)
open(outfile,"w").write(output)
def getString(self,vals):
"""In the template, replaces all the strings between $$
with the evaluation of the expressions
@param vals: dictionary with the values
@returns: The string with the replaced expressions"""
symbols=vals.copy()
exp=re.compile("\$[^$\n]*\$")
for n,e in iteritems(self.expressions):
if n in vals:
error("Key",n,"already existing in",vals)
symbols[n]="("+str(e)+")"
keys=list(symbols.keys())
keys.sort(key=len,reverse=True)
input=self.template[:]
m=exp.search(input)
while m:
a,e=m.span()
pre=input[0:a]
post=input[e:]
mid=input[a+1:e-1]
old=""
while old!=mid:
old=mid
for k in keys:
if mid.find(k)>=0:
mid=mid.replace(k,str(symbols[k]))
break
try:
input=pre+str(eval(mid))+post
except ArithmeticError:
e = sys.exc_info()[1] # Needed because python 2.5 does not support 'as e'
print_("Problem evaluating",mid)
raise e
m=exp.search(input)
return input
class EvalPseudoSandboxWithMath(EvalPseudoSandbox):
"""Add mathematical functions to the valid functons"""
def __init__(self,allowExec=False):
EvalPseudoSandbox.__init__(self)
import math
for o in dir(math):
if o[0]!="_":
self.register(o,getattr(math,o))
from PyFoam.ThirdParty.six.moves import builtins as __builtin__
self.register("set",__builtin__.set)
if allowExec:
del self.eval_allowed_globals["__import__"]
self.register("__import__",__builtins__["__import__"])
def compile(self, expr,mode="eval"):
"""Compile a python-eval-expression. Overrides the default implementation
to allow '_[1]' as a valid name
"""
if expr not in self._compile_cache:
c = compile(expr, "", mode)
for i in c.co_names: #prevent breakout via new-style-classes
if i[0] == '_':
if i[1]!='[' or i[-1]!=']':
raise NameError("Name '%s' is not allowed." %(i))
self._compile_cache[expr] = c
return self._compile_cache[expr]
def eval(self, expr, locals):
"""Eval a python-eval-expression.
Sets ``self.locals_ptr`` to ``locales`` and compiles the code
before evaluating.
"""
if expr[:len(substituteIdString)]==substituteIdString:
goOn=True
replacement=expr[len(substituteIdString):]
while goOn:
try:
value=replacement % locals
goOn=False
except KeyError:
e = sys.exc_info()[1] # Needed because python 2.5 does not support 'as e'
kExpr="%("+e.args[0]+")"
replacement=replacement.replace(kExpr,"%"+kExpr)
return value
# print value
sav = self.locals_ptr
self.locals_ptr = locals
doEval=True
if expr[:len(execIdString)]==execIdString:
doEval=False
if doEval:
globals= {"__builtins__":self.eval_allowed_globals}
x = eval(self.compile(expr),globals, locals)
else:
# globals= {"__builtins__":self.eval_allowed_globals}
globals= {"__builtins__":__builtins__}
expr=expr[len(execIdString):]
exec_(self.compile(expr,mode="exec"),globs=globals,locs=locals)
x = None
self.locals_ptr = sav
return x
class EvalPseudoSandboxWithMathWithImport(EvalPseudoSandboxWithMath):
"""Class that allows the import of packages"""
def __init__(self):
EvalPseudoSandboxWithMath.__init__(self,allowExec=True)
class TemplateFile(TemplateFileOldFormat):
"""Works on template files. Does calculations between $$.
Lines that start with $$ contain definitions"""
def __init__(self,
name=None,
content=None,
encoding="utf-8",
expressionDelimiter="|",
assignmentLineStart="$$",
assignmentDebug=None,
specials=[],
renderer_class=None,
tolerantRender=False,
allowExec=False
):
"""Exactly one of the parameters must be specified
@param name: name of the template file.
@param content: Content of the template
@param expressionDelimiter: character/string that delimits expression strings.
@param assignmentLineStart: Start of a line that holds an assignment operation
@param assignmentDebug: Add a commented line to debug assignments. Prefix used is this parameter
@param allowExec: allow execution (and import). This is potentially unsafe
@param special: list with strings that leave expression untreated"""
self.expressionDelimiter=expressionDelimiter
self.assignmentLineStart=assignmentLineStart
self.assignmentDebug=assignmentDebug
self.specials=specials
self.allowExec=allowExec
super(TemplateFile,self).__init__(name=name,
content=content,
)
if renderer_class==None:
if tolerantRender:
class ConcreteTolerantRenderer(TolerantRenderer):
def __init__(self,evalfunc, escapefunc):
TolerantRenderer.__init__(self,
evalfunc,
escapefunc,filename=name)
renderer_class=ConcreteTolerantRenderer
else:
class ConcreteRenderWithFileName(RendererWithFilename):
def __init__(self,evalfunc, escapefunc):
RendererWithFilename.__init__(self,
evalfunc,
escapefunc,filename=name)
renderer_class=ConcreteRenderWithFileName
if allowExec:
sandbox=EvalPseudoSandboxWithMathWithImport
else:
sandbox=EvalPseudoSandboxWithMath
self.ptemplate=PyratempTemplate(string=self.template,
eval_class=sandbox,
renderer_class=renderer_class,
encoding=encoding,
escape=None
)
def buildTemplate(self,template):
self.template=PyratempPreprocessor(assignmentLineStart=self.assignmentLineStart,
expressionDelimiter=self.expressionDelimiter,
assignmentDebug=self.assignmentDebug,
specials=self.specials,
allowExec=self.allowExec
)(template)
def getString(self,vals):
"""In the template, replaces all the strings between $$
with the evaluation of the expressions
@param vals: dictionary with the values
@returns: The string with the replaced expressions"""
return self.ptemplate(**vals)
# Should work with Python3 and Python2
|
gpl-2.0
| 5,101,484,783,056,229,000
| 38.90534
| 144
| 0.543884
| false
| 4.609195
| false
| false
| false
|
dcifuen/cloudbday
|
src/birthday/constants.py
|
1
|
1046
|
#Environment related constants
ENV_PRODUCTION = 'PRODUCTION'
#Staging is used for testing by replicating the same production remote env
ENV_STAGING = 'STAGING'
#Development local env
ENV_DEVELOPMENT = 'DEV'
#Automated tests local env
ENV_TESTING = 'TEST'
ENVIRONMENT_CHOICES = [
ENV_PRODUCTION,
ENV_STAGING,
ENV_DEVELOPMENT,
ENV_TESTING,
]
EMAIL_REGEXP = r"^[a-zA-Z0-9'._-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]{2,6}$"
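# Hedged examples (illustrative): re.match(EMAIL_REGEXP, "john.doe@example.com")
# succeeds, while "no-at-sign.example.com" does not match.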
MALE = 'M'
FEMALE = 'F'
OTHER = 'O'
GENDERS = [
MALE,
FEMALE,
OTHER
]
OAUTH2_SCOPES = 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/admin.directory.user.readonly https://www.googleapis.com/auth/plus.profiles.read https://www.googleapis.com/auth/plus.me https://www.googleapis.com/auth/plus.login https://www.googleapis.com/auth/calendar https://www.google.com/m8/feeds'
BIRTHDAY_CSV_COLUMNS = ["email", "birthday"]
MENU_ITEMS = [
('admin_index', 'Home'),
('upload_csv', 'Upload'),
('settings', 'Settings'),
]
|
mit
| -6,952,644,356,848,802,000
| 25.846154
| 382
| 0.692161
| false
| 2.745407
| false
| false
| false
|
mlml/autovot
|
autovot/bin/auto_vot_append_files.py
|
1
|
4617
|
#! /usr/bin/env python3
#
# Copyright (c) 2014 Joseph Keshet, Morgan Sonderegger, Thea Knowles
#
# This file is part of Autovot, a package for automatic extraction of
# voice onset time (VOT) from audio files.
#
# Autovot is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Autovot is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Autovot. If not, see
# <http://www.gnu.org/licenses/>.
#
# auto_vot_append_files.py : Append set of features and labels
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import zip
from builtins import open
from builtins import int
from future import standard_library
standard_library.install_aliases()
import argparse
from helpers.utilities import *
if __name__ == "__main__":
# parse arguments
parser = argparse.ArgumentParser(description='Append set of features and labels')
parser.add_argument('features_filename', help="front end features filename")
parser.add_argument('labels_filename', help="front end labels filename")
parser.add_argument('appended_features_filename', help="front end features filename to be appended")
parser.add_argument('appended_labels_filename', help="front end labels filename to be appended")
parser.add_argument("--logging_level", help="Level of verbosity of information printed out by this program ("
"DEBUG, INFO, WARNING or ERROR), in order of increasing verbosity. "
"See http://docs.python.org/2/howto/logging for definitions. ("
"default: %(default)s)", default="INFO")
args = parser.parse_args()
logging_defaults(args.logging_level)
# open files
in_features = open(args.features_filename, 'r')
in_labels = open(args.labels_filename, 'r')
# read infra text header
header = in_labels.readline()
dims = header.split()
# read file lines
lines = list()
for x, y in zip(in_features, in_labels):
lines.append((x, y))
# close files
in_features.close()
in_labels.close()
if len(lines) != int(dims[0]):
logging.error("%s and %s are not of the same length or %s is missing a header" % (args.features_filename,
args.labels_filename,
args.labels_filename))
exit(-1)
    try:
        # open the appended files for reading (raises if they don't exist yet)
        app_features = open(args.appended_features_filename, 'r')
        app_labels = open(args.appended_labels_filename, 'r')
# read infra text header
app_header = app_labels.readline()
app_dims = app_header.split()
# read file to lines
for x, y in zip(app_features, app_labels):
lines.append((x, y))
        # close the appended files
        app_features.close()
        app_labels.close()
# assert header
if len(lines) != int(dims[0])+int(app_dims[0]):
logging.error("Something wrong with the header of %s" % args.appended_labels_filename)
exit(-1)
    except IOError as exception:
if exception.errno != 2:
logging.error("Something wrong with opening %s and %s for reading." % (args.appended_features_filename,
args.appended_labels_filename))
# open appended files for writing
out_features = open(args.appended_features_filename, 'w')
out_labels = open(args.appended_labels_filename, 'w')
# write labels header
header = "%d 2\n" % len(lines)
out_labels.write(header)
# write data
for x, y in lines:
out_features.write(x)
out_labels.write(y)
# close files
out_features.close()
out_labels.close()
|
lgpl-3.0
| -8,348,166,290,750,049,000
| 36.536585
| 116
| 0.619883
| false
| 4.155716
| false
| false
| false
|
fallen/artiq
|
artiq/transforms/quantize_time.py
|
1
|
4035
|
"""
This transform turns calls to delay() that use non-integer time
expressed in seconds into calls to delay_mu() that use int64 time
expressed in multiples of ref_period.
It does so by inserting multiplication/division/rounding operations around
those calls.
The seconds_to_mu and mu_to_seconds core language functions are also
implemented here, as well as watchdog to syscall conversion.
"""
import ast
from artiq.transforms.tools import value_to_ast
def _seconds_to_mu(ref_period, node):
divided = ast.copy_location(
ast.BinOp(left=node,
op=ast.Div(),
right=value_to_ast(ref_period)),
node)
return ast.copy_location(
ast.Call(func=ast.Name("round64", ast.Load()),
args=[divided],
keywords=[], starargs=[], kwargs=[]),
divided)
def _mu_to_seconds(ref_period, node):
return ast.copy_location(
ast.BinOp(left=node,
op=ast.Mult(),
right=value_to_ast(ref_period)),
node)
class _TimeQuantizer(ast.NodeTransformer):
def __init__(self, ref_period):
self.ref_period = ref_period
self.watchdog_id_counter = 0
def visit_Call(self, node):
funcname = node.func.id
if funcname == "delay":
node.func.id = "delay_mu"
if (isinstance(node.args[0], ast.Call)
and node.args[0].func.id == "mu_to_seconds"):
# optimize:
# delay(mu_to_seconds(x)) -> delay_mu(x)
node.args[0] = self.visit(node.args[0].args[0])
else:
node.args[0] = _seconds_to_mu(self.ref_period,
self.visit(node.args[0]))
return node
elif funcname == "seconds_to_mu":
return _seconds_to_mu(self.ref_period,
self.visit(node.args[0]))
elif funcname == "mu_to_seconds":
return _mu_to_seconds(self.ref_period,
self.visit(node.args[0]))
else:
self.generic_visit(node)
return node
def visit_With(self, node):
self.generic_visit(node)
if (isinstance(node.items[0].context_expr, ast.Call)
and node.items[0].context_expr.func.id == "watchdog"):
idname = "__watchdog_id_" + str(self.watchdog_id_counter)
self.watchdog_id_counter += 1
time = ast.BinOp(left=node.items[0].context_expr.args[0],
op=ast.Mult(),
right=ast.Num(1000))
time_int = ast.Call(
func=ast.Name("round", ast.Load()),
args=[time],
keywords=[], starargs=None, kwargs=None)
syscall_set = ast.Call(
func=ast.Name("syscall", ast.Load()),
args=[ast.Str("watchdog_set"), time_int],
keywords=[], starargs=None, kwargs=None)
stmt_set = ast.copy_location(
ast.Assign(targets=[ast.Name(idname, ast.Store())],
value=syscall_set),
node)
syscall_clear = ast.Call(
func=ast.Name("syscall", ast.Load()),
args=[ast.Str("watchdog_clear"),
ast.Name(idname, ast.Load())],
keywords=[], starargs=None, kwargs=None)
stmt_clear = ast.copy_location(ast.Expr(syscall_clear), node)
node.items[0] = ast.withitem(
context_expr=ast.Name(id="sequential",
ctx=ast.Load()),
optional_vars=None)
node.body = [
stmt_set,
ast.Try(body=node.body,
handlers=[],
orelse=[],
finalbody=[stmt_clear])
]
return node
def quantize_time(func_def, ref_period):
_TimeQuantizer(ref_period).visit(func_def)
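# Illustrative before/after sketch (hypothetical kernel snippet):
#     delay(t)                  ->  delay_mu(round64(t / ref_period))
#     seconds_to_mu(t)          ->  round64(t / ref_period)
#     delay(mu_to_seconds(x))   ->  delay_mu(x)   # optimized, no round trip
# where ref_period is inlined as a constant by value_to_ast().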
|
gpl-3.0
| -2,171,472,973,711,803,400
| 34.707965
| 74
| 0.513507
| false
| 3.952008
| false
| false
| false
|
GNOME/pygoocanvas
|
demo/customs/custom-svg.py
|
1
|
4302
|
import gobject
import gtk
import goocanvas
import rsvg
import cairo
class CustomSvgItem(goocanvas.ItemSimple):
# setup our custom properties
__gproperties__ = {
'x': (float, # property type
'X', # property nick name
'The x coordinate of a SVG image', # property description
0, # property minimum value
10e6, # property maximum value
0, # property default value
gobject.PARAM_READWRITE), # property flags
'y': (float,
'Y',
'The y coordinate of a SVG image',
0,
10e6,
0,
gobject.PARAM_READWRITE),
'width': (float,
'Width',
'The width of the SVG Image',
0,
10e6,
0,
gobject.PARAM_READABLE),
'height': (float,
'Height',
                   'The height of the SVG Image',
0,
10e6,
0,
gobject.PARAM_READABLE),
}
def __init__(self, x, y, handle, **kwargs):
super(CustomSvgItem, self).__init__(**kwargs)
self.x = x
self.y = y
self.width = handle.props.width
self.height = handle.props.height
self.handle = handle
def do_set_property(self, pspec, value):
if pspec.name == 'x':
self.x = value
# make sure we update the display
self.changed(True)
elif pspec.name == 'y':
self.y = value
# make sure we update the display
self.changed(True)
else:
raise AttributeError, 'unknown property %s' % pspec.name
def do_get_property(self, pspec):
if pspec.name == 'x':
return self.x
elif pspec.name == 'y':
return self.y
elif pspec.name == 'width':
return self.width
elif pspec.name == 'height':
return self.height
else:
raise AttributeError, 'unknown property %s' % pspec.name
def do_simple_paint(self, cr, bounds):
matrix = cr.get_matrix()
matrix.translate(self.x, self.y)
cr.set_matrix(matrix)
self.handle.render_cairo(cr)
def do_simple_update(self, cr):
self.bounds_x1 = float(self.x)
self.bounds_y1 = float(self.y)
self.bounds_x2 = float(self.x + self.width)
self.bounds_y2 = float(self.y + self.height)
def do_simple_is_item_at(self, x, y, cr, is_pointer_event):
if ((x < self.x) or (x > self.x + self.width)) or ((y < self.y) or (y > self.y + self.height)):
return False
else:
return True
gobject.type_register(CustomSvgItem)
def on_press(item, target, event, root):
item.props.y = 150
def on_r_press(item, target, event):
item.props.x = 150
def main():
window = gtk.Window()
window.set_default_size(640, 600)
window.show()
window.connect("destroy", lambda w: gtk.main_quit())
scrolled_win = gtk.ScrolledWindow()
scrolled_win.set_shadow_type(gtk.SHADOW_IN)
scrolled_win.show()
window.add(scrolled_win)
canvas = goocanvas.Canvas()
canvas.set_size_request(600, 450)
canvas.set_bounds(0, 0, 1000, 1000)
root = canvas.get_root_item()
handle = rsvg.Handle("../images/circle1.svg")
svgitem = CustomSvgItem(x=100,
y=100,
handle=handle,
parent=root)
svgitem.connect("button_press_event", on_press, root)
r = goocanvas.Rect (parent=root,
x=10,
y=10,
width=20,
height=20)
r.connect("button_press_event", on_r_press)
r.props.fill_color = 'yellow'
canvas.show()
scrolled_win.add(canvas)
gtk.main()
if __name__ == "__main__":
main()
|
lgpl-2.1
| -2,475,605,450,880,108,500
| 27.302632
| 103
| 0.477685
| false
| 4.031865
| false
| false
| false
|
Balannen/LSMASOMM
|
atom3/Kernel/ATOM3Types/ATOM3Text.py
|
1
|
9751
|
# Implements : class ATOM3Text
# Author : Juan de Lara
# Description : A class for the ATOM3 Text type.
# Modified : 17 Oct 2002
# Changes :
# ____________________________________________________________________________________________________________________
from Tkinter import *
from ATOM3Type import ATOM3Type
from ATOM3Exceptions import *
from code import *
from string import replace, rstrip
from ATOM3Integer import ATOM3Integer
from textUtilities import setTabs,createTabPanel, addFiltersFromIDLE
from textUtilities import processBackspace, processDelete
from textUtilities import processTab, processReturn
class ATOM3Text(ATOM3Type):
def __init__(self, initialValue = "", width = 80, height = 15):
"""
Initialize textBody to initialValue and textWidget to None
"""
ATOM3Type.__init__(self )
        self.textBody = initialValue
        self.textWidget = None  # widget to be created when show() is called
self._isNone = 0 # for the moment it is not none
self.myWidth = width
self.myHeight = height
self.heightATOM3Integer = ATOM3Integer(height)
def isNone (self):
"""
check if the type value is none
"""
return self._isNone
def setNone (self):
"""
sets to None the attribute value
"""
self._isNone = 1
def setValue(self, value):
"""
Sets the actual attribute value
"""
# check that we have the correct type (a string)
if type(value) != StringType and type(value) != NoneType:
raise ATOM3BadAssignmentValue, "in setValue(), a string was expected"
self.textBody = value # Assign the value to the attribute
if self.textWidget: # if the widget's been shown
self.textWidget.delete(1.0, END) # delete from graphical field
if value:
self.textBody = value
self.textWidget.insert(1.0, value) # insert into graphical field
else: # this means we want to set it to None
self.setNone()
def getValue(self):
"""
Gets the actual attribute value
"""
if self.textWidget: # if the widget exists, the get its value...
self.textBody = self.textWidget.get(1.0, END) # synchronize textBody and textWidget
return self.textBody # return textBody
def toString(self, maxWide = None, maxLines = None ):
"""
Returns the string representation of this type, having at most "maxLines" lines
and "maxWide" width.
"""
if self.textWidget: # if the widget exists, then get its value...
self.textBody = self.textWidget.get(1.0, END) # synchronize textBody and textWidget
if self.textBody:
self.textBody = rstrip( self.textBody, '\n' ) # Added by Denis Dube, Summer 2004, to remove excess \n
self.textBody += '\n' # Put one \n back, rstrip is a bit agressive...
result = "" # Auxiliary variable with the result
current, numLines, currWidth = 0, 0, 0
max = len(self.textBody)
if maxWide == None: maxWide = max
if maxLines == None: maxLines = 50
while (1):
if current >= max: return result # if we've gone over the textBody's with, return result
cchar = self.textBody[current] # get the current character
if cchar == '\n':
numLines = numLines + 1 # increment the number of lines so far...
currWidth = -1
if numLines > maxLines: return result # ... if bigger than the maximum, return result
currWidth = currWidth + 1 # increment the width so far...
result= result+self.textBody[current]
if currWidth > maxWide: # if we're over the max width, find next '\n'
while (current < max and self.textBody[current] != '\n'):
current = current + 1
if current >= max: return result
result = result + '\n' # add a new line...
currWidth = 0
current = current + 1
else: return ""
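    # Hedged example (illustrative): ATOM3Text('ab\ncd').toString() returns
    # 'ab\ncd\n' -- trailing newlines are collapsed to exactly one before
    # the width/line-count clipping is applied.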
def setHeight(self, height=None):
"""
Sets the height of the text box (as it appears visually)
Parameter:
height, integer value, represents # of lines of text
If height == None, then uses self.heightATOM3Integer instead, this is
changed via the createTabPanel() and in the __init__ routine of course.
"""
if(height):
self.myHeight = height
else:
self.myHeight = self.heightATOM3Integer.getValue()
if(self.textWidget != None):
self.textWidget.config(height=self.myHeight)
def show(self, parent, parentTopWindow = None ):
"""
Creates an entry to show the value
"""
ATOM3Type.show(self, parent, parentTopWindow )
self.containerFrame = Frame(parent) # container frame
yscrollbar = Scrollbar(self.containerFrame, orient=VERTICAL)
xscrollbar = Scrollbar(self.containerFrame, orient=HORIZONTAL)
self.textWidget = Text(self.containerFrame, bg='white',
xscrollcommand = xscrollbar.set,
yscrollcommand = yscrollbar.set,
width = self.myWidth, height=self.myHeight,
padx=4, wrap='word', exportselection=False,
font = ('courier', 10))
#font = "{System} 10")
createTabPanel(self, self.containerFrame,frameSide='top' )
yscrollbar.pack(side=RIGHT, fill = Y)
self.textWidget.pack(side=TOP)
xscrollbar.pack(side=BOTTOM, fill = X)
yscrollbar.config(command = self.textWidget.yview)
xscrollbar.config(command = self.textWidget.xview)
if self.textBody:
self.textWidget.insert(1.0, self.textBody)
#self.textWidget.bind("<Return>", self.processReturn )# catch the <return> event...
self.textWidget.bind("<Delete>", lambda e=None,s=self: processDelete(s) )
self.textWidget.bind("<BackSpace>", lambda e=None,s=self: processBackspace(s) )
self.textWidget.bind("<Tab>", lambda e=None,s=self: processTab(s) )
self.textWidget.bind("<Return>", lambda e=None,s=self: processReturn(s) )
setTabs(self)
addFiltersFromIDLE(self)
return self.containerFrame
def processReturn(self, event):
"""
Bind method for <return>. Adds a return to the text.
"""
self.textWidget.insert( INSERT, "\n")
return "break"
def destroy(self):
"""
Stores the widget value into the variable
"""
if self.textWidget:
self.textBody = self.textWidget.get(1.0, END)
self.myHeight = self.heightATOM3Integer.getValue()
self.textWidget = None # destroy graphical widget
def clone(self):
"""
Makes an exact copy of this object
"""
cloneObject = ATOM3Text("", self.myWidth, self.myHeight)
cloneObject.parent = self.parent
cloneObject.mode = self.mode
cloneObject.textBody = self.textBody
cloneObject.textWidget = self.textWidget
return cloneObject
def copy(self, other):
"""
copies each field of the other object into its own state
"""
ATOM3Type.copy(self, other) # call the ancestor (copies the parent field)
self.textBody = other.textBody
self.textWidget = other.textWidget
def writeConstructor2File(self, file, indent, objName='at', depth = 0, generatingCode = 0):
"""
Method that writes into a file the constructor and the value of the object. Must be overriden in children
"""
replacedStr = self.toString()
replacedStr = replace( replacedStr, '\\', '\\'+'\\')
replacedStr = replace( replacedStr, "'", "\\'")
replacedStr = replace( replacedStr, '\n', '\\n')
file.write(indent+objName+"=ATOM3Text('"+replacedStr+"', "+str(self.myWidth)+","+str(self.myHeight)+" )\n")
def writeValue2File(self, file, indent, objName='at', depth = 0, generatingCode = 0):
"""
Method that writes into a file the constructor and the value of the object. Must be overriden in children
"""
replacedStr = self.toString()
replacedStr = replace( replacedStr, '\\', '\\'+'\\')
replacedStr = replace( replacedStr, "'", "\\'")
replacedStr = replace( replacedStr, '\n', '\\n')
file.write(indent+objName+".setValue('"+replacedStr+"')\n")
file.write(indent+objName+".setHeight("+str(self.myHeight)+")\n")
if self.isNone():
file.write(indent+objName+".setNone()\n")
|
gpl-3.0
| 613,949,713,060,251,900
| 43.143519
| 118
| 0.537278
| false
| 4.358963
| false
| false
| false
|
j-griffith/cinder
|
cinder/api/v3/volumes.py
|
1
|
16280
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes V3 api."""
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import uuidutils
import six
from six.moves import http_client
import webob
from webob import exc
from cinder.api import common
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.v2 import volumes as volumes_v2
from cinder.api.v3.views import volumes as volume_views_v3
from cinder.backup import api as backup_api
from cinder import exception
from cinder import group as group_api
from cinder.i18n import _
from cinder.image import glance
from cinder import objects
from cinder.policies import volumes as policy
from cinder import utils
LOG = logging.getLogger(__name__)
class VolumeController(volumes_v2.VolumeController):
"""The Volumes API controller for the OpenStack API V3."""
_view_builder_class = volume_views_v3.ViewBuilder
def __init__(self, ext_mgr):
self.group_api = group_api.API()
self.backup_api = backup_api.API()
super(VolumeController, self).__init__(ext_mgr)
def delete(self, req, id):
"""Delete a volume."""
context = req.environ['cinder.context']
req_version = req.api_version_request
cascade = utils.get_bool_param('cascade', req.params)
force = False
params = ""
if req_version.matches(mv.VOLUME_LIST_BOOTABLE):
force = utils.get_bool_param('force', req.params)
if cascade or force:
params = "(cascade: %(c)s, force: %(f)s)" % {'c': cascade,
'f': force}
LOG.info("Delete volume with id: %(id)s %(params)s",
{'id': id, 'params': params}, context=context)
volume = self.volume_api.get(context, id)
if force:
context.authorize(policy.FORCE_DELETE_POLICY, target_obj=volume)
self.volume_api.delete(context, volume,
cascade=cascade,
force=force)
return webob.Response(status_int=202)
@common.process_general_filtering('volume')
def _process_volume_filtering(self, context=None, filters=None,
req_version=None):
if req_version.matches(None, mv.MESSAGES):
filters.pop('glance_metadata', None)
if req_version.matches(None, mv.BACKUP_UPDATE):
filters.pop('group_id', None)
utils.remove_invalid_filter_options(
context, filters,
self._get_volume_filter_options())
def _get_volumes(self, req, is_detail):
"""Returns a list of volumes, transformed through view builder."""
context = req.environ['cinder.context']
req_version = req.api_version_request
params = req.params.copy()
marker, limit, offset = common.get_pagination_params(params)
sort_keys, sort_dirs = common.get_sort_params(params)
filters = params
show_count = False
if req_version.matches(
mv.SUPPORT_COUNT_INFO) and 'with_count' in filters:
show_count = utils.get_bool_param('with_count', filters)
filters.pop('with_count')
self._process_volume_filtering(context=context, filters=filters,
req_version=req_version)
# NOTE(thingee): v2 API allows name instead of display_name
if 'name' in sort_keys:
sort_keys[sort_keys.index('name')] = 'display_name'
if 'name' in filters:
filters['display_name'] = filters.pop('name')
strict = req.api_version_request.matches(
mv.VOLUME_LIST_BOOTABLE, None)
self.volume_api.check_volume_filters(filters, strict)
volumes = self.volume_api.get_all(context, marker, limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters.copy(),
viewable_admin_meta=True,
offset=offset)
total_count = None
if show_count:
total_count = self.volume_api.calculate_resource_count(
context, 'volume', filters)
for volume in volumes:
utils.add_visible_admin_metadata(volume)
req.cache_db_volumes(volumes.objects)
if is_detail:
volumes = self._view_builder.detail_list(
req, volumes, total_count)
else:
volumes = self._view_builder.summary_list(
req, volumes, total_count)
return volumes
@wsgi.Controller.api_version(mv.VOLUME_SUMMARY)
def summary(self, req):
"""Return summary of volumes."""
view_builder_v3 = volume_views_v3.ViewBuilder()
context = req.environ['cinder.context']
filters = req.params.copy()
utils.remove_invalid_filter_options(context, filters,
self._get_volume_filter_options())
num_vols, sum_size, metadata = self.volume_api.get_volume_summary(
context, filters=filters)
req_version = req.api_version_request
if req_version.matches(mv.VOLUME_SUMMARY_METADATA):
all_distinct_metadata = metadata
else:
all_distinct_metadata = None
return view_builder_v3.quick_summary(num_vols, int(sum_size),
all_distinct_metadata)
@wsgi.response(http_client.ACCEPTED)
@wsgi.Controller.api_version(mv.VOLUME_REVERT)
@wsgi.action('revert')
def revert(self, req, id, body):
"""revert a volume to a snapshot"""
context = req.environ['cinder.context']
self.assert_valid_body(body, 'revert')
snapshot_id = body['revert'].get('snapshot_id')
volume = self.volume_api.get_volume(context, id)
try:
l_snap = volume.get_latest_snapshot()
except exception.VolumeSnapshotNotFound:
msg = _("Volume %s doesn't have any snapshots.")
raise exc.HTTPBadRequest(explanation=msg % volume.id)
# Ensure volume and snapshot match.
if snapshot_id is None or snapshot_id != l_snap.id:
msg = _("Specified snapshot %(s_id)s is None or not "
"the latest one of volume %(v_id)s.")
raise exc.HTTPBadRequest(explanation=msg % {'s_id': snapshot_id,
'v_id': volume.id})
try:
msg = 'Reverting volume %(v_id)s to snapshot %(s_id)s.'
LOG.info(msg, {'v_id': volume.id,
's_id': l_snap.id})
self.volume_api.revert_to_snapshot(context, volume, l_snap)
except (exception.InvalidVolume, exception.InvalidSnapshot) as e:
raise exc.HTTPConflict(explanation=six.text_type(e))
except exception.VolumeSizeExceedsAvailableQuota as e:
raise exc.HTTPForbidden(explanation=six.text_type(e))
def _get_image_snapshot(self, context, image_uuid):
image_snapshot = None
if image_uuid:
image_service = glance.get_default_image_service()
image_meta = image_service.show(context, image_uuid)
if image_meta is not None:
bdms = image_meta.get('properties', {}).get(
'block_device_mapping', [])
if bdms:
boot_bdm = [bdm for bdm in bdms if (
bdm.get('source_type') == 'snapshot' and
bdm.get('boot_index') == 0)]
if boot_bdm:
try:
image_snapshot = self.volume_api.get_snapshot(
context, boot_bdm[0].get('snapshot_id'))
return image_snapshot
except exception.NotFound:
explanation = _(
'Nova specific image is found, but boot '
'volume snapshot id:%s not found.'
) % boot_bdm[0].get('snapshot_id')
raise exc.HTTPNotFound(explanation=explanation)
return image_snapshot
@wsgi.response(http_client.ACCEPTED)
def create(self, req, body):
"""Creates a new volume.
:param req: the request
:param body: the request body
:returns: dict -- the new volume dictionary
:raises HTTPNotFound, HTTPBadRequest:
"""
self.assert_valid_body(body, 'volume')
LOG.debug('Create volume request body: %s', body)
context = req.environ['cinder.context']
req_version = req.api_version_request
# Remove group_id from body if max version is less than GROUP_VOLUME.
if req_version.matches(None, mv.get_prior_version(mv.GROUP_VOLUME)):
# NOTE(xyang): The group_id is from a group created with a
# group_type. So with this group_id, we've got a group_type
# for this volume. Also if group_id is passed in, that means
# we already know which backend is hosting the group and the
# volume will be created on the same backend as well. So it
# won't go through the scheduler again if a group_id is
# passed in.
try:
body.get('volume', {}).pop('group_id', None)
except AttributeError:
msg = (_("Invalid body provided for creating volume. "
"Request API version: %s.") % req_version)
raise exc.HTTPBadRequest(explanation=msg)
volume = body['volume']
kwargs = {}
self.validate_name_and_description(volume)
# Check up front for legacy replication parameters to quick fail
source_replica = volume.get('source_replica')
if source_replica:
msg = _("Creating a volume from a replica source was part of the "
"replication v1 implementation which is no longer "
"available.")
raise exception.InvalidInput(reason=msg)
# NOTE(thingee): v2 API allows name instead of display_name
if 'name' in volume:
volume['display_name'] = volume.pop('name')
# NOTE(thingee): v2 API allows description instead of
# display_description
if 'description' in volume:
volume['display_description'] = volume.pop('description')
if 'image_id' in volume:
volume['imageRef'] = volume.pop('image_id')
req_volume_type = volume.get('volume_type', None)
if req_volume_type:
# Not found exception will be handled at the wsgi level
kwargs['volume_type'] = (
objects.VolumeType.get_by_name_or_id(context, req_volume_type))
kwargs['metadata'] = volume.get('metadata', None)
snapshot_id = volume.get('snapshot_id')
if snapshot_id is not None:
if not uuidutils.is_uuid_like(snapshot_id):
msg = _("Snapshot ID must be in UUID form.")
raise exc.HTTPBadRequest(explanation=msg)
# Not found exception will be handled at the wsgi level
kwargs['snapshot'] = self.volume_api.get_snapshot(context,
snapshot_id)
else:
kwargs['snapshot'] = None
source_volid = volume.get('source_volid')
if source_volid is not None:
if not uuidutils.is_uuid_like(source_volid):
msg = _("Source volume ID '%s' must be a "
"valid UUID.") % source_volid
raise exc.HTTPBadRequest(explanation=msg)
# Not found exception will be handled at the wsgi level
kwargs['source_volume'] = (
self.volume_api.get_volume(context,
source_volid))
else:
kwargs['source_volume'] = None
kwargs['group'] = None
kwargs['consistencygroup'] = None
consistencygroup_id = volume.get('consistencygroup_id')
if consistencygroup_id is not None:
if not uuidutils.is_uuid_like(consistencygroup_id):
msg = _("Consistency group ID '%s' must be a "
"valid UUID.") % consistencygroup_id
raise exc.HTTPBadRequest(explanation=msg)
# Not found exception will be handled at the wsgi level
kwargs['group'] = self.group_api.get(context, consistencygroup_id)
# Get group_id if volume is in a group.
group_id = volume.get('group_id')
if group_id is not None:
# Not found exception will be handled at the wsgi level
kwargs['group'] = self.group_api.get(context, group_id)
if self.ext_mgr.is_loaded('os-image-create'):
image_ref = volume.get('imageRef')
if image_ref is not None:
image_uuid = self._image_uuid_from_ref(image_ref, context)
image_snapshot = self._get_image_snapshot(context, image_uuid)
if (req_version.matches(mv.get_api_version(
mv.SUPPORT_NOVA_IMAGE)) and image_snapshot):
kwargs['snapshot'] = image_snapshot
else:
kwargs['image_id'] = image_uuid
# Add backup if min version is greater than or equal
# to VOLUME_CREATE_FROM_BACKUP.
if req_version.matches(mv.VOLUME_CREATE_FROM_BACKUP, None):
backup_id = volume.get('backup_id')
if backup_id:
if not uuidutils.is_uuid_like(backup_id):
msg = _("Backup ID must be in UUID form.")
raise exc.HTTPBadRequest(explanation=msg)
kwargs['backup'] = self.backup_api.get(context,
backup_id=backup_id)
else:
kwargs['backup'] = None
size = volume.get('size', None)
if size is None and kwargs['snapshot'] is not None:
size = kwargs['snapshot']['volume_size']
elif size is None and kwargs['source_volume'] is not None:
size = kwargs['source_volume']['size']
elif size is None and kwargs.get('backup') is not None:
size = kwargs['backup']['size']
LOG.info("Create volume of %s GB", size)
kwargs['availability_zone'] = volume.get('availability_zone', None)
kwargs['scheduler_hints'] = volume.get('scheduler_hints', None)
multiattach = volume.get('multiattach', False)
kwargs['multiattach'] = multiattach
if multiattach:
msg = ("The option 'multiattach' "
"is deprecated and will be removed in a future "
"release. The default behavior going forward will "
"be to specify multiattach enabled volume types.")
versionutils.report_deprecated_feature(LOG, msg)
new_volume = self.volume_api.create(context,
size,
volume.get('display_name'),
volume.get('display_description'),
**kwargs)
retval = self._view_builder.detail(req, new_volume)
return retval
def create_resource(ext_mgr):
return wsgi.Resource(VolumeController(ext_mgr))
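# A self-contained sketch (not cinder code) of the size-resolution order used
# in create() above: an explicit size wins, then the snapshot's volume size,
# then the source volume's size, then the backup's size. Key names are
# simplified to 'size' for illustration (the real snapshot key is
# 'volume_size').
def _resolve_size_sketch(requested, snapshot=None, source_volume=None,
                         backup=None):
    if requested is not None:
        return requested
    for candidate in (snapshot, source_volume, backup):
        if candidate is not None:
            return candidate['size']
    return None
# e.g. _resolve_size_sketch(None, snapshot={'size': 10}) -> 10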
|
apache-2.0
| 5,036,993,927,993,056,000
| 40.958763
| 79
| 0.565295
| false
| 4.378698
| false
| false
| false
|
JasonFruit/quartermaster
|
inventory.py
|
1
|
11732
|
import os
from sqlite3 import connect
import codecs
import math
from datetime import datetime, timedelta
from dateutil.parser import parse
from glob import glob
class Report(object):
def __init__(self, filename=None):
if filename:
with codecs.open(filename, "r", "utf-8") as f:
lines = f.readlines()
i = 0
header_lines = []
while lines[i].startswith("--"):
header_lines.append(lines[i].strip(" -"))
i += 1
self.title = header_lines[0].strip()
self.description = "\n".join(header_lines[1:]).strip(" \n")
self.sql = "\n".join(map(lambda s: s.strip(" \n"),
lines[i:]))
else:
self.title = ""
self.description = ""
self.sql = ""
def to_dict(self):
return {"title": self.title,
"description": self.description,
"sql": self.sql}
    @staticmethod
    def from_dict(dic):
rpt = Report()
rpt.title = dic["title"]
rpt.description = dic["description"]
rpt.sql = dic["sql"]
return rpt
class Measurement(object):
"""Represents a numeric measurement with a unit"""
def __init__(self, number, unit):
self.number = number
self.unit = unit
def __repr__(self):
return self.to_string()
def to_string(self):
# pluralize unit if needed
if self.number == 1:
return "%s %s" % (self.number, self.unit)
else:
if self.unit == "each":
return "%s %s" % (self.number, self.unit)
else:
return "%s %ss" % (self.number, self.unit)
def __lt__(self, other):
if self.unit == other.unit:
return self.number < other.number
else:
return self.unit < other.unit
def __gt__(self, other):
if self.unit == other.unit:
return self.number > other.number
else:
return self.unit > other.unit
def __eq__(self, other):
if self.unit == other.unit:
return ((self.number == other.number) and
(self.unit == other.unit))
return False
def __le__(self, other):
if self.unit == other.unit:
return self.number <= other.number
else:
return self.unit <= other.unit
def __ge__(self, other):
if self.unit == other.unit:
return self.number >= other.number
else:
return self.unit >= other.unit
def __ne__(self, other):
return not self.__eq__(other)
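# Illustrative use of Measurement (values are hypothetical): comparisons only
# order meaningfully within a single unit, and to_string() pluralizes.
#
#   Measurement(1, "pound").to_string() -> '1 pound'
#   Measurement(3, "pound").to_string() -> '3 pounds'
#   Measurement(2, "pound") < Measurement(5, "pound") -> True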
# SQL to add a new inventory item
add_inventory_sql = """insert into item (
condition_id,
item,
weight,
weight_unit_id,
life,
life_unit_id,
record_type_id,
purchase_date,
expiration_date)
values (?, ?, ?, ?, ?, ?, ?, ?, ?);"""
# SQL to update an existing item
save_inventory_sql = """update item set
condition_id = ?,
item = ?,
weight = ?,
weight_unit_id = ?,
life = ?,
life_unit_id = ?,
purchase_date = ?,
expiration_date = ?
where id = ?"""
delete_sql = "delete from item where id = ?"
# SQL to return all inventory of a specific record type
inventory_sql = """
select i.id as id,
c.description as condition,
item as description,
weight,
wu.unit as weight_unit,
life,
lu.unit as life_unit,
rt.description as record_type,
purchase_date
from item i
inner join condition c
on i.condition_id = c.id
inner join unit wu
on i.weight_unit_id = wu.id
inner join unit lu
on i.life_unit_id = lu.id
inner join recordtype rt
on i.record_type_id = rt.id
where i.record_type_id = ?
order by purchase_date desc"""
class InventoryItem(object):
"""Represents an item of inventory (or a goal, or a ration recommendation)"""
def __init__(self,
id,
condition,
description,
amount,
life,
purchase_date):
self.id = id
self.condition = condition
self.description = description
self.amount = amount
self.life = life
# make sure the purchase date is an actual datetime
if type(purchase_date) == str:
self.purchase_date = parse(purchase_date)
else:
self.purchase_date = purchase_date
def clone(self, as_type="inventory"):
"""Copy this item to a new one with no ID as a specified type. TODO:
as_type is ignored. Fix it."""
item = InventoryItem(None,
self.condition,
self.description,
self.amount,
self.life,
datetime.today())
return item
@property
def expiration_date(self):
"""Return the expiration date calculated from the purchase date and
the item's life"""
# can't if we don't know when it was bought
if not self.purchase_date:
return None
if self.life.unit == "year":
return datetime(self.purchase_date.year + self.life.number,
self.purchase_date.month,
self.purchase_date.day)
elif self.life.unit == "month":
years = math.floor(self.life.number / 12) + self.purchase_date.year
months = self.life.number % 12 + self.purchase_date.month
while months > 12:
years += 1
months -= 12
return datetime(years,
months,
self.purchase_date.day)
elif self.life.unit == "day":
return self.purchase_date + timedelta(self.life.number)
def to_string(self):
if self.condition.strip() != "":
return "%s (%s), %s" % (self.description,
self.condition,
self.amount.to_string())
else:
return "%s, %s" % (self.description,
self.amount.to_string())
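# Worked example of the month arithmetic in expiration_date (hypothetical
# values): an item purchased 2020-10-15 with life Measurement(18, "month")
# gives years = floor(18/12) + 2020 = 2021 and months = 18 % 12 + 10 = 16;
# the while loop rolls that over to years = 2022, months = 4, so the result
# is datetime(2022, 4, 15).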
class InventoryDB(object):
"""Manages storage of inventory, goal, and recommendation records"""
def __init__(self, path):
self.filename = path
# read the SQL to create a database
with codecs.open("sql/create-db.sql", "r", "utf-8") as f:
self.create_sql = f.read()
# read the SQL to add goals
with codecs.open("sql/goal.sql", "r", "utf-8") as f:
self.goal_sql = f.read()
# if the database specified exists, connect
if os.path.exists(path):
self.conn = connect(path)
self.cur = self.conn.cursor()
else: # otherwise, create it
self.conn = connect(path)
self.cur = self.conn.cursor()
self.cur.executescript(self.create_sql)
self.conn.commit()
# cache some invariable data
self.record_types = {}
self.cur.execute("select id, description from recordtype")
for row in self.cur.fetchall():
self.record_types[row[1]] = row[0]
self.conditions = {}
self.cur.execute("select id, description from condition")
for row in self.cur.fetchall():
self.conditions[row[1]] = row[0]
self.amounts = {}
self.cur.execute("select id, unit from unit where dimension = 'amount'")
for row in self.cur.fetchall():
self.amounts[row[1]] = row[0]
self.durations = {}
self.cur.execute("select id, unit from unit where dimension = 'time'")
for row in self.cur.fetchall():
self.durations[row[1]] = row[0]
self.ration_multipliers = {}
self.cur.execute("select description, multiplier from ration_multipliers")
for row in self.cur.fetchall():
self.ration_multipliers[row[0]] = row[1]
def set_goals(self, mult):
"""Set goals by multiplying the recommendation for an adult male by
<mult>"""
# remove any existing goals
self.cur.execute("delete from item where record_type_id = ?",
(self.record_types["goal"],))
# create new ones
self.cur.execute(self.goal_sql, (mult,))
self.conn.commit()
def save_inventory(self, item):
"""Save an altered inventory item to the database"""
# get the IDs for units from cached data
amount, amount_id = item.amount.number, self.amounts[item.amount.unit]
life, life_id = item.life.number, self.durations[item.life.unit]
condition_id = self.conditions[item.condition]
self.cur.execute(save_inventory_sql,
(condition_id,
item.description,
amount,
amount_id,
life,
life_id,
item.purchase_date,
item.expiration_date,
item.id))
self.conn.commit()
def add_inventory(self, item, record_type="inventory"):
"""Save a new inventory item to the database"""
# get the IDs for units from cached data
amount, amount_id = item.amount.number, self.amounts[item.amount.unit]
life, life_id = item.life.number, self.durations[item.life.unit]
rec_type_id = self.record_types[record_type]
condition_id = self.conditions[item.condition]
self.cur.execute(add_inventory_sql,
(condition_id,
item.description,
amount,
amount_id,
life,
life_id,
rec_type_id,
item.purchase_date,
item.expiration_date))
self.conn.commit()
# update the item's ID with the new row ID
item.id = self.cur.lastrowid
def all_inventory(self, record_type=None):
"""Return all items of the specified type (or "inventory" if not
specified)"""
if not record_type:
record_type = "inventory"
record_type_id = self.record_types[record_type]
self.cur.execute(inventory_sql, (record_type_id,))
output = []
# just too involved to do as a list comprehension because
# Python lambdas blow chunks
for row in self.cur.fetchall():
(id,
condition,
description,
amount,
amount_unit,
life,
life_unit,
record_type,
purchase_date) = row
amount = Measurement(amount, amount_unit)
life = Measurement(life, life_unit)
output.append(InventoryItem(id, condition, description, amount, life, purchase_date))
return output
def execute_no_commit(self, sql):
"""Execute SQL against the current database on a connection that is
never committed (to avoid malicious or accidental updating or
deletion; return the column headers and the data"""
conn = connect(self.filename)
cur = conn.cursor()
cur.execute(sql)
columns = [dsc[0]
for dsc in cur.description]
output = cur.fetchall()
conn.close()
return columns, output
def delete_item(self, item):
self.cur.execute(delete_sql, (item.id,))
self.conn.commit()
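# A sketch of running a saved Report against the database via the read-only
# helper above (file names are hypothetical):
#
#   db = InventoryDB("pantry.db")
#   rpt = Report("reports/expiring.sql")
#   columns, rows = db.execute_no_commit(rpt.sql)
#   print(columns)
#   for row in rows:
#       print(row)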
|
gpl-3.0
| 5,854,646,780,610,648,000
| 30.708108
| 97
| 0.528299
| false
| 4.238439
| false
| false
| false
|
bd808/tools-stashbot
|
stashbot/bot.py
|
1
|
11661
|
# -*- coding: utf-8 -*-
#
# This file is part of bd808's stashbot application
# Copyright (C) 2015 Bryan Davis and contributors
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
"""IRC bot"""
import collections
import functools
import irc.bot
import irc.buffer
import irc.client
import irc.strings
import re
import time
from . import es
from . import phab
from . import sal
RE_PHAB_NOURL = re.compile(r'(?:^|[^/%])\b([DMT]\d+)\b')
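# The leading (?:^|[^/%]) skips task ids embedded in URLs or percent-encoded
# text; illustrative matches:
#   RE_PHAB_NOURL.findall('please look at T12345 and D67') -> ['T12345', 'D67']
#   RE_PHAB_NOURL.findall('https://phab.example/T12345') -> [] (preceded by '/')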
class Stashbot(irc.bot.SingleServerIRCBot):
def __init__(self, config, logger):
"""Create bot.
:param config: Dict of configuration values
:param logger: Logger
"""
self.config = config
self.logger = logger
self.es = es.Client(
self.config['elasticsearch']['servers'],
self.config['elasticsearch']['options'],
self.logger
)
self.phab = phab.Client(
self.config['phab']['url'],
self.config['phab']['user'],
self.config['phab']['key']
)
self.sal = sal.Logger(
self, self.phab, self.es, self.config, self.logger)
self.recent_phab = collections.defaultdict(dict)
# Ugh. A UTF-8 only world is a nice dream but the real world is all
# yucky and full of legacy encoding issues that should not crash my
# bot.
irc.buffer.LenientDecodingLineBuffer.errors = 'replace'
irc.client.ServerConnection.buffer_class = \
irc.buffer.LenientDecodingLineBuffer
super(Stashbot, self).__init__(
[(self.config['irc']['server'], self.config['irc']['port'])],
self.config['irc']['nick'],
self.config['irc']['realname']
)
# Setup a connection check ping
self.pings = 0
self.reactor.scheduler.execute_every(
period=300, func=self.do_ping)
# Clean phab recent cache every once in a while
self.reactor.scheduler.execute_every(
period=3600, func=self.do_clean_recent_phab)
def get_version(self):
return 'Stashbot'
def on_welcome(self, conn, event):
self.logger.info('Connected to server %s', conn.get_server_name())
if 'password' in self.config['irc']:
self.do_identify()
else:
self.reactor.scheduler.execute_after(1, self.do_join)
def on_nicknameinuse(self, conn, event):
nick = conn.get_nickname()
self.logger.warning('Requested nick "%s" in use', nick)
conn.nick(nick + '_')
if 'password' in self.config['irc']:
self.reactor.scheduler.execute_after(30, self.do_reclaim_nick)
def on_join(self, conn, event):
nick = event.source.nick
if nick == conn.get_nickname():
self.logger.info('Joined %s', event.target)
def on_privnotice(self, conn, event):
self.logger.warning(str(event))
msg = event.arguments[0]
if event.source.nick == 'NickServ':
if 'NickServ identify' in msg:
self.logger.info('Authentication requested by Nickserv')
if 'password' in self.config['irc']:
self.do_identify()
else:
self.logger.error('No password in config!')
self.die()
elif 'You are now identified' in msg:
self.logger.debug('Authenticating succeeded')
self.reactor.scheduler.execute_after(1, self.do_join)
elif 'Invalid password' in msg:
self.logger.error('Password invalid. Check your config!')
self.die()
def on_pubnotice(self, conn, event):
self.logger.warning(str(event))
def on_pubmsg(self, conn, event):
# Log all public channel messages we receive
doc = self.es.event_to_doc(conn, event)
self.do_write_to_elasticsearch(conn, event, doc)
ignore = self.config['irc'].get('ignore', [])
if self._clean_nick(doc['nick']) in ignore:
return
# Look for special messages
msg = event.arguments[0]
if msg.startswith('!log help'):
self.do_help(conn, event)
elif msg.startswith(conn.get_nickname()):
self.do_help(conn, event)
elif msg.startswith(self.config['irc']['nick']):
self.do_help(conn, event)
elif msg.startswith('!log '):
self.sal.log(conn, event, doc)
elif msg.startswith('!bash '):
self.do_bash(conn, event, doc)
if (event.target not in self.config['phab'].get('notin', []) and
'echo' in self.config['phab'] and
RE_PHAB_NOURL.search(msg)
):
self.do_phabecho(conn, event, doc)
def on_privmsg(self, conn, event):
msg = event.arguments[0]
if msg.startswith('!bash '):
doc = self.es.event_to_doc(conn, event)
self.do_bash(conn, event, doc)
else:
self.respond(conn, event, event.arguments[0][::-1])
def on_pong(self, conn, event):
"""Clear ping count when a pong is received."""
self.pings = 0
def on_error(self, conn, event):
"""Log errors and disconnect."""
self.logger.warning(str(event))
conn.disconnect()
def on_kick(self, conn, event):
"""Attempt to rejoin if kicked from a channel."""
nick = event.arguments[0]
channel = event.target
if nick == conn.get_nickname():
            self.logger.warning(
'Kicked from %s by %s', channel, event.source.nick)
self.reactor.scheduler.execute_after(
30, functools.partial(conn.join, channel))
def on_bannedfromchan(self, conn, event):
"""Attempt to rejoin if banned from a channel."""
self.logger.warning(str(event))
self.reactor.scheduler.execute_after(
60, functools.partial(conn.join, event.arguments[0]))
def do_identify(self):
"""Send NickServ our username and password."""
        self.logger.info('Identifying with NickServ')
self.connection.privmsg('NickServ', 'identify %s %s' % (
self.config['irc']['nick'], self.config['irc']['password']))
def do_join(self, channels=None):
"""Join the next channel in our join list."""
if channels is None:
channels = self.config['irc']['channels']
try:
car, cdr = channels[0], channels[1:]
except (IndexError, TypeError):
self.logger.exception('Failed to find channel to join.')
else:
self.logger.info('Joining %s', car)
self.connection.join(car)
if cdr:
self.reactor.scheduler.execute_after(
1, functools.partial(self.do_join, cdr))
def do_reclaim_nick(self):
nick = self.connection.get_nickname()
if nick != self.config['irc']['nick']:
self.connection.nick(self.config['irc']['nick'])
def do_ping(self):
"""Send a ping or disconnect if too many pings are outstanding."""
if self.pings >= 2:
self.logger.warning('Connection timed out. Disconnecting.')
self.disconnect()
self.pings = 0
else:
try:
self.connection.ping('keep-alive')
self.pings += 1
except irc.client.ServerNotConnectedError:
pass
def do_write_to_elasticsearch(self, conn, event, doc):
"""Log an IRC channel message to Elasticsearch."""
fmt = self.config['elasticsearch']['index']
self.es.index(
index=time.strftime(fmt, time.gmtime()),
doc_type='irc', body=doc)
def do_help(self, conn, event):
"""Handle a help message request"""
self.respond(
conn, event,
'See https://wikitech.wikimedia.org/wiki/Tool:Stashbot for help.'
)
def do_bash(self, conn, event, doc):
"""Process a !bash message"""
bash = dict(doc)
# Trim '!bash ' from the front of the message
msg = bash['message'][6:]
# Expand tabs to line breaks
bash['message'] = msg.replace("\t", "\n").strip()
bash['type'] = 'bash'
bash['up_votes'] = 0
bash['down_votes'] = 0
bash['score'] = 0
# Remove unneeded irc fields
del bash['user']
del bash['channel']
del bash['server']
del bash['host']
ret = self.es.index(index='bash', doc_type='bash', body=bash)
if 'created' in ret and ret['created'] is True:
self.respond(conn, event,
'%s: Stored quip at %s' % (
event.source.nick,
self.config['bash']['view_url'] % ret['_id']
)
)
else:
self.logger.error('Failed to save document: %s', ret)
self.respond(conn, event,
'%s: Yuck. Something blew up when I tried to save that.' % (
event.source.nick,
)
)
def do_phabecho(self, conn, event, doc):
"""Give links to Phabricator tasks"""
channel = event.target
now = time.time()
cutoff = self.get_phab_echo_cutoff(channel)
for task in set(RE_PHAB_NOURL.findall(doc['message'])):
if task in self.recent_phab[channel]:
if self.recent_phab[channel][task] > cutoff:
# Don't spam a channel with links
self.logger.debug(
'Ignoring %s; last seen @%d',
task, self.recent_phab[channel][task])
continue
try:
info = self.phab.taskInfo(task)
            except Exception:
self.logger.exception('Failed to lookup info for %s', task)
else:
self.respond(conn, event, self.config['phab']['echo'] % info)
self.recent_phab[channel][task] = now
def get_phab_echo_cutoff(self, channel):
"""Get phab echo delay for the given channel."""
return time.time() - self.config['phab']['delay'].get(
channel, self.config['phab']['delay']['__default__'])
def do_clean_recent_phab(self):
"""Clean old items out of the recent_phab cache."""
for channel in self.recent_phab.keys():
cutoff = self.get_phab_echo_cutoff(channel)
            # copy the keys so entries can be deleted while iterating
            # (safe on Python 3 as well)
            for item in list(self.recent_phab[channel].keys()):
if self.recent_phab[channel][item] < cutoff:
del self.recent_phab[channel][item]
def _clean_nick(self, nick):
"""Remove common status indicators and normlize to lower case."""
return nick.split('|', 1)[0].rstrip('`_').lower()
def respond(self, conn, event, msg):
"""Respond to an event with a message."""
to = event.target
if to == self.connection.get_nickname():
to = event.source.nick
conn.privmsg(to, msg.replace("\n", ' '))
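# The car/cdr recursion in do_join() above generalizes to a small staggered
# scheduler pattern; a sketch (names are illustrative):
#
#   def staggered(items, action, scheduler, delay=1):
#       if not items:
#           return
#       action(items[0])
#       scheduler.execute_after(
#           delay, functools.partial(staggered, items[1:], action, scheduler))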
|
gpl-3.0
| -687,042,839,125,175,300
| 34.990741
| 78
| 0.565646
| false
| 3.958248
| true
| false
| false
|
jpypi/othello-rl
|
nn.py
|
1
|
3616
|
#!/usr/bin/env python3
import math
import numpy as np
import random
import pickle
from scipy.special import expit
class NN:
def __init__(self, layer_dims, learning_rate):
self.learning_rate = learning_rate
self.layer_dims = layer_dims
self.layers = []
for i in range(len(layer_dims)-1):
self.layers.append(np.random.normal(0, 1, size=(layer_dims[i+1], layer_dims[i]+1)))
self.activation_func = None
self.dactivation_func = None
def save(self, filename):
with open(filename, "wb") as f:
pickle.dump(self.layers, f)
def load(self, filename):
with open(filename, "rb") as f:
self.layers = pickle.load(f)
def mkVec(self, vector1D, add_bias = True):
return np.reshape(vector1D, (len(vector1D), 1))
def getOutput(self, input_vector):
outputs = input_vector
for i in range(len(self.layers)):
outputs = activation(self.layers[i]@np.vstack((outputs, 1)))
#outputs = softmax(self.layers[-1]@np.vstack((outputs, 1)))
return outputs
def backProp(self, sample, target):
# Propagate forwards to get the network's layers' outputs
outputs = [sample]
for i in range(len(self.layers)):
outputs.append(activation(self.layers[i].dot(np.vstack((outputs[i], 1)))))
#outputs.append(softmax(self.layers[-1].dot(np.vstack((outputs[-1], 1)))))
#print(outputs[-1])
#final_out = self.layers[-1].dot(outputs[-1])
#am = np.zeros_like(final_out)
#am[np.argmax(final_out)] = 1
#outputs.append(am)
# These will still need to be multiplied by the output from the previous layer
# e.g. layer_deltas[0]*outputs[-2]
layer_deltas = np.empty(len(outputs) - 1, object)
# Output layer is special
layer_deltas[-1] = (target - outputs[-1]) * dactivation(outputs[-1]) #outputs[-1]*(1 - outputs[-1])
#self.layers[-1] += self.learning_rate * np.c_[outputs[-2].T, 1] * layer_deltas[-1]
# i == current layer; Walk backwards from second to last layer (Hence
# start at -2, because len-1 is the last element) Also recall that
# range "end" is exclusive.
for i in range(len(layer_deltas) - 2, -1, -1):
# Need to do i+1 because outputs[0] == the input sample, and i+1 is
# the ith layer's output
#layer_derivative = outputs[i+1] * (1 - outputs[i+1])
layer_derivative = dactivation(outputs[i+1])
# Compute the layer delta
layer_deltas[i] = layer_derivative * (self.layers[i+1].T.dot(layer_deltas[i + 1])[:-1])
# Update the weights
#self.layers[i] += self.learning_rate * np.c_[outputs[i].T, 1] * layer_deltas[i]
for i in range(len(self.layers)):
# Because outputs[0] == input sample, layer[i] input == outputs[i]
# This is delta_weights
self.layers[i] += self.learning_rate * np.c_[outputs[i].T, 1] * layer_deltas[i]
return outputs[-1]
def relu(x):
return np.multiply(x > 0, x)
def drelu(x):
return np.float64(x > 0)
def softmax(x):
e = np.exp(x)
return e/np.sum(e)
def activation(x):
#return expit(x)
##return 1.7159 * math.tanh(2/3*x)
#print(x)
return np.tanh(x)#list(map(math.tanh, x))
#return np.multiply(x > 0, x)
def dactivation(x):
    # Both call sites pass the layer's *output* (already tanh-activated), so
    # the tanh derivative is expressed in terms of that output:
    # d/dz tanh(z) = 1 - tanh(z)**2 = 1 - x**2 when x = tanh(z).
    # (The previous `1 - np.tanh(x)**2` applied tanh a second time, giving a
    # wrong gradient.)
    return 1 - x**2
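# A minimal smoke test added for illustration (hyperparameters are arbitrary):
# train on XOR with +/-1 targets, which suit the tanh activation above.
if __name__ == "__main__":
    net = NN([2, 3, 1], learning_rate=0.1)
    data = [([0, 0], -1.0), ([0, 1], 1.0), ([1, 0], 1.0), ([1, 1], -1.0)]
    for _ in range(5000):
        x, t = data[np.random.randint(4)]
        net.backProp(net.mkVec(x), np.array([[t]]))
    for x, t in data:
        print(x, t, float(net.getOutput(net.mkVec(x))))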
|
mit
| -8,640,016,886,223,186,000
| 30.719298
| 107
| 0.583794
| false
| 3.263538
| false
| false
| false
|
DamienIrving/ocean-analysis
|
visualisation/plot_zonal_ensemble.py
|
1
|
16584
|
"""
Filename: plot_zonal_ensemble.py
Author: Damien Irving, irving.damien@gmail.com
Description: Plot zonal ensemble
"""
# Import general Python modules
import sys, os, pdb
import argparse
from itertools import groupby
from more_itertools import unique_everseen
import numpy
import iris
from iris.experimental.equalise_cubes import equalise_attributes
import iris.plot as iplt
import matplotlib.pyplot as plt
from matplotlib import gridspec
import seaborn
import matplotlib as mpl
# Import my modules
cwd = os.getcwd()
repo_dir = '/'
for directory in cwd.split('/')[1:]:
repo_dir = os.path.join(repo_dir, directory)
if directory == 'ocean-analysis':
break
modules_dir = os.path.join(repo_dir, 'modules')
sys.path.append(modules_dir)
try:
import general_io as gio
import timeseries
import grids
import convenient_universal as uconv
except ImportError:
raise ImportError('Must run this script from anywhere within the ocean-analysis git repo')
# Define functions
experiment_colors = {'historical': 'black', 'historicalGHG': 'red',
'historicalAA': 'blue', 'GHG + AA': 'green',
'piControl': '0.5'}
experiment_labels = {'historical': 'historical', 'historicalGHG': 'GHG-only',
'historicalAA': 'AA-only', 'piControl': 'control'}
seaborn.set(style='whitegrid')
mpl.rcParams['axes.labelsize'] = 'xx-large'
mpl.rcParams['axes.titlesize'] = 'xx-large'
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['legend.fontsize'] = 'xx-large'
def make_zonal_grid():
"""Make a dummy cube with desired grid."""
lat_values = numpy.arange(-90, 91.5, 1.5)
latitude = iris.coords.DimCoord(lat_values,
standard_name='latitude',
units='degrees_north',
coord_system=iris.coord_systems.GeogCS(iris.fileformats.pp.EARTH_RADIUS))
dummy_data = numpy.zeros((len(lat_values)))
new_cube = iris.cube.Cube(dummy_data, dim_coords_and_dims=[(latitude, 0),])
new_cube.coord('latitude').guess_bounds()
return new_cube
def calc_trend_cube(cube):
"""Calculate trend and put into appropriate cube."""
trend_array = timeseries.calc_trend(cube, per_yr=True)
new_cube = cube[0,:].copy()
new_cube.remove_coord('time')
new_cube.data = trend_array
return new_cube
def get_colors(family_list):
"""Define a color for each model/physics combo"""
nfamilies = len(family_list)
cm = plt.get_cmap('nipy_spectral')
colors = [cm(1. * i / (nfamilies + 1)) for i in range(nfamilies + 1)]
color_dict = {}
count = 1 # skips the first color, which is black
for family in family_list:
color_dict[family] = colors[count]
count = count + 1
return color_dict
def get_ylabel(cube, time_agg, inargs):
"""get the y axis label"""
if str(cube.units) == 'kg m-2 s-1':
ylabel = '$kg \: m^{-2} \: s^{-1}'
else:
ylabel = '$%s' %(str(cube.units))
if inargs.perlat:
ylabel = ylabel + ' \: lat^{-1}'
if time_agg == 'trend':
ylabel = ylabel + ' \: yr^{-1}'
ylabel = time_agg + ' (' + ylabel + '$)'
return ylabel
def get_line_width(realization, model):
"""Get the line width"""
if model == 'FGOALS-g2':
lw = 2.0
else:
lw = 2.0 if realization == 'r1' else 0.5
return lw
def plot_individual(data_dict, color_dict):
"""Plot the individual model data"""
for key, cube in data_dict.items():
model, physics, realization = key
if (realization == 'r1') or (model == 'FGOALS-g2'):
label = model + ', ' + physics
else:
label = None
lw = 0.5 #get_line_width(realization, model)
iplt.plot(cube, label=label, color=color_dict[(model, physics)], linewidth=lw)
def plot_ensmean(data_dict, experiment, nexperiments,
single_run=False, linestyle='-', linewidth=2.0):
"""Plot the ensemble mean.
If single_run is true, the ensemble is calculated using
only the first run from each model/physics family.
"""
target_grid = make_zonal_grid()
regridded_cube_list = iris.cube.CubeList([])
count = 0
for key, cube in data_dict.items():
model, physics, realization = key
if not single_run or ((realization == 'r1') or (model == 'FGOALS-g2')):
regridded_cube = grids.regrid_1D(cube, target_grid, 'latitude')
new_aux_coord = iris.coords.AuxCoord(count, long_name='ensemble_member', units='no_unit')
regridded_cube.add_aux_coord(new_aux_coord)
regridded_cube.cell_methods = None
regridded_cube.data = regridded_cube.data.astype(numpy.float64)
regridded_cube_list.append(regridded_cube)
count = count + 1
if len(regridded_cube_list) > 1:
equalise_attributes(regridded_cube_list)
ensemble_cube = regridded_cube_list.merge_cube()
ensemble_mean = ensemble_cube.collapsed('ensemble_member', iris.analysis.MEAN)
else:
ensemble_mean = regridded_cube_list[0]
label, color = get_ensemble_label_color(experiment, nexperiments, count, single_run)
iplt.plot(ensemble_mean, label=label, color=color, linestyle=linestyle, linewidth=linewidth)
return ensemble_mean
def get_ensemble_label_color(experiment, nexperiments, ensemble_size, single_run):
"""Get the line label and color."""
label = experiment_labels[experiment]
color = experiment_colors[experiment]
return label, color
def group_runs(data_dict):
"""Find unique model/physics groups"""
all_info = data_dict.keys()
model_physics_list = []
for key, group in groupby(all_info, lambda x: x[0:2]):
model_physics_list.append(key)
family_list = list(unique_everseen(model_physics_list))
return family_list
def read_data(inargs, infiles, ref_cube=None):
"""Read data."""
clim_dict = {}
trend_dict = {}
for filenum, infile in enumerate(infiles):
cube = iris.load_cube(infile, gio.check_iris_var(inargs.var))
if ref_cube:
branch_time = None if inargs.branch_times[filenum] == 'default' else str(inargs.branch_times[filenum])
time_constraint = timeseries.get_control_time_constraint(cube, ref_cube, inargs.time, branch_time=branch_time)
cube = cube.extract(time_constraint)
iris.util.unify_time_units([ref_cube, cube])
cube.coord('time').units = ref_cube.coord('time').units
cube.replace_coord(ref_cube.coord('time'))
else:
time_constraint = gio.get_time_constraint(inargs.time)
cube = cube.extract(time_constraint)
#cube = uconv.convert_to_joules(cube)
if inargs.perlat:
grid_spacing = grids.get_grid_spacing(cube)
cube.data = cube.data / grid_spacing
trend_cube = calc_trend_cube(cube.copy())
clim_cube = cube.collapsed('time', iris.analysis.MEAN)
clim_cube.remove_coord('time')
model = cube.attributes['model_id']
realization = 'r' + str(cube.attributes['realization'])
physics = 'p' + str(cube.attributes['physics_version'])
key = (model, physics, realization)
trend_dict[key] = trend_cube
clim_dict[key] = clim_cube
experiment = cube.attributes['experiment_id']
experiment = 'historicalAA' if experiment == "historicalMisc" else experiment
trend_ylabel = get_ylabel(cube, 'trend', inargs)
clim_ylabel = get_ylabel(cube, 'climatology', inargs)
metadata_dict = {infile: cube.attributes['history']}
return cube, trend_dict, clim_dict, experiment, trend_ylabel, clim_ylabel, metadata_dict
def get_title(standard_name, time_list, experiment, nexperiments):
"""Get the plot title"""
title = '%s, %s-%s' %(gio.var_names[standard_name],
time_list[0][0:4],
time_list[1][0:4])
if nexperiments == 1:
title = title + ', ' + experiment
return title
def correct_y_lim(ax, data_cube):
"""Adjust the y limits after changing x limit
x: data for entire x-axes
y: data for entire y-axes
"""
x_data = data_cube.coord('latitude').points
y_data = data_cube.data
lims = ax.get_xlim()
i = numpy.where( (x_data > lims[0]) & (x_data < lims[1]) )[0]
plt.ylim( y_data[i].min(), y_data[i].max() )
def align_yaxis(ax1, ax2):
"""Align zeros of the two axes, zooming them out by same ratio
Taken from: https://stackoverflow.com/questions/10481990/matplotlib-axis-with-two-scales-shared-origin
"""
axes = (ax1, ax2)
extrema = [ax.get_ylim() for ax in axes]
tops = [extr[1] / (extr[1] - extr[0]) for extr in extrema]
# Ensure that plots (intervals) are ordered bottom to top:
if tops[0] > tops[1]:
axes, extrema, tops = [list(reversed(l)) for l in (axes, extrema, tops)]
# How much would the plot overflow if we kept current zoom levels?
tot_span = tops[1] + 1 - tops[0]
b_new_t = extrema[0][0] + tot_span * (extrema[0][1] - extrema[0][0])
t_new_b = extrema[1][1] - tot_span * (extrema[1][1] - extrema[1][0])
axes[0].set_ylim(extrema[0][0], b_new_t)
axes[1].set_ylim(t_new_b, extrema[1][1])
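# Worked example (hypothetical limits): with ax1 at ylim (-2, 8) its zero sits
# at fraction 0.2 of the axis height, and with ax2 at (-5, 5) zero sits at 0.5.
# tops = [0.8, 0.5], so the axes get reordered, tot_span = 0.8 + 1 - 0.5 = 1.3,
# and both axes are zoomed out to (-5, 8), where zero sits at the same fraction
# (5/13) on each.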
def plot_files(ax, ax2, infiles, inargs, nexperiments, ref_cube=None):
"""Plot a list of files corresponding to a particular experiment."""
cube, trend_dict, clim_dict, experiment, trend_ylabel, clim_ylabel, metadata_dict = read_data(inargs, infiles, ref_cube=ref_cube)
model_family_list = group_runs(trend_dict)
color_dict = get_colors(model_family_list)
if inargs.time_agg == 'trend':
target_dict = trend_dict
target_ylabel = trend_ylabel
else:
target_dict = clim_dict
target_ylabel = clim_ylabel
if nexperiments == 1:
plot_individual(target_dict, color_dict)
if inargs.ensmean:
ensemble_mean = plot_ensmean(target_dict, experiment, nexperiments,
single_run=inargs.single_run)
else:
ensemble_mean = None
if inargs.clim and ((nexperiments == 1) or (experiment == 'historical')):
ax2 = ax.twinx()
plot_ensmean(clim_dict, experiment, nexperiments,
single_run=inargs.single_run, linestyle='--', linewidth=1.0)
plt.sca(ax)
return cube, metadata_dict, ensemble_mean, target_ylabel, clim_ylabel, experiment, ax2
def main(inargs):
"""Run the program."""
seaborn.set_context(inargs.context)
fig, ax = plt.subplots(figsize=[8, 5])
ax2 = None
nexperiments = len(inargs.hist_files)
if inargs.control_files:
nexperiments = nexperiments + len(inargs.control_files)
# Plot historical data
for infiles in inargs.hist_files:
cube, metadata_dict, ensemble_mean, ylabel, clim_ylabel, experiment, ax2 = plot_files(ax, ax2, infiles, inargs, nexperiments)
# Titles and labels
if inargs.title:
title = inargs.title
plt.title(title)
#else:
#title = get_title(inargs.var, inargs.time, experiment, nexperiments)
if inargs.scientific:
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0), useMathText=True)
ax.yaxis.major.formatter._useMathText = True
if inargs.ylabel:
ylabel = inargs.ylabel
ax.set_ylabel(ylabel)
ax.set_xlabel('latitude')
# Plot control data
if inargs.control_files:
assert inargs.hist_files, 'Control plot requires branch time information from historical files'
ref_cube = cube
for infiles in inargs.control_files:
        cube, metadata_dict, ensemble_mean, ylabel, clim_ylabel, experiment, ax2 = plot_files(ax, ax2, infiles, inargs, nexperiments, ref_cube=ref_cube)
# Ticks and axis limits
plt.xticks(numpy.arange(-75, 90, 15))
plt.xlim(inargs.xlim[0], inargs.xlim[1])
if not inargs.xlim == (-90, 90):
correct_y_lim(ax, ensemble_mean)
if inargs.clim:
align_yaxis(ax, ax2)
ax2.grid(None)
ax2.set_ylabel(clim_ylabel)
ax2.yaxis.major.formatter._useMathText = True
# Guidelines and legend
if inargs.zeroline:
plt.axhline(y=0, color='0.5', linestyle='--')
if inargs.legloc:
ax.legend(loc=inargs.legloc)
else:
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
if inargs.clim:
ax2.set_position([box.x0, box.y0, box.width * 0.8, box.height])
legend_x_pos = 1.1
else:
legend_x_pos = 1.0
ax.legend(loc='center left', bbox_to_anchor=(legend_x_pos, 0.5))
# Save output
    dpi = inargs.dpi if inargs.dpi else mpl.rcParams['figure.dpi']
print('dpi =', dpi)
plt.savefig(inargs.outfile, bbox_inches='tight', dpi=dpi)
gio.write_metadata(inargs.outfile, file_info=metadata_dict)
if __name__ == '__main__':
extra_info ="""
author:
Damien Irving, irving.damien@gmail.com
"""
description = 'Plot zonal ensemble'
parser = argparse.ArgumentParser(description=description,
epilog=extra_info,
argument_default=argparse.SUPPRESS,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("var", type=str, help="Variable")
parser.add_argument("time_agg", type=str, choices=('trend', 'climatology'), help="Temporal aggregation")
parser.add_argument("outfile", type=str, help="Output file")
parser.add_argument("--hist_files", type=str, action='append', nargs='*',
help="Input files for a given historical experiment")
parser.add_argument("--control_files", type=str, action='append', nargs='*', default=[],
help="Input files for a control experiment")
parser.add_argument("--time", type=str, nargs=2, metavar=('START_DATE', 'END_DATE'),
default=('1861-01-01', '2005-12-31'),
help="Time bounds [default = 1861-2005]")
parser.add_argument("--branch_times", type=str, nargs='*', default=None,
help="Need value for each control file (write default to use metadata)")
parser.add_argument("--perlat", action="store_true", default=False,
help="Scale per latitude [default=False]")
parser.add_argument("--single_run", action="store_true", default=False,
help="Only use run 1 in the ensemble mean [default=False]")
parser.add_argument("--ensmean", action="store_true", default=False,
help="Plot an ensemble mean curve [default=False]")
parser.add_argument("--clim", action="store_true", default=False,
help="Plot a climatology curve behind the trend curve [default=False]")
parser.add_argument("--legloc", type=int, default=None,
help="Legend location [default = off plot]")
parser.add_argument("--scientific", action="store_true", default=False,
help="Use scientific notation for the y axis scale [default=False]")
parser.add_argument("--zeroline", action="store_true", default=False,
help="Plot a dashed guideline at y=0 [default=False]")
parser.add_argument("--title", type=str, default=None,
help="plot title [default: None]")
parser.add_argument("--ylabel", type=str, default=None,
help="Override the default y axis label")
parser.add_argument("--xlim", type=float, nargs=2, metavar=('SOUTHERN_LIMIT', 'NORTHERN LIMIT'), default=(-90, 90),
help="x-axis limits [default = entire]")
#parser.add_argument("--ylim", type=float, nargs=2, metavar=('LOWER_LIMIT', 'UPPER_LIMIT'), default=None,
# help="y-axis limits [default = auto]")
parser.add_argument("--context", type=str, default='talk', choices=('paper', 'talk'),
help="Context for plot [default=talk]")
parser.add_argument("--dpi", type=float, default=None,
help="Figure resolution in dots per square inch [default=auto]")
args = parser.parse_args()
main(args)
|
mit
| 2,047,722,086,865,415,700
| 34.511777
| 156
| 0.613965
| false
| 3.474544
| false
| false
| false
|
ReneHollander/rep0st
|
rep0st/db/post.py
|
1
|
6485
|
import enum
import logging
from itertools import groupby
from typing import List, Optional
from injector import Module, ProviderOf, inject
from sqlalchemy import Boolean, Column, DateTime, Enum, Index, Integer, String, and_, func
from sqlalchemy.orm import Session, relationship
from rep0st.config.rep0st_database import Rep0stDatabaseModule
from rep0st.db import Base
from rep0st.db.feature import Feature
from rep0st.framework.data.repository import Repository
from rep0st.framework.data.transaction import transactional
log = logging.getLogger(__name__)
class PostRepositoryModule(Module):
def configure(self, binder):
binder.install(Rep0stDatabaseModule)
binder.bind(PostRepository)
class Type(enum.Enum):
IMAGE = 'IMAGE'
ANIMATED = 'ANIMATED'
VIDEO = 'VIDEO'
UNKNOWN = 'UNKNOWN'
def post_type_from_media_path(path: str) -> Type:
ending = path[path.rfind('.') + 1:].lower()
if ending in ['jpg', 'jpeg', 'png']:
return Type.IMAGE
elif ending in ['gif']:
return Type.ANIMATED
elif ending in ['mp4', 'webm']:
return Type.VIDEO
else:
log.error(f'Could not deduce post type from {path} with ending {ending}')
return Type.UNKNOWN
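# Illustrative results (paths are hypothetical):
#   post_type_from_media_path('2021/01/cat.JPG') -> Type.IMAGE
#   post_type_from_media_path('2021/01/cat.webm') -> Type.VIDEO
#   post_type_from_media_path('2021/01/cat.txt') -> Type.UNKNOWN (and logs an error)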
class Flag(enum.Enum):
SFW = 'sfw'
NSFW = 'nsfw'
NSFL = 'nsfl'
NSFP = 'nsfp'
class Status(enum.Enum):
# No attempt to download the media has been made yet.
NO_MEDIA = 'NO_MEDIA'
# The image is downloaded, but not yet indexed.
NOT_INDEXED = 'NOT_INDEXED'
# The image has been indexed.
INDEXED = 'INDEXED'
# No media was found on pr0gramm servers.
NO_MEDIA_FOUND = 'NO_MEDIA_FOUND'
# The downloaded media cannot be read.
MEDIA_BROKEN = 'MEDIA_BROKEN'
class Post(Base):
from rep0st.db.feature import Feature
from rep0st.db.tag import Tag
__tablename__ = 'post'
# Post id.
id = Column(Integer, primary_key=True, index=True, autoincrement=False)
# Timestamp this post was created.
created = Column(DateTime(), nullable=False)
# Path of the image on pr0gramm servers.
image = Column(String(256), nullable=False, index=True)
# Path of the thumbnail on pr0gramm servers.
thumb = Column(String(256), nullable=False)
# Path of the fullsize image on pr0gramm servers. Optional.
fullsize = Column(String(256))
# Width of the media in pixels.
width = Column(Integer(), nullable=False)
# Height of the media in pixels.
height = Column(Integer(), nullable=False)
# True if the media has audio.
audio = Column(Boolean(), nullable=False)
# URL of the source of the image. Optional.
source = Column(String(512))
# Flags for the post encoded as a bitset. If the bit is set,
# the post is marked with the given tag.
# Bit 0: SFW
# Bit 1: NSFW
# Bit 2: NSFL
# Bit 3: NSFP
flags = Column(Integer(), nullable=False)
# Name of the user that uploaded the post.
user = Column(String(32), nullable=False)
# Type of the media in the post.
# - IMAGE: Static images. (jpg, png)
# - ANIMATED: Animated images. (gif)
# - VIDEO: Videos. (mp4, webm)
type = Column(Enum(Type), nullable=False, index=True)
# Status of the post.
status = Column(
Enum(Status), nullable=False, index=True, default=Status.NO_MEDIA)
# True if the post is deleted on pr0gramm.
deleted = Column(Boolean(), nullable=False, default=False)
# List of features associated with this post.
features = relationship(Feature)
# List of tags associated with this post.
tags = relationship(Tag)
def __json__(self):
return {
'id': self.id,
'user': self.user,
'created': self.created.isoformat(),
'is_sfw': self.is_sfw(),
'is_nsfw': self.is_nsfw(),
'is_nsfl': self.is_nsfl(),
'image': self.image,
'thumb': self.thumb,
'fullsize': self.fullsize,
}
def as_indexed_doc(self):
    def feature_key_func(feature: Feature):
return feature.id
return {
'meta': {
'id': self.id
},
'created':
int(self.created.timestamp() * 1000),
'flags': [flag.value for flag in self.get_flags()],
'type':
self.type.value,
'tags': [tag.tag for tag in self.tags],
'frames': [{
'id': key,
'features': {v.type: v.data for v in valuesiter}
} for key, valuesiter in groupby(
            sorted(self.features, key=feature_key_func), key=feature_key_func)
],
}
def is_sfw(self):
return self.flags & 1 != 0
def is_nsfw(self):
return self.flags & 2 != 0
def is_nsfl(self):
return self.flags & 4 != 0
def is_nsfp(self):
return self.flags & 8 != 0
def get_flags(self) -> List[Flag]:
flags = []
if self.is_sfw():
flags.append(Flag.SFW)
if self.is_nsfw():
flags.append(Flag.NSFW)
if self.is_nsfl():
flags.append(Flag.NSFL)
if self.is_nsfp():
flags.append(Flag.NSFP)
return flags
def get_flag_by_importance(self) -> Flag:
if self.is_nsfl():
return Flag.NSFL
if self.is_nsfw():
return Flag.NSFW
if self.is_nsfp():
return Flag.NSFP
return Flag.SFW
def __str__(self):
return "Post(id=" + str(self.id) + ")"
def __repr__(self):
return "Post(id=" + str(self.id) + ")"
# Index on status, type and deleted for fast missing feature lookups.
post_status_type_deleted_index = Index('post_status_type_deleted_index',
Post.status, Post.type, Post.deleted)
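# Worked example of the flags bitset documented on Post: flags = 5 is 0b0101,
# so is_sfw() and is_nsfl() are True while is_nsfw() and is_nsfp() are False,
# and get_flag_by_importance() returns Flag.NSFL because NSFL outranks SFW.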
class PostRepository(Repository[int, Post]):
@inject
def __init__(self, session_provider: ProviderOf[Session]) -> None:
super().__init__(int, Post, session_provider)
@transactional()
def get_latest_post_id(self) -> int:
session = self._get_session()
id = session.query(func.max(Post.id)).scalar()
return 0 if id is None else id
@transactional()
def get_posts(self, type: Optional[str] = None):
session = self._get_session()
if type is not None:
return session.query(Post).filter(Post.type == type)
else:
return session.query(Post)
@transactional()
def get_posts_missing_features(self, type: Optional[Type] = None):
session = self._get_session()
q = session.query(Post)
if type:
q = q.filter(Post.type == type)
return q.filter(
and_(Post.status == Status.NOT_INDEXED, Post.deleted == False))
@transactional()
def post_count(self) -> int:
session = self._get_session()
return session.query(func.count(Post.id)).scalar()
|
mit
| 811,414,169,857,554,000
| 27.69469
| 90
| 0.643639
| false
| 3.416754
| false
| false
| false
|
mknorps/pp_TCF
|
apriori_SGS_fede_timestat.py
|
1
|
5259
|
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# File name: apriori_SGS_fede_timestat.py
# Created by: gemusia
# Creation date: 11-07-2017
# Last modified: 12-08-2017 08:48:12
# Purpose: computation of apriori statistics of particles,
# statistic derived from scratch
# - test of possible substitution of (V-U)du^*/dx term
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import numpy as np
import matplotlib.pyplot as plt
import particlestat as ps
import pfiles as pf
import homfigs as hfig
from os.path import expanduser
from itertools import ifilterfalse
# declaration of picture attributes
LineStyle = {'fterm':'solid','pterm':'dashed',
"Ux":'solid',"Ufx":'dashed',"Vx":'dotted',
"UxUfx":'solid',"UyUfy":'dashed',"UzUfz":'dotted',
"fluid":'solid',"St1":'dashed',"St5":'dotted',"St25":'dashdot'}
coordinates = {0:'x',1:'y',2:'z'}
terms = {0:"termx",1:"termy",2:"termz"}
ptype = {"St1","St5","St25","fluid"}
#data loading and
#initialising Particle class instance
#the data is written to file in Fortran program with following code
'''
do j=1,npar
write(4,'(30e13.5)')(pos(j,i),i=1,3),(vpar(j,i),i=1,3),
$ (upar(j,i),i=1,3), (uparf(j,i),i=1,3),
$ (dupar(j,i,1),i=1,3), (dupar(j,i,2),i=1,3), (dupar(j,i,3),i=1,3),
$ (duparf(j,i,1),i=1,3), (duparf(j,i,2),i=1,3), (duparf(j,i,3),i=1,3)
enddo
'''
#the code has different direction naming and we have to permute directions
# fortran code | current convention (widely used in presenting channel flow data)
#---------------------------------------------------------------------------
# x - wall-normal | x - streamwise
# y - spanwise | y - wall-normal
# z - streamwise | z - spanwise
#
#we permute (x,y,z)->(z,x,y)
file_path = expanduser("~") + "/wyniki/apriori/fede_terms_pDNS/"
pict_path = file_path
pfields= pf.ParticleFields(2501,2508,fCoreName=file_path+"SGS_terms_",x=2,y=0,z=1,Vx=5,Vy=3,Vz=4,
Ux=8,Uy=6,Uz=7,Ufx=11,Ufy=9,Ufz=10,
dUxdx=20,dUxdy=14,dUxdz=17, dUydx=18,dUydy=12,dUydz=15, dUzdx=19,dUzdy=13,dUzdz=16,
dUfxdx=29,dUfxdy=23,dUfxdz=26, dUfydx=27,dUfydy=21,dUfydz=24, dUfzdx=28,dUfzdy=22,dUfzdz=25)
#pfields= pf.ParticleFields(2501,2508,fCoreName=file_path+"SGS_terms_",x=2,y=0,z=1,Vx=5,Vy=3,Vz=4,
# Ux=8,Uy=6,Uz=7,Ufx=11,Ufy=9,Ufz=10,
# dUxdx=20,dUxdy=18,dUxdz=19, dUydx=14,dUydy=12,dUydz=13, dUzdx=17,dUzdy=15,dUzdz=16,
# dUfxdx=29,dUfxdy=27,dUfxdz=28, dUfydx=23,dUfydy=21,dUfydz=22, dUfzdx=26,dUfzdy=24,dUfzdz=25)
def pterm(V1,V2,V3,U1,U2,U3,dUdx,dUdy,dUdz,dUfdx,dUfdy,dUfdz):
return (V1-U1)*(dUdx-dUfdx) + (V2-U2)*(dUdy-dUfdy) + (V3-U3)*(dUdz-dUfdz)
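# pterm computes sum_j (V_j - U_j) * d(U_i - U_i^f)/dx_j for one component i:
# the particle slip velocity advecting the gradient of the SGS velocity
# u*_i = U_i - U_i^f. The three argument lists below supply i = x, y, z.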
ptermArglist = [['Vx','Vy','Vz','Ux','Uy','Uz','dUxdx','dUxdy','dUxdz','dUfxdx','dUfxdy','dUfxdz'],
['Vx','Vy','Vz','Ux','Uy','Uz','dUydx','dUydy','dUydz','dUfydx','dUfydy','dUfydz'],
['Vx','Vy','Vz','Ux','Uy','Uz','dUzdx','dUzdy','dUzdz','dUfzdx','dUfzdy','dUfzdz']]
# separate plots for each stokes number
for StNo in ptype:
for stattype in ("pmean","pstd"):
# STATISTICS
pstat = pfields.equationP(StNo,(lambda x:x),stattype,"symm",["Ux"],["Ufx"],["Vx"])
sgsStat = pfields.equationP(StNo,(lambda x,y: x-y),stattype,"symm",["Ux","Ufx"],["Uy","Ufy"],["Uz","Ufz"])
# FIGURES
# velocity statistics
statfig = hfig.Homfig(title="Velocities streamwise", ylabel="U")
plotFileName = pict_path +stattype +"_"+StNo+".eps"
for arg in ifilterfalse(lambda x: x=="yplus", set(pstat.keys())): #custom , for this case
statfig.add_plot(pstat["yplus"],pstat[arg],linestyle=LineStyle[arg],label=arg)
statfig.hdraw()
statfig.save(plotFileName)
print "plot created: " + plotFileName
plt.close(statfig.fig)
# SGS statistics
sgsfig = hfig.Homfig(title="SGS velocity", ylabel="$u^*$")
plotFileNameSGS = pict_path + "Usgs_"+stattype +"_"+StNo+".eps"
for arg in ifilterfalse(lambda x: x=="yplus", set(sgsStat.keys())): #custom , for this case
sgsfig.add_plot(sgsStat["yplus"],sgsStat[arg],linestyle=LineStyle[arg],label=arg)
sgsfig.hdraw()
sgsfig.save(plotFileNameSGS)
print "plot created: " + plotFileNameSGS
plt.close(sgsfig.fig)
#several stokes number on one plot
for stattype in ("pmean","pstd"):
ptermStat = {}
for StNo in ptype:
#(V-U)_j*du/dx_j
ptermStat[StNo] = pfields.equationP(StNo,pterm,stattype,"symm",*ptermArglist)
print "ptermStat: ", type(ptermStat['St1']), ptermStat['St1'].keys()
# pterm statistics : (V-U)_j*du/dx_j
for direction,ptermKey in zip(range(3),ifilterfalse(lambda x: x=="yplus", set(ptermStat['St1'].keys()))):
ptermfig = hfig.Homfig(title="pterm ", ylabel="$(V-U)_j*du/dx_j$")
plotFileNamePterm = pict_path + "pterm_"+stattype +coordinates[direction]+".eps"
for StNo in ptype:
ptermfig.add_plot(ptermStat[StNo]["yplus"],ptermStat[StNo][ptermKey],linestyle=LineStyle[StNo],label=StNo)
ptermfig.hdraw()
ptermfig.save(plotFileNamePterm)
print "plot created: " + plotFileNamePterm
plt.close(ptermfig.fig)
|
mit
| -6,074,349,511,344,591,000
| 37.386861
| 118
| 0.602206
| false
| 2.456329
| false
| false
| false
|
n0x5/scripts
|
_nonworking_instagrab_all.py
|
1
|
3066
|
# instagrabALL.py - download all images from instagram user
import re
import time
import itertools
import os
import urllib.request
from selenium.webdriver import Chrome, ChromeOptions
users = ['user1', 'user2']
class GrabIt(urllib.request.FancyURLopener):
version = ('Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36')
    def download_file(self, url, path):
        try:
            # FancyURLopener already provides retrieve(); no need to build a
            # fresh GrabIt instance on every call
            self.retrieve(url, path)
        except Exception as e:
            print(str(e))
def grab_img(user):
grab1 = GrabIt()
options = ChromeOptions()
options.add_argument('headless')
options.add_argument('disable-gpu')
driver = Chrome(chrome_options=options)
url = 'https://www.instagram.com/'+user+'/'
driver.get(url)
driver.implicitly_wait(5)
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
try:
driver.find_element_by_xpath('//*[@id="react-root"]/section/nav/div[2]/div/div/div[3]/div/section/div/a').click();
driver.implicitly_wait(2)
except:
pass
driver.find_element_by_xpath("//a[text()[contains(.,'Load more')]]").click();
driver.implicitly_wait(5)
for _ in itertools.repeat(None, 100):
driver.implicitly_wait(3)
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(2)
driver.implicitly_wait(5)
elem = driver.find_elements_by_xpath('//*[@src]')
for ii in elem:
if 'https://scontent' in ii.get_attribute('src'):
            content2 = ii.get_attribute('src')
            # drop the sNNNxNNN size segment so the full-resolution image is fetched
            content3 = re.sub(r's\w\w\wx\w\w\w\/', '', content2, flags=re.IGNORECASE)
            content7 = re.sub(r'\w{3}\.\w{2}\/', '', content3, flags=re.IGNORECASE)
            content6 = re.sub(r'\w{0,4}\.\d{0,4}\.\d{0,4}\.\d{0,5}\/', '', content7, flags=re.IGNORECASE)
            # derive a bare filename by stripping the URL prefix and the cache key
            content4 = re.sub(r'https:\/\/\w{8}\S+\w{4}-\w(.*)\/', '', content2, flags=re.IGNORECASE)
            content5 = re.sub(r'\?ig_cache_key=\w+(\S+)', '', content4, flags=re.IGNORECASE)
            content10 = re.sub(r'\/vp\/\w+\/\w+', '', content6, flags=re.IGNORECASE)
endpoint = os.path.join(os.path.dirname(__file__), user, content5)
endpoint1 = os.path.join(os.path.dirname(__file__), user, user+'_'+content5)
if not os.path.exists(user):
os.makedirs(user)
if os.path.isfile(endpoint) or os.path.isfile(endpoint1):
print('file exists - skipping')
else:
try:
grab1.download_file(content10, endpoint1)
print(content5)
except Exception as e:
print(str(e))
driver.quit()
for user in users:
grab_img(user)
|
gpl-2.0
| -2,309,897,375,846,428,700
| 38.307692
| 122
| 0.594586
| false
| 3.456595
| false
| false
| false
|
kim135797531/opencog-python-blending
|
opencog_b/python/blending/connector/connector_finder.py
|
1
|
1096
|
from opencog_b.python.blending.connector.connect_conflict_random import \
ConnectConflictRandom
from opencog_b.python.blending.connector.connect_conflict_viable import \
ConnectConflictAllViable
from opencog_b.python.blending.connector.connect_simple import ConnectSimple
from opencog_b.python.blending.util.blend_config import BlendConfig
__author__ = 'DongMin Kim'
class ConnectorFinder(object):
def __init__(self, a):
self.a = a
self.connectors = {
ConnectSimple.__name__: ConnectSimple,
ConnectConflictRandom.__name__: ConnectConflictRandom,
ConnectConflictAllViable.__name__: ConnectConflictAllViable
}
def __str__(self):
return self.__class__.__name__
def get_connector(self, id_or_name=None):
if id_or_name is None:
id_or_name = BlendConfig().get_str(self.a, "link-connector")
connector = self.connectors.get(str(id_or_name))
if connector is not None:
return connector(self.a)
else:
raise UserWarning('Connector not found.')
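# Minimal usage sketch (illustrative; assumes an initialized AtomSpace-like
# object `a`, as expected by the connector classes):
#
#   finder = ConnectorFinder(a)
#   connector = finder.get_connector(ConnectSimple.__name__)
#   # With no argument, the BlendConfig "link-connector" entry is used:
#   default_connector = finder.get_connector()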
|
agpl-3.0
| 7,915,618,802,387,955,000
| 32.212121
| 76
| 0.665146
| false
| 3.805556
| false
| false
| false
|
w1ll1am23/home-assistant
|
homeassistant/components/climacell/config_flow.py
|
1
|
5415
|
"""Config flow for ClimaCell integration."""
from __future__ import annotations
import logging
from typing import Any
from pyclimacell import ClimaCellV3
from pyclimacell.exceptions import (
CantConnectException,
InvalidAPIKeyException,
RateLimitedException,
)
from pyclimacell.pyclimacell import ClimaCellV4
import voluptuous as vol
from homeassistant import config_entries, core
from homeassistant.const import (
CONF_API_KEY,
CONF_API_VERSION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
)
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
CC_ATTR_TEMPERATURE,
CC_V3_ATTR_TEMPERATURE,
CONF_TIMESTEP,
DEFAULT_NAME,
DEFAULT_TIMESTEP,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
def _get_config_schema(
hass: core.HomeAssistant, input_dict: dict[str, Any] = None
) -> vol.Schema:
"""
Return schema defaults for init step based on user input/config dict.
Retain info already provided for future form views by setting them as
defaults in schema.
"""
if input_dict is None:
input_dict = {}
return vol.Schema(
{
vol.Required(
CONF_NAME, default=input_dict.get(CONF_NAME, DEFAULT_NAME)
): str,
vol.Required(CONF_API_KEY, default=input_dict.get(CONF_API_KEY)): str,
vol.Required(CONF_API_VERSION, default=4): vol.In([3, 4]),
vol.Inclusive(
CONF_LATITUDE,
"location",
default=input_dict.get(CONF_LATITUDE, hass.config.latitude),
): cv.latitude,
vol.Inclusive(
CONF_LONGITUDE,
"location",
default=input_dict.get(CONF_LONGITUDE, hass.config.longitude),
): cv.longitude,
},
extra=vol.REMOVE_EXTRA,
)
def _get_unique_id(hass: HomeAssistantType, input_dict: dict[str, Any]):
"""Return unique ID from config data."""
return (
f"{input_dict[CONF_API_KEY]}"
f"_{input_dict.get(CONF_LATITUDE, hass.config.latitude)}"
f"_{input_dict.get(CONF_LONGITUDE, hass.config.longitude)}"
)
class ClimaCellOptionsConfigFlow(config_entries.OptionsFlow):
"""Handle ClimaCell options."""
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
"""Initialize ClimaCell options flow."""
self._config_entry = config_entry
async def async_step_init(
self, user_input: dict[str, Any] = None
) -> dict[str, Any]:
"""Manage the ClimaCell options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
options_schema = {
vol.Required(
CONF_TIMESTEP,
default=self._config_entry.options.get(CONF_TIMESTEP, DEFAULT_TIMESTEP),
): vol.In([1, 5, 15, 30]),
}
return self.async_show_form(
step_id="init", data_schema=vol.Schema(options_schema)
)
class ClimaCellConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for ClimaCell Weather API."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
@staticmethod
@callback
def async_get_options_flow(
config_entry: config_entries.ConfigEntry,
) -> ClimaCellOptionsConfigFlow:
"""Get the options flow for this handler."""
return ClimaCellOptionsConfigFlow(config_entry)
async def async_step_user(
self, user_input: dict[str, Any] = None
) -> dict[str, Any]:
"""Handle the initial step."""
errors = {}
if user_input is not None:
await self.async_set_unique_id(
unique_id=_get_unique_id(self.hass, user_input)
)
self._abort_if_unique_id_configured()
try:
if user_input[CONF_API_VERSION] == 3:
api_class = ClimaCellV3
field = CC_V3_ATTR_TEMPERATURE
else:
api_class = ClimaCellV4
field = CC_ATTR_TEMPERATURE
await api_class(
user_input[CONF_API_KEY],
str(user_input.get(CONF_LATITUDE, self.hass.config.latitude)),
str(user_input.get(CONF_LONGITUDE, self.hass.config.longitude)),
session=async_get_clientsession(self.hass),
).realtime([field])
return self.async_create_entry(
title=user_input[CONF_NAME], data=user_input
)
except CantConnectException:
errors["base"] = "cannot_connect"
except InvalidAPIKeyException:
errors[CONF_API_KEY] = "invalid_api_key"
except RateLimitedException:
errors[CONF_API_KEY] = "rate_limited"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
return self.async_show_form(
step_id="user",
data_schema=_get_config_schema(self.hass, user_input),
errors=errors,
)
|
apache-2.0
| 4,533,324,280,178,194,400
| 31.620482
| 88
| 0.598523
| false
| 3.926759
| true
| false
| false
|
gbowerman/azurerm
|
test/insights_test.py
|
1
|
6201
|
# azurerm unit tests - insights
# To run tests: python -m unittest insights_test.py
# Note: The insights test unit creates a VM scale set in order to add autoscale rules.
# Therefore it is a fairly good way to exercise storage, network, compute AND insights functions.
import azurerm
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend
import datetime
from haikunator import Haikunator
import json
import sys
import unittest
class TestAzurermPy(unittest.TestCase):
def setUp(self):
# Load Azure app defaults
try:
with open('azurermconfig.json') as configFile:
configData = json.load(configFile)
except FileNotFoundError:
print("Error: Expecting vmssConfig.json in current folder")
sys.exit()
tenant_id = configData['tenantId']
app_id = configData['appId']
app_secret = configData['appSecret']
self.subscription_id = configData['subscriptionId']
self.access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
self.location = configData['location']
# generate names for resources
self.h = Haikunator()
self.rgname = self.h.haikunate()
self.vnet = self.h.haikunate(delimiter='')
self.vmssname = self.h.haikunate(delimiter='')
self.setting_name = self.h.haikunate(delimiter='')
# generate RSA Key for compute resources
key = rsa.generate_private_key(backend=default_backend(), public_exponent=65537, \
key_size=2048)
self.public_key = key.public_key().public_bytes(serialization.Encoding.OpenSSH, \
serialization.PublicFormat.OpenSSH).decode('utf-8')
# create resource group
print('Creating resource group: ' + self.rgname)
response = azurerm.create_resource_group(self.access_token, self.subscription_id, \
self.rgname, self.location)
self.assertEqual(response.status_code, 201)
# create vnet
print('Creating vnet: ' + self.vnet)
response = azurerm.create_vnet(self.access_token, self.subscription_id, self.rgname, \
self.vnet, self.location, address_prefix='10.0.0.0/16', nsg_id=None)
self.assertEqual(response.status_code, 201)
self.subnet_id = response.json()['properties']['subnets'][0]['id']
# create public ip address for VMSS LB
self.ipname2 = self.vnet + 'ip2'
print('Creating VMSS LB public ip address: ' + self.ipname2)
dns_label2 = self.vnet + '2'
response = azurerm.create_public_ip(self.access_token, self.subscription_id, self.rgname, \
self.ipname2, dns_label2, self.location)
self.assertEqual(response.status_code, 201)
self.ip2_id = response.json()['id']
# create load balancer with nat pool for VMSS create
lb_name = self.vnet + 'lb'
print('Creating load balancer with nat pool: ' + lb_name)
response = azurerm.create_lb_with_nat_pool(self.access_token, self.subscription_id, \
self.rgname, lb_name, self.ip2_id, '50000', '50100', '22', self.location)
self.be_pool_id = response.json()['properties']['backendAddressPools'][0]['id']
self.lb_pool_id = response.json()['properties']['inboundNatPools'][0]['id']
# create VMSS
capacity = 1
vm_size = 'Standard_D1'
publisher = 'Canonical'
offer = 'UbuntuServer'
sku = '16.04-LTS'
version = 'latest'
username = 'rootuser'
password = self.h.haikunate(delimiter=',')
print('Creating VMSS: ' + self.vmssname + ', capacity = ' + str(capacity))
response = azurerm.create_vmss(self.access_token, self.subscription_id, self.rgname, \
self.vmssname, vm_size, capacity, publisher, offer, sku, version, \
self.subnet_id, self.be_pool_id, self.lb_pool_id, self.location, username=username, \
public_key=self.public_key)
def tearDown(self):
# delete resource group - that deletes everything in the test
print('Deleting resource group: ' + self.rgname)
response = azurerm.delete_resource_group(self.access_token, self.subscription_id, \
self.rgname)
self.assertEqual(response.status_code, 202)
def test_insights(self):
# create autoscale rule
print('Creating autoscale rules')
metric_name = 'Percentage CPU'
operator = 'GreaterThan'
threshold = 60
direction = 'Increase'
change_count = 1
rule1 = azurerm.create_autoscale_rule(self.subscription_id, self.rgname, self.vmssname, \
metric_name, operator, threshold, direction, change_count)
operator = 'LessThan'
direction = 'Decrease'
rule2 = azurerm.create_autoscale_rule(self.subscription_id, self.rgname, self.vmssname, \
metric_name, operator, threshold, direction, change_count)
rules = [rule1, rule2]
# print(json.dumps(rules, sort_keys=False, indent=2, separators=(',', ': ')))
# create autoscale setting
print('Creating autoscale setting: ' + self.setting_name)
min = 1
max = 10
default = 3
response = azurerm.create_autoscale_setting(self.access_token, self.subscription_id, \
self.rgname, self.setting_name, self.vmssname, self.location, min, max, default, \
rules)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.json()['name'], self.setting_name)
# get audit log events
print('Getting audit log events')
# start_timestamp = '2017-05-01T00:00:00.0000000Z'
start_timestamp = str(datetime.datetime.now() - datetime.timedelta(days=1))
response = azurerm.get_events_for_subscription(self.access_token, self.subscription_id, \
start_timestamp)
self.assertTrue(len(response['value']) > 0)
if __name__ == '__main__':
unittest.main()
|
mit
| 7,930,434,742,642,818,000
| 42.669014
| 99
| 0.643122
| false
| 3.751361
| true
| false
| false
|
pvpnvz/internshipsystem
|
app/main/form.py
|
1
|
8930
|
from flask.ext.wtf import Form
from wtforms import StringField, SubmitField, TextAreaField, DateTimeField, SelectField, BooleanField, DateField, \
validators, FileField
from wtforms.validators import Required, URL, Email
from .. import db
from flask.ext.pagedown.fields import PageDownField
# datepicker failed
'''
from wtforms import widgets
class ExampleForm(Form):
dt = DateField('DatePicker', format='%Y-%m-%d')
submit = SubmitField('提交')
class DatePickerWidget(widgets.TextInput):
"""
Date picker widget.
You must include bootstrap-datepicker.js and form.js for styling to work.
"""
def __call__(self, field, **kwargs):
kwargs['data-role'] = u'datepicker'
return super(DatePickerWidget, self).__call__(field, **kwargs)
'''
class searchForm(Form):
key = StringField(validators=[Required(message='请先输入搜索内容')])
submit = SubmitField('搜索')
class comForm(Form):
comName = StringField('公司名称', validators=[Required(message='此项不能为空')], id='task')
comCity=StringField('公司所在城市',validators=[Required(message='此项不能为空')])
comAddress = StringField('公司详细地址', validators=[Required(message='此项不能为空')])
# comUrl = StringField('公司网址', validators=[Required(message='此项不能为空'), URL(message='请输入正确的URL')])
comUrl = StringField('公司网址', [validators.Regexp(message='Not a proper param', regex=r'.*com.*')])
comBrief = TextAreaField('公司简介')
comProject = TextAreaField('营业项目', validators=[Required(message='此项不能为空')])
comMon = StringField('营业额', validators=[Required(message='此项不能为空')])
comStaff = StringField('员工人数', validators=[Required(message='此项不能为空')])
comContact = StringField('联系人', validators=[Required(message='此项不能为空')])
comPhone = StringField('联系电话', validators=[Required(message='此项不能为空')])
comEmail = StringField('Email', validators=[Required(message='此项不能为空'), Email(message='请输入正确的邮箱地址')])
comFax = StringField('传真', validators=[Required(message='此项不能为空')])
submit = SubmitField('提交')
class internshipForm(Form):
task = TextAreaField('实习任务', validators=[Required(message='此项不能为空')])
post = TextAreaField('实习岗位', validators=[Required(message='此项不能为空')])
start = DateTimeField('开始时间', format='%Y-%m-%d', validators=[Required()])
end = DateTimeField('结束时间', format='%Y-%m-%d', validators=[Required(message='请按 年-月-日 的格式输入正确的日期')])
image = FileField()
submit = SubmitField('提交')
'''
# delete
class directTeaForm(Form):
teaId = StringField('教师工号')
teaName = StringField('姓名')
teaDuty = StringField('职称')
teaPhone = StringField('联系电话')
teaEmail = StringField('邮箱')
cteaName = StringField('姓名')
cteaDuty = StringField('职称')
cteaPhone = StringField('联系电话')
cteaEmail = StringField('邮箱')
'''
class schdirteaForm(Form):
# steaId = StringField('校内教师工号')
steaName = StringField('教师姓名')
# steaDuty = StringField('职称')
# steaPhone = StringField('联系电话')
# steaEmail = StringField('邮箱')
submit = SubmitField('提交')
class comdirteaForm(Form):
cteaName = StringField('企业教师姓名')
cteaDuty = StringField('职称')
cteaPhone = StringField('联系电话')
cteaEmail = StringField('邮箱')
submit = SubmitField('提交')
class journalForm(Form):
workStart = DateField('开始日期', format="%Y-%m-%d", validators=[Required(message='此项不能为空')])
weekNo = StringField('周数', validators=[Required(message='此项不能为空')])
mon = TextAreaField('周一', id='mon')
tue = TextAreaField('周二', id='tue')
wed = TextAreaField('周三', id='wed')
thu = TextAreaField('周四', id='thu')
fri = TextAreaField('周五', id='fri')
sat = TextAreaField('周六', id='sat')
sun = TextAreaField('周日', id='sun')
submit = SubmitField('提交')
class stuForm(Form):
stuId = StringField('学号', validators=[Required(message='此项不能为空')])
stuName = StringField('姓名', validators=[Required(message='此项不能为空')])
sex = SelectField('性别', choices=[('男', '男'), ('女', '女')])
institutes = StringField('学院', default='计算机与网络安全学院', validators=[Required(message='此项不能为空')])
grade = SelectField('年级', coerce=str,default=' ')
major = SelectField('专业', coerce=str,default=' ')
classes = SelectField('班级', coerce=str,default=' ')
submit = SubmitField('提交')
    # Initialize the dropdown (select field) choices
def __init__(self):
super().__init__()
self.grade.choices=[(x.grade,x.grade)for x in db.session.execute('Select distinct grade from Grade order by grade desc')]
self.major.choices=[(x.major,x.major)for x in db.session.execute('Select distinct major from Major')]
self.classes.choices=[(x.classes,x.classes)for x in db.session.execute('Select distinct classes from Classes order by classes')]
# self.user=user
class teaForm(Form):
teaId = StringField('教工号', validators=[Required(message='此项不能为空')])
teaName = StringField('姓名', validators=[Required(message='此项不能为空')])
teaSex = SelectField('性别', choices=[('男', '男'), ('女', '女')], default=' ')
teaPosition = StringField('职称')
teaPhone = StringField('联系电话')
teaEmail = StringField('邮箱')
submit = SubmitField('提交')
class permissionForm(Form):
roleName = StringField('角色名称', validators=[Required(message='此项不能为空')])
roleDescribe = TextAreaField('角色描述')
COM_INFOR_SEARCH = BooleanField('企业信息查看', default=False, description='0X0000009', false_values='0x11')
COM_INFOR_EDIT = BooleanField('企业信息编辑', default=False, description='0X000000B')
COM_INFOR_CHECK = BooleanField('企业信息审核', default=False, description='0X000000F')
INTERNCOMPANY_LIST = BooleanField('实习企业信息列表', default=False, description='0X0000008')
STU_INTERN_LIST = BooleanField('学生实习信息列表', default=False, description='0X0000010')
STU_INTERN_SEARCH = BooleanField('学生实习信息查看', default=False, description='0X0000030')
STU_INTERN_EDIT = BooleanField('学生实习信息编辑', default=False, description='0X0000070')
STU_INTERN_CHECK = BooleanField('学生实习信息审核', default=False, description='0X00000F0')
STU_JOUR_SEARCH = BooleanField('学生实习日志查看', default=False, description='0X0000210')
STU_JOUR_EDIT = BooleanField('学生实习日志编辑', default=False, description='0X0000610')
STU_JOUR_CHECK = BooleanField('学生实习日志审核', default=False, description='0X0000E10')
STU_SUM_SEARCH = BooleanField('学生实习总结与成果查看', default=False, description='0X0001010')
STU_SUM_EDIT = BooleanField('学生实习总结与成果编辑', default=False, description='0X0003010')
STU_SUM_SCO_CHECK = BooleanField('学生实习总结和成果审核', default=False, description='0X0007010')
STU_INTERN_MANAGE = BooleanField('学生信息管理', default=False, description='0X0010000')
TEA_INFOR_MANAGE = BooleanField('老师信息管理', default=False, description='0X0020000')
PERMIS_MANAGE = BooleanField('权限管理', default=False, description='0X0040000')
SELECT_MANAGE=BooleanField('下拉框管理',default=False,description='0X0080000')
UPLOAD_VISIT= BooleanField('上传探访记录',default=False,description='0X0100030')
ALTER_INTRODUCE=BooleanField('首页介绍内容修改',default=False,description='0X0200000')
submit = SubmitField('提交')
class xSumScoreForm(Form):
comScore = StringField('企业实习评分', validators=[Required(message='此项不能为空')])
schScore = StringField('校内指导老师评分', validators=[Required(message='此项不能为空')])
comfile = FileField('企业实习评分表')
schfile = FileField('校内评分表')
submit = SubmitField('保存')
class visitForm(Form):
teaName=StringField('探访老师',validators=[Required(message='此项不能为空')])
visitTime=StringField('探访时间',validators=[Required(message='此项不能为空')])
visitWay=SelectField('探访方式', choices=[('电话', '电话'), ('现场', '现场')], default='现场')
submit = SubmitField('确定')
class introduceForm(Form):
content=PageDownField('首页介绍',validators=[Required()],id='content')
submit=SubmitField('提交')
|
mit
| -2,135,802,151,291,333,400
| 43.479769
| 136
| 0.690408
| false
| 2.614339
| false
| false
| false
|
tumi8/INSALATA
|
src/insalata/model/Layer3Network.py
|
1
|
2247
|
from xml.etree.ElementTree import SubElement
from insalata.model.Node import Node
from insalata.helper import ipAddressHelper
class Layer3Network(Node):
def __init__(self, id, address, netmask, collectorName=None, timeout=None):
Node.__init__(self, collectorName=collectorName, timeout=timeout)
self.__id = id
self.netmask = netmask
self.address = address
def getID(self):
return self.__id
def getGlobalID(self):
return self.getID()
def getAddress(self):
return self.address
def setAddress(self, address, collectorName=None, timeout=None):
if self.getAddress() != address:
self.address = address
self.getOnChangeEvent().trigger(self, { "type" : "set", "member" : "address", "value" : address })
self.verify(collectorName, timeout)
def getNetmask(self):
return self.netmask
def setNetmask(self, value, collectorName=None, timeout=None):
"""
Change the netmask of this network.
:param value: New netmask
:type value: str
:param collectorName: Name of the collector module setting this value
:type collectorName: str
:param timeout: Timeout the collecor module uses
:type timeout: int
"""
if (value is not None) and (self.getNetmask() != value):
self.netmask = value
self.getOnChangeEvent().trigger(self, { "type" : "set", "member" : "netmask", "value" : value })
self.verify(collectorName, timeout)
    def getPrefix(self):  # generate the CIDR prefix from the dotted-decimal netmask string
return ipAddressHelper.getPrefix(self.getNetmask())
    # Delete stored configurations due to a new scan
def delConfigurations(self, collectorName=None, timeout=None):
self.__configNames = set()
self.verify(collectorName, timeout)
    # Print information in XML format
def toXML(self, root):
        # Create all needed XMLTree elements
networkEl = SubElement(root, "layer3network")
        # Add the items which are available
networkEl.attrib["id"] = self.getID()
networkEl.attrib["netmask"] = self.getNetmask()
networkEl.attrib["address"] = self.getAddress()
|
apache-2.0
| 1,989,012,771,868,093,700
| 35.241935
| 111
| 0.64753
| false
| 4.161111
| false
| false
| false
|
pleeplee-robot/location
|
pleepleeloc/utils.py
|
1
|
4107
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##################################################
# AUTHOR : Loïc Banet #
# SUMMARY : Contain enum class that define color #
##################################################
import math
from enum import Enum
from itertools import count
class Color(Enum):
""" Enum for the color of the LEDs.
There are only 7 colors available to simplify the image processing at the
camera level. The maximum number of LEDs available for a garden is 7.
"""
NONE = 0
RED = 1
GREEN = 2
BLUE = 3
YELLOW = 4
PURPLE = 5
ORANGE = 6
WHITE = 7
class LED:
"""Luminous landmark.
The LED class represents a colored landmark to be put in the garden.
These landmarks are mandatory for the robot to locate itself.
Attributes:
color: The color of the LED, it must be unique. Color Enum instance.
point: The position of the LED in the plan. geometry.point instance.
inPerimeter: True if the LED is on the perimeter. False otherwise.
By default this value is true. If the LED is on the
                     perimeter, an additional filter is applied to the
                     possible location solutions.
height: the difference of height between the robot's camera and the LED.
"""
def __init__(self, color, point, inPerimeter=True, height=0.0):
"""Initialize a LED"""
self.color = color
self.point = point
self.inPerimeter = inPerimeter
self.height = height
def __str__(self):
return "LED(Position: %s ;Color : %s )" % (self.point, self.color)
def _getLED(color, perimeter):
for i in perimeter:
if i.color == color:
return i
raise ValueError('Color not found')
class Data:
"""Utility class to track the data set sent by the camera.
Attributes:
id: A unique integer.
angle: The angle calculated by the position of the blob in
the picture and the position of the camera relative to the
axis of the robot.
distance: The distance computed by the image treatment.
            Ideally this value would be available; in practice, however,
            the varying size of the light blobs depending on their color
            prevents a reliable estimate.
led: A LED class instance.
"""
_ids = count(0)
def __init__(self,
color,
angle,
angleNorth,
angleToDirection,
perimeter,
distance=None):
"""Initialize a Data with adequates angles"""
        # Instance counter: this variable enables us to track the order
        # of initialization of the data items.
self.id = next(self._ids)
# Convert angle from (LED -> Actual direction) to
# (LED -> edge of perimeter)
self.angle = angle + angleToDirection + angleNorth
self.distance = distance
        try:
            self.led = _getLED(color, perimeter)
        except ValueError:
            print('The color does not correspond to an existing LED')
            raise
    # Adjust the distance between the input data and the one calculated
    # from its angle. This function should be tuned with real data to
    # reduce the error contributed by each method.
def adjustDistance(self, dist):
"""Adjusts the distance
As the distance can be computed from the angle, this function
        reconciles the distance already present in the data with the
        one computed from the angle.
Args:
dist: The distance. This value is expected to have been
computed from the angle. (float)
"""
if self.distance is None:
self.distance = dist
else:
theta = math.asin(self.led.height / self.distance)
adjustedDist = math.cos(theta) * self.distance
self.distance = (adjustedDist + dist) / 2
return self.distance
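# Worked example for adjustDistance() (illustrative numbers): with
# led.height = 0.5 and self.distance = 2.0, theta = asin(0.5 / 2.0)
# ~= 0.2527 rad, so the projected ground distance is cos(theta) * 2.0
# ~= 1.937; the method then averages this with the distance `dist`
# derived from the angle.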
|
mit
| 5,753,621,721,867,769,000
| 30.829457
| 80
| 0.585485
| false
| 4.472767
| false
| false
| false
|
Kosinkadink/jno
|
jno/commands/build.py
|
1
|
1575
|
from jno.util import interpret_configs
from jno.util import run_arduino_process
from jno.util import create_build_directory
from jno.util import get_common_parameters
from jno.util import JnoException
from jno.util import verify_arduino_dir
from jno.commands.command import Command
import getopt
class Build(Command):
help_name = "Build"
help_usage = "jno build [-b, --board=] boardname [-v, --verbose]"
help_description = "Runs build. Without arguments, uses board defined locally/globally. With -v, more info will be displayed during build."
def run(self,argv,location):
jno_dict = interpret_configs()
verify_arduino_dir(jno_dict)
create_build_directory(jno_dict)
arg_list = self.perform_build(argv,jno_dict)
run_arduino_process(arg_list)
# Create argument list for arduino build
def perform_build(self,argv,jno_dict):
# assemble command query
# GOAL: <arduino exec> --verify <script> --board <board>
arg_list = [jno_dict["EXEC_SCRIPT"]]
# add common params - set pref
arg_list.extend(get_common_parameters(jno_dict))
# add build params
arg_list.append("--verify")
arg_list.append(jno_dict["SKETCH_INO"])
try:
opts,args = getopt.getopt(argv, 'b:v',['board=','verbose'])
except getopt.GetoptError as e:
raise JnoException(str(e))
for opt, arg in opts:
if opt in ("-b","--board"):
jno_dict["board"] = arg.strip()
elif opt in ("-v","--verbose"):
arg_list.append("--verbose")
# add board params
arg_list.append("--board")
arg_list.append(self.formatBoard(jno_dict["board"],jno_dict))
return arg_list
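# The assembled command is roughly (illustrative values; the actual
# executable and preferences come from the jno configuration):
#   <arduino-exec> <common prefs> --verify sketch.ino --verbose --board <formatted board>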
|
mit
| 5,202,770,822,314,608,000
| 31.142857
| 140
| 0.707302
| false
| 2.988615
| false
| false
| false
|
laramies/theHarvester
|
theHarvester/discovery/takeover.py
|
1
|
3519
|
from theHarvester.lib.core import *
import re
class TakeOver:
def __init__(self, hosts):
# NOTE THIS MODULE IS ACTIVE RECON
self.hosts = hosts
self.results = ""
self.totalresults = ""
self.proxy = False
# Thank you to https://github.com/EdOverflow/can-i-take-over-xyz for these fingerprints
self.fingerprints = {"'Trying to access your account?'": 'Campaign Monitor',
'404 Not Found': 'Fly.io',
'404 error unknown site!': 'Pantheon',
'Do you want to register *.wordpress.com?': 'Wordpress',
'Domain uses DO name serves with no records in DO.': 'Digital Ocean',
"It looks like you may have taken a wrong turn somewhere. Don't worry...it happens to all of us.": 'LaunchRock',
'No Site For Domain': 'Kinsta',
'No settings were found for this company:': 'Help Scout',
'Project doesnt exist... yet!': 'Readme.io',
'Repository not found': 'Bitbucket',
'The feed has not been found.': 'Feedpress',
'No such app': 'Heroku',
'The specified bucket does not exist': 'AWS/S3',
'The thing you were looking for is no longer here, or never was': 'Ghost',
"There isn't a Github Pages site here.": 'Github',
'This UserVoice subdomain is currently available!': 'UserVoice',
"Uh oh. That page doesn't exist.": 'Intercom',
"We could not find what you're looking for.": 'Help Juice',
"Whatever you were looking for doesn't currently exist at this address": 'Tumblr',
'is not a registered InCloud YouTrack': 'JetBrains',
'page not found': 'Uptimerobot',
'project not found': 'Surge.sh'}
async def check(self, url, resp):
        # Simple function that takes a response and checks whether any fingerprint exists.
        # If a fingerprint matches, figures out which one and prints it out.
        # Build one alternation of all escaped fingerprints; the lookahead lets
        # overlapping fingerprints all be reported by findall()
        regex = re.compile("(?=(" + "|".join(map(re.escape, list(self.fingerprints.keys()))) + "))")
        matches = re.findall(regex, resp)
for match in matches:
print(f'\t\033[91m Takeover detected: {url}\033[1;32;40m')
if match in self.fingerprints.keys():
# Sanity check as to not error out
print(f'\t\033[91m Type of takeover is: {self.fingerprints[match]}\033[1;32;40m')
async def do_take(self):
try:
if len(self.hosts) > 0:
tup_resps: list = await AsyncFetcher.fetch_all(self.hosts, takeover=True, proxy=self.proxy)
# Returns a list of tuples in this format: (url, response)
tup_resps = [tup for tup in tup_resps if tup[1] != '']
# Filter out responses whose responses are empty strings (indicates errored)
for url, resp in tup_resps:
await self.check(url, resp)
else:
return
except Exception as e:
print(e)
async def process(self, proxy=False):
self.proxy = proxy
await self.do_take()
|
gpl-2.0
| -1,580,792,400,011,075,600
| 53.138462
| 141
| 0.522876
| false
| 4.420854
| false
| false
| false
|
ziozzang/coreos-stuff
|
sample/etcd-reader.py
|
1
|
1490
|
# This code lets an "internal docker container" read/write values in etcd.
#
# CoreOS (etcd Activated)
# | ^
# | NAT | iptables
# V |
# Docker Container -> python code.
#
# This can be used for a "distributed or HA environment"
import requests
import json
class etcd:
def __init__(self, ips="169.254.169.255", ports=4001):
self.ips = ips
self.ports = ports
def get(self, keys):
try:
urls = "http://%s:%d/v2/keys/%s" % (self.ips, self.ports , keys.strip("/"))
res = requests.get(urls)
if res.status_code == 200: # Found
return json.loads(res.content)["node"]["value"]
elif res.status_code == 404: # Not Found
return None
except:
pass
return None
def put(self, keys, values):
try:
urls = "http://%s:%d/v2/keys/%s" % (self.ips, self.ports , keys.strip("/"))
res = requests.put(urls, {"value": values})
if res.status_code == 200: # Modified
return True
elif res.status_code == 201: # Create
return True
except:
pass
return False
def delete(self, keys):
try:
urls = "http://%s:%d/v2/keys/%s" % (self.ips, self.ports , keys.strip("/"))
res = requests.delete(urls)
if res.status_code == 200:
return True
except:
pass
return False
# Example usage of the etcd helper:
c = etcd()
c.get("asdf")
c.put("asdf","asdf")
c.put("asdf","asdf1")
c.get("asdf")
c.delete("asdf")
|
mit
| 5,701,041,995,374,456,000
| 25.607143
| 81
| 0.567785
| false
| 3.281938
| false
| false
| false
|
lifemapper/core
|
LmBackend/common/lmobj.py
|
1
|
8598
|
"""Module containing the base Lifemapper object class.
"""
import glob
import inspect
import json
import os
import sys
import traceback
from LmCommon.common.lmconstants import LMFormat
# ............................................................................
class LMObject:
"""Base class for all objects in the Lifemapper project.
"""
# ..........................
@staticmethod
def get_line_num():
"""Get the current line number
"""
return inspect.currentframe().f_back.f_lineno
# ..........................
def get_location(self, line_num=None):
"""Get the current location
"""
loc = '{}.{}'.format(__name__, self.__class__.__name__)
if line_num:
loc += ' Line {}'.format(line_num)
return loc
# ..........................
@classmethod
def ready_filename(cls, full_filename, overwrite=False):
"""Prepare a file location for writing by creating needed parent dirs.
Args:
full_filename (str): The file location to prepare.
overwrite (bool): If true, deletes existing file. If false,
returns False.
"""
if full_filename is None:
raise LMError('Full filename is None')
if os.path.exists(full_filename):
if overwrite:
success, _ = cls.delete_file(full_filename)
if not success:
raise LMError('Unable to delete {}'.format(full_filename))
return True
            print('File {} exists, overwrite=False'.format(full_filename))
return False
pth, _ = os.path.split(full_filename)
# If the file path is in cwd we don't need to create directories
if len(pth) == 0:
return True
try:
os.makedirs(pth, 0o775)
except IOError:
pass
if os.path.isdir(pth):
return True
# Else, fail
raise LMError('Failed to create directories {}'.format(pth))
# ..........................
@classmethod
def delete_file(cls, file_name, delete_dir=False):
"""Delete the file if it exists and parent directory if it is empty.
Note:
If file path is a shapefile extension (.shp), delete all other
files that comprise the shapefile.
"""
success = True
msg = ''
if file_name is None:
msg = 'Cannot delete file \'None\''
else:
pth, _ = os.path.split(file_name)
if file_name is not None and os.path.exists(file_name):
base, ext = os.path.splitext(file_name)
if ext == LMFormat.SHAPE.ext:
similar_file_names = glob.glob(base + '.*')
try:
for simfname in similar_file_names:
_, simext = os.path.splitext(simfname)
if simext in LMFormat.SHAPE.get_extensions():
os.remove(simfname)
except Exception as err:
success = False
msg = 'Failed to remove {}, {}'.format(
simfname, str(err))
else:
try:
os.remove(file_name)
except Exception as err:
success = False
msg = 'Failed to remove {}, {}'.format(
file_name, str(err))
if delete_dir and len(os.listdir(pth)) == 0:
try:
os.removedirs(pth)
except Exception as err:
success = False
msg = 'Failed to remove {}, {}'.format(pth, str(err))
return success, msg
# ..........................
@staticmethod
def _add_metadata(new_metadata_dict, existing_metadata_dict=None):
if existing_metadata_dict is None:
existing_metadata_dict = {}
for key, val in new_metadata_dict.items():
try:
existing_val = existing_metadata_dict[key]
except Exception:
existing_metadata_dict[key] = val
else:
# if metadata exists and is ...
if isinstance(existing_val, list):
# a list, add to it
                    if isinstance(val, list):
                        # list.extend() returns None, so build the union explicitly
                        existing_metadata_dict[key] = list(set(existing_val + val))
                    else:
                        # list.append() also returns None; wrap the value instead
                        existing_metadata_dict[key] = list(set(existing_val + [val]))
else:
                    # not a list, replace it
existing_metadata_dict[key] = val
return existing_metadata_dict
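    # Illustrative merge behaviour of _add_metadata:
    #   _add_metadata({'k': ['b']}, {'k': ['a']}) -> {'k': ['a', 'b']} (order not guaranteed)
    #   _add_metadata({'k': 'x'}, {'k': 'y'})     -> {'k': 'x'} (non-list values are replaced)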
# ..........................
@staticmethod
def _dump_metadata(metadata_dict):
metadata_str = None
if metadata_dict:
metadata_str = json.dumps(metadata_dict)
return metadata_str
# ..........................
@staticmethod
def _load_metadata(new_metadata):
"""Read metadata into a dictionary
Args:
new_metadata: dictionary or JSON object of metadata
Returns:
a dictionary of metadata
"""
obj_metadata = {}
if new_metadata is not None:
if isinstance(new_metadata, dict):
obj_metadata = new_metadata
else:
try:
obj_metadata = json.loads(new_metadata)
except Exception:
print(
'Failed to load JSON from type {} object {}'.format(
type(new_metadata), new_metadata))
return obj_metadata
# .............................................................................
class LMError(Exception, LMObject):
"""Base class for exceptions in the lifemapper project.
"""
# ..........................
def __init__(self, *args, do_trace=False, line_num=None, **kwargs):
"""Constructor for LMError
Args:
            *args: Any positional arguments sent to this constructor
do_trace (bool): Should a traceback be attached to the exception
line_num (int): A line number to attach to this exception
            **kwargs: Any additional keyword arguments sent to the constructor
Note:
Assembles all arguments into Exception.args
"""
LMObject.__init__(self)
self.previous_exceptions = []
list_args = []
for arg in args:
if isinstance(arg, Exception):
self.previous_exceptions.append(arg)
else:
list_args.append(arg)
kw_arg_dict = dict(kwargs)
if line_num:
kw_arg_dict['Line number'] = line_num
kw_arg_dict['Location'] = self.get_location(line_num=line_num)
if do_trace:
self.traceback = self.get_traceback()
kw_arg_dict['Traceback'] = self.traceback
list_args.append(kw_arg_dict)
self.args = tuple(list_args)
Exception.__init__(self, self.args)
# ..........................
@staticmethod
def get_traceback():
"""Get the traceback for this exception
"""
exc_type, exc_val, this_traceback = sys.exc_info()
return traceback.format_exception(exc_type, exc_val, this_traceback)
# .............................................................................
class JobError(LMError):
"""Exception class for job failures.
"""
# ..........................
def __init__(self, code, msg, *args, do_trace=False, line_num=None,
**kwargs):
"""Constructor for LMError
Args:
code (int): Job error code
msg (str): An error message
            *args: Any positional arguments sent to this constructor
do_trace (bool): Should a traceback be attached to the exception
line_num (int): A line number to attach to this exception
            **kwargs: Any additional keyword arguments sent to the constructor
Note:
Assembles all arguments into Exception.args
"""
LMError.__init__(
self, code, msg, *args, do_trace=do_trace, line_num=line_num,
**kwargs)
self.code = code
self.msg = msg
|
gpl-3.0
| -5,408,623,335,341,066,000
| 33.669355
| 79
| 0.491626
| false
| 4.726773
| false
| false
| false
|
tbarbugli/django_email_multibackend
|
django_email_multibackend/conditions.py
|
1
|
2706
|
from django.core.mail import EmailMessage
class BaseCondition(object):
def __init__(self, **kwargs):
self.params = kwargs
def __call__(self, message):
if not isinstance(message, (EmailMessage, )):
raise TypeError('%r is not a subclass of django.core.mail.EmailMessage' % message)
return self.check(message)
def check(self, message):
raise NotImplementedError
class MatchAll(BaseCondition):
def check(self, message):
return True
class MatchAny(BaseCondition):
"""
>>> mail = EmailMessage()
>>> mail.extra_headers['X-CAMPAIGN-NAME'] = 'weekly-mail'
>>> MatchAny(\
conditions=(('django_email_multibackend.conditions.FilterMailByHeader', {'header': ('X-CAMPAIGN-NAME', 'daily-mail')}),\
('django_email_multibackend.conditions.FilterMailByHeader', {'header': ('X-CAMPAIGN-NAME', 'weekly-mail')})\
))(mail)
True
"""
def __init__(self, conditions):
from django_email_multibackend.backends import load_class
self.conditions = []
for condition in conditions:
try:
kls_name, params = condition
except ValueError:
kls_name, params = condition[0], {}
self.conditions.append(load_class(kls_name)(**params))
def check(self, message):
for condition in self.conditions:
if condition(message):
return True
return False
class FilterMailByHeader(BaseCondition):
"""
Filter emails by headers
>>> mail = EmailMessage()
>>> mail.extra_headers['X-CAMPAIGN-NAME'] = 'weekly-mail'
>>> FilterMailByHeader(header=('X-CAMPAIGN-NAME', 'weekly-mail'))(mail)
True
>>> FilterMailByHeader(header=('X-CAMPAIGN-NAME', 'daily-mail'))(mail)
False
>>> FilterMailByHeader(header=('X-TRANSACTION-ID', '999'))(mail)
False
"""
def check(self, message):
unset = dict()
header_name, header_value = self.params['header']
mail_header_value = message.extra_headers.get(header_name, unset)
        return (mail_header_value is not unset) and (mail_header_value == header_value)
class ExcludeMailByHeader(FilterMailByHeader):
"""
Exclude emails by headers
>>> mail = EmailMessage()
>>> mail.extra_headers['X-CAMPAIGN-NAME'] = 'weekly-mail'
>>> ExcludeMailByHeader(header=('X-CAMPAIGN-NAME', 'weekly-mail'))(mail)
False
>>> ExcludeMailByHeader(header=('X-CAMPAIGN-NAME', 'daily-mail'))(mail)
True
>>> ExcludeMailByHeader(header=('X-TRANSACTION-ID', '999'))(mail)
True
"""
def check(self, message):
return not super(ExcludeMailByHeader, self).check(message)
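# The doctests above double as a small test suite; a minimal runner:
if __name__ == '__main__':
    import doctest
    doctest.testmod()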
|
isc
| 970,156,628,427,079,700
| 28.736264
| 124
| 0.626016
| false
| 3.753121
| false
| false
| false
|
dmach/dnf
|
dnf/cli/output.py
|
1
|
98515
|
# Copyright 2005 Duke University
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Handle actual output from the cli."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from copy import deepcopy
import fnmatch
import hawkey
import itertools
import libdnf.transaction
import logging
import operator
import pwd
import re
import sys
import time
from dnf.cli.format import format_number, format_time
from dnf.i18n import _, C_, P_, ucd, fill_exact_width, textwrap_fill, exact_width, select_short_long
from dnf.pycomp import xrange, basestring, long, unicode
from dnf.yum.rpmtrans import LoggingTransactionDisplay
from dnf.db.history import MergedTransactionWrapper
import dnf.base
import dnf.callback
import dnf.cli.progress
import dnf.cli.term
import dnf.conf
import dnf.crypto
import dnf.i18n
import dnf.transaction
import dnf.util
import dnf.yum.misc
logger = logging.getLogger('dnf')
def _make_lists(transaction, goal):
b = dnf.util.Bunch({
'downgraded': [],
'erased': [],
'erased_clean': [],
'erased_dep': [],
'installed': [],
'installed_group': [],
'installed_dep': [],
'installed_weak': [],
'reinstalled': [],
'upgraded': [],
'failed': [],
})
for tsi in transaction:
if tsi.state == libdnf.transaction.TransactionItemState_ERROR:
b.failed.append(tsi)
elif tsi.action == libdnf.transaction.TransactionItemAction_DOWNGRADE:
b.downgraded.append(tsi)
elif tsi.action == libdnf.transaction.TransactionItemAction_INSTALL:
if tsi.reason == libdnf.transaction.TransactionItemReason_GROUP:
b.installed_group.append(tsi)
elif tsi.reason == libdnf.transaction.TransactionItemReason_DEPENDENCY:
b.installed_dep.append(tsi)
elif tsi.reason == libdnf.transaction.TransactionItemReason_WEAK_DEPENDENCY:
b.installed_weak.append(tsi)
else:
# TransactionItemReason_USER
b.installed.append(tsi)
elif tsi.action == libdnf.transaction.TransactionItemAction_REINSTALL:
b.reinstalled.append(tsi)
elif tsi.action == libdnf.transaction.TransactionItemAction_REMOVE:
if tsi.reason == libdnf.transaction.TransactionItemReason_CLEAN:
b.erased_clean.append(tsi)
elif tsi.reason == libdnf.transaction.TransactionItemReason_DEPENDENCY:
b.erased_dep.append(tsi)
else:
b.erased.append(tsi)
elif tsi.action == libdnf.transaction.TransactionItemAction_UPGRADE:
b.upgraded.append(tsi)
return b
def _spread_in_columns(cols_count, label, lst):
left = itertools.chain((label,), itertools.repeat(''))
lst_length = len(lst)
right_count = cols_count - 1
missing_items = -lst_length % right_count
if not lst_length:
lst = itertools.repeat('', right_count)
elif missing_items:
lst.extend(('',) * missing_items)
lst_iter = iter(lst)
return list(zip(left, *[lst_iter] * right_count))
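# e.g. _spread_in_columns(3, 'Group:', ['a', 'b', 'c', 'd'])
#   -> [('Group:', 'a', 'b'), ('', 'c', 'd')]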
class Output(object):
"""Main output class for the yum command line."""
GRP_PACKAGE_INDENT = ' ' * 3
FILE_PROVIDE_RE = re.compile(r'^\*{0,2}/')
def __init__(self, base, conf):
self.conf = conf
self.base = base
self.term = dnf.cli.term.Term()
self.progress = None
def _banner(self, col_data, row):
term_width = self.term.columns
rule = '%s' % '=' * term_width
header = self.fmtColumns(zip(row, col_data), ' ')
return rule, header, rule
def _col_widths(self, rows):
col_data = [dict() for _ in rows[0]]
for row in rows:
for (i, val) in enumerate(row):
col_dct = col_data[i]
length = len(val)
col_dct[length] = col_dct.get(length, 0) + 1
cols = self.calcColumns(col_data, None, indent=' ')
# align to the left
return list(map(operator.neg, cols))
def _highlight(self, highlight):
hibeg = ''
hiend = ''
if not highlight:
pass
elif not isinstance(highlight, basestring) or highlight == 'bold':
hibeg = self.term.MODE['bold']
elif highlight == 'normal':
pass # Minor opt.
else:
# Turn a string into a specific output: colour, bold, etc.
for high in highlight.replace(',', ' ').split():
if high == 'normal':
hibeg = ''
elif high in self.term.MODE:
hibeg += self.term.MODE[high]
elif high in self.term.FG_COLOR:
hibeg += self.term.FG_COLOR[high]
elif (high.startswith('fg:') and
high[3:] in self.term.FG_COLOR):
hibeg += self.term.FG_COLOR[high[3:]]
elif (high.startswith('bg:') and
high[3:] in self.term.BG_COLOR):
hibeg += self.term.BG_COLOR[high[3:]]
if hibeg:
hiend = self.term.MODE['normal']
return (hibeg, hiend)
def _sub_highlight(self, haystack, highlight, needles, **kwds):
hibeg, hiend = self._highlight(highlight)
return self.term.sub(haystack, hibeg, hiend, needles, **kwds)
@staticmethod
def _calc_columns_spaces_helps(current, data_tups, left):
""" Spaces left on the current field will help how many pkgs? """
ret = 0
for tup in data_tups:
if left < (tup[0] - current):
break
ret += tup[1]
return ret
@property
def history(self):
return self.base.history
@property
def sack(self):
return self.base.sack
def calcColumns(self, data, columns=None, remainder_column=0,
total_width=None, indent=''):
"""Dynamically calculate the widths of the columns that the
fields in data should be placed into for output.
:param data: a list of dictionaries that represent the data to
be output. Each dictionary in the list corresponds to a
column of output. The keys of the dictionary are the
lengths of the items to be output, and the value associated
with a key is the number of items of that length.
:param columns: a list containing the minimum amount of space
that must be allocated for each row. This can be used to
ensure that there is space available in a column if, for
example, the actual lengths of the items being output
cannot be given in *data*
:param remainder_column: number of the column to receive a few
extra spaces that may remain after other allocation has
taken place
:param total_width: the total width of the output.
self.term.real_columns is used by default
:param indent: string that will be prefixed to a line of
output to create e.g. an indent
:return: a list of the widths of the columns that the fields
in data should be placed into for output
"""
cols = len(data)
# Convert the data to ascending list of tuples, (field_length, pkgs)
pdata = data
data = [None] * cols # Don't modify the passed in data
for d in range(0, cols):
data[d] = sorted(pdata[d].items())
if total_width is None:
total_width = self.term.real_columns
            # The real terminal width is unavailable, so we are probably running
            # in a non-interactive terminal (pipe to grep, redirect to file, ...);
            # avoid splitting lines so the output remains filterable.
if not total_width:
full_columns = []
for col in data:
if col:
full_columns.append(col[-1][0])
else:
full_columns.append(0)
full_columns[0] += len(indent)
# if possible, try to keep default width (usually 80 columns)
default_width = self.term.columns
if sum(full_columns) > default_width:
return full_columns
total_width = default_width
# We start allocating 1 char to everything but the last column, and a
# space between each (again, except for the last column). Because
# at worst we are better with:
# |one two three|
# | four |
# ...than:
# |one two three|
# | f|
# |our |
        # ...the latter being what we get if we pre-allocate the last column, and
        # thus the space, due to "three" overflowing its column by 2 chars.
if columns is None:
columns = [1] * (cols - 1)
columns.append(0)
total_width -= (sum(columns) + (cols - 1) + exact_width(indent))
if not columns[-1]:
total_width += 1
while total_width > 0:
# Find which field all the spaces left will help best
helps = 0
val = 0
for d in xrange(0, cols):
thelps = self._calc_columns_spaces_helps(columns[d], data[d],
total_width)
if not thelps:
continue
# We prefer to overflow: the last column, and then earlier
# columns. This is so that in the best case (just overflow the
# last) ... grep still "works", and then we make it prettier.
if helps and (d == (cols - 1)) and (thelps / 2) < helps:
continue
if thelps < helps:
continue
helps = thelps
val = d
# If we found a column to expand, move up to the next level with
# that column and start again with any remaining space.
if helps:
diff = data[val].pop(0)[0] - columns[val]
if not columns[val] and (val == (cols - 1)):
# If we are going from 0 => N on the last column, take 1
# for the space before the column.
total_width -= 1
columns[val] += diff
total_width -= diff
continue
overflowed_columns = 0
for d in xrange(0, cols):
if not data[d]:
continue
overflowed_columns += 1
if overflowed_columns:
# Split the remaining spaces among each overflowed column
# equally
norm = total_width // overflowed_columns
for d in xrange(0, cols):
if not data[d]:
continue
columns[d] += norm
total_width -= norm
# Split the remaining spaces among each column equally, except the
# last one. And put the rest into the remainder column
cols -= 1
norm = total_width // cols
for d in xrange(0, cols):
columns[d] += norm
columns[remainder_column] += total_width - (cols * norm)
total_width = 0
return columns
@staticmethod
def _fmt_column_align_width(width):
"""Returns tuple of (align_left, width)"""
if width < 0:
return (True, -width)
return (False, width)
def _col_data(self, col_data):
assert len(col_data) == 2 or len(col_data) == 3
if len(col_data) == 2:
(val, width) = col_data
hibeg = hiend = ''
if len(col_data) == 3:
(val, width, highlight) = col_data
(hibeg, hiend) = self._highlight(highlight)
return (ucd(val), width, hibeg, hiend)
def fmtColumns(self, columns, msg=u'', end=u''):
"""Return a row of data formatted into a string for output.
Items can overflow their columns.
:param columns: a list of tuples containing the data to
output. Each tuple contains first the item to be output,
then the amount of space allocated for the column, and then
optionally a type of highlighting for the item
:param msg: a string to begin the line of output with
:param end: a string to end the line of output with
:return: a row of data formatted into a string for output
"""
columns = list(columns)
total_width = len(msg)
data = []
for col_data in columns[:-1]:
(val, width, hibeg, hiend) = self._col_data(col_data)
if not width: # Don't count this column, invisible text
msg += u"%s"
data.append(val)
continue
(align_left, width) = self._fmt_column_align_width(width)
val_width = exact_width(val)
if val_width <= width:
# Don't use fill_exact_width() because it sucks performance
# wise for 1,000s of rows. Also allows us to use len(), when
# we can.
msg += u"%s%s%s%s "
if align_left:
data.extend([hibeg, val, " " * (width - val_width), hiend])
else:
data.extend([hibeg, " " * (width - val_width), val, hiend])
else:
msg += u"%s%s%s\n" + " " * (total_width + width + 1)
data.extend([hibeg, val, hiend])
total_width += width
total_width += 1
(val, width, hibeg, hiend) = self._col_data(columns[-1])
(align_left, width) = self._fmt_column_align_width(width)
val = fill_exact_width(val, width, left=align_left,
prefix=hibeg, suffix=hiend)
msg += u"%%s%s" % end
data.append(val)
return msg % tuple(data)
def simpleList(self, pkg, ui_overflow=False, indent='', highlight=False,
columns=None):
"""Print a package as a line.
:param pkg: the package to be printed
:param ui_overflow: unused
:param indent: string to be prefixed onto the line to provide
e.g. an indent
:param highlight: highlighting options for the name of the
package
        :param columns: tuple containing the space allocated for each
column of output. The columns are the package name, version,
and repository
"""
if columns is None:
columns = (-40, -22, -16) # Old default
na = '%s%s.%s' % (indent, pkg.name, pkg.arch)
hi_cols = [highlight, 'normal', 'normal']
columns = zip((na, pkg.evr, pkg._from_repo), columns, hi_cols)
print(self.fmtColumns(columns))
def simpleEnvraList(self, pkg, ui_overflow=False,
indent='', highlight=False, columns=None):
"""Print a package as a line, with the package itself in envra
format so it can be passed to list/install/etc.
:param pkg: the package to be printed
:param ui_overflow: unused
:param indent: string to be prefixed onto the line to provide
e.g. an indent
:param highlight: highlighting options for the name of the
package
        :param columns: tuple containing the space allocated for each
            column of output. The columns are the package envra and
repository
"""
if columns is None:
columns = (-63, -16) # Old default
envra = '%s%s' % (indent, ucd(pkg))
hi_cols = [highlight, 'normal', 'normal']
rid = pkg.ui_from_repo
columns = zip((envra, rid), columns, hi_cols)
print(self.fmtColumns(columns))
def simple_name_list(self, pkg):
"""Print a package as a line containing its name."""
print(ucd(pkg.name))
def simple_nevra_list(self, pkg):
"""Print a package as a line containing its NEVRA."""
print(ucd(pkg))
def fmtKeyValFill(self, key, val):
"""Return a key value pair in the common two column output
format.
:param key: the key to be formatted
:param val: the value associated with *key*
:return: the key value pair formatted in two columns for output
"""
keylen = exact_width(key)
cols = self.term.columns
nxt = ' ' * (keylen - 2) + ': '
if not val:
# textwrap.fill in case of empty val returns empty string
return key
val = ucd(val)
ret = textwrap_fill(val, width=cols, initial_indent=key,
subsequent_indent=nxt)
if ret.count("\n") > 1 and keylen > (cols // 3):
# If it's big, redo it again with a smaller subsequent off
ret = textwrap_fill(val, width=cols, initial_indent=key,
subsequent_indent=' ...: ')
return ret
def fmtSection(self, name, fill='='):
"""Format and return a section header. The format of the
header is a line with *name* centred, and *fill* repeated on
either side to fill an entire line on the terminal.
:param name: the name of the section
:param fill: the character to repeat on either side of *name*
to fill an entire line. *fill* must be a single character.
:return: a string formatted to be a section header
"""
name = ucd(name)
cols = self.term.columns - 2
name_len = exact_width(name)
if name_len >= (cols - 4):
beg = end = fill * 2
else:
beg = fill * ((cols - name_len) // 2)
end = fill * (cols - name_len - len(beg))
return "%s %s %s" % (beg, name, end)
def infoOutput(self, pkg, highlight=False):
"""Print information about the given package.
:param pkg: the package to print information about
        :param highlight: highlighting options for the name of the
package
"""
def format_key_val(key, val):
return " ".join([fill_exact_width(key, 12, 12), ":", str(val)])
def format_key_val_fill(key, val):
return self.fmtKeyValFill(fill_exact_width(key, 12, 12) + " : ", val or "")
output_list = []
(hibeg, hiend) = self._highlight(highlight)
# Translators: This is abbreviated 'Name'. Should be no longer
# than 12 characters. You can use the full version if it is short
# enough in your language.
key = select_short_long(12, C_("short", "Name"),
C_("long", "Name"))
output_list.append(format_key_val(key,
"%s%s%s" % (hibeg, pkg.name, hiend)))
if pkg.epoch:
# Translators: This message should be no longer than 12 characters.
output_list.append(format_key_val(_("Epoch"), pkg.epoch))
key = select_short_long(12, C_("short", "Version"),
C_("long", "Version"))
output_list.append(format_key_val(key, pkg.version))
# Translators: This message should be no longer than 12 characters.
output_list.append(format_key_val(_("Release"), pkg.release))
key = select_short_long(12, C_("short", "Arch"),
C_("long", "Architecture"))
output_list.append(format_key_val(key, pkg.arch))
key = select_short_long(12, C_("short", "Size"), C_("long", "Size"))
output_list.append(format_key_val(key,
format_number(float(pkg._size))))
# Translators: This message should be no longer than 12 characters.
output_list.append(format_key_val(_("Source"), pkg.sourcerpm))
key = select_short_long(12, C_("short", "Repo"),
C_("long", "Repository"))
output_list.append(format_key_val(key, pkg.repoid))
if pkg._from_system:
history_repo = self.history.repo(pkg)
if history_repo:
# Translators: This message should be no longer than 12 chars.
output_list.append(format_key_val(_("From repo"), history_repo))
if self.conf.verbose:
# :hawkey does not support changelog information
# print(_("Committer : %s") % ucd(pkg.committer))
# print(_("Committime : %s") % time.ctime(pkg.committime))
# Translators: This message should be no longer than 12 characters.
output_list.append(format_key_val(_("Packager"), pkg.packager))
# Translators: This message should be no longer than 12 characters.
output_list.append(format_key_val(_("Buildtime"),
dnf.util.normalize_time(pkg.buildtime)))
if pkg.installtime:
# Translators: This message should be no longer than 12 characters.
output_list.append(format_key_val(_("Install time"),
dnf.util.normalize_time(pkg.installtime)))
history_pkg = self.history.package_data(pkg)
if history_pkg:
try:
uid = int(history_pkg._item.getInstalledBy())
except ValueError: # In case int() fails
uid = None
# Translators: This message should be no longer than 12 chars.
output_list.append(format_key_val(_("Installed by"), self._pwd_ui_username(uid)))
# Translators: This is abbreviated 'Summary'. Should be no longer
# than 12 characters. You can use the full version if it is short
# enough in your language.
key = select_short_long(12, C_("short", "Summary"),
C_("long", "Summary"))
output_list.append(format_key_val_fill(key, pkg.summary))
if pkg.url:
output_list.append(format_key_val(_("URL"), ucd(pkg.url)))
# Translators: This message should be no longer than 12 characters.
output_list.append(format_key_val_fill(_("License"), pkg.license))
# Translators: This is abbreviated 'Description'. Should be no longer
# than 12 characters. You can use the full version if it is short
# enough in your language.
key = select_short_long(12, C_("short", "Description"),
C_("long", "Description"))
output_list.append(format_key_val_fill(key, pkg.description))
return "\n".join(output_list)
def updatesObsoletesList(self, uotup, changetype, columns=None):
"""Print a simple string that explains the relationship
between the members of an update or obsoletes tuple.
:param uotup: an update or obsoletes tuple. The first member
is the new package, and the second member is the old
package
:param changetype: a string indicating what the change between
the packages is, e.g. 'updates' or 'obsoletes'
:param columns: a tuple containing information about how to
format the columns of output. The absolute value of each
number in the tuple indicates how much space has been
allocated for the corresponding column. If the number is
negative, the text in the column will be left justified,
and if it is positive, the text will be right justified.
The columns of output are the package name, version, and repository
"""
(changePkg, instPkg) = uotup
if columns is not None:
# New style, output all info. for both old/new with old indented
chi = self.conf.color_update_remote
if changePkg.reponame != hawkey.SYSTEM_REPO_NAME:
chi = self.conf.color_update_local
self.simpleList(changePkg, columns=columns, highlight=chi)
self.simpleList(instPkg, columns=columns, indent=' ' * 4,
highlight=self.conf.color_update_installed)
return
# Old style
c_compact = changePkg.compactPrint()
i_compact = '%s.%s' % (instPkg.name, instPkg.arch)
c_repo = changePkg.repoid
print('%-35.35s [%.12s] %.10s %-20.20s' %
(c_compact, c_repo, changetype, i_compact))
def listPkgs(self, lst, description, outputType, highlight_na={},
columns=None, highlight_modes={}):
"""Prints information about the given list of packages.
:param lst: a list of packages to print information about
:param description: string describing what the list of
packages contains, e.g. 'Available Packages'
:param outputType: The type of information to be printed.
Current options::
'list' - simple pkg list
'info' - similar to rpm -qi output
'name' - simple name list
'nevra' - simple nevra list
:param highlight_na: a dictionary containing information about
packages that should be highlighted in the output. The
dictionary keys are (name, arch) tuples for the package,
and the associated values are the package objects
themselves.
:param columns: a tuple containing information about how to
format the columns of output. The absolute value of each
number in the tuple indicates how much space has been
allocated for the corresponding column. If the number is
negative, the text in the column will be left justified,
and if it is positive, the text will be right justified.
The columns of output are the package name, version, and
repository
:param highlight_modes: dictionary containing information
about how to highlight the packages in *highlight_na*.
*highlight_modes* should contain the following keys::
'not in' - highlighting used for packages not in *highlight_na*
'=' - highlighting used when the package versions are equal
'<' - highlighting used when the package has a lower version
number
'>' - highlighting used when the package has a higher version
number
:return: (exit_code, [errors])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
"""
if outputType in ['list', 'info', 'name', 'nevra']:
thingslisted = 0
if len(lst) > 0:
thingslisted = 1
print('%s' % description)
info_set = set()
if outputType == 'list':
unique_item_dict = {}
for pkg in lst:
unique_item_dict[str(pkg) + str(pkg._from_repo)] = pkg
lst = unique_item_dict.values()
for pkg in sorted(lst):
key = (pkg.name, pkg.arch)
highlight = False
if key not in highlight_na:
highlight = highlight_modes.get('not in', 'normal')
elif pkg.evr_eq(highlight_na[key]):
highlight = highlight_modes.get('=', 'normal')
elif pkg.evr_lt(highlight_na[key]):
highlight = highlight_modes.get('>', 'bold')
else:
highlight = highlight_modes.get('<', 'normal')
if outputType == 'list':
self.simpleList(pkg, ui_overflow=True,
highlight=highlight, columns=columns)
elif outputType == 'info':
info_set.add(self.infoOutput(pkg, highlight=highlight) + "\n")
elif outputType == 'name':
self.simple_name_list(pkg)
elif outputType == 'nevra':
self.simple_nevra_list(pkg)
else:
pass
if info_set:
print("\n".join(sorted(info_set)))
if thingslisted == 0:
return 1, [_('No packages to list')]
return 0, []
def userconfirm(self, msg=None, defaultyes_msg=None):
"""Get a yes or no from the user, and default to No
:param msg: prompt string for the [y/N] case
:param defaultyes_msg: prompt string for the [Y/n] case
:return: True if the user selects yes, and False if the user
selects no
"""
yui = (ucd(_('y')), ucd(_('yes')))
nui = (ucd(_('n')), ucd(_('no')))
aui = yui + nui
while True:
if msg is None:
msg = _('Is this ok [y/N]: ')
choice = ''
if self.conf.defaultyes:
if defaultyes_msg is None:
msg = _('Is this ok [Y/n]: ')
else:
msg = defaultyes_msg
try:
choice = dnf.i18n.ucd_input(msg)
except EOFError:
pass
except KeyboardInterrupt:
choice = nui[0]
choice = ucd(choice).lower()
if len(choice) == 0:
choice = yui[0] if self.conf.defaultyes else nui[0]
if choice in aui:
break
# If the English one letter names don't mix with the translated
# letters, allow them too:
if u'y' == choice and u'y' not in aui:
choice = yui[0]
break
if u'n' == choice and u'n' not in aui:
choice = nui[0]
break
if choice in yui:
return True
return False
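# Usage note (illustrative): with conf.defaultyes unset, pressing Enter at
# 'Is this ok [y/N]: ' returns False, while 'y'/'yes' (or their translated
# equivalents) return True; a KeyboardInterrupt is treated as 'n'.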
def _pkgs2name_dict(self, sections):
installed = self.sack.query().installed()._name_dict()
available = self.sack.query().available()._name_dict()
d = {}
for pkg_name in itertools.chain(*list(zip(*sections))[1]):
if pkg_name in installed:
d[pkg_name] = installed[pkg_name][0]
elif pkg_name in available:
d[pkg_name] = available[pkg_name][0]
return d
def _pkgs2col_lengths(self, sections, name_dict):
nevra_lengths = {}
repo_lengths = {}
for pkg_name in itertools.chain(*list(zip(*sections))[1]):
pkg = name_dict.get(pkg_name)
if pkg is None:
continue
nevra_l = exact_width(ucd(pkg)) + exact_width(self.GRP_PACKAGE_INDENT)
repo_l = exact_width(ucd(pkg.reponame))
nevra_lengths[nevra_l] = nevra_lengths.get(nevra_l, 0) + 1
repo_lengths[repo_l] = repo_lengths.get(repo_l, 0) + 1
return (nevra_lengths, repo_lengths)
def _display_packages(self, pkg_names):
for name in pkg_names:
print('%s%s' % (self.GRP_PACKAGE_INDENT, name))
def _display_packages_verbose(self, pkg_names, name_dict, columns):
for name in pkg_names:
try:
pkg = name_dict[name]
except KeyError:
# package not in any repo -> print only package name
print('%s%s' % (self.GRP_PACKAGE_INDENT, name))
continue
highlight = False
if not pkg._from_system:
highlight = self.conf.color_list_available_install
self.simpleEnvraList(pkg, ui_overflow=True,
indent=self.GRP_PACKAGE_INDENT,
highlight=highlight,
columns=columns)
def display_pkgs_in_groups(self, group):
"""Output information about the packages in a given group
:param group: a Group object to output information about
"""
def names(packages):
return sorted(pkg.name for pkg in packages)
print('\n' + _('Group: %s') % group.ui_name)
verbose = self.conf.verbose
if verbose:
print(_(' Group-Id: %s') % ucd(group.id))
if group.ui_description:
print(_(' Description: %s') % (ucd(group.ui_description) or ""))
if group.lang_only:
print(_(' Language: %s') % group.lang_only)
sections = (
(_(' Mandatory Packages:'), names(group.mandatory_packages)),
(_(' Default Packages:'), names(group.default_packages)),
(_(' Optional Packages:'), names(group.optional_packages)),
(_(' Conditional Packages:'), names(group.conditional_packages)))
if verbose:
name_dict = self._pkgs2name_dict(sections)
col_lengths = self._pkgs2col_lengths(sections, name_dict)
columns = self.calcColumns(col_lengths)
columns = (-columns[0], -columns[1])
for (section_name, packages) in sections:
if len(packages) < 1:
continue
print(section_name)
self._display_packages_verbose(packages, name_dict, columns)
else:
for (section_name, packages) in sections:
if len(packages) < 1:
continue
print(section_name)
self._display_packages(packages)
def display_groups_in_environment(self, environment):
"""Output information about the packages in a given environment
:param environment: an Environment object to output information about
"""
def names(groups):
return sorted(group.name for group in groups)
print(_('Environment Group: %s') % environment.ui_name)
if self.conf.verbose:
print(_(' Environment-Id: %s') % ucd(environment.id))
if environment.ui_description:
description = ucd(environment.ui_description) or ""
print(_(' Description: %s') % description)
sections = (
(_(' Mandatory Groups:'), names(environment.mandatory_groups)),
(_(' Optional Groups:'), names(environment.optional_groups)))
for (section_name, packages) in sections:
if len(packages) < 1:
continue
print(section_name)
self._display_packages(packages)
def matchcallback(self, po, values, matchfor=None, verbose=None,
highlight=None):
"""Output search/provides type callback matches.
:param po: the package object that matched the search
:param values: the information associated with *po* that
matched the search
:param matchfor: a list of strings to be highlighted in the
output
:param verbose: whether to output extra verbose information
:param highlight: highlighting options for the highlighted matches
"""
def print_highlighted_key_item(key, item, printed_headline, can_overflow=False):
if not printed_headline:
print(_('Matched from:'))
item = ucd(item) or ""
if item == "":
return
if matchfor:
item = self._sub_highlight(item, highlight, matchfor, ignore_case=True)
if can_overflow:
print(self.fmtKeyValFill(key, item))
else:
print(key % item)
def print_file_provides(item, printed_match):
if not self.FILE_PROVIDE_RE.match(item):
return False
key = _("Filename : %s")
file_match = False
for filename in po.files:
if fnmatch.fnmatch(filename, item):
print_highlighted_key_item(
key, filename, file_match or printed_match, can_overflow=False)
file_match = True
return file_match
if self.conf.showdupesfromrepos:
msg = '%s : ' % po
else:
msg = '%s.%s : ' % (po.name, po.arch)
msg = self.fmtKeyValFill(msg, po.summary or "")
if matchfor:
if highlight is None:
highlight = self.conf.color_search_match
msg = self._sub_highlight(msg, highlight, matchfor, ignore_case=True)
print(msg)
if verbose is None:
verbose = self.conf.verbose
if not verbose:
return
print(_("Repo : %s") % po.ui_from_repo)
printed_match = False
name_match = False
for item in set(values):
if po.summary == item:
name_match = True
continue # Skip double name/summary printing
if po.description == item:
key = _("Description : ")
print_highlighted_key_item(key, item, printed_match, can_overflow=True)
printed_match = True
elif po.url == item:
key = _("URL : %s")
print_highlighted_key_item(key, item, printed_match, can_overflow=False)
printed_match = True
elif po.license == item:
key = _("License : %s")
print_highlighted_key_item(key, item, printed_match, can_overflow=False)
printed_match = True
elif print_file_provides(item, printed_match):
printed_match = True
else:
key = _("Provide : %s")
for provide in po.provides:
provide = str(provide)
if fnmatch.fnmatch(provide, item):
print_highlighted_key_item(key, provide, printed_match, can_overflow=False)
printed_match = True
else:
first_provide = provide.split()[0]
possible = set('=<>')
if any((char in possible) for char in item):
item_new = item.split()[0]
else:
item_new = item
if fnmatch.fnmatch(first_provide, item_new):
print_highlighted_key_item(
key, provide, printed_match, can_overflow=False)
printed_match = True
if not any([printed_match, name_match]):
for item in set(values):
key = _("Other : %s")
print_highlighted_key_item(key, item, printed_match, can_overflow=False)
print()
def matchcallback_verbose(self, po, values, matchfor=None):
"""Output search/provides type callback matches. This will
output more information than :func:`matchcallback`.
:param po: the package object that matched the search
:param values: the information associated with *po* that
matched the search
:param matchfor: a list of strings to be highlighted in the
output
"""
return self.matchcallback(po, values, matchfor, verbose=True)
def reportDownloadSize(self, packages, installonly=False):
"""Report the total download size for a set of packages
:param packages: a list of package objects
:param installonly: whether the transaction consists only of installations
"""
totsize = 0
locsize = 0
insize = 0
error = False
for pkg in packages:
# Just to be on the safe side, if for some reason getting
# the package size fails, log the error and don't report download
# size
try:
size = int(pkg._size)
totsize += size
try:
if pkg.verifyLocalPkg():
locsize += size
except Exception:
pass
if not installonly:
continue
try:
size = int(pkg.installsize)
except Exception:
pass
insize += size
except Exception:
error = True
msg = _('There was an error calculating total download size')
logger.error(msg)
break
if not error:
if locsize:
logger.info(_("Total size: %s"),
format_number(totsize))
if locsize != totsize:
logger.info(_("Total download size: %s"),
format_number(totsize - locsize))
if installonly:
logger.info(_("Installed size: %s"), format_number(insize))
def reportRemoveSize(self, packages):
"""Report the total size of packages being removed.
:param packages: a list of package objects
"""
totsize = 0
error = False
for pkg in packages:
# Just to be on the safe side, if for some reason getting
# the package size fails, log the error and don't report download
# size
try:
size = pkg._size
totsize += size
except Exception:
error = True
msg = _('There was an error calculating installed size')
logger.error(msg)
break
if not error:
logger.info(_("Freed space: %s"), format_number(totsize))
def list_group_transaction(self, comps, history, diff):
if not diff:
return None
out = []
rows = []
if diff.new_groups:
out.append(_('Marking packages as installed by the group:'))
for grp_id in diff.new_groups:
pkgs = list(diff.added_packages(grp_id))
group_object = comps._group_by_id(grp_id)
grp_name = group_object.ui_name if group_object else grp_id
rows.extend(_spread_in_columns(4, "@" + grp_name, pkgs))
if diff.removed_groups:
out.append(_('Marking packages as removed by the group:'))
for grp_id in diff.removed_groups:
pkgs = list(diff.removed_packages(grp_id))
grp_name = history.group.get(grp_id).ui_name
rows.extend(_spread_in_columns(4, "@" + grp_name, pkgs))
if rows:
col_data = self._col_widths(rows)
for row in rows:
out.append(self.fmtColumns(zip(row, col_data), ' '))
out[0:0] = self._banner(col_data, (_('Group'), _('Packages'), '', ''))
return '\n'.join(out)
def _skipped_packages(self, report_problems):
"""returns set of conflicting packages and set of packages with broken dependency that would
be additionally installed when --best and --allowerasing"""
if self.base._goal.actions & (hawkey.INSTALL | hawkey.UPGRADE | hawkey.UPGRADE_ALL):
best = True
else:
best = False
ng = deepcopy(self.base._goal)
params = {"allow_uninstall": self.base._allow_erasing,
"force_best": best,
"ignore_weak": True}
ret = ng.run(**params)
if not ret and report_problems:
msg = dnf.util._format_resolve_problems(ng.problem_rules())
logger.warning(msg)
problem_conflicts = set(ng.problem_conflicts(available=True))
problem_dependency = set(ng.problem_broken_dependency(available=True)) - problem_conflicts
return problem_conflicts, problem_dependency
def list_transaction(self, transaction):
"""Return a string representation of the transaction in an
easy-to-read format.
"""
forward_actions = hawkey.UPGRADE | hawkey.UPGRADE_ALL | hawkey.DISTUPGRADE | \
hawkey.DISTUPGRADE_ALL | hawkey.DOWNGRADE | hawkey.INSTALL
skipped_conflicts = set()
skipped_broken = set()
if transaction is None:
# set empty transaction list instead of returning None
# in order to display module changes when RPM transaction is empty
transaction = []
list_bunch = _make_lists(transaction, self.base._goal)
pkglist_lines = []
data = {'n' : {}, 'v' : {}, 'r' : {}}
a_wid = 0 # Arch can't get "that big" ... so always use the max.
def _add_line(lines, data, a_wid, po, obsoletes=[]):
(n, a, e, v, r) = po.pkgtup
evr = po.evr
repoid = po._from_repo
size = format_number(po._size)
if a is None: # gpgkeys are weird
a = 'noarch'
# none, partial, full?
if po._from_system:
hi = self.conf.color_update_installed
elif po._from_cmdline:
hi = self.conf.color_update_local
else:
hi = self.conf.color_update_remote
lines.append((n, a, evr, repoid, size, obsoletes, hi))
# Create a dict of field_length => number of packages, for
# each field.
for (d, v) in (("n", len(n)), ("v", len(evr)), ("r", len(repoid))):
data[d].setdefault(v, 0)
data[d][v] += 1
a_wid = max(a_wid, len(a))
return a_wid
ins_group_msg = _('Installing group/module packages') if dnf.base.WITH_MODULES \
else _('Installing group packages')
for (action, pkglist) in [
# TRANSLATORS: This is for a list of packages to be installed.
(C_('summary', 'Installing'), list_bunch.installed),
# TRANSLATORS: This is for a list of packages to be upgraded.
(C_('summary', 'Upgrading'), list_bunch.upgraded),
# TRANSLATORS: This is for a list of packages to be reinstalled.
(C_('summary', 'Reinstalling'), list_bunch.reinstalled),
(ins_group_msg, list_bunch.installed_group),
(_('Installing dependencies'), list_bunch.installed_dep),
(_('Installing weak dependencies'), list_bunch.installed_weak),
# TRANSLATORS: This is for a list of packages to be removed.
(_('Removing'), list_bunch.erased),
(_('Removing dependent packages'), list_bunch.erased_dep),
(_('Removing unused dependencies'), list_bunch.erased_clean),
# TRANSLATORS: This is for a list of packages to be downgraded.
(C_('summary', 'Downgrading'), list_bunch.downgraded)]:
lines = []
# build a reverse mapping to 'replaced_by'
# this is required to achieve reasonable speed
replaces = {}
for tsi in transaction:
if tsi.action != libdnf.transaction.TransactionItemAction_OBSOLETED:
continue
for i in tsi._item.getReplacedBy():
replaces.setdefault(i, set()).add(tsi)
for tsi in pkglist:
if tsi.action not in dnf.transaction.FORWARD_ACTIONS + [libdnf.transaction.TransactionItemAction_REMOVE]:
continue
# get TransactionItems obsoleted by tsi
obsoleted = sorted(replaces.get(tsi._item, []))
a_wid = _add_line(lines, data, a_wid, tsi.pkg, obsoleted)
pkglist_lines.append((action, lines))
installedProfiles = sorted(dict(self.base._moduleContainer.getInstalledProfiles()).items())
if installedProfiles:
action = _("Installing module profiles")
lines = []
for name, profiles in installedProfiles:
for profile in list(profiles):
lines.append(("%s/%s" % (name, profile), "", "", "", "", "", ""))
pkglist_lines.append((action, lines))
removedProfiles = sorted(dict(self.base._moduleContainer.getRemovedProfiles()).items())
if removedProfiles:
action = _("Disabling module profiles")
lines = []
for name, profiles in removedProfiles:
for profile in list(profiles):
lines.append(("%s/%s" % (name, profile), "", "", "", "", "", ""))
pkglist_lines.append((action, lines))
enabledStreams = sorted(dict(self.base._moduleContainer.getEnabledStreams()).items())
if enabledStreams:
action = _("Enabling module streams")
lines = []
for name, stream in enabledStreams:
lines.append((name, "", stream, "", "", "", ""))
pkglist_lines.append((action, lines))
switchedStreams = sorted(dict(self.base._moduleContainer.getSwitchedStreams()).items())
if switchedStreams:
action = _("Switching module streams")
lines = []
for name, stream in switchedStreams:
lines.append((name, "", "%s -> %s" % (stream[0], stream[1]), "", "", "", ""))
pkglist_lines.append((action, lines))
disabledModules = sorted(list(self.base._moduleContainer.getDisabledModules()))
if disabledModules:
action = _("Disabling modules")
lines = []
for name in disabledModules:
lines.append((name, "", "", "", "", "", ""))
pkglist_lines.append((action, lines))
resetModules = sorted(list(self.base._moduleContainer.getResetModules()))
if resetModules:
action = _("Resetting modules")
lines = []
for name in resetModules:
lines.append((name, "", "", "", "", "", ""))
pkglist_lines.append((action, lines))
if self.base._history:
install_env_group = self.base._history.env._installed
if install_env_group:
action = _("Installing Environment Groups")
lines = []
for group in install_env_group.values():
lines.append((group.getName(), "", "", "", "", "", ""))
pkglist_lines.append((action, lines))
upgrade_env_group = self.base._history.env._upgraded
if upgrade_env_group:
action = _("Upgrading Environment Groups")
lines = []
for group in upgrade_env_group.values():
lines.append((group.getName(), "", "", "", "", "", ""))
pkglist_lines.append((action, lines))
remove_env_group = self.base._history.env._removed
if remove_env_group:
action = _("Removing Environment Groups")
lines = []
for group in remove_env_group.values():
lines.append((group.getName(), "", "", "", "", "", ""))
pkglist_lines.append((action, lines))
install_group = self.base._history.group._installed
if install_group:
action = _("Installing Groups")
lines = []
for group in install_group.values():
lines.append((group.getName(), "", "", "", "", "", ""))
pkglist_lines.append((action, lines))
upgrade_group = self.base._history.group._upgraded
if upgrade_group:
action = _("Upgrading Groups")
lines = []
for group in upgrade_group.values():
lines.append((group.getName(), "", "", "", "", "", ""))
pkglist_lines.append((action, lines))
remove_group = self.base._history.group._removed
if remove_group:
action = _("Removing Groups")
lines = []
for group in remove_group.values():
lines.append((group.getName(), "", "", "", "", "", ""))
pkglist_lines.append((action, lines))
# show skipped conflicting packages
if not self.conf.best and self.base._goal.actions & forward_actions:
lines = []
skipped_conflicts, skipped_broken = self._skipped_packages(report_problems=True)
for pkg in sorted(skipped_conflicts):
a_wid = _add_line(lines, data, a_wid, pkg, [])
recommendations = ["--best"]
if not self.base._allow_erasing:
recommendations.append("--allowerasing")
skip_str = _("Skipping packages with conflicts:\n"
"(add '%s' to command line "
"to force their upgrade)") % " ".join(recommendations)
pkglist_lines.append((skip_str, lines))
lines = []
for pkg in sorted(skipped_broken):
a_wid = _add_line(lines, data, a_wid, pkg, [])
skip_str = _("Skipping packages with broken dependencies%s")
if self.base.conf.upgrade_group_objects_upgrade:
skip_str = skip_str % ""
else:
skip_str = skip_str % _(" or part of a group")
pkglist_lines.append((skip_str, lines))
if not data['n'] and not self.base._moduleContainer.isChanged() and not \
(self.base._history and (self.base._history.group or self.base._history.env)):
return u''
else:
data = [data['n'], {}, data['v'], data['r'], {}]
columns = [1, a_wid, 1, 1, 5]
columns = self.calcColumns(data, indent=" ", columns=columns,
remainder_column=2)
(n_wid, a_wid, v_wid, r_wid, s_wid) = columns
# Do not use 'Package' without context. Using context resolves
# RhBug 1302935 as a side effect.
msg_package = select_short_long(n_wid,
# Translators: This is the short version of 'Package'. You can
# use the full (unabbreviated) term 'Package' if you think that
# the translation to your language is not too long and will
# always fit to limited space.
C_('short', 'Package'),
# Translators: This is the full (unabbreviated) term 'Package'.
C_('long', 'Package'))
msg_arch = select_short_long(a_wid,
# Translators: This is abbreviated 'Architecture', used when
# we have not enough space to display the full word.
C_('short', 'Arch'),
# Translators: This is the full word 'Architecture', used when
# we have enough space.
C_('long', 'Architecture'))
msg_version = select_short_long(v_wid,
# Translators: This is the short version of 'Version'. You can
# use the full (unabbreviated) term 'Version' if you think that
# the translation to your language is not too long and will
# always fit to limited space.
C_('short', 'Version'),
# Translators: This is the full (unabbreviated) term 'Version'.
C_('long', 'Version'))
msg_repository = select_short_long(r_wid,
# Translators: This is abbreviated 'Repository', used when
# we have not enough space to display the full word.
C_('short', 'Repo'),
# Translators: This is the full word 'Repository', used when
# we have enough space.
C_('long', 'Repository'))
msg_size = select_short_long(s_wid,
# Translators: This is the short version of 'Size'. It should
# not be longer than 5 characters. If the term 'Size' in your
# language is not longer than 5 characters then you can use it
# unabbreviated.
C_('short', 'Size'),
# Translators: This is the full (unabbreviated) term 'Size'.
C_('long', 'Size'))
out = [u"%s\n%s\n%s\n" % ('=' * self.term.columns,
self.fmtColumns(((msg_package, -n_wid),
(msg_arch, -a_wid),
(msg_version, -v_wid),
(msg_repository, -r_wid),
(msg_size, s_wid)), u" "),
'=' * self.term.columns)]
for (action, lines) in pkglist_lines:
if lines:
totalmsg = u"%s:\n" % action
for (n, a, evr, repoid, size, obsoletes, hi) in lines:
columns = ((n, -n_wid, hi), (a, -a_wid),
(evr, -v_wid), (repoid, -r_wid), (size, s_wid))
msg = self.fmtColumns(columns, u" ", u"\n")
hibeg, hiend = self._highlight(self.conf.color_update_installed)
for obspo in sorted(obsoletes):
appended = ' ' + _('replacing') + ' %s%s%s.%s %s\n'
appended %= (hibeg, obspo.name, hiend, obspo.arch, obspo.evr)
msg += appended
totalmsg = totalmsg + msg
if lines:
out.append(totalmsg)
out.append(_("""
Transaction Summary
%s
""") % ('=' * self.term.columns))
summary_data = (
(_('Install'), len(list_bunch.installed) +
len(list_bunch.installed_group) +
len(list_bunch.installed_weak) +
len(list_bunch.installed_dep), 0),
(_('Upgrade'), len(list_bunch.upgraded), 0),
(_('Remove'), len(list_bunch.erased) + len(list_bunch.erased_dep) +
len(list_bunch.erased_clean), 0),
(_('Downgrade'), len(list_bunch.downgraded), 0),
(_('Skip'), len(skipped_conflicts) + len(skipped_broken), 0))
max_msg_action = 0
max_msg_count = 0
max_msg_pkgs = 0
max_msg_depcount = 0
for action, count, depcount in summary_data:
if not count and not depcount:
continue
msg_pkgs = P_('Package', 'Packages', count)
len_msg_action = exact_width(action)
len_msg_count = exact_width(unicode(count))
len_msg_pkgs = exact_width(msg_pkgs)
if depcount:
len_msg_depcount = exact_width(unicode(depcount))
else:
len_msg_depcount = 0
max_msg_action = max(len_msg_action, max_msg_action)
max_msg_count = max(len_msg_count, max_msg_count)
max_msg_pkgs = max(len_msg_pkgs, max_msg_pkgs)
max_msg_depcount = max(len_msg_depcount, max_msg_depcount)
for action, count, depcount in summary_data:
msg_pkgs = P_('Package', 'Packages', count)
if depcount:
msg_deppkgs = P_('Dependent package', 'Dependent packages',
depcount)
action_msg = fill_exact_width(action, max_msg_action)
if count:
msg = '%s %*d %s (+%*d %s)\n'
out.append(msg % (action_msg,
max_msg_count, count,
"%-*s" % (max_msg_pkgs, msg_pkgs),
max_msg_depcount, depcount, msg_deppkgs))
else:
msg = '%s %s ( %*d %s)\n'
out.append(msg % (action_msg,
(max_msg_count + max_msg_pkgs) * ' ',
max_msg_depcount, depcount, msg_deppkgs))
elif count:
msg = '%s %*d %s\n'
out.append(msg % (fill_exact_width(action, max_msg_action),
max_msg_count, count, msg_pkgs))
return ''.join(out)
def post_transaction_output(self, transaction):
"""Returns a human-readable summary of the results of the
transaction.
:return: a string containing a human-readable summary of the
results of the transaction
"""
# Works a bit like calcColumns, but we never overflow a column we just
# have a dynamic number of columns.
def _fits_in_cols(msgs, num):
""" Work out how many columns we can use to display stuff, in
the post trans output. """
if len(msgs) < num:
return []
left = self.term.columns - ((num - 1) + 2)
if left <= 0:
return []
col_lens = [0] * num
col = 0
for msg in msgs:
if len(msg) > col_lens[col]:
diff = (len(msg) - col_lens[col])
if left <= diff:
return []
left -= diff
col_lens[col] = len(msg)
col += 1
col %= len(col_lens)
for col in range(len(col_lens)):
col_lens[col] += left // num
col_lens[col] *= -1
return col_lens
out = ''
list_bunch = _make_lists(transaction, self.base._goal)
skipped_conflicts, skipped_broken = self._skipped_packages(report_problems=False)
skipped = skipped_conflicts.union(skipped_broken)
skipped = sorted(set([str(pkg) for pkg in skipped]))
for (action, tsis) in [(_('Upgraded'), list_bunch.upgraded),
(_('Downgraded'), list_bunch.downgraded),
(_('Installed'), list_bunch.installed +
list_bunch.installed_group +
list_bunch.installed_weak +
list_bunch.installed_dep),
(_('Reinstalled'), list_bunch.reinstalled),
(_('Skipped'), skipped),
(_('Removed'), list_bunch.erased +
list_bunch.erased_dep +
list_bunch.erased_clean),
(_('Failed'), list_bunch.failed)]:
if not tsis:
continue
msgs = []
out += '\n%s:\n' % action
for tsi in tsis:
msgs.append(str(tsi))
for num in (8, 7, 6, 5, 4, 3, 2):
cols = _fits_in_cols(msgs, num)
if cols:
break
if not cols:
cols = [-(self.term.columns - 2)]
while msgs:
current_msgs = msgs[:len(cols)]
out += ' '
out += self.fmtColumns(zip(current_msgs, cols), end=u'\n')
msgs = msgs[len(cols):]
return out
def setup_progress_callbacks(self):
"""Set up the progress callbacks and various
output bars based on debug level.
"""
progressbar = None
if self.conf.debuglevel >= 2:
progressbar = dnf.cli.progress.MultiFileProgressMeter(fo=sys.stdout)
self.progress = dnf.cli.progress.MultiFileProgressMeter(fo=sys.stdout)
# setup our depsolve progress callback
return (progressbar, DepSolveProgressCallBack())
def download_callback_total_cb(self, remote_size, download_start_timestamp):
"""Outputs summary information about the download process.
:param remote_size: the total amount of information that was
downloaded, in bytes
:param download_start_timestamp: the time when the download
process started, in seconds since the epoch
"""
if remote_size <= 0:
return
width = self.term.columns
logger.info("-" * width)
dl_time = max(0.01, time.time() - download_start_timestamp)
msg = ' %5sB/s | %5sB %9s ' % (
format_number(remote_size // dl_time),
format_number(remote_size),
format_time(dl_time))
msg = fill_exact_width(_("Total"), width - len(msg)) + msg
logger.info(msg)
def _history_uiactions(self, hpkgs):
actions = set()
actions_short = set()
count = 0
for pkg in hpkgs:
if pkg.action in (libdnf.transaction.TransactionItemAction_UPGRADED, libdnf.transaction.TransactionItemAction_DOWNGRADED):
# skip states we don't want to display in user input
continue
actions.add(pkg.action_name)
actions_short.add(pkg.action_short)
count += 1
if len(actions) > 1:
return count, ", ".join(sorted(actions_short))
# So empty transactions work, although that "shouldn't" really happen
return count, "".join(list(actions))
def _pwd_ui_username(self, uid, limit=None):
if isinstance(uid, list):
return [self._pwd_ui_username(u, limit) for u in uid]
# loginuid is set to -1 (0xFFFF_FFFF) on init, in newer kernels.
# loginuid is set to INT_MAX (0x7FFF_FFFF) on init, in older kernels.
if uid is None or uid in (0xFFFFFFFF, 0x7FFFFFFF):
loginid = _("<unset>")
name = _("System") + " " + loginid
if limit is not None and len(name) > limit:
name = loginid
return ucd(name)
def _safe_split_0(text, *args):
""" Split gives us a [0] for everything _but_ '', this function
returns '' in that case. """
ret = text.split(*args)
if not ret:
return ''
return ret[0]
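# Why the helper above exists: ''.split(';') returns [''], so [0] is safe,
# but ''.split() with no separator returns [], where [0] would raise
# IndexError -- hence the empty-list guard.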
try:
user = pwd.getpwuid(int(uid))
fullname = _safe_split_0(ucd(user.pw_gecos), ';', 2)
user_name = ucd(user.pw_name)
name = "%s <%s>" % (fullname, user_name)
if limit is not None and len(name) > limit:
name = "%s ... <%s>" % (_safe_split_0(fullname), user_name)
if len(name) > limit:
name = "<%s>" % user_name
return name
except KeyError:
return ucd(uid)
@staticmethod
def _historyRangeRTIDs(old, tid):
''' Convert a user "TID" string of 2..4 into: (2, 4). '''
def str2int(x):
try:
if x == '--last' or x.startswith('--last-'):
tid = old.tid
if x.startswith('--last-'):
off = int(x[len('--last-'):])
if off <= 0:
int("z")
tid -= off
return tid
return int(x)
except ValueError:
return None
if '..' not in tid:
return None
btid, etid = tid.split('..', 2)
btid = str2int(btid)
etid = str2int(etid)
# Reject unparsable endpoints before any numeric comparison; comparing
# None with an int would raise TypeError on Python 3.
if btid is None or etid is None:
return None
if btid > old.tid or btid <= 0:
return None
if etid > old.tid:
return None
# Have a range ... do a "merged" transaction.
if btid > etid:
btid, etid = etid, btid
return (btid, etid)
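# Examples (illustrative): '2..4' -> (2, 4); '4..2' is swapped to (2, 4);
# '--last-1..--last' resolves both endpoints against old.tid first.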
def _historyRangeTIDs(self, rtids):
''' Convert a list of ranged tid tuples into all the tids needed, e.g.
[(2,4), (6,8)] == [2, 3, 4, 6, 7, 8]. '''
tids = set()
last_end = -1 # This just makes displaying it easier...
for mtid in sorted(rtids):
if mtid[0] < last_end:
msg = _('Skipping merged transaction %d to %d, as it overlaps')
logger.warning(msg, mtid[0], mtid[1])
continue # Don't do overlapping
last_end = mtid[1]
for num in range(mtid[0], mtid[1] + 1):
tids.add(num)
return tids
def _history_list_transactions(self, extcmds):
old = self.history.last()
if old is None:
logger.critical(_('No transactions'))
return None
tids = set()
pats = []
usertids = extcmds
for tid in usertids:
try:
int(tid)
tids.add(tid)
except ValueError:
rtid = self._historyRangeRTIDs(old, tid)
if rtid:
tids.update(self._historyRangeTIDs([rtid]))
continue
pats.append(tid)
if pats:
tids.update(self.history.search(pats))
if not tids and usertids:
logger.critical(_('Bad transaction IDs, or package(s), given'))
return None
return tids
def historyListCmd(self, extcmds):
"""Output a list of information about the history of yum
transactions.
:param extcmds: list of extra command line arguments
:return: (exit_code, [errors])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
"""
tids = self._history_list_transactions(extcmds)
if tids is not None:
old_tids = self.history.old(tids)
if self.conf.history_list_view == 'users':
uids = [1, 2]
elif self.conf.history_list_view == 'commands':
uids = [1]
else:
assert self.conf.history_list_view == 'single-user-commands'
uids = set()
done = 0
blanks = 0
for old in old_tids:
done += 1
if old.cmdline is None:
blanks += 1
uids.add(old.loginuid)
fmt = "%s | %s | %s | %s | %s"
if len(uids) == 1:
name = _("Command line")
else:
# TRANSLATORS: user names who executed transaction in history command output
name = _("User name")
print(fmt % (fill_exact_width(_("ID"), 6, 6),
fill_exact_width(name, 24, 24),
fill_exact_width(_("Date and time"), 16, 16),
fill_exact_width(_("Action(s)"), 14, 14),
fill_exact_width(_("Altered"), 7, 7)))
print("-" * 79)
fmt = "%6u | %s | %-16.16s | %s | %4u"
for old in old_tids:
if len(uids) == 1:
name = old.cmdline or ''
else:
name = self._pwd_ui_username(old.loginuid, 24)
name = ucd(name)
tm = time.strftime("%Y-%m-%d %H:%M",
time.localtime(old.beg_timestamp))
num, uiacts = self._history_uiactions(old.data())
name = fill_exact_width(name, 24, 24)
uiacts = fill_exact_width(uiacts, 14, 14)
rmark = lmark = ' '
if old.return_code is None:
rmark = lmark = '*'
elif old.return_code:
rmark = lmark = '#'
# We don't check .errors, because return_code will be non-0
elif old.is_output:
rmark = lmark = 'E'
if old.altered_lt_rpmdb:
rmark = '<'
if old.altered_gt_rpmdb:
lmark = '>'
print(fmt % (old.tid, name, tm, uiacts, num), "%s%s" % (lmark, rmark))
def historyInfoCmd(self, extcmds, pats=[], mtids=set()):
"""Output information about a transaction in history
:param extcmds: list of extra command line arguments
:return: (exit_code, [errors])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
"""
tids = set(extcmds)
old = self.history.last()
if old is None:
logger.critical(_('No transactions'))
return 1, [_('Failed history info')]
lasttid = old.tid
lastdbv = old.end_rpmdb_version
transactions = []
if not tids and len(extcmds) < 2:
old = self.history.last(complete_transactions_only=False)
if old is not None:
tids.add(old.tid)
transactions.append(old)
else:
transactions = self.history.old(tids)
if not tids:
logger.critical(_('No transaction ID, or package, given'))
return 1, [_('Failed history info')]
bmtid, emtid = -1, -1
mobj = None
done = False
if mtids:
mtids = sorted(mtids)
bmtid, emtid = mtids.pop()
for trans in transactions:
if lastdbv is not None and trans.tid == lasttid:
# If this is the last transaction, is good and it doesn't
# match the current rpmdb ... then mark it as bad.
rpmdbv = self.sack._rpmdb_version()
trans.compare_rpmdbv(str(rpmdbv))
lastdbv = None
merged = False
if trans.tid >= bmtid and trans.tid <= emtid:
if mobj is None:
mobj = MergedTransactionWrapper(trans)
else:
mobj.merge(trans)
merged = True
elif mobj is not None:
if done:
print("-" * 79)
done = True
self._historyInfoCmd(mobj)
mobj = None
if mtids:
bmtid, emtid = mtids.pop()
if trans.tid >= bmtid and trans.tid <= emtid:
mobj = trans
merged = True
if not merged:
if done:
print("-" * 79)
done = True
self._historyInfoCmd(trans, pats)
if mobj is not None:
if done:
print("-" * 79)
self._historyInfoCmd(mobj)
def _historyInfoCmd(self, old, pats=[]):
loginuid = old.loginuid
if isinstance(loginuid, int):
loginuid = [loginuid]
name = [self._pwd_ui_username(uid) for uid in loginuid]
_pkg_states_installed = {'i' : _('Installed'), 'e' : _('Erased'),
'o' : _('Upgraded'), 'n' : _('Downgraded')}
_pkg_states_available = {'i' : _('Installed'), 'e' : _('Not installed'),
'o' : _('Older'), 'n' : _('Newer')}
maxlen = max([len(x) for x in (list(_pkg_states_installed.values()) +
list(_pkg_states_available.values()))])
_pkg_states_installed['maxlen'] = maxlen
_pkg_states_available['maxlen'] = maxlen
def _simple_pkg(pkg, prefix_len, was_installed=False, highlight=False,
pkg_max_len=0, show_repo=True):
prefix = " " * prefix_len
if was_installed:
_pkg_states = _pkg_states_installed
else:
_pkg_states = _pkg_states_available
state = _pkg_states['i']
# get installed packages with name = pkg.name
ipkgs = self.sack.query().installed().filterm(name=pkg.name).run()
if not ipkgs:
state = _pkg_states['e']
else:
# get latest installed package from software database
inst_pkg = self.history.package(ipkgs[0])
if inst_pkg:
res = pkg.compare(inst_pkg)
# res is:
# 0 if inst_pkg == pkg
# > 0 when inst_pkg > pkg
# < 0 when inst_pkg < pkg
if res == 0:
pass # installed
elif res > 0:
state = _pkg_states['o'] # updated
else:
state = _pkg_states['n'] # downgraded
if highlight:
(hibeg, hiend) = self._highlight('bold')
else:
(hibeg, hiend) = self._highlight('normal')
state = fill_exact_width(state, _pkg_states['maxlen'])
ui_repo = ''
if show_repo:
ui_repo = pkg.ui_from_repo()
print("%s%s%s%s %-*s %s" % (prefix, hibeg, state, hiend,
pkg_max_len, str(pkg), ui_repo))
tids = old.tids()
if len(tids) > 1:
print(_("Transaction ID :"), "%u..%u" % (tids[0], tids[-1]))
else:
print(_("Transaction ID :"), tids[0])
begt = float(old.beg_timestamp)
begtm = time.strftime("%c", time.localtime(begt))
print(_("Begin time :"), begtm)
if old.beg_rpmdb_version is not None:
if old.altered_lt_rpmdb:
print(_("Begin rpmdb :"), old.beg_rpmdb_version, "**")
else:
print(_("Begin rpmdb :"), old.beg_rpmdb_version)
if old.end_timestamp is not None:
endt = old.end_timestamp
endtm = time.strftime("%c", time.localtime(endt))
diff = endt - begt
if diff < 5 * 60:
diff = _("(%u seconds)") % diff
elif diff < 5 * 60 * 60:
diff = _("(%u minutes)") % (diff // 60)
elif diff < 5 * 60 * 60 * 24:
diff = _("(%u hours)") % (diff // (60 * 60))
else:
diff = _("(%u days)") % (diff // (60 * 60 * 24))
print(_("End time :"), endtm, diff)
if old.end_rpmdb_version is not None:
if old.altered_gt_rpmdb:
print(_("End rpmdb :"), old.end_rpmdb_version, "**")
else:
print(_("End rpmdb :"), old.end_rpmdb_version)
if isinstance(name, (list, tuple)):
seen = set()
for i in name:
if i in seen:
continue
seen.add(i)
print(_("User :"), i)
else:
print(_("User :"), name)
if isinstance(old.return_code, (list, tuple)):
codes = old.return_code
if codes[0] is None:
print(_("Return-Code :"), "**", _("Aborted"), "**")
codes = codes[1:]
elif not all(codes):
print(_("Return-Code :"), _("Success"))
elif codes:
print(_("Return-Code :"), _("Failures:"), ", ".join([str(i) for i in codes]))
elif old.return_code is None:
print(_("Return-Code :"), "**", _("Aborted"), "**")
elif old.return_code:
print(_("Return-Code :"), _("Failure:"), old.return_code)
else:
print(_("Return-Code :"), _("Success"))
if isinstance(old.releasever, (list, tuple)):
seen = set()
for i in old.releasever:
if i in seen:
continue
seen.add(i)
print(_("Releasever :"), i)
else:
print(_("Releasever :"), old.releasever)
if old.cmdline is not None:
if isinstance(old.cmdline, (list, tuple)):
for cmdline in old.cmdline:
print(_("Command Line :"), cmdline)
else:
print(_("Command Line :"), old.cmdline)
# TODO:
# comment = self.history.addon_data.read(old.tid, item='transaction-comment')
comment = ""
if comment:
print(_("Comment :"), comment)
perf_with = old.performed_with()
if perf_with:
print(_("Transaction performed with:"))
max_len = 0
for with_pkg in perf_with:
str_len = len(str(with_pkg))
if str_len > max_len:
max_len = str_len
for with_pkg in perf_with:
_simple_pkg(with_pkg, 4, was_installed=True, pkg_max_len=max_len)
print(_("Packages Altered:"))
self.historyInfoCmdPkgsAltered(old, pats)
t_out = old.output()
if t_out:
print(_("Scriptlet output:"))
num = 0
for line in t_out:
num += 1
print("%4d" % num, line)
t_err = old.error()
if t_err:
print(_("Errors:"))
num = 0
for line in t_err:
num += 1
print("%4d" % num, line)
# TODO: remove
_history_state2uistate = {'True-Install' : _('Install'),
'Install' : _('Install'),
'Dep-Install' : _('Dep-Install'),
'Obsoleted' : _('Obsoleted'),
'Obsoleting' : _('Obsoleting'),
'Erase' : _('Erase'),
'Reinstall' : _('Reinstall'),
'Downgrade' : _('Downgrade'),
'Downgraded' : _('Downgraded'),
'Update' : _('Upgrade'),
'Updated' : _('Upgraded'),
}
def historyInfoCmdPkgsAltered(self, old, pats=[]):
"""Print information about how packages are altered in a transaction.
:param old: the :class:`DnfSwdbTrans` to
print information about
:param pats: a list of patterns. Packages that match a pattern
in *pats* will be highlighted in the output
"""
last = None
# Note that these don't use _simple_pkg() because we are showing what
# happened to them in the transaction ... not the difference between the
# version in the transaction and now.
all_uistates = self._history_state2uistate
maxlen = 0
pkg_max_len = 0
packages = old.packages()
for pkg in packages:
uistate = all_uistates.get(pkg.action_name, pkg.action_name)
if maxlen < len(uistate):
maxlen = len(uistate)
pkg_len = len(str(pkg))
if pkg_max_len < pkg_len:
pkg_max_len = pkg_len
for pkg in packages:
prefix = " " * 4
if pkg.state != libdnf.transaction.TransactionItemState_DONE:
prefix = " ** "
highlight = 'normal'
if pats:
if any([pkg.match(pat) for pat in pats]):
highlight = 'bold'
(hibeg, hiend) = self._highlight(highlight)
cn = str(pkg)
uistate = all_uistates.get(pkg.action_name, pkg.action_name)
uistate = fill_exact_width(ucd(uistate), maxlen)
if (last is not None and last.action == libdnf.transaction.TransactionItemAction_UPGRADED and
last.name == pkg.name and pkg.action == libdnf.transaction.TransactionItemAction_UPGRADE):
ln = len(pkg.name) + 1
cn = (" " * ln) + cn[ln:]
elif (last is not None and last.action == libdnf.transaction.TransactionItemAction_DOWNGRADE and
last.name == pkg.name and pkg.action == libdnf.transaction.TransactionItemAction_DOWNGRADED):
ln = len(pkg.name) + 1
cn = (" " * ln) + cn[ln:]
else:
last = None
if pkg.action in (libdnf.transaction.TransactionItemAction_UPGRADED, libdnf.transaction.TransactionItemAction_DOWNGRADE):
last = pkg
print("%s%s%s%s %-*s %s" % (prefix, hibeg, uistate, hiend,
pkg_max_len, str(pkg),
pkg.ui_from_repo()))
def historyPackageListCmd(self, extcmds):
"""Print a list of information about transactions from history
that involve the given package or packages.
:param extcmds: list of extra command line arguments
"""
tids = self.history.search(extcmds)
limit = None
if extcmds and not tids:
logger.critical(_('Bad transaction IDs, or package(s), given'))
return 1, ['Failed history packages-list']
if not tids:
limit = 20
all_uistates = self._history_state2uistate
fmt = "%s | %s | %s"
# REALLY Needs to use columns!
print(fmt % (fill_exact_width(_("ID"), 6, 6),
fill_exact_width(_("Action(s)"), 14, 14),
# This is also a hack to resolve RhBug 1302935 correctly.
fill_exact_width(C_("long", "Package"), 53, 53)))
print("-" * 79)
fmt = "%6u | %s | %-50s"
num = 0
for old in self.history.old(tids, limit=limit):
packages = old.packages()
if limit and num and (num + len(packages)) > limit:
break
last = None
# Copy and paste from list ... uh.
rmark = lmark = ' '
if old.return_code is None:
rmark = lmark = '*'
elif old.return_code:
rmark = lmark = '#'
# We don't check .errors, because return_code will be non-0
elif old.output:
rmark = lmark = 'E'
elif old.rpmdb_problems:
rmark = lmark = 'P'
elif old.trans_skip:
rmark = lmark = 's'
if old.altered_lt_rpmdb:
rmark = '<'
if old.altered_gt_rpmdb:
lmark = '>'
# Find a pkg to go with each cmd...
for pkg in packages:
if limit is None:
if not any([pkg.match(pat) for pat in extcmds]):
continue
uistate = all_uistates.get(pkg.action_name, pkg.action_name)
uistate = fill_exact_width(uistate, 14)
# To chop the name off we need nevra strings, str(pkg) gives
# envra so we have to do it by hand ... *sigh*.
cn = pkg.ui_nevra
if (last is not None and last.action == libdnf.transaction.TransactionItemAction_UPGRADED and
last.name == pkg.name and pkg.action == libdnf.transaction.TransactionItemAction_UPGRADE):
ln = len(pkg.name) + 1
cn = (" " * ln) + cn[ln:]
elif (last is not None and
last.action == libdnf.transaction.TransactionItemAction_DOWNGRADE and last.name == pkg.name and
pkg.action == libdnf.transaction.TransactionItemAction_DOWNGRADED):
ln = len(pkg.name) + 1
cn = (" " * ln) + cn[ln:]
else:
last = None
if pkg.action in (libdnf.transaction.TransactionItemAction_UPGRADED, libdnf.transaction.TransactionItemAction_DOWNGRADE):
last = pkg
num += 1
print(fmt % (old.tid, uistate, cn), "%s%s" % (lmark, rmark))
class DepSolveProgressCallBack(dnf.callback.Depsolve):
"""Provides text output callback functions for Dependency Solver callback."""
def __init__(self):
"""requires yum-cli log and errorlog functions as arguments"""
self.loops = 0
def pkg_added(self, pkg, mode):
"""Print information about a package being added to the
transaction set.
:param pkg: the package object being added to the
transaction set
:param mode: a short string indicating why the package is
being added to the transaction set.
Valid current values for *mode* are::
i = the package will be installed
u = the package will be an update
e = the package will be erased
r = the package will be reinstalled
d = the package will be a downgrade
o = the package will be obsoleting another package
ud = the package will be updated
od = the package will be obsoleted
"""
output = None
if mode == 'i':
output = _('---> Package %s.%s %s will be installed')
elif mode == 'u':
output = _('---> Package %s.%s %s will be an upgrade')
elif mode == 'e':
output = _('---> Package %s.%s %s will be erased')
elif mode == 'r':
output = _('---> Package %s.%s %s will be reinstalled')
elif mode == 'd':
output = _('---> Package %s.%s %s will be a downgrade')
elif mode == 'o':
output = _('---> Package %s.%s %s will be obsoleting')
elif mode == 'ud':
output = _('---> Package %s.%s %s will be upgraded')
elif mode == 'od':
output = _('---> Package %s.%s %s will be obsoleted')
if output:
logger.debug(output, pkg.name, pkg.arch, pkg.evr)
def start(self):
"""Perform setup at the beginning of the dependency solving
process.
"""
logger.debug(_('--> Starting dependency resolution'))
self.loops += 1
def end(self):
"""Output a message stating that dependency resolution has finished."""
logger.debug(_('--> Finished dependency resolution'))
class CliKeyImport(dnf.callback.KeyImport):
def __init__(self, base, output):
self.base = base
self.output = output
def _confirm(self, id, userid, fingerprint, url, timestamp):
def short_id(id):
rj = '0' if dnf.pycomp.PY3 else b'0'
return id[-8:].rjust(8, rj)
msg = (_('Importing GPG key 0x%s:\n'
' Userid : "%s"\n'
' Fingerprint: %s\n'
' From : %s') %
(short_id(id), userid,
dnf.crypto._printable_fingerprint(fingerprint),
url.replace("file://", "")))
logger.critical("%s", msg)
if self.base.conf.assumeyes:
return True
if self.base.conf.assumeno:
return False
return self.output.userconfirm()
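# Note on short_id above (illustrative): it keeps the last 8 characters of a
# key id and left-pads with '0', e.g. 'DEADBEEFCAFEBABE' -> 'CAFEBABE' and
# 'ABC' -> '00000ABC'.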
class CliTransactionDisplay(LoggingTransactionDisplay):
"""A YUM specific callback class for RPM operations."""
width = property(lambda self: dnf.cli.term._term_width())
def __init__(self):
super(CliTransactionDisplay, self).__init__()
self.lastmsg = ""
self.lastpackage = None # name of last package we looked at
self.output = True
# for a progress bar
self.mark = "="
self.marks = 22
def progress(self, package, action, ti_done, ti_total, ts_done, ts_total):
"""Output information about an rpm operation. This may
include a text progress bar.
:param package: the package involved in the event
:param action: the type of action that is taking place. Valid
values are given by
:func:`rpmtrans.LoggingTransactionDisplay.action.keys()`
:param ti_done: a number representing the amount of work
already done in the current transaction
:param ti_total: a number representing the total amount of work
to be done in the current transaction
:param ts_done: the number of the current transaction in
transaction set
:param ts_total: the total number of transactions in the
transaction set
"""
action_str = dnf.transaction.ACTIONS.get(action)
if action_str is None:
return
wid1 = self._max_action_width()
pkgname = ucd(package)
self.lastpackage = package
if ti_total == 0:
percent = 0
else:
percent = (ti_done*long(100))//ti_total
self._out_progress(ti_done, ti_total, ts_done, ts_total,
percent, action_str, pkgname, wid1)
def _max_action_width(self):
if not hasattr(self, '_max_action_wid_cache'):
wid1 = 0
for val in dnf.transaction.ACTIONS.values():
wid_val = exact_width(val)
if wid1 < wid_val:
wid1 = wid_val
self._max_action_wid_cache = wid1
wid1 = self._max_action_wid_cache
return wid1
def _out_progress(self, ti_done, ti_total, ts_done, ts_total,
percent, process, pkgname, wid1):
if self.output and (sys.stdout.isatty() or ti_done == ti_total):
(fmt, wid1, wid2) = self._makefmt(percent, ts_done, ts_total,
progress=sys.stdout.isatty(),
pkgname=pkgname, wid1=wid1)
pkgname = ucd(pkgname)
msg = fmt % (fill_exact_width(process, wid1, wid1),
fill_exact_width(pkgname, wid2, wid2))
if msg != self.lastmsg:
dnf.util._terminal_messenger('write_flush', msg, sys.stdout)
self.lastmsg = msg
if ti_done == ti_total:
print(" ")
def filelog(self, package, action):
pass
def error(self, message):
pass
def scriptout(self, msgs):
"""Print messages originating from a package script.
:param msgs: the messages coming from the script
"""
if msgs:
self.rpm_logger.info(ucd(msgs))
def _makefmt(self, percent, ts_done, ts_total, progress=True,
pkgname=None, wid1=15):
l = len(str(ts_total))
size = "%s.%s" % (l, l)
fmt_done = "%" + size + "s/%" + size + "s"
done = fmt_done % (ts_done, ts_total)
# This should probably use TerminLine, but we don't want to dep. on
# that. So we kind of do an ok job by hand ... at least it's dynamic now.
if pkgname is None:
pnl = 22
else:
pnl = exact_width(pkgname)
overhead = (2 * l) + 2 # Length of done, above
overhead += 2 + wid1 + 2 # Length of beginning (" " action " :")
overhead += 1 # Space between pn and done
overhead += 2 # Ends for progress
overhead += 1 # Space for end
width = self.width
if width < overhead:
width = overhead # Give up
width -= overhead
if pnl > width // 2:
pnl = width // 2
marks = self.width - (overhead + pnl)
width = "%s.%s" % (marks, marks)
fmt_bar = "[%-" + width + "s]"
# pnl = str(28 + marks + 1)
full_pnl = pnl + marks + 1
if progress and percent == 100: # Don't chop pkg name on 100%
fmt = "\r %s: %s " + done
wid2 = full_pnl
elif progress:
if marks > 5:
bar = fmt_bar % (self.mark * int(marks * (percent / 100.0)), )
else:
bar = ""
fmt = "\r %s: %s " + bar + " " + done
wid2 = pnl
elif percent == 100:
fmt = " %s: %s " + done
wid2 = full_pnl
else:
if marks > 5:
bar = fmt_bar % (self.mark * marks, )
else:
bar = ""
fmt = " %s: %s " + bar + " " + done
wid2 = pnl
return fmt, wid1, wid2
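# Rough shape of the line _makefmt produces (an approximation, not verbatim
# output): on a tty mid-transaction it renders something like
# '\r  Installing: bash-5.1.x86_64 [=====     ]  1/10', and at 100% the bar
# is dropped so the full package name fits.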
def progressbar(current, total, name=None):
"""Output the current status to the terminal using a simple
text progress bar consisting of 50 # marks.
:param current: a number representing the amount of work
already done
:param total: a number representing the total amount of work
to be done
:param name: a name to label the progress bar with
"""
mark = '#'
if not sys.stdout.isatty():
return
if current == 0:
percent = 0
else:
if total != 0:
percent = float(current) / total
else:
percent = 0
width = dnf.cli.term._term_width()
if name is None and current == total:
name = '-'
end = ' %d/%d' % (current, total)
width -= len(end) + 1
if width < 0:
width = 0
if name is None:
width -= 2
if width < 0:
width = 0
hashbar = mark * int(width * percent)
output = '\r[%-*s]%s' % (width, hashbar, end)
elif current == total: # Don't chop name on 100%
output = '\r%s%s' % (fill_exact_width(name, width, width), end)
else:
width -= 4
if width < 0:
width = 0
nwid = width // 2
if nwid > exact_width(name):
nwid = exact_width(name)
width -= nwid
hashbar = mark * int(width * percent)
output = '\r%s: [%-*s]%s' % (fill_exact_width(name, nwid, nwid), width,
hashbar, end)
if current <= total:
dnf.util._terminal_messenger('write', output, sys.stdout)
if current == total:
dnf.util._terminal_messenger('write', '\n', sys.stdout)
dnf.util._terminal_messenger('flush', out=sys.stdout)
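# A minimal driver sketch for progressbar() above (an assumption, not part of
# dnf): feed it increasing 'current' values up to 'total'; on a tty it redraws
# a single line and emits a newline once current == total.
if __name__ == '__main__':
    import time
    _total = 10
    for _done in range(_total + 1):
        progressbar(_done, _total, name='demo')
        time.sleep(0.1)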
|
gpl-2.0
| 4,829,881,065,076,747,000
| 40.065027
| 141
| 0.514703
| false
| 4.258635
| false
| false
| false
|
levinsamuel/rand
|
python/api/mysqlcl.py
|
1
|
1787
|
from datetime import datetime
from sqlalchemy import create_engine, __version__ as v, Column, Integer, String, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import pprint
import logging
import json
from people import Person
logging.basicConfig()
log = logging.getLogger('mysqlcl')
log.setLevel(logging.DEBUG)
Base = declarative_base()
engine = create_engine('mysql+mysqlconnector://sam:levin@localhost:3306/checkr_person', echo=True)
Session = sessionmaker()
Session.configure(bind=engine)
class DBPerson(Base):
__tablename__ = 'people'
_id = Column(Integer, primary_key=True)
fname = Column(String)
lname = Column(String)
createtime = Column(DateTime)
def __init__(self, person):
self.fname, self.lname, self.createtime = person.fname, person.lname, person.timestamp
def to_person(self):
return Person({'fname': self.fname,
'lname': self.lname,
'timestamp': self.createtime})
def client():
return Session()
def post(prsn):
"""Create or update a person, based on presence of ID"""
# Create the list of people from our data
cl = client()
pd = DBPerson(prsn)
cl.add(pd)
cl.commit()
# Create a handler for our read (GET) people
def read(id=None):
cl = client()
if id is not None:
dbo = cl.query(DBPerson).filter_by(_id=id).first()
ppl = dbo.to_person() if dbo is not None else None
log.debug('person: %s', ppl)
else:
ppl = [p.to_person() for p in cl.query(DBPerson)]
return ppl
# return [PEOPLE[key] for key in sorted(PEOPLE.keys())]
if __name__ == '__main__':
c = client()
print(engine, c)
print(v)
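# A minimal usage sketch (assumes the MySQL instance configured above is
# reachable and that Person accepts this dict shape):
#
#   p = Person({'fname': 'Ada', 'lname': 'Lovelace',
#               'timestamp': datetime.now()})
#   post(p)        # INSERTs a people row through a new Session
#   print(read())  # SELECTs every row back as Person objects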
|
mit
| -4,172,617,481,449,023,000
| 24.542857
| 98
| 0.641298
| false
| 3.595573
| false
| false
| false
|
maxikov/tatmon
|
trash/bezier.py
|
1
|
7830
|
#!/usr/bin/env python
# -*- coding:utf8 -*-
import pygame
import sys
import numpy
class Activity(object):
def __init__(self, screen_size, manager, clock):
self.screen_size = screen_size
self.manager = manager
def render(self, surface):
pass
def process_event(self, event):
pass
def step(self):
pass
def exit(self):
self.manager.exit(self)
class StateManager(object):
def __init__(self, first_activity, screen_size, clock):
self.screen_size = screen_size
self.stack = []
self.clock = clock
self.call(first_activity)
def call(self, activity):
self.stack.append(activity(self.screen_size, self, self.clock))
def exit(self, activity):
if len(self.stack) > 1:
# Remove by identity; the original index loop kept iterating over stale
# indices after a deletion and could raise IndexError.
if activity in self.stack:
self.stack.remove(activity)
def render(self, surface):
self.stack[-1].render(surface)
def step(self):
self.stack[-1].step()
def process_event(self, event):
if event.type == pygame.QUIT or event.type == pygame.KEYDOWN and event.key in [pygame.K_ESCAPE, pygame.K_q]:
self.call(ExitActivity)
else:
self.stack[-1].process_event(event)
class ExitActivity(Activity):
def __init__(self, screen_size, manager, clock):
Activity.__init__(self, screen_size, manager, clock)
self.mask = pygame.Surface(screen_size)
self.mask.fill((0,0,150))
self.mask.set_alpha(20)
font = pygame.font.Font(pygame.font.match_font("Times New Roman"), 72)
font.set_italic(True)
font.set_bold(True)
self.gb = font.render(u"Goodbye!", True, (150, 150, 255))
self.gb.set_alpha(75)
def render(self, surface):
surface.blit(self.mask, (0,0))
rect = self.gb.get_rect()
rect.center = surface.get_rect().center
surface.blit(self.gb, rect)
pygame.display.flip()
sys.exit(0)
class BezierActivity(Activity):
def __init__(self, screen_size, manager, clock):
Activity.__init__(self, screen_size, manager, clock)
# self.curve = BezierCurve([(100, 100), (150, 50), (200, 200), (250, 100)], 0.01)
self.clock = clock
self.spline = BezierSpline([(100, 300), (100, 100), (400, 100)])
self.fps_label = Label(u"FPS:", u"%.2f")
def render(self, surface):
# self.curve.render(surface, (0,0,255))
self.spline.render(surface)
self.fps_label.set_value(self.clock.get_fps())
self.fps_label.rect.topleft = 10, 10
self.fps_label.render(surface)
class BezierCurve(object):
def __init__(self, points, step=0.1):
self.points = map(numpy.array, points)
self.step = step
self.calculate()
def calculate(self):
self.curve = []
p = self.points
prev_t = t = 0
while prev_t < 1:
self.curve.append( ((1-t)**3)*p[0] + 3*t*((1-t)**2)*p[1] + 3*(t**2)*(1-t)*p[2] + (t**3)*p[3] )
prev_t = t
t += self.step
def render(self, surface, color):
pygame.draw.aalines(surface, color, False, self.curve)
class BezierSpline(object):
def __init__(self, points):
self.points = points
self.curves = []
self.create_curves(0.05)
def create_curves(self, step=0.1):
l = len(self.points)
self.curves = []
for i in xrange(l):
prestart, start, end, postend = self.points[(i-1)%l], self.points[i%l], self.points[(i+1)%l], self.points[(i+2)%l]
m1, m2 = self.interpoints(prestart, start, end, postend)
self.curves.append(BezierCurve([start, m1, m2, end], step))
def render(self, surface, color=(0,0,255)):
for curve in self.curves:
curve.render(surface, color)
for curve in self.curves:
map(lambda (x, y): pygame.draw.circle(surface, (255, 255, 0), (int(x), int(y)), 5), [curve.points[1], curve.points[2]])
map(lambda (x, y): pygame.draw.circle(surface, (0, 255, 0), (int(x), int(y)), 10), [curve.points[0], curve.points[3]])
def interpoints(self, prestart, start, end, postend, magic=0.2):
        # Make sure we are working with numpy.array
        [prestart, start, end, postend] = map(numpy.array, [prestart, start, end, postend])
        # Find the direction vectors of the tangents to the spline being created at the start and end points
start_tangent = self.get_tangent(prestart, start, end)
end_tangent = self.get_tangent(start, end, postend)
l = self.magnitude(start-end)
start_inter = start + start_tangent*magic*l
end_inter = end - end_tangent*magic*l
return start_inter, end_inter
def get_tangent(self, prv, cur, nxt):
u"""Нахождение координат направляющего вектора касательной в точке cur.
Находит оптимальную касательную как перпендикуляр к сумме векторов prv -> cur и nxt -> cur, отложенных от cur.
Возвращает numpy.array из координат направляющего вектора найденной касательной."""
        # Make sure we are working with numpy.array
        [prv, cur, nxt] = map(numpy.array, [prv, cur, nxt])
        # Find the vectors
prv_cur = prv - cur
nxt_cur = nxt - cur
        # Find the normal to the sought tangent as the sum of the obtained vectors laid off from point cur
norm = prv_cur + nxt_cur
if self.magnitude(norm) == 0:
return self.valuate(nxt_cur)
        # Find the tangent as the perpendicular to the resulting vector
counterclockwise = numpy.dot(numpy.array( [[0, -1],
[1, 0]]), norm)
clockwise = numpy.dot(numpy.array( [[0, 1],
[-1, 0]] ), norm)
tangent = min([counterclockwise, clockwise], key=lambda vec: self.angle(vec, nxt_cur))
        # Normalize the direction vector to 1
tangent = self.valuate(tangent)
return tangent
def angle(self, vec1, vec2):
return numpy.arccos(numpy.dot(vec1, vec2)/(self.magnitude(vec1) * self.magnitude(vec2)))
def valuate(self, arr):
u"""Нормировка значений переданного массива на 1. Возвращает numpy.array"""
factor = float(abs(max(arr, key=abs)))
return arr/factor if factor != 0 else numpy.array([0, 0])
def magnitude(self, arr):
return numpy.sqrt(numpy.square(arr).sum())
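# Hedged worked example for get_tangent: with prv=(0, 0), cur=(1, 0), nxt=(2, 0)
# the neighbour vectors prv-cur=(-1, 0) and nxt-cur=(1, 0) cancel out, so the
# normal is zero and the method falls back to valuate(nxt_cur) == [1., 0.].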
class Label(object):
def __init__(self, text, form, **kwargs):
""" self, text, form, color=(0,0,0), font="Arial", fontsize=24, align="left" """
self.text = text
self.form = form
self.color = kwargs.get("color", (0,0,0))
self.align = kwargs.get("align", "left")
self.font = pygame.font.Font(pygame.font.match_font(kwargs.get("font", "Arial")), kwargs.get("fontsize", 24))
self.label = self.font.render(unicode(self.text), True, self.color)
self.rect = self.label.get_rect()
def set_value(self, value):
self.val = self.font.render(unicode(self.form) % value, True, self.color)
valrect = self.val.get_rect()
labrect = self.label.get_rect()
if self.align == "left":
valrect.topleft = labrect.bottomleft
else:
valrect.topright = labrect.bottomright
self.surface = pygame.Surface( (valrect.width + labrect.width, valrect.height + labrect.height) )
self.surface.fill((255,255,255))
self.surface.set_colorkey((255,255,255))
self.surface.blit(self.label, labrect)
self.surface.blit(self.val, valrect)
self.rect = self.surface.get_rect()
def render(self, surface):
surface.blit(self.surface, self.rect)
def main():
size = 500, 500
bgcolor = 255, 255, 255
pygame.init()
screen = pygame.display.set_mode(size)
pygame.display.set_caption(u"Brand new Bezier spline")
clock = pygame.time.Clock()
manager = StateManager(BezierActivity, size, clock)
while True:
clock.tick(400)
for event in pygame.event.get():
manager.process_event(event)
manager.step()
screen.fill(bgcolor)
manager.render(screen)
pygame.display.flip()
if __name__ == "__main__":
main()
|
gpl-3.0
| -5,036,191,145,068,028,000
| 29.123967
| 122
| 0.678052
| false
| 2.435683
| false
| false
| false
|
maxcutler/Courant-News
|
courant/core/search/templatetags/search.py
|
1
|
2614
|
from django.template import Library, Node, TemplateSyntaxError, Variable
from courant.core.search.forms import CourantSearchForm
register = Library()
class SearchFacetCheck(Node):
def __init__(self, facet, value, varname):
self.facet = facet
self.value = value
self.varname = varname
def render(self, context):
request = context['request']
facets = request.GET.getlist('selected_facets')
found = False
facet_type = unicode(Variable(self.facet).resolve(context))
value = unicode(Variable(self.value).resolve(context))
for facet in facets:
if len(facet) > 0:
name, id = facet.split(':')
if name == facet_type and id == value:
found = True
break
context[self.varname] = found
return ''
def do_search_facet_check(parser, token):
bits = token.contents.split()
if not len(bits) == 5:
raise TemplateSyntaxError, "search_facet_check syntax error"
return SearchFacetCheck(bits[1], bits[2], bits[4])
do_search_facet_check = register.tag('search_facet_check', do_search_facet_check)
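# Hedged template usage, inferred from the 5-token parse above:
#   {% search_facet_check facet.name result.pk as facet_selected %}
#   {% if facet_selected %}<span class="selected">...</span>{% endif %}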
def strip_facet(url, facet, value):
to_remove = "&selected_facets=%s:%s" % (facet, value)
return url.replace('%3A', ':').replace(to_remove, '')
register.simple_tag(strip_facet)
class SearchFormNode(Node):
def __init__(self, varname):
self.varname = varname
def render(self, context):
context[self.varname] = CourantSearchForm(context['request'].GET)
return ''
def get_search_form(parser, token):
"""
    Assigns a search form to a context variable. Usage:
    {% get_search_form as varname %}
"""
bits = token.contents.split()
if not len(bits) == 3:
raise TemplateSyntaxError, "get_search_form only takes 'as varname'"
return SearchFormNode(bits[2])
get_search_form = register.tag(get_search_form)
class SearchObject(Node):
def __init__(self, obj, varname):
self.obj = obj
self.varname = varname
def render(self, context):
context[self.varname] = Variable(self.obj).resolve(context)._object
return ''
def get_search_object(parser, token):
"""
Extracts a model instance object from a search query result object
"""
bits = token.contents.split()
if not len(bits) == 4:
raise TemplateSyntaxError, "get_search_object syntax invalid"
return SearchObject(bits[1], bits[3])
get_search_object = register.tag(get_search_object)
|
bsd-3-clause
| -8,180,363,655,275,698,000
| 32.88
| 81
| 0.612854
| false
| 3.878338
| false
| false
| false
|
oblique-labs/pyVM
|
rpython/jit/metainterp/optimizeopt/unroll.py
|
1
|
26476
|
import sys
from rpython.jit.metainterp.history import Const, TargetToken, JitCellToken
from rpython.jit.metainterp.optimizeopt.shortpreamble import ShortBoxes,\
ShortPreambleBuilder, ExtendedShortPreambleBuilder, PreambleOp
from rpython.jit.metainterp.optimizeopt import info, intutils
from rpython.jit.metainterp.optimize import InvalidLoop, SpeculativeError
from rpython.jit.metainterp.optimizeopt.optimizer import Optimizer,\
Optimization, LoopInfo, MININT, MAXINT, BasicLoopInfo
from rpython.jit.metainterp.optimizeopt.vstring import StrPtrInfo
from rpython.jit.metainterp.optimizeopt.virtualstate import (
VirtualStateConstructor, VirtualStatesCantMatch)
from rpython.jit.metainterp.resoperation import rop, ResOperation, GuardResOp,\
AbstractResOp
from rpython.jit.metainterp import compile
from rpython.rlib.debug import debug_print, debug_start, debug_stop,\
have_debug_prints
class UnrollableOptimizer(Optimizer):
def force_op_from_preamble(self, preamble_op):
if isinstance(preamble_op, PreambleOp):
if self.optunroll.short_preamble_producer is None:
assert False # unreachable code
op = preamble_op.op
self.optimizer.inparg_dict[op] = None # XXX ARGH
# special hack for int_add(x, accumulator-const) optimization
self.optunroll.short_preamble_producer.use_box(op,
preamble_op.preamble_op, self)
if not preamble_op.op.is_constant():
if preamble_op.invented_name:
op = self.get_box_replacement(op)
self.optunroll.potential_extra_ops[op] = preamble_op
return preamble_op.op
return preamble_op
def setinfo_from_preamble_list(self, lst, infos):
for item in lst:
if item is None:
continue
i = infos.get(item, None)
if i is not None:
self.setinfo_from_preamble(item, i, infos)
else:
item.set_forwarded(None)
# let's not inherit stuff we don't
# know anything about
def setinfo_from_preamble(self, op, preamble_info, exported_infos):
op = self.get_box_replacement(op)
if op.get_forwarded() is not None:
return
if op.is_constant():
return # nothing we can learn
if isinstance(preamble_info, info.PtrInfo):
if preamble_info.is_virtual():
op.set_forwarded(preamble_info)
self.setinfo_from_preamble_list(preamble_info.all_items(),
exported_infos)
return
if preamble_info.is_constant():
# but op is not
op.set_forwarded(preamble_info.getconst())
return
if preamble_info.get_descr() is not None:
if isinstance(preamble_info, info.StructPtrInfo):
op.set_forwarded(info.StructPtrInfo(
preamble_info.get_descr()))
if isinstance(preamble_info, info.InstancePtrInfo):
op.set_forwarded(info.InstancePtrInfo(
preamble_info.get_descr()))
known_class = preamble_info.get_known_class(self.cpu)
if known_class:
self.make_constant_class(op, known_class, False)
if isinstance(preamble_info, info.ArrayPtrInfo):
arr_info = info.ArrayPtrInfo(preamble_info.descr)
bound = preamble_info.getlenbound(None).clone()
assert isinstance(bound, intutils.IntBound)
arr_info.lenbound = bound
op.set_forwarded(arr_info)
if isinstance(preamble_info, StrPtrInfo):
str_info = StrPtrInfo(preamble_info.mode)
bound = preamble_info.getlenbound(None).clone()
assert isinstance(bound, intutils.IntBound)
str_info.lenbound = bound
op.set_forwarded(str_info)
if preamble_info.is_nonnull():
self.make_nonnull(op)
elif isinstance(preamble_info, intutils.IntBound):
fix_lo = preamble_info.has_lower and preamble_info.lower >= MININT/2
fix_up = preamble_info.has_upper and preamble_info.upper <= MAXINT/2
if fix_lo or fix_up:
intbound = self.getintbound(op)
if fix_lo:
intbound.has_lower = True
intbound.lower = preamble_info.lower
if fix_up:
intbound.has_upper = True
intbound.upper = preamble_info.upper
elif isinstance(preamble_info, info.FloatConstInfo):
op.set_forwarded(preamble_info._const)
class UnrollOptimizer(Optimization):
"""Unroll the loop into two iterations. The first one will
become the preamble or entry bridge (don't think there is a
distinction anymore)"""
short_preamble_producer = None
def __init__(self, metainterp_sd, jitdriver_sd, optimizations):
self.optimizer = UnrollableOptimizer(metainterp_sd, jitdriver_sd,
optimizations)
self.optimizer.optunroll = self
def get_virtual_state(self, args):
modifier = VirtualStateConstructor(self.optimizer)
return modifier.get_virtual_state(args)
def _check_no_forwarding(self, lsts, check_newops=True):
for lst in lsts:
for op in lst:
assert op.get_forwarded() is None
if check_newops:
assert not self.optimizer._newoperations
def optimize_preamble(self, trace, runtime_boxes, call_pure_results, memo):
info, newops = self.optimizer.propagate_all_forward(
trace.get_iter(), call_pure_results, flush=False)
exported_state = self.export_state(info.jump_op.getarglist(),
info.inputargs,
runtime_boxes, memo)
exported_state.quasi_immutable_deps = info.quasi_immutable_deps
# we need to absolutely make sure that we've cleaned up all
# the optimization info
self.optimizer._clean_optimization_info(self.optimizer._newoperations)
return exported_state, self.optimizer._newoperations
def optimize_peeled_loop(self, trace, celltoken, state,
call_pure_results, inline_short_preamble=True):
trace = trace.get_iter()
try:
label_args = self.import_state(trace.inputargs, state)
except VirtualStatesCantMatch:
raise InvalidLoop("Cannot import state, virtual states don't match")
self.potential_extra_ops = {}
self.optimizer.init_inparg_dict_from(label_args)
try:
info, _ = self.optimizer.propagate_all_forward(
trace, call_pure_results, flush=False)
except SpeculativeError:
raise InvalidLoop("Speculative heap access would be ill-typed")
end_jump = info.jump_op
label_op = ResOperation(rop.LABEL, label_args,
descr=celltoken)
for a in end_jump.getarglist():
self.optimizer.force_box_for_end_of_preamble(
self.optimizer.get_box_replacement(a))
current_vs = self.get_virtual_state(end_jump.getarglist())
# pick the vs we want to jump to
assert isinstance(celltoken, JitCellToken)
target_virtual_state = self.pick_virtual_state(current_vs,
state.virtual_state,
celltoken.target_tokens)
# force the boxes for virtual state to match
try:
args = target_virtual_state.make_inputargs(
[self.get_box_replacement(x) for x in end_jump.getarglist()],
self.optimizer, force_boxes=True)
for arg in args:
if arg is not None:
self.optimizer.force_box(arg)
except VirtualStatesCantMatch:
raise InvalidLoop("Virtual states did not match "
"after picking the virtual state, when forcing"
" boxes")
extra_same_as = self.short_preamble_producer.extra_same_as[:]
target_token = self.finalize_short_preamble(label_op,
state.virtual_state)
label_op.setdescr(target_token)
if not inline_short_preamble:
self.jump_to_preamble(celltoken, end_jump, info)
return (UnrollInfo(target_token, label_op, extra_same_as,
self.optimizer.quasi_immutable_deps),
self.optimizer._newoperations)
try:
new_virtual_state = self.jump_to_existing_trace(
end_jump, label_op, state.runtime_boxes, force_boxes=False)
except InvalidLoop:
# inlining short preamble failed, jump to preamble
self.jump_to_preamble(celltoken, end_jump, info)
return (UnrollInfo(target_token, label_op, extra_same_as,
self.optimizer.quasi_immutable_deps),
self.optimizer._newoperations)
if new_virtual_state is not None:
# Attempt to force virtual boxes in order to avoid jumping
# to the preamble.
try:
new_virtual_state = self.jump_to_existing_trace(
end_jump, label_op, state.runtime_boxes, force_boxes=True)
except InvalidLoop:
pass
if new_virtual_state is not None:
self.jump_to_preamble(celltoken, end_jump, info)
return (UnrollInfo(target_token, label_op, extra_same_as,
self.optimizer.quasi_immutable_deps),
self.optimizer._newoperations)
self.disable_retracing_if_max_retrace_guards(
self.optimizer._newoperations, target_token)
return (UnrollInfo(target_token, label_op, extra_same_as,
self.optimizer.quasi_immutable_deps),
self.optimizer._newoperations)
def disable_retracing_if_max_retrace_guards(self, ops, target_token):
maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards
count = 0
for op in ops:
if op.is_guard():
count += 1
if count > maxguards:
assert isinstance(target_token, TargetToken)
target_token.targeting_jitcell_token.retraced_count = sys.maxint
def pick_virtual_state(self, my_vs, label_vs, target_tokens):
if target_tokens is None:
return label_vs # for tests
for token in target_tokens:
if token.virtual_state is None:
continue
if token.virtual_state.generalization_of(my_vs, self.optimizer):
return token.virtual_state
return label_vs
def optimize_bridge(self, trace, runtime_boxes, call_pure_results,
inline_short_preamble, box_names_memo, resumestorage):
from rpython.jit.metainterp.optimizeopt.bridgeopt import deserialize_optimizer_knowledge
frontend_inputargs = trace.inputargs
trace = trace.get_iter()
self._check_no_forwarding([trace.inputargs])
if resumestorage:
deserialize_optimizer_knowledge(self.optimizer,
resumestorage, frontend_inputargs,
trace.inputargs)
info, ops = self.optimizer.propagate_all_forward(trace,
call_pure_results, False)
jump_op = info.jump_op
cell_token = jump_op.getdescr()
assert isinstance(cell_token, JitCellToken)
if not inline_short_preamble or len(cell_token.target_tokens) == 1:
return self.jump_to_preamble(cell_token, jump_op, info)
# force all the information that does not go to the short
# preamble at all
self.optimizer.flush()
for a in jump_op.getarglist():
self.optimizer.force_box_for_end_of_preamble(a)
try:
vs = self.jump_to_existing_trace(jump_op, None, runtime_boxes,
force_boxes=False)
except InvalidLoop:
return self.jump_to_preamble(cell_token, jump_op, info)
if vs is None:
return info, self.optimizer._newoperations[:]
warmrunnerdescr = self.optimizer.metainterp_sd.warmrunnerdesc
limit = warmrunnerdescr.memory_manager.retrace_limit
if cell_token.retraced_count < limit:
cell_token.retraced_count += 1
debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit))
else:
# Try forcing boxes to avoid jumping to the preamble
try:
vs = self.jump_to_existing_trace(jump_op, None, runtime_boxes,
force_boxes=True)
except InvalidLoop:
pass
if vs is None:
return info, self.optimizer._newoperations[:]
debug_print("Retrace count reached, jumping to preamble")
return self.jump_to_preamble(cell_token, jump_op, info)
exported_state = self.export_state(info.jump_op.getarglist(),
info.inputargs, runtime_boxes,
box_names_memo)
exported_state.quasi_immutable_deps = self.optimizer.quasi_immutable_deps
self.optimizer._clean_optimization_info(self.optimizer._newoperations)
return exported_state, self.optimizer._newoperations
def finalize_short_preamble(self, label_op, virtual_state):
sb = self.short_preamble_producer
self.optimizer._clean_optimization_info(sb.short_inputargs)
short_preamble = sb.build_short_preamble()
jitcelltoken = label_op.getdescr()
assert isinstance(jitcelltoken, JitCellToken)
if jitcelltoken.target_tokens is None:
jitcelltoken.target_tokens = []
target_token = TargetToken(jitcelltoken,
original_jitcell_token=jitcelltoken)
target_token.original_jitcell_token = jitcelltoken
target_token.virtual_state = virtual_state
target_token.short_preamble = short_preamble
jitcelltoken.target_tokens.append(target_token)
self.short_preamble_producer = ExtendedShortPreambleBuilder(
target_token, sb)
label_op.initarglist(label_op.getarglist() + sb.used_boxes)
return target_token
def jump_to_preamble(self, cell_token, jump_op, info):
assert cell_token.target_tokens[0].virtual_state is None
jump_op = jump_op.copy_and_change(rop.JUMP,
descr=cell_token.target_tokens[0])
self.optimizer.send_extra_operation(jump_op)
return info, self.optimizer._newoperations[:]
def jump_to_existing_trace(self, jump_op, label_op, runtime_boxes, force_boxes=False):
jitcelltoken = jump_op.getdescr()
assert isinstance(jitcelltoken, JitCellToken)
virtual_state = self.get_virtual_state(jump_op.getarglist())
args = [self.get_box_replacement(op) for op in jump_op.getarglist()]
for target_token in jitcelltoken.target_tokens:
target_virtual_state = target_token.virtual_state
if target_virtual_state is None:
continue
try:
extra_guards = target_virtual_state.generate_guards(
virtual_state, args, runtime_boxes, self.optimizer,
force_boxes=force_boxes)
patchguardop = self.optimizer.patchguardop
for guard in extra_guards.extra_guards:
if isinstance(guard, GuardResOp):
guard.rd_resume_position = patchguardop.rd_resume_position
guard.setdescr(compile.ResumeAtPositionDescr())
self.send_extra_operation(guard)
except VirtualStatesCantMatch:
continue
# When force_boxes == True, creating the virtual args can fail when
# components of the virtual state alias. If this occurs, we must
# recompute the virtual state as boxes will have been forced.
try:
args, virtuals = target_virtual_state.make_inputargs_and_virtuals(
args, self.optimizer, force_boxes=force_boxes)
except VirtualStatesCantMatch:
assert force_boxes
virtual_state = self.get_virtual_state(args)
continue
short_preamble = target_token.short_preamble
try:
extra = self.inline_short_preamble(args + virtuals, args,
short_preamble, self.optimizer.patchguardop,
target_token, label_op)
except KeyError:
# SHOULD NOT OCCUR BUT DOES: WHY?? issue #2185
self.optimizer.metainterp_sd.logger_ops.log_short_preamble([],
short_preamble, {})
raise
self.send_extra_operation(jump_op.copy_and_change(rop.JUMP,
args=args + extra,
descr=target_token))
return None # explicit because the return can be non-None
return virtual_state
def _map_args(self, mapping, arglist):
result = []
for box in arglist:
if not isinstance(box, Const):
box = mapping[box]
result.append(box)
return result
def inline_short_preamble(self, jump_args, args_no_virtuals, short,
patchguardop, target_token, label_op):
short_inputargs = short[0].getarglist()
short_jump_args = short[-1].getarglist()
sb = self.short_preamble_producer
if sb is not None:
assert isinstance(sb, ExtendedShortPreambleBuilder)
if sb.target_token is target_token:
# this means we're inlining the short preamble that's being
# built. Make sure we modify the correct things in-place
self.short_preamble_producer.setup(short_jump_args,
short, label_op.getarglist())
# after this call, THE REST OF THIS FUNCTION WILL MODIFY ALL
# THE LISTS PROVIDED, POTENTIALLY
# We need to make a list of fresh new operations corresponding
# to the short preamble operations. We could temporarily forward
# the short operations to the fresh ones, but there are obscure
# issues: send_extra_operation() below might occasionally invoke
# use_box(), which assumes the short operations are not forwarded.
# So we avoid such temporary forwarding and just use a dict here.
assert len(short_inputargs) == len(jump_args)
mapping = {}
for i in range(len(jump_args)):
mapping[short_inputargs[i]] = jump_args[i]
# a fix-point loop, runs only once in almost all cases
i = 1
while 1:
self._check_no_forwarding([short_inputargs, short], False)
while i < len(short) - 1:
sop = short[i]
arglist = self._map_args(mapping, sop.getarglist())
if sop.is_guard():
op = sop.copy_and_change(sop.getopnum(), arglist,
descr=compile.ResumeAtPositionDescr())
assert isinstance(op, GuardResOp)
op.rd_resume_position = patchguardop.rd_resume_position
else:
op = sop.copy_and_change(sop.getopnum(), arglist)
mapping[sop] = op
i += 1
self.optimizer.send_extra_operation(op)
# force all of them except the virtuals
for arg in (args_no_virtuals +
self._map_args(mapping, short_jump_args)):
self.optimizer.force_box(self.get_box_replacement(arg))
self.optimizer.flush()
# done unless "short" has grown again
if i == len(short) - 1:
break
return [self.get_box_replacement(box)
for box in self._map_args(mapping, short_jump_args)]
def _expand_info(self, arg, infos):
if isinstance(arg, AbstractResOp) and rop.is_same_as(arg.opnum):
info = self.optimizer.getinfo(arg.getarg(0))
else:
info = self.optimizer.getinfo(arg)
if arg in infos:
return
if info:
infos[arg] = info
if info.is_virtual():
self._expand_infos_from_virtual(info, infos)
def _expand_infos_from_virtual(self, info, infos):
items = info.all_items()
for item in items:
if item is None:
continue
self._expand_info(item, infos)
def export_state(self, original_label_args, renamed_inputargs,
runtime_boxes, memo):
end_args = [self.optimizer.force_box_for_end_of_preamble(a)
for a in original_label_args]
self.optimizer.flush()
virtual_state = self.get_virtual_state(end_args)
end_args = [self.get_box_replacement(arg) for arg in end_args]
infos = {}
for arg in end_args:
self._expand_info(arg, infos)
label_args, virtuals = virtual_state.make_inputargs_and_virtuals(
end_args, self.optimizer)
for arg in label_args:
self._expand_info(arg, infos)
sb = ShortBoxes()
short_boxes = sb.create_short_boxes(self.optimizer, renamed_inputargs,
label_args + virtuals)
short_inputargs = sb.create_short_inputargs(label_args + virtuals)
for produced_op in short_boxes:
op = produced_op.short_op.res
if not isinstance(op, Const):
self._expand_info(op, infos)
self.optimizer._clean_optimization_info(end_args)
return ExportedState(label_args, end_args, virtual_state, infos,
short_boxes, renamed_inputargs,
short_inputargs, runtime_boxes, memo)
def import_state(self, targetargs, exported_state):
# the mapping between input args (from old label) and what we need
# to actually emit. Update the info
assert (len(exported_state.next_iteration_args) ==
len(targetargs))
for i, target in enumerate(exported_state.next_iteration_args):
source = targetargs[i]
assert source is not target
source.set_forwarded(target)
info = exported_state.exported_infos.get(target, None)
if info is not None:
self.optimizer.setinfo_from_preamble(source, info,
exported_state.exported_infos)
# import the optimizer state, starting from boxes that can be produced
# by short preamble
label_args = exported_state.virtual_state.make_inputargs(
targetargs, self.optimizer)
self.short_preamble_producer = ShortPreambleBuilder(
label_args, exported_state.short_boxes,
exported_state.short_inputargs, exported_state.exported_infos,
self.optimizer)
for produced_op in exported_state.short_boxes:
produced_op.produce_op(self, exported_state.exported_infos)
return label_args
class UnrollInfo(BasicLoopInfo):
""" A state after optimizing the peeled loop, contains the following:
* target_token - generated target token
* label_args - label operations at the beginning
* extra_same_as - list of extra same as to add at the end of the preamble
"""
def __init__(self, target_token, label_op, extra_same_as,
quasi_immutable_deps):
self.target_token = target_token
self.label_op = label_op
self.extra_same_as = extra_same_as
self.quasi_immutable_deps = quasi_immutable_deps
self.extra_before_label = []
def final(self):
return True
class ExportedState(LoopInfo):
""" Exported state consists of a few pieces of information:
* next_iteration_args - starting arguments for next iteration
* exported_infos - a mapping from ops to infos, including inputargs
* end_args - arguments that end up in the label leading to the next
iteration
* virtual_state - instance of VirtualState representing current state
of virtuals at this label
* short boxes - a mapping op -> preamble_op
* renamed_inputargs - the start label arguments in optimized version
* short_inputargs - the renamed inputargs for short preamble
* quasi_immutable_deps - for tracking quasi immutables
* runtime_boxes - runtime values for boxes, necessary when generating
guards to jump to
"""
def __init__(self, end_args, next_iteration_args, virtual_state,
exported_infos, short_boxes, renamed_inputargs,
short_inputargs, runtime_boxes, memo):
self.end_args = end_args
self.next_iteration_args = next_iteration_args
self.virtual_state = virtual_state
self.exported_infos = exported_infos
self.short_boxes = short_boxes
self.renamed_inputargs = renamed_inputargs
self.short_inputargs = short_inputargs
self.runtime_boxes = runtime_boxes
self.dump(memo)
def dump(self, memo):
if have_debug_prints():
debug_start("jit-log-exported-state")
debug_print("[" + ", ".join([x.repr_short(memo) for x in self.next_iteration_args]) + "]")
for box in self.short_boxes:
debug_print(" " + box.repr(memo))
debug_stop("jit-log-exported-state")
def final(self):
return False
|
mit
| -7,870,068,044,440,899,000
| 45.860177
| 102
| 0.589968
| false
| 4.251807
| false
| false
| false
|
googleinterns/deepspeech-reconstruction
|
bin/reconstruct-librispeech/reconstruct-random-transcript.py
|
1
|
1250
|
import argparse
import os
import random
import subprocess
from utils import check_local_utt_reconstructed
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Invert MFCCs to audio.')
parser.add_argument('-s', type=int, dest='start', default=0, help='Starting index')
parser.add_argument('-e', type=int, dest='end', default=-1, help='Ending index')
parser.add_argument('-p', dest='data_path', default='samples/librispeech/data.txt', help='Path to a file containing list of utterances')
args = parser.parse_args()
random.seed(1)
with open(args.data_path) as f:
lines = f.read().strip().split('\n')
utt_ids = [l.split(',')[0] for l in lines]
lengths = [int(l.split(',')[1]) for l in lines]
r = list(range(args.start, args.end + 1 if args.end != -1 and args.end < len(utt_ids) else len(utt_ids)))
random.shuffle(r)
for i in r:
if check_local_utt_reconstructed(utt_ids[i], True):
print('%s is already reconstructed' % utt_ids[i])
continue
print('Reconstructing %s...' % utt_ids[i])
subprocess.call(['bash', './bin/reconstruct-librispeech/reconstruct-random-transcript' + ('-long' if lengths[i] > 1500 else ''), utt_ids[i]])
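# Hedged note: each line of data.txt is assumed to hold "utt_id,length", e.g.
# "1089-134686-0000,1200" (values illustrative); utterances with length above
# 1500 are dispatched to the "-long" variant of the reconstruction script.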
|
apache-2.0
| 6,823,140,920,074,062,000
| 40.666667
| 149
| 0.6392
| false
| 3.255208
| false
| false
| false
|
fheinle/Photoblog
|
get_picasa_id.py
|
1
|
1160
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
''' helper that displays all public album ids for a user'''
# Copyright (C) 2009 Florian Heinle
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gdata.photos.service
import sys
gd_client = gdata.photos.service.PhotosService()
gd_client.source = 'photoblog helper script'
try:
user_name = raw_input('Your picasaweb username: ')
except EOFError:
sys.exit()
album_list = gd_client.GetUserFeed(user=user_name)
for album in album_list.entry:
print 'Title: %s ID: %s' % (album.title.text, album.gphoto_id.text)
|
gpl-3.0
| -8,508,907,138,972,113,000
| 35.25
| 71
| 0.739655
| false
| 3.580247
| false
| false
| false
|
mopsalarm/broke
|
broke/print.py
|
1
|
1488
|
import broke
import sys
import argparse
def parse_arguments():
parser = argparse.ArgumentParser(description="Prints information about a broke-file")
parser.add_argument("--verbose", action="store_true", help="Print each message")
parser.add_argument("--utf8", action="store_true", help="Prints payload of each message decoded as utf8")
parser.add_argument("--no-count", action="store_true", help="Do not print the total number of messages at the end")
parser.add_argument("--follow", action="store_true", help="Follows the stream of messages")
parser.add_argument("--topic", type=str, default=None, help="Only read messages of this topic")
parser.add_argument("file", help="The file to read messaages from")
return parser.parse_args()
def main():
args = parse_arguments()
read_messages = broke.read_messages_follow if args.follow else broke.read_messages
count = 0
with open(args.file, "rb") as fp:
try:
for message in read_messages(fp):
if args.topic is not None and message.topic != args.topic:
continue
count += 1
if args.verbose:
print(message)
if args.utf8:
print(message.payload.decode("utf8"))
except KeyboardInterrupt:
pass
if not args.no_count:
print("Total number of messages: {}".format(count))
if __name__ == '__main__':
main()
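# Hedged CLI examples (file names and topic are illustrative):
#   python broke/print.py --verbose --topic events messages.broke
#   python broke/print.py --follow --no-count messages.broke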
|
apache-2.0
| 4,124,027,425,176,440,300
| 32.818182
| 119
| 0.619624
| false
| 4.20339
| false
| false
| false
|
tianyang-li/meta-transcriptome
|
sample_compare_1/blast_relate.py
|
1
|
3108
|
#!/usr/bin/python
# Copyright (C) 2011 Tianyang Li
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
'''
relate each file containing transcript sequences to each other using blastx
'''
from Bio import SeqIO
import sys
import string
from Bio.Blast import NCBIWWW, NCBIXML
import networkx as nx
from networkx.algorithms.components.connected import connected_components
import json
import time
def BlastClassify(fasta_files):
ac = [] # a list of [seq, seq_accession]
    ac_gr = nx.Graph() # graph with one node per sequence (index into ac); edges link sequences sharing an accession
for fasta_file in fasta_files:
for seq in SeqIO.parse(fasta_file, 'fasta'):
print >> sys.stderr, seq.format('fasta')
while True:
try:
print >> sys.stderr, "nr blastx"
print >> sys.stderr, time.asctime()
blast_rec = list(NCBIXML.parse(NCBIWWW.qblast("blastx", "nr", seq.format('fasta'))))
break
except BaseException as err:
print >> sys.stderr, "Error: %s" % str(err)
while True:
try:
print >> sys.stderr, "env_nr blastx"
print >> sys.stderr, time.asctime()
blast_rec.extend(list(NCBIXML.parse(NCBIWWW.qblast("blastx", "env_nr", seq.format('fasta')))))
break
except BaseException as err:
print >> sys.stderr, "Error: %s" % str(err)
seq_accession = []
for rec in blast_rec:
for align in rec.alignments:
seq_accession.append(string.split(string.split(align.hit_id, "|")[3], ".")[0])
if seq_accession != []:
ac_gr.add_node(len(ac))
ac.append([seq, seq_accession])
for ac1 in ac_gr.nodes():
for ac2 in ac_gr.nodes():
if ac1 != ac2:
if len(set(ac[ac1][1]) & set(ac[ac2][1])) != 0:
ac_gr.add_edge(ac1, ac2)
comp_n = 0
for similar_trans in connected_components(ac_gr):
comp_seq = {}
comp = {'component': comp_n}
seq_n = 0
for trans in similar_trans:
comp_seq['%d' % seq_n] = {'seq': ac[trans][0].format('fasta'), 'accession': ac[trans][1]}
seq_n = seq_n + 1
comp['trans'] = comp_seq
print json.dumps(comp, ensure_ascii=True)
comp_n = comp_n + 1
print >> sys.stderr, comp_n
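# Hedged note on output: each connected component of similar transcripts is
# printed as one JSON object, roughly
#   {"component": 0, "trans": {"0": {"seq": ">id\n...", "accession": ["P12345"]}}}
# (the accession values shown here are illustrative).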
if __name__ == '__main__':
BlastClassify(sys.argv[1:])
sys.exit(0)
|
gpl-3.0
| 2,542,866,438,202,363,400
| 38.341772
| 114
| 0.57529
| false
| 3.665094
| false
| false
| false
|
hmgle/send_wave
|
request_token.py
|
1
|
1059
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from urllib import quote, urlencode
import urllib2
import time
import uuid
import hmac, hashlib
from config import consumer_key, client_secret
def get_token():
URL = 'http://fanfou.com/oauth/request_token'
params = [
('oauth_consumer_key', consumer_key),
('oauth_nonce', uuid.uuid4().hex),
('oauth_signature_method', 'HMAC-SHA1'),
('oauth_timestamp', int(time.time())),
]
params.sort()
p = 'GET&%s&%s' % (quote(URL, safe=''), quote(urlencode(params)))
signature = hmac.new(client_secret + '&', p,
hashlib.sha1).digest().encode('base64').rstrip()
params.append(('oauth_signature', quote(signature)))
h = ', '.join(['%s="%s"' % (k, v) for (k, v) in params])
r = urllib2.Request(URL, headers={'Authorization': 'OAuth realm="", %s' % h})
data = urllib2.urlopen(r).read()
token, secret = [pair.split('=')[1] for pair in data.split('&')]
return token, secret
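# Hedged note: the base string above is "GET&<encoded URL>&<encoded sorted
# params>" and is HMAC-SHA1 signed with "client_secret&" (empty token secret),
# matching the OAuth 1.0 request-token step.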
if __name__ == '__main__':
print get_token()
|
gpl-2.0
| -5,850,782,801,666,942,000
| 26.153846
| 81
| 0.587347
| false
| 3.394231
| false
| false
| false
|
allisson/python-cielo-webservice
|
cielo_webservice/models.py
|
1
|
22823
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import six
import xmltodict
from unidecode import unidecode
class Comercial(object):
"""
    Model for the store's merchant data.
"""
def __init__(self, numero=None, chave=None):
self.numero = numero
self.chave = chave
self.validate()
def validate(self):
if not isinstance(self.numero, six.integer_types):
raise TypeError('numero precisa ser do tipo inteiro.')
if not isinstance(self.chave, six.string_types):
raise TypeError('chave precisa ser do tipo string.')
def __repr__(self):
return '<Comercial(numero={}, chave={})>'.format(
self.numero, self.chave
)
class Cartao(object):
"""
    Model for the card data.
"""
def __init__(self, numero=None, validade=None, indicador=None,
codigo_seguranca=None, nome_portador=None, token=None):
self.numero = numero
self.validade = validade
self.indicador = indicador
self.codigo_seguranca = codigo_seguranca
self.nome_portador = nome_portador
self.token = token
self.validate()
def validate(self):
if self.numero is not None and not isinstance(self.numero, six.integer_types):
raise TypeError('numero precisa ser do tipo inteiro.')
if self.validade is not None and not isinstance(self.validade, six.integer_types):
raise TypeError('validade precisa ser do tipo inteiro.')
if self.indicador is not None and not isinstance(self.indicador, six.integer_types):
raise TypeError('indicador precisa ser do tipo inteiro.')
if self.indicador == 1 and not isinstance(self.codigo_seguranca, six.integer_types):
raise TypeError('codigo_seguranca precisa ser do tipo inteiro.')
if self.nome_portador is not None and not isinstance(self.nome_portador, six.string_types):
raise TypeError('nome_portador precisa ser do tipo string.')
if self.token is not None and not isinstance(self.token, six.string_types):
raise TypeError('token precisa ser do tipo string.')
def __repr__(self):
return '<Cartao(numero={}, validade={}, indicador={}, codigo_seguranca={}, nome_portador={}, token={})>'.format(
self.numero, self.validade, self.indicador, self.codigo_seguranca,
self.nome_portador, self.token
)
class Pedido(object):
"""
    Model for the order data.
"""
def __init__(self, numero=None, valor=None, moeda=986, data_hora=None,
descricao=None, idioma='PT', taxa_embarque=None,
soft_descriptor=None):
self.numero = numero
self.valor = valor
self.moeda = moeda
self.data_hora = data_hora
self.descricao = descricao
self.idioma = idioma
self.taxa_embarque = taxa_embarque
self.soft_descriptor = soft_descriptor
self.validate()
def validate(self):
if not isinstance(self.numero, six.string_types):
raise TypeError('numero precisa ser do tipo string.')
if not isinstance(self.valor, six.integer_types):
raise TypeError('valor precisa ser do tipo inteiro.')
if not isinstance(self.moeda, six.integer_types):
raise TypeError('moeda precisa ser do tipo inteiro.')
if not isinstance(self.data_hora, six.string_types):
raise TypeError('data_hora precisa ser do tipo string.')
if self.descricao is not None and not isinstance(self.descricao, six.string_types):
raise TypeError('descricao precisa ser do tipo string.')
if self.idioma is not None and not isinstance(self.idioma, six.string_types):
raise TypeError('idioma precisa ser do tipo string.')
if self.taxa_embarque is not None and not isinstance(self.taxa_embarque, six.integer_types):
raise TypeError('taxa_embarque precisa ser do tipo inteiro.')
if self.soft_descriptor is not None and not isinstance(self.soft_descriptor, six.string_types):
raise TypeError('soft_descriptor precisa ser do tipo string.')
def __repr__(self):
return '<Pedido(numero={}, valor={}, moeda={}, data_hora={}, descricao={}, idioma={}, taxa_embarque={}, soft_descriptor={})>'.format(
self.numero, self.valor, self.moeda, self.data_hora,
self.descricao, self.idioma, self.taxa_embarque,
self.soft_descriptor
)
class Pagamento(object):
"""
    Model for the payment data.
"""
def __init__(self, bandeira=None, produto=None, parcelas=None):
self.bandeira = bandeira
self.produto = produto
self.parcelas = parcelas
self.validate()
def validate(self):
if not isinstance(self.bandeira, six.string_types):
raise TypeError('bandeira precisa ser do tipo string.')
if not isinstance(self.produto, six.string_types):
raise TypeError('produto precisa ser do tipo string.')
if not isinstance(self.parcelas, six.integer_types):
raise TypeError('parcelas precisa ser do tipo inteiro.')
def __repr__(self):
return '<Pagamento(bandeira={}, produto={}, parcelas={})>'.format(
self.bandeira, self.produto, self.parcelas
)
class Autenticacao(object):
"""
    Model for the authentication data.
"""
def __init__(self, codigo=None, mensagem=None, data_hora=None, valor=None,
eci=None):
self.codigo = codigo
self.mensagem = mensagem
self.data_hora = data_hora
self.valor = valor
self.eci = eci
self.validate()
def validate(self):
if not isinstance(self.codigo, six.integer_types):
raise TypeError('codigo precisa ser do tipo inteiro.')
if not isinstance(self.mensagem, six.string_types):
raise TypeError('mensagem precisa ser do tipo string.')
if not isinstance(self.data_hora, six.string_types):
raise TypeError('data_hora precisa ser do tipo string.')
if not isinstance(self.valor, six.integer_types):
raise TypeError('valor precisa ser do tipo inteiro.')
if not isinstance(self.eci, six.integer_types):
raise TypeError('eci precisa ser do tipo inteiro.')
def __repr__(self):
return '<Autenticacao(codigo={}, mensagem={}, data_hora={}, valor={}, eci={})>'.format(
self.codigo, self.mensagem, self.data_hora, self.valor, self.eci
)
class Autorizacao(object):
"""
    Model for the authorization data.
"""
def __init__(self, codigo=None, mensagem=None, data_hora=None, valor=None,
lr=None, arp=None, nsu=None):
self.codigo = codigo
self.mensagem = mensagem
self.data_hora = data_hora
self.valor = valor
self.lr = lr
self.arp = arp
self.nsu = nsu
self.validate()
def validate(self):
if not isinstance(self.codigo, six.integer_types):
raise TypeError('codigo precisa ser do tipo inteiro.')
if not isinstance(self.mensagem, six.string_types):
raise TypeError('mensagem precisa ser do tipo string.')
if not isinstance(self.data_hora, six.string_types):
raise TypeError('data_hora precisa ser do tipo string.')
if not isinstance(self.valor, six.integer_types):
raise TypeError('valor precisa ser do tipo inteiro.')
if not isinstance(self.lr, six.string_types):
raise TypeError('lr precisa ser do tipo string.')
if self.arp is not None and not isinstance(self.arp, six.integer_types):
raise TypeError('arp precisa ser do tipo inteiro.')
if not isinstance(self.nsu, six.integer_types):
raise TypeError('nsu precisa ser do tipo inteiro.')
def __repr__(self):
return '<Autorizacao(codigo={}, mensagem={}, data_hora={}, valor={}, lr={}, arp={}, nsu={})>'.format(
self.codigo, self.mensagem, self.data_hora, self.valor, self.lr,
self.arp, self.nsu
)
class Token(object):
"""
    Model for the token data.
"""
def __init__(self, codigo=None, status=None, numero=None):
self.codigo = codigo
self.status = status
self.numero = numero
self.validate()
def validate(self):
if not isinstance(self.codigo, six.string_types):
raise TypeError('codigo precisa ser do tipo string.')
if not isinstance(self.status, six.integer_types):
raise TypeError('status precisa ser do tipo inteiro.')
if not isinstance(self.numero, six.string_types):
raise TypeError('numero precisa ser do tipo string.')
def __repr__(self):
return '<Token(codigo={}, status={}, numero={})>'.format(
self.codigo, self.status, self.numero
)
class Avs(object):
"""
    Model for the AVS (Address Verification Service) data.
"""
def __init__(self, endereco=None, complemento=None, numero=None,
bairro=None, cep=None):
self.endereco = endereco
self.complemento = complemento
self.numero = numero
self.bairro = bairro
self.cep = cep
self.validate()
def validate(self):
if not isinstance(self.endereco, six.string_types):
raise TypeError('endereco precisa ser do tipo string.')
if not isinstance(self.complemento, six.string_types):
raise TypeError('complemento precisa ser do tipo string.')
if not isinstance(self.numero, six.integer_types):
raise TypeError('numero precisa ser do tipo inteiro.')
if not isinstance(self.bairro, six.string_types):
raise TypeError('bairro precisa ser do tipo string.')
if not isinstance(self.cep, six.string_types):
raise TypeError('cep precisa ser do tipo string.')
def __repr__(self):
return '<Avs(endereco={}, complemento={}, numero={}, bairro={}, cep={})>'.format(
self.endereco, self.complemento, self.numero, self.bairro, self.cep
)
class Captura(object):
"""
    Model for the capture data.
"""
def __init__(self, codigo=None, mensagem=None, data_hora=None, valor=None,
taxa_embarque=None):
self.codigo = codigo
self.mensagem = mensagem
self.data_hora = data_hora
self.valor = valor
self.taxa_embarque = taxa_embarque
self.validate()
def validate(self):
if not isinstance(self.codigo, six.integer_types):
raise TypeError('codigo precisa ser do tipo inteiro.')
if not isinstance(self.mensagem, six.string_types):
raise TypeError('mensagem precisa ser do tipo string.')
if not isinstance(self.data_hora, six.string_types):
raise TypeError('data_hora precisa ser do tipo string.')
if not isinstance(self.valor, six.integer_types):
raise TypeError('valor precisa ser do tipo inteiro.')
if self.taxa_embarque is not None and not isinstance(self.taxa_embarque, six.integer_types):
raise TypeError('taxa_embarque precisa ser do tipo inteiro.')
def __repr__(self):
return '<Captura(codigo={}, mensagem={}, data_hora={}, valor={}, taxa_embarque={})>'.format(
self.codigo, self.mensagem, self.data_hora, self.valor,
self.taxa_embarque
)
class Cancelamento(object):
"""
    Model for the cancellation data.
"""
def __init__(self, codigo=None, mensagem=None, data_hora=None, valor=None):
self.codigo = codigo
self.mensagem = mensagem
self.data_hora = data_hora
self.valor = valor
self.validate()
def validate(self):
if not isinstance(self.codigo, six.integer_types):
raise TypeError('codigo precisa ser do tipo inteiro.')
if not isinstance(self.mensagem, six.string_types):
raise TypeError('mensagem precisa ser do tipo string.')
if not isinstance(self.data_hora, six.string_types):
raise TypeError('data_hora precisa ser do tipo string.')
if not isinstance(self.valor, six.integer_types):
raise TypeError('valor precisa ser do tipo inteiro.')
def __repr__(self):
return '<Cancelamento(codigo={}, mensagem={}, data_hora={}, valor={})>'.format(
self.codigo, self.mensagem, self.data_hora, self.valor
)
class Erro(object):
"""
    Model for system error data.
"""
def __init__(self, codigo=None, mensagem=None):
self.codigo = codigo
self.mensagem = mensagem
self.validate()
def validate(self):
if not isinstance(self.codigo, six.string_types):
raise TypeError('codigo precisa ser do tipo string.')
if not isinstance(self.mensagem, six.string_types):
raise TypeError('mensagem precisa ser do tipo string.')
def __repr__(self):
return '<Erro(codigo={}, mensagem={})>'.format(
self.codigo, self.mensagem
)
class Transacao(object):
"""
    Model for the data of a transaction.
"""
def __init__(self, comercial=None, cartao=None, pedido=None,
pagamento=None, url_retorno=None, autorizar=None,
capturar=None, campo_livre=None, bin=None, gerar_token=None,
avs=None, autenticacao=None, autorizacao=None, captura=None,
token=None, cancelamento=None, tid=None, pan=None,
status=None, url_autenticacao=None):
self.comercial = comercial
self.cartao = cartao
self.pedido = pedido
self.pagamento = pagamento
self.url_retorno = url_retorno
self.autorizar = autorizar
self.capturar = capturar
self.campo_livre = campo_livre
self.bin = bin
self.gerar_token = gerar_token
self.avs = avs
self.autenticacao = autenticacao
self.autorizacao = autorizacao
self.captura = captura
self.token = token
self.cancelamento = cancelamento
self.tid = tid
self.pan = pan
self.status = status
self.url_autenticacao = url_autenticacao
self.validate()
def validate(self):
if self.comercial is not None and not isinstance(self.comercial, Comercial):
raise TypeError('comercial precisa ser do tipo Comercial.')
if self.cartao is not None and not isinstance(self.cartao, Cartao):
raise TypeError('cartao precisa ser do tipo Cartao.')
if not isinstance(self.pedido, Pedido):
raise TypeError('pedido precisa ser do tipo Pedido.')
if not isinstance(self.pagamento, Pagamento):
raise TypeError('pagamento precisa ser do tipo Pagamento.')
if self.autorizar is not None and not isinstance(self.autorizar, six.integer_types):
raise TypeError('autorizar precisa ser do tipo inteiro.')
if self.autorizar == 1 and not isinstance(self.url_retorno, six.string_types):
raise TypeError('url_retorno precisa ser do tipo string.')
if self.capturar is not None and not isinstance(self.capturar, bool):
raise TypeError('capturar precisa ser do tipo booleano.')
if self.campo_livre is not None and not isinstance(self.campo_livre, six.string_types):
raise TypeError('campo_livre precisa ser do tipo string.')
if self.bin is not None and not isinstance(self.bin, six.integer_types):
raise TypeError('bin precisa ser do tipo inteiro.')
if self.gerar_token is not None and not isinstance(self.gerar_token, bool):
raise TypeError('gerar_token precisa ser do tipo booleano.')
if self.avs is not None and not isinstance(self.avs, Avs):
raise TypeError('avs precisa ser do tipo Avs.')
if self.autenticacao is not None and not isinstance(self.autenticacao, Autenticacao):
raise TypeError('autenticacao precisa ser do tipo Autenticacao.')
if self.autorizacao is not None and not isinstance(self.autorizacao, Autorizacao):
raise TypeError('autorizacao precisa ser do tipo Autorizacao.')
if self.captura is not None and not isinstance(self.captura, Captura):
raise TypeError('captura precisa ser do tipo Captura.')
if self.token is not None and not isinstance(self.token, Token):
raise TypeError('token precisa ser do tipo Token.')
if self.cancelamento is not None and not isinstance(self.cancelamento, Cancelamento):
raise TypeError('cancelamento precisa ser do tipo Cancelamento.')
if self.tid is not None and not isinstance(self.tid, six.string_types):
raise TypeError('tid precisa ser do tipo string.')
if self.pan is not None and not isinstance(self.pan, six.string_types):
raise TypeError('pan precisa ser do tipo string.')
if self.status is not None and not isinstance(self.status, six.integer_types):
raise TypeError('status precisa ser do tipo inteiro.')
if self.url_autenticacao is not None and not isinstance(self.url_autenticacao, six.string_types):
raise TypeError('url_autenticacao precisa ser do tipo string.')
def __repr__(self):
return '<Transacao(comercial={}, cartao={}, pedido={}, pagamento={}, url_retorno={}, autorizar={}, capturar={}, campo_livre={}, bin={}, gerar_token={}, avs={}, autenticacao={}, autorizacao={}, captura={}, token={}, cancelamento={}, tid={}, pan={}, status={}, url_autenticacao={})>'.format(
self.comercial, self.cartao, self.pedido, self.pagamento,
self.url_retorno, self.autorizar, self.capturar, self.campo_livre,
self.bin, self.gerar_token, self.avs, self.autenticacao,
self.autorizacao, self.captura, self.token, self.cancelamento,
self.tid, self.pan, self.status, self.url_autenticacao
)
def xml_to_object(xml):
data = xmltodict.parse(xml)
if 'transacao' in data:
transacao = data['transacao']
pedido = dict_to_pedido(transacao.get('dados-pedido')) if transacao.get('dados-pedido') else None
pagamento = dict_to_pagamento(transacao.get('forma-pagamento')) if transacao.get('forma-pagamento') else None
autenticacao = dict_to_autenticacao(transacao.get('autenticacao')) if transacao.get('autenticacao') else None
autorizacao = dict_to_autorizacao(transacao.get('autorizacao')) if transacao.get('autorizacao') else None
token = dict_to_token(transacao.get('token')) if transacao.get('token') else None
captura = dict_to_captura(transacao.get('captura')) if transacao.get('captura') else None
cancelamento = dict_to_cancelamento(transacao.get('cancelamentos')) if transacao.get('cancelamentos') else None
tid = transacao.get('tid') if transacao.get('tid') else None
pan = transacao.get('pan') if transacao.get('pan') else None
status = int(transacao.get('status')) if transacao.get('status') else None
url_autenticacao = transacao.get('url-autenticacao') if transacao.get('url-autenticacao') else None
return Transacao(
pedido=pedido,
pagamento=pagamento,
autenticacao=autenticacao,
autorizacao=autorizacao,
token=token,
captura=captura,
cancelamento=cancelamento,
tid=tid,
pan=pan,
status=status,
url_autenticacao=url_autenticacao,
)
if 'retorno-token' in data:
retorno_token = data['retorno-token']
return Token(
codigo=retorno_token['token']['dados-token']['codigo-token'],
status=int(retorno_token['token']['dados-token']['status']),
numero=retorno_token['token']['dados-token']['numero-cartao-truncado']
)
if 'erro' in data:
return Erro(
codigo=data['erro']['codigo'],
mensagem=data['erro']['mensagem'],
)
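# Hedged example of an <erro> payload handled above (structure inferred from
# the parsing code; values are illustrative):
#   <erro><codigo>001</codigo><mensagem>Mensagem invalida</mensagem></erro>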
def dict_to_pedido(data):
descricao = data.get('descricao') if data.get('descricao') else None
idioma = data.get('idioma') if data.get('idioma') else None
taxa_embarque = int(data.get('taxa-embarque')) if data.get('taxa-embarque') else None
soft_descriptor = data.get('soft-descriptor') if data.get('soft-descriptor') else None
pedido = Pedido(
numero=data.get('numero'),
valor=int(data.get('valor')),
moeda=int(data.get('moeda')),
data_hora=data.get('data-hora'),
descricao=descricao,
idioma=idioma,
taxa_embarque=taxa_embarque,
soft_descriptor=soft_descriptor
)
return pedido
def dict_to_pagamento(data):
pagamento = Pagamento(
bandeira=data.get('bandeira'),
produto=data.get('produto'),
parcelas=int(data.get('parcelas')),
)
return pagamento
def dict_to_autenticacao(data):
autenticacao = Autenticacao(
codigo=int(data.get('codigo')),
mensagem=unidecode(data.get('mensagem')),
data_hora=data.get('data-hora'),
valor=int(data.get('valor')),
eci=int(data.get('eci')),
)
return autenticacao
def dict_to_autorizacao(data):
autorizacao = Autorizacao(
codigo=int(data.get('codigo')),
mensagem=unidecode(data.get('mensagem')),
data_hora=data.get('data-hora'),
valor=int(data.get('valor')),
lr=data.get('lr'),
arp=int(data.get('arp')) if data.get('arp') else None,
nsu=int(data.get('nsu')),
)
return autorizacao
def dict_to_captura(data):
taxa_embarque = int(data.get('taxa-embarque')) if data.get('taxa-embarque') else None
captura = Captura(
codigo=int(data.get('codigo')),
mensagem=unidecode(data.get('mensagem')),
data_hora=data.get('data-hora'),
valor=int(data.get('valor')),
taxa_embarque=taxa_embarque,
)
return captura
def dict_to_token(data):
token = Token(
codigo=data['dados-token']['codigo-token'],
status=int(data['dados-token']['status']),
numero=data['dados-token']['numero-cartao-truncado']
)
return token
def dict_to_cancelamento(data):
data = data['cancelamento']
cancelamento = Cancelamento(
codigo=int(data.get('codigo')),
mensagem=unidecode(data.get('mensagem')),
data_hora=data.get('data-hora'),
valor=int(data.get('valor'))
)
return cancelamento
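# Hedged usage sketch (all field values are illustrative, not real Cielo data):
#
#   comercial = Comercial(numero=1006993069, chave='chave-de-exemplo')
#   pedido = Pedido(numero='1234', valor=10000, data_hora='2016-01-01T01:00Z')
#   pagamento = Pagamento(bandeira='visa', produto='1', parcelas=1)
#   transacao = Transacao(comercial=comercial, pedido=pedido,
#                         pagamento=pagamento, autorizar=3, capturar=True)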
|
mit
| 4,884,582,285,604,461,000
| 35.447284
| 297
| 0.624606
| false
| 3.433559
| false
| false
| false
|
ForAP/Advanc3d-Pr0graming
|
Spikes/DnD/toolbox.py
|
1
|
2574
|
# File name: toolbox.py
import kivy
kivy.require('1.8.0')
import math
from kivy.uix.togglebutton import ToggleButton
from kivy.graphics import Line
from dnd import StickMan, DraggableWidget
class ToolButton(ToggleButton):
def on_touch_down(self, touch):
ds = self.parent.drawing_space
if self.state == 'down' and ds.collide_point(touch.x, touch.y):
(x,y) = ds.to_widget(touch.x, touch.y)
self.draw(ds, x, y)
return True
return super(ToolButton, self).on_touch_down(touch)
def draw(self, ds, x, y):
pass
class ToolStickman(ToolButton):
def draw(self, ds, x, y):
sm = StickMan(width=48, height=48)
sm.center = (x,y)
ds.add_widget(sm)
class ToolFigure(ToolButton):
def draw(self, ds, x, y):
(self.ix, self.iy) = (x,y)
with ds.canvas:
self.figure=self.create_figure(x,y,x+1,y+1)
ds.bind(on_touch_move=self.update_figure)
ds.bind(on_touch_up=self.end_figure)
def update_figure(self, ds, touch):
if ds.collide_point(touch.x, touch.y):
(x,y) = ds.to_widget(touch.x, touch.y)
ds.canvas.remove(self.figure)
with ds.canvas:
self.figure = self.create_figure(self.ix, self.iy,x,y)
def end_figure(self, ds, touch):
ds.unbind(on_touch_move=self.update_figure)
ds.unbind(on_touch_up=self.end_figure)
ds.canvas.remove(self.figure)
(fx,fy) = ds.to_widget(touch.x, touch.y)
self.widgetize(ds,self.ix,self.iy,fx,fy)
def widgetize(self,ds,ix,iy,fx,fy):
widget = self.create_widget(ix,iy,fx,fy)
(ix,iy) = widget.to_local(ix,iy,relative=True)
(fx,fy) = widget.to_local(fx,fy,relative=True)
widget.canvas.add(self.create_figure(ix,iy,fx,fy))
ds.add_widget(widget)
def create_figure(self,ix,iy,fx,fy):
pass
def create_widget(self,ix,iy,fx,fy):
pass
class ToolLine(ToolFigure):
def create_figure(self,ix,iy,fx,fy):
return Line(points=[ix, iy, fx, fy])
def create_widget(self,ix,iy,fx,fy):
pos = (min(ix, fx), min(iy, fy))
size = (abs(fx-ix), abs(fy-iy))
return DraggableWidget(pos = pos, size = size)
class ToolCircle(ToolFigure):
def create_figure(self,ix,iy,fx,fy):
return Line(circle=[ix,iy,math.hypot(ix-fx,iy-fy)])
def create_widget(self,ix,iy,fx,fy):
r = math.hypot(ix-fx, iy-fy)
pos = (ix-r, iy-r)
size = (2*r, 2*r)
return DraggableWidget(pos = pos, size = size)
|
gpl-2.0
| 8,474,231,486,654,924,000
| 31.582278
| 71
| 0.60101
| false
| 2.869565
| false
| false
| false
|
DavidAndreev/indico
|
indico/modules/events/sessions/controllers/display.py
|
1
|
4250
|
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from io import BytesIO
from flask import session, request
from pytz import timezone
from sqlalchemy.orm import joinedload, subqueryload
from werkzeug.exceptions import Forbidden
from indico.modules.events.sessions.models.sessions import Session
from indico.modules.events.sessions.util import (get_sessions_for_user, get_session_ical_file,
get_session_timetable_pdf)
from indico.modules.events.sessions.views import WPDisplaySession, WPDisplayMySessionsConference
from indico.modules.events.util import get_base_ical_parameters
from indico.web.flask.util import send_file
from MaKaC.common.timezoneUtils import DisplayTZ
from MaKaC.webinterface.rh.conferenceDisplay import RHConferenceBaseDisplay
class RHDisplaySessionList(RHConferenceBaseDisplay):
def _checkProtection(self):
if not session.user:
raise Forbidden
RHConferenceBaseDisplay._checkProtection(self)
def _process(self):
sessions = get_sessions_for_user(self.event_new, session.user)
return WPDisplayMySessionsConference.render_template('display/session_list.html', self._conf,
event=self.event_new, sessions=sessions)
class RHDisplaySessionBase(RHConferenceBaseDisplay):
normalize_url_spec = {
'locators': {
lambda self: self.session
}
}
def _checkProtection(self):
if not self.session.can_access(session.user):
raise Forbidden
def _checkParams(self, params):
RHConferenceBaseDisplay._checkParams(self, params)
self.session = Session.get_one(request.view_args['session_id'], is_deleted=False)
class RHDisplaySession(RHDisplaySessionBase):
view_class = WPDisplaySession
def _process(self):
ical_params = get_base_ical_parameters(session.user, self.event_new, 'sessions',
'/export/event/{0}/session/{1}.ics'.format(self.event_new.id,
self.session.id))
tz = timezone(DisplayTZ(session.user, self._conf).getDisplayTZ())
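        # Eager-load contributions and blocks together with their person
        # links and timetable entries, so rendering the template does not
        # trigger a separate query per relationship.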
contributions_strategy = subqueryload('contributions')
_contrib_tte_strategy = contributions_strategy.joinedload('timetable_entry')
_contrib_tte_strategy.lazyload('*')
contributions_strategy.joinedload('person_links')
blocks_strategy = joinedload('blocks')
blocks_strategy.joinedload('person_links')
_block_tte_strategy = blocks_strategy.joinedload('timetable_entry')
_block_tte_strategy.lazyload('*')
_block_tte_strategy.joinedload('children')
sess = (Session.query
.filter_by(id=self.session.id)
.options(contributions_strategy, blocks_strategy)
.one())
return self.view_class.render_template('display/session_display.html', self._conf, sess=sess,
event=self.event_new, timezone=tz, **ical_params)
class RHExportSessionToICAL(RHDisplaySessionBase):
def _process(self):
return send_file('session.ics', get_session_ical_file(self.session), 'text/calendar')
class RHExportSessionTimetableToPDF(RHDisplaySessionBase):
def _process(self):
pdf = get_session_timetable_pdf(self.session)
return send_file('session-timetable.pdf', BytesIO(pdf.getPDFBin()), 'application/pdf')
|
gpl-3.0
| 6,430,539,133,723,840,000
| 42.814433
| 108
| 0.679059
| false
| 4.066986
| false
| false
| false
|
zerothi/sisl
|
sisl/io/siesta/__init__.py
|
1
|
2186
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
"""
Siesta
======
The interaction between sisl and `Siesta`_ is one of sisl's main goals, due
to the close relationship between the developer of sisl and `Siesta`_.
The TranSiesta output files are likewise handled intrinsically by
sisl.
Remark that the `gridSileSiesta` file encompasses the ``RHO``, ``RHOINIT``, ``DRHO``,
``RHOXC``, ``BADER``, ``IOCH``, ``TOCH``, ``VH``, ``VNA`` and ``VT`` binary output files.
fdfSileSiesta - input file
outSileSiesta - output file
xvSileSiesta - xyz and vxyz file
bandsSileSiesta - band structure information
eigSileSiesta - EIG file
pdosSileSiesta - PDOS file
gridSileSiesta - Grid charge information (binary)
gridncSileSiesta - NetCDF grid output files (netcdf)
onlysSileSiesta - Overlap matrix information
dmSileSiesta - density matrix information
hsxSileSiesta - Hamiltonian and overlap matrix information
wfsxSileSiesta - wavefunctions
ncSileSiesta - NetCDF output file
ionxmlSileSiesta - Basis-information from the ion.xml files
ionncSileSiesta - Basis-information from the ion.nc files
orbindxSileSiesta - Basis-information (no geometry information, only ranges)
faSileSiesta - Forces on atoms
fcSileSiesta - Force constant matrix
kpSileSiesta - k-points from simulation
rkpSileSiesta - k-points to simulation
structSileSiesta - geometry in STRUCT_* files
The TranSiesta specific output files are:
tshsSileSiesta - TranSiesta Hamiltonian
tsdeSileSiesta - TranSiesta TSDE
tsgfSileSiesta - TranSiesta surface Green function files
tsvncSileSiesta - TranSiesta potential solution input file
"""
from .sile import *
from .bands import *
from .basis import *
from .binaries import *
from .eig import *
from .fa import *
from .fc import *
from .fdf import *
from .kp import *
from .orb_indx import *
from .out import *
from .pdos import *
from .struct import *
from .siesta_nc import *
from .siesta_grid import *
from .transiesta_grid import *
from .xv import *
|
lgpl-3.0
| -871,648,533,786,630,800
| 33.698413
| 88
| 0.746112
| false
| 3.163531
| false
| true
| false
|
openstates/openstates
|
openstates/ga/people.py
|
1
|
8685
|
from pupa.scrape import Person, Scraper
from openstates.utils import LXMLMixin
from .util import get_client, get_url, backoff, SESSION_SITE_IDS
HOMEPAGE_URLS = {
"lower": (
"http://www.house.ga.gov/Representatives/en-US/"
"member.aspx?Member={code}&Session={sid}"
),
"upper": (
"http://www.senate.ga.gov/SENATORS/en-US/"
"member.aspx?Member={code}&Session={sid}"
),
}
class GAPersonScraper(Scraper, LXMLMixin):
sservice = get_client("Members").service
ssource = get_url("Members")
def clean_list(self, dirty_list):
new_list = []
for x in dirty_list:
if x is None:
new_list.append(x)
else:
new_list.append(x.strip())
return new_list
def scrape_homepage(self, url, kwargs):
url = url.format(**kwargs)
page = self.lxmlize(url)
images = page.xpath("//img[contains(@src, 'SiteCollectionImages')]")
if len(images) != 1:
            raise Exception("expected exactly one member photo at " + url)
return url, images[0].attrib["src"]
def scrape_session(self, session, chambers):
sid = SESSION_SITE_IDS[session]
members = backoff(self.sservice.GetMembersBySession, sid)["MemberListing"]
seen_guids = []
for member in members:
guid = member["Id"]
member_info = backoff(self.sservice.GetMember, guid)
# If a member switches chambers during the session, they may
# appear twice. Skip the duplicate record accordingly.
if guid in seen_guids:
self.warning(
"Skipping duplicate record of {}".format(
member_info["Name"]["Last"]
)
)
continue
else:
seen_guids.append(guid)
# Check to see if the member has vacated; skip if so.
# A member can have multiple services for a given session,
# if they switched chambers. Filter these down to just the
# active service.
try:
(legislative_service,) = [
service
for service in member_info["SessionsInService"][
"LegislativeService"
]
if service["Session"]["Id"] == sid
and service["DateVacated"] is None
]
except ValueError:
self.info(
"Skipping retired member {}".format(member_info["Name"]["Last"])
)
continue
nick_name, first_name, middle_name, last_name = (
member_info["Name"][x] for x in ["Nickname", "First", "Middle", "Last"]
)
first_name = nick_name if nick_name else first_name
if middle_name:
full_name = "%s %s %s" % (first_name, middle_name, last_name)
else:
full_name = "%s %s" % (first_name, last_name)
party = legislative_service["Party"]
if party == "Democrat":
party = "Democratic"
elif party.strip() == "":
party = "other"
chamber, district = (
legislative_service["District"][x] for x in ["Type", "Number"]
)
chamber = {"House": "lower", "Senate": "upper"}[chamber]
url, photo = self.scrape_homepage(
HOMEPAGE_URLS[chamber], {"code": guid, "sid": sid}
)
legislator = Person(
name=full_name,
district=str(district),
party=party,
primary_org=chamber,
image=photo,
)
legislator.extras = {
"family_name": last_name,
"given_name": first_name,
"guid": guid,
}
if (
member_info["Address"]["Street"] is not None
and member_info["Address"]["Street"].strip()
):
capitol_address_info = {
k: v.strip()
for k, v in dict(member_info["Address"]).items()
if k in ["Street", "City", "State", "Zip"]
}
capitol_address = "{Street}\n{City}, {State} {Zip}".format(
**capitol_address_info
)
legislator.add_contact_detail(
type="address", value=capitol_address, note="Capitol Address"
)
else:
self.warning(
"Could not find full capitol address for {}".format(full_name)
)
capitol_contact_info = self.clean_list(
[member_info["Address"][x] for x in ["Email", "Phone", "Fax"]]
)
# Sometimes email is set to a long cryptic string.
# If it doesn't have a @ character, simply set it to None
# examples:
# 01X5dvct3G1lV6RQ7I9o926Q==&c=xT8jBs5X4S7ZX2TOajTx2W7CBprTaVlpcvUvHEv78GI=
# 01X5dvct3G1lV6RQ7I9o926Q==&c=eSH9vpfdy3XJ989Gpw4MOdUa3n55NTA8ev58RPJuzA8=
if capitol_contact_info[0] and "@" not in capitol_contact_info[0]:
capitol_contact_info[0] = None
if capitol_contact_info[0]:
# Site was hacked in the past
assert "quickrxdrugs@yahoo.com" not in capitol_contact_info[0]
if capitol_contact_info[1]:
legislator.add_contact_detail(
type="voice", value=capitol_contact_info[1], note="Capitol Address"
)
if capitol_contact_info[2]:
legislator.add_contact_detail(
type="fax", value=capitol_contact_info[2], note="Capitol Address"
)
if capitol_contact_info[0]:
legislator.add_contact_detail(
type="email", value=capitol_contact_info[0], note="Capitol Address"
)
if (
member_info["DistrictAddress"]["Street"] is not None
and member_info["DistrictAddress"]["Street"].strip()
):
district_address_info = {
k: v.strip()
for k, v in dict(member_info["DistrictAddress"]).items()
if k in ["Street", "City", "State", "Zip"]
}
district_address = "{Street}\n{City}, {State} {Zip}".format(
**district_address_info
)
legislator.add_contact_detail(
type="address", value=district_address, note="District Address"
)
else:
self.warning(
"Could not find full district address for {}".format(full_name)
)
district_contact_info = self.clean_list(
[member_info["DistrictAddress"][x] for x in ["Email", "Phone", "Fax"]]
)
# Same issue with district email. See above comment
if district_contact_info[0] and "@" not in district_contact_info[0]:
district_contact_info[0] = None
if district_contact_info[0]:
# Site was hacked in the past
assert "quickrxdrugs@yahoo.com" not in district_contact_info[0]
if district_contact_info[1]:
legislator.add_contact_detail(
type="voice",
value=district_contact_info[1],
note="District Address",
)
if district_contact_info[2]:
legislator.add_contact_detail(
type="fax", value=district_contact_info[2], note="District Address"
)
if district_contact_info[0]:
legislator.add_contact_detail(
type="email",
value=district_contact_info[0],
note="District Address",
)
legislator.add_link(url)
legislator.add_source(self.ssource)
legislator.add_source(
HOMEPAGE_URLS[chamber].format(**{"code": guid, "sid": sid})
)
yield legislator
def scrape(self, session=None, chamber=None):
if not session:
session = self.latest_session()
self.info("no session specified, using %s", session)
chambers = [chamber] if chamber is not None else ["upper", "lower"]
yield from self.scrape_session(session, chambers)
|
gpl-3.0
| -9,173,314,109,548,088,000
| 35.491597
| 87
| 0.49764
| false
| 4.018973
| false
| false
| false
|
abelcarreras/MonteModes
|
montemodes/functions/symgroup.py
|
1
|
2983
|
import os
from subprocess import Popen, PIPE, call
class Symgroup:
def __init__(self,
symmetry='c 5',
label=False,
connect=False,
central_atom=0,
custom_atom_list=None):
self._symmetry = symmetry
self._label = label
self._connect = connect
self._central_atom = central_atom
self._custom_atom_list = custom_atom_list
        # Check that the symgroup binary (the program invoked below) is in
        # the system path
        if not call("type symgroup", shell=True, stdout=PIPE, stderr=PIPE) == 0:
            print('symgroup binary not found')
            exit()
@property
def symmetry(self):
return self._symmetry
@property
def label(self):
return self._label
@property
def connect(self):
return self._connect
@property
def central_atom(self):
return self._central_atom
@property
def custom_atom_list(self):
return self._custom_atom_list
def create_symgroup_file(molecule, input_data):
label = input_data.label
connect = input_data.connect
central_atom = input_data.central_atom
symmetry = input_data.symmetry
atoms_list = input_data.custom_atom_list
if isinstance(atoms_list, type(None)):
atoms_list = range(molecule.get_number_of_atoms())
if central_atom == 0:
ligands = len(atoms_list)
else:
ligands = len(atoms_list) - 1
temp_file_name = 'symmetry'+ '_' + str(os.getpid()) + '.zdat'
symgroup_input_file = open(temp_file_name, 'w')
if label:
symgroup_input_file.write('%label\n')
if connect:
symgroup_input_file.write('%connect\n')
symgroup_input_file.write(str(ligands) + ' ' + str(central_atom) + '\n\n' + symmetry + '\nA\n')
for i in range(molecule.get_number_of_atoms()):
line = str(list(molecule.get_atomic_elements()[i]) +
list(molecule.get_coordinates()[i])
).strip('[]').replace(',', '').replace("'", "")
symgroup_input_file.write(line + '\n')
symgroup_input_file.write('\n')
return symgroup_input_file
def get_symmetry(molecule, input_data, remove_files=True):
symgroup_input_file = create_symgroup_file(molecule, input_data)
symgroup_input_file.close()
symgroup_process = Popen('symgroup '+ symgroup_input_file.name, stderr=PIPE, stdout=PIPE, shell=True)
symgroup_process.wait()
try:
measure = float(open(symgroup_input_file.name[:-4]+'ztab','r').readlines()[-1].split()[-1])
except ValueError:
return None
if remove_files:
os.remove(symgroup_input_file.name)
os.remove(symgroup_input_file.name[:-4]+'ztab')
os.remove(symgroup_input_file.name[:-4]+'zout')
return measure
def get_symmetry_trajectory(trajectory, input_data):
symmetry_list = []
for molecule in trajectory[1:]:
symmetry_list.append(get_symmetry(molecule, input_data))
return symmetry_list
|
mit
| 6,281,239,343,185,834,000
| 26.62037
| 105
| 0.606772
| false
| 3.660123
| false
| false
| false
|
hqpr/findyour3d
|
findyour3d/dashboard/tests.py
|
1
|
7564
|
import datetime
import stripe
from django.urls import reverse
from django.utils import timezone
from django.test import TestCase, Client
from findyour3d.users.models import User
from findyour3d.company.models import Company
from findyour3d.customer.models import Customer
STRIPE_API_KEY = 'sk_test_BS2t9JImRsscT1vyWNsPYGLK'
class DashboardTests(TestCase):
def setUp(self):
self.now = timezone.now()
default_card = 'card_1ArI7LElSiayVU2xj6k589HC'
stripe_id = 'cus_BDjavlKLrRcpf5'
self.silver_user = User.objects.create(username='silver_user',
user_type=2,
date_joined=self.now,
is_active=True,
email='silver_user@test.com',
payment_active=True,
default_card=default_card,
stripe_id=stripe_id,
plan=1)
        # Individuals, CAD assistance, $250 - 500, Stereolithography (SLM), Nylon
self.silver_company = Company.objects.create(user=self.silver_user,
name='silver_company',
display_name='silver_company',
address_line_1='1', address_line_2='2',
full_name='silver_company', email='silver_company@mail.com',
phone='1234453534', website='asddsd.com',
ideal_customer=['0', ],
is_cad_assistance=True,
budget=['2', ],
printing_options=['1', '2'],
material=['6', '10', '11'],
top_printing_processes=['1', '2'],
description='silver_company',
shipping=['0', '1', '2'])
self.simple_user = User.objects.create(username='simple_user',
user_type=1,
is_active=True,
email='simple_user@test.com',
plan=1)
self.simple_user.set_password('1234567a')
self.simple_user.save()
self.customer = Customer.objects.create(user=self.simple_user,
budget=2,
customer_type=0,
material='6',
process='2',
is_advanced_filled=True,
shipping='1',
need_assistance=1)
self.metal_company_user = User.objects.create(username='metal_user',
user_type=2,
date_joined=self.now,
is_active=True,
email='metal_user@test.com',
payment_active=True,
default_card=default_card,
stripe_id=stripe_id,
plan=1)
        # Individuals, CAD assistance, $250 - 500, Stereolithography (SLM), Copper
self.metal_company = Company.objects.create(user=self.metal_company_user,
name='metal_company',
display_name='metal_company',
address_line_1='1', address_line_2='2',
full_name='metal_company', email='metal_company@mail.com',
phone='1234453534', website='metal_company.com',
ideal_customer=['0', ],
is_cad_assistance=True,
budget=['2', ],
printing_options=['1', ],
material=['13', ],
top_printing_processes=['1', ],
description='metal_company',
shipping=['0', '1', '2'])
self.metal_customer_user = User.objects.create(username='metal_customer_user',
user_type=1,
is_active=True,
email='metal_customer_user@test.com',
plan=1)
self.metal_customer_user.set_password('1234567a')
self.metal_customer_user.save()
self.metal_customer = Customer.objects.create(user=self.metal_customer_user,
budget=2,
customer_type=0,
material='9', # setting one of the metal choices
process='1',
is_advanced_filled=True,
shipping='1',
need_assistance=1)
self.client = Client()
self.client.login(username='simple_user', password='1234567a')
def test_success_login(self):
login = self.client.login(username='simple_user', password='1234567a')
self.assertIs(login, True)
def test_forbidden_access_to_company(self):
self.client.login(username='simple_user', password='1234567a')
response = self.client.get(reverse('company:add'))
self.assertEqual(response.status_code, 403)
def test_customer_dashboard_access(self):
self.client.login(username='simple_user', password='1234567a')
response = self.client.get(reverse('dashboard:company'))
self.assertEqual(response.status_code, 200)
def test_match_company_and_customer(self):
self.client.login(username='silver_user', password='1234567a')
response = self.client.get(reverse('dashboard:company'))
# print(response.render().content)
self.assertContains(response, 'silver_company')
def test_match_metal_company_with_same_process(self):
self.client.login(username='metal_customer_user', password='1234567a')
response = self.client.get(reverse('dashboard:company'))
self.assertContains(response, 'metal_company')
|
mit
| -9,013,767,914,590,766,000
| 56.30303
| 113
| 0.39569
| false
| 5.623792
| true
| false
| false
|
PetePriority/home-assistant
|
homeassistant/components/dovado/sensor.py
|
1
|
3451
|
"""
Support for sensors from the Dovado router.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.dovado/
"""
import logging
import re
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.dovado import DOMAIN as DOVADO_DOMAIN
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_SENSORS
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['dovado']
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
SENSOR_UPLOAD = 'upload'
SENSOR_DOWNLOAD = 'download'
SENSOR_SIGNAL = 'signal'
SENSOR_NETWORK = 'network'
SENSOR_SMS_UNREAD = 'sms'
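# Each entry maps a sensor to (Dovado state key, name suffix, unit, icon).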
SENSORS = {
SENSOR_NETWORK: ('signal strength', 'Network', None,
'mdi:access-point-network'),
SENSOR_SIGNAL: ('signal strength', 'Signal Strength', '%',
'mdi:signal'),
SENSOR_SMS_UNREAD: ('sms unread', 'SMS unread', '',
'mdi:message-text-outline'),
SENSOR_UPLOAD: ('traffic modem tx', 'Sent', 'GB',
'mdi:cloud-upload'),
SENSOR_DOWNLOAD: ('traffic modem rx', 'Received', 'GB',
'mdi:cloud-download'),
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_SENSORS): vol.All(
cv.ensure_list, [vol.In(SENSORS)]
),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Dovado sensor platform."""
dovado = hass.data[DOVADO_DOMAIN]
entities = []
for sensor in config[CONF_SENSORS]:
entities.append(DovadoSensor(dovado, sensor))
add_entities(entities)
class DovadoSensor(Entity):
"""Representation of a Dovado sensor."""
def __init__(self, data, sensor):
"""Initialize the sensor."""
self._data = data
self._sensor = sensor
self._state = self._compute_state()
def _compute_state(self):
state = self._data.state.get(SENSORS[self._sensor][0])
if self._sensor == SENSOR_NETWORK:
match = re.search(r"\((.+)\)", state)
return match.group(1) if match else None
if self._sensor == SENSOR_SIGNAL:
try:
return int(state.split()[0])
except ValueError:
return None
if self._sensor == SENSOR_SMS_UNREAD:
return int(state)
if self._sensor in [SENSOR_UPLOAD, SENSOR_DOWNLOAD]:
return round(float(state) / 1e6, 1)
return state
def update(self):
"""Update sensor values."""
self._data.update()
self._state = self._compute_state()
@property
def name(self):
"""Return the name of the sensor."""
return "{} {}".format(self._data.name, SENSORS[self._sensor][1])
@property
def state(self):
"""Return the sensor state."""
return self._state
@property
def icon(self):
"""Return the icon for the sensor."""
return SENSORS[self._sensor][3]
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return SENSORS[self._sensor][2]
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {k: v for k, v in self._data.state.items()
if k not in ['date', 'time']}
|
apache-2.0
| -3,113,419,008,167,591,000
| 28.75
| 74
| 0.614604
| false
| 3.81326
| false
| false
| false
|
zhenleiji/ZPong
|
model/Ball.py
|
1
|
1115
|
import pygame
class Ball(pygame.sprite.Sprite):
def __init__(self, screen_size, position, speed):
pygame.sprite.Sprite.__init__(self)
self.surface = pygame.image.load('imgs/ball.png')
self.screen_size = screen_size
self.rect = self.surface.get_rect()
self.rect.left = position[0]
self.rect.top = position[1]
self.speed = speed
self.default_position = position
self.default_speed = [speed[0], speed[1]]
def check_boundary(self):
        return (self.rect.left < 0 or self.rect.right > self.screen_size[0]
                or self.rect.top < 0 or self.rect.bottom > self.screen_size[1])
def on_update(self):
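        # Advance by the current speed; once the ball leaves the screen,
        # snap it back to its starting position and restore the initial speed.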
self.rect = self.rect.move(self.speed)
if self.check_boundary():
self.rect.left = self.default_position[0]
self.rect.top = self.default_position[1]
self.speed = [self.default_speed[0], self.default_speed[1]]
def on_draw(self, surface):
surface.blit(self.surface, self.rect)
|
apache-2.0
| -6,082,744,259,949,847,000
| 38.821429
| 119
| 0.564126
| false
| 3.741611
| false
| false
| false
|
paninetworks/neutron
|
neutron/api/v2/resource.py
|
1
|
7410
|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods for working with WSGI servers redux
"""
import sys
import netaddr
import oslo_i18n
from oslo_log import log as logging
from oslo_policy import policy as oslo_policy
import six
import webob.dec
import webob.exc
from neutron.common import exceptions
from neutron.i18n import _, _LE, _LI
from neutron import wsgi
LOG = logging.getLogger(__name__)
class Request(wsgi.Request):
pass
def Resource(controller, faults=None, deserializers=None, serializers=None):
"""Represents an API entity resource and the associated serialization and
deserialization logic
"""
default_deserializers = {'application/json': wsgi.JSONDeserializer()}
default_serializers = {'application/json': wsgi.JSONDictSerializer()}
format_types = {'json': 'application/json'}
action_status = dict(create=201, delete=204)
default_deserializers.update(deserializers or {})
default_serializers.update(serializers or {})
deserializers = default_deserializers
serializers = default_serializers
faults = faults or {}
@webob.dec.wsgify(RequestClass=Request)
def resource(request):
route_args = request.environ.get('wsgiorg.routing_args')
if route_args:
args = route_args[1].copy()
else:
args = {}
# NOTE(jkoelker) by now the controller is already found, remove
# it from the args if it is in the matchdict
args.pop('controller', None)
fmt = args.pop('format', None)
action = args.pop('action', None)
content_type = format_types.get(fmt,
request.best_match_content_type())
language = request.best_match_language()
deserializer = deserializers.get(content_type)
serializer = serializers.get(content_type)
try:
if request.body:
args['body'] = deserializer.deserialize(request.body)['body']
method = getattr(controller, action)
result = method(request=request, **args)
except (exceptions.NeutronException,
netaddr.AddrFormatError,
oslo_policy.PolicyNotAuthorized) as e:
for fault in faults:
if isinstance(e, fault):
mapped_exc = faults[fault]
break
else:
mapped_exc = webob.exc.HTTPInternalServerError
if 400 <= mapped_exc.code < 500:
LOG.info(_LI('%(action)s failed (client error): %(exc)s'),
{'action': action, 'exc': e})
else:
LOG.exception(_LE('%s failed'), action)
e = translate(e, language)
body = serializer.serialize(
{'NeutronError': get_exception_data(e)})
kwargs = {'body': body, 'content_type': content_type}
raise mapped_exc(**kwargs)
except webob.exc.HTTPException as e:
type_, value, tb = sys.exc_info()
if hasattr(e, 'code') and 400 <= e.code < 500:
LOG.info(_LI('%(action)s failed (client error): %(exc)s'),
{'action': action, 'exc': e})
else:
LOG.exception(_LE('%s failed'), action)
translate(e, language)
value.body = serializer.serialize(
{'NeutronError': get_exception_data(e)})
value.content_type = content_type
six.reraise(type_, value, tb)
except NotImplementedError as e:
e = translate(e, language)
# NOTE(armando-migliaccio): from a client standpoint
# it makes sense to receive these errors, because
# extensions may or may not be implemented by
# the underlying plugin. So if something goes south,
# because a plugin does not implement a feature,
# returning 500 is definitely confusing.
body = serializer.serialize(
{'NotImplementedError': get_exception_data(e)})
kwargs = {'body': body, 'content_type': content_type}
raise webob.exc.HTTPNotImplemented(**kwargs)
except Exception:
# NOTE(jkoelker) Everything else is 500
LOG.exception(_LE('%s failed'), action)
# Do not expose details of 500 error to clients.
msg = _('Request Failed: internal server error while '
'processing your request.')
msg = translate(msg, language)
body = serializer.serialize(
{'NeutronError': get_exception_data(
webob.exc.HTTPInternalServerError(msg))})
kwargs = {'body': body, 'content_type': content_type}
raise webob.exc.HTTPInternalServerError(**kwargs)
status = action_status.get(action, 200)
body = serializer.serialize(result)
# NOTE(jkoelker) Comply with RFC2616 section 9.7
if status == 204:
content_type = ''
body = None
return webob.Response(request=request, status=status,
content_type=content_type,
body=body)
return resource
def get_exception_data(e):
"""Extract the information about an exception.
Neutron client for the v2 API expects exceptions to have 'type', 'message'
and 'detail' attributes.This information is extracted and converted into a
dictionary.
:param e: the exception to be reraised
:returns: a structured dict with the exception data
"""
err_data = {'type': e.__class__.__name__,
'message': e, 'detail': ''}
return err_data
def translate(translatable, locale):
"""Translates the object to the given locale.
If the object is an exception its translatable elements are translated
in place, if the object is a translatable string it is translated and
returned. Otherwise, the object is returned as-is.
:param translatable: the object to be translated
:param locale: the locale to translate to
:returns: the translated object, or the object as-is if it
was not translated
"""
localize = oslo_i18n.translate
if isinstance(translatable, exceptions.NeutronException):
        # Workaround: localizing the message can itself raise for some
        # NeutronException subclasses, so fall back to the untranslated msg.
try:
translatable.msg = localize(translatable.msg, locale)
except Exception:
pass
elif isinstance(translatable, webob.exc.HTTPError):
translatable.detail = localize(translatable.detail, locale)
elif isinstance(translatable, Exception):
translatable.message = localize(translatable, locale)
else:
return localize(translatable, locale)
return translatable
|
apache-2.0
| 2,723,557,886,444,570,000
| 37.393782
| 78
| 0.617274
| false
| 4.493633
| false
| false
| false
|
lixiangning888/whole_project
|
modules/signatures_orginal_20151110/kibex_apis.py
|
1
|
2683
|
# Copyright (C) 2015 KillerInstinct
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class Kibex_APIs(Signature):
name = "kibex_behavior"
description = "Exhibits behavior characteristic of Kibex Spyware"
severity = 3
references = [
"http://www.trendmicro.com/vinfo/us/threat-encyclopedia/malware/tspy_kibex.a",
"http://www.trendmicro.com/vinfo/us/threat-encyclopedia/malware/tspy_kibex.i",
]
categories = ["spyware", "keylogger"]
families = ["kibex"]
authors = ["KillerInstinct"]
minimum = "1.3"
evented = True
def __init__(self, *args, **kwargs):
Signature.__init__(self, *args, **kwargs)
self.keylog_inits = 0
filter_apinames = set(["SetWindowsHookExA"])
def on_call(self, call, process):
hid = int(self.get_argument(call, "HookIdentifier"), 10)
tid = int(self.get_argument(call, "ThreadId"), 10)
if tid == 0 and hid == 13:
self.keylog_inits += 1
return None
def on_complete(self):
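        # Weighted score: each global WH_KEYBOARD_LL hook install (counted
        # in on_call) adds 1, each dropped-file IOC adds 3, and each
        # credential-store registry key or started service adds 1; the
        # signature fires once the total reaches 10.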
bad_score = self.keylog_inits
        file_iocs = [
            ".*\\\\ProgramData\\\\Browsers\\.txt$",
            ".*\\\\ProgramData\\\\Mails\\.txt$",
            ".*\\\\Temp\\\\\\d{9,10}\\.xml$",
        ]
for ioc in file_iocs:
match = self.check_file(pattern=ioc, regex=True)
if match:
bad_score += 3
stealer_regkeys = [
".*\\\\Google\\\\Google\\ Talk\\\\Accounts$",
".*\\\\Google\\\\Google\\ Desktop\\\\Mailboxes$",
".*\\\\Microsoft\\\\Internet\\ Account\\ Manager\\\\Accounts$",
]
for ioc in stealer_regkeys:
match = self.check_key(pattern=ioc, regex=True)
if match:
bad_score += 1
services = [
"ProtectedStorage",
"VaultSvc",
]
for service in services:
if self.check_started_service(service):
bad_score += 1
if bad_score >= 10:
return True
return False
|
lgpl-3.0
| -5,876,482,425,457,491,000
| 32.936709
| 86
| 0.591943
| false
| 3.692837
| false
| false
| false
|
daira/txaws
|
txaws/client/gui/gtk.py
|
1
|
7469
|
# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
# Licenced under the txaws licence available at /LICENSE in the txaws source.
"""A GTK client for working with aws."""
from __future__ import absolute_import
import gnomekeyring
import gobject
import gtk
# DO NOT IMPORT twisted.internet, or things that import
# twisted.internet.
# Doing so installs the default reactor, which would prevent the gtk2
# reactor from being installed in main().
from txaws.credentials import AWSCredentials
__all__ = ["main"]
class AWSStatusIcon(gtk.StatusIcon):
"""A status icon shown when instances are running."""
def __init__(self, reactor):
gtk.StatusIcon.__init__(self)
self.set_from_stock(gtk.STOCK_NETWORK)
self.set_visible(True)
self.reactor = reactor
self.connect("activate", self.on_activate)
self.probing = False
# Nested import because otherwise we get "reactor already installed".
self.password_dialog = None
self.region = None
try:
creds = AWSCredentials()
except ValueError:
creds = self.from_gnomekeyring()
if self.region is None:
self.set_region(creds)
self.create_client(creds)
menu = """
<ui>
<menubar name="Menubar">
<menu action="Menu">
<menuitem action="Stop instances"/>
</menu>
</menubar>
</ui>
"""
actions = [
("Menu", None, "Menu"),
("Stop instances", gtk.STOCK_STOP, "_Stop instances...", None,
"Stop instances", self.on_stop_instances),
]
ag = gtk.ActionGroup("Actions")
ag.add_actions(actions)
self.manager = gtk.UIManager()
self.manager.insert_action_group(ag, 0)
self.manager.add_ui_from_string(menu)
self.menu = self.manager.get_widget(
"/Menubar/Menu/Stop instances").props.parent
self.connect("popup-menu", self.on_popup_menu)
def set_region(self, creds):
from txaws.service import AWSServiceRegion
self.region = AWSServiceRegion(creds)
def create_client(self, creds):
if creds is not None:
if self.region is None:
self.set_region(creds)
self.client = self.region.get_ec2_client()
self.on_activate(None)
else:
# waiting on user entered credentials.
self.client = None
def from_gnomekeyring(self):
# Try for gtk gui specific credentials.
try:
items = gnomekeyring.find_items_sync(
gnomekeyring.ITEM_GENERIC_SECRET,
{
"aws-host": "aws.amazon.com",
})
except (gnomekeyring.NoMatchError,
gnomekeyring.DeniedError):
self.show_a_password_dialog()
return None
else:
key_id, secret_key = items[0].secret.split(":")
return AWSCredentials(access_key=key_id, secret_key=secret_key)
def show_a_password_dialog(self):
self.password_dialog = gtk.Dialog(
"Enter your AWS credentals", None, gtk.DIALOG_MODAL,
(gtk.STOCK_OK, gtk.RESPONSE_ACCEPT,
gtk.STOCK_CANCEL,
gtk.RESPONSE_REJECT))
content = self.password_dialog.get_content_area()
def add_entry(name):
box = gtk.HBox()
box.show()
content.add(box)
label = gtk.Label(name)
label.show()
box.add(label)
entry = gtk.Entry()
entry.show()
box.add(entry)
label.set_use_underline(True)
label.set_mnemonic_widget(entry)
add_entry("AWS _Access Key ID")
add_entry("AWS _Secret Key")
self.password_dialog.show()
self.password_dialog.connect("response", self.save_key)
self.password_dialog.run()
def on_activate(self, data):
if self.probing or not self.client:
# don't ask multiple times, and don't ask until we have
# credentials.
return
self.probing = True
deferred = self.client.describe_instances()
deferred.addCallbacks(self.showhide, self.describe_error)
def on_popup_menu(self, status, button, time):
self.menu.popup(None, None, None, button, time)
def on_stop_instances(self, data):
# It would be nice to popup a window to select instances.. TODO.
deferred = self.client.describe_instances()
deferred.addCallbacks(self.shutdown_instances, self.show_error)
def save_key(self, response_id, data):
try:
if data != gtk.RESPONSE_ACCEPT:
# User cancelled. They can ask for the password again somehow.
return
content = self.password_dialog.get_content_area()
key_id = content.get_children()[0].get_children()[1].get_text()
secret_key = content.get_children()[1].get_children()[1].get_text()
creds = AWSCredentials(access_key=key_id, secret_key=secret_key)
self.create_client(creds)
gnomekeyring.item_create_sync(
None,
gnomekeyring.ITEM_GENERIC_SECRET,
"AWS access credentials",
{"aws-host": "aws.amazon.com"},
"%s:%s" % (key_id, secret_key), True)
finally:
self.password_dialog.hide()
# XXX? Does this leak?
self.password_dialog = None
def showhide(self, reservation):
active = 0
for instance in reservation:
if instance.instance_state == "running":
active += 1
self.set_tooltip("AWS Status - %d instances" % active)
self.set_visible(active != 0)
self.queue_check()
def shutdown_instances(self, reservation):
d = self.client.terminate_instances(
*[instance.instance_id for instance in reservation])
d.addCallbacks(self.on_activate, self.show_error)
def queue_check(self):
self.probing = False
self.reactor.callLater(60, self.on_activate, None)
def show_error(self, error):
# debugging output for now.
print error.value
try:
print error.value.response
        except AttributeError:
            # not every failure carries an HTTP response
pass
def describe_error(self, error):
from twisted.internet.defer import TimeoutError
if isinstance(error.value, TimeoutError):
# timeout errors can be ignored - transient network issue or some
# such.
pass
else:
# debugging output for now.
self.show_error(error)
self.queue_check()
def main(argv, reactor=None):
"""Run the client GUI.
Typical use:
>>> sys.exit(main(sys.argv))
@param argv: The arguments to run it with, e.g. sys.argv.
@param reactor: The reactor to use. Must be compatible with gtk as this
module uses gtk API"s.
@return exitcode: The exit code it returned, as per sys.exit.
"""
if reactor is None:
from twisted.internet import gtk2reactor
gtk2reactor.install()
from twisted.internet import reactor
try:
AWSStatusIcon(reactor)
gobject.set_application_name("aws-status")
reactor.run()
except ValueError:
# In this case, the user cancelled, and the exception bubbled to here.
pass
|
mit
| 3,419,770,804,056,794,600
| 33.419355
| 79
| 0.58214
| false
| 4.090361
| false
| false
| false
|
konfabproject/konfab-consumer
|
ebdata/templatemaker/hole.py
|
1
|
2728
|
# Copyright 2007,2008,2009,2011 Everyblock LLC, OpenPlans, and contributors
#
# This file is part of ebdata
#
# ebdata is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ebdata is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ebdata. If not, see <http://www.gnu.org/licenses/>.
#
import re
class Hole(object):
    """A placeholder in a learned template marking a region where the source
    documents differ. regex() returns the pattern used to match (and, when
    ``capture`` is True, capture) the hole's contents."""
capture = True # Designates whether the Hole captures something in regex().
def __eq__(self, other):
"A Hole is equal to any other Hole (but not subclasses)."
return type(other) is self.__class__
def __repr__(self):
return '<%s>' % self.__class__.__name__
def regex(self):
return '(.*?)'
class OrHole(Hole):
"A Hole that can contain one of a set of values."
capture = True
def __init__(self, *choices):
self.choices = choices
def __eq__(self, other):
"An OrHole is equal to another one if its choices are the same."
return type(other) is self.__class__ and self.choices == other.choices
def __repr__(self):
return '<%s: %r>' % (self.__class__.__name__, self.choices)
def regex(self):
return '(%s)' % '|'.join(re.escape(choice) for choice in self.choices)
class RegexHole(Hole):
"""
A Hole that contains data that matches the given regex. It's up to the
caller to determine whether the data should be grouped.
"""
def __init__(self, regex_string, capture):
self.regex_string = regex_string
self.capture = capture
def __eq__(self, other):
"A RegexHole is equal to another one if its regex_string is the same."
return type(other) is self.__class__ and self.regex_string == other.regex_string and self.capture == other.capture
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.regex_string)
def regex(self):
return self.regex_string
class IgnoreHole(Hole):
"""
A Hole that contains an arbitrary amount of data but should be ignored.
I.e., its contents are *not* included in extract().
"""
capture = False
def regex(self):
return '.*?' # No parenthesis!
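if __name__ == '__main__':
    # Illustrative sketch only (not part of the original module). A learned
    # template is a sequence of literal strings and Hole instances; joining
    # re.escape()d literals with each hole's regex() yields the pattern used
    # to extract data from a page.
    template = ['<b>', Hole(), '</b> lives in ', OrHole('NY', 'LA')]
    pattern = ''.join(re.escape(p) if isinstance(p, str) else p.regex()
                      for p in template)
    print(re.match(pattern, '<b>Alice</b> lives in NY').groups())
    # -> ('Alice', 'NY')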
|
gpl-3.0
| 1,591,653,641,166,182,100
| 33.1
| 122
| 0.645161
| false
| 3.83685
| false
| false
| false
|
SD-Genomics/DeviCNV
|
Code_v1.5.1/python.calculateReadsDepthOfAmp.py
|
1
|
12310
|
import random
import numpy
import pysam
import sys
from intervaltree import Interval, IntervalTree
from intervaltree_bio import GenomeIntervalTree
class Amplicon:
ampID=""
chr=""
ampS=0
inS=0
inE=0
ampE=0
gene=""
trans=""
exon=""
pool=""
datType=""
mappedReadList=[]
readDepthList=[]
def __init__(self,ampID,chr,ampS,inS,inE,ampE,gene,trans,exon,pool,datType):
self.ampID=ampID
self.chr=chr.replace("chr","")
self.ampS=int(ampS)
self.inS=int(inS)
self.inE=int(inE)
self.ampE=int(ampE)
self.gene=gene
self.trans=trans
self.exon=exon
self.pool=pool.split("_")[-1]
self.datType=datType
self.mappedReadList=[]
self.readDepthList=[]
def putMappedRead(self, inMappedRead):
self.mappedReadList.append(inMappedRead)
def getOverlapRatio(self, rChr, rS, rE):
if(rChr.replace("chr","")!=self.chr):
return 0.0
else:
rLen=rE-rS
overlapLen=min(rE, self.ampE)-max(rS, self.ampS)
overlapRatio=float(overlapLen)/float(rLen)
if(overlapRatio>1):
return 1.0
else:
return overlapRatio
def getReadDepthPCR(self, MQList):
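        # For amplicon (PCR) data the reported depth is simply the number of
        # mapped reads passing each MQ cutoff; the per-site table below is
        # accumulated but does not feed the final readDepthList.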
ampliconLength=self.inE-self.inS
depthPerSiteDic={}
for MQ in MQList:
depthPerSiteDic[MQ]=[0]*ampliconLength
for pos in range(0, ampliconLength):
nowS=self.inS+pos
for read in self.mappedReadList:
if(read.pos<=nowS and nowS+1<=read.pos+read.alen):
for MQ in MQList:
if(read.mapq>=MQ):
depthPerSiteDic[MQ][pos]+=1
readDepthOutList=[]
for MQ in MQList:
readDepth=0
for read in self.mappedReadList:
if(read.mapq>=MQ):
readDepth+=1
readDepthOutList.append(readDepth)
self.readDepthList=readDepthOutList
def getReadDepthHYB(self, MQList): ## using insert
ampliconLength=self.inE-self.inS
depthPerSiteDic={}
for MQ in MQList:
depthPerSiteDic[MQ]=[0]*ampliconLength
for pos in range(0, ampliconLength):
nowS=self.inS+pos
for read in self.mappedReadList:
if(read.pos<=nowS and nowS+1<=read.pos+read.alen):
for MQ in MQList:
if(read.mapq>=MQ):
depthPerSiteDic[MQ][pos]+=1
readDepthOutList=[]
for MQ in MQList:
depCov=0
for depth in depthPerSiteDic[MQ]:
depCov+=depth
readDepthOutList.append(round(float(depCov)/ampliconLength,3))
self.readDepthList=readDepthOutList
def runGetReadDepth(self, MQList):
if(self.datType=="HYB"):
self.getReadDepthHYB(MQList)
elif(self.datType=="PCR"):
self.getReadDepthPCR(MQList)
else:
print(self.datType, "unknown data")
def allInfoList(self):
return [self.ampID, self.chr, self.ampS,self.inS,self.inE,self.ampE, self.gene, self.trans, self.exon, self.pool]
def head(self):
return ["Amplicon_ID","Chr","Amplicon_Start","Insert_Start","Insert_End","Amplicon_End","Gene","Transcript","Exon","Pool"]
def MakeAmpliconDic(inAmpliconTxt, datType):
inFile=open(inAmpliconTxt, 'r')
inLine=inFile.readline()
ampliconDic={}
ampLocalDic={}
ampliconList=[]
ampTree=GenomeIntervalTree()
headCheck=False
while(inLine):
if(headCheck==False):
headCheck=True
header=inLine.replace("\n","").replace("\r","").split("\t")
print(header)
ampIDID=header.index("Amplicon_ID")
chrID=header.index("Chr")
ampSID=header.index("Amplicon_Start")
inSID=header.index("Insert_Start")
inEID=header.index("Insert_End")
ampEID=header.index("Amplicon_End")
geneID=header.index("Gene")
transID=header.index("Transcript")
exonID=header.index("Exon")
poolID=header.index("Pool")
else:
inList=inLine.replace("\n","").replace("\r","").split("\t")
ampID=inList[ampIDID]
chr=inList[chrID].replace("chr","")
ampS=inList[ampSID]
inS=int(inList[inSID])
inE=int(inList[inEID])
ampE=inList[ampEID]
gene=inList[geneID]
exon=inList[exonID]
trans=inList[transID]
pool=inList[poolID]
if(ampID not in ampLocalDic):
ampliconList.append(ampID)
ampLocalDic[ampID]=Amplicon(ampID,chr,ampS,inS,inE,ampE,gene,exon,trans,pool,datType)
ampTree.addi(chr,inS+1,inE+1,ampID) ## [start, end)
else:
print("Error!! : Not unique Amplicon_ID : "+ampID)
break
inLine=inFile.readline()
inFile.close()
for ampliconID in ampliconList:
amplicon=ampLocalDic[ampliconID]
pool=amplicon.pool
if(pool not in ampliconDic):
ampliconDic[pool]=[]
ampliconDic[pool].append(amplicon)
print("Total Amplicons: "+str(len(ampLocalDic.keys())))
print("ampTree made!")
return [ampliconDic, ampTree]
def MapReadinBamPCR(inBamFile, ampliconDic, ampTree, dedupOp, MQList):
ampliconList=[]
poolList=list(ampliconDic.keys())
poolList.sort()
for pool in poolList:
ampliconList+=ampliconDic[pool]
inBam=pysam.Samfile(inBamFile,'rb')
for read in inBam:
if(read.is_unmapped):
pass
else:
if(read.is_duplicate):
if(dedupOp=="true"):
continue
overlapAmpTreeList=ampTree[inBam.getrname(read.rname).replace("chr","")].search(read.pos+1, read.pos+read.alen+1) ## [start, end)
if(len(overlapAmpTreeList)==0):
pass
else:
overlapAmpIDList=[]
for overlapAmpTree in overlapAmpTreeList:
overlapAmpIDList.append(overlapAmpTree[-1])
overlapAmpList=[]
for amplicon in ampliconList:
if(amplicon.ampID in overlapAmpIDList):
overlapAmpList.append(amplicon)
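                # Assign the read to exactly one amplicon: keep the
                # amplicon(s) with the highest read-overlap ratio, break
                # ties by the shortest amplicon length, then pick randomly
                # among any remaining exact ties.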
overlapRatioList=[]
ampLenList=[]
for amplicon in overlapAmpList:
overlapRatioList.append(amplicon.getOverlapRatio(inBam.getrname(read.rname).replace("chr",""), read.pos, read.pos+read.alen))
ampLenList.append(amplicon.ampE-amplicon.ampS)
maxValue=max(overlapRatioList)
overlapAmpList2=[]
overlapRatioList2=[]
ampLenList2=[]
for i in range(0,len(overlapAmpList)):
if(maxValue==overlapRatioList[i]):
overlapAmpList2.append(overlapAmpList[i])
overlapRatioList2.append(overlapRatioList[i])
ampLenList2.append(ampLenList[i])
minAmpLen=min(ampLenList2)
overlapAmpList3=[]
overlapRatioList3=[]
ampLenList3=[]
for j in range(0,len(overlapAmpList2)):
if(minAmpLen==ampLenList2[j]):
overlapAmpList3.append(overlapAmpList2[j])
overlapRatioList3.append(overlapRatioList2[j])
ampLenList3.append(ampLenList2[j])
mappedAmp=overlapAmpList3[int((random.random()*10000))%(len(overlapAmpList3))]
mappedAmp.mappedReadList.append(read)
for amplicon in ampliconList:
amplicon.runGetReadDepth(MQList)
return ampliconDic
def MapReadinBamHYB(inBamFile, ampliconDic, ampTree, dedupOp, MQList):
ampliconList=[]
poolList=list(ampliconDic.keys())
poolList.sort()
for pool in poolList:
ampliconList+=ampliconDic[pool]
print(pool)
inBam=pysam.Samfile(inBamFile,'rb')
for read in inBam:
if(read.is_unmapped):
pass
else:
if(read.is_duplicate):
if(dedupOp=="true"):
continue
overlapAmpTreeList=ampTree[inBam.getrname(read.rname).replace("chr","")].search(read.pos+1, read.pos+read.alen+1) ## [start, end)
if(len(overlapAmpTreeList)==0):
pass
else:
overlapAmpIDList=[]
for overlapAmpTree in overlapAmpTreeList:
overlapAmpIDList.append(overlapAmpTree[-1])
for amplicon in ampliconList:
if(amplicon.ampID in overlapAmpIDList):
amplicon.mappedReadList.append(read)
for amplicon in ampliconList:
amplicon.runGetReadDepth(MQList)
return ampliconDic
def WriteReadDepthFile(ampliconDic, outFileName, MQList):
### write file per pool ###########################
ampliconList=list(ampliconDic.keys())
ampliconList.sort()
for pool in ampliconList:
#### write attributes ##########################
outFile=open(outFileName+"."+pool+".txt",'w')
header=ampliconDic[pool][0].head()
outFile.write("\t".join(header))
for MQ in MQList:
outFile.write("\tMQ"+str(MQ))
outFile.write("\n")
#### write values per amplicon ################
for amplicon in ampliconDic[pool]:
outFile.write("\t".join(numpy.array(amplicon.allInfoList()).astype(str)))
readDepthOutList=amplicon.readDepthList
outFile.write("\t"+"\t".join(numpy.array(readDepthOutList).astype(str)))
outFile.write("\n")
outFile.close()
def WriteMappedReadDepthStatFile(ampliconDic, RCstaticFileName, MQList, inSample):
staticFile=open(RCstaticFileName+".txt",'w')
staticFile.write("Sample\tPool\tMQ\tMean\tMedian\tStandardDeviation\tSum\n")
### write file per pool ###########################
ampliconList=list(ampliconDic.keys())
ampliconList.sort()
for pool in ampliconList:
totalReadDepthOutList=[]
for amplicon in ampliconDic[pool]:
readDepthOutList=amplicon.readDepthList
totalReadDepthOutList.append(readDepthOutList)
#### write StaticFile per Pool+MQ #############
totalReadDepthOutList=numpy.transpose(totalReadDepthOutList)
for i in range(0,len(MQList)):
MQ=MQList[i]
RCList=totalReadDepthOutList[i]
staticList=[round(numpy.mean(RCList),2),round(numpy.median(RCList),2), round(numpy.std(RCList),2), round(numpy.sum(RCList))]
staticFile.write(inSample+"\t"+pool+"\tMQ"+str(MQ)+"\t"+"\t".join(numpy.array(staticList).astype(str))+"\n")
#####################################################
staticFile.close()
if __name__ == '__main__':
inputs=list(sys.argv)
inSample=inputs[1]
inBamDir=inputs[2]
inAmpliconTxt=inputs[3]
readDepthDir=inputs[4]
readDepthStatDir=inputs[5]
dedupOp=inputs[6].lower()
datType=inputs[7]
MQList=list(numpy.array(inputs[8].replace("MQ","").split(",")).astype(int))
[ampliconDic, ampTree]=MakeAmpliconDic(inAmpliconTxt,datType)
inBamFile=inBamDir+inSample+".bam"
if(datType=="HYB"):
ampliconDic=MapReadinBamHYB(inBamFile, ampliconDic, ampTree, dedupOp, MQList)
elif(datType=="PCR"):
ampliconDic=MapReadinBamPCR(inBamFile, ampliconDic, ampTree, dedupOp, MQList)
else:
print("ERROR !! Unknown data type")
readDepthFile=readDepthDir+inSample+".readDepth"
WriteReadDepthFile(ampliconDic, readDepthFile, MQList)
RCStaticFile=readDepthStatDir+inSample+".readDepthStatistics"
WriteMappedReadDepthStatFile(ampliconDic, RCStaticFile, MQList, inSample)
|
gpl-3.0
| 8,927,712,984,042,554,000
| 36.078313
| 144
| 0.567019
| false
| 3.490218
| false
| false
| false
|
rschwager-mm/polymr
|
redis/polymr_redis.py
|
1
|
4582
|
import operator
from array import array
from collections import defaultdict
import redis
import polymr.storage
from polymr.storage import dumps
from polymr.storage import LevelDBBackend
from toolz import partition_all
from toolz import valmap
snd = operator.itemgetter(1)
class FakeDict(object):
def __init__(self, iterable):
self.iterable = iterable
def items(self):
for k, v in self.iterable:
yield k, v
class RedisBackend(LevelDBBackend):
def __init__(self, host='localhost', port=6379, db=0,
featurizer_name=None, new=False):
self._freqs = None
self.featurizer_name = featurizer_name
self.r = redis.StrictRedis(host=host, port=port, db=db)
if new is True:
self.destroy()
if not self.featurizer_name:
try:
self.featurizer_name = self.get_featurizer_name()
except OSError:
self.featurizer_name = 'default'
self._check_dbstats()
@classmethod
def from_urlparsed(cls, parsed, featurizer_name=None, read_only=None):
path = parsed.path.strip("/") or 0
return cls(host=parsed.hostname, port=parsed.port, db=path,
featurizer_name=featurizer_name)
def close(self):
pass
def get_featurizer_name(self):
ret = self.r.get(b'featurizer')
if ret is None:
raise OSError
return ret.decode()
def save_featurizer_name(self, name):
self.r.set(b'featurizer', name)
def find_least_frequent_tokens(self, toks, r, k=None):
toks_freqs = [(tok, int(freq))
for tok, freq in zip(toks, self.r.hmget(b'freqs', toks))
if freq is not None]
total = 0
ret = []
for i, (tok, freq) in enumerate(sorted(toks_freqs, key=snd)):
if total + freq > r:
break
total += freq
ret.append(tok)
if k and i >= k: # try to get k token mappings
break
return ret
def get_freqs(self):
return defaultdict(int, valmap(int, self.r.hgetall(b'freqs')))
def update_freqs(self, toks_cnts):
if type(toks_cnts) is not dict:
toks_cnts = FakeDict(toks_cnts)
self.r.hmset(b"freqs", toks_cnts)
save_freqs = update_freqs
def get_rowcount(self):
ret = self.r.get(b'rowcount')
if ret is None:
return 0
return int(ret)
def save_rowcount(self, cnt):
self.r.set(b'rowcount', cnt)
def increment_rowcount(self, cnt):
self.r.incr(b'rowcount', cnt)
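    # Record keys are the integer id packed as a native unsigned long via
    # array("L", (idx,)).tobytes(); a token's value is the concatenation of
    # the packed ids of every record containing that token.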
def _load_token_blob(self, name):
blob = self.r.get(b"tok:"+name)
if blob is None:
raise KeyError
return blob
def save_token(self, name, record_ids):
self.r.set(b"tok:"+name, array("L", record_ids).tobytes())
def save_tokens(self, names_ids, chunk_size=5000):
chunks = partition_all(chunk_size, names_ids)
for chunk in chunks:
pipe = self.r.pipeline()
for name, record_ids in chunk:
pipe.set(b"tok:"+name, array("L", record_ids).tobytes())
pipe.execute()
def _load_record_blob(self, idx):
blob = self.r.get(array("L", (idx,)).tobytes())
if blob is None:
raise KeyError
return blob
def get_records(self, idxs, chunk_size=5000):
chunks = partition_all(chunk_size, idxs)
for chunk in chunks:
keys = [array("L", (idx,)).tobytes() for idx in chunk]
blobs = self.r.mget(keys)
if any(blob is None for blob in blobs):
raise KeyError
for blob in blobs:
yield self._get_record(blob)
def save_record(self, rec, idx=None, save_rowcount=True):
if not idx or save_rowcount is True:
idx = self.r.incr(b'rowcount')
self.r.set(array("L", (idx,)).tobytes(), dumps(rec))
return idx
def save_records(self, idx_recs, chunk_size=5000):
chunks = partition_all(chunk_size, idx_recs)
tot = 0
for chunk in chunks:
tot += len(chunk)
pipe = self.r.pipeline()
for idx, rec in chunk:
pipe.set(array("L", (idx,)).tobytes(), dumps(rec))
pipe.execute()
return tot
def delete_record(self, idx):
self.r.delete(array("L", (idx,)).tobytes())
def destroy(self):
self.r.flushdb()
polymr.storage.backends['redis'] = RedisBackend
|
apache-2.0
| -48,296,670,859,349,150
| 29.344371
| 78
| 0.567001
| false
| 3.648089
| false
| false
| false
|
internetarchive/surt
|
surt/IAURLCanonicalizer.py
|
1
|
4773
|
#!/usr/bin/env python
# Copyright(c)2012-2013 Internet Archive. Software license AGPL version 3.
#
# This file is part of the `surt` python package.
#
# surt is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# surt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with surt. If not, see <http://www.gnu.org/licenses/>.
#
# The surt source is hosted at https://github.com/internetarchive/surt
"""This is a python port of IAURLCanonicalizer.java:
http://archive-access.svn.sourceforge.net/viewvc/archive-access/trunk/archive-access/projects/archive-commons/src/main/java/org/archive/url/IAURLCanonicalizer.java?view=markup
"""
from __future__ import absolute_import
import re
from surt.handyurl import handyurl
from surt.URLRegexTransformer import stripPathSessionID, stripQuerySessionID
# canonicalize()
#_______________________________________________________________________________
def canonicalize(url, host_lowercase=True, host_massage=True,
auth_strip_user=True, auth_strip_pass=True,
port_strip_default=True, path_strip_empty=False,
path_lowercase=True, path_strip_session_id=True,
path_strip_trailing_slash_unless_empty=True,
query_lowercase=True, query_strip_session_id=True,
query_strip_empty=True, query_alpha_reorder=True,
hash_strip=True, **_ignored):
"""The input url is a handyurl instance"""
if host_lowercase and url.host:
url.host = url.host.lower()
if host_massage and url.host and (url.scheme != b'dns'): ###java version calls massageHost regardless of scheme
url.host = massageHost(url.host)
if auth_strip_user:
url.authUser = None
url.authPass = None
elif auth_strip_pass:
        url.authPass = None
if port_strip_default and url.scheme:
defaultPort = getDefaultPort(url.scheme)
if url.port == defaultPort:
url.port = handyurl.DEFAULT_PORT
path = url.path
if path_strip_empty and b'/' == path:
url.path = None
else:
if path_lowercase and path:
path = path.lower()
if path_strip_session_id and path:
path = stripPathSessionID(path)
if path_strip_empty and b'/' == path:
path = None
if path_strip_trailing_slash_unless_empty and path:
if path.endswith(b'/') and len(path)>1:
path = path[:-1]
url.path = path
query = url.query
if query:
if len(query) > 0:
if query_strip_session_id:
query = stripQuerySessionID(query)
if query_lowercase:
query = query.lower()
if query_alpha_reorder:
query = alphaReorderQuery(query)
if b'' == query and query_strip_empty:
query = None
url.query = query
else:
if query_strip_empty:
url.last_delimiter = None
return url
# alphaReorderQuery()
#_______________________________________________________________________________
def alphaReorderQuery(orig):
"""It's a shame that we can't use urlparse.parse_qsl() for this, but this
    function keeps the trailing '=' if there is a query arg with no value:
"?foo" vs "?foo=", and we want to exactly match the java version
"""
if None == orig:
return None
if len(orig) <= 1:
return orig
args = orig.split(b'&')
qas = [tuple(arg.split(b'=', 1)) for arg in args]
qas.sort()
s = b''
for t in qas:
if 1 == len(t):
s += t[0] + b'&'
else:
s += t[0] + b'=' + t[1] + b'&'
return s[:-1] #remove last &
# massageHost()
#_______________________________________________________________________________
_RE_WWWDIGITS = re.compile(br'www\d*\.')
def massageHost(host):
m = _RE_WWWDIGITS.match(host)
if m:
return host[len(m.group(0)):]
else:
return host
# getDefaultPort()
#_______________________________________________________________________________
def getDefaultPort(scheme):
scheme_lower = scheme.lower()
if b'http' == scheme_lower:
return 80
elif b'https' == scheme_lower:
return 443
else:
return 0
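# Editor's sketch (hypothetical usage, not part of the original file): canonicalize()
# mutates and returns a handyurl instance. Assuming handyurl.parse() and
# handyurl.geturl() behave as in the surt package:
#   url = handyurl.parse("http://www.Example.com/Index.html?b=2&a=1")
#   canonicalize(url)
#   url.geturl()  # host/path lowercased, query reordered to a=1&b=2,
#                 # and the "www" prefix stripped via massageHost()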
|
agpl-3.0
| -5,654,537,029,364,316,000
| 31.691781
| 175
| 0.575948
| false
| 3.824519
| false
| false
| false
|
ironmussa/Optimus
|
optimus/helpers/logger.py
|
1
|
1154
|
import logging
class Singleton(object):
_instances = {}
    def __new__(cls, *args, **kwargs):
        if cls not in cls._instances:
            # object.__new__() takes no extra arguments in Python 3, so drop them.
            cls._instances[cls] = super(Singleton, cls).__new__(cls)
return cls._instances[cls]
class Logger(Singleton):
def __init__(self):
self.logger = logging.getLogger('optimus')
self.is_active = False
def level(self, log_level):
"""
Set the logging message level
:param log_level:
:return:
"""
self.logger.setLevel(log_level)
def print(self, *args, **kwargs):
"""
        Print a message when logging is active
:return:
"""
if self.is_active is True:
self.logger.info(*args, **kwargs)
def active(self, activate):
"""
Turn on and off the logging message
:param activate:
:return:
"""
self.is_active = activate
logger = Logger()
logger.level(logging.INFO)
def level(log_level):
logger.level(log_level)
def info(message):
logger.print(message)
def active(active=None):
logger.active(active)
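# Editor's sketch (hypothetical usage, not part of the original module): the
# module-level helpers wrap the singleton, so callers typically do:
#   from optimus.helpers.logger import active, level, info
#   active(True)             # enable printing via Logger.print
#   level(logging.DEBUG)     # lower the threshold on the 'optimus' logger
#   info("loading dataset")  # forwarded to logging.getLogger('optimus').info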
|
apache-2.0
| 5,459,292,666,941,125,000
| 19.245614
| 85
| 0.558059
| false
| 3.885522
| false
| false
| false
|
tboyce1/home-assistant
|
homeassistant/components/cover/lutron_caseta.py
|
8
|
2336
|
"""
Support for Lutron Caseta shades.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/cover.lutron_caseta/
"""
import asyncio
import logging
from homeassistant.components.cover import (
CoverDevice, SUPPORT_OPEN, SUPPORT_CLOSE, SUPPORT_SET_POSITION,
ATTR_POSITION, DOMAIN)
from homeassistant.components.lutron_caseta import (
LUTRON_CASETA_SMARTBRIDGE, LutronCasetaDevice)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['lutron_caseta']
# pylint: disable=unused-argument
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the Lutron Caseta shades as a cover device."""
devs = []
bridge = hass.data[LUTRON_CASETA_SMARTBRIDGE]
cover_devices = bridge.get_devices_by_domain(DOMAIN)
for cover_device in cover_devices:
dev = LutronCasetaCover(cover_device, bridge)
devs.append(dev)
async_add_devices(devs, True)
class LutronCasetaCover(LutronCasetaDevice, CoverDevice):
"""Representation of a Lutron shade."""
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_SET_POSITION
@property
def is_closed(self):
"""Return if the cover is closed."""
return self._state['current_state'] < 1
@property
def current_cover_position(self):
"""Return the current position of cover."""
return self._state['current_state']
@asyncio.coroutine
def async_close_cover(self, **kwargs):
"""Close the cover."""
self._smartbridge.set_value(self._device_id, 0)
@asyncio.coroutine
def async_open_cover(self, **kwargs):
"""Open the cover."""
self._smartbridge.set_value(self._device_id, 100)
@asyncio.coroutine
def async_set_cover_position(self, **kwargs):
"""Move the shade to a specific position."""
if ATTR_POSITION in kwargs:
position = kwargs[ATTR_POSITION]
self._smartbridge.set_value(self._device_id, position)
@asyncio.coroutine
def async_update(self):
"""Call when forcing a refresh of the device."""
self._state = self._smartbridge.get_device_by_id(self._device_id)
_LOGGER.debug(self._state)
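# Editor's note (hypothetical example configuration; the exact schema belongs to
# the parent lutron_caseta component, see the linked documentation):
#   lutron_caseta:
#     host: 192.168.1.100
# Covers are then discovered from the bridge; per the methods above, position 0
# maps to fully closed and 100 to fully open.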
|
apache-2.0
| 7,136,652,141,303,749,000
| 30.567568
| 79
| 0.675942
| false
| 3.719745
| false
| false
| false
|
google/uncertainty-baselines
|
uncertainty_baselines/models/vit.py
|
1
|
8712
|
# coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vision Transformer (ViT) model."""
from typing import Any, Callable, Optional, Tuple
import flax.linen as nn
import jax.numpy as jnp
Array = Any
PRNGKey = Any
Shape = Tuple[int]
Dtype = Any
class IdentityLayer(nn.Module):
"""Identity layer, convenient for giving a name to an array."""
@nn.compact
def __call__(self, x):
return x
class AddPositionEmbs(nn.Module):
"""Adds (optionally learned) positional embeddings to the inputs.
Attributes:
posemb_init: positional embedding initializer.
"""
posemb_init: Callable[[PRNGKey, Shape, Dtype], Array]
@nn.compact
def __call__(self, inputs):
"""Applies AddPositionEmbs module.
    Adds a positional embedding parameter, created with the (required)
    `posemb_init` initializer, to the inputs; in this module the embedding is
    a learned parameter rather than a fixed sinusoidal table.
Args:
inputs: Inputs to the layer.
Returns:
Output tensor with shape `(bs, timesteps, in_dim)`.
"""
# inputs.shape is (batch_size, seq_len, emb_dim).
assert inputs.ndim == 3, ('Number of dimensions should be 3,'
' but it is: %d' % inputs.ndim)
pos_emb_shape = (1, inputs.shape[1], inputs.shape[2])
pe = self.param('pos_embedding', self.posemb_init, pos_emb_shape)
return inputs + pe
class MlpBlock(nn.Module):
"""Transformer MLP / feed-forward block."""
mlp_dim: int
dtype: Dtype = jnp.float32
out_dim: Optional[int] = None
dropout_rate: float = 0.1
kernel_init: Callable[[PRNGKey, Shape, Dtype],
Array] = nn.initializers.xavier_uniform()
bias_init: Callable[[PRNGKey, Shape, Dtype],
Array] = nn.initializers.normal(stddev=1e-6)
@nn.compact
def __call__(self, inputs, *, deterministic):
"""Applies Transformer MlpBlock module."""
actual_out_dim = inputs.shape[-1] if self.out_dim is None else self.out_dim
x = nn.Dense(
features=self.mlp_dim,
dtype=self.dtype,
kernel_init=self.kernel_init,
bias_init=self.bias_init)( # pytype: disable=wrong-arg-types
inputs)
x = nn.gelu(x)
x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=deterministic)
output = nn.Dense(
features=actual_out_dim,
dtype=self.dtype,
kernel_init=self.kernel_init,
bias_init=self.bias_init)( # pytype: disable=wrong-arg-types
x)
output = nn.Dropout(
rate=self.dropout_rate)(
output, deterministic=deterministic)
return output
class Encoder1DBlock(nn.Module):
"""Transformer encoder layer.
Attributes:
inputs: input data.
mlp_dim: dimension of the mlp on top of attention block.
dtype: the dtype of the computation (default: float32).
dropout_rate: dropout rate.
attention_dropout_rate: dropout for attention heads.
deterministic: bool, deterministic or not (to apply dropout).
num_heads: Number of heads in nn.MultiHeadDotProductAttention
"""
mlp_dim: int
num_heads: int
dtype: Dtype = jnp.float32
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
@nn.compact
def __call__(self, inputs, *, deterministic):
"""Applies Encoder1DBlock module.
Args:
inputs: Inputs to the layer.
deterministic: Dropout will not be applied when set to true.
Returns:
output after transformer encoder block.
"""
# Attention block.
assert inputs.ndim == 3, f'Expected (batch, seq, hidden) got {inputs.shape}'
x = nn.LayerNorm(dtype=self.dtype, name='LayerNorm_0')(inputs)
x = nn.MultiHeadDotProductAttention(
dtype=self.dtype,
kernel_init=nn.initializers.xavier_uniform(),
broadcast_dropout=False,
deterministic=deterministic,
dropout_rate=self.attention_dropout_rate,
num_heads=self.num_heads,
name='MultiHeadDotProductAttention_1')(x, x)
x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=deterministic)
x = x + inputs
# MLP block.
y = nn.LayerNorm(dtype=self.dtype, name='LayerNorm_2')(x)
y = MlpBlock(
mlp_dim=self.mlp_dim,
dtype=self.dtype,
name='MlpBlock_3',
dropout_rate=self.dropout_rate)(
y, deterministic=deterministic)
return x + y
class Encoder(nn.Module):
"""Transformer Model Encoder for sequence to sequence translation.
Attributes:
num_layers: number of layers
mlp_dim: dimension of the mlp on top of attention block
num_heads: Number of heads in nn.MultiHeadDotProductAttention
dropout_rate: dropout rate.
attention_dropout_rate: dropout rate in self attention.
"""
num_layers: int
mlp_dim: int
num_heads: int
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
@nn.compact
def __call__(self, inputs, *, train):
"""Applies Transformer model on the inputs.
Args:
inputs: Inputs to the layer.
train: Set to `True` when training.
Returns:
output of a transformer encoder.
"""
assert inputs.ndim == 3 # (batch, len, emb)
x = AddPositionEmbs(
posemb_init=nn.initializers.normal(stddev=0.02), # from BERT.
name='posembed_input')(
inputs)
x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=not train)
# Input Encoder
for lyr in range(self.num_layers):
x = Encoder1DBlock(
mlp_dim=self.mlp_dim,
dropout_rate=self.dropout_rate,
attention_dropout_rate=self.attention_dropout_rate,
name=f'encoderblock_{lyr}',
num_heads=self.num_heads)(
x, deterministic=not train)
encoded = nn.LayerNorm(name='encoder_norm')(x)
return encoded
class VisionTransformer(nn.Module):
"""VisionTransformer."""
num_classes: int
patches: Any
transformer: Any
hidden_size: int
representation_size: Optional[int] = None
classifier: str = 'token'
@nn.compact
def __call__(self, inputs, *, train):
out = {}
x = inputs
# We can merge s2d+emb into a single conv; it's the same.
x = nn.Conv(
features=self.hidden_size,
kernel_size=self.patches.size,
strides=self.patches.size,
padding='VALID',
name='embedding')(
x)
# Here, x is a grid of embeddings.
# TODO(dusenberrymw): Switch to self.sow(.).
out['stem'] = x
# Transformer.
n, h, w, c = x.shape
x = jnp.reshape(x, [n, h * w, c])
# If we want to add a class token, add it here.
if self.classifier == 'token':
cls = self.param('cls', nn.initializers.zeros, (1, 1, c))
cls = jnp.tile(cls, [n, 1, 1])
x = jnp.concatenate([cls, x], axis=1)
x = Encoder(name='Transformer', **self.transformer)(x, train=train)
out['transformed'] = x
if self.classifier == 'token':
x = x[:, 0]
elif self.classifier == 'gap':
x = jnp.mean(x, axis=list(range(1, x.ndim - 1))) # (1,) or (1,2)
else:
raise ValueError(f'Invalid classifier={self.classifier}')
out['head_input'] = x
if self.representation_size is not None:
x = nn.Dense(features=self.representation_size, name='pre_logits')(x)
out['pre_logits'] = x
x = nn.tanh(x)
else:
x = IdentityLayer(name='pre_logits')(x)
out['pre_logits'] = x
x = nn.Dense(
features=self.num_classes,
name='head',
kernel_init=nn.initializers.zeros)(
x)
out['logits'] = x
return x, out
def vision_transformer(num_classes: int,
patches: Any,
transformer: Any,
hidden_size: int,
representation_size: Optional[int] = None,
classifier: str = 'token'):
"""Builds a Vision Transformer (ViT) model."""
# TODO(dusenberrymw): Add API docs once config dict in VisionTransformer is
# cleaned up.
return VisionTransformer(
num_classes=num_classes,
patches=patches,
transformer=transformer,
hidden_size=hidden_size,
representation_size=representation_size,
classifier=classifier)
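# Editor's sketch (hypothetical construction, not part of the original file):
# `patches` and `transformer` are treated here as attribute-style configs, e.g.
# ml_collections.ConfigDict objects (an assumption):
#   import jax
#   import ml_collections
#   patches = ml_collections.ConfigDict({'size': (4, 4)})
#   transformer = ml_collections.ConfigDict(dict(
#       num_layers=2, mlp_dim=128, num_heads=4,
#       dropout_rate=0.1, attention_dropout_rate=0.1))
#   model = vision_transformer(num_classes=10, patches=patches,
#                              transformer=transformer, hidden_size=64)
#   variables = model.init(jax.random.PRNGKey(0),
#                          jnp.ones((1, 32, 32, 3)), train=False)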
|
apache-2.0
| -5,979,821,114,769,184,000
| 28.632653
| 80
| 0.63579
| false
| 3.592577
| false
| false
| false
|
xtiankisutsa/MARA_Framework
|
tools/lobotomy/core/brains/ui/terminal.py
|
1
|
5944
|
import npyscreen
from core.logging.logger import Logger
from androguard.core.bytecodes.dvm import ClassDefItem
from androguard.core.bytecodes.dvm import EncodedMethod
from pygments import highlight
from pygments.lexers.dalvik import SmaliLexer
from pygments.formatters import TerminalFormatter
# Global
# This global variables have to be accessed by the ClassTreeMultiSelect class
vm_global = None
vmx_global = None
class TerminalAppError(Exception):
def __init__(self, message):
self.logger = Logger()
self.message = message
self.logger.log("critical", "TerminalApp : {}".format(self.message))
class ClassTreeData(npyscreen.TreeData):
def get_content_for_display(self):
"""
Overidden from TreeData
"""
if isinstance(self.content, str):
return self.content
elif isinstance(self.content, EncodedMethod):
return self.content.name
elif isinstance(self.content, ClassDefItem):
return self.content.name
else:
return self.content
class TerminalMultiLine(npyscreen.MultiLine):
def display_value(self, vl):
"""
Overriden from npyscreen.MultiLine
"""
try:
return vl
except ReferenceError as e:
raise e
class ClassTreeMultiSelect(npyscreen.MLTreeMultiSelect):
def handle_selected(self, vl):
"""
Handle a selected method.
Args:
param1: TreeData
Returns:
None
"""
# Locals
m = None
mx = None
ml = None
method_form = None
basic_blocks = None
try:
if vl.selected:
# Handle EncodedMethod type
if isinstance(vl.get_content(), EncodedMethod):
m = vl.get_content()
mx = vmx_global.get_method(m)
if m.get_code():
idx = 0
basic_blocks = mx.get_basic_blocks().get()
method_form = npyscreen.Form(name=m.name,
framed=False)
# Our custom MultiLine class for handling displaying
# the values
ml = method_form.add(TerminalMultiLine,
autowrap=False)
ml.values.append("{} {}"
.format(m.get_access_flags_string(),
m.name))
# This provide a visual space
ml.values.append("")
for block in basic_blocks:
for i in block.get_instructions():
ml.values.append(" ".join([str(idx),
i.get_name(),
i.get_output()]))
idx += i.get_length()
method_form.edit()
return
# Handle ClassDefItem type
if isinstance(vl.get_content(), ClassDefItem):
return
except Exception as e:
raise e
def h_select(self, ch):
"""
Overidden from npyscreen.MLTreeMultiSelect
"""
# DO NOT MODIFY (!)
vl = self.values[self.cursor_line]
vl_to_set = not vl.selected
if self.select_cascades:
for v in self._walk_tree(vl, only_expanded=False,
ignore_root=False):
if v.selectable:
v.selected = vl_to_set
else:
vl.selected = vl_to_set
if self.select_exit:
self.editing = False
self.how_exited = True
self.display()
# Handle the selection
self.handle_selected(vl)
class TerminalForm(npyscreen.Form):
def create(self):
"""
Overidden from npyscreen.Form
"""
# Locals
self.how_exited_handers[
npyscreen.wgwidget.EXITED_ESCAPE] = self.exit_application
def exit_application(self):
"""
Overidden from npyscreen.Form
"""
# Locals
self.parentApp.setNextForm(None)
self.editing = False
class TerminalApp(npyscreen.NPSApp):
def __init__(self, vm, vmx):
# Wut?
# Get the DVM and Analysis instance and make them global
global vm_global
vm_global = vm
global vmx_global
vmx_global = vmx
def main(self):
"""
Overriden from npyscreen.NPSApp
"""
# Locals
lterm_form = None
tree = None
tree_data = None
clazz = None
method = None
lterm_form = TerminalForm(name="lterm", framed=False)
tree = lterm_form.add(ClassTreeMultiSelect)
tree_data = ClassTreeData(content="Class Tree", selectable=False,
ignore_root=False)
try:
for c in vm_global.get_classes():
# Don't populate the Android support classes
if c.name.startswith("Landroid"):
continue
# If selected is set to True, it will populate the results
# from get_selected_objects, we don't want this
clazz = tree_data.new_child(content=c,
selectable=False, selected=False)
for m in c.get_methods():
method = clazz.new_child(content=m,
selectable=True, selected=False)
tree.values = tree_data
lterm_form.edit()
except Exception as e:
TerminalAppError("main : {}".format(e))
|
lgpl-3.0
| 8,790,369,643,042,996,000
| 31.480874
| 77
| 0.502019
| false
| 4.669285
| false
| false
| false
|
tensorflow/models
|
orbit/utils/epoch_helper.py
|
1
|
2136
|
# Copyright 2021 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a utility class for training in epochs."""
import tensorflow as tf
class EpochHelper:
"""A helper class handle bookkeeping of epochs in custom training loops."""
def __init__(self, epoch_steps: int, global_step: tf.Variable):
"""Initializes the `EpochHelper` instance.
Args:
epoch_steps: An integer indicating how many steps are in an epoch.
global_step: A `tf.Variable` providing the current global step.
"""
self._epoch_steps = epoch_steps
self._global_step = global_step
self._current_epoch = None
self._epoch_start_step = None
self._in_epoch = False
def epoch_begin(self):
"""Returns whether a new epoch should begin."""
if self._in_epoch:
return False
current_step = self._global_step.numpy()
self._epoch_start_step = current_step
self._current_epoch = current_step // self._epoch_steps
self._in_epoch = True
return True
def epoch_end(self):
"""Returns whether the current epoch should end."""
if not self._in_epoch:
raise ValueError("`epoch_end` can only be called inside an epoch.")
current_step = self._global_step.numpy()
epoch = current_step // self._epoch_steps
if epoch > self._current_epoch:
self._in_epoch = False
return True
return False
@property
def batch_index(self):
"""Index of the next batch within the current epoch."""
return self._global_step.numpy() - self._epoch_start_step
@property
def current_epoch(self):
return self._current_epoch
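# Editor's sketch (hypothetical training-loop usage, not part of the original file):
#   global_step = tf.Variable(0, dtype=tf.int64)
#   helper = EpochHelper(epoch_steps=100, global_step=global_step)
#   while global_step.numpy() < total_steps:
#     if helper.epoch_begin():
#       ...  # reset per-epoch metrics
#     train_step()  # assumed to increment global_step
#     if helper.epoch_end():
#       ...  # log metrics for helper.current_epoch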
|
apache-2.0
| -7,904,109,597,831,953,000
| 31.861538
| 77
| 0.69382
| false
| 4.007505
| false
| false
| false
|
e27182/nRF52832_pesky
|
external/motion_driver_6.12/eMPL-pythonclient/eMPL-client.py
|
1
|
10573
|
#!/usr/bin/python
# eMPL_client.py
# A PC application for use with Embedded MotionApps.
# Copyright 2012 InvenSense, Inc. All Rights Reserved.
import serial, sys, time, string, pygame
from ponycube import *
class eMPL_packet_reader:
def __init__(self, port, quat_delegate=None, debug_delegate=None, data_delegate=None ):
self.s = serial.Serial(port,115200)
self.s.rtscts = True
self.s.timeout = 0.1
self.s.writeTimeout = 0.2
# TODO: Will this break anything?
##Client attempts to write to eMPL.
#try:
#self.s.write("\n")
#except serial.serialutil.SerialTimeoutException:
#pass # write will timeout if umpl app is already started.
if quat_delegate:
self.quat_delegate = quat_delegate
else:
self.quat_delegate = empty_packet_delegate()
if debug_delegate:
self.debug_delegate = debug_delegate
else:
self.debug_delegate = empty_packet_delegate()
if data_delegate:
self.data_delegate = data_delegate
else:
self.data_delegate = empty_packet_delegate()
self.packets = []
self.length = 0
self.previous = None
def read(self):
NUM_BYTES = 23
p = None
while self.s.inWaiting() >= NUM_BYTES:
rs = self.s.read(NUM_BYTES)
if ord(rs[0]) == ord('$'):
pkt_code = ord(rs[1])
if pkt_code == 1:
d = debug_packet(rs)
self.debug_delegate.dispatch(d)
elif pkt_code == 2:
p = quat_packet(rs)
self.quat_delegate.dispatch(p)
elif pkt_code == 3:
d = data_packet(rs)
self.data_delegate.dispatch(d)
else:
print "no handler for pkt_code",pkt_code
else:
c = ' '
print "serial misaligned!"
while not ord(c) == ord('$'):
c = self.s.read(1)
self.s.read(NUM_BYTES-1)
def write(self,a):
self.s.write(a)
def close(self):
self.s.close()
def write_log(self,fname):
f = open(fname,'w')
for p in self.packets:
f.write(p.logfile_line())
f.close()
# =========== PACKET DELEGATES ==========
class packet_delegate(object):
def loop(self,event):
print "generic packet_delegate loop w/event",event
def dispatch(self,p):
print "generic packet_delegate dispatched",p
class empty_packet_delegate(packet_delegate):
def loop(self,event):
pass
def dispatch(self,p):
pass
class cube_packet_viewer (packet_delegate):
def __init__(self):
self.screen = Screen(480,400,scale=1.5)
self.cube = Cube(30,60,10)
self.q = Quaternion(1,0,0,0)
self.previous = None # previous quaternion
self.latest = None # latest packet (get in dispatch, use in loop)
def loop(self,event):
packet = self.latest
if packet:
q = packet.to_q().normalized()
self.cube.erase(self.screen)
self.cube.draw(self.screen,q)
pygame.display.flip()
self.latest = None
def dispatch(self,p):
if isinstance(p,quat_packet):
self.latest = p
class debug_packet_viewer (packet_delegate):
def loop(self,event):
pass
def dispatch(self,p):
assert isinstance(p,debug_packet);
p.display()
class data_packet_viewer (packet_delegate):
def loop(self,event):
pass
def dispatch(self,p):
assert isinstance(p,data_packet);
p.display()
# =============== PACKETS =================
# For 16-bit signed integers.
def two_bytes(d1,d2):
d = ord(d1)*256 + ord(d2)
if d > 32767:
d -= 65536
return d
# For 32-bit signed integers.
def four_bytes(d1, d2, d3, d4):
d = ord(d1)*(1<<24) + ord(d2)*(1<<16) + ord(d3)*(1<<8) + ord(d4)
    if d >= 2147483648:  # 2**31 and above wrap to negative (two's complement)
        d -= 4294967296
return d
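# Editor's note (illustrative values): two_bytes('\xff', '\xff') == -1 and
# four_bytes('\x80', '\x00', '\x00', '\x00') == -2147483648, i.e. both helpers
# reinterpret the big-endian unsigned value as a signed two's-complement integer.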
class debug_packet (object):
# body of packet is a debug string
def __init__(self,l):
sss = []
for c in l[3:21]:
if ord(c) != 0:
sss.append(c)
self.s = "".join(sss)
def display(self):
sys.stdout.write(self.s)
class data_packet (object):
def __init__(self, l):
self.data = [0,0,0,0,0,0,0,0,0]
self.type = ord(l[2])
if self.type == 0: # accel
self.data[0] = four_bytes(l[3],l[4],l[5],l[6]) * 1.0 / (1<<16)
self.data[1] = four_bytes(l[7],l[8],l[9],l[10]) * 1.0 / (1<<16)
self.data[2] = four_bytes(l[11],l[12],l[13],l[14]) * 1.0 / (1<<16)
elif self.type == 1: # gyro
self.data[0] = four_bytes(l[3],l[4],l[5],l[6]) * 1.0 / (1<<16)
self.data[1] = four_bytes(l[7],l[8],l[9],l[10]) * 1.0 / (1<<16)
self.data[2] = four_bytes(l[11],l[12],l[13],l[14]) * 1.0 / (1<<16)
elif self.type == 2: # compass
self.data[0] = four_bytes(l[3],l[4],l[5],l[6]) * 1.0 / (1<<16)
self.data[1] = four_bytes(l[7],l[8],l[9],l[10]) * 1.0 / (1<<16)
self.data[2] = four_bytes(l[11],l[12],l[13],l[14]) * 1.0 / (1<<16)
elif self.type == 3: # quat
self.data[0] = four_bytes(l[3],l[4],l[5],l[6]) * 1.0 / (1<<30)
self.data[1] = four_bytes(l[7],l[8],l[9],l[10]) * 1.0 / (1<<30)
self.data[2] = four_bytes(l[11],l[12],l[13],l[14]) * 1.0 / (1<<30)
self.data[3] = four_bytes(l[15],l[16],l[17],l[18]) * 1.0 / (1<<30)
elif self.type == 4: # euler
self.data[0] = four_bytes(l[3],l[4],l[5],l[6]) * 1.0 / (1<<16)
self.data[1] = four_bytes(l[7],l[8],l[9],l[10]) * 1.0 / (1<<16)
self.data[2] = four_bytes(l[11],l[12],l[13],l[14]) * 1.0 / (1<<16)
elif self.type == 5: # rot
self.data[0] = two_bytes(l[3],l[4]) * 1.0 / (1<<14)
self.data[1] = two_bytes(l[5],l[6]) * 1.0 / (1<<14)
self.data[2] = two_bytes(l[7],l[8]) * 1.0 / (1<<14)
self.data[3] = two_bytes(l[9],l[10]) * 1.0 / (1<<14)
self.data[4] = two_bytes(l[11],l[12]) * 1.0 / (1<<14)
self.data[5] = two_bytes(l[13],l[14]) * 1.0 / (1<<14)
self.data[6] = two_bytes(l[15],l[16]) * 1.0 / (1<<14)
self.data[7] = two_bytes(l[17],l[18]) * 1.0 / (1<<14)
self.data[8] = two_bytes(l[19],l[20]) * 1.0 / (1<<14)
elif self.type == 6: # heading
self.data[0] = four_bytes(l[3],l[4],l[5],l[6]) * 1.0 / (1<<16)
else: # unsupported
pass
def display(self):
if self.type == 0:
print 'accel: %7.3f %7.3f %7.3f' % \
(self.data[0], self.data[1], self.data[2])
elif self.type == 1:
print 'gyro: %9.5f %9.5f %9.5f' % \
(self.data[0], self.data[1], self.data[2])
elif self.type == 2:
print 'compass: %7.4f %7.4f %7.4f' % \
(self.data[0], self.data[1], self.data[2])
elif self.type == 3:
print 'quat: %7.4f %7.4f %7.4f %7.4f' % \
(self.data[0], self.data[1], self.data[2], self.data[3])
elif self.type == 4:
print 'euler: %7.4f %7.4f %7.4f' % \
(self.data[0], self.data[1], self.data[2])
elif self.type == 5:
print 'rotation matrix: \n%7.3f %7.3f %7.3f\n%7.3f %7.3f %7.3f\n%7.3f %7.3f %7.3f' % \
(self.data[0], self.data[1], self.data[2], self.data[3], \
self.data[4], self.data[5], self.data[6], self.data[7], \
self.data[8])
elif self.type == 6:
print 'heading: %7.4f' % self.data[0]
else:
print 'what?'
class quat_packet (object):
def __init__(self, l):
self.l = l
self.q0 = four_bytes(l[3],l[4],l[5],l[6]) * 1.0 / (1<<30)
self.q1 = four_bytes(l[7],l[8],l[9],l[10]) * 1.0 / (1<<30)
self.q2 = four_bytes(l[11],l[12],l[13],l[14]) * 1.0 / (1<<30)
self.q3 = four_bytes(l[15],l[16],l[17],l[18]) * 1.0 / (1<<30)
def display_raw(self):
l = self.l
print "".join(
[ str(ord(l[0])), " "] + \
[ str(ord(l[1])), " "] + \
[ str(ord(a)).ljust(4) for a in
[ l[2], l[3], l[4], l[5], l[6], l[7], l[8], l[9], l[10] ] ] + \
[ str(ord(a)).ljust(4) for a in
[ l[8], l[9], l[10] , l[11], l[12], l[13]] ]
)
def display(self):
if 1:
print "qs " + " ".join([str(s).ljust(15) for s in
[ self.q0, self.q1, self.q2, self.q3 ]])
if 0:
euler0, euler1, euler2 = self.to_q().get_euler()
print "eulers " + " ".join([str(s).ljust(15) for s in
[ euler0, euler1, euler2 ]])
if 0:
euler0, euler1, euler2 = self.to_q().get_euler()
print "eulers " + " ".join([str(s).ljust(15) for s in
[ (euler0 * 180.0 / 3.14159) - 90 ]])
def to_q(self):
return Quaternion(self.q0, self.q1, self.q2, self.q3)
# =============== MAIN ======================
if __name__ == "__main__":
if len(sys.argv) == 2:
comport = sys.argv[1]
else:
print "usage: " + sys.argv[0] + " port"
sys.exit(-1)
pygame.init()
viewer = cube_packet_viewer()
debug = debug_packet_viewer()
data = data_packet_viewer()
reader = eMPL_packet_reader(comport,
quat_delegate = viewer,
debug_delegate = debug,
data_delegate = data)
while 1:
event = pygame.event.poll()
# TODO: Allow exit via keystroke.
if event.type == pygame.QUIT:
#viewer.close()
break
if event.type == pygame.KEYDOWN:
reader.write(pygame.key.name(event.key))
reader.read()
viewer.loop(event)
debug.loop(event)
data.loop(event)
# TODO: If system load is too high, increase this sleep time.
pygame.time.delay(0)
|
mit
| 4,605,220,104,819,289,600
| 34.333333
| 98
| 0.467795
| false
| 2.996033
| false
| false
| false
|
facebookresearch/ParlAI
|
parlai/tasks/dbll_babi/build.py
|
1
|
1148
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Download and build the data if it does not exist.
from parlai.core.build_data import DownloadableFile
import parlai.core.build_data as build_data
import os
RESOURCES = [
DownloadableFile(
'http://parl.ai/downloads/dbll/dbll.tgz',
'dbll.tgz',
'd8c727dac498b652c7f5de6f72155dce711ff46c88401a303399d3fad4db1e68',
)
]
def build(opt):
dpath = os.path.join(opt['datapath'], 'DBLL')
version = None
if not build_data.built(dpath, version_string=version):
print('[building data: ' + dpath + ']')
if build_data.built(dpath):
# An older version exists, so remove these outdated files.
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
for downloadable_file in RESOURCES:
downloadable_file.download_file(dpath)
# Mark the data as built.
build_data.mark_done(dpath, version_string=version)
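# Editor's sketch (hypothetical direct call; normally the DBLL task teacher invokes
# build() with the full ParlAI opt dict):
#   build({'datapath': '/tmp/parlai_data'})
# which downloads and checksum-verifies dbll.tgz into <datapath>/DBLL on first use.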
|
mit
| 9,206,499,509,482,865,000
| 30.027027
| 75
| 0.671603
| false
| 3.308357
| false
| false
| false
|
mileswwatkins/pupa
|
pupa/scrape/vote_event.py
|
1
|
2639
|
from ..utils import _make_pseudo_id
from .base import BaseModel, cleanup_list, SourceMixin
from .bill import Bill
from .popolo import pseudo_organization
from .schemas.vote_event import schema
class VoteEvent(BaseModel, SourceMixin):
_type = 'vote_event'
_schema = schema
def __init__(self, *, motion_text, start_date, classification, result,
legislative_session=None,
identifier='', bill=None, bill_chamber=None, organization=None, chamber=None):
super(VoteEvent, self).__init__()
self.legislative_session = legislative_session
self.motion_text = motion_text
self.motion_classification = cleanup_list(classification, [])
self.start_date = start_date
self.result = result
self.identifier = identifier
self.set_bill(bill, chamber=bill_chamber)
if isinstance(bill, Bill) and not self.legislative_session:
self.legislative_session = bill.legislative_session
if not self.legislative_session:
raise ValueError('must set legislative_session or bill')
self.organization = pseudo_organization(organization, chamber, 'legislature')
self.votes = []
self.counts = []
def __str__(self):
return '{0} - {1} - {2}'.format(self.legislative_session, self.start_date,
self.motion_text)
def set_bill(self, bill_or_identifier, *, chamber=None):
if not bill_or_identifier:
self.bill = None
elif isinstance(bill_or_identifier, Bill):
if chamber:
raise ValueError("set_bill takes no arguments when using a `Bill` object")
self.bill = bill_or_identifier._id
else:
if chamber is None:
chamber = 'legislature'
kwargs = {'identifier': bill_or_identifier,
'from_organization__classification': chamber}
self.bill = _make_pseudo_id(**kwargs)
def vote(self, option, voter, *, note=''):
self.votes.append({"option": option, "voter_name": voter,
"voter_id": _make_pseudo_id(name=voter), 'note': note})
def yes(self, name, *, id=None, note=''):
return self.vote('yes', name, note=note)
def no(self, name, *, id=None, note=''):
return self.vote('no', name, note=note)
def set_count(self, option, value):
for co in self.counts:
if co['option'] == option:
co['value'] = value
break
else:
self.counts.append({'option': option, 'value': value})
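# Editor's sketch (hypothetical usage, not part of the original module):
#   ve = VoteEvent(motion_text='Motion to adopt HB 1', start_date='2021-01-01',
#                  classification='passage', result='pass',
#                  legislative_session='2021', bill='HB 1', bill_chamber='lower')
#   ve.yes('Smith')
#   ve.no('Jones')
#   ve.set_count('yes', 1)
#   ve.set_count('no', 1)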
|
bsd-3-clause
| 3,333,108,685,144,994,300
| 36.7
| 95
| 0.589996
| false
| 3.852555
| false
| false
| false
|
tvenkat/askbot-devel
|
askbot/deps/django_authopenid/views.py
|
1
|
48584
|
# -*- coding: utf-8 -*-
# Copyright (c) 2007, 2008, Benoît Chesneau
# Copyright (c) 2007 Simon Willison, original work on django-openid
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# * notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# * notice, this list of conditions and the following disclaimer in the
# * documentation and/or other materials provided with the
# * distribution. Neither the name of the <ORGANIZATION> nor the names
# * of its contributors may be used to endorse or promote products
# * derived from this software without specific prior written
# * permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
from django.http import HttpResponseRedirect, get_host, Http404
from django.http import HttpResponse
from django.template import RequestContext, Context
from django.conf import settings
from askbot.conf import settings as askbot_settings
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate
from django.core.urlresolvers import reverse
from django.views.decorators import csrf
from django.utils.encoding import smart_unicode
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.core.mail import send_mail
from recaptcha_works.decorators import fix_recaptcha_remote_ip
from askbot.skins.loaders import render_into_skin, get_template
from urlparse import urlparse
from openid.consumer.consumer import Consumer, \
SUCCESS, CANCEL, FAILURE, SETUP_NEEDED
from openid.consumer.discover import DiscoveryFailure
from openid.extensions import sreg
# needed for some linux distributions like debian
try:
from openid.yadis import xri
except ImportError:
from yadis import xri
try:
from xmlrpclib import Fault as WpFault
from wordpress_xmlrpc import Client
from wordpress_xmlrpc.methods.users import GetUserInfo
except ImportError:
pass
import urllib
from askbot import forms as askbot_forms
from askbot.deps.django_authopenid import util
from askbot.deps.django_authopenid import decorators
from askbot.deps.django_authopenid.models import UserAssociation
from askbot.deps.django_authopenid import forms
from askbot.deps.django_authopenid.backends import AuthBackend
import logging
from askbot.utils.forms import get_next_url
from askbot.utils.http import get_request_info
#todo: decouple from askbot
def login(request,user):
from django.contrib.auth import login as _login
from askbot.models import signals
# get old session key
session_key = request.session.session_key
# login and get new session key
_login(request,user)
# send signal with old session key as argument
logging.debug('logged in user %s with session key %s' % (user.username, session_key))
#todo: move to auth app
signals.user_logged_in.send(
request = request,
user = user,
session_key=session_key,
sender=None
)
#todo: uncouple this from askbot
def logout(request):
    from django.contrib.auth import logout as _logout  # for login, a wrapper called login() is defined above
_logout(request)
def logout_page(request):
data = {
'page_class': 'meta',
'have_federated_login_methods': util.have_enabled_federated_login_methods()
}
return render_into_skin('authopenid/logout.html', data, request)
def get_url_host(request):
if request.is_secure():
protocol = 'https'
else:
protocol = 'http'
host = escape(get_host(request))
return '%s://%s' % (protocol, host)
def get_full_url(request):
return get_url_host(request) + request.get_full_path()
def ask_openid(
request,
openid_url,
redirect_to,
on_failure=None,
sreg_request=None
):
""" basic function to ask openid and return response """
on_failure = on_failure or signin_failure
trust_root = getattr(
settings, 'OPENID_TRUST_ROOT', get_url_host(request) + '/'
)
if xri.identifierScheme(openid_url) == 'XRI' and getattr(
settings, 'OPENID_DISALLOW_INAMES', False
):
msg = _("i-names are not supported")
logging.debug('openid failed because i-names are not supported')
return on_failure(request, msg)
consumer = Consumer(request.session, util.DjangoOpenIDStore())
try:
auth_request = consumer.begin(openid_url)
except DiscoveryFailure:
msg = _(u"OpenID %(openid_url)s is invalid" % {'openid_url':openid_url})
logging.debug(msg)
return on_failure(request, msg)
logging.debug('openid seemed to work')
if sreg_request:
        logging.debug('adding sreg_request')
auth_request.addExtension(sreg_request)
redirect_url = auth_request.redirectURL(trust_root, redirect_to)
logging.debug('redirecting to %s' % redirect_url)
return HttpResponseRedirect(redirect_url)
def complete(request, on_success=None, on_failure=None, return_to=None):
""" complete openid signin """
assert(on_success is not None)
assert(on_failure is not None)
logging.debug('in askbot.deps.django_authopenid.complete')
consumer = Consumer(request.session, util.DjangoOpenIDStore())
# make sure params are encoded in utf8
params = dict((k,smart_unicode(v)) for k, v in request.GET.items())
openid_response = consumer.complete(params, return_to)
try:
logging.debug(u'returned openid parameters were: %s' % unicode(params))
except Exception, e:
logging.critical(u'fix logging statement above ' + unicode(e))
if openid_response.status == SUCCESS:
logging.debug('openid response status is SUCCESS')
return on_success(
request,
openid_response.identity_url,
openid_response
)
elif openid_response.status == CANCEL:
logging.debug('CANCEL')
return on_failure(request, 'The request was canceled')
elif openid_response.status == FAILURE:
logging.debug('FAILURE')
return on_failure(request, openid_response.message)
elif openid_response.status == SETUP_NEEDED:
logging.debug('SETUP NEEDED')
return on_failure(request, 'Setup needed')
else:
logging.debug('BAD OPENID STATUS')
assert False, "Bad openid status: %s" % openid_response.status
def not_authenticated(func):
""" decorator that redirect user to next page if
he/she is already logged in."""
def decorated(request, *args, **kwargs):
if request.user.is_authenticated():
return HttpResponseRedirect(get_next_url(request))
return func(request, *args, **kwargs)
return decorated
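# Editor's note (usage pattern, as applied below to register() and
# signup_with_password()):
#   @not_authenticated
#   def some_view(request): ...   # hypothetical view name
# already-authenticated users are redirected to get_next_url(request) instead.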
def complete_oauth_signin(request):
if 'next_url' in request.session:
next_url = request.session['next_url']
del request.session['next_url']
else:
next_url = reverse('index')
if 'denied' in request.GET:
return HttpResponseRedirect(next_url)
if 'oauth_problem' in request.GET:
return HttpResponseRedirect(next_url)
try:
oauth_token = request.GET['oauth_token']
logging.debug('have token %s' % oauth_token)
oauth_verifier = request.GET['oauth_verifier']
logging.debug('have verifier %s' % oauth_verifier)
session_oauth_token = request.session['oauth_token']
logging.debug('have token from session')
assert(oauth_token == session_oauth_token['oauth_token'])
oauth_provider_name = request.session['oauth_provider_name']
logging.debug('have saved provider name')
del request.session['oauth_provider_name']
oauth = util.OAuthConnection(oauth_provider_name)
user_id = oauth.get_user_id(
oauth_token = session_oauth_token,
oauth_verifier = oauth_verifier
)
logging.debug('have %s user id=%s' % (oauth_provider_name, user_id))
user = authenticate(
oauth_user_id = user_id,
provider_name = oauth_provider_name,
method = 'oauth'
)
logging.debug('finalizing oauth signin')
request.session['email'] = ''#todo: pull from profile
request.session['username'] = ''#todo: pull from profile
return finalize_generic_signin(
request = request,
user = user,
user_identifier = user_id,
login_provider_name = oauth_provider_name,
redirect_url = next_url
)
except Exception, e:
logging.critical(e)
msg = _('Unfortunately, there was some problem when '
'connecting to %(provider)s, please try again '
'or use another provider'
) % {'provider': oauth_provider_name}
request.user.message_set.create(message = msg)
return HttpResponseRedirect(next_url)
#@not_authenticated
@csrf.csrf_protect
def signin(request):
"""
    signin page. It manages the legacy authentication (user/password)
    and openid authentication
    url: /signin/
    template : authopenid/signin.html
"""
logging.debug('in signin view')
on_failure = signin_failure
email_feeds_form = askbot_forms.SimpleEmailSubscribeForm()
#we need a special priority on where to redirect on successful login
#here:
#1) url parameter "next" - if explicitly set
#2) url from django setting LOGIN_REDIRECT_URL
#3) home page of the forum
login_redirect_url = getattr(settings, 'LOGIN_REDIRECT_URL', None)
next_url = get_next_url(request, default = login_redirect_url)
logging.debug('next url is %s' % next_url)
if askbot_settings.ALLOW_ADD_REMOVE_LOGIN_METHODS == False \
and request.user.is_authenticated():
return HttpResponseRedirect(next_url)
if next_url == reverse('user_signin'):
next_url = '%(next)s?next=%(next)s' % {'next': next_url}
login_form = forms.LoginForm(initial = {'next': next_url})
#todo: get next url make it sticky if next is 'user_signin'
if request.method == 'POST':
login_form = forms.LoginForm(request.POST)
if login_form.is_valid():
provider_name = login_form.cleaned_data['login_provider_name']
if login_form.cleaned_data['login_type'] == 'password':
password_action = login_form.cleaned_data['password_action']
if askbot_settings.USE_LDAP_FOR_PASSWORD_LOGIN:
assert(password_action == 'login')
username = login_form.cleaned_data['username']
password = login_form.cleaned_data['password']
# will be None if authentication fails
user = authenticate(
username=username,
password=password,
method = 'ldap'
)
if user is not None:
login(request, user)
return HttpResponseRedirect(next_url)
else:
return finalize_generic_signin(
request = request,
user = user,
user_identifier = username,
                                login_provider_name = provider_name,
redirect_url = next_url
)
else:
if password_action == 'login':
user = authenticate(
username = login_form.cleaned_data['username'],
password = login_form.cleaned_data['password'],
provider_name = provider_name,
method = 'password'
)
if user is None:
login_form.set_password_login_error()
else:
login(request, user)
#todo: here we might need to set cookies
#for external login sites
return HttpResponseRedirect(next_url)
elif password_action == 'change_password':
if request.user.is_authenticated():
new_password = \
login_form.cleaned_data['new_password']
AuthBackend.set_password(
user=request.user,
password=new_password,
provider_name=provider_name
)
request.user.message_set.create(
message = _('Your new password saved')
)
return HttpResponseRedirect(next_url)
else:
logging.critical(
'unknown password action %s' % password_action
)
raise Http404
elif login_form.cleaned_data['login_type'] == 'openid':
#initiate communication process
logging.debug('processing signin with openid submission')
#todo: make a simple-use wrapper for openid protocol
sreg_req = sreg.SRegRequest(optional=['nickname', 'email'])
redirect_to = "%s%s?%s" % (
get_url_host(request),
reverse('user_complete_signin'),
urllib.urlencode({'next':next_url})
)
return ask_openid(
request,
login_form.cleaned_data['openid_url'],
redirect_to,
on_failure=signin_failure,
sreg_request=sreg_req
)
elif login_form.cleaned_data['login_type'] == 'oauth':
try:
#this url may need to have "next" piggibacked onto
callback_url = reverse('user_complete_oauth_signin')
connection = util.OAuthConnection(
provider_name,
callback_url = callback_url
)
connection.start()
request.session['oauth_token'] = connection.get_token()
request.session['oauth_provider_name'] = provider_name
request.session['next_url'] = next_url#special case for oauth
oauth_url = connection.get_auth_url(login_only = False)
return HttpResponseRedirect(oauth_url)
except util.OAuthError, e:
logging.critical(unicode(e))
msg = _('Unfortunately, there was some problem when '
'connecting to %(provider)s, please try again '
'or use another provider'
) % {'provider': provider_name}
request.user.message_set.create(message = msg)
elif login_form.cleaned_data['login_type'] == 'facebook':
#have to redirect for consistency
#there is a requirement that 'complete_signin'
try:
#this call may raise FacebookError
user_id = util.get_facebook_user_id(request)
user = authenticate(
method = 'facebook',
facebook_user_id = user_id
)
return finalize_generic_signin(
request = request,
user = user,
user_identifier = user_id,
login_provider_name = provider_name,
redirect_url = next_url
)
except util.FacebookError, e:
logging.critical(unicode(e))
msg = _('Unfortunately, there was some problem when '
'connecting to %(provider)s, please try again '
'or use another provider'
) % {'provider': 'Facebook'}
request.user.message_set.create(message = msg)
elif login_form.cleaned_data['login_type'] == 'wordpress_site':
#here wordpress_site means for a self hosted wordpress blog not a wordpress.com blog
wp = Client(askbot_settings.WORDPRESS_SITE_URL, login_form.cleaned_data['username'], login_form.cleaned_data['password'])
try:
wp_user = wp.call(GetUserInfo())
custom_wp_openid_url = '%s?user_id=%s' % (wp.url, wp_user.user_id)
user = authenticate(
method = 'wordpress_site',
wordpress_url = wp.url,
wp_user_id = wp_user.user_id
)
return finalize_generic_signin(
request = request,
user = user,
user_identifier = custom_wp_openid_url,
login_provider_name = provider_name,
redirect_url = next_url
)
except WpFault, e:
logging.critical(unicode(e))
msg = _('The login password combination was not correct')
request.user.message_set.create(message = msg)
else:
#raise 500 error - unknown login type
pass
else:
logging.debug('login form is not valid')
logging.debug(login_form.errors)
logging.debug(request.REQUEST)
if request.method == 'GET' and request.user.is_authenticated():
view_subtype = 'change_openid'
else:
view_subtype = 'default'
return show_signin_view(
request,
login_form = login_form,
view_subtype = view_subtype
)
@csrf.csrf_protect
def show_signin_view(
request,
login_form = None,
account_recovery_form = None,
account_recovery_message = None,
sticky = False,
view_subtype = 'default'
):
"""url-less utility function that populates
context of template 'authopenid/signin.html'
and returns its rendered output
"""
allowed_subtypes = (
'default', 'add_openid',
'email_sent', 'change_openid',
'bad_key'
)
assert(view_subtype in allowed_subtypes)
if sticky:
next_url = reverse('user_signin')
else:
next_url = get_next_url(request)
if login_form is None:
login_form = forms.LoginForm(initial = {'next': next_url})
if account_recovery_form is None:
account_recovery_form = forms.AccountRecoveryForm()#initial = initial_data)
#if request is GET
if request.method == 'GET':
logging.debug('request method was GET')
#todo: this sthuff must be executed on some signal
#because askbot should have nothing to do with the login app
from askbot.models import AnonymousQuestion as AQ
session_key = request.session.session_key
logging.debug('retrieving anonymously posted question associated with session %s' % session_key)
qlist = AQ.objects.filter(session_key=session_key).order_by('-added_at')
if len(qlist) > 0:
question = qlist[0]
else:
question = None
from askbot.models import AnonymousAnswer as AA
session_key = request.session.session_key
logging.debug('retrieving posted answer associated with session %s' % session_key)
alist = AA.objects.filter(session_key=session_key).order_by('-added_at')
if len(alist) > 0:
answer = alist[0]
else:
answer = None
if request.user.is_authenticated():
existing_login_methods = UserAssociation.objects.filter(user = request.user)
#annotate objects with extra data
providers = util.get_enabled_login_providers()
for login_method in existing_login_methods:
try:
provider_data = providers[login_method.provider_name]
if provider_data['type'] == 'password':
#only external password logins will not be deletable
#this is because users with those can lose access to their accounts permanently
login_method.is_deletable = provider_data.get('password_changeable', False)
else:
login_method.is_deletable = True
except KeyError:
logging.critical(
'login method %s is no longer available '
'please delete records for this login method '
'from the UserAssociation table',
login_method.provider_name
)
continue
if view_subtype == 'default':
page_title = _('Please click any of the icons below to sign in')
elif view_subtype == 'email_sent':
page_title = _('Account recovery email sent')
elif view_subtype == 'change_openid':
if len(existing_login_methods) == 0:
page_title = _('Please add one or more login methods.')
else:
page_title = _('If you wish, please add, remove or re-validate your login methods')
elif view_subtype == 'add_openid':
page_title = _('Please wait a second! Your account is recovered, but ...')
elif view_subtype == 'bad_key':
page_title = _('Sorry, this account recovery key has expired or is invalid')
logging.debug('showing signin view')
data = {
'page_class': 'openid-signin',
'view_subtype': view_subtype, #add_openid|default
'page_title': page_title,
'question':question,
'answer':answer,
'login_form': login_form,
'use_password_login': util.use_password_login(),
'account_recovery_form': account_recovery_form,
'openid_error_message': request.REQUEST.get('msg',''),
'account_recovery_message': account_recovery_message,
}
major_login_providers = util.get_enabled_major_login_providers()
minor_login_providers = util.get_enabled_minor_login_providers()
#determine if we are only using password login
active_provider_names = [p['name'] for p in major_login_providers.values()]
active_provider_names.extend([p['name'] for p in minor_login_providers.values()])
have_buttons = True
if (len(active_provider_names) == 1 and active_provider_names[0] == 'local'):
if askbot_settings.SIGNIN_ALWAYS_SHOW_LOCAL_LOGIN == True:
#in this case the form is not using javascript, so set initial values
#here
have_buttons = False
login_form.initial['login_provider_name'] = 'local'
if request.user.is_authenticated():
login_form.initial['password_action'] = 'change_password'
else:
login_form.initial['password_action'] = 'login'
data['have_buttons'] = have_buttons
if request.user.is_authenticated():
data['existing_login_methods'] = existing_login_methods
active_provider_names = [
item.provider_name for item in existing_login_methods
]
util.set_login_provider_tooltips(
major_login_providers,
active_provider_names = active_provider_names
)
util.set_login_provider_tooltips(
minor_login_providers,
active_provider_names = active_provider_names
)
data['major_login_providers'] = major_login_providers.values()
data['minor_login_providers'] = minor_login_providers.values()
return render_into_skin('authopenid/signin.html', data, request)
@login_required
def delete_login_method(request):
if askbot_settings.ALLOW_ADD_REMOVE_LOGIN_METHODS == False:
raise Http404
if request.is_ajax() and request.method == 'POST':
provider_name = request.POST['provider_name']
try:
login_method = UserAssociation.objects.get(
user = request.user,
provider_name = provider_name
)
login_method.delete()
return HttpResponse('', mimetype = 'application/json')
except UserAssociation.DoesNotExist:
#error response
message = _('Login method %(provider_name)s does not exist')
return HttpResponse(message, status=500, mimetype = 'application/json')
except UserAssociation.MultipleObjectsReturned:
            logging.critical(
                'have multiple %(provider)s logins for user %(id)s'
                % {'provider': provider_name, 'id': request.user.id}
            )
message = _('Oops, sorry - there was some error - please try again')
return HttpResponse(message, status=500, mimetype = 'application/json')
else:
raise Http404
def complete_signin(request):
""" in case of complete signin with openid """
logging.debug('')#blank log just for the trace
return complete(
request,
on_success = signin_success,
on_failure = signin_failure,
return_to = get_url_host(request) + reverse('user_complete_signin')
)
def signin_success(request, identity_url, openid_response):
"""
this is not a view, has no url pointing to this
this function is called when OpenID provider returns
successful response to user authentication
Does actual authentication in Django site and
redirects to the registration page, if necessary
or adds another login method.
"""
logging.debug('')
openid_data = util.from_openid_response(openid_response) #create janrain OpenID object
request.session['openid'] = openid_data
openid_url = str(openid_data)
user = authenticate(
openid_url = openid_url,
method = 'openid'
)
next_url = get_next_url(request)
provider_name = util.get_provider_name(openid_url)
request.session['email'] = openid_data.sreg.get('email', '')
request.session['username'] = openid_data.sreg.get('nickname', '')
return finalize_generic_signin(
request = request,
user = user,
user_identifier = openid_url,
login_provider_name = provider_name,
redirect_url = next_url
)
def finalize_generic_signin(
request = None,
user = None,
login_provider_name = None,
user_identifier = None,
redirect_url = None
):
"""non-view function
generic signin, run after all protocol-dependent details
have been resolved
"""
if request.user.is_authenticated():
#this branch is for adding a new association
if user is None:
#register new association
UserAssociation(
user = request.user,
provider_name = login_provider_name,
openid_url = user_identifier,
last_used_timestamp = datetime.datetime.now()
).save()
return HttpResponseRedirect(redirect_url)
elif user != request.user:
#prevent theft of account by another pre-existing user
logging.critical(
'possible account theft attempt by %s,%d to %s %d' % \
(
request.user.username,
request.user.id,
user.username,
user.id
)
)
logout(request)#log out current user
login(request, user)#login freshly authenticated user
return HttpResponseRedirect(redirect_url)
else:
#user just checks if another login still works
msg = _('Your %(provider)s login works fine') % \
{'provider': login_provider_name}
request.user.message_set.create(message = msg)
return HttpResponseRedirect(redirect_url)
else:
if user is None:
#need to register
request.method = 'GET'#this is not a good thing to do
#but necessary at the moment to reuse the register()
#method
return register(
request,
login_provider_name=login_provider_name,
user_identifier=user_identifier
)
else:
#login branch
login(request, user)
logging.debug('login success')
return HttpResponseRedirect(redirect_url)
@not_authenticated
@csrf.csrf_protect
def register(request, login_provider_name=None, user_identifier=None):
"""
    this function is used via its own url with request.method=POST
    or as a simple function call from "finalize_generic_signin"
    in which case request.method must be 'GET'
and login_provider_name and user_identifier arguments must not be None
this function may need to be refactored to simplify the usage pattern
template : authopenid/complete.html
"""
logging.debug('')
next_url = get_next_url(request)
user = None
is_redirect = False
username = request.session.get('username', '')
email = request.session.get('email', '')
logging.debug('request method is %s' % request.method)
register_form = forms.OpenidRegisterForm(
initial={
'next': next_url,
'username': request.session.get('username', ''),
'email': request.session.get('email', ''),
}
)
email_feeds_form = askbot_forms.SimpleEmailSubscribeForm()
if request.method == 'GET':
assert(login_provider_name is not None)
assert(user_identifier is not None)
#store this data into the session
#to persist for the post request
request.session['login_provider_name'] = login_provider_name
request.session['user_identifier'] = user_identifier
elif request.method == 'POST':
if 'login_provider_name' not in request.session \
or 'user_identifier' not in request.session:
logging.critical('illegal attempt to register')
return HttpResponseRedirect(reverse('user_signin'))
#load this data from the session
user_identifier = request.session['user_identifier']
login_provider_name = request.session['login_provider_name']
logging.debug('trying to create new account associated with openid')
register_form = forms.OpenidRegisterForm(request.POST)
email_feeds_form = askbot_forms.SimpleEmailSubscribeForm(request.POST)
if not register_form.is_valid():
logging.debug('OpenidRegisterForm is INVALID')
elif not email_feeds_form.is_valid():
logging.debug('SimpleEmailSubscribeForm is INVALID')
else:
logging.debug('OpenidRegisterForm and SimpleEmailSubscribeForm are valid')
is_redirect = True
username = register_form.cleaned_data['username']
email = register_form.cleaned_data['email']
user = User.objects.create_user(username, email)
            logging.debug('creating new openid user association for %s' % username)
UserAssociation(
openid_url = user_identifier,
user = user,
provider_name = login_provider_name,
last_used_timestamp = datetime.datetime.now()
).save()
del request.session['user_identifier']
del request.session['login_provider_name']
logging.debug('logging the user in')
user = authenticate(method = 'force', user_id = user.id)
if user is None:
error_message = 'please make sure that ' + \
'askbot.deps.django_authopenid.backends.AuthBackend' + \
                    ' is in your settings.AUTHENTICATION_BACKENDS'
raise Exception(error_message)
login(request, user)
logging.debug('saving email feed settings')
email_feeds_form.save(user)
#check if we need to post a question that was added anonymously
#this needs to be a function call becase this is also done
#if user just logged in and did not need to create the new account
if user != None:
if askbot_settings.EMAIL_VALIDATION == True:
logging.debug('sending email validation')
send_new_email_key(user, nomessage=True)
output = validation_email_sent(request)
set_email_validation_message(user) #message set after generating view
return output
if user.is_authenticated():
logging.debug('success, send user to main page')
return HttpResponseRedirect(reverse('index'))
else:
logging.debug('have really strange error')
raise Exception('openid login failed')#should not ever get here
providers = {
'yahoo':'<font color="purple">Yahoo!</font>',
'flickr':'<font color="#0063dc">flick</font><font color="#ff0084">r</font>™',
'google':'Google™',
'aol':'<font color="#31658e">AOL</font>',
'myopenid':'MyOpenID',
}
if login_provider_name not in providers:
provider_logo = login_provider_name
logging.error('openid provider named "%s" has no pretty customized logo' % login_provider_name)
else:
provider_logo = providers[login_provider_name]
logging.debug('printing authopenid/complete.html output')
data = {
'openid_register_form': register_form,
'email_feeds_form': email_feeds_form,
'provider':mark_safe(provider_logo),
'username': username,
'email': email,
'login_type':'openid',
'gravatar_faq_url':reverse('faq') + '#gravatar',
}
return render_into_skin('authopenid/complete.html', data, request)
def signin_failure(request, message):
"""
    failure with openid signin. Go back to signin page.
"""
request.user.message_set.create(message = message)
return show_signin_view(request)
@not_authenticated
@decorators.valid_password_login_provider_required
@csrf.csrf_protect
@fix_recaptcha_remote_ip
def signup_with_password(request):
"""Create a password-protected account
template: authopenid/signup_with_password.html
"""
logging.debug(get_request_info(request))
next = get_next_url(request)
login_form = forms.LoginForm(initial = {'next': next})
#this is safe because second decorator cleans this field
provider_name = request.REQUEST['login_provider']
if askbot_settings.USE_RECAPTCHA:
RegisterForm = forms.SafeClassicRegisterForm
else:
RegisterForm = forms.ClassicRegisterForm
logging.debug('request method was %s' % request.method)
if request.method == 'POST':
form = RegisterForm(request.POST)
email_feeds_form = askbot_forms.SimpleEmailSubscribeForm(request.POST)
#validation outside if to remember form values
logging.debug('validating classic register form')
form1_is_valid = form.is_valid()
if form1_is_valid:
logging.debug('classic register form validated')
else:
logging.debug('classic register form is not valid')
form2_is_valid = email_feeds_form.is_valid()
if form2_is_valid:
logging.debug('email feeds form validated')
else:
logging.debug('email feeds form is not valid')
if form1_is_valid and form2_is_valid:
logging.debug('both forms are valid')
next = form.cleaned_data['next']
username = form.cleaned_data['username']
password = form.cleaned_data['password1']
email = form.cleaned_data['email']
provider_name = form.cleaned_data['login_provider']
User.objects.create_user(username, email, password)
logging.debug('new user %s created' % username)
if provider_name != 'local':
raise NotImplementedError('must run create external user code')
user = authenticate(
username = username,
password = password,
provider_name = provider_name,
method = 'password'
)
login(request, user)
logging.debug('new user logged in')
email_feeds_form.save(user)
logging.debug('email feeds form saved')
# send email
#subject = _("Welcome email subject line")
            #message_template = get_template(
# 'authopenid/confirm_email.txt'
#)
#message_context = Context({
# 'signup_url': askbot_settings.APP_URL + reverse('user_signin'),
# 'username': username,
# 'password': password,
#})
#message = message_template.render(message_context)
#send_mail(subject, message, settings.DEFAULT_FROM_EMAIL,
# [user.email])
#logging.debug('new password acct created, confirmation email sent!')
return HttpResponseRedirect(next)
else:
#todo: this can be solved with a decorator, maybe
form.initial['login_provider'] = provider_name
logging.debug('create classic account forms were invalid')
else:
#todo: here we have duplication of get_password_login_provider...
form = RegisterForm(
initial={
'next':next,
'login_provider': provider_name
}
)
email_feeds_form = askbot_forms.SimpleEmailSubscribeForm()
logging.debug('printing legacy signup form')
major_login_providers = util.get_enabled_major_login_providers()
minor_login_providers = util.get_enabled_minor_login_providers()
context_data = {
'form': form,
'page_class': 'openid-signin',
'email_feeds_form': email_feeds_form,
'major_login_providers': major_login_providers.values(),
'minor_login_providers': minor_login_providers.values(),
'login_form': login_form
}
return render_into_skin(
'authopenid/signup_with_password.html',
context_data,
request
)
#what if request is not posted?
@login_required
def signout(request):
"""
signout from the website. Remove openid from session and kill it.
url : /signout/"
"""
logging.debug('')
try:
logging.debug('deleting openid session var')
del request.session['openid']
except KeyError:
logging.debug('failed')
pass
logout(request)
logging.debug('user logged out')
return HttpResponseRedirect(get_next_url(request))
XRDF_TEMPLATE = """<?xml version='1.0' encoding='UTF-8'?>
<xrds:XRDS
xmlns:xrds='xri://$xrds'
xmlns:openid='http://openid.net/xmlns/1.0'
xmlns='xri://$xrd*($v*2.0)'>
<XRD>
<Service>
<Type>http://specs.openid.net/auth/2.0/return_to</Type>
<URI>%(return_to)s</URI>
</Service>
</XRD>
</xrds:XRDS>"""
def xrdf(request):
url_host = get_url_host(request)
return_to = "%s%s" % (url_host, reverse('user_complete_signin'))
return HttpResponse(XRDF_TEMPLATE % {'return_to': return_to})
def find_email_validation_messages(user):
msg_text = _('your email needs to be validated see %(details_url)s') \
% {'details_url':reverse('faq') + '#validate'}
return user.message_set.filter(message__exact=msg_text)
def set_email_validation_message(user):
messages = find_email_validation_messages(user)
msg_text = _('your email needs to be validated see %(details_url)s') \
% {'details_url':reverse('faq') + '#validate'}
if len(messages) == 0:
user.message_set.create(message=msg_text)
def clear_email_validation_message(user):
messages = find_email_validation_messages(user)
messages.delete()
def set_new_email(user, new_email, nomessage=False):
if new_email != user.email:
user.email = new_email
user.email_isvalid = False
user.save()
if askbot_settings.EMAIL_VALIDATION == True:
send_new_email_key(user,nomessage=nomessage)
def _send_email_key(user):
"""private function. sends email containing validation key
to user's email address
"""
subject = _("Recover your %(site)s account") % {'site': askbot_settings.APP_SHORT_NAME}
url = urlparse(askbot_settings.APP_URL)
data = {
'validation_link': url.scheme + '://' + url.netloc + \
reverse(
'user_account_recover',
kwargs={'key':user.email_key}
)
}
template = get_template('authopenid/email_validation.txt')
message = template.render(data)
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [user.email])
def send_new_email_key(user,nomessage=False):
import random
random.seed()
user.email_key = '%032x' % random.getrandbits(128)
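    # The resulting key is a zero-padded, 32-character lowercase hex string
    # (128 random bits), e.g. '00c0ffee...' (illustrative value).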
user.save()
_send_email_key(user)
    if not nomessage:
set_email_validation_message(user)
@login_required
@csrf.csrf_protect
def send_email_key(request):
"""
url = /email/sendkey/
view that is shown right after sending email key
email sending is called internally
raises 404 if email validation is off
if current email is valid shows 'key_not_sent' view of
authopenid/changeemail.html template
"""
if askbot_settings.EMAIL_VALIDATION == True:
if request.user.email_isvalid:
data = {
'email': request.user.email,
'action_type': 'key_not_sent',
'change_link': reverse('user_changeemail')
}
return render_into_skin(
'authopenid/changeemail.html',
data,
request
)
else:
send_new_email_key(request.user)
return validation_email_sent(request)
else:
raise Http404
def account_recover(request, key = None):
"""view similar to send_email_key, except
it allows user to recover an account by entering
his/her email address
this view will both - send the recover link and
process it
url name 'user_account_recover'
"""
if not askbot_settings.ALLOW_ACCOUNT_RECOVERY_BY_EMAIL:
raise Http404
if request.method == 'POST':
form = forms.AccountRecoveryForm(request.POST)
if form.is_valid():
user = form.cleaned_data['user']
send_new_email_key(user, nomessage = True)
message = _(
'Please check your email and visit the enclosed link.'
)
return show_signin_view(
request,
account_recovery_message = message,
view_subtype = 'email_sent'
)
else:
return show_signin_view(
request,
account_recovery_form = form
)
else:
if key is None:
return HttpResponseRedirect(reverse('user_signin'))
user = authenticate(email_key = key, method = 'email')
if user:
if request.user.is_authenticated():
if user != request.user:
logout(request)
login(request, user)
else:
login(request, user)
#need to show "sticky" signin view here
return show_signin_view(
request,
view_subtype = 'add_openid',
sticky = True
)
else:
return show_signin_view(request, view_subtype = 'bad_key')
#internal server view used as return value by other views
def validation_email_sent(request):
"""this function is called only if EMAIL_VALIDATION setting is
set to True bolean value, basically dead now"""
assert(askbot_settings.EMAIL_VALIDATION == True)
logging.debug('')
data = {
'email': request.user.email,
'change_email_url': reverse('user_changeemail'),
'action_type': 'validate'
}
return render_into_skin('authopenid/changeemail.html', data, request)
def verifyemail(request,id=None,key=None):
"""
view that is shown when user clicks email validation link
url = /email/verify/{{user.id}}/{{user.email_key}}/
"""
logging.debug('')
if askbot_settings.EMAIL_VALIDATION == True:
user = User.objects.get(id=id)
if user:
if user.email_key == key:
user.email_isvalid = True
clear_email_validation_message(user)
user.save()
data = {'action_type': 'validation_complete'}
return render_into_skin(
'authopenid/changeemail.html',
data,
request
)
else:
logging.error('hmm, no user found for email validation message - foul play?')
raise Http404
license: gpl-3.0 | hash: -5,702,246,370,040,461,000 | line_mean: 38.724448 | line_max: 137 | alpha_frac: 0.571249 | autogenerated: false | ratio: 4.526085 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: nmercier/linux-cross-gcc | path: linux/lib/python2.7/dist-packages/blueman/plugins/applet/TransferService.py | copies: 1 | size: 3021
# Copyright (C) 2008 Valmantas Paliksa <walmis at balticum-tv dot lt>
# Copyright (C) 2008 Tadas Dailyda <tadas at dailyda dot com>
#
# Licensed under the GNU General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from blueman.Functions import *
from blueman.plugins.AppletPlugin import AppletPlugin
from blueman.main.applet.Transfer import Transfer
from gi.repository import GObject
from gi.repository import Gtk
import dbus
class TransferService(AppletPlugin):
__author__ = "Walmis"
__description__ = _("Provides OBEX file transfer capabilities")
__icon__ = "blueman-send-file"
def on_load(self, applet):
self.Transfer = None
self.add_dbus_method(self.TransferControl, in_signature="ss", out_signature="")
self.add_dbus_method(self.TransferStatus, in_signature="s", out_signature="i")
self.sess_bus = dbus.SessionBus()
self.__watch = dbus.bus.NameOwnerWatch(self.sess_bus, "org.openobex", self.on_obex_owner_changed)
#self.try_start_ods()
def on_unload(self):
if self.__watch:
self.__watch.cancel()
if self.Transfer:
self.Transfer.DisconnectAll()
self.Transfer = None
def on_manager_state_changed(self, state):
if state:
self.try_start_ods()
else:
if self.Transfer:
self.Transfer.DisconnectAll()
self.Transfer = None
def try_start_ods(self):
try:
self.sess_bus.start_service_by_name("org.openobex")
except dbus.DBusException, e:
dprint("Could not acquire obex-data-server", e)
def on_obex_owner_changed(self, owner):
dprint("obex owner changed:", owner)
if owner != "":
self.Transfer = Transfer(self.Applet)
else:
if self.Transfer:
self.Transfer.DisconnectAll()
self.Transfer = None
def TransferControl(self, pattern, action):
dprint(pattern, action)
if not self.Transfer:
return
if action == "destroy":
self.Transfer.destroy_server(pattern)
elif action == "stop":
server = self.Transfer.get_server(pattern)
            if server is not None:
server.Stop()
elif action == "create":
self.Transfer.create_server(pattern)
elif action == "start":
self.Transfer.start_server(pattern)
else:
dprint("Got unknown action")
def TransferStatus(self, pattern):
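        # Return values below (as implemented in this method):
        #   -1 = transfer service unavailable, 0 = no server for this pattern,
        #    1 = server exists but is stopped, 2 = server is started.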
if not self.Transfer:
return -1
server = self.Transfer.get_server(pattern)
        if server is not None:
if server.IsStarted():
return 2
else:
return 1
else:
return 0
license: bsd-3-clause | hash: -387,308,072,825,584,800 | line_mean: 25.973214 | line_max: 99 | alpha_frac: 0.706058 | autogenerated: false | ratio: 3.29085 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: PcBoy111/PCBOT | path: plugins/summary.py | copies: 1 | size: 11839
""" Plugin for generating markov text, or a summary if you will. """
import logging
import random
import re
from collections import defaultdict, deque
from functools import partial
import asyncio
import discord
from pcbot import utils, Annotate, config, Config
import plugins
client = plugins.client # type: discord.Client
try:
import markovify
except ImportError:
logging.warning("Markovify could not be imported and as such !summary +strict will not work.")
# The messages stored per session, where every key is a channel id
stored_messages = defaultdict(partial(deque, maxlen=10000))
logs_from_limit = 5000
max_summaries = 5
max_admin_summaries = 15
update_task = asyncio.Event()
update_task.set()
# Define some regexes for option checking in "summary" command
valid_num = re.compile(r"\*(?P<num>\d+)")
valid_member = utils.member_mention_pattern
valid_member_silent = re.compile(r"@\((?P<name>.+)\)")
valid_role = re.compile(r"<@&(?P<id>\d+)>")
valid_channel = utils.channel_mention_pattern
valid_options = ("+re", "+regex", "+case", "+tts", "+nobot", "+bot", "+coherent", "+strict")
on_no_messages = "**There were no messages to generate a summary from, {0.author.name}.**"
on_fail = "**I was unable to construct a summary, {0.author.name}.**"
summary_options = Config("summary_options", data=dict(no_bot=False, no_self=False), pretty=True)
async def update_messages(channel: discord.Channel):
""" Download messages. """
messages = stored_messages[channel.id] # type: deque
# We only want to log messages when there are none
# Any messages after this logging will be logged in the on_message event
if messages:
return
# Make sure not to download messages twice by setting this handy task
update_task.clear()
# Download logged messages
try:
async for m in client.logs_from(channel, limit=logs_from_limit):
if not m.content:
continue
# We have no messages, so insert each from the left, leaving us with the oldest at index -1
messages.appendleft(m)
except: # When something goes wrong, clear the messages
messages.clear()
finally: # Really have to make sure we clear this task in all cases
update_task.set()
@plugins.event(bot=True, self=True)
async def on_message(message: discord.Message):
""" Whenever a message is sent, see if we can update in one of the channels. """
if message.channel.id in stored_messages and message.content:
stored_messages[message.channel.id].append(message)
async def on_reload(name: str):
""" Preserve the summary message cache when reloading. """
global stored_messages
local_messages = stored_messages
await plugins.reload(name)
stored_messages = local_messages
def indexes_of_word(words: list, word: str):
""" Return a list of indexes with the given word. """
return [i for i, s in enumerate(words) if s.lower() == word]
def random_with_bias(messages: list, word: str):
""" Go through all the messages and try to choose the ones where the given word is
not at the end of the string. """
last_word_messages = []
non_last_word_messages = []
for m in messages:
words = m.split()
if words[-1].lower() == word:
last_word_messages.append(m)
else:
non_last_word_messages.append(m)
if not last_word_messages:
return random.choice(non_last_word_messages)
elif not non_last_word_messages:
return random.choice(last_word_messages)
else:
return random.choice(last_word_messages if random.randint(0, 5) == 0 else non_last_word_messages)
def markov_messages(messages, coherent=False):
""" Generate some kind of markov chain that somehow works with discord.
I found this makes better results than markovify would. """
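    # Rough sketch of the chain with hypothetical messages:
    #   messages = ["the cat sat", "cat naps daily"]
    #   pick a random first word, e.g. "the"; find messages containing the
    #   last word so far ("the cat sat"), append the word following it
    #   ("cat"), and repeat until the break condition below triggers.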
imitated = []
word = ""
    if all(s.startswith("@") or s.startswith("http") for s in messages):
return "**The given phrase would crash the bot.**"
# First word
while True:
m_split = random.choice(messages).split()
if not m_split:
continue
# Choose the first word in the sentence to simulate a markov chain
word = m_split[0]
if not word.startswith("@") and not word.startswith("http"):
break
# Add the first word
imitated.append(word)
valid = []
im = ""
# Next words
while True:
# Set the last word and find all messages with the last word in it
        if im != imitated[-1].lower():
im = imitated[-1].lower()
valid = [m for m in messages if im in m.lower().split()]
# Add a word from the message found
if valid:
            # Choose one of the matched messages and split it into a list of words
m = random_with_bias(valid, im).split()
m_indexes = indexes_of_word(m, im)
m_index = random.choice(m_indexes) # Choose a random index
m_from = m[m_index:]
# Are there more than the matched word in the message (is it not the last word?)
if len(m_from) > 1:
imitated.append(m_from[1]) # Then we'll add the next word
continue
else:
# Have the chance of breaking be 1/4 at start and 1/1 when imitated approaches 150 words
# unless the entire summary should be coherent
chance = 0 if coherent else int(-0.02 * len(imitated) + 4)
chance = chance if chance >= 0 else 0
if random.randint(0, chance) == 0:
break
# Add a random word if all valid messages are one word or there are less than 2 messages
if len(valid) <= 1 or all(len(m.split()) <= 1 for m in valid):
seq = random.choice(messages).split()
word = random.choice(seq)
imitated.append(word)
# Remove links after, because you know
imitated = [s for s in imitated if "http://" not in s and "https://" not in s]
return " ".join(imitated)
def filter_messages(message_content: list, phrase: str, regex: bool=False, case: bool=False):
""" Filter messages by searching and yielding each message. """
for content in message_content:
if regex:
try:
if re.search(phrase, content, 0 if case else re.IGNORECASE):
yield content
except: # Return error message when regex does not work
raise AssertionError("**Invalid regex.**")
elif not regex and (phrase in content if case else phrase.lower() in content.lower()):
yield content
def is_valid_option(arg: str):
if valid_num.match(arg) or valid_member.match(arg) or valid_member_silent.match(arg) \
or valid_channel.match(arg) or valid_role.match(arg):
return True
if arg.lower() in valid_options:
return True
return False
@plugins.command(usage="([*<num>] [@<user/role> ...] [#<channel>] [+re(gex)] [+case] [+tts] [+(no)bot] [+coherent]) "
"[phrase ...]",
pos_check=is_valid_option, aliases="markov")
async def summary(message: discord.Message, *options, phrase: Annotate.Content=None):
""" Run a markov chain through the past 5000 messages + up to another 5000
messages after first use. This command needs some time after the plugin reloads
as it downloads the past 5000 messages in the given channel. """
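    # Illustrative invocations, assuming a "!" command prefix (both the
    # prefix and the names below are hypothetical):
    #   !summary
    #   !summary *3 @member +case some phrase
    #   !summary #other-channel +regex ^hello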
# This dict stores all parsed options as keywords
member, channel, num = [], None, None
regex, case, tts, coherent, strict = False, False, False, False, False
bots = not summary_options.data["no_bot"]
for value in options:
num_match = valid_num.match(value)
if num_match:
assert not num
num = int(num_match.group("num"))
continue
member_match = valid_member.match(value)
if member_match:
member.append(message.server.get_member(member_match.group("id")))
continue
member_match = valid_member_silent.match(value)
if member_match:
member.append(utils.find_member(message.server, member_match.group("name")))
continue
role_match = valid_role.match(value)
if role_match:
role = discord.utils.get(message.server.roles, id=role_match.group("id"))
member.extend(m for m in message.server.members if role in m.roles)
continue
channel_match = valid_channel.match(value)
if channel_match:
assert not channel
channel = utils.find_channel(message.server, channel_match.group())
continue
if value in valid_options:
if value == "+re" or value == "+regex":
regex = True
if value == "+case":
case = True
if value == "+tts":
tts = True
if value == "+coherent":
coherent = True
if value == "+strict":
strict = True
bots = False if value == "+nobot" else True if value == "+bot" else bots
# Assign defaults and number of summaries limit
is_privileged = message.author.permissions_in(message.channel).manage_messages
if num is None or num < 1:
num = 1
elif num > max_admin_summaries and is_privileged:
num = max_admin_summaries
elif num > max_summaries:
num = max_summaries if not is_privileged else num
if not channel:
channel = message.channel
# Check channel permissions after the given channel has been decided
assert channel.permissions_for(message.server.me).read_message_history, "**I can't see this channel.**"
assert not tts or message.author.permissions_in(message.channel).send_tts_messages, \
"**You don't have permissions to send tts messages in this channel.**"
await client.send_typing(message.channel)
await update_task.wait()
await update_messages(channel)
# Split the messages into content and filter member and phrase
if member:
messages = [m for m in stored_messages[channel.id] if m.author in member]
else:
messages = [m for m in stored_messages[channel.id]]
# Filter bot messages or own messages if the option is enabled in the config
if not bots:
messages = [m for m in messages if not m.author.bot]
elif summary_options.data["no_self"]:
messages = [m for m in messages if not m.author.id == client.user.id]
# Convert all messages to content
message_content = [m.clean_content for m in messages]
# Filter looking for phrases if specified
if phrase:
message_content = list(filter_messages(message_content, phrase, regex, case))
command_prefix = config.server_command_prefix(message.server)
# Clean up by removing all commands from the summaries
if phrase is None or not phrase.startswith(command_prefix):
message_content = [s for s in message_content if not s.startswith(command_prefix)]
# Check if we even have any messages
assert message_content, on_no_messages.format(message)
markovify_model = None
if strict:
try:
markovify_model = markovify.Text(message_content)
except NameError:
logging.warning("+strict was used but markovify is not imported")
strict = False
# Generate the summary, or num summaries
for i in range(num):
if strict:
sentence = markovify_model.make_sentence(tries=1000)
else:
sentence = markov_messages(message_content, coherent)
await client.send_message(message.channel, sentence or on_fail.format(message), tts=tts)
license: mit | hash: -7,238,924,322,491,007,000 | line_mean: 35.767081 | line_max: 117 | alpha_frac: 0.633077 | autogenerated: false | ratio: 3.955563 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: hperala/kontuwikibot | path: scripts/pagefromfile.py | copies: 1 | size: 11275
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Bot to upload pages from a file.
This bot takes its input from a file that contains a number of
pages to be put on the wiki. The pages should all have the same
begin and end text (which may not overlap).
By default the text should have the intended title of the page
as the first text in bold (that is, between ''' and '''),
you can modify this behavior with command line options.
The default is not to include the begin and
end text in the page; if you want to include that text, use
the -include option.
Specific arguments:
-start:xxx Specify the text that marks the beginning of a page
-end:xxx Specify the text that marks the end of a page
-file:xxx Give the filename we are getting our material from
(default: dict.txt)
-include The beginning and end markers should be included
in the page.
-titlestart:xxx Use xxx in place of ''' for identifying the
beginning of page title
-titleend:xxx Use xxx in place of ''' for identifying the
end of page title
-notitle do not include the title (including titlestart and
titleend) in the page
-nocontent if the page already contains this statement it is
skipped (example: -nocontent:"{{infobox")
-noredirect do not upload to redirect pages; by default the bot
also writes to pages that are redirects
-summary:xxx Use xxx as the edit summary for the upload - if
a page exists, standard messages are appended
after xxx for appending, prepending, or replacement
-autosummary Use MediaWikis autosummary when creating a new page,
overrides -summary in this case
-minor set minor edit flag on page edits
If the page to be uploaded already exists:
-safe do nothing (default)
-appendtop add the text to the top of it
-appendbottom add the text to the bottom of it
-force overwrite the existing page
"""
#
# (C) Andre Engels, 2004
# (C) Pywikibot team, 2005-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: 209355ac7be2d220436a3b2f9e9c0409a5c8e074 $'
#
import os
import re
import codecs
import pywikibot
from pywikibot import config, Bot, i18n
class NoTitle(Exception):
"""No title found."""
def __init__(self, offset):
"""Constructor."""
self.offset = offset
class PageFromFileRobot(Bot):
"""
Responsible for writing pages to the wiki.
Titles and contents are given by a PageFromFileReader.
"""
def __init__(self, reader, **kwargs):
"""Constructor."""
self.availableOptions.update({
'always': True,
'force': False,
'append': None,
'summary': None,
'minor': False,
'autosummary': False,
'nocontent': '',
'redirect': True
})
super(PageFromFileRobot, self).__init__(**kwargs)
self.reader = reader
def run(self):
"""Start file processing and upload content."""
for title, contents in self.reader.run():
self.save(title, contents)
def save(self, title, contents):
"""Upload page content."""
mysite = pywikibot.Site()
page = pywikibot.Page(mysite, title)
self.current_page = page
if self.getOption('summary'):
comment = self.getOption('summary')
else:
comment = i18n.twtranslate(mysite, 'pagefromfile-msg')
comment_top = comment + " - " + i18n.twtranslate(
mysite, 'pagefromfile-msg_top')
comment_bottom = comment + " - " + i18n.twtranslate(
mysite, 'pagefromfile-msg_bottom')
comment_force = "%s *** %s ***" % (
comment, i18n.twtranslate(mysite, 'pagefromfile-msg_force'))
# Remove trailing newlines (cause troubles when creating redirects)
contents = re.sub('^[\r\n]*', '', contents)
if page.exists():
if not self.getOption('redirect') and page.isRedirectPage():
pywikibot.output(u"Page %s is redirect, skipping!" % title)
return
pagecontents = page.get(get_redirect=True)
if self.getOption('nocontent') != u'':
if pagecontents.find(self.getOption('nocontent')) != -1 or \
pagecontents.find(self.getOption('nocontent').lower()) != -1:
pywikibot.output(u'Page has %s so it is skipped' % self.getOption('nocontent'))
return
if self.getOption('append') == 'top':
pywikibot.output(u"Page %s already exists, appending on top!"
% title)
contents = contents + pagecontents
comment = comment_top
elif self.getOption('append') == 'bottom':
pywikibot.output(u"Page %s already exists, appending on bottom!"
% title)
contents = pagecontents + contents
comment = comment_bottom
elif self.getOption('force'):
pywikibot.output(u"Page %s already exists, ***overwriting!"
% title)
comment = comment_force
else:
pywikibot.output(u"Page %s already exists, not adding!" % title)
return
else:
if self.getOption('autosummary'):
comment = ''
config.default_edit_summary = ''
self.userPut(page, page.text, contents,
summary=comment,
minor=self.getOption('minor'),
show_diff=False,
ignore_save_related_errors=True)
class PageFromFileReader:
"""
Responsible for reading the file.
The run() method yields a (title, contents) tuple for each found page.
"""
def __init__(self, filename, pageStartMarker, pageEndMarker,
titleStartMarker, titleEndMarker, include, notitle):
"""Constructor.
        Check if self.filename exists. If not, ask for a new filename.
User can quit.
"""
self.filename = filename
self.pageStartMarker = pageStartMarker
self.pageEndMarker = pageEndMarker
self.titleStartMarker = titleStartMarker
self.titleEndMarker = titleEndMarker
self.include = include
self.notitle = notitle
def run(self):
"""Read file and yield page title and content."""
pywikibot.output('\n\nReading \'%s\'...' % self.filename)
try:
with codecs.open(self.filename, 'r',
encoding=config.textfile_encoding) as f:
text = f.read()
except IOError as err:
pywikibot.output(str(err))
raise IOError
position = 0
length = 0
while True:
try:
length, title, contents = self.findpage(text[position:])
except AttributeError:
if not length:
pywikibot.output(u'\nStart or end marker not found.')
else:
pywikibot.output(u'End of file.')
break
except NoTitle as err:
pywikibot.output(u'\nNo title found - skipping a page.')
position += err.offset
continue
position += length
yield title, contents
def findpage(self, text):
"""Find page to work on."""
pageR = re.compile(re.escape(self.pageStartMarker) + "(.*?)" +
re.escape(self.pageEndMarker), re.DOTALL)
titleR = re.compile(re.escape(self.titleStartMarker) + "(.*?)" +
re.escape(self.titleEndMarker))
location = pageR.search(text)
if self.include:
contents = location.group()
else:
contents = location.group(1)
try:
title = titleR.search(contents).group(1)
if self.notitle:
# Remove title (to allow creation of redirects)
contents = titleR.sub('', contents, count=1)
except AttributeError:
raise NoTitle(location.end())
else:
return location.end(), title, contents
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
# Adapt these to the file you are using. 'pageStartMarker' and
# 'pageEndMarker' are the beginning and end of each entry. Take text that
# should be included and does not occur elsewhere in the text.
# TODO: make config variables for these.
filename = "dict.txt"
pageStartMarker = "{{-start-}}"
pageEndMarker = "{{-stop-}}"
titleStartMarker = u"'''"
titleEndMarker = u"'''"
options = {}
include = False
notitle = False
for arg in pywikibot.handle_args(args):
if arg.startswith("-start:"):
pageStartMarker = arg[7:]
elif arg.startswith("-end:"):
pageEndMarker = arg[5:]
elif arg.startswith("-file:"):
filename = arg[6:]
elif arg == "-include":
include = True
elif arg.startswith('-append') and arg[7:] in ('top', 'bottom'):
options['append'] = arg[7:]
elif arg == "-force":
options['force'] = True
elif arg == "-safe":
options['force'] = False
options['append'] = None
elif arg == "-noredirect":
options['redirect'] = False
elif arg == '-notitle':
notitle = True
elif arg == '-minor':
options['minor'] = True
elif arg.startswith('-nocontent:'):
options['nocontent'] = arg[11:]
elif arg.startswith("-titlestart:"):
titleStartMarker = arg[12:]
elif arg.startswith("-titleend:"):
titleEndMarker = arg[10:]
elif arg.startswith("-summary:"):
options['summary'] = arg[9:]
elif arg == '-autosummary':
options['autosummary'] = True
else:
pywikibot.output(u"Disregarding unknown argument %s." % arg)
failed_filename = False
while not os.path.isfile(filename):
pywikibot.output('\nFile \'%s\' does not exist. ' % filename)
_input = pywikibot.input(
'Please enter the file name [q to quit]:')
if _input == 'q':
failed_filename = True
break
else:
filename = _input
# show help text from the top of this file if reader failed
# or User quit.
if failed_filename:
pywikibot.showHelp()
else:
reader = PageFromFileReader(filename, pageStartMarker, pageEndMarker,
titleStartMarker, titleEndMarker, include,
notitle)
bot = PageFromFileRobot(reader, **options)
bot.run()
if __name__ == "__main__":
main()
license: mit | hash: 5,619,537,847,152,305,000 | line_mean: 33.270517 | line_max: 99 | alpha_frac: 0.565233 | autogenerated: false | ratio: 4.400859 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: nilsbore/mongodb_store | path: mongodb_store/scripts/replicator_node.py | copies: 1 | size: 4745
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Provides a service to store ROS message objects in a mongodb database in JSON.
"""
import rospy
import actionlib
import pymongo
import os
import shutil
import subprocess
from mongodb_store_msgs.msg import MoveEntriesAction, MoveEntriesFeedback
from datetime import *
import mongodb_store.util
MongoClient = mongodb_store.util.import_MongoClient()
class Replicator(object):
def __init__(self):
# don't start up until master is there
if not mongodb_store.util.wait_for_mongo():
raise Exception("No Datacentre?")
# this is just a test, connections are remade every call for long-running processes
master, extras = self.make_connections()
if master is None:
raise Exception("No master datacentre found using mongodb_host and mongodb_port")
self.server = actionlib.SimpleActionServer('move_mongodb_entries', MoveEntriesAction, self.move_entries, False)
self.server.start()
self.dump_path = '/tmp/mongodb_replicator'
self.make_path()
self.remove_path()
def make_path(self):
if not os.path.isdir(self.dump_path):
os.makedirs(self.dump_path)
elif not os.access(self.dump_path, os.W_OK):
raise Exception('Cannot write to dump path: %s' % self.dump_path)
def remove_path(self):
shutil.rmtree(self.dump_path)
def make_connections(self):
mongodb_host = rospy.get_param("mongodb_host")
mongodb_port = rospy.get_param("mongodb_port")
master = None
try:
master = MongoClient(mongodb_host, mongodb_port)
except pymongo.errors.ConnectionFailure, e:
rospy.logwarn('Could not connect to master datacentre at %s:%s' % (mongodb_host, mongodb_port))
return None, None
extras = rospy.get_param('mongodb_store_extras', [])
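        # 'mongodb_store_extras' is expected to be a list of [host, port]
        # pairs, e.g. [["backup-host", 62345]] (host and port here are
        # illustrative only).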
extra_clients = []
for extra in extras:
try:
extra_clients.append(MongoClient(extra[0], extra[1]))
except pymongo.errors.ConnectionFailure, e:
rospy.logwarn('Could not connect to extra datacentre at %s:%s' % (extra[0], extra[1]))
        rospy.loginfo('Replicating content from %s:%s to a further %s datacentres', mongodb_host, mongodb_port, len(extra_clients))
return master, extra_clients
def move_entries(self, goal):
        # create place to put temp stuff
self.make_path()
# don't use the connections, just sanity check their existence
master, extras = self.make_connections()
if len(extras) == 0:
rospy.logwarn('No datacentres to move to, not performing move')
self.server.set_aborted()
return
completed = []
feedback = MoveEntriesFeedback(completed=completed)
less_time_time = rospy.get_rostime() - goal.move_before
for collection in goal.collections.data:
self.do_dump(collection, master, less_time_time)
self.do_restore(extras)
if goal.delete_after_move:
for collection in goal.collections.data:
self.do_delete(collection, master, less_time_time)
# clean up
self.remove_path()
self.server.set_succeeded()
def do_restore(self, extras, db='message_store'):
# restore collection to extras
for extra in extras:
rest_args = ['mongorestore', '--host', extra.host, '--port', str(extra.port), self.dump_path]
subprocess.call(rest_args)
def do_delete(self, collection, master, less_time_time=None, db='message_store'):
coll = master[db][collection]
spec = None
if less_time_time is not None:
spec = {"_meta.inserted_at": { "$lt": datetime.utcfromtimestamp(less_time_time.to_sec())}}
coll.remove(spec)
def do_dump(self, collection, master, less_time_time=None, db='message_store'):
# dump collection
# print 'dumping ', collection
args = ['mongodump', '--host', master.host, '--port', str(master.port), '--db', db, '--collection', collection, '-o', self.dump_path]
if less_time_time is not None:
            # match only objects with an inserted date less than this
args.append('--query')
args.append('{ \"_meta.inserted_at\": { $lt: new Date(%s)}}' % (less_time_time.secs * 1000))
# print args
subprocess.call(args)
if __name__ == '__main__':
rospy.init_node("mongodb_replicator")
store = Replicator()
rospy.spin()
license: bsd-3-clause | hash: -4,346,565,988,069,184,500 | line_mean: 31.951389 | line_max: 144 | alpha_frac: 0.602529 | autogenerated: false | ratio: 3.92798 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: pbougue/navitia | path: source/jormungandr/jormungandr/realtime_schedule/tests/sytral_test.py | copies: 1 | size: 12100
# coding=utf-8
# Copyright (c) 2001-2016, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, division
import mock
from jormungandr.realtime_schedule.sytral import Sytral
import validators
import datetime
import pytz
import pytest
def make_url_test():
sytral = Sytral(id='tata', service_url='http://bob.com/')
url = sytral._make_url(MockRoutePoint(line_code='line_toto', stop_id='stop_tutu'))
# it should be a valid url
assert validators.url(url)
assert url == 'http://bob.com/?stop_id=stop_tutu'
def make_url_invalid_code_test():
"""
test make_url when RoutePoint does not have a mandatory code
we should not get any url
"""
sytral = Sytral(id='tata', service_url='http://bob.com/')
url = sytral._make_url(MockRoutePoint(line_code='line_toto', stop_id=None))
assert url is None
class MockResponse(object):
def __init__(self, data, status_code, url, *args, **kwargs):
self.data = data
self.status_code = status_code
self.url = url
def json(self):
return self.data
class MockRequests(object):
def __init__(self, responses):
self.responses = responses
def get(self, url, *args, **kwargs):
return MockResponse(self.responses[url][0], self.responses[url][1], url)
@pytest.fixture(scope="module")
def mock_multiline_response():
return {
"departures": [
{
"direction_id": "3341",
"direction_name": "Piscine Chambéry",
"datetime": "2016-04-11T14:37:15+02:00",
"type": "E",
"line": "05A",
"stop": "42",
},
{
"direction_id": "3341",
"direction_name": "Piscine Chambéry",
"datetime": "2016-04-11T14:38:15+02:00",
"type": "E",
"line": "04",
"stop": "42",
},
{
"direction_id": "3341",
"direction_name": "Piscine Chambéry",
"datetime": "2016-04-11T14:45:35+02:00",
"type": "E",
"line": "05B",
"stop": "42",
},
{
"direction_id": "3341",
"direction_name": "Piscine Chambéry",
"datetime": "2016-04-11T14:49:35+02:00",
"type": "E",
"line": "04",
"stop": "42",
},
]
}
@pytest.fixture(scope="module")
def mock_good_response():
return {
"departures": [
{
"direction_id": "3341",
"direction_name": "Piscine Chambéry",
"datetime": "2016-04-11T14:37:15+02:00",
"type": "E",
"line": "05",
"stop": "42",
},
{
"direction_id": "3341",
"direction_name": "Piscine Chambéry",
"datetime": "2016-04-11T14:38:15+02:00",
"type": "E",
"line": "04",
"stop": "42",
},
{
"direction_id": "3341",
"direction_name": "Piscine Chambéry",
"datetime": "2016-04-11T14:45:35+02:00",
"type": "E",
"line": "05",
"stop": "42",
},
{
"direction_id": "3341",
"direction_name": "Piscine Chambéry",
"datetime": "2016-04-11T14:49:35+02:00",
"type": "E",
"line": "04",
"stop": "42",
},
]
}
@pytest.fixture(scope="module")
def mock_empty_response():
return {}
@pytest.fixture(scope="module")
def mock_no_departure_response():
return {"departures": []}
@pytest.fixture(scope="module")
def mock_missing_line_response():
return {
"departures": [
{
"direction_id": "3341",
"direction_name": "Piscine Chambéry",
"datetime": "2016-04-11T14:38:15+02:00",
"type": "E",
"line": "04",
"stop": "42",
},
{
"direction_id": "3341",
"direction_name": "Piscine Chambéry",
"datetime": "2016-04-11T14:49:35+02:00",
"type": "E",
"line": "04",
"stop": "42",
},
]
}
@pytest.fixture(scope="module")
def mock_theoric_response():
return {
"departures": [
{
"direction_id": "3341",
"direction_name": "Piscine Chambéry",
"datetime": "2016-04-11T14:37:15+01:00",
"type": "T",
"line": "05",
"stop": "42",
},
{
"direction_id": "3341",
"direction_name": "Piscine Chambéry",
"datetime": "2016-04-11T14:38:15+01:00",
"type": "E",
"line": "04",
"stop": "42",
},
{
"direction_id": "3341",
"direction_name": "Piscine Chambéry",
"datetime": "2016-04-11T14:45:35+01:00",
"type": "E",
"line": "05",
"stop": "42",
},
{
"direction_id": "3341",
"direction_name": "Piscine Chambéry",
"datetime": "2016-04-11T14:49:35+01:00",
"type": "E",
"line": "04",
"stop": "42",
},
]
}
class MockRoutePoint(object):
def __init__(self, *args, **kwargs):
        line = kwargs['line_code']
        if isinstance(line, list):
            self._hardcoded_line_ids = line
        else:
            self._hardcoded_line_ids = [line]
self._hardcoded_stop_id = kwargs['stop_id']
def fetch_stop_id(self, object_id_tag):
return self._hardcoded_stop_id
def fetch_all_line_id(self, object_id_tag):
return self._hardcoded_line_ids
def next_passage_for_route_point_test(mock_good_response):
"""
test the whole next_passage_for_route_point
    mock the http call to return a good response; we should get some next_passages
"""
sytral = Sytral(id='tata', service_url='http://bob.com/')
mock_requests = MockRequests({'http://bob.com/?stop_id=42': (mock_good_response, 200)})
route_point = MockRoutePoint(line_code='05', stop_id='42')
with mock.patch('requests.get', mock_requests.get):
passages = sytral.next_passage_for_route_point(route_point)
assert len(passages) == 2
assert passages[0].datetime == datetime.datetime(2016, 4, 11, 12, 37, 15, tzinfo=pytz.UTC)
assert passages[0].is_real_time
assert passages[1].datetime == datetime.datetime(2016, 4, 11, 12, 45, 35, tzinfo=pytz.UTC)
assert passages[1].is_real_time
def next_passage_for_empty_response_test(mock_empty_response):
"""
test the whole next_passage_for_route_point
    mock the http call to return an empty response; we should get None
"""
sytral = Sytral(id='tata', service_url='http://bob.com/')
mock_requests = MockRequests({'http://bob.com/?stop_id=42': (mock_empty_response, 500)})
route_point = MockRoutePoint(line_code='05', stop_id='42')
with mock.patch('requests.get', mock_requests.get):
passages = sytral.next_passage_for_route_point(route_point)
assert passages is None
def next_passage_for_no_departures_response_test(mock_no_departure_response):
"""
test the whole next_passage_for_route_point
    mock the http call to return a response without any departures; we should get no departures
"""
sytral = Sytral(id='tata', service_url='http://bob.com/')
mock_requests = MockRequests({'http://bob.com/?stop_id=42': (mock_no_departure_response, 200)})
route_point = MockRoutePoint(line_code='05', stop_id='42')
with mock.patch('requests.get', mock_requests.get):
passages = sytral.next_passage_for_route_point(route_point)
assert passages == []
def next_passage_for_missing_line_response_test(mock_missing_line_response):
"""
test the whole next_passage_for_route_point
    mock the http call to return a response without the wanted line; we should get no departures
"""
sytral = Sytral(id='tata', service_url='http://bob.com/', service_args={'a': 'bobette', 'b': '12'})
mock_requests = MockRequests({'http://bob.com/?stop_id=42': (mock_missing_line_response, 200)})
route_point = MockRoutePoint(line_code='05', stop_id='42')
with mock.patch('requests.get', mock_requests.get):
passages = sytral.next_passage_for_route_point(route_point)
assert passages == []
def next_passage_with_theoric_time_response_test(mock_theoric_response):
"""
test the whole next_passage_for_route_point
    mock the http call to return a response with a theoretical time; the first departure should not be real time
"""
sytral = Sytral(id='tata', service_url='http://bob.com/', service_args={'a': 'bobette', 'b': '12'})
mock_requests = MockRequests({'http://bob.com/?stop_id=42': (mock_theoric_response, 200)})
route_point = MockRoutePoint(line_code='05', stop_id='42')
with mock.patch('requests.get', mock_requests.get):
passages = sytral.next_passage_for_route_point(route_point)
assert len(passages) == 2
assert passages[0].datetime == datetime.datetime(2016, 4, 11, 13, 37, 15, tzinfo=pytz.UTC)
assert not passages[0].is_real_time
assert passages[1].datetime == datetime.datetime(2016, 4, 11, 13, 45, 35, tzinfo=pytz.UTC)
assert passages[1].is_real_time
def status_test():
sytral = Sytral(
id=u'tata-é$~#@"*!\'`§èû', service_url='http://bob.com/', service_args={'a': 'bobette', 'b': '12'}
)
status = sytral.status()
assert status['id'] == u"tata-é$~#@\"*!'`§èû"
def next_passage_for_route_point_multiline_test(mock_multiline_response):
"""
test the whole next_passage_for_route_point with a routepoint having multiple SAE lines
"""
sytral = Sytral(id='tata', service_url='http://bob.com/')
mock_requests = MockRequests({'http://bob.com/?stop_id=42': (mock_multiline_response, 200)})
route_point = MockRoutePoint(line_code=['05A', '05B'], stop_id='42')
with mock.patch('requests.get', mock_requests.get):
passages = sytral.next_passage_for_route_point(route_point)
assert len(passages) == 2
assert passages[0].datetime == datetime.datetime(2016, 4, 11, 12, 37, 15, tzinfo=pytz.UTC)
assert passages[0].is_real_time
assert passages[1].datetime == datetime.datetime(2016, 4, 11, 12, 45, 35, tzinfo=pytz.UTC)
assert passages[1].is_real_time
license: agpl-3.0 | hash: -1,132,391,332,243,707,600 | line_mean: 31.380697 | line_max: 106 | alpha_frac: 0.556466 | autogenerated: false | ratio: 3.474684 | config_test: true | has_no_keywords: false | few_assignments: false

repo_name: chireiden/shanghai | path: tests/test_config.py | copies: 1 | size: 11543
# Copyright © 2016 Lars Peter Søndergaard <lps@chireiden.net>
# Copyright © 2016 FichteFoll <fichtefoll2@googlemail.com>
#
# This file is part of Shanghai, an asynchronous multi-server IRC bot.
#
# Shanghai is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shanghai is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Shanghai. If not, see <http://www.gnu.org/licenses/>.
import os
import tempfile
from textwrap import dedent
import pytest
from ruamel import yaml as ryaml
from shanghai.config import (
Server, Configuration, ConfigurationError, ShanghaiConfiguration,
FallbackConfiguration, NetworkConfiguration,
)
@pytest.fixture(scope='module')
def load():
def _load(yaml_string):
return ryaml.safe_load(dedent(yaml_string))
return _load
class TestServer:
def test_defaults(self):
server = Server.with_optional_port("host")
assert server.host == "host"
assert server.port == 6667
assert server.ssl is False
server = Server.with_optional_port("host", ssl=True)
assert server.host == "host"
assert server.port == 6697
assert server.ssl is True
def test_from_string(self):
server = Server.from_string("my_host:999")
assert server.host == "my_host"
assert server.port == 999
assert server.ssl is False
server = Server.from_string("my_host:+123")
assert server.host == "my_host"
assert server.port == 123
assert server.ssl is True
@pytest.mark.parametrize(
"source,expected",
[
("my_host:123", "my_host:123"),
("my_host:+123", "my_host:+123"),
("my_host:", "my_host:6667"),
("my_host:+", "my_host:+6697"),
]
)
def test_str(self, source, expected):
server = Server.from_string(source)
assert str(server) == expected
class TestConfig:
@pytest.fixture(scope='class')
def fake_yaml(self):
return {
'foo': 123,
'bar': {
'foo': "baz",
'bar': None,
},
'ellipsis': ...,
}
@pytest.fixture(scope='class')
def c(self, fake_yaml):
return Configuration(fake_yaml)
def test_init(self):
assert Configuration()
with pytest.raises(ValueError) as excinfo:
Configuration([])
excinfo.match("Must be a mapping")
with pytest.raises(ValueError) as excinfo:
Configuration("str")
excinfo.match("Must be a mapping")
def test_get(self, c, fake_yaml):
assert c.get('foo', 456) == 123
assert c.get('bar') == fake_yaml['bar']
assert c.get('bar.foo') == "baz"
assert c.get('bar.bar', 123) is None
assert c.get('baz') is None
assert c.get('baz', 123) == 123
assert c.get('baz', 123) == 123
assert c.get('bar.baz', 234) == 234
assert c.get('baz.baz', 234) == 234
with pytest.raises(KeyError) as excinfo:
c.get('foo.baz')
excinfo.match("Element ['\"]foo['\"] is not a mapping")
with pytest.raises(KeyError) as excinfo:
c.get('bar..baz')
excinfo.match("Empty sub-key after ['\"]bar['\"]")
def test_getitem(self, c, fake_yaml):
assert c['foo'] == 123
assert c['bar'] == fake_yaml['bar']
assert c['bar.foo'] == "baz"
assert c['bar.bar'] is None
assert c['ellipsis'] is ...
with pytest.raises(KeyError) as excinfo:
c['foo.baz']
excinfo.match("Element ['\"]foo['\"] is not a mapping")
with pytest.raises(KeyError) as excinfo:
c['foo.baz.bar']
excinfo.match("Element ['\"]foo['\"] is not a mapping")
with pytest.raises(KeyError) as excinfo:
c['baz']
excinfo.match("Cannot find ['\"]baz['\"]")
with pytest.raises(KeyError) as excinfo:
c['bar.baz']
excinfo.match("Cannot find ['\"]bar.baz['\"]")
with pytest.raises(KeyError) as excinfo:
c['bar..baz']
excinfo.match("Empty sub-key after ['\"]bar['\"]")
def test_contains(self, c):
assert 'foo' in c
assert 'bar.foo' in c
assert 'baz' not in c
assert 'bar.baz' not in c
assert 'ellipsis' in c
class TestFallbackConfig:
@pytest.fixture(scope='class')
def fake_yaml(self):
return {
'foo': 456,
'ellipsis': ...,
}
@pytest.fixture(scope='class')
def fake_fallback_yaml(self):
return {
'foo': 123,
'bar': {
'foo': "baz",
'bar': None,
},
}
@pytest.fixture(scope='class')
def fb_c(self, fake_yaml, fake_fallback_yaml):
return FallbackConfiguration(fake_yaml, Configuration(fake_fallback_yaml))
def test_get(self, fb_c, fake_fallback_yaml):
assert fb_c.get('foo') == 456
assert fb_c.get('bar') == fake_fallback_yaml['bar']
assert fb_c.get('bar.foo') == 'baz'
assert fb_c.get('bar.baz') is None
with pytest.raises(KeyError) as excinfo:
fb_c.get('foo.baz')
excinfo.match("Element ['\"]foo['\"] is not a mapping")
with pytest.raises(KeyError) as excinfo:
fb_c.get('bar..baz')
excinfo.match("Empty sub-key after ['\"]bar['\"]")
def test_getitem(self, fb_c, fake_fallback_yaml):
assert fb_c['foo'] == 456
assert fb_c['bar'] == fake_fallback_yaml['bar']
assert fb_c['bar.foo'] == "baz"
assert fb_c['bar.bar'] is None
assert fb_c['ellipsis'] is ...
with pytest.raises(KeyError) as excinfo:
fb_c['foo.baz']
excinfo.match("Element ['\"]foo['\"] is not a mapping")
with pytest.raises(KeyError) as excinfo:
fb_c['bar.foo.bar']
excinfo.match("Element ['\"]bar.foo['\"] is not a mapping")
with pytest.raises(KeyError) as excinfo:
fb_c['baz']
excinfo.match("Cannot find ['\"]baz['\"]")
with pytest.raises(KeyError) as excinfo:
fb_c['bar.baz']
excinfo.match("Cannot find ['\"]bar.baz['\"]")
with pytest.raises(KeyError) as excinfo:
fb_c['bar..baz']
excinfo.match("Empty sub-key after ['\"]bar['\"]")
def test_contains(self, fb_c):
assert 'foo' in fb_c
assert 'bar.foo' in fb_c
assert 'baz' not in fb_c
assert 'bar.baz' not in fb_c
assert 'ellipsis' in fb_c
class TestNetworkConfig():
@pytest.fixture
def base_yaml(self, load):
return load("""\
name: Network2
nick: Nick
user: User
realname: Realname
servers:
- irc.foobar.net:+
""")
def test_init(self, base_yaml):
nw_c = NetworkConfiguration("my_netw", base_yaml)
assert nw_c.name == "my_netw"
def test_require_keys(self, base_yaml):
test_yaml = base_yaml.copy()
del test_yaml['nick']
with pytest.raises(ConfigurationError) as excinfo:
NetworkConfiguration("my_netw", test_yaml)
excinfo.match("Network ['\"]my_netw['\"] is missing the following options: nick")
del test_yaml['user']
del test_yaml['realname']
with pytest.raises(ConfigurationError) as excinfo:
NetworkConfiguration("my_netw", test_yaml)
excinfo.match("Network ['\"]my_netw['\"] is missing the following options:"
" nick, realname, user")
def test_parse_servers(self, base_yaml):
nw_c = NetworkConfiguration("my_netw", base_yaml)
assert len(nw_c.servers) == 1
assert isinstance(nw_c.servers[0], Server)
assert nw_c.servers[0].host == "irc.foobar.net"
assert nw_c.servers[0].port == 6697
assert nw_c.servers[0].ssl is True
del base_yaml['servers'][0]
with pytest.raises(ConfigurationError) as excinfo:
NetworkConfiguration("my_netw", base_yaml)
excinfo.match("Network ['\"]my_netw['\"] has no servers")
base_yaml['servers'] = "a string"
with pytest.raises(ConfigurationError) as excinfo:
NetworkConfiguration("my_netw", base_yaml)
excinfo.match("Servers of Network ['\"]my_netw['\"] are not a list")
del base_yaml['servers']
with pytest.raises(ConfigurationError) as excinfo:
NetworkConfiguration("my_netw", base_yaml)
excinfo.match("Network ['\"]my_netw['\"] has no servers")
@pytest.mark.skip("feature to be moved elsewhere")
def test_fix_channels(self):
pass
class TestShanghaiConfig:
@pytest.fixture
def sample_yaml(self, load):
return load('''\
nick: TestBot
realname: Sample Bot
logging:
level: INFO
encoding: utf-16
networks:
sample_network:
user: Shanghai
fallback_encoding: cp1252
servers:
- host: irc.example.org
ssl: true
- irc.example.org:6667
            # TODO re-add this once the channels core plugin exists and it's not modified anymore
#channels:
# foochannel:
# barchannel: null
# otherchannel:
# key: some_key
# '##foobar':
second_network:
nick: NickOverride
user: Shanghai2
servers:
- host: irc.foobar.net
ssl: true
''')
def test_init(self, sample_yaml):
config = ShanghaiConfiguration(sample_yaml)
assert config['logging.level'] == 'INFO'
def test_parse_networks(self, sample_yaml):
config = ShanghaiConfiguration(sample_yaml)
networks = config.networks
assert len(networks) == 2
assert isinstance(networks[0], NetworkConfiguration)
netw_map = {netw.name: netw for netw in networks}
assert netw_map['sample_network']['nick'] == "TestBot"
assert netw_map['sample_network']['user'] == "Shanghai"
assert netw_map['sample_network']['encoding'] == "utf-16"
assert netw_map['second_network']['nick'] == "NickOverride"
assert netw_map['second_network']['user'] == "Shanghai2"
del sample_yaml['networks']
with pytest.raises(ConfigurationError) as excinfo:
ShanghaiConfiguration(sample_yaml)
excinfo.match("No networks found")
def test_fileloading(self, sample_yaml):
# Cannot use tempfile.NamedTemporaryFile because of Windows's file locks
fd, fname = tempfile.mkstemp('w')
try:
with open(fd, 'w', encoding='utf-8') as f:
ryaml.dump(sample_yaml, f)
config = ShanghaiConfiguration.from_filename(fname)
finally:
os.remove(fname)
assert config.mapping == sample_yaml
license: gpl-3.0 | hash: -8,030,794,685,028,742,000 | line_mean: 30.790634 | line_max: 96 | alpha_frac: 0.566464 | autogenerated: false | ratio: 3.860823 | config_test: true | has_no_keywords: false | few_assignments: false